linux-mm.kvack.org archive mirror
* [PATCH] mm: Remove checks for pte_index
@ 2023-08-19  3:18 Matthew Wilcox (Oracle)
  2023-08-19 14:57 ` Mike Rapoport
From: Matthew Wilcox (Oracle) @ 2023-08-19  3:18 UTC
  To: Andrew Morton, linux-mm
  Cc: Matthew Wilcox (Oracle), Mike Rapoport, Christian Dietrich

Since pte_index is always defined, we don't need to check whether it's
defined or not.  Delete the slow version that doesn't depend on it and
remove the #define since nobody needs to test for it.
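
For context, "#define pte_index pte_index" is the usual kernel idiom for
making a static inline helper visible to the preprocessor so that other
code can feature-test it.  A rough sketch of the pattern this patch
retires, for illustration only:

	/* generic header: provide the helper plus a marker for #ifdef */
	static inline unsigned long pte_index(unsigned long address)
	{
		return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
	}
	#define pte_index pte_index

	/* callers could then choose a path at preprocessing time */
	#ifdef pte_index
		/* batched path that walks the PTEs directly */
	#else
		/* per-page fallback */
	#endif

Since pte_index() is now always available, nothing needs to feature-test
for it, so both the marker #define and the fallback branch can go.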

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Mike Rapoport <rppt@linux.ibm.com>
Cc: Christian Dietrich <stettberger@dokucode.de>
---
 include/linux/pgtable.h |  1 -
 mm/memory.c             | 17 +----------------
 2 files changed, 1 insertion(+), 17 deletions(-)

diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index cb5c1fad1078..1fba072b3dac 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -66,7 +66,6 @@ static inline unsigned long pte_index(unsigned long address)
 {
 	return (address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1);
 }
-#define pte_index pte_index
 
 #ifndef pmd_index
 static inline unsigned long pmd_index(unsigned long address)
diff --git a/mm/memory.c b/mm/memory.c
index 2947fbc558f6..b7ce04cb058d 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1870,7 +1870,6 @@ static int insert_page(struct vm_area_struct *vma, unsigned long addr,
 	return retval;
 }
 
-#ifdef pte_index
 static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 			unsigned long addr, struct page *page, pgprot_t prot)
 {
@@ -1885,7 +1884,7 @@ static int insert_page_in_batch_locked(struct vm_area_struct *vma, pte_t *pte,
 }
 
 /* insert_pages() amortizes the cost of spinlock operations
- * when inserting pages in a loop. Arch *must* define pte_index.
+ * when inserting pages in a loop.
  */
 static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num, pgprot_t prot)
@@ -1944,7 +1943,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	*num = remaining_pages_total;
 	return ret;
 }
-#endif  /* ifdef pte_index */
 
 /**
  * vm_insert_pages - insert multiple pages into user vma, batching the pmd lock.
@@ -1964,7 +1962,6 @@ static int insert_pages(struct vm_area_struct *vma, unsigned long addr,
 int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 			struct page **pages, unsigned long *num)
 {
-#ifdef pte_index
 	const unsigned long end_addr = addr + (*num * PAGE_SIZE) - 1;
 
 	if (addr < vma->vm_start || end_addr >= vma->vm_end)
@@ -1976,18 +1973,6 @@ int vm_insert_pages(struct vm_area_struct *vma, unsigned long addr,
 	}
 	/* Defer page refcount checking till we're about to map that page. */
 	return insert_pages(vma, addr, pages, num, vma->vm_page_prot);
-#else
-	unsigned long idx = 0, pgcount = *num;
-	int err = -EINVAL;
-
-	for (; idx < pgcount; ++idx) {
-		err = vm_insert_page(vma, addr + (PAGE_SIZE * idx), pages[idx]);
-		if (err)
-			break;
-	}
-	*num = pgcount - idx;
-	return err;
-#endif  /* ifdef pte_index */
 }
 EXPORT_SYMBOL(vm_insert_pages);
 
-- 
2.40.1
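
[ For reference, a minimal sketch of a caller of the batched interface;
  the handler name, my_pages and NPAGES below are hypothetical.  As the
  hunks above show, on return *num is the count of pages that were not
  inserted:

	static int my_mmap(struct file *file, struct vm_area_struct *vma)
	{
		unsigned long num = NPAGES;	/* pages to map */
		int err;

		err = vm_insert_pages(vma, vma->vm_start, my_pages, &num);
		if (err)
			return err;	/* num pages were not inserted */
		return 0;
	}
]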




* Re: [PATCH] mm: Remove checks for pte_index
  2023-08-19  3:18 [PATCH] mm: Remove checks for pte_index Matthew Wilcox (Oracle)
@ 2023-08-19 14:57 ` Mike Rapoport
From: Mike Rapoport @ 2023-08-19 14:57 UTC
  To: Matthew Wilcox (Oracle); +Cc: Andrew Morton, linux-mm, Christian Dietrich

On Sat, Aug 19, 2023 at 04:18:37AM +0100, Matthew Wilcox (Oracle) wrote:
> Since pte_index is always defined, we don't need to check whether it's
> defined or not.  Delete the slow version that doesn't depend on it and
> remove the #define since nobody needs to test for it.
> 
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: Mike Rapoport <rppt@linux.ibm.com>
> Cc: Christian Dietrich <stettberger@dokucode.de>

Reviewed-by: Mike Rapoport (IBM) <rppt@kernel.org>


-- 
Sincerely yours,
Mike.

