From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
	Mike Rapoport <rppt@kernel.org>,
	"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
	Matthew Wilcox <willy@infradead.org>,
	David Hildenbrand <david@redhat.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Qi Zheng <zhengqi.arch@bytedance.com>,
	Yang Shi <shy828301@gmail.com>,
	Mel Gorman <mgorman@techsingularity.net>,
	Peter Xu <peterx@redhat.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Will Deacon <will@kernel.org>, Yu Zhao <yuzhao@google.com>,
	Alistair Popple <apopple@nvidia.com>,
	Ralph Campbell <rcampbell@nvidia.com>,
	Ira Weiny <ira.weiny@intel.com>,
	Steven Price <steven.price@arm.com>,
	SeongJae Park <sj@kernel.org>,
	Naoya Horiguchi <naoya.horiguchi@nec.com>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Zack Rusin <zackr@vmware.com>, Jason Gunthorpe <jgg@ziepe.ca>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Anshuman Khandual <anshuman.khandual@arm.com>,
	Pasha Tatashin <pasha.tatashin@soleen.com>,
	Miaohe Lin <linmiaohe@huawei.com>,
	Minchan Kim <minchan@kernel.org>,
	Christoph Hellwig <hch@infradead.org>,
	Song Liu <song@kernel.org>,
	Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 20/31] mm/madvise: clean up pte_offset_map_lock() scans
Date: Sun, 21 May 2023 22:15:06 -0700 (PDT)
Message-ID: <dceac563-49e-fa34-4463-6183224bdca6@google.com>
In-Reply-To: <68a97fbe-5c1e-7ac6-72c-7b9c6290b370@google.com>

The aim here is to make madvise's several pte_offset_map_lock() scans
advance to the next extent on failure, and to remove the superfluous
pmd_trans_unstable() and pmd_none_or_trans_huge_or_clear_bad() calls;
but some nearby cleanup is done along the way.
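
In outline, each scan now follows this convention (a distilled sketch
of the pattern adopted in the hunks below, not an additional change):
a NULL return from pte_offset_map_lock() means the page table vanished
or the pmd changed underneath us, and is treated like an empty extent:

	start_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		return 0;	/* give up, advance to the next extent */
	...
	pte_unmap_unlock(start_pte, ptl);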

swapin_walk_pmd_entry(): don't name an address "index"; and drop the
lock only when calling out to read_swap_cache_async(), not after every
pte.
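
The resulting loop shape, sketched here from the hunk below: map and
lock lazily, hold the lock across ptes which need no swapin, and drop
it only around the blocking call (the "!ptep++" increments on each
pass, and remaps whenever the pointer was left NULL):

	pte_t *ptep = NULL;

	for (addr = start; addr < end; addr += PAGE_SIZE) {
		if (!ptep++) {
			ptep = pte_offset_map_lock(mm, pmd, addr, &ptl);
			if (!ptep)
				break;
		}
		if (!is_swap_pte(*ptep))
			continue;
		pte_unmap_unlock(ptep, ptl);
		ptep = NULL;
		/* blocking swapin, done while unlocked */
		...
	}
	if (ptep)
		pte_unmap_unlock(ptep, ptl);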

madvise_cold_or_pageout_pte_range() and madvise_free_pte_range():
prefer "start_pte" for the pointer, since orig_pte usually denotes a
saved pte value; leave lazy MMU mode before unlocking; merge the
success and failure paths after split_folio().
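
Both functions then share one shape around split_folio(), sketched
here from the hunks below: success and failure differ only in the err
check, and lazy MMU mode is always left before unlocking:

	arch_leave_lazy_mmu_mode();
	pte_unmap_unlock(start_pte, ptl);
	start_pte = NULL;
	err = split_folio(folio);
	folio_unlock(folio);
	folio_put(folio);
	if (err)
		break;
	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
	if (!start_pte)
		break;
	arch_enter_lazy_mmu_mode();
	pte--;
	addr -= PAGE_SIZE;	/* retry this entry, now that it's split */
	continue;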

Signed-off-by: Hugh Dickins <hughd@google.com>
---
 mm/madvise.c | 122 ++++++++++++++++++++++++++++-----------------------
 1 file changed, 68 insertions(+), 54 deletions(-)

diff --git a/mm/madvise.c b/mm/madvise.c
index b5ffbaf616f5..0af64c4a8f82 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -188,37 +188,43 @@ static int madvise_update_vma(struct vm_area_struct *vma,
 
 #ifdef CONFIG_SWAP
 static int swapin_walk_pmd_entry(pmd_t *pmd, unsigned long start,
-	unsigned long end, struct mm_walk *walk)
+		unsigned long end, struct mm_walk *walk)
 {
 	struct vm_area_struct *vma = walk->private;
-	unsigned long index;
 	struct swap_iocb *splug = NULL;
+	pte_t *ptep = NULL;
+	spinlock_t *ptl;
+	unsigned long addr;
 
-	if (pmd_none_or_trans_huge_or_clear_bad(pmd))
-		return 0;
-
-	for (index = start; index != end; index += PAGE_SIZE) {
+	for (addr = start; addr < end; addr += PAGE_SIZE) {
 		pte_t pte;
 		swp_entry_t entry;
 		struct page *page;
-		spinlock_t *ptl;
-		pte_t *ptep;
 
-		ptep = pte_offset_map_lock(vma->vm_mm, pmd, index, &ptl);
+		if (!ptep++) {
+			ptep = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+			if (!ptep)
+				break;
+		}
+
 		pte = *ptep;
-		pte_unmap_unlock(ptep, ptl);
-
 		if (!is_swap_pte(pte))
 			continue;
 		entry = pte_to_swp_entry(pte);
 		if (unlikely(non_swap_entry(entry)))
 			continue;
 
+		pte_unmap_unlock(ptep, ptl);
+		ptep = NULL;
+
 		page = read_swap_cache_async(entry, GFP_HIGHUSER_MOVABLE,
-					     vma, index, false, &splug);
+					     vma, addr, false, &splug);
 		if (page)
 			put_page(page);
 	}
+
+	if (ptep)
+		pte_unmap_unlock(ptep, ptl);
 	swap_read_unplug(splug);
 	cond_resched();
 
@@ -340,7 +346,7 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	bool pageout = private->pageout;
 	struct mm_struct *mm = tlb->mm;
 	struct vm_area_struct *vma = walk->vma;
-	pte_t *orig_pte, *pte, ptent;
+	pte_t *start_pte, *pte, ptent;
 	spinlock_t *ptl;
 	struct folio *folio = NULL;
 	LIST_HEAD(folio_list);
@@ -422,11 +428,11 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 	}
 
 regular_folio:
-	if (pmd_trans_unstable(pmd))
-		return 0;
 #endif
 	tlb_change_page_size(tlb, PAGE_SIZE);
-	orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	start_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+	if (!start_pte)
+		return 0;
 	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	for (; addr < end; pte++, addr += PAGE_SIZE) {
@@ -447,25 +453,28 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 		 * are sure it's worth. Split it if we are only owner.
 		 */
 		if (folio_test_large(folio)) {
+			int err;
+
 			if (folio_mapcount(folio) != 1)
 				break;
 			if (pageout_anon_only_filter && !folio_test_anon(folio))
 				break;
+			if (!folio_trylock(folio))
+				break;
 			folio_get(folio);
-			if (!folio_trylock(folio)) {
-				folio_put(folio);
-				break;
-			}
-			pte_unmap_unlock(orig_pte, ptl);
-			if (split_folio(folio)) {
-				folio_unlock(folio);
-				folio_put(folio);
-				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-				break;
-			}
+			arch_leave_lazy_mmu_mode();
+			pte_unmap_unlock(start_pte, ptl);
+			start_pte = NULL;
+			err = split_folio(folio);
 			folio_unlock(folio);
 			folio_put(folio);
-			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (err)
+				break;
+			start_pte = pte =
+				pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (!start_pte)
+				break;
+			arch_enter_lazy_mmu_mode();
 			pte--;
 			addr -= PAGE_SIZE;
 			continue;
@@ -510,8 +519,10 @@ static int madvise_cold_or_pageout_pte_range(pmd_t *pmd,
 			folio_deactivate(folio);
 	}
 
-	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(orig_pte, ptl);
+	if (start_pte) {
+		arch_leave_lazy_mmu_mode();
+		pte_unmap_unlock(start_pte, ptl);
+	}
 	if (pageout)
 		reclaim_pages(&folio_list);
 	cond_resched();
@@ -612,7 +623,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	struct mm_struct *mm = tlb->mm;
 	struct vm_area_struct *vma = walk->vma;
 	spinlock_t *ptl;
-	pte_t *orig_pte, *pte, ptent;
+	pte_t *start_pte, *pte, ptent;
 	struct folio *folio;
 	int nr_swap = 0;
 	unsigned long next;
@@ -620,13 +631,12 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 	next = pmd_addr_end(addr, end);
 	if (pmd_trans_huge(*pmd))
 		if (madvise_free_huge_pmd(tlb, vma, pmd, addr, next))
-			goto next;
-
-	if (pmd_trans_unstable(pmd))
-		return 0;
+			return 0;
 
 	tlb_change_page_size(tlb, PAGE_SIZE);
-	orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	start_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+	if (!start_pte)
+		return 0;
 	flush_tlb_batched_pending(mm);
 	arch_enter_lazy_mmu_mode();
 	for (; addr != end; pte++, addr += PAGE_SIZE) {
@@ -664,23 +674,26 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 		 * deactivate all pages.
 		 */
 		if (folio_test_large(folio)) {
+			int err;
+
 			if (folio_mapcount(folio) != 1)
-				goto out;
+				break;
+			if (!folio_trylock(folio))
+				break;
 			folio_get(folio);
-			if (!folio_trylock(folio)) {
-				folio_put(folio);
-				goto out;
-			}
-			pte_unmap_unlock(orig_pte, ptl);
-			if (split_folio(folio)) {
-				folio_unlock(folio);
-				folio_put(folio);
-				orig_pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
-				goto out;
-			}
+			arch_leave_lazy_mmu_mode();
+			pte_unmap_unlock(start_pte, ptl);
+			start_pte = NULL;
+			err = split_folio(folio);
 			folio_unlock(folio);
 			folio_put(folio);
-			orig_pte = pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (err)
+				break;
+			start_pte = pte =
+				pte_offset_map_lock(mm, pmd, addr, &ptl);
+			if (!start_pte)
+				break;
+			arch_enter_lazy_mmu_mode();
 			pte--;
 			addr -= PAGE_SIZE;
 			continue;
@@ -725,17 +738,18 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
 		}
 		folio_mark_lazyfree(folio);
 	}
-out:
+
 	if (nr_swap) {
 		if (current->mm == mm)
 			sync_mm_rss(mm);
-
 		add_mm_counter(mm, MM_SWAPENTS, nr_swap);
 	}
-	arch_leave_lazy_mmu_mode();
-	pte_unmap_unlock(orig_pte, ptl);
+	if (start_pte) {
+		arch_leave_lazy_mmu_mode();
+		pte_unmap_unlock(start_pte, ptl);
+	}
 	cond_resched();
-next:
+
 	return 0;
 }
 
-- 
2.35.3


