From: Hugh Dickins <hughd@google.com>
To: Andrew Morton <akpm@linux-foundation.org>
Cc: Mike Kravetz <mike.kravetz@oracle.com>,
Mike Rapoport <rppt@kernel.org>,
"Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>,
Matthew Wilcox <willy@infradead.org>,
David Hildenbrand <david@redhat.com>,
Suren Baghdasaryan <surenb@google.com>,
Qi Zheng <zhengqi.arch@bytedance.com>,
Yang Shi <shy828301@gmail.com>,
Mel Gorman <mgorman@techsingularity.net>,
Peter Xu <peterx@redhat.com>,
Peter Zijlstra <peterz@infradead.org>,
Will Deacon <will@kernel.org>, Yu Zhao <yuzhao@google.com>,
Alistair Popple <apopple@nvidia.com>,
Ralph Campbell <rcampbell@nvidia.com>,
Ira Weiny <ira.weiny@intel.com>,
Steven Price <steven.price@arm.com>,
SeongJae Park <sj@kernel.org>,
Naoya Horiguchi <naoya.horiguchi@nec.com>,
Christophe Leroy <christophe.leroy@csgroup.eu>,
Zack Rusin <zackr@vmware.com>, Jason Gunthorpe <jgg@ziepe.ca>,
Axel Rasmussen <axelrasmussen@google.com>,
Anshuman Khandual <anshuman.khandual@arm.com>,
Pasha Tatashin <pasha.tatashin@soleen.com>,
Miaohe Lin <linmiaohe@huawei.com>,
Minchan Kim <minchan@kernel.org>,
Christoph Hellwig <hch@infradead.org>,
Song Liu <song@kernel.org>,
Thomas Hellstrom <thomas.hellstrom@linux.intel.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org
Subject: [PATCH 09/31] mm/pagewalkers: ACTION_AGAIN if pte_offset_map_lock() fails
Date: Sun, 21 May 2023 22:00:15 -0700 (PDT)
Message-ID: <6265ac58-6018-a8c6-cf38-69cba698471@google.com>
In-Reply-To: <68a97fbe-5c1e-7ac6-72c-7b9c6290b370@google.com>

Simple walk_page_range() users should set ACTION_AGAIN to retry when
pte_offset_map_lock() fails.
No need to check pmd_trans_unstable(): that was precisely to avoid the
possibility of calling pte_offset_map() on a racily removed or inserted
THP entry, but such cases are now safely handled inside it. Likewise
there is no need to check pmd_none() or pmd_bad() before calling it.
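
For reference, a minimal sketch of the pattern a simple pmd_entry walker
follows after this change; the callback name and loop body below are
illustrative only, not taken from any file touched by this patch:

static int example_pte_range(pmd_t *pmd, unsigned long addr,
			     unsigned long end, struct mm_walk *walk)
{
	spinlock_t *ptl;
	pte_t *pte;

	pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
	if (!pte) {
		/* pmd racily cleared or changed: retry this range */
		walk->action = ACTION_AGAIN;
		return 0;
	}
	for (; addr != end; pte++, addr += PAGE_SIZE) {
		/* ... examine *pte here ... */
	}
	pte_unmap_unlock(pte - 1, ptl);
	return 0;
}

The page walker re-evaluates the pmd when ACTION_AGAIN is set, so no
caller needs to loop by hand.
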
Signed-off-by: Hugh Dickins <hughd@google.com>
---
fs/proc/task_mmu.c | 32 ++++++++++++++++----------------
mm/damon/vaddr.c | 12 ++++++++----
mm/mempolicy.c | 7 ++++---
mm/mincore.c | 9 ++++-----
mm/mlock.c | 4 ++++
5 files changed, 36 insertions(+), 28 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 420510f6a545..dba5052ce09b 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -631,14 +631,11 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
goto out;
}
- if (pmd_trans_unstable(pmd))
- goto out;
- /*
- * The mmap_lock held all the way back in m_start() is what
- * keeps khugepaged out of here and from collapsing things
- * in here.
- */
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
for (; addr != end; pte++, addr += PAGE_SIZE)
smaps_pte_entry(pte, addr, walk);
pte_unmap_unlock(pte - 1, ptl);
@@ -1191,10 +1188,11 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
return 0;
}
- if (pmd_trans_unstable(pmd))
- return 0;
-
pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
for (; addr != end; pte++, addr += PAGE_SIZE) {
ptent = *pte;
@@ -1538,9 +1536,6 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
spin_unlock(ptl);
return err;
}
-
- if (pmd_trans_unstable(pmdp))
- return 0;
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
/*
@@ -1548,6 +1543,10 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
* goes beyond vma->vm_end.
*/
orig_pte = pte = pte_offset_map_lock(walk->mm, pmdp, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return err;
+ }
for (; addr < end; pte++, addr += PAGE_SIZE) {
pagemap_entry_t pme;
@@ -1887,11 +1886,12 @@ static int gather_pte_stats(pmd_t *pmd, unsigned long addr,
spin_unlock(ptl);
return 0;
}
-
- if (pmd_trans_unstable(pmd))
- return 0;
#endif
orig_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
do {
struct page *page = can_gather_numa_stats(*pte, vma, addr);
if (!page)
diff --git a/mm/damon/vaddr.c b/mm/damon/vaddr.c
index 1fec16d7263e..b8762ff15c3c 100644
--- a/mm/damon/vaddr.c
+++ b/mm/damon/vaddr.c
@@ -318,9 +318,11 @@ static int damon_mkold_pmd_entry(pmd_t *pmd, unsigned long addr,
spin_unlock(ptl);
}
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return 0;
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
if (!pte_present(*pte))
goto out;
damon_ptep_mkold(pte, walk->mm, addr);
@@ -464,9 +466,11 @@ static int damon_young_pmd_entry(pmd_t *pmd, unsigned long addr,
regular_page:
#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
- if (pmd_none(*pmd) || unlikely(pmd_bad(*pmd)))
- return -EINVAL;
pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
if (!pte_present(*pte))
goto out;
folio = damon_get_folio(pte_pfn(*pte));
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 1756389a0609..4d0bcf6f0d52 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -514,10 +514,11 @@ static int queue_folios_pte_range(pmd_t *pmd, unsigned long addr,
if (ptl)
return queue_folios_pmd(pmd, ptl, addr, end, walk);
- if (pmd_trans_unstable(pmd))
- return 0;
-
mapped_pte = pte = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
for (; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
diff --git a/mm/mincore.c b/mm/mincore.c
index 2d5be013a25a..f33f6a0b1ded 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -113,12 +113,11 @@ static int mincore_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
goto out;
}
- if (pmd_trans_unstable(pmd)) {
- __mincore_unmapped_range(addr, end, vma, vec);
- goto out;
- }
-
ptep = pte_offset_map_lock(walk->mm, pmd, addr, &ptl);
+ if (!ptep) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
for (; addr != end; ptep++, addr += PAGE_SIZE) {
pte_t pte = *ptep;
diff --git a/mm/mlock.c b/mm/mlock.c
index 40b43f8740df..9f2b1173b1b1 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -329,6 +329,10 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
}
start_pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl);
+ if (!start_pte) {
+ walk->action = ACTION_AGAIN;
+ return 0;
+ }
for (pte = start_pte; addr != end; pte++, addr += PAGE_SIZE) {
if (!pte_present(*pte))
continue;
--
2.35.3