From: Christoph Hellwig <hch@lst.de>
To: Jason Gunthorpe <jgg@ziepe.ca>
Cc: Jerome Glisse <jglisse@redhat.com>,
Ralph Campbell <rcampbell@nvidia.com>,
linux-mm@kvack.org
Subject: [PATCH 4/5] mm: don't handle the non-fault case in hmm_vma_walk_hole_
Date: Mon, 16 Mar 2020 14:53:09 +0100 [thread overview]
Message-ID: <20200316135310.899364-5-hch@lst.de> (raw)
In-Reply-To: <20200316135310.899364-1-hch@lst.de>
There is just a single caller using hmm_vma_walk_hole_ for the non-fault
case. Use hmm_pfns_fill to fill the whole pfn array with zeroes in the
only caller for the non-fault case and remove the non-fault path from
hmm_vma_walk_hole_.
Also rename the function to hmm_vma_fault to better describe what it
does.
Signed-off-by: Christoph Hellwig <hch@lst.de>
---
mm/hmm.c | 38 +++++++++++++++++++-------------------
1 file changed, 19 insertions(+), 19 deletions(-)
diff --git a/mm/hmm.c b/mm/hmm.c
index 6d636373181a..707edba850de 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -73,45 +73,42 @@ static int hmm_pfns_fill(unsigned long addr, unsigned long end,
}
/*
- * hmm_vma_walk_hole_() - handle a range lacking valid pmd or pte(s)
+ * hmm_vma_fault() - fault in a range lacking valid pmd or pte(s)
* @addr: range virtual start address (inclusive)
* @end: range virtual end address (exclusive)
* @fault: should we fault or not ?
* @write_fault: write fault ?
* @walk: mm_walk structure
- * Return: 0 on success, -EBUSY after page fault, or page fault error
+ * Return: -EBUSY after page fault, or page fault error
*
* This function will be called whenever pmd_none() or pte_none() returns true,
* or whenever there is no page directory covering the virtual address range.
*/
-static int hmm_vma_walk_hole_(unsigned long addr, unsigned long end,
+static int hmm_vma_fault(unsigned long addr, unsigned long end,
bool fault, bool write_fault,
struct mm_walk *walk)
{
struct hmm_vma_walk *hmm_vma_walk = walk->private;
struct hmm_range *range = hmm_vma_walk->range;
uint64_t *pfns = range->pfns;
- unsigned long i;
+ unsigned long i = (addr - range->start) >> PAGE_SHIFT;
+ WARN_ON_ONCE(!fault && !write_fault);
hmm_vma_walk->last = addr;
- i = (addr - range->start) >> PAGE_SHIFT;
if (write_fault && walk->vma && !(walk->vma->vm_flags & VM_WRITE))
return -EPERM;
for (; addr < end; addr += PAGE_SIZE, i++) {
- pfns[i] = range->values[HMM_PFN_NONE];
- if (fault || write_fault) {
- int ret;
+ int ret;
- ret = hmm_vma_do_fault(walk, addr, write_fault,
- &pfns[i]);
- if (ret != -EBUSY)
- return ret;
- }
+ pfns[i] = range->values[HMM_PFN_NONE];
+ ret = hmm_vma_do_fault(walk, addr, write_fault, &pfns[i]);
+ if (ret != -EBUSY)
+ return ret;
}
- return (fault || write_fault) ? -EBUSY : 0;
+ return -EBUSY;
}
static inline void hmm_pte_need_fault(const struct hmm_vma_walk *hmm_vma_walk,
@@ -193,7 +190,10 @@ static int hmm_vma_walk_hole(unsigned long addr, unsigned long end,
pfns = &range->pfns[i];
hmm_range_need_fault(hmm_vma_walk, pfns, npages,
0, &fault, &write_fault);
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ if (fault || write_fault)
+ return hmm_vma_fault(addr, end, fault, write_fault, walk);
+ hmm_vma_walk->last = addr;
+ return hmm_pfns_fill(addr, end, range, HMM_PFN_NONE);
}
static inline uint64_t pmd_to_hmm_pfn_flags(struct hmm_range *range, pmd_t pmd)
@@ -221,7 +221,7 @@ static int hmm_vma_handle_pmd(struct mm_walk *walk, unsigned long addr,
&fault, &write_fault);
if (fault || write_fault)
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ return hmm_vma_fault(addr, end, fault, write_fault, walk);
pfn = pmd_pfn(pmd) + ((addr & ~PMD_MASK) >> PAGE_SHIFT);
for (i = 0; addr < end; addr += PAGE_SIZE, i++, pfn++) {
@@ -352,7 +352,7 @@ static int hmm_vma_handle_pte(struct mm_walk *walk, unsigned long addr,
fault:
pte_unmap(ptep);
/* Fault any virtual address we were asked to fault */
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ return hmm_vma_fault(addr, end, fault, write_fault, walk);
}
static int hmm_vma_walk_pmd(pmd_t *pmdp,
@@ -494,7 +494,7 @@ static int hmm_vma_walk_pud(pud_t *pudp, unsigned long start, unsigned long end,
cpu_flags, &fault, &write_fault);
if (fault || write_fault) {
spin_unlock(ptl);
- return hmm_vma_walk_hole_(addr, end, fault, write_fault,
+ return hmm_vma_fault(addr, end, fault, write_fault,
walk);
}
@@ -550,7 +550,7 @@ static int hmm_vma_walk_hugetlb_entry(pte_t *pte, unsigned long hmask,
&fault, &write_fault);
if (fault || write_fault) {
spin_unlock(ptl);
- return hmm_vma_walk_hole_(addr, end, fault, write_fault, walk);
+ return hmm_vma_fault(addr, end, fault, write_fault, walk);
}
pfn = pte_pfn(entry) + ((start & ~hmask) >> PAGE_SHIFT);
--
2.24.1
next prev parent reply other threads:[~2020-03-16 14:04 UTC|newest]
Thread overview: 17+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-16 13:53 misc hmm cleanups Christoph Hellwig
2020-03-16 13:53 ` [PATCH 1/5] mm: don't provide a stub for hmm_range_fault Christoph Hellwig
2020-03-16 14:37 ` Zi Yan
2020-03-16 16:45 ` Jason Gunthorpe
2020-03-16 13:53 ` [PATCH 2/5] mm: remove the unused HMM_FAULT_ALLOW_RETRY flag Christoph Hellwig
2020-03-16 16:44 ` Jason Gunthorpe
2020-03-16 13:53 ` [PATCH 3/5] mm: simplify hmm_vma_walk_hugetlb_entry Christoph Hellwig
2020-03-16 16:43 ` Jason Gunthorpe
2020-03-16 13:53 ` Christoph Hellwig [this message]
2020-03-16 16:43 ` [PATCH 4/5] mm: don't handle the non-fault case in hmm_vma_walk_hole_ Jason Gunthorpe
2020-03-16 13:53 ` [PATCH 5/5] mm: merge hmm_vma_do_fault into into hmm_vma_walk_hole_ Christoph Hellwig
2020-03-16 16:41 ` Jason Gunthorpe
2020-03-16 16:51 ` Christoph Hellwig
2020-03-16 18:01 ` Christoph Hellwig
2020-03-17 18:38 ` misc hmm cleanups Jason Gunthorpe
2020-03-17 18:55 ` Christoph Hellwig
2020-03-19 0:27 ` Jason Gunthorpe
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200316135310.899364-5-hch@lst.de \
--to=hch@lst.de \
--cc=jgg@ziepe.ca \
--cc=jglisse@redhat.com \
--cc=linux-mm@kvack.org \
--cc=rcampbell@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox