From: Steven Price <steven.price@arm.com>
To: Christoph Hellwig <hch@lst.de>,
Linus Torvalds <torvalds@linux-foundation.org>,
Andrew Morton <akpm@linux-foundation.org>
Cc: "Thomas Hellström" <thomas@shipmail.org>,
"Jerome Glisse" <jglisse@redhat.com>,
"Jason Gunthorpe" <jgg@mellanox.com>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org
Subject: Re: [PATCH 2/3] pagewalk: seperate function pointers from iterator data
Date: Fri, 9 Aug 2019 09:57:17 +0100
Message-ID: <e418faa0-49bf-1bc6-8f77-2849c1b6ae70@arm.com>
In-Reply-To: <20190808154240.9384-3-hch@lst.de>
Subject: s/seperate/separate/
On 08/08/2019 16:42, Christoph Hellwig wrote:
> The mm_walk structure currently mixes data and code. Split out the
> operations vectors into a new mm_walk_ops structure, and while we
> are changing the API also declare the mm_walk structure inside the
> walk_page_range and walk_page_vma functions.
>
> Based on patch from Linus Torvalds.
>
> Signed-off-by: Christoph Hellwig <hch@lst.de>
> ---
> arch/openrisc/kernel/dma.c | 22 +++--
> arch/powerpc/mm/book3s64/subpage_prot.c | 10 +-
> arch/s390/mm/gmap.c | 33 +++----
> fs/proc/task_mmu.c | 74 +++++++--------
> include/linux/pagewalk.h | 64 +++++++------
> mm/hmm.c | 40 +++-----
> mm/madvise.c | 41 +++-----
> mm/memcontrol.c | 23 +++--
> mm/mempolicy.c | 15 ++-
> mm/migrate.c | 15 ++-
> mm/mincore.c | 15 ++-
> mm/mprotect.c | 24 ++---
> mm/pagewalk.c | 118 ++++++++++++++----------
> 13 files changed, 245 insertions(+), 249 deletions(-)
>
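For anyone skimming the thread, the resulting interface looks roughly
like this (my sketch from reading patches 1 and 2; the authoritative
definitions are in include/linux/pagewalk.h):

	/*
	 * All callbacks now live in a separate structure, which callers
	 * can make static const and share between walks...
	 */
	struct mm_walk_ops {
		int (*pud_entry)(pud_t *pud, unsigned long addr,
				 unsigned long next, struct mm_walk *walk);
		int (*pmd_entry)(pmd_t *pmd, unsigned long addr,
				 unsigned long next, struct mm_walk *walk);
		int (*pte_entry)(pte_t *pte, unsigned long addr,
				 unsigned long next, struct mm_walk *walk);
		int (*pte_hole)(unsigned long addr, unsigned long next,
				struct mm_walk *walk);
		int (*hugetlb_entry)(pte_t *pte, unsigned long hmask,
				     unsigned long addr, unsigned long next,
				     struct mm_walk *walk);
		int (*test_walk)(unsigned long addr, unsigned long next,
				 struct mm_walk *walk);
	};

	/*
	 * ...while mm_walk keeps only the mutable per-walk iterator
	 * state, and is built internally by the walk functions rather
	 * than by each caller.
	 */
	struct mm_walk {
		const struct mm_walk_ops *ops;
		struct mm_struct *mm;
		struct vm_area_struct *vma;
		void *private;
	};

	int walk_page_range(struct mm_struct *mm, unsigned long start,
			    unsigned long end, const struct mm_walk_ops *ops,
			    void *private);
	int walk_page_vma(struct vm_area_struct *vma,
			  const struct mm_walk_ops *ops, void *private);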
[...]
> diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> index 8a92a961a2ee..28510fc0dde1 100644
> --- a/mm/pagewalk.c
> +++ b/mm/pagewalk.c
> @@ -9,10 +9,11 @@ static int walk_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
> {
> pte_t *pte;
> int err = 0;
> + const struct mm_walk_ops *ops = walk->ops;
>
> pte = pte_offset_map(pmd, addr);
> for (;;) {
> - err = walk->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
> + err = ops->pte_entry(pte, addr, addr + PAGE_SIZE, walk);
> if (err)
> break;
> addr += PAGE_SIZE;
> @@ -30,6 +31,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
> {
> pmd_t *pmd;
> unsigned long next;
> + const struct mm_walk_ops *ops = walk->ops;
> int err = 0;
>
> pmd = pmd_offset(pud, addr);
> @@ -37,8 +39,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
> again:
> next = pmd_addr_end(addr, end);
> if (pmd_none(*pmd) || !walk->vma) {
> - if (walk->pte_hole)
> - err = walk->pte_hole(addr, next, walk);
> + if (ops->pte_hole)
> + err = ops->pte_hole(addr, next, walk);
> if (err)
> break;
> continue;
> @@ -47,8 +49,8 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
> * This implies that each ->pmd_entry() handler
> * needs to know about pmd_trans_huge() pmds
> */
> - if (walk->pmd_entry)
> - err = walk->pmd_entry(pmd, addr, next, walk);
> + if (ops->pmd_entry)
> + err = ops->pmd_entry(pmd, addr, next, walk);
> if (err)
> break;
>
> @@ -56,7 +58,7 @@ static int walk_pmd_range(pud_t *pud, unsigned long addr, unsigned long end,
> * Check this here so we only break down trans_huge
> * pages when we _need_ to
> */
> - if (!walk->pte_entry)
> + if (!ops->pte_entry)
> continue;
>
> split_huge_pmd(walk->vma, pmd, addr);
> @@ -75,6 +77,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
> {
> pud_t *pud;
> unsigned long next;
> + const struct mm_walk_ops *ops = walk->ops;
> int err = 0;
>
> pud = pud_offset(p4d, addr);
> @@ -82,18 +85,18 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
> again:
> next = pud_addr_end(addr, end);
> if (pud_none(*pud) || !walk->vma) {
> - if (walk->pte_hole)
> - err = walk->pte_hole(addr, next, walk);
> + if (ops->pte_hole)
> + err = ops->pte_hole(addr, next, walk);
> if (err)
> break;
> continue;
> }
>
> - if (walk->pud_entry) {
> + if (ops->pud_entry) {
> spinlock_t *ptl = pud_trans_huge_lock(pud, walk->vma);
>
> if (ptl) {
> - err = walk->pud_entry(pud, addr, next, walk);
> + err = ops->pud_entry(pud, addr, next, walk);
> spin_unlock(ptl);
> if (err)
> break;
> @@ -105,7 +108,7 @@ static int walk_pud_range(p4d_t *p4d, unsigned long addr, unsigned long end,
> if (pud_none(*pud))
> goto again;
>
> - if (walk->pmd_entry || walk->pte_entry)
> + if (ops->pmd_entry || ops->pte_entry)
> err = walk_pmd_range(pud, addr, next, walk);
> if (err)
> break;
> @@ -119,19 +122,20 @@ static int walk_p4d_range(pgd_t *pgd, unsigned long addr, unsigned long end,
> {
> p4d_t *p4d;
> unsigned long next;
> + const struct mm_walk_ops *ops = walk->ops;
> int err = 0;
>
> p4d = p4d_offset(pgd, addr);
> do {
> next = p4d_addr_end(addr, end);
> if (p4d_none_or_clear_bad(p4d)) {
> - if (walk->pte_hole)
> - err = walk->pte_hole(addr, next, walk);
> + if (ops->pte_hole)
> + err = ops->pte_hole(addr, next, walk);
> if (err)
> break;
> continue;
> }
> - if (walk->pmd_entry || walk->pte_entry)
> + if (ops->pmd_entry || ops->pte_entry)
> err = walk_pud_range(p4d, addr, next, walk);
> if (err)
> break;
> @@ -145,19 +149,20 @@ static int walk_pgd_range(unsigned long addr, unsigned long end,
> {
> pgd_t *pgd;
> unsigned long next;
> + const struct mm_walk_ops *ops = walk->ops;
> int err = 0;
>
> pgd = pgd_offset(walk->mm, addr);
> do {
> next = pgd_addr_end(addr, end);
> if (pgd_none_or_clear_bad(pgd)) {
> - if (walk->pte_hole)
> - err = walk->pte_hole(addr, next, walk);
> + if (ops->pte_hole)
> + err = ops->pte_hole(addr, next, walk);
> if (err)
> break;
> continue;
> }
> - if (walk->pmd_entry || walk->pte_entry)
> + if (ops->pmd_entry || ops->pte_entry)
> err = walk_p4d_range(pgd, addr, next, walk);
> if (err)
> break;
> @@ -183,6 +188,7 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
> unsigned long hmask = huge_page_mask(h);
> unsigned long sz = huge_page_size(h);
> pte_t *pte;
> + const struct mm_walk_ops *ops = walk->ops;
> int err = 0;
>
> do {
> @@ -190,9 +196,9 @@ static int walk_hugetlb_range(unsigned long addr, unsigned long end,
> pte = huge_pte_offset(walk->mm, addr & hmask, sz);
>
> if (pte)
> - err = walk->hugetlb_entry(pte, hmask, addr, next, walk);
> - else if (walk->pte_hole)
> - err = walk->pte_hole(addr, next, walk);
> + err = ops->hugetlb_entry(pte, hmask, addr, next, walk);
> + else if (ops->pte_hole)
> + err = ops->pte_hole(addr, next, walk);
>
> if (err)
> break;
> @@ -220,9 +226,10 @@ static int walk_page_test(unsigned long start, unsigned long end,
> struct mm_walk *walk)
> {
> struct vm_area_struct *vma = walk->vma;
> + const struct mm_walk_ops *ops = walk->ops;
>
> - if (walk->test_walk)
> - return walk->test_walk(start, end, walk);
> + if (ops->test_walk)
> + return ops->test_walk(start, end, walk);
>
> /*
> * vma(VM_PFNMAP) doesn't have any valid struct pages behind VM_PFNMAP
> @@ -234,8 +241,8 @@ static int walk_page_test(unsigned long start, unsigned long end,
> */
> if (vma->vm_flags & VM_PFNMAP) {
> int err = 1;
> - if (walk->pte_hole)
> - err = walk->pte_hole(start, end, walk);
> + if (ops->pte_hole)
> + err = ops->pte_hole(start, end, walk);
> return err ? err : 1;
> }
> return 0;
> @@ -248,7 +255,8 @@ static int __walk_page_range(unsigned long start, unsigned long end,
> struct vm_area_struct *vma = walk->vma;
>
> if (vma && is_vm_hugetlb_page(vma)) {
> - if (walk->hugetlb_entry)
> + const struct mm_walk_ops *ops = walk->ops;
NIT: checkpatch would like a blank line here
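i.e. something like this (whitespace change only, untested):

	if (vma && is_vm_hugetlb_page(vma)) {
		const struct mm_walk_ops *ops = walk->ops;

		if (ops->hugetlb_entry)
			err = walk_hugetlb_range(start, end, walk);
	} else
		err = walk_pgd_range(start, end, walk);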
> + if (ops->hugetlb_entry)
> err = walk_hugetlb_range(start, end, walk);
> } else
> err = walk_pgd_range(start, end, walk);
> @@ -258,11 +266,13 @@ static int __walk_page_range(unsigned long start, unsigned long end,
>
> /**
> * walk_page_range - walk page table with caller specific callbacks
> - * @start: start address of the virtual address range
> - * @end: end address of the virtual address range
> - * @walk: mm_walk structure defining the callbacks and the target address space
> + * @mm: mm_struct representing the target process of page table walk
> + * @start: start address of the virtual address range
> + * @end: end address of the virtual address range
> + * @ops: operation to call during the walk
> + * @private: private data for callbacks' usage
> *
> - * Recursively walk the page table tree of the process represented by @walk->mm
> + * Recursively walk the page table tree of the process represented by @mm
> * within the virtual address range [@start, @end). During walking, we can do
> * some caller-specific works for each entry, by setting up pmd_entry(),
> * pte_entry(), and/or hugetlb_entry(). If you don't set up for some of these
Quoting some context that this patch leaves unchanged:
> *
> * Before starting to walk page table, some callers want to check whether
> * they really want to walk over the current vma, typically by checking
> * its vm_flags. walk_page_test() and @walk->test_walk() are used for this
> * purpose.
@walk->test_walk() should now be @ops->test_walk()
> @@ -283,42 +293,48 @@ static int __walk_page_range(unsigned long start, unsigned long end,
> *
> * struct mm_walk keeps current values of some common data like vma and pmd,
> * which are useful for the access from callbacks. If you want to pass some
> - * caller-specific data to callbacks, @walk->private should be helpful.
> + * caller-specific data to callbacks, @private should be helpful.
> *
> * Locking:
> * Callers of walk_page_range() and walk_page_vma() should hold
> * @walk->mm->mmap_sem, because these function traverse vma list and/or
s/walk->//
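For what it's worth, a converted caller under the new convention comes
out looking something like the below. This is a made-up example of mine
rather than anything from the series (count_present_pte, count_ops and
count_present are hypothetical names), but it shows the main win: the
ops table becomes static const, and only the genuinely per-walk state
gets passed in:

	#include <linux/pagewalk.h>

	static int count_present_pte(pte_t *pte, unsigned long addr,
				     unsigned long next, struct mm_walk *walk)
	{
		unsigned long *count = walk->private;

		if (pte_present(*pte))
			(*count)++;
		return 0;	/* a non-zero return would abort the walk */
	}

	/* The function pointers are fixed at compile time now. */
	static const struct mm_walk_ops count_ops = {
		.pte_entry	= count_present_pte,
	};

	static unsigned long count_present(struct mm_struct *mm,
					   unsigned long start,
					   unsigned long end)
	{
		unsigned long count = 0;

		/* Callers must hold mmap_sem across the walk. */
		down_read(&mm->mmap_sem);
		walk_page_range(mm, start, end, &count_ops, &count);
		up_read(&mm->mmap_sem);
		return count;
	}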
Otherwise looks good - I've rebased my series on it and the initial
testing is fine. So for the series:
Reviewed-by: Steven Price <steven.price@arm.com>
Thanks,
Steve