linux-mm.kvack.org archive mirror
From: Miaohe Lin <linmiaohe@huawei.com>
To: Nicholas Piggin <npiggin@gmail.com>
Cc: <linux-kernel@vger.kernel.org>, <linux-arch@vger.kernel.org>,
	<linuxppc-dev@lists.ozlabs.org>,
	Jonathan Cameron <Jonathan.Cameron@Huawei.com>,
	Christoph Hellwig <hch@infradead.org>,
	Christophe Leroy <christophe.leroy@csgroup.eu>,
	Rick Edgecombe <rick.p.edgecombe@intel.com>,
	Ding Tianhong <dingtianhong@huawei.com>,
	Christoph Hellwig <hch@lst.de>, <linux-mm@kvack.org>,
	Andrew Morton <akpm@linux-foundation.org>
Subject: Re: [PATCH v11 03/13] mm/vmalloc: rename vmap_*_range vmap_pages_*_range
Date: Wed, 27 Jan 2021 10:10:33 +0800	[thread overview]
Message-ID: <01fe6df9-8c92-72c9-94cb-797e11160ad7@huawei.com>
In-Reply-To: <20210126044510.2491820-4-npiggin@gmail.com>

Hi:
On 2021/1/26 12:45, Nicholas Piggin wrote:
> The vmalloc mapper operates on a struct page * array rather than a
> linear physical address, re-name it to make this distinction clear.
> 
> Reviewed-by: Christoph Hellwig <hch@lst.de>
> Signed-off-by: Nicholas Piggin <npiggin@gmail.com>
> ---
>  mm/vmalloc.c | 16 ++++++++--------
>  1 file changed, 8 insertions(+), 8 deletions(-)
> 
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index 62372f9e0167..7f2f36116980 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -189,7 +189,7 @@ void unmap_kernel_range_noflush(unsigned long start, unsigned long size)
>  		arch_sync_kernel_mappings(start, end);
>  }
>  
> -static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
> +static int vmap_pages_pte_range(pmd_t *pmd, unsigned long addr,
>  		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
>  		pgtbl_mod_mask *mask)
>  {
> @@ -217,7 +217,7 @@ static int vmap_pte_range(pmd_t *pmd, unsigned long addr,
>  	return 0;
>  }
>  
> -static int vmap_pmd_range(pud_t *pud, unsigned long addr,
> +static int vmap_pages_pmd_range(pud_t *pud, unsigned long addr,
>  		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
>  		pgtbl_mod_mask *mask)
>  {
> @@ -229,13 +229,13 @@ static int vmap_pmd_range(pud_t *pud, unsigned long addr,
>  		return -ENOMEM;
>  	do {
>  		next = pmd_addr_end(addr, end);
> -		if (vmap_pte_range(pmd, addr, next, prot, pages, nr, mask))
> +		if (vmap_pages_pte_range(pmd, addr, next, prot, pages, nr, mask))
>  			return -ENOMEM;
>  	} while (pmd++, addr = next, addr != end);
>  	return 0;
>  }
>  
> -static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
> +static int vmap_pages_pud_range(p4d_t *p4d, unsigned long addr,
>  		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
>  		pgtbl_mod_mask *mask)
>  {
> @@ -247,13 +247,13 @@ static int vmap_pud_range(p4d_t *p4d, unsigned long addr,
>  		return -ENOMEM;
>  	do {
>  		next = pud_addr_end(addr, end);
> -		if (vmap_pmd_range(pud, addr, next, prot, pages, nr, mask))
> +		if (vmap_pages_pmd_range(pud, addr, next, prot, pages, nr, mask))
>  			return -ENOMEM;
>  	} while (pud++, addr = next, addr != end);
>  	return 0;
>  }
>  
> -static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
> +static int vmap_pages_p4d_range(pgd_t *pgd, unsigned long addr,
>  		unsigned long end, pgprot_t prot, struct page **pages, int *nr,
>  		pgtbl_mod_mask *mask)
>  {
> @@ -265,7 +265,7 @@ static int vmap_p4d_range(pgd_t *pgd, unsigned long addr,
>  		return -ENOMEM;
>  	do {
>  		next = p4d_addr_end(addr, end);
> -		if (vmap_pud_range(p4d, addr, next, prot, pages, nr, mask))
> +		if (vmap_pages_pud_range(p4d, addr, next, prot, pages, nr, mask))
>  			return -ENOMEM;
>  	} while (p4d++, addr = next, addr != end);
>  	return 0;
> @@ -306,7 +306,7 @@ int map_kernel_range_noflush(unsigned long addr, unsigned long size,
>  		next = pgd_addr_end(addr, end);
>  		if (pgd_bad(*pgd))
>  			mask |= PGTBL_PGD_MODIFIED;
> -		err = vmap_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
> +		err = vmap_pages_p4d_range(pgd, addr, next, prot, pages, &nr, &mask);
>  		if (err)
>  			return err;
>  	} while (pgd++, addr = next, addr != end);
> 
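
A minimal user-space sketch of the distinction the rename makes explicit
(illustrative only; the names and types below are made up for this note,
not the kernel's implementation): the vmap_pages_*_range() walkers consume
a struct page * array one PAGE_SIZE entry at a time, whereas the linear
mappers that patch 04 moves over from ioremap derive every frame from one
contiguous physical base address.

/*
 * Toy illustration, compilable in user space: contrast a page-array
 * mapper with a linear-physical-address mapper.  struct page here is a
 * stand-in that only records a frame number.
 */
#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096UL

struct page { unsigned long pfn; };

/* Page-array style: one discontiguous frame per PAGE_SIZE step. */
static void map_pages_range(unsigned long addr, unsigned long end,
			    struct page **pages, int *nr)
{
	for (; addr != end; addr += PAGE_SIZE)
		printf("map va %#lx -> pfn %lu\n", addr, pages[(*nr)++]->pfn);
}

/* Linear style: every frame comes from one contiguous physical range. */
static void map_linear_range(unsigned long addr, unsigned long end,
			     uint64_t phys_addr)
{
	for (; addr != end; addr += PAGE_SIZE, phys_addr += PAGE_SIZE)
		printf("map va %#lx -> pa %#llx\n", addr,
		       (unsigned long long)phys_addr);
}

int main(void)
{
	struct page p0 = { .pfn = 42 }, p1 = { .pfn = 7 };	/* discontiguous */
	struct page *pages[] = { &p0, &p1 };
	int nr = 0;

	map_pages_range(0x1000, 0x3000, pages, &nr);
	map_linear_range(0x1000, 0x3000, 0x80000000ULL);
	return 0;
}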

Reviewed-by: Miaohe Lin <linmiaohe@huawei.com>



Thread overview: 28+ messages
2021-01-26  4:44 [PATCH v11 00/13] huge vmalloc mappings Nicholas Piggin
2021-01-26  4:44 ` [PATCH v11 01/13] mm/vmalloc: fix HUGE_VMAP regression by enabling huge pages in vmalloc_to_page Nicholas Piggin
2021-01-26  6:40   ` Miaohe Lin
2021-01-28  3:13   ` Ding Tianhong
2021-02-02 10:22     ` Nicholas Piggin
2021-01-26  4:44 ` [PATCH v11 02/13] mm: apply_to_pte_range warn and fail if a large pte is encountered Nicholas Piggin
2021-01-26  6:49   ` Miaohe Lin
2021-01-26  4:45 ` [PATCH v11 03/13] mm/vmalloc: rename vmap_*_range vmap_pages_*_range Nicholas Piggin
2021-01-27  2:10   ` Miaohe Lin [this message]
2021-01-26  4:45 ` [PATCH v11 04/13] mm/ioremap: rename ioremap_*_range to vmap_*_range Nicholas Piggin
2021-01-26  6:40   ` Christoph Hellwig
2021-01-28  2:38   ` Miaohe Lin
2021-01-26  4:45 ` [PATCH v11 05/13] mm: HUGE_VMAP arch support cleanup Nicholas Piggin
2021-01-26  6:07   ` Ding Tianhong
2021-01-26 13:26   ` kernel test robot
2021-01-27  5:26   ` kernel test robot
2021-01-26  4:45 ` [PATCH v11 06/13] powerpc: inline huge vmap supported functions Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 07/13] arm64: " Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 08/13] x86: " Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 09/13] mm/vmalloc: provide fallback arch huge vmap support functions Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 10/13] mm: Move vmap_range from mm/ioremap.c to mm/vmalloc.c Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 11/13] mm/vmalloc: add vmap_range_noflush variant Nicholas Piggin
2021-01-26  4:45 ` [PATCH v11 12/13] mm/vmalloc: Hugepage vmalloc mappings Nicholas Piggin
2021-01-26  6:59   ` Ding Tianhong
2021-01-26  9:47     ` Nicholas Piggin
2021-01-26 11:48       ` Ding Tianhong
2021-01-26  4:45 ` [PATCH v11 13/13] powerpc/64s/radix: Enable huge " Nicholas Piggin
2021-01-27 10:26   ` Michael Ellerman
