From: Mike Rapoport <rppt@kernel.org>
To: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Cc: linux-mm@kvack.org, linux-arch@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-ia64@vger.kernel.org
Subject: Re: [PATCH v3 11/34] ia64: Implement the new page table range API
Date: Fri, 3 Mar 2023 13:56:36 +0200
Message-ID: <ZAHgdEzqWk4Peyjh@kernel.org>
In-Reply-To: <20230228213738.272178-12-willy@infradead.org>
On Tue, Feb 28, 2023 at 09:37:14PM +0000, Matthew Wilcox (Oracle) wrote:
> Add set_ptes(), update_mmu_cache_range() and flush_dcache_folio().
> Change the PG_arch_1 (aka PG_dcache_clean) flag from being per-page to
> per-folio, which makes arch_dma_mark_clean() and mark_clean() a little
> more exciting.
>
> Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
> Cc: linux-ia64@vger.kernel.org
> ---
> arch/ia64/hp/common/sba_iommu.c | 26 +++++++++++++++-----------
> arch/ia64/include/asm/cacheflush.h | 14 ++++++++++----
> arch/ia64/include/asm/pgtable.h | 14 +++++++++++++-
> arch/ia64/mm/init.c | 29 +++++++++++++++++++----------
> 4 files changed, 57 insertions(+), 26 deletions(-)
>
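Aside for readers following the whole series: the point of the range API is
that generic code installing a folio's worth of PTEs makes one call per
folio instead of one per page. A rough caller-side sketch (the helper below
is hypothetical, not code from this series; only set_ptes() and
update_mmu_cache_range() come from the patch):

	/* Map @nr consecutive pages of a folio starting at @addr. */
	static void map_folio_range(struct vm_area_struct *vma,
				    struct mm_struct *mm, unsigned long addr,
				    pte_t *ptep, pte_t pte, unsigned int nr)
	{
		set_ptes(mm, addr, ptep, pte, nr);		/* was nr x set_pte_at() */
		update_mmu_cache_range(vma, addr, ptep, nr);	/* was nr x update_mmu_cache() */
	}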
> diff --git a/arch/ia64/hp/common/sba_iommu.c b/arch/ia64/hp/common/sba_iommu.c
> index 8ad6946521d8..48d475f10003 100644
> --- a/arch/ia64/hp/common/sba_iommu.c
> +++ b/arch/ia64/hp/common/sba_iommu.c
> @@ -798,22 +798,26 @@ sba_io_pdir_entry(u64 *pdir_ptr, unsigned long vba)
> #endif
>
> #ifdef ENABLE_MARK_CLEAN
> -/**
> +/*
> * Since DMA is i-cache coherent, any (complete) pages that were written via
> * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
> * flush them when they get mapped into an executable vm-area.
> */
> -static void
> -mark_clean (void *addr, size_t size)
> +static void mark_clean(void *addr, size_t size)
> {
> - unsigned long pg_addr, end;
> -
> - pg_addr = PAGE_ALIGN((unsigned long) addr);
> - end = (unsigned long) addr + size;
> - while (pg_addr + PAGE_SIZE <= end) {
> - struct page *page = virt_to_page((void *)pg_addr);
> - set_bit(PG_arch_1, &page->flags);
> - pg_addr += PAGE_SIZE;
> + struct folio *folio = virt_to_folio(addr);
> + ssize_t left = size;
> + size_t offset = offset_in_folio(folio, addr);
> +
> + if (offset) {
> + left -= folio_size(folio) - offset;
> + folio = folio_next(folio);
> + }
> +
> + while (left >= (ssize_t)folio_size(folio)) {
> + set_bit(PG_arch_1, &folio->flags);
> + left -= folio_size(folio);
> + folio = folio_next(folio);
> }
> }
> #endif
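The shape of this walk matters, and arch_dma_mark_clean() below reuses it:
only folios the DMA write covered completely get marked clean. A partial
folio at the head is skipped by charging its remainder against 'left', and a
partial folio at the tail fails the 'left >= folio_size()' test. Worked
example (hypothetical sizes):

	/*
	 * 16K folios A,B,C,D; addr is 4K into A, size = 44K.
	 *   offset = 4K -> left = 44K - (16K - 4K) = 32K, skip A (partial)
	 *   B: 32K >= 16K -> mark clean, left = 16K
	 *   C: 16K >= 16K -> mark clean, left = 0
	 *   D: 0 < 16K -> stop; D was never written, stays unmarked
	 */

so a partly-written folio is never marked clean, which is the conservative
choice.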
> diff --git a/arch/ia64/include/asm/cacheflush.h b/arch/ia64/include/asm/cacheflush.h
> index 708c0fa5d975..eac493fa9e0d 100644
> --- a/arch/ia64/include/asm/cacheflush.h
> +++ b/arch/ia64/include/asm/cacheflush.h
> @@ -13,10 +13,16 @@
> #include <asm/page.h>
>
> #define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
> -#define flush_dcache_page(page) \
> -do { \
> - clear_bit(PG_arch_1, &(page)->flags); \
> -} while (0)
> +static inline void flush_dcache_folio(struct folio *folio)
> +{
> + clear_bit(PG_arch_1, &folio->flags);
> +}
> +#define flush_dcache_folio flush_dcache_folio
> +
> +static inline void flush_dcache_page(struct page *page)
> +{
> + flush_dcache_folio(page_folio(page));
> +}
>
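Worth noting what the folio-wide flag means: flush_dcache_page() on any
constituent page now clears PG_arch_1 for the whole folio, so the next
executable mapping re-flushes the whole folio in __ia64_sync_icache_dcache()
below. Coarser than per-page tracking, but it can only over-flush, never
under-flush.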
> extern void flush_icache_range(unsigned long start, unsigned long end);
> #define flush_icache_range flush_icache_range
> diff --git a/arch/ia64/include/asm/pgtable.h b/arch/ia64/include/asm/pgtable.h
> index 21c97e31a28a..0c2be4ea664b 100644
> --- a/arch/ia64/include/asm/pgtable.h
> +++ b/arch/ia64/include/asm/pgtable.h
> @@ -303,7 +303,18 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
> *ptep = pteval;
> }
>
> -#define set_pte_at(mm,addr,ptep,pteval) set_pte(ptep,pteval)
> +static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
> + pte_t *ptep, pte_t pte, unsigned int nr)
> +{
> + for (;;) {
> + set_pte(ptep, pte);
> + if (--nr == 0)
> + break;
> + ptep++;
> + pte_val(pte) += PAGE_SIZE;
> + }
> +}
> +#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)
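The 'pte_val(pte) += PAGE_SIZE' step relies on the ia64 PTE holding the page
frame's physical base address in its address bits, so adding PAGE_SIZE yields
the PTE for the next page of the folio. Illustratively, assuming 'pte'
initially maps physical page P:

	set_ptes(mm, addr, ptep, pte, 3);
	/*
	 * writes:
	 *   ptep[0] = PTE mapping P
	 *   ptep[1] = PTE mapping P + PAGE_SIZE
	 *   ptep[2] = PTE mapping P + 2*PAGE_SIZE
	 */

and set_pte_at() keeps its old single-page behaviour as set_ptes(..., 1).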
>
> /*
> * Make page protection values cacheable, uncacheable, or write-
> @@ -396,6 +407,7 @@ pte_same (pte_t a, pte_t b)
> return pte_val(a) == pte_val(b);
> }
>
> +#define update_mmu_cache_range(vma, address, ptep, nr) do { } while (0)
> #define update_mmu_cache(vma, address, ptep) do { } while (0)
>
> extern pgd_t swapper_pg_dir[PTRS_PER_PGD];
> diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
> index 7f5353e28516..12aef25944aa 100644
> --- a/arch/ia64/mm/init.c
> +++ b/arch/ia64/mm/init.c
> @@ -50,30 +50,39 @@ void
> __ia64_sync_icache_dcache (pte_t pte)
> {
> unsigned long addr;
> - struct page *page;
> + struct folio *folio;
>
> - page = pte_page(pte);
> - addr = (unsigned long) page_address(page);
> + folio = page_folio(pte_page(pte));
> + addr = (unsigned long)folio_address(folio);
>
> - if (test_bit(PG_arch_1, &page->flags))
> + if (test_bit(PG_arch_1, &folio->flags))
> return; /* i-cache is already coherent with d-cache */
>
> - flush_icache_range(addr, addr + page_size(page));
> - set_bit(PG_arch_1, &page->flags); /* mark page as clean */
> + flush_icache_range(addr, addr + folio_size(folio));
> + set_bit(PG_arch_1, &folio->flags); /* mark page as clean */
> }
>
> /*
> - * Since DMA is i-cache coherent, any (complete) pages that were written via
> + * Since DMA is i-cache coherent, any (complete) folios that were written via
> * DMA can be marked as "clean" so that lazy_mmu_prot_update() doesn't have to
> * flush them when they get mapped into an executable vm-area.
> */
> void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
> {
> - unsigned long pfn = PHYS_PFN(paddr);
> + struct folio *folio = page_folio(phys_to_page(paddr));
> + ssize_t left = size;
> + size_t offset = offset_in_folio(folio, paddr);
Build of defconfig failed miserably for me without this:
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index 12aef25944aa..0775e7870257 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -69,7 +69,8 @@ __ia64_sync_icache_dcache (pte_t pte)
*/
void arch_dma_mark_clean(phys_addr_t paddr, size_t size)
{
- struct folio *folio = page_folio(phys_to_page(paddr));
+ unsigned long pfn = __phys_to_pfn(paddr);
+ struct folio *folio = page_folio(pfn_to_page(pfn));
ssize_t left = size;
size_t offset = offset_in_folio(folio, paddr);
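
Presumably the root cause is that ia64 simply has no phys_to_page()
definition, while __phys_to_pfn() plus pfn_to_page() are available in every
configuration, so going through the pfn sidesteps the missing helper. (That
diagnosis is my assumption; what I verified is that defconfig builds again
with the diff above.)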
>
> - do {
> - set_bit(PG_arch_1, &pfn_to_page(pfn)->flags);
> - } while (++pfn <= PHYS_PFN(paddr + size - 1));
> + if (offset) {
> + left -= folio_size(folio) - offset;
> + folio = folio_next(folio);
> + }
> +
> + while (left >= (ssize_t)folio_size(folio)) {
> + set_bit(PG_arch_1, &folio->flags);
> + left -= folio_size(folio);
> + folio = folio_next(folio);
> + }
> }
>
> inline void
> --
> 2.39.1
>
--
Sincerely yours,
Mike.