From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-arch@vger.kernel.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
linux-mm@kvack.org, linux-kernel@vger.kernel.org,
"James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>,
Helge Deller <deller@gmx.de>,
linux-parisc@vger.kernel.org
Subject: [PATCH v4 19/36] parisc: Implement the new page table range API
Date: Wed, 15 Mar 2023 05:14:27 +0000
Message-ID: <20230315051444.3229621-20-willy@infradead.org>
In-Reply-To: <20230315051444.3229621-1-willy@infradead.org>

Add set_ptes(), update_mmu_cache_range(), flush_dcache_folio()
and flush_icache_pages(). Change the PG_arch_1 (aka PG_dcache_dirty) flag
from being per-page to per-folio.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Helge Deller <deller@gmx.de>
Cc: linux-parisc@vger.kernel.org
---
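Reviewer note: a minimal sketch of how a caller is expected to drive the
batched API (a hypothetical helper, not a call site from this series; the
real callers arrive in the later filemap/rmap patches):

	/* Map "nr" consecutive pages of a folio starting at "addr". */
	static void sketch_map_folio(struct vm_area_struct *vma,
			unsigned long addr, struct folio *folio,
			pte_t *ptep, unsigned int nr)
	{
		pte_t pte = mk_pte(folio_page(folio, 0), vma->vm_page_prot);

		flush_icache_pages(vma, folio_page(folio, 0), nr);
		set_ptes(vma->vm_mm, addr, ptep, pte, nr);
		update_mmu_cache_range(vma, addr, ptep, nr);
	}
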
arch/parisc/include/asm/cacheflush.h | 14 ++--
arch/parisc/include/asm/pgtable.h | 37 ++++++----
arch/parisc/kernel/cache.c | 101 +++++++++++++++++++--------
3 files changed, 103 insertions(+), 49 deletions(-)
diff --git a/arch/parisc/include/asm/cacheflush.h b/arch/parisc/include/asm/cacheflush.h
index 0bdee6724132..2cdc0ea562d6 100644
--- a/arch/parisc/include/asm/cacheflush.h
+++ b/arch/parisc/include/asm/cacheflush.h
@@ -43,16 +43,20 @@ void invalidate_kernel_vmap_range(void *vaddr, int size);
#define flush_cache_vmap(start, end) flush_cache_all()
#define flush_cache_vunmap(start, end) flush_cache_all()
+void flush_dcache_folio(struct folio *folio);
+#define flush_dcache_folio flush_dcache_folio
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
-void flush_dcache_page(struct page *page);
+static inline void flush_dcache_page(struct page *page)
+{
+ flush_dcache_folio(page_folio(page));
+}
#define flush_dcache_mmap_lock(mapping) xa_lock_irq(&mapping->i_pages)
#define flush_dcache_mmap_unlock(mapping) xa_unlock_irq(&mapping->i_pages)
-#define flush_icache_page(vma,page) do { \
- flush_kernel_dcache_page_addr(page_address(page)); \
- flush_kernel_icache_page(page_address(page)); \
-} while (0)
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr);
+#define flush_icache_page(vma, page) flush_icache_pages(vma, page, 1)
#define flush_icache_range(s,e) do { \
flush_kernel_dcache_range_asm(s,e); \
diff --git a/arch/parisc/include/asm/pgtable.h b/arch/parisc/include/asm/pgtable.h
index e2950f5db7c9..ca6afe1980a5 100644
--- a/arch/parisc/include/asm/pgtable.h
+++ b/arch/parisc/include/asm/pgtable.h
@@ -73,15 +73,6 @@ extern void __update_cache(pte_t pte);
mb(); \
} while(0)
-#define set_pte_at(mm, addr, pteptr, pteval) \
- do { \
- if (pte_present(pteval) && \
- pte_user(pteval)) \
- __update_cache(pteval); \
- *(pteptr) = (pteval); \
- purge_tlb_entries(mm, addr); \
- } while (0)
-
#endif /* !__ASSEMBLY__ */
#define pte_ERROR(e) \
@@ -285,7 +276,7 @@ extern unsigned long *empty_zero_page;
#define pte_none(x) (pte_val(x) == 0)
#define pte_present(x) (pte_val(x) & _PAGE_PRESENT)
#define pte_user(x) (pte_val(x) & _PAGE_USER)
-#define pte_clear(mm, addr, xp) set_pte_at(mm, addr, xp, __pte(0))
+#define pte_clear(mm, addr, xp) set_pte(xp, __pte(0))
#define pmd_flag(x) (pmd_val(x) & PxD_FLAG_MASK)
#define pmd_address(x) ((unsigned long)(pmd_val(x) &~ PxD_FLAG_MASK) << PxD_VALUE_SHIFT)
@@ -391,11 +382,29 @@ static inline unsigned long pmd_page_vaddr(pmd_t pmd)
extern void paging_init (void);
+static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pte, unsigned int nr)
+{
+ if (pte_present(pte) && pte_user(pte))
+ __update_cache(pte);
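+ /* Write each PTE, purge its TLB entry, and step the PFN to the next page. */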
+ for (;;) {
+ *ptep = pte;
+ purge_tlb_entries(mm, addr);
+ if (--nr == 0)
+ break;
+ ptep++;
+ pte_val(pte) += 1 << PFN_PTE_SHIFT;
+ addr += PAGE_SIZE;
+ }
+}
+#define set_ptes set_ptes
+
/* Used for deferring calls to flush_dcache_page() */
#define PG_dcache_dirty PG_arch_1
-#define update_mmu_cache(vms,addr,ptep) __update_cache(*ptep)
+#define update_mmu_cache_range(vma, addr, ptep, nr) __update_cache(*ptep)
+#define update_mmu_cache(vma, addr, ptep) __update_cache(*ptep)
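+/* One call covers the whole range: __update_cache() flushes every page of the PTE's folio. */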
/*
* Encode/decode swap entries and swap PTEs. Swap PTEs are all PTEs that
@@ -450,7 +459,7 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma, unsigned
if (!pte_young(pte)) {
return 0;
}
- set_pte_at(vma->vm_mm, addr, ptep, pte_mkold(pte));
+ set_pte(ptep, pte_mkold(pte));
return 1;
}
@@ -460,14 +469,14 @@ static inline pte_t ptep_get_and_clear(struct mm_struct *mm, unsigned long addr,
pte_t old_pte;
old_pte = *ptep;
- set_pte_at(mm, addr, ptep, __pte(0));
+ set_pte(ptep, __pte(0));
return old_pte;
}
static inline void ptep_set_wrprotect(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
- set_pte_at(mm, addr, ptep, pte_wrprotect(*ptep));
+ set_pte(ptep, pte_wrprotect(*ptep));
}
#define pte_same(A,B) (pte_val(A) == pte_val(B))
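
[Reviewer note: with the arch-specific set_pte_at() removed above, parisc
picks up the generic definition added earlier in this series, roughly:

	#define set_pte_at(mm, addr, ptep, pte) set_ptes(mm, addr, ptep, pte, 1)

so __update_cache() now runs once per batch while the TLB purge still
happens for each page.]
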
diff --git a/arch/parisc/kernel/cache.c b/arch/parisc/kernel/cache.c
index 1d3b8bc8a623..ceaa268fc1a6 100644
--- a/arch/parisc/kernel/cache.c
+++ b/arch/parisc/kernel/cache.c
@@ -92,11 +92,11 @@ static inline void flush_data_cache(void)
/* Kernel virtual address of pfn. */
#define pfn_va(pfn) __va(PFN_PHYS(pfn))
-void
-__update_cache(pte_t pte)
+void __update_cache(pte_t pte)
{
unsigned long pfn = pte_pfn(pte);
- struct page *page;
+ struct folio *folio;
+ unsigned int nr;
/* We don't have pte special. As a result, we can be called with
an invalid pfn and we don't need to flush the kernel dcache page.
@@ -104,13 +104,17 @@ __update_cache(pte_t pte)
if (!pfn_valid(pfn))
return;
- page = pfn_to_page(pfn);
- if (page_mapping_file(page) &&
- test_bit(PG_dcache_dirty, &page->flags)) {
- flush_kernel_dcache_page_addr(pfn_va(pfn));
- clear_bit(PG_dcache_dirty, &page->flags);
+ folio = page_folio(pfn_to_page(pfn));
+ pfn = folio_pfn(folio);
+ nr = folio_nr_pages(folio);
+ if (folio_flush_mapping(folio) &&
+ test_bit(PG_dcache_dirty, &folio->flags)) {
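+ /* The folio is dcache-dirty: flush every one of its pages. */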
+ while (nr--)
+ flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
+ clear_bit(PG_dcache_dirty, &folio->flags);
} else if (parisc_requires_coherency())
- flush_kernel_dcache_page_addr(pfn_va(pfn));
+ while (nr--)
+ flush_kernel_dcache_page_addr(pfn_va(pfn + nr));
}
void
@@ -364,6 +368,20 @@ static void flush_user_cache_page(struct vm_area_struct *vma, unsigned long vmad
preempt_enable();
}
+void flush_icache_pages(struct vm_area_struct *vma, struct page *page,
+ unsigned int nr)
+{
+ void *kaddr = page_address(page);
+
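+ /* Push each page's dirty dcache lines to memory before flushing its icache copy. */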
+ for (;;) {
+ flush_kernel_dcache_page_addr(kaddr);
+ flush_kernel_icache_page(kaddr);
+ if (--nr == 0)
+ break;
+ kaddr += PAGE_SIZE;
+ }
+}
+
static inline pte_t *get_ptep(struct mm_struct *mm, unsigned long addr)
{
pte_t *ptep = NULL;
@@ -392,26 +410,30 @@ static inline bool pte_needs_flush(pte_t pte)
== (_PAGE_PRESENT | _PAGE_ACCESSED);
}
-void flush_dcache_page(struct page *page)
+void flush_dcache_folio(struct folio *folio)
{
- struct address_space *mapping = page_mapping_file(page);
- struct vm_area_struct *mpnt;
- unsigned long offset;
+ struct address_space *mapping = folio_flush_mapping(folio);
+ struct vm_area_struct *vma;
unsigned long addr, old_addr = 0;
+ void *kaddr;
unsigned long count = 0;
+ unsigned long i, nr;
pgoff_t pgoff;
if (mapping && !mapping_mapped(mapping)) {
- set_bit(PG_dcache_dirty, &page->flags);
+ set_bit(PG_dcache_dirty, &folio->flags);
return;
}
- flush_kernel_dcache_page_addr(page_address(page));
+ nr = folio_nr_pages(folio);
+ kaddr = folio_address(folio);
+ for (i = 0; i < nr; i++)
+ flush_kernel_dcache_page_addr(kaddr + i * PAGE_SIZE);
if (!mapping)
return;
- pgoff = page->index;
+ pgoff = folio->index;
/*
* We have carefully arranged in arch_get_unmapped_area() that
@@ -421,15 +443,29 @@ void flush_dcache_page(struct page *page)
* on machines that support equivalent aliasing
*/
flush_dcache_mmap_lock(mapping);
- vma_interval_tree_foreach(mpnt, &mapping->i_mmap, pgoff, pgoff) {
- offset = (pgoff - mpnt->vm_pgoff) << PAGE_SHIFT;
- addr = mpnt->vm_start + offset;
- if (parisc_requires_coherency()) {
- pte_t *ptep;
+ vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff + nr - 1) {
+ unsigned long offset = pgoff - vma->vm_pgoff;
+ unsigned long pfn = folio_pfn(folio);
+
+ addr = vma->vm_start;
+ nr = folio_nr_pages(folio);
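+ /*
+ * "offset" can be negative in disguise (unsigned wrap-around) when the
+ * folio starts before this VMA: trim the folio's head to the overlap.
+ * Otherwise start the flush "offset" pages into the VMA.
+ */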
+ if (offset > -nr) {
+ pfn -= offset;
+ nr += offset;
+ } else {
+ addr += offset * PAGE_SIZE;
+ }
+ if (addr + nr * PAGE_SIZE > vma->vm_end)
+ nr = (vma->vm_end - addr) / PAGE_SIZE;
- ptep = get_ptep(mpnt->vm_mm, addr);
- if (ptep && pte_needs_flush(*ptep))
- flush_user_cache_page(mpnt, addr);
+ if (parisc_requires_coherency()) {
+ for (i = 0; i < nr; i++) {
+ pte_t *ptep = get_ptep(vma->vm_mm,
+ addr + i * PAGE_SIZE);
+ if (ptep && pte_needs_flush(*ptep))
+ flush_user_cache_page(vma,
+ addr + i * PAGE_SIZE);
+ }
} else {
/*
* The TLB is the engine of coherence on parisc:
@@ -442,27 +478,32 @@ void flush_dcache_page(struct page *page)
* in (until the user or kernel specifically
* accesses it, of course)
*/
- flush_tlb_page(mpnt, addr);
+ for (i = 0; i < nr; i++)
+ flush_tlb_page(vma, addr + i * PAGE_SIZE);
if (old_addr == 0 || (old_addr & (SHM_COLOUR - 1))
!= (addr & (SHM_COLOUR - 1))) {
- __flush_cache_page(mpnt, addr, page_to_phys(page));
+ for (i = 0; i < nr; i++)
+ __flush_cache_page(vma,
+ addr + i * PAGE_SIZE,
+ (pfn + i) * PAGE_SIZE);
/*
* Software is allowed to have any number
* of private mappings to a page.
*/
- if (!(mpnt->vm_flags & VM_SHARED))
+ if (!(vma->vm_flags & VM_SHARED))
continue;
if (old_addr)
pr_err("INEQUIVALENT ALIASES 0x%lx and 0x%lx in file %pD\n",
- old_addr, addr, mpnt->vm_file);
- old_addr = addr;
+ old_addr, addr, vma->vm_file);
+ if (nr == folio_nr_pages(folio))
+ old_addr = addr;
}
}
WARN_ON(++count == 4096);
}
flush_dcache_mmap_unlock(mapping);
}
-EXPORT_SYMBOL(flush_dcache_page);
+EXPORT_SYMBOL(flush_dcache_folio);
/* Defined in arch/parisc/kernel/pacache.S */
EXPORT_SYMBOL(flush_kernel_dcache_range_asm);
--
2.39.2