From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>,
kirill.shutemov@linux.intel.com, pasha.tatashin@soleen.com
Subject: [PATCH 1/5] mm: Constify a lot of struct page arguments
Date: Wed, 8 Apr 2020 08:01:44 -0700
Message-ID: <20200408150148.25290-2-willy@infradead.org>
In-Reply-To: <20200408150148.25290-1-willy@infradead.org>
From: "Matthew Wilcox (Oracle)" <willy@infradead.org>

For an upcoming patch, we want to be able to pass a const struct page
to dump_page(). That means some inline functions have to become macros
so that they can return a struct page pointer with the same const-ness
as their argument.
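
For anyone less familiar with the __typeof__()/statement-expression
idiom, here is a minimal userspace sketch (an illustration only, not
kernel code: struct page is reduced to one field, READ_ONCE() is
dropped, and statement expressions are a GCC/clang extension) of how
such a macro propagates the caller's const-ness:

	#include <stdio.h>

	struct page {
		unsigned long compound_head;
	};

	/* Same shape as the compound_head() macro added below. */
	#define compound_head(page) ({					\
		__typeof__(page) _page = (page);			\
		unsigned long _head = _page->compound_head;		\
		if (_head & 1)						\
			_page = (void *)(_head - 1);			\
		_page;							\
	})

	int main(void)
	{
		struct page head = { .compound_head = 0 };
		struct page tail = { .compound_head = (unsigned long)&head + 1 };
		const struct page *cp = &tail;

		/* _page is const struct page *; no const is cast away. */
		const struct page *h = compound_head(cp);

		printf("resolved to head: %d\n", h == &head);
		return 0;
	}

An inline function cannot do this: one taking struct page * would force
const callers to cast away const, while one taking const struct page *
would hand back a const pointer even to callers that passed in a
non-const page.
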
Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
include/linux/mm.h | 28 +++++++++++-----------
include/linux/mm_types.h | 11 ++-------
include/linux/mmdebug.h | 4 ++--
include/linux/page-flags.h | 41 +++++++++++++++------------------
include/linux/page_owner.h | 6 ++---
include/linux/page_ref.h | 4 ++--
include/linux/pageblock-flags.h | 2 +-
include/linux/pagemap.h | 4 ++--
include/linux/swap.h | 2 +-
mm/debug.c | 6 ++---
mm/hugetlb.c | 6 ++---
mm/page_alloc.c | 12 +++++-----
mm/page_owner.c | 2 +-
mm/swapfile.c | 6 ++---
mm/util.c | 6 ++---
15 files changed, 65 insertions(+), 75 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index e2f938c5a9d8..61aa63449e7e 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -764,7 +764,7 @@ static inline void *kvcalloc(size_t n, size_t size, gfp_t flags)
extern void kvfree(const void *addr);
-static inline int compound_mapcount(struct page *page)
+static inline int compound_mapcount(const struct page *page)
{
VM_BUG_ON_PAGE(!PageCompound(page), page);
page = compound_head(page);
@@ -781,9 +781,9 @@ static inline void page_mapcount_reset(struct page *page)
atomic_set(&(page)->_mapcount, -1);
}
-int __page_mapcount(struct page *page);
+int __page_mapcount(const struct page *page);
-static inline int page_mapcount(struct page *page)
+static inline int page_mapcount(const struct page *page)
{
VM_BUG_ON_PAGE(PageSlab(page), page);
@@ -857,14 +857,14 @@ static inline compound_page_dtor *get_compound_page_dtor(struct page *page)
return compound_page_dtors[page[1].compound_dtor];
}
-static inline unsigned int compound_order(struct page *page)
+static inline unsigned int compound_order(const struct page *page)
{
if (!PageHead(page))
return 0;
return page[1].compound_order;
}
-static inline bool hpage_pincount_available(struct page *page)
+static inline bool hpage_pincount_available(const struct page *page)
{
/*
* Can the page->hpage_pinned_refcount field be used? That field is in
@@ -875,7 +875,7 @@ static inline bool hpage_pincount_available(struct page *page)
return PageCompound(page) && compound_order(page) > 1;
}
-static inline int compound_pincount(struct page *page)
+static inline int compound_pincount(const struct page *page)
{
VM_BUG_ON_PAGE(!hpage_pincount_available(page), page);
page = compound_head(page);
@@ -1495,12 +1495,12 @@ void page_address_init(void);
extern void *page_rmapping(struct page *page);
extern struct anon_vma *page_anon_vma(struct page *page);
-extern struct address_space *page_mapping(struct page *page);
+extern struct address_space *page_mapping(const struct page *page);
-extern struct address_space *__page_file_mapping(struct page *);
+extern struct address_space *__page_file_mapping(const struct page *);
static inline
-struct address_space *page_file_mapping(struct page *page)
+struct address_space *page_file_mapping(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_mapping(page);
@@ -1508,13 +1508,13 @@ struct address_space *page_file_mapping(struct page *page)
return page->mapping;
}
-extern pgoff_t __page_file_index(struct page *page);
+extern pgoff_t __page_file_index(const struct page *page);
/*
* Return the pagecache index of the passed page. Regular pagecache pages
* use ->index whereas swapcache pages use swp_offset(->private)
*/
-static inline pgoff_t page_index(struct page *page)
+static inline pgoff_t page_index(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return __page_file_index(page);
@@ -1522,15 +1522,15 @@ static inline pgoff_t page_index(struct page *page)
}
bool page_mapped(struct page *page);
-struct address_space *page_mapping(struct page *page);
-struct address_space *page_mapping_file(struct page *page);
+struct address_space *page_mapping(const struct page *page);
+struct address_space *page_mapping_file(const struct page *page);
/*
* Return true only if the page has been allocated with
* ALLOC_NO_WATERMARKS and the low watermark was not
* met implying that the system is under some pressure.
*/
-static inline bool page_is_pfmemalloc(struct page *page)
+static inline bool page_is_pfmemalloc(const struct page *page)
{
/*
* Page index cannot be this large so this must be
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 4aba6c0c2ba8..a8c3fa076f43 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -221,15 +221,8 @@ struct page {
#endif
} _struct_page_alignment;
-static inline atomic_t *compound_mapcount_ptr(struct page *page)
-{
- return &page[1].compound_mapcount;
-}
-
-static inline atomic_t *compound_pincount_ptr(struct page *page)
-{
- return &page[2].hpage_pinned_refcount;
-}
+#define compound_mapcount_ptr(page) (&(page)[1].compound_mapcount)
+#define compound_pincount_ptr(page) (&(page)[2].hpage_pinned_refcount)
/*
* Used for sizing the vmemmap region on some architectures
diff --git a/include/linux/mmdebug.h b/include/linux/mmdebug.h
index 2ad72d2c8cc5..71246df469a0 100644
--- a/include/linux/mmdebug.h
+++ b/include/linux/mmdebug.h
@@ -9,8 +9,8 @@ struct page;
struct vm_area_struct;
struct mm_struct;
-extern void dump_page(struct page *page, const char *reason);
-extern void __dump_page(struct page *page, const char *reason);
+extern void dump_page(const struct page *page, const char *reason);
+extern void __dump_page(const struct page *page, const char *reason);
void dump_vma(const struct vm_area_struct *vma);
void dump_mm(const struct mm_struct *mm);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 222f6f7b2bb3..f1ab1f2e6aba 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -175,23 +175,20 @@ enum pageflags {
#ifndef __GENERATING_BOUNDS_H
-struct page; /* forward declaration */
+#define compound_head(page) ({ \
+ __typeof__(page) _page = page; \
+ unsigned long head = READ_ONCE(_page->compound_head); \
+ if (unlikely(head & 1)) \
+ _page = (void *)(head - 1); \
+ _page; \
+})
-static inline struct page *compound_head(struct page *page)
-{
- unsigned long head = READ_ONCE(page->compound_head);
-
- if (unlikely(head & 1))
- return (struct page *) (head - 1);
- return page;
-}
-
-static __always_inline int PageTail(struct page *page)
+static __always_inline int PageTail(const struct page *page)
{
return READ_ONCE(page->compound_head) & 1;
}
-static __always_inline int PageCompound(struct page *page)
+static __always_inline int PageCompound(const struct page *page)
{
return test_bit(PG_head, &page->flags) || PageTail(page);
}
@@ -252,7 +249,7 @@ static inline void page_init_poison(struct page *page, size_t size)
* Macros to create function definitions for page flags
*/
#define TESTPAGEFLAG(uname, lname, policy) \
-static __always_inline int Page##uname(struct page *page) \
+static __always_inline int Page##uname(const struct page *page) \
{ return test_bit(PG_##lname, &policy(page, 0)->flags); }
#define SETPAGEFLAG(uname, lname, policy) \
@@ -385,7 +382,7 @@ PAGEFLAG_FALSE(HighMem)
#endif
#ifdef CONFIG_SWAP
-static __always_inline int PageSwapCache(struct page *page)
+static __always_inline int PageSwapCache(const struct page *page)
{
#ifdef CONFIG_THP_SWAP
page = compound_head(page);
@@ -474,7 +471,7 @@ static __always_inline int PageMappingFlags(struct page *page)
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
}
-static __always_inline int PageAnon(struct page *page)
+static __always_inline int PageAnon(const struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
@@ -493,7 +490,7 @@ static __always_inline int __PageMovable(struct page *page)
* is found in VM_MERGEABLE vmas. It's a PageAnon page, pointing not to any
* anon_vma, but to that page's node of the stable tree.
*/
-static __always_inline int PageKsm(struct page *page)
+static __always_inline int PageKsm(const struct page *page)
{
page = compound_head(page);
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
@@ -586,14 +583,14 @@ static inline void ClearPageCompound(struct page *page)
#define PG_head_mask ((1UL << PG_head))
#ifdef CONFIG_HUGETLB_PAGE
-int PageHuge(struct page *page);
-int PageHeadHuge(struct page *page);
-bool page_huge_active(struct page *page);
+int PageHuge(const struct page *page);
+int PageHeadHuge(const struct page *page);
+bool page_huge_active(const struct page *page);
#else
TESTPAGEFLAG_FALSE(Huge)
TESTPAGEFLAG_FALSE(HeadHuge)
-static inline bool page_huge_active(struct page *page)
+static inline bool page_huge_active(const struct page *page)
{
return 0;
}
@@ -667,7 +664,7 @@ static inline int PageTransCompoundMap(struct page *page)
* and hugetlbfs pages, so it should only be called when it's known
* that hugetlbfs pages aren't involved.
*/
-static inline int PageTransTail(struct page *page)
+static inline int PageTransTail(const struct page *page)
{
return PageTail(page);
}
@@ -685,7 +682,7 @@ static inline int PageTransTail(struct page *page)
*
* See also __split_huge_pmd_locked() and page_remove_anon_compound_rmap().
*/
-static inline int PageDoubleMap(struct page *page)
+static inline int PageDoubleMap(const struct page *page)
{
return PageHead(page) && test_bit(PG_double_map, &page[1].flags);
}
diff --git a/include/linux/page_owner.h b/include/linux/page_owner.h
index 8679ccd722e8..16d4885e2f7a 100644
--- a/include/linux/page_owner.h
+++ b/include/linux/page_owner.h
@@ -14,7 +14,7 @@ extern void __set_page_owner(struct page *page,
extern void __split_page_owner(struct page *page, unsigned int order);
extern void __copy_page_owner(struct page *oldpage, struct page *newpage);
extern void __set_page_owner_migrate_reason(struct page *page, int reason);
-extern void __dump_page_owner(struct page *page);
+extern void __dump_page_owner(const struct page *page);
extern void pagetypeinfo_showmixedcount_print(struct seq_file *m,
pg_data_t *pgdat, struct zone *zone);
@@ -46,7 +46,7 @@ static inline void set_page_owner_migrate_reason(struct page *page, int reason)
if (static_branch_unlikely(&page_owner_inited))
__set_page_owner_migrate_reason(page, reason);
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
if (static_branch_unlikely(&page_owner_inited))
__dump_page_owner(page);
@@ -69,7 +69,7 @@ static inline void copy_page_owner(struct page *oldpage, struct page *newpage)
static inline void set_page_owner_migrate_reason(struct page *page, int reason)
{
}
-static inline void dump_page_owner(struct page *page)
+static inline void dump_page_owner(const struct page *page)
{
}
#endif /* CONFIG_PAGE_OWNER */
diff --git a/include/linux/page_ref.h b/include/linux/page_ref.h
index d27701199a4d..f2c4872f50f0 100644
--- a/include/linux/page_ref.h
+++ b/include/linux/page_ref.h
@@ -62,12 +62,12 @@ static inline void __page_ref_unfreeze(struct page *page, int v)
#endif
-static inline int page_ref_count(struct page *page)
+static inline int page_ref_count(const struct page *page)
{
return atomic_read(&page->_refcount);
}
-static inline int page_count(struct page *page)
+static inline int page_count(const struct page *page)
{
return atomic_read(&compound_head(page)->_refcount);
}
diff --git a/include/linux/pageblock-flags.h b/include/linux/pageblock-flags.h
index c066fec5b74b..aa13dca7bf04 100644
--- a/include/linux/pageblock-flags.h
+++ b/include/linux/pageblock-flags.h
@@ -54,7 +54,7 @@ extern unsigned int pageblock_order;
/* Forward declaration */
struct page;
-unsigned long get_pfnblock_flags_mask(struct page *page,
+unsigned long get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask);
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index a8f7bd8ea1c6..ca1aaa8ce813 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -401,7 +401,7 @@ static inline struct page *read_mapping_page(struct address_space *mapping,
* Get index of the page with in radix-tree
* (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
*/
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_index(const struct page *page)
{
pgoff_t pgoff;
@@ -421,7 +421,7 @@ static inline pgoff_t page_to_index(struct page *page)
* Get the offset in PAGE_SIZE.
* (TODO: hugepage should have ->index in PAGE_SIZE)
*/
-static inline pgoff_t page_to_pgoff(struct page *page)
+static inline pgoff_t page_to_pgoff(const struct page *page)
{
if (unlikely(PageHeadHuge(page)))
return page->index << compound_order(page);
diff --git a/include/linux/swap.h b/include/linux/swap.h
index b835d8dbea0e..6e602ce0eb0c 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -464,7 +464,7 @@ extern int page_swapcount(struct page *);
extern int __swap_count(swp_entry_t entry);
extern int __swp_swapcount(swp_entry_t entry);
extern int swp_swapcount(swp_entry_t entry);
-extern struct swap_info_struct *page_swap_info(struct page *);
+extern struct swap_info_struct *page_swap_info(const struct page *);
extern struct swap_info_struct *swp_swap_info(swp_entry_t entry);
extern bool reuse_swap_page(struct page *, int *);
extern int try_to_free_swap(struct page *);
diff --git a/mm/debug.c b/mm/debug.c
index 2189357f0987..69862d3b04e5 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -42,9 +42,9 @@ const struct trace_print_flags vmaflag_names[] = {
{0, NULL}
};
-void __dump_page(struct page *page, const char *reason)
+void __dump_page(const struct page *page, const char *reason)
{
- struct page *head = compound_head(page);
+ const struct page *head = compound_head(page);
struct address_space *mapping;
bool page_poisoned = PagePoisoned(page);
bool compound = PageCompound(page);
@@ -140,7 +140,7 @@ void __dump_page(struct page *page, const char *reason)
#endif
}
-void dump_page(struct page *page, const char *reason)
+void dump_page(const struct page *page, const char *reason)
{
__dump_page(page, reason);
dump_page_owner(page);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index f5fb53fdfa02..0131974369cb 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1305,7 +1305,7 @@ struct hstate *size_to_hstate(unsigned long size)
*
* This function can be called for tail pages, but never returns true for them.
*/
-bool page_huge_active(struct page *page)
+bool page_huge_active(const struct page *page)
{
VM_BUG_ON_PAGE(!PageHuge(page), page);
return PageHead(page) && PagePrivate(&page[1]);
@@ -1509,7 +1509,7 @@ static void prep_compound_gigantic_page(struct page *page, unsigned int order)
* transparent huge pages. See the PageTransHuge() documentation for more
* details.
*/
-int PageHuge(struct page *page)
+int PageHuge(const struct page *page)
{
if (!PageCompound(page))
return 0;
@@ -1523,7 +1523,7 @@ EXPORT_SYMBOL_GPL(PageHuge);
* PageHeadHuge() only returns true for hugetlbfs head page, but not for
* normal or transparent huge pages.
*/
-int PageHeadHuge(struct page *page_head)
+int PageHeadHuge(const struct page *page_head)
{
if (!PageHead(page_head))
return 0;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 114c56c3685d..9634c6e44197 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -446,7 +446,7 @@ static inline bool defer_init(int nid, unsigned long pfn, unsigned long end_pfn)
#endif
/* Return a pointer to the bitmap storing bits affecting a block of pages */
-static inline unsigned long *get_pageblock_bitmap(struct page *page,
+static inline unsigned long *get_pageblock_bitmap(const struct page *page,
unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
@@ -456,7 +456,7 @@ static inline unsigned long *get_pageblock_bitmap(struct page *page,
#endif /* CONFIG_SPARSEMEM */
}
-static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
+static inline int pfn_to_bitidx(const struct page *page, unsigned long pfn)
{
#ifdef CONFIG_SPARSEMEM
pfn &= (PAGES_PER_SECTION-1);
@@ -476,7 +476,8 @@ static inline int pfn_to_bitidx(struct page *page, unsigned long pfn)
*
* Return: pageblock_bits flags
*/
-static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page,
+static __always_inline
+unsigned long __get_pfnblock_flags_mask(const struct page *page,
unsigned long pfn,
unsigned long end_bitidx,
unsigned long mask)
@@ -495,9 +496,8 @@ static __always_inline unsigned long __get_pfnblock_flags_mask(struct page *page
return (word >> (BITS_PER_LONG - bitidx - 1)) & mask;
}
-unsigned long get_pfnblock_flags_mask(struct page *page, unsigned long pfn,
- unsigned long end_bitidx,
- unsigned long mask)
+unsigned long get_pfnblock_flags_mask(const struct page *page,
+ unsigned long pfn, unsigned long end_bitidx, unsigned long mask)
{
return __get_pfnblock_flags_mask(page, pfn, end_bitidx, mask);
}
diff --git a/mm/page_owner.c b/mm/page_owner.c
index 18ecde9f45b2..a22afbb95c46 100644
--- a/mm/page_owner.c
+++ b/mm/page_owner.c
@@ -399,7 +399,7 @@ print_page_owner(char __user *buf, size_t count, unsigned long pfn,
return -ENOMEM;
}
-void __dump_page_owner(struct page *page)
+void __dump_page_owner(const struct page *page)
{
struct page_ext *page_ext = lookup_page_ext(page);
struct page_owner *page_owner;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 5871a2aa86a5..9d9e01f1716c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -3489,7 +3489,7 @@ struct swap_info_struct *swp_swap_info(swp_entry_t entry)
return swap_type_to_swap_info(swp_type(entry));
}
-struct swap_info_struct *page_swap_info(struct page *page)
+struct swap_info_struct *page_swap_info(const struct page *page)
{
swp_entry_t entry = { .val = page_private(page) };
return swp_swap_info(entry);
@@ -3498,13 +3498,13 @@ struct swap_info_struct *page_swap_info(struct page *page)
/*
* out-of-line __page_file_ methods to avoid include hell.
*/
-struct address_space *__page_file_mapping(struct page *page)
+struct address_space *__page_file_mapping(const struct page *page)
{
return page_swap_info(page)->swap_file->f_mapping;
}
EXPORT_SYMBOL_GPL(__page_file_mapping);
-pgoff_t __page_file_index(struct page *page)
+pgoff_t __page_file_index(const struct page *page)
{
swp_entry_t swap = { .val = page_private(page) };
return swp_offset(swap);
diff --git a/mm/util.c b/mm/util.c
index 988d11e6c17c..05f5b36f81f9 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -655,7 +655,7 @@ struct anon_vma *page_anon_vma(struct page *page)
return __page_rmapping(page);
}
-struct address_space *page_mapping(struct page *page)
+struct address_space *page_mapping(const struct page *page)
{
struct address_space *mapping;
@@ -683,7 +683,7 @@ EXPORT_SYMBOL(page_mapping);
/*
* For file cache pages, return the address_space, otherwise return NULL
*/
-struct address_space *page_mapping_file(struct page *page)
+struct address_space *page_mapping_file(const struct page *page)
{
if (unlikely(PageSwapCache(page)))
return NULL;
@@ -691,7 +691,7 @@ struct address_space *page_mapping_file(struct page *page)
}
/* Slow path of page_mapcount() for compound pages */
-int __page_mapcount(struct page *page)
+int __page_mapcount(const struct page *page)
{
int ret;
--
2.25.1