From: "Matthew Wilcox (Oracle)" <willy@infradead.org>
To: linux-mm@kvack.org
Cc: "Matthew Wilcox (Oracle)" <willy@infradead.org>
Subject: [PATCH 58/62] mm/kasan: Convert to struct slab
Date: Mon,  4 Oct 2021 14:46:46 +0100	[thread overview]
Message-ID: <20211004134650.4031813-59-willy@infradead.org> (raw)
In-Reply-To: <20211004134650.4031813-1-willy@infradead.org>

Convert the KASAN slab hooks and lookup helpers (kasan_poison_slab(),
nearest_obj(), and the address-to-object resolution used by reports and
the quarantine) from struct page to struct slab.

This should all be split up and done better.

Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>
---
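Note for reviewers: this patch leans on helpers introduced earlier in
the series. As a rough orientation (names from the earlier patches;
the exact definitions there may differ), struct slab is at this point
a typed overlay of struct page, so the conversions below are
type-level rather than behavioural:

	/* Sketch only, assuming the overlay layout from patch 03: */
	static inline struct page *slab_page(const struct slab *slab)
	{
		return (struct page *)slab;	/* same memory, page view */
	}

	static inline struct slab *virt_to_slab(const void *addr)
	{
		/* like virt_to_head_page(), returning the slab view */
		return (struct slab *)virt_to_head_page(addr);
	}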
 include/linux/kasan.h    |  8 ++++----
 include/linux/slab_def.h |  6 +++---
 include/linux/slub_def.h |  8 ++++----
 mm/kasan/common.c        | 23 ++++++++++++-----------
 mm/kasan/generic.c       |  8 ++++----
 mm/kasan/kasan.h         |  2 +-
 mm/kasan/quarantine.c    |  2 +-
 mm/kasan/report.c        | 16 ++++++++--------
 mm/kasan/report_tags.c   | 10 +++++-----
 mm/slab.c                |  2 +-
 mm/slub.c                |  2 +-
 11 files changed, 44 insertions(+), 43 deletions(-)

diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index dd874a1ee862..59c860295618 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -188,11 +188,11 @@ static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
 	return 0;
 }
 
-void __kasan_poison_slab(struct page *page);
-static __always_inline void kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab);
+static __always_inline void kasan_poison_slab(struct slab *slab)
 {
 	if (kasan_enabled())
-		__kasan_poison_slab(page);
+		__kasan_poison_slab(slab);
 }
 
 void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
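For context, kasan_poison_slab() follows KASAN's usual pattern: a
trivial inline wrapper around an out-of-line __kasan_*() worker,
guarded by kasan_enabled(). A minimal sketch of that guard, not part
of this patch:

	/* Generic and SW_TAGS KASAN: a compile-time constant, so the
	 * wrapper compiles away entirely in !CONFIG_KASAN builds;
	 * HW_TAGS uses a boot-time static key instead. */
	static inline bool kasan_enabled(void)
	{
		return IS_ENABLED(CONFIG_KASAN);
	}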
@@ -317,7 +317,7 @@ static inline void kasan_cache_create(struct kmem_cache *cache,
 				      slab_flags_t *flags) {}
 static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
 static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
-static inline void kasan_poison_slab(struct page *page) {}
+static inline void kasan_poison_slab(struct slab *slab) {}
 static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
 					void *object) {}
 static inline void kasan_poison_object_data(struct kmem_cache *cache,
diff --git a/include/linux/slab_def.h b/include/linux/slab_def.h
index f81a41f9d5d1..f1bfcb10f5e0 100644
--- a/include/linux/slab_def.h
+++ b/include/linux/slab_def.h
@@ -87,11 +87,11 @@ struct kmem_cache {
 	struct kmem_cache_node *node[MAX_NUMNODES];
 };
 
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, struct slab *slab,
 				void *x)
 {
-	void *object = x - (x - page->s_mem) % cache->size;
-	void *last_object = page->s_mem + (cache->num - 1) * cache->size;
+	void *object = x - (x - slab->s_mem) % cache->size;
+	void *last_object = slab->s_mem + (cache->num - 1) * cache->size;
 
 	if (unlikely(object > last_object))
 		return last_object;
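A worked example of the arithmetic above, with made-up values:

	/* Suppose slab->s_mem = 0x1000, cache->size = 0x40, cache->num = 8,
	 * and KASAN hands us an interior pointer x = 0x10a8:
	 *   object      = 0x10a8 - ((0x10a8 - 0x1000) % 0x40)
	 *               = 0x10a8 - (0xa8 % 0x40)
	 *               = 0x10a8 - 0x28 = 0x1080      (start of object 2)
	 *   last_object = 0x1000 + (8 - 1) * 0x40 = 0x11c0
	 * 0x1080 <= 0x11c0, so 0x1080 is returned: the base of the
	 * object containing x, clamped to the final object in the slab. */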
diff --git a/include/linux/slub_def.h b/include/linux/slub_def.h
index 994a60da2f2e..4db01470a9e3 100644
--- a/include/linux/slub_def.h
+++ b/include/linux/slub_def.h
@@ -167,11 +167,11 @@ static inline void sysfs_slab_release(struct kmem_cache *s)
 
 void *fixup_red_left(struct kmem_cache *s, void *p);
 
-static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
+static inline void *nearest_obj(struct kmem_cache *cache, struct slab *slab,
 				void *x) {
-	void *object = x - (x - page_address(page)) % cache->size;
-	void *last_object = page_address(page) +
-		(page->objects - 1) * cache->size;
+	void *object = x - (x - slab_address(slab)) % cache->size;
+	void *last_object = slab_address(slab) +
+		(slab->objects - 1) * cache->size;
 	void *result = (unlikely(object > last_object)) ? last_object : object;
 
 	result = fixup_red_left(cache, result);
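The SLUB variant differs from the SLAB one in two ways: objects start
at slab_address() rather than at an s_mem field, and debug caches pad
each object with a left red zone, which fixup_red_left() steps over.
Roughly, from mm/slub.c of this era (shown for context, not changed
here):

	void *fixup_red_left(struct kmem_cache *s, void *p)
	{
		if (kmem_cache_debug_flags(s, SLAB_RED_ZONE))
			p += s->red_left_pad;

		return p;
	}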
diff --git a/mm/kasan/common.c b/mm/kasan/common.c
index f3972af7fa1b..85774174a437 100644
--- a/mm/kasan/common.c
+++ b/mm/kasan/common.c
@@ -247,8 +247,9 @@ struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
 }
 #endif
 
-void __kasan_poison_slab(struct page *page)
+void __kasan_poison_slab(struct slab *slab)
 {
+	struct page *page = slab_page(slab);
 	unsigned long i;
 
 	for (i = 0; i < compound_nr(page); i++)
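The loop body is untouched and therefore elided from the hunk; for
context, a slab may sit on a high-order compound page, so each
constituent page's KASAN tag is reset before the whole range is
poisoned. Roughly, per the kernel of this era:

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison(page_address(page), page_size(page),
		     KASAN_KMALLOC_REDZONE, false);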
@@ -341,7 +342,7 @@ static inline bool ____kasan_slab_free(struct kmem_cache *cache, void *object,
 	if (is_kfence_address(object))
 		return false;
 
-	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
+	if (unlikely(nearest_obj(cache, virt_to_slab(object), object) !=
 	    object)) {
 		kasan_report_invalid_free(tagged_object, ip);
 		return true;
@@ -401,9 +402,9 @@ void __kasan_kfree_large(void *ptr, unsigned long ip)
 
 void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 {
-	struct page *page;
+	struct slab *slab;
 
-	page = virt_to_head_page(ptr);
+	slab = virt_to_slab(ptr);
 
 	/*
 	 * Even though this function is only called for kmem_cache_alloc and
@@ -411,12 +412,12 @@ void __kasan_slab_free_mempool(void *ptr, unsigned long ip)
 	 * kmalloc backed mempool allocations, those pointers can be
 	 * !PageSlab() when the size provided to kmalloc is larger than
 	 * KMALLOC_MAX_SIZE, and kmalloc falls back onto page_alloc.
 	 */
-	if (unlikely(!PageSlab(page))) {
+	if (unlikely(!slab_test_cache(slab))) {
 		if (____kasan_kfree_large(ptr, ip))
 			return;
-		kasan_poison(ptr, page_size(page), KASAN_FREE_PAGE, false);
+		kasan_poison(ptr, slab_size(slab), KASAN_FREE_PAGE, false);
 	} else {
-		____kasan_slab_free(page->slab_cache, ptr, ip, false, false);
+		____kasan_slab_free(slab->slab_cache, ptr, ip, false, false);
 	}
 }
 
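slab_test_cache() is assumed from earlier in the series. A plausible
shape (an assumption, not a quote from that patch) simply tests the
PG_slab bit through the slab's flags word, which overlays page->flags:

	static inline bool slab_test_cache(const struct slab *slab)
	{
		return test_bit(PG_slab, &slab->flags);
	}

Kmalloc sizes larger than KMALLOC_MAX_SIZE come straight from the page
allocator, so the bit is clear for them and the branch above routes
them through the kfree_large path.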
@@ -560,7 +561,7 @@ void * __must_check __kasan_kmalloc_large(const void *ptr, size_t size,
 
 void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flags)
 {
-	struct page *page;
+	struct slab *slab;
 
 	if (unlikely(object == ZERO_SIZE_PTR))
 		return (void *)object;
@@ -572,13 +573,13 @@ void * __must_check __kasan_krealloc(const void *object, size_t size, gfp_t flag
 	 */
 	kasan_unpoison(object, size, false);
 
-	page = virt_to_head_page(object);
+	slab = virt_to_slab(object);
 
 	/* Piggy-back on kmalloc() instrumentation to poison the redzone. */
-	if (unlikely(!PageSlab(page)))
+	if (unlikely(!slab_test_cache(slab)))
 		return __kasan_kmalloc_large(object, size, flags);
 	else
-		return ____kasan_kmalloc(page->slab_cache, object, size, flags);
+		return ____kasan_kmalloc(slab->slab_cache, object, size, flags);
 }
 
 bool __kasan_check_byte(const void *address, unsigned long ip)
diff --git a/mm/kasan/generic.c b/mm/kasan/generic.c
index c3f5ba7a294a..6153f85b90cb 100644
--- a/mm/kasan/generic.c
+++ b/mm/kasan/generic.c
@@ -330,16 +330,16 @@ DEFINE_ASAN_SET_SHADOW(f8);
 
 void kasan_record_aux_stack(void *addr)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 	struct kmem_cache *cache;
 	struct kasan_alloc_meta *alloc_meta;
 	void *object;
 
-	if (is_kfence_address(addr) || !(page && PageSlab(page)))
+	if (is_kfence_address(addr) || !(slab && slab_test_cache(slab)))
 		return;
 
-	cache = page->slab_cache;
-	object = nearest_obj(cache, page, addr);
+	cache = slab->slab_cache;
+	object = nearest_obj(cache, slab, addr);
 	alloc_meta = kasan_get_alloc_meta(cache, object);
 	if (!alloc_meta)
 		return;
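The tail of kasan_record_aux_stack() is unchanged context; once the
object's alloc metadata has been located, the function rotates in a
new auxiliary stack, roughly (per the kernel of this era):

	alloc_meta->aux_stack[1] = alloc_meta->aux_stack[0];
	alloc_meta->aux_stack[0] = kasan_save_stack(GFP_NOWAIT);

This is what lets a report show, for example, the call_rcu() site that
queued a since-freed object.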
diff --git a/mm/kasan/kasan.h b/mm/kasan/kasan.h
index 8bf568a80eb8..8f9aca95db72 100644
--- a/mm/kasan/kasan.h
+++ b/mm/kasan/kasan.h
@@ -249,7 +249,7 @@ bool kasan_report(unsigned long addr, size_t size,
 		bool is_write, unsigned long ip);
 void kasan_report_invalid_free(void *object, unsigned long ip);
 
-struct page *kasan_addr_to_page(const void *addr);
+struct slab *kasan_addr_to_slab(const void *addr);
 
 depot_stack_handle_t kasan_save_stack(gfp_t flags);
 void kasan_set_track(struct kasan_track *track, gfp_t flags);
diff --git a/mm/kasan/quarantine.c b/mm/kasan/quarantine.c
index d8ccff4c1275..587da8995f2d 100644
--- a/mm/kasan/quarantine.c
+++ b/mm/kasan/quarantine.c
@@ -117,7 +117,7 @@ static unsigned long quarantine_batch_size;
 
 static struct kmem_cache *qlink_to_cache(struct qlist_node *qlink)
 {
-	return virt_to_head_page(qlink)->slab_cache;
+	return virt_to_slab(qlink)->slab_cache;
 }
 
 static void *qlink_to_object(struct qlist_node *qlink, struct kmem_cache *cache)
diff --git a/mm/kasan/report.c b/mm/kasan/report.c
index 884a950c7026..49b58221755a 100644
--- a/mm/kasan/report.c
+++ b/mm/kasan/report.c
@@ -151,11 +151,11 @@ static void print_track(struct kasan_track *track, const char *prefix)
 	}
 }
 
-struct page *kasan_addr_to_page(const void *addr)
+struct slab *kasan_addr_to_slab(const void *addr)
 {
 	if ((addr >= (void *)PAGE_OFFSET) &&
 			(addr < high_memory))
-		return virt_to_head_page(addr);
+		return virt_to_slab(addr);
 	return NULL;
 }
 
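kasan_addr_to_slab() only answers for addresses in the kernel's linear
map; anything else (vmalloc space, module text, fixmap) has no struct
page reachable through virt_to_head_page()'s arithmetic, so the
function returns NULL and callers must check for that. The predicate,
pulled out as a hypothetical helper purely for illustration:

	static bool addr_in_linear_map(const void *addr)
	{
		return addr >= (void *)PAGE_OFFSET && addr < high_memory;
	}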
@@ -251,14 +251,14 @@ static inline bool init_task_stack_addr(const void *addr)
 
 static void print_address_description(void *addr, u8 tag)
 {
-	struct page *page = kasan_addr_to_page(addr);
+	struct slab *slab = kasan_addr_to_slab(addr);
 
 	dump_stack_lvl(KERN_ERR);
 	pr_err("\n");
 
-	if (page && PageSlab(page)) {
-		struct kmem_cache *cache = page->slab_cache;
-		void *object = nearest_obj(cache, page,	addr);
+	if (slab && slab_test_cache(slab)) {
+		struct kmem_cache *cache = slab->slab_cache;
+		void *object = nearest_obj(cache, slab,	addr);
 
 		describe_object(cache, object, addr, tag);
 	}
@@ -268,9 +268,9 @@ static void print_address_description(void *addr, u8 tag)
 		pr_err(" %pS\n", addr);
 	}
 
-	if (page) {
+	if (slab) {
 		pr_err("The buggy address belongs to the page:\n");
-		dump_page(page, "kasan: bad access detected");
+		dump_page(slab_page(slab), "kasan: bad access detected");
 	}
 
 	kasan_print_address_stack_frame(addr);
diff --git a/mm/kasan/report_tags.c b/mm/kasan/report_tags.c
index 8a319fc16dab..16a3c55ce698 100644
--- a/mm/kasan/report_tags.c
+++ b/mm/kasan/report_tags.c
@@ -12,7 +12,7 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 #ifdef CONFIG_KASAN_TAGS_IDENTIFY
 	struct kasan_alloc_meta *alloc_meta;
 	struct kmem_cache *cache;
-	struct page *page;
+	struct slab *slab;
 	const void *addr;
 	void *object;
 	u8 tag;
@@ -20,10 +20,10 @@ const char *kasan_get_bug_type(struct kasan_access_info *info)
 
 	tag = get_tag(info->access_addr);
 	addr = kasan_reset_tag(info->access_addr);
-	page = kasan_addr_to_page(addr);
-	if (page && PageSlab(page)) {
-		cache = page->slab_cache;
-		object = nearest_obj(cache, page, (void *)addr);
+	slab = kasan_addr_to_slab(addr);
+	if (slab && slab_test_cache(slab)) {
+		cache = slab->slab_cache;
+		object = nearest_obj(cache, slab, (void *)addr);
 		alloc_meta = kasan_get_alloc_meta(cache, object);
 
 		if (alloc_meta) {
diff --git a/mm/slab.c b/mm/slab.c
index 3e9cd3ecc9ab..8cbb6e91922e 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2612,7 +2612,7 @@ static struct slab *cache_grow_begin(struct kmem_cache *cachep,
 	 * slab_address() in the latter returns a non-tagged pointer,
 	 * as it should be for slab pages.
 	 */
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	/* Get slab management. */
 	freelist = alloc_slabmgmt(cachep, slab, offset,
diff --git a/mm/slub.c b/mm/slub.c
index 659b30afbb58..998c1eefd205 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1918,7 +1918,7 @@ static struct slab *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
 	account_slab(slab, oo_order(oo), s, flags);
 
 	slab->slab_cache = s;
-	kasan_poison_slab(slab_page(slab));
+	kasan_poison_slab(slab);
 
 	start = slab_address(slab);
 
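Taken together with the mm/slab.c hunk above, the call-site change is
the visible payoff of the conversion: the page view no longer escapes
KASAN. Before and after, for the SLUB path:

	/* before: round-trip to the page view just for KASAN */
	kasan_poison_slab(slab_page(slab));

	/* after: hand over the slab; __kasan_poison_slab() derives the
	 * page view internally when it walks the compound page */
	kasan_poison_slab(slab);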
-- 
2.32.0



Thread overview: 77+ messages
2021-10-04 13:45 [PATCH 00/62] Separate struct slab from struct page Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 01/62] mm: Convert page_to_section() to pgflags_section() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 02/62] mm: Add pgflags_nid() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 03/62] mm: Split slab into its own type Matthew Wilcox (Oracle)
2021-10-05 16:10   ` David Hildenbrand
2021-10-05 18:48     ` Matthew Wilcox
2021-10-12  7:25       ` David Hildenbrand
2021-10-12 14:13         ` Matthew Wilcox
2021-10-12 14:17           ` David Hildenbrand
2021-10-13 18:08             ` Johannes Weiner
2021-10-13 18:31               ` Matthew Wilcox
2021-10-14  7:22                 ` David Hildenbrand
2021-10-14 12:44                   ` Johannes Weiner
2021-10-14 13:08                     ` Matthew Wilcox
2021-10-04 13:45 ` [PATCH 04/62] mm: Add account_slab() and unaccount_slab() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 05/62] mm: Convert virt_to_cache() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 06/62] mm: Convert __ksize() to " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 07/62] mm: Use struct slab in kmem_obj_info() Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 08/62] mm: Convert check_heap_object() to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 09/62] mm/slub: Convert process_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 10/62] mm/slub: Convert detached_freelist to use " Matthew Wilcox (Oracle)
2021-10-04 13:45 ` [PATCH 11/62] mm/slub: Convert kfree() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 12/62] mm/slub: Convert __slab_free() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 13/62] mm/slub: Convert new_slab() to return " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 14/62] mm/slub: Convert early_kmem_cache_node_alloc() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 15/62] mm/slub: Convert kmem_cache_cpu to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 16/62] mm/slub: Convert show_slab_objects() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 17/62] mm/slub: Convert validate_slab() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 18/62] mm/slub: Convert count_partial() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 19/62] mm/slub: Convert bootstrap() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 20/62] mm/slub: Convert __kmem_cache_do_shrink() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 21/62] mm/slub: Convert free_partial() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 22/62] mm/slub: Convert list_slab_objects() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 23/62] mm/slub: Convert slab_alloc_node() to use " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 24/62] mm/slub: Convert get_freelist() to take " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 25/62] mm/slub: Convert node_match() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 26/62] mm/slub: Convert slab flushing to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 27/62] mm/slub: Convert __unfreeze_partials to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 28/62] mm/slub: Convert deactivate_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 29/62] mm/slub: Convert acquire_slab() to take a struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 30/62] mm/slub: Convert partial slab management to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 31/62] mm/slub: Convert slab freeing " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 32/62] mm/slub: Convert shuffle_freelist " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 33/62] mm/slub: Remove struct page argument to next_freelist_entry() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 34/62] mm/slub: Remove struct page argument from setup_object() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 35/62] mm/slub: Convert freelist_corrupted() to struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 36/62] mm/slub: Convert full slab management " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 37/62] mm/slub: Convert free_consistency_checks() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 38/62] mm/slub: Convert alloc_debug_processing() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 39/62] mm/slub: Convert check_object() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 40/62] mm/slub: Convert on_freelist() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 41/62] mm/slub: Convert check_slab() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 42/62] mm/slub: Convert check_valid_pointer() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 43/62] mm/slub: Convert object_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 44/62] mm/slub: Convert print_trailer() to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 45/62] mm/slub: Convert slab_err() to take a " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 46/62] mm/slub: Convert print_page_info() to print_slab_info() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 47/62] mm/slub: Convert trace() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 48/62] mm/slub: Convert cmpxchg_double_slab to " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 49/62] mm/slub: Convert get_map() and __fill_map() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 50/62] mm/slub: Convert slab_lock() and slab_unlock() " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 51/62] mm/slub: Convert setup_page_debug() to setup_slab_debug() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 52/62] mm/slub: Convert pfmemalloc_match() to take a struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 53/62] mm/slub: Remove pfmemalloc_match_unsafe() Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 54/62] mm: Convert slab to use struct slab Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 55/62] mm: Convert slob " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 56/62] mm: Convert slub " Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 57/62] memcg: Convert object cgroups from struct page to " Matthew Wilcox (Oracle)
2021-10-11 17:13   ` Johannes Weiner
2021-10-12  3:16     ` Matthew Wilcox
2021-10-04 13:46 ` [PATCH 58/62] mm/kasan: Convert to struct slab Matthew Wilcox (Oracle) [this message]
2021-10-04 13:46 ` [PATCH 59/62] zsmalloc: Stop using slab fields in struct page Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 60/62] bootmem: Use page->index instead of page->freelist Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 61/62] iommu: Use put_pages_list Matthew Wilcox (Oracle)
2021-10-04 13:46 ` [PATCH 62/62] mm: Remove slab from struct page Matthew Wilcox (Oracle)
2021-10-11 20:07 ` [PATCH 00/62] Separate struct " Johannes Weiner
2021-10-12  3:30   ` Matthew Wilcox
