From: Tong Tiangen <tongtiangen@huawei.com>
To: Andrew Morton <akpm@linux-foundation.org>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>, Borislav Petkov <bp@alien8.de>,
	Dave Hansen <dave.hansen@linux.intel.com>,
	Catalin Marinas <catalin.marinas@arm.com>,
	Will Deacon <will@kernel.org>,
	Alexander Viro <viro@zeniv.linux.org.uk>, <x86@kernel.org>,
	"H. Peter Anvin" <hpa@zytor.com>
Cc: <linux-arm-kernel@lists.infradead.org>,
	<linux-kernel@vger.kernel.org>, <linux-mm@kvack.org>,
	Tong Tiangen <tongtiangen@huawei.com>
Subject: [RFC PATCH -next V2 7/7] arm64: add pagecache reading to machine check safe
Date: Wed, 6 Apr 2022 09:13:11 +0000	[thread overview]
Message-ID: <20220406091311.3354723-8-tongtiangen@huawei.com> (raw)
In-Reply-To: <20220406091311.3354723-1-tongtiangen@huawei.com>

When a user process reads a file, the data is cached in the pagecache
and belongs to that process. When a machine check error is encountered
during pagecache reading, killing the user process and isolating the
page with the hardware memory error is a more reasonable choice than a
kernel panic.

__arch_copy_mc_to_user() in copy_to_user_mc.S is largely borrowed from
__arch_copy_to_user() in copy_to_user.S; the main difference is that
__arch_copy_mc_to_user() adds the extable entries needed for machine
check safe handling.

In __copy_page_to_iter(), machine check safe copying is only considered
for ITER_IOVEC, which is the iterator type used by pagecache reading.
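
To illustrate the resulting behaviour, below is a standalone simulation
(plain userspace C, not kernel code; CHUNK, POISON_AT and the sim_*
helper are invented for the example) of how the iovec copy loop stops
at the failing chunk, so the caller ends up with a short read instead
of a kernel panic:

#include <stdio.h>
#include <string.h>

#define CHUNK		4	/* pretend each iovec segment holds 4 bytes */
#define POISON_AT	10	/* simulated poisoned byte offset in the page */

/* stand-in for copyout_mc(): returns the number of bytes NOT copied */
static size_t sim_copyout_mc(char *to, const char *from, size_t off, size_t n)
{
	size_t ok = n;

	if (off + n > POISON_AT)
		ok = POISON_AT > off ? POISON_AT - off : 0;
	memcpy(to, from, ok);
	return n - ok;
}

int main(void)
{
	char page[16] = "pagecache bytes";
	char ubuf[16] = { 0 };
	size_t copied = 0, bytes = sizeof(page);

	while (copied < bytes) {
		size_t n = bytes - copied < CHUNK ? bytes - copied : CHUNK;
		size_t left = sim_copyout_mc(ubuf + copied, page + copied,
					     copied, n);

		copied += n - left;
		if (left)	/* simulated machine check: stop copying */
			break;
	}
	printf("short read: %zu of %zu bytes\n", copied, bytes);
	return 0;
}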

Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/uaccess.h | 15 ++++++
 arch/arm64/lib/Makefile          |  2 +-
 arch/arm64/lib/copy_to_user_mc.S | 78 +++++++++++++++++++++++++++++
 include/linux/uio.h              |  9 +++-
 lib/iov_iter.c                   | 85 +++++++++++++++++++++++++-------
 5 files changed, 170 insertions(+), 19 deletions(-)
 create mode 100644 arch/arm64/lib/copy_to_user_mc.S

diff --git a/arch/arm64/include/asm/uaccess.h b/arch/arm64/include/asm/uaccess.h
index 24b662407fbd..f0d5e811165a 100644
--- a/arch/arm64/include/asm/uaccess.h
+++ b/arch/arm64/include/asm/uaccess.h
@@ -448,6 +448,21 @@ extern long strncpy_from_user(char *dest, const char __user *src, long count);
 
 extern __must_check long strnlen_user(const char __user *str, long n);
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+extern unsigned long __must_check __arch_copy_mc_to_user(void __user *to,
+							 const void *from, unsigned long n);
+static inline unsigned long __must_check
+copy_mc_to_user(void __user *to, const void *from, unsigned long n)
+{
+	uaccess_ttbr0_enable();
+	n = __arch_copy_mc_to_user(__uaccess_mask_ptr(to), from, n);
+	uaccess_ttbr0_disable();
+
+	return n;
+}
+#define copy_mc_to_user copy_mc_to_user
+#endif
+
 #ifdef CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE
 struct page;
 void memcpy_page_flushcache(char *to, struct page *page, size_t offset, size_t len);
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index 29c578414b12..9b3571227fb4 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -23,4 +23,4 @@ obj-$(CONFIG_ARM64_MTE) += mte.o
 
 obj-$(CONFIG_KASAN_SW_TAGS) += kasan_sw_tags.o
 
-obj-$(CONFIG_ARCH_HAS_CPY_MC) += copy_page_mc.o
+obj-$(CONFIG_ARCH_HAS_COPY_MC) += copy_page_mc.o copy_to_user_mc.o
diff --git a/arch/arm64/lib/copy_to_user_mc.S b/arch/arm64/lib/copy_to_user_mc.S
new file mode 100644
index 000000000000..9d228ff15446
--- /dev/null
+++ b/arch/arm64/lib/copy_to_user_mc.S
@@ -0,0 +1,78 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/linkage.h>
+
+#include <asm/asm-uaccess.h>
+#include <asm/assembler.h>
+#include <asm/cache.h>
+
+/*
+ * Copy to user space from a kernel buffer (alignment handled by the hardware)
+ *
+ * Parameters:
+ *	x0 - to
+ *	x1 - from
+ *	x2 - n
+ * Returns:
+ *	x0 - bytes not copied
+ */
+	.macro ldrb1 reg, ptr, val
+	1000: ldrb  \reg, [\ptr], \val;
+	_asm_extable_mc 1000b, 9998f;
+	.endm
+
+	.macro strb1 reg, ptr, val
+	user_ldst_mc 9998f, sttrb, \reg, \ptr, \val
+	.endm
+
+	.macro ldrh1 reg, ptr, val
+	1001: ldrh  \reg, [\ptr], \val;
+	_asm_extable_mc 1001b, 9998f;
+	.endm
+
+	.macro strh1 reg, ptr, val
+	user_ldst_mc 9997f, sttrh, \reg, \ptr, \val
+	.endm
+
+	.macro ldr1 reg, ptr, val
+	1002: ldr \reg, [\ptr], \val;
+	_asm_extable_mc 1002b, 9998f;
+	.endm
+
+	.macro str1 reg, ptr, val
+	user_ldst_mc 9997f, sttr, \reg, \ptr, \val
+	.endm
+
+	.macro ldp1 reg1, reg2, ptr, val
+	1003: ldp \reg1, \reg2, [\ptr], \val;
+	_asm_extable_mc 1003b, 9998f;
+	.endm
+
+	.macro stp1 reg1, reg2, ptr, val
+	user_stp 9997f, \reg1, \reg2, \ptr, \val
+	.endm
+
+end	.req	x5
+srcin	.req	x15
+SYM_FUNC_START(__arch_copy_mc_to_user)
+	add	end, x0, x2
+	mov	srcin, x1
+#include "copy_template.S"
+	mov	x0, #0
+	ret
+
+	// Exception fixups
+9997:	cbz	x0, 9998f			// Check machine check exception
+	cmp	dst, dstin
+	b.ne	9998f
+	// Before being absolutely sure we couldn't copy anything, try harder
+	ldrb	tmp1w, [srcin]
+USER(9998f, sttrb tmp1w, [dst])
+	add	dst, dst, #1
+9998:	sub	x0, end, dst			// bytes not copied
+	ret
+SYM_FUNC_END(__arch_copy_mc_to_user)
+EXPORT_SYMBOL(__arch_copy_mc_to_user)
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 739285fe5a2f..539d9ee9b032 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -147,10 +147,17 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter(void *addr, size_t bytes, struct iov_iter *i);
 size_t _copy_from_iter_nocache(void *addr, size_t bytes, struct iov_iter *i);
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+size_t copy_mc_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			    struct iov_iter *i);
+#else
+#define copy_mc_page_to_iter copy_page_to_iter
+#endif
+
 static inline size_t copy_folio_to_iter(struct folio *folio, size_t offset,
 		size_t bytes, struct iov_iter *i)
 {
-	return copy_page_to_iter(&folio->page, offset, bytes, i);
+	return copy_mc_page_to_iter(&folio->page, offset, bytes, i);
 }
 
 static __always_inline __must_check
diff --git a/lib/iov_iter.c b/lib/iov_iter.c
index 6dd5330f7a99..2c5f3bb6391d 100644
--- a/lib/iov_iter.c
+++ b/lib/iov_iter.c
@@ -157,6 +157,19 @@ static int copyout(void __user *to, const void *from, size_t n)
 	return n;
 }
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+static int copyout_mc(void __user *to, const void *from, size_t n)
+{
+	if (access_ok(to, n)) {
+		instrument_copy_to_user(to, from, n);
+		n = copy_mc_to_user((__force void *) to, from, n);
+	}
+	return n;
+}
+#else
+#define copyout_mc copyout
+#endif
+
 static int copyin(void *to, const void __user *from, size_t n)
 {
 	if (should_fail_usercopy())
@@ -169,7 +182,7 @@ static int copyin(void *to, const void __user *from, size_t n)
 }
 
 static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+			 struct iov_iter *i, bool mc_safe)
 {
 	size_t skip, copy, left, wanted;
 	const struct iovec *iov;
@@ -194,7 +207,10 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		from = kaddr + offset;
 
 		/* first chunk, usually the only one */
-		left = copyout(buf, from, copy);
+		if (mc_safe)
+			left = copyout_mc(buf, from, copy);
+		else
+			left = copyout(buf, from, copy);
 		copy -= left;
 		skip += copy;
 		from += copy;
@@ -204,7 +220,10 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 			iov++;
 			buf = iov->iov_base;
 			copy = min(bytes, iov->iov_len);
-			left = copyout(buf, from, copy);
+			if (mc_safe)
+				left = copyout_mc(buf, from, copy);
+			else
+				left = copyout(buf, from, copy);
 			copy -= left;
 			skip = copy;
 			from += copy;
@@ -223,7 +242,10 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 
 	kaddr = kmap(page);
 	from = kaddr + offset;
-	left = copyout(buf, from, copy);
+	if (mc_safe)
+		left = copyout_mc(buf, from, copy);
+	else
+		left = copyout(buf, from, copy);
 	copy -= left;
 	skip += copy;
 	from += copy;
@@ -232,7 +254,10 @@ static size_t copy_page_to_iter_iovec(struct page *page, size_t offset, size_t b
 		iov++;
 		buf = iov->iov_base;
 		copy = min(bytes, iov->iov_len);
-		left = copyout(buf, from, copy);
+		if (mc_safe)
+			left = copyout_mc(buf, from, copy);
+		else
+			left = copyout(buf, from, copy);
 		copy -= left;
 		skip = copy;
 		from += copy;
@@ -674,15 +699,6 @@ size_t _copy_to_iter(const void *addr, size_t bytes, struct iov_iter *i)
 EXPORT_SYMBOL(_copy_to_iter);
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
-static int copyout_mc(void __user *to, const void *from, size_t n)
-{
-	if (access_ok(to, n)) {
-		instrument_copy_to_user(to, from, n);
-		n = copy_mc_to_user((__force void *) to, from, n);
-	}
-	return n;
-}
-
 static size_t copy_mc_pipe_to_iter(const void *addr, size_t bytes,
 				struct iov_iter *i)
 {
@@ -846,10 +862,10 @@ static inline bool page_copy_sane(struct page *page, size_t offset, size_t n)
 }
 
 static size_t __copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
-			 struct iov_iter *i)
+			 struct iov_iter *i, bool mc_safe)
 {
 	if (likely(iter_is_iovec(i)))
-		return copy_page_to_iter_iovec(page, offset, bytes, i);
+		return copy_page_to_iter_iovec(page, offset, bytes, i, mc_safe);
 	if (iov_iter_is_bvec(i) || iov_iter_is_kvec(i) || iov_iter_is_xarray(i)) {
 		void *kaddr = kmap_local_page(page);
 		size_t wanted = _copy_to_iter(kaddr + offset, bytes, i);
@@ -878,7 +894,7 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 	offset %= PAGE_SIZE;
 	while (1) {
 		size_t n = __copy_page_to_iter(page, offset,
-				min(bytes, (size_t)PAGE_SIZE - offset), i);
+				min(bytes, (size_t)PAGE_SIZE - offset), i, false);
 		res += n;
 		bytes -= n;
 		if (!bytes || !n)
@@ -893,6 +909,41 @@ size_t copy_page_to_iter(struct page *page, size_t offset, size_t bytes,
 }
 EXPORT_SYMBOL(copy_page_to_iter);
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+/**
+ * copy_mc_page_to_iter - copy page to iter with source memory error exception handling.
+ *
+ * filemap_read() uses this for pagecache reading; the only difference from the
+ * regular copy_page_to_iter() is that __copy_page_to_iter() is called with mc_safe true.
+ *
+ * Return: number of bytes copied (may be %0)
+ */
+size_t copy_mc_page_to_iter(struct page *page, size_t offset, size_t bytes,
+			 struct iov_iter *i)
+{
+	size_t res = 0;
+
+	if (unlikely(!page_copy_sane(page, offset, bytes)))
+		return 0;
+	page += offset / PAGE_SIZE; // first subpage
+	offset %= PAGE_SIZE;
+	while (1) {
+		size_t n = __copy_page_to_iter(page, offset,
+				min(bytes, (size_t)PAGE_SIZE - offset), i, true);
+		res += n;
+		bytes -= n;
+		if (!bytes || !n)
+			break;
+		offset += n;
+		if (offset == PAGE_SIZE) {
+			page++;
+			offset = 0;
+		}
+	}
+	return res;
+}
+#endif
+
 size_t copy_page_from_iter(struct page *page, size_t offset, size_t bytes,
 			 struct iov_iter *i)
 {
-- 
2.18.0.huawei.25



Thread overview: 27+ messages
2022-04-06  9:13 [RFC PATCH -next V2 0/7]arm64: add machine check safe support Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 1/7] x86: fix copy_mc_to_user compile error Tong Tiangen
     [not found]   ` <Yk1bu88TcjXw/iU4@zn.tnic>
2022-04-06 10:02     ` Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 2/7] arm64: fix page_address return value in copy_highpage Tong Tiangen
2022-04-06 10:22   ` Mark Rutland
2022-04-06 12:47     ` Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 3/7] arm64: add support for machine check error safe Tong Tiangen
2022-04-06 10:58   ` Mark Rutland
2022-04-07 14:26     ` Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 4/7] arm64: add copy_from_user to machine check safe Tong Tiangen
2022-04-06 11:19   ` Mark Rutland
2022-04-07 14:28     ` Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 5/7] arm64: add get_user " Tong Tiangen
2022-04-06 11:22   ` Mark Rutland
2022-04-07 14:38     ` Tong Tiangen
2022-04-08 15:22       ` Mark Rutland
2022-04-09  9:17         ` Tong Tiangen
2022-04-06  9:13 ` [RFC PATCH -next V2 6/7] arm64: add cow " Tong Tiangen
2022-04-06  9:13 ` Tong Tiangen [this message]
2022-04-06 11:27   ` [RFC PATCH -next V2 7/7] arm64: add pagecache reading " Mark Rutland
2022-04-07 14:56     ` Tong Tiangen
2022-04-07 15:53       ` Robin Murphy
2022-04-08  2:43         ` Tong Tiangen
2022-04-08 11:11           ` Robin Murphy
2022-04-09  9:24             ` Tong Tiangen
2022-04-06 10:04 ` [RFC PATCH -next V2 0/7]arm64: add machine check safe support Mark Rutland
2022-04-07  4:21   ` Tong Tiangen
