From: <artem.kuzin@huawei.com>
To: <x86@kernel.org>, <tglx@linutronix.de>, <mingo@redhat.com>,
	<bp@alien8.de>, <dave.hansen@linux.intel.com>, <hpa@zytor.com>,
	<luto@kernel.org>, <peterz@infradead.org>,
	<akpm@linux-foundation.org>, <urezki@gmail.com>,
	<hch@infradead.org>, <lstoakes@gmail.com>, <mcgrof@kernel.org>,
	<rmk+kernel@armlinux.org.uk>
Cc: <nikita.panov@huawei-partners.com>,
	<alexander.grubnikov@huawei.com>, <stepanov.anatoly@huawei.com>,
	<guohanjun@huawei.com>, <weiyongjun1@huawei.com>,
	<wangkefeng.wang@huawei.com>, <judy.chenhui@huawei.com>,
	<yusongping@huawei.com>, <kang.sun@huawei.com>,
	<linux-mm@kvack.org>, <linux-modules@vger.kernel.org>
Subject: [PATCH RFC 04/12] x86: add memory protection support for NUMA replicas
Date: Thu, 28 Dec 2023 21:10:48 +0800
Message-ID: <20231228131056.602411-5-artem.kuzin@huawei.com>
In-Reply-To: <20231228131056.602411-1-artem.kuzin@huawei.com>

From: Artem Kuzin <artem.kuzin@huawei.com>

Introduce a numa_set_memory_*() family of helpers that applies page
attribute changes (RO/RW, X/NX, present/not-present, global/non-global)
to every per-NUMA-node replica of the kernel page tables. On x86 this is
implemented by extending change_page_attr_set_clr() with a *_pgd()
variant that takes an explicit PGD and by iterating over the replicated
PGDs in init_mm.pgd_numa[]. When CONFIG_KERNEL_REPLICATION is disabled,
the helpers fall back to the existing set_memory_*() API.

Co-developed-by: Nikita Panov <nikita.panov@huawei-partners.com>
Signed-off-by: Nikita Panov <nikita.panov@huawei-partners.com>
Co-developed-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Alexander Grubnikov <alexander.grubnikov@huawei.com>
Signed-off-by: Artem Kuzin <artem.kuzin@huawei.com>
---
 arch/x86/include/asm/set_memory.h |  14 +++
 arch/x86/mm/pat/set_memory.c      | 150 +++++++++++++++++++++++++++++-
 include/asm-generic/set_memory.h  |  12 +++
 include/linux/set_memory.h        |  10 ++
 4 files changed, 185 insertions(+), 1 deletion(-)
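
A minimal usage sketch, assuming a module-loader-style caller (the
function names and context below are hypothetical, not part of this
patch): numa_set_memory_rox() and numa_set_memory_rw() are the helpers
added here, and the numa_* family maps back to the plain set_memory_*()
variants when CONFIG_KERNEL_REPLICATION is off.

#include <linux/mm.h>
#include <linux/set_memory.h>

/* Hypothetical caller: seal a replicated text range on every node. */
static int protect_replicated_text(unsigned long addr, unsigned long size)
{
	int numpages = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * Clears _PAGE_RW and (where NX is supported) _PAGE_NX in each
	 * per-node PGD, leaving every replica read-only and executable.
	 */
	return numa_set_memory_rox(addr, numpages);
}

/* Hypothetical caller: reopen the range, e.g. around text patching. */
static int unprotect_replicated_text(unsigned long addr, unsigned long size)
{
	return numa_set_memory_rw(addr, PAGE_ALIGN(size) >> PAGE_SHIFT);
}

An architecture that does not provide its own numa_set_memory_rox()
gets the generic fallback added to include/linux/set_memory.h below,
which simply composes numa_set_memory_ro() and numa_set_memory_x().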

diff --git a/arch/x86/include/asm/set_memory.h b/arch/x86/include/asm/set_memory.h
index a5e89641bd2d..1efa15a08ef0 100644
--- a/arch/x86/include/asm/set_memory.h
+++ b/arch/x86/include/asm/set_memory.h
@@ -7,7 +7,9 @@
 #include <asm-generic/set_memory.h>
 
 #define set_memory_rox set_memory_rox
+#define numa_set_memory_rox numa_set_memory_rox
 int set_memory_rox(unsigned long addr, int numpages);
+int numa_set_memory_rox(unsigned long addr, int numpages);
 
 /*
  * The set_memory_* API can be used to change various attributes of a virtual
@@ -58,6 +60,18 @@ int set_pages_array_uc(struct page **pages, int addrinarray);
 int set_pages_array_wc(struct page **pages, int addrinarray);
 int set_pages_array_wb(struct page **pages, int addrinarray);
 
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_np(unsigned long addr, int numpages);
+int numa_set_memory_np_noalias(unsigned long addr, int numpages);
+int numa_set_memory_global(unsigned long addr, int numpages);
+int numa_set_memory_nonglobal(unsigned long addr, int numpages);
+#else
+#define numa_set_memory_np set_memory_np
+#define numa_set_memory_np_noalias set_memory_np_noalias
+#define numa_set_memory_global set_memory_global
+#define numa_set_memory_nonglobal set_memory_nonglobal
+#endif /* CONFIG_KERNEL_REPLICATION */
+
 /*
  * For legacy compatibility with the old APIs, a few functions
  * are provided that work on a "struct page".
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index df4182b6449f..ceba209ee653 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -22,6 +22,7 @@
 #include <linux/cc_platform.h>
 #include <linux/set_memory.h>
 #include <linux/memregion.h>
+#include <linux/numa_replication.h>
 
 #include <asm/e820/api.h>
 #include <asm/processor.h>
@@ -1790,7 +1791,7 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int primary)
 	return ret;
 }
 
-static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+static int change_page_attr_set_clr_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
 				    pgprot_t mask_set, pgprot_t mask_clr,
 				    int force_split, int in_flag,
 				    struct page **pages)
@@ -1845,6 +1846,7 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	cpa.flags = in_flag;
 	cpa.curpage = 0;
 	cpa.force_split = force_split;
+	cpa.pgd = pgd;
 
 	ret = __change_page_attr_set_clr(&cpa, 1);
 
@@ -1873,6 +1875,15 @@ static int change_page_attr_set_clr(unsigned long *addr, int numpages,
 	return ret;
 }
 
+static int change_page_attr_set_clr(unsigned long *addr, int numpages,
+				    pgprot_t mask_set, pgprot_t mask_clr,
+				    int force_split, int in_flag,
+				    struct page **pages)
+{
+	return change_page_attr_set_clr_pgd(NULL, addr, numpages, mask_set,
+					    mask_clr, force_split, in_flag, pages);
+}
+
 static inline int change_page_attr_set(unsigned long *addr, int numpages,
 				       pgprot_t mask, int array)
 {
@@ -1880,6 +1891,13 @@ static inline int change_page_attr_set(unsigned long *addr, int numpages,
 		(array ? CPA_ARRAY : 0), NULL);
 }
 
+static inline int change_page_attr_set_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
+				       pgprot_t mask, int array)
+{
+	return change_page_attr_set_clr_pgd(pgd, addr, numpages, mask, __pgprot(0), 0,
+		(array ? CPA_ARRAY : 0), NULL);
+}
+
 static inline int change_page_attr_clear(unsigned long *addr, int numpages,
 					 pgprot_t mask, int array)
 {
@@ -1887,6 +1905,13 @@ static inline int change_page_attr_clear(unsigned long *addr, int numpages,
 		(array ? CPA_ARRAY : 0), NULL);
 }
 
+static inline int change_page_attr_clear_pgd(pgd_t *pgd, unsigned long *addr, int numpages,
+					 pgprot_t mask, int array)
+{
+	return change_page_attr_set_clr_pgd(pgd, addr, numpages, __pgprot(0), mask, 0,
+		(array ? CPA_ARRAY : 0), NULL);
+}
+
 static inline int cpa_set_pages_array(struct page **pages, int numpages,
 				       pgprot_t mask)
 {
@@ -2122,6 +2147,129 @@ int set_memory_global(unsigned long addr, int numpages)
 				    __pgprot(_PAGE_GLOBAL), 0);
 }
 
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_x(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	if (!(__supported_pte_mask & _PAGE_NX))
+		return 0;
+	for_each_replica(nid)
+		ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						 __pgprot(_PAGE_NX), 0);
+
+	return ret;
+}
+
+int numa_set_memory_nx(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	if (!(__supported_pte_mask & _PAGE_NX))
+		return 0;
+	for_each_replica(nid)
+		ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						__pgprot(_PAGE_NX), 0);
+
+	return ret;
+}
+
+int numa_set_memory_ro(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						  __pgprot(_PAGE_RW), 0);
+
+	return ret;
+}
+
+int numa_set_memory_rox(unsigned long addr, int numpages)
+{
+	int nid;
+
+	int ret = 0;
+	pgprot_t clr = __pgprot(_PAGE_RW);
+
+	if (__supported_pte_mask & _PAGE_NX)
+		clr.pgprot |= _PAGE_NX;
+
+	for_each_online_node(nid) {
+		ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages, clr, 0);
+		if (!is_text_replicated())
+			break;
+	}
+	return ret;
+}
+
+int numa_set_memory_rw(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						__pgprot(_PAGE_RW), 0);
+
+	return ret;
+}
+
+int numa_set_memory_np(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						  __pgprot(_PAGE_PRESENT), 0);
+
+	return ret;
+}
+
+int numa_set_memory_np_noalias(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+	int cpa_flags = CPA_NO_CHECK_ALIAS;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_set_clr_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						    __pgprot(0),
+						    __pgprot(_PAGE_PRESENT), 0,
+						    cpa_flags, NULL);
+
+	return ret;
+}
+
+int numa_set_memory_global(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_set_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						__pgprot(_PAGE_GLOBAL), 0);
+
+	return ret;
+}
+
+int numa_set_memory_nonglobal(unsigned long addr, int numpages)
+{
+	int ret = 0;
+	int nid;
+
+	for_each_replica(nid)
+		ret |= change_page_attr_clear_pgd(init_mm.pgd_numa[nid], &addr, numpages,
+						  __pgprot(_PAGE_GLOBAL), 0);
+
+	return ret;
+}
+#endif
+
 /*
  * __set_memory_enc_pgtable() is used for the hypervisors that get
  * informed about "encryption" status via page tables.
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
index c86abf6bc7ba..886639600e64 100644
--- a/include/asm-generic/set_memory.h
+++ b/include/asm-generic/set_memory.h
@@ -10,4 +10,16 @@ int set_memory_rw(unsigned long addr, int numpages);
 int set_memory_x(unsigned long addr, int numpages);
 int set_memory_nx(unsigned long addr, int numpages);
 
+#ifdef CONFIG_KERNEL_REPLICATION
+int numa_set_memory_ro(unsigned long addr, int numpages);
+int numa_set_memory_rw(unsigned long addr, int numpages);
+int numa_set_memory_x(unsigned long addr, int numpages);
+int numa_set_memory_nx(unsigned long addr, int numpages);
+#else
+#define numa_set_memory_ro set_memory_ro
+#define numa_set_memory_rw set_memory_rw
+#define numa_set_memory_x  set_memory_x
+#define numa_set_memory_nx set_memory_nx
+#endif /* CONFIG_KERNEL_REPLICATION */
+
 #endif
diff --git a/include/linux/set_memory.h b/include/linux/set_memory.h
index 95ac8398ee72..3213bfd335dd 100644
--- a/include/linux/set_memory.h
+++ b/include/linux/set_memory.h
@@ -24,6 +24,16 @@ static inline int set_memory_rox(unsigned long addr, int numpages)
 }
 #endif
 
+#ifndef numa_set_memory_rox
+static inline int numa_set_memory_rox(unsigned long addr, int numpages)
+{
+	int ret = numa_set_memory_ro(addr, numpages);
+	if (ret)
+		return ret;
+	return numa_set_memory_x(addr, numpages);
+}
+#endif
+
 #ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
 static inline int set_direct_map_invalid_noflush(struct page *page)
 {
-- 
2.34.1


