From: "Björn Töpel" <bjorn@kernel.org>
To: Paul Walmsley <paul.walmsley@sifive.com>,
Palmer Dabbelt <palmer@dabbelt.com>,
Albert Ou <aou@eecs.berkeley.edu>,
linux-riscv@lists.infradead.org
Cc: "Björn Töpel" <bjorn@rivosinc.com>,
linux-kernel@vger.kernel.org, linux-mm@kvack.org,
"David Hildenbrand" <david@redhat.com>,
"Oscar Salvador" <osalvador@suse.de>,
virtualization@lists.linux-foundation.org, linux@rivosinc.com,
"Alexandre Ghiti" <alexghiti@rivosinc.com>
Subject: [PATCH 4/7] riscv: mm: Add memory hot add/remove support
Date: Fri, 12 May 2023 16:57:34 +0200
Message-ID: <20230512145737.985671-5-bjorn@kernel.org>
In-Reply-To: <20230512145737.985671-1-bjorn@kernel.org>
From: Björn Töpel <bjorn@rivosinc.com>

From an arch perspective, a couple of callbacks need to be implemented
to support hotplugging:

arch_add_memory()
  This callback is responsible for updating the linear/direct map, and
  calls into the generic memory hotplug code via __add_pages(). A
  sketch of the generic call path follows below.

arch_remove_memory()
  In this callback the linear/direct map is torn down.

vmemmap_free()
  This function tears down the vmemmap mappings (if
  CONFIG_SPARSEMEM_VMEMMAP is in use), and also deallocates the backing
  vmemmap pages. Note that for persistent memory, an alternative
  allocator for the backing pages can be used -- the vmem_altmap. This
  means that when the backing pages are cleared, extra care is needed
  so that the correct deallocation method is used. Note that RISC-V
  populates the vmemmap using vmemmap_populate_basepages(), so
  currently no hugepages are used for the backing store.
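
For orientation, here is a rough sketch -- not part of this patch, and
simplified from mm/memory_hotplug.c -- of how the generic hotplug core
reaches the arch callback (locking, resource handling, and error paths
omitted):

	#include <linux/memory_hotplug.h>

	/* Simplified: the generic add_memory_resource() builds the
	 * mapping parameters and hands the range to the arch, which
	 * maps it and then calls back into the generic __add_pages().
	 */
	static int add_memory_sketch(int nid, u64 start, u64 size)
	{
		struct mhp_params params = {
			.pgprot = pgprot_mhp(PAGE_KERNEL),
		};

		return arch_add_memory(nid, start, size, &params);
	}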

The page table unmap/teardown functions are heavily based on (read:
copied from) the x86 tree. The same remove_pgd_mapping() is used by
both vmemmap_free() and arch_remove_memory(), but in the latter the
backing pages are not freed.

Signed-off-by: Björn Töpel <bjorn@rivosinc.com>
---
arch/riscv/mm/init.c | 233 +++++++++++++++++++++++++++++++++++++++++++
1 file changed, 233 insertions(+)
diff --git a/arch/riscv/mm/init.c b/arch/riscv/mm/init.c
index aea8ccb3f4ae..a468708d1e1c 100644
--- a/arch/riscv/mm/init.c
+++ b/arch/riscv/mm/init.c
@@ -1444,3 +1444,236 @@ int __meminit vmemmap_populate(unsigned long start, unsigned long end, int node,
 	return vmemmap_populate_basepages(start, end, node, NULL);
 }
 #endif
+
+#ifdef CONFIG_MEMORY_HOTPLUG
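+/*
+ * free_{pte,pmd,pud}_table(): free a page-table page and clear the
+ * entry pointing to it in the level above, but only once every entry
+ * in the table is none.
+ */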
+static void __meminit free_pte_table(pte_t *pte_start, pmd_t *pmd)
+{
+	pte_t *pte;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		pte = pte_start + i;
+		if (!pte_none(*pte))
+			return;
+	}
+
+	free_pages((unsigned long)page_address(pmd_page(*pmd)), 0);
+	pmd_clear(pmd);
+}
+
+static void __meminit free_pmd_table(pmd_t *pmd_start, pud_t *pud)
+{
+	pmd_t *pmd;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PMD; i++) {
+		pmd = pmd_start + i;
+		if (!pmd_none(*pmd))
+			return;
+	}
+
+	free_pages((unsigned long)page_address(pud_page(*pud)), 0);
+	pud_clear(pud);
+}
+
+static void __meminit free_pud_table(pud_t *pud_start, p4d_t *p4d)
+{
+	pud_t *pud;
+	int i;
+
+	for (i = 0; i < PTRS_PER_PUD; i++) {
+		pud = pud_start + i;
+		if (!pud_none(*pud))
+			return;
+	}
+
+	free_pages((unsigned long)page_address(p4d_page(*p4d)), 0);
+	p4d_clear(p4d);
+}
+
+static void __meminit free_vmemmap_storage(struct page *page, size_t size,
+					   struct vmem_altmap *altmap)
+{
+	if (altmap)
+		vmem_altmap_free(altmap, size >> PAGE_SHIFT);
+	else
+		free_pages((unsigned long)page_address(page), get_order(size));
+}
+
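+/*
+ * remove_{pte,pmd,pud,p4d,pgd}_mapping() walk the kernel page tables
+ * and clear all entries covering [addr, end). When is_vmemmap is set,
+ * the backing pages are freed as well, either to the page allocator
+ * or back to the vmem_altmap they were allocated from.
+ */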
+static void __meminit remove_pte_mapping(pte_t *pte_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pte_t *ptep, pte;
+
+	for (; addr < end; addr = next) {
+		next = (addr + PAGE_SIZE) & PAGE_MASK;
+		if (next > end)
+			next = end;
+
+		ptep = pte_base + pte_index(addr);
+		pte = READ_ONCE(*ptep);
+
+		if (!pte_present(pte))
+			continue;
+
+		pte_clear(&init_mm, addr, ptep);
+		if (is_vmemmap)
+			free_vmemmap_storage(pte_page(pte), PAGE_SIZE, altmap);
+	}
+}
+
+static void __meminit remove_pmd_mapping(pmd_t *pmd_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pte_t *pte_base;
+	pmd_t *pmdp, pmd;
+
+	for (; addr < end; addr = next) {
+		next = pmd_addr_end(addr, end);
+		pmdp = pmd_base + pmd_index(addr);
+		pmd = READ_ONCE(*pmdp);
+
+		if (!pmd_present(pmd))
+			continue;
+
+		if (pmd_leaf(pmd)) {
+			pmd_clear(pmdp);
+			if (is_vmemmap)
+				free_vmemmap_storage(pmd_page(pmd), PMD_SIZE, altmap);
+			continue;
+		}
+
+		pte_base = (pte_t *)pmd_page_vaddr(pmd);
+		remove_pte_mapping(pte_base, addr, next, is_vmemmap, altmap);
+		free_pte_table(pte_base, pmdp);
+	}
+}
+
+static void __meminit remove_pud_mapping(pud_t *pud_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	pud_t *pudp, pud;
+	pmd_t *pmd_base;
+
+	for (; addr < end; addr = next) {
+		next = pud_addr_end(addr, end);
+		pudp = pud_base + pud_index(addr);
+		pud = READ_ONCE(*pudp);
+
+		if (!pud_present(pud))
+			continue;
+
+		if (pud_leaf(pud)) {
+			if (pgtable_l4_enabled) {
+				pud_clear(pudp);
+				if (is_vmemmap)
+					free_vmemmap_storage(pud_page(pud), PUD_SIZE, altmap);
+			}
+			continue;
+		}
+
+		pmd_base = pmd_offset(pudp, 0);
+		remove_pmd_mapping(pmd_base, addr, next, is_vmemmap, altmap);
+
+		if (pgtable_l4_enabled)
+			free_pmd_table(pmd_base, pudp);
+	}
+}
+
+static void __meminit remove_p4d_mapping(p4d_t *p4d_base, unsigned long addr, unsigned long end,
+					 bool is_vmemmap, struct vmem_altmap *altmap)
+{
+	unsigned long next;
+	p4d_t *p4dp, p4d;
+	pud_t *pud_base;
+
+	for (; addr < end; addr = next) {
+		next = p4d_addr_end(addr, end);
+		p4dp = p4d_base + p4d_index(addr);
+		p4d = READ_ONCE(*p4dp);
+
+		if (!p4d_present(p4d))
+			continue;
+
+		if (p4d_leaf(p4d)) {
+			if (pgtable_l5_enabled) {
+				p4d_clear(p4dp);
+				if (is_vmemmap)
+					free_vmemmap_storage(p4d_page(p4d), P4D_SIZE, altmap);
+			}
+			continue;
+		}
+
+		pud_base = pud_offset(p4dp, 0);
+		remove_pud_mapping(pud_base, addr, next, is_vmemmap, altmap);
+
+		if (pgtable_l5_enabled)
+			free_pud_table(pud_base, p4dp);
+	}
+}
+
+static void __meminit remove_pgd_mapping(unsigned long va, unsigned long end, bool is_vmemmap,
+					 struct vmem_altmap *altmap)
+{
+	unsigned long addr, next;
+	p4d_t *p4d_base;
+	pgd_t *pgd;
+
+	for (addr = va; addr < end; addr = next) {
+		next = pgd_addr_end(addr, end);
+		pgd = pgd_offset_k(addr);
+
+		if (!pgd_present(*pgd))
+			continue;
+
+		if (pgd_leaf(*pgd))
+			continue;
+
+		p4d_base = p4d_offset(pgd, 0);
+		remove_p4d_mapping(p4d_base, addr, next, is_vmemmap, altmap);
+	}
+
+	flush_tlb_all();
+}
+
+static void __meminit remove_linear_mapping(phys_addr_t start, u64 size)
+{
+	unsigned long va = (unsigned long)__va(start);
+	unsigned long end = (unsigned long)__va(start + size);
+
+	remove_pgd_mapping(va, end, false, NULL);
+}
+
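+/*
+ * Hotplug entry point: map the new range into the linear map, then
+ * hand the pages over to the generic hotplug code; on failure the
+ * linear map is torn down again.
+ */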
+int __ref arch_add_memory(int nid, u64 start, u64 size, struct mhp_params *params)
+{
+	int ret;
+
+	create_linear_mapping_range(start, start + size, params);
+	flush_tlb_all();
+	ret = __add_pages(nid, start >> PAGE_SHIFT, size >> PAGE_SHIFT, params);
+	if (ret) {
+		remove_linear_mapping(start, size);
+		return ret;
+	}
+
+	max_pfn = PFN_UP(start + size);
+	max_low_pfn = max_pfn;
+	return 0;
+}
+
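+/*
+ * Hot-remove: return the pages to the generic hotplug code, then tear
+ * down the linear map alias. The vmemmap itself is torn down
+ * separately, via vmemmap_free().
+ */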
+void __ref arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap)
+{
+	__remove_pages(start >> PAGE_SHIFT, size >> PAGE_SHIFT, altmap);
+	remove_linear_mapping(start, size);
+}
+
+#ifdef CONFIG_SPARSEMEM_VMEMMAP
+void __ref vmemmap_free(unsigned long start, unsigned long end, struct vmem_altmap *altmap)
+{
+	remove_pgd_mapping(start, end, true, altmap);
+}
+#endif /* CONFIG_SPARSEMEM_VMEMMAP */
+#endif /* CONFIG_MEMORY_HOTPLUG */
--
2.39.2