From: "Vishal Moola (Oracle)" <vishal.moola@gmail.com>
To: linux-kernel@vger.kernel.org, linux-mm@kvack.org, x86@kernel.org,
"Mike Rapoport (Microsoft)" <rppt@kernel.org>,
Dave Hansen <dave.hansen@linux.intel.com>
Cc: akpm@linux-foundation.org,
"Matthew Wilcox (Oracle)" <willy@infradead.org>,
Andy Lutomirski <luto@kernel.org>,
Peter Zijlstra <peterz@infradead.org>,
"Vishal Moola (Oracle)" <vishal.moola@gmail.com>
Subject: [PATCH v4 3/4] x86/mm/pat: Convert pmd code to use ptdescs
Date: Wed, 4 Feb 2026 17:35:26 -0800 [thread overview]
Message-ID: <20260205013527.322157-4-vishal.moola@gmail.com> (raw)
In-Reply-To: <20260205013527.322157-1-vishal.moola@gmail.com>
We need all allocation and free sites to use the ptdesc APIs in order to
allocate page tables separately from regular pages. Convert these pmd
allocation/free sites to use ptdescs.
populate_pgd() also allocates pagetables that may later be freed by
try_to_free_pmd_page(), so allocate ptdescs there as well.
Also, rename the *_pmd_page() functions to *_pmd(). Rename them now to avoid
any confusion later. Eventually these allocations will be backed by a
ptdesc rather than a page, but that distinction is not important to callers either.
Signed-off-by: Vishal Moola (Oracle) <vishal.moola@gmail.com>
---
arch/x86/mm/pat/set_memory.c | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
diff --git a/arch/x86/mm/pat/set_memory.c b/arch/x86/mm/pat/set_memory.c
index c6c68fbbb046..dfe05cdf460c 100644
--- a/arch/x86/mm/pat/set_memory.c
+++ b/arch/x86/mm/pat/set_memory.c
@@ -1412,7 +1412,7 @@ static bool try_to_free_pte(pte_t *pte)
return true;
}
-static bool try_to_free_pmd_page(pmd_t *pmd)
+static bool try_to_free_pmd(pmd_t *pmd)
{
int i;
@@ -1420,7 +1420,7 @@ static bool try_to_free_pmd_page(pmd_t *pmd)
if (!pmd_none(pmd[i]))
return false;
- free_page((unsigned long)pmd);
+ pgtable_free_addr(pmd);
return true;
}
@@ -1446,7 +1446,7 @@ static void __unmap_pmd_range(pud_t *pud, pmd_t *pmd,
unsigned long start, unsigned long end)
{
if (unmap_pte_range(pmd, start, end))
- if (try_to_free_pmd_page(pud_pgtable(*pud)))
+ if (try_to_free_pmd(pud_pgtable(*pud)))
pud_clear(pud);
}
@@ -1490,7 +1490,7 @@ static void unmap_pmd_range(pud_t *pud, unsigned long start, unsigned long end)
* Try again to free the PMD page if haven't succeeded above.
*/
if (!pud_none(*pud))
- if (try_to_free_pmd_page(pud_pgtable(*pud)))
+ if (try_to_free_pmd(pud_pgtable(*pud)))
pud_clear(pud);
}
@@ -1547,9 +1547,9 @@ static int alloc_pte(pmd_t *pmd)
return 0;
}
-static int alloc_pmd_page(pud_t *pud)
+static int alloc_pmd(pud_t *pud)
{
- pmd_t *pmd = (pmd_t *)get_zeroed_page(GFP_KERNEL);
+ pmd_t *pmd = (pmd_t *) pgtable_alloc_addr(GFP_KERNEL, 0);
if (!pmd)
return -1;
@@ -1622,7 +1622,7 @@ static long populate_pmd(struct cpa_data *cpa,
* We cannot use a 1G page so allocate a PMD page if needed.
*/
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd(pud))
return -1;
pmd = pmd_offset(pud, start);
@@ -1678,7 +1678,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
* Need a PMD page?
*/
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd(pud))
return -1;
cur_pages = populate_pmd(cpa, start, pre_end, cur_pages,
@@ -1715,7 +1715,7 @@ static int populate_pud(struct cpa_data *cpa, unsigned long start, p4d_t *p4d,
pud = pud_offset(p4d, start);
if (pud_none(*pud))
- if (alloc_pmd_page(pud))
+ if (alloc_pmd(pud))
return -1;
tmp = populate_pmd(cpa, start, end, cpa->numpages - cur_pages,
@@ -1743,7 +1743,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
pgd_entry = cpa->pgd + pgd_index(addr);
if (pgd_none(*pgd_entry)) {
- p4d = (p4d_t *)get_zeroed_page(GFP_KERNEL);
+ p4d = (p4d_t *)pgtable_alloc_addr(GFP_KERNEL, 0);
if (!p4d)
return -1;
@@ -1755,7 +1755,7 @@ static int populate_pgd(struct cpa_data *cpa, unsigned long addr)
*/
p4d = p4d_offset(pgd_entry, addr);
if (p4d_none(*p4d)) {
- pud = (pud_t *)get_zeroed_page(GFP_KERNEL);
+ pud = (pud_t *)pgtable_alloc_addr(GFP_KERNEL, 0);
if (!pud)
return -1;
--
2.52.0
next prev parent reply other threads:[~2026-02-05 1:35 UTC|newest]
Thread overview: 7+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-02-05 1:35 [PATCH v4 0/4] Convert 64-bit x86/mm/pat to ptdescs Vishal Moola (Oracle)
2026-02-05 1:35 ` [PATCH v4 1/4] mm: Add address apis for ptdescs Vishal Moola (Oracle)
2026-02-07 7:45 ` Mike Rapoport
2026-02-07 10:25 ` Vishal Moola (Oracle)
2026-02-05 1:35 ` [PATCH v4 2/4] x86/mm/pat: Convert pte code to use ptdescs Vishal Moola (Oracle)
2026-02-05 1:35 ` Vishal Moola (Oracle) [this message]
2026-02-05 1:35 ` [PATCH v4 4/4] x86/mm/pat: Convert split_large_page() " Vishal Moola (Oracle)
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260205013527.322157-4-vishal.moola@gmail.com \
--to=vishal.moola@gmail.com \
--cc=akpm@linux-foundation.org \
--cc=dave.hansen@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=luto@kernel.org \
--cc=peterz@infradead.org \
--cc=rppt@kernel.org \
--cc=willy@infradead.org \
--cc=x86@kernel.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox