linux-mm.kvack.org archive mirror
* [PATCH v2] shmem: support huge_fault to avoid pmd split
@ 2022-07-26 13:27 Liu Zixian
  2022-07-26 23:23 ` kernel test robot
  2022-07-28  5:09 ` kernel test robot
  0 siblings, 2 replies; 3+ messages in thread
From: Liu Zixian @ 2022-07-26 13:27 UTC (permalink / raw)
  To: hughd, akpm, linux-mm; +Cc: linfeilong, liuzixian4, willy

Transparent huge pages on tmpfs are useful for reducing TLB misses, but
they are split during a copy-on-write (cow) memory fault.
This happens if we mprotect and rewrite a code segment (which is a
private file mapping) to hotpatch a running process.
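
A minimal userspace sketch of that trigger (illustrative only: the path,
the patched byte, and the 2MB size are assumptions, and the file is taken
to be at least 2MB on a tmpfs mount with -o huge=always):

	#include <fcntl.h>
	#include <sys/mman.h>
	#include <unistd.h>

	#define MAP_LEN (2UL << 20)	/* one PMD-sized (2MB) region on x86_64 */

	int main(void)
	{
		/* hypothetical file on a huge=always tmpfs mount */
		int fd = open("/mnt/huge/code.bin", O_RDONLY);
		char *p;

		if (fd < 0)
			return 1;

		/* private mapping, like a code segment */
		p = mmap(NULL, MAP_LEN, PROT_READ | PROT_EXEC, MAP_PRIVATE, fd, 0);
		if (p == MAP_FAILED)
			return 1;

		/* hotpatch: make the mapping writable and rewrite one byte */
		mprotect(p, MAP_LEN, PROT_READ | PROT_WRITE | PROT_EXEC);
		p[0] = (char)0x90;	/* cow write fault: the huge pmd is split here */

		munmap(p, MAP_LEN);
		close(fd);
		return 0;
	}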

Users of the huge= mount option prefer to keep huge pages after cow.
We can avoid the split by adding a huge_fault function.
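
One way to observe whether the mapping is still pmd-mapped is the
ShmemPmdMapped counter in /proc/self/smaps (a sketch; the helper name is
made up, and the counter drops to 0 kB for the patched range once the
huge pmd is split):

	#include <stdio.h>
	#include <string.h>

	/* print the ShmemPmdMapped line of each mapping of this process */
	static void dump_shmem_thp(void)
	{
		char line[256];
		FILE *f = fopen("/proc/self/smaps", "r");

		if (!f)
			return;
		while (fgets(line, sizeof(line), f))
			if (!strncmp(line, "ShmemPmdMapped:", 15))
				fputs(line, stdout);
		fclose(f);
	}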

Signed-off-by: Liu Zixian <liuzixian4@huawei.com>
---
v2: removed redundant prep_transhuge_page

 mm/shmem.c | 45 +++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 45 insertions(+)

diff --git a/mm/shmem.c b/mm/shmem.c
index a6f565308..5074dff08 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -2120,6 +2120,50 @@ static vm_fault_t shmem_fault(struct vm_fault *vmf)
 	return ret;
 }
 
+static vm_fault_t shmem_huge_fault(struct vm_fault *vmf, enum page_entry_size pe_size)
+{
+	vm_fault_t ret = VM_FAULT_FALLBACK;
+	unsigned long haddr = vmf->address & HPAGE_PMD_MASK;
+	struct page *old_page, *new_page;
+	gfp_t gfp_flags = GFP_HIGHUSER_MOVABLE | __GFP_COMP;
+
+	/* read or shared fault will not split huge pmd */
+	if (!(vmf->flags & FAULT_FLAG_WRITE)
+			|| (vmf->vma->vm_flags & VM_SHARED))
+		return VM_FAULT_FALLBACK;
+	if (pe_size != PE_SIZE_PMD)
+		return VM_FAULT_FALLBACK;
+
+	if (pmd_none(*vmf->pmd)) {
+		if (shmem_fault(vmf) & VM_FAULT_ERROR)
+			goto out;
+		if (!PageTransHuge(vmf->page))
+			goto out;
+		old_page = vmf->page;
+	} else {
+		old_page = pmd_page(*vmf->pmd);
+		page_remove_rmap(old_page, vmf->vma, true);
+		pmdp_huge_clear_flush(vmf->vma, haddr, vmf->pmd);
+		add_mm_counter(vmf->vma->vm_mm, MM_SHMEMPAGES, -HPAGE_PMD_NR);
+	}
+
+	new_page = &vma_alloc_folio(gfp_flags, HPAGE_PMD_ORDER,
+			vmf->vma, haddr, true)->page;
+	if (!new_page)
+		goto out;
+	copy_user_huge_page(new_page, old_page, haddr, vmf->vma, HPAGE_PMD_NR);
+	__SetPageUptodate(new_page);
+
+	ret = do_set_pmd(vmf, new_page);
+
+out:
+	if (vmf->page) {
+		unlock_page(vmf->page);
+		put_page(vmf->page);
+	}
+	return ret;
+}
+
 unsigned long shmem_get_unmapped_area(struct file *file,
 				      unsigned long uaddr, unsigned long len,
 				      unsigned long pgoff, unsigned long flags)
@@ -3884,6 +3928,7 @@ static const struct super_operations shmem_ops = {
 
 static const struct vm_operations_struct shmem_vm_ops = {
 	.fault		= shmem_fault,
+	.huge_fault	= shmem_huge_fault,
 	.map_pages	= filemap_map_pages,
 #ifdef CONFIG_NUMA
 	.set_policy     = shmem_set_policy,
-- 
2.33.0
