From: Matthew Wilcox <willy@infradead.org>
To: linux-mm@kvack.org
Subject: Is uprobe_write_opcode() OK?
Date: Thu, 20 Feb 2025 20:29:59 +0000
Message-ID: <Z7eQx1eLtqTnIkX7@casper.infradead.org>

I just wrote the patch below, but now I'm wondering if it's
perpetuating the mistake of not using our existing COW mechanism
to handle uprobes.  Anyone looked at this code recently?
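
Roughly, by "using our existing COW mechanism" I mean something like the
sketch below: force a write fault with FOLL_FORCE | FOLL_WRITE so the
generic fault path breaks COW and hands back the private anonymous copy,
then poke the opcode into that, instead of building the replacement folio
by hand and swapping it in via __replace_page().  The function name is
made up, the ref_ctr update and orig-page restore logic that
uprobe_write_opcode() does today are omitted, and I haven't convinced
myself it is actually safe here; that is the question.

/*
 * Hypothetical sketch only, not part of the patch below.  Assumes the
 * mmap lock is already held by the caller, as it is for
 * uprobe_write_opcode() today, and skips arch icache flushing.
 */
static int write_opcode_via_cow(struct mm_struct *mm, unsigned long vaddr,
				uprobe_opcode_t opcode)
{
	struct page *page;
	int ret;

	/*
	 * FOLL_WRITE | FOLL_FORCE on the read-only text mapping makes the
	 * generic fault path break COW and return the private anon copy,
	 * instead of us allocating one with vma_alloc_folio() and
	 * installing it by hand.
	 */
	ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE | FOLL_FORCE,
				    &page, NULL);
	if (ret != 1)
		return ret < 0 ? ret : -EFAULT;

	/* copy_to_page() is the existing helper in uprobes.c. */
	copy_to_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	put_page(page);
	return 0;
}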

commit 8471400297a4
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   Thu Feb 20 15:04:46 2025 -0500

    uprobes: Use a folio instead of a page
    
    Allocate an order-0 folio instead of a page.  Saves a few calls to
    compound_head().
    
    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2ca797cbe465..d4330870cf6a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -158,17 +158,16 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  * @vma:      vma that holds the pte pointing to page
  * @addr:     address the old @page is mapped at
  * @old_page: the page we are replacing by new_page
- * @new_page: the modified page we replace page by
+ * @new_folio: the modified folio we replace @page with
  *
- * If @new_page is NULL, only unmap @old_page.
+ * If @new_folio is NULL, only unmap @old_page.
  *
  * Returns 0 on success, negative error code otherwise.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
-				struct page *old_page, struct page *new_page)
+				struct page *old_page, struct folio *new_folio)
 {
 	struct folio *old_folio = page_folio(old_page);
-	struct folio *new_folio;
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
 	int err;
@@ -177,8 +176,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
 				addr + PAGE_SIZE);
 
-	if (new_page) {
-		new_folio = page_folio(new_page);
+	if (new_folio) {
 		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
 		if (err)
 			return err;
@@ -193,7 +191,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		goto unlock;
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
-	if (new_page) {
+	if (new_folio) {
 		folio_get(new_folio);
 		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(new_folio, vma);
@@ -208,9 +206,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
 	ptep_clear_flush(vma, addr, pvmw.pte);
-	if (new_page)
+	if (new_folio)
 		set_pte_at(mm, addr, pvmw.pte,
-			   mk_pte(new_page, vma->vm_page_prot));
+			   folio_mk_pte(new_folio, vma->vm_page_prot));
 
 	folio_remove_rmap_pte(old_folio, old_page, vma);
 	if (!folio_mapped(old_folio))
@@ -474,7 +472,8 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 			unsigned long vaddr, uprobe_opcode_t opcode)
 {
 	struct uprobe *uprobe;
-	struct page *old_page, *new_page;
+	struct page *old_page;
+	struct folio *new_folio;
 	struct vm_area_struct *vma;
 	int ret, is_register, ref_ctr_updated = 0;
 	bool orig_page_huge = false;
@@ -519,13 +518,14 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 		goto put_old;
 
 	ret = -ENOMEM;
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
-	if (!new_page)
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+	if (!new_folio)
 		goto put_old;
 
-	__SetPageUptodate(new_page);
-	copy_highpage(new_page, old_page);
-	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+	copy_highpage(folio_page(new_folio, 0), old_page);
+	copy_to_page(folio_page(new_folio, 0), vaddr, &opcode,
+			UPROBE_SWBP_INSN_SIZE);
+	__folio_mark_uptodate(new_folio);
 
 	if (!is_register) {
 		struct page *orig_page;
@@ -539,10 +539,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 		if (orig_page) {
 			if (PageUptodate(orig_page) &&
-			    pages_identical(new_page, orig_page)) {
+			    pages_identical(folio_page(new_folio, 0),
+				    orig_page)) {
 				/* let go new_page */
-				put_page(new_page);
-				new_page = NULL;
+				folio_put(new_folio);
+				new_folio = NULL;
 
 				if (PageCompound(orig_page))
 					orig_page_huge = true;
@@ -551,9 +552,9 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 		}
 	}
 
-	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
-	if (new_page)
-		put_page(new_page);
+	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_folio);
+	if (new_folio)
+		folio_put(new_folio);
 put_old:
 	put_page(old_page);
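
To spell out the "saves a few calls to compound_head()" line in the commit
message: page_folio() and the page-based helpers (put_page(),
__SetPageUptodate(), ...) each re-derive the head page, while a struct
folio pointer already is the head, so carrying the folio from allocation
onwards drops those lookups.  A contrived illustration, not uprobes code:

/* Illustration only: why holding a folio saves compound_head() calls. */
static void page_vs_folio_demo(struct vm_area_struct *vma, unsigned long vaddr)
{
	struct page *page;
	struct folio *folio;

	/* With a struct page, each helper re-derives the head page: */
	page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
	if (page) {
		__SetPageUptodate(page);	/* page_folio() -> compound_head() */
		put_page(page);			/* and compound_head() again */
	}

	/* With a folio, the pointer already is the head page, no lookups: */
	folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
	if (folio) {
		__folio_mark_uptodate(folio);
		folio_put(folio);
	}
}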
 




Thread overview: 2+ messages
2025-02-20 20:29 Matthew Wilcox [this message]
2025-02-24  9:53 ` David Hildenbrand
