* Is uprobe_write_opcode() OK?
@ 2025-02-20 20:29 Matthew Wilcox
  2025-02-24  9:53 ` David Hildenbrand
From: Matthew Wilcox @ 2025-02-20 20:29 UTC
  To: linux-mm

I just wrote the patch below, but now I'm wondering if it's
perpetuating the mistake of not using our existing COW mechanism
to handle uprobes.  Anyone looked at this code recently?

commit 8471400297a4
Author: Matthew Wilcox (Oracle) <willy@infradead.org>
Date:   Thu Feb 20 15:04:46 2025 -0500

    uprobes: Use a folio instead of a page
    
    Allocate an order-0 folio instead of a page.  Saves a few calls to
    compound_head().
    
    Signed-off-by: Matthew Wilcox (Oracle) <willy@infradead.org>

diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index 2ca797cbe465..d4330870cf6a 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -158,17 +158,16 @@ static loff_t vaddr_to_offset(struct vm_area_struct *vma, unsigned long vaddr)
  * @vma:      vma that holds the pte pointing to page
  * @addr:     address the old @page is mapped at
  * @old_page: the page we are replacing by new_page
- * @new_page: the modified page we replace page by
+ * @new_folio: the modified folio we replace @page with
  *
- * If @new_page is NULL, only unmap @old_page.
+ * If @new_folio is NULL, only unmap @old_page.
  *
  * Returns 0 on success, negative error code otherwise.
  */
 static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
-				struct page *old_page, struct page *new_page)
+				struct page *old_page, struct folio *new_folio)
 {
 	struct folio *old_folio = page_folio(old_page);
-	struct folio *new_folio;
 	struct mm_struct *mm = vma->vm_mm;
 	DEFINE_FOLIO_VMA_WALK(pvmw, old_folio, vma, addr, 0);
 	int err;
@@ -177,8 +176,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
 				addr + PAGE_SIZE);
 
-	if (new_page) {
-		new_folio = page_folio(new_page);
+	if (new_folio) {
 		err = mem_cgroup_charge(new_folio, vma->vm_mm, GFP_KERNEL);
 		if (err)
 			return err;
@@ -193,7 +191,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 		goto unlock;
 	VM_BUG_ON_PAGE(addr != pvmw.address, old_page);
 
-	if (new_page) {
+	if (new_folio) {
 		folio_get(new_folio);
 		folio_add_new_anon_rmap(new_folio, vma, addr, RMAP_EXCLUSIVE);
 		folio_add_lru_vma(new_folio, vma);
@@ -208,9 +206,9 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
 
 	flush_cache_page(vma, addr, pte_pfn(ptep_get(pvmw.pte)));
 	ptep_clear_flush(vma, addr, pvmw.pte);
-	if (new_page)
+	if (new_folio)
 		set_pte_at(mm, addr, pvmw.pte,
-			   mk_pte(new_page, vma->vm_page_prot));
+			   folio_mk_pte(new_folio, vma->vm_page_prot));
 
 	folio_remove_rmap_pte(old_folio, old_page, vma);
 	if (!folio_mapped(old_folio))
@@ -474,7 +472,8 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 			unsigned long vaddr, uprobe_opcode_t opcode)
 {
 	struct uprobe *uprobe;
-	struct page *old_page, *new_page;
+	struct page *old_page;
+	struct folio *new_folio;
 	struct vm_area_struct *vma;
 	int ret, is_register, ref_ctr_updated = 0;
 	bool orig_page_huge = false;
@@ -519,13 +518,14 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 		goto put_old;
 
 	ret = -ENOMEM;
-	new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);
-	if (!new_page)
+	new_folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);
+	if (!new_folio)
 		goto put_old;
 
-	__SetPageUptodate(new_page);
-	copy_highpage(new_page, old_page);
-	copy_to_page(new_page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
+	copy_highpage(folio_page(new_folio, 0), old_page);
+	copy_to_page(folio_page(new_folio, 0), vaddr, &opcode,
+			UPROBE_SWBP_INSN_SIZE);
+	__folio_mark_uptodate(new_folio);
 
 	if (!is_register) {
 		struct page *orig_page;
@@ -539,10 +539,11 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 
 		if (orig_page) {
 			if (PageUptodate(orig_page) &&
-			    pages_identical(new_page, orig_page)) {
+			    pages_identical(folio_page(new_folio, 0),
+				    orig_page)) {
 				/* let go new_page */
-				put_page(new_page);
-				new_page = NULL;
+				folio_put(new_folio);
+				new_folio = NULL;
 
 				if (PageCompound(orig_page))
 					orig_page_huge = true;
@@ -551,9 +552,9 @@ int uprobe_write_opcode(struct arch_uprobe *auprobe, struct mm_struct *mm,
 		}
 	}
 
-	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_page);
-	if (new_page)
-		put_page(new_page);
+	ret = __replace_page(vma, vaddr & PAGE_MASK, old_page, new_folio);
+	if (new_folio)
+		folio_put(new_folio);
 put_old:
 	put_page(old_page);
 

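To make the "saves a few calls to compound_head()" point concrete, here is a
minimal sketch (illustrative only, not part of the patch) contrasting the
page-based helpers, which each re-derive the head page internally, with their
folio counterparts, which resolve the folio once at allocation time:

	/* Page-based: each helper re-derives the folio/head page. */
	struct page *page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vaddr);

	__SetPageUptodate(page);	/* page_folio() -> compound_head() inside */
	put_page(page);			/* page_folio() -> compound_head() again */

	/* Folio-based: resolve the folio once, then stay in folio space. */
	struct folio *folio = vma_alloc_folio(GFP_HIGHUSER_MOVABLE, 0, vma, vaddr);

	__folio_mark_uptodate(folio);	/* no head-page lookup needed */
	folio_put(folio);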



* Re: Is uprobe_write_opcode() OK?
  2025-02-20 20:29 Is uprobe_write_opcode() OK? Matthew Wilcox
@ 2025-02-24  9:53 ` David Hildenbrand
From: David Hildenbrand @ 2025-02-24  9:53 UTC
  To: Matthew Wilcox, linux-mm

On 20.02.25 21:29, Matthew Wilcox wrote:
> I just wrote the patch below, but now I'm wondering if it's
> perpetuating the mistake of not using our existing COW mechanism
> to handle uprobes.  Anyone looked at this code recently?

I have a bigger rewrite in the queue:

https://lore.kernel.org/linux-mm/20240604122548.359952-2-david@redhat.com/T/

That will do exactly that: leave COW to core MM.
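
One way to "leave COW to core MM" is to stop open-coding the page replacement
and let GUP break COW instead. A rough sketch of that direction (illustrative
only, not necessarily what the linked series does; assumes the caller holds
mmap_lock as uprobe_write_opcode() does, and reuses the copy_to_page() helper
from uprobes.c seen in the patch above):

	struct page *page;
	long ret;

	/*
	 * FOLL_FORCE|FOLL_WRITE makes core MM unshare/COW the page even
	 * though the text mapping is MAP_PRIVATE and not VM_WRITE.
	 */
	ret = get_user_pages_remote(mm, vaddr, 1, FOLL_WRITE | FOLL_FORCE,
				    &page, NULL);
	if (ret != 1)
		return ret < 0 ? ret : -EBUSY;

	/* Patch the instruction in the now-anonymous, writable copy. */
	copy_to_page(page, vaddr, &opcode, UPROBE_SWBP_INSN_SIZE);
	/* arch cache maintenance elided */
	put_page(page);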

I'll respin that soonish.

-- 
Cheers,

David / dhildenb


