linux-mm.kvack.org archive mirror
From: Jinchao Wang <wangjinchao600@gmail.com>
To: Muchun Song <muchun.song@linux.dev>,
	Oscar Salvador <osalvador@suse.de>,
	David Hildenbrand <david@kernel.org>,
	"Matthew Wilcox (Oracle)" <willy@infradead.org>,
	Andrew Morton <akpm@linux-foundation.org>,
	Lorenzo Stoakes <lorenzo.stoakes@oracle.com>,
	"Liam R. Howlett" <Liam.Howlett@oracle.com>,
	Vlastimil Babka <vbabka@suse.cz>, Mike Rapoport <rppt@kernel.org>,
	Suren Baghdasaryan <surenb@google.com>,
	Michal Hocko <mhocko@suse.com>,
	linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	linux-fsdevel@vger.kernel.org
Cc: Jinchao Wang <wangjinchao600@gmail.com>,
	syzbot+2d9c96466c978346b55f@syzkaller.appspotmail.com
Subject: [PATCH 2/2] Fix an AB-BA deadlock in hugetlbfs_punch_hole() involving page migration.
Date: Thu,  8 Jan 2026 20:39:25 +0800
Message-ID: <20260108123957.1123502-2-wangjinchao600@gmail.com>
In-Reply-To: <20260108123957.1123502-1-wangjinchao600@gmail.com>

The deadlock occurs due to the following lock ordering:

Task A (punch_hole):             Task B (migration):
--------------------             -------------------
1. i_mmap_lock_write(mapping)    1. folio_lock(folio)
2. folio_lock(folio)             2. i_mmap_lock_read(mapping)
   (blocks waiting for B)           (blocks waiting for A)

Task A is blocked in the punch-hole path:
  hugetlbfs_fallocate
    hugetlbfs_punch_hole
      hugetlbfs_zero_partial_page
        filemap_lock_hugetlb_folio
          filemap_lock_folio
            __filemap_get_folio
              folio_lock

Task B is blocked in the migration path:
  migrate_pages
    migrate_hugetlbs
      unmap_and_move_huge_page
        remove_migration_ptes
          __rmap_walk_file
            i_mmap_lock_read

To break the circular dependency, switch filemap_lock_hugetlb_folio() to
the non-blocking filemap_lock_folio_nowait(). If the folio is already
locked, hugetlbfs_zero_partial_page() returns -EAGAIN, so Task A drops
i_mmap_rwsem and retries, giving Task B a chance to finish its rmap walk
and release the folio lock.
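
The helper itself is added in patch 1/2, which is not quoted here. One
plausible shape for it, mirroring filemap_lock_folio() but passing
FGP_NOWAIT so that a contended folio lock yields ERR_PTR(-EAGAIN)
instead of sleeping, is sketched below. The body is an assumption about
patch 1/2 (the actual helper may differ); __filemap_get_folio(),
FGP_LOCK and FGP_NOWAIT are existing page cache APIs.

	/* Sketch of the assumed helper from patch 1/2; the real one may differ. */
	static inline struct folio *filemap_lock_folio_nowait(struct address_space *mapping,
							       pgoff_t index)
	{
		/* FGP_LOCK | FGP_NOWAIT: trylock, ERR_PTR(-EAGAIN) if the lock is contended */
		return __filemap_get_folio(mapping, index, FGP_LOCK | FGP_NOWAIT, 0);
	}

With that behaviour, the -EAGAIN check in the diff below distinguishes
"folio lock contended, back off and retry" from the pre-existing "no
folio present" case, which the callers keep ignoring as before.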

Link: https://lore.kernel.org/all/68e9715a.050a0220.1186a4.000d.GAE@google.com
Reported-by: syzbot+2d9c96466c978346b55f@syzkaller.appspotmail.com
Signed-off-by: Jinchao Wang <wangjinchao600@gmail.com>
---
 fs/hugetlbfs/inode.c    | 34 +++++++++++++++++++++++-----------
 include/linux/hugetlb.h |  2 +-
 2 files changed, 24 insertions(+), 12 deletions(-)

diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 3b4c152c5c73..e903344aa0ec 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -653,17 +653,16 @@ static void hugetlb_vmtruncate(struct inode *inode, loff_t offset)
 	remove_inode_hugepages(inode, offset, LLONG_MAX);
 }
 
-static void hugetlbfs_zero_partial_page(struct hstate *h,
-					struct address_space *mapping,
-					loff_t start,
-					loff_t end)
+static int hugetlbfs_zero_partial_page(struct hstate *h,
+				       struct address_space *mapping,
+				       loff_t start, loff_t end)
 {
 	pgoff_t idx = start >> huge_page_shift(h);
 	struct folio *folio;
 
 	folio = filemap_lock_hugetlb_folio(h, mapping, idx);
 	if (IS_ERR(folio))
-		return;
+		return PTR_ERR(folio);
 
 	start = start & ~huge_page_mask(h);
 	end = end & ~huge_page_mask(h);
@@ -674,6 +673,7 @@ static void hugetlbfs_zero_partial_page(struct hstate *h,
 
 	folio_unlock(folio);
 	folio_put(folio);
+	return 0;
 }
 
 static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
@@ -683,6 +683,7 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	struct hstate *h = hstate_inode(inode);
 	loff_t hpage_size = huge_page_size(h);
 	loff_t hole_start, hole_end;
+	int rc;
 
 	/*
 	 * hole_start and hole_end indicate the full pages within the hole.
@@ -698,12 +699,18 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 		return -EPERM;
 	}
 
+repeat:
 	i_mmap_lock_write(mapping);
 
 	/* If range starts before first full page, zero partial page. */
-	if (offset < hole_start)
-		hugetlbfs_zero_partial_page(h, mapping,
-				offset, min(offset + len, hole_start));
+	if (offset < hole_start) {
+		rc = hugetlbfs_zero_partial_page(h, mapping, offset,
+						 min(offset + len, hole_start));
+		if (rc == -EAGAIN) {
+			i_mmap_unlock_write(mapping);
+			goto repeat;
+		}
+	}
 
 	/* Unmap users of full pages in the hole. */
 	if (hole_end > hole_start) {
@@ -714,9 +721,14 @@ static long hugetlbfs_punch_hole(struct inode *inode, loff_t offset, loff_t len)
 	}
 
 	/* If range extends beyond last full page, zero partial page. */
-	if ((offset + len) > hole_end && (offset + len) > hole_start)
-		hugetlbfs_zero_partial_page(h, mapping,
-				hole_end, offset + len);
+	if ((offset + len) > hole_end && (offset + len) > hole_start) {
+		rc = hugetlbfs_zero_partial_page(h, mapping, hole_end,
+						 offset + len);
+		if (rc == -EAGAIN) {
+			i_mmap_unlock_write(mapping);
+			goto repeat;
+		}
+	}
 
 	i_mmap_unlock_write(mapping);
 
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 019a1c5281e4..ad55b9dada0a 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -814,7 +814,7 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
 static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
 				struct address_space *mapping, pgoff_t idx)
 {
-	return filemap_lock_folio(mapping, idx << huge_page_order(h));
+	return filemap_lock_folio_nowait(mapping, idx << huge_page_order(h));
 }
 
 #include <asm/hugetlb.h>
-- 
2.43.0




Thread overview: 4+ messages
2026-01-08 12:39 [PATCH 1/2] mm: add filemap_lock_folio_nowait helper Jinchao Wang
2026-01-08 12:39 ` Jinchao Wang [this message]
2026-01-08 14:09   ` [PATCH 2/2] Fix an AB-BA deadlock in hugetlbfs_punch_hole() involving page migration Matthew Wilcox
2026-01-09  2:17     ` Jinchao Wang
