Date: Fri, 6 Mar 2026 12:32:40 +0000
From: "Lorenzo Stoakes (Oracle)" <ljs@kernel.org>
To: "David Hildenbrand (Arm)" <david@kernel.org>
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org, Andrew Morton,
	Lorenzo Stoakes, "Liam R. Howlett", Vlastimil Babka, Mike Rapoport,
	Suren Baghdasaryan, Michal Hocko, Jann Horn, Pedro Falcato,
	David Rientjes, Shakeel Butt, "Matthew Wilcox (Oracle)", Alice Ryhl,
	Madhavan Srinivasan, Michael Ellerman, Christian Borntraeger,
	Janosch Frank, Claudio Imbrenda, Alexander Gordeev, Gerald Schaefer,
	Heiko Carstens, Vasily Gorbik, Jarkko Sakkinen, Thomas Gleixner,
	Ingo Molnar, Borislav Petkov, Greg Kroah-Hartman, Arve Hjønnevåg,
	Todd Kjos, Christian Brauner, Carlos Llamas, Ian Abbott,
	H Hartley Sweeten, Jani Nikula, Joonas Lahtinen, Rodrigo Vivi,
	Tvrtko Ursulin, David Airlie, Simona Vetter, Jason Gunthorpe,
	Leon Romanovsky, Dimitri Sivanich, Arnd Bergmann, Alexei Starovoitov,
	Daniel Borkmann, Andrii Nakryiko, Peter Zijlstra,
	Arnaldo Carvalho de Melo, Namhyung Kim, Andy Lutomirski,
	Vincenzo Frascino, Eric Dumazet, Neal Cardwell, "David S. Miller",
	David Ahern, Jakub Kicinski, Paolo Abeni, Miguel Ojeda,
	linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org,
	linux-s390@vger.kernel.org, linux-sgx@vger.kernel.org,
	intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
	linux-rdma@vger.kernel.org, bpf@vger.kernel.org,
	linux-perf-users@vger.kernel.org, linux-fsdevel@vger.kernel.org,
	netdev@vger.kernel.org, rust-for-linux@vger.kernel.org, x86@kernel.org
Subject: Re: [PATCH v1 14/16] mm: rename zap_page_range_single() to zap_vma_range()
Message-ID:
References: <20260227200848.114019-1-david@kernel.org>
	<20260227200848.114019-15-david@kernel.org>
MIME-Version: 1.0
Content-Type: text/plain; charset=us-ascii
Content-Disposition: inline
In-Reply-To: <20260227200848.114019-15-david@kernel.org>
On Fri, Feb 27, 2026 at 09:08:45PM +0100, David Hildenbrand (Arm) wrote:
> Let's rename it to make it better match our new naming scheme.
>
> While at it, polish the kerneldoc.
>
> Signed-off-by: David Hildenbrand (Arm)

LGTM, again obviously assuming the rustfmt stuff is sorted :>), so:

Reviewed-by: Lorenzo Stoakes (Oracle)

> ---
>  arch/s390/mm/gmap_helpers.c          |  2 +-
>  drivers/android/binder/page_range.rs |  4 ++--
>  drivers/android/binder_alloc.c       |  2 +-
>  include/linux/mm.h                   |  4 ++--
>  kernel/bpf/arena.c                   |  2 +-
>  kernel/events/core.c                 |  2 +-
>  mm/madvise.c                         |  4 ++--
>  mm/memory.c                          | 14 +++++++-------
>  net/ipv4/tcp.c                       |  6 +++---
>  rust/kernel/mm/virt.rs               |  4 ++--
>  10 files changed, 22 insertions(+), 22 deletions(-)
>
> diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
> index ae2d59a19313..f8789ffcc05c 100644
> --- a/arch/s390/mm/gmap_helpers.c
> +++ b/arch/s390/mm/gmap_helpers.c
> @@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
>  		if (!vma)
>  			return;
>  		if (!is_vm_hugetlb_page(vma))
> -			zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
> +			zap_vma_range(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
>  		vmaddr = vma->vm_end;
>  	}
>  }
> diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
> index fdd97112ef5c..2fddd4ed8d4c 100644
> --- a/drivers/android/binder/page_range.rs
> +++ b/drivers/android/binder/page_range.rs
> @@ -130,7 +130,7 @@ pub(crate) struct ShrinkablePageRange {
>      pid: Pid,
>      /// The mm for the relevant process.
>      mm: ARef<Mm>,
> -    /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
> +    /// Used to synchronize calls to `vm_insert_page` and `zap_vma_range`.
>      #[pin]
>      mm_lock: Mutex<()>,
>      /// Spinlock protecting changes to pages.
> @@ -719,7 +719,7 @@ fn drop(self: Pin<&mut Self>) {
>
>          if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
>              let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
> -            vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
> +            vma.zap_vma_range(user_page_addr, PAGE_SIZE);
>          }
>
>          drop(mmap_read);
> diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
> index dd2046bd5cde..e4488ad86a65 100644
> --- a/drivers/android/binder_alloc.c
> +++ b/drivers/android/binder_alloc.c
> @@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
>  	if (vma) {
>  		trace_binder_unmap_user_start(alloc, index);
>
> -		zap_page_range_single(vma, page_addr, PAGE_SIZE);
> +		zap_vma_range(vma, page_addr, PAGE_SIZE);
>
>  		trace_binder_unmap_user_end(alloc, index);
>  	}
> diff --git a/include/linux/mm.h b/include/linux/mm.h
> index 4bd1500b9630..833bedd3f739 100644
> --- a/include/linux/mm.h
> +++ b/include/linux/mm.h
> @@ -2835,7 +2835,7 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
>
>  void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>  		unsigned long size);
> -void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
> +void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
>  		unsigned long size);
>  /**
>   * zap_vma - zap all page table entries in a vma
>   */
> @@ -2843,7 +2843,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
>  static inline void zap_vma(struct vm_area_struct *vma)
>  {
> -	zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
> +	zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
>  }
>  struct mmu_notifier_range;
>
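The zap_vma() wrapper reads nicely at call sites. Purely as illustration
(these example_* helpers are hypothetical, not from this series), usage
under the new naming would look like:

	/* Zap one page; the sub-range must lie entirely within @vma. */
	static void example_zap_one_page(struct vm_area_struct *vma,
					 unsigned long addr)
	{
		if (addr >= vma->vm_start && addr + PAGE_SIZE <= vma->vm_end)
			zap_vma_range(vma, addr, PAGE_SIZE);
	}

	/* Zap every page table entry covered by the VMA. */
	static void example_zap_whole_vma(struct vm_area_struct *vma)
	{
		zap_vma(vma);
	}
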
> diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
> index c34510d83b1f..37843c6a4764 100644
> --- a/kernel/bpf/arena.c
> +++ b/kernel/bpf/arena.c
> @@ -656,7 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
>  	guard(mutex)(&arena->lock);
>  	/* iterate link list under lock */
>  	list_for_each_entry(vml, &arena->vma_list, head)
> -		zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
> +		zap_vma_range(vml->vma, uaddr, PAGE_SIZE * page_cnt);
>  }
>
>  static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
> diff --git a/kernel/events/core.c b/kernel/events/core.c
> index c94c56c94104..5ee02817c3bc 100644
> --- a/kernel/events/core.c
> +++ b/kernel/events/core.c
> @@ -7215,7 +7215,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
>  #ifdef CONFIG_MMU
>  	/* Clear any partial mappings on error. */
>  	if (err)
> -		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
> +		zap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);
>  #endif
>
>  	return err;
> diff --git a/mm/madvise.c b/mm/madvise.c
> index fb5fcdff2b66..6e66f56ff1a6 100644
> --- a/mm/madvise.c
> +++ b/mm/madvise.c
> @@ -832,7 +832,7 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior)
>  	 * Application no longer needs these pages. If the pages are dirty,
>  	 * it's OK to just throw them away. The app will be more careful about
>  	 * data it wants to keep. Be sure to free swap resources too. The
> -	 * zap_page_range_single call sets things up for shrink_active_list to actually
> +	 * zap_vma_range call sets things up for shrink_active_list to actually
>  	 * free these pages later if no one else has touched them in the meantime,
>  	 * although we could add these pages to a global reuse list for
>  	 * shrink_active_list to pick up before reclaiming other pages.
> @@ -1191,7 +1191,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
>  	 * OK some of the range have non-guard pages mapped, zap
>  	 * them. This leaves existing guard pages in place.
>  	 */
> -	zap_page_range_single(vma, range->start, range->end - range->start);
> +	zap_vma_range(vma, range->start, range->end - range->start);
>  }
>
>  /*
> diff --git a/mm/memory.c b/mm/memory.c
> index e611e9af4e85..dd737b6d28c0 100644
> --- a/mm/memory.c
> +++ b/mm/memory.c
> @@ -2215,14 +2215,14 @@ void zap_vma_range_batched(struct mmu_gather *tlb,
>  }
>
>  /**
> - * zap_page_range_single - remove user pages in a given range
> - * @vma: vm_area_struct holding the applicable pages
> - * @address: starting address of pages to zap
> + * zap_vma_range - zap all page table entries in a vma range
> + * @vma: the vma covering the range to zap
> + * @address: starting address of the range to zap
>   * @size: number of bytes to zap
>   *
> - * The range must fit into one VMA.
> + * The provided address range must be fully contained within @vma.
>   */
> -void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
> +void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
>  		unsigned long size)
>  {
>  	struct mmu_gather tlb;
> @@ -2250,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
>  	    !(vma->vm_flags & VM_PFNMAP))
>  		return;
>
> -	zap_page_range_single(vma, address, size);
> +	zap_vma_range(vma, address, size);
>  }
>  EXPORT_SYMBOL_GPL(zap_vma_ptes);
>
> @@ -3018,7 +3018,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
>  	 * maintain page reference counts, and callers may free
>  	 * pages due to the error. So zap it early.
>  	 */
> -	zap_page_range_single(vma, addr, size);
> +	zap_vma_range(vma, addr, size);
>  	return error;
>  }
>
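The tightened kerneldoc makes the single-VMA contract much clearer. Worth
spelling out for readers: a caller zapping a span that may cross several
VMAs still has to clamp each call itself. A minimal sketch (example_zap_span
is hypothetical; the pattern mirrors gmap_helper_discard() above):

	static void example_zap_span(struct mm_struct *mm, unsigned long start,
				     unsigned long end)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		while (start < end) {
			vma = find_vma(mm, start);
			if (!vma || vma->vm_start >= end)
				break;
			/* Clamp so each call stays within a single VMA. */
			start = max(start, vma->vm_start);
			zap_vma_range(vma, start, min(end, vma->vm_end) - start);
			start = vma->vm_end;
		}
		mmap_read_unlock(mm);
	}
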
> diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
> index befcde27dee7..cb4477ef1529 100644
> --- a/net/ipv4/tcp.c
> +++ b/net/ipv4/tcp.c
> @@ -2104,7 +2104,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
>  		maybe_zap_len = total_bytes_to_map -	/* All bytes to map */
>  				*length +		/* Mapped or pending */
>  				(pages_remaining * PAGE_SIZE);	/* Failed map. */
> -		zap_page_range_single(vma, *address, maybe_zap_len);
> +		zap_vma_range(vma, *address, maybe_zap_len);
>  		err = 0;
>  	}
>
> @@ -2112,7 +2112,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
>  		unsigned long leftover_pages = pages_remaining;
>  		int bytes_mapped;
>
> -		/* We called zap_page_range_single, try to reinsert. */
> +		/* We called zap_vma_range, try to reinsert. */
>  		err = vm_insert_pages(vma, *address,
>  				      pending_pages,
>  				      &pages_remaining);
> @@ -2269,7 +2269,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
>  	total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
>  	if (total_bytes_to_map) {
>  		if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
> -			zap_page_range_single(vma, address, total_bytes_to_map);
> +			zap_vma_range(vma, address, total_bytes_to_map);
>  		zc->length = total_bytes_to_map;
>  		zc->recv_skip_hint = 0;
>  	} else {
> diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
> index b8e59e4420f3..04b3cc925d67 100644
> --- a/rust/kernel/mm/virt.rs
> +++ b/rust/kernel/mm/virt.rs
> @@ -113,7 +113,7 @@ pub fn end(&self) -> usize {
>      /// kernel goes further in freeing unused page tables, but for the purposes of this operation
>      /// we must only assume that the leaf level is cleared.
>      #[inline]
> -    pub fn zap_page_range_single(&self, address: usize, size: usize) {
> +    pub fn zap_vma_range(&self, address: usize, size: usize) {
>          let (end, did_overflow) = address.overflowing_add(size);
>          if did_overflow || address < self.start() || self.end() < end {
>              // TODO: call WARN_ONCE once Rust version of it is added
> @@ -124,7 +124,7 @@ pub fn zap_page_range_single(&self, address: usize, size: usize) {
>          // sufficient for this method call. This method has no requirements on the vma flags. The
>          // address range is checked to be within the vma.
>          unsafe {
> -            bindings::zap_page_range_single(self.as_ptr(), address, size)
> +            bindings::zap_vma_range(self.as_ptr(), address, size)
>          };
>      }
>
> --
> 2.43.0
>