From mboxrd@z Thu Jan 1 00:00:00 1970
From: "David Hildenbrand (Arm)" <david@kernel.org>
To: linux-kernel@vger.kernel.org
Cc: linux-mm@kvack.org, "David Hildenbrand (Arm)", Andrew Morton,
 Lorenzo Stoakes, "Liam R. Howlett", Vlastimil Babka, Mike Rapoport,
 Suren Baghdasaryan, Michal Hocko, Jann Horn, Pedro Falcato,
 David Rientjes, Shakeel Butt, "Matthew Wilcox (Oracle)", Alice Ryhl,
 Madhavan Srinivasan, Michael Ellerman, Christian Borntraeger,
 Janosch Frank, Claudio Imbrenda, Alexander Gordeev, Gerald Schaefer,
 Heiko Carstens, Vasily Gorbik, Jarkko Sakkinen, Thomas Gleixner,
 Ingo Molnar, Borislav Petkov, Greg Kroah-Hartman, Arve Hjønnevåg,
 Todd Kjos, Christian Brauner, Carlos Llamas, Ian Abbott,
 H Hartley Sweeten, Jani Nikula, Joonas Lahtinen, Rodrigo Vivi,
 Tvrtko Ursulin, David Airlie, Simona Vetter, Jason Gunthorpe,
 Leon Romanovsky, Dimitri Sivanich, Arnd Bergmann, Alexei Starovoitov,
 Daniel Borkmann, Andrii Nakryiko, Peter Zijlstra,
 Arnaldo Carvalho de Melo, Namhyung Kim, Andy Lutomirski,
 Vincenzo Frascino, Eric Dumazet, Neal Cardwell, "David S. Miller",
 David Ahern, Jakub Kicinski, Paolo Abeni, Miguel Ojeda,
 linuxppc-dev@lists.ozlabs.org, kvm@vger.kernel.org,
 linux-s390@vger.kernel.org, linux-sgx@vger.kernel.org,
 intel-gfx@lists.freedesktop.org, dri-devel@lists.freedesktop.org,
 linux-rdma@vger.kernel.org, bpf@vger.kernel.org,
 linux-perf-users@vger.kernel.org, linux-fsdevel@vger.kernel.org,
 netdev@vger.kernel.org, rust-for-linux@vger.kernel.org, x86@kernel.org
Subject: [PATCH v1 14/16] mm: rename zap_page_range_single() to zap_vma_range()
Date: Fri, 27 Feb 2026 21:08:45 +0100
Message-ID: <20260227200848.114019-15-david@kernel.org>
X-Mailer: git-send-email 2.43.0
In-Reply-To: <20260227200848.114019-1-david@kernel.org>
References: <20260227200848.114019-1-david@kernel.org>
MIME-Version: 1.0
Content-Transfer-Encoding: 8bit

Let's rename it to make it better match our new naming scheme.

While at it, polish the kerneldoc.

Signed-off-by: David Hildenbrand (Arm) <david@kernel.org>
---
 arch/s390/mm/gmap_helpers.c          |  2 +-
 drivers/android/binder/page_range.rs |  4 ++--
 drivers/android/binder_alloc.c       |  2 +-
 include/linux/mm.h                   |  4 ++--
 kernel/bpf/arena.c                   |  2 +-
 kernel/events/core.c                 |  2 +-
 mm/madvise.c                         |  4 ++--
 mm/memory.c                          | 14 +++++++-------
 net/ipv4/tcp.c                       |  6 +++---
 rust/kernel/mm/virt.rs               |  4 ++--
 10 files changed, 22 insertions(+), 22 deletions(-)

diff --git a/arch/s390/mm/gmap_helpers.c b/arch/s390/mm/gmap_helpers.c
index ae2d59a19313..f8789ffcc05c 100644
--- a/arch/s390/mm/gmap_helpers.c
+++ b/arch/s390/mm/gmap_helpers.c
@@ -89,7 +89,7 @@ void gmap_helper_discard(struct mm_struct *mm, unsigned long vmaddr, unsigned lo
 		if (!vma)
 			return;
 		if (!is_vm_hugetlb_page(vma))
-			zap_page_range_single(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
+			zap_vma_range(vma, vmaddr, min(end, vma->vm_end) - vmaddr);
 		vmaddr = vma->vm_end;
 	}
 }
diff --git a/drivers/android/binder/page_range.rs b/drivers/android/binder/page_range.rs
index fdd97112ef5c..2fddd4ed8d4c 100644
--- a/drivers/android/binder/page_range.rs
+++ b/drivers/android/binder/page_range.rs
@@ -130,7 +130,7 @@ pub(crate) struct ShrinkablePageRange {
     pid: Pid,
     /// The mm for the relevant process.
     mm: ARef<Mm>,
-    /// Used to synchronize calls to `vm_insert_page` and `zap_page_range_single`.
+    /// Used to synchronize calls to `vm_insert_page` and `zap_vma_range`.
     #[pin]
     mm_lock: Mutex<()>,
     /// Spinlock protecting changes to pages.
@@ -719,7 +719,7 @@ fn drop(self: Pin<&mut Self>) {
             if let Some(vma) = mmap_read.vma_lookup(vma_addr) {
                 let user_page_addr = vma_addr + (page_index << PAGE_SHIFT);
-                vma.zap_page_range_single(user_page_addr, PAGE_SIZE);
+                vma.zap_vma_range(user_page_addr, PAGE_SIZE);
             }
             drop(mmap_read);
diff --git a/drivers/android/binder_alloc.c b/drivers/android/binder_alloc.c
index dd2046bd5cde..e4488ad86a65 100644
--- a/drivers/android/binder_alloc.c
+++ b/drivers/android/binder_alloc.c
@@ -1185,7 +1185,7 @@ enum lru_status binder_alloc_free_page(struct list_head *item,
 	if (vma) {
 		trace_binder_unmap_user_start(alloc, index);
-		zap_page_range_single(vma, page_addr, PAGE_SIZE);
+		zap_vma_range(vma, page_addr, PAGE_SIZE);
 		trace_binder_unmap_user_end(alloc, index);
 	}
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 4bd1500b9630..833bedd3f739 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2835,7 +2835,7 @@ struct page *vm_normal_page_pud(struct vm_area_struct *vma, unsigned long addr,
 void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size);
 /**
  * zap_vma - zap all page table entries in a vma
  * @vma:
  */
@@ -2843,7 +2843,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
 static inline void zap_vma(struct vm_area_struct *vma)
 {
-	zap_page_range_single(vma, vma->vm_start, vma->vm_end - vma->vm_start);
+	zap_vma_range(vma, vma->vm_start, vma->vm_end - vma->vm_start);
 }
 struct mmu_notifier_range;
diff --git a/kernel/bpf/arena.c b/kernel/bpf/arena.c
index c34510d83b1f..37843c6a4764 100644
--- a/kernel/bpf/arena.c
+++ b/kernel/bpf/arena.c
@@ -656,7 +656,7 @@ static void zap_pages(struct bpf_arena *arena, long uaddr, long page_cnt)
 	guard(mutex)(&arena->lock);
 	/* iterate link list under lock */
 	list_for_each_entry(vml, &arena->vma_list, head)
-		zap_page_range_single(vml->vma, uaddr, PAGE_SIZE * page_cnt);
+		zap_vma_range(vml->vma, uaddr, PAGE_SIZE * page_cnt);
 }
 
 static void arena_free_pages(struct bpf_arena *arena, long uaddr, long page_cnt, bool sleepable)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index c94c56c94104..5ee02817c3bc 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7215,7 +7215,7 @@ static int map_range(struct perf_buffer *rb, struct vm_area_struct *vma)
 #ifdef CONFIG_MMU
 	/* Clear any partial mappings on error. */
 	if (err)
-		zap_page_range_single(vma, vma->vm_start, nr_pages * PAGE_SIZE);
+		zap_vma_range(vma, vma->vm_start, nr_pages * PAGE_SIZE);
 #endif
 
 	return err;
diff --git a/mm/madvise.c b/mm/madvise.c
index fb5fcdff2b66..6e66f56ff1a6 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -832,7 +832,7 @@ static int madvise_free_single_vma(struct madvise_behavior *madv_behavior)
  * Application no longer needs these pages. If the pages are dirty,
  * it's OK to just throw them away. The app will be more careful about
  * data it wants to keep. Be sure to free swap resources too. The
- * zap_page_range_single call sets things up for shrink_active_list to actually
+ * zap_vma_range call sets things up for shrink_active_list to actually
  * free these pages later if no one else has touched them in the meantime,
  * although we could add these pages to a global reuse list for
  * shrink_active_list to pick up before reclaiming other pages.
@@ -1191,7 +1191,7 @@ static long madvise_guard_install(struct madvise_behavior *madv_behavior)
 		 * OK some of the range have non-guard pages mapped, zap
 		 * them. This leaves existing guard pages in place.
 		 */
-		zap_page_range_single(vma, range->start, range->end - range->start);
+		zap_vma_range(vma, range->start, range->end - range->start);
 	}
 
 	/*
diff --git a/mm/memory.c b/mm/memory.c
index e611e9af4e85..dd737b6d28c0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2215,14 +2215,14 @@ void zap_vma_range_batched(struct mmu_gather *tlb,
 }
 
 /**
- * zap_page_range_single - remove user pages in a given range
- * @vma: vm_area_struct holding the applicable pages
- * @address: starting address of pages to zap
+ * zap_vma_range - zap all page table entries in a vma range
+ * @vma: the vma covering the range to zap
+ * @address: starting address of the range to zap
  * @size: number of bytes to zap
  *
- * The range must fit into one VMA.
+ * The provided address range must be fully contained within @vma.
  */
-void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
+void zap_vma_range(struct vm_area_struct *vma, unsigned long address,
 		unsigned long size)
 {
 	struct mmu_gather tlb;
@@ -2250,7 +2250,7 @@ void zap_vma_ptes(struct vm_area_struct *vma, unsigned long address,
 	    !(vma->vm_flags & VM_PFNMAP))
 		return;
 
-	zap_page_range_single(vma, address, size);
+	zap_vma_range(vma, address, size);
 }
 EXPORT_SYMBOL_GPL(zap_vma_ptes);
 
@@ -3018,7 +3018,7 @@ static int remap_pfn_range_notrack(struct vm_area_struct *vma, unsigned long add
 	 * maintain page reference counts, and callers may free
 	 * pages due to the error. So zap it early.
 	 */
-	zap_page_range_single(vma, addr, size);
+	zap_vma_range(vma, addr, size);
 
 	return error;
 }
diff --git a/net/ipv4/tcp.c b/net/ipv4/tcp.c
index befcde27dee7..cb4477ef1529 100644
--- a/net/ipv4/tcp.c
+++ b/net/ipv4/tcp.c
@@ -2104,7 +2104,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
 		maybe_zap_len = total_bytes_to_map -	/* All bytes to map */
 				*length +		/* Mapped or pending */
 				(pages_remaining * PAGE_SIZE);	/* Failed map. */
-		zap_page_range_single(vma, *address, maybe_zap_len);
+		zap_vma_range(vma, *address, maybe_zap_len);
 		err = 0;
 	}
 
@@ -2112,7 +2112,7 @@ static int tcp_zerocopy_vm_insert_batch_error(struct vm_area_struct *vma,
 		unsigned long leftover_pages = pages_remaining;
 		int bytes_mapped;
 
-		/* We called zap_page_range_single, try to reinsert. */
+		/* We called zap_vma_range, try to reinsert. */
 		err = vm_insert_pages(vma, *address, pending_pages,
 				      &pages_remaining);
@@ -2269,7 +2269,7 @@ static int tcp_zerocopy_receive(struct sock *sk,
 	total_bytes_to_map = avail_len & ~(PAGE_SIZE - 1);
 	if (total_bytes_to_map) {
 		if (!(zc->flags & TCP_RECEIVE_ZEROCOPY_FLAG_TLB_CLEAN_HINT))
-			zap_page_range_single(vma, address, total_bytes_to_map);
+			zap_vma_range(vma, address, total_bytes_to_map);
 		zc->length = total_bytes_to_map;
 		zc->recv_skip_hint = 0;
 	} else {
diff --git a/rust/kernel/mm/virt.rs b/rust/kernel/mm/virt.rs
index b8e59e4420f3..04b3cc925d67 100644
--- a/rust/kernel/mm/virt.rs
+++ b/rust/kernel/mm/virt.rs
@@ -113,7 +113,7 @@ pub fn end(&self) -> usize {
     /// kernel goes further in freeing unused page tables, but for the purposes of this operation
     /// we must only assume that the leaf level is cleared.
     #[inline]
-    pub fn zap_page_range_single(&self, address: usize, size: usize) {
+    pub fn zap_vma_range(&self, address: usize, size: usize) {
         let (end, did_overflow) = address.overflowing_add(size);
         if did_overflow || address < self.start() || self.end() < end {
             // TODO: call WARN_ONCE once Rust version of it is added
@@ -124,7 +124,7 @@ pub fn zap_page_range_single(&self, address: usize, size: usize) {
         // sufficient for this method call. This method has no requirements on the vma flags. The
        // address range is checked to be within the vma.
         unsafe {
-            bindings::zap_page_range_single(self.as_ptr(), address, size)
+            bindings::zap_vma_range(self.as_ptr(), address, size)
         };
     }
-- 
2.43.0
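
For readers following the rename, a minimal, illustrative sketch of what a
typical caller of the renamed helper looks like after this patch. It is not
taken from the tree: the function name example_zap_one_page is made up, the
surrounding helpers (mmap_read_lock(), vma_lookup(), is_vm_hugetlb_page())
are the usual kernel APIs, and, per the polished kerneldoc, the zapped range
must be fully contained within the looked-up VMA.

	/* Hypothetical caller: zap the single page mapping @addr, if any. */
	static void example_zap_one_page(struct mm_struct *mm, unsigned long addr)
	{
		struct vm_area_struct *vma;

		mmap_read_lock(mm);
		vma = vma_lookup(mm, addr);	/* NULL if addr is unmapped */
		if (vma && !is_vm_hugetlb_page(vma))
			/* One page, starting at the page boundary below addr. */
			zap_vma_range(vma, addr & PAGE_MASK, PAGE_SIZE);
		mmap_read_unlock(mm);
	}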