From: Liam Howlett <liam.howlett@oracle.com>
To: "maple-tree@lists.infradead.org" <maple-tree@lists.infradead.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: Song Liu <songliubraving@fb.com>,
	Davidlohr Bueso <dave@stgolabs.net>,
	"Paul E . McKenney" <paulmck@kernel.org>,
	Matthew Wilcox <willy@infradead.org>,
	Laurent Dufour <ldufour@linux.ibm.com>,
	David Rientjes <rientjes@google.com>,
	Axel Rasmussen <axelrasmussen@google.com>,
	Suren Baghdasaryan <surenb@google.com>,
	Vlastimil Babka <vbabka@suse.cz>, Rik van Riel <riel@surriel.com>,
	Peter Zijlstra <peterz@infradead.org>,
	Michel Lespinasse <walken.cr@gmail.com>,
	Jerome Glisse <jglisse@redhat.com>,
	Minchan Kim <minchan@google.com>,
	Joel Fernandes <joelaf@google.com>,
	Rom Lemarchand <romlem@google.com>,
	Liam Howlett <liam.howlett@oracle.com>
Subject: [PATCH v4 25/66] mm/mmap: Move mmap_region() below do_munmap()
Date: Wed, 1 Dec 2021 14:30:02 +0000
Message-ID: <20211201142918.921493-26-Liam.Howlett@oracle.com>
In-Reply-To: <20211201142918.921493-1-Liam.Howlett@oracle.com>

From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>

Relocate mmap_region() below do_munmap() in preparation for the next
commit.  There are no functional changes; the code is moved verbatim.

Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
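A note on why the ordering matters (not stated by the patch itself, so
treat it as a reader's inference): C requires a function to be visible
before it is called, so if a later commit makes mmap_region() call a
helper defined near do_munmap(), mmap_region() must sit below that
helper or gain a forward declaration.  A minimal stand-alone sketch of
the constraint, using hypothetical names rather than anything from this
file:

#include <stdio.h>

/* Defined first: callers below need no forward declaration. */
static int munmap_like_helper(unsigned long addr)
{
	return addr ? 0 : -1;
}

/* Defined second: this call compiles only because the helper's
 * definition already appeared above.  With the order reversed, a
 * prototype would be required (modern compilers reject the implicit
 * declaration outright).
 */
static unsigned long mmap_region_like(unsigned long addr)
{
	return munmap_like_helper(addr) ? 0 : addr;
}

int main(void)
{
	printf("%lu\n", mmap_region_like(4096));
	return 0;
}
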
 mm/mmap.c | 460 +++++++++++++++++++++++++++---------------------------
 1 file changed, 230 insertions(+), 230 deletions(-)

diff --git a/mm/mmap.c b/mm/mmap.c
index fddf38ca0589..436e136a64f1 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -1659,236 +1659,6 @@ static inline int accountable_mapping(struct file *file, vm_flags_t vm_flags)
 	return (vm_flags & (VM_NORESERVE | VM_SHARED | VM_WRITE)) == VM_WRITE;
 }
 
-unsigned long mmap_region(struct file *file, unsigned long addr,
-		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
-		struct list_head *uf)
-{
-	struct mm_struct *mm = current->mm;
-	struct vm_area_struct *vma = NULL;
-	struct vm_area_struct *prev, *next;
-	pgoff_t pglen = len >> PAGE_SHIFT;
-	unsigned long charged = 0;
-	unsigned long end = addr + len;
-	unsigned long merge_start = addr, merge_end = end;
-	pgoff_t vm_pgoff;
-	int error;
-	MA_STATE(mas, &mm->mm_mt, addr, end - 1);
-
-	/* Check against address space limit. */
-	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
-		unsigned long nr_pages;
-
-		/*
-		 * MAP_FIXED may remove pages of mappings that intersects with
-		 * requested mapping. Account for the pages it would unmap.
-		 */
-		nr_pages = count_vma_pages_range(mm, addr, end);
-
-		if (!may_expand_vm(mm, vm_flags,
-					(len >> PAGE_SHIFT) - nr_pages))
-			return -ENOMEM;
-	}
-
-	/* Unmap any existing mapping in the area */
-	if (do_munmap(mm, addr, len, uf))
-		return -ENOMEM;
-
-	/*
-	 * Private writable mapping: check memory availability
-	 */
-	if (accountable_mapping(file, vm_flags)) {
-		charged = len >> PAGE_SHIFT;
-		if (security_vm_enough_memory_mm(mm, charged))
-			return -ENOMEM;
-		vm_flags |= VM_ACCOUNT;
-	}
-
-	next = mas_next(&mas, ULONG_MAX);
-	prev = mas_prev(&mas, 0);
-	if (vm_flags & VM_SPECIAL)
-		goto cannot_expand;
-
-	/* Attempt to expand an old mapping */
-	/* Check next */
-	if (next && next->vm_start == end && vma_policy(next) &&
-	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
-				 NULL_VM_UFFD_CTX)) {
-		merge_end = next->vm_end;
-		vma = next;
-		vm_pgoff = next->vm_pgoff - pglen;
-	}
-
-	/* Check prev */
-	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
-	    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
-				NULL_VM_UFFD_CTX)) {
-		merge_start = prev->vm_start;
-		vma = prev;
-		vm_pgoff = prev->vm_pgoff;
-	}
-
-
-	/* Actually expand, if possible */
-	if (vma &&
-	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
-		khugepaged_enter_vma_merge(prev, vm_flags);
-		goto expanded;
-	}
-
-	mas.index = addr;
-	mas.last = end - 1;
-cannot_expand:
-	/*
-	 * Determine the object being mapped and call the appropriate
-	 * specific mapper. the address has already been validated, but
-	 * not unmapped, but the maps are removed from the list.
-	 */
-	vma = vm_area_alloc(mm);
-	if (!vma) {
-		error = -ENOMEM;
-		goto unacct_error;
-	}
-
-	vma->vm_start = addr;
-	vma->vm_end = end;
-	vma->vm_flags = vm_flags;
-	vma->vm_page_prot = vm_get_page_prot(vm_flags);
-	vma->vm_pgoff = pgoff;
-
-	if (file) {
-		if (vm_flags & VM_SHARED) {
-			error = mapping_map_writable(file->f_mapping);
-			if (error)
-				goto free_vma;
-		}
-
-		vma->vm_file = get_file(file);
-		error = call_mmap(file, vma);
-		if (error)
-			goto unmap_and_free_vma;
-
-		/* Can addr have changed??
-		 *
-		 * Answer: Yes, several device drivers can do it in their
-		 *         f_op->mmap method. -DaveM
-		 */
-		WARN_ON_ONCE(addr != vma->vm_start);
-
-		addr = vma->vm_start;
-
-		/* If vm_flags changed after call_mmap(), we should try merge vma again
-		 * as we may succeed this time.
-		 */
-		if (unlikely(vm_flags != vma->vm_flags && prev &&
-			     prev->vm_end == addr && !vma_policy(prev) &&
-			     can_vma_merge_after(prev, vm_flags, NULL, file,
-						 pgoff, NULL_VM_UFFD_CTX))) {
-			merge_start = prev->vm_start;
-			vm_pgoff = prev->vm_pgoff;
-			if (!vma_expand(&mas, prev, merge_start, merge_end,
-					vm_pgoff, next)) {
-				/* ->mmap() can change vma->vm_file and fput the original file. So
-				 * fput the vma->vm_file here or we would add an extra fput for file
-				 * and cause general protection fault ultimately.
-				 */
-				fput(vma->vm_file);
-				vm_area_free(vma);
-				vma = prev;
-				/* Update vm_flags and possible addr to pick up the change. We don't
-				 * warn here if addr changed as the vma is not linked by vma_link().
-				 */
-				addr = vma->vm_start;
-				vm_flags = vma->vm_flags;
-				goto unmap_writable;
-			}
-		}
-
-		mas_set(&mas, addr);
-		vm_flags = vma->vm_flags;
-	} else if (vm_flags & VM_SHARED) {
-		error = shmem_zero_setup(vma);
-		if (error)
-			goto free_vma;
-	} else {
-		vma_set_anonymous(vma);
-	}
-
-	/* Allow architectures to sanity-check the vm_flags */
-	if (!arch_validate_flags(vma->vm_flags)) {
-		error = -EINVAL;
-		if (file)
-			goto unmap_and_free_vma;
-		else
-			goto free_vma;
-	}
-
-	if (vma->vm_file)
-		i_mmap_lock_write(vma->vm_file->f_mapping);
-
-	vma_mas_store(vma, &mas);
-	__vma_link_list(mm, vma, prev);
-	mm->map_count++;
-	if (vma->vm_file) {
-		if (vma->vm_flags & VM_SHARED)
-			mapping_allow_writable(vma->vm_file->f_mapping);
-
-		flush_dcache_mmap_lock(vma->vm_file->f_mapping);
-		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
-		flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
-		i_mmap_unlock_write(vma->vm_file->f_mapping);
-	}
-
-	/* Once vma denies write, undo our temporary denial count */
-unmap_writable:
-	if (file && vm_flags & VM_SHARED)
-		mapping_unmap_writable(file->f_mapping);
-	file = vma->vm_file;
-expanded:
-	perf_event_mmap(vma);
-
-	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
-	if (vm_flags & VM_LOCKED) {
-		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
-		    is_vm_hugetlb_page(vma) ||
-		    vma == get_gate_vma(current->mm))
-			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
-		else
-			mm->locked_vm += (len >> PAGE_SHIFT);
-	}
-
-	if (file)
-		uprobe_mmap(vma);
-
-	/*
-	 * New (or expanded) vma always get soft dirty status.
-	 * Otherwise user-space soft-dirty page tracker won't
-	 * be able to distinguish situation when vma area unmapped,
-	 * then new mapped in-place (which must be aimed as
-	 * a completely new data area).
-	 */
-	vma->vm_flags |= VM_SOFTDIRTY;
-
-	vma_set_page_prot(vma);
-
-	return addr;
-
-unmap_and_free_vma:
-	fput(vma->vm_file);
-	vma->vm_file = NULL;
-
-	/* Undo any partial mapping done by a device driver. */
-	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
-	charged = 0;
-	if (vm_flags & VM_SHARED)
-		mapping_unmap_writable(file->f_mapping);
-free_vma:
-	vm_area_free(vma);
-unacct_error:
-	if (charged)
-		vm_unacct_memory(charged);
-	return error;
-}
-
 /* unmapped_area() Find an area between the low_limit and the high_limit with
  * the correct alignment and offset, all from @info. Note: current->mm is used
  * for the search.
@@ -2741,6 +2511,236 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len,
 	return __do_munmap(mm, start, len, uf, false);
 }
 
+unsigned long mmap_region(struct file *file, unsigned long addr,
+		unsigned long len, vm_flags_t vm_flags, unsigned long pgoff,
+		struct list_head *uf)
+{
+	struct mm_struct *mm = current->mm;
+	struct vm_area_struct *vma = NULL;
+	struct vm_area_struct *prev, *next;
+	pgoff_t pglen = len >> PAGE_SHIFT;
+	unsigned long charged = 0;
+	unsigned long end = addr + len;
+	unsigned long merge_start = addr, merge_end = end;
+	pgoff_t vm_pgoff;
+	int error;
+	MA_STATE(mas, &mm->mm_mt, addr, end - 1);
+
+	/* Check against address space limit. */
+	if (!may_expand_vm(mm, vm_flags, len >> PAGE_SHIFT)) {
+		unsigned long nr_pages;
+
+		/*
+		 * MAP_FIXED may remove pages of mappings that intersects with
+		 * requested mapping. Account for the pages it would unmap.
+		 */
+		nr_pages = count_vma_pages_range(mm, addr, end);
+
+		if (!may_expand_vm(mm, vm_flags,
+					(len >> PAGE_SHIFT) - nr_pages))
+			return -ENOMEM;
+	}
+
+	/* Unmap any existing mapping in the area */
+	if (do_munmap(mm, addr, len, uf))
+		return -ENOMEM;
+
+	/*
+	 * Private writable mapping: check memory availability
+	 */
+	if (accountable_mapping(file, vm_flags)) {
+		charged = len >> PAGE_SHIFT;
+		if (security_vm_enough_memory_mm(mm, charged))
+			return -ENOMEM;
+		vm_flags |= VM_ACCOUNT;
+	}
+
+	next = mas_next(&mas, ULONG_MAX);
+	prev = mas_prev(&mas, 0);
+	if (vm_flags & VM_SPECIAL)
+		goto cannot_expand;
+
+	/* Attempt to expand an old mapping */
+	/* Check next */
+	if (next && next->vm_start == end && vma_policy(next) &&
+	    can_vma_merge_before(next, vm_flags, NULL, file, pgoff+pglen,
+				 NULL_VM_UFFD_CTX)) {
+		merge_end = next->vm_end;
+		vma = next;
+		vm_pgoff = next->vm_pgoff - pglen;
+	}
+
+	/* Check prev */
+	if (prev && prev->vm_end == addr && !vma_policy(prev) &&
+	    can_vma_merge_after(prev, vm_flags, NULL, file, pgoff,
+				NULL_VM_UFFD_CTX)) {
+		merge_start = prev->vm_start;
+		vma = prev;
+		vm_pgoff = prev->vm_pgoff;
+	}
+
+
+	/* Actually expand, if possible */
+	if (vma &&
+	    !vma_expand(&mas, vma, merge_start, merge_end, vm_pgoff, next)) {
+		khugepaged_enter_vma_merge(prev, vm_flags);
+		goto expanded;
+	}
+
+	mas.index = addr;
+	mas.last = end - 1;
+cannot_expand:
+	/*
+	 * Determine the object being mapped and call the appropriate
+	 * specific mapper. the address has already been validated, but
+	 * not unmapped, but the maps are removed from the list.
+	 */
+	vma = vm_area_alloc(mm);
+	if (!vma) {
+		error = -ENOMEM;
+		goto unacct_error;
+	}
+
+	vma->vm_start = addr;
+	vma->vm_end = end;
+	vma->vm_flags = vm_flags;
+	vma->vm_page_prot = vm_get_page_prot(vm_flags);
+	vma->vm_pgoff = pgoff;
+
+	if (file) {
+		if (vm_flags & VM_SHARED) {
+			error = mapping_map_writable(file->f_mapping);
+			if (error)
+				goto free_vma;
+		}
+
+		vma->vm_file = get_file(file);
+		error = call_mmap(file, vma);
+		if (error)
+			goto unmap_and_free_vma;
+
+		/* Can addr have changed??
+		 *
+		 * Answer: Yes, several device drivers can do it in their
+		 *         f_op->mmap method. -DaveM
+		 */
+		WARN_ON_ONCE(addr != vma->vm_start);
+
+		addr = vma->vm_start;
+
+		/* If vm_flags changed after call_mmap(), we should try merge vma again
+		 * as we may succeed this time.
+		 */
+		if (unlikely(vm_flags != vma->vm_flags && prev &&
+			     prev->vm_end == addr && !vma_policy(prev) &&
+			     can_vma_merge_after(prev, vm_flags, NULL, file,
+						 pgoff, NULL_VM_UFFD_CTX))) {
+			merge_start = prev->vm_start;
+			vm_pgoff = prev->vm_pgoff;
+			if (!vma_expand(&mas, prev, merge_start, merge_end,
+					vm_pgoff, next)) {
+				/* ->mmap() can change vma->vm_file and fput the original file. So
+				 * fput the vma->vm_file here or we would add an extra fput for file
+				 * and cause general protection fault ultimately.
+				 */
+				fput(vma->vm_file);
+				vm_area_free(vma);
+				vma = prev;
+				/* Update vm_flags and possible addr to pick up the change. We don't
+				 * warn here if addr changed as the vma is not linked by vma_link().
+				 */
+				addr = vma->vm_start;
+				vm_flags = vma->vm_flags;
+				goto unmap_writable;
+			}
+		}
+
+		mas_set(&mas, addr);
+		vm_flags = vma->vm_flags;
+	} else if (vm_flags & VM_SHARED) {
+		error = shmem_zero_setup(vma);
+		if (error)
+			goto free_vma;
+	} else {
+		vma_set_anonymous(vma);
+	}
+
+	/* Allow architectures to sanity-check the vm_flags */
+	if (!arch_validate_flags(vma->vm_flags)) {
+		error = -EINVAL;
+		if (file)
+			goto unmap_and_free_vma;
+		else
+			goto free_vma;
+	}
+
+	if (vma->vm_file)
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+
+	vma_mas_store(vma, &mas);
+	__vma_link_list(mm, vma, prev);
+	mm->map_count++;
+	if (vma->vm_file) {
+		if (vma->vm_flags & VM_SHARED)
+			mapping_allow_writable(vma->vm_file->f_mapping);
+
+		flush_dcache_mmap_lock(vma->vm_file->f_mapping);
+		vma_interval_tree_insert(vma, &vma->vm_file->f_mapping->i_mmap);
+		flush_dcache_mmap_unlock(vma->vm_file->f_mapping);
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+	}
+
+	/* Once vma denies write, undo our temporary denial count */
+unmap_writable:
+	if (file && vm_flags & VM_SHARED)
+		mapping_unmap_writable(file->f_mapping);
+	file = vma->vm_file;
+expanded:
+	perf_event_mmap(vma);
+
+	vm_stat_account(mm, vm_flags, len >> PAGE_SHIFT);
+	if (vm_flags & VM_LOCKED) {
+		if ((vm_flags & VM_SPECIAL) || vma_is_dax(vma) ||
+		    is_vm_hugetlb_page(vma) ||
+		    vma == get_gate_vma(current->mm))
+			vma->vm_flags &= VM_LOCKED_CLEAR_MASK;
+		else
+			mm->locked_vm += (len >> PAGE_SHIFT);
+	}
+
+	if (file)
+		uprobe_mmap(vma);
+
+	/*
+	 * New (or expanded) vma always get soft dirty status.
+	 * Otherwise user-space soft-dirty page tracker won't
+	 * be able to distinguish situation when vma area unmapped,
+	 * then new mapped in-place (which must be aimed as
+	 * a completely new data area).
+	 */
+	vma->vm_flags |= VM_SOFTDIRTY;
+
+	vma_set_page_prot(vma);
+
+	return addr;
+
+unmap_and_free_vma:
+	fput(vma->vm_file);
+	vma->vm_file = NULL;
+
+	/* Undo any partial mapping done by a device driver. */
+	unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end);
+	charged = 0;
+	if (vm_flags & VM_SHARED)
+		mapping_unmap_writable(file->f_mapping);
+free_vma:
+	vm_area_free(vma);
+unacct_error:
+	if (charged)
+		vm_unacct_memory(charged);
+	return error;
+}
+
 static int __vm_munmap(unsigned long start, size_t len, bool downgrade)
 {
 	int ret;
-- 
2.30.2
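
Not part of the series: one way a reviewer might confirm that a
relocation like this really moves code verbatim.  git can highlight
moved blocks directly (git diff --color-moved=dimmed-zebra), and the
rough stand-alone check below compares the multiset of removed lines
in a unified diff against the multiset of added lines.  The program
and its invocation are illustrative assumptions, not tooling used by
this patch:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

static int cmp(const void *a, const void *b)
{
	return strcmp(*(char * const *)a, *(char * const *)b);
}

/* Read a plain diff on stdin, e.g.: git diff HEAD~1 HEAD | ./puremove */
int main(void)
{
	char buf[4096];
	char **del = NULL, **add = NULL;
	size_t ndel = 0, nadd = 0;
	int in_hunk = 0;

	while (fgets(buf, sizeof(buf), stdin)) {
		if (!strncmp(buf, "diff ", 5)) {
			in_hunk = 0;	/* new per-file header begins */
			continue;
		}
		if (!strncmp(buf, "@@", 2)) {
			in_hunk = 1;	/* hunk body follows */
			continue;
		}
		if (!in_hunk)
			continue;
		if (buf[0] == '-') {	/* removed line: stash body */
			del = realloc(del, ++ndel * sizeof(*del));
			del[ndel - 1] = strdup(buf + 1);
		} else if (buf[0] == '+') {	/* added line: stash body */
			add = realloc(add, ++nadd * sizeof(*add));
			add[nadd - 1] = strdup(buf + 1);
		}
	}

	if (ndel != nadd) {
		puts("not a pure move: added/removed counts differ");
		return 1;
	}
	qsort(del, ndel, sizeof(*del), cmp);
	qsort(add, nadd, sizeof(*add), cmp);
	for (size_t i = 0; i < ndel; i++) {
		if (strcmp(del[i], add[i])) {
			puts("not a pure move: line content differs");
			return 1;
		}
	}
	puts("pure move: every removed line reappears as an added line");
	return 0;
}

For this patch the diffstat (230 insertions, 230 deletions) already
hints at a pure move; the multiset check above, or git's --color-moved
view, makes it explicit.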

