From: "Liam R. Howlett" <Liam.Howlett@Oracle.com>
To: maple-tree@lists.infradead.org, linux-mm@kvack.org,
linux-kernel@vger.kernel.org
Cc: Andrew Morton <akpm@google.com>, Song Liu <songliubraving@fb.com>,
Davidlohr Bueso <dave@stgolabs.net>,
"Paul E . McKenney" <paulmck@kernel.org>,
Matthew Wilcox <willy@infradead.org>,
Jerome Glisse <jglisse@redhat.com>,
David Rientjes <rientjes@google.com>,
Axel Rasmussen <axelrasmussen@google.com>,
Suren Baghdasaryan <surenb@google.com>,
Vlastimil Babka <vbabka@suse.cz>
Subject: [PATCH 10/28] mm/mmap: Change unmapped_area and unmapped_area_topdown to use maple tree
Date: Thu, 10 Dec 2020 12:03:44 -0500
Message-ID: <20201210170402.3468568-11-Liam.Howlett@Oracle.com>
In-Reply-To: <20201210170402.3468568-1-Liam.Howlett@Oracle.com>
Use the new maple tree data structure to find an unmapped area.
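Both unmapped_area() and unmapped_area_topdown() now reduce to a single
maple tree walk followed by an alignment fix-up, instead of the rbtree
gap traversal. For illustration only (not part of the diff; the helper
name is made up and locking/validation are omitted), the bottom-up case
boils down to roughly:

	/* Sketch of the new bottom-up search, mirroring the hunk below. */
	static unsigned long maple_gap_lowest(struct vm_unmapped_area_info *info)
	{
		unsigned long length, gap;
		MA_STATE(mas, &current->mm->mm_mt, 0, 0);

		/* Worst-case length once alignment slack is added. */
		length = info->length + info->align_mask;
		if (length < info->length)
			return -ENOMEM;

		/* One walk finds the lowest gap of at least @length. */
		if (mas_get_empty_area(&mas, info->low_limit,
				       info->high_limit - 1, length))
			return -ENOMEM;

		/* Round the gap start up to the requested alignment/offset. */
		gap = mas.index;
		gap += (info->align_offset - gap) & info->align_mask;
		return gap;
	}

The top-down variant is symmetric: mas_get_empty_area_rev() searches
down from the high limit and the alignment adjustment is applied to the
returned index.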
Signed-off-by: Liam R. Howlett <Liam.Howlett@Oracle.com>
---
mm/mmap.c | 249 ++++++------------------------------------------------
1 file changed, 27 insertions(+), 222 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index 3a9e04df021e3..071303779f906 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2050,260 +2050,65 @@ unsigned long mmap_region(struct file *file, unsigned long addr,
return error;
}
+/* unmapped_area() - Find an area between the low_limit and the high_limit with
+ * the correct alignment and offset, all from @info. Note: current->mm is used
+ * for the search.
+ *
+ * @info: The unmapped area information including the range (low_limit -
+ *        high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
static unsigned long unmapped_area(struct vm_unmapped_area_info *info)
{
- /*
- * We implement the search by looking for an rbtree node that
- * immediately follows a suitable gap. That is,
- * - gap_start = vma->vm_prev->vm_end <= info->high_limit - length;
- * - gap_end = vma->vm_start >= info->low_limit + length;
- * - gap_end - gap_start >= length
- */
+ unsigned long length, gap;
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma;
- unsigned long length, low_limit, high_limit, gap_start, gap_end;
- unsigned long gap;
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ validate_mm(current->mm);
- MA_STATE(mas, &mm->mm_mt, 0, 0);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
-
- /* Maple tree is self contained. */
- rcu_read_lock();
if (mas_get_empty_area(&mas, info->low_limit, info->high_limit - 1,
length)) {
rcu_read_unlock();
return -ENOMEM;
}
- rcu_read_unlock();
gap = mas.index;
gap += (info->align_offset - gap) & info->align_mask;
-
- /* Adjust search limits by the desired length */
- if (info->high_limit < length)
- return -ENOMEM;
- high_limit = info->high_limit - length;
-
- if (info->low_limit > high_limit)
- return -ENOMEM;
- low_limit = info->low_limit + length;
-
- /* Check if rbtree root looks promising */
- if (RB_EMPTY_ROOT(&mm->mm_rb))
- goto check_highest;
- vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
- if (vma->rb_subtree_gap < length)
- goto check_highest;
-
- while (true) {
- /* Visit left subtree if it looks promising */
- gap_end = vm_start_gap(vma);
- if (gap_end >= low_limit && vma->vm_rb.rb_left) {
- struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
- if (left->rb_subtree_gap >= length) {
- vma = left;
- continue;
- }
- }
-
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
-check_current:
- /* Check if current node has a suitable gap */
- if (gap_start > high_limit)
- return -ENOMEM;
- if (gap_end >= low_limit &&
- gap_end > gap_start && gap_end - gap_start >= length)
- goto found;
-
- /* Visit right subtree if it looks promising */
- if (vma->vm_rb.rb_right) {
- struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
- if (right->rb_subtree_gap >= length) {
- vma = right;
- continue;
- }
- }
-
- /* Go back up the rbtree to find next candidate node */
- while (true) {
- struct rb_node *prev = &vma->vm_rb;
- if (!rb_parent(prev))
- goto check_highest;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
- if (prev == vma->vm_rb.rb_left) {
- gap_start = vm_end_gap(vma->vm_prev);
- gap_end = vm_start_gap(vma);
- goto check_current;
- }
- }
- }
-
-check_highest:
- /* Check highest gap, which does not precede any rbtree node */
- gap_start = mm->highest_vm_end;
- gap_end = ULONG_MAX; /* Only for VM_BUG_ON below */
- if (gap_start > high_limit)
- return -ENOMEM;
-
-found:
- /* We found a suitable gap. Clip it with the original low_limit. */
- if (gap_start < info->low_limit)
- gap_start = info->low_limit;
-
- /* Adjust gap address to the desired alignment */
- gap_start += (info->align_offset - gap_start) & info->align_mask;
-
- VM_BUG_ON(gap_start + info->length > info->high_limit);
- VM_BUG_ON(gap_start + info->length > gap_end);
-
- VM_BUG_ON(gap != gap_start);
- return gap_start;
+ return gap;
}
+/* unmapped_area_topdown() - Find an area between the low_limit and the
+ * high_limit with the correct alignment and offset at the highest available
+ * address, all from @info. Note: current->mm is used for the search.
+ *
+ * @info: The unmapped area information including the range (low_limit -
+ *        high_limit), the alignment offset and mask.
+ *
+ * Return: A memory address or -ENOMEM.
+ */
static unsigned long unmapped_area_topdown(struct vm_unmapped_area_info *info)
{
- struct mm_struct *mm = current->mm;
- struct vm_area_struct *vma = NULL;
- unsigned long length, low_limit, high_limit, gap_start, gap_end;
- unsigned long gap;
+ unsigned long length, gap;
- MA_STATE(mas, &mm->mm_mt, 0, 0);
- validate_mm_mt(mm);
+ MA_STATE(mas, &current->mm->mm_mt, 0, 0);
+ validate_mm_mt(current->mm);
/* Adjust search length to account for worst case alignment overhead */
length = info->length + info->align_mask;
if (length < info->length)
return -ENOMEM;
- rcu_read_lock();
- if (mas_get_empty_area_rev(&mas, info->low_limit, info->high_limit,
+ if (mas_get_empty_area_rev(&mas, info->low_limit, info->high_limit - 1,
length)) {
rcu_read_unlock();
return -ENOMEM;
}
- rcu_read_unlock();
gap = (mas.index + info->align_mask) & ~info->align_mask;
gap -= info->align_offset & info->align_mask;
- /*
- * Adjust search limits by the desired length.
- * See implementation comment at top of unmapped_area().
- */
- gap_end = info->high_limit;
- if (gap_end < length)
- return -ENOMEM;
- high_limit = gap_end - length;
-
- if (info->low_limit > high_limit)
- return -ENOMEM;
- low_limit = info->low_limit + length;
-
- /* Check highest gap, which does not precede any rbtree node */
- gap_start = mm->highest_vm_end;
- if (gap_start <= high_limit)
- goto found_highest;
-
- /* Check if rbtree root looks promising */
- if (RB_EMPTY_ROOT(&mm->mm_rb))
- return -ENOMEM;
- vma = rb_entry(mm->mm_rb.rb_node, struct vm_area_struct, vm_rb);
- if (vma->rb_subtree_gap < length)
- return -ENOMEM;
-
- while (true) {
- /* Visit right subtree if it looks promising */
- gap_start = vma->vm_prev ? vm_end_gap(vma->vm_prev) : 0;
- if (gap_start <= high_limit && vma->vm_rb.rb_right) {
- struct vm_area_struct *right =
- rb_entry(vma->vm_rb.rb_right,
- struct vm_area_struct, vm_rb);
- if (right->rb_subtree_gap >= length) {
- vma = right;
- continue;
- }
- }
-
-check_current:
- /* Check if current node has a suitable gap */
- gap_end = vm_start_gap(vma);
- if (gap_end < low_limit)
- return -ENOMEM;
- if (gap_start <= high_limit &&
- gap_end > gap_start && gap_end - gap_start >= length)
- goto found;
-
- /* Visit left subtree if it looks promising */
- if (vma->vm_rb.rb_left) {
- struct vm_area_struct *left =
- rb_entry(vma->vm_rb.rb_left,
- struct vm_area_struct, vm_rb);
- if (left->rb_subtree_gap >= length) {
- vma = left;
- continue;
- }
- }
-
- /* Go back up the rbtree to find next candidate node */
- while (true) {
- struct rb_node *prev = &vma->vm_rb;
- if (!rb_parent(prev))
- return -ENOMEM;
- vma = rb_entry(rb_parent(prev),
- struct vm_area_struct, vm_rb);
- if (prev == vma->vm_rb.rb_right) {
- gap_start = vma->vm_prev ?
- vm_end_gap(vma->vm_prev) : 0;
- goto check_current;
- }
- }
- }
-
-found:
- /* We found a suitable gap. Clip it with the original high_limit. */
- if (gap_end > info->high_limit)
- gap_end = info->high_limit;
-
-found_highest:
- /* Compute highest gap address at the desired alignment */
- gap_end -= info->length;
- gap_end -= (gap_end - info->align_offset) & info->align_mask;
-
- VM_BUG_ON(gap_end < info->low_limit);
- VM_BUG_ON(gap_end < gap_start);
-
- if (gap != gap_end) {
- pr_err("%s: %px Gap was found: mt %lu gap_end %lu\n", __func__,
- mm, gap, gap_end);
- pr_err("window was %lu - %lu size %lu\n", info->high_limit,
- info->low_limit, length);
- pr_err("mas.min %lu max %lu mas.last %lu\n", mas.min, mas.max,
- mas.last);
- pr_err("mas.index %lu align mask %lu offset %lu\n", mas.index,
- info->align_mask, info->align_offset);
- pr_err("rb_find_vma find on %lu => %px (%px)\n", mas.index,
- find_vma(mm, mas.index), vma);
-#if defined(CONFIG_DEBUG_MAPLE_TREE)
- mt_dump(&mm->mm_mt);
-#endif
- {
- struct vm_area_struct *dv = mm->mmap;
-
- while (dv) {
- printk("vma %px %lu-%lu\n", dv, dv->vm_start, dv->vm_end);
- dv = dv->vm_next;
- }
- }
- VM_BUG_ON(gap != gap_end);
- }
-
- return gap_end;
+ return gap;
}
/*
--
2.28.0
Thread overview: 31+ messages
2020-12-10 17:03 [PATCH 00/28] RFC mm: Introducing the Maple Tree Liam R. Howlett
2020-12-10 17:03 ` [PATCH 01/28] radix tree test suite: Enhancements for " Liam R. Howlett
2020-12-10 17:03 ` [PATCH 02/28] radix tree test suite: Add support for fallthrough attribute Liam R. Howlett
2020-12-10 17:03 ` [PATCH 03/28] radix tree test suite: Add support for kmem_cache_free_bulk Liam R. Howlett
2020-12-10 17:03 ` [PATCH 04/28] radix tree test suite: Add keme_cache_alloc_bulk() support Liam R. Howlett
2020-12-10 17:03 ` [PATCH 05/28] Maple Tree: Add new data structure Liam R. Howlett
2020-12-10 17:03 ` [PATCH 06/28] mm: Start tracking VMAs with maple tree Liam R. Howlett
2020-12-11 19:30 ` kernel test robot
2020-12-10 17:03 ` [PATCH 07/28] mm/mmap: Introduce unlock_range() for code cleanup Liam R. Howlett
2020-12-10 17:03 ` [PATCH 08/28] mm/mmap: Change find_vma() to use the maple tree Liam R. Howlett
2020-12-10 17:03 ` [PATCH 09/28] mm/mmap: Change find_vma_prev() to use " Liam R. Howlett
2020-12-10 17:03 ` Liam R. Howlett [this message]
2020-12-10 17:03 ` [PATCH 11/28] kernel/fork: Convert dup_mmap " Liam R. Howlett
2020-12-10 17:03 ` [PATCH 12/28] mm: Remove rb tree Liam R. Howlett
2020-12-10 17:03 ` [PATCH 13/28] mm/gup: Expose mm_populate_vma() for use when the vma is known Liam R. Howlett
2020-12-10 21:03 ` kernel test robot
2020-12-10 17:03 ` [PATCH 14/28] mm/mmap: Change do_brk_flags() to expand existing VMA and add do_brk_munmap() Liam R. Howlett
2020-12-10 17:03 ` [PATCH 15/28] mm/mmap: Change vm_brk_flags() to use mm_populate_vma() Liam R. Howlett
2020-12-10 17:03 ` [PATCH 16/28] mm: Move find_vma_intersection to mmap.c and change implementation to maple tree Liam R. Howlett
2020-12-10 17:03 ` [PATCH 17/28] mm/mmap: Change mmap_region to use maple tree state Liam R. Howlett
2020-12-10 17:03 ` [PATCH 18/28] mm/mmap: Drop munmap_vma_range() Liam R. Howlett
2020-12-10 17:03 ` [PATCH 19/28] mm: Remove vmacache Liam R. Howlett
2020-12-10 17:03 ` [PATCH 20/28] mm/mmap: Change __do_munmap() to avoid unnecessary lookups Liam R. Howlett
2020-12-10 17:03 ` [PATCH 21/28] mm/mmap: Change __do_munmap() to use a ma_state Liam R. Howlett
2020-12-10 17:03 ` [PATCH 22/28] mm/mmap: Move mmap_region() below do_munmap() Liam R. Howlett
2020-12-10 17:03 ` [PATCH 23/28] mm/mmap: Add do_mas_munmap() and wraper for __do_munmap() Liam R. Howlett
2020-12-10 17:03 ` [PATCH 24/28] mmap: Use find_vma_intersection in do_mmap() for overlap Liam R. Howlett
2020-12-10 17:03 ` [PATCH 25/28] mmap: Remove __do_munmap() in favour of do_mas_munmap() Liam R. Howlett
2020-12-10 17:04 ` [PATCH 26/28] mm/mmap: Change do_brk_munmap() to use do_mas_align_munmap() Liam R. Howlett
2020-12-10 17:04 ` [PATCH 27/28] mmap: Update count_vma_pages_range() to only use one ma_state Liam R. Howlett
2020-12-10 17:04 ` [PATCH 28/28] mmap: make remove_vma_list() inline Liam R. Howlett