* [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
@ 2026-02-09 22:08 Suren Baghdasaryan
2026-02-10 4:30 ` kernel test robot
` (3 more replies)
0 siblings, 4 replies; 6+ messages in thread
From: Suren Baghdasaryan @ 2026-02-09 22:08 UTC (permalink / raw)
To: akpm
Cc: willy, david, ziy, matthew.brost, joshua.hahnjy, rakie.kim,
byungchul, gourry, ying.huang, apopple, lorenzo.stoakes,
baolin.wang, Liam.Howlett, npache, ryan.roberts, dev.jain,
baohua, lance.yang, vbabka, jannh, rppt, mhocko, pfalcato, kees,
maddy, npiggin, mpe, chleroy, linux-mm, linuxppc-dev, kvm,
linux-kernel, surenb
Now that we have vma_start_write_killable() we can replace most of the
vma_start_write() calls with it, improving reaction time to the kill
signal.
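For reference, the typical conversion at a call site looks roughly like
this (illustrative sketch only; vma_start_write_killable() is assumed to
return 0 on success and -EINTR when a fatal signal is pending):

    /* before */
    vma_start_write(vma);

    /* after: back out if the task has been killed */
    if (vma_start_write_killable(vma))
        return -EINTR;  /* or unwind and propagate, as the call site requires */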
There are several places which are left untouched by this patch:
1. free_pgtables() because the function should free page tables even if a
fatal signal is pending.
2. userfaultfd code, where some paths calling vma_start_write() can
handle EINTR and some can't without a deeper code refactoring.
3. vm_flags_{set|mod|clear} require refactoring that involves moving
vma_start_write() out of these functions and replacing it with
vma_assert_write_locked(); callers of these functions should then
lock the vma themselves using vma_start_write_killable() whenever
possible.
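For reference, that refactor would look roughly as follows (simplified
sketch, not part of this patch; the helper body is paraphrased):

    /* today: the helper takes the VMA write lock itself */
    static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
    {
        vma_start_write(vma);
        ACCESS_PRIVATE(vma, __vm_flags) |= flags;
    }

    /* after the refactor: callers lock the VMA (killably) beforehand */
    static inline void vm_flags_set(struct vm_area_struct *vma, vm_flags_t flags)
    {
        vma_assert_write_locked(vma);
        ACCESS_PRIVATE(vma, __vm_flags) |= flags;
    }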
Suggested-by: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Suren Baghdasaryan <surenb@google.com>
---
arch/powerpc/kvm/book3s_hv_uvmem.c | 5 +-
include/linux/mempolicy.h | 5 +-
mm/khugepaged.c | 5 +-
mm/madvise.c | 4 +-
mm/memory.c | 2 +
mm/mempolicy.c | 23 ++++++--
mm/mlock.c | 20 +++++--
mm/mprotect.c | 4 +-
mm/mremap.c | 4 +-
mm/pagewalk.c | 20 +++++--
mm/vma.c | 94 +++++++++++++++++++++---------
mm/vma_exec.c | 6 +-
12 files changed, 139 insertions(+), 53 deletions(-)
diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
index 7cf9310de0ec..69750edcf8d5 100644
--- a/arch/powerpc/kvm/book3s_hv_uvmem.c
+++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
@@ -410,7 +410,10 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
ret = H_STATE;
break;
}
- vma_start_write(vma);
+ if (vma_start_write_killable(vma)) {
+ ret = H_STATE;
+ break;
+ }
/* Copy vm_flags to avoid partial modifications in ksm_madvise */
vm_flags = vma->vm_flags;
ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index 0fe96f3ab3ef..762930edde5a 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -137,7 +137,7 @@ bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
extern void numa_policy_init(void);
extern void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new);
-extern void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
+extern int mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new);
extern int huge_node(struct vm_area_struct *vma,
unsigned long addr, gfp_t gfp_flags,
@@ -251,8 +251,9 @@ static inline void mpol_rebind_task(struct task_struct *tsk,
{
}
-static inline void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+static inline int mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
+ return 0;
}
static inline int huge_node(struct vm_area_struct *vma,
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index fa1e57fd2c46..392dde66fa86 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1150,7 +1150,10 @@ static enum scan_result collapse_huge_page(struct mm_struct *mm, unsigned long a
if (result != SCAN_SUCCEED)
goto out_up_write;
/* check if the pmd is still valid */
- vma_start_write(vma);
+ if (vma_start_write_killable(vma)) {
+ result = SCAN_FAIL;
+ goto out_up_write;
+ }
result = check_pmd_still_valid(mm, address, pmd);
if (result != SCAN_SUCCEED)
goto out_up_write;
diff --git a/mm/madvise.c b/mm/madvise.c
index 8debb2d434aa..b41e64231c31 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -173,7 +173,9 @@ static int madvise_update_vma(vm_flags_t new_flags,
madv_behavior->vma = vma;
/* vm_flags is protected by the mmap_lock held in write mode. */
- vma_start_write(vma);
+ if (vma_start_write_killable(vma))
+ return -EINTR;
+
vm_flags_reset(vma, new_flags);
if (set_new_anon_name)
return replace_anon_vma_name(vma, anon_name);
diff --git a/mm/memory.c b/mm/memory.c
index d6d273eb2189..3831e3026615 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -379,6 +379,8 @@ void free_pgd_range(struct mmu_gather *tlb,
* page tables that should be removed. This can differ from the vma mappings on
* some archs that may have mappings that need to be removed outside the vmas.
* Note that the prev->vm_end and next->vm_start are often used.
+ * Note: we don't use vma_start_write_killable() because page tables should be
+ * freed even if the task is being killed.
*
* The vma_end differs from the pg_end when a dup_mmap() failed and the tree has
* unrelated data to the mm_struct being torn down.
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index dbd48502ac24..3de7ab4f4cee 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -556,17 +556,25 @@ void mpol_rebind_task(struct task_struct *tsk, const nodemask_t *new)
*
* Call holding a reference to mm. Takes mm->mmap_lock during call.
*/
-void mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
+int mpol_rebind_mm(struct mm_struct *mm, nodemask_t *new)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
+ int ret = 0;
+
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
- mmap_write_lock(mm);
for_each_vma(vmi, vma) {
- vma_start_write(vma);
+ if (vma_start_write_killable(vma)) {
+ ret = -EINTR;
+ break;
+ }
mpol_rebind_policy(vma->vm_policy, new);
}
mmap_write_unlock(mm);
+
+ return ret;
}
static const struct mempolicy_operations mpol_ops[MPOL_MAX] = {
@@ -1785,7 +1793,8 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
return -EINVAL;
if (end == start)
return 0;
- mmap_write_lock(mm);
+ if (mmap_write_lock_killable(mm))
+ return -EINTR;
prev = vma_prev(&vmi);
for_each_vma_range(vmi, vma, end) {
/*
@@ -1808,7 +1817,11 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
break;
}
- vma_start_write(vma);
+ if (vma_start_write_killable(vma)) {
+ err = -EINTR;
+ break;
+ }
+
new->home_node = home_node;
err = mbind_range(&vmi, vma, &prev, start, end, new);
mpol_put(new);
diff --git a/mm/mlock.c b/mm/mlock.c
index 2f699c3497a5..2885b858aa0f 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -420,7 +420,7 @@ static int mlock_pte_range(pmd_t *pmd, unsigned long addr,
* Called for mlock(), mlock2() and mlockall(), to set @vma VM_LOCKED;
* called for munlock() and munlockall(), to clear VM_LOCKED from @vma.
*/
-static void mlock_vma_pages_range(struct vm_area_struct *vma,
+static int mlock_vma_pages_range(struct vm_area_struct *vma,
unsigned long start, unsigned long end, vm_flags_t newflags)
{
static const struct mm_walk_ops mlock_walk_ops = {
@@ -441,7 +441,9 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
*/
if (newflags & VM_LOCKED)
newflags |= VM_IO;
- vma_start_write(vma);
+ if (vma_start_write_killable(vma))
+ return -EINTR;
+
vm_flags_reset_once(vma, newflags);
lru_add_drain();
@@ -452,6 +454,7 @@ static void mlock_vma_pages_range(struct vm_area_struct *vma,
newflags &= ~VM_IO;
vm_flags_reset_once(vma, newflags);
}
+ return 0;
}
/*
@@ -501,10 +504,12 @@ static int mlock_fixup(struct vma_iterator *vmi, struct vm_area_struct *vma,
*/
if ((newflags & VM_LOCKED) && (oldflags & VM_LOCKED)) {
/* No work to do, and mlocking twice would be wrong */
- vma_start_write(vma);
+ ret = vma_start_write_killable(vma);
+ if (ret)
+ goto out;
vm_flags_reset(vma, newflags);
} else {
- mlock_vma_pages_range(vma, start, end, newflags);
+ ret = mlock_vma_pages_range(vma, start, end, newflags);
}
out:
*prev = vma;
@@ -733,9 +738,12 @@ static int apply_mlockall_flags(int flags)
error = mlock_fixup(&vmi, vma, &prev, vma->vm_start, vma->vm_end,
newflags);
- /* Ignore errors, but prev needs fixing up. */
- if (error)
+ /* Ignore errors except EINTR, but prev needs fixing up. */
+ if (error) {
+ if (error == -EINTR)
+ break;
prev = vma;
+ }
cond_resched();
}
out:
diff --git a/mm/mprotect.c b/mm/mprotect.c
index c0571445bef7..49dbb7156936 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -765,7 +765,9 @@ mprotect_fixup(struct vma_iterator *vmi, struct mmu_gather *tlb,
* vm_flags and vm_page_prot are protected by the mmap_lock
* held in write mode.
*/
- vma_start_write(vma);
+ error = vma_start_write_killable(vma);
+ if (error < 0)
+ goto fail;
vm_flags_reset_once(vma, newflags);
if (vma_wants_manual_pte_write_upgrade(vma))
mm_cp_flags |= MM_CP_TRY_CHANGE_WRITABLE;
diff --git a/mm/mremap.c b/mm/mremap.c
index 2be876a70cc0..aef1e5f373c7 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -1286,7 +1286,9 @@ static unsigned long move_vma(struct vma_remap_struct *vrm)
return -ENOMEM;
/* We don't want racing faults. */
- vma_start_write(vrm->vma);
+ err = vma_start_write_killable(vrm->vma);
+ if (err)
+ return err;
/* Perform copy step. */
err = copy_vma_and_data(vrm, &new_vma);
diff --git a/mm/pagewalk.c b/mm/pagewalk.c
index a94c401ab2cf..dc9f7a7709c6 100644
--- a/mm/pagewalk.c
+++ b/mm/pagewalk.c
@@ -425,14 +425,13 @@ static inline void process_mm_walk_lock(struct mm_struct *mm,
mmap_assert_write_locked(mm);
}
-static inline void process_vma_walk_lock(struct vm_area_struct *vma,
+static inline int process_vma_walk_lock(struct vm_area_struct *vma,
enum page_walk_lock walk_lock)
{
#ifdef CONFIG_PER_VMA_LOCK
switch (walk_lock) {
case PGWALK_WRLOCK:
- vma_start_write(vma);
- break;
+ return vma_start_write_killable(vma);
case PGWALK_WRLOCK_VERIFY:
vma_assert_write_locked(vma);
break;
@@ -444,6 +443,7 @@ static inline void process_vma_walk_lock(struct vm_area_struct *vma,
break;
}
#endif
+ return 0;
}
/*
@@ -487,7 +487,9 @@ int walk_page_range_mm_unsafe(struct mm_struct *mm, unsigned long start,
if (ops->pte_hole)
err = ops->pte_hole(start, next, -1, &walk);
} else { /* inside vma */
- process_vma_walk_lock(vma, ops->walk_lock);
+ err = process_vma_walk_lock(vma, ops->walk_lock);
+ if (err)
+ break;
walk.vma = vma;
next = min(end, vma->vm_end);
vma = find_vma(mm, vma->vm_end);
@@ -704,6 +706,7 @@ int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
.vma = vma,
.private = private,
};
+ int err;
if (start >= end || !walk.mm)
return -EINVAL;
@@ -711,7 +714,9 @@ int walk_page_range_vma_unsafe(struct vm_area_struct *vma, unsigned long start,
return -EINVAL;
process_mm_walk_lock(walk.mm, ops->walk_lock);
- process_vma_walk_lock(vma, ops->walk_lock);
+ err = process_vma_walk_lock(vma, ops->walk_lock);
+ if (err)
+ return err;
return __walk_page_range(start, end, &walk);
}
@@ -734,6 +739,7 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
.vma = vma,
.private = private,
};
+ int err;
if (!walk.mm)
return -EINVAL;
@@ -741,7 +747,9 @@ int walk_page_vma(struct vm_area_struct *vma, const struct mm_walk_ops *ops,
return -EINVAL;
process_mm_walk_lock(walk.mm, ops->walk_lock);
- process_vma_walk_lock(vma, ops->walk_lock);
+ err = process_vma_walk_lock(vma, ops->walk_lock);
+ if (err)
+ return err;
return __walk_page_range(vma->vm_start, vma->vm_end, &walk);
}
diff --git a/mm/vma.c b/mm/vma.c
index be64f781a3aa..3cfb81b3b7cf 100644
--- a/mm/vma.c
+++ b/mm/vma.c
@@ -540,8 +540,12 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (new->vm_ops && new->vm_ops->open)
new->vm_ops->open(new);
- vma_start_write(vma);
- vma_start_write(new);
+ err = vma_start_write_killable(vma);
+ if (err)
+ goto out_fput;
+ err = vma_start_write_killable(new);
+ if (err)
+ goto out_fput;
init_vma_prep(&vp, vma);
vp.insert = new;
@@ -574,6 +578,9 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
return 0;
+out_fput:
+ if (new->vm_file)
+ fput(new->vm_file);
out_free_mpol:
mpol_put(vma_policy(new));
out_free_vmi:
@@ -895,16 +902,22 @@ static __must_check struct vm_area_struct *vma_merge_existing_range(
}
/* No matter what happens, we will be adjusting middle. */
- vma_start_write(middle);
+ err = vma_start_write_killable(middle);
+ if (err)
+ goto abort;
if (merge_right) {
- vma_start_write(next);
+ err = vma_start_write_killable(next);
+ if (err)
+ goto abort;
vmg->target = next;
sticky_flags |= (next->vm_flags & VM_STICKY);
}
if (merge_left) {
- vma_start_write(prev);
+ err = vma_start_write_killable(prev);
+ if (err)
+ goto abort;
vmg->target = prev;
sticky_flags |= (prev->vm_flags & VM_STICKY);
}
@@ -1155,10 +1168,12 @@ int vma_expand(struct vma_merge_struct *vmg)
struct vm_area_struct *next = vmg->next;
bool remove_next = false;
vm_flags_t sticky_flags;
- int ret = 0;
+ int ret;
mmap_assert_write_locked(vmg->mm);
- vma_start_write(target);
+ ret = vma_start_write_killable(target);
+ if (ret)
+ return ret;
if (next && target != next && vmg->end == next->vm_end)
remove_next = true;
@@ -1186,17 +1201,19 @@ int vma_expand(struct vma_merge_struct *vmg)
* Note that, by convention, callers ignore OOM for this case, so
* we don't need to account for vmg->give_up_on_mm here.
*/
- if (remove_next)
+ if (remove_next) {
+ ret = vma_start_write_killable(next);
+ if (ret)
+ return ret;
ret = dup_anon_vma(target, next, &anon_dup);
+ }
if (!ret && vmg->copied_from)
ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
if (ret)
return ret;
- if (remove_next) {
- vma_start_write(next);
+ if (remove_next)
vmg->__remove_next = true;
- }
if (commit_merge(vmg))
goto nomem;
@@ -1229,6 +1246,7 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff)
{
struct vma_prepare vp;
+ int err;
WARN_ON((vma->vm_start != start) && (vma->vm_end != end));
@@ -1240,7 +1258,11 @@ int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
if (vma_iter_prealloc(vmi, NULL))
return -ENOMEM;
- vma_start_write(vma);
+ err = vma_start_write_killable(vma);
+ if (err) {
+ vma_iter_free(vmi);
+ return err;
+ }
init_vma_prep(&vp, vma);
vma_prepare(&vp);
@@ -1430,7 +1452,9 @@ static int vms_gather_munmap_vmas(struct vma_munmap_struct *vms,
if (error)
goto end_split_failed;
}
- vma_start_write(next);
+ error = vma_start_write_killable(next);
+ if (error)
+ goto munmap_gather_failed;
mas_set(mas_detach, vms->vma_count++);
error = mas_store_gfp(mas_detach, next, GFP_KERNEL);
if (error)
@@ -1824,12 +1848,17 @@ static void vma_link_file(struct vm_area_struct *vma, bool hold_rmap_lock)
static int vma_link(struct mm_struct *mm, struct vm_area_struct *vma)
{
VMA_ITERATOR(vmi, mm, 0);
+ int err;
vma_iter_config(&vmi, vma->vm_start, vma->vm_end);
if (vma_iter_prealloc(&vmi, vma))
return -ENOMEM;
- vma_start_write(vma);
+ err = vma_start_write_killable(vma);
+ if (err) {
+ vma_iter_free(&vmi);
+ return err;
+ }
vma_iter_store_new(&vmi, vma);
vma_link_file(vma, /* hold_rmap_lock= */false);
mm->map_count++;
@@ -2211,9 +2240,8 @@ int mm_take_all_locks(struct mm_struct *mm)
* is reached.
*/
for_each_vma(vmi, vma) {
- if (signal_pending(current))
+ if (vma_start_write_killable(vma))
goto out_unlock;
- vma_start_write(vma);
}
vma_iter_init(&vmi, mm, 0);
@@ -2549,7 +2577,9 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
#endif
/* Lock the VMA since it is modified after insertion into VMA tree */
- vma_start_write(vma);
+ error = vma_start_write_killable(vma);
+ if (error)
+ goto free_iter_vma;
vma_iter_store_new(vmi, vma);
map->mm->map_count++;
vma_link_file(vma, map->hold_file_rmap_lock);
@@ -2860,6 +2890,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long addr, unsigned long len, vm_flags_t vm_flags)
{
struct mm_struct *mm = current->mm;
+ int err = -ENOMEM;
/*
* Check against address space limits by the changed size
@@ -2904,7 +2935,10 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vma_set_range(vma, addr, addr + len, addr >> PAGE_SHIFT);
vm_flags_init(vma, vm_flags);
vma->vm_page_prot = vm_get_page_prot(vm_flags);
- vma_start_write(vma);
+ if (vma_start_write_killable(vma)) {
+ err = -EINTR;
+ goto mas_store_fail;
+ }
if (vma_iter_store_gfp(vmi, vma, GFP_KERNEL))
goto mas_store_fail;
@@ -2924,7 +2958,7 @@ int do_brk_flags(struct vma_iterator *vmi, struct vm_area_struct *vma,
vm_area_free(vma);
unacct_fail:
vm_unacct_memory(len >> PAGE_SHIFT);
- return -ENOMEM;
+ return err;
}
/**
@@ -3085,7 +3119,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *next;
unsigned long gap_addr;
- int error = 0;
+ int error;
VMA_ITERATOR(vmi, mm, vma->vm_start);
if (!(vma->vm_flags & VM_GROWSUP))
@@ -3122,12 +3156,14 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma))) {
- vma_iter_free(&vmi);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto free;
}
/* Lock the VMA before expanding to prevent concurrent page faults */
- vma_start_write(vma);
+ error = vma_start_write_killable(vma);
+ if (error)
+ goto free;
/* We update the anon VMA tree. */
anon_vma_lock_write(vma->anon_vma);
@@ -3156,6 +3192,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
}
}
anon_vma_unlock_write(vma->anon_vma);
+free:
vma_iter_free(&vmi);
validate_mm(mm);
return error;
@@ -3170,7 +3207,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
{
struct mm_struct *mm = vma->vm_mm;
struct vm_area_struct *prev;
- int error = 0;
+ int error;
VMA_ITERATOR(vmi, mm, vma->vm_start);
if (!(vma->vm_flags & VM_GROWSDOWN))
@@ -3201,12 +3238,14 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
/* We must make sure the anon_vma is allocated. */
if (unlikely(anon_vma_prepare(vma))) {
- vma_iter_free(&vmi);
- return -ENOMEM;
+ error = -ENOMEM;
+ goto free;
}
/* Lock the VMA before expanding to prevent concurrent page faults */
- vma_start_write(vma);
+ error = vma_start_write_killable(vma);
+ if (error)
+ goto free;
/* We update the anon VMA tree. */
anon_vma_lock_write(vma->anon_vma);
@@ -3236,6 +3275,7 @@ int expand_downwards(struct vm_area_struct *vma, unsigned long address)
}
}
anon_vma_unlock_write(vma->anon_vma);
+free:
vma_iter_free(&vmi);
validate_mm(mm);
return error;
diff --git a/mm/vma_exec.c b/mm/vma_exec.c
index 8134e1afca68..a4addc2a8480 100644
--- a/mm/vma_exec.c
+++ b/mm/vma_exec.c
@@ -40,6 +40,7 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
struct vm_area_struct *next;
struct mmu_gather tlb;
PAGETABLE_MOVE(pmc, vma, vma, old_start, new_start, length);
+ int err;
BUG_ON(new_start > new_end);
@@ -55,8 +56,9 @@ int relocate_vma_down(struct vm_area_struct *vma, unsigned long shift)
* cover the whole range: [new_start, old_end)
*/
vmg.target = vma;
- if (vma_expand(&vmg))
- return -ENOMEM;
+ err = vma_expand(&vmg);
+ if (err)
+ return err;
/*
* move the page tables downwards, on failure we rely on
base-commit: a1a876489abcc1e75b03bd3b2f6739ceeaaec8c5
--
2.53.0.rc2.204.g2597b5adb4-goog
* Re: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
2026-02-09 22:08 [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable() Suren Baghdasaryan
@ 2026-02-10 4:30 ` kernel test robot
2026-02-10 9:19 ` kernel test robot
` (2 subsequent siblings)
3 siblings, 0 replies; 6+ messages in thread
From: kernel test robot @ 2026-02-10 4:30 UTC (permalink / raw)
To: Suren Baghdasaryan, akpm
Cc: oe-kbuild-all, willy, david, ziy, matthew.brost, joshua.hahnjy,
rakie.kim, byungchul, gourry, ying.huang, apopple,
lorenzo.stoakes, baolin.wang, Liam.Howlett, npache, ryan.roberts,
dev.jain, baohua, lance.yang, vbabka, jannh, rppt, mhocko,
pfalcato, kees, maddy, npiggin, mpe, chleroy, linux-mm
Hi Suren,
kernel test robot noticed the following build warnings:
[auto build test WARNING on a1a876489abcc1e75b03bd3b2f6739ceeaaec8c5]
url: https://github.com/intel-lab-lkp/linux/commits/Suren-Baghdasaryan/mm-replace-vma_start_write-with-vma_start_write_killable/20260210-061104
base: a1a876489abcc1e75b03bd3b2f6739ceeaaec8c5
patch link: https://lore.kernel.org/r/20260209220849.2126486-1-surenb%40google.com
patch subject: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
config: nios2-allnoconfig (https://download.01.org/0day-ci/archive/20260210/202602101205.7QyEq3MD-lkp@intel.com/config)
compiler: nios2-linux-gcc (GCC) 11.5.0
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260210/202602101205.7QyEq3MD-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602101205.7QyEq3MD-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> Warning: mm/memory.c:381 duplicate section name 'Note'
>> Warning: mm/memory.c:381 duplicate section name 'Note'
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
2026-02-09 22:08 [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable() Suren Baghdasaryan
2026-02-10 4:30 ` kernel test robot
@ 2026-02-10 9:19 ` kernel test robot
2026-02-10 21:18 ` Jann Horn
2026-02-11 3:55 ` Ritesh Harjani
3 siblings, 0 replies; 6+ messages in thread
From: kernel test robot @ 2026-02-10 9:19 UTC (permalink / raw)
To: Suren Baghdasaryan, akpm
Cc: llvm, oe-kbuild-all, willy, david, ziy, matthew.brost,
joshua.hahnjy, rakie.kim, byungchul, gourry, ying.huang, apopple,
lorenzo.stoakes, baolin.wang, Liam.Howlett, npache, ryan.roberts,
dev.jain, baohua, lance.yang, vbabka, jannh, rppt, mhocko,
pfalcato, kees, maddy, npiggin, mpe, chleroy, linux-mm
Hi Suren,
kernel test robot noticed the following build warnings:
[auto build test WARNING on a1a876489abcc1e75b03bd3b2f6739ceeaaec8c5]
url: https://github.com/intel-lab-lkp/linux/commits/Suren-Baghdasaryan/mm-replace-vma_start_write-with-vma_start_write_killable/20260210-061104
base: a1a876489abcc1e75b03bd3b2f6739ceeaaec8c5
patch link: https://lore.kernel.org/r/20260209220849.2126486-1-surenb%40google.com
patch subject: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
config: x86_64-kexec (https://download.01.org/0day-ci/archive/20260210/202602101014.tdhEXT8X-lkp@intel.com/config)
compiler: clang version 20.1.8 (https://github.com/llvm/llvm-project 87f0227cb60147a26a1eeb4fb06e3b505e9c7261)
reproduce (this is a W=1 build): (https://download.01.org/0day-ci/archive/20260210/202602101014.tdhEXT8X-lkp@intel.com/reproduce)
If you fix the issue in a separate patch/commit (i.e. not just a new version of
the same patch/commit), kindly add the following tags
| Reported-by: kernel test robot <lkp@intel.com>
| Closes: https://lore.kernel.org/oe-kbuild-all/202602101014.tdhEXT8X-lkp@intel.com/
All warnings (new ones prefixed by >>):
>> Warning: mm/memory.c:381 duplicate section name 'Note'
>> Warning: mm/memory.c:381 duplicate section name 'Note'
--
0-DAY CI Kernel Test Service
https://github.com/intel/lkp-tests/wiki
* Re: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
2026-02-09 22:08 [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable() Suren Baghdasaryan
2026-02-10 4:30 ` kernel test robot
2026-02-10 9:19 ` kernel test robot
@ 2026-02-10 21:18 ` Jann Horn
2026-02-10 23:41 ` Suren Baghdasaryan
2026-02-11 3:55 ` Ritesh Harjani
3 siblings, 1 reply; 6+ messages in thread
From: Jann Horn @ 2026-02-10 21:18 UTC (permalink / raw)
To: Suren Baghdasaryan
Cc: akpm, willy, david, ziy, matthew.brost, joshua.hahnjy, rakie.kim,
byungchul, gourry, ying.huang, apopple, lorenzo.stoakes,
baolin.wang, Liam.Howlett, npache, ryan.roberts, dev.jain,
baohua, lance.yang, vbabka, rppt, mhocko, pfalcato, kees, maddy,
npiggin, mpe, chleroy, linux-mm, linuxppc-dev, kvm, linux-kernel
On Mon, Feb 9, 2026 at 11:08 PM Suren Baghdasaryan <surenb@google.com> wrote:
> Now that we have vma_start_write_killable() we can replace most of the
> vma_start_write() calls with it, improving reaction time to the kill
> signal.
[...]
> diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> index dbd48502ac24..3de7ab4f4cee 100644
> --- a/mm/mempolicy.c
> +++ b/mm/mempolicy.c
[...]
> @@ -1808,7 +1817,11 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
> break;
> }
>
> - vma_start_write(vma);
> + if (vma_start_write_killable(vma)) {
> + err = -EINTR;
Doesn't this need mpol_put(new)? Or less complicated, move the
vma_start_write_killable() up to somewhere above the mpol_dup() call.
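For illustration, the reordered loop body could look roughly like this
(the mpol_dup() error handling is paraphrased from the existing loop, not
copied verbatim):

    if (vma_start_write_killable(vma)) {
        err = -EINTR;
        break;
    }

    new = mpol_dup(vma_policy(vma));
    if (IS_ERR(new)) {
        err = PTR_ERR(new);
        break;
    }

    new->home_node = home_node;
    err = mbind_range(&vmi, vma, &prev, start, end, new);
    mpol_put(new);
    if (err)
        break;

That way an -EINTR exit has nothing left to undo.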
> + break;
> + }
> +
> new->home_node = home_node;
> err = mbind_range(&vmi, vma, &prev, start, end, new);
> mpol_put(new);
[...]
> diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> index a94c401ab2cf..dc9f7a7709c6 100644
> --- a/mm/pagewalk.c
> +++ b/mm/pagewalk.c
> @@ -425,14 +425,13 @@ static inline void process_mm_walk_lock(struct mm_struct *mm,
> mmap_assert_write_locked(mm);
> }
>
> -static inline void process_vma_walk_lock(struct vm_area_struct *vma,
> +static inline int process_vma_walk_lock(struct vm_area_struct *vma,
> enum page_walk_lock walk_lock)
> {
> #ifdef CONFIG_PER_VMA_LOCK
> switch (walk_lock) {
> case PGWALK_WRLOCK:
> - vma_start_write(vma);
> - break;
> + return vma_start_write_killable(vma);
There are two users of PGWALK_WRLOCK in arch/s390/mm/gmap.c that
don't check pagewalk return values; have you checked that they are not
negatively affected by this new possible error return?
> case PGWALK_WRLOCK_VERIFY:
> vma_assert_write_locked(vma);
> break;
[...]
> diff --git a/mm/vma.c b/mm/vma.c
> index be64f781a3aa..3cfb81b3b7cf 100644
> --- a/mm/vma.c
> +++ b/mm/vma.c
> @@ -540,8 +540,12 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
> if (new->vm_ops && new->vm_ops->open)
> new->vm_ops->open(new);
>
> - vma_start_write(vma);
> - vma_start_write(new);
> + err = vma_start_write_killable(vma);
> + if (err)
> + goto out_fput;
> + err = vma_start_write_killable(new);
> + if (err)
> + goto out_fput;
What about the new->vm_ops->open() call and the anon_vma_clone()
above? I don't think the error path properly undoes either. These
calls should probably be moved further up, so that the point of no
return in this function stays where it was.
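Roughly (sketch only; the goto labels are approximated from the existing
error paths in __split_vma()):

    /* take both VMA write locks first, while bailing out is still cheap */
    err = vma_start_write_killable(vma);
    if (err)
        goto out_free_vmi;
    err = vma_start_write_killable(new);
    if (err)
        goto out_free_vmi;

    /* ... then vma_dup_policy(), anon_vma_clone(), get_file(), ->open() ... */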
> init_vma_prep(&vp, vma);
> vp.insert = new;
[...]
> @@ -1155,10 +1168,12 @@ int vma_expand(struct vma_merge_struct *vmg)
> struct vm_area_struct *next = vmg->next;
> bool remove_next = false;
> vm_flags_t sticky_flags;
> - int ret = 0;
> + int ret;
>
> mmap_assert_write_locked(vmg->mm);
> - vma_start_write(target);
> + ret = vma_start_write_killable(target);
> + if (ret)
> + return ret;
>
> if (next && target != next && vmg->end == next->vm_end)
> remove_next = true;
> @@ -1186,17 +1201,19 @@ int vma_expand(struct vma_merge_struct *vmg)
> * Note that, by convention, callers ignore OOM for this case, so
> * we don't need to account for vmg->give_up_on_mm here.
> */
> - if (remove_next)
> + if (remove_next) {
> + ret = vma_start_write_killable(next);
> + if (ret)
> + return ret;
> ret = dup_anon_vma(target, next, &anon_dup);
> + }
> if (!ret && vmg->copied_from)
> ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
> if (ret)
> return ret;
nit: the control flow here is kinda chaotic, with some "if (ret)
return ret;" mixed with "if (!ret && ...) ret = ...;".
>
> - if (remove_next) {
> - vma_start_write(next);
> + if (remove_next)
> vmg->__remove_next = true;
> - }
> if (commit_merge(vmg))
> goto nomem;
>
[...]
> @@ -2211,9 +2240,8 @@ int mm_take_all_locks(struct mm_struct *mm)
> * is reached.
> */
> for_each_vma(vmi, vma) {
> - if (signal_pending(current))
> + if (vma_start_write_killable(vma))
> goto out_unlock;
> - vma_start_write(vma);
nit: might want to keep the signal_pending() so that this can sort of
be interrupted by non-fatal signals, which seems to be the intention
> }
>
> vma_iter_init(&vmi, mm, 0);
> @@ -2549,7 +2577,9 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
> #endif
>
> /* Lock the VMA since it is modified after insertion into VMA tree */
> - vma_start_write(vma);
> + error = vma_start_write_killable(vma);
> + if (error)
> + goto free_iter_vma;
This seems way past the point of no return; we've already called the
->mmap() handler, which I think means removing the VMA again would
require a ->close() call. The VMA should be locked further up if we
want to do it killably.
> vma_iter_store_new(vmi, vma);
> map->mm->map_count++;
> vma_link_file(vma, map->hold_file_rmap_lock);
>
* Re: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
2026-02-10 21:18 ` Jann Horn
@ 2026-02-10 23:41 ` Suren Baghdasaryan
0 siblings, 0 replies; 6+ messages in thread
From: Suren Baghdasaryan @ 2026-02-10 23:41 UTC (permalink / raw)
To: Jann Horn
Cc: akpm, willy, david, ziy, matthew.brost, joshua.hahnjy, rakie.kim,
byungchul, gourry, ying.huang, apopple, lorenzo.stoakes,
baolin.wang, Liam.Howlett, npache, ryan.roberts, dev.jain,
baohua, lance.yang, vbabka, rppt, mhocko, pfalcato, kees, maddy,
npiggin, mpe, chleroy, linux-mm, linuxppc-dev, kvm, linux-kernel
On Tue, Feb 10, 2026 at 1:19 PM Jann Horn <jannh@google.com> wrote:
>
> On Mon, Feb 9, 2026 at 11:08 PM Suren Baghdasaryan <surenb@google.com> wrote:
> > Now that we have vma_start_write_killable() we can replace most of the
> > vma_start_write() calls with it, improving reaction time to the kill
> > signal.
> [...]
> > diff --git a/mm/mempolicy.c b/mm/mempolicy.c
> > index dbd48502ac24..3de7ab4f4cee 100644
> > --- a/mm/mempolicy.c
> > +++ b/mm/mempolicy.c
> [...]
> > @@ -1808,7 +1817,11 @@ SYSCALL_DEFINE4(set_mempolicy_home_node, unsigned long, start, unsigned long, le
> > break;
> > }
> >
> > - vma_start_write(vma);
> > + if (vma_start_write_killable(vma)) {
> > + err = -EINTR;
>
> Doesn't this need mpol_put(new)? Or less complicated, move the
> vma_start_write_killable() up to somewhere above the mpol_dup() call.
Thanks for the review, Jann!
Yes, you are right. I'll move it before mpol_dup().
>
> > + break;
> > + }
> > +
> > new->home_node = home_node;
> > err = mbind_range(&vmi, vma, &prev, start, end, new);
> > mpol_put(new);
> [...]
> > diff --git a/mm/pagewalk.c b/mm/pagewalk.c
> > index a94c401ab2cf..dc9f7a7709c6 100644
> > --- a/mm/pagewalk.c
> > +++ b/mm/pagewalk.c
> > @@ -425,14 +425,13 @@ static inline void process_mm_walk_lock(struct mm_struct *mm,
> > mmap_assert_write_locked(mm);
> > }
> >
> > -static inline void process_vma_walk_lock(struct vm_area_struct *vma,
> > +static inline int process_vma_walk_lock(struct vm_area_struct *vma,
> > enum page_walk_lock walk_lock)
> > {
> > #ifdef CONFIG_PER_VMA_LOCK
> > switch (walk_lock) {
> > case PGWALK_WRLOCK:
> > - vma_start_write(vma);
> > - break;
> > + return vma_start_write_killable(vma);
>
> There are two users of PGWALK_WRLOCK in arch/s390/mm/gmap.c that
> don't check pagewalk return values; have you checked that they are not
> negatively affected by this new possible error return?
Uh, even the ones which check for the error, like queue_pages_range(),
don't seem to handle it well. I'll split this part into a separate
patch, as I think it will be sizable, and go over all users to
ensure they handle the new error.
>
> > case PGWALK_WRLOCK_VERIFY:
> > vma_assert_write_locked(vma);
> > break;
> [...]
> > diff --git a/mm/vma.c b/mm/vma.c
> > index be64f781a3aa..3cfb81b3b7cf 100644
> > --- a/mm/vma.c
> > +++ b/mm/vma.c
> > @@ -540,8 +540,12 @@ __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
> > if (new->vm_ops && new->vm_ops->open)
> > new->vm_ops->open(new);
> >
> > - vma_start_write(vma);
> > - vma_start_write(new);
> > + err = vma_start_write_killable(vma);
> > + if (err)
> > + goto out_fput;
> > + err = vma_start_write_killable(new);
> > + if (err)
> > + goto out_fput;
>
> What about the new->vm_ops->open() call and the anon_vma_clone()
> above? I don't think the error path properly undoes either. These
> calls should probably be moved further up, so that the point of no
> return in this function stays where it was.
Ack.
>
> > init_vma_prep(&vp, vma);
> > vp.insert = new;
> [...]
> > @@ -1155,10 +1168,12 @@ int vma_expand(struct vma_merge_struct *vmg)
> > struct vm_area_struct *next = vmg->next;
> > bool remove_next = false;
> > vm_flags_t sticky_flags;
> > - int ret = 0;
> > + int ret;
> >
> > mmap_assert_write_locked(vmg->mm);
> > - vma_start_write(target);
> > + ret = vma_start_write_killable(target);
> > + if (ret)
> > + return ret;
> >
> > if (next && target != next && vmg->end == next->vm_end)
> > remove_next = true;
> > @@ -1186,17 +1201,19 @@ int vma_expand(struct vma_merge_struct *vmg)
> > * Note that, by convention, callers ignore OOM for this case, so
> > * we don't need to account for vmg->give_up_on_mm here.
> > */
> > - if (remove_next)
> > + if (remove_next) {
> > + ret = vma_start_write_killable(next);
> > + if (ret)
> > + return ret;
> > ret = dup_anon_vma(target, next, &anon_dup);
> > + }
> > if (!ret && vmg->copied_from)
> > ret = dup_anon_vma(target, vmg->copied_from, &anon_dup);
> > if (ret)
> > return ret;
>
> nit: the control flow here is kinda chaotic, with some "if (ret)
> return ret;" mixed with "if (!ret && ...) ret = ...;".
I'll see what I can do about it but probably as a separate patch.
>
> >
> > - if (remove_next) {
> > - vma_start_write(next);
> > + if (remove_next)
> > vmg->__remove_next = true;
> > - }
> > if (commit_merge(vmg))
> > goto nomem;
> >
> [...]
> > @@ -2211,9 +2240,8 @@ int mm_take_all_locks(struct mm_struct *mm)
> > * is reached.
> > */
> > for_each_vma(vmi, vma) {
> > - if (signal_pending(current))
> > + if (vma_start_write_killable(vma))
> > goto out_unlock;
> > - vma_start_write(vma);
>
> nit: might want to keep the signal_pending() so that this can sort of
> be interrupted by non-fatal signals, which seems to be the intention
Yes, I will bring back that check.
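I.e. roughly (sketch):

    for_each_vma(vmi, vma) {
        if (signal_pending(current))
            goto out_unlock;
        if (vma_start_write_killable(vma))
            goto out_unlock;
    }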
>
> > }
> >
> > vma_iter_init(&vmi, mm, 0);
> > @@ -2549,7 +2577,9 @@ static int __mmap_new_vma(struct mmap_state *map, struct vm_area_struct **vmap)
> > #endif
> >
> > /* Lock the VMA since it is modified after insertion into VMA tree */
> > - vma_start_write(vma);
> > + error = vma_start_write_killable(vma);
> > + if (error)
> > + goto free_iter_vma;
>
> This seems way past the point of no return; we've already called the
> ->mmap() handler, which I think means removing the VMA again would
> require a ->close() call. The VMA should be locked further up if we
> want to do it killably.
Yeah, I realized this big issue after posting the patch. Moving it up
seems possible, so I'll try that.
Thanks,
Suren.
>
> > vma_iter_store_new(vmi, vma);
> > map->mm->map_count++;
> > vma_link_file(vma, map->hold_file_rmap_lock);
> >
* Re: [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable()
2026-02-09 22:08 [PATCH 1/1] mm: replace vma_start_write() with vma_start_write_killable() Suren Baghdasaryan
` (2 preceding siblings ...)
2026-02-10 21:18 ` Jann Horn
@ 2026-02-11 3:55 ` Ritesh Harjani
3 siblings, 0 replies; 6+ messages in thread
From: Ritesh Harjani @ 2026-02-11 3:55 UTC (permalink / raw)
To: Suren Baghdasaryan, akpm
Cc: willy, david, ziy, matthew.brost, joshua.hahnjy, rakie.kim,
byungchul, gourry, ying.huang, apopple, lorenzo.stoakes,
baolin.wang, Liam.Howlett, npache, ryan.roberts, dev.jain,
baohua, lance.yang, vbabka, jannh, rppt, mhocko, pfalcato, kees,
maddy, npiggin, mpe, chleroy, linux-mm, linuxppc-dev, kvm,
linux-kernel, surenb
Suren Baghdasaryan <surenb@google.com> writes:
> Now that we have vma_start_write_killable() we can replace most of the
> vma_start_write() calls with it, improving reaction time to the kill
> signal.
>
> There are several places which are left untouched by this patch:
>
> 1. free_pgtables() because the function should free page tables even if a
> fatal signal is pending.
>
> 2. userfaultfd code, where some paths calling vma_start_write() can
> handle EINTR and some can't without a deeper code refactoring.
>
> 3. vm_flags_{set|mod|clear} require refactoring that involves moving
> vma_start_write() out of these functions and replacing it with
> vma_assert_write_locked(); callers of these functions should then
> lock the vma themselves using vma_start_write_killable() whenever
> possible.
>
> Suggested-by: Matthew Wilcox <willy@infradead.org>
> Signed-off-by: Suren Baghdasaryan <surenb@google.com>
> ---
> arch/powerpc/kvm/book3s_hv_uvmem.c | 5 +-
> include/linux/mempolicy.h | 5 +-
> mm/khugepaged.c | 5 +-
> mm/madvise.c | 4 +-
> mm/memory.c | 2 +
> mm/mempolicy.c | 23 ++++++--
> mm/mlock.c | 20 +++++--
> mm/mprotect.c | 4 +-
> mm/mremap.c | 4 +-
> mm/pagewalk.c | 20 +++++--
> mm/vma.c | 94 +++++++++++++++++++++---------
> mm/vma_exec.c | 6 +-
> 12 files changed, 139 insertions(+), 53 deletions(-)
>
> diff --git a/arch/powerpc/kvm/book3s_hv_uvmem.c b/arch/powerpc/kvm/book3s_hv_uvmem.c
> index 7cf9310de0ec..69750edcf8d5 100644
> --- a/arch/powerpc/kvm/book3s_hv_uvmem.c
> +++ b/arch/powerpc/kvm/book3s_hv_uvmem.c
> @@ -410,7 +410,10 @@ static int kvmppc_memslot_page_merge(struct kvm *kvm,
> ret = H_STATE;
> break;
> }
> - vma_start_write(vma);
> + if (vma_start_write_killable(vma)) {
> + ret = H_STATE;
> + break;
> + }
> /* Copy vm_flags to avoid partial modifications in ksm_madvise */
> vm_flags = vma->vm_flags;
> ret = ksm_madvise(vma, vma->vm_start, vma->vm_end,
The above change w.r.t. powerpc error handling in function
kvmppc_memslot_page_merge() looks good to me.
Please feel free to add:
Reviewed-by: Ritesh Harjani (IBM) <ritesh.list@gmail.com> # powerpc
-ritesh