* [PATCH v2] mm/mmu_notifier: Remove unused mmu_notifier_range_update_to_read_only export
@ 2023-01-10 2:57 Alistair Popple
2023-01-10 6:47 ` Christoph Hellwig
2023-01-10 17:53 ` Mike Kravetz
From: Alistair Popple @ 2023-01-10 2:57 UTC (permalink / raw)
To: Andrew Morton, linux-mm
Cc: John Hubbard, Ralph Campbell, Jérôme Glisse, Ira Weiny,
Jason Gunthorpe, Christoph Hellwig, Mike Kravetz,
Alistair Popple, Mike Rapoport
mmu_notifier_range_update_to_read_only() was originally introduced in
commit c6d23413f81b ("mm/mmu_notifier:
mmu_notifier_range_update_to_read_only() helper") as an optimisation
for device drivers that know a range has only been mapped
read-only. However, there are no users of this feature, so remove it. As
it is the only user of the struct mmu_notifier_range.vma field, remove
that also.
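For reference, the driver-side pattern this helper was meant to enable
would have looked roughly like the sketch below. This is a hypothetical
example only (my_dev, my_dev_write_protect() and my_dev_unmap() are made
up); the point is that no in-tree driver ever grew such a path:

	/*
	 * Hypothetical sketch: on a write-protect event, downgrade the
	 * device mappings instead of tearing them down. No real driver
	 * does this, which is why the helper is being removed.
	 */
	static int my_dev_invalidate_range_start(struct mmu_notifier *mn,
				const struct mmu_notifier_range *range)
	{
		struct my_dev *dev = container_of(mn, struct my_dev, notifier);

		if (mmu_notifier_range_update_to_read_only(range))
			/* Range only loses write permission. */
			my_dev_write_protect(dev, range->start, range->end);
		else
			my_dev_unmap(dev, range->start, range->end);

		return 0;
	}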
Signed-off-by: Alistair Popple <apopple@nvidia.com>
Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
---
Changes for v2:
- Added Mike's Acked-by
- Added Jason's Reviewed-by
- Removed the now unused vma parameter from
mmu_notifier_range_init{_owner}()
---
fs/proc/task_mmu.c | 2 +-
include/linux/mmu_notifier.h | 13 +++++--------
kernel/events/uprobes.c | 2 +-
mm/huge_memory.c | 4 ++--
mm/hugetlb.c | 13 ++++++-------
mm/khugepaged.c | 6 +++---
mm/ksm.c | 5 ++---
mm/madvise.c | 2 +-
mm/mapping_dirty_helpers.c | 2 +-
mm/memory.c | 12 ++++++------
mm/migrate_device.c | 4 ++--
mm/mmu_notifier.c | 10 ----------
mm/mprotect.c | 2 +-
mm/mremap.c | 2 +-
mm/oom_kill.c | 2 +-
mm/rmap.c | 11 +++++------
16 files changed, 38 insertions(+), 54 deletions(-)
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 8a74cdcc9af0..b61d00af6cc2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -1300,7 +1300,7 @@ static ssize_t clear_refs_write(struct file *file, const char __user *buf,
inc_tlb_flush_pending(mm);
mmu_notifier_range_init(&range, MMU_NOTIFY_SOFT_DIRTY,
- 0, NULL, mm, 0, -1UL);
+ 0, mm, 0, -1UL);
mmu_notifier_invalidate_range_start(&range);
}
walk_page_range(mm, 0, -1, &clear_refs_walk_ops, &cp);
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index d6c06e140277..64a3e051c3c4 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -269,7 +269,6 @@ extern struct lockdep_map __mmu_notifier_invalidate_range_start_map;
#endif
struct mmu_notifier_range {
- struct vm_area_struct *vma;
struct mm_struct *mm;
unsigned long start;
unsigned long end;
@@ -514,12 +513,10 @@ static inline void mmu_notifier_subscriptions_destroy(struct mm_struct *mm)
static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
enum mmu_notifier_event event,
unsigned flags,
- struct vm_area_struct *vma,
struct mm_struct *mm,
unsigned long start,
unsigned long end)
{
- range->vma = vma;
range->event = event;
range->mm = mm;
range->start = start;
@@ -530,10 +527,10 @@ static inline void mmu_notifier_range_init(struct mmu_notifier_range *range,
static inline void mmu_notifier_range_init_owner(
struct mmu_notifier_range *range,
enum mmu_notifier_event event, unsigned int flags,
- struct vm_area_struct *vma, struct mm_struct *mm,
- unsigned long start, unsigned long end, void *owner)
+ struct mm_struct *mm, unsigned long start,
+ unsigned long end, void *owner)
{
- mmu_notifier_range_init(range, event, flags, vma, mm, start, end);
+ mmu_notifier_range_init(range, event, flags, mm, start, end);
range->owner = owner;
}
@@ -659,9 +656,9 @@ static inline void _mmu_notifier_range_init(struct mmu_notifier_range *range,
range->end = end;
}
-#define mmu_notifier_range_init(range,event,flags,vma,mm,start,end) \
+#define mmu_notifier_range_init(range,event,flags,mm,start,end) \
_mmu_notifier_range_init(range, start, end)
-#define mmu_notifier_range_init_owner(range, event, flags, vma, mm, start, \
+#define mmu_notifier_range_init_owner(range, event, flags, mm, start, \
end, owner) \
_mmu_notifier_range_init(range, start, end)
diff --git a/kernel/events/uprobes.c b/kernel/events/uprobes.c
index d9e357b7e17c..29f36d2ae129 100644
--- a/kernel/events/uprobes.c
+++ b/kernel/events/uprobes.c
@@ -161,7 +161,7 @@ static int __replace_page(struct vm_area_struct *vma, unsigned long addr,
int err;
struct mmu_notifier_range range;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
addr + PAGE_SIZE);
if (new_page) {
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 811d19b5c4f6..39fd20026172 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1980,7 +1980,7 @@ void __split_huge_pud(struct vm_area_struct *vma, pud_t *pud,
spinlock_t *ptl;
struct mmu_notifier_range range;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address & HPAGE_PUD_MASK,
(address & HPAGE_PUD_MASK) + HPAGE_PUD_SIZE);
mmu_notifier_invalidate_range_start(&range);
@@ -2270,7 +2270,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
spinlock_t *ptl;
struct mmu_notifier_range range;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address & HPAGE_PMD_MASK,
(address & HPAGE_PMD_MASK) + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index e36ca75311a5..77cf3910819d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4797,7 +4797,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
int ret = 0;
if (cow) {
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src_vma, src,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, src,
src_vma->vm_start,
src_vma->vm_end);
mmu_notifier_invalidate_range_start(&range);
@@ -5005,7 +5005,7 @@ int move_hugetlb_page_tables(struct vm_area_struct *vma,
struct mmu_notifier_range range;
bool shared_pmd = false;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, old_addr,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, old_addr,
old_end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
/*
@@ -5084,8 +5084,7 @@ static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct
/*
* If sharing possible, alert mmu notifiers of worst case.
*/
- mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
- end);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, mm, start, end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
mmu_notifier_invalidate_range_start(&range);
last_addr_mask = hugetlb_mask_last_page(h);
@@ -5434,7 +5433,7 @@ static vm_fault_t hugetlb_wp(struct mm_struct *mm, struct vm_area_struct *vma,
pages_per_huge_page(h));
__SetPageUptodate(new_page);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, haddr,
haddr + huge_page_size(h));
mmu_notifier_invalidate_range_start(&range);
@@ -6423,7 +6422,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
* range if PMD sharing is possible.
*/
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_VMA,
- 0, vma, mm, start, end);
+ 0, mm, start, end);
adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
BUG_ON(address >= end);
@@ -7451,7 +7450,7 @@ void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
* No need to call adjust_range_if_pmd_sharing_possible(), because
* we have already done the PUD_SIZE alignment.
*/
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
start, end);
mmu_notifier_invalidate_range_start(&range);
hugetlb_vma_lock_write(vma);
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index 3703a56571c1..0dd71f6e1739 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -1032,8 +1032,8 @@ static int collapse_huge_page(struct mm_struct *mm, unsigned long address,
anon_vma_lock_write(vma->anon_vma);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm,
- address, address + HPAGE_PMD_SIZE);
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, address,
+ address + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
pte = pte_offset_map(pmd, address);
@@ -1411,7 +1411,7 @@ static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *v
if (vma->anon_vma)
lockdep_assert_held_write(&vma->anon_vma->root->rwsem);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, NULL, mm, addr,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
addr + HPAGE_PMD_SIZE);
mmu_notifier_invalidate_range_start(&range);
pmd = pmdp_collapse_flush(vma, addr, pmdp);
diff --git a/mm/ksm.c b/mm/ksm.c
index c19fcca9bc03..47e8eb8e0b2d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1029,8 +1029,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
BUG_ON(PageTransCompound(page));
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
- pvmw.address,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
pvmw.address + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
@@ -1137,7 +1136,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
if (!pmd_present(pmde) || pmd_trans_huge(pmde))
goto out;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, addr,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, addr,
addr + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/madvise.c b/mm/madvise.c
index b913ba6efc10..38e1700e9b9d 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -750,7 +750,7 @@ static int madvise_free_single_vma(struct vm_area_struct *vma,
range.end = min(vma->vm_end, end_addr);
if (range.end <= vma->vm_start)
return -EINVAL;
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
range.start, range.end);
lru_add_drain();
diff --git a/mm/mapping_dirty_helpers.c b/mm/mapping_dirty_helpers.c
index 1b0ab8fcfd8b..fca62dfd001b 100644
--- a/mm/mapping_dirty_helpers.c
+++ b/mm/mapping_dirty_helpers.c
@@ -191,7 +191,7 @@ static int wp_clean_pre_vma(unsigned long start, unsigned long end,
wpwalk->tlbflush_end = start;
mmu_notifier_range_init(&wpwalk->range, MMU_NOTIFY_PROTECTION_PAGE, 0,
- walk->vma, walk->mm, start, end);
+ walk->mm, start, end);
mmu_notifier_invalidate_range_start(&wpwalk->range);
flush_cache_range(walk->vma, start, end);
diff --git a/mm/memory.c b/mm/memory.c
index 8c8420934d60..da2e29e51d89 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1307,7 +1307,7 @@ copy_page_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma)
if (is_cow) {
mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
- 0, src_vma, src_mm, addr, end);
+ 0, src_mm, addr, end);
mmu_notifier_invalidate_range_start(&range);
/*
* Disabling preemption is not needed for the write side, as
@@ -1717,7 +1717,7 @@ void unmap_vmas(struct mmu_gather *tlb, struct maple_tree *mt,
};
MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
- mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
start_addr, end_addr);
mmu_notifier_invalidate_range_start(&range);
do {
@@ -1744,7 +1744,7 @@ void zap_page_range(struct vm_area_struct *vma, unsigned long start,
MA_STATE(mas, mt, vma->vm_end, vma->vm_end);
lru_add_drain();
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
start, start + size);
tlb_gather_mmu(&tlb, vma->vm_mm);
update_hiwater_rss(vma->vm_mm);
@@ -1773,7 +1773,7 @@ void zap_page_range_single(struct vm_area_struct *vma, unsigned long address,
struct mmu_gather tlb;
lru_add_drain();
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, end);
if (is_vm_hugetlb_page(vma))
adjust_range_if_pmd_sharing_possible(vma, &range.start,
@@ -3143,7 +3143,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
__SetPageUptodate(new_page);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
@@ -3625,7 +3625,7 @@ static vm_fault_t remove_device_exclusive_entry(struct vm_fault *vmf)
if (!folio_lock_or_retry(folio, vma->vm_mm, vmf->flags))
return VM_FAULT_RETRY;
- mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
vma->vm_mm, vmf->address & PAGE_MASK,
(vmf->address & PAGE_MASK) + PAGE_SIZE, NULL);
mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index 721b2365dbca..6c3740318a98 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -306,7 +306,7 @@ static void migrate_vma_collect(struct migrate_vma *migrate)
* private page mappings that won't be migrated.
*/
mmu_notifier_range_init_owner(&range, MMU_NOTIFY_MIGRATE, 0,
- migrate->vma, migrate->vma->vm_mm, migrate->start, migrate->end,
+ migrate->vma->vm_mm, migrate->start, migrate->end,
migrate->pgmap_owner);
mmu_notifier_invalidate_range_start(&range);
@@ -733,7 +733,7 @@ static void __migrate_device_pages(unsigned long *src_pfns,
notified = true;
mmu_notifier_range_init_owner(&range,
- MMU_NOTIFY_MIGRATE, 0, migrate->vma,
+ MMU_NOTIFY_MIGRATE, 0,
migrate->vma->vm_mm, addr, migrate->end,
migrate->pgmap_owner);
mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/mmu_notifier.c b/mm/mmu_notifier.c
index f45ff1b7626a..50c0dde1354f 100644
--- a/mm/mmu_notifier.c
+++ b/mm/mmu_notifier.c
@@ -1120,13 +1120,3 @@ void mmu_notifier_synchronize(void)
synchronize_srcu(&srcu);
}
EXPORT_SYMBOL_GPL(mmu_notifier_synchronize);
-
-bool
-mmu_notifier_range_update_to_read_only(const struct mmu_notifier_range *range)
-{
- if (!range->vma || range->event != MMU_NOTIFY_PROTECTION_VMA)
- return false;
- /* Return true if the vma still have the read flag set. */
- return range->vma->vm_flags & VM_READ;
-}
-EXPORT_SYMBOL_GPL(mmu_notifier_range_update_to_read_only);
diff --git a/mm/mprotect.c b/mm/mprotect.c
index 668bfaa6ed2a..c12c15fdf007 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -381,7 +381,7 @@ static inline unsigned long change_pmd_range(struct mmu_gather *tlb,
if (!range.start) {
mmu_notifier_range_init(&range,
MMU_NOTIFY_PROTECTION_VMA, 0,
- vma, vma->vm_mm, addr, end);
+ vma->vm_mm, addr, end);
mmu_notifier_invalidate_range_start(&range);
}
diff --git a/mm/mremap.c b/mm/mremap.c
index e465ffe279bb..d6cabaab738d 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -498,7 +498,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
new_addr, len);
flush_cache_range(vma, old_addr, old_end);
- mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma->vm_mm,
old_addr, old_end);
mmu_notifier_invalidate_range_start(&range);
diff --git a/mm/oom_kill.c b/mm/oom_kill.c
index 1276e49b31b0..044e1eed720e 100644
--- a/mm/oom_kill.c
+++ b/mm/oom_kill.c
@@ -542,7 +542,7 @@ static bool __oom_reap_task_mm(struct mm_struct *mm)
struct mmu_gather tlb;
mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0,
- vma, mm, vma->vm_start,
+ mm, vma->vm_start,
vma->vm_end);
tlb_gather_mmu(&tlb, mm);
if (mmu_notifier_invalidate_range_start_nonblock(&range)) {
diff --git a/mm/rmap.c b/mm/rmap.c
index 2ec925e5fa6a..130349cb4240 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -950,9 +950,8 @@ static int page_vma_mkclean_one(struct page_vma_mapped_walk *pvmw)
* We have to assume the worse case ie pmd for invalidation. Note that
* the folio can not be freed from this function.
*/
- mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE,
- 0, vma, vma->vm_mm, address,
- vma_address_end(pvmw));
+ mmu_notifier_range_init(&range, MMU_NOTIFY_PROTECTION_PAGE, 0,
+ vma->vm_mm, address, vma_address_end(pvmw));
mmu_notifier_invalidate_range_start(&range);
while (page_vma_mapped_walk(pvmw)) {
@@ -1499,7 +1498,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
* try_to_unmap() must hold a reference on the folio.
*/
range.end = vma_address_end(&pvmw);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, range.end);
if (folio_test_hugetlb(folio)) {
/*
@@ -1874,7 +1873,7 @@ static bool try_to_migrate_one(struct folio *folio, struct vm_area_struct *vma,
* try_to_unmap() must hold a reference on the page.
*/
range.end = vma_address_end(&pvmw);
- mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, vma->vm_mm,
+ mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma->vm_mm,
address, range.end);
if (folio_test_hugetlb(folio)) {
/*
@@ -2204,7 +2203,7 @@ static bool page_make_device_exclusive_one(struct folio *folio,
swp_entry_t entry;
pte_t swp_pte;
- mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0, vma,
+ mmu_notifier_range_init_owner(&range, MMU_NOTIFY_EXCLUSIVE, 0,
vma->vm_mm, address, min(vma->vm_end,
address + folio_size(folio)),
args->owner);
--
2.35.1
* Re: [PATCH v2] mm/mmu_notifier: Remove unused mmu_notifier_range_update_to_read_only export
2023-01-10 2:57 [PATCH v2] mm/mmu_notifier: Remove unused mmu_notifier_range_update_to_read_only export Alistair Popple
@ 2023-01-10 6:47 ` Christoph Hellwig
2023-01-10 17:53 ` Mike Kravetz
From: Christoph Hellwig @ 2023-01-10 6:47 UTC (permalink / raw)
To: Alistair Popple
Cc: Andrew Morton, linux-mm, John Hubbard, Ralph Campbell,
Jérôme Glisse, Ira Weiny, Jason Gunthorpe,
Christoph Hellwig, Mike Kravetz, Mike Rapoport
Looks good:
Reviewed-by: Christoph Hellwig <hch@lst.de>
* Re: [PATCH v2] mm/mmu_notifier: Remove unused mmu_notifier_range_update_to_read_only export
2023-01-10 2:57 [PATCH v2] mm/mmu_notifier: Remove unused mmu_notifier_range_update_to_read_only export Alistair Popple
2023-01-10 6:47 ` Christoph Hellwig
@ 2023-01-10 17:53 ` Mike Kravetz
From: Mike Kravetz @ 2023-01-10 17:53 UTC (permalink / raw)
To: Alistair Popple
Cc: Andrew Morton, linux-mm, John Hubbard, Ralph Campbell,
Jérôme Glisse, Ira Weiny, Jason Gunthorpe,
Christoph Hellwig, Mike Rapoport
On 01/10/23 13:57, Alistair Popple wrote:
> mmu_notifier_range_update_to_read_only() was originally introduced in
> commit c6d23413f81b ("mm/mmu_notifier:
> mmu_notifier_range_update_to_read_only() helper") as an optimisation
> for device drivers that know a range has only been mapped
> read-only. However, there are no users of this feature, so remove it. As
> it is the only user of the struct mmu_notifier_range.vma field, remove
> that also.
>
> Signed-off-by: Alistair Popple <apopple@nvidia.com>
> Acked-by: Mike Rapoport (IBM) <rppt@kernel.org>
> Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
>
> ---
>
> Changes for v2:
>
> - Added Mike's Acked-by
>
> - Added Jason's Reviewed-by
>
> - Removed the now unused vma parameter from
> mmu_notifier_range_init{_owner}()
> ---
> fs/proc/task_mmu.c | 2 +-
> include/linux/mmu_notifier.h | 13 +++++--------
> kernel/events/uprobes.c | 2 +-
> mm/huge_memory.c | 4 ++--
> mm/hugetlb.c | 13 ++++++-------
> mm/khugepaged.c | 6 +++---
> mm/ksm.c | 5 ++---
> mm/madvise.c | 2 +-
> mm/mapping_dirty_helpers.c | 2 +-
> mm/memory.c | 12 ++++++------
> mm/migrate_device.c | 4 ++--
> mm/mmu_notifier.c | 10 ----------
> mm/mprotect.c | 2 +-
> mm/mremap.c | 2 +-
> mm/oom_kill.c | 2 +-
> mm/rmap.c | 11 +++++------
> 16 files changed, 38 insertions(+), 54 deletions(-)
Thanks for removing the now unused vma parameter.
There has been some code movement in hugetlb.c and memory.c, so Andrew may
need to fix things up a bit. However, that should be straightforward.
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
--
Mike Kravetz