linux-mm.kvack.org archive mirror
* [PATCH v2 0/2] mm: remove arch's private VM_FAULT_BADMAP/BADACCESS
@ 2024-04-11 13:09 Kefeng Wang
  2024-04-11 13:09 ` [PATCH v2 1/2] arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS Kefeng Wang
  2024-04-11 13:09 ` [PATCH v2 2/2] arm: " Kefeng Wang
  0 siblings, 2 replies; 4+ messages in thread
From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton, Russell King, Catalin Marinas
  Cc: Will Deacon, linux-arm-kernel, linux-mm, Cristian Marussi,
	Mark Brown, Aishwarya TCV, Kefeng Wang

Directly set SEGV_MAPERR or SEGV_ACCERR for arm/arm64 to remove the last
two arch-private vm_fault reasons.
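
For context, a minimal illustrative sketch (userspace C with placeholder
types, not the real kernel code) of what the series changes: instead of
encoding the failure in arch-private vm_fault_t bits and translating them
back into an si_code at signal-delivery time, the fault handler records
the si_code directly at the point where the failure is detected:

	#include <signal.h>	/* SEGV_MAPERR, SEGV_ACCERR */

	typedef unsigned int vm_fault_t;

	/* old scheme: arch-private bits squeezed into vm_fault_t ... */
	#define VM_FAULT_BADMAP		((vm_fault_t)0x010000)
	#define VM_FAULT_BADACCESS	((vm_fault_t)0x020000)

	int old_scheme(vm_fault_t fault)
	{
		/* ... decoded again when the signal is delivered */
		return fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR;
	}

	/* new scheme: record the si_code where the failure is found */
	int new_scheme(int vma_found, int perms_ok)
	{
		int si_code = SEGV_MAPERR;	/* no mapping at the address */

		if (vma_found && !perms_ok)
			si_code = SEGV_ACCERR;	/* mapping exists, access denied */

		return si_code;
	}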

v2:
- fix unbalanced mmap lock and set si_code to SEGV_MAPERR by default
  before error handling, which also fixes the LTP failure reported by
  Aishwarya (see the outline below)
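
The lock discipline the v2 fix restores, condensed from the arm64 diff
below (an outline only, not a standalone snippet): every path that jumps
to the bad_area label must have already dropped the mmap read lock, and
si_code falls back to SEGV_MAPERR for errors returned by
handle_mm_fault():

	retry:
		vma = lock_mm_and_find_vma(mm, addr, regs);
		if (unlikely(!vma)) {
			/* lock_mm_and_find_vma() drops the lock on failure */
			fault = 0;
			si_code = SEGV_MAPERR;
			goto bad_area;
		}

		if (!(vma->vm_flags & vm_flags)) {
			mmap_read_unlock(mm);	/* don't hold the lock at bad_area */
			fault = 0;
			si_code = SEGV_ACCERR;
			goto bad_area;
		}

		fault = handle_mm_fault(vma, addr, mm_flags, regs);
		...
	done:
		if (likely(!(fault & VM_FAULT_ERROR)))
			return 0;

		si_code = SEGV_MAPERR;	/* default for handle_mm_fault() errors */
	bad_area:
		/* kernel-mode fixup, or deliver SIGSEGV with si_code */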

Kefeng Wang (2):
  arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
  arm: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS

 arch/arm/mm/fault.c   | 30 +++++++++++++++---------------
 arch/arm64/mm/fault.c | 43 ++++++++++++++++++++-----------------------
 2 files changed, 35 insertions(+), 38 deletions(-)

-- 
2.41.0




* [PATCH v2 1/2] arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
  2024-04-11 13:09 [PATCH v2 0/2] mm: remove arch's private VM_FAULT_BADMAP/BADACCESS Kefeng Wang
@ 2024-04-11 13:09 ` Kefeng Wang
  2024-04-11 17:31   ` Catalin Marinas
  2024-04-11 13:09 ` [PATCH v2 2/2] arm: " Kefeng Wang
  1 sibling, 1 reply; 4+ messages in thread
From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton, Russell King, Catalin Marinas
  Cc: Will Deacon, linux-arm-kernel, linux-mm, Cristian Marussi,
	Mark Brown, Aishwarya TCV, Kefeng Wang

On a bad map or bad access, directly set si_code to SEGV_MAPERR or
SEGV_ACCERR, set fault to 0 and jump to the error handling, which lets
us drop the arch's special vm_fault reasons.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm64/mm/fault.c | 43 ++++++++++++++++++++-----------------------
 1 file changed, 20 insertions(+), 23 deletions(-)

diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 405f9aa831bd..5b7e6ada3125 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -500,9 +500,6 @@ static bool is_write_abort(unsigned long esr)
 	return (esr & ESR_ELx_WNR) && !(esr & ESR_ELx_CM);
 }
 
-#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
-
 static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 				   struct pt_regs *regs)
 {
@@ -513,6 +510,7 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	unsigned int mm_flags = FAULT_FLAG_DEFAULT;
 	unsigned long addr = untagged_addr(far);
 	struct vm_area_struct *vma;
+	int si_code;
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
@@ -572,9 +570,10 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 	if (!(vma->vm_flags & vm_flags)) {
 		vma_end_read(vma);
-		fault = VM_FAULT_BADACCESS;
+		fault = 0;
+		si_code = SEGV_ACCERR;
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-		goto done;
+		goto bad_area;
 	}
 	fault = handle_mm_fault(vma, addr, mm_flags | FAULT_FLAG_VMA_LOCK, regs);
 	if (!(fault & (VM_FAULT_RETRY | VM_FAULT_COMPLETED)))
@@ -599,15 +598,19 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		fault = VM_FAULT_BADMAP;
-		goto done;
+		fault = 0;
+		si_code = SEGV_MAPERR;
+		goto bad_area;
 	}
 
-	if (!(vma->vm_flags & vm_flags))
-		fault = VM_FAULT_BADACCESS;
-	else
-		fault = handle_mm_fault(vma, addr, mm_flags, regs);
+	if (!(vma->vm_flags & vm_flags)) {
+		mmap_read_unlock(mm);
+		fault = 0;
+		si_code = SEGV_ACCERR;
+		goto bad_area;
+	}
 
+	fault = handle_mm_fault(vma, addr, mm_flags, regs);
 	/* Quick path to respond to signals */
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
@@ -626,13 +629,12 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 	mmap_read_unlock(mm);
 
 done:
-	/*
-	 * Handle the "normal" (no error) case first.
-	 */
-	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
-			      VM_FAULT_BADACCESS))))
+	/* Handle the "normal" (no error) case first. */
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
 
+	si_code = SEGV_MAPERR;
+bad_area:
 	/*
 	 * If we are in kernel mode at this point, we have no context to
 	 * handle this fault with.
@@ -667,13 +669,8 @@ static int __kprobes do_page_fault(unsigned long far, unsigned long esr,
 
 		arm64_force_sig_mceerr(BUS_MCEERR_AR, far, lsb, inf->name);
 	} else {
-		/*
-		 * Something tried to access memory that isn't in our memory
-		 * map.
-		 */
-		arm64_force_sig_fault(SIGSEGV,
-				      fault == VM_FAULT_BADACCESS ? SEGV_ACCERR : SEGV_MAPERR,
-				      far, inf->name);
+	/* Something tried to access memory outside of the memory map */
+		arm64_force_sig_fault(SIGSEGV, si_code, far, inf->name);
 	}
 
 	return 0;
-- 
2.41.0




* [PATCH v2 2/2] arm: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
  2024-04-11 13:09 [PATCH v2 0/2] mm: remove arch's private VM_FAULT_BADMAP/BADACCESS Kefeng Wang
  2024-04-11 13:09 ` [PATCH v2 1/2] arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS Kefeng Wang
@ 2024-04-11 13:09 ` Kefeng Wang
  1 sibling, 0 replies; 4+ messages in thread
From: Kefeng Wang @ 2024-04-11 13:09 UTC (permalink / raw)
  To: Andrew Morton, Russell King, Catalin Marinas
  Cc: Will Deacon, linux-arm-kernel, linux-mm, Cristian Marussi,
	Mark Brown, Aishwarya TCV, Kefeng Wang

On a bad map or bad access, directly set code to SEGV_MAPERR or
SEGV_ACCERR, set fault to 0 and jump to the error handling, which lets
us drop the arch's special vm_fault reasons.

Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
---
 arch/arm/mm/fault.c | 30 +++++++++++++++---------------
 1 file changed, 15 insertions(+), 15 deletions(-)

diff --git a/arch/arm/mm/fault.c b/arch/arm/mm/fault.c
index 5c4b417e24f9..45c141a6e087 100644
--- a/arch/arm/mm/fault.c
+++ b/arch/arm/mm/fault.c
@@ -226,9 +226,6 @@ void do_bad_area(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 }
 
 #ifdef CONFIG_MMU
-#define VM_FAULT_BADMAP		((__force vm_fault_t)0x010000)
-#define VM_FAULT_BADACCESS	((__force vm_fault_t)0x020000)
-
 static inline bool is_permission_fault(unsigned int fsr)
 {
 	int fs = fsr_fs(fsr);
@@ -295,7 +292,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	if (!(vma->vm_flags & vm_flags)) {
 		vma_end_read(vma);
 		count_vm_vma_lock_event(VMA_LOCK_SUCCESS);
-		fault = VM_FAULT_BADACCESS;
+		fault = 0;
+		code = SEGV_ACCERR;
 		goto bad_area;
 	}
 	fault = handle_mm_fault(vma, addr, flags | FAULT_FLAG_VMA_LOCK, regs);
@@ -321,7 +319,8 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 retry:
 	vma = lock_mm_and_find_vma(mm, addr, regs);
 	if (unlikely(!vma)) {
-		fault = VM_FAULT_BADMAP;
+		fault = 0;
+		code = SEGV_MAPERR;
 		goto bad_area;
 	}
 
@@ -329,10 +328,14 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	 * ok, we have a good vm_area for this memory access, check the
 	 * permissions on the VMA allow for the fault which occurred.
 	 */
-	if (!(vma->vm_flags & vm_flags))
-		fault = VM_FAULT_BADACCESS;
-	else
-		fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
+	if (!(vma->vm_flags & vm_flags)) {
+		mmap_read_unlock(mm);
+		fault = 0;
+		code = SEGV_ACCERR;
+		goto bad_area;
+	}
+
+	fault = handle_mm_fault(vma, addr & PAGE_MASK, flags, regs);
 
 	/* If we need to retry but a fatal signal is pending, handle the
 	 * signal first. We do not need to release the mmap_lock because
@@ -358,12 +361,11 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 	mmap_read_unlock(mm);
 done:
 
-	/*
-	 * Handle the "normal" case first - VM_FAULT_MAJOR
-	 */
-	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
+	/* Handle the "normal" case first */
+	if (likely(!(fault & VM_FAULT_ERROR)))
 		return 0;
 
+	code = SEGV_MAPERR;
 bad_area:
 	/*
 	 * If we are in kernel mode at this point, we
@@ -395,8 +397,6 @@ do_page_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 		 * isn't in our memory map..
 		 */
 		sig = SIGSEGV;
-		code = fault == VM_FAULT_BADACCESS ?
-			SEGV_ACCERR : SEGV_MAPERR;
 	}
 
 	__do_user_fault(addr, fsr, sig, code, regs);
-- 
2.41.0




* Re: [PATCH v2 1/2] arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS
  2024-04-11 13:09 ` [PATCH v2 1/2] arm64: mm: drop VM_FAULT_BADMAP/VM_FAULT_BADACCESS Kefeng Wang
@ 2024-04-11 17:31   ` Catalin Marinas
  0 siblings, 0 replies; 4+ messages in thread
From: Catalin Marinas @ 2024-04-11 17:31 UTC (permalink / raw)
  To: Kefeng Wang
  Cc: Andrew Morton, Russell King, Will Deacon, linux-arm-kernel,
	linux-mm, Cristian Marussi, Mark Brown, Aishwarya TCV

On Thu, Apr 11, 2024 at 09:09:24PM +0800, Kefeng Wang wrote:
> On a bad map or bad access, directly set si_code to SEGV_MAPERR or
> SEGV_ACCERR, set fault to 0 and jump to the error handling, which lets
> us drop the arch's special vm_fault reasons.
> 
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>

Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>


