linux-mm.kvack.org archive mirror
From: "Zhang, Tianfei" <tianfei.zhang@intel.com>
To: "Ren, Qiaowei" <qiaowei.ren@intel.com>,
	"H. Peter Anvin" <hpa@zytor.com>,
	Thomas Gleixner <tglx@linutronix.de>,
	Ingo Molnar <mingo@redhat.com>,
	"Hansen, Dave" <dave.hansen@intel.com>
Cc: "x86@kernel.org" <x86@kernel.org>,
	"linux-kernel@vger.kernel.org" <linux-kernel@vger.kernel.org>,
	"linux-mm@kvack.org" <linux-mm@kvack.org>
Subject: RE: [PATCH v7 09/10] x86, mpx: cleanup unused bound tables
Date: Tue, 22 Jul 2014 00:50:18 +0000	[thread overview]
Message-ID: <BA6F50564D52C24884F9840E07E32DEC17D5E280@CDSMSX102.ccr.corp.intel.com> (raw)
In-Reply-To: <1405921124-4230-10-git-send-email-qiaowei.ren@intel.com>



> -----Original Message-----
> From: owner-linux-mm@kvack.org [mailto:owner-linux-mm@kvack.org] On
> Behalf Of Qiaowei Ren
> Sent: Monday, July 21, 2014 1:39 PM
> To: H. Peter Anvin; Thomas Gleixner; Ingo Molnar; Hansen, Dave
> Cc: x86@kernel.org; linux-kernel@vger.kernel.org; linux-mm@kvack.org; Ren,
> Qiaowei
> Subject: [PATCH v7 09/10] x86, mpx: cleanup unused bound tables
> 
> Since the kernel allocates those tables on demand, without userspace's
> knowledge, it is also responsible for freeing them when the associated
> mappings go away.
> 
> The solution here is to hook do_munmap() and check whether the process
> is MPX-enabled. If it is, the bounds tables covered by the virtual
> address region being unmapped are freed as well.
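
With this patch, if I read it correctly, the unmap path becomes roughly
the following (a control-flow sketch only, not literal kernel code):

    do_munmap(mm, start, len)
        -> arch_unmap(mm, vma, start, end)         /* new hook, mm/mmap.c */
             -> if (mm->bd_addr && !(vma->vm_flags & VM_MPX))
                    mpx_unmap(mm, start, end);     /* free covered tables */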
> 
> Signed-off-by: Qiaowei Ren <qiaowei.ren@intel.com>
> ---
>  arch/x86/include/asm/mmu_context.h |   16 +++
>  arch/x86/include/asm/mpx.h         |    9 ++
>  arch/x86/mm/mpx.c                  |  181 ++++++++++++++++++++++++++++++++++++
>  include/asm-generic/mmu_context.h  |    6 +
>  mm/mmap.c                          |    2 +
>  5 files changed, 214 insertions(+), 0 deletions(-)
> 
> diff --git a/arch/x86/include/asm/mmu_context.h b/arch/x86/include/asm/mmu_context.h
> index be12c53..af70d4f 100644
> --- a/arch/x86/include/asm/mmu_context.h
> +++ b/arch/x86/include/asm/mmu_context.h
> @@ -6,6 +6,7 @@
>  #include <asm/pgalloc.h>
>  #include <asm/tlbflush.h>
>  #include <asm/paravirt.h>
> +#include <asm/mpx.h>
>  #ifndef CONFIG_PARAVIRT
>  #include <asm-generic/mm_hooks.h>
> 
> @@ -96,4 +97,19 @@ do {						\
>  } while (0)
>  #endif
> 
> +static inline void arch_unmap(struct mm_struct *mm,
> +		struct vm_area_struct *vma,
> +		unsigned long start, unsigned long end) { #ifdef
> CONFIG_X86_INTEL_MPX

"#indef" new line

> +	/*
> +	 * Check whether this vma comes from MPX-enabled application.
> +	 * If so, release this vma related bound tables.
> +	 */
> +	if (mm->bd_addr && !(vma->vm_flags & VM_MPX))
> +		mpx_unmap(mm, start, end);
> +
> +#endif
> +}
> +
>  #endif /* _ASM_X86_MMU_CONTEXT_H */
> diff --git a/arch/x86/include/asm/mpx.h b/arch/x86/include/asm/mpx.h
> index 6cb0853..e848a74 100644
> --- a/arch/x86/include/asm/mpx.h
> +++ b/arch/x86/include/asm/mpx.h
> @@ -42,6 +42,13 @@
>  #define MPX_BD_SIZE_BYTES	(1UL<<(MPX_BD_ENTRY_OFFSET+MPX_BD_ENTRY_SHIFT))
>  #define MPX_BT_SIZE_BYTES	(1UL<<(MPX_BT_ENTRY_OFFSET+MPX_BT_ENTRY_SHIFT))
> 
> +#define MPX_BD_ENTRY_MASK	((1<<MPX_BD_ENTRY_OFFSET)-1)
> +#define MPX_BT_ENTRY_MASK	((1<<MPX_BT_ENTRY_OFFSET)-1)
> +#define MPX_GET_BD_ENTRY_OFFSET(addr)	((((addr)>>(MPX_BT_ENTRY_OFFSET+ \
> +		MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT)
> +#define MPX_GET_BT_ENTRY_OFFSET(addr)	((((addr)>>MPX_IGN_BITS) & \
> +		MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT)
> +
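
Just to make the math concrete, here is a small standalone sketch of what
these two macros compute. The constant values are my assumption of the
64-bit MPX layout from earlier in this series (3 ignored low bits, 2^17
bounds-table entries of 32 bytes each, 2^28 directory entries of 8 bytes
each), so please double-check them against mpx.h:

    /* decompose.c - illustrative only, mirrors the macros above */
    #include <stdio.h>

    #define MPX_IGN_BITS		3	/* low address bits ignored */
    #define MPX_BT_ENTRY_OFFSET	17	/* log2(# of table entries) */
    #define MPX_BT_ENTRY_SHIFT	5	/* log2(32-byte table entry) */
    #define MPX_BD_ENTRY_OFFSET	28	/* log2(# of directory entries) */
    #define MPX_BD_ENTRY_SHIFT	3	/* log2(8-byte directory entry) */

    #define MPX_BD_ENTRY_MASK	((1UL<<MPX_BD_ENTRY_OFFSET)-1)
    #define MPX_BT_ENTRY_MASK	((1UL<<MPX_BT_ENTRY_OFFSET)-1)
    #define MPX_GET_BD_ENTRY_OFFSET(addr)	((((addr)>>(MPX_BT_ENTRY_OFFSET+ \
    		MPX_IGN_BITS)) & MPX_BD_ENTRY_MASK) << MPX_BD_ENTRY_SHIFT)
    #define MPX_GET_BT_ENTRY_OFFSET(addr)	((((addr)>>MPX_IGN_BITS) & \
    		MPX_BT_ENTRY_MASK) << MPX_BT_ENTRY_SHIFT)

    int main(void)
    {
    	unsigned long addr = 0x00007f1234567890UL;

    	/* byte offset of this address' entry within the directory */
    	printf("bd entry offset: %#lx\n", MPX_GET_BD_ENTRY_OFFSET(addr));
    	/* byte offset of this address' entry within its bounds table */
    	printf("bt entry offset: %#lx\n", MPX_GET_BT_ENTRY_OFFSET(addr));
    	return 0;
    }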
>  #define MPX_BNDSTA_ERROR_CODE	0x3
>  #define MPX_BNDCFG_ENABLE_FLAG	0x1
>  #define MPX_BD_ENTRY_VALID_FLAG	0x1
> @@ -63,6 +70,8 @@ struct mpx_insn {
>  #define MAX_MPX_INSN_SIZE	15
> 
>  unsigned long mpx_mmap(unsigned long len);
> +void mpx_unmap(struct mm_struct *mm,
> +		unsigned long start, unsigned long end);
> 
>  #ifdef CONFIG_X86_INTEL_MPX
>  int do_mpx_bt_fault(struct xsave_struct *xsave_buf);
> diff --git a/arch/x86/mm/mpx.c b/arch/x86/mm/mpx.c
> index e1b28e6..d29ec9c 100644
> --- a/arch/x86/mm/mpx.c
> +++ b/arch/x86/mm/mpx.c
> @@ -2,6 +2,7 @@
>  #include <linux/syscalls.h>
>  #include <asm/mpx.h>
>  #include <asm/mman.h>
> +#include <asm/mmu_context.h>
>  #include <linux/sched/sysctl.h>
> 
>  static const char *mpx_mapping_name(struct vm_area_struct *vma)
> @@ -77,3 +78,183 @@ out:
>  	up_write(&mm->mmap_sem);
>  	return ret;
>  }
> +
> +/*
> + * Get the base of the bounds table pointed to by a specific bounds
> + * directory entry.
> + */
> +static int get_bt_addr(long __user *bd_entry, unsigned long *bt_addr,
> +		unsigned int *valid)
> +{
> +	if (get_user(*bt_addr, bd_entry))
> +		return -EFAULT;
> +
> +	*valid = *bt_addr & MPX_BD_ENTRY_VALID_FLAG;
> +	*bt_addr &= MPX_BT_ADDR_MASK;
> +
> +	/*
> +	 * If this bounds directory entry is nonzero while its valid
> +	 * bit is zero, a SIGSEGV will be produced due to this
> +	 * unexpected situation.
> +	 */
> +	if (!(*valid) && *bt_addr)
> +		force_sig(SIGSEGV, current);
> +
> +	return 0;
> +}
> +
> +/*
> + * Free the backing physical pages of bounds table 'bt_addr'.
> + * Assume start...end is within that bounds table.
> + */
> +static void zap_bt_entries(struct mm_struct *mm, unsigned long bt_addr,
> +		unsigned long start, unsigned long end)
> +{
> +	struct vm_area_struct *vma;
> +
> +	/* Find the vma which overlaps this bounds table */
> +	vma = find_vma(mm, bt_addr);
> +	if (!vma || vma->vm_start > bt_addr ||
> +			vma->vm_end < bt_addr+MPX_BT_SIZE_BYTES)
> +		return;
> +
> +	zap_page_range(vma, start, end, NULL);
> +}
> +
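
One thing worth noting here (if I read this right): zap_page_range()
only frees the backing pages of start...end and clears their page-table
entries; the bounds-table VMA itself stays mapped, so a later access to
those table entries will simply fault fresh pages back in.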
> +static void unmap_single_bt(struct mm_struct *mm, long __user *bd_entry,
> +		unsigned long bt_addr)
> +{
> +	if (user_atomic_cmpxchg_inatomic(&bt_addr, bd_entry,
> +			bt_addr | MPX_BD_ENTRY_VALID_FLAG, 0))
> +		return;
> +
> +	/*
> +	 * To avoid recursion, do_munmap() uses the VM_MPX flag to
> +	 * check whether the area being unmapped is itself a bounds table.
> +	 */
> +	do_munmap(mm, bt_addr & MPX_BT_ADDR_MASK, MPX_BT_SIZE_BYTES);
> +}
> +
> +/*
> + * If the bounds table pointed to by bounds directory entry 'bd_entry'
> + * is not shared, unmap the whole bounds table. Otherwise, free only
> + * the backing physical pages of the bounds table entries covered by
> + * the virtual address region start...end.
> + */
> +static void unmap_shared_bt(struct mm_struct *mm, long __user *bd_entry,
> +		unsigned long start, unsigned long end,
> +		bool prev_shared, bool next_shared)
> +{
> +	unsigned long bt_addr;
> +	unsigned int bde_valid = 0;
> +
> +	if (get_bt_addr(bd_entry, &bt_addr, &bde_valid) || !bde_valid)
> +		return;
> +
> +	if (prev_shared && next_shared)
> +		zap_bt_entries(mm, bt_addr,
> +			bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
> +			bt_addr+MPX_GET_BT_ENTRY_OFFSET(end-1));
> +	else if (prev_shared)
> +		zap_bt_entries(mm, bt_addr,
> +			bt_addr+MPX_GET_BT_ENTRY_OFFSET(start),
> +			bt_addr+MPX_BT_SIZE_BYTES);
> +	else if (next_shared)
> +		zap_bt_entries(mm, bt_addr, bt_addr,
> +			bt_addr+MPX_GET_BT_ENTRY_OFFSET(end-1));
> +	else
> +		unmap_single_bt(mm, bd_entry, bt_addr); }

"}" new line

> +
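So, summarizing the four cases above for a region start...end that falls
into a single bounds table:

    prev_shared && next_shared : zap only the entries for start...end
    prev_shared only           : zap from start's entry to the table end
    next_shared only           : zap from the table start to end's entry
    neither                    : the table is unused, unmap it entirely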
> +/*
> + * A virtual address region being munmap()ed might share bounds tables
> + * with adjacent VMAs. We only need to free the backing physical
> + * memory of those shared bounds-table entries covered by this virtual
> + * address region.
> + *
> + * The VMAs covering the virtual address region start...end have
> + * already been split if necessary and removed from the VMA list.
> + */
> +static void unmap_side_bts(struct mm_struct *mm, unsigned long start,
> +		unsigned long end)
> +{
> +	long __user *bde_start, *bde_end;
> +	struct vm_area_struct *prev, *next;
> +	bool prev_shared = false, next_shared = false;
> +
> +	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
> +	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
> +
> +	/*
> +	 * Check whether bde_start and bde_end are shared with adjacent
> +	 * VMAs. Because the VMAs covering the virtual address region
> +	 * start...end have already been removed from the VMA list, if
> +	 * next is not NULL it will satisfy start < end <= next->vm_start.
> +	 * And if prev is not NULL, prev->vm_end <= start < end.
> +	 */
> +	next = find_vma_prev(mm, start, &prev);
> +	if (prev && MPX_GET_BD_ENTRY_OFFSET(prev->vm_end-1) == (long)bde_start)
> +		prev_shared = true;
> +	if (next && MPX_GET_BD_ENTRY_OFFSET(next->vm_start) == (long)bde_end)
> +		next_shared = true;
> +
> +	/*
> +	 * This virtual address region being munmap()ed is only
> +	 * covered by one bounds table.
> +	 *
> +	 * In this case, if this table is also shared with adjacent
> +	 * VMAs, only part of the backing physical memory of the bounds
> +	 * table needs to be freed. Otherwise the whole bounds table
> +	 * needs to be unmapped.
> +	 */
> +	if (bde_start == bde_end) {
> +		unmap_shared_bt(mm, bde_start, start, end,
> +				prev_shared, next_shared);
> +		return;
> +	}
> +
> +	/*
> +	 * If more than one bounds table is covered by this virtual
> +	 * address region being munmap()ed, we need to separately check
> +	 * whether bde_start and bde_end are shared with adjacent VMAs.
> +	 */
> +	unmap_shared_bt(mm, bde_start, start, end, prev_shared, false);
> +	unmap_shared_bt(mm, bde_end, start, end, false, next_shared);
> +}
> +
> +/*
> + * Free unused bounds tables covered by a virtual address region being
> + * munmap()ed. Assume end > start.
> + *
> + * This function will be called by do_munmap(), and the VMAs covering
> + * the virtual address region start...end have already been split if
> + * necessary and removed from the VMA list.
> + */
> +void mpx_unmap(struct mm_struct *mm,
> +		unsigned long start, unsigned long end)
> +{
> +	long __user *bd_entry, *bde_start, *bde_end;
> +	unsigned long bt_addr;
> +	unsigned int bde_valid;
> +
> +	/*
> +	 * Unmap the bounds tables pointed to by the start/end bounds
> +	 * directory entries, or only free part of their backing physical
> +	 * memory if they are shared with adjacent VMAs.
> +	 */
> +	unmap_side_bts(mm, start, end);
> +
> +	/*
> +	 * Unmap those bounds tables which are entirely covered by this
> +	 * virtual address region.
> +	 */
> +	bde_start = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(start);
> +	bde_end = mm->bd_addr + MPX_GET_BD_ENTRY_OFFSET(end-1);
> +	for (bd_entry = bde_start + 1; bd_entry < bde_end; bd_entry++) {
> +		if (get_bt_addr(bd_entry, &bt_addr, &bde_valid))
> +			return;
> +		if (!bde_valid)
> +			continue;
> +		unmap_single_bt(mm, bd_entry, bt_addr);
> +	}
> +}
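
A concrete (hypothetical) example: if start falls into the bounds table
of directory entry 10 and end-1 into that of entry 13, then
unmap_side_bts() trims or unmaps the tables of entries 10 and 13, and
the loop above unmaps the tables of entries 11 and 12 outright.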
> diff --git a/include/asm-generic/mmu_context.h b/include/asm-generic/mmu_context.h
> index a7eec91..ac558ca 100644
> --- a/include/asm-generic/mmu_context.h
> +++ b/include/asm-generic/mmu_context.h
> @@ -42,4 +42,10 @@ static inline void activate_mm(struct mm_struct *prev_mm,
>  {
>  }
> 
> +static inline void arch_unmap(struct mm_struct *mm,
> +			struct vm_area_struct *vma,
> +			unsigned long start, unsigned long end)
> +{
> +}
> +
>  #endif /* __ASM_GENERIC_MMU_CONTEXT_H */
> diff --git a/mm/mmap.c b/mm/mmap.c
> index 129b847..8550d84 100644
> --- a/mm/mmap.c
> +++ b/mm/mmap.c
> @@ -2560,6 +2560,8 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
>  	/* Fix up all other VM information */
>  	remove_vma_list(mm, vma);
> 
> +	arch_unmap(mm, vma, start, end);
> +
>  	return 0;
>  }
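
Note that the hook runs only after remove_vma_list(), which is what
provides the property unmap_side_bts() relies on: by the time
arch_unmap() is called, the VMAs covering start...end are already off
the VMA list, so find_vma_prev(mm, start, ...) returns the neighbours.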
> 
> --
> 1.7.1
> 

