From: Xin Hao <haoxing990@gmail.com>
To: Byungchul Park <byungchul@sk.com>,
	linux-kernel@vger.kernel.org, linux-mm@kvack.org
Cc: kernel_team@skhynix.com, akpm@linux-foundation.org,
	ying.huang@intel.com, namit@vmware.com, xhao@linux.alibaba.com,
	mgorman@techsingularity.net, hughd@google.com,
	willy@infradead.org, david@redhat.com
Subject: Re: [RFC 1/2] mm/rmap: Recognize non-writable TLB entries during TLB batch flush
Date: Thu, 17 Aug 2023 10:18:10 +0800
Message-ID: <a3e9e30c-6ddd-422a-8f24-cc22ed601b42@gmail.com>
In-Reply-To: <20230804061850.21498-2-byungchul@sk.com>

On 2023/8/4 14:18, Byungchul Park wrote:
> Functionally, no change. This is preparation for CONFIG_MIGRC, which
> needs to recognize non-writable TLB entries and make use of them to
> batch more aggressively or even skip TLB flushes.
>
> While at it, change struct tlbflush_unmap_batch's ->flush_required
> (boolean) to ->nr_flush_required (int) in order to track not only
> whether a flush has been requested, but also the exact number of
> requests. That will be used in the CONFIG_MIGRC implementation too.
>
> Signed-off-by: Byungchul Park <byungchul@sk.com>
> ---
>   arch/x86/include/asm/tlbflush.h |  2 ++
>   arch/x86/mm/tlb.c               |  7 +++++++
>   include/linux/mm_types_task.h   |  4 ++--
>   include/linux/sched.h           |  1 +
>   mm/internal.h                   |  4 ++++
>   mm/rmap.c                       | 29 ++++++++++++++++++++++++-----
>   6 files changed, 40 insertions(+), 7 deletions(-)
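
To make sure I follow the new bookkeeping, I put together a small
userspace model of the two-batch scheme described above. This is my own
sketch only, not the kernel code: the names mirror the patch, the
per-arch cpumask is left out, and the "_model" suffixes mark everything
as hypothetical.

	#include <stdbool.h>
	#include <stdio.h>

	/* Toy model of struct tlbflush_unmap_batch, arch part omitted. */
	struct ubc_model {
		int nr_flush_required;	/* was a bool before this patch */
		bool writable;		/* set when a dirty PTE was unmapped */
	};

	static struct ubc_model tlb_ubc;	/* batch for writable PTEs */
	static struct ubc_model tlb_ubc_nowr;	/* batch for non-writable PTEs */

	/* Mirrors the dispatch added to set_tlb_ubc_flush_pending() below. */
	static void set_flush_pending_model(bool pte_writable, bool pte_dirty)
	{
		struct ubc_model *b = pte_writable ? &tlb_ubc : &tlb_ubc_nowr;

		b->nr_flush_required += 1;
		b->writable = b->writable || pte_dirty;
	}

	int main(void)
	{
		set_flush_pending_model(true, true);	/* writable, dirty PTE */
		set_flush_pending_model(false, false);	/* read-only, clean PTE */
		set_flush_pending_model(false, false);
		printf("ubc=%d nowr=%d\n", tlb_ubc.nr_flush_required,
		       tlb_ubc_nowr.nr_flush_required);	/* ubc=1 nowr=2 */
		return 0;
	}
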
>
> diff --git a/arch/x86/include/asm/tlbflush.h b/arch/x86/include/asm/tlbflush.h
> index 75bfaa421030..63504cde364b 100644
> --- a/arch/x86/include/asm/tlbflush.h
> +++ b/arch/x86/include/asm/tlbflush.h
> @@ -279,6 +279,8 @@ static inline void arch_tlbbatch_add_mm(struct arch_tlbflush_unmap_batch *batch,
>   }
>   
>   extern void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch);
> +extern void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
> +			       struct arch_tlbflush_unmap_batch *bsrc);
>   
>   static inline bool pte_flags_need_flush(unsigned long oldflags,
>   					unsigned long newflags,
> diff --git a/arch/x86/mm/tlb.c b/arch/x86/mm/tlb.c
> index 267acf27480a..69d145f1fff1 100644
> --- a/arch/x86/mm/tlb.c
> +++ b/arch/x86/mm/tlb.c
> @@ -1265,6 +1265,13 @@ void arch_tlbbatch_flush(struct arch_tlbflush_unmap_batch *batch)
>   	put_cpu();
>   }
>   
> +void arch_tlbbatch_fold(struct arch_tlbflush_unmap_batch *bdst,
> +			struct arch_tlbflush_unmap_batch *bsrc)
> +{
> +	cpumask_or(&bdst->cpumask, &bdst->cpumask, &bsrc->cpumask);
> +	cpumask_clear(&bsrc->cpumask);
> +}
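
If I read arch_tlbbatch_fold() right, it is just a set union plus a
clear on the source. A toy demo with the cpumask reduced to a single
64-bit word (again my sketch, not the kernel cpumask API):

	#include <stdint.h>
	#include <stdio.h>

	typedef uint64_t cpumask_model;	/* one bit per CPU, capped at 64 */

	static void tlbbatch_fold_model(cpumask_model *bdst, cpumask_model *bsrc)
	{
		*bdst |= *bsrc;	/* cpumask_or(): CPUs needing a flush from either batch */
		*bsrc = 0;	/* cpumask_clear(): source owns no pending CPUs now */
	}

	int main(void)
	{
		cpumask_model dst = 0x5, src = 0xc;	/* CPUs {0,2} and {2,3} */

		tlbbatch_fold_model(&dst, &src);
		printf("dst=%#llx src=%#llx\n", (unsigned long long)dst,
		       (unsigned long long)src);	/* dst=0xd src=0 */
		return 0;
	}
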
> +
>   /*
>    * Blindly accessing user memory from NMI context can be dangerous
>    * if we're in the middle of switching the current user task or
> diff --git a/include/linux/mm_types_task.h b/include/linux/mm_types_task.h
> index 5414b5c6a103..6f3bb757eb46 100644
> --- a/include/linux/mm_types_task.h
> +++ b/include/linux/mm_types_task.h
> @@ -59,8 +59,8 @@ struct tlbflush_unmap_batch {
>   	 */
>   	struct arch_tlbflush_unmap_batch arch;
>   
> -	/* True if a flush is needed. */
> -	bool flush_required;
> +	/* The number of flushes requested. */
> +	int nr_flush_required;
>   
>   	/*
>   	 * If true then the PTE was dirty when unmapped. The entry must be
> diff --git a/include/linux/sched.h b/include/linux/sched.h
> index eed5d65b8d1f..2232b2cdfce8 100644
> --- a/include/linux/sched.h
> +++ b/include/linux/sched.h
> @@ -1322,6 +1322,7 @@ struct task_struct {
>   #endif
>   
>   	struct tlbflush_unmap_batch	tlb_ubc;
> +	struct tlbflush_unmap_batch	tlb_ubc_nowr;
Maybe 'tlb_ubc_nowr' can replace 'tlb_ubc' later.
>   
>   	/* Cache last used pipe for splice(): */
>   	struct pipe_inode_info		*splice_pipe;
> diff --git a/mm/internal.h b/mm/internal.h
> index 68410c6d97ac..b90d516ad41f 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -840,6 +840,7 @@ extern struct workqueue_struct *mm_percpu_wq;
>   void try_to_unmap_flush(void);
>   void try_to_unmap_flush_dirty(void);
>   void flush_tlb_batched_pending(struct mm_struct *mm);
> +void fold_ubc_nowr(void);
>   #else
>   static inline void try_to_unmap_flush(void)
>   {
> @@ -850,6 +851,9 @@ static inline void try_to_unmap_flush_dirty(void)
>   static inline void flush_tlb_batched_pending(struct mm_struct *mm)
>   {
>   }
> +static inline void fold_ubc_nowr(void)
> +{
> +}
>   #endif /* CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH */
>   
>   extern const struct trace_print_flags pageflag_names[];
> diff --git a/mm/rmap.c b/mm/rmap.c
> index 19392e090bec..d18460a48485 100644
> --- a/mm/rmap.c
> +++ b/mm/rmap.c
> @@ -605,6 +605,22 @@ struct anon_vma *folio_lock_anon_vma_read(struct folio *folio,
>   }
>   
>   #ifdef CONFIG_ARCH_WANT_BATCHED_UNMAP_TLB_FLUSH
> +
> +void fold_ubc_nowr(void)
> +{
> +	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> +	struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
> +
> +	if (!tlb_ubc_nowr->nr_flush_required)
> +		return;
> +
> +	arch_tlbbatch_fold(&tlb_ubc->arch, &tlb_ubc_nowr->arch);
> +	tlb_ubc->writable = tlb_ubc->writable || tlb_ubc_nowr->writable;
> +	tlb_ubc->nr_flush_required += tlb_ubc_nowr->nr_flush_required;
> +	tlb_ubc_nowr->nr_flush_required = 0;
> +	tlb_ubc_nowr->writable = false;
> +}
> +
>   /*
>    * Flush TLB entries for recently unmapped pages from remote CPUs. It is
>    * important if a PTE was dirty when it was unmapped that it's flushed
> @@ -615,11 +631,12 @@ void try_to_unmap_flush(void)
>   {
>   	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
>   
> -	if (!tlb_ubc->flush_required)
> +	fold_ubc_nowr();
> +	if (!tlb_ubc->nr_flush_required)
>   		return;
>   
>   	arch_tlbbatch_flush(&tlb_ubc->arch);
> -	tlb_ubc->flush_required = false;
> +	tlb_ubc->nr_flush_required = 0;
>   	tlb_ubc->writable = false;
>   }
>   
> @@ -627,8 +644,9 @@ void try_to_unmap_flush(void)
>   void try_to_unmap_flush_dirty(void)
>   {
>   	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> +	struct tlbflush_unmap_batch *tlb_ubc_nowr = &current->tlb_ubc_nowr;
>   
> -	if (tlb_ubc->writable)
> +	if (tlb_ubc->writable || tlb_ubc_nowr->writable)
>   		try_to_unmap_flush();
>   }
>   
> @@ -644,15 +662,16 @@ void try_to_unmap_flush_dirty(void)
>   
>   static void set_tlb_ubc_flush_pending(struct mm_struct *mm, pte_t pteval)
>   {
> -	struct tlbflush_unmap_batch *tlb_ubc = &current->tlb_ubc;
> +	struct tlbflush_unmap_batch *tlb_ubc;
>   	int batch;
>   	bool writable = pte_dirty(pteval);
>   
>   	if (!pte_accessible(mm, pteval))
>   		return;
>   
> +	tlb_ubc = pte_write(pteval) ? &current->tlb_ubc : &current->tlb_ubc_nowr;
>   	arch_tlbbatch_add_mm(&tlb_ubc->arch, mm);
> -	tlb_ubc->flush_required = true;
> +	tlb_ubc->nr_flush_required += 1;
>   
>   	/*
>   	 * Ensure compiler does not re-order the setting of tlb_flush_batched
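
Continuing the toy model from my earlier comment (same caveats: my
sketch only, arch cpumask omitted), the flush side after this patch
would look like the below, which also shows why try_to_unmap_flush_dirty()
has to consult both batches until the fold happens:

	/* Mirrors fold_ubc_nowr(): merge the non-writable batch into tlb_ubc. */
	static void fold_ubc_nowr_model(void)
	{
		if (!tlb_ubc_nowr.nr_flush_required)
			return;

		/* arch_tlbbatch_fold() would OR the cpumasks here. */
		tlb_ubc.writable = tlb_ubc.writable || tlb_ubc_nowr.writable;
		tlb_ubc.nr_flush_required += tlb_ubc_nowr.nr_flush_required;
		tlb_ubc_nowr.nr_flush_required = 0;
		tlb_ubc_nowr.writable = false;
	}

	/* Mirrors try_to_unmap_flush(): fold first, one flush covers both. */
	static void try_to_unmap_flush_model(void)
	{
		fold_ubc_nowr_model();
		if (!tlb_ubc.nr_flush_required)
			return;

		/* arch_tlbbatch_flush() would run here. */
		tlb_ubc.nr_flush_required = 0;
		tlb_ubc.writable = false;
	}

A dirty PTE can still be non-writable (e.g. after write protection), so
tlb_ubc_nowr.writable really can be true, which is why the dirty path
checks it.
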
