linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Sidhartha Kumar <sidhartha.kumar@oracle.com>
To: Liu Shixin <liushixin2@huawei.com>,
	Liu Zixian <liuzixian4@huawei.com>,
	Mike Kravetz <mike.kravetz@oracle.com>,
	Muchun Song <songmuchun@bytedance.com>,
	Andrew Morton <akpm@linux-foundation.org>
Cc: linux-mm@kvack.org, linux-kernel@vger.kernel.org,
	Kefeng Wang <wangkefeng.wang@huawei.com>
Subject: Re: [PATCH] mm: hugetlb: fix UAF in hugetlb_handle_userfault
Date: Wed, 21 Sep 2022 12:31:58 -0500	[thread overview]
Message-ID: <dd2900e4-fbfc-67c0-73cd-9ddcb1963737@oracle.com> (raw)
In-Reply-To: <20220921083440.1267903-1-liushixin2@huawei.com>



On 9/21/22 3:34 AM, Liu Shixin wrote:
> The vma_lock and hugetlb_fault_mutex are dropped before handling
> userfault and reacquired after handle_userfault(), but
> reacquiring the vma_lock could lead to a UAF[1] due to the following
> race,
>
> hugetlb_fault
>    hugetlb_no_page
>      /*unlock vma_lock */
>      hugetlb_handle_userfault
>        handle_userfault
>          /* unlock mm->mmap_lock*/
>                                             vm_mmap_pgoff
>                                               do_mmap
>                                                 mmap_region
>                                                   munmap_vma_range
>                                                     /* clean old vma */
>          /* lock vma_lock again  <--- UAF */
>      /* unlock vma_lock */
>
> Since the vma_lock will be unlocked immediately after hugetlb_handle_userfault(),
> let's drop the unneeded re-lock and unlock in hugetlb_handle_userfault() to fix
> the issue.
>
> [1] https://lore.kernel.org/linux-mm/20220921014457.1668-1-liuzixian4@huawei.com/
> Reported-by: Liu Zixian <liuzixian4@huawei.com>
> Signed-off-by: Liu Shixin <liushixin2@huawei.com>
> Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
> ---
>   mm/hugetlb.c | 30 +++++++++++-------------------
>   1 file changed, 11 insertions(+), 19 deletions(-)
>
> diff --git a/mm/hugetlb.c b/mm/hugetlb.c
> index 9b8526d27c29..5a5d466692cf 100644
> --- a/mm/hugetlb.c
> +++ b/mm/hugetlb.c
> @@ -5489,7 +5489,6 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
>   						  unsigned long addr,
>   						  unsigned long reason)
>   {
> -	vm_fault_t ret;
>   	u32 hash;
>   	struct vm_fault vmf = {
>   		.vma = vma,
> @@ -5508,17 +5507,12 @@ static inline vm_fault_t hugetlb_handle_userfault(struct vm_area_struct *vma,
>   
>   	/*
>   	 * vma_lock and hugetlb_fault_mutex must be
> -	 * dropped before handling userfault.  Reacquire
> -	 * after handling fault to make calling code simpler.
> +	 * dropped before handling userfault.
>   	 */
>   	hugetlb_vma_unlock_read(vma);
>   	hash = hugetlb_fault_mutex_hash(mapping, idx);
>   	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
> -	ret = handle_userfault(&vmf, reason);
> -	mutex_lock(&hugetlb_fault_mutex_table[hash]);
> -	hugetlb_vma_lock_read(vma);
> -
> -	return ret;
> +	return handle_userfault(&vmf, reason);
>   }
>   
>   static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
> @@ -5537,6 +5531,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   	unsigned long haddr = address & huge_page_mask(h);
>   	bool new_page, new_pagecache_page = false;
>   	bool reserve_alloc = false;
> +	u32 hash = hugetlb_fault_mutex_hash(mapping, idx);
>   
>   	/*
>   	 * Currently, we are forced to kill the process in the event the
> @@ -5547,7 +5542,7 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   	if (is_vma_resv_set(vma, HPAGE_RESV_UNMAPPED)) {
>   		pr_warn_ratelimited("PID %d killed due to inadequate hugepage pool\n",
>   			   current->pid);
> -		return ret;
> +		goto out;
>   	}
>   
>   	/*
> @@ -5561,12 +5556,10 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   		if (idx >= size)
>   			goto out;
>   		/* Check for page in userfault range */
> -		if (userfaultfd_missing(vma)) {
> -			ret = hugetlb_handle_userfault(vma, mapping, idx,
> +		if (userfaultfd_missing(vma))
> +			return hugetlb_handle_userfault(vma, mapping, idx,
>   						       flags, haddr, address,
>   						       VM_UFFD_MISSING);
> -			goto out;
> -		}
>   
>   		page = alloc_huge_page(vma, haddr, 0);
>   		if (IS_ERR(page)) {
> @@ -5634,10 +5627,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   		if (userfaultfd_minor(vma)) {
>   			unlock_page(page);
>   			put_page(page);
> -			ret = hugetlb_handle_userfault(vma, mapping, idx,
> +			return hugetlb_handle_userfault(vma, mapping, idx,
>   						       flags, haddr, address,
>   						       VM_UFFD_MINOR);
> -			goto out;
>   		}
>   	}
>   
> @@ -5695,6 +5687,8 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
>   
>   	unlock_page(page);
>   out:
> +	hugetlb_vma_unlock_read(vma);
> +	mutex_unlock(&hugetlb_fault_mutex_table[hash]);
>   	return ret;
>   
>   backout:
> @@ -5792,11 +5786,9 @@ vm_fault_t hugetlb_fault(struct mm_struct *mm, struct vm_area_struct *vma,
>   
>   	entry = huge_ptep_get(ptep);
>   	/* PTE markers should be handled the same way as none pte */
> -	if (huge_pte_none_mostly(entry)) {
> -		ret = hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
> +	if (huge_pte_none_mostly(entry))
> +		return hugetlb_no_page(mm, vma, mapping, idx, address, ptep,
>   				      entry, flags);
> -		goto out_mutex;
> -	}
>   
>   	ret = 0;
>   

I've been looking at this as well.
Reviewed-by: Sidhartha Kumar <sidhartha.kumar@oracle.com>



  reply	other threads:[~2022-09-21 17:32 UTC|newest]

Thread overview: 11+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2022-09-21  8:34 Liu Shixin
2022-09-21 17:31 ` Sidhartha Kumar [this message]
2022-09-21 17:48 ` Mike Kravetz
2022-09-21 23:57   ` Mike Kravetz
2022-09-22  0:57     ` John Hubbard
2022-09-22  2:35       ` Mike Kravetz
2022-09-22  7:46     ` David Hildenbrand
2022-09-22 17:18       ` Mike Kravetz
2022-09-22 15:14     ` Peter Xu
2022-09-21 19:07 ` Andrew Morton
2022-09-22  1:58   ` Liu Shixin

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=dd2900e4-fbfc-67c0-73cd-9ddcb1963737@oracle.com \
    --to=sidhartha.kumar@oracle.com \
    --cc=akpm@linux-foundation.org \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=liushixin2@huawei.com \
    --cc=liuzixian4@huawei.com \
    --cc=mike.kravetz@oracle.com \
    --cc=songmuchun@bytedance.com \
    --cc=wangkefeng.wang@huawei.com \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox