linux-mm.kvack.org archive mirror
 help / color / mirror / Atom feed
From: Uladzislau Rezki <urezki@gmail.com>
To: Ryosuke Yasuoka <ryasuoka@redhat.com>
Cc: maarten.lankhorst@linux.intel.com, mripard@kernel.org,
	tzimmermann@suse.de, airlied@gmail.com, simona@ffwll.ch,
	kraxel@redhat.com, gurchetansingh@chromium.org,
	olvaffe@gmail.com, akpm@linux-foundation.org, urezki@gmail.com,
	hch@infradead.org, dmitry.osipenko@collabora.com,
	jfalempe@redhat.com, dri-devel@lists.freedesktop.org,
	linux-kernel@vger.kernel.org, virtualization@lists.linux.dev,
	linux-mm@kvack.org
Subject: Re: [PATCH drm-next 1/2] vmalloc: Add atomic_vmap
Date: Wed, 5 Mar 2025 18:27:48 +0100	[thread overview]
Message-ID: <Z8iJlOyBjsGfqvM_@pc636> (raw)
In-Reply-To: <20250305152555.318159-2-ryasuoka@redhat.com>

On Thu, Mar 06, 2025 at 12:25:53AM +0900, Ryosuke Yasuoka wrote:
> Some drivers can use vmap in drm_panic, however, vmap is sleepable and
> takes locks. Since drm_panic will vmap in panic handler, atomic_vmap
> requests pages with GFP_ATOMIC and maps KVA without locks and sleep.
> 
> Signed-off-by: Ryosuke Yasuoka <ryasuoka@redhat.com>
> ---
>  include/linux/vmalloc.h |   2 +
>  mm/internal.h           |   5 ++
>  mm/vmalloc.c            | 105 ++++++++++++++++++++++++++++++++++++++++
>  3 files changed, 112 insertions(+)
> 
> diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
> index 31e9ffd936e3..c7a2a9a1976d 100644
> --- a/include/linux/vmalloc.h
> +++ b/include/linux/vmalloc.h
> @@ -190,6 +190,8 @@ void * __must_check vrealloc_noprof(const void *p, size_t size, gfp_t flags)
>  extern void vfree(const void *addr);
>  extern void vfree_atomic(const void *addr);
>  
> +extern void *atomic_vmap(struct page **pages, unsigned int count,
> +			 unsigned long flags, pgprot_t prot);
>  extern void *vmap(struct page **pages, unsigned int count,
>  			unsigned long flags, pgprot_t prot);
>  void *vmap_pfn(unsigned long *pfns, unsigned int count, pgprot_t prot);
> diff --git a/mm/internal.h b/mm/internal.h
> index 109ef30fee11..134b332bf5b9 100644
> --- a/mm/internal.h
> +++ b/mm/internal.h
> @@ -1278,6 +1278,11 @@ int numa_migrate_check(struct folio *folio, struct vm_fault *vmf,
>  void free_zone_device_folio(struct folio *folio);
>  int migrate_device_coherent_folio(struct folio *folio);
>  
> +struct vm_struct *atomic_get_vm_area_node(unsigned long size, unsigned long align,
> +					  unsigned long shift, unsigned long flags,
> +					  unsigned long start, unsigned long end, int node,
> +					  gfp_t gfp_mask, const void *caller);
> +
>  struct vm_struct *__get_vm_area_node(unsigned long size,
>  				     unsigned long align, unsigned long shift,
>  				     unsigned long flags, unsigned long start,
> diff --git a/mm/vmalloc.c b/mm/vmalloc.c
> index a6e7acebe9ad..f5c93779c60a 100644
> --- a/mm/vmalloc.c
> +++ b/mm/vmalloc.c
> @@ -1945,6 +1945,57 @@ static inline void setup_vmalloc_vm(struct vm_struct *vm,
>  	va->vm = vm;
>  }
>  
> +static struct vmap_area *atomic_alloc_vmap_area(unsigned long size,
> +						unsigned long align,
> +						unsigned long vstart, unsigned long vend,
> +						int node, gfp_t gfp_mask,
> +						unsigned long va_flags, struct vm_struct *vm)
> +{
> +	struct vmap_node *vn;
> +	struct vmap_area *va;
> +	unsigned long addr;
> +
> +	if (unlikely(!size || offset_in_page(size) || !is_power_of_2(align)))
> +		return ERR_PTR(-EINVAL);
> +
> +	if (unlikely(!vmap_initialized))
> +		return ERR_PTR(-EBUSY);
> +
> +	va = kmem_cache_alloc_node(vmap_area_cachep, gfp_mask, node);
> +	if (unlikely(!va))
> +		return ERR_PTR(-ENOMEM);
> +
> +	/*
> +	 * Only scan the relevant parts containing pointers to other objects
> +	 * to avoid false negatives.
> +	 */
> +	kmemleak_scan_area(&va->rb_node, SIZE_MAX, gfp_mask);
> +
> +	addr = __alloc_vmap_area(&free_vmap_area_root, &free_vmap_area_list,
> +				 size, align, vstart, vend);
> +
> +	trace_alloc_vmap_area(addr, size, align, vstart, vend, addr == vend);
> +
> +	va->va_start = addr;
> +	va->va_end = addr + size;
> +	va->vm = NULL;
> +	va->flags = va_flags;
> +
> +	vm->addr = (void *)va->va_start;
> +	vm->size = va_size(va);
> +	va->vm = vm;
> +
> +	vn = addr_to_node(va->va_start);
> +
> +	insert_vmap_area(va, &vn->busy.root, &vn->busy.head);
> +
> +	BUG_ON(!IS_ALIGNED(va->va_start, align));
> +	BUG_ON(va->va_start < vstart);
> +	BUG_ON(va->va_end > vend);
> +
> +	return va;
> +}
> +
>  /*
>   * Allocate a region of KVA of the specified size and alignment, within the
>   * vstart and vend. If vm is passed in, the two will also be bound.
> @@ -3106,6 +3157,33 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
>  	vm->flags &= ~VM_UNINITIALIZED;
>  }
>  
> +struct vm_struct *atomic_get_vm_area_node(unsigned long size, unsigned long align,
> +					  unsigned long shift, unsigned long flags,
> +					  unsigned long start, unsigned long end, int node,
> +					  gfp_t gfp_mask, const void *caller)
> +{
> +	struct vmap_area *va;
> +	struct vm_struct *area;
> +
> +	size = ALIGN(size, 1ul << shift);
> +	if (unlikely(!size))
> +		return NULL;
> +
> +	area = kzalloc_node(sizeof(*area), gfp_mask, node);
> +	if (unlikely(!area))
> +		return NULL;
> +
> +	size += PAGE_SIZE;
> +	area->flags = flags;
> +	area->caller = caller;
> +
> +	va = atomic_alloc_vmap_area(size, align, start, end, node, gfp_mask, 0, area);
> +	if (IS_ERR(va))
> +		return NULL;
> +
> +	return area;
> +}
> +
>  struct vm_struct *__get_vm_area_node(unsigned long size,
>  		unsigned long align, unsigned long shift, unsigned long flags,
>  		unsigned long start, unsigned long end, int node,
> @@ -3418,6 +3496,33 @@ void vunmap(const void *addr)
>  }
>  EXPORT_SYMBOL(vunmap);
>  
> +void *atomic_vmap(struct page **pages, unsigned int count,
> +		  unsigned long flags, pgprot_t prot)
> +{
> +	struct vm_struct *area;
> +	unsigned long addr;
> +	unsigned long size;		/* In bytes */
> +
> +	if (count > totalram_pages())
> +		return NULL;
> +
> +	size = (unsigned long)count << PAGE_SHIFT;
> +	area = atomic_get_vm_area_node(size, 1, PAGE_SHIFT, flags,
> +				       VMALLOC_START, VMALLOC_END,
> +				       NUMA_NO_NODE, GFP_ATOMIC,
> +				       __builtin_return_address(0));
> +	if (!area)
> +		return NULL;
> +
> +	addr = (unsigned long)area->addr;
> +	if (vmap_pages_range(addr, addr + size, pgprot_nx(prot),
> +			     pages, PAGE_SHIFT) < 0) {
> +		return NULL;
> +	}
> +
> +	return area->addr;
> +}
> +
>  /**
>   * vmap - map an array of pages into virtually contiguous space
>   * @pages: array of page pointers
> -- 
> 2.48.1
> 
This is copy-pasted code, which is odd. The proposal is not a way forward
in my view. Unfortunately vmalloc is not compatible with GFP_ATOMIC: there
is at least one place — the allocation of page-table entries — where
GFP_KERNEL is hard-coded.

Doing this without locks and synchronization is not possible.

--
Uladzislau Rezki


  parent reply	other threads:[~2025-03-05 18:44 UTC|newest]

Thread overview: 14+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-03-05 15:25 [PATCH drm-next 0/2] Enhance drm_panic Support for Virtio-GPU Ryosuke Yasuoka
2025-03-05 15:25 ` [PATCH drm-next 1/2] vmalloc: Add atomic_vmap Ryosuke Yasuoka
2025-03-05 17:08   ` Markus Elfring
2025-03-05 17:27   ` Uladzislau Rezki [this message]
2025-03-06  4:52   ` Matthew Wilcox
2025-03-06 13:24     ` Jocelyn Falempe
2025-03-06 14:04       ` Uladzislau Rezki
2025-03-06 15:52       ` Simona Vetter
2025-03-07  7:54         ` Jocelyn Falempe
2025-03-09  8:07           ` Ryosuke Yasuoka
2025-03-10 10:23             ` Jocelyn Falempe
2025-03-05 15:25 ` [PATCH drm-next 2/2] drm/virtio: Use atomic_vmap to work drm_panic in GUI Ryosuke Yasuoka
2025-03-06 23:56   ` kernel test robot
2025-03-07  3:07   ` kernel test robot

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=Z8iJlOyBjsGfqvM_@pc636 \
    --to=urezki@gmail.com \
    --cc=airlied@gmail.com \
    --cc=akpm@linux-foundation.org \
    --cc=dmitry.osipenko@collabora.com \
    --cc=dri-devel@lists.freedesktop.org \
    --cc=gurchetansingh@chromium.org \
    --cc=hch@infradead.org \
    --cc=jfalempe@redhat.com \
    --cc=kraxel@redhat.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=linux-mm@kvack.org \
    --cc=maarten.lankhorst@linux.intel.com \
    --cc=mripard@kernel.org \
    --cc=olvaffe@gmail.com \
    --cc=ryasuoka@redhat.com \
    --cc=simona@ffwll.ch \
    --cc=tzimmermann@suse.de \
    --cc=virtualization@lists.linux.dev \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox