linux-mm.kvack.org archive mirror
* Re: [PATCH net] page_pool: Revert "page_pool: disable dma mapping support..."
       [not found] <20211117075652.58299-1-linyunsheng@huawei.com>
@ 2021-11-17 11:48 ` Jesper Dangaard Brouer
  2021-11-17 11:51   ` Ilias Apalodimas
  0 siblings, 1 reply; 2+ messages in thread
From: Jesper Dangaard Brouer @ 2021-11-17 11:48 UTC (permalink / raw)
  To: Yunsheng Lin, davem, kuba
  Cc: brouer, netdev, linux-kernel, linuxarm, hawk, ilias.apalodimas,
	akpm, peterz, vbabka, willy, will, feng.tang, jgg, ebiederm,
	aarcange, guillaume.tucker, Linux-MM

Added CC: linux-mm@kvack.org

On 17/11/2021 08.56, Yunsheng Lin wrote:
> This reverts commit d00e60ee54b12de945b8493cf18c1ada9e422514.
> 
> As reported by Guillaume in [1]:
> Enabling LPAE always enables CONFIG_ARCH_DMA_ADDR_T_64BIT
> on 32-bit systems, which breaks the boot process when an
> ethernet driver uses page pool with the PP_FLAG_DMA_MAP flag.
> We had hoped there were no active consumers on such systems
> when we removed the dma mapping support, but LPAE seems to be
> a common feature on 32-bit systems, so revert it.
> 
> 1. https://www.spinics.net/lists/netdev/msg779890.html
> 
> Fixes: d00e60ee54b1 ("page_pool: disable dma mapping support for 32-bit arch with 64-bit DMA")
> Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> ---
>   include/linux/mm_types.h | 13 ++++++++++++-
>   include/net/page_pool.h  | 12 +++++++++++-
>   net/core/page_pool.c     | 10 ++++------
>   3 files changed, 27 insertions(+), 8 deletions(-)

Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>

Too bad that we have to keep this code ugliness in struct page, and the
handling in page_pool.
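
For readers less familiar with the scheme being restored: on a 32-bit
kernel with 64-bit DMA (e.g. ARM with LPAE), dma_addr_t is wider than
unsigned long, so the DMA address is split across page->dma_addr and
page->dma_addr_upper, and the upper half shares storage with
pp_frag_count, which is why PP_FLAG_PAGE_FRAG is rejected in that
configuration. A minimal standalone sketch of the idea, assuming a
64-bit dma_addr_t and a possibly 32-bit unsigned long (not the kernel
code itself):

#include <assert.h>
#include <stdint.h>

typedef uint64_t dma_addr_t;

struct fake_page {
	unsigned long dma_addr;		/* low bits (whole value on 64-bit) */
	unsigned long dma_addr_upper;	/* high 32 bits, only used when needed */
};

static void set_dma_addr(struct fake_page *p, dma_addr_t addr)
{
	p->dma_addr = (unsigned long)addr;	/* truncates on 32-bit */
	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		p->dma_addr_upper = (unsigned long)(addr >> 32);
}

static dma_addr_t get_dma_addr(const struct fake_page *p)
{
	dma_addr_t ret = p->dma_addr;

	if (sizeof(dma_addr_t) > sizeof(unsigned long))
		/* recombine the halves; cf. the helpers in the diff below */
		ret |= (dma_addr_t)p->dma_addr_upper << 16 << 16;
	return ret;
}

int main(void)
{
	struct fake_page pg = { 0, 0 };

	set_dma_addr(&pg, 0x123456789aULL);
	assert(get_dma_addr(&pg) == 0x123456789aULL);
	return 0;
}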


> diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> index bb8c6f5f19bc..c3a6e6209600 100644
> --- a/include/linux/mm_types.h
> +++ b/include/linux/mm_types.h
> @@ -105,7 +105,18 @@ struct page {
>   			struct page_pool *pp;
>   			unsigned long _pp_mapping_pad;
>   			unsigned long dma_addr;
> -			atomic_long_t pp_frag_count;
> +			union {
> +				/**
> +				 * dma_addr_upper: might require a 64-bit
> +				 * value on 32-bit architectures.
> +				 */
> +				unsigned long dma_addr_upper;
> +				/**
> +				 * For frag page support, not supported in
> +				 * 32-bit architectures with 64-bit DMA.
> +				 */
> +				atomic_long_t pp_frag_count;
> +			};
>   		};
>   		struct {	/* slab, slob and slub */
>   			union {
> diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> index 3855f069627f..a4082406a003 100644
> --- a/include/net/page_pool.h
> +++ b/include/net/page_pool.h
> @@ -216,14 +216,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
>   	page_pool_put_full_page(pool, page, true);
>   }
>   
> +#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT	\
> +		(sizeof(dma_addr_t) > sizeof(unsigned long))
> +
>   static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
>   {
> -	return page->dma_addr;
> +	dma_addr_t ret = page->dma_addr;
> +
> +	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> +		ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
> +
> +	return ret;
>   }
>   
>   static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
>   {
>   	page->dma_addr = addr;
> +	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> +		page->dma_addr_upper = upper_32_bits(addr);
>   }
>   
>   static inline void page_pool_set_frag_count(struct page *page, long nr)
> diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> index 9b60e4301a44..1a6978427d6c 100644
> --- a/net/core/page_pool.c
> +++ b/net/core/page_pool.c
> @@ -49,12 +49,6 @@ static int page_pool_init(struct page_pool *pool,
>   	 * which is the XDP_TX use-case.
>   	 */
>   	if (pool->p.flags & PP_FLAG_DMA_MAP) {
> -		/* DMA-mapping is not supported on 32-bit systems with
> -		 * 64-bit DMA mapping.
> -		 */
> -		if (sizeof(dma_addr_t) > sizeof(unsigned long))
> -			return -EOPNOTSUPP;
> -
>   		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
>   		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
>   			return -EINVAL;
> @@ -75,6 +69,10 @@ static int page_pool_init(struct page_pool *pool,
>   		 */
>   	}
>   
> +	if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
> +	    pool->p.flags & PP_FLAG_PAGE_FRAG)
> +		return -EINVAL;
> +
>   	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
>   		return -ENOMEM;
>   
> 
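
One detail in the restored read side worth noting: the upper half is
recombined with "<< 16 << 16" rather than "<< 32". When dma_addr_t is
not wider than unsigned long the branch is compile-time false, but the
expression is still compiled, and a shift by 32 on a 32-bit dma_addr_t
would be undefined and trigger a shift-count warning; splitting the
shift keeps each step below the type width in both configurations. A
tiny standalone illustration of the idiom (a reasonable reading of the
motivation, not something spelled out in the patch itself):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t upper = 0x12;
	uint64_t wide = upper;

	/* "wide << 32" would be fine here, but "x << 16 << 16" also
	 * compiles cleanly if x ever has a 32-bit type, which is what
	 * the page_pool helper relies on.
	 */
	uint64_t recombined = wide << 16 << 16;

	printf("0x%llx\n", (unsigned long long)recombined);	/* 0x1200000000 */
	return 0;
}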




* Re: [PATCH net] page_pool: Revert "page_pool: disable dma mapping support..."
  2021-11-17 11:48 ` [PATCH net] page_pool: Revert "page_pool: disable dma mapping support..." Jesper Dangaard Brouer
@ 2021-11-17 11:51   ` Ilias Apalodimas
  0 siblings, 0 replies; 2+ messages in thread
From: Ilias Apalodimas @ 2021-11-17 11:51 UTC (permalink / raw)
  To: Jesper Dangaard Brouer
  Cc: Yunsheng Lin, davem, kuba, brouer, netdev, linux-kernel,
	linuxarm, hawk, akpm, peterz, vbabka, willy, will, feng.tang,
	jgg, ebiederm, aarcange, guillaume.tucker, Linux-MM

On Wed, 17 Nov 2021 at 13:48, Jesper Dangaard Brouer <jbrouer@redhat.com> wrote:
>
> Added CC: linux-mm@kvack.org
>
> On 17/11/2021 08.56, Yunsheng Lin wrote:
> > This reverts commit d00e60ee54b12de945b8493cf18c1ada9e422514.
> >
> > As reported by Guillaume in [1]:
> > Enabling LPAE always enables CONFIG_ARCH_DMA_ADDR_T_64BIT
> > on 32-bit systems, which breaks the boot process when an
> > ethernet driver uses page pool with the PP_FLAG_DMA_MAP flag.
> > We had hoped there were no active consumers on such systems
> > when we removed the dma mapping support, but LPAE seems to be
> > a common feature on 32-bit systems, so revert it.
> >
> > 1. https://www.spinics.net/lists/netdev/msg779890.html
> >
> > Fixes: d00e60ee54b1 ("page_pool: disable dma mapping support for 32-bit arch with 64-bit DMA")
> > Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
> > ---
> >   include/linux/mm_types.h | 13 ++++++++++++-
> >   include/net/page_pool.h  | 12 +++++++++++-
> >   net/core/page_pool.c     | 10 ++++------
> >   3 files changed, 27 insertions(+), 8 deletions(-)
>
> Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
>
> Too bad that we have to keep this code ugliness in struct page, and the
> handling in page_pool.

Indeed :(

Acked-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
>
>
> > diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
> > index bb8c6f5f19bc..c3a6e6209600 100644
> > --- a/include/linux/mm_types.h
> > +++ b/include/linux/mm_types.h
> > @@ -105,7 +105,18 @@ struct page {
> >                       struct page_pool *pp;
> >                       unsigned long _pp_mapping_pad;
> >                       unsigned long dma_addr;
> > -                     atomic_long_t pp_frag_count;
> > +                     union {
> > +                             /**
> > +                              * dma_addr_upper: might require a 64-bit
> > +                              * value on 32-bit architectures.
> > +                              */
> > +                             unsigned long dma_addr_upper;
> > +                             /**
> > +                              * For frag page support, not supported in
> > +                              * 32-bit architectures with 64-bit DMA.
> > +                              */
> > +                             atomic_long_t pp_frag_count;
> > +                     };
> >               };
> >               struct {        /* slab, slob and slub */
> >                       union {
> > diff --git a/include/net/page_pool.h b/include/net/page_pool.h
> > index 3855f069627f..a4082406a003 100644
> > --- a/include/net/page_pool.h
> > +++ b/include/net/page_pool.h
> > @@ -216,14 +216,24 @@ static inline void page_pool_recycle_direct(struct page_pool *pool,
> >       page_pool_put_full_page(pool, page, true);
> >   }
> >
> > +#define PAGE_POOL_DMA_USE_PP_FRAG_COUNT      \
> > +             (sizeof(dma_addr_t) > sizeof(unsigned long))
> > +
> >   static inline dma_addr_t page_pool_get_dma_addr(struct page *page)
> >   {
> > -     return page->dma_addr;
> > +     dma_addr_t ret = page->dma_addr;
> > +
> > +     if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> > +             ret |= (dma_addr_t)page->dma_addr_upper << 16 << 16;
> > +
> > +     return ret;
> >   }
> >
> >   static inline void page_pool_set_dma_addr(struct page *page, dma_addr_t addr)
> >   {
> >       page->dma_addr = addr;
> > +     if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT)
> > +             page->dma_addr_upper = upper_32_bits(addr);
> >   }
> >
> >   static inline void page_pool_set_frag_count(struct page *page, long nr)
> > diff --git a/net/core/page_pool.c b/net/core/page_pool.c
> > index 9b60e4301a44..1a6978427d6c 100644
> > --- a/net/core/page_pool.c
> > +++ b/net/core/page_pool.c
> > @@ -49,12 +49,6 @@ static int page_pool_init(struct page_pool *pool,
> >        * which is the XDP_TX use-case.
> >        */
> >       if (pool->p.flags & PP_FLAG_DMA_MAP) {
> > -             /* DMA-mapping is not supported on 32-bit systems with
> > -              * 64-bit DMA mapping.
> > -              */
> > -             if (sizeof(dma_addr_t) > sizeof(unsigned long))
> > -                     return -EOPNOTSUPP;
> > -
> >               if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
> >                   (pool->p.dma_dir != DMA_BIDIRECTIONAL))
> >                       return -EINVAL;
> > @@ -75,6 +69,10 @@ static int page_pool_init(struct page_pool *pool,
> >                */
> >       }
> >
> > +     if (PAGE_POOL_DMA_USE_PP_FRAG_COUNT &&
> > +         pool->p.flags & PP_FLAG_PAGE_FRAG)
> > +             return -EINVAL;
> > +
> >       if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
> >               return -ENOMEM;
> >
> >
>

