From: Byungchul Park <byungchul@sk.com>
To: willy@infradead.org, netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
kernel_team@skhynix.com, kuba@kernel.org, almasrymina@google.com,
ilias.apalodimas@linaro.org, harry.yoo@oracle.com,
hawk@kernel.org, akpm@linux-foundation.org, davem@davemloft.net,
john.fastabend@gmail.com, andrew+netdev@lunn.ch,
asml.silence@gmail.com, toke@redhat.com, tariqt@nvidia.com,
edumazet@google.com, pabeni@redhat.com, saeedm@nvidia.com,
leon@kernel.org, ast@kernel.org, daniel@iogearbox.net,
david@redhat.com, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, vbabka@suse.cz, rppt@kernel.org,
surenb@google.com, mhocko@suse.com, horms@kernel.org,
linux-rdma@vger.kernel.org, bpf@vger.kernel.org,
vishal.moola@gmail.com
Subject: [RFC v4 17/18] mt76: use netmem descriptor and APIs for page pool
Date: Wed, 4 Jun 2025 11:52:45 +0900 [thread overview]
Message-ID: <20250604025246.61616-18-byungchul@sk.com> (raw)
In-Reply-To: <20250604025246.61616-1-byungchul@sk.com>
To simplify struct page, the effort to separate its own descriptor from
struct page is required, and the work for page pool is ongoing.
Use the netmem descriptor and APIs for page pool in the mt76 code.
Signed-off-by: Byungchul Park <byungchul@sk.com>
Reviewed-by: Mina Almasry <almasrymina@google.com>
---
drivers/net/wireless/mediatek/mt76/dma.c | 6 ++---
drivers/net/wireless/mediatek/mt76/mt76.h | 12 +++++-----
.../net/wireless/mediatek/mt76/sdio_txrx.c | 24 +++++++++----------
drivers/net/wireless/mediatek/mt76/usb.c | 10 ++++----
4 files changed, 26 insertions(+), 26 deletions(-)
diff --git a/drivers/net/wireless/mediatek/mt76/dma.c b/drivers/net/wireless/mediatek/mt76/dma.c
index 35b4ec91979e..41b529b95877 100644
--- a/drivers/net/wireless/mediatek/mt76/dma.c
+++ b/drivers/net/wireless/mediatek/mt76/dma.c
@@ -820,10 +820,10 @@ mt76_add_fragment(struct mt76_dev *dev, struct mt76_queue *q, void *data,
int nr_frags = shinfo->nr_frags;
if (nr_frags < ARRAY_SIZE(shinfo->frags)) {
- struct page *page = virt_to_head_page(data);
- int offset = data - page_address(page) + q->buf_offset;
+ netmem_ref netmem = virt_to_head_netmem(data);
+ int offset = data - netmem_address(netmem) + q->buf_offset;
- skb_add_rx_frag(skb, nr_frags, page, offset, len, q->buf_size);
+ skb_add_rx_frag_netmem(skb, nr_frags, netmem, offset, len, q->buf_size);
} else {
mt76_put_page_pool_buf(data, allow_direct);
}
diff --git a/drivers/net/wireless/mediatek/mt76/mt76.h b/drivers/net/wireless/mediatek/mt76/mt76.h
index 5f8d81cda6cd..16d09b6d8270 100644
--- a/drivers/net/wireless/mediatek/mt76/mt76.h
+++ b/drivers/net/wireless/mediatek/mt76/mt76.h
@@ -1795,21 +1795,21 @@ int mt76_rx_token_consume(struct mt76_dev *dev, void *ptr,
int mt76_create_page_pool(struct mt76_dev *dev, struct mt76_queue *q);
static inline void mt76_put_page_pool_buf(void *buf, bool allow_direct)
{
- struct page *page = virt_to_head_page(buf);
+ netmem_ref netmem = virt_to_head_netmem(buf);
- page_pool_put_full_page(page->pp, page, allow_direct);
+ page_pool_put_full_netmem(netmem_get_pp(netmem), netmem, allow_direct);
}
static inline void *
mt76_get_page_pool_buf(struct mt76_queue *q, u32 *offset, u32 size)
{
- struct page *page;
+ netmem_ref netmem;
- page = page_pool_dev_alloc_frag(q->page_pool, offset, size);
- if (!page)
+ netmem = page_pool_dev_alloc_netmem(q->page_pool, offset, &size);
+ if (!netmem)
return NULL;
- return page_address(page) + *offset;
+ return netmem_address(netmem) + *offset;
}
static inline void mt76_set_tx_blocked(struct mt76_dev *dev, bool blocked)
diff --git a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
index 0a927a7313a6..b1d89b6f663d 100644
--- a/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
+++ b/drivers/net/wireless/mediatek/mt76/sdio_txrx.c
@@ -68,14 +68,14 @@ mt76s_build_rx_skb(void *data, int data_len, int buf_len)
skb_put_data(skb, data, len);
if (data_len > len) {
- struct page *page;
+ netmem_ref netmem;
data += len;
- page = virt_to_head_page(data);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, data - page_address(page),
- data_len - len, buf_len);
- get_page(page);
+ netmem = virt_to_head_netmem(data);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ netmem, data - netmem_address(netmem),
+ data_len - len, buf_len);
+ get_netmem(netmem);
}
return skb;
@@ -88,7 +88,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
struct mt76_queue *q = &dev->q_rx[qid];
struct mt76_sdio *sdio = &dev->sdio;
int len = 0, err, i;
- struct page *page;
+ netmem_ref netmem;
u8 *buf, *end;
for (i = 0; i < intr->rx.num[qid]; i++)
@@ -100,11 +100,11 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (len > sdio->func->cur_blksize)
len = roundup(len, sdio->func->cur_blksize);
- page = __dev_alloc_pages(GFP_KERNEL, get_order(len));
- if (!page)
+ netmem = page_to_netmem(__dev_alloc_pages(GFP_KERNEL, get_order(len)));
+ if (!netmem)
return -ENOMEM;
- buf = page_address(page);
+ buf = netmem_address(netmem);
sdio_claim_host(sdio->func);
err = sdio_readsb(sdio->func, buf, MCR_WRDR(qid), len);
@@ -112,7 +112,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
if (err < 0) {
dev_err(dev->dev, "sdio read data failed:%d\n", err);
- put_page(page);
+ put_netmem(netmem);
return err;
}
@@ -140,7 +140,7 @@ mt76s_rx_run_queue(struct mt76_dev *dev, enum mt76_rxq_id qid,
}
buf += round_up(len + 4, 4);
}
- put_page(page);
+ put_netmem(netmem);
spin_lock_bh(&q->lock);
q->head = (q->head + i) % q->ndesc;
diff --git a/drivers/net/wireless/mediatek/mt76/usb.c b/drivers/net/wireless/mediatek/mt76/usb.c
index f9e67b8c3b3c..1ea80c87a839 100644
--- a/drivers/net/wireless/mediatek/mt76/usb.c
+++ b/drivers/net/wireless/mediatek/mt76/usb.c
@@ -478,7 +478,7 @@ mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
head_room = drv_flags & MT_DRV_RX_DMA_HDR ? 0 : MT_DMA_HDR_LEN;
if (SKB_WITH_OVERHEAD(buf_size) < head_room + len) {
- struct page *page;
+ netmem_ref netmem;
/* slow path, not enough space for data and
* skb_shared_info
@@ -489,10 +489,10 @@ mt76u_build_rx_skb(struct mt76_dev *dev, void *data,
skb_put_data(skb, data + head_room, MT_SKB_HEAD_LEN);
data += head_room + MT_SKB_HEAD_LEN;
- page = virt_to_head_page(data);
- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags,
- page, data - page_address(page),
- len - MT_SKB_HEAD_LEN, buf_size);
+ netmem = virt_to_head_netmem(data);
+ skb_add_rx_frag_netmem(skb, skb_shinfo(skb)->nr_frags,
+ netmem, data - netmem_address(netmem),
+ len - MT_SKB_HEAD_LEN, buf_size);
return skb;
}
--
2.17.1
next prev parent reply other threads:[~2025-06-04 2:53 UTC|newest]
Thread overview: 65+ messages / expand[flat|nested] mbox.gz Atom feed top
2025-06-04 2:52 [RFC v4 00/18] Split netmem from struct page Byungchul Park
2025-06-04 2:52 ` [RFC v4 01/18] netmem: introduce struct netmem_desc mirroring " Byungchul Park
2025-06-04 16:53 ` Toke Høiland-Jørgensen
2025-06-05 10:03 ` Pavel Begunkov
2025-06-05 10:04 ` Pavel Begunkov
2025-06-05 19:34 ` Mina Almasry
2025-06-04 2:52 ` [RFC v4 02/18] netmem: introduce netmem alloc APIs to wrap page alloc APIs Byungchul Park
2025-06-04 15:14 ` Suren Baghdasaryan
2025-06-05 0:53 ` Byungchul Park
2025-06-05 10:05 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 03/18] page_pool: use netmem alloc/put APIs in __page_pool_alloc_page_order() Byungchul Park
2025-06-04 16:54 ` Toke Høiland-Jørgensen
2025-06-05 10:26 ` Pavel Begunkov
2025-06-05 19:39 ` Mina Almasry
2025-06-05 20:27 ` Pavel Begunkov
2025-06-05 20:34 ` Mina Almasry
2025-06-04 2:52 ` [RFC v4 04/18] page_pool: rename __page_pool_alloc_page_order() to __page_pool_alloc_netmem_order() Byungchul Park
2025-06-04 16:54 ` Toke Høiland-Jørgensen
2025-06-05 10:28 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 05/18] page_pool: use netmem alloc/put APIs in __page_pool_alloc_pages_slow() Byungchul Park
2025-06-04 17:02 ` Toke Høiland-Jørgensen
2025-06-05 10:30 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 06/18] page_pool: rename page_pool_return_page() to page_pool_return_netmem() Byungchul Park
2025-06-04 17:03 ` Toke Høiland-Jørgensen
2025-06-05 10:31 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 07/18] page_pool: use netmem put API in page_pool_return_netmem() Byungchul Park
2025-06-04 16:54 ` Toke Høiland-Jørgensen
2025-06-05 10:33 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 08/18] page_pool: rename __page_pool_release_page_dma() to __page_pool_release_netmem_dma() Byungchul Park
2025-06-04 16:55 ` Toke Høiland-Jørgensen
2025-06-05 10:34 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 09/18] page_pool: rename __page_pool_put_page() to __page_pool_put_netmem() Byungchul Park
2025-06-04 16:55 ` Toke Høiland-Jørgensen
2025-06-05 10:35 ` Pavel Begunkov
2025-06-05 10:39 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 10/18] page_pool: rename __page_pool_alloc_pages_slow() to __page_pool_alloc_netmems_slow() Byungchul Park
2025-06-04 17:03 ` Toke Høiland-Jørgensen
2025-06-04 2:52 ` [RFC v4 11/18] mlx4: use netmem descriptor and APIs for page pool Byungchul Park
2025-06-04 2:52 ` [RFC v4 12/18] netmem: use _Generic to cover const casting for page_to_netmem() Byungchul Park
2025-06-04 16:55 ` Toke Høiland-Jørgensen
2025-06-05 10:40 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 13/18] netmem: remove __netmem_get_pp() Byungchul Park
2025-06-04 16:56 ` Toke Høiland-Jørgensen
2025-06-05 10:41 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 14/18] page_pool: make page_pool_get_dma_addr() just wrap page_pool_get_dma_addr_netmem() Byungchul Park
2025-06-04 16:57 ` Toke Høiland-Jørgensen
2025-06-05 10:45 ` Pavel Begunkov
2025-06-04 2:52 ` [RFC v4 15/18] netdevsim: use netmem descriptor and APIs for page pool Byungchul Park
2025-06-04 2:52 ` [RFC v4 16/18] netmem: introduce a netmem API, virt_to_head_netmem() Byungchul Park
2025-06-04 16:59 ` Toke Høiland-Jørgensen
2025-06-05 10:45 ` Pavel Begunkov
2025-06-05 19:43 ` Mina Almasry
2025-06-04 2:52 ` Byungchul Park [this message]
2025-06-04 2:52 ` [RFC v4 18/18] page_pool: access ->pp_magic through struct netmem_desc in page_pool_page_is_pp() Byungchul Park
2025-06-04 16:59 ` Toke Høiland-Jørgensen
2025-06-05 10:56 ` Pavel Begunkov
2025-06-05 11:49 ` Harry Yoo
2025-06-05 12:17 ` Harry Yoo
2025-06-05 13:28 ` Pavel Begunkov
2025-06-05 19:47 ` Mina Almasry
2025-06-05 20:16 ` Pavel Begunkov
2025-06-04 3:23 ` [RFC v4 00/18] Split netmem from struct page Byungchul Park
2025-06-05 19:55 ` Mina Almasry
2025-06-09 4:22 ` Byungchul Park
2025-06-09 7:53 ` Byungchul Park
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20250604025246.61616-18-byungchul@sk.com \
--to=byungchul@sk.com \
--cc=Liam.Howlett@oracle.com \
--cc=akpm@linux-foundation.org \
--cc=almasrymina@google.com \
--cc=andrew+netdev@lunn.ch \
--cc=asml.silence@gmail.com \
--cc=ast@kernel.org \
--cc=bpf@vger.kernel.org \
--cc=daniel@iogearbox.net \
--cc=davem@davemloft.net \
--cc=david@redhat.com \
--cc=edumazet@google.com \
--cc=harry.yoo@oracle.com \
--cc=hawk@kernel.org \
--cc=horms@kernel.org \
--cc=ilias.apalodimas@linaro.org \
--cc=john.fastabend@gmail.com \
--cc=kernel_team@skhynix.com \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-mm@kvack.org \
--cc=linux-rdma@vger.kernel.org \
--cc=lorenzo.stoakes@oracle.com \
--cc=mhocko@suse.com \
--cc=netdev@vger.kernel.org \
--cc=pabeni@redhat.com \
--cc=rppt@kernel.org \
--cc=saeedm@nvidia.com \
--cc=surenb@google.com \
--cc=tariqt@nvidia.com \
--cc=toke@redhat.com \
--cc=vbabka@suse.cz \
--cc=vishal.moola@gmail.com \
--cc=willy@infradead.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox