From: Byungchul Park <byungchul@sk.com>
To: willy@infradead.org, netdev@vger.kernel.org
Cc: linux-kernel@vger.kernel.org, linux-mm@kvack.org,
kernel_team@skhynix.com, kuba@kernel.org, almasrymina@google.com,
ilias.apalodimas@linaro.org, harry.yoo@oracle.com,
hawk@kernel.org, akpm@linux-foundation.org, davem@davemloft.net,
john.fastabend@gmail.com, andrew+netdev@lunn.ch,
asml.silence@gmail.com, toke@redhat.com, tariqt@nvidia.com,
edumazet@google.com, pabeni@redhat.com, saeedm@nvidia.com,
leon@kernel.org, ast@kernel.org, daniel@iogearbox.net,
david@redhat.com, lorenzo.stoakes@oracle.com,
Liam.Howlett@oracle.com, vbabka@suse.cz, rppt@kernel.org,
surenb@google.com, mhocko@suse.com, horms@kernel.org,
linux-rdma@vger.kernel.org, bpf@vger.kernel.org,
vishal.moola@gmail.com
Subject: [RFC v4 01/18] netmem: introduce struct netmem_desc mirroring struct page
Date: Wed, 4 Jun 2025 11:52:29 +0900
Message-ID: <20250604025246.61616-2-byungchul@sk.com>
In-Reply-To: <20250604025246.61616-1-byungchul@sk.com>
To simplify struct page, the page pool members of struct page should be
moved to a separate data structure, so that they can eventually be
removed from struct page.

Introduce a network memory descriptor, struct netmem_desc, to store
these members, and union it with the existing fields in struct net_iov
so that the fields of struct net_iov stay organized around the new
descriptor.
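
For illustration only, here is a minimal, self-contained sketch (using
hypothetical demo_* types, not part of this patch) of how such an
anonymous aliasing union keeps the legacy niov->pp spelling and the new
niov->desc.pp spelling referring to the same storage, so call sites can
be converted incrementally:

  /*
   * Illustration only -- hypothetical, simplified demo_* types, not
   * part of this patch.  The anonymous union makes the legacy field
   * names and the new descriptor alias the same bytes.
   */
  #include <assert.h>

  struct demo_desc {
          unsigned long pp_magic;
          void *pp;
  };

  struct demo_iov {
          union {
                  struct demo_desc desc;
                  struct {                /* legacy names, same storage */
                          unsigned long pp_magic;
                          void *pp;
                  };
          };
  };

  int main(void)
  {
          struct demo_iov iov;
          int pool;

          iov.pp = &pool;                 /* legacy access            */
          assert(iov.desc.pp == &pool);   /* new access, same storage */
          return 0;
  }

The static_asserts in the patch below play the same role for the real
structures, catching any layout drift between struct page, struct
netmem_desc, and struct net_iov at compile time.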
Signed-off-by: Byungchul Park <byungchul@sk.com>
---
include/net/netmem.h | 94 ++++++++++++++++++++++++++++++++++----------
1 file changed, 73 insertions(+), 21 deletions(-)
diff --git a/include/net/netmem.h b/include/net/netmem.h
index 386164fb9c18..2687c8051ca5 100644
--- a/include/net/netmem.h
+++ b/include/net/netmem.h
@@ -12,6 +12,50 @@
#include <linux/mm.h>
#include <net/net_debug.h>
+/* These fields in struct page are used by the page_pool and net stack:
+ *
+ * struct {
+ * unsigned long pp_magic;
+ * struct page_pool *pp;
+ * unsigned long _pp_mapping_pad;
+ * unsigned long dma_addr;
+ * atomic_long_t pp_ref_count;
+ * };
+ *
+ * We mirror the page_pool fields here so the page_pool can access these
+ * fields without worrying whether the underlying fields belong to a
+ * page or netmem_desc.
+ *
+ * CAUTION: Do not update the fields in netmem_desc without also
+ * updating the anonymous aliasing union in struct net_iov.
+ */
+struct netmem_desc {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+};
+
+#define NETMEM_DESC_ASSERT_OFFSET(pg, desc) \
+ static_assert(offsetof(struct page, pg) == \
+ offsetof(struct netmem_desc, desc))
+NETMEM_DESC_ASSERT_OFFSET(flags, _flags);
+NETMEM_DESC_ASSERT_OFFSET(pp_magic, pp_magic);
+NETMEM_DESC_ASSERT_OFFSET(pp, pp);
+NETMEM_DESC_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
+NETMEM_DESC_ASSERT_OFFSET(dma_addr, dma_addr);
+NETMEM_DESC_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
+#undef NETMEM_DESC_ASSERT_OFFSET
+
+/*
+ * Since struct netmem_desc uses the space in struct page, the size
+ * should be checked, until struct netmem_desc has its own instance from
+ * slab, to avoid conflicting with other members within struct page.
+ */
+static_assert(sizeof(struct netmem_desc) <= offsetof(struct page, _refcount));
+
/* net_iov */
DECLARE_STATIC_KEY_FALSE(page_pool_mem_providers);
@@ -31,12 +75,25 @@ enum net_iov_type {
};
struct net_iov {
- enum net_iov_type type;
- unsigned long pp_magic;
- struct page_pool *pp;
+ union {
+ struct netmem_desc desc;
+
+ /* XXX: The following part should be removed once all
+ * the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead
+ * of niov->pp.
+ */
+ struct {
+ unsigned long _flags;
+ unsigned long pp_magic;
+ struct page_pool *pp;
+ unsigned long _pp_mapping_pad;
+ unsigned long dma_addr;
+ atomic_long_t pp_ref_count;
+ };
+ };
struct net_iov_area *owner;
- unsigned long dma_addr;
- atomic_long_t pp_ref_count;
+ enum net_iov_type type;
};
struct net_iov_area {
@@ -48,27 +105,22 @@ struct net_iov_area {
unsigned long base_virtual;
};
-/* These fields in struct page are used by the page_pool and net stack:
+/* net_iov is union'ed with struct netmem_desc mirroring struct page, so
+ * the page_pool can access these fields without worrying whether the
+ * underlying fields are accessed via netmem_desc or directly via
+ * net_iov, until all the references to them are converted so as to be
+ * accessed via netmem_desc e.g. niov->desc.pp instead of niov->pp.
*
- * struct {
- * unsigned long pp_magic;
- * struct page_pool *pp;
- * unsigned long _pp_mapping_pad;
- * unsigned long dma_addr;
- * atomic_long_t pp_ref_count;
- * };
- *
- * We mirror the page_pool fields here so the page_pool can access these fields
- * without worrying whether the underlying fields belong to a page or net_iov.
- *
- * The non-net stack fields of struct page are private to the mm stack and must
- * never be mirrored to net_iov.
+ * The non-net stack fields of struct page are private to the mm stack
+ * and must never be mirrored to net_iov.
*/
-#define NET_IOV_ASSERT_OFFSET(pg, iov) \
- static_assert(offsetof(struct page, pg) == \
+#define NET_IOV_ASSERT_OFFSET(desc, iov) \
+ static_assert(offsetof(struct netmem_desc, desc) == \
offsetof(struct net_iov, iov))
+NET_IOV_ASSERT_OFFSET(_flags, _flags);
NET_IOV_ASSERT_OFFSET(pp_magic, pp_magic);
NET_IOV_ASSERT_OFFSET(pp, pp);
+NET_IOV_ASSERT_OFFSET(_pp_mapping_pad, _pp_mapping_pad);
NET_IOV_ASSERT_OFFSET(dma_addr, dma_addr);
NET_IOV_ASSERT_OFFSET(pp_ref_count, pp_ref_count);
#undef NET_IOV_ASSERT_OFFSET
--
2.17.1