From: David Rientjes <rientjes@google.com>
To: Pasha Tatashin <pasha.tatashin@soleen.com>
Cc: Andrew Morton <akpm@linux-foundation.org>,
	alim.akhtar@samsung.com,  alyssa@rosenzweig.io,
	asahi@lists.linux.dev, baolu.lu@linux.intel.com,
	 bhelgaas@google.com, cgroups@vger.kernel.org, corbet@lwn.net,
	 david@redhat.com, dwmw2@infradead.org, hannes@cmpxchg.org,
	heiko@sntech.de,  iommu@lists.linux.dev,
	jernej.skrabec@gmail.com, jonathanh@nvidia.com,  joro@8bytes.org,
	krzysztof.kozlowski@linaro.org, linux-doc@vger.kernel.org,
	 linux-fsdevel@vger.kernel.org, linux-kernel@vger.kernel.org,
	 linux-mm@kvack.org, linux-rockchip@lists.infradead.org,
	 linux-samsung-soc@vger.kernel.org, linux-sunxi@lists.linux.dev,
	 linux-tegra@vger.kernel.org, lizefan.x@bytedance.com,
	marcan@marcan.st,  mhiramat@kernel.org, m.szyprowski@samsung.com,
	paulmck@kernel.org,  rdunlap@infradead.org, robin.murphy@arm.com,
	samuel@sholland.org,  suravee.suthikulpanit@amd.com,
	sven@svenpeter.dev,  thierry.reding@gmail.com, tj@kernel.org,
	tomas.mudrunka@gmail.com,  vdumpa@nvidia.com, wens@csie.org,
	will@kernel.org, yu-cheng.yu@intel.com
Subject: Re: [PATCH v2 01/10] iommu/vt-d: add wrapper functions for page allocations
Date: Thu, 14 Dec 2023 09:58:23 -0800 (PST)
Message-ID: <776e17af-ae25-16a0-f443-66f3972b00c0@google.com>
In-Reply-To: <20231130201504.2322355-2-pasha.tatashin@soleen.com>

On Thu, 30 Nov 2023, Pasha Tatashin wrote:

> diff --git a/drivers/iommu/iommu-pages.h b/drivers/iommu/iommu-pages.h
> new file mode 100644
> index 000000000000..2332f807d514
> --- /dev/null
> +++ b/drivers/iommu/iommu-pages.h
> @@ -0,0 +1,199 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/*
> + * Copyright (c) 2023, Google LLC.
> + * Pasha Tatashin <pasha.tatashin@soleen.com>
> + */
> +
> +#ifndef __IOMMU_PAGES_H
> +#define __IOMMU_PAGES_H
> +
> +#include <linux/vmstat.h>
> +#include <linux/gfp.h>
> +#include <linux/mm.h>
> +
> +/*
> + * All page allocation that are performed in the IOMMU subsystem must use one of
> + * the functions below.  This is necessary for the proper accounting as IOMMU
> + * state can be rather large, i.e. multiple gigabytes in size.
> + */
> +
> +/**
> + * __iommu_alloc_pages_node - allocate a zeroed page of a given order from
> + * specific NUMA node.
> + * @nid: memory NUMA node id

NUMA_NO_NODE if no locality requirements?
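alloc_pages_node() already treats NUMA_NO_NODE as "no preference", so it
may be enough to note that in the kernel-doc, e.g. (wording is only a
suggestion):

	 * @nid: memory NUMA node id, or NUMA_NO_NODE if the caller has no
	 *	 locality requirement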

> + * @gfp: buddy allocator flags
> + * @order: page order
> + *
> + * returns the head struct page of the allocated page.
> + */
> +static inline struct page *__iommu_alloc_pages_node(int nid, gfp_t gfp,
> +						    int order)
> +{
> +	struct page *pages;

s/pages/page/ here and later in this file.

> +
> +	pages = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
> +	if (!pages)

unlikely()?
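Something like this, with the s/pages/page/ rename from above applied
(untested, just a sketch):

	static inline struct page *__iommu_alloc_pages_node(int nid, gfp_t gfp,
							    int order)
	{
		struct page *page;

		page = alloc_pages_node(nid, gfp | __GFP_ZERO, order);
		if (unlikely(!page))
			return NULL;

		return page;
	}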

> +		return NULL;
> +
> +	return pages;
> +}
> +
> +/**
> + * __iommu_alloc_pages - allocate a zeroed page of a given order.
> + * @gfp: buddy allocator flags
> + * @order: page order
> + *
> + * returns the head struct page of the allocated page.
> + */
> +static inline struct page *__iommu_alloc_pages(gfp_t gfp, int order)
> +{
> +	struct page *pages;
> +
> +	pages = alloc_pages(gfp | __GFP_ZERO, order);
> +	if (!pages)
> +		return NULL;
> +
> +	return pages;
> +}
> +
> +/**
> + * __iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
> + * @nid: memory NUMA node id
> + * @gfp: buddy allocator flags
> + *
> + * returns the struct page of the allocated page.
> + */
> +static inline struct page *__iommu_alloc_page_node(int nid, gfp_t gfp)
> +{
> +	return __iommu_alloc_pages_node(nid, gfp, 0);
> +}
> +
> +/**
> + * __iommu_alloc_page - allocate a zeroed page
> + * @gfp: buddy allocator flags
> + *
> + * returns the struct page of the allocated page.
> + */
> +static inline struct page *__iommu_alloc_page(gfp_t gfp)
> +{
> +	return __iommu_alloc_pages(gfp, 0);
> +}
> +
> +/**
> + * __iommu_free_pages - free page of a given order
> + * @pages: head struct page of the page

I think "pages" implies more than one page, this is just a (potentially 
compound) page?
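I.e. (untested sketch, only the naming changes):

	/**
	 * __iommu_free_pages - free a page of a given order
	 * @page: head struct page of the (potentially compound) page
	 * @order: page order
	 */
	static inline void __iommu_free_pages(struct page *page, int order)
	{
		if (!page)
			return;

		__free_pages(page, order);
	}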

> + * @order: page order
> + */
> +static inline void __iommu_free_pages(struct page *pages, int order)
> +{
> +	if (!pages)
> +		return;
> +
> +	__free_pages(pages, order);
> +}
> +
> +/**
> + * __iommu_free_page - free page
> + * @page: struct page of the page
> + */
> +static inline void __iommu_free_page(struct page *page)
> +{
> +	__iommu_free_pages(page, 0);
> +}
> +
> +/**
> + * iommu_alloc_pages_node - allocate a zeroed page of a given order from
> + * specific NUMA node.
> + * @nid: memory NUMA node id
> + * @gfp: buddy allocator flags
> + * @order: page order
> + *
> + * returns the virtual address of the allocated page
> + */
> +static inline void *iommu_alloc_pages_node(int nid, gfp_t gfp, int order)
> +{
> +	struct page *pages = __iommu_alloc_pages_node(nid, gfp, order);
> +
> +	if (!pages)
> +		return NULL;
> +
> +	return page_address(pages);
> +}
> +
> +/**
> + * iommu_alloc_pages - allocate a zeroed page of a given order
> + * @gfp: buddy allocator flags
> + * @order: page order
> + *
> + * returns the virtual address of the allocated page
> + */
> +static inline void *iommu_alloc_pages(gfp_t gfp, int order)
> +{
> +	struct page *pages = __iommu_alloc_pages(gfp, order);
> +
> +	if (!pages)
> +		return NULL;
> +
> +	return page_address(pages);
> +}
> +
> +/**
> + * iommu_alloc_page_node - allocate a zeroed page at specific NUMA node.
> + * @nid: memory NUMA node id
> + * @gfp: buddy allocator flags
> + *
> + * returns the virtual address of the allocated page
> + */
> +static inline void *iommu_alloc_page_node(int nid, gfp_t gfp)
> +{
> +	return iommu_alloc_pages_node(nid, gfp, 0);
> +}
> +
> +/**
> + * iommu_alloc_page - allocate a zeroed page
> + * @gfp: buddy allocator flags
> + *
> + * returns the virtual address of the allocated page
> + */
> +static inline void *iommu_alloc_page(gfp_t gfp)
> +{
> +	return iommu_alloc_pages(gfp, 0);
> +}
> +
> +/**
> + * iommu_free_pages - free page of a given order
> + * @virt: virtual address of the page to be freed.
> + * @order: page order
> + */
> +static inline void iommu_free_pages(void *virt, int order)
> +{
> +	if (!virt)
> +		return;
> +
> +	__iommu_free_pages(virt_to_page(virt), order);
> +}
> +
> +/**
> + * iommu_free_page - free page
> + * @virt: virtual address of the page to be freed.
> + */
> +static inline void iommu_free_page(void *virt)
> +{
> +	iommu_free_pages(virt, 0);
> +}
> +
> +/**
> + * iommu_free_pages_list - free a list of pages.
> + * @pages: the head of the lru list to be freed.

Document the locking requirements for this?
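Maybe a "Context:" line would be enough, e.g. (assuming the intent is that
the caller owns the list and nothing here takes a lock):

	 * Context: The caller must own @pages and ensure the list cannot be
	 *	    walked or modified concurrently; no locking is done here.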

> + */
> +static inline void iommu_free_pages_list(struct list_head *pages)
> +{
> +	while (!list_empty(pages)) {
> +		struct page *p = list_entry(pages->prev, struct page, lru);
> +
> +		list_del(&p->lru);
> +		put_page(p);
> +	}
> +}
> +
> +#endif	/* __IOMMU_PAGES_H */
> -- 
> 2.43.0.rc2.451.g8631bc7472-goog
> 
> 
> 


Thread overview: 32+ messages
2023-11-30 20:14 [PATCH v2 00/10] IOMMU memory observability Pasha Tatashin
2023-11-30 20:14 ` [PATCH v2 01/10] iommu/vt-d: add wrapper functions for page allocations Pasha Tatashin
2023-12-14 17:58   ` David Rientjes [this message]
2023-12-14 19:16     ` Pasha Tatashin
2023-12-24 21:30       ` David Rientjes
2023-12-24 21:48         ` Matthew Wilcox
2023-12-26 17:57           ` Pasha Tatashin
2023-12-26  6:09       ` Liu, Jingqi
2023-12-26 17:14         ` Pasha Tatashin
2023-11-30 20:14 ` [PATCH v2 02/10] iommu/amd: use page allocation function provided by iommu-pages.h Pasha Tatashin
2023-12-24 21:34   ` David Rientjes
2023-11-30 20:14 ` [PATCH v2 03/10] iommu/io-pgtable-arm: " Pasha Tatashin
2023-12-24 21:34   ` David Rientjes
2023-11-30 20:14 ` [PATCH v2 04/10] iommu/io-pgtable-dart: " Pasha Tatashin
2023-12-24 21:36   ` David Rientjes
2023-12-26 18:08     ` Pasha Tatashin
2023-11-30 20:14 ` [PATCH v2 05/10] iommu/exynos: " Pasha Tatashin
2023-12-24 21:37   ` David Rientjes
2023-11-30 20:15 ` [PATCH v2 06/10] iommu/rockchip: " Pasha Tatashin
2023-12-24 21:39   ` David Rientjes
2023-11-30 20:15 ` [PATCH v2 07/10] iommu/sun50i: " Pasha Tatashin
2023-12-24 21:39   ` David Rientjes
2023-11-30 20:15 ` [PATCH v2 08/10] iommu/tegra-smmu: " Pasha Tatashin
2023-12-24 21:40   ` David Rientjes
2023-11-30 20:15 ` [PATCH v2 09/10] iommu: observability of the IOMMU allocations Pasha Tatashin
2023-12-14 17:59   ` David Rientjes
2023-12-14 19:18     ` Pasha Tatashin
2023-11-30 20:15 ` [PATCH v2 10/10] iommu: account IOMMU allocated memory Pasha Tatashin
2023-12-14 18:02   ` David Rientjes
2023-12-15 21:11     ` Pasha Tatashin
2023-12-24 21:44       ` David Rientjes
2023-12-24 21:49 ` [PATCH v2 00/10] IOMMU memory observability David Rientjes
