linux-mm.kvack.org archive mirror
From: "Huang, Ying" <ying.huang@intel.com>
To: "Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com>
Cc: linux-mm@kvack.org,  akpm@linux-foundation.org,
	 mpe@ellerman.id.au, linuxppc-dev@lists.ozlabs.org,
	 npiggin@gmail.com, christophe.leroy@csgroup.eu,
	 Oscar Salvador <osalvador@suse.de>,
	 David Hildenbrand <david@redhat.com>,
	 Michal Hocko <mhocko@suse.com>,
	 Vishal Verma <vishal.l.verma@intel.com>
Subject: Re: [PATCH v3 4/7] mm/hotplug: Allow pageblock alignment via altmap reservation
Date: Tue, 11 Jul 2023 14:21:49 +0800	[thread overview]
Message-ID: <87ilardl36.fsf@yhuang6-desk2.ccr.corp.intel.com> (raw)
In-Reply-To: <20230711044834.72809-5-aneesh.kumar@linux.ibm.com> (Aneesh Kumar K. V.'s message of "Tue, 11 Jul 2023 10:18:30 +0530")

"Aneesh Kumar K.V" <aneesh.kumar@linux.ibm.com> writes:

> Add a new kconfig option that can be selected if we want to allow
> pageblock alignment by reserving pages in the vmemmap altmap area.
> This implies we will be reserving some pages for every memory block.
> This also allows the memmap on memory feature to be more widely useful
> with different memory block size values.
>
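
Just to make sure I understand the reservation, a concrete example
(assuming ppc64 radix with 64K pages, sizeof(struct page) == 64 and a
2MB pageblock, i.e. pageblock_nr_pages == 32): a 256MB memory block has
4096 base pages, its vmemmap needs 4096 * 64 = 256KB, that is 4 pages,
so 28 more pages get reserved to reach the 32-page pageblock boundary,
and the remaining 4064 pages stay pageblock aligned.  If I calculate
correctly, that is the case this option is meant to cover.
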
> Signed-off-by: Aneesh Kumar K.V <aneesh.kumar@linux.ibm.com>
> ---
>  mm/Kconfig          |  9 +++++++
>  mm/memory_hotplug.c | 59 +++++++++++++++++++++++++++++++++++++--------
>  2 files changed, 58 insertions(+), 10 deletions(-)
>
> diff --git a/mm/Kconfig b/mm/Kconfig
> index 932349271e28..88a1472b2086 100644
> --- a/mm/Kconfig
> +++ b/mm/Kconfig
> @@ -570,6 +570,15 @@ config MHP_MEMMAP_ON_MEMORY
>  	depends on MEMORY_HOTPLUG && SPARSEMEM_VMEMMAP
>  	depends on ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
>  
> +config MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY
> +       bool "Allow reserving pages for pageblock alignment"
> +       depends on MHP_MEMMAP_ON_MEMORY
> +       help
> +	This option allows the memmap on memory feature to be more useful
> +	with different memory block sizes. This is achieved by marking some pages
> +	in each memory block as reserved so that we can get pageblock alignment
> +	for the remaining pages.
> +
>  endif # MEMORY_HOTPLUG
>  
>  config ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE
> diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
> index 07c99b0cc371..f36aec1f7626 100644
> --- a/mm/memory_hotplug.c
> +++ b/mm/memory_hotplug.c
> @@ -1252,15 +1252,17 @@ static inline bool arch_supports_memmap_on_memory(unsigned long size)
>  {
>  	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
>  	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
> -	unsigned long remaining_size = size - vmemmap_size;
>  
> -	return IS_ALIGNED(vmemmap_size, PMD_SIZE) &&
> -		IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT));
> +	return IS_ALIGNED(vmemmap_size, PMD_SIZE);
>  }
>  #endif
>  
>  static bool mhp_supports_memmap_on_memory(unsigned long size)
>  {
> +	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
> +	unsigned long vmemmap_size = nr_vmemmap_pages * sizeof(struct page);
> +	unsigned long remaining_size = size - vmemmap_size;
> +
>  	/*
>  	 * Besides having arch support and the feature enabled at runtime, we
>  	 * need a few more assumptions to hold true:
> @@ -1287,9 +1289,30 @@ static bool mhp_supports_memmap_on_memory(unsigned long size)
>  	 *       altmap as an alternative source of memory, and we do not exactly
>  	 *       populate a single PMD.
>  	 */
> -	return mhp_memmap_on_memory() &&
> -		size == memory_block_size_bytes() &&
> -		arch_supports_memmap_on_memory(size);
> +	if (!mhp_memmap_on_memory() || size != memory_block_size_bytes())
> +		return false;
> +	 /*
> +	  * Without page reservation remaining pages should be pageblock aligned.
> +	  */
> +	if (!IS_ENABLED(CONFIG_MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY) &&
> +	    !IS_ALIGNED(remaining_size, (pageblock_nr_pages << PAGE_SHIFT)))
> +		return false;
> +
> +	return arch_supports_memmap_on_memory(size);
> +}
> +
> +static inline unsigned long memory_block_align_base(unsigned long size)
> +{
> +	if (IS_ENABLED(CONFIG_MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY)) {
> +		unsigned long align;
> +		unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
> +		unsigned long vmemmap_size;
> +
> +		vmemmap_size = (nr_vmemmap_pages * sizeof(struct page)) >> PAGE_SHIFT;

DIV_ROUND_UP()?  The right shift truncates, so a vmemmap size that is
not an exact multiple of PAGE_SIZE would be under-counted here.

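Something along these lines, perhaps (untested, just to illustrate the
rounding; vmemmap_pages may also be a clearer name, since the value is
a number of pages rather than a size):

static inline unsigned long memory_block_align_base(unsigned long size)
{
	unsigned long nr_vmemmap_pages = size >> PAGE_SHIFT;
	unsigned long vmemmap_pages;

	if (!IS_ENABLED(CONFIG_MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY))
		return 0;

	/* round up so a partially filled tail page is still counted */
	vmemmap_pages = DIV_ROUND_UP(nr_vmemmap_pages * sizeof(struct page),
				     PAGE_SIZE);
	return pageblock_align(vmemmap_pages) - vmemmap_pages;
}
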
> +		align = pageblock_align(vmemmap_size) - vmemmap_size;
> +		return align;
> +	} else
> +		return 0;
>  }
>  
>  /*
> @@ -1302,7 +1325,11 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
>  {
>  	struct mhp_params params = { .pgprot = pgprot_mhp(PAGE_KERNEL) };
>  	enum memblock_flags memblock_flags = MEMBLOCK_NONE;
> -	struct vmem_altmap mhp_altmap = {};
> +	struct vmem_altmap mhp_altmap = {
> +		.base_pfn =  PHYS_PFN(res->start),
> +		.end_pfn  =  PHYS_PFN(res->end),
> +		.reserve  = memory_block_align_base(resource_size(res)),
> +	};
>  	struct memory_group *group = NULL;
>  	u64 start, size;
>  	bool new_node = false;
> @@ -1347,8 +1374,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
>  	 */
>  	if (mhp_flags & MHP_MEMMAP_ON_MEMORY) {
>  		if (mhp_supports_memmap_on_memory(size)) {
> -			mhp_altmap.free = PHYS_PFN(size);
> -			mhp_altmap.base_pfn = PHYS_PFN(start);
> +			mhp_altmap.free = PHYS_PFN(size) - mhp_altmap.reserve;
>  			params.altmap = &mhp_altmap;
>  		}
>  		/* fallback to not using altmap  */
> @@ -1360,7 +1386,7 @@ int __ref add_memory_resource(int nid, struct resource *res, mhp_t mhp_flags)
>  		goto error;
>  
>  	/* create memory block devices after memory was added */
> -	ret = create_memory_block_devices(start, size, mhp_altmap.alloc,
> +	ret = create_memory_block_devices(start, size, mhp_altmap.alloc + mhp_altmap.reserve,
>  					  group);
>  	if (ret) {
>  		arch_remove_memory(start, size, NULL);
> @@ -2260,3 +2286,16 @@ int offline_and_remove_memory(u64 start, u64 size)
>  }
>  EXPORT_SYMBOL_GPL(offline_and_remove_memory);
>  #endif /* CONFIG_MEMORY_HOTREMOVE */
> +
> +static int __init memory_hotplug_init(void)
> +{
> +
> +	if (IS_ENABLED(CONFIG_MHP_RESERVE_PAGES_MEMMAP_ON_MEMORY) &&
> +	    mhp_memmap_on_memory()) {
> +		pr_info("Memory hotplug will reserve %ld pages in each memory block\n",
> +			memory_block_align_base(memory_block_size_bytes()));
> +
> +	}
> +	return 0;
> +}
> +module_init(memory_hotplug_init);
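
FWIW, with the same example numbers as above, mhp_altmap.free would
start out as 4096 - 28 = 4068 pages, create_memory_block_devices()
would be passed alloc + reserve pages, and the new pr_info() would
report "Memory hotplug will reserve 28 pages in each memory block",
again assuming the 64K-page / 2MB-pageblock configuration from my
earlier example.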


Thread overview: 28+ messages
2023-07-11  4:48 [PATCH v3 0/7] Add support for memmap on memory feature on ppc64 Aneesh Kumar K.V
2023-07-11  4:48 ` [PATCH v3 1/7] mm/hotplug: Simplify ARCH_MHP_MEMMAP_ON_MEMORY_ENABLE kconfig Aneesh Kumar K.V
2023-07-11  4:48 ` [PATCH v3 2/7] mm/hotplug: Allow memmap on memory hotplug request to fallback Aneesh Kumar K.V
2023-07-11 10:23   ` David Hildenbrand
2023-07-11 15:58     ` Aneesh Kumar K V
2023-07-11  4:48 ` [PATCH v3 3/7] mm/hotplug: Allow architecture to override memmap on memory support check Aneesh Kumar K.V
2023-07-11 10:36   ` David Hildenbrand
2023-07-11 16:07     ` Aneesh Kumar K V
2023-07-11 16:09       ` David Hildenbrand
2023-07-12 20:07         ` John Hubbard
2023-07-13  9:08           ` David Hildenbrand
2023-07-14 23:14             ` John Hubbard
2023-07-11  4:48 ` [PATCH v3 4/7] mm/hotplug: Allow pageblock alignment via altmap reservation Aneesh Kumar K.V
2023-07-11  6:21   ` Huang, Ying [this message]
2023-07-11  8:20     ` Aneesh Kumar K V
2023-07-11 17:19   ` David Hildenbrand
2023-07-12  3:16     ` Aneesh Kumar K V
2023-07-12  7:22       ` David Hildenbrand
2023-07-12 13:50         ` Aneesh Kumar K.V
2023-07-12 19:06           ` David Hildenbrand
2023-07-11  4:48 ` [PATCH v3 5/7] powerpc/book3s64/memhotplug: Enable memmap on memory for radix Aneesh Kumar K.V
2023-07-11 15:26   ` David Hildenbrand
2023-07-11 15:40     ` Aneesh Kumar K V
2023-07-11 15:44       ` David Hildenbrand
2023-07-11 15:46         ` Aneesh Kumar K V
2023-07-11  4:48 ` [PATCH v3 6/7] dax/kmem: Always enroll hotplugged memory for memmap_on_memory Aneesh Kumar K.V
2023-07-11 10:21   ` David Hildenbrand
2023-07-11  4:48 ` [PATCH v3 7/7] mm/hotplug: Embed vmem_altmap details in memory block Aneesh Kumar K.V
