* [PATCH v2 1/6] mm/page_isolation: don't pass gfp flags to isolate_single_pageblock()
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 2/6] mm/page_isolation: don't pass gfp flags to start_isolate_page_range() David Hildenbrand
` (4 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
The flags are no longer used, so we can stop passing them to
isolate_single_pageblock().
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/page_isolation.c | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index 7e04047977cf..e680d40d96de 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -286,7 +286,6 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* within a free or in-use page.
* @boundary_pfn: pageblock-aligned pfn that a page might cross
* @flags: isolation flags
- * @gfp_flags: GFP flags used for migrating pages
* @isolate_before: isolate the pageblock before the boundary_pfn
* @skip_isolation: the flag to skip the pageblock isolation in second
* isolate_single_pageblock()
@@ -306,8 +305,7 @@ __first_valid_page(unsigned long pfn, unsigned long nr_pages)
* the in-use page then splitting the free page.
*/
static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
- gfp_t gfp_flags, bool isolate_before, bool skip_isolation,
- int migratetype)
+ bool isolate_before, bool skip_isolation, int migratetype)
{
unsigned long start_pfn;
unsigned long isolate_pageblock;
@@ -489,7 +487,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
bool skip_isolation = false;
/* isolate [isolate_start, isolate_start + pageblock_nr_pages) pageblock */
- ret = isolate_single_pageblock(isolate_start, flags, gfp_flags, false,
+ ret = isolate_single_pageblock(isolate_start, flags, false,
skip_isolation, migratetype);
if (ret)
return ret;
@@ -498,7 +496,7 @@ int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
skip_isolation = true;
/* isolate [isolate_end - pageblock_nr_pages, isolate_end) pageblock */
- ret = isolate_single_pageblock(isolate_end, flags, gfp_flags, true,
+ ret = isolate_single_pageblock(isolate_end, flags, true,
skip_isolation, migratetype);
if (ret) {
unset_migratetype_isolate(pfn_to_page(isolate_start), migratetype);
--
2.47.1
* [PATCH v2 2/6] mm/page_isolation: don't pass gfp flags to start_isolate_page_range()
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 1/6] mm/page_isolation: don't pass gfp flags to isolate_single_pageblock() David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 3/6] mm/page_alloc: make __alloc_contig_migrate_range() static David Hildenbrand
` (3 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
The parameter is unused, so let's stop passing it.
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
include/linux/page-isolation.h | 2 +-
mm/memory_hotplug.c | 3 +--
mm/page_alloc.c | 2 +-
mm/page_isolation.c | 4 +---
4 files changed, 4 insertions(+), 7 deletions(-)
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 73dc2c1841ec..898bb788243b 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -31,7 +31,7 @@ bool move_freepages_block_isolate(struct zone *zone, struct page *page,
int migratetype);
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags);
+ int migratetype, int flags);
void undo_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
int migratetype);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index c43b4e7fb298..9b184ba064a0 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -1992,8 +1992,7 @@ int offline_pages(unsigned long start_pfn, unsigned long nr_pages,
/* set above range as isolated */
ret = start_isolate_page_range(start_pfn, end_pfn,
MIGRATE_MOVABLE,
- MEMORY_OFFLINE | REPORT_FAILURE,
- GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL);
+ MEMORY_OFFLINE | REPORT_FAILURE);
if (ret) {
reason = "failure to isolate range";
goto failed_removal_pcplists_disabled;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index cc3296cf8c95..f371fbf2145b 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6451,7 +6451,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
* put back to page allocator so that buddy can use them.
*/
- ret = start_isolate_page_range(start, end, migratetype, 0, gfp_mask);
+ ret = start_isolate_page_range(start, end, migratetype, 0);
if (ret)
goto done;
diff --git a/mm/page_isolation.c b/mm/page_isolation.c
index e680d40d96de..c608e9d72865 100644
--- a/mm/page_isolation.c
+++ b/mm/page_isolation.c
@@ -442,8 +442,6 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* and PageOffline() pages.
* REPORT_FAILURE - report details about the failure to
* isolate the range
- * @gfp_flags: GFP flags used for migrating pages that sit across the
- * range boundaries.
*
* Making page-allocation-type to be MIGRATE_ISOLATE means free pages in
* the range will never be allocated. Any free pages and pages freed in the
@@ -476,7 +474,7 @@ static int isolate_single_pageblock(unsigned long boundary_pfn, int flags,
* Return: 0 on success and -EBUSY if any part of range cannot be isolated.
*/
int start_isolate_page_range(unsigned long start_pfn, unsigned long end_pfn,
- int migratetype, int flags, gfp_t gfp_flags)
+ int migratetype, int flags)
{
unsigned long pfn;
struct page *page;
--
2.47.1
* [PATCH v2 3/6] mm/page_alloc: make __alloc_contig_migrate_range() static
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 1/6] mm/page_isolation: don't pass gfp flags to isolate_single_pageblock() David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 2/6] mm/page_isolation: don't pass gfp flags to start_isolate_page_range() David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 4/6] mm/page_alloc: sort out the alloc_contig_range() gfp flags mess David Hildenbrand
` (2 subsequent siblings)
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
The single user is in page_alloc.c.
Reviewed-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/internal.h | 4 ----
mm/page_alloc.c | 5 ++---
2 files changed, 2 insertions(+), 7 deletions(-)
diff --git a/mm/internal.h b/mm/internal.h
index 74713b44bedb..4bd3685c33ef 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -839,10 +839,6 @@ int
isolate_migratepages_range(struct compact_control *cc,
unsigned long low_pfn, unsigned long end_pfn);
-int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end,
- int migratetype);
-
/* Free whole pageblock and set its migration type to MIGRATE_CMA. */
void init_cma_reserved_pageblock(struct page *page);
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index f371fbf2145b..ce7589a4ec01 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6284,9 +6284,8 @@ static void alloc_contig_dump_pages(struct list_head *page_list)
* @migratetype: using migratetype to filter the type of migration in
* trace_mm_alloc_contig_migrate_range_info.
*/
-int __alloc_contig_migrate_range(struct compact_control *cc,
- unsigned long start, unsigned long end,
- int migratetype)
+static int __alloc_contig_migrate_range(struct compact_control *cc,
+ unsigned long start, unsigned long end, int migratetype)
{
/* This function is based on compact_zone() from compaction.c. */
unsigned int nr_reclaimed;
--
2.47.1
* [PATCH v2 4/6] mm/page_alloc: sort out the alloc_contig_range() gfp flags mess
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
` (2 preceding siblings ...)
2024-12-03 8:37 ` [PATCH v2 3/6] mm/page_alloc: make __alloc_contig_migrate_range() static David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 5/6] mm/page_alloc: forward the gfp flags from alloc_contig_range() to post_alloc_hook() David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 6/6] powernv/memtrace: use __GFP_ZERO with alloc_contig_pages() David Hildenbrand
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
It's all a bit complicated for alloc_contig_range(). For example, we don't
support many flags, so let's start bailing out on unsupported
ones -- ignoring the placement hints, as we are already given the range
to allocate.
While we currently set cc.gfp_mask, in __alloc_contig_migrate_range() we
simply create yet another GFP mask whereby we ignore the reclaim flags
specified by the caller. That looks very inconsistent.
Let's clean it up, constructing the gfp flags used for
compaction/migration exactly once. Update the documentation of the
gfp_mask parameter for alloc_contig_range() and alloc_contig_pages().
Acked-by: Zi Yan <ziy@nvidia.com>
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/page_alloc.c | 48 ++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 44 insertions(+), 4 deletions(-)
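For readers following the new mask logic, here is a standalone C sketch of
the accept/reject behavior. The bit values are placeholders rather than the
kernel's real gfp.h definitions, and the initial placement-hint stripping is
left out; only the filtering itself mirrors __alloc_contig_verify_gfp_mask():

  /*
   * Placeholder bit values for illustration only -- NOT the kernel's
   * definitions. Only the filtering logic mirrors the patch below.
   */
  #include <stdio.h>

  typedef unsigned int gfp_t;

  #define __GFP_IO            (1u << 0)
  #define __GFP_FS            (1u << 1)
  #define __GFP_RECLAIM       (1u << 2)
  #define __GFP_COMP          (1u << 3)
  #define __GFP_RETRY_MAYFAIL (1u << 4)
  #define __GFP_NOWARN        (1u << 5)
  #define __GFP_NOFAIL        (1u << 6)   /* deliberately unsupported */
  #define __GFP_HARDWALL      (1u << 7)
  #define __GFP_MOVABLE       (1u << 8)

  static int verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
  {
          const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
          const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL |
                                    __GFP_NOWARN;
          const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;

          /* Reject anything outside the supported reclaim/action flags. */
          if (gfp_mask & ~(reclaim_mask | action_mask))
                  return -1;

          /* Build the mask used for compaction/migration/reclaim. */
          *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
                         __GFP_HARDWALL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
          return 0;
  }

  int main(void)
  {
          gfp_t cc_mask;

          /* Accepted: only supported reclaim/action flags set. */
          printf("reclaim|nowarn -> %d\n",
                 verify_gfp_mask(__GFP_RECLAIM | __GFP_NOWARN, &cc_mask));
          /* Rejected: __GFP_NOFAIL lies outside the supported set. */
          printf("nofail         -> %d\n",
                 verify_gfp_mask(__GFP_NOFAIL, &cc_mask));
          return 0;
  }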
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ce7589a4ec01..54594cc4f650 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6294,7 +6294,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
int ret = 0;
struct migration_target_control mtc = {
.nid = zone_to_nid(cc->zone),
- .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
+ .gfp_mask = cc->gfp_mask,
.reason = MR_CONTIG_RANGE,
};
struct page *page;
@@ -6390,6 +6390,39 @@ static void split_free_pages(struct list_head *list)
}
}
+static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
+{
+ const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+ const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+
+ /*
+ * We are given the range to allocate; node, mobility and placement
+ * hints are irrelevant at this point. We'll simply ignore them.
+ */
+ gfp_mask &= ~(GFP_ZONEMASK | __GFP_RECLAIMABLE | __GFP_WRITE |
+ __GFP_HARDWALL | __GFP_THISNODE | __GFP_MOVABLE);
+
+ /*
+ * We only support most reclaim flags (but not NOFAIL/NORETRY), and
+ * selected action flags.
+ */
+ if (gfp_mask & ~(reclaim_mask | action_mask))
+ return -EINVAL;
+
+ /*
+ * Flags to control page compaction/migration/reclaim, to free up our
+ * page range. Migratable pages are movable, __GFP_MOVABLE is implied
+ * for them.
+ *
+ * Traditionally we always had __GFP_HARDWALL|__GFP_RETRY_MAYFAIL set,
+ * keep doing that to not degrade callers.
+ */
+ *gfp_cc_mask = (gfp_mask & (reclaim_mask | cc_action_mask)) |
+ __GFP_HARDWALL | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL;
+ return 0;
+}
+
/**
* alloc_contig_range() -- tries to allocate given range of pages
* @start: start PFN to allocate
@@ -6398,7 +6431,9 @@ static void split_free_pages(struct list_head *list)
* #MIGRATE_MOVABLE or #MIGRATE_CMA). All pageblocks
* in range must have the same migratetype and it must
* be either of the two.
- * @gfp_mask: GFP mask to use during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints are ignored; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
*
* The PFN range does not have to be pageblock aligned. The PFN range must
* belong to a single zone.
@@ -6424,11 +6459,14 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
.mode = MIGRATE_SYNC,
.ignore_skip_hint = true,
.no_set_skip_hint = true,
- .gfp_mask = current_gfp_context(gfp_mask),
.alloc_contig = true,
};
INIT_LIST_HEAD(&cc.migratepages);
+ gfp_mask = current_gfp_context(gfp_mask);
+ if (__alloc_contig_verify_gfp_mask(gfp_mask, (gfp_t *)&cc.gfp_mask))
+ return -EINVAL;
+
/*
* What we do here is we mark all pageblocks in range as
* MIGRATE_ISOLATE. Because pageblock and max order pages may
@@ -6571,7 +6609,9 @@ static bool zone_spans_last_pfn(const struct zone *zone,
/**
* alloc_contig_pages() -- tries to find and allocate contiguous range of pages
* @nr_pages: Number of contiguous pages to allocate
- * @gfp_mask: GFP mask to limit search and used during compaction
+ * @gfp_mask: GFP mask. Node/zone/placement hints limit the search; only some
+ * action and reclaim modifiers are supported. Reclaim modifiers
+ * control allocation behavior during compaction/migration/reclaim.
* @nid: Target node
* @nodemask: Mask for other possible nodes
*
--
2.47.1
* [PATCH v2 5/6] mm/page_alloc: forward the gfp flags from alloc_contig_range() to post_alloc_hook()
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
` (3 preceding siblings ...)
2024-12-03 8:37 ` [PATCH v2 4/6] mm/page_alloc: sort out the alloc_contig_range() gfp flags mess David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
2024-12-03 8:37 ` [PATCH v2 6/6] powernv/memtrace: use __GFP_ZERO with alloc_contig_pages() David Hildenbrand
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
In the __GFP_COMP case, we already pass the gfp_flags to
prep_new_page()->post_alloc_hook(). However, in the !__GFP_COMP case, we
essentially pass only a hardcoded __GFP_MOVABLE to post_alloc_hook(),
preventing some action modifiers from being effective.
Let's pass our now properly adjusted gfp flags there as well.
This way, we can now support __GFP_ZERO for alloc_contig_*().
As a side effect, we now also support __GFP_SKIP_ZERO and __GFP_ZEROTAGS;
but we'll keep the more special stuff (KASAN, NOLOCKDEP) disabled for
now.
It's worth noting that with __GFP_ZERO, we might unnecessarily zero pages
when we have to release part of our range using free_contig_range() again.
This can be optimized in the future, if ever required; the caller we'll
be converting next (powernv/memtrace) won't trigger this.
Signed-off-by: David Hildenbrand <david@redhat.com>
---
mm/page_alloc.c | 9 +++++----
1 file changed, 5 insertions(+), 4 deletions(-)
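To make the free_contig_range() caveat above concrete, here is a
hypothetical caller sketch (kernel context, not from this series; "nid"
and the page counts are assumed purely for illustration):

  struct page *page;

  /* Eight pages, zeroed on allocation thanks to this patch. */
  page = alloc_contig_pages(8, GFP_KERNEL | __GFP_NOWARN | __GFP_ZERO,
                            nid, NULL);
  if (!page)
          return -ENOMEM;

  /* Suppose only six of the eight pages end up being used ... */

  /* The two pages handed back here were zeroed for nothing. */
  free_contig_range(page_to_pfn(page) + 6, 2);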
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 54594cc4f650..71d70bc0ad79 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -6364,7 +6364,7 @@ static int __alloc_contig_migrate_range(struct compact_control *cc,
return (ret < 0) ? ret : 0;
}
-static void split_free_pages(struct list_head *list)
+static void split_free_pages(struct list_head *list, gfp_t gfp_mask)
{
int order;
@@ -6375,7 +6375,7 @@ static void split_free_pages(struct list_head *list)
list_for_each_entry_safe(page, next, &list[order], lru) {
int i;
- post_alloc_hook(page, order, __GFP_MOVABLE);
+ post_alloc_hook(page, order, gfp_mask);
set_page_refcounted(page);
if (!order)
continue;
@@ -6393,7 +6393,8 @@ static void split_free_pages(struct list_head *list)
static int __alloc_contig_verify_gfp_mask(gfp_t gfp_mask, gfp_t *gfp_cc_mask)
{
const gfp_t reclaim_mask = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
- const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
+ const gfp_t action_mask = __GFP_COMP | __GFP_RETRY_MAYFAIL | __GFP_NOWARN |
+ __GFP_ZERO | __GFP_ZEROTAGS | __GFP_SKIP_ZERO;
const gfp_t cc_action_mask = __GFP_RETRY_MAYFAIL | __GFP_NOWARN;
/*
@@ -6541,7 +6542,7 @@ int alloc_contig_range_noprof(unsigned long start, unsigned long end,
}
if (!(gfp_mask & __GFP_COMP)) {
- split_free_pages(cc.freepages);
+ split_free_pages(cc.freepages, gfp_mask);
/* Free head and tail (if any) */
if (start != outer_start)
--
2.47.1
* [PATCH v2 6/6] powernv/memtrace: use __GFP_ZERO with alloc_contig_pages()
2024-12-03 8:37 [PATCH v2 0/6] mm/page_alloc: gfp flags cleanups for alloc_contig_*() David Hildenbrand
` (4 preceding siblings ...)
2024-12-03 8:37 ` [PATCH v2 5/6] mm/page_alloc: forward the gfp flags from alloc_contig_range() to post_alloc_hook() David Hildenbrand
@ 2024-12-03 8:37 ` David Hildenbrand
5 siblings, 0 replies; 7+ messages in thread
From: David Hildenbrand @ 2024-12-03 8:37 UTC (permalink / raw)
To: linux-kernel
Cc: linux-mm, linuxppc-dev, David Hildenbrand, Andrew Morton,
Oscar Salvador, Zi Yan, Michael Ellerman, Nicholas Piggin,
Christophe Leroy, Naveen N Rao, Madhavan Srinivasan
alloc_contig_pages()->alloc_contig_range() now supports __GFP_ZERO,
so let's use that instead to resolve our TODO.
Signed-off-by: David Hildenbrand <david@redhat.com>
---
arch/powerpc/platforms/powernv/memtrace.c | 31 +++++------------------
1 file changed, 6 insertions(+), 25 deletions(-)
diff --git a/arch/powerpc/platforms/powernv/memtrace.c b/arch/powerpc/platforms/powernv/memtrace.c
index 877720c64515..4ac9808e55a4 100644
--- a/arch/powerpc/platforms/powernv/memtrace.c
+++ b/arch/powerpc/platforms/powernv/memtrace.c
@@ -88,26 +88,6 @@ static void flush_dcache_range_chunked(unsigned long start, unsigned long stop,
}
}
-static void memtrace_clear_range(unsigned long start_pfn,
- unsigned long nr_pages)
-{
- unsigned long pfn;
-
- /* As HIGHMEM does not apply, use clear_page() directly. */
- for (pfn = start_pfn; pfn < start_pfn + nr_pages; pfn++) {
- if (IS_ALIGNED(pfn, PAGES_PER_SECTION))
- cond_resched();
- clear_page(__va(PFN_PHYS(pfn)));
- }
- /*
- * Before we go ahead and use this range as cache inhibited range
- * flush the cache.
- */
- flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
- (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
- FLUSH_CHUNK_SIZE);
-}
-
static u64 memtrace_alloc_node(u32 nid, u64 size)
{
const unsigned long nr_pages = PHYS_PFN(size);
@@ -119,17 +99,18 @@ static u64 memtrace_alloc_node(u32 nid, u64 size)
* by alloc_contig_pages().
*/
page = alloc_contig_pages(nr_pages, GFP_KERNEL | __GFP_THISNODE |
- __GFP_NOWARN, nid, NULL);
+ __GFP_NOWARN | __GFP_ZERO, nid, NULL);
if (!page)
return 0;
start_pfn = page_to_pfn(page);
/*
- * Clear the range while we still have a linear mapping.
- *
- * TODO: use __GFP_ZERO with alloc_contig_pages() once supported.
+ * Before we go ahead and use this range as cache inhibited range
+ * flush the cache.
*/
- memtrace_clear_range(start_pfn, nr_pages);
+ flush_dcache_range_chunked((unsigned long)pfn_to_kaddr(start_pfn),
+ (unsigned long)pfn_to_kaddr(start_pfn + nr_pages),
+ FLUSH_CHUNK_SIZE);
/*
* Set pages PageOffline(), to indicate that nobody (e.g., hibernation,
--
2.47.1