* [PATCH v4 1/4] arm64: mm: use arm64_dma_phys_limit instead of calling max_zone_dma_phys()
2019-09-06 12:06 [PATCH v4 0/4] Raspberry Pi 4 DMA addressing support Nicolas Saenz Julienne
@ 2019-09-06 12:06 ` Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 2/4] arm64: rename variables used to calculate ZONE_DMA32's size Nicolas Saenz Julienne
` (2 subsequent siblings)
3 siblings, 0 replies; 7+ messages in thread
From: Nicolas Saenz Julienne @ 2019-09-06 12:06 UTC (permalink / raw)
To: catalin.marinas, hch, wahrenst, marc.zyngier, robh+dt,
linux-arm-kernel, linux-mm, linux-riscv, linux-kernel
Cc: f.fainelli, will, robin.murphy, nsaenzjulienne, mbrugger,
linux-rpi-kernel, phill, m.szyprowski
By the time we call zone_sizes_init(), arm64_dma_phys_limit already
contains the result of max_zone_dma_phys(). Use the variable instead of
calling the function again to save some precious CPU time.
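For context, the relevant boot ordering looks roughly like this (simplified
sketch only, not an exact trace of setup_arch(); see the diffs in this series
for the real code):

  arm64_memblock_init()
      arm64_dma_phys_limit = max_zone_dma_phys();   /* computed once here */
  ...
  bootmem_init()
      zone_sizes_init()                             /* can simply reuse it */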
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/mm/init.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index f3c795278def..6112d6c90fa8 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -181,7 +181,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma_phys_limit);
#endif
max_zone_pfns[ZONE_NORMAL] = max;
--
2.23.0
* [PATCH v4 2/4] arm64: rename variables used to calculate ZONE_DMA32's size
2019-09-06 12:06 [PATCH v4 0/4] Raspberry Pi 4 DMA addressing support Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 1/4] arm64: mm: use arm64_dma_phys_limit instead of calling max_zone_dma_phys() Nicolas Saenz Julienne
@ 2019-09-06 12:06 ` Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32 Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 4/4] mm: refresh ZONE_DMA and ZONE_DMA32 comments in 'enum zone_type' Nicolas Saenz Julienne
3 siblings, 0 replies; 7+ messages in thread
From: Nicolas Saenz Julienne @ 2019-09-06 12:06 UTC (permalink / raw)
To: catalin.marinas, hch, wahrenst, marc.zyngier, robh+dt,
linux-arm-kernel, linux-mm, linux-riscv, linux-kernel
Cc: f.fainelli, will, robin.murphy, nsaenzjulienne, mbrugger,
linux-rpi-kernel, phill, m.szyprowski
Let the names indicate that these variables are used to calculate
ZONE_DMA32's size, as opposed to ZONE_DMA's.
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
arch/arm64/mm/init.c | 30 +++++++++++++++---------------
1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 098c0f5bedf6..8e9bc64c5878 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -56,7 +56,7 @@ EXPORT_SYMBOL(physvirt_offset);
struct page *vmemmap __ro_after_init;
EXPORT_SYMBOL(vmemmap);
-phys_addr_t arm64_dma_phys_limit __ro_after_init;
+phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
/*
@@ -174,7 +174,7 @@ static void __init reserve_elfcorehdr(void)
* currently assumes that for memory starting above 4G, 32-bit devices will
* use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma_phys(void)
+static phys_addr_t __init max_zone_dma32_phys(void)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
return min(offset + (1ULL << 32), memblock_end_of_DRAM());
@@ -187,7 +187,7 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
#ifdef CONFIG_ZONE_DMA32
- max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma_phys_limit);
+ max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
max_zone_pfns[ZONE_NORMAL] = max;
@@ -200,16 +200,16 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
- unsigned long max_dma = min;
+ unsigned long max_dma32 = min;
memset(zone_size, 0, sizeof(zone_size));
/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
- max_dma = PFN_DOWN(arm64_dma_phys_limit);
- zone_size[ZONE_DMA32] = max_dma - min;
+ max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
+ zone_size[ZONE_DMA32] = max_dma32 - min;
#endif
- zone_size[ZONE_NORMAL] = max - max_dma;
+ zone_size[ZONE_NORMAL] = max - max_dma32;
memcpy(zhole_size, zone_size, sizeof(zhole_size));
@@ -221,14 +221,14 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
continue;
#ifdef CONFIG_ZONE_DMA32
- if (start < max_dma) {
- unsigned long dma_end = min(end, max_dma);
+ if (start < max_dma32) {
+ unsigned long dma_end = min(end, max_dma32);
zhole_size[ZONE_DMA32] -= dma_end - start;
}
#endif
- if (end > max_dma) {
+ if (end > max_dma32) {
unsigned long normal_end = min(end, max);
- unsigned long normal_start = max(start, max_dma);
+ unsigned long normal_start = max(start, max_dma32);
zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
}
}
@@ -420,9 +420,9 @@ void __init arm64_memblock_init(void)
/* 4GB maximum for 32-bit only capable devices */
if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma_phys_limit = max_zone_dma_phys();
+ arm64_dma32_phys_limit = max_zone_dma32_phys();
else
- arm64_dma_phys_limit = PHYS_MASK + 1;
+ arm64_dma32_phys_limit = PHYS_MASK + 1;
reserve_crashkernel();
@@ -430,7 +430,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
- dma_contiguous_reserve(arm64_dma_phys_limit);
+ dma_contiguous_reserve(arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -534,7 +534,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
- max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
+ max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
else
swiotlb_force = SWIOTLB_NO_FORCE;
--
2.23.0
* [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32
2019-09-06 12:06 [PATCH v4 0/4] Raspberry Pi 4 DMA addressing support Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 1/4] arm64: mm: use arm64_dma_phys_limit instead of calling max_zone_dma_phys() Nicolas Saenz Julienne
2019-09-06 12:06 ` [PATCH v4 2/4] arm64: rename variables used to calculate ZONE_DMA32's size Nicolas Saenz Julienne
@ 2019-09-06 12:06 ` Nicolas Saenz Julienne
2019-09-08 21:27 ` Catalin Marinas
2019-09-06 12:06 ` [PATCH v4 4/4] mm: refresh ZONE_DMA and ZONE_DMA32 comments in 'enum zone_type' Nicolas Saenz Julienne
3 siblings, 1 reply; 7+ messages in thread
From: Nicolas Saenz Julienne @ 2019-09-06 12:06 UTC (permalink / raw)
To: catalin.marinas, hch, wahrenst, marc.zyngier, robh+dt,
linux-arm-kernel, linux-mm, linux-riscv, Will Deacon
Cc: f.fainelli, robin.murphy, nsaenzjulienne, linux-kernel, mbrugger,
linux-rpi-kernel, phill, m.szyprowski
So far all arm64 devices have supported 32-bit DMA masks for their
peripherals. This is no longer true for the Raspberry Pi 4, as most of
its peripherals can only address the first GB of memory out of a total
of up to 4 GB.
This goes against ZONE_DMA32's intent, as ZONE_DMA32 is expected to be
fully addressable with a 32-bit mask. So it was decided to re-introduce
ZONE_DMA on arm64.
ZONE_DMA will contain the lower 1G of memory, which is currently the
memory area addressable by any peripheral on an arm64 device.
ZONE_DMA32 will contain the rest of the 32-bit addressable memory.
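For illustration only (not part of the patch), a stand-alone user-space sketch
of how the two limits fall out on a board whose DRAM starts at 0 and spans
4 GB, as on a Raspberry Pi 4. max_zone_phys() here is an assumed
re-implementation of the helper introduced below, with the DRAM range passed
in explicitly:

#include <stdio.h>
#include <stdint.h>

/* User-space mock of the kernel helper: clamp a zone's end to DRAM. */
static uint64_t max_zone_phys(uint64_t dram_start, uint64_t dram_end,
                              unsigned int zone_bits)
{
        /* Keep the upper 32 bits of the DRAM base, like GENMASK_ULL(63, 32). */
        uint64_t offset = dram_start & ~((1ULL << 32) - 1);
        uint64_t limit = offset + (1ULL << zone_bits);

        return limit < dram_end ? limit : dram_end;
}

int main(void)
{
        uint64_t start = 0x0, end = 4ULL << 30;   /* 4 GB of DRAM */

        printf("ZONE_DMA   ends at 0x%llx\n",
               (unsigned long long)max_zone_phys(start, end, 30));
        printf("ZONE_DMA32 ends at 0x%llx\n",
               (unsigned long long)max_zone_phys(start, end, 32));
        return 0;
}

With those numbers ZONE_DMA covers 0-1 GB, ZONE_DMA32 covers the remaining
1-4 GB, and ZONE_NORMAL ends up empty.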
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
---
Changes in v4:
- Fixed issue when NUMA=n and ZONE_DMA=n
- Merged two max_zone_dma*_phys() functions
Changes in v3:
- Used fixed size ZONE_DMA
- Fix check before swiotlb_init()
Changes in v2:
- Update comment to reflect new zones split
- ZONE_DMA will never be left empty
arch/arm64/Kconfig | 4 +++
arch/arm64/include/asm/page.h | 2 ++
arch/arm64/mm/init.c | 51 ++++++++++++++++++++++++++---------
3 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 6b6362b83004..2dbe0165bd15 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -267,6 +267,10 @@ config GENERIC_CSUM
config GENERIC_CALIBRATE_DELAY
def_bool y
+config ZONE_DMA
+ bool "Support DMA zone" if EXPERT
+ default y
+
config ZONE_DMA32
bool "Support DMA32 zone" if EXPERT
default y
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index d39ddb258a04..7b8c98830101 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -38,4 +38,6 @@ extern int pfn_valid(unsigned long);
#include <asm-generic/getorder.h>
+#define ARCH_ZONE_DMA_BITS 30
+
#endif
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 8e9bc64c5878..95ee7e839c75 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -56,6 +56,13 @@ EXPORT_SYMBOL(physvirt_offset);
struct page *vmemmap __ro_after_init;
EXPORT_SYMBOL(vmemmap);
+/*
+ * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
+ * memory as some devices, namely the Raspberry Pi 4, have peripherals with
+ * this limited view of the memory. ZONE_DMA32 will cover the rest of the 32
+ * bit addressable memory area.
+ */
+phys_addr_t arm64_dma_phys_limit __ro_after_init;
phys_addr_t arm64_dma32_phys_limit __ro_after_init;
#ifdef CONFIG_KEXEC_CORE
@@ -169,15 +176,16 @@ static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
+
/*
- * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
- * currently assumes that for memory starting above 4G, 32-bit devices will
- * use a DMA offset.
+ * Return the maximum physical address for a zone with a given address size
+ * limit. It currently assumes that for memory starting above 4G, 32-bit
+ * devices will use a DMA offset.
*/
-static phys_addr_t __init max_zone_dma32_phys(void)
+static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
- return min(offset + (1ULL << 32), memblock_end_of_DRAM());
+ return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
}
#ifdef CONFIG_NUMA
@@ -186,6 +194,9 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};
+#ifdef CONFIG_ZONE_DMA
+ max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
+#endif
#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
@@ -201,13 +212,18 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
struct memblock_region *reg;
unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
unsigned long max_dma32 = min;
+ unsigned long max_dma = min;
memset(zone_size, 0, sizeof(zone_size));
- /* 4GB maximum for 32-bit only capable devices */
+#ifdef CONFIG_ZONE_DMA
+ max_dma = PFN_DOWN(arm64_dma_phys_limit);
+ zone_size[ZONE_DMA] = max_dma - min;
+ max_dma32 = max_dma;
+#endif
#ifdef CONFIG_ZONE_DMA32
max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
- zone_size[ZONE_DMA32] = max_dma32 - min;
+ zone_size[ZONE_DMA32] = max_dma32 - max_dma;
#endif
zone_size[ZONE_NORMAL] = max - max_dma32;
@@ -219,11 +235,17 @@ static void __init zone_sizes_init(unsigned long min, unsigned long max)
if (start >= max)
continue;
-
+#ifdef CONFIG_ZONE_DMA
+ if (start < max_dma) {
+ unsigned long dma_end = min_not_zero(end, max_dma);
+ zhole_size[ZONE_DMA] -= dma_end - start;
+ }
+#endif
#ifdef CONFIG_ZONE_DMA32
if (start < max_dma32) {
- unsigned long dma_end = min(end, max_dma32);
- zhole_size[ZONE_DMA32] -= dma_end - start;
+ unsigned long dma32_end = min(end, max_dma32);
+ unsigned long dma32_start = max(start, max_dma);
+ zhole_size[ZONE_DMA32] -= dma32_end - dma32_start;
}
#endif
if (end > max_dma32) {
@@ -418,9 +440,11 @@ void __init arm64_memblock_init(void)
early_init_fdt_scan_reserved_mem();
- /* 4GB maximum for 32-bit only capable devices */
+ if (IS_ENABLED(CONFIG_ZONE_DMA))
+ arm64_dma_phys_limit = max_zone_phys(ARCH_ZONE_DMA_BITS);
+
if (IS_ENABLED(CONFIG_ZONE_DMA32))
- arm64_dma32_phys_limit = max_zone_dma32_phys();
+ arm64_dma32_phys_limit = max_zone_phys(32);
else
arm64_dma32_phys_limit = PHYS_MASK + 1;
@@ -430,7 +454,7 @@ void __init arm64_memblock_init(void)
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
- dma_contiguous_reserve(arm64_dma32_phys_limit);
+ dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
}
void __init bootmem_init(void)
@@ -534,6 +558,7 @@ static void __init free_unused_memmap(void)
void __init mem_init(void)
{
if (swiotlb_force == SWIOTLB_FORCE ||
+ max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT) ||
max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
swiotlb_init(1);
else
--
2.23.0
* Re: [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32
2019-09-06 12:06 ` [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32 Nicolas Saenz Julienne
@ 2019-09-08 21:27 ` Catalin Marinas
2019-09-09 8:46 ` Nicolas Saenz Julienne
0 siblings, 1 reply; 7+ messages in thread
From: Catalin Marinas @ 2019-09-08 21:27 UTC (permalink / raw)
To: Nicolas Saenz Julienne
Cc: hch, wahrenst, marc.zyngier, robh+dt, linux-arm-kernel, linux-mm,
linux-riscv, Will Deacon, f.fainelli, robin.murphy, linux-kernel,
mbrugger, linux-rpi-kernel, phill, m.szyprowski
On Fri, Sep 06, 2019 at 02:06:14PM +0200, Nicolas Saenz Julienne wrote:
> @@ -430,7 +454,7 @@ void __init arm64_memblock_init(void)
>
> high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
>
> - dma_contiguous_reserve(arm64_dma32_phys_limit);
> + dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
> }
>
> void __init bootmem_init(void)
> @@ -534,6 +558,7 @@ static void __init free_unused_memmap(void)
> void __init mem_init(void)
> {
> if (swiotlb_force == SWIOTLB_FORCE ||
> + max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT) ||
> max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
> swiotlb_init(1);
So here we want to initialise the swiotlb only if we need bounce
buffers. Prior to this patch, we assumed that swiotlb is needed if
max_pfn is beyond the reach of 32-bit devices. With ZONE_DMA, we need to
lower this limit to arm64_dma_phys_limit.
If ZONE_DMA is enabled, just comparing max_pfn with arm64_dma_phys_limit
is sufficient since the dma32 limit is always higher. However, if
ZONE_DMA is disabled, arm64_dma_phys_limit is 0, so we may initialise
swiotlb unnecessarily. I guess you need a similar check to the
dma_contiguous_reserve() above.
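For reference, a minimal sketch of the kind of combined check being suggested
here, reusing the same '?:' fallback as the dma_contiguous_reserve() call
above (illustrative only, not the final patch):

	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > ((arm64_dma_phys_limit ? : arm64_dma32_phys_limit) >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;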
With that:
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Unless there are other objections, I can queue this series for 5.5 in a
few weeks time (too late for 5.4).
--
Catalin
* Re: [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32
2019-09-08 21:27 ` Catalin Marinas
@ 2019-09-09 8:46 ` Nicolas Saenz Julienne
0 siblings, 0 replies; 7+ messages in thread
From: Nicolas Saenz Julienne @ 2019-09-09 8:46 UTC (permalink / raw)
To: Catalin Marinas
Cc: f.fainelli, mbrugger, marc.zyngier, robin.murphy, linux-kernel,
linux-mm, robh+dt, wahrenst, m.szyprowski, linux-riscv, phill,
Will Deacon, hch, linux-arm-kernel, linux-rpi-kernel
On Sun, 2019-09-08 at 22:27 +0100, Catalin Marinas wrote:
> On Fri, Sep 06, 2019 at 02:06:14PM +0200, Nicolas Saenz Julienne wrote:
> > @@ -430,7 +454,7 @@ void __init arm64_memblock_init(void)
> >
> > high_memory = __va(memblock_end_of_DRAM() - 1) + 1;
> >
> > - dma_contiguous_reserve(arm64_dma32_phys_limit);
> > + dma_contiguous_reserve(arm64_dma_phys_limit ? : arm64_dma32_phys_limit);
> > }
> >
> > void __init bootmem_init(void)
> > @@ -534,6 +558,7 @@ static void __init free_unused_memmap(void)
> > void __init mem_init(void)
> > {
> > if (swiotlb_force == SWIOTLB_FORCE ||
> > + max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT) ||
> > max_pfn > (arm64_dma32_phys_limit >> PAGE_SHIFT))
> > swiotlb_init(1);
>
> So here we want to initialise the swiotlb only if we need bounce
> buffers. Prior to this patch, we assumed that swiotlb is needed if
> max_pfn is beyond the reach of 32-bit devices. With ZONE_DMA, we need to
> lower this limit to arm64_dma_phys_limit.
>
> If ZONE_DMA is enabled, just comparing max_pfn with arm64_dma_phys_limit
> is sufficient since the dma32 limit is always higher. However, if
> ZONE_DMA is disabled, arm64_dma_phys_limit is 0, so we may initialise
> swiotlb unnecessarily. I guess you need a similar check to the
> dma_contiguous_reserve() above.
Of course.
>
> With that:
>
> Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
>
> Unless there are other objections, I can queue this series for 5.5 in a
> few weeks time (too late for 5.4).
Thanks!
* [PATCH v4 4/4] mm: refresh ZONE_DMA and ZONE_DMA32 comments in 'enum zone_type'
2019-09-06 12:06 [PATCH v4 0/4] Raspberry Pi 4 DMA addressing support Nicolas Saenz Julienne
` (2 preceding siblings ...)
2019-09-06 12:06 ` [PATCH v4 3/4] arm64: use both ZONE_DMA and ZONE_DMA32 Nicolas Saenz Julienne
@ 2019-09-06 12:06 ` Nicolas Saenz Julienne
3 siblings, 0 replies; 7+ messages in thread
From: Nicolas Saenz Julienne @ 2019-09-06 12:06 UTC (permalink / raw)
To: catalin.marinas, hch, wahrenst, marc.zyngier, robh+dt,
linux-arm-kernel, linux-mm, linux-riscv, Paul Walmsley,
Palmer Dabbelt, Albert Ou
Cc: f.fainelli, will, robin.murphy, nsaenzjulienne, linux-kernel,
mbrugger, linux-rpi-kernel, phill, m.szyprowski
These zones' usage has evolved over time and the comments were outdated.
This joins the ZONE_DMA and ZONE_DMA32 explanations into one and gives
up-to-date examples of how they are used on different architectures.
Signed-off-by: Nicolas Saenz Julienne <nsaenzjulienne@suse.de>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
---
Changes in v3:
- Update comment to match changes in arm64
Changes in v2:
- Try another approach merging both ZONE_DMA comments into one
- Address Christoph's comments
- If this approach doesn't get much traction I'll just drop the patch
from the series as it's not really essential
include/linux/mmzone.h | 45 ++++++++++++++++++++++++------------------
1 file changed, 26 insertions(+), 19 deletions(-)
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 3f38c30d2f13..bf1b916c9ecb 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -357,33 +357,40 @@ struct per_cpu_nodestat {
#endif /* !__GENERATING_BOUNDS.H */
enum zone_type {
-#ifdef CONFIG_ZONE_DMA
/*
- * ZONE_DMA is used when there are devices that are not able
- * to do DMA to all of addressable memory (ZONE_NORMAL). Then we
- * carve out the portion of memory that is needed for these devices.
- * The range is arch specific.
+ * ZONE_DMA and ZONE_DMA32 are used when there are peripherals not able
+ * to DMA to all of the addressable memory (ZONE_NORMAL).
+ * On architectures where this area covers the whole 32 bit address
+ * space ZONE_DMA32 is used. ZONE_DMA is left for the ones with smaller
+ * DMA addressing constraints. This distinction is important as a 32bit
+ * DMA mask is assumed when ZONE_DMA32 is defined. Some 64-bit
+ * platforms may need both zones as they support peripherals with
+ * different DMA addressing limitations.
+ *
+ * Some examples:
+ *
+ * - i386 and x86_64 have a fixed 16M ZONE_DMA and ZONE_DMA32 for the
+ * rest of the lower 4G.
+ *
+ * - arm only uses ZONE_DMA, the size, up to 4G, may vary depending on
+ * the specific device.
+ *
+ * - arm64 has a fixed 1G ZONE_DMA and ZONE_DMA32 for the rest of the
+ * lower 4G.
*
- * Some examples
+ * - powerpc only uses ZONE_DMA, the size, up to 2G, may vary
+ * depending on the specific device.
*
- * Architecture Limit
- * ---------------------------
- * parisc, ia64, sparc <4G
- * s390, powerpc <2G
- * arm Various
- * alpha Unlimited or 0-16MB.
+ * - s390 uses ZONE_DMA fixed to the lower 2G.
*
- * i386, x86_64 and multiple other arches
- * <16M.
+ * - ia64 and riscv only use ZONE_DMA32.
+ *
+ * - parisc uses neither.
*/
+#ifdef CONFIG_ZONE_DMA
ZONE_DMA,
#endif
#ifdef CONFIG_ZONE_DMA32
- /*
- * x86_64 needs two ZONE_DMAs because it supports devices that are
- * only able to do DMA to the lower 16M but also 32 bit devices that
- * can only do DMA areas below 4G.
- */
ZONE_DMA32,
#endif
/*
--
2.23.0