From: Ahmad Fatoum <a.fatoum@pengutronix.de>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <a.fatoum@pengutronix.de>
Subject: [PATCH 11/22] ARM: mmu: make force_pages a maptype_t flag
Date: Wed, 6 Aug 2025 14:37:03 +0200
Message-ID: <20250806123714.2092620-12-a.fatoum@pengutronix.de>
In-Reply-To: <20250806123714.2092620-1-a.fatoum@pengutronix.de>
The case with force_pages == false is the default and having to write an
extra parameter at every call site is needless visual clutter. Especially
if we are going to add new flags later, it's more readable to OR them into
a single flags parameter instead of passing multiple bool parameters.
As map types proper are compared using only the lower 16 bits, bit 31 is
free to serve as the new ARCH_MAP_FLAG_PAGEWISE flag.
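For illustration, a pagewise call site that previously read

	__arch_remap_range(virt_addr, phys_addr, size, map_type, true);

now becomes

	__arch_remap_range(virt_addr, phys_addr, size,
			   map_type | ARCH_MAP_FLAG_PAGEWISE);

while the common force_pages == false call sites just drop the trailing
argument.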
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
arch/arm/cpu/mmu-common.h | 3 +++
arch/arm/cpu/mmu_32.c | 18 ++++++++++--------
arch/arm/cpu/mmu_64.c | 21 +++++++++++----------
3 files changed, 24 insertions(+), 18 deletions(-)
diff --git a/arch/arm/cpu/mmu-common.h b/arch/arm/cpu/mmu-common.h
index 01d081db426e..a111e15a21b4 100644
--- a/arch/arm/cpu/mmu-common.h
+++ b/arch/arm/cpu/mmu-common.h
@@ -9,10 +9,13 @@
#include <linux/ioport.h>
#include <linux/kernel.h>
#include <linux/sizes.h>
+#include <linux/bits.h>
#define ARCH_MAP_CACHED_RWX MAP_ARCH(2)
#define ARCH_MAP_CACHED_RO MAP_ARCH(3)
+#define ARCH_MAP_FLAG_PAGEWISE BIT(31)
+
struct device;
void dma_inv_range(void *ptr, size_t size);
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 4b7f370edaea..e43d9d0d4606 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -266,8 +266,9 @@ static uint32_t get_pmd_flags(maptype_t map_type)
}
static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t size,
- maptype_t map_type, bool force_pages)
+ maptype_t map_type)
{
+ bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
u32 virt_addr = (u32)_virt_addr;
u32 pte_flags, pmd_flags;
uint32_t *ttb = get_ttb();
@@ -363,16 +364,16 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
tlb_invalidate();
}
-static void early_remap_range(u32 addr, size_t size, maptype_t map_type, bool force_pages)
+static void early_remap_range(u32 addr, size_t size, maptype_t map_type)
{
- __arch_remap_range((void *)addr, addr, size, map_type, force_pages);
+ __arch_remap_range((void *)addr, addr, size, map_type);
}
int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
{
map_type = arm_mmu_maybe_skip_permissions(map_type);
- __arch_remap_range(virt_addr, phys_addr, size, map_type, false);
+ __arch_remap_range(virt_addr, phys_addr, size, map_type);
if (maptype_is_compatible(map_type, MAP_UNCACHED))
dma_inv_range(virt_addr, size);
@@ -643,7 +644,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
* map the bulk of the memory as sections to avoid allocating too many page tables
* at this early stage
*/
- early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX, false);
+ early_remap_range(membase, barebox_start - membase, ARCH_MAP_CACHED_RWX);
/*
* Map the remainder of the memory explicitly with two level page tables. This is
* the place where barebox proper ends at. In barebox proper we'll remap the code
@@ -653,10 +654,11 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
* a break-before-make sequence which we can't do when barebox proper is running
* at the location being remapped.
*/
- early_remap_range(barebox_start, barebox_size, ARCH_MAP_CACHED_RWX, true);
- early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED, false);
+ early_remap_range(barebox_start, barebox_size,
+ ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
+ early_remap_range(optee_start, OPTEE_SIZE, MAP_UNCACHED);
early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
- ARCH_MAP_CACHED_RWX, false);
+ ARCH_MAP_CACHED_RWX);
__mmu_cache_on();
}
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 0bd5e4dc98c4..6e617a15a6d7 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -146,8 +146,9 @@ static void split_block(uint64_t *pte, int level)
}
static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
- maptype_t map_type, bool force_pages)
+ maptype_t map_type)
{
+ bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
unsigned long attr = get_pte_attrs(map_type);
uint64_t *ttb = get_ttb();
uint64_t block_size;
@@ -312,9 +313,9 @@ static void flush_cacheable_pages(void *start, size_t size)
v8_flush_dcache_range(flush_start, flush_end);
}
-static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type, bool force_pages)
+static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type)
{
- __arch_remap_range(addr, addr, size, map_type, force_pages);
+ __arch_remap_range(addr, addr, size, map_type);
}
int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
@@ -324,7 +325,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
if (!maptype_is_compatible(map_type, MAP_CACHED))
flush_cacheable_pages(virt_addr, size);
- return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type, false);
+ return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type);
}
static void mmu_enable(void)
@@ -419,7 +420,7 @@ static void early_init_range(size_t total_level0_tables)
uint64_t addr = 0;
while (total_level0_tables--) {
- early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED, false);
+ early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED);
split_block(ttb, 0);
addr += L0_XLAT_SIZE;
ttb++;
@@ -451,7 +452,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
*/
early_init_range(2);
- early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX, false);
+ early_remap_range(membase, memsize, ARCH_MAP_CACHED_RWX);
if (optee_get_membase(&optee_membase)) {
optee_membase = membase + memsize - OPTEE_SIZE;
@@ -459,18 +460,18 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
barebox_size = optee_membase - barebox_start;
early_remap_range(optee_membase - barebox_size, barebox_size,
- ARCH_MAP_CACHED_RWX, true);
+ ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
} else {
barebox_size = membase + memsize - barebox_start;
early_remap_range(membase + memsize - barebox_size, barebox_size,
- ARCH_MAP_CACHED_RWX, true);
+ ARCH_MAP_CACHED_RWX | ARCH_MAP_FLAG_PAGEWISE);
}
- early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT, false);
+ early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
- ARCH_MAP_CACHED_RWX, false);
+ ARCH_MAP_CACHED_RWX);
mmu_enable();
}
--
2.39.5