From: Ahmad Fatoum <a.fatoum@barebox.org>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <a.fatoum@pengutronix.de>,
Ahmad Fatoum <a.fatoum@barebox.org>
Subject: [PATCH v4 12/13] ARM: mmu64: map memory for barebox proper pagewise
Date: Mon, 4 Aug 2025 19:22:32 +0200
Message-ID: <20250804172233.2158462-13-a.fatoum@barebox.org>
In-Reply-To: <20250804172233.2158462-1-a.fatoum@barebox.org>
From: Sascha Hauer <s.hauer@pengutronix.de>
Map the remainder of the memory, which is where barebox proper is located,
explicitly with page-granular (level 3) mappings. In barebox proper we'll
remap the code segments read-only/executable and the read-only data
segments read-only/execute-never. For this we need the memory to be mapped
pagewise. We can't split the section-wise mapping up into a pagewise
mapping later, because that would require a break-before-make sequence,
which we can't do while barebox proper is running at the location being
remapped.
Reviewed-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
Signed-off-by: Ahmad Fatoum <a.fatoum@barebox.org>
---
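As a side note for readers less familiar with the AArch64 table walk: below
is a minimal, self-contained sketch in plain C (not barebox code;
example_level2shift() and example_use_leaf() are made-up names used purely
for illustration) of the leaf-entry decision that create_sections() makes
with and without force_pages, assuming a 4 KiB translation granule:

#include <stdbool.h>
#include <stdint.h>

/*
 * Hypothetical helper: number of address bits covered by one entry at
 * the given level (4 KiB granule, 4-level walk).
 */
static inline unsigned int example_level2shift(int level)
{
	/* level 3 -> 12 (4 KiB page), level 2 -> 21 (2 MiB), level 1 -> 30 (1 GiB) */
	return 12 + 9 * (3 - level);
}

/*
 * Return true if the walk should stop at this level and write a leaf
 * entry (page or block) instead of descending into a next-level table.
 */
static bool example_use_leaf(int level, uint64_t virt, uint64_t phys,
			     uint64_t size, bool force_pages)
{
	uint64_t block_size = 1ULL << example_level2shift(level);

	if (force_pages)
		return level == 3;	/* always descend to 4 KiB pages */

	/* otherwise use the largest naturally aligned block that fits */
	return size >= block_size &&
	       !(virt & (block_size - 1)) &&
	       !(phys & (block_size - 1));
}

With force_pages set the walk always ends in level 3 page entries, so the
region covering barebox proper can later get per-page permissions without
ever having to split a live block mapping, i.e. without a break-before-make
sequence.
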
arch/arm/cpu/mmu_64.c | 40 +++++++++++++++++++++++++++++-----------
1 file changed, 29 insertions(+), 11 deletions(-)
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 54d4a4e9c638..6fd767d983b7 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -10,6 +10,7 @@
 #include <init.h>
 #include <mmu.h>
 #include <errno.h>
+#include <range.h>
 #include <zero_page.h>
 #include <linux/sizes.h>
 #include <asm/memory.h>
@@ -128,7 +129,7 @@ static void split_block(uint64_t *pte, int level)
 }
 
 static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
-			    uint64_t attr)
+			    uint64_t attr, bool force_pages)
 {
 	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
@@ -151,14 +152,18 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	while (size) {
 		table = ttb;
 		for (level = 0; level < 4; level++) {
+			bool block_aligned;
 			block_shift = level2shift(level);
 			idx = (addr & level2mask(level)) >> block_shift;
 			block_size = (1ULL << block_shift);
 
 			pte = table + idx;
 
-			if (size >= block_size && IS_ALIGNED(addr, block_size) &&
-			    IS_ALIGNED(phys, block_size)) {
+			block_aligned = size >= block_size &&
+					IS_ALIGNED(addr, block_size) &&
+					IS_ALIGNED(phys, block_size);
+
+			if ((force_pages && level == 3) || (!force_pages && block_aligned)) {
 				type = (level == 3) ?
 					PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
@@ -299,14 +304,14 @@ static unsigned long get_pte_attrs(unsigned flags)
 	}
 }
 
-static void early_remap_range(uint64_t addr, size_t size, unsigned flags)
+static void early_remap_range(uint64_t addr, size_t size, unsigned flags, bool force_pages)
 {
 	unsigned long attrs = get_pte_attrs(flags);
 
 	if (WARN_ON(attrs == ~0UL))
 		return;
 
-	create_sections(addr, addr, size, attrs);
+	create_sections(addr, addr, size, attrs, force_pages);
 }
 
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsigned flags)
@@ -319,7 +324,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, unsign
 	if (flags != MAP_CACHED)
 		flush_cacheable_pages(virt_addr, size);
 
-	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs);
+	create_sections((uint64_t)virt_addr, phys_addr, (uint64_t)size, attrs, false);
 
 	return 0;
 }
@@ -416,7 +421,7 @@ static void init_range(size_t total_level0_tables)
 	uint64_t addr = 0;
 
 	while (total_level0_tables--) {
-		early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED);
+		early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED, false);
 		split_block(ttb, 0);
 		addr += L0_XLAT_SIZE;
 		ttb++;
@@ -427,6 +432,7 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 {
 	int el;
 	u64 optee_membase;
+	unsigned long barebox_size;
 	unsigned long ttb = arm_mem_ttb(membase + memsize);
 
 	if (get_cr() & CR_M)
@@ -447,14 +453,26 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize, unsigned lon
 	 */
 	init_range(2);
 
-	early_remap_range(membase, memsize, MAP_CACHED);
+	early_remap_range(membase, memsize, MAP_CACHED, false);
 
-	if (optee_get_membase(&optee_membase))
+	if (optee_get_membase(&optee_membase)) {
 		optee_membase = membase + memsize - OPTEE_SIZE;
 
-	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT);
+		barebox_size = optee_membase - barebox_start;
 
-	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext), MAP_CACHED);
+		early_remap_range(optee_membase - barebox_size, barebox_size,
+				  ARCH_MAP_CACHED_RWX, true);
+	} else {
+		barebox_size = membase + memsize - barebox_start;
+
+		early_remap_range(membase + memsize - barebox_size, barebox_size,
+				  ARCH_MAP_CACHED_RWX, true);
+	}
+
+	early_remap_range(optee_membase, OPTEE_SIZE, MAP_FAULT, false);
+
+	early_remap_range(PAGE_ALIGN_DOWN((uintptr_t)_stext), PAGE_ALIGN(_etext - _stext),
+			  MAP_CACHED, false);
 
 	mmu_enable();
 }
--
2.39.5
Thread overview: 15+ messages
2025-08-04 17:22 [PATCH v4 00/13] ARM: Map sections RO/XN Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 01/13] mmu: explicitly map executable non-SDRAM regions with MAP_CODE Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 02/13] ARM: pass barebox base to mmu_early_enable() Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 03/13] ARM: mmu: move ARCH_MAP_WRITECOMBINE to header Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 04/13] ARM: mmu: map memory for barebox proper pagewise Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 05/13] ARM: mmu: skip TLB invalidation if remapping zero bytes Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 06/13] ARM: mmu: provide setup_trap_pages for both 32- and 64-bit Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 07/13] ARM: mmu: share common memory bank remapping code Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 08/13] ARM: mmu: make mmu_remap_memory_banks clearer with helper Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 09/13] partition: rename region_overlap_end to region_overlap_end_inclusive Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 10/13] partition: define new region_overlap_end_exclusive helper Ahmad Fatoum
2025-08-04 17:22 ` [PATCH v4 11/13] ARM: mmu: map text segment ro and data segments execute never Ahmad Fatoum
2025-08-04 17:22 ` Ahmad Fatoum [this message]
2025-08-04 17:22 ` [PATCH v4 13/13] ARM: mmu64: " Ahmad Fatoum
2025-08-05 10:18 ` [PATCH v4 00/13] ARM: Map sections RO/XN Sascha Hauer