mail archive of the barebox mailing list
From: Sascha Hauer <s.hauer@pengutronix.de>
To: Barebox List <barebox@lists.infradead.org>
Subject: [PATCH v2 34/34] ARM: mmu64: Use two level pagetables in early code
Date: Wed, 17 May 2023 11:03:40 +0200	[thread overview]
Message-ID: <20230517090340.3954615-35-s.hauer@pengutronix.de> (raw)
In-Reply-To: <20230517090340.3954615-1-s.hauer@pengutronix.de>

So far we used 1GiB-sized sections in the early MMU setup. This has
the disadvantage that we can't use the MMU in early code when we
need a finer granularity. Rockchip, for example, keeps TF-A code in
lower memory, so the early code just skipped MMU initialization
there. We also can't properly map the OP-TEE space at the end of
SDRAM as non-executable.

With this patch we now use two-level page tables and can map with
4KiB granularity.
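
The 4KiB granularity follows from the standard AArch64 4KiB-granule
translation scheme, where every table level resolves 9 bits of the
virtual address. As a rough illustration (a standalone sketch, not
code from this patch), the per-level table index can be computed as
below, mirroring the shifts that level2shift() returns for a 4KiB
granule:

	/* Standalone sketch: per-level table indices for an AArch64
	 * 4KiB granule. Each level indexes 512 (2^9) entries; level 3
	 * maps 4KiB pages.
	 */
	#include <stdint.h>
	#include <stdio.h>

	static unsigned int va_index(uint64_t va, int level)
	{
		/* shift is 39, 30, 21, 12 for levels 0..3 */
		unsigned int shift = 12 + 9 * (3 - level);

		return (va >> shift) & 0x1ff;
	}

	int main(void)
	{
		uint64_t va = 0x40201000;	/* hypothetical SDRAM address */
		int level;

		for (level = 0; level <= 3; level++)
			printf("level %d index %u\n", level, va_index(va, level));

		return 0;
	}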

The MMU setup in barebox proper changes as well. Instead of disabling
the MMU for reconfiguration, we can now keep the MMU enabled and just
add the mappings for SDRAM banks not known to the early code.
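
For illustration, this is what the reworked bank loop in the diff
below ends up mapping for a hypothetical bank with one reserved
region (addresses are made up, not taken from the patch): the bank
is mapped cached in the pieces around the reserved range, which
itself is left untouched.

	/* Hypothetical walk-through of the per-bank mapping: one bank
	 * at 1GiB with a single made-up reserved carveout.
	 */
	#include <stdint.h>
	#include <stdio.h>

	struct range { uint64_t start, end; };	/* end is inclusive */

	int main(void)
	{
		struct range bank = { 0x40000000, 0x7fffffff };
		struct range rsv  = { 0x48000000, 0x481fffff };
		uint64_t pos = bank.start;

		/* map cached up to the reserved region, then skip it */
		printf("cached: 0x%llx..0x%llx\n",
		       (unsigned long long)pos,
		       (unsigned long long)(rsv.start - 1));
		pos = rsv.end + 1;

		/* map the remainder of the bank cached */
		printf("cached: 0x%llx..0x%llx\n",
		       (unsigned long long)pos,
		       (unsigned long long)bank.end);

		return 0;
	}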

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 97 +++++++++----------------------------------
 1 file changed, 20 insertions(+), 77 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index d32eecf144..2f9b5098a3 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -22,7 +22,10 @@
 
 #include "mmu_64.h"
 
-static uint64_t *ttb;
+static uint64_t *get_ttb(void)
+{
+	return (uint64_t *)get_ttbr(current_el());
+}
 
 static void set_table(uint64_t *pt, uint64_t *table_addr)
 {
@@ -42,7 +45,7 @@ static uint64_t *alloc_pte(void)
 	if (idx * GRANULE_SIZE >= ARM_EARLY_PAGETABLE_SIZE)
 		return NULL;
 
-	return (void *)ttb + idx * GRANULE_SIZE;
+	return get_ttb() + idx * GRANULE_SIZE;
 }
 #else
 static uint64_t *alloc_pte(void)
@@ -63,7 +66,7 @@ static __maybe_unused uint64_t *find_pte(uint64_t addr)
 	uint64_t idx;
 	int i;
 
-	pte = ttb;
+	pte = get_ttb();
 
 	for (i = 0; i < 4; i++) {
 		block_shift = level2shift(i);
@@ -112,6 +115,7 @@ static void split_block(uint64_t *pte, int level)
 static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 			    uint64_t attr)
 {
+	uint64_t *ttb = get_ttb();
 	uint64_t block_size;
 	uint64_t block_shift;
 	uint64_t *pte;
@@ -121,9 +125,6 @@ static void create_sections(uint64_t virt, uint64_t phys, uint64_t size,
 	uint64_t type;
 	int level;
 
-	if (!ttb)
-		arm_mmu_not_initialized_error();
-
 	addr = virt;
 
 	attr &= ~PTE_TYPE_MASK;
@@ -192,37 +193,23 @@ static void mmu_enable(void)
 void __mmu_init(bool mmu_on)
 {
 	struct memory_bank *bank;
-	unsigned int el;
-
-	if (mmu_on)
-		mmu_disable();
-
-	ttb = alloc_pte();
-	el = current_el();
-	set_ttbr_tcr_mair(el, (uint64_t)ttb, calc_tcr(el, BITS_PER_VA),
-			  MEMORY_ATTRIBUTES);
 
-	pr_debug("ttb: 0x%p\n", ttb);
-
-	/* create a flat mapping */
-	arch_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
-
-	/* Map sdram cached. */
 	for_each_memory_bank(bank) {
 		struct resource *rsv;
+		resource_size_t pos;
 
-		arch_remap_range((void *)bank->start, bank->size, MAP_CACHED);
+		pos = bank->start;
 
 		for_each_reserved_region(bank, rsv) {
-			arch_remap_range((void *)resource_first_page(rsv),
-					 resource_count_pages(rsv), MAP_UNCACHED);
+			arch_remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
+			pos = rsv->end + 1;
 		}
+
+		arch_remap_range((void *)pos, bank->start + bank->size - pos, MAP_CACHED);
 	}
 
 	/* Make zero page faulting to catch NULL pointer derefs */
 	zero_page_faulting();
-
-	mmu_enable();
 }
 
 void mmu_disable(void)
@@ -256,42 +243,6 @@ void dma_flush_range(void *ptr, size_t size)
 	v8_flush_dcache_range(start, end);
 }
 
-static void early_create_sections(void *ttb, uint64_t virt, uint64_t phys,
-				  uint64_t size, uint64_t attr)
-{
-	uint64_t block_size;
-	uint64_t block_shift;
-	uint64_t *pte;
-	uint64_t idx;
-	uint64_t addr;
-	uint64_t *table;
-
-	addr = virt;
-
-	attr &= ~PTE_TYPE_MASK;
-
-	table = ttb;
-
-	while (1) {
-		block_shift = level2shift(1);
-		idx = (addr & level2mask(1)) >> block_shift;
-		block_size = (1ULL << block_shift);
-
-		pte = table + idx;
-
-		*pte = phys | attr | PTE_TYPE_BLOCK;
-
-		if (size < block_size)
-			break;
-
-		addr += block_size;
-		phys += block_size;
-		size -= block_size;
-	}
-}
-
-#define EARLY_BITS_PER_VA 39
-
 void mmu_early_enable(unsigned long membase, unsigned long memsize)
 {
 	int el;
@@ -299,24 +250,16 @@ void mmu_early_enable(unsigned long membase, unsigned long memsize)
 
 	pr_debug("enabling MMU, ttb @ 0x%08lx\n", ttb);
 
-	/*
-	 * For the early code we only create level 1 pagetables which only
-	 * allow for a 1GiB granularity. If our membase is not aligned to that
-	 * bail out without enabling the MMU.
-	 */
-	if (membase & ((1ULL << level2shift(1)) - 1))
-		return;
+	el = current_el();
+	set_ttbr_tcr_mair(el, ttb, calc_tcr(el, BITS_PER_VA), MEMORY_ATTRIBUTES);
 
 	memset((void *)ttb, 0, GRANULE_SIZE);
 
-	el = current_el();
-	set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
-	early_create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
-			attrs_uncached_mem());
-	early_create_sections((void *)ttb, membase, membase, memsize - OPTEE_SIZE, CACHED_MEM);
-	tlb_invalidate();
-	isb();
-	set_cr(get_cr() | CR_M);
+	arch_remap_range(0, 1UL << (BITS_PER_VA - 1), MAP_UNCACHED);
+	arch_remap_range((void *)membase, memsize - OPTEE_SIZE, MAP_CACHED);
+	arch_remap_range((void *)membase + memsize - OPTEE_SIZE, OPTEE_SIZE, MAP_FAULT);
+
+	mmu_enable();
 }
 
 void mmu_early_disable(void)
-- 
2.39.2





Thread overview: 68+ messages
2023-05-17  9:03 [PATCH v2 00/34] ARM: MMU rework Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 01/34] ARM: remove unused membase argument Sascha Hauer
2023-05-17 12:45   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 02/34] ARM: remove unused define Sascha Hauer
2023-05-17 12:45   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 03/34] ARM: rename __arm_mem_scratch to arm_mem_scratch Sascha Hauer
2023-05-17 12:46   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 04/34] ARM: put scratch mem area below OP-TEE Sascha Hauer
2023-05-17 12:48   ` Ahmad Fatoum
2023-05-17 13:14     ` Sascha Hauer
2023-05-17 15:50       ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 05/34] ARM: add arm_mem_optee() Sascha Hauer
2023-05-17 12:53   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 06/34] ARM: make arm_mem_scratch() a static inline function Sascha Hauer
2023-05-17 12:53   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 07/34] ARM: define stack base consistently Sascha Hauer
2023-05-17 12:55   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 08/34] ARM: move arm_mem_scratch_get() lower for consistency Sascha Hauer
2023-05-17 12:57   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 09/34] ARM: drop cache function initialization Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 10/34] ARM: Add _32 suffix to aarch32 specific filenames Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 11/34] ARM: cpu.c: remove unused include Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 12/34] ARM: mmu-common.c: use common mmu include Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 13/34] ARM: mmu32: rename mmu.h to mmu_32.h Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 14/34] ARM: mmu: implement MAP_FAULT Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 15/34] ARM: mmu64: Use arch_remap_range where possible Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 16/34] ARM: mmu32: implement zero_page_*() Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 17/34] ARM: i.MX: Drop HAB workaround Sascha Hauer
2023-05-17 13:01   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 18/34] ARM: Move early MMU after malloc initialization Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 19/34] ARM: mmu: move dma_sync_single_for_device to extra file Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 20/34] ARM: mmu: merge mmu-early_xx.c into mmu_xx.c Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 21/34] ARM: mmu: alloc 64k for early page tables Sascha Hauer
2023-05-17 13:03   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 22/34] ARM: mmu32: create alloc_pte() Sascha Hauer
2023-05-17 13:07   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 23/34] ARM: mmu64: " Sascha Hauer
2023-05-17 13:15   ` Ahmad Fatoum
2023-05-17 13:17   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 24/34] ARM: mmu: drop ttb argument Sascha Hauer
2023-05-17 13:23   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 25/34] ARM: mmu: always do MMU initialization early when MMU is enabled Sascha Hauer
2023-05-17 13:29   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 26/34] ARM: mmu32: Assume MMU is on Sascha Hauer
2023-05-17 13:36   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 27/34] ARM: mmu32: Fix pmd_flags_to_pte() for ARMv4/5/6 Sascha Hauer
2023-05-17 13:39   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 28/34] ARM: mmu32: Add pte_flags_to_pmd() Sascha Hauer
2023-05-17 13:43   ` Ahmad Fatoum
2023-05-17 14:44     ` Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 29/34] ARM: mmu32: add get_pte_flags, get_pmd_flags Sascha Hauer
2023-05-17 13:46   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 30/34] ARM: mmu32: move functions into c file Sascha Hauer
2023-05-17 13:48   ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 31/34] ARM: mmu32: read TTB value from register Sascha Hauer
2023-05-17 13:58   ` Ahmad Fatoum
2023-05-17 14:39     ` Sascha Hauer
2023-05-19  6:53       ` Ahmad Fatoum
2023-05-19  7:44         ` Sascha Hauer
2023-05-19  7:52           ` Ahmad Fatoum
2023-05-17  9:03 ` [PATCH v2 32/34] ARM: mmu32: Use pages for early MMU setup Sascha Hauer
2023-05-17 14:21   ` Ahmad Fatoum
2023-05-22  8:14     ` Sascha Hauer
2023-05-17  9:03 ` [PATCH v2 33/34] ARM: mmu32: Skip reserved ranges during initialization Sascha Hauer
2023-05-17 14:43   ` Ahmad Fatoum
2023-05-17 14:55     ` Sascha Hauer
2023-05-17 15:56       ` Ahmad Fatoum
2023-05-17  9:03 ` Sascha Hauer [this message]
