From: Sascha Hauer <s.hauer@pengutronix.de>
To: Barebox List <barebox@lists.infradead.org>
Subject: [PATCH 13/27] ARM: mmu: merge mmu-early_xx.c into mmu_xx.c
Date: Fri, 12 May 2023 13:09:54 +0200
Message-ID: <20230512111008.1120833-14-s.hauer@pengutronix.de>
In-Reply-To: <20230512111008.1120833-1-s.hauer@pengutronix.de>
The early MMU code will be further consolidated with the rest of the MMU
code, so move it into the same files to make code sharing easier.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
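As a quick usage sketch of the interface this patch moves (illustration
only, not part of the diff below; the caller name and the page-table
scratch argument are assumptions, only the mmu_early_enable() signature
is taken from the code being moved):

    /* Prototype as it now lives in mmu_32.c / mmu_64.c: */
    void mmu_early_enable(unsigned long membase, unsigned long memsize,
                          unsigned long ttb);

    /*
     * Hypothetical early caller: pass the start and size of SDRAM plus
     * some scratch memory for the page tables. With the Makefile change
     * below the same source file is built for both the PBL and barebox
     * proper.
     */
    static void example_early_mmu_setup(unsigned long membase,
                                        unsigned long memsize,
                                        unsigned long ttb_scratch)
    {
            mmu_early_enable(membase, memsize, ttb_scratch);
    }
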
arch/arm/cpu/Makefile | 4 +-
arch/arm/cpu/mmu-early_32.c | 62 -------------------------
arch/arm/cpu/mmu-early_64.c | 93 -------------------------------------
arch/arm/cpu/mmu_32.c | 50 ++++++++++++++++++++
arch/arm/cpu/mmu_64.c | 76 ++++++++++++++++++++++++++++++
5 files changed, 128 insertions(+), 157 deletions(-)
delete mode 100644 arch/arm/cpu/mmu-early_32.c
delete mode 100644 arch/arm/cpu/mmu-early_64.c
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index cd5f36eb49..0e4fa69229 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -3,10 +3,10 @@
obj-y += cpu.o
obj-$(CONFIG_ARM_EXCEPTIONS) += exceptions_$(S64_32).o interrupts_$(S64_32).o
-obj-$(CONFIG_MMU) += mmu_$(S64_32).o mmu-common.o
+obj-$(CONFIG_MMU) += mmu-common.o
+obj-pbl-$(CONFIG_MMU) += mmu_$(S64_32).o
obj-$(CONFIG_MMU) += dma_$(S64_32).o
obj-pbl-y += lowlevel_$(S64_32).o
-obj-pbl-$(CONFIG_MMU) += mmu-early_$(S64_32).o
obj-pbl-$(CONFIG_CPU_32v7) += hyp.o
AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
diff --git a/arch/arm/cpu/mmu-early_32.c b/arch/arm/cpu/mmu-early_32.c
deleted file mode 100644
index 94bde44c9b..0000000000
--- a/arch/arm/cpu/mmu-early_32.c
+++ /dev/null
@@ -1,62 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <asm/mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <asm-generic/sections.h>
-
-#include "mmu_32.h"
-
-static uint32_t *ttb;
-
-static inline void map_region(unsigned long start, unsigned long size,
- uint64_t flags)
-
-{
- start = ALIGN_DOWN(start, SZ_1M);
- size = ALIGN(size, SZ_1M);
-
- create_sections(ttb, start, start + size - 1, flags);
-}
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long _ttb)
-{
- ttb = (uint32_t *)_ttb;
-
- set_ttbr(ttb);
-
- /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
- if (cpu_architecture() >= CPU_ARCH_ARMv7)
- set_domain(DOMAIN_CLIENT);
- else
- set_domain(DOMAIN_MANAGER);
-
- /*
- * This marks the whole address space as uncachable as well as
- * unexecutable if possible
- */
- create_flat_mapping(ttb);
-
- /*
- * There can be SoCs that have a section shared between device memory
- * and the on-chip RAM hosting the PBL. Thus mark this section
- * uncachable, but executable.
- * On such SoCs, executing from OCRAM could cause the instruction
- * prefetcher to speculatively access that device memory, triggering
- * potential errant behavior.
- *
- * If your SoC has such a memory layout, you should rewrite the code
- * here to map the OCRAM page-wise.
- */
- map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
-
- /* maps main memory as cachable */
- map_region(membase, memsize, PMD_SECT_DEF_CACHED);
-
- __mmu_cache_on();
-}
diff --git a/arch/arm/cpu/mmu-early_64.c b/arch/arm/cpu/mmu-early_64.c
deleted file mode 100644
index d1f4a046bb..0000000000
--- a/arch/arm/cpu/mmu-early_64.c
+++ /dev/null
@@ -1,93 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-
-#include <common.h>
-#include <dma-dir.h>
-#include <init.h>
-#include <mmu.h>
-#include <errno.h>
-#include <linux/sizes.h>
-#include <asm/memory.h>
-#include <asm/pgtable64.h>
-#include <asm/barebox-arm.h>
-#include <asm/system.h>
-#include <asm/cache.h>
-#include <memory.h>
-#include <asm/system_info.h>
-
-#include "mmu_64.h"
-
-static void create_sections(void *ttb, uint64_t virt, uint64_t phys,
- uint64_t size, uint64_t attr)
-{
- uint64_t block_size;
- uint64_t block_shift;
- uint64_t *pte;
- uint64_t idx;
- uint64_t addr;
- uint64_t *table;
-
- addr = virt;
-
- attr &= ~PTE_TYPE_MASK;
-
- table = ttb;
-
- while (1) {
- block_shift = level2shift(1);
- idx = (addr & level2mask(1)) >> block_shift;
- block_size = (1ULL << block_shift);
-
- pte = table + idx;
-
- *pte = phys | attr | PTE_TYPE_BLOCK;
-
- if (size < block_size)
- break;
-
- addr += block_size;
- phys += block_size;
- size -= block_size;
- }
-}
-
-#define EARLY_BITS_PER_VA 39
-
-void mmu_early_enable(unsigned long membase, unsigned long memsize,
- unsigned long ttb)
-{
- int el;
-
- /*
- * For the early code we only create level 1 pagetables which only
- * allow for a 1GiB granularity. If our membase is not aligned to that
- * bail out without enabling the MMU.
- */
- if (membase & ((1ULL << level2shift(1)) - 1))
- return;
-
- memset((void *)ttb, 0, GRANULE_SIZE);
-
- el = current_el();
- set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
- create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
- attrs_uncached_mem());
- create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
- tlb_invalidate();
- isb();
- set_cr(get_cr() | CR_M);
-}
-
-void mmu_early_disable(void)
-{
- unsigned int cr;
-
- cr = get_cr();
- cr &= ~(CR_M | CR_C);
-
- set_cr(cr);
- v8_flush_dcache_all();
- tlb_invalidate();
-
- dsb();
- isb();
-}
diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 10f447874c..12fe892400 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -494,3 +494,53 @@ void *dma_alloc_writecombine(size_t size, dma_addr_t *dma_handle)
{
return dma_alloc_map(size, dma_handle, ARCH_MAP_WRITECOMBINE);
}
+
+static uint32_t *ttb;
+
+static inline void map_region(unsigned long start, unsigned long size,
+ uint64_t flags)
+
+{
+ start = ALIGN_DOWN(start, SZ_1M);
+ size = ALIGN(size, SZ_1M);
+
+ create_sections(ttb, start, start + size - 1, flags);
+}
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long _ttb)
+{
+ ttb = (uint32_t *)_ttb;
+
+ set_ttbr(ttb);
+
+ /* For the XN bit to take effect, we can't be using DOMAIN_MANAGER. */
+ if (cpu_architecture() >= CPU_ARCH_ARMv7)
+ set_domain(DOMAIN_CLIENT);
+ else
+ set_domain(DOMAIN_MANAGER);
+
+ /*
+ * This marks the whole address space as uncachable as well as
+ * unexecutable if possible
+ */
+ create_flat_mapping(ttb);
+
+ /*
+ * There can be SoCs that have a section shared between device memory
+ * and the on-chip RAM hosting the PBL. Thus mark this section
+ * uncachable, but executable.
+ * On such SoCs, executing from OCRAM could cause the instruction
+ * prefetcher to speculatively access that device memory, triggering
+ * potential errant behavior.
+ *
+ * If your SoC has such a memory layout, you should rewrite the code
+ * here to map the OCRAM page-wise.
+ */
+ map_region((unsigned long)_stext, _etext - _stext, PMD_SECT_DEF_UNCACHED);
+
+ /* maps main memory as cachable */
+ map_region(membase, memsize, PMD_SECT_DEF_CACHED);
+
+ __mmu_cache_on();
+}
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index 9150de1676..55ada960c5 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -241,3 +241,79 @@ void dma_flush_range(void *ptr, size_t size)
v8_flush_dcache_range(start, end);
}
+
+static void early_create_sections(void *ttb, uint64_t virt, uint64_t phys,
+ uint64_t size, uint64_t attr)
+{
+ uint64_t block_size;
+ uint64_t block_shift;
+ uint64_t *pte;
+ uint64_t idx;
+ uint64_t addr;
+ uint64_t *table;
+
+ addr = virt;
+
+ attr &= ~PTE_TYPE_MASK;
+
+ table = ttb;
+
+ while (1) {
+ block_shift = level2shift(1);
+ idx = (addr & level2mask(1)) >> block_shift;
+ block_size = (1ULL << block_shift);
+
+ pte = table + idx;
+
+ *pte = phys | attr | PTE_TYPE_BLOCK;
+
+ if (size < block_size)
+ break;
+
+ addr += block_size;
+ phys += block_size;
+ size -= block_size;
+ }
+}
+
+#define EARLY_BITS_PER_VA 39
+
+void mmu_early_enable(unsigned long membase, unsigned long memsize,
+ unsigned long ttb)
+{
+ int el;
+
+ /*
+ * For the early code we only create level 1 pagetables which only
+ * allow for a 1GiB granularity. If our membase is not aligned to that
+ * bail out without enabling the MMU.
+ */
+ if (membase & ((1ULL << level2shift(1)) - 1))
+ return;
+
+ memset((void *)ttb, 0, GRANULE_SIZE);
+
+ el = current_el();
+ set_ttbr_tcr_mair(el, ttb, calc_tcr(el, EARLY_BITS_PER_VA), MEMORY_ATTRIBUTES);
+ early_create_sections((void *)ttb, 0, 0, 1UL << (EARLY_BITS_PER_VA - 1),
+ attrs_uncached_mem());
+ early_create_sections((void *)ttb, membase, membase, memsize, CACHED_MEM);
+ tlb_invalidate();
+ isb();
+ set_cr(get_cr() | CR_M);
+}
+
+void mmu_early_disable(void)
+{
+ unsigned int cr;
+
+ cr = get_cr();
+ cr &= ~(CR_M | CR_C);
+
+ set_cr(cr);
+ v8_flush_dcache_all();
+ tlb_invalidate();
+
+ dsb();
+ isb();
+}
--
2.39.2