* [PATCH] memory: remap immediately in reserve_sdram_region()
@ 2023-10-23  8:44 Sascha Hauer
From: Sascha Hauer @ 2023-10-23  8:44 UTC
  To: Barebox List

reserve_sdram_region() exists to protect SDRAM regions from being accessed
by the CPU. Right now the reserved region is only remapped during MMU
setup. Instead, remap the region immediately when it is reserved.
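
A minimal usage sketch (the caller, region name and address below are made
up for illustration, they are not part of this patch): with this change the
region is protected as soon as the call returns, even if the MMU is already
running.

	#include <common.h>
	#include <memory.h>
	#include <linux/err.h>
	#include <linux/sizes.h>

	/* Hypothetical example: carve out 32 MiB for secure firmware */
	static int example_reserve_firmware(void)
	{
		struct resource *res;

		res = reserve_sdram_region("firmware", 0x20000000, SZ_32M);
		if (IS_ERR(res))
			return PTR_ERR(res);

		/*
		 * With this patch the region is already remapped uncached
		 * here; there is no need to wait for __mmu_init().
		 */
		return 0;
	}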

The MMU may already be enabled by early code. So far, when
reserve_sdram_region() is called with the MMU enabled, we cannot rely on
the region being mapped non-executable right after the call, but only once
__mmu_init() has run. This patch removes that limitation.

Also, reserve_sdram_region() may now be called after __mmu_init() is
executed.

So far we silently aligned the remapped region to page boundaries. However,
calling reserve_sdram_region() with non page aligned boundaries has
undesired effects on the memory between the reserved region and the
surrounding page boundaries. Keep this behaviour, but warn the user when
the region to be reserved is not page aligned, as this really shouldn't
happen.
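
A worked example of the alignment (values made up, 4 KiB pages assumed),
using the same macros as the code below:

	resource_size_t start = 0x40000800;	/* not page aligned -> pr_err() */
	resource_size_t size  = 0x1800;		/* not page aligned -> pr_err() */

	start = ALIGN_DOWN(start, PAGE_SIZE);	/* 0x40000000 */
	size  = ALIGN(size, PAGE_SIZE);		/* 0x2000 */

	/*
	 * remap_range() then maps 0x40000000..0x40001fff uncached, i.e. the
	 * two full pages touched by the request, not just the requested bytes.
	 */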

Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c |  2 +-
 arch/arm/cpu/mmu_64.c |  3 +--
 common/memory.c       | 26 ++++++++++++++++++++++++++
 include/memory.h      |  9 ++-------
 4 files changed, 30 insertions(+), 10 deletions(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index 07b2250677..d0ada5866f 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -558,8 +558,8 @@ void __mmu_init(bool mmu_on)
 
 		pos = bank->start;
 
+		/* Skip reserved regions */
 		for_each_reserved_region(bank, rsv) {
-			remap_range((void *)rsv->start, resource_size(rsv), MAP_UNCACHED);
 			remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
 			pos = rsv->end + 1;
 		}
diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index fb57260c90..b718cb1efa 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -243,9 +243,8 @@ void __mmu_init(bool mmu_on)
 
 		pos = bank->start;
 
+		/* Skip reserved regions */
 		for_each_reserved_region(bank, rsv) {
-			remap_range((void *)resource_first_page(rsv),
-				    resource_count_pages(rsv), MAP_UNCACHED);
 			remap_range((void *)pos, rsv->start - pos, MAP_CACHED);
 			pos = rsv->end + 1;
 		}
diff --git a/common/memory.c b/common/memory.c
index 0ae9e7383c..d560d444b0 100644
--- a/common/memory.c
+++ b/common/memory.c
@@ -15,6 +15,7 @@
 #include <asm/sections.h>
 #include <malloc.h>
 #include <of.h>
+#include <mmu.h>
 
 /*
  * Begin and End of memory area for malloc(), and current "brk"
@@ -211,6 +212,31 @@ struct resource *__request_sdram_region(const char *name, unsigned flags,
 	return NULL;
 }
 
+/* use for secure firmware to inhibit speculation */
+struct resource *reserve_sdram_region(const char *name, resource_size_t start,
+				      resource_size_t size)
+{
+	struct resource *res;
+
+	res = __request_sdram_region(name, IORESOURCE_BUSY, start, size);
+	if (IS_ERR(res))
+		return ERR_CAST(res);
+
+	if (!IS_ALIGNED(start, PAGE_SIZE)) {
+		pr_err("%s: %s start is not page aligned\n", __func__, name);
+		start = ALIGN_DOWN(start, PAGE_SIZE);
+	}
+
+	if (!IS_ALIGNED(size, PAGE_SIZE)) {
+		pr_err("%s: %s size is not page aligned\n", __func__, name);
+		size = ALIGN(size, PAGE_SIZE);
+	}
+
+	remap_range((void *)start, size, MAP_UNCACHED);
+
+	return res;
+}
+
 int release_sdram_region(struct resource *res)
 {
 	return release_region(res);
diff --git a/include/memory.h b/include/memory.h
index 9c2a037610..d8691972ec 100644
--- a/include/memory.h
+++ b/include/memory.h
@@ -43,13 +43,8 @@ static inline struct resource *request_sdram_region(const char *name,
 	return __request_sdram_region(name, 0, start, size);
 }
 
-/* use for secure firmware to inhibit speculation */
-static inline struct resource *reserve_sdram_region(const char *name,
-						    resource_size_t start,
-						    resource_size_t size)
-{
-	return __request_sdram_region(name, IORESOURCE_BUSY, start, size);
-}
+struct resource *reserve_sdram_region(const char *name, resource_size_t start,
+				      resource_size_t size);
 
 int release_sdram_region(struct resource *res);
 
-- 
2.39.2



