mail archive of the barebox mailing list
 help / color / mirror / Atom feed
From: Ahmad Fatoum <a.fatoum@pengutronix.de>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <a.fatoum@pengutronix.de>
Subject: [PATCH 21/22] ARM: mmu64: factor out set_pte_range helper
Date: Wed,  6 Aug 2025 14:37:13 +0200	[thread overview]
Message-ID: <20250806123714.2092620-22-a.fatoum@pengutronix.de> (raw)
In-Reply-To: <20250806123714.2092620-1-a.fatoum@pengutronix.de>

By adding a helper that sets multiple PTEs at once, we can centralize
the break-before-make handling in a single place.

No functional change.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_64.c | 52 ++++++++++++++++++++++---------------------
 1 file changed, 27 insertions(+), 25 deletions(-)

diff --git a/arch/arm/cpu/mmu_64.c b/arch/arm/cpu/mmu_64.c
index d8ba7a171c2d..edb1b34aca7f 100644
--- a/arch/arm/cpu/mmu_64.c
+++ b/arch/arm/cpu/mmu_64.c
@@ -67,9 +67,20 @@ static void set_pte(uint64_t *pt, uint64_t val)
 	WRITE_ONCE(*pt, val);
 }
 
-static void set_table(uint64_t *pt, uint64_t *table_addr)
+static void set_pte_range(unsigned level, uint64_t *virt, phys_addr_t phys,
+			  size_t count, uint64_t attrs, bool bbm)
 {
-	set_pte(pt, PTE_TYPE_TABLE | (uint64_t)table_addr);
+	unsigned granularity = granule_size(level);
+	if (!bbm)
+		goto write_attrs;
+
+	 // TODO break-before-make missing
+
+write_attrs:
+	for (int i = 0; i < count; i++, phys += granularity)
+		set_pte(&virt[i], phys | attrs);
+
+	dma_flush_range(virt, count * sizeof(*virt));
 }
 
 #ifdef __PBL__
@@ -160,37 +171,29 @@ static unsigned long get_pte_attrs(maptype_t map_type)
 #define MAX_PTE_ENTRIES 512
 
 /* Splits a block PTE into table with subpages spanning the old block */
-static void split_block(uint64_t *pte, int level)
+static void split_block(uint64_t *pte, int level, bool bbm)
 {
 	uint64_t old_pte = *pte;
 	uint64_t *new_table;
-	uint64_t i = 0;
-	int levelshift;
+	u64 flags = 0;
 
 	if ((*pte & PTE_TYPE_MASK) == PTE_TYPE_TABLE)
 		return;
 
-	/* level describes the parent level, we need the child ones */
-	levelshift = level2shift(level + 1);
-
 	new_table = alloc_pte();
 
-	for (i = 0; i < MAX_PTE_ENTRIES; i++) {
-		set_pte(&new_table[i], old_pte | (i << levelshift));
+	/* Level 3 block PTEs have the table type */
+	if ((level + 1) == 3)
+		flags |= PTE_TYPE_TABLE;
 
-		/* Level 3 block PTEs have the table type */
-		if ((level + 1) == 3)
-			new_table[i] |= PTE_TYPE_TABLE;
-	}
+	set_pte_range(level + 1, new_table, old_pte, MAX_PTE_ENTRIES, flags, bbm);
 
-	/* Set the new table into effect
-	 * TODO: break-before-make missing
-	 */
-	set_table(pte, new_table);
+	/* level describes the parent level, we need the child ones */
+	set_pte_range(level, pte, (uint64_t)new_table, 1, PTE_TYPE_TABLE, bbm);
 }
 
 static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
-			      maptype_t map_type)
+			      maptype_t map_type, bool bbm)
 {
 	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
 	unsigned long attr = get_pte_attrs(map_type);
@@ -235,14 +238,13 @@ static int __arch_remap_range(uint64_t virt, uint64_t phys, uint64_t size,
 				type = (level == 3) ?
 					PTE_TYPE_PAGE : PTE_TYPE_BLOCK;
 
-				/* TODO: break-before-make missing */
-				set_pte(pte, phys | attr | type);
+				set_pte_range(level, pte, phys, 1, attr | type, bbm);
 				addr += block_size;
 				phys += block_size;
 				size -= block_size;
 				break;
 			} else {
-				split_block(pte, level);
+				split_block(pte, level, bbm);
 			}
 
 			table = get_level_table(pte);
@@ -277,7 +279,7 @@ static inline void dma_flush_range_end(unsigned long start, unsigned long end)
 
 static void early_remap_range(uint64_t addr, size_t size, maptype_t map_type)
 {
-	__arch_remap_range(addr, addr, size, map_type);
+	__arch_remap_range(addr, addr, size, map_type, false);
 }
 
 int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptype_t map_type)
@@ -287,7 +289,7 @@ int arch_remap_range(void *virt_addr, phys_addr_t phys_addr, size_t size, maptyp
 	if (!maptype_is_compatible(map_type, MAP_CACHED))
 		flush_cacheable_pages(virt_addr, size);
 
-	return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type);
+	return __arch_remap_range((uint64_t)virt_addr, phys_addr, (uint64_t)size, map_type, true);
 }
 
 static void mmu_enable(void)
@@ -383,7 +385,7 @@ static void early_init_range(size_t total_level0_tables)
 
 	while (total_level0_tables--) {
 		early_remap_range(addr, L0_XLAT_SIZE, MAP_UNCACHED);
-		split_block(ttb, 0);
+		split_block(ttb, 0, false);
 		addr += L0_XLAT_SIZE;
 		ttb++;
 	}
-- 
2.39.5




  parent reply	other threads:[~2025-08-06 13:06 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-08-06 12:36 [PATCH 00/22] ARM: mmu: refactor 32-bit and 64-bit code Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 01/22] ARM: mmu: introduce new maptype_t type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 02/22] ARM: mmu: compare only lowest 16 bits for map type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 03/22] ARM: mmu: prefix pre-MMU functions with early_ Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 04/22] ARM: mmu: panic when alloc_pte fails Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 05/22] ARM: mmu32: introduce new mmu_addr_t type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 06/22] ARM: mmu: provide zero page control in PBL Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 07/22] ARM: mmu: print map type as string Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 08/22] ARM: mmu64: rename create_sections to __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 09/22] ARM: mmu: move get_pte_attrs call into __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 10/22] ARM: mmu64: print debug message in __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 11/22] ARM: mmu: make force_pages a maptype_t flag Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 12/22] ARM: mmu64: move granule_size to the top of the file Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 13/22] ARM: mmu64: fix benign off-by-one in flush_cacheable_pages Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 14/22] ARM: mmu64: make flush_cacheable_pages less 64-bit dependent Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 15/22] ARM: mmu64: allow asserting last level page in __find_pte Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 16/22] ARM: mmu64: rename __find_pte to find_pte Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 17/22] ARM: mmu32: rework find_pte to have ARM64 find_pte semantics Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 18/22] ARM: mmu64: factor out flush_cacheable_pages for reusability Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 19/22] ARM: mmu32: flush only cacheable pages on remap Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 20/22] ARM: mmu32: factor out set_pte_range helper Ahmad Fatoum
2025-08-06 12:37 ` Ahmad Fatoum [this message]
2025-08-06 12:37 ` [PATCH 22/22] ARM: mmu: define dma_alloc_writecombine in common code Ahmad Fatoum
2025-08-07  7:24 ` [PATCH 00/22] ARM: mmu: refactor 32-bit and 64-bit code Sascha Hauer

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250806123714.2092620-22-a.fatoum@pengutronix.de \
    --to=a.fatoum@pengutronix.de \
    --cc=barebox@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox