mail archive of the barebox mailing list
 help / color / mirror / Atom feed
From: Ahmad Fatoum <a.fatoum@pengutronix.de>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <a.fatoum@pengutronix.de>
Subject: [PATCH 20/22] ARM: mmu32: factor out set_pte_range helper
Date: Wed,  6 Aug 2025 14:37:12 +0200	[thread overview]
Message-ID: <20250806123714.2092620-21-a.fatoum@pengutronix.de> (raw)
In-Reply-To: <20250806123714.2092620-1-a.fatoum@pengutronix.de>

By adding a helper that sets multiple PTEs at once, we can consolidate
the break-before-make handling in a single place.

No functional change.

Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
 arch/arm/cpu/mmu_32.c | 83 ++++++++++++++++++++++---------------------
 1 file changed, 42 insertions(+), 41 deletions(-)

diff --git a/arch/arm/cpu/mmu_32.c b/arch/arm/cpu/mmu_32.c
index a76d403e3477..7cf04ea9412a 100644
--- a/arch/arm/cpu/mmu_32.c
+++ b/arch/arm/cpu/mmu_32.c
@@ -106,6 +106,23 @@ static void set_pte(uint32_t *pt, uint32_t val)
 	WRITE_ONCE(*pt, val);
 }
 
+static void set_pte_range(unsigned level, uint32_t *virt, phys_addr_t phys,
+			  size_t count, uint32_t attrs, bool bbm)
+{
+	unsigned granularity = granule_size(level);
+
+	if (!bbm)
+		goto write_attrs;
+
+	 // TODO break-before-make missing
+
+write_attrs:
+	for (int i = 0; i < count; i++, phys += granularity)
+		set_pte(&virt[i], phys | attrs);
+
+	dma_flush_range(virt, count * sizeof(*virt));
+}
+
 #ifdef __PBL__
 static uint32_t *alloc_pte(void)
 {
@@ -203,11 +220,11 @@ void dma_inv_range(void *ptr, size_t size)
  * Not yet exported, but may be later if someone finds use for it.
  */
 static u32 *arm_create_pte(unsigned long virt, unsigned long phys,
-			   uint32_t flags)
+			   uint32_t flags, bool bbm)
 {
 	uint32_t *ttb = get_ttb();
 	u32 *table;
-	int i, ttb_idx;
+	int ttb_idx;
 
 	virt = ALIGN_DOWN(virt, PGDIR_SIZE);
 	phys = ALIGN_DOWN(phys, PGDIR_SIZE);
@@ -216,16 +233,9 @@ static u32 *arm_create_pte(unsigned long virt, unsigned long phys,
 
 	ttb_idx = pgd_index(virt);
 
-	for (i = 0; i < PTRS_PER_PTE; i++) {
-		set_pte(&table[i], phys | PTE_TYPE_SMALL | flags);
-		virt += PAGE_SIZE;
-		phys += PAGE_SIZE;
-	}
-	dma_flush_range(table, PTRS_PER_PTE * sizeof(u32));
+	set_pte_range(2, table, phys, PTRS_PER_PTE, PTE_TYPE_SMALL | flags, bbm);
 
-	// TODO break-before-make missing
-	set_pte(&ttb[ttb_idx], (unsigned long)table | PMD_TYPE_TABLE);
-	dma_flush_range(&ttb[ttb_idx], sizeof(u32));
+	set_pte_range(1, &ttb[ttb_idx], (unsigned long)table, 1, PMD_TYPE_TABLE, bbm);
 
 	return table;
 }
@@ -335,6 +345,7 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 			       maptype_t map_type)
 {
 	bool force_pages = map_type & ARCH_MAP_FLAG_PAGEWISE;
+	bool mmu_on;
 	u32 virt_addr = (u32)_virt_addr;
 	u32 pte_flags, pmd_flags;
 	uint32_t *ttb = get_ttb();
@@ -351,30 +362,30 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 	if (!size)
 		return;
 
+	mmu_on = get_cr() & CR_M;
+
 	while (size) {
 		const bool pgdir_size_aligned = IS_ALIGNED(virt_addr, PGDIR_SIZE);
 		u32 *pgd = (u32 *)&ttb[pgd_index(virt_addr)];
+		u32 flags;
 		size_t chunk;
 
 		if (size >= PGDIR_SIZE && pgdir_size_aligned &&
 		    IS_ALIGNED(phys_addr, PGDIR_SIZE) &&
 		    !pgd_type_table(*pgd) && !force_pages) {
-			u32 val;
 			/*
 			 * TODO: Add code to discard a page table and
 			 * replace it with a section
 			 */
 			chunk = PGDIR_SIZE;
-			val = phys_addr | pmd_flags;
+			flags = pmd_flags;
 			if (!maptype_is_compatible(map_type, MAP_FAULT))
-				val |= PMD_TYPE_SECT;
-			// TODO break-before-make missing
-			set_pte(pgd, val);
-			dma_flush_range(pgd, sizeof(*pgd));
+				flags |= PMD_TYPE_SECT;
+			set_pte_range(1, pgd, phys_addr, 1, flags, mmu_on);
 		} else {
 			unsigned int num_ptes;
 			u32 *table = NULL;
-			unsigned int i, level;
+			unsigned int level;
 			u32 *pte;
 			/*
 			 * We only want to cover pages up until next
@@ -401,23 +412,14 @@ static void __arch_remap_range(void *_virt_addr, phys_addr_t phys_addr, size_t s
 				 * and create a new page table for it
 				 */
 				table = arm_create_pte(virt_addr, phys_addr,
-						       pmd_flags_to_pte(*pgd));
+						       pmd_flags_to_pte(*pgd), mmu_on);
 				pte = find_pte(ttb, virt_addr, NULL);
 			}
 
-			for (i = 0; i < num_ptes; i++) {
-				u32 val;
-
-				val = phys_addr + i * PAGE_SIZE;
-				val |= pte_flags;
-				if (!maptype_is_compatible(map_type, MAP_FAULT))
-					val |= PTE_TYPE_SMALL;
-
-				// TODO break-before-make missing
-				set_pte(&pte[i], val);
-			}
-
-			dma_flush_range(pte, num_ptes * sizeof(u32));
+			flags = pte_flags;
+			if (!maptype_is_compatible(map_type, MAP_FAULT))
+				flags |= PTE_TYPE_SMALL;
+			set_pte_range(2, pte, phys_addr, num_ptes, flags, mmu_on);
 		}
 
 		virt_addr += chunk;
@@ -461,6 +463,7 @@ static void early_create_sections(unsigned long first, unsigned long last,
 	unsigned long ttb_end = pgd_index(last) + 1;
 	unsigned int i, addr = first;
 
+	/* This always runs with MMU disabled, so just opencode the loop */
 	for (i = ttb_start; i < ttb_end; i++) {
 		set_pte(&ttb[i], addr | flags);
 		addr += PGDIR_SIZE;
@@ -475,13 +478,11 @@ static inline void early_create_flat_mapping(void)
 
 void *map_io_sections(unsigned long phys, void *_start, size_t size)
 {
-	unsigned long start = (unsigned long)_start, sec;
+	unsigned long start = (unsigned long)_start;
 	uint32_t *ttb = get_ttb();
 
-	for (sec = start; sec < start + size; sec += PGDIR_SIZE, phys += PGDIR_SIZE) {
-		// TODO break-before-make missing
-		set_pte(&ttb[pgd_index(sec)], phys | get_pmd_flags(MAP_UNCACHED));
-	}
+	set_pte_range(1, &ttb[pgd_index(start)], phys, size / PGDIR_SIZE,
+		      get_pmd_flags(MAP_UNCACHED), true);
 
 	dma_flush_range(ttb, 0x4000);
 	tlb_invalidate();
@@ -523,11 +524,11 @@ static void create_vector_table(unsigned long adr)
 		vectors = xmemalign(PAGE_SIZE, PAGE_SIZE);
 		pr_debug("Creating vector table, virt = 0x%p, phys = 0x%08lx\n",
 			 vectors, adr);
-		arm_create_pte(adr, adr, get_pte_flags(MAP_UNCACHED));
+
+		arm_create_pte(adr, adr, get_pte_flags(MAP_UNCACHED), true);
 		pte = find_pte(get_ttb(), adr, NULL);
-		// TODO break-before-make missing
-		set_pte(pte, (u32)vectors | PTE_TYPE_SMALL |
-			get_pte_flags(MAP_CACHED));
+		set_pte_range(2, pte, (u32)vectors, 1, PTE_TYPE_SMALL |
+			      get_pte_flags(MAP_CACHED), true);
 	}
 
 	arm_fixup_vectors();
-- 
2.39.5




  parent reply	other threads:[~2025-08-06 13:06 UTC|newest]

Thread overview: 24+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2025-08-06 12:36 [PATCH 00/22] ARM: mmu: refactor 32-bit and 64-bit code Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 01/22] ARM: mmu: introduce new maptype_t type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 02/22] ARM: mmu: compare only lowest 16 bits for map type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 03/22] ARM: mmu: prefix pre-MMU functions with early_ Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 04/22] ARM: mmu: panic when alloc_pte fails Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 05/22] ARM: mmu32: introduce new mmu_addr_t type Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 06/22] ARM: mmu: provide zero page control in PBL Ahmad Fatoum
2025-08-06 12:36 ` [PATCH 07/22] ARM: mmu: print map type as string Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 08/22] ARM: mmu64: rename create_sections to __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 09/22] ARM: mmu: move get_pte_attrs call into __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 10/22] ARM: mmu64: print debug message in __arch_remap_range Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 11/22] ARM: mmu: make force_pages a maptype_t flag Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 12/22] ARM: mmu64: move granule_size to the top of the file Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 13/22] ARM: mmu64: fix benign off-by-one in flush_cacheable_pages Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 14/22] ARM: mmu64: make flush_cacheable_pages less 64-bit dependent Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 15/22] ARM: mmu64: allow asserting last level page in __find_pte Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 16/22] ARM: mmu64: rename __find_pte to find_pte Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 17/22] ARM: mmu32: rework find_pte to have ARM64 find_pte semantics Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 18/22] ARM: mmu64: factor out flush_cacheable_pages for reusability Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 19/22] ARM: mmu32: flush only cacheable pages on remap Ahmad Fatoum
2025-08-06 12:37 ` Ahmad Fatoum [this message]
2025-08-06 12:37 ` [PATCH 21/22] ARM: mmu64: factor out set_pte_range helper Ahmad Fatoum
2025-08-06 12:37 ` [PATCH 22/22] ARM: mmu: define dma_alloc_writecombine in common code Ahmad Fatoum
2025-08-07  7:24 ` [PATCH 00/22] ARM: mmu: refactor 32-bit and 64-bit code Sascha Hauer

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20250806123714.2092620-21-a.fatoum@pengutronix.de \
    --to=a.fatoum@pengutronix.de \
    --cc=barebox@lists.infradead.org \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox