mail archive of the barebox mailing list
* [PATCH] ARM: mmu: fix cache flushing when replacing a section with a PTE
@ 2018-08-01  8:38 Lucas Stach
  2018-08-01  8:38 ` [PATCH 1/3] PCI: link PCI devices with potentially existing OF nodes Lucas Stach
                   ` (3 more replies)
  0 siblings, 4 replies; 7+ messages in thread
From: Lucas Stach @ 2018-08-01  8:38 UTC
  To: barebox

When replacing a section with a PTE, we must make sure that the newly
initialized PTE entries are flushed from the cache before changing the
entry in the TTB. Otherwise an L1 TLB miss causes the hardware page-table
walker to read a PTE with undefined content, resulting in equally
undefined behaviour.

Move all the necessary cache flushing to arm_create_pte(), to avoid any
caller getting this wrong in the future.
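
For reference, the ordering this enforces can be sketched as follows.
This is an illustrative fragment only (not the literal barebox code),
reusing the identifiers from the diff below; it assumes a CPU whose
table walker does not snoop the data cache:

static void split_section(u32 *ttb, u32 *table, unsigned long virt,
			  uint32_t flags)
{
	int i, ttb_idx = pgd_index(virt);

	/* 1. Fill the second-level table while nothing points at it yet. */
	for (i = 0; i < PTRS_PER_PTE; i++, virt += PAGE_SIZE)
		table[i] = virt | PTE_TYPE_SMALL | flags;

	/* 2. Clean the new PTEs to memory, so the hardware table walker
	 *    reads initialized entries instead of stale RAM content.
	 */
	dma_flush_range(table, PTRS_PER_PTE * sizeof(u32));

	/* 3. Only now publish the table in the first-level descriptor,
	 *    and clean that descriptor as well.
	 */
	ttb[ttb_idx] = (unsigned long)table | PMD_TYPE_TABLE;
	dma_flush_range(&ttb[ttb_idx], sizeof(u32));
}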

Fixes: e3e54c644180 ("ARM: mmu: Implement on-demand PTE allocation")
Signed-off-by: Lucas Stach <l.stach@pengutronix.de>
---
 arch/arm/cpu/mmu.c | 77 +++++++++++++++++++---------------------------
 1 file changed, 32 insertions(+), 45 deletions(-)

diff --git a/arch/arm/cpu/mmu.c b/arch/arm/cpu/mmu.c
index ae189ecdeb16..712c3930fb71 100644
--- a/arch/arm/cpu/mmu.c
+++ b/arch/arm/cpu/mmu.c
@@ -85,34 +85,6 @@ static void arm_mmu_not_initialized_error(void)
 	panic("MMU not initialized\n");
 }
 
-/*
- * Create a second level translation table for the given virtual address.
- * We initially create a flat uncached mapping on it.
- * Not yet exported, but may be later if someone finds use for it.
- */
-static u32 *arm_create_pte(unsigned long virt, uint32_t flags)
-{
-	u32 *table;
-	int i;
-
-	virt = ALIGN_DOWN(virt, PGDIR_SIZE);
-
-	table = xmemalign(PTRS_PER_PTE * sizeof(u32),
-			  PTRS_PER_PTE * sizeof(u32));
-
-	if (!ttb)
-		arm_mmu_not_initialized_error();
-
-	ttb[pgd_index(virt)] = (unsigned long)table | PMD_TYPE_TABLE;
-
-	for (i = 0; i < PTRS_PER_PTE; i++) {
-		table[i] = virt | PTE_TYPE_SMALL | flags;
-		virt += PAGE_SIZE;
-	}
-
-	return table;
-}
-
 static bool pgd_type_table(u32 pgd)
 {
 	return (pgd & PMD_TYPE_MASK) == PMD_TYPE_TABLE;
@@ -152,6 +124,38 @@ static void dma_inv_range(unsigned long start, unsigned long end)
 	__dma_inv_range(start, end);
 }
 
+/*
+ * Create a second level translation table for the given virtual address.
+ * We initially create a flat uncached mapping on it.
+ * Not yet exported, but may be later if someone finds use for it.
+ */
+static u32 *arm_create_pte(unsigned long virt, uint32_t flags)
+{
+	u32 *table;
+	int i, ttb_idx;
+
+	virt = ALIGN_DOWN(virt, PGDIR_SIZE);
+
+	table = xmemalign(PTRS_PER_PTE * sizeof(u32),
+			  PTRS_PER_PTE * sizeof(u32));
+
+	if (!ttb)
+		arm_mmu_not_initialized_error();
+
+	ttb_idx = pgd_index(virt);
+
+	for (i = 0; i < PTRS_PER_PTE; i++) {
+		table[i] = virt | PTE_TYPE_SMALL | flags;
+		virt += PAGE_SIZE;
+	}
+	dma_flush_range(table, PTRS_PER_PTE * sizeof(u32));
+
+	ttb[ttb_idx] = (unsigned long)table | PMD_TYPE_TABLE;
+	dma_flush_range(&ttb[ttb_idx], sizeof(u32));
+
+	return table;
+}
+
 int arch_remap_range(void *start, size_t size, unsigned flags)
 {
 	u32 addr = (u32)start;
@@ -227,12 +231,6 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
 				table = arm_create_pte(addr, pte_flags_cached);
 				pte = find_pte(addr);
 				BUG_ON(!pte);
-				/*
-				 * We just split this section and
-				 * modified it's Level 1 descriptor,
-				 * so it needs to be flushed.
-				 */
-				dma_flush_range(pgd, sizeof(*pgd));
 			}
 
 			for (i = 0; i < num_ptes; i++) {
@@ -240,17 +238,6 @@ int arch_remap_range(void *start, size_t size, unsigned flags)
 				pte[i] |= pte_flags | PTE_TYPE_SMALL;
 			}
 
-			if (table) {
-				/*
-				 * If we just created a new page
-				 * table, the whole table would have
-				 * to be flushed, not just PTEs that
-				 * we touched when re-mapping.
-				 */
-				pte = table;
-				num_ptes = PTRS_PER_PTE;
-			}
-
 			dma_flush_range(pte, num_ptes * sizeof(u32));
 		}
 
-- 
2.18.0



Thread overview: 7+ messages
2018-08-01  8:38 [PATCH] ARM: mmu: fix cache flushing when replacing a section with a PTE Lucas Stach
2018-08-01  8:38 ` [PATCH 1/3] PCI: link PCI devices with potentially existing OF nodes Lucas Stach
2018-08-08  7:28   ` Sascha Hauer
2018-08-01  8:38 ` [PATCH 2/3] pci: add quirk infrastructure Lucas Stach
2018-08-01  8:38 ` [PATCH 3/3] ARM: imx6: gw54xx: add fixup for PCIe switch Lucas Stach
2018-08-01  9:01 ` [PATCH] ARM: mmu: fix cache flushing when replacing a section with a PTE Lucas Stach
  -- strict thread matches above, loose matches on Subject: below --
2018-07-25 15:01 Lucas Stach
