mail archive of the barebox mailing list
* [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp
@ 2021-03-01 15:58 Jules Maselbas
  2021-03-01 15:58 ` [PATCH 2/5] kvx: Implement dcache invalidation primitive Jules Maselbas
                   ` (3 more replies)
  0 siblings, 4 replies; 14+ messages in thread
From: Jules Maselbas @ 2021-03-01 15:58 UTC (permalink / raw)
  To: barebox; +Cc: Yann Sionneau, Jules Maselbas, Ahmad Fatoum

Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
---
 arch/kvx/Kconfig              |  1 +
 arch/kvx/include/asm/setjmp.h | 15 +++++++
 arch/kvx/lib/Makefile         |  2 +-
 arch/kvx/lib/setjmp.S         | 85 +++++++++++++++++++++++++++++++++++
 4 files changed, 102 insertions(+), 1 deletion(-)
 create mode 100644 arch/kvx/include/asm/setjmp.h
 create mode 100644 arch/kvx/lib/setjmp.S

diff --git a/arch/kvx/Kconfig b/arch/kvx/Kconfig
index 3327021e1..8483ae6e6 100644
--- a/arch/kvx/Kconfig
+++ b/arch/kvx/Kconfig
@@ -11,6 +11,7 @@ config KVX
 	select ELF
 	select FLEXIBLE_BOOTARGS
 	select GENERIC_FIND_NEXT_BIT
+	select HAS_ARCH_SJLJ
 	select LIBFDT
 	select MFD_SYSCON
 	select OF_BAREBOX_DRIVERS
diff --git a/arch/kvx/include/asm/setjmp.h b/arch/kvx/include/asm/setjmp.h
new file mode 100644
index 000000000..3cfc698d9
--- /dev/null
+++ b/arch/kvx/include/asm/setjmp.h
@@ -0,0 +1,15 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: 2021 Jules Maselbas <jmaselbas@kalray.eu>, Kalray Inc. */
+
+#ifndef _ASM_KVX_SETJMP_H_
+#define _ASM_KVX_SETJMP_H_
+
+#include <linux/types.h>
+
+typedef u64 jmp_buf[22];
+
+int initjmp(jmp_buf jmp, void __noreturn (*func)(void), void *stack_top);
+int setjmp(jmp_buf jmp) __attribute__((returns_twice));
+void longjmp(jmp_buf jmp, int ret) __attribute__((noreturn));
+
+#endif /* _ASM_KVX_SETJMP_H_ */
diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
index 6e56462da..cee08b0fa 100644
--- a/arch/kvx/lib/Makefile
+++ b/arch/kvx/lib/Makefile
@@ -3,4 +3,4 @@
 # Copyright (C) 2019 Kalray Inc.
 #
 
-obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o
+obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o
diff --git a/arch/kvx/lib/setjmp.S b/arch/kvx/lib/setjmp.S
new file mode 100644
index 000000000..829299711
--- /dev/null
+++ b/arch/kvx/lib/setjmp.S
@@ -0,0 +1,85 @@
+/* SPDX-License-Identifier: LGPL-2.1 */
+/* SPDX-FileCopyrightText: 2021 Jules Maselbas <jmaselbas@kalray.eu>, Kalray Inc. */
+
+#define REG_SIZE 8
+
+#include <linux/linkage.h>
+
+/* jmp_buf layout:
+ * [0]  = $ra,  $sp,  $cs,  $r14,
+ * [4]  = $r20, $r21, $r22, $r23,
+ * [8]  = $r24, $r25, $r26, $r27,
+ * [12] = $r28, $r29, $r30, $r31,
+ * [16] = $r18, $r19,
+ * [18] = $lc,  $le,  $ls,  xxxx
+ */
+
+/**
+ * int initjmp(jmp_buf jmp, void __noreturn (*func)(void), void *stack_top);
+ */
+ENTRY(initjmp)
+	/* store $ra */
+	sd (0 * REG_SIZE)[$r0] = $r1
+	;;
+	/* store $sp */
+	sd (1 * REG_SIZE)[$r0] = $r2
+	make $r0 = 0
+	ret
+	;;
+ENDPROC(initjmp)
+
+/**
+ * int setjmp(jmp_buf jmp);
+ */
+ENTRY(setjmp)
+	sq (16 * REG_SIZE)[$r0] = $r18r19
+	get $r40 = $ra
+	copyd $r41 = $sp
+	;;
+	so (4 * REG_SIZE)[$r0] = $r20r21r22r23
+	get $r42 = $cs
+	copyd $r43 = $r14
+	;;
+	so (0 * REG_SIZE)[$r0] = $r40r41r42r43
+	get $r40 = $lc
+	;;
+	so (8 * REG_SIZE)[$r0] = $r24r25r26r27
+	get $r41 = $le
+	;;
+	so (12 * REG_SIZE)[$r0] = $r28r29r30r31
+	get $r42 = $ls
+	;;
+	so (18 * REG_SIZE)[$r0] = $r40r41r42r43
+	make $r0 = 0
+	ret
+	;;
+ENDPROC(setjmp)
+
+/**
+ * void longjmp(jmp_buf jmp, int ret);
+ */
+ENTRY(longjmp)
+	lo $r40r41r42r43 = (0 * REG_SIZE)[$r0]
+	;;
+	lo $r44r45r46r47 = (18 * REG_SIZE)[$r0]
+	set $ra = $r40
+	copyd $sp = $r41
+	;;
+	lo $r20r21r22r23 = (4 * REG_SIZE)[$r0]
+	set $cs = $r42
+	copyd $r14 = $r43
+	;;
+	lo $r24r25r26r27 = (8 * REG_SIZE)[$r0]
+	set $lc = $r44
+	;;
+	lo $r28r29r30r31 = (12 * REG_SIZE)[$r0]
+	set $le = $r45
+	;;
+	lq $r18r19 = (16 * REG_SIZE)[$r0]
+	set $ls = $r46
+	;;
+	/* According to the man page, if ret is equal to 0, then we should return 1 */
+	maxud $r0 = $r1, 1
+	ret
+	;;
+ENDPROC(longjmp)
-- 
2.17.1
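As a quick illustration of how these three primitives fit together (a sketch only, not part of the series: the worker_* names, the stack size and the lack of explicit stack-alignment handling are all made up for the example), a caller can run a function on its own stack roughly like this:

#include <common.h>
#include <asm/setjmp.h>

static jmp_buf main_ctx, worker_ctx;
static u64 worker_stack[1024];		/* stack grows downward */

static void __noreturn worker_entry(void)
{
	/* ... do some work on worker_stack ... */
	longjmp(main_ctx, 1);		/* switch back to the main context */
}

void run_worker_once(void)
{
	/* records func as $ra and the stack top as $sp in worker_ctx */
	initjmp(worker_ctx, worker_entry, &worker_stack[ARRAY_SIZE(worker_stack)]);

	if (setjmp(main_ctx) == 0)	/* 0 on the direct return ... */
		longjmp(worker_ctx, 1);	/* ... so this enters worker_entry() */
	/* reached once worker_entry() longjmps back; setjmp() returned 1 */
}

The zero return from initjmp() and the maxud in longjmp(), which forces a non-zero setjmp() return, follow the usual setjmp(3) contract.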




* [PATCH 2/5] kvx: Implement dcache invalidation primitive
  2021-03-01 15:58 [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp Jules Maselbas
@ 2021-03-01 15:58 ` Jules Maselbas
  2021-03-02  8:40   ` Ahmad Fatoum
  2021-03-01 15:58 ` [PATCH 3/5] kvx: Implement dma handling primitives Jules Maselbas
                   ` (2 subsequent siblings)
  3 siblings, 1 reply; 14+ messages in thread
From: Jules Maselbas @ 2021-03-01 15:58 UTC (permalink / raw)
  To: barebox; +Cc: Yann Sionneau, Jules Maselbas, Ahmad Fatoum

From: Yann Sionneau <ysionneau@kalray.eu>

Signed-off-by: Yann Sionneau <ysionneau@kalray.eu>
Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
---
 arch/kvx/Kconfig             |  1 +
 arch/kvx/include/asm/cache.h | 13 ++++++++++
 arch/kvx/lib/Makefile        |  2 +-
 arch/kvx/lib/cache.c         | 50 ++++++++++++++++++++++++++++++++++++
 4 files changed, 65 insertions(+), 1 deletion(-)
 create mode 100644 arch/kvx/lib/cache.c

diff --git a/arch/kvx/Kconfig b/arch/kvx/Kconfig
index 8483ae6e6..4e02613ec 100644
--- a/arch/kvx/Kconfig
+++ b/arch/kvx/Kconfig
@@ -12,6 +12,7 @@ config KVX
 	select FLEXIBLE_BOOTARGS
 	select GENERIC_FIND_NEXT_BIT
 	select HAS_ARCH_SJLJ
+	select HAS_CACHE
 	select LIBFDT
 	select MFD_SYSCON
 	select OF_BAREBOX_DRIVERS
diff --git a/arch/kvx/include/asm/cache.h b/arch/kvx/include/asm/cache.h
index 3be176725..efdbcc630 100644
--- a/arch/kvx/include/asm/cache.h
+++ b/arch/kvx/include/asm/cache.h
@@ -15,4 +15,17 @@ static inline void sync_caches_for_execution(void)
 	__builtin_kvx_barrier();
 }
 
+void kvx_dcache_invalidate_mem_area(uint64_t addr, int size);
+
+static inline void sync_dcache_icache(void)
+{
+	sync_caches_for_execution();
+}
+
+static inline void dcache_inval(void)
+{
+	__builtin_kvx_fence();
+	__builtin_kvx_dinval();
+}
+
 #endif /* __KVX_CACHE_H */
diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
index cee08b0fa..d271ebccf 100644
--- a/arch/kvx/lib/Makefile
+++ b/arch/kvx/lib/Makefile
@@ -3,4 +3,4 @@
 # Copyright (C) 2019 Kalray Inc.
 #
 
-obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o
+obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
diff --git a/arch/kvx/lib/cache.c b/arch/kvx/lib/cache.c
new file mode 100644
index 000000000..4e128891a
--- /dev/null
+++ b/arch/kvx/lib/cache.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc.
+
+#include <asm/cache.h>
+
+#define K1_DCACHE_REFILL         (12)
+#define K1_DCACHE_HWLOOP         (1)
+#define K1_DCACHE_REFILL_PERCENT (80)
+#define K1_DCACHE_LINE_SIZE    (64)
+#define K1_DCACHE_SIZE (128*1024)
+
+void kvx_dcache_invalidate_mem_area(uint64_t addr, int size)
+{
+	/* if the hwloop iteration cost is < K1_DCACHE_REFILL_PERCENT of a cache
+	 * refill, use a hwloop, otherwise invalidate the whole cache
+	 */
+	if (size <
+	(K1_DCACHE_REFILL_PERCENT * (K1_DCACHE_REFILL * K1_DCACHE_SIZE))
+			/ (100 * (K1_DCACHE_REFILL + K1_DCACHE_HWLOOP))) {
+		/* number of lines that must be invalidated */
+		int invalid_lines = ((addr + size) -
+					(addr & (~(K1_DCACHE_LINE_SIZE - 1))));
+
+		invalid_lines = invalid_lines / K1_DCACHE_LINE_SIZE
+				+ (0 != (invalid_lines % K1_DCACHE_LINE_SIZE));
+		if (__builtin_constant_p(invalid_lines) && invalid_lines <= 2) {
+			/* when inlining (and doing constant folding),
+			 *  gcc is able to unroll small loops
+			 */
+			int i;
+
+			for (i = 0; i < invalid_lines; i++) {
+				__builtin_kvx_dinvall((void *)(addr
+						+ i * K1_DCACHE_LINE_SIZE));
+			}
+		} else if (invalid_lines > 0) {
+			__asm__ __volatile__ (
+				"loopdo %1, 0f\n;;\n"
+				"dinvall 0[%0]\n"
+				"addd %0 = %0, %2\n;;\n"
+				"0:\n"
+				: "+r"(addr)
+				: "r" (invalid_lines),
+				"i" (K1_DCACHE_LINE_SIZE)
+				: "ls", "le", "lc", "memory");
+		}
+	} else {
+		__builtin_kvx_dinval();
+	}
+}
-- 
2.17.1
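For a feel of the threshold in kvx_dcache_invalidate_mem_area() (my arithmetic, not stated in the patch): with K1_DCACHE_REFILL = 12, K1_DCACHE_HWLOOP = 1, K1_DCACHE_SIZE = 128 KiB and the 80 % factor, the cut-off works out to 80 * 12 * 131072 / (100 * 13) ≈ 96792 bytes, i.e. roughly 94.5 KiB. A smaller buffer is invalidated line by line (up to roughly 1500 dinvall iterations at 64 bytes per line); anything larger falls through to a full dinval of the 128 KiB cache.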




* [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-01 15:58 [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp Jules Maselbas
  2021-03-01 15:58 ` [PATCH 2/5] kvx: Implement dcache invalidation primitive Jules Maselbas
@ 2021-03-01 15:58 ` Jules Maselbas
  2021-03-02  8:37   ` Ahmad Fatoum
  2021-03-01 15:58 ` [PATCH 4/5] kvx: Request enough privilege to boot Linux Jules Maselbas
  2021-03-01 15:58 ` [PATCH 5/5] kvx: lib: dtb: Remove unused variable Jules Maselbas
  3 siblings, 1 reply; 14+ messages in thread
From: Jules Maselbas @ 2021-03-01 15:58 UTC (permalink / raw)
  To: barebox; +Cc: Yann Sionneau, Jules Maselbas, Ahmad Fatoum

From: Yann Sionneau <ysionneau@kalray.eu>

Signed-off-by: Yann Sionneau <ysionneau@kalray.eu>
Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
---
 arch/kvx/Kconfig                |  1 +
 arch/kvx/include/asm/dma.h      | 35 +++++++++++++
 arch/kvx/include/asm/sys_arch.h |  3 ++
 arch/kvx/lib/Makefile           |  2 +-
 arch/kvx/lib/dma-default.c      | 91 +++++++++++++++++++++++++++++++++
 5 files changed, 131 insertions(+), 1 deletion(-)
 create mode 100644 arch/kvx/include/asm/dma.h
 create mode 100644 arch/kvx/lib/dma-default.c

diff --git a/arch/kvx/Kconfig b/arch/kvx/Kconfig
index 4e02613ec..093444088 100644
--- a/arch/kvx/Kconfig
+++ b/arch/kvx/Kconfig
@@ -13,6 +13,7 @@ config KVX
 	select GENERIC_FIND_NEXT_BIT
 	select HAS_ARCH_SJLJ
 	select HAS_CACHE
+	select HAS_DMA
 	select LIBFDT
 	select MFD_SYSCON
 	select OF_BAREBOX_DRIVERS
diff --git a/arch/kvx/include/asm/dma.h b/arch/kvx/include/asm/dma.h
new file mode 100644
index 000000000..79eed77af
--- /dev/null
+++ b/arch/kvx/include/asm/dma.h
@@ -0,0 +1,35 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/* SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc. */
+
+#ifndef __ASM_DMA_H
+#define __ASM_DMA_H
+
+#include <common.h>
+
+#define KVX_DDR_32BIT_RAM_WINDOW_BA	(0x80000000ULL)
+#define KVX_DDR_64BIT_RAM_WINDOW_BA	(0x100000000ULL)
+#define MAX_32BIT_ADDR			(0xffffffffULL)
+
+#define dma_alloc dma_alloc
+static inline void *dma_alloc(size_t size)
+{
+	return xmemalign(64, ALIGN(size, 64));
+}
+
+static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
+{
+	void *ret = xmemalign(PAGE_SIZE, size);
+
+	if (dma_handle)
+		*dma_handle = (dma_addr_t)(uintptr_t)ret;
+
+	return ret;
+}
+
+static inline void dma_free_coherent(void *mem, dma_addr_t dma_handle,
+				     size_t size)
+{
+	free(mem);
+}
+
+#endif /* __ASM_DMA_H */
diff --git a/arch/kvx/include/asm/sys_arch.h b/arch/kvx/include/asm/sys_arch.h
index 9df32c4e7..ce07a5598 100644
--- a/arch/kvx/include/asm/sys_arch.h
+++ b/arch/kvx/include/asm/sys_arch.h
@@ -11,6 +11,9 @@
 #define EXCEPTION_STRIDE	0x40
 #define EXCEPTION_ALIGNMENT	0x100
 
+#define kvx_cluster_id() ((int) \
+	((kvx_sfr_get(PCR) & KVX_SFR_PCR_CID_MASK) \
+					>> KVX_SFR_PCR_CID_SHIFT))
 #define KVX_SFR_START(__sfr_reg) \
 	(KVX_SFR_## __sfr_reg ## _SHIFT)
 
diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
index d271ebccf..c730e1c23 100644
--- a/arch/kvx/lib/Makefile
+++ b/arch/kvx/lib/Makefile
@@ -3,4 +3,4 @@
 # Copyright (C) 2019 Kalray Inc.
 #
 
-obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
+obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o dma-default.o
diff --git a/arch/kvx/lib/dma-default.c b/arch/kvx/lib/dma-default.c
new file mode 100644
index 000000000..755a8c66f
--- /dev/null
+++ b/arch/kvx/lib/dma-default.c
@@ -0,0 +1,91 @@
+// SPDX-License-Identifier: GPL-2.0-only
+// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc.
+
+#include <dma.h>
+#include <asm/barrier.h>
+#include <asm/io.h>
+#include <asm/cache.h>
+#include <asm/sfr.h>
+#include <asm/sys_arch.h>
+
+/*
+ * The implementation of arch should follow the following rules:
+ *		map		for_cpu		for_device	unmap
+ * TO_DEV	writeback	none		writeback	none
+ * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
+ * BIDIR	writeback	invalidate	writeback	invalidate
+ *
+ * (*) - only necessary if the CPU speculatively prefetches.
+ *
+ * (see https://lkml.org/lkml/2018/5/18/979)
+ */
+
+void dma_sync_single_for_device(dma_addr_t addr, size_t size,
+				enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+		kvx_dcache_invalidate_mem_area(addr, size);
+		break;
+	case DMA_TO_DEVICE:
+	case DMA_BIDIRECTIONAL:
+		/* allow device to read buffer written by CPU */
+		wmb();
+		break;
+	default:
+		BUG();
+	}
+}
+
+void dma_sync_single_for_cpu(dma_addr_t addr, size_t size,
+				enum dma_data_direction dir)
+{
+	switch (dir) {
+	case DMA_FROM_DEVICE:
+	case DMA_TO_DEVICE:
+		break;
+	case DMA_BIDIRECTIONAL:
+		kvx_dcache_invalidate_mem_area(addr, size);
+		break;
+	default:
+		BUG();
+	}
+}
+
+#define KVX_DDR_ALIAS_OFFSET \
+	(KVX_DDR_64BIT_RAM_WINDOW_BA - KVX_DDR_32BIT_RAM_WINDOW_BA)
+#define KVX_DDR_ALIAS_WINDOW \
+	(KVX_DDR_64BIT_RAM_WINDOW_BA + KVX_DDR_ALIAS_OFFSET)
+
+/* Local smem is aliased between 0 and 16MB */
+#define KVX_SMEM_LOCAL_ALIAS 0x1000000ULL
+
+dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
+			  enum dma_data_direction dir)
+{
+	uintptr_t addr = (uintptr_t) ptr;
+
+	dma_sync_single_for_device(addr, size, dir);
+
+	/* Local smem alias should never be used for dma */
+	if (addr < KVX_SMEM_LOCAL_ALIAS)
+		return addr + (1 + kvx_cluster_id()) * KVX_SMEM_LOCAL_ALIAS;
+
+	if (dev->dma_mask && addr <= dev->dma_mask)
+		return addr;
+
+	if (addr >= KVX_DDR_ALIAS_WINDOW)
+		return DMA_ERROR_CODE;
+
+	addr -= KVX_DDR_ALIAS_OFFSET;
+	if (dev->dma_mask && addr > dev->dma_mask)
+		return DMA_ERROR_CODE;
+
+	return addr;
+}
+
+void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
+		      enum dma_data_direction dir)
+{
+	dma_sync_single_for_cpu(addr, size, dir);
+}
-- 
2.17.1
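To make the intended use of these streaming primitives concrete (a sketch only, not part of the series: the device, its registers and every demo_* name are invented), a driver on this non-coherent port would wrap a receive buffer roughly like this:

#include <common.h>
#include <errno.h>
#include <dma.h>
#include <asm/io.h>

/* hypothetical fragment: receive one buffer from a DMA-capable device */
static int demo_rx_one(struct device_d *dev, void __iomem *regs,
		       void *buf, size_t len)
{
	dma_addr_t dma;

	/* invalidates the buffer and translates the CPU address to a bus address */
	dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
	if (dma == DMA_ERROR_CODE)
		return -EFAULT;

	writel((u32)dma, regs + 0x0);	/* made-up buffer-address register */
	writel(len, regs + 0x4);	/* made-up length register */
	/* ... poll the made-up status register until the transfer completes ... */

	/* a no-op for DMA_FROM_DEVICE in this patch: the invalidate already
	 * happened in dma_map_single(), before the device wrote the buffer */
	dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);

	return 0;
}

The address translation in dma_map_single() is what makes the bus address differ from the CPU address: as I read the patch, a buffer in the 64-bit DDR window, say at 0x1_2000_0000, handed to a device with a 32-bit dma_mask comes back shifted down by KVX_DDR_ALIAS_OFFSET to 0xa000_0000, and a buffer inside the 16 MiB local-SMEM alias is rebased onto the cluster's global SMEM address.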




* [PATCH 4/5] kvx: Request enough privilege to boot Linux
  2021-03-01 15:58 [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp Jules Maselbas
  2021-03-01 15:58 ` [PATCH 2/5] kvx: Implement dcache invalidation primitive Jules Maselbas
  2021-03-01 15:58 ` [PATCH 3/5] kvx: Implement dma handling primitives Jules Maselbas
@ 2021-03-01 15:58 ` Jules Maselbas
  2021-03-01 15:58 ` [PATCH 5/5] kvx: lib: dtb: Remove unused variable Jules Maselbas
  3 siblings, 0 replies; 14+ messages in thread
From: Jules Maselbas @ 2021-03-01 15:58 UTC (permalink / raw)
  To: barebox; +Cc: Yann Sionneau, Jules Maselbas, Ahmad Fatoum

At startup barebox must take all the privileges that will be
requested by Linux.

Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
---
 arch/kvx/cpu/start.S             |  4 ++++
 arch/kvx/include/asm/privilege.h | 16 ++++++++++++++++
 2 files changed, 20 insertions(+)

diff --git a/arch/kvx/cpu/start.S b/arch/kvx/cpu/start.S
index d90272c71..a647e8a51 100644
--- a/arch/kvx/cpu/start.S
+++ b/arch/kvx/cpu/start.S
@@ -115,6 +115,10 @@ ENDPROC(kvx_start)
 	wfxl $psow = $r21 ;\
 	;; ;\
 	wfxm $psow = $r22 ;\
+	;; ;\
+	make $r21 = DO_WFXL_VALUE_##__pl ;\
+	;; ;\
+	wfxl $dow = $r21 ;\
 	;;
 
 /**
diff --git a/arch/kvx/include/asm/privilege.h b/arch/kvx/include/asm/privilege.h
index 36b9ade49..f183b24d4 100644
--- a/arch/kvx/include/asm/privilege.h
+++ b/arch/kvx/include/asm/privilege.h
@@ -113,6 +113,21 @@
 #define ITO_WFXM_VALUE_PL_CUR_PLUS_1	ITO_WFXM_VALUE(PL_CUR_PLUS_1)
 #define ITO_WFXM_VALUE_PL_CUR		ITO_WFXM_VALUE(PL_CUR)
 
+/**
+ * Debug Owner configuration
+ */
+
+#define DO_WFXL_OWN(__field, __pl) \
+	SFR_SET_VAL_WFXL(DO, __field, __pl)
+
+#define DO_WFXL_VALUE(__pl) (DO_WFXL_OWN(B0, __pl) | \
+			     DO_WFXL_OWN(B1, __pl) | \
+			     DO_WFXL_OWN(W0, __pl) | \
+			     DO_WFXL_OWN(W1, __pl))
+
+#define DO_WFXL_VALUE_PL_CUR_PLUS_1     DO_WFXL_VALUE(PL_CUR_PLUS_1)
+#define DO_WFXL_VALUE_PL_CUR            DO_WFXL_VALUE(PL_CUR)
+
 /**
  * Misc owner configuration
  */
@@ -160,6 +175,7 @@
 					 PSO_WFXL_OWN(IE, __pl) | \
 					 PSO_WFXL_OWN(HLE, __pl) | \
 					 PSO_WFXL_OWN(SRE, __pl) | \
+					 PSO_WFXL_OWN(DAUS, __pl) | \
 					 PSO_WFXL_OWN(ICE, __pl) | \
 					 PSO_WFXL_OWN(USE, __pl) | \
 					 PSO_WFXL_OWN(DCE, __pl) | \
-- 
2.17.1




* [PATCH 5/5] kvx: lib: dtb: Remove unused variable
  2021-03-01 15:58 [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp Jules Maselbas
                   ` (2 preceding siblings ...)
  2021-03-01 15:58 ` [PATCH 4/5] kvx: Request enough privilege to boot Linux Jules Maselbas
@ 2021-03-01 15:58 ` Jules Maselbas
  3 siblings, 0 replies; 14+ messages in thread
From: Jules Maselbas @ 2021-03-01 15:58 UTC (permalink / raw)
  To: barebox; +Cc: Yann Sionneau, Jules Maselbas, Ahmad Fatoum

Local variables `root` and `ret` are not used anymore, remove them.

Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
---
 arch/kvx/lib/dtb.c | 3 ---
 1 file changed, 3 deletions(-)

diff --git a/arch/kvx/lib/dtb.c b/arch/kvx/lib/dtb.c
index 54ffddaf0..09898017c 100644
--- a/arch/kvx/lib/dtb.c
+++ b/arch/kvx/lib/dtb.c
@@ -9,9 +9,6 @@
 
 static int of_kvx_init(void)
 {
-	int ret;
-	struct device_node *root;
-
 	barebox_register_fdt(boot_dtb);
 
 	return 0;
-- 
2.17.1




* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-01 15:58 ` [PATCH 3/5] kvx: Implement dma handling primitives Jules Maselbas
@ 2021-03-02  8:37   ` Ahmad Fatoum
  2021-03-02  8:44     ` Ahmad Fatoum
  2021-03-02 10:14     ` Lucas Stach
  0 siblings, 2 replies; 14+ messages in thread
From: Ahmad Fatoum @ 2021-03-02  8:37 UTC (permalink / raw)
  To: Jules Maselbas, barebox; +Cc: Yann Sionneau

Hello Jules, Yann,

On 01.03.21 16:58, Jules Maselbas wrote:
> From: Yann Sionneau <ysionneau@kalray.eu>

Some comments inline. I am not a cache coherency expert, so take
it with a grain of salt.

> 
> Signed-off-by: Yann Sionneau <ysionneau@kalray.eu>
> Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
> ---

> --- /dev/null
> +++ b/arch/kvx/include/asm/dma.h
> @@ -0,0 +1,35 @@
> +/* SPDX-License-Identifier: GPL-2.0-only */
> +/* SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc. */
> +
> +#ifndef __ASM_DMA_H
> +#define __ASM_DMA_H
> +
> +#include <common.h>
> +
> +#define KVX_DDR_32BIT_RAM_WINDOW_BA	(0x80000000ULL)
> +#define KVX_DDR_64BIT_RAM_WINDOW_BA	(0x100000000ULL)
> +#define MAX_32BIT_ADDR			(0xffffffffULL)
> +
> +#define dma_alloc dma_alloc
> +static inline void *dma_alloc(size_t size)
> +{
> +	return xmemalign(64, ALIGN(size, 64));
> +}
> +
> +static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
> +{
> +	void *ret = xmemalign(PAGE_SIZE, size);
> +
> +	if (dma_handle)
> +		*dma_handle = (dma_addr_t)(uintptr_t)ret;
> +
> +	return ret;
> +}

This would imply that the CPU barebox is booting on is coherent with all
devices that barebox needs to access. Is that the case?

(See below)

> +
> +static inline void dma_free_coherent(void *mem, dma_addr_t dma_handle,
> +				     size_t size)
> +{
> +	free(mem);
> +}
> +
> +#endif /* __ASM_DMA_H */
> diff --git a/arch/kvx/include/asm/sys_arch.h b/arch/kvx/include/asm/sys_arch.h
> index 9df32c4e7..ce07a5598 100644
> --- a/arch/kvx/include/asm/sys_arch.h
> +++ b/arch/kvx/include/asm/sys_arch.h
> @@ -11,6 +11,9 @@
>  #define EXCEPTION_STRIDE	0x40
>  #define EXCEPTION_ALIGNMENT	0x100
>  
> +#define kvx_cluster_id() ((int) \
> +	((kvx_sfr_get(PCR) & KVX_SFR_PCR_CID_MASK) \
> +					>> KVX_SFR_PCR_CID_SHIFT))
>  #define KVX_SFR_START(__sfr_reg) \
>  	(KVX_SFR_## __sfr_reg ## _SHIFT)
>  
> diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
> index d271ebccf..c730e1c23 100644
> --- a/arch/kvx/lib/Makefile
> +++ b/arch/kvx/lib/Makefile
> @@ -3,4 +3,4 @@
>  # Copyright (C) 2019 Kalray Inc.
>  #
>  
> -obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
> +obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o dma-default.o
> diff --git a/arch/kvx/lib/dma-default.c b/arch/kvx/lib/dma-default.c
> new file mode 100644
> index 000000000..755a8c66f
> --- /dev/null
> +++ b/arch/kvx/lib/dma-default.c
> @@ -0,0 +1,91 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc.
> +
> +#include <dma.h>
> +#include <asm/barrier.h>
> +#include <asm/io.h>
> +#include <asm/cache.h>
> +#include <asm/sfr.h>
> +#include <asm/sys_arch.h>
> +
> +/*
> + * The implementation of arch should follow the following rules:
> + *		map		for_cpu		for_device	unmap
> + * TO_DEV	writeback	none		writeback	none
> + * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
> + * BIDIR	writeback	invalidate	writeback	invalidate
> + *
> + * (*) - only necessary if the CPU speculatively prefetches.
> + *
> + * (see https://lkml.org/lkml/2018/5/18/979)
> + */
> +
> +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
> +				enum dma_data_direction dir)
> +{
> +	switch (dir) {
> +	case DMA_FROM_DEVICE:
> +		kvx_dcache_invalidate_mem_area(addr, size);
> +		break;
> +	case DMA_TO_DEVICE:
> +	case DMA_BIDIRECTIONAL:
> +		/* allow device to read buffer written by CPU */
> +		wmb();

If the interconnect was indeed coherent, like dma_alloc_coherent
above hints, you wouldn't need any barriers here..?

> +		break;
> +	default:
> +		BUG();
> +	}
> +}
> +
> +void dma_sync_single_for_cpu(dma_addr_t addr, size_t size,
> +				enum dma_data_direction dir)
> +{
> +	switch (dir) {
> +	case DMA_FROM_DEVICE:
> +	case DMA_TO_DEVICE:
> +		break;
> +	case DMA_BIDIRECTIONAL:
> +		kvx_dcache_invalidate_mem_area(addr, size);
> +		break;
> +	default:
> +		BUG();
> +	}
> +}
> +
> +#define KVX_DDR_ALIAS_OFFSET \
> +	(KVX_DDR_64BIT_RAM_WINDOW_BA - KVX_DDR_32BIT_RAM_WINDOW_BA)
> +#define KVX_DDR_ALIAS_WINDOW \
> +	(KVX_DDR_64BIT_RAM_WINDOW_BA + KVX_DDR_ALIAS_OFFSET)
> +
> +/* Local smem is aliased between 0 and 16MB */
> +#define KVX_SMEM_LOCAL_ALIAS 0x1000000ULL
> +
> +dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
> +			  enum dma_data_direction dir)
> +{
> +	uintptr_t addr = (uintptr_t) ptr;
> +
> +	dma_sync_single_for_device(addr, size, dir);
> +
> +	/* Local smem alias should never be used for dma */
> +	if (addr < KVX_SMEM_LOCAL_ALIAS)
> +		return addr + (1 + kvx_cluster_id()) * KVX_SMEM_LOCAL_ALIAS;
> +
> +	if (dev->dma_mask && addr <= dev->dma_mask)
> +		return addr;
> +
> +	if (addr >= KVX_DDR_ALIAS_WINDOW)
> +		return DMA_ERROR_CODE;
> +
> +	addr -= KVX_DDR_ALIAS_OFFSET;
> +	if (dev->dma_mask && addr > dev->dma_mask)
> +		return DMA_ERROR_CODE;
> +
> +	return addr;
> +}
> +
> +void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
> +		      enum dma_data_direction dir)
> +{
> +	dma_sync_single_for_cpu(addr, size, dir);
> +}
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |


* Re: [PATCH 2/5] kvx: Implement dcache invalidation primitive
  2021-03-01 15:58 ` [PATCH 2/5] kvx: Implement dcache invalidation primitive Jules Maselbas
@ 2021-03-02  8:40   ` Ahmad Fatoum
  2021-03-02 11:44     ` Jules Maselbas
  0 siblings, 1 reply; 14+ messages in thread
From: Ahmad Fatoum @ 2021-03-02  8:40 UTC (permalink / raw)
  To: Jules Maselbas, barebox; +Cc: Yann Sionneau

Hello,

On 01.03.21 16:58, Jules Maselbas wrote:
> From: Yann Sionneau <ysionneau@kalray.eu>
> 
> Signed-off-by: Yann Sionneau <ysionneau@kalray.eu>
> Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
> ---
>  arch/kvx/Kconfig             |  1 +
>  arch/kvx/include/asm/cache.h | 13 ++++++++++
>  arch/kvx/lib/Makefile        |  2 +-
>  arch/kvx/lib/cache.c         | 50 ++++++++++++++++++++++++++++++++++++
>  4 files changed, 65 insertions(+), 1 deletion(-)
>  create mode 100644 arch/kvx/lib/cache.c
> 
> diff --git a/arch/kvx/Kconfig b/arch/kvx/Kconfig
> index 8483ae6e6..4e02613ec 100644
> --- a/arch/kvx/Kconfig
> +++ b/arch/kvx/Kconfig
> @@ -12,6 +12,7 @@ config KVX
>  	select FLEXIBLE_BOOTARGS
>  	select GENERIC_FIND_NEXT_BIT
>  	select HAS_ARCH_SJLJ
> +	select HAS_CACHE
>  	select LIBFDT
>  	select MFD_SYSCON
>  	select OF_BAREBOX_DRIVERS
> diff --git a/arch/kvx/include/asm/cache.h b/arch/kvx/include/asm/cache.h
> index 3be176725..efdbcc630 100644
> --- a/arch/kvx/include/asm/cache.h
> +++ b/arch/kvx/include/asm/cache.h
> @@ -15,4 +15,17 @@ static inline void sync_caches_for_execution(void)
>  	__builtin_kvx_barrier();
>  }
>  
> +void kvx_dcache_invalidate_mem_area(uint64_t addr, int size);
> +
> +static inline void sync_dcache_icache(void)
> +{
> +	sync_caches_for_execution();
> +}
> +
> +static inline void dcache_inval(void)
> +{
> +	__builtin_kvx_fence();
> +	__builtin_kvx_dinval();
> +}
> +
>  #endif /* __KVX_CACHE_H */
> diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
> index cee08b0fa..d271ebccf 100644
> --- a/arch/kvx/lib/Makefile
> +++ b/arch/kvx/lib/Makefile
> @@ -3,4 +3,4 @@
>  # Copyright (C) 2019 Kalray Inc.
>  #
>  
> -obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o
> +obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
> diff --git a/arch/kvx/lib/cache.c b/arch/kvx/lib/cache.c
> new file mode 100644
> index 000000000..4e128891a
> --- /dev/null
> +++ b/arch/kvx/lib/cache.c
> @@ -0,0 +1,50 @@
> +// SPDX-License-Identifier: GPL-2.0-only
> +// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc.
> +
> +#include <asm/cache.h>
> +
> +#define K1_DCACHE_REFILL         (12)
> +#define K1_DCACHE_HWLOOP         (1)
> +#define K1_DCACHE_REFILL_PERCENT (80)
> +#define K1_DCACHE_LINE_SIZE    (64)
> +#define K1_DCACHE_SIZE (128*1024)
> +
> +void kvx_dcache_invalidate_mem_area(uint64_t addr, int size)
> +{
> +	/* if hwloop iterations cost < _K1_DCACHE_REFILL_PERCENT cache refill,
> +	 * use hwloop, otherwise invalid the whole cache
> +	 */
> +	if (size <
> +	(K1_DCACHE_REFILL_PERCENT * (K1_DCACHE_REFILL * K1_DCACHE_SIZE))
> +			/ (100 * (K1_DCACHE_REFILL + K1_DCACHE_HWLOOP))) {
> +		/* number of lines that must be invalidated */
> +		int invalid_lines = ((addr + size) -
> +					(addr & (~(K1_DCACHE_LINE_SIZE - 1))));
> +
> +		invalid_lines = invalid_lines / K1_DCACHE_LINE_SIZE
> +				+ (0 != (invalid_lines % K1_DCACHE_LINE_SIZE));
> +		if (__builtin_constant_p(invalid_lines) && invalid_lines <= 2) {

Note that currently this will always be false, because of the lack of link-time
optimization. You could split the check away into the header and leave the
juicy parts here if you want this optimization.

> +			/* when inlining (and doing constant folding),
> +			 *  gcc is able to unroll small loops
> +			 */
> +			int i;
> +
> +			for (i = 0; i < invalid_lines; i++) {
> +				__builtin_kvx_dinvall((void *)(addr
> +						+ i * K1_DCACHE_LINE_SIZE));
> +			}
> +		} else if (invalid_lines > 0) {
> +			__asm__ __volatile__ (
> +				"loopdo %1, 0f\n;;\n"
> +				"dinvall 0[%0]\n"
> +				"addd %0 = %0, %2\n;;\n"
> +				"0:\n"
> +				: "+r"(addr)
> +				: "r" (invalid_lines),
> +				"i" (K1_DCACHE_LINE_SIZE)
> +				: "ls", "le", "lc", "memory");
> +		}
> +	} else {
> +		__builtin_kvx_dinval();
> +	}
> +}
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |
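For what it's worth, the split suggested above could look roughly like this (a sketch, not something posted in the thread; it assumes K1_DCACHE_LINE_SIZE moves into the header and the out-of-line body is renamed with a leading underscore):

/* in arch/kvx/include/asm/cache.h */
#define K1_DCACHE_LINE_SIZE	64	/* moved here from cache.c */

void __kvx_dcache_invalidate_mem_area(uint64_t addr, int size);

static inline void kvx_dcache_invalidate_mem_area(uint64_t addr, int size)
{
	/* visible to the compiler at every call site, so a small constant
	 * size really does fold down to one or two dinvall instructions */
	if (__builtin_constant_p(size) && size > 0 && size <= K1_DCACHE_LINE_SIZE) {
		/* a buffer no larger than a line spans at most two lines */
		__builtin_kvx_dinvall((void *)addr);
		__builtin_kvx_dinvall((void *)(addr + size - 1));
	} else {
		__kvx_dcache_invalidate_mem_area(addr, size);
	}
}

with cache.c keeping the hwloop/full-dinval logic unchanged in the renamed function.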


* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-02  8:37   ` Ahmad Fatoum
@ 2021-03-02  8:44     ` Ahmad Fatoum
  2021-03-02 10:14     ` Lucas Stach
  1 sibling, 0 replies; 14+ messages in thread
From: Ahmad Fatoum @ 2021-03-02  8:44 UTC (permalink / raw)
  To: Jules Maselbas, barebox; +Cc: Yann Sionneau


On 02.03.21 09:37, Ahmad Fatoum wrote:
> Hello Jules, Yann,
> 
>> +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
>> +				enum dma_data_direction dir)
>> +{
>> +	switch (dir) {
>> +	case DMA_FROM_DEVICE:
>> +		kvx_dcache_invalidate_mem_area(addr, size);
>> +		break;
>> +	case DMA_TO_DEVICE:
>> +	case DMA_BIDIRECTIONAL:
>> +		/* allow device to read buffer written by CPU */
>> +		wmb();
> 
> If the interconnect was indeed coherent, like dma_alloc_coherent
> above hints, you wouldn't need any barriers here..?

Wrong context. You wouldn't need the cache invalidation* above.

> 
>> +		break;
>> +	default:
>> +		BUG();
>> +	}
>> +}
>> +
>> +void dma_sync_single_for_cpu(dma_addr_t addr, size_t size,
>> +				enum dma_data_direction dir)
>> +{
>> +	switch (dir) {
>> +	case DMA_FROM_DEVICE:
>> +	case DMA_TO_DEVICE:
>> +		break;
>> +	case DMA_BIDIRECTIONAL:
>> +		kvx_dcache_invalidate_mem_area(addr, size);
>> +		break;
>> +	default:
>> +		BUG();
>> +	}
>> +}
>> +
>> +#define KVX_DDR_ALIAS_OFFSET \
>> +	(KVX_DDR_64BIT_RAM_WINDOW_BA - KVX_DDR_32BIT_RAM_WINDOW_BA)
>> +#define KVX_DDR_ALIAS_WINDOW \
>> +	(KVX_DDR_64BIT_RAM_WINDOW_BA + KVX_DDR_ALIAS_OFFSET)
>> +
>> +/* Local smem is aliased between 0 and 16MB */
>> +#define KVX_SMEM_LOCAL_ALIAS 0x1000000ULL
>> +
>> +dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
>> +			  enum dma_data_direction dir)
>> +{
>> +	uintptr_t addr = (uintptr_t) ptr;
>> +
>> +	dma_sync_single_for_device(addr, size, dir);
>> +
>> +	/* Local smem alias should never be used for dma */
>> +	if (addr < KVX_SMEM_LOCAL_ALIAS)
>> +		return addr + (1 + kvx_cluster_id()) * KVX_SMEM_LOCAL_ALIAS;
>> +
>> +	if (dev->dma_mask && addr <= dev->dma_mask)
>> +		return addr;
>> +
>> +	if (addr >= KVX_DDR_ALIAS_WINDOW)
>> +		return DMA_ERROR_CODE;
>> +
>> +	addr -= KVX_DDR_ALIAS_OFFSET;
>> +	if (dev->dma_mask && addr > dev->dma_mask)
>> +		return DMA_ERROR_CODE;
>> +
>> +	return addr;
>> +}
>> +
>> +void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
>> +		      enum dma_data_direction dir)
>> +{
>> +	dma_sync_single_for_cpu(addr, size, dir);
>> +}
>>
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |


* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-02  8:37   ` Ahmad Fatoum
  2021-03-02  8:44     ` Ahmad Fatoum
@ 2021-03-02 10:14     ` Lucas Stach
  2021-03-02 10:58       ` Jules Maselbas
  1 sibling, 1 reply; 14+ messages in thread
From: Lucas Stach @ 2021-03-02 10:14 UTC (permalink / raw)
  To: Ahmad Fatoum, Jules Maselbas, barebox; +Cc: Yann Sionneau

Am Dienstag, dem 02.03.2021 um 09:37 +0100 schrieb Ahmad Fatoum:
> Hello Jules, Yann,
> 
> On 01.03.21 16:58, Jules Maselbas wrote:
> > From: Yann Sionneau <ysionneau@kalray.eu>
> 
> Some comments inline. I am not a cache cohereny expert, so take
> it with a grain of salt.
> 
> > 
> > Signed-off-by: Yann Sionneau <ysionneau@kalray.eu>
> > Signed-off-by: Jules Maselbas <jmaselbas@kalray.eu>
> > ---
> 
> > --- /dev/null
> > +++ b/arch/kvx/include/asm/dma.h
> > @@ -0,0 +1,35 @@
> > +/* SPDX-License-Identifier: GPL-2.0-only */
> > +/* SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc. */
> > +
> > +#ifndef __ASM_DMA_H
> > +#define __ASM_DMA_H
> > +
> > +#include <common.h>
> > +
> > +#define KVX_DDR_32BIT_RAM_WINDOW_BA	(0x80000000ULL)
> > +#define KVX_DDR_64BIT_RAM_WINDOW_BA	(0x100000000ULL)
> > +#define MAX_32BIT_ADDR			(0xffffffffULL)
> > +
> > +#define dma_alloc dma_alloc
> > +static inline void *dma_alloc(size_t size)
> > +{
> > +	return xmemalign(64, ALIGN(size, 64));
> > +}
> > +
> > +static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
> > +{
> > +	void *ret = xmemalign(PAGE_SIZE, size);
> > +
> > +	if (dma_handle)
> > +		*dma_handle = (dma_addr_t)(uintptr_t)ret;
> > +
> > +	return ret;
> > +}
> 
> This would imply that the CPU barebox is booting is coherent with all
> 
> devices that barebox needs to access. Is that the case?
> 
> (See below)
> 
> > +
> > +static inline void dma_free_coherent(void *mem, dma_addr_t dma_handle,
> > +				     size_t size)
> > +{
> > +	free(mem);
> > +}
> > +
> > +#endif /* __ASM_DMA_H */
> > diff --git a/arch/kvx/include/asm/sys_arch.h b/arch/kvx/include/asm/sys_arch.h
> > index 9df32c4e7..ce07a5598 100644
> > --- a/arch/kvx/include/asm/sys_arch.h
> > +++ b/arch/kvx/include/asm/sys_arch.h
> > @@ -11,6 +11,9 @@
> >  #define EXCEPTION_STRIDE	0x40
> >  #define EXCEPTION_ALIGNMENT	0x100
> >  
> > +#define kvx_cluster_id() ((int) \
> > +	((kvx_sfr_get(PCR) & KVX_SFR_PCR_CID_MASK) \
> > +					>> KVX_SFR_PCR_CID_SHIFT))
> >  #define KVX_SFR_START(__sfr_reg) \
> >  	(KVX_SFR_## __sfr_reg ## _SHIFT)
> >  
> > diff --git a/arch/kvx/lib/Makefile b/arch/kvx/lib/Makefile
> > index d271ebccf..c730e1c23 100644
> > --- a/arch/kvx/lib/Makefile
> > +++ b/arch/kvx/lib/Makefile
> > @@ -3,4 +3,4 @@
> >  # Copyright (C) 2019 Kalray Inc.
> >  #
> >  
> > -obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o
> > +obj-y	+= cpuinfo.o board.o dtb.o poweroff.o bootm.o setjmp.o cache.o dma-default.o
> > diff --git a/arch/kvx/lib/dma-default.c b/arch/kvx/lib/dma-default.c
> > new file mode 100644
> > index 000000000..755a8c66f
> > --- /dev/null
> > +++ b/arch/kvx/lib/dma-default.c
> > @@ -0,0 +1,91 @@
> > +// SPDX-License-Identifier: GPL-2.0-only
> > +// SPDX-FileCopyrightText: 2021 Yann Sionneau <ysionneau@kalray.eu>, Kalray Inc.
> > +
> > +#include <dma.h>
> > +#include <asm/barrier.h>
> > +#include <asm/io.h>
> > +#include <asm/cache.h>
> > +#include <asm/sfr.h>
> > +#include <asm/sys_arch.h>
> > +
> > +/*
> > + * The implementation of arch should follow the following rules:
> > + *		map		for_cpu		for_device	unmap
> > + * TO_DEV	writeback	none		writeback	none
> > + * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
> > + * BIDIR	writeback	invalidate	writeback	invalidate
> > + *
> > + * (*) - only necessary if the CPU speculatively prefetches.
> > + *
> > + * (see https://lkml.org/lkml/2018/5/18/979)
> > + */
> > +
> > +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
> > +				enum dma_data_direction dir)
> > +{
> > +	switch (dir) {
> > +	case DMA_FROM_DEVICE:
> > +		kvx_dcache_invalidate_mem_area(addr, size);

Why do you need to explicitly invalidate, but not flush? Even if the
CPU speculatively prefetches, the coherency protocol should make sure
to invalidate the speculatively loaded lines, right?

> > +		break;
> > +	case DMA_TO_DEVICE:
> > +	case DMA_BIDIRECTIONAL:
> > +		/* allow device to read buffer written by CPU */
> > +		wmb();
> 
> If the interconnect was indeed coherent, like dma_alloc_coherent
> above hints, you wouldn't need any barriers here..?

Coherency does not imply strict ordering, so the barriers are in fact
correct, as the CPU write buffers and/or the interconnect can still
change the ordering of the writes as seen by a remote observer.

> > +		break;
> > +	default:
> > +		BUG();
> > +	}
> > +}
> > +
> > +void dma_sync_single_for_cpu(dma_addr_t addr, size_t size,
> > +				enum dma_data_direction dir)
> > +{
> > +	switch (dir) {
> > +	case DMA_FROM_DEVICE:
> > +	case DMA_TO_DEVICE:
> > +		break;
> > +	case DMA_BIDIRECTIONAL:
> > +		kvx_dcache_invalidate_mem_area(addr, size);
> > +		break;
> > +	default:
> > +		BUG();
> > +	}
> > +}
> > +
> > +#define KVX_DDR_ALIAS_OFFSET \
> > +	(KVX_DDR_64BIT_RAM_WINDOW_BA - KVX_DDR_32BIT_RAM_WINDOW_BA)
> > +#define KVX_DDR_ALIAS_WINDOW \
> > +	(KVX_DDR_64BIT_RAM_WINDOW_BA + KVX_DDR_ALIAS_OFFSET)
> > +
> > +/* Local smem is aliased between 0 and 16MB */
> > +#define KVX_SMEM_LOCAL_ALIAS 0x1000000ULL
> > +
> > +dma_addr_t dma_map_single(struct device_d *dev, void *ptr, size_t size,
> > +			  enum dma_data_direction dir)
> > +{
> > +	uintptr_t addr = (uintptr_t) ptr;
> > +
> > +	dma_sync_single_for_device(addr, size, dir);
> > +
> > +	/* Local smem alias should never be used for dma */
> > +	if (addr < KVX_SMEM_LOCAL_ALIAS)
> > +		return addr + (1 + kvx_cluster_id()) * KVX_SMEM_LOCAL_ALIAS;
> > +
> > +	if (dev->dma_mask && addr <= dev->dma_mask)
> > +		return addr;
> > +
> > +	if (addr >= KVX_DDR_ALIAS_WINDOW)
> > +		return DMA_ERROR_CODE;
> > +
> > +	addr -= KVX_DDR_ALIAS_OFFSET;
> > +	if (dev->dma_mask && addr > dev->dma_mask)
> > +		return DMA_ERROR_CODE;
> > +
> > +	return addr;
> > +}
> > +
> > +void dma_unmap_single(struct device_d *dev, dma_addr_t addr, size_t size,
> > +		      enum dma_data_direction dir)
> > +{
> > +	dma_sync_single_for_cpu(addr, size, dir);
> > +}
> > 
> 




* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-02 10:14     ` Lucas Stach
@ 2021-03-02 10:58       ` Jules Maselbas
  2021-03-03  9:14         ` Lucas Stach
  0 siblings, 1 reply; 14+ messages in thread
From: Jules Maselbas @ 2021-03-02 10:58 UTC (permalink / raw)
  To: Lucas Stach; +Cc: Ahmad Fatoum, barebox, Yann Sionneau

Hi Lucas and Ahmad,

On Tue, Mar 02, 2021 at 11:14:09AM +0100, Lucas Stach wrote:
> Am Dienstag, dem 02.03.2021 um 09:37 +0100 schrieb Ahmad Fatoum:
> > Hello Jules, Yann,
> > 
> > On 01.03.21 16:58, Jules Maselbas wrote:
> > > From: Yann Sionneau <ysionneau@kalray.eu>
> > Some comments inline. I am not a cache cohereny expert, so take
> > it with a grain of salt.
> > 
> > > +static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
> > > +{
> > > +	void *ret = xmemalign(PAGE_SIZE, size);
> > > +
> > > +	if (dma_handle)
> > > +		*dma_handle = (dma_addr_t)(uintptr_t)ret;
> > > +
> > > +	return ret;
> > > +}
> > 
> > This would imply that the CPU barebox is booting is coherent with all
> > 
> > devices that barebox needs to access. Is that the case?
> > 
> > (See below)
> > 
This is bogus: memory is not coherent with all devices; that should be
handled by the MMU, which is currently not supported in our barebox port.
Using this can lead to coherency issues. We can either drop this
function, so that it leads to an error at link time, or add a call to
BUG() for a runtime error.

Right now we aren't using any driver that requires dma_alloc_coherent,
but we do use drivers that require dma_alloc and dma_map_single.

> > > +/*
> > > + * The implementation of arch should follow the following rules:
> > > + *		map		for_cpu		for_device	unmap
> > > + * TO_DEV	writeback	none		writeback	none
> > > + * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
> > > + * BIDIR	writeback	invalidate	writeback	invalidate
> > > + *
> > > + * (*) - only necessary if the CPU speculatively prefetches.
> > > + *
> > > + * (see https://lkml.org/lkml/2018/5/18/979)
> > > + */
> > > +
> > > +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
> > > +				enum dma_data_direction dir)
> > > +{
> > > +	switch (dir) {
> > > +	case DMA_FROM_DEVICE:
> > > +		kvx_dcache_invalidate_mem_area(addr, size);
> 
> Why do you need to explicitly invalidate, but not flush? Even if the
> CPU speculatively prefetches, the coherency protocol should make sure
> to invalidate the speculatively loaded lines, right?
Since we don't have coherent memory, here we need to invalidate the L1
dcache to let the CPU see the device's writes in memory.
Also, every write goes through the cache, so a flush is not required.

> 
> > > +		break;
> > > +	case DMA_TO_DEVICE:
> > > +	case DMA_BIDIRECTIONAL:
> > > +		/* allow device to read buffer written by CPU */
> > > +		wmb();
> > 
> > If the interconnect was indeed coherent, like dma_alloc_coherent
> > above hints, you wouldn't need any barriers here..?
> 
> Coherency does not imply strict ordering, so the barriers are in fact
> correct, as the CPU write buffers and/or the interconnect can still
> change the ordering of the writes as seen by a remote observer.


Best,
Jules 



* Re: [PATCH 2/5] kvx: Implement dcache invalidation primitive
  2021-03-02  8:40   ` Ahmad Fatoum
@ 2021-03-02 11:44     ` Jules Maselbas
  0 siblings, 0 replies; 14+ messages in thread
From: Jules Maselbas @ 2021-03-02 11:44 UTC (permalink / raw)
  To: Ahmad Fatoum; +Cc: barebox, Yann Sionneau

On Tue, Mar 02, 2021 at 09:40:50AM +0100, Ahmad Fatoum wrote:
> Hello,
> 
> > +
> > +void kvx_dcache_invalidate_mem_area(uint64_t addr, int size)
> > +{
> > +	/* if hwloop iterations cost < _K1_DCACHE_REFILL_PERCENT cache refill,
> > +	 * use hwloop, otherwise invalid the whole cache
> > +	 */
> > +	if (size <
> > +	(K1_DCACHE_REFILL_PERCENT * (K1_DCACHE_REFILL * K1_DCACHE_SIZE))
> > +			/ (100 * (K1_DCACHE_REFILL + K1_DCACHE_HWLOOP))) {
> > +		/* number of lines that must be invalidated */
> > +		int invalid_lines = ((addr + size) -
> > +					(addr & (~(K1_DCACHE_LINE_SIZE - 1))));
> > +
> > +		invalid_lines = invalid_lines / K1_DCACHE_LINE_SIZE
> > +				+ (0 != (invalid_lines % K1_DCACHE_LINE_SIZE));
> > +		if (__builtin_constant_p(invalid_lines) && invalid_lines <= 2) {
> 
> Note that currently this will always be false, because of lack of link time
> optimization. You could split this away the check into the header and leave the
> juicy parts here if you want to have this optimization.
> 
Yes, we can drop one branch; I am tempted to always invalidate the whole
cache and be done with it. I will send a new patch anyway.

> > +			/* when inlining (and doing constant folding),
> > +			 *  gcc is able to unroll small loops
> > +			 */
> > +			int i;
> > +
> > +			for (i = 0; i < invalid_lines; i++) {
> > +				__builtin_kvx_dinvall((void *)(addr
> > +						+ i * K1_DCACHE_LINE_SIZE));
> > +			}
> > +		} else if (invalid_lines > 0) {
> > +			__asm__ __volatile__ (
> > +				"loopdo %1, 0f\n;;\n"
> > +				"dinvall 0[%0]\n"
> > +				"addd %0 = %0, %2\n;;\n"
> > +				"0:\n"
> > +				: "+r"(addr)
> > +				: "r" (invalid_lines),
> > +				"i" (K1_DCACHE_LINE_SIZE)
> > +				: "ls", "le", "lc", "memory");
> > +		}
> > +	} else {
> > +		__builtin_kvx_dinval();
> > +	}
> > +}
> > 
> 
> -- 
> Pengutronix e.K.                           |                             |
> Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
> 31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
> Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |
> 



* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-02 10:58       ` Jules Maselbas
@ 2021-03-03  9:14         ` Lucas Stach
  2021-03-03  9:33           ` Yann Sionneau
  0 siblings, 1 reply; 14+ messages in thread
From: Lucas Stach @ 2021-03-03  9:14 UTC (permalink / raw)
  To: Jules Maselbas; +Cc: Ahmad Fatoum, barebox, Yann Sionneau

Hi Jules,

Am Dienstag, dem 02.03.2021 um 11:58 +0100 schrieb Jules Maselbas:
> Hi Lucas and Ahmad,
> 
> On Tue, Mar 02, 2021 at 11:14:09AM +0100, Lucas Stach wrote:
> > Am Dienstag, dem 02.03.2021 um 09:37 +0100 schrieb Ahmad Fatoum:
> > > Hello Jules, Yann,
> > > 
> > > On 01.03.21 16:58, Jules Maselbas wrote:
> > > > From: Yann Sionneau <ysionneau@kalray.eu>
> > > Some comments inline. I am not a cache cohereny expert, so take
> > > it with a grain of salt.
> > > 
> > > > +static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
> > > > +{
> > > > +	void *ret = xmemalign(PAGE_SIZE, size);
> > > > +
> > > > +	if (dma_handle)
> > > > +		*dma_handle = (dma_addr_t)(uintptr_t)ret;
> > > > +
> > > > +	return ret;
> > > > +}
> > > 
> > > This would imply that the CPU barebox is booting is coherent with all
> > > 
> > > devices that barebox needs to access. Is that the case?
> > > 
> > > (See below)
> > > 
> This is bogus, memory is not coherent with all devices, this should be
> handled by the mmu, which is currently not supported in our barebox port.
> Using this can lead to coherency issues. We can either drop this
> function, so that is leads to an error at link time, or add a call to
> BUG for a runtime error.
> 
> Right now we aren't using any driver that require dma_alloc_coherent,
> but we use drivers that requires dma_alloc and dma_map_single instead.

I would vote for a BUILD_BUG_ON_MSG in this function, so you get a
compile time error and you can state what needs to be done in order to
get rid of the failure.

> > > > +/*
> > > > + * The implementation of arch should follow the following rules:
> > > > + *		map		for_cpu		for_device	unmap
> > > > + * TO_DEV	writeback	none		writeback	none
> > > > + * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
> > > > + * BIDIR	writeback	invalidate	writeback	invalidate
> > > > + *
> > > > + * (*) - only necessary if the CPU speculatively prefetches.
> > > > + *
> > > > + * (see https://lkml.org/lkml/2018/5/18/979)
> > > > + */
> > > > +
> > > > +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
> > > > +				enum dma_data_direction dir)
> > > > +{
> > > > +	switch (dir) {
> > > > +	case DMA_FROM_DEVICE:
> > > > +		kvx_dcache_invalidate_mem_area(addr, size);
> > 
> > Why do you need to explicitly invalidate, but not flush? Even if the
> > CPU speculatively prefetches, the coherency protocol should make sure
> > to invalidate the speculatively loaded lines, right?
> Since we don't have a coherent memory, here we need to invalidate L1
> dcache to let the CPU see deivce's writes in memory.
> Also every write goes through the cache, flush is not required.

Ah, if all your caches are write-through, that makes sense. Can you add
a comment somewhere stating that this implementation assumes WT caches
on KVX? That way we can avoid the confusion Ahmad and I fell into
when glancing over the code.

Regards,
Lucas



* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-03  9:14         ` Lucas Stach
@ 2021-03-03  9:33           ` Yann Sionneau
  2021-03-03  9:52             ` Ahmad Fatoum
  0 siblings, 1 reply; 14+ messages in thread
From: Yann Sionneau @ 2021-03-03  9:33 UTC (permalink / raw)
  To: Lucas Stach, Jules Maselbas; +Cc: Ahmad Fatoum, barebox

On 03/03/2021 10:14, Lucas Stach wrote:

> Hi Jules,
>
> Am Dienstag, dem 02.03.2021 um 11:58 +0100 schrieb Jules Maselbas:
>> Hi Lucas and Ahmad,
>>
>> On Tue, Mar 02, 2021 at 11:14:09AM +0100, Lucas Stach wrote:
>>> Am Dienstag, dem 02.03.2021 um 09:37 +0100 schrieb Ahmad Fatoum:
>>>> Hello Jules, Yann,
>>>>
>>>> On 01.03.21 16:58, Jules Maselbas wrote:
>>>>> From: Yann Sionneau <ysionneau@kalray.eu>
>>>> Some comments inline. I am not a cache cohereny expert, so take
>>>> it with a grain of salt.
>>>>
>>>>> +static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
>>>>> +{
>>>>> +	void *ret = xmemalign(PAGE_SIZE, size);
>>>>> +
>>>>> +	if (dma_handle)
>>>>> +		*dma_handle = (dma_addr_t)(uintptr_t)ret;
>>>>> +
>>>>> +	return ret;
>>>>> +}
>>>> This would imply that the CPU barebox is booting is coherent with all
>>>>
>>>> devices that barebox needs to access. Is that the case?
>>>>
>>>> (See below)
>>>>
>> This is bogus, memory is not coherent with all devices, this should be
>> handled by the mmu, which is currently not supported in our barebox port.
>> Using this can lead to coherency issues. We can either drop this
>> function, so that is leads to an error at link time, or add a call to
>> BUG for a runtime error.
>>
>> Right now we aren't using any driver that require dma_alloc_coherent,
>> but we use drivers that requires dma_alloc and dma_map_single instead.
> I would vote for a BUILD_BUG_ON_MSG in this function, so you get a
> compile time error and you can state what needs to be done in order to
> get rid of the failure.

If we define the function and put a BUILD_BUG_ON_MSG() inside, I am 
guessing that all builds will fail, right?

But we only want the builds that actually call this function to fail.

Maybe we can just define dma_alloc_coherent() as a macro that expands to
BUILD_BUG_ON_MSG.

Like:

#define dma_alloc_coherent(a, b) \
	BUILD_BUG_ON_MSG(1, "dma_alloc_coherent is not supported yet on KVX. " \
			    "You would need to add MMU support to be able to map uncached pages")

What do you think?

>
>>>>> +/*
>>>>> + * The implementation of arch should follow the following rules:
>>>>> + *		map		for_cpu		for_device	unmap
>>>>> + * TO_DEV	writeback	none		writeback	none
>>>>> + * FROM_DEV	invalidate	invalidate(*)	invalidate	invalidate(*)
>>>>> + * BIDIR	writeback	invalidate	writeback	invalidate
>>>>> + *
>>>>> + * (*) - only necessary if the CPU speculatively prefetches.
>>>>> + *
>>>>> + * (see https://lkml.org/lkml/2018/5/18/979)
>>>>> + */
>>>>> +
>>>>> +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
>>>>> +				enum dma_data_direction dir)
>>>>> +{
>>>>> +	switch (dir) {
>>>>> +	case DMA_FROM_DEVICE:
>>>>> +		kvx_dcache_invalidate_mem_area(addr, size);
>>> Why do you need to explicitly invalidate, but not flush? Even if the
>>> CPU speculatively prefetches, the coherency protocol should make sure
>>> to invalidate the speculatively loaded lines, right?
>> Since we don't have a coherent memory, here we need to invalidate L1
>> dcache to let the CPU see deivce's writes in memory.
>> Also every write goes through the cache, flush is not required.
> Ah, if all your caches are write-through that makes sense. Can you add
> a comment somewhere stating that this implementation assumes WT caches
> on KVX? This way we can avoid the confusion Ahamd and myself fell into
> when glancing over the code.
>
> Regards,
> Lucas
>
>



* Re: [PATCH 3/5] kvx: Implement dma handling primitives
  2021-03-03  9:33           ` Yann Sionneau
@ 2021-03-03  9:52             ` Ahmad Fatoum
  0 siblings, 0 replies; 14+ messages in thread
From: Ahmad Fatoum @ 2021-03-03  9:52 UTC (permalink / raw)
  To: Yann Sionneau, Lucas Stach, Jules Maselbas; +Cc: barebox

Hello Yann,

On 03.03.21 10:33, Yann Sionneau wrote:
> On 03/03/2021 10:14, Lucas Stach wrote:
>>> Right now we aren't using any driver that require dma_alloc_coherent,
>>> but we use drivers that requires dma_alloc and dma_map_single instead.
>> I would vote for a BUILD_BUG_ON_MSG in this function, so you get a
>> compile time error and you can state what needs to be done in order to
>> get rid of the failure.
> 
> If we define the function and put a BUILD_BUG_ON_MSG() inside, I am guessing that all builds will fail, right?
> 
> But we only want the builds that actually call this function to fail.
> 
> Maybe we can just define dma_alloc_coherent() as being a macro, to BUILD_BUG_ON_MSG.
> 
> Like:
> 
> #define dma_alloc_coherent(a, b) BUILD_BUG_ON_MSG(1, "dma_alloc_coherent is not supported yet on KVX. You would need to add MMU support to be able to map uncached pages")

If the macro is expanded, it will fail, even if the code is ultimately unused because
of linker section garbage collection. You could define dma_alloc_coherent with the following
body:

{
	extern void *coherent_allocation_not_implemented_on_kvx(void);
	/* You would need to add MMU support to be able to map uncached pages */
	return coherent_allocation_not_implemented_on_kvx();
}

If after linker GC, a reference to dma_alloc_coherent remains, you will get a linker
error explaining why.

Cheers,
Ahmad

> 
> What do you think?
> 
>>
>>>>>> +/*
>>>>>> + * The implementation of arch should follow the following rules:
>>>>>> + *        map        for_cpu        for_device    unmap
>>>>>> + * TO_DEV    writeback    none        writeback    none
>>>>>> + * FROM_DEV    invalidate    invalidate(*)    invalidate    invalidate(*)
>>>>>> + * BIDIR    writeback    invalidate    writeback    invalidate
>>>>>> + *
>>>>>> + * (*) - only necessary if the CPU speculatively prefetches.
>>>>>> + *
>>>>>> + * (see https://lkml.org/lkml/2018/5/18/979)
>>>>>> + */
>>>>>> +
>>>>>> +void dma_sync_single_for_device(dma_addr_t addr, size_t size,
>>>>>> +                enum dma_data_direction dir)
>>>>>> +{
>>>>>> +    switch (dir) {
>>>>>> +    case DMA_FROM_DEVICE:
>>>>>> +        kvx_dcache_invalidate_mem_area(addr, size);
>>>> Why do you need to explicitly invalidate, but not flush? Even if the
>>>> CPU speculatively prefetches, the coherency protocol should make sure
>>>> to invalidate the speculatively loaded lines, right?
>>> Since we don't have a coherent memory, here we need to invalidate L1
>>> dcache to let the CPU see deivce's writes in memory.
>>> Also every write goes through the cache, flush is not required.
>> Ah, if all your caches are write-through that makes sense. Can you add
>> a comment somewhere stating that this implementation assumes WT caches
>> on KVX? This way we can avoid the confusion Ahamd and myself fell into
>> when glancing over the code.
>>
>> Regards,
>> Lucas
>>
>>
> 
> 

-- 
Pengutronix e.K.                           |                             |
Steuerwalder Str. 21                       | http://www.pengutronix.de/  |
31137 Hildesheim, Germany                  | Phone: +49-5121-206917-0    |
Amtsgericht Hildesheim, HRA 2686           | Fax:   +49-5121-206917-5555 |
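Spelled out, the header stub Ahmad describes might look roughly like this (a sketch only; nothing of the sort was posted in the thread):

/* in arch/kvx/include/asm/dma.h */
static inline void *dma_alloc_coherent(size_t size, dma_addr_t *dma_handle)
{
	/* deliberately left undefined: without MMU support there is no way
	 * to map uncached pages, so any caller that survives linker garbage
	 * collection fails to link with a self-explanatory symbol name */
	extern void *coherent_allocation_not_implemented_on_kvx(void);

	return coherent_allocation_not_implemented_on_kvx();
}

dma_free_coherent() could keep its current definition or get the same treatment.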


Thread overview: 14 messages
2021-03-01 15:58 [PATCH 1/5] kvx: Implement setjmp/longjmp/initjmp Jules Maselbas
2021-03-01 15:58 ` [PATCH 2/5] kvx: Implement dcache invalidation primitive Jules Maselbas
2021-03-02  8:40   ` Ahmad Fatoum
2021-03-02 11:44     ` Jules Maselbas
2021-03-01 15:58 ` [PATCH 3/5] kvx: Implement dma handling primitives Jules Maselbas
2021-03-02  8:37   ` Ahmad Fatoum
2021-03-02  8:44     ` Ahmad Fatoum
2021-03-02 10:14     ` Lucas Stach
2021-03-02 10:58       ` Jules Maselbas
2021-03-03  9:14         ` Lucas Stach
2021-03-03  9:33           ` Yann Sionneau
2021-03-03  9:52             ` Ahmad Fatoum
2021-03-01 15:58 ` [PATCH 4/5] kvx: Request enough privilege to boot Linux Jules Maselbas
2021-03-01 15:58 ` [PATCH 5/5] kvx: lib: dtb: Remove unused variable Jules Maselbas
