* [PATCH 1/9] Add print_hex_dump kernel implementation
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 2/9] Add _RET_IP_ macro Sascha Hauer
` (8 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
This gives us proper support for the different DUMP_PREFIX_* flags
and also the ability to print hex dumps to buffers.
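For illustration, a minimal usage sketch of the two entry points added
here (the buffer names and sizes are made up):

	unsigned char data[64];
	char line[200];

	/* 16 bytes per line, 1-byte groups, offset prefix on each line */
	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET,
		       16, 1, data, sizeof(data), true);

	/* format a single row into a buffer instead of printing it */
	hex_dump_to_buffer(data, 16, 16, 1, line, sizeof(line), true);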
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
include/printk.h | 20 +++--
lib/hexdump.c | 212 +++++++++++++++++++++++++++++++++++++++++++++--
2 files changed, 216 insertions(+), 16 deletions(-)
diff --git a/include/printk.h b/include/printk.h
index 6a027175ab..9941ddb12c 100644
--- a/include/printk.h
+++ b/include/printk.h
@@ -115,14 +115,6 @@ int __pr_memory_display(int level, const void *addr, loff_t offs, unsigned nbyte
(offs), (nbytes), (size), (swab), pr_fmt("")) : 0; \
})
-#define DUMP_PREFIX_OFFSET 0
-static inline void print_hex_dump(const char *level, const char *prefix_str,
- int prefix_type, int rowsize, int groupsize,
- const void *buf, size_t len, bool ascii)
-{
- memory_display(buf, 0, len, groupsize, 0);
-}
-
struct log_entry {
struct list_head list;
char *msg;
@@ -156,4 +148,16 @@ struct va_format {
va_list *va;
};
+enum {
+ DUMP_PREFIX_NONE,
+ DUMP_PREFIX_ADDRESS,
+ DUMP_PREFIX_OFFSET
+};
+extern int hex_dump_to_buffer(const void *buf, size_t len, int rowsize,
+ int groupsize, char *linebuf, size_t linebuflen,
+ bool ascii);
+extern void print_hex_dump(const char *level, const char *prefix_str,
+ int prefix_type, int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii);
+
#endif
diff --git a/lib/hexdump.c b/lib/hexdump.c
index 3b1d5e6736..93f345e881 100644
--- a/lib/hexdump.c
+++ b/lib/hexdump.c
@@ -1,15 +1,14 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* lib/hexdump.c
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation. See README and COPYING for
- * more details.
*/
#include <common.h>
-#include <linux/kernel.h>
+#include <linux/types.h>
#include <linux/ctype.h>
+#include <linux/log2.h>
+#include <printk.h>
+#include <asm/unaligned.h>
const char hex_asc[] = "0123456789abcdef";
EXPORT_SYMBOL(hex_asc);
@@ -40,7 +39,7 @@ EXPORT_SYMBOL(hex_to_bin);
* @src: ascii hexadecimal string
* @count: result length
*
- * Return 0 on success, -1 in case of bad input.
+ * Return 0 on success, -EINVAL in case of bad input.
*/
int hex2bin(u8 *dst, const char *src, size_t count)
{
@@ -49,7 +48,7 @@ int hex2bin(u8 *dst, const char *src, size_t count)
int lo = hex_to_bin(*src++);
if ((hi < 0) || (lo < 0))
- return -1;
+ return -EINVAL;
*dst++ = (hi << 4) | lo;
}
@@ -72,3 +71,200 @@ char *bin2hex(char *dst, const void *src, size_t count)
return dst;
}
EXPORT_SYMBOL(bin2hex);
+
+/**
+ * hex_dump_to_buffer - convert a blob of data to "hex ASCII" in memory
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @linebuf: where to put the converted data
+ * @linebuflen: total size of @linebuf, including space for terminating NUL
+ * @ascii: include ASCII after the hex output
+ *
+ * hex_dump_to_buffer() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ *
+ * Given a buffer of u8 data, hex_dump_to_buffer() converts the input data
+ * to a hex + ASCII dump at the supplied memory location.
+ * The converted output is always NUL-terminated.
+ *
+ * E.g.:
+ * hex_dump_to_buffer(frame->data, frame->len, 16, 1,
+ * linebuf, sizeof(linebuf), true);
+ *
+ * example output buffer:
+ * 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ *
+ * Return:
+ * The amount of bytes placed in the buffer without terminating NUL. If the
+ * output was truncated, then the return value is the number of bytes
+ * (excluding the terminating NUL) which would have been written to the final
+ * string if enough space had been available.
+ */
+int hex_dump_to_buffer(const void *buf, size_t len, int rowsize, int groupsize,
+ char *linebuf, size_t linebuflen, bool ascii)
+{
+ const u8 *ptr = buf;
+ int ngroups;
+ u8 ch;
+ int j, lx = 0;
+ int ascii_column;
+ int ret;
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ if (len > rowsize) /* limit to one line at a time */
+ len = rowsize;
+ if (!is_power_of_2(groupsize) || groupsize > 8)
+ groupsize = 1;
+ if ((len % groupsize) != 0) /* no mixed size output */
+ groupsize = 1;
+
+ ngroups = len / groupsize;
+ ascii_column = rowsize * 2 + rowsize / groupsize + 1;
+
+ if (!linebuflen)
+ goto overflow1;
+
+ if (!len)
+ goto nil;
+
+ if (groupsize == 8) {
+ const u64 *ptr8 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%16.16llx", j ? " " : "",
+ get_unaligned(ptr8 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 4) {
+ const u32 *ptr4 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%8.8x", j ? " " : "",
+ get_unaligned(ptr4 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else if (groupsize == 2) {
+ const u16 *ptr2 = buf;
+
+ for (j = 0; j < ngroups; j++) {
+ ret = snprintf(linebuf + lx, linebuflen - lx,
+ "%s%4.4x", j ? " " : "",
+ get_unaligned(ptr2 + j));
+ if (ret >= linebuflen - lx)
+ goto overflow1;
+ lx += ret;
+ }
+ } else {
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = hex_asc_hi(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = hex_asc_lo(ch);
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ if (j)
+ lx--;
+ }
+ if (!ascii)
+ goto nil;
+
+ while (lx < ascii_column) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ linebuf[lx++] = ' ';
+ }
+ for (j = 0; j < len; j++) {
+ if (linebuflen < lx + 2)
+ goto overflow2;
+ ch = ptr[j];
+ linebuf[lx++] = (isascii(ch) && isprint(ch)) ? ch : '.';
+ }
+nil:
+ linebuf[lx] = '\0';
+ return lx;
+overflow2:
+ linebuf[lx++] = '\0';
+overflow1:
+ return ascii ? ascii_column + len : (groupsize * 2 + 1) * ngroups - 1;
+}
+EXPORT_SYMBOL(hex_dump_to_buffer);
+
+/**
+ * print_hex_dump - print a text hex dump to syslog for a binary blob of data
+ * @level: kernel log level (e.g. KERN_DEBUG)
+ * @prefix_str: string to prefix each line with;
+ * caller supplies trailing spaces for alignment if desired
+ * @prefix_type: controls whether prefix of an offset, address, or none
+ * is printed (%DUMP_PREFIX_OFFSET, %DUMP_PREFIX_ADDRESS, %DUMP_PREFIX_NONE)
+ * @rowsize: number of bytes to print per line; must be 16 or 32
+ * @groupsize: number of bytes to print at a time (1, 2, 4, 8; default = 1)
+ * @buf: data blob to dump
+ * @len: number of bytes in the @buf
+ * @ascii: include ASCII after the hex output
+ *
+ * Given a buffer of u8 data, print_hex_dump() prints a hex + ASCII dump
+ * to the kernel log at the specified kernel log level, with an optional
+ * leading prefix.
+ *
+ * print_hex_dump() works on one "line" of output at a time, i.e.,
+ * 16 or 32 bytes of input data converted to hex + ASCII output.
+ * print_hex_dump() iterates over the entire input @buf, breaking it into
+ * "line size" chunks to format and print.
+ *
+ * E.g.:
+ * print_hex_dump(KERN_DEBUG, "raw data: ", DUMP_PREFIX_ADDRESS,
+ * 16, 1, frame->data, frame->len, true);
+ *
+ * Example output using %DUMP_PREFIX_OFFSET and 1-byte mode:
+ * 0009ab42: 40 41 42 43 44 45 46 47 48 49 4a 4b 4c 4d 4e 4f @ABCDEFGHIJKLMNO
+ * Example output using %DUMP_PREFIX_ADDRESS and 4-byte mode:
+ * ffffffff88089af0: 73727170 77767574 7b7a7978 7f7e7d7c pqrstuvwxyz{|}~.
+ */
+void print_hex_dump(const char *level, const char *prefix_str, int prefix_type,
+ int rowsize, int groupsize,
+ const void *buf, size_t len, bool ascii)
+{
+ const u8 *ptr = buf;
+ int i, linelen, remaining = len;
+ unsigned char linebuf[32 * 3 + 2 + 32 + 1];
+
+ if (rowsize != 16 && rowsize != 32)
+ rowsize = 16;
+
+ for (i = 0; i < len; i += rowsize) {
+ linelen = min(remaining, rowsize);
+ remaining -= rowsize;
+
+ hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize,
+ linebuf, sizeof(linebuf), ascii);
+
+ switch (prefix_type) {
+ case DUMP_PREFIX_ADDRESS:
+ printk("%s%s%p: %s\n",
+ level, prefix_str, ptr + i, linebuf);
+ break;
+ case DUMP_PREFIX_OFFSET:
+ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf);
+ break;
+ default:
+ printk("%s%s%s\n", level, prefix_str, linebuf);
+ break;
+ }
+ }
+}
+EXPORT_SYMBOL(print_hex_dump);
--
2.28.0
* [PATCH 2/9] Add _RET_IP_ macro
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
2020-09-18 8:45 ` [PATCH 1/9] Add print_hex_dump kernel implementation Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 3/9] Kallsyms: Also resolve global variables Sascha Hauer
` (7 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
This macro is used in the KASan code, so add it to barebox.
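A minimal usage sketch (the function below is hypothetical, not part of
this patch):

	/* pass the caller's address on to a report function */
	static void report_access(unsigned long addr, size_t size)
	{
		pr_err("invalid access at 0x%08lx (caller: %pS)\n",
		       addr, (void *)_RET_IP_);
	}

The cast to unsigned long matches how the KASan report functions later in
this series take their ret_ip argument.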
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
include/linux/kernel.h | 2 ++
1 file changed, 2 insertions(+)
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 23c23a73f5..787571a5a0 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -229,6 +229,8 @@ extern long long simple_strtoll(const char *,char **,unsigned int);
} \
)
+#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
extern const char hex_asc[];
#define hex_asc_lo(x) hex_asc[((x) & 0x0f)]
#define hex_asc_hi(x) hex_asc[((x) & 0xf0) >> 4]
--
2.28.0
* [PATCH 3/9] Kallsyms: Also resolve global variables
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
2020-09-18 8:45 ` [PATCH 1/9] Add print_hex_dump kernel implementation Sascha Hauer
2020-09-18 8:45 ` [PATCH 2/9] Add _RET_IP_ macro Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-22 16:17 ` Michael Grzeschik
2020-09-18 8:45 ` [PATCH 4/9] Add constructor support Sascha Hauer
` (6 subsequent siblings)
9 siblings, 1 reply; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
Increase the area used for resolving symbols so that global variables
can also be resolved to names.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
common/kallsyms.c | 4 ++--
1 file changed, 2 insertions(+), 2 deletions(-)
diff --git a/common/kallsyms.c b/common/kallsyms.c
index e15dec5dfc..2c16ab2884 100644
--- a/common/kallsyms.c
+++ b/common/kallsyms.c
@@ -15,8 +15,8 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
static inline int is_kernel_text(unsigned long addr)
{
- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext))
- return 1;
+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
+ return 1;
return 0;
}
--
2.28.0
* Re: [PATCH 3/9] Kallsyms: Also resolve global variables
2020-09-18 8:45 ` [PATCH 3/9] Kallsyms: Also resolve global variables Sascha Hauer
@ 2020-09-22 16:17 ` Michael Grzeschik
2020-09-28 8:30 ` Sascha Hauer
0 siblings, 1 reply; 19+ messages in thread
From: Michael Grzeschik @ 2020-09-22 16:17 UTC (permalink / raw)
To: Sascha Hauer; +Cc: Barebox List
Hi!
On Fri, Sep 18, 2020 at 10:45:26AM +0200, Sascha Hauer wrote:
>Increase the area used for resolving symbols so that global variables
>can also be resolved to names.
>
>Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
>---
> common/kallsyms.c | 4 ++--
> 1 file changed, 2 insertions(+), 2 deletions(-)
>
>diff --git a/common/kallsyms.c b/common/kallsyms.c
>index e15dec5dfc..2c16ab2884 100644
>--- a/common/kallsyms.c
>+++ b/common/kallsyms.c
>@@ -15,8 +15,8 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
>
> static inline int is_kernel_text(unsigned long addr)
> {
>- if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext))
>- return 1;
>+ if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
>+ return 1;
tabs vs spaces.
> return 0;
> }
>
>--
>2.28.0
mgr
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* Re: [PATCH 3/9] Kallsyms: Also resolve global variables
2020-09-22 16:17 ` Michael Grzeschik
@ 2020-09-28 8:30 ` Sascha Hauer
0 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-28 8:30 UTC (permalink / raw)
To: Michael Grzeschik; +Cc: Barebox List
On Tue, Sep 22, 2020 at 06:17:22PM +0200, Michael Grzeschik wrote:
> Hi!
>
> On Fri, Sep 18, 2020 at 10:45:26AM +0200, Sascha Hauer wrote:
> > Increase the area used for resolving symbols so that global variables
> > can also be resolved to names.
> >
> > Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> > ---
> > common/kallsyms.c | 4 ++--
> > 1 file changed, 2 insertions(+), 2 deletions(-)
> >
> > diff --git a/common/kallsyms.c b/common/kallsyms.c
> > index e15dec5dfc..2c16ab2884 100644
> > --- a/common/kallsyms.c
> > +++ b/common/kallsyms.c
> > @@ -15,8 +15,8 @@ extern const unsigned long kallsyms_markers[] __attribute__((weak));
> >
> > static inline int is_kernel_text(unsigned long addr)
> > {
> > - if ((addr >= (unsigned long)_stext && addr <= (unsigned long)_etext))
> > - return 1;
> > + if (addr >= (unsigned long)_stext && addr <= (unsigned long)_end)
> > + return 1;
>
> tabs vs spaces.
Added a separate patch fixing it.
Sascha
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* [PATCH 4/9] Add constructor support
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (2 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 3/9] Kallsyms: Also resolve global variables Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 5/9] pbl: Alias memcpy and memset Sascha Hauer
` (5 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
Call constructors (gcc-generated, initcall-like functions) during barebox
startup. Constructors are used, for example, for KASan initialization.
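For illustration, a sketch of what such a constructor looks like (the
function below is hypothetical, not part of this patch):

	/* the compiler emits a pointer to this into .ctors/.init_array */
	static __attribute__((constructor)) void example_ctor(void)
	{
		/* called from do_ctors(), before the barebox initcalls run */
	}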
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
common/startup.c | 15 +++++++++++++++
include/asm-generic/barebox.lds.h | 12 ++++++++++++
include/asm-generic/sections.h | 3 +++
lib/Kconfig | 3 +++
4 files changed, 33 insertions(+)
diff --git a/common/startup.c b/common/startup.c
index 1c58e41288..6cb0588ae6 100644
--- a/common/startup.c
+++ b/common/startup.c
@@ -366,6 +366,19 @@ static int run_init(void)
return 0;
}
+typedef void (*ctor_fn_t)(void);
+
+/* Call all constructor functions linked into the kernel. */
+static void do_ctors(void)
+{
+#ifdef CONFIG_CONSTRUCTORS
+ ctor_fn_t *fn = (ctor_fn_t *) __ctors_start;
+
+ for (; fn < (ctor_fn_t *) __ctors_end; fn++)
+ (*fn)();
+#endif
+}
+
int (*barebox_main)(void);
void __noreturn start_barebox(void)
@@ -376,6 +389,8 @@ void __noreturn start_barebox(void)
if (!IS_ENABLED(CONFIG_SHELL_NONE) && IS_ENABLED(CONFIG_COMMAND_SUPPORT))
barebox_main = run_init;
+ do_ctors();
+
for (initcall = __barebox_initcalls_start;
initcall < __barebox_initcalls_end; initcall++) {
pr_debug("initcall-> %pS\n", *initcall);
diff --git a/include/asm-generic/barebox.lds.h b/include/asm-generic/barebox.lds.h
index 138e9405a1..6971e2c1d2 100644
--- a/include/asm-generic/barebox.lds.h
+++ b/include/asm-generic/barebox.lds.h
@@ -113,12 +113,24 @@
KEEP(*(.rsa_keys.rodata.*)); \
__rsa_keys_end = .; \
+#ifdef CONFIG_CONSTRUCTORS
+#define KERNEL_CTORS() . = ALIGN(8); \
+ __ctors_start = .; \
+ KEEP(*(.ctors)) \
+ KEEP(*(SORT(.init_array.*))) \
+ KEEP(*(.init_array)) \
+ __ctors_end = .;
+#else
+#define KERNEL_CTORS()
+#endif
+
#define RO_DATA_SECTION \
BAREBOX_INITCALLS \
BAREBOX_EXITCALLS \
BAREBOX_CMDS \
BAREBOX_RATP_CMDS \
BAREBOX_SYMS \
+ KERNEL_CTORS() \
BAREBOX_MAGICVARS \
BAREBOX_CLK_TABLE \
BAREBOX_DTB \
diff --git a/include/asm-generic/sections.h b/include/asm-generic/sections.h
index f584cad48d..870bff21f6 100644
--- a/include/asm-generic/sections.h
+++ b/include/asm-generic/sections.h
@@ -13,6 +13,9 @@ extern void *_barebox_image_size;
extern void *_barebox_bare_init_size;
extern void *_barebox_pbl_size;
+/* Start and end of .ctors section - used for constructor calls. */
+extern char __ctors_start[], __ctors_end[];
+
#define barebox_image_size (__image_end - __image_start)
#define barebox_bare_init_size (unsigned int)&_barebox_bare_init_size
#define barebox_pbl_size (__piggydata_start - __image_start)
diff --git a/lib/Kconfig b/lib/Kconfig
index b4a8079700..90552f3c27 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -60,6 +60,9 @@ config REED_SOLOMON
config BASE64
bool "include base64 encode/decode support"
+config CONSTRUCTORS
+ bool
+
config GENERIC_FIND_NEXT_BIT
def_bool n
--
2.28.0
* [PATCH 5/9] pbl: Alias memcpy and memset
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (3 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 4/9] Add constructor support Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 6/9] string: Add nokasan variants of default memcpy/memset Sascha Hauer
` (4 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
With KASan, the memcpy/memset functions are instrumented as well, but
some code still has to call the non-instrumented versions __memcpy and
__memset. Add aliases for them in the PBL to make them available there.
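A sketch of the kind of caller this enables (hypothetical function, not
part of this patch):

	/* KASan runtime code must not recurse into the instrumented memcpy */
	static void __no_sanitize_address copy_raw(void *dst, const void *src,
						   size_t len)
	{
		__memcpy(dst, src, len);
	}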
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
pbl/string.c | 7 +++++++
1 file changed, 7 insertions(+)
diff --git a/pbl/string.c b/pbl/string.c
index 46bf0b32b3..e6c0997ebc 100644
--- a/pbl/string.c
+++ b/pbl/string.c
@@ -6,6 +6,7 @@
#include <linux/types.h>
#include <linux/string.h>
+#include <linux/compiler.h>
void *memcpy(void *__dest, __const void *__src, size_t __n)
{
@@ -41,6 +42,9 @@ void *memcpy(void *__dest, __const void *__src, size_t __n)
return __dest;
}
+void *__memcpy(void *__dest, __const void *__src, size_t __n)
+ __alias(memcpy);
+
void *memmove(void *__dest, __const void *__src, size_t count)
{
unsigned char *d = __dest;
@@ -120,6 +124,9 @@ void *memset(void *s, int c, size_t count)
return s;
}
+void *__memset(void *s, int c, size_t count)
+ __alias(memset);
+
/**
* strnlen - Find the length of a length-limited string
* @s: The string to be sized
--
2.28.0
* [PATCH 6/9] string: Add nokasan variants of default memcpy/memset
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (4 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 5/9] pbl: Alias memcpy and memset Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 7/9] sandbox: rename KASan to ASan Sascha Hauer
` (3 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
Add nokasan variants of __default_memcpy and __default_memset.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
include/string.h | 4 ++++
lib/string.c | 28 +++++++++++++++++++++++++---
2 files changed, 29 insertions(+), 3 deletions(-)
diff --git a/include/string.h b/include/string.h
index 120a613d46..727bc51934 100644
--- a/include/string.h
+++ b/include/string.h
@@ -7,6 +7,10 @@
int strtobool(const char *str, int *val);
void *__default_memset(void *, int, __kernel_size_t);
+void *__nokasan_default_memset(void *, int, __kernel_size_t);
+
void *__default_memcpy(void * dest,const void *src,size_t count);
+void *__nokasan_default_memcpy(void * dest,const void *src,size_t count);
+
#endif /* __STRING_H */
diff --git a/lib/string.c b/lib/string.c
index 7548fd3581..b950dbfd5f 100644
--- a/lib/string.c
+++ b/lib/string.c
@@ -491,7 +491,18 @@ char *strswab(const char *s)
*
* Do not use memset() to access IO space, use memset_io() instead.
*/
-void *__default_memset(void * s,int c,size_t count)
+void *__default_memset(void * s, int c, size_t count)
+{
+ char *xs = (char *) s;
+
+ while (count--)
+ *xs++ = c;
+
+ return s;
+}
+EXPORT_SYMBOL(__default_memset);
+
+void __no_sanitize_address *__nokasan_default_memset(void * s, int c, size_t count)
{
char *xs = (char *) s;
@@ -515,7 +526,18 @@ void *memset(void *s, int c, size_t count) __alias(__default_memset);
* You should not use this function to access IO space, use memcpy_toio()
* or memcpy_fromio() instead.
*/
-void *__default_memcpy(void * dest,const void *src,size_t count)
+void *__default_memcpy(void * dest,const void *src, size_t count)
+{
+ char *tmp = (char *) dest, *s = (char *) src;
+
+ while (count--)
+ *tmp++ = *s++;
+
+ return dest;
+}
+
+void __no_sanitize_address *__nokasan_default_memcpy(void * dest,
+ const void *src, size_t count)
{
char *tmp = (char *) dest, *s = (char *) src;
@@ -524,7 +546,7 @@ void *__default_memcpy(void * dest,const void *src,size_t count)
return dest;
}
-EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(__default_memcpy);
#ifndef __HAVE_ARCH_MEMCPY
void *memcpy(void * dest, const void *src, size_t count)
--
2.28.0
* [PATCH 7/9] sandbox: rename KASan to ASan
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (5 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 6/9] string: Add nokasan variants of default memcpy/memset Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 8:45 ` [PATCH 8/9] Add KASan support Sascha Hauer
` (2 subsequent siblings)
9 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
sandbox really has ASan support, that is, address sanitizer support
provided by the userspace library libasan. In contrast, KASan is used on
real hardware, where we have to implement our own support code. Rename
the sandbox KASan option to ASan so that it does not clash with the
upcoming KASan support.
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
arch/sandbox/Kconfig | 4 ++--
arch/sandbox/Makefile | 2 +-
arch/sandbox/os/Makefile | 4 ----
arch/sandbox/os/common.c | 2 +-
common/Kconfig | 10 +++++-----
5 files changed, 9 insertions(+), 13 deletions(-)
diff --git a/arch/sandbox/Kconfig b/arch/sandbox/Kconfig
index 40e04919d2..81f7a96bd6 100644
--- a/arch/sandbox/Kconfig
+++ b/arch/sandbox/Kconfig
@@ -5,7 +5,7 @@ config SANDBOX
select OFTREE
select GPIOLIB
select ARCH_HAS_UBSAN_SANITIZE_ALL
- select HAVE_ARCH_KASAN
+ select HAVE_ARCH_ASAN
select HAS_DMA
default y
@@ -22,7 +22,7 @@ config SANDBOX_UNWIND
bool
default y
select ARCH_HAS_STACK_DUMP
- depends on UBSAN || KASAN
+ depends on UBSAN || ASAN
config PHYS_ADDR_T_64BIT
bool
diff --git a/arch/sandbox/Makefile b/arch/sandbox/Makefile
index 27021222dc..ce1fe3b672 100644
--- a/arch/sandbox/Makefile
+++ b/arch/sandbox/Makefile
@@ -44,7 +44,7 @@ ifeq ($(CONFIG_GPIO_LIBFTDI1),y)
FTDI1_LIBS := $(shell pkg-config libftdi1 --libs)
endif
-ifeq ($(CONFIG_KASAN),y)
+ifeq ($(CONFIG_ASAN),y)
KBUILD_CPPFLAGS += -fsanitize=address
SANITIZER_LIBS += -fsanitize=address
endif
diff --git a/arch/sandbox/os/Makefile b/arch/sandbox/os/Makefile
index b2bd768bcb..15d688bfdd 100644
--- a/arch/sandbox/os/Makefile
+++ b/arch/sandbox/os/Makefile
@@ -8,10 +8,6 @@ KBUILD_CPPFLAGS += -DCONFIG_MALLOC_SIZE=$(CONFIG_MALLOC_SIZE)
KBUILD_CFLAGS := -Wall
-ifeq ($(CONFIG_KASAN),y)
-KBUILD_CPPFLAGS += -DCONFIG_KASAN=1
-endif
-
NOSTDINC_FLAGS :=
ifeq ($(CONFIG_SANDBOX_LINUX_I386),y)
diff --git a/arch/sandbox/os/common.c b/arch/sandbox/os/common.c
index 69fadb3b47..9fb5faf41d 100644
--- a/arch/sandbox/os/common.c
+++ b/arch/sandbox/os/common.c
@@ -347,7 +347,7 @@ int main(int argc, char *argv[])
int fdno = 0, envno = 0, option_index = 0;
char *aux;
-#ifdef CONFIG_KASAN
+#ifdef CONFIG_ASAN
__sanitizer_set_death_callback(cookmode);
#endif
diff --git a/common/Kconfig b/common/Kconfig
index b350f5c355..3626eb2f29 100644
--- a/common/Kconfig
+++ b/common/Kconfig
@@ -1375,11 +1375,11 @@ config PBL_BREAK
source "lib/Kconfig.ubsan"
-config KASAN
- bool "KASAN: runtime memory debugger"
- depends on HAVE_ARCH_KASAN
+config ASAN
+ bool "ASAN: runtime memory debugger"
+ depends on HAVE_ARCH_ASAN
help
- Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
+ Enables ASAN (AddressSANitizer) - runtime memory debugger,
designed to find out-of-bounds accesses and use-after-free bugs.
config COMPILE_TEST
@@ -1404,5 +1404,5 @@ config DDR_SPD
bool
select CRC_ITU_T
-config HAVE_ARCH_KASAN
+config HAVE_ARCH_ASAN
bool
--
2.28.0
* [PATCH 8/9] Add KASan support
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (6 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 7/9] sandbox: rename KASan to ASan Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2020-09-18 10:15 ` Ahmad Fatoum
2020-09-18 8:45 ` [PATCH 9/9] ARM: " Sascha Hauer
2020-09-28 14:33 ` [PATCH 0/9] barebox " Ahmad Fatoum
9 siblings, 1 reply; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
KernelAddressSANitizer (KASAN) is a dynamic memory error detector. It
provides a fast and comprehensive solution for finding use-after-free
and out-of-bounds bugs.
This adds support for KASan to barebox. It is basically a stripped-down
version taken from the Linux kernel as of v5.9-rc1.
The initial Linux commit 0b24becc810d ("kasan: add kernel address
sanitizer infrastructure") describes what KASan does:
| KASAN uses compile-time instrumentation for checking every memory access,
| therefore GCC > v4.9.2 required. v4.9.2 almost works, but has issues with
| putting symbol aliases into the wrong section, which breaks kasan
| instrumentation of globals.
|
| Basic idea:
|
| The main idea of KASAN is to use shadow memory to record whether each byte
| of memory is safe to access or not, and use compiler's instrumentation to
| check the shadow memory on each memory access.
|
| Address sanitizer uses 1/8 of the memory addressable in kernel for shadow
| memory and uses direct mapping with a scale and offset to translate a
| memory address to its corresponding shadow address.
|
| For every 8 bytes there is one corresponding byte of shadow memory.
| The following encoding used for each shadow byte: 0 means that all 8 bytes
| of the corresponding memory region are valid for access; k (1 <= k <= 7)
| means that the first k bytes are valid for access, and other (8 - k) bytes
| are not; Any negative value indicates that the entire 8-bytes are
| inaccessible. Different negative values used to distinguish between
| different kinds of inaccessible memory (redzones, freed memory) (see
| mm/kasan/kasan.h).
|
| To be able to detect accesses to bad memory we need a special compiler.
| Such compiler inserts a specific function calls (__asan_load*(addr),
| __asan_store*(addr)) before each memory access of size 1, 2, 4, 8 or 16.
|
| These functions check whether memory region is valid to access or not by
| checking corresponding shadow memory. If access is not valid an error
| printed.
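As a concrete illustration of the mapping added in include/linux/kasan.h
(the addresses are made up; KASAN_SHADOW_SCALE_SHIFT is 3 here, i.e. the
CONFIG_64BIT case): with kasan_shadow_start = 0x40000000 and
kasan_shadow_base = 0x50000000, an access to 0x40001000 is checked against
the shadow byte at

	shadow = kasan_shadow_base + ((0x40001000 - kasan_shadow_start) >> 3)
	       = 0x50000000 + (0x1000 >> 3)
	       = 0x50000200

which is what kasan_mem_to_shadow() computes.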
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
Makefile | 6 +-
common/Makefile | 1 +
common/tlsf.c | 34 +++-
include/linux/kasan.h | 89 +++++++++++
lib/Kconfig | 2 +
lib/Makefile | 1 +
lib/kasan/Kconfig | 16 ++
lib/kasan/Makefile | 14 ++
lib/kasan/common.c | 108 +++++++++++++
lib/kasan/generic.c | 315 +++++++++++++++++++++++++++++++++++++
lib/kasan/generic_report.c | 150 ++++++++++++++++++
lib/kasan/kasan.h | 164 +++++++++++++++++++
lib/kasan/report.c | 199 +++++++++++++++++++++++
scripts/Makefile.kasan | 17 ++
scripts/Makefile.lib | 10 ++
15 files changed, 1119 insertions(+), 7 deletions(-)
create mode 100644 include/linux/kasan.h
create mode 100644 lib/kasan/Kconfig
create mode 100644 lib/kasan/Makefile
create mode 100644 lib/kasan/common.c
create mode 100644 lib/kasan/generic.c
create mode 100644 lib/kasan/generic_report.c
create mode 100644 lib/kasan/kasan.h
create mode 100644 lib/kasan/report.c
create mode 100644 scripts/Makefile.kasan
diff --git a/Makefile b/Makefile
index 9060680367..461b0f2285 100644
--- a/Makefile
+++ b/Makefile
@@ -448,6 +448,7 @@ export LDFLAGS_barebox
export LDFLAGS_pbl
export CFLAGS_UBSAN
+export CFLAGS_KASAN CFLAGS_KASAN_NOSANITIZE
# Files to ignore in find ... statements
@@ -636,7 +637,10 @@ KBUILD_CFLAGS += $(call cc-option,-Wno-pointer-sign,)
# change __FILE__ to the relative path from the srctree
KBUILD_CFLAGS += $(call cc-option,-fmacro-prefix-map=$(srctree)/=)
-include $(srctree)/scripts/Makefile.ubsan
+include-y +=scripts/Makefile.ubsan
+include-$(CONFIG_KASAN) += scripts/Makefile.kasan
+
+include $(addprefix $(srctree)/, $(include-y))
# KBUILD_IMAGE: Default barebox image to build
# Depending on the architecture, this can be either compressed or not.
diff --git a/common/Makefile b/common/Makefile
index ad5146a301..faf0415ef3 100644
--- a/common/Makefile
+++ b/common/Makefile
@@ -38,6 +38,7 @@ obj-$(CONFIG_GREGORIAN_CALENDER) += date.o
obj-$(CONFIG_KALLSYMS) += kallsyms.o
obj-$(CONFIG_MALLOC_DLMALLOC) += dlmalloc.o
obj-$(CONFIG_MALLOC_TLSF) += tlsf_malloc.o tlsf.o calloc.o
+KASAN_SANITIZE_tlsf.o := n
obj-$(CONFIG_MALLOC_DUMMY) += dummy_malloc.o calloc.o
obj-$(CONFIG_MEMINFO) += meminfo.o
obj-$(CONFIG_MENU) += menu.o
diff --git a/common/tlsf.c b/common/tlsf.c
index 86cc684ab6..fa845e5788 100644
--- a/common/tlsf.c
+++ b/common/tlsf.c
@@ -3,6 +3,7 @@
#include <string.h>
#include <tlsf.h>
#include "tlsfbits.h"
+#include <linux/kasan.h>
#define CHAR_BIT 8
@@ -529,7 +530,7 @@ static void block_trim_free(control_t* control, block_header_t* block, size_t si
}
/* Trim any trailing block space off the end of a used block, return to pool. */
-static void block_trim_used(control_t* control, block_header_t* block, size_t size)
+static void block_trim_used(control_t* control, block_header_t* block, size_t size, size_t used)
{
tlsf_assert(!block_is_free(block) && "block must be used");
if (block_can_split(block, size))
@@ -541,6 +542,10 @@ static void block_trim_used(control_t* control, block_header_t* block, size_t si
remaining_block = block_merge_next(control, remaining_block);
block_insert(control, remaining_block);
}
+
+ kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+ KASAN_KMALLOC_REDZONE);
+ kasan_unpoison_shadow(block_to_ptr(block), used);
}
static block_header_t* block_trim_free_leading(control_t* control, block_header_t* block, size_t size)
@@ -589,7 +594,8 @@ static block_header_t* block_locate_free(control_t* control, size_t size)
return block;
}
-static void* block_prepare_used(control_t* control, block_header_t* block, size_t size)
+static void* block_prepare_used(control_t* control, block_header_t* block,
+ size_t size, size_t used)
{
void* p = 0;
if (block)
@@ -598,6 +604,10 @@ static void* block_prepare_used(control_t* control, block_header_t* block, size_
block_trim_free(control, block, size);
block_mark_as_used(block);
p = block_to_ptr(block);
+
+ kasan_poison_shadow(&block->size, size + 2 * sizeof(size_t),
+ KASAN_KMALLOC_REDZONE);
+ kasan_unpoison_shadow(p, used);
}
return p;
}
@@ -926,13 +936,20 @@ void* tlsf_malloc(tlsf_t tlsf, size_t size)
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
block_header_t* block = block_locate_free(control, adjust);
- return block_prepare_used(control, block, adjust);
+ void *ret;
+
+ ret = block_prepare_used(control, block, adjust, size);
+ if (!ret)
+ return ret;
+
+ return ret;
}
void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
{
control_t* control = tlsf_cast(control_t*, tlsf);
const size_t adjust = adjust_request_size(size, ALIGN_SIZE);
+ void *ret;
/*
** We must allocate an additional minimum block size bytes so that if
@@ -983,7 +1000,11 @@ void* tlsf_memalign(tlsf_t tlsf, size_t align, size_t size)
}
}
- return block_prepare_used(control, block, adjust);
+ ret = block_prepare_used(control, block, adjust, size);
+ if (!ret)
+ return NULL;
+
+ return ret;
}
void tlsf_free(tlsf_t tlsf, void* ptr)
@@ -994,6 +1015,7 @@ void tlsf_free(tlsf_t tlsf, void* ptr)
control_t* control = tlsf_cast(control_t*, tlsf);
block_header_t* block = block_from_ptr(ptr);
tlsf_assert(!block_is_free(block) && "block already marked as free");
+ kasan_poison_shadow(ptr, block_size(block), 0xff);
block_mark_as_free(block);
block = block_merge_prev(control, block);
block = block_merge_next(control, block);
@@ -1050,7 +1072,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
if (p)
{
const size_t minsize = tlsf_min(cursize, size);
- memcpy(p, ptr, minsize);
+ __memcpy(p, ptr, minsize);
tlsf_free(tlsf, ptr);
}
}
@@ -1064,7 +1086,7 @@ void* tlsf_realloc(tlsf_t tlsf, void* ptr, size_t size)
}
/* Trim the resulting block and return the original pointer. */
- block_trim_used(control, block, adjust);
+ block_trim_used(control, block, adjust, size);
p = ptr;
}
}
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
new file mode 100644
index 0000000000..7c184cd0e2
--- /dev/null
+++ b/include/linux/kasan.h
@@ -0,0 +1,89 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_KASAN_H
+#define _LINUX_KASAN_H
+
+#include <linux/types.h>
+
+/*
+ * On 64bit architectures tlsf aligns all allocations to a 64bit
+ * boundary, otherwise they are only 32bit aligned.
+ */
+#ifdef CONFIG_64BIT
+#define KASAN_SHADOW_SCALE_SHIFT 3
+#else
+#define KASAN_SHADOW_SCALE_SHIFT 2
+#endif
+
+#define KASAN_TAG_KERNEL 0xFF /* native kernel pointers tag */
+#define KASAN_TAG_INVALID 0xFE /* inaccessible memory tag */
+#define KASAN_TAG_MAX 0xFD /* maximum value for random tags */
+
+#define KASAN_FREE_PAGE 0xFF /* page was freed */
+#define KASAN_PAGE_REDZONE 0xFE /* redzone for kmalloc_large allocations */
+#define KASAN_KMALLOC_REDZONE 0xFC /* redzone inside slub object */
+#define KASAN_KMALLOC_FREE 0xFB /* object was freed (kmem_cache_free/kfree) */
+#define KASAN_KMALLOC_FREETRACK 0xFA /* object was freed and has free track set */
+
+#define KASAN_GLOBAL_REDZONE 0xF9 /* redzone for global variable */
+#define KASAN_VMALLOC_INVALID 0xF8 /* unallocated space in vmapped page */
+
+/*
+ * Stack redzone shadow values
+ * (Those are compiler's ABI, don't change them)
+ */
+#define KASAN_STACK_LEFT 0xF1
+#define KASAN_STACK_MID 0xF2
+#define KASAN_STACK_RIGHT 0xF3
+#define KASAN_STACK_PARTIAL 0xF4
+
+/*
+ * alloca redzone shadow values
+ */
+#define KASAN_ALLOCA_LEFT 0xCA
+#define KASAN_ALLOCA_RIGHT 0xCB
+
+#ifdef CONFIG_KASAN
+
+extern unsigned long kasan_shadow_start;
+extern unsigned long kasan_shadow_base;
+
+static inline void *kasan_mem_to_shadow(const void *addr)
+{
+ unsigned long a = (unsigned long)addr;
+
+ a -= kasan_shadow_start;
+ a >>= KASAN_SHADOW_SCALE_SHIFT;
+ a += kasan_shadow_base;
+
+ return (void *)a;
+}
+
+void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base);
+
+/* Enable reporting bugs after kasan_disable_current() */
+extern void kasan_enable_current(void);
+
+/* Disable reporting bugs for current task */
+extern void kasan_disable_current(void);
+
+void kasan_poison_shadow(const void *address, size_t size, u8 value);
+void kasan_unpoison_shadow(const void *address, size_t size);
+
+bool kasan_save_enable_multi_shot(void);
+void kasan_restore_multi_shot(bool enabled);
+
+#else /* CONFIG_KASAN */
+
+static inline void kasan_poison_shadow(const void *address, size_t size, u8 value) {}
+static inline void kasan_unpoison_shadow(const void *address, size_t size) {}
+
+static inline void kasan_enable_current(void) {}
+static inline void kasan_disable_current(void) {}
+
+static inline void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base) {}
+
+#endif /* CONFIG_KASAN */
+
+#endif /* LINUX_KASAN_H */
diff --git a/lib/Kconfig b/lib/Kconfig
index 90552f3c27..6d909c1ac8 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -154,6 +154,8 @@ source "lib/logo/Kconfig"
source "lib/bootstrap/Kconfig"
+source "lib/kasan/Kconfig"
+
config PRINTF_UUID
bool
diff --git a/lib/Makefile b/lib/Makefile
index 73399a1bf1..ba6af6f2ab 100644
--- a/lib/Makefile
+++ b/lib/Makefile
@@ -7,6 +7,7 @@ obj-y += string.o
obj-y += strtox.o
obj-y += kstrtox.o
obj-y += vsprintf.o
+obj-$(CONFIG_KASAN) += kasan/
pbl-$(CONFIG_PBL_CONSOLE) += vsprintf.o
obj-y += div64.o
pbl-y += div64.o
diff --git a/lib/kasan/Kconfig b/lib/kasan/Kconfig
new file mode 100644
index 0000000000..7a18cf95be
--- /dev/null
+++ b/lib/kasan/Kconfig
@@ -0,0 +1,16 @@
+source "scripts/Kconfig.include"
+
+config HAVE_ARCH_KASAN
+ bool
+
+config CC_HAS_KASAN_GENERIC
+ def_bool $(cc-option, -fsanitize=kernel-address)
+
+config KASAN
+ bool "KASAN: runtime memory debugger"
+ depends on (HAVE_ARCH_KASAN && CC_HAS_KASAN_GENERIC)
+ depends on MALLOC_TLSF
+ select CONSTRUCTORS
+ help
+ Enables KASAN (KernelAddressSANitizer) - runtime memory debugger,
+ designed to find out-of-bounds accesses and use-after-free bugs.
diff --git a/lib/kasan/Makefile b/lib/kasan/Makefile
new file mode 100644
index 0000000000..31e9d890d5
--- /dev/null
+++ b/lib/kasan/Makefile
@@ -0,0 +1,14 @@
+
+obj-y += generic_report.o generic.o report.o common.o test_kasan.o
+KASAN_SANITIZE_generic_report.o := n
+KASAN_SANITIZE_generic.o := n
+KASAN_SANITIZE_report.o := n
+KASAN_SANITIZE_common.o := n
+
+CC_FLAGS_KASAN_RUNTIME := $(call cc-option, -fno-conserve-stack)
+CC_FLAGS_KASAN_RUNTIME += -fno-stack-protector
+
+CFLAGS_generic_report.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_generic.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_report.o := $(CC_FLAGS_KASAN_RUNTIME)
+CFLAGS_common.o := $(CC_FLAGS_KASAN_RUNTIME)
diff --git a/lib/kasan/common.c b/lib/kasan/common.c
new file mode 100644
index 0000000000..1ebf66a7b8
--- /dev/null
+++ b/lib/kasan/common.c
@@ -0,0 +1,108 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common generic and tag-based KASAN code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/kasan.h>
+#include <linux/kernel.h>
+
+#include "kasan.h"
+
+int kasan_depth;
+
+void kasan_enable_current(void)
+{
+ kasan_depth++;
+}
+
+void kasan_disable_current(void)
+{
+ kasan_depth--;
+}
+
+#undef memset
+void *memset(void *addr, int c, size_t len)
+{
+ if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
+ return NULL;
+
+ return __memset(addr, c, len);
+}
+
+#ifdef __HAVE_ARCH_MEMMOVE
+#undef memmove
+void *memmove(void *dest, const void *src, size_t len)
+{
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memmove(dest, src, len);
+}
+#endif
+
+#undef memcpy
+void *memcpy(void *dest, const void *src, size_t len)
+{
+ if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
+ !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
+ return NULL;
+
+ return __memcpy(dest, src, len);
+}
+
+/*
+ * Poisons the shadow memory for 'size' bytes starting from 'addr'.
+ * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
+ */
+void kasan_poison_shadow(const void *address, size_t size, u8 value)
+{
+ void *shadow_start, *shadow_end;
+
+ /*
+ * Perform shadow offset calculation based on untagged address, as
+ * some of the callers (e.g. kasan_poison_object_data) pass tagged
+ * addresses to this function.
+ */
+ address = reset_tag(address);
+
+ shadow_start = kasan_mem_to_shadow(address);
+ shadow_end = kasan_mem_to_shadow(address + size);
+
+ __memset(shadow_start, value, shadow_end - shadow_start);
+}
+
+void kasan_unpoison_shadow(const void *address, size_t size)
+{
+ u8 tag = get_tag(address);
+
+ /*
+ * Perform shadow offset calculation based on untagged address, as
+ * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
+ * addresses to this function.
+ */
+ address = reset_tag(address);
+
+ kasan_poison_shadow(address, size, tag);
+
+ if (size & KASAN_SHADOW_MASK) {
+ u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
+
+ if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
+ *shadow = tag;
+ else
+ *shadow = size & KASAN_SHADOW_MASK;
+ }
+}
diff --git a/lib/kasan/generic.c b/lib/kasan/generic.c
new file mode 100644
index 0000000000..b33a6c1a6c
--- /dev/null
+++ b/lib/kasan/generic.c
@@ -0,0 +1,315 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains core generic KASAN code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <common.h>
+
+#include "kasan.h"
+
+unsigned long kasan_shadow_start;
+unsigned long kasan_shadow_base;
+
+/*
+ * All functions below always inlined so compiler could
+ * perform better optimizations in each of __asan_loadX/__assn_storeX
+ * depending on memory access size X.
+ */
+
+static __always_inline bool memory_is_poisoned_1(unsigned long addr)
+{
+ s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);
+
+ if (unlikely(shadow_value)) {
+ s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
+ return unlikely(last_accessible_byte >= shadow_value);
+ }
+
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned_2_4_8(unsigned long addr,
+ unsigned long size)
+{
+ u8 *shadow_addr = (u8 *)kasan_mem_to_shadow((void *)addr);
+
+ /*
+ * Access crosses 8(shadow size)-byte boundary. Such access maps
+ * into 2 shadow bytes, so we need to check them both.
+ */
+ if (unlikely(((addr + size - 1) & KASAN_SHADOW_MASK) < size - 1))
+ return *shadow_addr || memory_is_poisoned_1(addr + size - 1);
+
+ return memory_is_poisoned_1(addr + size - 1);
+}
+
+static __always_inline bool memory_is_poisoned_16(unsigned long addr)
+{
+ u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);
+
+ /* Unaligned 16-bytes access maps into 3 shadow bytes. */
+ if (unlikely(!IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
+ return *shadow_addr || memory_is_poisoned_1(addr + 15);
+
+ return *shadow_addr;
+}
+
+static __always_inline unsigned long bytes_is_nonzero(const u8 *start,
+ size_t size)
+{
+ while (size) {
+ if (unlikely(*start))
+ return (unsigned long)start;
+ start++;
+ size--;
+ }
+
+ return 0;
+}
+
+static __always_inline unsigned long memory_is_nonzero(const void *start,
+ const void *end)
+{
+ unsigned int words;
+ unsigned long ret;
+ unsigned int prefix = (unsigned long)start % 8;
+
+ if (end - start <= 16)
+ return bytes_is_nonzero(start, end - start);
+
+ if (prefix) {
+ prefix = 8 - prefix;
+ ret = bytes_is_nonzero(start, prefix);
+ if (unlikely(ret))
+ return ret;
+ start += prefix;
+ }
+
+ words = (end - start) / 8;
+ while (words) {
+ if (unlikely(*(u64 *)start))
+ return bytes_is_nonzero(start, 8);
+ start += 8;
+ words--;
+ }
+
+ return bytes_is_nonzero(start, (end - start) % 8);
+}
+
+static __always_inline bool memory_is_poisoned_n(unsigned long addr,
+ size_t size)
+{
+ unsigned long ret;
+
+ ret = memory_is_nonzero(kasan_mem_to_shadow((void *)addr),
+ kasan_mem_to_shadow((void *)addr + size - 1) + 1);
+
+ if (unlikely(ret)) {
+ unsigned long last_byte = addr + size - 1;
+ s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);
+
+ if (unlikely(ret != (unsigned long)last_shadow ||
+ ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
+ return true;
+ }
+ return false;
+}
+
+static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
+{
+ if (__builtin_constant_p(size)) {
+ switch (size) {
+ case 1:
+ return memory_is_poisoned_1(addr);
+ case 2:
+ case 4:
+ case 8:
+ return memory_is_poisoned_2_4_8(addr, size);
+ case 16:
+ return memory_is_poisoned_16(addr);
+ default:
+ BUILD_BUG();
+ }
+ }
+
+ return memory_is_poisoned_n(addr, size);
+}
+
+static bool kasan_initialized;
+
+static __always_inline bool check_memory_region_inline(unsigned long addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ if (!kasan_initialized)
+ return true;
+
+ if (addr < kasan_shadow_start)
+ return true;
+
+ if (unlikely(size == 0))
+ return true;
+
+ if (unlikely(addr + size < addr))
+ return !kasan_report(addr, size, write, ret_ip);
+
+ if (addr < kasan_shadow_base)
+ return true;
+
+ if (likely(!memory_is_poisoned(addr, size)))
+ return true;
+
+ return !kasan_report(addr, size, write, ret_ip);
+}
+
+void kasan_init(unsigned long membase, unsigned long memsize,
+ unsigned long shadow_base)
+{
+ kasan_shadow_start = membase;
+ kasan_shadow_base = shadow_base;
+
+ kasan_unpoison_shadow((void *)membase, memsize);
+ kasan_initialized = true;
+}
+
+bool __no_sanitize_address check_memory_region(unsigned long addr,
+ size_t size, bool write,
+ unsigned long ret_ip)
+{
+ return check_memory_region_inline(addr, size, write, ret_ip);
+}
+
+static void register_global(struct kasan_global *global)
+{
+ size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);
+
+ kasan_unpoison_shadow(global->beg, global->size);
+
+ kasan_poison_shadow(global->beg + aligned_size,
+ global->size_with_redzone - aligned_size,
+ KASAN_GLOBAL_REDZONE);
+}
+
+void __asan_register_globals(struct kasan_global *globals, size_t size)
+{
+ int i;
+
+ for (i = 0; i < size; i++)
+ register_global(&globals[i]);
+}
+EXPORT_SYMBOL(__asan_register_globals);
+
+void __asan_unregister_globals(struct kasan_global *globals, size_t size)
+{
+}
+EXPORT_SYMBOL(__asan_unregister_globals);
+
+#define DEFINE_ASAN_LOAD_STORE(size) \
+ void __no_sanitize_address __asan_load##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, false, _RET_IP_);\
+ } \
+ EXPORT_SYMBOL(__asan_load##size); \
+ __alias(__asan_load##size) \
+ void __no_sanitize_address __asan_load##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_load##size##_noabort); \
+ void __asan_store##size(unsigned long addr) \
+ { \
+ check_memory_region_inline(addr, size, true, _RET_IP_); \
+ } \
+ EXPORT_SYMBOL(__asan_store##size); \
+ __alias(__asan_store##size) \
+ void __asan_store##size##_noabort(unsigned long); \
+ EXPORT_SYMBOL(__asan_store##size##_noabort)
+
+DEFINE_ASAN_LOAD_STORE(1);
+DEFINE_ASAN_LOAD_STORE(2);
+DEFINE_ASAN_LOAD_STORE(4);
+DEFINE_ASAN_LOAD_STORE(8);
+DEFINE_ASAN_LOAD_STORE(16);
+
+void __asan_loadN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_loadN);
+
+__alias(__asan_loadN)
+void __asan_loadN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_loadN_noabort);
+
+void __asan_storeN(unsigned long addr, size_t size)
+{
+ check_memory_region(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_storeN);
+
+__alias(__asan_storeN)
+void __asan_storeN_noabort(unsigned long, size_t);
+EXPORT_SYMBOL(__asan_storeN_noabort);
+
+/* to shut up compiler complaints */
+void __asan_handle_no_return(void) {}
+EXPORT_SYMBOL(__asan_handle_no_return);
+
+/* Emitted by compiler to poison alloca()ed objects. */
+void __asan_alloca_poison(unsigned long addr, size_t size)
+{
+ size_t rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
+ size_t padding_size = round_up(size, KASAN_ALLOCA_REDZONE_SIZE) -
+ rounded_up_size;
+ size_t rounded_down_size = round_down(size, KASAN_SHADOW_SCALE_SIZE);
+
+ const void *left_redzone = (const void *)(addr -
+ KASAN_ALLOCA_REDZONE_SIZE);
+ const void *right_redzone = (const void *)(addr + rounded_up_size);
+
+ WARN_ON(!IS_ALIGNED(addr, KASAN_ALLOCA_REDZONE_SIZE));
+
+ kasan_unpoison_shadow((const void *)(addr + rounded_down_size),
+ size - rounded_down_size);
+ kasan_poison_shadow(left_redzone, KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_LEFT);
+ kasan_poison_shadow(right_redzone,
+ padding_size + KASAN_ALLOCA_REDZONE_SIZE,
+ KASAN_ALLOCA_RIGHT);
+}
+EXPORT_SYMBOL(__asan_alloca_poison);
+
+/* Emitted by compiler to unpoison alloca()ed areas when the stack unwinds. */
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom)
+{
+ if (unlikely(!stack_top || stack_top > stack_bottom))
+ return;
+
+ kasan_unpoison_shadow(stack_top, stack_bottom - stack_top);
+}
+EXPORT_SYMBOL(__asan_allocas_unpoison);
+
+/* Emitted by the compiler to [un]poison local variables. */
+#define DEFINE_ASAN_SET_SHADOW(byte) \
+ void __asan_set_shadow_##byte(const void *addr, size_t size) \
+ { \
+ __memset((void *)addr, 0x##byte, size); \
+ } \
+ EXPORT_SYMBOL(__asan_set_shadow_##byte)
+
+DEFINE_ASAN_SET_SHADOW(00);
+DEFINE_ASAN_SET_SHADOW(f1);
+DEFINE_ASAN_SET_SHADOW(f2);
+DEFINE_ASAN_SET_SHADOW(f3);
+DEFINE_ASAN_SET_SHADOW(f5);
+DEFINE_ASAN_SET_SHADOW(f8);
diff --git a/lib/kasan/generic_report.c b/lib/kasan/generic_report.c
new file mode 100644
index 0000000000..1cc5829e8d
--- /dev/null
+++ b/lib/kasan/generic_report.c
@@ -0,0 +1,150 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains generic KASAN specific error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/bitops.h>
+
+#include <asm/sections.h>
+
+#include "kasan.h"
+
+void *find_first_bad_addr(void *addr, size_t size)
+{
+ void *p = addr;
+
+ while (p < addr + size && !(*(u8 *)kasan_mem_to_shadow(p)))
+ p += KASAN_SHADOW_SCALE_SIZE;
+ return p;
+}
+
+static const char *get_shadow_bug_type(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown-crash";
+ u8 *shadow_addr;
+
+ shadow_addr = (u8 *)kasan_mem_to_shadow(info->first_bad_addr);
+
+ /*
+ * If shadow byte value is in [0, KASAN_SHADOW_SCALE_SIZE) we can look
+ * at the next shadow byte to determine the type of the bad access.
+ */
+ if (*shadow_addr > 0 && *shadow_addr <= KASAN_SHADOW_SCALE_SIZE - 1)
+ shadow_addr++;
+
+ switch (*shadow_addr) {
+ case 0 ... KASAN_SHADOW_SCALE_SIZE - 1:
+ /*
+ * In theory it's still possible to see these shadow values
+ * due to a data race in the kernel code.
+ */
+ bug_type = "out-of-bounds";
+ break;
+ case KASAN_PAGE_REDZONE:
+ case KASAN_KMALLOC_REDZONE:
+ bug_type = "slab-out-of-bounds";
+ break;
+ case KASAN_GLOBAL_REDZONE:
+ bug_type = "global-out-of-bounds";
+ break;
+ case KASAN_STACK_LEFT:
+ case KASAN_STACK_MID:
+ case KASAN_STACK_RIGHT:
+ case KASAN_STACK_PARTIAL:
+ bug_type = "stack-out-of-bounds";
+ break;
+ case KASAN_FREE_PAGE:
+ case KASAN_KMALLOC_FREE:
+ case KASAN_KMALLOC_FREETRACK:
+ bug_type = "use-after-free";
+ break;
+ case KASAN_ALLOCA_LEFT:
+ case KASAN_ALLOCA_RIGHT:
+ bug_type = "alloca-out-of-bounds";
+ break;
+ case KASAN_VMALLOC_INVALID:
+ bug_type = "vmalloc-out-of-bounds";
+ break;
+ }
+
+ return bug_type;
+}
+
+static const char *get_wild_bug_type(struct kasan_access_info *info)
+{
+ const char *bug_type = "unknown-crash";
+
+ if ((unsigned long)info->access_addr < PAGE_SIZE)
+ bug_type = "null-ptr-deref";
+ else
+ bug_type = "wild-memory-access";
+
+ return bug_type;
+}
+
+const char *get_bug_type(struct kasan_access_info *info)
+{
+ /*
+ * If access_size is a negative number, then it has reason to be
+ * defined as out-of-bounds bug type.
+ *
+ * Casting negative numbers to size_t would indeed turn up as
+ * a large size_t and its value will be larger than ULONG_MAX/2,
+ * so that this can qualify as out-of-bounds.
+ */
+ if (info->access_addr + info->access_size < info->access_addr)
+ return "out-of-bounds";
+
+ if (addr_has_shadow(info->access_addr))
+ return get_shadow_bug_type(info);
+ return get_wild_bug_type(info);
+}
+
+#define DEFINE_ASAN_REPORT_LOAD(size) \
+void __asan_report_load##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, false, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_load##size##_noabort)
+
+#define DEFINE_ASAN_REPORT_STORE(size) \
+void __asan_report_store##size##_noabort(unsigned long addr) \
+{ \
+ kasan_report(addr, size, true, _RET_IP_); \
+} \
+EXPORT_SYMBOL(__asan_report_store##size##_noabort)
+
+DEFINE_ASAN_REPORT_LOAD(1);
+DEFINE_ASAN_REPORT_LOAD(2);
+DEFINE_ASAN_REPORT_LOAD(4);
+DEFINE_ASAN_REPORT_LOAD(8);
+DEFINE_ASAN_REPORT_LOAD(16);
+DEFINE_ASAN_REPORT_STORE(1);
+DEFINE_ASAN_REPORT_STORE(2);
+DEFINE_ASAN_REPORT_STORE(4);
+DEFINE_ASAN_REPORT_STORE(8);
+DEFINE_ASAN_REPORT_STORE(16);
+
+void __asan_report_load_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, false, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_load_n_noabort);
+
+void __asan_report_store_n_noabort(unsigned long addr, size_t size)
+{
+ kasan_report(addr, size, true, _RET_IP_);
+}
+EXPORT_SYMBOL(__asan_report_store_n_noabort);
diff --git a/lib/kasan/kasan.h b/lib/kasan/kasan.h
new file mode 100644
index 0000000000..e17f49dbec
--- /dev/null
+++ b/lib/kasan/kasan.h
@@ -0,0 +1,164 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __MM_KASAN_KASAN_H
+#define __MM_KASAN_KASAN_H
+
+#include <linux/kasan.h>
+#include <linux/linkage.h>
+
+#define KASAN_SHADOW_SCALE_SIZE (1UL << KASAN_SHADOW_SCALE_SHIFT)
+#define KASAN_SHADOW_MASK (KASAN_SHADOW_SCALE_SIZE - 1)
+
+#define KASAN_ALLOCA_REDZONE_SIZE 32
+
+/*
+ * Stack frame marker (compiler ABI).
+ */
+#define KASAN_CURRENT_STACK_FRAME_MAGIC 0x41B58AB3
+
+/* Don't break randconfig/all*config builds */
+#ifndef KASAN_ABI_VERSION
+#define KASAN_ABI_VERSION 1
+#endif
+
+struct kasan_access_info {
+ const void *access_addr;
+ const void *first_bad_addr;
+ size_t access_size;
+ bool is_write;
+ unsigned long ip;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_source_location {
+ const char *filename;
+ int line_no;
+ int column_no;
+};
+
+/* The layout of struct dictated by compiler */
+struct kasan_global {
+ const void *beg; /* Address of the beginning of the global variable. */
+ size_t size; /* Size of the global variable. */
+ size_t size_with_redzone; /* Size of the variable + size of the red zone. 32 bytes aligned */
+ const void *name;
+ const void *module_name; /* Name of the module where the global variable is declared. */
+ unsigned long has_dynamic_init; /* This is needed for C++. */
+#if KASAN_ABI_VERSION >= 4
+ struct kasan_source_location *location;
+#endif
+#if KASAN_ABI_VERSION >= 5
+ char *odr_indicator;
+#endif
+};
+
+static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
+{
+ unsigned long sa = (unsigned long)shadow_addr;
+
+ sa -= kasan_shadow_base;
+ sa <<= KASAN_SHADOW_SCALE_SHIFT;
+ sa += kasan_shadow_start;
+
+ return (void *)sa;
+}
+
+static inline bool addr_has_shadow(const void *addr)
+{
+ return (addr >= (void *)kasan_shadow_start);
+}
+
+/**
+ * check_memory_region - check an access to a memory region and report it if invalid.
+ * @addr: the accessed address
+ * @size: the accessed size
+ * @write: true if access is a write access
+ * @ret_ip: return address
+ * @return: true if access was valid, false if invalid
+ */
+bool check_memory_region(unsigned long addr, size_t size, bool write,
+ unsigned long ret_ip);
+
+void *find_first_bad_addr(void *addr, size_t size);
+const char *get_bug_type(struct kasan_access_info *info);
+
+bool kasan_report(unsigned long addr, size_t size,
+ bool is_write, unsigned long ip);
+void kasan_report_invalid_free(void *object, unsigned long ip);
+
+#ifndef arch_kasan_set_tag
+static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
+{
+ return addr;
+}
+#endif
+#ifndef arch_kasan_reset_tag
+#define arch_kasan_reset_tag(addr) ((void *)(addr))
+#endif
+#ifndef arch_kasan_get_tag
+#define arch_kasan_get_tag(addr) 0
+#endif
+
+#define set_tag(addr, tag) ((void *)arch_kasan_set_tag((addr), (tag)))
+#define reset_tag(addr) ((void *)arch_kasan_reset_tag(addr))
+#define get_tag(addr) arch_kasan_get_tag(addr)
+
+/*
+ * Exported functions for interfaces called from assembly or from generated
+ * code. Declarations here to avoid warning about missing declarations.
+ */
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
+void __asan_register_globals(struct kasan_global *globals, size_t size);
+void __asan_unregister_globals(struct kasan_global *globals, size_t size);
+void __asan_handle_no_return(void);
+void __asan_alloca_poison(unsigned long addr, size_t size);
+void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);
+
+void __asan_load1(unsigned long addr);
+void __asan_store1(unsigned long addr);
+void __asan_load2(unsigned long addr);
+void __asan_store2(unsigned long addr);
+void __asan_load4(unsigned long addr);
+void __asan_store4(unsigned long addr);
+void __asan_load8(unsigned long addr);
+void __asan_store8(unsigned long addr);
+void __asan_load16(unsigned long addr);
+void __asan_store16(unsigned long addr);
+void __asan_loadN(unsigned long addr, size_t size);
+void __asan_storeN(unsigned long addr, size_t size);
+
+void __asan_load1_noabort(unsigned long addr);
+void __asan_store1_noabort(unsigned long addr);
+void __asan_load2_noabort(unsigned long addr);
+void __asan_store2_noabort(unsigned long addr);
+void __asan_load4_noabort(unsigned long addr);
+void __asan_store4_noabort(unsigned long addr);
+void __asan_load8_noabort(unsigned long addr);
+void __asan_store8_noabort(unsigned long addr);
+void __asan_load16_noabort(unsigned long addr);
+void __asan_store16_noabort(unsigned long addr);
+void __asan_loadN_noabort(unsigned long addr, size_t size);
+void __asan_storeN_noabort(unsigned long addr, size_t size);
+
+void __asan_report_load1_noabort(unsigned long addr);
+void __asan_report_store1_noabort(unsigned long addr);
+void __asan_report_load2_noabort(unsigned long addr);
+void __asan_report_store2_noabort(unsigned long addr);
+void __asan_report_load4_noabort(unsigned long addr);
+void __asan_report_store4_noabort(unsigned long addr);
+void __asan_report_load8_noabort(unsigned long addr);
+void __asan_report_store8_noabort(unsigned long addr);
+void __asan_report_load16_noabort(unsigned long addr);
+void __asan_report_store16_noabort(unsigned long addr);
+void __asan_report_load_n_noabort(unsigned long addr, size_t size);
+void __asan_report_store_n_noabort(unsigned long addr, size_t size);
+
+void __asan_set_shadow_00(const void *addr, size_t size);
+void __asan_set_shadow_f1(const void *addr, size_t size);
+void __asan_set_shadow_f2(const void *addr, size_t size);
+void __asan_set_shadow_f3(const void *addr, size_t size);
+void __asan_set_shadow_f5(const void *addr, size_t size);
+void __asan_set_shadow_f8(const void *addr, size_t size);
+
+extern int kasan_depth;
+
+#endif
diff --git a/lib/kasan/report.c b/lib/kasan/report.c
new file mode 100644
index 0000000000..b7b2d032ee
--- /dev/null
+++ b/lib/kasan/report.c
@@ -0,0 +1,199 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * This file contains common generic and tag-based KASAN error reporting code.
+ *
+ * Copyright (c) 2014 Samsung Electronics Co., Ltd.
+ * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
+ *
+ * Some code borrowed from https://github.com/xairy/kasan-prototype by
+ * Andrey Konovalov <andreyknvl@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ */
+
+#include <common.h>
+#include <linux/bitops.h>
+#include <linux/kernel.h>
+#include <printk.h>
+#include <asm-generic/sections.h>
+
+#include "kasan.h"
+
+/* Shadow layout customization. */
+#define SHADOW_BYTES_PER_BLOCK 1
+#define SHADOW_BLOCKS_PER_ROW 16
+#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
+#define SHADOW_ROWS_AROUND_ADDR 2
+
+static unsigned long kasan_flags;
+
+#define KASAN_BIT_REPORTED 0
+#define KASAN_BIT_MULTI_SHOT 1
+
+bool kasan_save_enable_multi_shot(void)
+{
+ return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
+
+void kasan_restore_multi_shot(bool enabled)
+{
+ if (!enabled)
+ clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
+}
+EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
+
+static void print_error_description(struct kasan_access_info *info)
+{
+ pr_err("BUG: KASAN: %s in %pS\n",
+ get_bug_type(info), (void *)info->ip);
+ pr_err("%s of size %zu at addr %px\n",
+ info->is_write ? "Write" : "Read", info->access_size,
+ info->access_addr);
+}
+
+static void start_report(unsigned long *flags)
+{
+ /*
+ * Make sure we don't end up in a loop.
+ */
+ kasan_disable_current();
+ pr_err("==================================================================\n");
+}
+
+static void end_report(unsigned long *flags)
+{
+ pr_err("==================================================================\n");
+ kasan_enable_current();
+}
+
+static inline bool kernel_or_module_addr(const void *addr)
+{
+ if (addr >= (void *)_stext && addr < (void *)_end)
+ return true;
+ return false;
+}
+
+static void print_address_description(void *addr, u8 tag)
+{
+ dump_stack();
+ pr_err("\n");
+
+ if (kernel_or_module_addr(addr)) {
+ pr_err("The buggy address belongs to the variable:\n");
+ pr_err(" %pS\n", addr);
+ }
+}
+
+static bool row_is_guilty(const void *row, const void *guilty)
+{
+ return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
+}
+
+static int shadow_pointer_offset(const void *row, const void *shadow)
+{
+ /* The length of ">ff00ff00ff00ff00: " is
+ * 3 + (BITS_PER_LONG/8)*2 chars.
+ */
+ return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
+ (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
+}
+
+static void print_shadow_for_address(const void *addr)
+{
+ int i;
+ const void *shadow = kasan_mem_to_shadow(addr);
+ const void *shadow_row;
+
+ shadow_row = (void *)round_down((unsigned long)shadow,
+ SHADOW_BYTES_PER_ROW)
+ - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
+
+ pr_err("Memory state around the buggy address:\n");
+
+ for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
+ const void *kaddr = kasan_shadow_to_mem(shadow_row);
+ char buffer[4 + (BITS_PER_LONG/8)*2];
+ char shadow_buf[SHADOW_BYTES_PER_ROW];
+
+ snprintf(buffer, sizeof(buffer),
+ (i == 0) ? ">%px: " : " %px: ", kaddr);
+ /*
+ * We should not pass a shadow pointer to a generic
+ * function, because generic functions may try to
+ * access the kasan mapping for the passed address.
+ */
+ memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
+ print_hex_dump(KERN_ERR, buffer,
+ DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
+ shadow_buf, SHADOW_BYTES_PER_ROW, 0);
+
+ if (row_is_guilty(shadow_row, shadow))
+ printf("%*c\n",
+ shadow_pointer_offset(shadow_row, shadow),
+ '^');
+
+ shadow_row += SHADOW_BYTES_PER_ROW;
+ }
+}
+
+static bool report_enabled(void)
+{
+ if (kasan_depth)
+ return false;
+ if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
+ return true;
+ return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
+}
+
+static void __kasan_report(unsigned long addr, size_t size, bool is_write,
+ unsigned long ip)
+{
+ struct kasan_access_info info;
+ void *tagged_addr;
+ void *untagged_addr;
+ unsigned long flags;
+
+ tagged_addr = (void *)addr;
+ untagged_addr = reset_tag(tagged_addr);
+
+ info.access_addr = tagged_addr;
+ if (addr_has_shadow(untagged_addr))
+ info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
+ else
+ info.first_bad_addr = untagged_addr;
+ info.access_size = size;
+ info.is_write = is_write;
+ info.ip = ip;
+
+ start_report(&flags);
+
+ print_error_description(&info);
+ pr_err("\n");
+
+ if (addr_has_shadow(untagged_addr)) {
+ print_address_description(untagged_addr, get_tag(tagged_addr));
+ pr_err("\n");
+ print_shadow_for_address(info.first_bad_addr);
+ } else {
+ dump_stack();
+ }
+
+ end_report(&flags);
+}
+
+bool kasan_report(unsigned long addr, size_t size, bool is_write,
+ unsigned long ip)
+{
+ bool ret = false;
+
+ if (likely(report_enabled())) {
+ __kasan_report(addr, size, is_write, ip);
+ ret = true;
+ }
+
+ return ret;
+}
diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
new file mode 100644
index 0000000000..83f6aa543d
--- /dev/null
+++ b/scripts/Makefile.kasan
@@ -0,0 +1,17 @@
+# SPDX-License-Identifier: GPL-2.0
+ifdef CONFIG_KASAN
+CFLAGS_KASAN_NOSANITIZE := -fno-builtin
+KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
+endif
+
+CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
+
+cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
+
+CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) \
+ $(call cc-param,asan-globals=1) \
+ $(call cc-param,asan-instrument-allocas=1)
+
+ifndef CONFIG_CPU_64
+CFLAGS_KASAN += $(call cc-param,asan-stack=1)
+endif
diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
index 337430cd00..ab7d9f2bdf 100644
--- a/scripts/Makefile.lib
+++ b/scripts/Makefile.lib
@@ -127,6 +127,16 @@ _c_flags = $(KBUILD_CFLAGS) $(ccflags-y) $(CFLAGS_$(target-stem).o)
_a_flags = $(KBUILD_AFLAGS) $(asflags-y) $(AFLAGS_$(target-stem).o)
_cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(target-stem).lds)
+#
+# Enable address sanitizer flags for kernel except some files or directories
+# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
+#
+ifeq ($(CONFIG_KASAN),y)
+_c_flags += $(if $(part-of-pbl),, $(if $(patsubst n%,, \
+ $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
+ $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)))
+endif
+
ifeq ($(CONFIG_UBSAN),y)
_CFLAGS_UBSAN = $(eval _CFLAGS_UBSAN := $(CFLAGS_UBSAN))$(_CFLAGS_UBSAN)
_c_flags += $(if $(patsubst n%,, \
--
2.28.0
* Re: [PATCH 8/9] Add KASan support
2020-09-18 8:45 ` [PATCH 8/9] Add KASan support Sascha Hauer
@ 2020-09-18 10:15 ` Ahmad Fatoum
2020-09-21 6:24 ` Sascha Hauer
0 siblings, 1 reply; 19+ messages in thread
From: Ahmad Fatoum @ 2020-09-18 10:15 UTC (permalink / raw)
To: Sascha Hauer, Barebox List
On 9/18/20 10:45 AM, Sascha Hauer wrote:
> - return block_prepare_used(control, block, adjust);
> + void *ret;
> +
> + ret = block_prepare_used(control, block, adjust, size);
> + if (!ret)
> + return ret;
> +
> + return ret;
Debugging leftover? You can just return the function result directly.
> - return block_prepare_used(control, block, adjust);
> + ret = block_prepare_used(control, block, adjust, size);
> + if (!ret)
> + return NULL;
> +
> + return ret;
Likewise
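
i.e. with the new size argument kept, both hunks can collapse to a
plain (sketch):

        return block_prepare_used(control, block, adjust, size);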
[snip]
> diff --git a/lib/kasan/report.c b/lib/kasan/report.c
> new file mode 100644
> index 0000000000..b7b2d032ee
> --- /dev/null
> +++ b/lib/kasan/report.c
> @@ -0,0 +1,199 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * This file contains common generic and tag-based KASAN error reporting code.
> + *
> + * Copyright (c) 2014 Samsung Electronics Co., Ltd.
> + * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
> + *
> + * Some code borrowed from https://github.com/xairy/kasan-prototype by
> + * Andrey Konovalov <andreyknvl@gmail.com>
> + *
> + * This program is free software; you can redistribute it and/or modify
> + * it under the terms of the GNU General Public License version 2 as
> + * published by the Free Software Foundation.
> + *
> + */
> +
> +#include <common.h>
> +#include <linux/bitops.h>
> +#include <linux/kernel.h>
> +#include <printk.h>
> +#include <asm-generic/sections.h>
> +
> +#include "kasan.h"
> +
> +/* Shadow layout customization. */
> +#define SHADOW_BYTES_PER_BLOCK 1
> +#define SHADOW_BLOCKS_PER_ROW 16
> +#define SHADOW_BYTES_PER_ROW (SHADOW_BLOCKS_PER_ROW * SHADOW_BYTES_PER_BLOCK)
> +#define SHADOW_ROWS_AROUND_ADDR 2
> +
> +static unsigned long kasan_flags;
> +
> +#define KASAN_BIT_REPORTED 0
> +#define KASAN_BIT_MULTI_SHOT 1
> +
> +bool kasan_save_enable_multi_shot(void)
> +{
> + return test_and_set_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
> +}
> +EXPORT_SYMBOL_GPL(kasan_save_enable_multi_shot);
> +
> +void kasan_restore_multi_shot(bool enabled)
> +{
> + if (!enabled)
> + clear_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags);
> +}
> +EXPORT_SYMBOL_GPL(kasan_restore_multi_shot);
> +
> +static void print_error_description(struct kasan_access_info *info)
> +{
> + pr_err("BUG: KASAN: %s in %pS\n",
> + get_bug_type(info), (void *)info->ip);
> + pr_err("%s of size %zu at addr %px\n",
> + info->is_write ? "Write" : "Read", info->access_size,
> + info->access_addr);
I just removed the pr_err in ubsan with this rationale:
common: ubsan: replace pr_err with printf
The pr_print family of functions also writes to the barebox
log buffer, which we don't require for printing UBSan errors,
which is a debugging aid. This also improves UBSan coverage as now
undefined behavior within pr_print may be reported as well.
Should we use plain printf here as well? Less code to execute
= less chance to run into a recursion.
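
For illustration, the printf variant would just be (a sketch, format
strings kept as-is):

static void print_error_description(struct kasan_access_info *info)
{
        printf("BUG: KASAN: %s in %pS\n",
               get_bug_type(info), (void *)info->ip);
        printf("%s of size %zu at addr %px\n",
               info->is_write ? "Write" : "Read", info->access_size,
               info->access_addr);
}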
> +}
> +
> +static void start_report(unsigned long *flags)
> +{
> + /*
> + * Make sure we don't end up in a loop.
> + */
> + kasan_disable_current();
> + pr_err("==================================================================\n");
> +}
> +
> +static void end_report(unsigned long *flags)
> +{
> + pr_err("==================================================================\n");
> + kasan_enable_current();
> +}
> +
> +static inline bool kernel_or_module_addr(const void *addr)
> +{
> + if (addr >= (void *)_stext && addr < (void *)_end)
> + return true;
> + return false;
> +}
> +
> +static void print_address_description(void *addr, u8 tag)
> +{
> + dump_stack();
> + pr_err("\n");
> +
> + if (kernel_or_module_addr(addr)) {
> + pr_err("The buggy address belongs to the variable:\n");
> + pr_err(" %pS\n", addr);
> + }
> +}
> +
> +static bool row_is_guilty(const void *row, const void *guilty)
> +{
> + return (row <= guilty) && (guilty < row + SHADOW_BYTES_PER_ROW);
> +}
> +
> +static int shadow_pointer_offset(const void *row, const void *shadow)
> +{
> + /* The length of ">ff00ff00ff00ff00: " is
> + * 3 + (BITS_PER_LONG/8)*2 chars.
> + */
> + return 3 + (BITS_PER_LONG/8)*2 + (shadow - row)*2 +
> + (shadow - row) / SHADOW_BYTES_PER_BLOCK + 1;
> +}
> +
> +static void print_shadow_for_address(const void *addr)
> +{
> + int i;
> + const void *shadow = kasan_mem_to_shadow(addr);
> + const void *shadow_row;
> +
> + shadow_row = (void *)round_down((unsigned long)shadow,
> + SHADOW_BYTES_PER_ROW)
> + - SHADOW_ROWS_AROUND_ADDR * SHADOW_BYTES_PER_ROW;
> +
> + pr_err("Memory state around the buggy address:\n");
> +
> + for (i = -SHADOW_ROWS_AROUND_ADDR; i <= SHADOW_ROWS_AROUND_ADDR; i++) {
> + const void *kaddr = kasan_shadow_to_mem(shadow_row);
> + char buffer[4 + (BITS_PER_LONG/8)*2];
> + char shadow_buf[SHADOW_BYTES_PER_ROW];
> +
> + snprintf(buffer, sizeof(buffer),
> + (i == 0) ? ">%px: " : " %px: ", kaddr);
> + /*
> + * We should not pass a shadow pointer to a generic
> + * function, because generic functions may try to
> + * access the kasan mapping for the passed address.
> + */
> + memcpy(shadow_buf, shadow_row, SHADOW_BYTES_PER_ROW);
> + print_hex_dump(KERN_ERR, buffer,
> + DUMP_PREFIX_NONE, SHADOW_BYTES_PER_ROW, 1,
> + shadow_buf, SHADOW_BYTES_PER_ROW, 0);
> +
> + if (row_is_guilty(shadow_row, shadow))
> + printf("%*c\n",
> + shadow_pointer_offset(shadow_row, shadow),
> + '^');
> +
> + shadow_row += SHADOW_BYTES_PER_ROW;
> + }
> +}
> +
> +static bool report_enabled(void)
> +{
> + if (kasan_depth)
> + return false;
> + if (test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags))
> + return true;
> + return !test_and_set_bit(KASAN_BIT_REPORTED, &kasan_flags);
> +}
> +
> +static void __kasan_report(unsigned long addr, size_t size, bool is_write,
> + unsigned long ip)
> +{
> + struct kasan_access_info info;
> + void *tagged_addr;
> + void *untagged_addr;
> + unsigned long flags;
> +
> + tagged_addr = (void *)addr;
> + untagged_addr = reset_tag(tagged_addr);
> +
> + info.access_addr = tagged_addr;
> + if (addr_has_shadow(untagged_addr))
> + info.first_bad_addr = find_first_bad_addr(tagged_addr, size);
> + else
> + info.first_bad_addr = untagged_addr;
> + info.access_size = size;
> + info.is_write = is_write;
> + info.ip = ip;
> +
> + start_report(&flags);
> +
> + print_error_description(&info);
> + pr_err("\n");
> +
> + if (addr_has_shadow(untagged_addr)) {
> + print_address_description(untagged_addr, get_tag(tagged_addr));
> + pr_err("\n");
> + print_shadow_for_address(info.first_bad_addr);
> + } else {
> + dump_stack();
> + }
> +
> + end_report(&flags);
> +}
> +
> +bool kasan_report(unsigned long addr, size_t size, bool is_write,
> + unsigned long ip)
> +{
> + bool ret = false;
> +
> + if (likely(report_enabled())) {
> + __kasan_report(addr, size, is_write, ip);
> + ret = true;
> + }
> +
> + return ret;
> +}
> diff --git a/scripts/Makefile.kasan b/scripts/Makefile.kasan
> new file mode 100644
> index 0000000000..83f6aa543d
> --- /dev/null
> +++ b/scripts/Makefile.kasan
> @@ -0,0 +1,17 @@
> +# SPDX-License-Identifier: GPL-2.0
> +ifdef CONFIG_KASAN
> +CFLAGS_KASAN_NOSANITIZE := -fno-builtin
> +KASAN_SHADOW_OFFSET ?= $(CONFIG_KASAN_SHADOW_OFFSET)
> +endif
> +
> +CFLAGS_KASAN_MINIMAL := -fsanitize=kernel-address
> +
> +cc-param = $(call cc-option, -mllvm -$(1), $(call cc-option, --param $(1)))
> +
> +CFLAGS_KASAN := $(CFLAGS_KASAN_MINIMAL) \
> + $(call cc-param,asan-globals=1) \
> + $(call cc-param,asan-instrument-allocas=1)
> +
> +ifndef CONFIG_CPU_64
> +CFLAGS_KASAN += $(call cc-param,asan-stack=1)
> +endif
> diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib
> index 337430cd00..ab7d9f2bdf 100644
> --- a/scripts/Makefile.lib
> +++ b/scripts/Makefile.lib
> @@ -127,6 +127,16 @@ _c_flags = $(KBUILD_CFLAGS) $(ccflags-y) $(CFLAGS_$(target-stem).o)
> _a_flags = $(KBUILD_AFLAGS) $(asflags-y) $(AFLAGS_$(target-stem).o)
> _cpp_flags = $(KBUILD_CPPFLAGS) $(cppflags-y) $(CPPFLAGS_$(target-stem).lds)
>
> +#
> +# Enable address sanitizer flags for kernel except some files or directories
> +# we don't want to check (depends on variables KASAN_SANITIZE_obj.o, KASAN_SANITIZE)
> +#
> +ifeq ($(CONFIG_KASAN),y)
> +_c_flags += $(if $(part-of-pbl),, $(if $(patsubst n%,, \
> + $(KASAN_SANITIZE_$(basetarget).o)$(KASAN_SANITIZE)y), \
> + $(CFLAGS_KASAN), $(CFLAGS_KASAN_NOSANITIZE)))
> +endif
> +
> ifeq ($(CONFIG_UBSAN),y)
> _CFLAGS_UBSAN = $(eval _CFLAGS_UBSAN := $(CFLAGS_UBSAN))$(_CFLAGS_UBSAN)
> _c_flags += $(if $(patsubst n%,, \
>
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* Re: [PATCH 8/9] Add KASan support
2020-09-18 10:15 ` Ahmad Fatoum
@ 2020-09-21 6:24 ` Sascha Hauer
0 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-21 6:24 UTC (permalink / raw)
To: Ahmad Fatoum; +Cc: Barebox List
On Fri, Sep 18, 2020 at 12:15:53PM +0200, Ahmad Fatoum wrote:
>
>
> On 9/18/20 10:45 AM, Sascha Hauer wrote:
> > - return block_prepare_used(control, block, adjust);
> > + void *ret;
> > +
> > + ret = block_prepare_used(control, block, adjust, size);
> > + if (!ret)
> > + return ret;
> > +
> > + return ret;
>
> Debugging leftover? You can just return the function result directly.
Not exactly a debugging leftover. I had the poisoning code here in an
earlier version and didn't roll it back completely when moving the code
elsewhere. Fixed.
> > +static void print_error_description(struct kasan_access_info *info)
> > +{
> > + pr_err("BUG: KASAN: %s in %pS\n",
> > + get_bug_type(info), (void *)info->ip);
> > + pr_err("%s of size %zu at addr %px\n",
> > + info->is_write ? "Write" : "Read", info->access_size,
> > + info->access_addr);
>
> I just removed the pr_err in ubsan with this rationale:
>
> common: ubsan: replace pr_err with printf
>
> The pr_print family of functions also writes to the barebox
> log buffer, which we don't require for printing UBSan errors,
> which is a debugging aid. This also improves UBSan coverage as now
> undefined behavior within pr_print may be reported as well.
>
> Should we use plain printf here as well? Less code to execute
> = less chance to run into a recursion.
I am not sure. I did a quick

char *foo = strdup("Hallo");
free(foo);
pr_err(foo);

At least this worked as expected. I can't really tell at the moment
whether this covers all cases. I suspect there will be surprises when a
pr_err() itself faults and we then call pr_err() again from inside the
resulting report. On the other hand I would really expect such a message
to end up in the log.
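
The recursion should at least be bounded by the report path itself:
start_report() calls kasan_disable_current() before printing anything,
and report_enabled() bails out while kasan_depth is non-zero. Roughly
(a rough sketch from memory; assuming the helpers just bump the
kasan_depth counter declared in kasan.h):

/* sketch, assuming a kasan_depth based implementation */
static inline void kasan_disable_current(void)
{
        kasan_depth++;  /* report_enabled() returns false while non-zero */
}

static inline void kasan_enable_current(void)
{
        kasan_depth--;
}

So a KASan hit inside pr_err() during a report should at worst go
unreported instead of recursing.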
Sascha
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* [PATCH 9/9] ARM: Add KASan support
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (7 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 8/9] Add KASan support Sascha Hauer
@ 2020-09-18 8:45 ` Sascha Hauer
2021-02-09 9:25 ` Ahmad Fatoum
2020-09-28 14:33 ` [PATCH 0/9] barebox " Ahmad Fatoum
9 siblings, 1 reply; 19+ messages in thread
From: Sascha Hauer @ 2020-09-18 8:45 UTC (permalink / raw)
To: Barebox List
This adds KASan support to the ARM architecture. What we are doing is:
* Add __no_sanitize_address attribute to various lowlevel functions
which do not run in a proper C environment
* Add non-instrumented variants of memset/memcpy (prefixed with '__')
* make original memcpy/memset weak symbols so strong definitions in
lib/kasan/common.c can replace them
* Use non-instrumented memcpy in early functions
* call kasan_init()
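
To illustrate the weak/strong symbol point: the strong definitions in
lib/kasan/common.c (added in the previous patch) check both buffers and
then fall through to the uninstrumented implementations, roughly like
this (sketch only; see patch 8 for the real wrappers):

/* sketch of the lib/kasan/common.c wrappers */
void *memcpy(void *dst, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dst, len, true, _RET_IP_);

        return __memcpy(dst, src, len);
}

void *memset(void *dst, int c, size_t len)
{
        check_memory_region((unsigned long)dst, len, true, _RET_IP_);

        return __memset(dst, c, len);
}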
Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
---
arch/arm/Kconfig | 1 +
arch/arm/cpu/Makefile | 2 ++
arch/arm/cpu/common.c | 2 +-
arch/arm/cpu/setupc.S | 6 +++---
arch/arm/cpu/start.c | 7 +++++--
arch/arm/include/asm/string.h | 2 ++
arch/arm/lib32/barebox.lds.S | 4 +++-
arch/arm/lib32/memcpy.S | 3 +++
arch/arm/lib32/memset.S | 4 +++-
arch/arm/lib64/string.c | 26 +++++++++++++++++++++-----
10 files changed, 44 insertions(+), 13 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 95fd8ecfe7..57ead9a783 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -4,6 +4,7 @@ config ARM
select HAS_CACHE
select HAVE_CONFIGURABLE_TEXT_BASE if !RELOCATABLE
select HAVE_IMAGE_COMPRESSION
+ select HAVE_ARCH_KASAN
default y
config ARM_LINUX
diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
index f7f9c30415..e7a6e3e6fb 100644
--- a/arch/arm/cpu/Makefile
+++ b/arch/arm/cpu/Makefile
@@ -9,6 +9,7 @@ AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
obj-y += start.o entry.o entry_ll$(S64).o
+KASAN_SANITIZE_start.o := n
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT) += board-dt-2nd.o
pbl-$(CONFIG_BOARD_ARM_GENERIC_DT_AARCH64) += board-dt-2nd-aarch64.o
@@ -51,3 +52,4 @@ pbl-y += entry.o entry_ll$(S64).o
pbl-y += uncompress.o
obj-pbl-y += common.o sections.o
+KASAN_SANITIZE_common.o := n
diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
index 33f148fc0e..8cfcc8f6ce 100644
--- a/arch/arm/cpu/common.c
+++ b/arch/arm/cpu/common.c
@@ -120,7 +120,7 @@ void relocate_to_current_adr(void)
dstart += sizeof(*rel);
}
- memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
+ __memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
#else
#error "Architecture not specified"
#endif
diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc.S
index 8ae7c89a2c..55aa105b21 100644
--- a/arch/arm/cpu/setupc.S
+++ b/arch/arm/cpu/setupc.S
@@ -21,12 +21,12 @@ ENTRY(setup_c)
ldr r2,=__bss_start
sub r2, r2, r0
add r1, r0, r4
- bl memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
+ bl __memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
1: ldr r0, =__bss_start
mov r1, #0
ldr r2, =__bss_stop
sub r2, r2, r0
- bl memset /* clear bss */
+ bl __memset /* clear bss */
bl sync_caches_for_execution
sub lr, r5, r4 /* adjust return address to new location */
pop {r4, r5}
@@ -67,7 +67,7 @@ ENTRY(relocate_to_adr)
sub r7, r7, r1 /* sub address where we are actually running */
add r7, r7, r0 /* add address where we are going to run */
- bl memcpy /* copy binary */
+ bl __memcpy /* copy binary */
bl sync_caches_for_execution
diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
index aeca459cb1..f48f5beea8 100644
--- a/arch/arm/cpu/start.c
+++ b/arch/arm/cpu/start.c
@@ -15,6 +15,7 @@
#include <asm/unaligned.h>
#include <asm/cache.h>
#include <asm/mmu.h>
+#include <linux/kasan.h>
#include <memory.h>
#include <uncompress.h>
#include <malloc.h>
@@ -135,7 +136,7 @@ static int barebox_memory_areas_init(void)
}
device_initcall(barebox_memory_areas_init);
-__noreturn void barebox_non_pbl_start(unsigned long membase,
+__noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membase,
unsigned long memsize, void *boarddata)
{
unsigned long endmem = membase + memsize;
@@ -233,6 +234,8 @@ __noreturn void barebox_non_pbl_start(unsigned long membase,
pr_debug("initializing malloc pool at 0x%08lx (size 0x%08lx)\n",
malloc_start, malloc_end - malloc_start);
+ kasan_init(membase, memsize, malloc_start - (memsize >> KASAN_SHADOW_SCALE_SHIFT));
+
mem_malloc_init((void *)malloc_start, (void *)malloc_end - 1);
if (IS_ENABLED(CONFIG_BOOTM_OPTEE))
@@ -259,7 +262,7 @@ void start(unsigned long membase, unsigned long memsize, void *boarddata);
* First function in the uncompressed image. We get here from
* the pbl. The stack already has been set up by the pbl.
*/
-void NAKED __section(.text_entry) start(unsigned long membase,
+void NAKED __no_sanitize_address __section(.text_entry) start(unsigned long membase,
unsigned long memsize, void *boarddata)
{
barebox_non_pbl_start(membase, memsize, boarddata);
diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
index 435647abda..fb577cfcd5 100644
--- a/arch/arm/include/asm/string.h
+++ b/arch/arm/include/asm/string.h
@@ -5,8 +5,10 @@
#define __HAVE_ARCH_MEMCPY
extern void *memcpy(void *, const void *, __kernel_size_t);
+extern void *__memcpy(void *, const void *, __kernel_size_t);
#define __HAVE_ARCH_MEMSET
extern void *memset(void *, int, __kernel_size_t);
+extern void *__memset(void *, int, __kernel_size_t);
#endif
diff --git a/arch/arm/lib32/barebox.lds.S b/arch/arm/lib32/barebox.lds.S
index ed279279a2..54d9b3e381 100644
--- a/arch/arm/lib32/barebox.lds.S
+++ b/arch/arm/lib32/barebox.lds.S
@@ -77,7 +77,9 @@ SECTIONS
_sdata = .;
. = ALIGN(4);
- .data : { *(.data*) }
+ .data : { *(.data*)
+ CONSTRUCTORS
+ }
.barebox_imd : { BAREBOX_IMD }
diff --git a/arch/arm/lib32/memcpy.S b/arch/arm/lib32/memcpy.S
index 5123691ca9..0fcdaa88e6 100644
--- a/arch/arm/lib32/memcpy.S
+++ b/arch/arm/lib32/memcpy.S
@@ -56,9 +56,12 @@
/* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
+.weak memcpy
ENTRY(memcpy)
+ENTRY(__memcpy)
#include "copy_template.S"
+ENDPROC(__memcpy)
ENDPROC(memcpy)
diff --git a/arch/arm/lib32/memset.S b/arch/arm/lib32/memset.S
index c4d2672038..6079dd89f6 100644
--- a/arch/arm/lib32/memset.S
+++ b/arch/arm/lib32/memset.S
@@ -15,6 +15,8 @@
.text
.align 5
+.weak memset
+ENTRY(__memset)
ENTRY(memset)
ands r3, r0, #3 @ 1 unaligned?
mov ip, r0 @ preserve r0 as return value
@@ -121,4 +123,4 @@ ENTRY(memset)
add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
b 1b
ENDPROC(memset)
-
+ENDPROC(__memset)
diff --git a/arch/arm/lib64/string.c b/arch/arm/lib64/string.c
index cb26331527..a2cf09e58e 100644
--- a/arch/arm/lib64/string.c
+++ b/arch/arm/lib64/string.c
@@ -5,18 +5,34 @@
void *__arch_memset(void *dst, int c, __kernel_size_t size);
void *__arch_memcpy(void * dest, const void *src, size_t count);
-void *memset(void *dst, int c, __kernel_size_t size)
+static void *_memset(void *dst, int c, __kernel_size_t size)
{
if (likely(get_cr() & CR_M))
return __arch_memset(dst, c, size);
- return __default_memset(dst, c, size);
+ return __nokasan_default_memset(dst, c, size);
}
-void *memcpy(void * dest, const void *src, size_t count)
+void __weak *memset(void *dst, int c, __kernel_size_t size)
+{
+ return _memset(dst, c, size);
+}
+
+void *__memset(void *dst, int c, __kernel_size_t size)
+ __alias(_memset);
+
+static void *_memcpy(void * dest, const void *src, size_t count)
{
if (likely(get_cr() & CR_M))
return __arch_memcpy(dest, src, count);
- return __default_memcpy(dest, src, count);
-}
\ No newline at end of file
+ return __nokasan_default_memcpy(dest, src, count);
+}
+
+void __weak *memcpy(void * dest, const void *src, size_t count)
+{
+ return _memcpy(dest, src, count);
+}
+
+void *__memcpy(void * dest, const void *src, size_t count)
+ __alias(_memcpy);
--
2.28.0
* Re: [PATCH 9/9] ARM: Add KASan support
2020-09-18 8:45 ` [PATCH 9/9] ARM: " Sascha Hauer
@ 2021-02-09 9:25 ` Ahmad Fatoum
2021-02-10 9:26 ` Sascha Hauer
0 siblings, 1 reply; 19+ messages in thread
From: Ahmad Fatoum @ 2021-02-09 9:25 UTC (permalink / raw)
To: Sascha Hauer, Barebox List
Hello Sascha,
On 18.09.20 10:45, Sascha Hauer wrote:
> This adds KASan support to the ARM architecture. What we are doing is:
>
> * Add __no_sanitize_address attribute to various lowlevel functions
> which do not run in a proper C environment
> * Add non-instrumented variants of memset/memcpy (prefixed with '__')
> * make original memcpy/memset weak symbols so strong definitions in
> lib/kasan/common.c can replace them
> * Use non-instrumented memcpy in early functions
> * call kasan_init()
>
> Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> ---
> arch/arm/Kconfig | 1 +
> arch/arm/cpu/Makefile | 2 ++
> arch/arm/cpu/common.c | 2 +-
> arch/arm/cpu/setupc.S | 6 +++---
Shouldn't you've touched setupc_64.S here as well?
> arch/arm/cpu/start.c | 7 +++++--
> arch/arm/include/asm/string.h | 2 ++
> arch/arm/lib32/barebox.lds.S | 4 +++-
> arch/arm/lib32/memcpy.S | 3 +++
> arch/arm/lib32/memset.S | 4 +++-
> arch/arm/lib64/string.c | 26 +++++++++++++++++++++-----
> 10 files changed, 44 insertions(+), 13 deletions(-)
>
> diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
> index 95fd8ecfe7..57ead9a783 100644
> --- a/arch/arm/Kconfig
> +++ b/arch/arm/Kconfig
> @@ -4,6 +4,7 @@ config ARM
> select HAS_CACHE
> select HAVE_CONFIGURABLE_TEXT_BASE if !RELOCATABLE
> select HAVE_IMAGE_COMPRESSION
> + select HAVE_ARCH_KASAN
> default y
>
> config ARM_LINUX
> diff --git a/arch/arm/cpu/Makefile b/arch/arm/cpu/Makefile
> index f7f9c30415..e7a6e3e6fb 100644
> --- a/arch/arm/cpu/Makefile
> +++ b/arch/arm/cpu/Makefile
> @@ -9,6 +9,7 @@ AFLAGS_hyp.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
> AFLAGS_hyp.pbl.o :=-Wa,-march=armv7-a -Wa,-mcpu=all
>
> obj-y += start.o entry.o entry_ll$(S64).o
> +KASAN_SANITIZE_start.o := n
>
> pbl-$(CONFIG_BOARD_ARM_GENERIC_DT) += board-dt-2nd.o
> pbl-$(CONFIG_BOARD_ARM_GENERIC_DT_AARCH64) += board-dt-2nd-aarch64.o
> @@ -51,3 +52,4 @@ pbl-y += entry.o entry_ll$(S64).o
> pbl-y += uncompress.o
>
> obj-pbl-y += common.o sections.o
> +KASAN_SANITIZE_common.o := n
> diff --git a/arch/arm/cpu/common.c b/arch/arm/cpu/common.c
> index 33f148fc0e..8cfcc8f6ce 100644
> --- a/arch/arm/cpu/common.c
> +++ b/arch/arm/cpu/common.c
> @@ -120,7 +120,7 @@ void relocate_to_current_adr(void)
> dstart += sizeof(*rel);
> }
>
> - memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
> + __memset(dynsym, 0, (unsigned long)dynend - (unsigned long)dynsym);
> #else
> #error "Architecture not specified"
> #endif
> diff --git a/arch/arm/cpu/setupc.S b/arch/arm/cpu/setupc.S
> index 8ae7c89a2c..55aa105b21 100644
> --- a/arch/arm/cpu/setupc.S
> +++ b/arch/arm/cpu/setupc.S
> @@ -21,12 +21,12 @@ ENTRY(setup_c)
> ldr r2,=__bss_start
> sub r2, r2, r0
> add r1, r0, r4
> - bl memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
> + bl __memcpy /* memcpy(_text, _text + offset, __bss_start - _text) */
> 1: ldr r0, =__bss_start
> mov r1, #0
> ldr r2, =__bss_stop
> sub r2, r2, r0
> - bl memset /* clear bss */
> + bl __memset /* clear bss */
> bl sync_caches_for_execution
> sub lr, r5, r4 /* adjust return address to new location */
> pop {r4, r5}
> @@ -67,7 +67,7 @@ ENTRY(relocate_to_adr)
> sub r7, r7, r1 /* sub address where we are actually running */
> add r7, r7, r0 /* add address where we are going to run */
>
> - bl memcpy /* copy binary */
> + bl __memcpy /* copy binary */
>
> bl sync_caches_for_execution
>
> diff --git a/arch/arm/cpu/start.c b/arch/arm/cpu/start.c
> index aeca459cb1..f48f5beea8 100644
> --- a/arch/arm/cpu/start.c
> +++ b/arch/arm/cpu/start.c
> @@ -15,6 +15,7 @@
> #include <asm/unaligned.h>
> #include <asm/cache.h>
> #include <asm/mmu.h>
> +#include <linux/kasan.h>
> #include <memory.h>
> #include <uncompress.h>
> #include <malloc.h>
> @@ -135,7 +136,7 @@ static int barebox_memory_areas_init(void)
> }
> device_initcall(barebox_memory_areas_init);
>
> -__noreturn void barebox_non_pbl_start(unsigned long membase,
> +__noreturn __no_sanitize_address void barebox_non_pbl_start(unsigned long membase,
> unsigned long memsize, void *boarddata)
> {
> unsigned long endmem = membase + memsize;
> @@ -233,6 +234,8 @@ __noreturn void barebox_non_pbl_start(unsigned long membase,
> pr_debug("initializing malloc pool at 0x%08lx (size 0x%08lx)\n",
> malloc_start, malloc_end - malloc_start);
>
> + kasan_init(membase, memsize, malloc_start - (memsize >> KASAN_SHADOW_SCALE_SHIFT));
> +
> mem_malloc_init((void *)malloc_start, (void *)malloc_end - 1);
>
> if (IS_ENABLED(CONFIG_BOOTM_OPTEE))
> @@ -259,7 +262,7 @@ void start(unsigned long membase, unsigned long memsize, void *boarddata);
> * First function in the uncompressed image. We get here from
> * the pbl. The stack already has been set up by the pbl.
> */
> -void NAKED __section(.text_entry) start(unsigned long membase,
> +void NAKED __no_sanitize_address __section(.text_entry) start(unsigned long membase,
> unsigned long memsize, void *boarddata)
> {
> barebox_non_pbl_start(membase, memsize, boarddata);
> diff --git a/arch/arm/include/asm/string.h b/arch/arm/include/asm/string.h
> index 435647abda..fb577cfcd5 100644
> --- a/arch/arm/include/asm/string.h
> +++ b/arch/arm/include/asm/string.h
> @@ -5,8 +5,10 @@
>
> #define __HAVE_ARCH_MEMCPY
> extern void *memcpy(void *, const void *, __kernel_size_t);
> +extern void *__memcpy(void *, const void *, __kernel_size_t);
> #define __HAVE_ARCH_MEMSET
> extern void *memset(void *, int, __kernel_size_t);
> +extern void *__memset(void *, int, __kernel_size_t);
>
> #endif
>
> diff --git a/arch/arm/lib32/barebox.lds.S b/arch/arm/lib32/barebox.lds.S
> index ed279279a2..54d9b3e381 100644
> --- a/arch/arm/lib32/barebox.lds.S
> +++ b/arch/arm/lib32/barebox.lds.S
> @@ -77,7 +77,9 @@ SECTIONS
> _sdata = .;
>
> . = ALIGN(4);
> - .data : { *(.data*) }
> + .data : { *(.data*)
> + CONSTRUCTORS
> + }
>
> .barebox_imd : { BAREBOX_IMD }
>
> diff --git a/arch/arm/lib32/memcpy.S b/arch/arm/lib32/memcpy.S
> index 5123691ca9..0fcdaa88e6 100644
> --- a/arch/arm/lib32/memcpy.S
> +++ b/arch/arm/lib32/memcpy.S
> @@ -56,9 +56,12 @@
>
> /* Prototype: void *memcpy(void *dest, const void *src, size_t n); */
>
> +.weak memcpy
> ENTRY(memcpy)
> +ENTRY(__memcpy)
>
> #include "copy_template.S"
>
> +ENDPROC(__memcpy)
> ENDPROC(memcpy)
>
> diff --git a/arch/arm/lib32/memset.S b/arch/arm/lib32/memset.S
> index c4d2672038..6079dd89f6 100644
> --- a/arch/arm/lib32/memset.S
> +++ b/arch/arm/lib32/memset.S
> @@ -15,6 +15,8 @@
> .text
> .align 5
>
> +.weak memset
> +ENTRY(__memset)
> ENTRY(memset)
> ands r3, r0, #3 @ 1 unaligned?
> mov ip, r0 @ preserve r0 as return value
> @@ -121,4 +123,4 @@ ENTRY(memset)
> add r2, r2, r3 @ 1 (r2 = r2 - (4 - r3))
> b 1b
> ENDPROC(memset)
> -
> +ENDPROC(__memset)
> diff --git a/arch/arm/lib64/string.c b/arch/arm/lib64/string.c
> index cb26331527..a2cf09e58e 100644
> --- a/arch/arm/lib64/string.c
> +++ b/arch/arm/lib64/string.c
> @@ -5,18 +5,34 @@
> void *__arch_memset(void *dst, int c, __kernel_size_t size);
> void *__arch_memcpy(void * dest, const void *src, size_t count);
>
> -void *memset(void *dst, int c, __kernel_size_t size)
> +static void *_memset(void *dst, int c, __kernel_size_t size)
> {
> if (likely(get_cr() & CR_M))
> return __arch_memset(dst, c, size);
>
> - return __default_memset(dst, c, size);
> + return __nokasan_default_memset(dst, c, size);
> }
>
> -void *memcpy(void * dest, const void *src, size_t count)
> +void __weak *memset(void *dst, int c, __kernel_size_t size)
> +{
> + return _memset(dst, c, size);
> +}
> +
> +void *__memset(void *dst, int c, __kernel_size_t size)
> + __alias(_memset);
> +
> +static void *_memcpy(void * dest, const void *src, size_t count)
> {
> if (likely(get_cr() & CR_M))
> return __arch_memcpy(dest, src, count);
>
> - return __default_memcpy(dest, src, count);
> -}
> \ No newline at end of file
> + return __nokasan_default_memcpy(dest, src, count);
> +}
> +
> +void __weak *memcpy(void * dest, const void *src, size_t count)
> +{
> + return _memcpy(dest, src, count);
> +}
> +
> +void *__memcpy(void * dest, const void *src, size_t count)
> + __alias(_memcpy);
>
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* Re: [PATCH 9/9] ARM: Add KASan support
2021-02-09 9:25 ` Ahmad Fatoum
@ 2021-02-10 9:26 ` Sascha Hauer
2021-02-10 9:27 ` Ahmad Fatoum
0 siblings, 1 reply; 19+ messages in thread
From: Sascha Hauer @ 2021-02-10 9:26 UTC (permalink / raw)
To: Ahmad Fatoum; +Cc: Barebox List
On Tue, Feb 09, 2021 at 10:25:57AM +0100, Ahmad Fatoum wrote:
> Hello Sascha,
>
> On 18.09.20 10:45, Sascha Hauer wrote:
> > This adds KASan support to the ARM architecture. What we are doing is:
> >
> > * Add __no_sanitize_address attribute to various lowlevel functions
> > which do not run in a proper C environment
> > * Add non-instrumented variants of memset/memcpy (prefixed with '__')
> > * make original memcpy/memset weak symbols so strong definitions in
> > lib/kasan/common.c can replace them
> > * Use non-instrumented memcpy in early functions
> > * call kasan_init()
> >
> > Signed-off-by: Sascha Hauer <s.hauer@pengutronix.de>
> > ---
> > arch/arm/Kconfig | 1 +
> > arch/arm/cpu/Makefile | 2 ++
> > arch/arm/cpu/common.c | 2 +-
> > arch/arm/cpu/setupc.S | 6 +++---
>
> Shouldn't you've touched setupc_64.S here as well?
Likely, yes. How did you stumble upon this?
Sascha
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* Re: [PATCH 0/9] barebox KASan support
2020-09-18 8:45 [PATCH 0/9] barebox KASan support Sascha Hauer
` (8 preceding siblings ...)
2020-09-18 8:45 ` [PATCH 9/9] ARM: " Sascha Hauer
@ 2020-09-28 14:33 ` Ahmad Fatoum
2020-09-28 15:06 ` Sascha Hauer
9 siblings, 1 reply; 19+ messages in thread
From: Ahmad Fatoum @ 2020-09-28 14:33 UTC (permalink / raw)
To: Sascha Hauer, Barebox List
Hello Sascha,
On 9/18/20 10:45 AM, Sascha Hauer wrote:
> This series adds KASan support to barebox. We already have ASan support
> for sandbox which uses libasan, this time we get KASan for use on real
> hardware. The KASan support is based on the Kernel code and contains the
> subset of features we need for barebox. KASan is currently supported on
> ARM(32/64) only and requires a supported allocator. Currently only the
> TLSF allocator is supported.
Should the md/mw/memcpy commands be exempt from KASAN checks?
>
> Sascha
>
> Sascha Hauer (9):
> Add print_hex_dump kernel implementation
> Add _RET_IP_ macro
> Kallsyms: Also resolve global variables
> Add constructor support
> pbl: Alias memcpy and memset
> string: Add nokasan variants of default memcpy/memset
> sandbox: rename KASan to ASan
> Add KASan support
> ARM: Add KASan support
>
> Makefile | 6 +-
> arch/arm/Kconfig | 1 +
> arch/arm/cpu/Makefile | 2 +
> arch/arm/cpu/common.c | 2 +-
> arch/arm/cpu/setupc.S | 6 +-
> arch/arm/cpu/start.c | 7 +-
> arch/arm/include/asm/string.h | 2 +
> arch/arm/lib32/barebox.lds.S | 4 +-
> arch/arm/lib32/memcpy.S | 3 +
> arch/arm/lib32/memset.S | 4 +-
> arch/arm/lib64/string.c | 26 ++-
> arch/sandbox/Kconfig | 4 +-
> arch/sandbox/Makefile | 2 +-
> arch/sandbox/os/Makefile | 4 -
> arch/sandbox/os/common.c | 2 +-
> common/Kconfig | 10 +-
> common/Makefile | 1 +
> common/kallsyms.c | 4 +-
> common/startup.c | 15 ++
> common/tlsf.c | 34 +++-
> include/asm-generic/barebox.lds.h | 12 ++
> include/asm-generic/sections.h | 3 +
> include/linux/kasan.h | 89 +++++++++
> include/linux/kernel.h | 2 +
> include/printk.h | 20 +-
> include/string.h | 4 +
> lib/Kconfig | 5 +
> lib/Makefile | 1 +
> lib/hexdump.c | 212 +++++++++++++++++++-
> lib/kasan/Kconfig | 16 ++
> lib/kasan/Makefile | 14 ++
> lib/kasan/common.c | 108 ++++++++++
> lib/kasan/generic.c | 315 ++++++++++++++++++++++++++++++
> lib/kasan/generic_report.c | 150 ++++++++++++++
> lib/kasan/kasan.h | 164 ++++++++++++++++
> lib/kasan/report.c | 199 +++++++++++++++++++
> lib/string.c | 28 ++-
> pbl/string.c | 7 +
> scripts/Makefile.kasan | 17 ++
> scripts/Makefile.lib | 10 +
> 40 files changed, 1461 insertions(+), 54 deletions(-)
> create mode 100644 include/linux/kasan.h
> create mode 100644 lib/kasan/Kconfig
> create mode 100644 lib/kasan/Makefile
> create mode 100644 lib/kasan/common.c
> create mode 100644 lib/kasan/generic.c
> create mode 100644 lib/kasan/generic_report.c
> create mode 100644 lib/kasan/kasan.h
> create mode 100644 lib/kasan/report.c
> create mode 100644 scripts/Makefile.kasan
>
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |
* Re: [PATCH 0/9] barebox KASan support
2020-09-28 14:33 ` [PATCH 0/9] barebox " Ahmad Fatoum
@ 2020-09-28 15:06 ` Sascha Hauer
0 siblings, 0 replies; 19+ messages in thread
From: Sascha Hauer @ 2020-09-28 15:06 UTC (permalink / raw)
To: Ahmad Fatoum; +Cc: Barebox List
On Mon, Sep 28, 2020 at 04:33:21PM +0200, Ahmad Fatoum wrote:
> Hello Sascha,
>
> On 9/18/20 10:45 AM, Sascha Hauer wrote:
> > This series adds KASan support to barebox. We already have ASan support
> > for sandbox which uses libasan, this time we get KASan for use on real
> > hardware. The KASan support is based on the Kernel code and contains the
> > subset of features we need for barebox. KASan is currently supported on
> > ARM(32/64) only and requires a supported allocator. Currently only the
> > TLSF allocator is supported.
>
> Should the md/mw/memcpy commands be exempt from KASAN checks?
I asked myself the same question. For now I decided that including these
commands in the KASan checks helps with debugging KASan itself: you can
easily trigger a KASan report with them. Also, you have no business
manipulating the malloc area outside the allocated regions anyway.
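
As a quick illustration, a throwaway command along these lines
(hypothetical, not part of this series) is enough to provoke a report
once CONFIG_KASAN is enabled:

/* hypothetical test command, not part of this series */
#include <common.h>
#include <command.h>
#include <malloc.h>

static int do_kasan_uaf(int argc, char *argv[])
{
        char *p = malloc(32);

        if (!p)
                return COMMAND_ERROR;

        free(p);

        /* instrumented load from freed memory -> use-after-free report */
        printf("first byte after free: %d\n", p[0]);

        return 0;
}

BAREBOX_CMD_START(kasan_uaf)
        .cmd = do_kasan_uaf,
BAREBOX_CMD_END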
Sascha
--
Pengutronix e.K. | |
Steuerwalder Str. 21 | http://www.pengutronix.de/ |
31137 Hildesheim, Germany | Phone: +49-5121-206917-0 |
Amtsgericht Hildesheim, HRA 2686 | Fax: +49-5121-206917-5555 |