From: Ahmad Fatoum <a.fatoum@pengutronix.de>
To: barebox@lists.infradead.org
Cc: Ahmad Fatoum <a.fatoum@pengutronix.de>
Subject: [PATCH 08/18] include: import Linux word-at-a-time.h
Date: Fri, 10 Nov 2023 22:44:11 +0100
Message-ID: <20231110214421.2726093-9-a.fatoum@pengutronix.de>
In-Reply-To: <20231110214421.2726093-1-a.fatoum@pengutronix.de>

The Linux <linux/word-at-a-time.h> interface is used to optimize
searching for bytes in strings by doing word-sized comparisons instead
of byte-wise ones. It will be used in the implementation of strscpy in
a follow-up commit, so import the generic version here, along with the
architecture-specific variants.

A good overview of the interface is available at LWN[1]. Note that it
discusses Linux v3.5. The asm-generic version imported here works not
only on big-endian, but also on little-endian systems[2].

[1]: https://lwn.net/Articles/501492/
[2]: Linux kernel commit a6e2f029ae34 ("Make asm/word-at-a-time.h
available on all architectures").
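
For illustration only (this sketch is not part of the imported headers
and the function name is made up), a strlen()-style loop built on top
of the interface could look roughly like this, assuming the string
starts word-aligned and that reading up to the next word boundary past
the terminating NUL is permissible:

	#include <asm/word-at-a-time.h>

	static size_t word_at_a_time_strlen(const char *str)
	{
		const struct word_at_a_time constants = WORD_AT_A_TIME_CONSTANTS;
		const unsigned long *word = (const unsigned long *)str;
		size_t len = 0;

		for (;;) {
			unsigned long val = *word++;
			unsigned long data;

			/* nonzero iff this word contains a NUL byte */
			if (has_zero(val, &data, &constants)) {
				data = prep_zero_mask(val, data, &constants);
				data = create_zero_mask(data);
				/* find_zero() yields the byte index of the NUL */
				return len + find_zero(data);
			}
			len += sizeof(unsigned long);
		}
	}
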
Signed-off-by: Ahmad Fatoum <a.fatoum@pengutronix.de>
---
arch/arm/include/asm/word-at-a-time.h | 59 ++++++++
arch/kvx/include/asm/word-at-a-time.h | 2 +
arch/mips/include/asm/word-at-a-time.h | 2 +
arch/openrisc/include/asm/word-at-a-time.h | 2 +
arch/powerpc/include/asm/word-at-a-time.h | 156 +++++++++++++++++++++
arch/riscv/include/asm/word-at-a-time.h | 48 +++++++
arch/sandbox/include/asm/word-at-a-time.h | 2 +
arch/x86/include/asm/word-at-a-time.h | 73 ++++++++++
include/asm-generic/word-at-a-time.h | 121 ++++++++++++++++
include/linux/kernel.h | 8 ++
10 files changed, 473 insertions(+)
create mode 100644 arch/arm/include/asm/word-at-a-time.h
create mode 100644 arch/kvx/include/asm/word-at-a-time.h
create mode 100644 arch/mips/include/asm/word-at-a-time.h
create mode 100644 arch/openrisc/include/asm/word-at-a-time.h
create mode 100644 arch/powerpc/include/asm/word-at-a-time.h
create mode 100644 arch/riscv/include/asm/word-at-a-time.h
create mode 100644 arch/sandbox/include/asm/word-at-a-time.h
create mode 100644 arch/x86/include/asm/word-at-a-time.h
create mode 100644 include/asm-generic/word-at-a-time.h
diff --git a/arch/arm/include/asm/word-at-a-time.h b/arch/arm/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..f22198a66be3
--- /dev/null
+++ b/arch/arm/include/asm/word-at-a-time.h
@@ -0,0 +1,59 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2013 ARM Ltd.
+ */
+#ifndef __ASM_WORD_AT_A_TIME_H
+#define __ASM_WORD_AT_A_TIME_H
+
+#if !defined(__AARCH64EB__) && !defined(__ARMEB__)
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits,
+ const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+#define prep_zero_mask(a, bits, c) (bits)
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ unsigned long ret;
+
+#if __LINUX_ARM_ARCH__ >= 8
+ ret = fls64(mask) >> 3;
+#elif __LINUX_ARM_ARCH__ >= 5
+ /* We have clz available. */
+ ret = fls(mask) >> 3;
+#else
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ ret = (0x0ff0001 + mask) >> 23;
+ /* Fix the 1 for 00 case */
+ ret &= mask;
+#endif
+
+ return ret;
+}
+
+#define zero_bytemask(mask) (mask)
+
+#else /* __AARCH64EB__ || __ARMEB__ */
+#include <asm-generic/word-at-a-time.h>
+#endif
+
+#endif /* __ASM_WORD_AT_A_TIME_H */
diff --git a/arch/kvx/include/asm/word-at-a-time.h b/arch/kvx/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..f6306fb896a1
--- /dev/null
+++ b/arch/kvx/include/asm/word-at-a-time.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm-generic/word-at-a-time.h>
diff --git a/arch/mips/include/asm/word-at-a-time.h b/arch/mips/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..f6306fb896a1
--- /dev/null
+++ b/arch/mips/include/asm/word-at-a-time.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm-generic/word-at-a-time.h>
diff --git a/arch/openrisc/include/asm/word-at-a-time.h b/arch/openrisc/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..f6306fb896a1
--- /dev/null
+++ b/arch/openrisc/include/asm/word-at-a-time.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm-generic/word-at-a-time.h>
diff --git a/arch/powerpc/include/asm/word-at-a-time.h b/arch/powerpc/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..1de46fbd2e15
--- /dev/null
+++ b/arch/powerpc/include/asm/word-at-a-time.h
@@ -0,0 +1,156 @@
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+/*
+ * Word-at-a-time interfaces for PowerPC.
+ */
+
+#include <linux/kernel.h>
+
+#ifdef __BIG_ENDIAN__
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long leading_zero_bits;
+
+ asm (PPC_CNTLZL "%0,%1" : "=r" (leading_zero_bits) : "r" (mask));
+ return leading_zero_bits >> 3;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return ~1ul << __fls(mask);
+}
+
+#else
+
+#ifdef CONFIG_64BIT
+
+/* unused */
+struct word_at_a_time {
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { }
+
+/* This will give us 0xff for a NULL char and 0x00 elsewhere */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long ret;
+ unsigned long zero = 0;
+
+ asm("cmpb %0,%1,%2" : "=r" (ret) : "r" (a), "r" (zero));
+ *bits = ret;
+
+ return ret;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* Alan Modra's little-endian strlen tail for 64-bit */
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ unsigned long leading_zero_bits;
+ long trailing_zero_bit_mask;
+
+ asm("addi %1,%2,-1\n\t"
+ "andc %1,%1,%2\n\t"
+ "popcntd %0,%1"
+ : "=r" (leading_zero_bits), "=&r" (trailing_zero_bit_mask)
+ : "b" (bits));
+
+ return leading_zero_bits;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return mask >> 3;
+}
+
+/* This assumes that we never ask for an all 1s bitmask */
+static inline unsigned long zero_bytemask(unsigned long mask)
+{
+ return (1UL << mask) - 1;
+}
+
+#else /* 32-bit case */
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* CONFIG_64BIT */
+
+#endif /* __BIG_ENDIAN__ */
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/arch/riscv/include/asm/word-at-a-time.h b/arch/riscv/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..7c086ac6ecd4
--- /dev/null
+++ b/arch/riscv/include/asm/word-at-a-time.h
@@ -0,0 +1,48 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 Regents of the University of California
+ *
+ * Derived from arch/x86/include/asm/word-at-a-time.h
+ */
+
+#ifndef _ASM_RISCV_WORD_AT_A_TIME_H
+#define _ASM_RISCV_WORD_AT_A_TIME_H
+
+
+#include <linux/kernel.h>
+
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+static inline unsigned long has_zero(unsigned long val,
+ unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((val - c->one_bits) & ~val) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long val,
+ unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return fls64(mask) >> 3;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+#endif /* _ASM_RISCV_WORD_AT_A_TIME_H */
diff --git a/arch/sandbox/include/asm/word-at-a-time.h b/arch/sandbox/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..f6306fb896a1
--- /dev/null
+++ b/arch/sandbox/include/asm/word-at-a-time.h
@@ -0,0 +1,2 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+#include <asm-generic/word-at-a-time.h>
diff --git a/arch/x86/include/asm/word-at-a-time.h b/arch/x86/include/asm/word-at-a-time.h
new file mode 100644
index 000000000000..a7e57b7fd617
--- /dev/null
+++ b/arch/x86/include/asm/word-at-a-time.h
@@ -0,0 +1,73 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+
+/*
+ * This is largely generic for little-endian machines, but the
+ * optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/asm-generic/word-at-a-time.h b/include/asm-generic/word-at-a-time.h
new file mode 100644
index 000000000000..95a1d214108a
--- /dev/null
+++ b/include/asm-generic/word-at-a-time.h
@@ -0,0 +1,121 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _ASM_WORD_AT_A_TIME_H
+#define _ASM_WORD_AT_A_TIME_H
+
+#include <linux/kernel.h>
+#include <asm/byteorder.h>
+
+#ifdef __BIG_ENDIAN
+
+struct word_at_a_time {
+ const unsigned long high_bits, low_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0xfe) + 1, REPEAT_BYTE(0x7f) }
+
+/* Bit set in the bytes that have a zero */
+static inline long prep_zero_mask(unsigned long val, unsigned long rhs, const struct word_at_a_time *c)
+{
+ unsigned long mask = (val & c->low_bits) + c->low_bits;
+ return ~(mask | rhs);
+}
+
+#define create_zero_mask(mask) (mask)
+
+static inline long find_zero(unsigned long mask)
+{
+ long byte = 0;
+#ifdef CONFIG_64BIT
+ if (mask >> 32)
+ mask >>= 32;
+ else
+ byte = 4;
+#endif
+ if (mask >> 16)
+ mask >>= 16;
+ else
+ byte += 2;
+ return (mask >> 8) ? byte : byte + 1;
+}
+
+static inline unsigned long has_zero(unsigned long val, unsigned long *data, const struct word_at_a_time *c)
+{
+ unsigned long rhs = val | c->low_bits;
+ *data = rhs;
+ return (val + c->high_bits) & ~rhs;
+}
+
+#ifndef zero_bytemask
+#define zero_bytemask(mask) (~1ul << __fls(mask))
+#endif
+
+#else
+
+/*
+ * The optimal byte mask counting is probably going to be something
+ * that is architecture-specific. If you have a reliably fast
+ * bit count instruction, that might be better than the multiply
+ * and shift, for example.
+ */
+struct word_at_a_time {
+ const unsigned long one_bits, high_bits;
+};
+
+#define WORD_AT_A_TIME_CONSTANTS { REPEAT_BYTE(0x01), REPEAT_BYTE(0x80) }
+
+#ifdef CONFIG_64BIT
+
+/*
+ * Jan Achrenius on G+: microoptimized version of
+ * the simpler "(mask & ONEBYTES) * ONEBYTES >> 56"
+ * that works for the bytemasks without having to
+ * mask them first.
+ */
+static inline long count_masked_bytes(unsigned long mask)
+{
+ return mask*0x0001020304050608ul >> 56;
+}
+
+#else /* 32-bit case */
+
+/* Carl Chatfield / Jan Achrenius G+ version for 32-bit */
+static inline long count_masked_bytes(long mask)
+{
+ /* (000000 0000ff 00ffff ffffff) -> ( 1 1 2 3 ) */
+ long a = (0x0ff0001+mask) >> 23;
+ /* Fix the 1 for 00 case */
+ return a & mask;
+}
+
+#endif
+
+/* Return nonzero if it has a zero */
+static inline unsigned long has_zero(unsigned long a, unsigned long *bits, const struct word_at_a_time *c)
+{
+ unsigned long mask = ((a - c->one_bits) & ~a) & c->high_bits;
+ *bits = mask;
+ return mask;
+}
+
+static inline unsigned long prep_zero_mask(unsigned long a, unsigned long bits, const struct word_at_a_time *c)
+{
+ return bits;
+}
+
+static inline unsigned long create_zero_mask(unsigned long bits)
+{
+ bits = (bits - 1) & ~bits;
+ return bits >> 7;
+}
+
+/* The mask we created is directly usable as a bytemask */
+#define zero_bytemask(mask) (mask)
+
+static inline unsigned long find_zero(unsigned long mask)
+{
+ return count_masked_bytes(mask);
+}
+
+#endif /* __BIG_ENDIAN */
+
+#endif /* _ASM_WORD_AT_A_TIME_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index da6121888ebd..4e50f6075189 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -11,6 +11,14 @@
#include <linux/instruction_pointer.h>
#include <linux/minmax.h>

+/**
+ * REPEAT_BYTE - repeat the value @x multiple times as an unsigned long value
+ * @x: value to repeat
+ *
+ * NOTE: @x is not checked for > 0xff; larger values produce odd results.
+ */
+#define REPEAT_BYTE(x) ((~0ul / 0xff) * (x))
+
#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a) - 1)
#define ALIGN_DOWN(x, a) ALIGN((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask))
--
2.39.2