This is an automated email from the ASF dual-hosted git repository.
xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git
The following commit(s) were added to refs/heads/master by this push:
new f3213efc7f armv8-r/libc: optimize libc string apis with asm
f3213efc7f is described below
commit f3213efc7f25bb0b462eb98a96e16f9793803334
Author: Jinliang Li <[email protected]>
AuthorDate: Fri Nov 8 13:44:58 2024 +0800
armv8-r/libc: optimize libc string apis with asm
Optimize libc string APIs (memcpy/memset/memmove/memchr/strcmp/strlen)
with arm32 assembly instructions, including VFP and NEON variants.
Add architecture-specific ELF parsing.
Signed-off-by: Jinliang Li <[email protected]>
---
libs/libc/machine/arm/CMakeLists.txt | 2 +
libs/libc/machine/arm/Kconfig | 4 +
libs/libc/machine/arm/Make.defs | 2 +
libs/libc/machine/arm/armv8-r/CMakeLists.txt | 55 ++
libs/libc/machine/arm/armv8-r/Kconfig | 63 +++
libs/libc/machine/arm/armv8-r/Make.defs | 57 ++
libs/libc/machine/arm/armv8-r/arch_elf.c | 484 +++++++++++++++++
libs/libc/machine/arm/armv8-r/gnu/acle-compat.h | 183 +++++++
libs/libc/machine/arm/armv8-r/gnu/arch_memchr.S | 397 ++++++++++++++
libs/libc/machine/arm/armv8-r/gnu/arch_memcpy.S | 632 +++++++++++++++++++++++
libs/libc/machine/arm/armv8-r/gnu/arch_memmove.S | 72 +++
libs/libc/machine/arm/armv8-r/gnu/arch_memset.S | 152 ++++++
libs/libc/machine/arm/armv8-r/gnu/arch_strcmp.S | 308 +++++++++++
libs/libc/machine/arm/armv8-r/gnu/arch_strlen.S | 190 +++++++
14 files changed, 2601 insertions(+)
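All of the new routines are opt-in via Kconfig. As an illustration, a board
configuration fragment that enables everything this patch adds might look like
the following (option names come from the Kconfig below; all of them require a
GNU toolchain, and ARMV8R_MEMSET additionally depends on NEON):

    CONFIG_ARMV8R_STRING_FUNCTION=y   # convenience option: selects all six below
    CONFIG_ARMV8R_MEMCHR=y
    CONFIG_ARMV8R_MEMCPY=y
    CONFIG_ARMV8R_MEMMOVE=y
    CONFIG_ARMV8R_MEMSET=y            # also needs CONFIG_ARM_NEON
    CONFIG_ARMV8R_STRCMP=y
    CONFIG_ARMV8R_STRLEN=y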
diff --git a/libs/libc/machine/arm/CMakeLists.txt b/libs/libc/machine/arm/CMakeLists.txt
index 560c77d4c9..fc28d501c2 100644
--- a/libs/libc/machine/arm/CMakeLists.txt
+++ b/libs/libc/machine/arm/CMakeLists.txt
@@ -44,6 +44,8 @@ elseif(CONFIG_ARCH_ARMV7M) # All ARMv7-M
add_subdirectory(armv7-m)
elseif(CONFIG_ARCH_ARMV8M) # All ARMv8-M
add_subdirectory(armv8-m)
+elseif(CONFIG_ARCH_ARMV8R) # All ARMv8-R
+ add_subdirectory(armv8-r)
endif()
if(NOT CONFIG_LIBSUPCXX_TOOLCHAIN)
diff --git a/libs/libc/machine/arm/Kconfig b/libs/libc/machine/arm/Kconfig
index fd9814bbf3..9e30ea26d5 100644
--- a/libs/libc/machine/arm/Kconfig
+++ b/libs/libc/machine/arm/Kconfig
@@ -27,3 +27,7 @@ endif
if ARCH_ARMV8M
source "libs/libc/machine/arm/armv8-m/Kconfig"
endif
+
+if ARCH_ARMV8R
+source "libs/libc/machine/arm/armv8-r/Kconfig"
+endif
diff --git a/libs/libc/machine/arm/Make.defs b/libs/libc/machine/arm/Make.defs
index c17428d1c2..0f32db14f3 100644
--- a/libs/libc/machine/arm/Make.defs
+++ b/libs/libc/machine/arm/Make.defs
@@ -42,6 +42,8 @@ else ifeq ($(CONFIG_ARCH_ARMV7M),y) # All ARMv7-M
include $(TOPDIR)/libs/libc/machine/arm/armv7-m/Make.defs
else ifeq ($(CONFIG_ARCH_ARMV8M),y) # All ARMv8-M
include $(TOPDIR)/libs/libc/machine/arm/armv8-m/Make.defs
+else ifeq ($(CONFIG_ARCH_ARMV8R),y) # All ARMv8-R
+include $(TOPDIR)/libs/libc/machine/arm/armv8-r/Make.defs
endif
ifneq ($(CONFIG_LIBSUPCXX_TOOLCHAIN),y)
diff --git a/libs/libc/machine/arm/armv8-r/CMakeLists.txt b/libs/libc/machine/arm/armv8-r/CMakeLists.txt
new file mode 100644
index 0000000000..3a8aa560fa
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/CMakeLists.txt
@@ -0,0 +1,55 @@
+# ##############################################################################
+# libs/libc/machine/arm/armv8-r/CMakeLists.txt
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more contributor
+# license agreements. See the NOTICE file distributed with this work for
+# additional information regarding copyright ownership. The ASF licenses this
+# file to you under the Apache License, Version 2.0 (the "License"); you may not
+# use this file except in compliance with the License. You may obtain a copy of
+# the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations under
+# the License.
+#
+# ##############################################################################
+
+if(CONFIG_ARCH_TOOLCHAIN_GNU)
+ set(ARCH_TOOLCHAIN_DIR gnu)
+endif()
+
+if(CONFIG_ARMV8R_MEMCHR)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_memchr.S)
+endif()
+
+if(CONFIG_ARMV8R_MEMCPY)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_memcpy.S)
+endif()
+
+if(CONFIG_ARMV8R_MEMMOVE)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_memmove.S)
+endif()
+
+if(CONFIG_ARMV8R_MEMSET)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_memset.S)
+endif()
+
+if(CONFIG_ARMV8R_STRCMP)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_strcmp.S)
+endif()
+
+if(CONFIG_ARMV8R_STRLEN)
+ list(APPEND SRCS ${ARCH_TOOLCHAIN_DIR}/arch_strlen.S)
+endif()
+
+if(CONFIG_LIBC_ARCH_ELF)
+ list(APPEND SRCS arch_elf.c)
+endif()
+
+target_sources(c PRIVATE ${SRCS})
diff --git a/libs/libc/machine/arm/armv8-r/Kconfig b/libs/libc/machine/arm/armv8-r/Kconfig
new file mode 100644
index 0000000000..5f2e2ad1b8
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/Kconfig
@@ -0,0 +1,63 @@
+#
+# For a description of the syntax of this configuration file,
+# see the file kconfig-language.txt in the NuttX tools repository.
+#
+
+config ARMV8R_STRING_FUNCTION
+ bool "Enable optimized ARMV8R specific string function"
+ default n
+ depends on ARCH_TOOLCHAIN_GNU
+ select ARMV8R_MEMCHR
+ select ARMV8R_MEMCPY
+ select ARMV8R_MEMMOVE
+ select ARMV8R_MEMSET
+ select ARMV8R_STRCMP
+ select ARMV8R_STRLEN
+
+config ARMV8R_MEMCHR
+ bool "Enable optimized memchr() for ARMv8-R"
+ default n
+ select LIBC_ARCH_MEMCHR
+ depends on ARCH_TOOLCHAIN_GNU
+ ---help---
+ Enable optimized ARMv8-R specific memchr() library function
+
+config ARMV8R_MEMCPY
+ bool "Enable optimized memcpy() for ARMv8-R"
+ select LIBC_ARCH_MEMCPY
+ depends on ARCH_TOOLCHAIN_GNU
+ ---help---
+ Enable optimized ARMv8-R specific memcpy() library function
+
+config ARMV8R_MEMMOVE
+ bool "Enable optimized memmove() for ARMv8-R"
+ default n
+ select LIBC_ARCH_MEMMOVE
+ depends on ARCH_TOOLCHAIN_GNU
+ ---help---
+ Enable optimized ARMv8-R specific memmove() library function
+
+config ARMV8R_MEMSET
+ bool "Enable optimized memset() for ARMv8-R"
+ default n
+ select LIBC_ARCH_MEMSET
+ depends on ARCH_TOOLCHAIN_GNU
+ depends on ARM_NEON
+ ---help---
+ Enable optimized ARMv8-R specific memset() library function
+
+config ARMV8R_STRCMP
+ bool "Enable optimized strcmp() for ARMv8-R"
+ default n
+ select LIBC_ARCH_STRCMP
+ depends on ARCH_TOOLCHAIN_GNU
+ ---help---
+ Enable optimized ARMv8-R specific strcmp() library function
+
+config ARMV8R_STRLEN
+ bool "Enable optimized strlen() for ARMv8-R"
+ default n
+ select LIBC_ARCH_STRLEN
+ depends on ARCH_TOOLCHAIN_GNU
+ ---help---
+ Enable optimized ARMv8-R specific strlen() library function
diff --git a/libs/libc/machine/arm/armv8-r/Make.defs b/libs/libc/machine/arm/armv8-r/Make.defs
new file mode 100644
index 0000000000..bacb1dad5b
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/Make.defs
@@ -0,0 +1,57 @@
+############################################################################
+# libs/libc/machine/arm/armv8-r/Make.defs
+#
+# SPDX-License-Identifier: Apache-2.0
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements. See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership. The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License. You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+ifeq ($(CONFIG_ARMV8R_MEMCHR),y)
+ASRCS += arch_memchr.S
+endif
+
+ifeq ($(CONFIG_ARMV8R_MEMCPY),y)
+ASRCS += arch_memcpy.S
+endif
+
+ifeq ($(CONFIG_ARMV8R_MEMMOVE),y)
+ASRCS += arch_memmove.S
+endif
+
+ifeq ($(CONFIG_ARMV8R_MEMSET),y)
+ASRCS += arch_memset.S
+endif
+
+ifeq ($(CONFIG_ARMV8R_STRCMP),y)
+ASRCS += arch_strcmp.S
+endif
+
+ifeq ($(CONFIG_ARMV8R_STRLEN),y)
+ASRCS += arch_strlen.S
+endif
+
+ifeq ($(CONFIG_LIBC_ARCH_ELF),y)
+CSRCS += arch_elf.c
+endif
+
+ifeq ($(CONFIG_ARCH_TOOLCHAIN_GNU),y)
+DEPPATH += --dep-path machine/arm/armv8-r/gnu
+VPATH += :machine/arm/armv8-r/gnu
+endif
+
+DEPPATH += --dep-path machine/arm/armv8-r
+VPATH += :machine/arm/armv8-r
diff --git a/libs/libc/machine/arm/armv8-r/arch_elf.c b/libs/libc/machine/arm/armv8-r/arch_elf.c
new file mode 100644
index 0000000000..98ca32b1f2
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/arch_elf.c
@@ -0,0 +1,484 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/arch_elf.c
+ *
+ * SPDX-License-Identifier: Apache-2.0
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership. The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <debug.h>
+
+#include <nuttx/elf.h>
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_checkarch
+ *
+ * Description:
+ * Given the ELF header in 'hdr', verify that the ELF file is appropriate
+ * for the current, configured architecture. Every architecture that uses
+ * the ELF loader must provide this function.
+ *
+ * Input Parameters:
+ * hdr - The ELF header read from the ELF file.
+ *
+ * Returned Value:
+ * True if the architecture supports this ELF file.
+ *
+ ****************************************************************************/
+
+bool up_checkarch(const Elf32_Ehdr *ehdr)
+{
+ /* Make sure it's an ARM executable */
+
+ if (ehdr->e_machine != EM_ARM)
+ {
+ berr("ERROR: Not for ARM: e_machine=%04x\n", ehdr->e_machine);
+ return false;
+ }
+
+ /* Make sure that 32-bit objects are supported */
+
+ if (ehdr->e_ident[EI_CLASS] != ELFCLASS32)
+ {
+ berr("ERROR: Need 32-bit objects: e_ident[EI_CLASS]=%02x\n",
+ ehdr->e_ident[EI_CLASS]);
+ return false;
+ }
+
+ /* Verify endian-ness */
+
+#ifdef CONFIG_ENDIAN_BIG
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB)
+#else
+ if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
+#endif
+ {
+ berr("ERROR: Wrong endian-ness: e_ident[EI_DATA]=%02x\n",
+ ehdr->e_ident[EI_DATA]);
+ return false;
+ }
+
+ /* Make sure the entry point address is properly aligned */
+
+#ifdef CONFIG_ARM_THUMB
+ if ((ehdr->e_entry & 2) != 0)
+#else
+ if ((ehdr->e_entry & 3) != 0)
+#endif
+ {
+ berr("ERROR: Entry point is not properly aligned: %08" PRIx32 "\n",
+ ehdr->e_entry);
+ return false;
+ }
+
+ /* TODO: Check ABI here. */
+
+ return true;
+}
+
+/****************************************************************************
+ * Name: up_relocate and up_relocateadd
+ *
+ * Description:
+ * Perform an architecture-specific ELF relocation. Every architecture
+ * that uses the ELF loader must provide this function.
+ *
+ * Input Parameters:
+ * rel - The relocation type
+ * sym - The ELF symbol structure containing the fully resolved value.
+ * There are a few relocation types for a few architectures that do
+ * not require symbol information. For those, this value will be
+ * NULL. Implementations of these functions must be able to handle
+ * that case.
+ * addr - The address that requires the relocation.
+ *
+ * Returned Value:
+ * Zero (OK) if the relocation was successful. Otherwise, a negated errno
+ * value indicating the cause of the relocation failure.
+ *
+ ****************************************************************************/
+
+int up_relocate(const Elf32_Rel *rel, const Elf32_Sym *sym, uintptr_t addr,
+ void *arch_data)
+{
+ int32_t offset;
+ unsigned int relotype;
+
+#ifdef CONFIG_ARM_THUMB
+ uint32_t upper_insn;
+ uint32_t lower_insn;
+#endif
+
+ /* All relocations except R_ARM_V4BX depend upon having valid symbol
+ * information.
+ */
+
+ relotype = ELF32_R_TYPE(rel->r_info);
+ if (sym == NULL && relotype != R_ARM_NONE && relotype != R_ARM_V4BX)
+ {
+ return -EINVAL;
+ }
+
+ /* Handle the relocation by relocation type */
+
+ switch (relotype)
+ {
+ case R_ARM_NONE:
+ {
+ /* No relocation */
+ }
+ break;
+
+ case R_ARM_PC24:
+ case R_ARM_CALL:
+ case R_ARM_JUMP24:
+ {
+ binfo("Performing PC24 [%" PRId32 "] link "
+ "at addr %08lx [%08lx] to sym '%p' st_value=%08lx\n",
+ ELF32_R_TYPE(rel->r_info), (long)addr,
+ (long)(*(uint32_t *)addr),
+ sym, (long)sym->st_value);
+
+ offset = (*(uint32_t *)addr & 0x00ffffff) << 2;
+ if (offset & 0x02000000)
+ {
+ offset -= 0x04000000;
+ }
+
+ offset += sym->st_value - addr;
+
+#ifdef CONFIG_ARM_THUMB
+ if ((offset & 2) != 0 || offset < (int32_t) 0xfe000000 ||
+#else
+ if ((offset & 3) != 0 || offset < (int32_t) 0xfe000000 ||
+#endif
+ offset >= (int32_t) 0x02000000)
+ {
+ berr("ERROR: PC24 [%" PRId32 "] relocation out of range, "
+ "offset=%08lx\n",
+ ELF32_R_TYPE(rel->r_info), offset);
+
+ return -EINVAL;
+ }
+
+ offset >>= 2;
+
+ *(uint32_t *)addr &= 0xff000000;
+ *(uint32_t *)addr |= offset & 0x00ffffff;
+ }
+ break;
+
+ case R_ARM_ABS32:
+ case R_ARM_TARGET1: /* New ABI: TARGET1 always treated as ABS32 */
+ {
+ binfo("Performing ABS32 link "
+ "at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
+ (long)addr, (long)(*(uint32_t *)addr), sym,
+ (long)sym->st_value);
+
+ *(uint32_t *)addr += sym->st_value;
+ }
+ break;
+
+ case R_ARM_V4BX:
+ {
+ binfo("Performing V4BX link at addr=%08lx [%08lx]\n",
+ (long)addr, (long)(*(uint32_t *)addr));
+
+ /* Preserve only Rm and the condition code */
+
+ *(uint32_t *)addr &= 0xf000000f;
+
+ /* Change instruction to 'mov pc, Rm' */
+
+ *(uint32_t *)addr |= 0x01a0f000;
+ }
+ break;
+
+ case R_ARM_PREL31:
+ {
+ binfo("Performing PREL31 link "
+ "at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
+ (long)addr, (long)(*(uint32_t *)addr), sym,
+ (long)sym->st_value);
+
+ offset = *(uint32_t *)addr + sym->st_value - addr;
+ *(uint32_t *)addr = offset & 0x7fffffff;
+ }
+ break;
+
+ case R_ARM_MOVW_ABS_NC:
+ case R_ARM_MOVT_ABS:
+ {
+ binfo("Performing MOVx_ABS [%" PRId32 "] link "
+ "at addr=%08lx [%08lx] to sym=%p st_value=%08lx\n",
+ ELF32_R_TYPE(rel->r_info), (long)addr,
+ (long)(*(uint32_t *)addr),
+ sym, (long)sym->st_value);
+
+ offset = *(uint32_t *)addr;
+ offset = ((offset & 0xf0000) >> 4) | (offset & 0xfff);
+
+ offset += sym->st_value;
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_MOVT_ABS)
+ {
+ offset >>= 16;
+ }
+
+ *(uint32_t *)addr &= 0xfff0f000;
+ *(uint32_t *)addr |= ((offset & 0xf000) << 4) | (offset & 0x0fff);
+ }
+ break;
+
+#ifdef CONFIG_ARM_THUMB
+ case R_ARM_THM_MOVW_ABS_NC:
+ case R_ARM_THM_MOVT_ABS:
+ {
+ /* Thumb BL and B.W instructions. Encoding:
+ *
+ * upper_insn:
+ *
+ * 1 1 1 1 1 1
+ * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 Instruction
+ * +----------+---+--------------------------+----------+
+ * |1 1 1 |OP1| OP2 | | 32-Bit
+ * +----------+---+--+-----+-----------------+----------+
+ * |1 1 1 | 1 0| i |1 0 1 1 0 0 | imm4 | MOVT
+ * +----------+------+-----+-----------------+----------+
+ *
+ * lower_insn:
+ *
+ * 1 1 1 1 1 1
+ * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 Instructions
+ * +---+-------------------------------------------------+
+ * |OP | | 32-Bit
+ * +---+----------+--------+-----------------------------+
+ * |0 | imm3 | Rd | imm8 | MOVT
+ * +---+----------+--------+-----------------------------+
+ *
+ * The 16-bit immediate value is encoded in these bits:
+ *
+ * i = imm16[11] = upper_insn[10]
+ * imm4 = imm16[12:15] = upper_insn[3:0]
+ * imm3 = imm16[8:10] = lower_insn[14:12]
+ * imm8 = imm16[0:7] = lower_insn[7:0]
+ */
+
+ upper_insn = (uint32_t)(*(uint16_t *)addr);
+ lower_insn = (uint32_t)(*(uint16_t *)(addr + 2));
+
+ binfo("Performing THM_MOVx [%" PRId32 "] link "
+ "at addr=%08lx [%04x %04x] to sym=%p st_value=%08lx\n",
+ ELF32_R_TYPE(rel->r_info), (long)addr,
+ (int)upper_insn, (int)lower_insn,
+ sym, (long)sym->st_value);
+
+ /* Extract the 16-bit offset from the 32-bit instruction */
+
+ offset = ((upper_insn & 0x000f) << 12) | /* imm4 -> imm16[8:10] */
+ ((upper_insn & 0x0400) << 1) | /* i -> imm16[11] */
+ ((lower_insn & 0x7000) >> 4) | /* imm3 -> imm16[8:10] */
+ (lower_insn & 0x00ff); /* imm8 -> imm16[0:7] */
+
+ /* And perform the relocation */
+
+ binfo(" offset=%08" PRIx32 " branch target=%08" PRIx32 "\n",
+ offset, offset + sym->st_value);
+
+ offset += sym->st_value;
+
+ /* Update the immediate value in the instruction.
+ * For MOVW we want the bottom 16-bits; for MOVT we want
+ * the top 16-bits.
+ */
+
+ if (ELF32_R_TYPE(rel->r_info) == R_ARM_THM_MOVT_ABS)
+ {
+ offset >>= 16;
+ }
+
+ upper_insn = ((upper_insn & 0xfbf0) | ((offset & 0xf000) >> 12) |
+ ((offset & 0x0800) >> 1));
+ *(uint16_t *)addr = (uint16_t)upper_insn;
+
+ lower_insn = ((lower_insn & 0x8f00) | ((offset & 0x0700) << 4) |
+ (offset & 0x00ff));
+ *(uint16_t *)(addr + 2) = (uint16_t)lower_insn;
+
+ binfo(" insn [%04x %04x]\n",
+ (int)upper_insn, (int)lower_insn);
+ }
+ break;
+
+ case R_ARM_THM_CALL:
+ case R_ARM_THM_JUMP24:
+ {
+ uint32_t S;
+ uint32_t J1;
+ uint32_t J2;
+
+ /* Thumb BL and B.W instructions. Encoding:
+ *
+ * upper_insn:
+ *
+ * 1 1 1 1 1 1
+ * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 Instructions
+ * +----------+---+--------------------------+----------+
+ * |1 1 1 |OP1| OP2 | | 32-Bit
+ * +----------+---+--+-----+-----------------+----------+
+ * |1 1 1 | 1 0| S | imm10 | BL
+ * +----------+------+-----+----------------------------+
+ *
+ * lower_insn:
+ *
+ * 1 1 1 1 1 1
+ * 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 Instructions
+ * +---+------------------------------------------------+
+ * |OP | | 32-Bit
+ * +---+--+---+---+---+---------------------------------+
+ * |1 1 |J1 | 1 |J2 | imm11 | BL
+ * +------+---+---+---+--------------------------------+
+ *
+ * The branch target is encoded in these bits:
+ *
+ * S = upper_insn[10]
+ * imm10 = upper_insn[0:9]
+ * imm11 = lower_insn[0:10]
+ * J1 = lower_insn[13]
+ * J2 = lower_insn[11]
+ */
+
+ upper_insn = (uint32_t)(*(uint16_t *)addr);
+ lower_insn = (uint32_t)(*(uint16_t *)(addr + 2));
+
+ binfo("Performing THM_JUMP24 [%" PRId32 "] link "
+ "at addr=%08lx [%04x %04x] to sym=%p st_value=%08lx\n",
+ ELF32_R_TYPE(rel->r_info), (long)addr,
+ (int)upper_insn, (int)lower_insn,
+ sym, (long)sym->st_value);
+
+ /* Extract the 25-bit offset from the 32-bit instruction:
+ *
+ * offset[24] = S
+ * offset[23] = ~(J1 ^ S)
+ * offset[22] = ~(J2 ^ S)
+ * offset[12:21] = imm10
+ * offset[1:11] = imm11
+ * offset[0] = 0
+ */
+
+ S = (upper_insn >> 10) & 1;
+ J1 = (lower_insn >> 13) & 1;
+ J2 = (lower_insn >> 11) & 1;
+
+ offset = (S << 24) | /* S -> offset[24] */
+ ((~(J1 ^ S) & 1) << 23) | /* J1 -> offset[23] */
+ ((~(J2 ^ S) & 1) << 22) | /* J2 -> offset[22] */
+ ((upper_insn & 0x03ff) << 12) | /* imm10 -> offset[12:21] */
+ ((lower_insn & 0x07ff) << 1); /* imm11 -> offset[1:11] */
+ /* 0 -> offset[0] */
+
+ /* Sign extend */
+
+ if (offset & 0x01000000)
+ {
+ offset -= 0x02000000;
+ }
+
+ /* And perform the relocation */
+
+ binfo(" S=%" PRId32 " J1=%" PRId32 " J2=%" PRId32
+ " offset=%08" PRIx32 " branch target=%08" PRIx32 "\n",
+ S, J1, J2, offset, offset + sym->st_value - addr);
+
+ offset += sym->st_value - addr;
+
+ /* Is this a function symbol? If so, then the branch target must be
+ * an odd Thumb address
+ */
+
+ if (ELF32_ST_TYPE(sym->st_info) == STT_FUNC && (offset & 1) == 0)
+ {
+ berr("ERROR: ERROR: JUMP24 [%" PRId32 "] "
+ "requires odd offset, offset=%08" PRIx32 "\n",
+ ELF32_R_TYPE(rel->r_info), offset);
+
+ return -EINVAL;
+ }
+
+ /* Check the range of the offset */
+
+ if (offset < (int32_t)0xff000000 || offset >= (int32_t)0x01000000)
+ {
+ berr("ERROR: ERROR: JUMP24 [%" PRId32 "] "
+ "relocation out of range, branch target=%08" PRIx32 "\n",
+ ELF32_R_TYPE(rel->r_info), offset);
+
+ return -EINVAL;
+ }
+
+ /* Now, reconstruct the 32-bit instruction using the new, relocated
+ * branch target.
+ */
+
+ S = (offset >> 24) & 1;
+ J1 = S ^ (~(offset >> 23) & 1);
+ J2 = S ^ (~(offset >> 22) & 1);
+
+ upper_insn = ((upper_insn & 0xf800) | (S << 10) |
+ ((offset >> 12) & 0x03ff));
+ *(uint16_t *)addr = (uint16_t)upper_insn;
+
+ lower_insn = ((lower_insn & 0xd000) | (J1 << 13) | (J2 << 11) |
+ ((offset >> 1) & 0x07ff));
+ *(uint16_t *)(addr + 2) = (uint16_t)lower_insn;
+
+ binfo(" S=%" PRId32 " J1=%" PRId32 " J2=%" PRId32
+ " insn [%04" PRIx32 " %04" PRIx32 "]\n",
+ S, J1, J2, upper_insn, lower_insn);
+ }
+ break;
+#endif /* CONFIG_ARM_THUMB */
+
+ default:
+ berr("ERROR: Unsupported relocation: %" PRId32 "\n",
+ ELF32_R_TYPE(rel->r_info));
+ return -EINVAL;
+ }
+
+ return OK;
+}
+
+int up_relocateadd(const Elf32_Rela *rel, const Elf32_Sym *sym,
+ uintptr_t addr, void *arch_data)
+{
+ berr("ERROR: RELA relocation not supported\n");
+ return -ENOSYS;
+}
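For reference, the branch fixup in the R_ARM_PC24/CALL/JUMP24 case above can
be read as the following standalone C sketch (illustrative only; the helper
names are hypothetical, but the masking, sign extension, and re-encoding
mirror up_relocate()):

    #include <stdint.h>

    /* Decode the signed byte offset held in the low 24 bits of an ARM
     * branch instruction.  The field counts words, hence the << 2.
     */

    static int32_t pc24_decode(uint32_t insn)
    {
      int32_t offset = (insn & 0x00ffffff) << 2;

      if (offset & 0x02000000)    /* Sign bit of the 26-bit byte offset */
        {
          offset -= 0x04000000;   /* Sign-extend */
        }

      return offset;
    }

    /* Re-encode a relocated byte offset, preserving the condition code
     * and opcode in the top 8 bits.  The caller must first verify that
     * -0x02000000 <= offset < 0x02000000, as up_relocate() does above.
     */

    static uint32_t pc24_encode(uint32_t insn, int32_t offset)
    {
      return (insn & 0xff000000) | (((uint32_t)offset >> 2) & 0x00ffffff);
    }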
diff --git a/libs/libc/machine/arm/armv8-r/gnu/acle-compat.h b/libs/libc/machine/arm/armv8-r/gnu/acle-compat.h
new file mode 100644
index 0000000000..5cf798162d
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/acle-compat.h
@@ -0,0 +1,183 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/acle-compat.h
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2014 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ ****************************************************************************/
+
+#ifndef __LIBS_LIBC_MACHINE_ARM_ARMV8R_GNU_ACLE_COMPAT_H
+#define __LIBS_LIBC_MACHINE_ARM_ARMV8R_GNU_ACLE_COMPAT_H
+
+#ifndef __ARM_ARCH
+
+/* ACLE standardises a set of pre-defines that describe the ARM architecture.
+ * These were mostly implemented in GCC around GCC-4.8; older versions
+ * have no, or only partial support. To provide a level of backwards
+ * compatibility we try to work out what the definitions should be, given
+ * the older pre-defines that GCC did produce. This isn't complete, but
+ * it should be enough for use by routines that depend on this header.
+ */
+
+/* No need to handle ARMv8, GCC had ACLE support before that. */
+
+# ifdef __ARM_ARCH_7__
+/* The common subset of ARMv7 in all profiles. */
+# define __ARM_ARCH 7
+# define __ARM_ARCH_ISA_THUMB 2
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_LDREX 7
+# define __ARM_FEATURE_UNALIGNED
+# endif
+
+# if defined (__ARM_ARCH_7A__) || defined (__ARM_ARCH_7R__)
+# define __ARM_ARCH 7
+# define __ARM_ARCH_ISA_THUMB 2
+# define __ARM_ARCH_ISA_ARM
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_SIMD32
+# define __ARM_FEATURE_DSP
+# define __ARM_FEATURE_QBIT
+# define __ARM_FEATURE_SAT
+# define __ARM_FEATURE_LDREX 15
+# define __ARM_FEATURE_UNALIGNED
+# ifdef __ARM_ARCH_7A__
+# define __ARM_ARCH_PROFILE 'A'
+# else
+# define __ARM_ARCH_PROFILE 'R'
+# endif
+# endif
+
+# ifdef __ARM_ARCH_7EM__
+# define __ARM_ARCH 7
+# define __ARM_ARCH_ISA_THUMB 2
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_SIMD32
+# define __ARM_FEATURE_DSP
+# define __ARM_FEATURE_QBIT
+# define __ARM_FEATURE_SAT
+# define __ARM_FEATURE_LDREX 7
+# define __ARM_FEATURE_UNALIGNED
+# define __ARM_ARCH_PROFILE 'M'
+# endif
+
+# ifdef __ARM_ARCH_7M__
+# define __ARM_ARCH 7
+# define __ARM_ARCH_ISA_THUMB 2
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_QBIT
+# define __ARM_FEATURE_SAT
+# define __ARM_FEATURE_LDREX 7
+# define __ARM_FEATURE_UNALIGNED
+# define __ARM_ARCH_PROFILE 'M'
+# endif
+
+# ifdef __ARM_ARCH_6T2__
+# define __ARM_ARCH 6
+# define __ARM_ARCH_ISA_THUMB 2
+# define __ARM_ARCH_ISA_ARM
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_SIMD32
+# define __ARM_FEATURE_DSP
+# define __ARM_FEATURE_QBIT
+# define __ARM_FEATURE_SAT
+# define __ARM_FEATURE_LDREX 4
+# define __ARM_FEATURE_UNALIGNED
+# endif
+
+# ifdef __ARM_ARCH_6M__
+# define __ARM_ARCH 6
+# define __ARM_ARCH_ISA_THUMB 1
+# define __ARM_ARCH_PROFILE 'M'
+# endif
+
+# if defined (__ARM_ARCH_6__) || defined (__ARM_ARCH_6J__) \
+ || defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6Z__) \
+ || defined (__ARM_ARCH_6ZK__)
+# define __ARM_ARCH 6
+# define __ARM_ARCH_ISA_THUMB 1
+# define __ARM_ARCH_ISA_ARM
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_SIMD32
+# define __ARM_FEATURE_DSP
+# define __ARM_FEATURE_QBIT
+# define __ARM_FEATURE_SAT
+# define __ARM_FEATURE_UNALIGNED
+# ifndef __thumb__
+# if defined (__ARM_ARCH_6K__) || defined (__ARM_ARCH_6ZK__)
+# define __ARM_FEATURE_LDREX 15
+# else
+# define __ARM_FEATURE_LDREX 4
+# endif
+# endif
+# endif
+
+# if defined (__ARM_ARCH_5TE__) || defined (__ARM_ARCH_5E__)
+# define __ARM_ARCH 5
+# define __ARM_ARCH_ISA_ARM
+# ifdef __ARM_ARCH_5TE__
+# define __ARM_ARCH_ISA_THUMB 1
+# endif
+# define __ARM_FEATURE_CLZ
+# define __ARM_FEATURE_DSP
+# endif
+
+# if defined (__ARM_ARCH_5T__) || defined (__ARM_ARCH_5__)
+# define __ARM_ARCH 5
+# define __ARM_ARCH_ISA_ARM
+# ifdef __ARM_ARCH_5T__
+# define __ARM_ARCH_ISA_THUMB 1
+# endif
+# define __ARM_FEATURE_CLZ
+# endif
+
+# ifdef __ARM_ARCH_4T__
+# define __ARM_ARCH 4
+# define __ARM_ARCH_ISA_ARM
+# define __ARM_ARCH_ISA_THUMB 1
+# endif
+
+# ifdef __ARM_ARCH_4__
+# define __ARM_ARCH 4
+# define __ARM_ARCH_ISA_ARM
+# endif
+
+# if defined (__ARM_ARCH_3__) || defined (__ARM_ARCH_3M__)
+# define __ARM_ARCH 3
+# define __ARM_ARCH_ISA_ARM
+# endif
+
+# ifdef __ARM_ARCH_2__
+# define __ARM_ARCH 2
+# define __ARM_ARCH_ISA_ARM
+# endif
+
+# ifdef __ARMEB__
+# define __ARM_BIG_ENDIAN
+# endif
+
+#endif
+
+#endif /* __LIBS_LIBC_MACHINE_ARM_ARMV8R_GNU_ACLE_COMPAT_H */
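These compatibility definitions let the assembly sources below key off a
single, uniform set of ACLE macros regardless of toolchain age. A condensed
sketch of the resulting dispatch, as arch_memchr.S below performs it:

    #include "acle-compat.h"

    #if defined (__ARM_NEON__) || defined (__ARM_NEON)
      /* NEON available: use the vectorized syndrome implementation */
    #elif __ARM_ARCH_ISA_THUMB >= 2 && defined (__ARM_FEATURE_DSP)
      /* Thumb-2 with DSP (uadd8/sel): use the scalar fast path */
    #else
      /* Otherwise fall back to a generic C implementation */
    #endif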
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_memchr.S b/libs/libc/machine/arm/armv8-r/gnu/arch_memchr.S
new file mode 100644
index 0000000000..d50a05d233
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_memchr.S
@@ -0,0 +1,397 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_memchr.S
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2010-2011, Linaro Limited, 2015 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Linaro Limited nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Written by Dave Gilbert <[email protected]>
+ *
+ * This memchr routine is optimised on a Cortex-A9 and should work on
+ * all ARMv7 processors. It has a fast path for short sizes, and has
+ * an optimised path for large data sets; the worst case is finding the
+ * match early in a large data set.
+ *
+ * Copyright (c) 2015 ARM Ltd.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * * Neither the name of the Linaro nor the
+ * names of its contributors may be used to endorse or promote products
+ * derived from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_MEMCHR
+
+@ 2011-02-07 [email protected]
+@ Extracted from local git a5b438d861
+@ 2011-07-14 [email protected]
+@ Import endianness fix from local git ea786f1b
+@ 2011-10-11 [email protected]
+@ Import from cortex-strings bzr rev 63
+@ Flip to ldrd (as suggested by Greta Yorsh)
+@ Make conditional on CPU type
+@ tidy
+
+@ This code requires armv6t2 or later. Uses Thumb2.
+
+ .syntax unified
+
+#include "acle-compat.h"
+
+@ NOTE: This ifdef MUST match the one in memchr-stub.c
+#if defined (__ARM_NEON__) || defined (__ARM_NEON)
+#if __ARM_ARCH >= 8 && __ARM_ARCH_PROFILE == 'R'
+ .arch armv8-r
+#else
+ .arch armv7-a
+#endif
+ .fpu neon
+
+
+/* Arguments */
+#define srcin r0
+#define chrin r1
+#define cntin r2
+
+/* Retval */
+#define result r0 /* Live range does not overlap with srcin */
+
+/* Working registers */
+#define src r1 /* Live range does not overlap with chrin */
+#define tmp r3
+#define synd r0 /* No overlap with srcin or result */
+#define soff r12
+
+/* Working NEON registers */
+#define vrepchr q0
+#define vdata0 q1
+#define vdata0_0 d2 /* Lower half of vdata0 */
+#define vdata0_1 d3 /* Upper half of vdata0 */
+#define vdata1 q2
+#define vdata1_0 d4 /* Lower half of vdata1 */
+#define vdata1_1 d5 /* Upper half of vdata1 */
+#define vrepmask q3
+#define vrepmask0 d6
+#define vrepmask1 d7
+#define vend q4
+#define vend0 d8
+#define vend1 d9
+
+/*
+ * Core algorithm:
+ *
+ * For each 32-byte chunk we calculate a 32-bit syndrome value, with one bit per
+ * byte. Each bit is set if the relevant byte matched the requested character
+ * and cleared otherwise. Since the bits in the syndrome reflect exactly the
+ * order in which things occur in the original string, counting trailing zeros
+ * allows us to identify exactly which byte has matched.
+ */
+
+ .text
+ .thumb_func
+ .align 4
+ .p2align 4,,15
+ .global memchr
+ .type memchr,%function
+
+memchr:
+ .cfi_sections .debug_frame
+ .cfi_startproc
+ /* Use a simple loop if there are fewer than 8 bytes to search. */
+ cmp cntin, #7
+ bhi .Llargestr
+ and chrin, chrin, #0xff
+
+.Lsmallstr:
+ subs cntin, cntin, #1
+ blo .Lnotfound /* Return not found if reached end. */
+ ldrb tmp, [srcin], #1
+ cmp tmp, chrin
+ bne .Lsmallstr /* Loop again if not found. */
+ /* Otherwise fixup address and return. */
+ sub result, result, #1
+ bx lr
+
+
+.Llargestr:
+ vdup.8 vrepchr, chrin /* Duplicate char across all lanes. */
+ /*
+ * Magic constant 0x8040201008040201 allows us to identify which lane
+ * matches the requested byte.
+ */
+ movw tmp, #0x0201
+ movt tmp, #0x0804
+ lsl soff, tmp, #4
+ vmov vrepmask0, tmp, soff
+ vmov vrepmask1, tmp, soff
+ /* Work with aligned 32-byte chunks */
+ bic src, srcin, #31
+ ands soff, srcin, #31
+ beq .Lloopintro /* Go straight to main loop if it's aligned. */
+
+ /*
+ * Input string is not 32-byte aligned. We calculate the syndrome
+ * value for the aligned 32 bytes block containing the first bytes
+ * and mask the irrelevant part.
+ */
+ vld1.8 {vdata0, vdata1}, [src:256]!
+ sub tmp, soff, #32
+ adds cntin, cntin, tmp
+ vceq.i8 vdata0, vdata0, vrepchr
+ vceq.i8 vdata1, vdata1, vrepchr
+ vand vdata0, vdata0, vrepmask
+ vand vdata1, vdata1, vrepmask
+ vpadd.i8 vdata0_0, vdata0_0, vdata0_1
+ vpadd.i8 vdata1_0, vdata1_0, vdata1_1
+ vpadd.i8 vdata0_0, vdata0_0, vdata1_0
+ vpadd.i8 vdata0_0, vdata0_0, vdata0_0
+ vmov synd, vdata0_0[0]
+
+ /* Clear the soff lower bits */
+ lsr synd, synd, soff
+ lsl synd, synd, soff
+ /* The first block can also be the last */
+ bls .Lmasklast
+ /* Have we found something already? */
+ cbnz synd, .Ltail
+
+
+.Lloopintro:
+ vpush {vend}
+ /* 264/265 correspond to d8/d9 for q4 */
+ .cfi_adjust_cfa_offset 16
+ .cfi_rel_offset 264, 0
+ .cfi_rel_offset 265, 8
+ .p2align 3,,7
+.Lloop:
+ vld1.8 {vdata0, vdata1}, [src:256]!
+ subs cntin, cntin, #32
+ vceq.i8 vdata0, vdata0, vrepchr
+ vceq.i8 vdata1, vdata1, vrepchr
+ /* If we're out of data we finish regardless of the result. */
+ bls .Lend
+ /* Use a fast check for the termination condition. */
+ vorr vend, vdata0, vdata1
+ vorr vend0, vend0, vend1
+ vmov synd, tmp, vend0
+ orrs synd, synd, tmp
+ /* We're not out of data, loop if we haven't found the character. */
+ beq .Lloop
+
+.Lend:
+ vpop {vend}
+ .cfi_adjust_cfa_offset -16
+ .cfi_restore 264
+ .cfi_restore 265
+
+ /* Termination condition found, let's calculate the syndrome value. */
+ vand vdata0, vdata0, vrepmask
+ vand vdata1, vdata1, vrepmask
+ vpadd.i8 vdata0_0, vdata0_0, vdata0_1
+ vpadd.i8 vdata1_0, vdata1_0, vdata1_1
+ vpadd.i8 vdata0_0, vdata0_0, vdata1_0
+ vpadd.i8 vdata0_0, vdata0_0, vdata0_0
+ vmov synd, vdata0_0[0]
+ cbz synd, .Lnotfound
+ bhi .Ltail
+
+
+.Lmasklast:
+ /* Clear the (-cntin) upper bits to avoid out-of-bounds matches. */
+ neg cntin, cntin
+ lsl synd, synd, cntin
+ lsrs synd, synd, cntin
+ it eq
+ moveq src, #0 /* If no match, set src to 0 so the retval is 0. */
+
+
+.Ltail:
+ /* Count the trailing zeros using bit reversing */
+ rbit synd, synd
+ /* Compensate the last post-increment */
+ sub src, src, #32
+ /* Count the leading zeros */
+ clz synd, synd
+ /* Compute the potential result and return */
+ add result, src, synd
+ bx lr
+
+
+.Lnotfound:
+ /* Set result to NULL if not found and return */
+ mov result, #0
+ bx lr
+
+ .cfi_endproc
+ .size memchr, . - memchr
+
+#elif __ARM_ARCH_ISA_THUMB >= 2 && defined (__ARM_FEATURE_DSP)
+
+#if __ARM_ARCH_PROFILE == 'M'
+ .arch armv7e-m
+#else
+ .arch armv6t2
+#endif
+
+@ this lets us check a flag in a 00/ff byte easily in either endianness
+#ifdef __ARMEB__
+#define CHARTSTMASK(c) 1<<(31-(c*8))
+#else
+#define CHARTSTMASK(c) 1<<(c*8)
+#endif
+ .text
+ .thumb
+
+@ ---------------------------------------------------------------------------
+ .thumb_func
+ .align 2
+ .p2align 4,,15
+ .global memchr
+ .type memchr,%function
+memchr:
+ @ r0 = start of memory to scan
+ @ r1 = character to look for
+ @ r2 = length
+ @ returns r0 = pointer to character or NULL if not found
+ and r1,r1,#0xff @ Don't trust the caller to pass a char
+
+ cmp r2,#16 @ If short don't bother with anything clever
+ blt 20f
+
+ tst r0, #7 @ If it's already aligned skip the next bit
+ beq 10f
+
+ @ Work up to an aligned point
+5:
+ ldrb r3, [r0],#1
+ subs r2, r2, #1
+ cmp r3, r1
+ beq 50f @ If it matches exit found
+ tst r0, #7
+ cbz r2, 40f @ If we run off the end, exit not found
+ bne 5b @ If not aligned yet then do next byte
+
+10:
+ @ We are aligned, we know we have at least 8 bytes to work with
+ push {r4,r5,r6,r7}
+ orr r1, r1, r1, lsl #8 @ expand the match word across all bytes
+ orr r1, r1, r1, lsl #16
+ bic r4, r2, #7 @ Number of double words to work with * 8
+ mvns r7, #0 @ all F's
+ movs r3, #0
+
+15:
+ ldrd r5,r6,[r0],#8
+ subs r4, r4, #8
+ eor r5,r5, r1 @ r5,r6 have 00's where bytes match the target
+ eor r6,r6, r1
+ uadd8 r5, r5, r7 @ Par add 0xff - sets GE bits for bytes!=0
+ sel r5, r3, r7 @ bytes are 00 for non-00 bytes,
+ @ or ff for 00 bytes - NOTE INVERSION
+ uadd8 r6, r6, r7 @ Par add 0xff - sets GE bits for bytes!=0
+ sel r6, r5, r7 @ chained....bytes are 00 for non-00 bytes
+ @ or ff for 00 bytes - NOTE INVERSION
+ cbnz r6, 60f
+ bne 15b @ (Flags from the subs above)
+
+ pop {r4,r5,r6,r7}
+ and r1,r1,#0xff @ r1 back to a single character
+ and r2,r2,#7 @ Leave the count remaining as the number
+ @ after the double words have been done
+
+20:
+ cbz r2, 40f @ 0 length or hit the end already then not found
+
+21: @ Post aligned section, or just a short call
+ ldrb r3,[r0],#1
+ subs r2,r2,#1
+ eor r3,r3,r1 @ r3 = 0 if match - doesn't break flags from sub
+ cbz r3, 50f
+ bne 21b @ on r2 flags
+
+40:
+ movs r0,#0 @ not found
+ bx lr
+
+50:
+ subs r0,r0,#1 @ found
+ bx lr
+
+60: @ We're here because the fast path found a hit
+ @ now we have to track down exactly which word it was
+ @ r0 points to the start of the double word after the one tested
+ @ r5 has the 00/ff pattern for the first word, r6 has the chained value
+ cmp r5, #0
+ itte eq
+ moveq r5, r6 @ the end is in the 2nd word
+ subeq r0,r0,#3 @ Points to 2nd byte of 2nd word
+ subne r0,r0,#7 @ or 2nd byte of 1st word
+
+ @ r0 currently points to the 2nd byte of the word containing the hit
+ tst r5, # CHARTSTMASK(0) @ 1st character
+ bne 61f
+ adds r0,r0,#1
+ tst r5, # CHARTSTMASK(1) @ 2nd character
+ ittt eq
+ addeq r0,r0,#1
+ tsteq r5, # (3<<15) @ 2nd & 3rd character
+ @ If not the 3rd must be the last one
+ addeq r0,r0,#1
+
+61:
+ pop {r4,r5,r6,r7}
+ subs r0,r0,#1
+ bx lr
+#else
+ /* Defined in memchr-stub.c. */
+#endif
+
+#endif
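The syndrome scheme described in arch_memchr.S's core-algorithm comment above
can be sketched in scalar C as follows (an illustration of the idea only: the
NEON code works on 32-byte chunks, not 8, and __builtin_ctz is a GCC/Clang
builtin):

    #include <stddef.h>
    #include <stdint.h>

    /* Build one match bit per byte of an 8-byte chunk, then use the
     * count of trailing zeros to locate the first matching byte.
     */

    static const uint8_t *memchr_chunk(const uint8_t *p, uint8_t c)
    {
      uint32_t synd = 0;
      int i;

      for (i = 0; i < 8; i++)
        {
          if (p[i] == c)
            {
              synd |= 1u << i;  /* Bit i set <=> byte i matched */
            }
        }

      if (synd == 0)
        {
          return NULL;          /* No match in this chunk */
        }

      return p + __builtin_ctz(synd);
    }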
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_memcpy.S b/libs/libc/machine/arm/armv8-r/gnu/arch_memcpy.S
new file mode 100644
index 0000000000..fd20c5c1bd
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_memcpy.S
@@ -0,0 +1,632 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_memcpy.S
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2013, Linaro Limited
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Linaro Limited nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_MEMCPY
+
+/*
+ * This memcpy routine is optimised for Cortex-A15 cores and takes advantage
+ * of VFP or NEON when built with the appropriate flags.
+ *
+ * Assumptions:
+ *
+ * ARMv6 (ARMv7-a if using Neon)
+ * ARM state
+ * Unaligned accesses
+ * LDRD/STRD support unaligned word accesses
+ *
+ * If compiled with GCC, this file should be enclosed within the following
+ * pre-processing check:
+ * if defined (__ARM_ARCH_7A__) && defined (__ARM_FEATURE_UNALIGNED)
+ *
+ */
+
+ .syntax unified
+ /* This implementation requires ARM state. */
+ .arm
+
+#ifdef __ARM_NEON__
+
+ .fpu neon
+ .arch armv8-r
+# define FRAME_SIZE 4
+# define USE_VFP
+# define USE_NEON
+
+#elif !defined (__SOFTFP__)
+
+ .arch armv6
+ .fpu vfpv2
+# define FRAME_SIZE 32
+# define USE_VFP
+
+#else
+ .arch armv6
+# define FRAME_SIZE 32
+
+#endif
+
+/* Old versions of GAS incorrectly implement the NEON align semantics. */
+#ifdef BROKEN_ASM_NEON_ALIGN
+#define ALIGN(addr, align) addr,:align
+#else
+#define ALIGN(addr, align) addr:align
+#endif
+
+#define PC_OFFSET 8 /* PC pipeline compensation. */
+#define INSN_SIZE 4
+
+/* Call parameters. */
+#define dstin r0
+#define src r1
+#define count r2
+
+/* Locals. */
+#define tmp1 r3
+#define dst ip
+#define tmp2 r10
+
+#ifndef USE_NEON
+/* For bulk copies using GP registers. */
+#define A_l r2 /* Call-clobbered. */
+#define A_h r3 /* Call-clobbered. */
+#define B_l r4
+#define B_h r5
+#define C_l r6
+#define C_h r7
+#define D_l r8
+#define D_h r9
+#endif
+
+/* Number of lines ahead to pre-fetch data. If you change this the code
+ below will need adjustment to compensate. */
+
+#define prefetch_lines 5
+
+#ifdef USE_VFP
+ .macro cpy_line_vfp vreg, base
+ vstr \vreg, [dst, #\base]
+ vldr \vreg, [src, #\base]
+ vstr d0, [dst, #\base + 8]
+ vldr d0, [src, #\base + 8]
+ vstr d1, [dst, #\base + 16]
+ vldr d1, [src, #\base + 16]
+ vstr d2, [dst, #\base + 24]
+ vldr d2, [src, #\base + 24]
+ vstr \vreg, [dst, #\base + 32]
+ vldr \vreg, [src, #\base + prefetch_lines * 64 - 32]
+ vstr d0, [dst, #\base + 40]
+ vldr d0, [src, #\base + 40]
+ vstr d1, [dst, #\base + 48]
+ vldr d1, [src, #\base + 48]
+ vstr d2, [dst, #\base + 56]
+ vldr d2, [src, #\base + 56]
+ .endm
+
+ .macro cpy_tail_vfp vreg, base
+ vstr \vreg, [dst, #\base]
+ vldr \vreg, [src, #\base]
+ vstr d0, [dst, #\base + 8]
+ vldr d0, [src, #\base + 8]
+ vstr d1, [dst, #\base + 16]
+ vldr d1, [src, #\base + 16]
+ vstr d2, [dst, #\base + 24]
+ vldr d2, [src, #\base + 24]
+ vstr \vreg, [dst, #\base + 32]
+ vstr d0, [dst, #\base + 40]
+ vldr d0, [src, #\base + 40]
+ vstr d1, [dst, #\base + 48]
+ vldr d1, [src, #\base + 48]
+ vstr d2, [dst, #\base + 56]
+ vldr d2, [src, #\base + 56]
+ .endm
+#endif
+
+ .macro def_fn f p2align=0
+ .text
+ .p2align \p2align
+ .global \f
+ .type \f, %function
+\f:
+ .endm
+
+def_fn memcpy p2align=6
+
+ mov dst, dstin /* Preserve dstin, we need to return it. */
+ cmp count, #64
+ bge .Lcpy_not_short
+ /* Deal with small copies quickly by dropping straight into the
+ exit block. */
+
+.Ltail63unaligned:
+#ifdef USE_NEON
+ and tmp1, count, #0x38
+ rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
+ add pc, pc, tmp1
+ vld1.8 {d0}, [src]! /* 14 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 12 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 10 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 8 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 6 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 4 words to go. */
+ vst1.8 {d0}, [dst]!
+ vld1.8 {d0}, [src]! /* 2 words to go. */
+ vst1.8 {d0}, [dst]!
+
+ tst count, #4
+ ldrne tmp1, [src], #4
+ strne tmp1, [dst], #4
+#else
+ /* Copy up to 15 full words of data. May not be aligned. */
+ /* Cannot use VFP for unaligned data. */
+ and tmp1, count, #0x3c
+ add dst, dst, tmp1
+ add src, src, tmp1
+ rsb tmp1, tmp1, #(60 - PC_OFFSET/2 + INSN_SIZE/2)
+ /* Jump directly into the sequence below at the correct offset. */
+ add pc, pc, tmp1, lsl #1
+
+ ldr tmp1, [src, #-60] /* 15 words to go. */
+ str tmp1, [dst, #-60]
+
+ ldr tmp1, [src, #-56] /* 14 words to go. */
+ str tmp1, [dst, #-56]
+ ldr tmp1, [src, #-52]
+ str tmp1, [dst, #-52]
+
+ ldr tmp1, [src, #-48] /* 12 words to go. */
+ str tmp1, [dst, #-48]
+ ldr tmp1, [src, #-44]
+ str tmp1, [dst, #-44]
+
+ ldr tmp1, [src, #-40] /* 10 words to go. */
+ str tmp1, [dst, #-40]
+ ldr tmp1, [src, #-36]
+ str tmp1, [dst, #-36]
+
+ ldr tmp1, [src, #-32] /* 8 words to go. */
+ str tmp1, [dst, #-32]
+ ldr tmp1, [src, #-28]
+ str tmp1, [dst, #-28]
+
+ ldr tmp1, [src, #-24] /* 6 words to go. */
+ str tmp1, [dst, #-24]
+ ldr tmp1, [src, #-20]
+ str tmp1, [dst, #-20]
+
+ ldr tmp1, [src, #-16] /* 4 words to go. */
+ str tmp1, [dst, #-16]
+ ldr tmp1, [src, #-12]
+ str tmp1, [dst, #-12]
+
+ ldr tmp1, [src, #-8] /* 2 words to go. */
+ str tmp1, [dst, #-8]
+ ldr tmp1, [src, #-4]
+ str tmp1, [dst, #-4]
+#endif
+
+ lsls count, count, #31
+ ldrhcs tmp1, [src], #2
+ ldrbne src, [src] /* Src is dead, use as a scratch. */
+ strhcs tmp1, [dst], #2
+ strbne src, [dst]
+ bx lr
+
+.Lcpy_not_short:
+ /* At least 64 bytes to copy, but don't know the alignment yet. */
+ str tmp2, [sp, #-FRAME_SIZE]!
+ and tmp2, src, #7
+ and tmp1, dst, #7
+ cmp tmp1, tmp2
+ bne .Lcpy_notaligned
+
+#ifdef USE_VFP
+ /* Magic dust alert! Force VFP on Cortex-A9. Experiments show
+ that the FP pipeline is much better at streaming loads and
+ stores. This is outside the critical loop. */
+ vmov.f32 s0, s0
+#endif
+
+ /* SRC and DST have the same mutual 32-bit alignment, but we may
+ still need to pre-copy some bytes to get to natural alignment.
+ We bring DST into full 64-bit alignment. */
+ lsls tmp2, dst, #29
+ beq 1f
+ rsbs tmp2, tmp2, #0
+ sub count, count, tmp2, lsr #29
+ ldrmi tmp1, [src], #4
+ strmi tmp1, [dst], #4
+ lsls tmp2, tmp2, #2
+ ldrhcs tmp1, [src], #2
+ ldrbne tmp2, [src], #1
+ strhcs tmp1, [dst], #2
+ strbne tmp2, [dst], #1
+
+1:
+ subs tmp2, count, #64 /* Use tmp2 for count. */
+ blt .Ltail63aligned
+
+ cmp tmp2, #512
+ bge .Lcpy_body_long
+
+.Lcpy_body_medium: /* Count in tmp2. */
+#ifdef USE_VFP
+1:
+ vldr d0, [src, #0]
+ subs tmp2, tmp2, #64
+ vldr d1, [src, #8]
+ vstr d0, [dst, #0]
+ vldr d0, [src, #16]
+ vstr d1, [dst, #8]
+ vldr d1, [src, #24]
+ vstr d0, [dst, #16]
+ vldr d0, [src, #32]
+ vstr d1, [dst, #24]
+ vldr d1, [src, #40]
+ vstr d0, [dst, #32]
+ vldr d0, [src, #48]
+ vstr d1, [dst, #40]
+ vldr d1, [src, #56]
+ vstr d0, [dst, #48]
+ add src, src, #64
+ vstr d1, [dst, #56]
+ add dst, dst, #64
+ bge 1b
+ tst tmp2, #0x3f
+ beq .Ldone
+
+.Ltail63aligned: /* Count in tmp2. */
+ and tmp1, tmp2, #0x38
+ add dst, dst, tmp1
+ add src, src, tmp1
+ rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
+ add pc, pc, tmp1
+
+ vldr d0, [src, #-56] /* 14 words to go. */
+ vstr d0, [dst, #-56]
+ vldr d0, [src, #-48] /* 12 words to go. */
+ vstr d0, [dst, #-48]
+ vldr d0, [src, #-40] /* 10 words to go. */
+ vstr d0, [dst, #-40]
+ vldr d0, [src, #-32] /* 8 words to go. */
+ vstr d0, [dst, #-32]
+ vldr d0, [src, #-24] /* 6 words to go. */
+ vstr d0, [dst, #-24]
+ vldr d0, [src, #-16] /* 4 words to go. */
+ vstr d0, [dst, #-16]
+ vldr d0, [src, #-8] /* 2 words to go. */
+ vstr d0, [dst, #-8]
+#else
+ sub src, src, #8
+ sub dst, dst, #8
+1:
+ ldrd A_l, A_h, [src, #8]
+ strd A_l, A_h, [dst, #8]
+ ldrd A_l, A_h, [src, #16]
+ strd A_l, A_h, [dst, #16]
+ ldrd A_l, A_h, [src, #24]
+ strd A_l, A_h, [dst, #24]
+ ldrd A_l, A_h, [src, #32]
+ strd A_l, A_h, [dst, #32]
+ ldrd A_l, A_h, [src, #40]
+ strd A_l, A_h, [dst, #40]
+ ldrd A_l, A_h, [src, #48]
+ strd A_l, A_h, [dst, #48]
+ ldrd A_l, A_h, [src, #56]
+ strd A_l, A_h, [dst, #56]
+ ldrd A_l, A_h, [src, #64]!
+ strd A_l, A_h, [dst, #64]!
+ subs tmp2, tmp2, #64
+ bge 1b
+ tst tmp2, #0x3f
+ bne 1f
+ ldr tmp2,[sp], #FRAME_SIZE
+ bx lr
+1:
+ add src, src, #8
+ add dst, dst, #8
+
+.Ltail63aligned: /* Count in tmp2. */
+ /* Copy up to 7 d-words of data. Similar to Ltail63unaligned, but
+ we know that the src and dest are 32-bit aligned so we can use
+ LDRD/STRD to improve efficiency. */
+ /* TMP2 is now negative, but we don't care about that. The bottom
+ six bits still tell us how many bytes are left to copy. */
+
+ and tmp1, tmp2, #0x38
+ add dst, dst, tmp1
+ add src, src, tmp1
+ rsb tmp1, tmp1, #(56 - PC_OFFSET + INSN_SIZE)
+ add pc, pc, tmp1
+ ldrd A_l, A_h, [src, #-56] /* 14 words to go. */
+ strd A_l, A_h, [dst, #-56]
+ ldrd A_l, A_h, [src, #-48] /* 12 words to go. */
+ strd A_l, A_h, [dst, #-48]
+ ldrd A_l, A_h, [src, #-40] /* 10 words to go. */
+ strd A_l, A_h, [dst, #-40]
+ ldrd A_l, A_h, [src, #-32] /* 8 words to go. */
+ strd A_l, A_h, [dst, #-32]
+ ldrd A_l, A_h, [src, #-24] /* 6 words to go. */
+ strd A_l, A_h, [dst, #-24]
+ ldrd A_l, A_h, [src, #-16] /* 4 words to go. */
+ strd A_l, A_h, [dst, #-16]
+ ldrd A_l, A_h, [src, #-8] /* 2 words to go. */
+ strd A_l, A_h, [dst, #-8]
+
+#endif
+ tst tmp2, #4
+ ldrne tmp1, [src], #4
+ strne tmp1, [dst], #4
+ lsls tmp2, tmp2, #31 /* Count (tmp2) now dead. */
+ ldrhcs tmp1, [src], #2
+ ldrbne tmp2, [src]
+ strhcs tmp1, [dst], #2
+ strbne tmp2, [dst]
+
+.Ldone:
+ ldr tmp2, [sp], #FRAME_SIZE
+ bx lr
+
+.Lcpy_body_long: /* Count in tmp2. */
+
+ /* Long copy. We know that there's at least (prefetch_lines * 64)
+ bytes to go. */
+#ifdef USE_VFP
+ /* Don't use PLD. Instead, read some data in advance of the current
+ copy position into a register. This should act like a PLD
+ operation but we won't have to repeat the transfer. */
+
+ vldr d3, [src, #0]
+ vldr d4, [src, #64]
+ vldr d5, [src, #128]
+ vldr d6, [src, #192]
+ vldr d7, [src, #256]
+
+ vldr d0, [src, #8]
+ vldr d1, [src, #16]
+ vldr d2, [src, #24]
+ add src, src, #32
+
+ subs tmp2, tmp2, #prefetch_lines * 64 * 2
+ blt 2f
+1:
+ cpy_line_vfp d3, 0
+ cpy_line_vfp d4, 64
+ cpy_line_vfp d5, 128
+ add dst, dst, #3 * 64
+ add src, src, #3 * 64
+ cpy_line_vfp d6, 0
+ cpy_line_vfp d7, 64
+ add dst, dst, #2 * 64
+ add src, src, #2 * 64
+ subs tmp2, tmp2, #prefetch_lines * 64
+ bge 1b
+
+2:
+ cpy_tail_vfp d3, 0
+ cpy_tail_vfp d4, 64
+ cpy_tail_vfp d5, 128
+ add src, src, #3 * 64
+ add dst, dst, #3 * 64
+ cpy_tail_vfp d6, 0
+ vstr d7, [dst, #64]
+ vldr d7, [src, #64]
+ vstr d0, [dst, #64 + 8]
+ vldr d0, [src, #64 + 8]
+ vstr d1, [dst, #64 + 16]
+ vldr d1, [src, #64 + 16]
+ vstr d2, [dst, #64 + 24]
+ vldr d2, [src, #64 + 24]
+ vstr d7, [dst, #64 + 32]
+ add src, src, #96
+ vstr d0, [dst, #64 + 40]
+ vstr d1, [dst, #64 + 48]
+ vstr d2, [dst, #64 + 56]
+ add dst, dst, #128
+ add tmp2, tmp2, #prefetch_lines * 64
+ b .Lcpy_body_medium
+#else
+ /* Long copy. Use an SMS style loop to maximize the I/O
+ bandwidth of the core. We don't have enough spare registers
+ to synthesise prefetching, so use PLD operations. */
+ /* Pre-bias src and dst. */
+ sub src, src, #8
+ sub dst, dst, #8
+ pld [src, #8]
+ pld [src, #72]
+ subs tmp2, tmp2, #64
+ pld [src, #136]
+ ldrd A_l, A_h, [src, #8]
+ strd B_l, B_h, [sp, #8]
+ ldrd B_l, B_h, [src, #16]
+ strd C_l, C_h, [sp, #16]
+ ldrd C_l, C_h, [src, #24]
+ strd D_l, D_h, [sp, #24]
+ pld [src, #200]
+ ldrd D_l, D_h, [src, #32]!
+ b 1f
+ .p2align 6
+2:
+ pld [src, #232]
+ strd A_l, A_h, [dst, #40]
+ ldrd A_l, A_h, [src, #40]
+ strd B_l, B_h, [dst, #48]
+ ldrd B_l, B_h, [src, #48]
+ strd C_l, C_h, [dst, #56]
+ ldrd C_l, C_h, [src, #56]
+ strd D_l, D_h, [dst, #64]!
+ ldrd D_l, D_h, [src, #64]!
+ subs tmp2, tmp2, #64
+1:
+ strd A_l, A_h, [dst, #8]
+ ldrd A_l, A_h, [src, #8]
+ strd B_l, B_h, [dst, #16]
+ ldrd B_l, B_h, [src, #16]
+ strd C_l, C_h, [dst, #24]
+ ldrd C_l, C_h, [src, #24]
+ strd D_l, D_h, [dst, #32]
+ ldrd D_l, D_h, [src, #32]
+ bcs 2b
+ /* Save the remaining bytes and restore the callee-saved regs. */
+ strd A_l, A_h, [dst, #40]
+ add src, src, #40
+ strd B_l, B_h, [dst, #48]
+ ldrd B_l, B_h, [sp, #8]
+ strd C_l, C_h, [dst, #56]
+ ldrd C_l, C_h, [sp, #16]
+ strd D_l, D_h, [dst, #64]
+ ldrd D_l, D_h, [sp, #24]
+ add dst, dst, #72
+ tst tmp2, #0x3f
+ bne .Ltail63aligned
+ ldr tmp2, [sp], #FRAME_SIZE
+ bx lr
+#endif
+
+.Lcpy_notaligned:
+ pld [src]
+ pld [src, #64]
+ /* There's at least 64 bytes to copy, but there is no mutual
+ alignment. */
+ /* Bring DST to 64-bit alignment. */
+ lsls tmp2, dst, #29
+ pld [src, #(2 * 64)]
+ beq 1f
+ rsbs tmp2, tmp2, #0
+ sub count, count, tmp2, lsr #29
+ ldrmi tmp1, [src], #4
+ strmi tmp1, [dst], #4
+ lsls tmp2, tmp2, #2
+ ldrbne tmp1, [src], #1
+ ldrhcs tmp2, [src], #2
+ strbne tmp1, [dst], #1
+ strhcs tmp2, [dst], #2
+1:
+ pld [src, #(3 * 64)]
+ subs count, count, #64
+ ldrmi tmp2, [sp], #FRAME_SIZE
+ bmi .Ltail63unaligned
+ pld [src, #(4 * 64)]
+
+#ifdef USE_NEON
+ vld1.8 {d0-d3}, [src]!
+ vld1.8 {d4-d7}, [src]!
+ subs count, count, #64
+ bmi 2f
+1:
+ pld [src, #(4 * 64)]
+ vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
+ vld1.8 {d0-d3}, [src]!
+ vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
+ vld1.8 {d4-d7}, [src]!
+ subs count, count, #64
+ bpl 1b
+2:
+ vst1.8 {d0-d3}, [ALIGN (dst, 64)]!
+ vst1.8 {d4-d7}, [ALIGN (dst, 64)]!
+ ands count, count, #0x3f
+#else
+ /* Use an SMS style loop to maximize the I/O bandwidth. */
+ sub src, src, #4
+ sub dst, dst, #8
+ subs tmp2, count, #64 /* Use tmp2 for count. */
+ ldr A_l, [src, #4]
+ ldr A_h, [src, #8]
+ strd B_l, B_h, [sp, #8]
+ ldr B_l, [src, #12]
+ ldr B_h, [src, #16]
+ strd C_l, C_h, [sp, #16]
+ ldr C_l, [src, #20]
+ ldr C_h, [src, #24]
+ strd D_l, D_h, [sp, #24]
+ ldr D_l, [src, #28]
+ ldr D_h, [src, #32]!
+ b 1f
+ .p2align 6
+2:
+ pld [src, #(5 * 64) - (32 - 4)]
+ strd A_l, A_h, [dst, #40]
+ ldr A_l, [src, #36]
+ ldr A_h, [src, #40]
+ strd B_l, B_h, [dst, #48]
+ ldr B_l, [src, #44]
+ ldr B_h, [src, #48]
+ strd C_l, C_h, [dst, #56]
+ ldr C_l, [src, #52]
+ ldr C_h, [src, #56]
+ strd D_l, D_h, [dst, #64]!
+ ldr D_l, [src, #60]
+ ldr D_h, [src, #64]!
+ subs tmp2, tmp2, #64
+1:
+ strd A_l, A_h, [dst, #8]
+ ldr A_l, [src, #4]
+ ldr A_h, [src, #8]
+ strd B_l, B_h, [dst, #16]
+ ldr B_l, [src, #12]
+ ldr B_h, [src, #16]
+ strd C_l, C_h, [dst, #24]
+ ldr C_l, [src, #20]
+ ldr C_h, [src, #24]
+ strd D_l, D_h, [dst, #32]
+ ldr D_l, [src, #28]
+ ldr D_h, [src, #32]
+ bcs 2b
+
+ /* Save the remaining bytes and restore the callee-saved regs. */
+ strd A_l, A_h, [dst, #40]
+ add src, src, #36
+ strd B_l, B_h, [dst, #48]
+ ldrd B_l, B_h, [sp, #8]
+ strd C_l, C_h, [dst, #56]
+ ldrd C_l, C_h, [sp, #16]
+ strd D_l, D_h, [dst, #64]
+ ldrd D_l, D_h, [sp, #24]
+ add dst, dst, #72
+ ands count, tmp2, #0x3f
+#endif
+ ldr tmp2, [sp], #FRAME_SIZE
+ bne .Ltail63unaligned
+ bx lr
+
+ .size memcpy, . - memcpy
+
+#endif
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_memmove.S
b/libs/libc/machine/arm/armv8-r/gnu/arch_memmove.S
new file mode 100644
index 0000000000..44eafa9b27
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_memmove.S
@@ -0,0 +1,72 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_memmove.S
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2015 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_MEMMOVE
+
+ .thumb
+ .syntax unified
+ .global memmove
+ .type memmove, %function
+memmove:
+ cmp r0, r1
+ push {r4}
+ bls 3f
+ adds r3, r1, r2
+ cmp r0, r3
+ bcs 3f
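+ /* dst is above src and the regions overlap: copy backwards,
+ byte by byte, starting from the end. */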
+ adds r1, r0, r2
+ cbz r2, 2f
+ subs r2, r3, r2
+1:
+ ldrb r4, [r3, #-1]!
+ cmp r2, r3
+ strb r4, [r1, #-1]!
+ bne 1b
+2:
+ pop {r4}
+ bx lr
+3:
+ cmp r2, #0
+ beq 2b
+ add r2, r2, r1
+ subs r3, r0, #1
+4:
+ ldrb r4, [r1], #1
+ cmp r2, r1
+ strb r4, [r3, #1]!
+ bne 4b
+ pop {r4}
+ bx lr
+ .size memmove, . - memmove
+
+#endif
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_memset.S
b/libs/libc/machine/arm/armv8-r/gnu/arch_memset.S
new file mode 100644
index 0000000000..ec577192a8
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_memset.S
@@ -0,0 +1,152 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_memset.S
+ *
+ * SPDX-License-Identifier: BSD-2-Clause
+ * SPDX-FileCopyrightText: 2013 The Android Open Source Project
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+ * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+ * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+ * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+ * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+ * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+ * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
+ * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
+ * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_MEMSET
+
+ .arm
+ .syntax unified
+ .global memset
+ .type memset, %function
+memset:
+ mov r3, r0
+ // At this point only d0, d1 are going to be used below.
+ vdup.8 q0, r1
+ cmp r2, #16
+ blo .L_set_less_than_16_unknown_align
+
+.L_check_alignment:
+ // Align destination to a double word to avoid the store crossing
+ // a cache line boundary.
+ ands ip, r3, #7
+ bne .L_do_double_word_align
+
+.L_double_word_aligned:
+ // Duplicate q0 so that the less-than-64 path can also use d2, d3.
+ vmov q1, q0
+ subs r2, #64
+ blo .L_set_less_than_64
+
+ // Duplicate the copy value so that we can store 64 bytes at a time.
+ vmov q2, q0
+ vmov q3, q0
+
+1:
+ // Main loop stores 64 bytes at a time.
+ subs r2, #64
+ vstmia r3!, {d0 - d7}
+ bge 1b
+
+.L_set_less_than_64:
+ // Restore r2 to the count of bytes left to set.
+ add r2, #64
+ lsls ip, r2, #27
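+ // lsl #27 moves count bit 5 into C and bit 4 into N, selecting the
+ // 32- and 16-byte stores; lsl #29 below repeats the trick for the
+ // 8- and 4-byte stores.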
+ bcc .L_set_less_than_32
+ // Set 32 bytes.
+ vstmia r3!, {d0 - d3}
+
+.L_set_less_than_32:
+ bpl .L_set_less_than_16
+ // Set 16 bytes.
+ vstmia r3!, {d0, d1}
+
+.L_set_less_than_16:
+ // Less than 16 bytes to set.
+ lsls ip, r2, #29
+ bcc .L_set_less_than_8
+
+ // Set 8 bytes.
+ vstmia r3!, {d0}
+
+.L_set_less_than_8:
+ bpl .L_set_less_than_4
+ // Set 4 bytes
+ vst1.32 {d0[0]}, [r3]!
+
+.L_set_less_than_4:
+ lsls ip, r2, #31
+ it ne
+ strbne r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3]
+ bx lr
+
+.L_do_double_word_align:
+ rsb ip, ip, #8
+ sub r2, r2, ip
+
+ // Do this comparison now, otherwise we'll need to save a
+ // register to the stack since we've used all available
+ // registers.
+ cmp ip, #4
+ blo 1f
+
+ // Need to store a four byte word as well.
+ movs ip, ip, lsl #31
+ it mi
+ strbmi r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3], #1
+ vst1.32 {d0[0]}, [r3]!
+ b .L_double_word_aligned
+
+1:
+ // No four byte store needed.
+ movs ip, ip, lsl #31
+ it mi
+ strbmi r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3], #1
+ b .L_double_word_aligned
+
+.L_set_less_than_16_unknown_align:
+ // Set up to 15 bytes.
+ movs ip, r2, lsl #29
+ bcc 1f
+ vst1.8 {d0}, [r3]!
+1:
+ bge 2f
+ vst1.32 {d0[0]}, [r3]!
+2:
+ movs ip, r2, lsl #31
+ it mi
+ strbmi r1, [r3], #1
+ itt cs
+ strbcs r1, [r3], #1
+ strbcs r1, [r3], #1
+ bx lr
+ .size memset, . - memset
+
+#endif
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_strcmp.S
b/libs/libc/machine/arm/armv8-r/gnu/arch_strcmp.S
new file mode 100644
index 0000000000..ca95f1b81b
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_strcmp.S
@@ -0,0 +1,308 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_strcmp.S
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2011 The Android Open Source Project, 2008 ARM Ltd
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. The name of the company may not be used to endorse or promote
+ * products derived from this software without specific prior written
+ * permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY ARM LTD ``AS IS'' AND ANY EXPRESS OR IMPLIED
+ * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
+ * IN NO EVENT SHALL ARM LTD BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
+ * TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
+ * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
+ * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
+ * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_STRCMP
+
+#ifdef __ARMEB__
+#define SHFT2LSB lsl
+#define SHFT2LSBEQ lsleq
+#define SHFT2MSB lsr
+#define SHFT2MSBEQ lsreq
+#define MSB 0x000000ff
+#define LSB 0xff000000
+#else
+#define SHFT2LSB lsr
+#define SHFT2LSBEQ lsreq
+#define SHFT2MSB lsl
+#define SHFT2MSBEQ lsleq
+#define MSB 0xff000000
+#define LSB 0x000000ff
+#endif
+#define magic1(REG) REG
+#define magic2(REG) REG, lsl #7
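+/* With REG holding 0x01010101, magic1() is the constant itself and
+ magic2() yields 0x80808080 for the zero-byte test below. */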
+
+ .arm
+ .syntax unified
+ .global strcmp
+ .type strcmp, %function
+strcmp:
+ pld [r0, #0]
+ pld [r1, #0]
+ eor r2, r0, r1
+ tst r2, #3
+ /* Strings not at same byte offset from a word boundary. */
+ bne .Lstrcmp_unaligned
+ ands r2, r0, #3
+ bic r0, r0, #3
+ bic r1, r1, #3
+ ldr ip, [r0], #4
+ it eq
+ ldreq r3, [r1], #4
+ beq 1f
+ /* Although s1 and s2 have identical initial alignment, they are
+ * not currently word aligned. Rather than comparing bytes,
+ * make sure that any bytes fetched from before the addressed
+ * bytes are forced to 0xff. Then they will always compare
+ * equal.
+ */
+ eor r2, r2, #3
+ lsl r2, r2, #3
+ mvn r3, #MSB
+ SHFT2LSB r2, r3, r2
+ ldr r3, [r1], #4
+ orr ip, ip, r2
+ orr r3, r3, r2
+1:
+ /* Load the 'magic' constant 0x01010101. */
+ str r4, [sp, #-4]!
+ mov r4, #1
+ orr r4, r4, r4, lsl #8
+ orr r4, r4, r4, lsl #16
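+ /* (ip - 0x01010101) & ~ip & 0x80808080 is nonzero iff ip
+ contains a zero byte. */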
+ .p2align 2
+4:
+ pld [r0, #8]
+ pld [r1, #8]
+ sub r2, ip, magic1(r4)
+ cmp ip, r3
+ itttt eq
+ /* Check for any zero bytes in the first word. */
+ biceq r2, r2, ip
+ tsteq r2, magic2(r4)
+ ldreq ip, [r0], #4
+ ldreq r3, [r1], #4
+ beq 4b
+2:
+ /* There's a zero or a different byte in the word */
+ SHFT2MSB r0, ip, #24
+ SHFT2LSB ip, ip, #8
+ cmp r0, #1
+ it cs
+ cmpcs r0, r3, SHFT2MSB #24
+ it eq
+ SHFT2LSBEQ r3, r3, #8
+ beq 2b
+ /* On a big-endian machine, r0 contains the desired byte in bits
+ * 0-7; on a little-endian machine they are in bits 24-31. In
+ * both cases the other bits in r0 are all zero. For r3 the
+ * interesting byte is at the other end of the word, but the
+ * other bits are not necessarily zero. We need a signed result
+ * representing the difference in the unsigned bytes, so for the
+ * little-endian case we can't just shift the interesting bits up.
+ */
+#ifdef __ARMEB__
+ sub r0, r0, r3, lsr #24
+#else
+ and r3, r3, #255
+ /* No RSB instruction in Thumb2 */
+#ifdef __thumb2__
+ lsr r0, r0, #24
+ sub r0, r0, r3
+#else
+ rsb r0, r3, r0, lsr #24
+#endif
+#endif
+ ldr r4, [sp], #4
+ bx lr
+.Lstrcmp_unaligned:
+ wp1 .req r0
+ wp2 .req r1
+ b1 .req r2
+ w1 .req r4
+ w2 .req r5
+ t1 .req ip
+ @ r3 is scratch
+ /* First of all, compare bytes until wp1 (src1) is word-aligned. */
+1:
+ tst wp1, #3
+ beq 2f
+ ldrb r2, [wp1], #1
+ ldrb r3, [wp2], #1
+ cmp r2, #1
+ it cs
+ cmpcs r2, r3
+ beq 1b
+ sub r0, r2, r3
+ bx lr
+2:
+ str r5, [sp, #-4]!
+ str r4, [sp, #-4]!
+ mov b1, #1
+ orr b1, b1, b1, lsl #8
+ orr b1, b1, b1, lsl #16
+ and t1, wp2, #3
+ bic wp2, wp2, #3
+ ldr w1, [wp1], #4
+ ldr w2, [wp2], #4
+ cmp t1, #2
+ beq 2f
+ bhi 3f
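+ /* t1 is s2's offset within a word (1, 2 or 3): fall through for a
+ 3-byte initial overlap, take 2f for 2 bytes and 3f for 1 byte. */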
+ /* Critical inner Loop: Block with 3 bytes initial overlap */
+ .p2align 2
+1:
+ bic t1, w1, #MSB
+ cmp t1, w2, SHFT2LSB #8
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, SHFT2MSB #24
+ bne 6f
+ ldr w1, [wp1], #4
+ b 1b
+4:
+ SHFT2LSB w2, w2, #8
+ b 8f
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #0xff000000
+ itt ne
+ tstne w1, #0x00ff0000
+ tstne w1, #0x0000ff00
+ beq 7f
+#else
+ bics r3, r3, #0xff000000
+ bne 7f
+#endif
+ ldrb w2, [wp2]
+ SHFT2LSB t1, w1, #24
+#ifdef __ARMEB__
+ lsl w2, w2, #24
+#endif
+ b 8f
+6:
+ SHFT2LSB t1, w1, #24
+ and w2, w2, #LSB
+ b 8f
+ /* Critical inner Loop: Block with 2 bytes initial overlap */
+ .p2align 2
+2:
+ SHFT2MSB t1, w1, #16
+ sub r3, w1, b1
+ SHFT2LSB t1, t1, #16
+ bic r3, r3, w1
+ cmp t1, w2, SHFT2LSB #16
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, SHFT2MSB #16
+ bne 6f
+ ldr w1, [wp1], #4
+ b 2b
+5:
+#ifdef __ARMEB__
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #0xff000000
+ it ne
+ tstne w1, #0x00ff0000
+ beq 7f
+#else
+ lsls r3, r3, #16
+ bne 7f
+#endif
+ ldrh w2, [wp2]
+ SHFT2LSB t1, w1, #16
+#ifdef __ARMEB__
+ lsl w2, w2, #16
+#endif
+ b 8f
+6:
+ SHFT2MSB w2, w2, #16
+ SHFT2LSB t1, w1, #16
+4:
+ SHFT2LSB w2, w2, #16
+ b 8f
+ /* Critical inner Loop: Block with 1 byte initial overlap */
+ .p2align 2
+3:
+ and t1, w1, #LSB
+ cmp t1, w2, SHFT2LSB #24
+ sub r3, w1, b1
+ bic r3, r3, w1
+ bne 4f
+ ands r3, r3, b1, lsl #7
+ it eq
+ ldreq w2, [wp2], #4
+ bne 5f
+ eor t1, t1, w1
+ cmp t1, w2, SHFT2MSB #8
+ bne 6f
+ ldr w1, [wp1], #4
+ b 3b
+4:
+ SHFT2LSB w2, w2, #24
+ b 8f
+5:
+ /* The syndrome value may contain false ones if the string ends
+ * with the bytes 0x01 0x00
+ */
+ tst w1, #LSB
+ beq 7f
+ ldr w2, [wp2], #4
+6:
+ SHFT2LSB t1, w1, #8
+ bic w2, w2, #MSB
+ b 8f
+7:
+ mov r0, #0
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ bx lr
+8:
+ and r2, t1, #LSB
+ and r0, w2, #LSB
+ cmp r0, #1
+ it cs
+ cmpcs r0, r2
+ itt eq
+ SHFT2LSBEQ t1, t1, #8
+ SHFT2LSBEQ w2, w2, #8
+ beq 8b
+ sub r0, r2, r0
+ ldr r4, [sp], #4
+ ldr r5, [sp], #4
+ bx lr
+ .size strcmp, . - strcmp
+
+#endif
diff --git a/libs/libc/machine/arm/armv8-r/gnu/arch_strlen.S
b/libs/libc/machine/arm/armv8-r/gnu/arch_strlen.S
new file mode 100644
index 0000000000..f2d7103989
--- /dev/null
+++ b/libs/libc/machine/arm/armv8-r/gnu/arch_strlen.S
@@ -0,0 +1,190 @@
+/****************************************************************************
+ * libs/libc/machine/arm/armv8-r/gnu/arch_strlen.S
+ *
+ * SPDX-License-Identifier: BSD-3-Clause
+ * SPDX-FileCopyrightText: 2010-2011, 2013 Linaro Limited, 2015 ARM Ltd.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ *
+ * * Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * * Neither the name of Linaro Limited nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * Assumes:
+ * ARMv6T2 or ARMv7E-M, AArch32
+ *
+ *
+ ****************************************************************************/
+
+#include "libc.h"
+
+#ifdef LIBC_BUILD_STRLEN
+
+#include "acle-compat.h"
+
+ .macro def_fn f p2align=0
+ .text
+ .p2align \p2align
+ .global \f
+ .type \f, %function
+\f:
+ .endm
+
+#ifdef __ARMEB__
+#define S2LO lsl
+#define S2HI lsr
+#else
+#define S2LO lsr
+#define S2HI lsl
+#endif
+
+ /* This code requires Thumb. */
+#if __ARM_ARCH_PROFILE == 'M'
+ .arch armv7e-m
+#else
+ .arch armv6t2
+#endif
+ .eabi_attribute Tag_ARM_ISA_use, 0
+ .thumb
+ .syntax unified
+
+/* Parameters and result. */
+#define srcin r0
+#define result r0
+
+/* Internal variables. */
+#define src r1
+#define data1a r2
+#define data1b r3
+#define const_m1 r12
+#define const_0 r4
+#define tmp1 r4 /* Overlaps const_0 */
+#define tmp2 r5
+
+def_fn strlen p2align=6
+ pld [srcin, #0]
+ strd r4, r5, [sp, #-8]!
+ bic src, srcin, #7
+ mvn const_m1, #0
+ ands tmp1, srcin, #7 /* Misalignment in bytes (0-7). */
+ pld [src, #32]
+ bne.w .Lmisaligned8
+ mov const_0, #0
+ mov result, #-8
+.Lloop_aligned:
+ /* Bytes 0-7. */
+ ldrd data1a, data1b, [src]
+ pld [src, #64]
+ add result, result, #8
+.Lstart_realigned:
+ uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
+ sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
+ uadd8 data1b, data1b, const_m1
+ sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
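+ /* data1b is now nonzero iff either word contained a NUL byte. */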
+ cbnz data1b, .Lnull_found
+
+ /* Bytes 8-15. */
+ ldrd data1a, data1b, [src, #8]
+ uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
+ add result, result, #8
+ sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
+ uadd8 data1b, data1b, const_m1
+ sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
+ cbnz data1b, .Lnull_found
+
+ /* Bytes 16-23. */
+ ldrd data1a, data1b, [src, #16]
+ uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
+ add result, result, #8
+ sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
+ uadd8 data1b, data1b, const_m1
+ sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
+ cbnz data1b, .Lnull_found
+
+ /* Bytes 24-31. */
+ ldrd data1a, data1b, [src, #24]
+ add src, src, #32
+ uadd8 data1a, data1a, const_m1 /* Saturating GE<0:3> set. */
+ add result, result, #8
+ sel data1a, const_0, const_m1 /* Select based on GE<0:3>. */
+ uadd8 data1b, data1b, const_m1
+ sel data1b, data1a, const_m1 /* Only used if d1a == 0. */
+ cmp data1b, #0
+ beq .Lloop_aligned
+
+.Lnull_found:
+ cmp data1a, #0
+ itt eq
+ addeq result, result, #4
+ moveq data1a, data1b
+#ifndef __ARMEB__
+ rev data1a, data1a
+#endif
+ clz data1a, data1a
+ ldrd r4, r5, [sp], #8
+ add result, result, data1a, lsr #3 /* Bits -> Bytes. */
+ bx lr
+
+.Lmisaligned8:
+ ldrd data1a, data1b, [src]
+ and tmp2, tmp1, #3
+ rsb result, tmp1, #0
+ lsl tmp2, tmp2, #3 /* Bytes -> bits. */
+ tst tmp1, #4
+ pld [src, #64]
+ S2HI tmp2, const_m1, tmp2
+ orn data1a, data1a, tmp2
+ itt ne
+ ornne data1b, data1b, tmp2
+ movne data1a, const_m1
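+ /* Bytes that precede srcin were forced to 0xff above so they can
+ never look like a NUL; if srcin starts in the second word, all of
+ data1a is forced to 0xff as well. */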
+ mov const_0, #0
+ b .Lstart_realigned
+ .size strlen, . - strlen
+
+#endif