This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 489bd1527102af7935f9376a5a6052e3a6891b0d
Author: dongjiuzhu1 <dongjiuz...@xiaomi.com>
AuthorDate: Mon Oct 16 20:27:15 2023 +0800

    arch/arm64: support relocate for aarch64
    
    Signed-off-by: dongjiuzhu1 <dongjiuz...@xiaomi.com>
---
 arch/arm64/include/elf.h           | 217 ++++++++++
 include/elf.h                      |   1 +
 libs/libc/machine/arm64/Make.defs  |   4 +
 libs/libc/machine/arm64/arch_elf.c | 798 +++++++++++++++++++++++++++++++++++++
 4 files changed, 1020 insertions(+)

diff --git a/arch/arm64/include/elf.h b/arch/arm64/include/elf.h
new file mode 100644
index 0000000000..7a7b9ea231
--- /dev/null
+++ b/arch/arm64/include/elf.h
@@ -0,0 +1,217 @@
+/****************************************************************************
+ * arch/arm64/include/elf.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/* Reference: "ELF for the ARM 64-bit Architecture," ARM IHI 0056B, current
+ * through AArch64 ABI release 1.0, May 22, 2013, ARM Limited.
+ */
+
+#ifndef __ARCH_ARM64_INCLUDE_ELF_H
+#define __ARCH_ARM64_INCLUDE_ELF_H
+
+/****************************************************************************
+ * Pre-processor Prototypes
+ ****************************************************************************/
+
+/* 4.2.1 ELF Identification.  Should have:
+ *
+ * e_machine         = EM_AARCH64
+ * e_ident[EI_CLASS] = ELFCLASS64
+ * e_ident[EI_DATA]  = ELFDATA2LSB (little endian) or
+ *                     ELFDATA2MSB (big endian)
+ */
+
+#define EM_ARCH  EM_AARCH64
+
+/* e_flags: there are no processor-specific flags so this field
+ * shall contain zero.
+ */
+
+/* Table 4-2, Processor specific section types */
+
+#define SHT_AARCH64_ATTRIBUTES  0x70000003 /* Object file compatibility attributes */
+
+/* 4.6.3 Relocation codes
+ *
+ * S (when used on its own) is the address of the symbol.
+ * A is the addend for the relocation.
+ * P is the address of the place being relocated (derived from r_offset).
+ * X is the result of a relocation operation, before any masking or
+ *   bit-selection operation is applied
+ * Page(expr) is the page address of the expression expr, defined as
+ *   (expr & ~0xFFF). (This applies even if the machine page size supported
+ *   by the platform has a different value.)
+ * GOT is the address of the Global Offset Table, the table of code and data
+ *   addresses to be resolved at dynamic link time. The GOT and each entry in
+ *   it must be 64-bit aligned.
+ * GDAT(S+A) represents a 64-bit entry in the GOT for address S+A. The entry
+ *   will be relocated at run time with relocation R_AARCH64_GLOB_DAT(S+A).
+ * G(expr) is the address of the GOT entry for the expression expr.
+ * Delta(S) if S is a normal symbol, resolves to the difference between the
+ *   static link address of S and the execution address of S. If S is the
+ *   null symbol (ELF symbol index 0), resolves to the difference between
+ *   the static link address of P and the execution address of P.
+ * Indirect(expr) represents the result of calling expr as a function. The
+ *   result is the return value from the function that is returned in r0.
+ *   The arguments passed to the function are defined by the platform ABI.
+ * [msb:lsb] is a bit-mask operation representing the selection of bits in
+ *   a value. The bits selected range from lsb up to msb inclusive. For
+ *   example, ‘bits [3:0]’ represents the bits under the mask 0x0000000F.
+ *   When range checking is applied to a value, it is applied before the
+ *   masking operation is performed.
+ */
+
+#define R_AARCH64_NONE                         0    /* Miscellaneous No relocation */
+#define R_AARCH64_ABS64                        257  /* Direct 64 bit. */
+#define R_AARCH64_ABS32                        258  /* Direct 32 bit.  */
+#define R_AARCH64_ABS16                        259  /* Direct 16-bit.  */
+#define R_AARCH64_PREL64                       260  /* PC-relative 64-bit. */
+#define R_AARCH64_PREL32                       261  /* PC-relative 32-bit. */
+#define R_AARCH64_PREL16                       262  /* PC-relative 16-bit. */
+#define R_AARCH64_MOVW_UABS_G0                 263  /* Dir. MOVZ imm. from bits 15:0.  */
+#define R_AARCH64_MOVW_UABS_G0_NC              264  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_UABS_G1                 265  /* Dir. MOVZ imm. from bits 31:16.  */
+#define R_AARCH64_MOVW_UABS_G1_NC              266  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_UABS_G2                 267  /* Dir. MOVZ imm. from bits 47:32.  */
+#define R_AARCH64_MOVW_UABS_G2_NC              268  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_UABS_G3                 269  /* Dir. MOV{K,Z} imm. from 63:48.  */
+#define R_AARCH64_MOVW_SABS_G0                 270  /* Dir. MOV{N,Z} imm. from 15:0.  */
+#define R_AARCH64_MOVW_SABS_G1                 271  /* Dir. MOV{N,Z} imm. from 31:16.  */
+#define R_AARCH64_MOVW_SABS_G2                 272  /* Dir. MOV{N,Z} imm. from 47:32.  */
+#define R_AARCH64_LD_PREL_LO19                 273  /* PC-rel. LD imm. from bits 20:2.  */
+#define R_AARCH64_ADR_PREL_LO21                274  /* PC-rel. ADR imm. from bits 20:0.  */
+#define R_AARCH64_ADR_PREL_PG_HI21             275  /* Page-rel. ADRP imm. from 32:12.  */
+#define R_AARCH64_ADR_PREL_PG_HI21_NC          276  /* Likewise; no overflow check.  */
+#define R_AARCH64_ADD_ABS_LO12_NC              277  /* Dir. ADD imm. from bits 11:0.  */
+#define R_AARCH64_LDST8_ABS_LO12_NC            278  /* Likewise for LD/ST; no check. */
+#define R_AARCH64_TSTBR14                      279  /* PC-rel. TBZ/TBNZ imm. from 15:2.  */
+#define R_AARCH64_CONDBR19                     280  /* PC-rel. cond. br. imm. from 20:2. */
+#define R_AARCH64_JUMP26                       282  /* PC-rel. B imm. from bits 27:2.  */
+#define R_AARCH64_CALL26                       283  /* Likewise for CALL.  */
+#define R_AARCH64_LDST16_ABS_LO12_NC           284  /* Dir. ADD imm. from bits 11:1.  */
+#define R_AARCH64_LDST32_ABS_LO12_NC           285  /* Likewise for bits 11:2.  */
+#define R_AARCH64_LDST64_ABS_LO12_NC           286  /* Likewise for bits 11:3.  */
+#define R_AARCH64_MOVW_PREL_G0                 287  /* PC-rel. MOV{N,Z} imm. from 15:0.  */
+#define R_AARCH64_MOVW_PREL_G0_NC              288  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_PREL_G1                 289  /* PC-rel. MOV{N,Z} imm. from 31:16. */
+#define R_AARCH64_MOVW_PREL_G1_NC              290  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_PREL_G2                 291  /* PC-rel. MOV{N,Z} imm. from 47:32. */
+#define R_AARCH64_MOVW_PREL_G2_NC              292  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_PREL_G3                 293  /* PC-rel. MOV{N,Z} imm. from 63:48. */
+#define R_AARCH64_LDST128_ABS_LO12_NC          299  /* Dir. ADD imm. from bits 11:4.  */
+#define R_AARCH64_MOVW_GOTOFF_G0               300  /* GOT-rel. off. MOV{N,Z} imm. 15:0. */
+#define R_AARCH64_MOVW_GOTOFF_G0_NC            301  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_GOTOFF_G1               302  /* GOT-rel. o. MOV{N,Z} imm. 31:16.  */
+#define R_AARCH64_MOVW_GOTOFF_G1_NC            303  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_GOTOFF_G2               304  /* GOT-rel. o. MOV{N,Z} imm. 47:32.  */
+#define R_AARCH64_MOVW_GOTOFF_G2_NC            305  /* Likewise for MOVK; no check.  */
+#define R_AARCH64_MOVW_GOTOFF_G3               306  /* GOT-rel. o. MOV{N,Z} imm. 63:48.  */
+#define R_AARCH64_GOTREL64                     307  /* GOT-relative 64-bit.  */
+#define R_AARCH64_GOTREL32                     308  /* GOT-relative 32-bit.  */
+#define R_AARCH64_GOT_LD_PREL19                309  /* PC-rel. GOT off. load imm. 20:2.  */
+#define R_AARCH64_LD64_GOTOFF_LO15             310  /* GOT-rel. off. LD/ST imm. 14:3.  */
+#define R_AARCH64_ADR_GOT_PAGE                 311  /* P-page-rel. GOT off. ADRP 32:12.  */
+#define R_AARCH64_LD64_GOT_LO12_NC             312  /* Dir. GOT off. LD/ST imm. 11:3.  */
+#define R_AARCH64_LD64_GOTPAGE_LO15            313  /* GOT-page-rel. GOT off. LD/ST 14:3 */
+#define R_AARCH64_TLSGD_ADR_PREL21             512  /* PC-relative ADR imm. 20:0.  */
+#define R_AARCH64_TLSGD_ADR_PAGE21             513  /* page-rel. ADRP imm. 32:12.  */
+#define R_AARCH64_TLSGD_ADD_LO12_NC            514  /* direct ADD imm. from 11:0.  */
+#define R_AARCH64_TLSGD_MOVW_G1                515  /* GOT-rel. MOV{N,Z} 31:16.  */
+#define R_AARCH64_TLSGD_MOVW_G0_NC             516  /* GOT-rel. MOVK imm. 15:0.  */
+#define R_AARCH64_TLSLD_ADR_PREL21             517  /* Like 512; local dynamic model.  */
+#define R_AARCH64_TLSLD_ADR_PAGE21             518  /* Like 513; local dynamic model.  */
+#define R_AARCH64_TLSLD_ADD_LO12_NC            519  /* Like 514; local dynamic model.  */
+#define R_AARCH64_TLSLD_MOVW_G1                520  /* Like 515; local dynamic model.  */
+#define R_AARCH64_TLSLD_MOVW_G0_NC             521  /* Like 516; local dynamic model.  */
+#define R_AARCH64_TLSLD_LD_PREL19              522  /* TLS PC-rel. load imm. 20:2.  */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G2         523  /* TLS DTP-rel. MOV{N,Z} 47:32.  */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1         524  /* TLS DTP-rel. MOV{N,Z} 31:16.  */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G1_NC      525  /* Likewise; MOVK; no check.  */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0         526  /* TLS DTP-rel. MOV{N,Z} 15:0.  */
+#define R_AARCH64_TLSLD_MOVW_DTPREL_G0_NC      527  /* Likewise; MOVK; no check.  */
+#define R_AARCH64_TLSLD_ADD_DTPREL_HI12        528  /* DTP-rel. ADD imm. from 23:12. */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12        529  /* DTP-rel. ADD imm. from 11:0.  */
+#define R_AARCH64_TLSLD_ADD_DTPREL_LO12_NC     530  /* Likewise; no ovfl. check.  */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12      531  /* DTP-rel. LD/ST imm. 11:0.  */
+#define R_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC   532  /* Likewise; no check.  */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12     533  /* DTP-rel. LD/ST imm. 11:1.  */
+#define R_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC  534  /* Likewise; no check.  */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12     535  /* DTP-rel. LD/ST imm. 11:2.  */
+#define R_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC  536  /* Likewise; no check.  */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12     537  /* DTP-rel. LD/ST imm. 11:3.  */
+#define R_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC  538  /* Likewise; no check.  */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G1       539  /* GOT-rel. MOV{N,Z} 31:16.  */
+#define R_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC    540  /* GOT-rel. MOVK 15:0.  */
+#define R_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21    541  /* Page-rel. ADRP 32:12.  */
+#define R_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC  542  /* Direct LD off. 11:3.  */
+#define R_AARCH64_TLSIE_LD_GOTTPREL_PREL19     543  /* PC-rel. load imm. 20:2.  */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G2          544  /* TLS TP-rel. MOV{N,Z} 47:32.  */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G1          545  /* TLS TP-rel. MOV{N,Z} 31:16.  */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G1_NC       546  /* Likewise; MOVK; no check.  */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G0          547  /* TLS TP-rel. MOV{N,Z} 15:0.  */
+#define R_AARCH64_TLSLE_MOVW_TPREL_G0_NC       548  /* Likewise; MOVK; no check.  */
+#define R_AARCH64_TLSLE_ADD_TPREL_HI12         549  /* TP-rel. ADD imm. 23:12.  */
+#define R_AARCH64_TLSLE_ADD_TPREL_LO12         550  /* TP-rel. ADD imm. 11:0.  */
+#define R_AARCH64_TLSLE_ADD_TPREL_LO12_NC      551  /* Likewise; no ovfl. check.  */
+#define R_AARCH64_TLSLE_LDST8_TPREL_LO12       552  /* TP-rel. LD/ST off. 11:0.  */
+#define R_AARCH64_TLSLE_LDST8_TPREL_LO12_NC    553  /* Likewise; no ovfl. check. */
+#define R_AARCH64_TLSLE_LDST16_TPREL_LO12      554  /* TP-rel. LD/ST off. 11:1.  */
+#define R_AARCH64_TLSLE_LDST16_TPREL_LO12_NC   555  /* Likewise; no check.  */
+#define R_AARCH64_TLSLE_LDST32_TPREL_LO12      556  /* TP-rel. LD/ST off. 11:2.  */
+#define R_AARCH64_TLSLE_LDST32_TPREL_LO12_NC   557  /* Likewise; no check.  */
+#define R_AARCH64_TLSLE_LDST64_TPREL_LO12      558  /* TP-rel. LD/ST off. 11:3.  */
+#define R_AARCH64_TLSLE_LDST64_TPREL_LO12_NC   559  /* Likewise; no check.  */
+#define R_AARCH64_TLSDESC_LD_PREL19            560  /* PC-rel. load immediate 20:2.  */
+#define R_AARCH64_TLSDESC_ADR_PREL21           561  /* PC-rel. ADR immediate 20:0.  */
+#define R_AARCH64_TLSDESC_ADR_PAGE21           562  /* Page-rel. ADRP imm. 32:12.  */
+#define R_AARCH64_TLSDESC_LD64_LO12            563  /* Direct LD off. from 11:3.  */
+#define R_AARCH64_TLSDESC_ADD_LO12             564  /* Direct ADD imm. from 11:0.  */
+#define R_AARCH64_TLSDESC_OFF_G1               565  /* GOT-rel. MOV{N,Z} imm. 31:16.  */
+#define R_AARCH64_TLSDESC_OFF_G0_NC            566  /* GOT-rel. MOVK imm. 15:0; no ck.  */
+#define R_AARCH64_TLSDESC_LDR                  567  /* Relax LDR.  */
+#define R_AARCH64_TLSDESC_ADD                  568  /* Relax ADD.  */
+#define R_AARCH64_TLSDESC_CALL                 569  /* Relax BLR.  */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12     570  /* TP-rel. LD/ST off. 11:4.  */
+#define R_AARCH64_TLSLE_LDST128_TPREL_LO12_NC  571  /* Likewise; no check.  */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12    572  /* DTP-rel. LD/ST imm. 11:4. */
+#define R_AARCH64_TLSLD_LDST128_DTPREL_LO12_NC 573  /* Likewise; no check.  */
+#define R_AARCH64_COPY                         1024 /* Copy symbol at runtime.  */
+#define R_AARCH64_GLOB_DAT                     1025 /* Create GOT entry.  */
+#define R_AARCH64_JUMP_SLOT                    1026 /* Create PLT entry.  */
+#define R_AARCH64_RELATIVE                     1027 /* Adjust by program base.  */
+#define R_AARCH64_TLS_DTPMOD                   1028 /* Module number, 64 bit.  */
+#define R_AARCH64_TLS_DTPREL                   1029 /* Module-relative offset, 64 bit.  */
+#define R_AARCH64_TLS_TPREL                    1030 /* TP-relative offset, 64 bit.  */
+#define R_AARCH64_TLSDESC                      1031 /* TLS Descriptor.  */
+#define R_AARCH64_IRELATIVE                    1032 /* STT_GNU_IFUNC relocation.  */
+
+/* 5.1 Program Header */
+
+#define PT_AARCH64_ARCHEXT  0x70000000  /* Reserved for architecture compatibility information */
+#define PT_AARCH64_UNWIND   0x70000001  /* Reserved for exception unwinding tables */
+
+/****************************************************************************
+ * Public Types
+ ****************************************************************************/
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+#endif /* __ARCH_ARM64_INCLUDE_ELF_H */
diff --git a/include/elf.h b/include/elf.h
index 570b2d328b..fbd8e5391a 100644
--- a/include/elf.h
+++ b/include/elf.h
@@ -82,6 +82,7 @@
 #define EM_V850            87     /* NEC v850 */
 #define EM_M32R            88     /* Renesas M32R */
 #define EM_XTENSA          94     /* Tensilica Xtensa */
+#define EM_AARCH64         183    /* ARM-64 Architecture */
 #define EM_RISCV           243    /* RISC-V */
 #define EM_ALPHA           0x9026
 #define EM_CYGNUS_V850     0x9080
diff --git a/libs/libc/machine/arm64/Make.defs b/libs/libc/machine/arm64/Make.defs
index d8ad94b76d..f4ff27d46e 100644
--- a/libs/libc/machine/arm64/Make.defs
+++ b/libs/libc/machine/arm64/Make.defs
@@ -18,6 +18,10 @@
 #
 ############################################################################
 
+ifeq ($(CONFIG_LIBC_ARCH_ELF_64BIT),y)
+CSRCS += arch_elf.c
+endif
+
 ifeq ($(CONFIG_ARM64_MEMCHR),y)
 ASRCS += arch_memchr.S
 endif
diff --git a/libs/libc/machine/arm64/arch_elf.c b/libs/libc/machine/arm64/arch_elf.c
new file mode 100644
index 0000000000..ab61357df8
--- /dev/null
+++ b/libs/libc/machine/arm64/arch_elf.c
@@ -0,0 +1,798 @@
+/****************************************************************************
+ * libs/libc/machine/arm64/arch_elf.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <inttypes.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <debug.h>
+#include <endian.h>
+
+#include <nuttx/compiler.h>
+#include <nuttx/bits.h>
+#include <nuttx/elf.h>
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* For triggering a fault on purpose (reserved) */
+
+#define FAULT_BRK_IMM           0x100
+
+/* BRK instruction encoding
+ * The #imm16 value should be placed at bits[20:5] within BRK ins
+ */
+
+#define AARCH64_BREAK_MON       0xd4200000
+
+/* BRK instruction for provoking a fault on purpose
+ * Unlike kgdb, #imm16 value with unallocated handler is used for faulting.
+ */
+
+#define AARCH64_BREAK_FAULT     (AARCH64_BREAK_MON | (FAULT_BRK_IMM << 5))
+
+#define ADR_IMM_HILOSPLIT       2
+#define ADR_IMM_SIZE            (2 * 1024 * 1024)
+#define ADR_IMM_LOMASK          ((1 << ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_HIMASK          ((ADR_IMM_SIZE >> ADR_IMM_HILOSPLIT) - 1)
+#define ADR_IMM_LOSHIFT         29
+#define ADR_IMM_HISHIFT         5
+
+#define INSN_SF_BIT             BIT(31)
+#define INSN_N_BIT              BIT(22)
+#define INSN_LSL_12             BIT(22)
+
+/****************************************************************************
+ * Private Types
+ ****************************************************************************/
+
+enum reloc_op_e
+{
+  RELOC_OP_NONE,
+  RELOC_OP_ABS,
+  RELOC_OP_PREL,
+  RELOC_OP_PAGE,
+};
+
+enum insn_movw_imm_type_e
+{
+  INSN_IMM_MOVNZ,
+  INSN_IMM_MOVKZ,
+};
+
+enum insn_imm_type_e
+{
+  INSN_IMM_ADR,
+  INSN_IMM_26,
+  INSN_IMM_19,
+  INSN_IMM_16,
+  INSN_IMM_14,
+  INSN_IMM_12,
+  INSN_IMM_N,
+  INSN_IMM_MAX
+};
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+static uint32_t
+aarch64_insn_encode_immediate(enum insn_imm_type_e type,
+                              uint32_t insn, uint64_t imm)
+{
+  uint32_t immlo;
+  uint32_t immhi;
+  uint32_t mask;
+  int shift;
+
+  if (insn == AARCH64_BREAK_FAULT)
+    {
+      return AARCH64_BREAK_FAULT;
+    }
+
+  switch (type)
+    {
+      case INSN_IMM_ADR:
+        {
+          shift = 0;
+          immlo = (imm & ADR_IMM_LOMASK) << ADR_IMM_LOSHIFT;
+          imm >>= ADR_IMM_HILOSPLIT;
+          immhi = (imm & ADR_IMM_HIMASK) << ADR_IMM_HISHIFT;
+          imm = immlo | immhi;
+          mask = (ADR_IMM_LOMASK << ADR_IMM_LOSHIFT) |
+                 (ADR_IMM_HIMASK << ADR_IMM_HISHIFT);
+        }
+        break;
+
+      case INSN_IMM_26:
+        {
+          mask = BIT(26) - 1;
+          shift = 0;
+        }
+        break;
+
+      case INSN_IMM_19:
+        {
+          mask = BIT(19) - 1;
+          shift = 5;
+        }
+        break;
+
+      case INSN_IMM_16:
+        {
+          mask = BIT(16) - 1;
+          shift = 5;
+        }
+        break;
+
+      case INSN_IMM_14:
+        {
+          mask = BIT(14) - 1;
+          shift = 5;
+        }
+        break;
+
+      case INSN_IMM_12:
+        {
+          mask = BIT(12) - 1;
+          shift = 10;
+        }
+        break;
+
+      default:
+        {
+          berr("unknown immediate encoding %d\n", type);
+
+          return AARCH64_BREAK_FAULT;
+        }
+    }
+
+  /* Update the immediate field. */
+
+  insn &= ~(mask << shift);
+  insn |= (imm & mask) << shift;
+
+  return insn;
+}
+
+static uint64_t do_reloc(enum reloc_op_e op,
+                         uintptr_t place, uint64_t val)
+{
+  switch (op)
+    {
+      case RELOC_OP_ABS:
+        return val;
+      case RELOC_OP_PREL:
+        return val - (uint64_t)place;
+      case RELOC_OP_PAGE:
+        return (val & ~0xfff) - ((uint64_t)place & ~0xfff);
+      case RELOC_OP_NONE:
+        return 0;
+    }
+
+  return 0;
+}
+
+static int reloc_data(enum reloc_op_e op, uintptr_t place,
+                      uint64_t val, int len)
+{
+  int64_t sval = do_reloc(op, place, val);
+
+  /* The ELF psABI for AArch64 documents the 16-bit and 32-bit place
+   * relative and absolute relocations as having a range of [-2^15, 2^16)
+   * or [-2^31, 2^32), respectively. However, in order to be able to
+   * detect overflows reliably, we have to choose whether we interpret
+   * such quantities as signed or as unsigned, and stick with it.
+   * The way we organize our address space requires a signed
+   * interpretation of 32-bit relative references, so let's use that
+   * for all R_AARCH64_PRELxx relocations. This means our upper
+   * bound for overflow detection should be Sxx_MAX rather than Uxx_MAX.
+   */
+
+  switch (len)
+    {
+      case 16:
+        {
+          *(int16_t *)place = sval;
+          switch (op)
+            {
+              case RELOC_OP_ABS:
+                {
+                  if (sval < 0 || sval > UINT16_MAX)
+                    {
+                      return -ERANGE;
+                    }
+                }
+                break;
+
+              case RELOC_OP_PREL:
+                {
+                  if (sval < INT16_MIN || sval > INT16_MAX)
+                    {
+                      return -ERANGE;
+                    }
+                }
+                break;
+
+              default:
+                {
+                  berr("Invalid 16-bit data relocation (%d)\n", op);
+                  return -EINVAL;
+                }
+            }
+        }
+        break;
+
+      case 32:
+        {
+          *(int32_t *)place = sval;
+          switch (op)
+            {
+              case RELOC_OP_ABS:
+                {
+                  if (sval < 0 || sval > UINT32_MAX)
+                    {
+                      return -ERANGE;
+                    }
+                }
+                break;
+
+              case RELOC_OP_PREL:
+                {
+                  if (sval < INT32_MIN || sval > INT32_MAX)
+                    {
+                      return -ERANGE;
+                    }
+                }
+                break;
+
+              default:
+                {
+                  berr("Invalid 32-bit data relocation (%d)\n", op);
+                  return -EINVAL;
+                }
+            }
+        }
+        break;
+
+      case 64:
+        {
+          *(int64_t *)place = sval;
+        }
+        break;
+
+      default:
+        {
+          berr("Invalid length (%d) for data relocation\n", len);
+          return -EINVAL;
+        }
+    }
+
+  return 0;
+}
+
+static int reloc_insn_movw(enum reloc_op_e op, uintptr_t place,
+                           uint64_t val, int lsb,
+                           enum insn_movw_imm_type_e imm_type)
+{
+  uint32_t insn = htole32(*(uint32_t *)place);
+  uint64_t imm;
+  int64_t sval;
+
+  sval = do_reloc(op, place, val);
+  imm = sval >> lsb;
+
+  if (imm_type == INSN_IMM_MOVNZ)
+    {
+      /* For signed MOVW relocations, we have to manipulate the
+       * instruction encoding depending on whether or not the
+       * immediate is less than zero.
+       */
+
+      insn &= ~(3 << 29);
+      if (sval >= 0)
+        {
+          /* >=0: Set the instruction to MOVZ (opcode 10b). */
+
+          insn |= 2 << 29;
+        }
+      else
+        {
+          /* <0: Set the instruction to MOVN (opcode 00b).
+           *     Since we've masked the opcode already, we
+           *     don't need to do anything other than
+           *     inverting the new immediate field.
+           */
+
+          imm = ~imm;
+        }
+    }
+
+  /* Update the instruction with the new encoding. */
+
+  insn = aarch64_insn_encode_immediate(INSN_IMM_16, insn, imm);
+  *(uint32_t *)place = le32toh(insn);
+
+  if (imm > UINT16_MAX)
+    {
+      return -ERANGE;
+    }
+
+  return 0;
+}
+
+static int reloc_insn_imm(enum reloc_op_e op, uintptr_t place,
+                          uint64_t val, int lsb, int len,
+                          enum insn_imm_type_e imm_type)
+{
+  int64_t sval;
+  uint64_t imm;
+  uint64_t imm_mask;
+  uint32_t insn = le32toh(*(uint32_t *)place);
+
+  /* Calculate the relocation value. */
+
+  sval = do_reloc(op, place, val);
+  sval >>= lsb;
+
+  /* Extract the value bits and shift them to bit 0. */
+
+  imm_mask = (BIT(lsb + len) - 1) >> lsb;
+  imm = sval & imm_mask;
+
+  /* Update the instruction's immediate field. */
+
+  insn = aarch64_insn_encode_immediate(imm_type, insn, imm);
+  *(uint32_t *)place = htole32(insn);
+
+  /* Extract the upper value bits (including the sign bit) and
+   * shift them to bit 0.
+   */
+
+  sval = (int64_t)(sval & ~(imm_mask >> 1)) >> (len - 1);
+
+  /* Overflow has occurred if the upper bits are not all equal to
+   * the sign bit of the value.
+   */
+
+  if ((uint64_t)(sval + 1) >= 2)
+    {
+      return -ERANGE;
+    }
+
+  return 0;
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: up_checkarch
+ *
+ * Description:
+ *   Given the ELF header in 'hdr', verify that the ELF file is appropriate
+ *   for the current, configured architecture.  Every architecture that uses
+ *   the ELF loader must provide this function.
+ *
+ * Input Parameters:
+ *   hdr - The ELF header read from the ELF file.
+ *
+ * Returned Value:
+ *   True if the architecture supports this ELF file.
+ *
+ ****************************************************************************/
+
+bool up_checkarch(const Elf64_Ehdr *ehdr)
+{
+  /* Make sure it's an ARM executable */
+
+  if (ehdr->e_machine != EM_AARCH64)
+    {
+      berr("ERROR: Not for AARCH64: e_machine=%04x\n", ehdr->e_machine);
+      return false;
+    }
+
+  /* Make sure that 64-bit objects are supported */
+
+  if (ehdr->e_ident[EI_CLASS] != ELFCLASS64)
+    {
+      berr("ERROR: Need 64-bit objects: e_ident[EI_CLASS]=%02x\n",
+           ehdr->e_ident[EI_CLASS]);
+      return false;
+    }
+
+  /* Verify endian-ness */
+
+#ifdef CONFIG_ENDIAN_BIG
+  if (ehdr->e_ident[EI_DATA] != ELFDATA2MSB)
+#else
+  if (ehdr->e_ident[EI_DATA] != ELFDATA2LSB)
+#endif
+    {
+      berr("ERROR: Wrong endian-ness: e_ident[EI_DATA]=%02x\n",
+           ehdr->e_ident[EI_DATA]);
+      return false;
+    }
+
+  /* TODO:  Check ABI here. */
+
+  return true;
+}
+
+/****************************************************************************
+ * Name: up_relocate and up_relocateadd
+ *
+ * Description:
+ *   Perform an architecture-specific ELF relocation.  Every architecture
+ *   that uses the ELF loader must provide this function.
+ *
+ * Input Parameters:
+ *   rel - The relocation type
+ *   sym - The ELF symbol structure containing the fully resolved value.
+ *         There are a few relocation types for a few architectures that do
+ *         not require symbol information.  For those, this value will be
+ *         NULL.  Implementations of these functions must be able to handle
+ *         that case.
+ *   addr - The address that requires the relocation.
+ *
+ * Returned Value:
+ *   Zero (OK) if the relocation was successful.  Otherwise, a negated errno
+ *   value indicating the cause of the relocation failure.
+ *
+ ****************************************************************************/
+
+int up_relocate(const Elf64_Rel *rel, const Elf64_Sym *sym, uintptr_t addr)
+{
+  berr("ERROR: REL relocation not supported\n");
+  return -ENOSYS;
+}
+
+int up_relocateadd(const Elf64_Rela *rel, const Elf64_Sym *sym,
+                   uintptr_t addr)
+{
+  bool overflow_check = true;
+  uint64_t val;
+  int ret = 0;
+
+  /* addr corresponds to P in the AArch64 ELF document. */
+
+  /* val corresponds to (S + A) in the AArch64 ELF document. */
+
+  val = sym->st_value + rel->r_addend;
+
+  /* Handle the relocation by relocation type */
+
+  switch (ELF64_R_TYPE(rel->r_info))
+    {
+      case R_AARCH64_NONE:
+        {
+          /* No relocation */
+        }
+        break;
+
+      /* Data relocations */
+
+      case R_AARCH64_ABS64:
+        {
+          overflow_check = false;
+          ret = reloc_data(RELOC_OP_ABS, addr, val, 64);
+        }
+        break;
+
+      case R_AARCH64_ABS32:
+        {
+          ret = reloc_data(RELOC_OP_ABS, addr, val, 32);
+        }
+        break;
+
+      case R_AARCH64_ABS16:
+        {
+          ret = reloc_data(RELOC_OP_ABS, addr, val, 16);
+        }
+        break;
+
+      case R_AARCH64_PREL64:
+        {
+          overflow_check = false;
+          ret = reloc_data(RELOC_OP_PREL, addr, val, 64);
+        }
+        break;
+
+      case R_AARCH64_PREL32:
+        {
+          ret = reloc_data(RELOC_OP_PREL, addr, val, 32);
+        }
+        break;
+
+      case R_AARCH64_PREL16:
+        {
+          ret = reloc_data(RELOC_OP_PREL, addr, val, 16);
+        }
+        break;
+
+      case R_AARCH64_MOVW_UABS_G0_NC:
+        {
+          overflow_check = false;
+        }
+
+        /* fallthrough */
+
+      case R_AARCH64_MOVW_UABS_G0:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 0,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_UABS_G1_NC:
+        {
+          overflow_check = false;
+        }
+
+        /* fallthrough */
+
+      case R_AARCH64_MOVW_UABS_G1:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 16,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_UABS_G2_NC:
+        {
+          overflow_check = false;
+        }
+
+        /* fallthrough */
+
+      case R_AARCH64_MOVW_UABS_G2:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 32,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_UABS_G3:
+        {
+          /* We're using the top bits so we can't overflow. */
+
+          overflow_check = false;
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 48,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_SABS_G0:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 0,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_SABS_G1:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 16,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_SABS_G2:
+        {
+          ret = reloc_insn_movw(RELOC_OP_ABS, addr, val, 32,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G0_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 0,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G0:
+        {
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 0,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G1_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 16,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G1:
+        {
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 16,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G2_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 32,
+                                INSN_IMM_MOVKZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G2:
+        {
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 32,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      case R_AARCH64_MOVW_PREL_G3:
+        {
+          /* We're using the top bits so we can't overflow. */
+
+          overflow_check = false;
+          ret = reloc_insn_movw(RELOC_OP_PREL, addr, val, 48,
+                                INSN_IMM_MOVNZ);
+        }
+        break;
+
+      /* Immediate instruction relocations. */
+
+      case R_AARCH64_LD_PREL_LO19:
+        {
+          ret = reloc_insn_imm(RELOC_OP_PREL, addr, val, 2, 19,
+                               INSN_IMM_19);
+        }
+        break;
+
+      case R_AARCH64_ADR_PREL_LO21:
+        {
+          ret = reloc_insn_imm(RELOC_OP_PREL, addr, val, 0, 21,
+                               INSN_IMM_ADR);
+        }
+        break;
+
+      case R_AARCH64_ADR_PREL_PG_HI21_NC:
+        {
+          overflow_check = false;
+        }
+
+        /* fallthrough */
+
+      case R_AARCH64_ADR_PREL_PG_HI21:
+        {
+          if (((uint64_t)addr & 0xfff) < 0xff8)
+            {
+              ret = reloc_insn_imm(RELOC_OP_PAGE, addr, val, 12, 21,
+                                   INSN_IMM_ADR);
+            }
+          else
+            {
+              uint32_t insn;
+
+              /* patch ADRP to ADR if it is in range */
+
+              ret = reloc_insn_imm(RELOC_OP_PREL, addr, val & ~0xfff, 0, 21,
+                                   INSN_IMM_ADR);
+              if (ret == 0)
+                {
+                  insn = le32toh(*(uint32_t *)addr);
+                  insn &= ~BIT(31);
+                  *(uint32_t *)addr = htole32(insn);
+                }
+              else
+                {
+                  berr("Out of range for ADR\n");
+                  return -EINVAL;
+                }
+            }
+        }
+        break;
+
+      case R_AARCH64_ADD_ABS_LO12_NC:
+      case R_AARCH64_LDST8_ABS_LO12_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_imm(RELOC_OP_ABS, addr, val, 0, 12,
+                               INSN_IMM_12);
+        }
+        break;
+
+      case R_AARCH64_LDST16_ABS_LO12_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_imm(RELOC_OP_ABS, addr, val, 1, 11,
+                               INSN_IMM_12);
+        }
+        break;
+
+      case R_AARCH64_LDST32_ABS_LO12_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_imm(RELOC_OP_ABS, addr, val, 2, 10,
+                               INSN_IMM_12);
+        }
+        break;
+
+      case R_AARCH64_LDST64_ABS_LO12_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_imm(RELOC_OP_ABS, addr, val, 3, 9,
+                               INSN_IMM_12);
+        }
+        break;
+
+      case R_AARCH64_LDST128_ABS_LO12_NC:
+        {
+          overflow_check = false;
+          ret = reloc_insn_imm(RELOC_OP_ABS, addr, val, 4, 8,
+                               INSN_IMM_12);
+        }
+        break;
+
+      case R_AARCH64_TSTBR14:
+        {
+          ret = reloc_insn_imm(RELOC_OP_PREL, addr, val, 2, 14,
+                               INSN_IMM_14);
+        }
+        break;
+
+      case R_AARCH64_CONDBR19:
+        {
+          ret = reloc_insn_imm(RELOC_OP_PREL, addr, val, 2, 19,
+                               INSN_IMM_19);
+        }
+        break;
+
+      case R_AARCH64_JUMP26:
+      case R_AARCH64_CALL26:
+        {
+          ret = reloc_insn_imm(RELOC_OP_PREL, addr, val, 2, 26,
+                               INSN_IMM_26);
+        }
+        break;
+
+      default:
+        berr("ERROR: Unsupported relocation: %"PRIu64"\n",
+             ELF64_R_TYPE(rel->r_info));
+        return -EINVAL;
+    }
+
+  if (overflow_check && ret == -ERANGE)
+    {
+      goto overflow;
+    }
+
+  return OK;
+
+overflow:
+  berr("ERROR: overflow in relocation type %"PRIu64" val %"PRIu64"\n",
+       ELF64_R_TYPE(rel->r_info), val);
+  return -ENOEXEC;
+}


Reply via email to