pkarashchenko commented on code in PR #6478:
URL: https://github.com/apache/incubator-nuttx/pull/6478#discussion_r906659963


##########
arch/arm64/include/irq.h:
##########
@@ -0,0 +1,438 @@
+/****************************************************************************
+ * arch/arm64/include/irq.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/* This file should never be included directly but, rather, only indirectly
+ * through nuttx/irq.h
+ */
+
+#ifndef __ARCH_ARM64_INCLUDE_IRQ_H
+#define __ARCH_ARM64_INCLUDE_IRQ_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+/* Include NuttX-specific IRQ definitions */
+
+#include <nuttx/irq.h>
+
+/* Include chip-specific IRQ definitions (including IRQ numbers) */
+
+#include <arch/chip/irq.h>
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+#  include <stdint.h>
+#  include <arch/arch.h>
+#endif
+
+/****************************************************************************
+ * Exception stack frame format:
+ *
+ * x0 ~ x18, x30 (lr), spsr and elr
+ *    Corruptible Registers and exception context
+ *    reference to Armv8-A Instruction Set Architecture
+ *    (ARM062-948681440-3280, Issue 1.1), chapter 11 PCS
+ *    need to be saved in all exception
+ *
+ * x19 ~ x29, sp_el0, sp_elx
+ *    Callee-saved Registers and SP pointer
+ *    reference to Armv8-A Instruction Set Architecture
+ *    (ARM062-948681440-3280, Issue 1.1), chapter 11 PCS
+ *    These registers frame is allocated on stack frame
+ *    when a exception is occurred and saved at task switch
+ *    or crash exception
+ *    check arm64_vectors.S for detail
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Registers and exception context
+ * Note:
+ * REG_EXEC_DEPTH indicate the task's exception depth
+ *
+ ****************************************************************************/
+
+#define REG_X0              (0)
+#define REG_X1              (1)
+#define REG_X2              (2)
+#define REG_X3              (3)
+#define REG_X4              (4)
+#define REG_X5              (5)
+#define REG_X6              (6)
+#define REG_X7              (7)
+#define REG_X8              (8)
+#define REG_X9              (9)
+#define REG_X10             (10)
+#define REG_X11             (11)
+#define REG_X12             (12)
+#define REG_X13             (13)
+#define REG_X14             (14)
+#define REG_X15             (15)
+#define REG_X16             (16)
+#define REG_X17             (17)
+#define REG_X18             (18)
+#define REG_X19             (19)
+#define REG_X20             (20)
+#define REG_X21             (21)
+#define REG_X22             (22)
+#define REG_X23             (23)
+#define REG_X24             (24)
+#define REG_X25             (25)
+#define REG_X26             (26)
+#define REG_X27             (27)
+#define REG_X28             (28)
+#define REG_X29             (29)
+#define REG_X30             (30)
+#define REG_SP_ELX          (31)
+#define REG_ELR             (32)
+#define REG_SPSR            (33)
+#define REG_SP_EL0          (34)
+#define REG_EXE_DEPTH       (35)
+#define REG_TPIDR_EL0       (36)
+#define REG_TPIDR_EL1       (37)
+
+/* In Armv8-A Architecture, the stack must align with 16 byte */
+
+#define XCPTCONTEXT_GP_REGS (38)
+#define XCPTCONTEXT_GP_SIZE (8 * XCPTCONTEXT_GP_REGS)
+
+#ifdef CONFIG_ARCH_FPU
+
+/****************************************************************************
+ * q0 ~ q31(128bit), fpsr, fpcr
+ *    armv8 fpu registers and context
+ *    With CONFIG_ARCH_FPU is enabled, armv8 fpu registers context
+ *    is allocated on stack frame at exception and store/restore
+ *    when switching FPU context
+ *    check arm64_fpu.c for detail
+ *
+ ****************************************************************************/
+
+/* 128bit registers */
+
+#define FPU_REG_Q0          (0)
+#define FPU_REG_Q1          (1)
+#define FPU_REG_Q2          (2)
+#define FPU_REG_Q3          (3)
+#define FPU_REG_Q4          (4)
+#define FPU_REG_Q5          (5)
+#define FPU_REG_Q6          (6)
+#define FPU_REG_Q7          (7)
+#define FPU_REG_Q8          (8)
+#define FPU_REG_Q9          (9)
+#define FPU_REG_Q10         (10)
+#define FPU_REG_Q11         (11)
+#define FPU_REG_Q12         (12)
+#define FPU_REG_Q13         (13)
+#define FPU_REG_Q14         (14)
+#define FPU_REG_Q15         (15)
+#define FPU_REG_Q16         (16)
+#define FPU_REG_Q17         (17)
+#define FPU_REG_Q18         (18)
+#define FPU_REG_Q19         (19)
+#define FPU_REG_Q20         (20)
+#define FPU_REG_Q21         (21)
+#define FPU_REG_Q22         (22)
+#define FPU_REG_Q23         (23)
+#define FPU_REG_Q24         (24)
+#define FPU_REG_Q25         (25)
+#define FPU_REG_Q26         (26)
+#define FPU_REG_Q27         (27)
+#define FPU_REG_Q28         (28)
+#define FPU_REG_Q29         (29)
+#define FPU_REG_Q30         (30)
+#define FPU_REG_Q31         (31)
+
+/* 32 bit registers
+ */
+#define FPU_REG_FPSR        (0)
+#define FPU_REG_FPCR        (1)
+
+/* FPU registers(Q0~Q31, 128bit): 32x2 = 64
+ * FPU FPSR/SPSR(32 bit) : 1
+ * FPU TRAP: 1
+ * 64 + 1 + 1 = 66
+ */
+#define ARM64_FPU_REGS      (66)
+#else
+#define ARM64_FPU_REGS      (0)
+#endif
+
+#define FPUCONTEXT_SIZE     (8 * ARM64_FPU_REGS)
+
+#define XCPTCONTEXT_REGS    (XCPTCONTEXT_GP_REGS + ARM64_FPU_REGS)
+#define XCPTCONTEXT_SIZE    (8 * XCPTCONTEXT_REGS)
+
+#ifndef __ASSEMBLY__
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+/* g_current_regs[] holds a references to the current interrupt level
+ * register storage structure.  If is non-NULL only during interrupt
+ * processing.  Access to g_current_regs[] must be through the macro
+ * CURRENT_REGS for portability.
+ */
+
+/* For the case of architectures with multiple CPUs, then there must be one
+ * such value for each processor that can receive an interrupt.
+ */
+
+EXTERN volatile uint64_t *g_current_regs[CONFIG_SMP_NCPUS];
+#define CURRENT_REGS (g_current_regs[up_cpu_index()])
+
+#ifdef CONFIG_ARCH_FPU
+
+/****************************************************************************
+ * armv8 fpu registers and context
+ ****************************************************************************/
+
+struct fpu_reg
+{
+  __int128 q[32];
+  uint32_t fpsr;
+  uint32_t fpcr;
+  uint64_t fpu_trap;
+};
+
+#endif
+
+/****************************************************************************
+ * Registers and exception context
+ ****************************************************************************/
+
+struct regs_context
+{
+  uint64_t  regs[31];  /* x0~x30 */
+  uint64_t  sp_elx;
+  uint64_t  elr;
+  uint64_t  spsr;
+  uint64_t  sp_el0;
+  uint64_t  exe_depth;
+  uint64_t  tpidr_el0;
+  uint64_t  tpidr_el1;
+};
+
+struct xcptcontext
+{
+  /* The following function pointer is non-zero if there are pending signals
+   * to be processed.
+   */
+
+  void *sigdeliver; /* Actual type is sig_deliver_t */
+
+#ifdef CONFIG_BUILD_KERNEL
+  /* This is the saved address to use when returning from a user-space
+   * signal handler.
+   */
+
+  uint64_t sigreturn;
+
+#endif
+  /* task stack reg context */
+
+  uint64_t * regs;
+
+  /* task context, for signal process */
+
+  uint64_t * sig_save_reg;
+
+#ifdef CONFIG_ARCH_FPU
+  struct fpu_reg * fpu_regs;
+  struct fpu_reg * sig_save_fpu_regs;
+#endif
+
+  /* Extra fault address register saved for common paging logic.  In the
+   * case of the pre-fetch abort, this value is the same as regs[REG_R15];
+   * For the case of the data abort, this value is the value of the fault
+   * address register (FAR) at the time of data abort exception.
+   */
+
+#ifdef CONFIG_PAGING
+  uintptr_t far;
+#endif
+
+#ifdef CONFIG_LIB_SYSCALL
+  /* The following array holds the return address and the exc_return value
+   * needed to return from each nested system call.
+   */
+
+  uint8_t nsyscalls;
+  struct xcpt_syscall_s syscall[CONFIG_SYS_NNEST];
+#endif
+
+#ifdef CONFIG_ARCH_ADDRENV
+#ifdef CONFIG_ARCH_STACK_DYNAMIC
+  /* This array holds the physical address of the level 2 page table used
+   * to map the thread's stack memory.  This array will be initially of
+   * zeroed and would be back-up up with pages during page fault exception
+   * handling to support dynamically sized stacks for each thread.
+   */
+
+  uintptr_t *ustack[ARCH_STACK_NSECTS];
+#endif
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+  /* In this configuration, all syscalls execute from an internal kernel
+   * stack.  Why?  Because when we instantiate and initialize the address
+   * environment of the new user process, we will temporarily lose the
+   * address environment of the old user process, including its stack
+   * contents.  The kernel C logic will crash immediately with no valid
+   * stack in place.
+   */
+
+  uint64_t *ustkptr;  /* Saved user stack pointer */
+  uint64_t *kstack;   /* Allocate base of the (aligned) kernel stack */
+  uint64_t *kstkptr;  /* Saved kernel stack pointer */
+#endif
+#endif
+};
+
+/* Name: up_irq_save, up_irq_restore, and friends.
+ *
+ * NOTE: This function should never be called from application code and,
+ * as a general rule unless you really know what you are doing, this
+ * function should not be called directly from operation system code either:
+ * Typically, the wrapper functions, enter_critical_section() and
+ * leave_critical section(), are probably what you really want.
+ */
+
+/* Return the current IRQ state */
+
+static inline irqstate_t irqstate(void)
+{
+  irqstate_t flags;
+
+  __asm__ __volatile__

Review Comment:
   Minor, optional. Can be one line as in `up_irq_restore`
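
   For reference, a one-line form could look like the sketch below (hedged: it assumes the truncated body simply reads DAIF to capture the IRQ state, which is the usual AArch64 approach; the exact instruction and clobbers should match the PR's `up_irq_restore`):
   ```c
   __asm__ __volatile__("mrs %0, daif" : "=r" (flags) :: "memory");
   ```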



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))

Review Comment:
   ```suggestion
   #  define MIN(a, b)     (((a) < (b)) ? (a) : (b))
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)
+
+/* Unsigned integer with bit position n set (signed in
+ * assembly language).
+ */
+#define BIT(n)          (1UL << (n))
+#define BIT64(_n)       (1ULL << (_n))
+
+/* Bit mask with bits 0 through n-1 (inclusive) set,
+ * or 0 if n is 0.
+ */
+#define BIT_MASK(n)     (BIT(n) - 1)
+#define BIT64_MASK(n)   (BIT64(n) - 1ULL)

Review Comment:
   Optional
   ```suggestion
   #define BIT64_MASK(n)   (BIT64(n) - BIT64(0))
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)
+
+/* Unsigned integer with bit position n set (signed in
+ * assembly language).
+ */
+#define BIT(n)          (1UL << (n))

Review Comment:
   optional
   ```suggestion
   #define BIT(n)          (UINT32_C(1) << (n))
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)
+
+/* Unsigned integer with bit position n set (signed in
+ * assembly language).
+ */
+#define BIT(n)          (1UL << (n))
+#define BIT64(_n)       (1ULL << (_n))
+
+/* Bit mask with bits 0 through n-1 (inclusive) set,
+ * or 0 if n is 0.
+ */
+#define BIT_MASK(n)     (BIT(n) - 1)
+#define BIT64_MASK(n)   (BIT64(n) - 1ULL)
+
+#define DAIFSET_FIQ_BIT     BIT(0)
+#define DAIFSET_IRQ_BIT     BIT(1)
+#define DAIFSET_ABT_BIT     BIT(2)
+#define DAIFSET_DBG_BIT     BIT(3)
+
+#define DAIFCLR_FIQ_BIT     BIT(0)
+#define DAIFCLR_IRQ_BIT     BIT(1)
+#define DAIFCLR_ABT_BIT     BIT(2)
+#define DAIFCLR_DBG_BIT     BIT(3)
+
+#define DAIF_FIQ_BIT        BIT(6)
+#define DAIF_IRQ_BIT        BIT(7)
+#define DAIF_ABT_BIT        BIT(8)
+#define DAIF_DBG_BIT        BIT(9)
+
+#define DAIF_MASK           (0xf << 6)
+
+/* SPSR M[3:0] define
+ *
+ * Arm® Architecture Registers Armv8, for Armv8-A architecture profile
+ * ( DDI 0595, ID121321 ), defined:
+ * SPSR_EL1: Saved Program Status Register (EL1)
+ * SPSR_EL2: Saved Program Status Register (EL2)
+ * SPSR_EL3: Saved Program Status Register (EL3)
+ *
+ * reference to Programmer’s Guide for ARMv8-A
+ * (ARM DEN0024A, ID050815 ), 4.1.2 Stack pointer
+ *
+ * The T suffix, indicates use of the SP_EL0 stack pointer.
+ * The H suffix, indicates use of the SP_ELx stack pointer.
+ *
+ */
+
+#define SPSR_DAIF_SHIFT     (6)
+#define SPSR_DAIF_MASK      (0xf << SPSR_DAIF_SHIFT)
+
+#define SPSR_MODE_EL0T      (0x0)
+#define SPSR_MODE_EL1T      (0x4)
+#define SPSR_MODE_EL1H      (0x5)
+#define SPSR_MODE_EL2T      (0x8)
+#define SPSR_MODE_EL2H      (0x9)
+#define SPSR_MODE_MASK      (0xf)
+
+/* Arm® Architecture Registers Armv8, for Armv8-A architecture profile
+ * ( DDI 0595, ID121321 ), defined:
+ *
+ * SCTLR_EL1: System Control Register (EL1)
+ * SCTLR_EL2: System Control Register (EL2)
+ * SCTLR_EL3: System Control Register (EL3)
+ *
+ */
+
+#define SCTLR_EL3_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(18) | BIT(16) | \
+                             BIT(11) | BIT(5)  | BIT(4))
+
+#define SCTLR_EL2_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(18) | BIT(16) | \
+                             BIT(11) | BIT(5)  | BIT(4))
+
+#define SCTLR_EL1_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(20) | BIT(11))
+
+#define SCTLR_M_BIT         BIT(0)
+#define SCTLR_A_BIT         BIT(1)
+#define SCTLR_C_BIT         BIT(2)
+#define SCTLR_SA_BIT        BIT(3)
+#define SCTLR_I_BIT         BIT(12)
+
+/* CurrentEL: Current Exception Level */
+
+#define MODE_EL_SHIFT       (0x2)
+#define MODE_EL_MASK        (0x3)
+
+#define MODE_EL3            (0x3)
+#define MODE_EL2            (0x2)
+#define MODE_EL1            (0x1)
+#define MODE_EL0            (0x0)
+
+#define GET_EL(_mode)  (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
+
+/* MPIDR_EL1, Multiprocessor Affinity Register */
+
+#define MPIDR_AFFLVL_MASK   (0xff)
+
+#define MPIDR_AFF0_SHIFT    (0)
+#define MPIDR_AFF1_SHIFT    (8)
+#define MPIDR_AFF2_SHIFT    (16)
+#define MPIDR_AFF3_SHIFT    (32)
+
+#define MPIDR_AFFLVL(mpidr, aff_level) \
+  (((mpidr) >> MPIDR_AFF ## aff_level ## _SHIFT) & MPIDR_AFFLVL_MASK)
+
+#define GET_MPIDR()             read_sysreg(mpidr_el1)
+#define MPIDR_TO_CORE(mpidr)    MPIDR_AFFLVL(mpidr, 0)

Review Comment:
   ```suggestion
   #define MPIDR_TO_CORE(mpidr)    MPIDR_AFFLVL((mpidr), 0)
   ```



##########
arch/arm64/src/common/arm64_cache.c:
##########
@@ -0,0 +1,449 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_cache.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/cache.h>
+#include <nuttx/irq.h>
+
+#include <nuttx/arch.h>
+#include <arch/irq.h>
+#include <arch/chip/chip.h>
+#include <nuttx/spinlock.h>
+
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "arm64_mmu.h"
+
+/****************************************************************************
+ * Pre-processor Macros
+ ****************************************************************************/
+
+/* Common operations for the caches
+ *
+ * WB means write-back and intends to transfer dirty cache lines to memory in
+ * a copy-back cache policy. May be a no-op in write-back cache policy.
+ *
+ * INVD means invalidate and will mark cache lines as not valid. A future
+ * access to the associated address is guaranteed to generate a memory fetch.
+ *
+ * armv8 data cache instruction:
+ *
+ * DC CIVAC (WB+INVD):
+ *   Data or unified Cache line Clean and Invalidate by VA to PoC
+ *   Clean and Invalidate data cache by address to Point of Coherency.
+ *
+ * DC CVAC (WB):
+ *   Data or unified Cache line Clean by VA to PoC
+ *   Clean data cache by address to Point of Coherency.
+ *
+ * DC IVAC (INVD):
+ *   Data or unified Cache line Invalidate by VA to PoC
+ *   Invalidate data cache by address to Point of Coherency
+ */
+
+#define CACHE_OP_WB         BIT(0)
+#define CACHE_OP_INVD       BIT(1)
+#define CACHE_OP_WB_INVD    (CACHE_OP_WB | CACHE_OP_INVD)
+
+#define LINE_MASK(line)             ((line) - 1)
+#define LINE_ALIGN_DOWN(a, line)    ((a) & ~LINE_MASK(line))
+#define LINE_ALIGN_UP(a, line) \
+  (((a) + LINE_MASK(line)) & ~LINE_MASK(line))
+
+#define dc_ops(op, val)                                          \
+  ({                                                             \
+    __asm__ volatile ("dc " op ", %0" : : "r" (val) : "memory"); \
+  })
+
+/* IC IALLUIS, Instruction Cache Invalidate All to PoU, Inner Shareable
+ * Purpose
+ * Invalidate all instruction caches in the Inner Shareable domain of
+ * the PE executing the instruction to the Point of Unification.
+ */
+
+static inline void __ic_iallu(void)
+{
+  __asm__ volatile ("ic  iallu" : : : "memory");
+}
+
+/* IC IALLU, Instruction Cache Invalidate All to PoU
+ * Purpose
+ * Invalidate all instruction caches of the PE executing
+ * the instruction to the Point of Unification.
+ */
+
+static inline void __ic_ialluis(void)
+{
+  __asm__ volatile ("ic  ialluis" : : : "memory");
+}
+
+size_t dcache_line_size;
+
+/****************************************************************************
+ * Private Function Prototypes
+ ****************************************************************************/
+
+/* operation for data cache by virtual address to PoC */
+
+static inline int arm64_dcache_range(uintptr_t start_addr,
+                                    uintptr_t end_addr, int op)
+{
+  /* Align address to line size */
+
+  start_addr = LINE_ALIGN_DOWN(start_addr, dcache_line_size);
+
+  while (start_addr < end_addr)
+    {
+      switch (op)
+        {
+        case CACHE_OP_WB:
+        {
+          dc_ops("cvac", start_addr);
+          break;
+        }
+
+        case CACHE_OP_INVD:
+        {
+          dc_ops("ivac", start_addr);
+          break;
+        }
+
+        case CACHE_OP_WB_INVD:
+        {
+          dc_ops("civac", start_addr);
+          break;
+        }

Review Comment:
   Add 2 spaces of indentation for the `case` scopes and maybe add a `default` case (see the sketch below).
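
   To illustrate (a hedged sketch, not the exact final form; the `default` body is just an example, a plain `break` or an error return would equally satisfy the comment):
   ```c
           case CACHE_OP_WB:
             {
               dc_ops("cvac", start_addr);
               break;
             }

           case CACHE_OP_INVD:
             {
               dc_ops("ivac", start_addr);
               break;
             }

           case CACHE_OP_WB_INVD:
             {
               dc_ops("civac", start_addr);
               break;
             }

           default:
             {
               break; /* Or return an error for an unknown cache op */
             }
   ```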



##########
arch/arm64/src/common/arm64_fatal.c:
##########
@@ -0,0 +1,360 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_fatal.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sys/types.h>
+#include <stdint.h>
+
+#include <arch/irq.h>
+#include <debug.h>
+#include <assert.h>
+#include <sched.h>
+#include <nuttx/arch.h>
+#include <nuttx/kmalloc.h>
+#include <nuttx/tls.h>
+#include <nuttx/board.h>
+#include <arch/chip/chip.h>
+#include <nuttx/syslog/syslog.h>
+#include "sched/sched.h"
+#include "irq/irq.h"
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "arm64_fatal.h"
+#include "arm64_mmu.h"
+#include "arm64_fatal.h"
+#include "arm64_arch_timer.h"
+
+#ifdef CONFIG_ARCH_FPU
+#include "arm64_fpu.h"
+#endif
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: print_ec_cause
+ ****************************************************************************/
+
+static void print_ec_cause(uint64_t esr)
+{
+  uint32_t ec = (uint32_t)esr >> 26;
+
+  switch (ec)
+    {
+    case 0b000000:
+    {
+      sinfo("Unknown reason\n");
+      break;
+    }
+
+    case 0b000001:
+    {
+      sinfo("Trapped WFI or WFE instruction execution\n");
+      break;
+    }
+
+    case 0b000011:
+    {
+      sinfo(
+        "Trapped MCR or MRC access with (coproc==0b1111) that "
+        "is not reported using EC 0b000000\n");
+      break;
+    }
+
+    case 0b000100:
+    {
+      sinfo(
+        "Trapped MCRR or MRRC access with (coproc==0b1111) "
+        "that is not reported using EC 0b000000\n");
+      break;
+    }
+
+    case 0b000101:
+    {
+      sinfo("Trapped MCR or MRC access with (coproc==0b1110)\n");
+      break;
+    }
+
+    case 0b000110:
+    {
+      sinfo("Trapped LDC or STC access\n");
+      break;
+    }
+
+    case 0b000111:
+    {
+      sinfo(
+        "Trapped access to SVE, Advanced SIMD, or "
+        "floating-point functionality\n");
+      break;
+    }
+
+    case 0b001100:
+    {
+      sinfo("Trapped MRRC access with (coproc==0b1110)\n");
+      break;
+    }
+
+    case 0b001101:
+    {
+      sinfo("Branch Target Exception\n");
+      break;
+    }
+
+    case 0b001110:
+    {
+      sinfo("Illegal Execution state\n");
+      break;
+    }
+
+    case 0b010001:
+    {
+      sinfo("SVC instruction execution in AArch32 state\n");
+      break;
+    }
+
+    case 0b011000:
+    {
+      sinfo(
+        "Trapped MSR, MRS or System instruction execution in "
+        "AArch64 state, that is not reported using EC "
+        "0b000000, 0b000001 or 0b000111\n");
+      break;
+    }
+
+    case 0b011001:
+    {
+      sinfo("Trapped access to SVE functionality\n");
+      break;
+    }
+
+    case 0b100000:
+    {
+      sinfo(
+        "Instruction Abort from a lower Exception level, that "
+        "might be using AArch32 or AArch64\n");
+      break;
+    }
+
+    case 0b100001:
+    {
+      sinfo(
+        "Instruction Abort taken without a change in Exception level.\n");
+      break;
+    }
+
+    case 0b100010:
+    {
+      sinfo("PC alignment fault exception.\n");
+      break;
+    }
+
+    case 0b100100:
+    {
+      sinfo(
+        "Data Abort from a lower Exception level, that might "
+        "be using AArch32 or AArch64\n");
+      break;
+    }
+
+    case 0b100101:
+    {
+      sinfo("Data Abort taken without a change in Exception level\n");
+      break;
+    }
+
+    case 0b100110:
+    {
+      sinfo("SP alignment fault exception\n");
+      break;
+    }
+
+    case 0b101000:
+    {
+      sinfo("Trapped floating-point exception taken from AArch32 state\n");
+      break;
+    }
+
+    case 0b101100:
+    {
+      sinfo("Trapped floating-point exception taken from AArch64 state.\n");
+      break;
+    }
+
+    case 0b101111:
+    {
+      sinfo("SError interrupt\n");
+      break;
+    }
+
+    case 0b110000:
+    {
+      sinfo(
+        "Breakpoint exception from a lower Exception level, "
+        "that might be using AArch32 or AArch64\n");
+      break;
+    }
+
+    case 0b110001:
+    {
+      sinfo(
+        "Breakpoint exception taken without a change in "
+        "Exception level\n");
+      break;
+    }
+
+    case 0b110010:
+    {
+      sinfo(
+        "Software Step exception from a lower Exception level, "
+        "that might be using AArch32 or AArch64\n");
+      break;
+    }
+
+    case 0b110011:
+    {
+      sinfo(
+        "Software Step exception taken without a change in "
+        "Exception level\n");
+      break;
+    }
+
+    case 0b110100:
+    {
+      sinfo(
+        "Watchpoint exception from a lower Exception level, "
+        "that might be using AArch32 or AArch64\n");
+      break;
+    }
+
+    case 0b110101:
+    {
+      sinfo(
+        "Watchpoint exception taken without a change in "
+        "Exception level.\n");
+      break;
+    }
+
+    case 0b111000:
+    {
+      sinfo("BKPT instruction execution in AArch32 state\n");
+      break;
+    }
+
+    case 0b111100:
+    {
+      sinfo("BRK instruction execution in AArch64 state.\n");
+      break;
+    }
+
+    default:
+      break;
+    }
+}
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: arm64_fatal_error
+ *
+ * Description:
+ *
+ ****************************************************************************/
+
+void arm64_fatal_error(unsigned int reason, struct regs_context * reg)
+{
+  uint64_t el, esr, elr, far;
+  int cpu = up_cpu_index();
+
+  sinfo("reason = %d\n", reason);
+  sinfo("arm64_fatal_error: CPU%d task: %s\n", cpu, running_task()->name);
+
+  if (reason != K_ERR_SPURIOUS_IRQ)
+    {
+      __asm__ volatile ("mrs %0, CurrentEL" : "=r" (el));
+
+      switch (GET_EL(el))
+        {
+        case MODE_EL1:
+        {
+          sinfo("CurrentEL: MODE_EL1\n");
+          __asm__ volatile ("mrs %0, esr_el1" : "=r" (esr));
+          __asm__ volatile ("mrs %0, far_el1" : "=r" (far));
+          __asm__ volatile ("mrs %0, elr_el1" : "=r" (elr));
+          break;
+        }
+
+        case MODE_EL2:
+        {
+          sinfo("CurrentEL: MODE_EL2\n");
+          __asm__ volatile ("mrs %0, esr_el2" : "=r" (esr));
+          __asm__ volatile ("mrs %0, far_el2" : "=r" (far));
+          __asm__ volatile ("mrs %0, elr_el2" : "=r" (elr));
+          break;
+        }
+
+        case MODE_EL3:
+        {
+          sinfo("CurrentEL: MODE_EL3\n");
+          __asm__ volatile ("mrs %0, esr_el3" : "=r" (esr));
+          __asm__ volatile ("mrs %0, far_el3" : "=r" (far));
+          __asm__ volatile ("mrs %0, elr_el3" : "=r" (elr));
+          break;
+        }
+
+        default:
+        {
+          sinfo("CurrentEL: unknown\n");

Review Comment:
   Add 2 spaces of indentation for the `case` scope.



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>

Review Comment:
   ```suggestion
   #  include <stdint.h>
   ```



##########
arch/arm64/src/common/addrenv.h:
##########
@@ -0,0 +1,105 @@
+/****************************************************************************
+ * arch/arm64/src/common/addrenv.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM64_SRC_COMMON_ADDRENV_H
+#define __ARCH_ARM64_SRC_COMMON_ADDRENV_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <sys/types.h>
+#include <stdint.h>
+#include "arm64_internal.h"
+
+#ifdef CONFIG_ARCH_ADDRENV
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* Aligned size of the kernel stack */
+
+#ifdef CONFIG_ARCH_KERNEL_STACK
+#  define ARCH_KERNEL_STACKSIZE  STACK_ALIGN_UP(CONFIG_ARCH_KERNEL_STACKSIZE)
+#endif
+
+/****************************************************************************
+ * Inline Functions
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+
+#endif /* __ASSEMBLY__ */
+
+/****************************************************************************
+ * Public Data
+ ****************************************************************************/
+
+#ifndef __ASSEMBLY__
+#ifdef __cplusplus
+#define EXTERN extern "C"
+extern "C"
+{
+#else
+#define EXTERN extern
+#endif
+
+/****************************************************************************
+ * Public Function Prototypes
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: arm_addrenv_create_region
+ *
+ * Description:
+ *   Create one memory region.
+ *
+ * Returned Value:
+ *   On success, the number of pages allocated is returned.  Otherwise, a
+ *   negated errno value is returned.
+ *
+ ****************************************************************************/
+
+int arm64_addrenv_create_region(uintptr_t **list, size_t listlen,
+                                uintptr_t vaddr, size_t regionsize,
+                                uint32_t mmuflags);
+
+/****************************************************************************
+ * Name: arm_addrenv_destroy_region
+ *
+ * Description:
+ *   Destroy one memory region.
+ *
+ ****************************************************************************/
+
+void arm64_addrenv_destroy_region(uintptr_t **list, size_tt listlen,

Review Comment:
   ```suggestion
   void arm64_addrenv_destroy_region(uintptr_t **list, size_t listlen,
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)
+
+/* Unsigned integer with bit position n set (signed in
+ * assembly language).
+ */
+#define BIT(n)          (1UL << (n))
+#define BIT64(_n)       (1ULL << (_n))

Review Comment:
   ```suggestion
   #define BIT64(n)        (1ULL << (n))
   ```
   or even
   ```suggestion
   #define BIT64(n)        (UINT64_C(1) << (n))
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)
+
+/* Unsigned integer with bit position n set (signed in
+ * assembly language).
+ */
+#define BIT(n)          (1UL << (n))
+#define BIT64(_n)       (1ULL << (_n))
+
+/* Bit mask with bits 0 through n-1 (inclusive) set,
+ * or 0 if n is 0.
+ */
+#define BIT_MASK(n)     (BIT(n) - 1)
+#define BIT64_MASK(n)   (BIT64(n) - 1ULL)
+
+#define DAIFSET_FIQ_BIT     BIT(0)
+#define DAIFSET_IRQ_BIT     BIT(1)
+#define DAIFSET_ABT_BIT     BIT(2)
+#define DAIFSET_DBG_BIT     BIT(3)
+
+#define DAIFCLR_FIQ_BIT     BIT(0)
+#define DAIFCLR_IRQ_BIT     BIT(1)
+#define DAIFCLR_ABT_BIT     BIT(2)
+#define DAIFCLR_DBG_BIT     BIT(3)
+
+#define DAIF_FIQ_BIT        BIT(6)
+#define DAIF_IRQ_BIT        BIT(7)
+#define DAIF_ABT_BIT        BIT(8)
+#define DAIF_DBG_BIT        BIT(9)
+
+#define DAIF_MASK           (0xf << 6)
+
+/* SPSR M[3:0] define
+ *
+ * Arm® Architecture Registers Armv8, for Armv8-A architecture profile
+ * ( DDI 0595, ID121321 ), defined:
+ * SPSR_EL1: Saved Program Status Register (EL1)
+ * SPSR_EL2: Saved Program Status Register (EL2)
+ * SPSR_EL3: Saved Program Status Register (EL3)
+ *
+ * reference to Programmer’s Guide for ARMv8-A
+ * (ARM DEN0024A, ID050815 ), 4.1.2 Stack pointer
+ *
+ * The T suffix, indicates use of the SP_EL0 stack pointer.
+ * The H suffix, indicates use of the SP_ELx stack pointer.
+ *
+ */
+
+#define SPSR_DAIF_SHIFT     (6)
+#define SPSR_DAIF_MASK      (0xf << SPSR_DAIF_SHIFT)
+
+#define SPSR_MODE_EL0T      (0x0)
+#define SPSR_MODE_EL1T      (0x4)
+#define SPSR_MODE_EL1H      (0x5)
+#define SPSR_MODE_EL2T      (0x8)
+#define SPSR_MODE_EL2H      (0x9)
+#define SPSR_MODE_MASK      (0xf)
+
+/* Arm® Architecture Registers Armv8, for Armv8-A architecture profile
+ * ( DDI 0595, ID121321 ), defined:
+ *
+ * SCTLR_EL1: System Control Register (EL1)
+ * SCTLR_EL2: System Control Register (EL2)
+ * SCTLR_EL3: System Control Register (EL3)
+ *
+ */
+
+#define SCTLR_EL3_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(18) | BIT(16) | \
+                             BIT(11) | BIT(5)  | BIT(4))
+
+#define SCTLR_EL2_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(18) | BIT(16) | \
+                             BIT(11) | BIT(5)  | BIT(4))
+
+#define SCTLR_EL1_RES1      (BIT(29) | BIT(28) | BIT(23) | \
+                             BIT(22) | BIT(20) | BIT(11))
+
+#define SCTLR_M_BIT         BIT(0)
+#define SCTLR_A_BIT         BIT(1)
+#define SCTLR_C_BIT         BIT(2)
+#define SCTLR_SA_BIT        BIT(3)
+#define SCTLR_I_BIT         BIT(12)
+
+/* CurrentEL: Current Exception Level */
+
+#define MODE_EL_SHIFT       (0x2)
+#define MODE_EL_MASK        (0x3)
+
+#define MODE_EL3            (0x3)
+#define MODE_EL2            (0x2)
+#define MODE_EL1            (0x1)
+#define MODE_EL0            (0x0)
+
+#define GET_EL(_mode)  (((_mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)

Review Comment:
   optional
   ```suggestion
   #define GET_EL(mode)  (((mode) >> MODE_EL_SHIFT) & MODE_EL_MASK)
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))

Review Comment:
   ```suggestion
   #  define MAX(a, b)     (((a) > (b)) ? (a) : (b))
   ```



##########
arch/arm64/src/common/arm64_arch.h:
##########
@@ -0,0 +1,531 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_arch.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+#define ___ARCH_ARM64_SRC_COMMON_ARM64_ARCH_H
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#ifndef __ASSEMBLY__
+# include <stdint.h>
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+#define STRINGIFY(x)    #x
+#define ARRAY_SIZE(x)   (sizeof(x) / sizeof((x)[0]))
+
+/* define MAX(a, b)/MIN(a, b)
+ * The larger/smaller value between a and b.
+ * Arguments are evaluated twice.
+ */
+#ifndef MIN
+#define MAX(a, b)       (((a) > (b)) ? (a) : (b))
+#endif
+
+#ifndef MIN
+#define MIN(a, b)       (((a) < (b)) ? (a) : (b))
+#endif
+
+/* Number of bytes in @p x kibibytes/mebibytes/gibibytes */
+#define KB(x)           ((x) << 10)
+#define MB(x)           (KB(x) << 10)
+#define GB(x)           (MB(x) << 10)

Review Comment:
   `GB` is a `10 + 10 + 10 == 30` bit shift, which leaves only 2 bits for the value if the argument is a 32-bit `int`. Maybe it can be converted to `ull`, or use `UINT64_C` at least for `GB`? (A sketch of the idea follows.)
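
   A hedged sketch of that idea (one possible shape, not necessarily the form to adopt):
   ```c
   /* Widen to 64 bits so that GB(x) cannot overflow a 32-bit int */

   #define KB(x)           (((uint64_t)(x)) << 10)
   #define MB(x)           (KB(x) << 10)
   #define GB(x)           (MB(x) << 10)
   ```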



##########
arch/arm64/src/common/arm64_assert.c:
##########
@@ -0,0 +1,582 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_assert.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <nuttx/irq.h>
+#include <nuttx/arch.h>
+#include <assert.h>
+#include <debug.h>
+#include <nuttx/board.h>
+#include <nuttx/syslog/syslog.h>
+#include <nuttx/usb/usbdev_trace.h>
+
+#include "sched/sched.h"
+#include "irq/irq.h"
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "chip.h"
+
+#ifdef CONFIG_ARCH_FPU
+#include "arm64_fpu.h"
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* USB trace dumping */
+
+#ifndef CONFIG_USBDEV_TRACE
+#  undef CONFIG_ARCH_USBDUMP
+#endif
+
+#ifndef CONFIG_BOARD_RESET_ON_ASSERT
+#  define CONFIG_BOARD_RESET_ON_ASSERT 0
+#endif
+
+/****************************************************************************
+ * Name: arm_registerdump
+ ****************************************************************************/
+
+static void arm64_registerdump(struct regs_context * regs)
+{
+  _alert("stack = %p\n", regs);
+  _alert("x0:   0x%-16"PRIx64"  x1:   0x%"PRIx64"\n",
+    regs->regs[REG_X0], regs->regs[REG_X1]);
+  _alert("x2:   0x%-16"PRIx64"  x3:   0x%"PRIx64"\n",
+    regs->regs[REG_X2], regs->regs[REG_X3]);
+  _alert("x4:   0x%-16"PRIx64"  x5:   0x%"PRIx64"\n",
+    regs->regs[REG_X4], regs->regs[REG_X5]);
+  _alert("x6:   0x%-16"PRIx64"  x7:   0x%"PRIx64"\n",
+    regs->regs[REG_X6], regs->regs[REG_X7]);
+  _alert("x8:   0x%-16"PRIx64"  x9:   0x%"PRIx64"\n",
+    regs->regs[REG_X8], regs->regs[REG_X9]);
+  _alert("x10:  0x%-16"PRIx64"  x11:  0x%"PRIx64"\n",
+    regs->regs[REG_X10], regs->regs[REG_X11]);
+  _alert("x12:  0x%-16"PRIx64"  x13:  0x%"PRIx64"\n",
+    regs->regs[REG_X12], regs->regs[REG_X13]);
+  _alert("x14:  0x%-16"PRIx64"  x15:  0x%"PRIx64"\n",
+    regs->regs[REG_X14], regs->regs[REG_X15]);
+  _alert("x16:  0x%-16"PRIx64"  x17:  0x%"PRIx64"\n",
+    regs->regs[REG_X16], regs->regs[REG_X17]);
+  _alert("x18:  0x%-16"PRIx64"  x19:  0x%"PRIx64"\n",
+    regs->regs[REG_X18], regs->regs[REG_X19]);
+  _alert("x20:  0x%-16"PRIx64"  x21:  0x%"PRIx64"\n",
+    regs->regs[REG_X20], regs->regs[REG_X21]);
+  _alert("x22:  0x%-16"PRIx64"  x23:  0x%"PRIx64"\n",
+    regs->regs[REG_X22], regs->regs[REG_X23]);
+  _alert("x24:  0x%-16"PRIx64"  x25:  0x%"PRIx64"\n",
+    regs->regs[REG_X24], regs->regs[REG_X25]);
+  _alert("x26:  0x%-16"PRIx64"  x27:  0x%"PRIx64"\n",
+    regs->regs[REG_X26], regs->regs[REG_X27]);
+  _alert("x28:  0x%-16"PRIx64"  x29:  0x%"PRIx64"\n",
+    regs->regs[REG_X28], regs->regs[REG_X29]);
+  _alert("x30:  0x%-16"PRIx64"\n", regs->regs[REG_X30]);
+
+  _alert("\n");
+  _alert("STATUS Registers:\n");
+  _alert("SPSR:      0x%-16"PRIx64"\n", regs->spsr);
+  _alert("ELR:       0x%-16"PRIx64"\n", regs->elr);
+  _alert("SP_EL0:    0x%-16"PRIx64"\n", regs->sp_el0);
+  _alert("SP_ELX:    0x%-16"PRIx64"\n", regs->sp_elx);
+  _alert("TPIDR_EL0: 0x%-16"PRIx64"\n", regs->tpidr_el0);
+  _alert("TPIDR_EL1: 0x%-16"PRIx64"\n", regs->tpidr_el1);
+  _alert("EXE_DEPTH: 0x%-16"PRIx64"\n", regs->exe_depth);
+}
+
+#ifdef CONFIG_ARCH_STACKDUMP
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: arm_stackdump
+ ****************************************************************************/
+
+static void arm64_stackdump(uint64_t sp, uint64_t stack_top)
+{
+  uint64_t stack;
+
+  /* Flush any buffered SYSLOG data to avoid overwrite */
+
+  syslog_flush();
+
+  for (stack = sp & ~0x1f; stack < (stack_top & ~0x1f); stack += 64)
+    {
+      uint64_t *ptr = (uint64_t *)stack;
+      _alert("%08" PRIx64 ": %08" PRIx64 " %08" PRIx64 " %08" PRIx64
+             " %08" PRIx64 " %08" PRIx64 " %08" PRIx64 " %08" PRIx64
+             " %08" PRIx64 "\n",
+             stack, ptr[0], ptr[1], ptr[2], ptr[3],
+             ptr[4], ptr[5], ptr[6], ptr[7]);
+    }
+}
+
+/****************************************************************************
+ * Name: arm_dump_task
+ ****************************************************************************/
+
+static void arm64_dump_task(struct tcb_s *tcb, void *arg)
+{
+  char args[64] = "";
+#ifdef CONFIG_STACK_COLORATION
+  uint64_t stack_filled = 0;
+  uint64_t stack_used;
+#endif
+#ifdef CONFIG_SCHED_CPULOAD
+  struct cpuload_s cpuload;
+  uint64_t fracpart;
+  uint64_t intpart;
+  uint64_t tmp;
+
+  clock_cpuload(tcb->pid, &cpuload);
+
+  if (cpuload.total > 0)
+    {
+      tmp      = (1000 * cpuload.active) / cpuload.total;
+      intpart  = tmp / 10;
+      fracpart = tmp - 10 * intpart;
+    }
+  else
+    {
+      intpart  = 0;
+      fracpart = 0;
+    }
+#endif
+
+#ifdef CONFIG_STACK_COLORATION
+  stack_used = up_check_tcbstack(tcb);
+  if (tcb->adj_stack_size > 0 && stack_used > 0)
+    {
+      /* Use fixed-point math with one decimal place */
+
+      stack_filled = 10 * 100 * stack_used / tcb->adj_stack_size;
+    }
+#endif
+
+#ifndef CONFIG_DISABLE_PTHREAD
+  if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
+    {
+      struct pthread_tcb_s *ptcb = (struct pthread_tcb_s *)tcb;
+
+      snprintf(args, sizeof(args), "%p ", ptcb->arg);
+    }
+  else
+#endif
+    {
+      FAR char **argv = tcb->group->tg_info->argv + 1;

Review Comment:
   ```suggestion
         char **argv = tcb->group->tg_info->argv + 1;
   ```



##########
arch/arm64/src/common/arm64_cache.c:
##########
@@ -0,0 +1,449 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_cache.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/cache.h>
+#include <nuttx/irq.h>
+
+#include <nuttx/arch.h>
+#include <arch/irq.h>
+#include <arch/chip/chip.h>
+#include <nuttx/spinlock.h>
+
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "arm64_mmu.h"
+
+/****************************************************************************
+ * Pre-processor Macros
+ ****************************************************************************/
+
+/* Common operations for the caches
+ *
+ * WB means write-back and intends to transfer dirty cache lines to memory in
+ * a copy-back cache policy. May be a no-op in write-back cache policy.
+ *
+ * INVD means invalidate and will mark cache lines as not valid. A future
+ * access to the associated address is guaranteed to generate a memory fetch.
+ *
+ * armv8 data cache instruction:
+ *
+ * DC CIVAC (WB+INVD):
+ *   Data or unified Cache line Clean and Invalidate by VA to PoC
+ *   Clean and Invalidate data cache by address to Point of Coherency.
+ *
+ * DC CVAC (WB):
+ *   Data or unified Cache line Clean by VA to PoC
+ *   Clean data cache by address to Point of Coherency.
+ *
+ * DC IVAC (INVD):
+ *   Data or unified Cache line Invalidate by VA to PoC
+ *   Invalidate data cache by address to Point of Coherency
+ */
+
+#define CACHE_OP_WB         BIT(0)
+#define CACHE_OP_INVD       BIT(1)
+#define CACHE_OP_WB_INVD    (CACHE_OP_WB | CACHE_OP_INVD)
+
+#define LINE_MASK(line)             ((line) - 1)
+#define LINE_ALIGN_DOWN(a, line)    ((a) & ~LINE_MASK(line))
+#define LINE_ALIGN_UP(a, line) \
+  (((a) + LINE_MASK(line)) & ~LINE_MASK(line))
+
+#define dc_ops(op, val)                                          \
+  ({                                                             \
+    __asm__ volatile ("dc " op ", %0" : : "r" (val) : "memory"); \
+  })
+
+/* IC IALLUIS, Instruction Cache Invalidate All to PoU, Inner Shareable
+ * Purpose
+ * Invalidate all instruction caches in the Inner Shareable domain of
+ * the PE executing the instruction to the Point of Unification.
+ */
+
+static inline void __ic_iallu(void)
+{
+  __asm__ volatile ("ic  iallu" : : : "memory");
+}
+
+/* IC IALLU, Instruction Cache Invalidate All to PoU
+ * Purpose
+ * Invalidate all instruction caches of the PE executing
+ * the instruction to the Point of Unification.
+ */
+
+static inline void __ic_ialluis(void)
+{
+  __asm__ volatile ("ic  ialluis" : : : "memory");
+}
+
+size_t dcache_line_size;
+
+/****************************************************************************
+ * Private Function Prototypes
+ ****************************************************************************/
+
+/* operation for data cache by virtual address to PoC */
+
+static inline int arm64_dcache_range(uintptr_t start_addr,
+                                    uintptr_t end_addr, int op)

Review Comment:
   ```suggestion
                                        uintptr_t end_addr, int op)
   ```



##########
arch/arm64/src/common/arm64_assert.c:
##########
@@ -0,0 +1,582 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_assert.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+
+#include <nuttx/irq.h>
+#include <nuttx/arch.h>
+#include <assert.h>
+#include <debug.h>
+#include <nuttx/board.h>
+#include <nuttx/syslog/syslog.h>
+#include <nuttx/usb/usbdev_trace.h>
+
+#include "sched/sched.h"
+#include "irq/irq.h"
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "chip.h"
+
+#ifdef CONFIG_ARCH_FPU
+#include "arm64_fpu.h"
+#endif
+
+/****************************************************************************
+ * Pre-processor Definitions
+ ****************************************************************************/
+
+/* USB trace dumping */
+
+#ifndef CONFIG_USBDEV_TRACE
+#  undef CONFIG_ARCH_USBDUMP
+#endif
+
+#ifndef CONFIG_BOARD_RESET_ON_ASSERT
+#  define CONFIG_BOARD_RESET_ON_ASSERT 0
+#endif
+
+/****************************************************************************
+ * Name: arm_registerdump
+ ****************************************************************************/
+
+static void arm64_registerdump(struct regs_context * regs)
+{
+  _alert("stack = %p\n", regs);
+  _alert("x0:   0x%-16"PRIx64"  x1:   0x%"PRIx64"\n",
+    regs->regs[REG_X0], regs->regs[REG_X1]);
+  _alert("x2:   0x%-16"PRIx64"  x3:   0x%"PRIx64"\n",
+    regs->regs[REG_X2], regs->regs[REG_X3]);
+  _alert("x4:   0x%-16"PRIx64"  x5:   0x%"PRIx64"\n",
+    regs->regs[REG_X4], regs->regs[REG_X5]);
+  _alert("x6:   0x%-16"PRIx64"  x7:   0x%"PRIx64"\n",
+    regs->regs[REG_X6], regs->regs[REG_X7]);
+  _alert("x8:   0x%-16"PRIx64"  x9:   0x%"PRIx64"\n",
+    regs->regs[REG_X8], regs->regs[REG_X9]);
+  _alert("x10:  0x%-16"PRIx64"  x11:  0x%"PRIx64"\n",
+    regs->regs[REG_X10], regs->regs[REG_X11]);
+  _alert("x12:  0x%-16"PRIx64"  x13:  0x%"PRIx64"\n",
+    regs->regs[REG_X12], regs->regs[REG_X13]);
+  _alert("x14:  0x%-16"PRIx64"  x15:  0x%"PRIx64"\n",
+    regs->regs[REG_X14], regs->regs[REG_X15]);
+  _alert("x16:  0x%-16"PRIx64"  x17:  0x%"PRIx64"\n",
+    regs->regs[REG_X16], regs->regs[REG_X17]);
+  _alert("x18:  0x%-16"PRIx64"  x19:  0x%"PRIx64"\n",
+    regs->regs[REG_X18], regs->regs[REG_X19]);
+  _alert("x20:  0x%-16"PRIx64"  x21:  0x%"PRIx64"\n",
+    regs->regs[REG_X20], regs->regs[REG_X21]);
+  _alert("x22:  0x%-16"PRIx64"  x23:  0x%"PRIx64"\n",
+    regs->regs[REG_X22], regs->regs[REG_X23]);
+  _alert("x24:  0x%-16"PRIx64"  x25:  0x%"PRIx64"\n",
+    regs->regs[REG_X24], regs->regs[REG_X25]);
+  _alert("x26:  0x%-16"PRIx64"  x27:  0x%"PRIx64"\n",
+    regs->regs[REG_X26], regs->regs[REG_X27]);
+  _alert("x28:  0x%-16"PRIx64"  x29:  0x%"PRIx64"\n",
+    regs->regs[REG_X28], regs->regs[REG_X29]);
+  _alert("x30:  0x%-16"PRIx64"\n", regs->regs[REG_X30]);
+
+  _alert("\n");
+  _alert("STATUS Registers:\n");
+  _alert("SPSR:      0x%-16"PRIx64"\n", regs->spsr);
+  _alert("ELR:       0x%-16"PRIx64"\n", regs->elr);
+  _alert("SP_EL0:    0x%-16"PRIx64"\n", regs->sp_el0);
+  _alert("SP_ELX:    0x%-16"PRIx64"\n", regs->sp_elx);
+  _alert("TPIDR_EL0: 0x%-16"PRIx64"\n", regs->tpidr_el0);
+  _alert("TPIDR_EL1: 0x%-16"PRIx64"\n", regs->tpidr_el1);
+  _alert("EXE_DEPTH: 0x%-16"PRIx64"\n", regs->exe_depth);
+}
+
+#ifdef CONFIG_ARCH_STACKDUMP
+
+/****************************************************************************
+ * Private Functions
+ ****************************************************************************/
+
+/****************************************************************************
+ * Name: arm_stackdump
+ ****************************************************************************/
+
+static void arm64_stackdump(uint64_t sp, uint64_t stack_top)
+{
+  uint64_t stack;
+
+  /* Flush any buffered SYSLOG data to avoid overwrite */
+
+  syslog_flush();
+
+  for (stack = sp & ~0x1f; stack < (stack_top & ~0x1f); stack += 64)
+    {
+      uint64_t *ptr = (uint64_t *)stack;
+      _alert("%08" PRIx64 ": %08" PRIx64 " %08" PRIx64 " %08" PRIx64
+             " %08" PRIx64 " %08" PRIx64 " %08" PRIx64 " %08" PRIx64
+             " %08" PRIx64 "\n",
+             stack, ptr[0], ptr[1], ptr[2], ptr[3],
+             ptr[4], ptr[5], ptr[6], ptr[7]);
+    }
+}
+
+/****************************************************************************
+ * Name: arm_dump_task
+ ****************************************************************************/
+
+static void arm64_dump_task(struct tcb_s *tcb, void *arg)
+{
+  char args[64] = "";
+#ifdef CONFIG_STACK_COLORATION
+  uint64_t stack_filled = 0;
+  uint64_t stack_used;
+#endif
+#ifdef CONFIG_SCHED_CPULOAD
+  struct cpuload_s cpuload;
+  uint64_t fracpart;
+  uint64_t intpart;
+  uint64_t tmp;
+
+  clock_cpuload(tcb->pid, &cpuload);
+
+  if (cpuload.total > 0)
+    {
+      tmp      = (1000 * cpuload.active) / cpuload.total;
+      intpart  = tmp / 10;
+      fracpart = tmp - 10 * intpart;
+    }
+  else
+    {
+      intpart  = 0;
+      fracpart = 0;
+    }
+#endif
+
+#ifdef CONFIG_STACK_COLORATION
+  stack_used = up_check_tcbstack(tcb);
+  if (tcb->adj_stack_size > 0 && stack_used > 0)
+    {
+      /* Use fixed-point math with one decimal place */
+
+      stack_filled = 10 * 100 * stack_used / tcb->adj_stack_size;
+    }
+#endif
+
+#ifndef CONFIG_DISABLE_PTHREAD
+  if ((tcb->flags & TCB_FLAG_TTYPE_MASK) == TCB_FLAG_TTYPE_PTHREAD)
+    {
+      struct pthread_tcb_s *ptcb = (struct pthread_tcb_s *)tcb;
+
+      snprintf(args, sizeof(args), "%p ", ptcb->arg);
+    }
+  else
+#endif
+    {
+      FAR char **argv = tcb->group->tg_info->argv + 1;
+      size_t npos = 0;
+
+      while (*argv != NULL && npos < sizeof(args))
+        {
+          npos += snprintf(args + npos, sizeof(args) - npos, " %s", *argv++);
+        }
+    }
+
+  /* Dump interesting properties of this task */
+
+  _alert("  %4d   %4d"
+#ifdef CONFIG_SMP
+         "  %4d"
+#endif
+#ifdef CONFIG_STACK_COLORATION
+         "   %7lu"
+#endif
+         "   %7lu"
+#ifdef CONFIG_STACK_COLORATION
+         "   %3" PRId64 ".%1" PRId64 "%%%c"
+#endif
+#ifdef CONFIG_SCHED_CPULOAD
+         "   %3" PRId64 ".%01" PRId64 "%%"
+#endif
+#if CONFIG_TASK_NAME_SIZE > 0
+         "   %s %s\n",
+#else
+         "   %s\n",
+#endif
+         tcb->pid, tcb->sched_priority,
+#ifdef CONFIG_SMP
+         tcb->cpu,
+#endif
+#ifdef CONFIG_STACK_COLORATION
+         (unsigned long)up_check_tcbstack(tcb),
+#endif
+         (unsigned long)tcb->adj_stack_size
+#ifdef CONFIG_STACK_COLORATION
+         , stack_filled / 10, stack_filled % 10,
+         (stack_filled >= 10 * 80 ? '!' : ' ')
+#endif
+#ifdef CONFIG_SCHED_CPULOAD
+         , intpart, fracpart
+#endif
+#if CONFIG_TASK_NAME_SIZE > 0

Review Comment:
   Please sync this file with the ARM 32-bit version; it has been reworked a bit.



##########
arch/arm64/src/common/arm64_boot.c:
##########
@@ -0,0 +1,185 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_boot.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <nuttx/arch.h>
+#include <nuttx/init.h>
+
+#include "arm64_internal.h"
+#include "arm64_arch.h"
+
+extern void *_vector_table[];
+
+/****************************************************************************
+ * Public Functions
+ ****************************************************************************/
+
+void arm64_boot_el3_init(void)
+{
+  uint64_t reg;
+
+  /* Setup vector table */
+
+  write_sysreg((uint64_t)_vector_table, vbar_el3);
+  __ISB();
+
+  reg   = 0U;                   /* Mostly RES0 */
+  reg   &= ~(CPTR_TTA_BIT |     /* Do not trap sysreg accesses */
+             CPTR_TFP_BIT |     /* Do not trap SVE, SIMD and FP */
+             CPTR_TCPAC_BIT);   /* Do not trap CPTR_EL2 / CPACR_EL1 accesses
+                                 */
+
+  /* CPTR_EL3, Architectural Feature Trap Register (EL3) */
+
+  write_sysreg(reg, cptr_el3);
+
+  reg   = 0U;               /* Reset */
+  reg   |= SCR_NS_BIT;      /* EL2 / EL3 non-secure */
+  reg   |= (SCR_RES1 |      /* RES1 */
+            SCR_RW_BIT |    /* EL2 execution state is AArch64 */
+            SCR_ST_BIT |    /* Do not trap EL1 accesses to timer */
+            SCR_HCE_BIT |   /* Do not trap HVC */
+            SCR_SMD_BIT);   /* Do not trap SMC */
+  write_sysreg(reg, scr_el3);
+
+  reg   = read_sysreg(ICC_SRE_EL3);
+  reg   |= (ICC_SRE_ELX_DFB_BIT |   /* Disable FIQ bypass */
+            ICC_SRE_ELX_DIB_BIT |   /* Disable IRQ bypass */
+            ICC_SRE_ELX_SRE_BIT |   /* System register interface is used */
+            ICC_SRE_EL3_EN_BIT);    /* Enables lower Exception level access to
+                                     * ICC_SRE_EL1 */
+  write_sysreg(reg, ICC_SRE_EL3);
+
+  __ISB();
+}
+
+void arm64_boot_el3_get_next_el(uint64_t switch_addr)
+{
+  uint64_t spsr;
+
+  write_sysreg(switch_addr, elr_el3);
+
+  /* Mask the DAIF */
+
+  spsr  = SPSR_DAIF_MASK;
+  spsr  |= SPSR_MODE_EL2T;
+
+  write_sysreg(spsr, spsr_el3);
+}
+
+void arm64_boot_el2_init(void)
+{
+  uint64_t reg;
+
+  reg   = read_sysreg(sctlr_el2);
+  reg   |= (SCTLR_EL2_RES1 |    /* RES1 */
+            SCTLR_I_BIT |       /* Enable i-cache */
+            SCTLR_SA_BIT);      /* Enable SP alignment check */
+  write_sysreg(reg, sctlr_el2);
+
+  reg   = read_sysreg(hcr_el2);
+  reg   |= HCR_RW_BIT;      /* EL1 Execution state is AArch64 */
+  write_sysreg(reg, hcr_el2);
+
+  reg   = 0U;                   /* RES0 */
+  reg   |= CPTR_EL2_RES1;       /* RES1 */
+  reg   &= ~(CPTR_TFP_BIT |     /* Do not trap SVE, SIMD and FP */
+             CPTR_TCPAC_BIT);   /* Do not trap CPACR_EL1 accesses */
+  write_sysreg(reg, cptr_el2);
+
+  /* Enable EL1 access to timers */
+
+  reg   = read_sysreg(cnthctl_el2);
+  reg   |= (CNTHCTL_EL2_EL1PCEN_EN | CNTHCTL_EL2_EL1PCTEN_EN);
+  write_sysreg(reg, cnthctl_el2);
+
+  zero_sysreg(cntvoff_el2);       /* Set 64-bit virtual timer offset to 0 */
+
+#ifdef CONFIG_ARCH_ARMV8R
+  zero_sysreg(cnthps_ctl_el2);
+#else
+  zero_sysreg(cnthp_ctl_el2);
+#endif
+
+  /* Enable this if/when we use the hypervisor timer.
+   * write_cnthp_cval_el2(~(uint64_t)0);
+   */
+
+  __ISB();
+}
+
+void arm64_boot_el1_init(void)
+{
+  uint64_t reg;
+
+  /* Setup vector table */
+
+  write_sysreg((uint64_t)_vector_table, vbar_el1);
+  __ISB();
+
+  reg   = 0U;                       /* RES0 */
+  reg   |= CPACR_EL1_FPEN_NOTRAP;   /* Do not trap NEON/SIMD/FP initially */
+
+  /* TODO: CONFIG_FLOAT_*_FORBIDDEN */
+
+  write_sysreg(reg, cpacr_el1);
+
+  reg   = read_sysreg(sctlr_el1);
+  reg   |= (SCTLR_EL1_RES1 |    /* RES1 */
+            SCTLR_I_BIT |       /* Enable i-cache */
+            SCTLR_SA_BIT);      /* Enable SP alignment check */
+  write_sysreg(reg, sctlr_el1);
+
+  write_sysreg((~(uint64_t)0), cntv_cval_el0);
+
+  /* Enable these if/when we use the corresponding timers.
+   * write_cntp_cval_el0(~(uint64_t)0);
+   * write_cntps_cval_el1(~(uint64_t)0);
+   */
+
+  __ISB();
+}
+
+/* These simple memset alternatives are necessary
+ * as the function at libc is depend on the MMU
+ * to be active.
+ */
+
+static void boot_early_memset(void *dst, int c, size_t n)
+{
+  uint8_t *d = dst;
+
+  while (n--)
+    {
+      *d++ = c;
+    }
+}
+
+void arm64_boot_primary_c_routine(void)
+{
+  boot_early_memset(_sbss, 0, _ebss - _sbss);

Review Comment:
   Can we clear the region with a 64-bit wide loop instead of byte by byte? Something like the sketch below.
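   A rough sketch of what I mean; the name `boot_early_bzero64` is just for illustration, and it assumes `_sbss`/`_ebss` are 8-byte aligned (the linker script would have to guarantee that):

   ```c
   #include <stddef.h>
   #include <stdint.h>

   /* Hypothetical 64-bit clear loop: zeroes eight bytes per iteration.
    * An unaligned tail would still need byte-wise handling.
    */

   static void boot_early_bzero64(void *dst, size_t n)
   {
     uint64_t *d = dst;

     while (n >= sizeof(uint64_t))
       {
         *d++ = 0;
         n   -= sizeof(uint64_t);
       }
   }
   ```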



##########
arch/arm64/src/common/arm64_fpu.c:
##########
@@ -0,0 +1,249 @@
+/***************************************************************************
+ * arch/arm64/src/common/arm64_fpu.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ***************************************************************************/
+
+/***************************************************************************
+ * Included Files
+ ***************************************************************************/
+
+#include <nuttx/config.h>
+
+#include <inttypes.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <debug.h>
+#include <nuttx/sched.h>
+#include <nuttx/arch.h>
+#include <arch/irq.h>
+
+#include "sched/sched.h"
+#include "arm64_arch.h"
+#include "arm64_vfork.h"
+#include "arm64_internal.h"
+#include "arm64_fatal.h"
+#include "arm64_fpu.h"
+
+static struct fpu_reg g_idle_thread_fpu[CONFIG_SMP_NCPUS];
+
+struct arm64_cpu_fpu_context
+{
+  /* owner of current CPU's FPU */
+
+  struct tcb_s * fpu_owner;
+
+  struct tcb_s * idle_thread;
+
+  /* for statistic propose */
+
+  int save_count;
+  int restore_count;
+  int switch_count;
+  int exe_depth_count;
+};
+
+static struct arm64_cpu_fpu_context g_cpu_fpu_ctx[CONFIG_SMP_NCPUS];
+
+/***************************************************************************
+ * Private Data
+ ***************************************************************************/
+
+/***************************************************************************
+ * Public Functions
+ ***************************************************************************/
+
+void arm64_init_fpu(struct tcb_s *tcb)
+{
+  if (tcb->pid < CONFIG_SMP_NCPUS)
+    {
+      memset(&g_cpu_fpu_ctx[this_cpu()], 0,
+             sizeof(struct arm64_cpu_fpu_context));
+      g_cpu_fpu_ctx[this_cpu()].idle_thread = tcb;
+
+      tcb->xcp.fpu_regs = &g_idle_thread_fpu[this_cpu()];
+    }
+
+  memset(tcb->xcp.fpu_regs, 0, sizeof(struct fpu_reg));
+  tcb->xcp.fpu_regs->fpu_trap = 0;
+}
+
+void arm64_destory_fpu(struct tcb_s * tcb)
+{
+  struct tcb_s * owner;
+
+  /* save current fpu owner's context */
+
+  owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner;
+
+  if (owner == tcb)
+    {
+      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
+    }
+}
+
+/* enable FPU access trap */
+
+static void arm64_fpu_access_trap_enable(void)
+{
+  uint64_t cpacr;
+
+  cpacr = read_sysreg(cpacr_el1);
+  cpacr &= ~CPACR_EL1_FPEN_NOTRAP;
+  write_sysreg(cpacr, cpacr_el1);
+
+  __ISB();
+}
+
+/* disable FPU access trap */
+
+static void arm64_fpu_access_trap_disable(void)
+{
+  uint64_t cpacr;
+
+  cpacr = read_sysreg(cpacr_el1);
+
+  cpacr |= CPACR_EL1_FPEN_NOTRAP;
+
+  write_sysreg(cpacr, cpacr_el1);
+
+  __ISB();
+}
+
+/***************************************************************************
+ * Name: arm64_fpu_enter_exception
+ *
+ * Description:
+ *   called at every time get into a exception
+ *
+ ***************************************************************************/
+
+void arm64_fpu_enter_exception(void)
+{
+}
+
+void arm64_fpu_exit_exception(void)
+{
+}
+
+void arm64_fpu_trap(struct esf_reg * regs)
+{
+  struct tcb_s * owner;
+
+  /* disable fpu trap access */
+
+  arm64_fpu_access_trap_disable();
+
+  /* save current fpu owner's context */
+
+  owner = g_cpu_fpu_ctx[this_cpu()].fpu_owner;
+
+  if (owner != NULL)
+    {
+      arm64_fpu_save(owner->xcp.fpu_regs);
+      __DSB();
+      g_cpu_fpu_ctx[this_cpu()].save_count++;
+      g_cpu_fpu_ctx[this_cpu()].fpu_owner = NULL;
+    }
+
+  if (arch_get_exception_depth() > 1)
+    {
+      /* if get_exception_depth > 1
+       * it means FPU access exception occurred in exception context
+       * switch FPU owner to idle thread
+       */
+
+      owner = g_cpu_fpu_ctx[this_cpu()].idle_thread;
+    }
+  else
+    {
+      owner = (struct tcb_s *)arch_get_current_tcb();
+    }
+
+  /* restore our context */
+
+  arm64_fpu_restore(owner->xcp.fpu_regs);
+  g_cpu_fpu_ctx[this_cpu()].restore_count++;
+
+  /* become new owner */
+
+  g_cpu_fpu_ctx[this_cpu()].fpu_owner   = owner;
+  owner->xcp.fpu_regs->fpu_trap         = 1;
+}
+
+void arm64_fpu_context_restore(void)
+{
+  struct tcb_s *new_tcb = (struct tcb_s *)arch_get_current_tcb();
+
+  arm64_fpu_access_trap_enable();
+
+  if (new_tcb->xcp.fpu_regs->fpu_trap == 0)
+    {
+      /* FPU trap hasn't happened at this task */
+
+      arm64_fpu_access_trap_enable();
+    }
+  else
+    {
+      /* FPU trap has happened at this task */
+
+      if (new_tcb == g_cpu_fpu_ctx[this_cpu()].fpu_owner)
+        {
+          arm64_fpu_access_trap_disable();
+        }
+      else
+        {
+          arm64_fpu_access_trap_enable();
+        }
+    }
+
+  g_cpu_fpu_ctx[this_cpu()].switch_count++;
+}
+
+void arm64_fpu_enable(void)
+{
+  irqstate_t flags = up_irq_save();
+
+  arm64_fpu_access_trap_enable();
+  up_irq_restore(flags);
+}
+
+void arm64_fpu_disable(void)
+{
+  irqstate_t flags = up_irq_save();
+
+  arm64_fpu_access_trap_disable();
+  up_irq_restore(flags);
+}
+
+/***************************************************************************
+ * Name: up_fpucmp
+ *
+ * Description:
+ *   compare FPU areas from thread context
+ *
+ ***************************************************************************/
+
+bool up_fpucmp(const void *saveregs1, const void *saveregs2)
+{
+  const uint64_t  *regs1  = saveregs1 + XCPTCONTEXT_GP_SIZE;
+  const uint64_t  *regs2  = saveregs2 + XCPTCONTEXT_GP_SIZE;

Review Comment:
   ```suggestion
     const uint64_t *regs1  = saveregs1 + XCPTCONTEXT_GP_SIZE;
     const uint64_t *regs2  = saveregs2 + XCPTCONTEXT_GP_SIZE;
   ```



##########
arch/arm64/src/common/arm64_fatal.h:
##########
@@ -0,0 +1,77 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_fatal.h
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+#ifndef __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H
+#define __ARCH_ARM64_SRC_COMMON_ARM64_FATAL_H
+
+/**
+ * @defgroup fatal_apis Fatal error APIs
+ * @ingroup kernel_apis
+ * @{
+ */
+
+#define K_ERR_CPU_EXCEPTION            (0)
+#define K_ERR_CPU_MODE32               (1)
+#define K_ERR_SPURIOUS_IRQ             (2)
+
+#ifndef __ASSEMBLY__
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <stdint.h>

Review Comment:
   `#include <stdbool.h>`



##########
arch/arm64/src/common/arm64_cache.c:
##########
@@ -0,0 +1,449 @@
+/****************************************************************************
+ * arch/arm64/src/common/arm64_cache.c
+ *
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.  The
+ * ASF licenses this file to you under the Apache License, Version 2.0 (the
+ * "License"); you may not use this file except in compliance with the
+ * License.  You may obtain a copy of the License at
+ *
+ *   http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+ * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+ * License for the specific language governing permissions and limitations
+ * under the License.
+ *
+ ****************************************************************************/
+
+/****************************************************************************
+ * Included Files
+ ****************************************************************************/
+
+#include <nuttx/config.h>
+#include <nuttx/cache.h>
+#include <nuttx/irq.h>
+
+#include <nuttx/arch.h>
+#include <arch/irq.h>
+#include <arch/chip/chip.h>
+#include <nuttx/spinlock.h>
+
+#include "arm64_arch.h"
+#include "arm64_internal.h"
+#include "arm64_mmu.h"
+
+/****************************************************************************
+ * Pre-processor Macros
+ ****************************************************************************/
+
+/* Common operations for the caches
+ *
+ * WB means write-back and intends to transfer dirty cache lines to memory in
+ * a copy-back cache policy. May be a no-op in write-back cache policy.
+ *
+ * INVD means invalidate and will mark cache lines as not valid. A future
+ * access to the associated address is guaranteed to generate a memory fetch.
+ *
+ * armv8 data cache instruction:
+ *
+ * DC CIVAC (WB+INVD):
+ *   Data or unified Cache line Clean and Invalidate by VA to PoC
+ *   Clean and Invalidate data cache by address to Point of Coherency.
+ *
+ * DC CVAC (WB):
+ *   Data or unified Cache line Clean by VA to PoC
+ *   Clean data cache by address to Point of Coherency.
+ *
+ * DC IVAC (INVD):
+ *   Data or unified Cache line Invalidate by VA to PoC
+ *   Invalidate data cache by address to Point of Coherency
+ */
+
+#define CACHE_OP_WB         BIT(0)
+#define CACHE_OP_INVD       BIT(1)
+#define CACHE_OP_WB_INVD    (CACHE_OP_WB | CACHE_OP_INVD)
+
+#define LINE_MASK(line)             ((line) - 1)
+#define LINE_ALIGN_DOWN(a, line)    ((a) & ~LINE_MASK(line))
+#define LINE_ALIGN_UP(a, line) \
+  (((a) + LINE_MASK(line)) & ~LINE_MASK(line))
+
+#define dc_ops(op, val)                                          \
+  ({                                                             \
+    __asm__ volatile ("dc " op ", %0" : : "r" (val) : "memory"); \
+  })
+
+/* IC IALLUIS, Instruction Cache Invalidate All to PoU, Inner Shareable
+ * Purpose
+ * Invalidate all instruction caches in the Inner Shareable domain of
+ * the PE executing the instruction to the Point of Unification.
+ */
+
+static inline void __ic_iallu(void)
+{
+  __asm__ volatile ("ic  iallu" : : : "memory");
+}
+
+/* IC IALLU, Instruction Cache Invalidate All to PoU
+ * Purpose
+ * Invalidate all instruction caches of the PE executing
+ * the instruction to the Point of Unification.
+ */
+
+static inline void __ic_ialluis(void)
+{
+  __asm__ volatile ("ic  ialluis" : : : "memory");
+}
+
+size_t dcache_line_size;
+
+/****************************************************************************
+ * Private Function Prototypes
+ ****************************************************************************/
+
+/* operation for data cache by virtual address to PoC */
+
+static inline int arm64_dcache_range(uintptr_t start_addr,
+                                    uintptr_t end_addr, int op)
+{
+  /* Align address to line size */
+
+  start_addr = LINE_ALIGN_DOWN(start_addr, dcache_line_size);
+
+  while (start_addr < end_addr)
+    {
+      switch (op)
+        {
+        case CACHE_OP_WB:
+        {
+          dc_ops("cvac", start_addr);
+          break;
+        }
+
+        case CACHE_OP_INVD:
+        {
+          dc_ops("ivac", start_addr);
+          break;
+        }
+
+        case CACHE_OP_WB_INVD:
+        {
+          dc_ops("civac", start_addr);
+          break;
+        }
+        }
+      start_addr += dcache_line_size;
+    }
+
+  __DSB();
+
+  return 0;
+}
+
+/* operation for all data cache */
+
+static inline int arm64_dcache_all(int op)
+{
+  uint32_t  clidr_el1;
+  uint32_t  csselr_el1;
+  uint32_t  ccsidr_el1;
+  uint8_t   loc;
+  uint8_t   ctype;
+  uint8_t   cache_level;
+  uint8_t   line_size;
+  uint8_t   way_pos;
+  uint32_t  max_ways;
+  uint32_t  max_sets;
+  uint32_t  dc_val;
+  uint32_t  set;
+  uint32_t  way;
+
+  /* Data barrier before start */
+
+  __DSB();
+
+  clidr_el1 = read_sysreg(clidr_el1);
+
+  loc = (clidr_el1 >> CLIDR_EL1_LOC_SHIFT) & CLIDR_EL1_LOC_MASK;
+  if (!loc)
+    {
+      return 0;
+    }
+
+  for (cache_level = 0; cache_level < loc; cache_level++)
+    {
+      ctype =
+        (clidr_el1 >>
+         CLIDR_EL1_CTYPE_SHIFT(cache_level)) & CLIDR_EL1_CTYPE_MASK;
+
+      /* No data cache, continue */
+
+      if (ctype < 2)
+        {
+          continue;
+        }
+
+      /* select cache level */
+
+      csselr_el1 = cache_level << 1;
+      write_sysreg(csselr_el1, csselr_el1);
+      __ISB();
+
+      ccsidr_el1    = read_sysreg(ccsidr_el1);
+      line_size     =
+        (ccsidr_el1 >> CCSIDR_EL1_LN_SZ_SHIFT & CCSIDR_EL1_LN_SZ_MASK) + 4;
+      max_ways =
+        (ccsidr_el1 >> CCSIDR_EL1_WAYS_SHIFT) & CCSIDR_EL1_WAYS_MASK;
+      max_sets =
+        (ccsidr_el1 >> CCSIDR_EL1_SETS_SHIFT) & CCSIDR_EL1_SETS_MASK;
+
+      /* 32-log2(ways), bit position of way in DC operand */
+
+      way_pos = __builtin_clz(max_ways);
+
+      for (set = 0; set <= max_sets; set++)
+        {
+          for (way = 0; way <= max_ways; way++)
+            {
+              /* way number, aligned to pos in DC operand */
+
+              dc_val = way << way_pos;
+
+              /* cache level, aligned to pos in DC operand */
+
+              dc_val |= csselr_el1;
+
+              /* set number, aligned to pos in DC operand */
+
+              dc_val |= set << line_size;
+              switch (op)
+                {
+                case CACHE_OP_WB:
+                {
+                  dc_ops("csw", dc_val);
+                  break;
+                }
+
+                case CACHE_OP_INVD:
+                {
+                  dc_ops("isw", dc_val);
+                  break;
+                }
+
+                case CACHE_OP_WB_INVD:
+                {
+                  dc_ops("cisw", dc_val);
+                  break;
+                }

Review Comment:
   Add two spaces of indentation here, and maybe a `default` case as well; roughly as sketched below.
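   One possible reading of that, as a sketch only (exact indentation should follow the NuttX coding style):

   ```c
     switch (op)
       {
         case CACHE_OP_WB:
           {
             dc_ops("csw", dc_val);
             break;
           }

         case CACHE_OP_INVD:
           {
             dc_ops("isw", dc_val);
             break;
           }

         case CACHE_OP_WB_INVD:
           {
             dc_ops("cisw", dc_val);
             break;
           }

         default:
           {
             break;
           }
       }
   ```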



-- 
This is an automated message from the Apache Git Service.
To respond to the message, please log on to GitHub and use the
URL above to go to the specific comment.

To unsubscribe, e-mail: commits-unsubscr...@nuttx.apache.org

For queries about this service, please contact Infrastructure at:
us...@infra.apache.org
