From: Kuo-Jung Su <dant...@faraday-tech.com>

Here is the list of verified cores:

1. FA606TE (ARMv5TE, no mmu)
2. FA626TE (ARMv5TE)

Signed-off-by: Kuo-Jung Su <dant...@faraday-tech.com>
CC: Albert Aribaud <albert.u.b...@aribaud.net>
---
Changes for v8:
        - add arm_init_before_mmu() & mmu_page_table_flush()

Changes for v7:
        - Update license to use SPDX identifiers.

Changes for v6:
        - No updates

Changes for v5:
        - Initial commit

 arch/arm/cpu/faraday/Makefile  |   10 +
 arch/arm/cpu/faraday/cache.c   |  190 +++++++++++++++++++
 arch/arm/cpu/faraday/config.mk |   15 ++
 arch/arm/cpu/faraday/cpu.c     |  176 +++++++++++++++++
 arch/arm/cpu/faraday/start.S   |  407 ++++++++++++++++++++++++++++++++++++++++
 5 files changed, 798 insertions(+)
 create mode 100644 arch/arm/cpu/faraday/Makefile
 create mode 100644 arch/arm/cpu/faraday/cache.c
 create mode 100644 arch/arm/cpu/faraday/config.mk
 create mode 100644 arch/arm/cpu/faraday/cpu.c
 create mode 100644 arch/arm/cpu/faraday/start.S

diff --git a/arch/arm/cpu/faraday/Makefile b/arch/arm/cpu/faraday/Makefile
new file mode 100644
index 0000000..c859238
--- /dev/null
+++ b/arch/arm/cpu/faraday/Makefile
@@ -0,0 +1,10 @@
+#
+# (C) Copyright 2000-2003
+# Wolfgang Denk, DENX Software Engineering, w...@denx.de.
+#
+# SPDX-License-Identifier:     GPL-2.0+
+#
+
+extra-y := start.o
+
+obj-y   += cpu.o cache.o
diff --git a/arch/arm/cpu/faraday/cache.c b/arch/arm/cpu/faraday/cache.c
new file mode 100644
index 0000000..fe74732
--- /dev/null
+++ b/arch/arm/cpu/faraday/cache.c
@@ -0,0 +1,190 @@
+/*
+ * (C) Copyright 2013
+ * Faraday Technology Corporation. <http://www.faraday-tech.com/tw/>
+ * Kuo-Jung Su <dant...@gmail.com>
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+
+#include <common.h>
+#include <command.h>
+#include <asm/io.h>
+#include <asm/system.h>
+
+/*
+ * I-Cache
+ */
+
+#ifdef CONFIG_SYS_ICACHE_OFF
+
+void invalidate_icache_all(void)
+{
+}
+
+#else  /* #ifdef CONFIG_SYS_ICACHE_OFF */
+
+void invalidate_icache_all(void)
+{
+       __asm__ __volatile__ (
+               "mov r0, #0\n"
+               "mcr p15, 0, r0, c7, c5, 0\n"
+               : /* output */
+               : /* input */
+               : "r0" /* clobber list */
+       );
+}
+
+#endif /* #ifdef CONFIG_SYS_ICACHE_OFF */
+
+/*
+ * D-Cache
+ */
+
+#ifdef CONFIG_SYS_DCACHE_OFF
+
+void flush_dcache_all(void)
+{
+}
+
+void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+}
+
+void invalidate_dcache_range(unsigned long start, unsigned long stop)
+{
+}
+
+void invalidate_dcache_all(void)
+{
+}
+
+void flush_cache(unsigned long start, unsigned long size)
+{
+}
+
+void arm_init_before_mmu(void)
+{
+}
+
+void mmu_page_table_flush(unsigned long start, unsigned long stop)
+{
+}
+
+#else  /* #ifdef CONFIG_SYS_DCACHE_OFF */
+
+void flush_dcache_all(void)
+{
+       __asm__ __volatile__ (
+               "mov r0,#0\n"
+               "mcr p15,0,r0,c7,c14,0\n" /* clean & invalidate d-cache all */
+               "mcr p15,0,r0,c7,c10,4\n" /* drain write buffer */
+               : /* output */
+               : /* input */
+               : "r0" /* clobber list */
+       );
+}
+
+void flush_dcache_range(unsigned long start, unsigned long stop)
+{
+       unsigned long align = CONFIG_SYS_CACHELINE_SIZE;
+       unsigned long mask  = ~(align - 1);
+
+       /* aligned to cache line */
+       stop  = (stop + (align - 1)) & mask;
+       start = start & mask;
+
+       __asm__ __volatile__ (
+               "1:\n"
+               "mcr p15,0,%0,c7,c14,1\n" /* clean & invalidate d-cache line */
+               "add %0,%0,%2\n"
+               "cmp %0,%1\n"
+               "blo 1b\n"
+               "mov r0,#0\n"
+               "mcr p15,0,r0,c7,c10,4\n" /* drain write buffer */
+               : "+r"(start) /* output */
+               : "r"(stop), "r"(align) /* input */
+               : "r0" /* clobber list */
+       );
+}
+
+void invalidate_dcache_range(unsigned long start, unsigned long stop)
+{
+       unsigned long align = CONFIG_SYS_CACHELINE_SIZE;
+       unsigned long mask  = ~(align - 1);
+
+       /* aligned to cache line */
+       stop  = (stop + (align - 1)) & mask;
+       start = start & mask;
+
+       __asm__ __volatile__ (
+               "1:\n"
+               "mcr p15,0,%0,c7,c6,1\n" /* invalidate cache line */
+               "add %0,%0,%2\n"
+               "cmp %0,%1\n"
+               "blo 1b\n"
+               : "+r"(start) /* output */
+               : "r"(stop), "r"(align) /* input */
+               : "r0" /* clobber list */
+       );
+}
+
+void invalidate_dcache_all(void)
+{
+       __asm__ __volatile__ (
+               "mov r0,#0\n"
+               "mcr p15,0,r0,c7,c6,0\n" /* invalidate d-cache all */
+               : /* output */
+               : /* input */
+               : "r0"/* clobber list */
+       );
+}
+
+void flush_cache(unsigned long start, unsigned long size)
+{
+       flush_dcache_range(start, start + size);
+}
+
+static void invalidate_utlb_all(void)
+{
+       /* invalidate U-TLB all */
+       __asm__ __volatile__ (
+               "mov r0,#0\n"
+               "mcr p15, 0, r0, c8, c7, 0\n"
+               : /* output */
+               : /* input */
+               : "r0" /* clobber list */
+       );
+}
+
+static void invalidate_utlb_range(unsigned long start, unsigned long stop)
+{
+       /* make it page aligned */
+       start &= 0xfffff000;
+       stop = (stop + 4095) & 0xfffff000;
+
+       /* invalidate U-TLB entry */
+       __asm__ __volatile__ (
+               "1:\n"
+               "mcr p15, 0, %0, c8, c7, 1\n"
+               "add %0, %0, #4096\n"
+               "cmp %0, %1\n"
+               "blo 1b\n"
+               : "+r"(start)
+               : "r"(stop)
+               : "r0"
+       );
+}
+
+void arm_init_before_mmu(void)
+{
+       invalidate_dcache_all();
+       invalidate_utlb_all();
+}
+
+void mmu_page_table_flush(unsigned long start, unsigned long stop)
+{
+       flush_dcache_range(start, stop);
+       invalidate_utlb_range(start, stop);
+}
+
+#endif    /* !defined(CONFIG_SYS_DCACHE_OFF) */
diff --git a/arch/arm/cpu/faraday/config.mk b/arch/arm/cpu/faraday/config.mk
new file mode 100644
index 0000000..f39e70b
--- /dev/null
+++ b/arch/arm/cpu/faraday/config.mk
@@ -0,0 +1,15 @@
+#
+# (C) Copyright 2000-2003
+# Wolfgang Denk, DENX Software Engineering, w...@denx.de.
+#
+# SPDX-License-Identifier:     GPL-2.0+
+#
+
+PLATFORM_CPPFLAGS += -march=armv5te
+# =========================================================================
+#
+# Supply options according to compiler version
+#
+# =========================================================================
+PF_RELFLAGS_SLB_AT := $(call cc-option,-mshort-load-bytes,$(call cc-option,-malignment-traps,))
+PLATFORM_RELFLAGS += $(PF_RELFLAGS_SLB_AT)
diff --git a/arch/arm/cpu/faraday/cpu.c b/arch/arm/cpu/faraday/cpu.c
new file mode 100644
index 0000000..d3c8a9e
--- /dev/null
+++ b/arch/arm/cpu/faraday/cpu.c
@@ -0,0 +1,176 @@
+/*
+ * (C) Copyright 2013
+ * Faraday Technology Corporation. <http://www.faraday-tech.com/tw/>
+ * Kuo-Jung Su <dant...@gmail.com>
+ *
+ * SPDX-License-Identifier:     GPL-2.0+
+ */
+
+#include <common.h>
+#include <malloc.h>
+#include <command.h>
+#include <asm/io.h>
+#include <asm/system.h>
+#include <faraday/ftwdt010_wdt.h>
+
+DECLARE_GLOBAL_DATA_PTR;
+
+/* Vendor ID */
+#define ARM_CPU_VID(x)      (((x) >> 24) & 0xff)
+#define ARM_CPU_ARM         0x41
+#define ARM_CPU_FARADAY     0x66
+
+/* Product ID */
+#define ARM_CPU_PID(x)      (((x) >>  4) & 0x0fff)
+
+/* Instruction Set Architecture */
+#define ARM_CPU_ISA(x)      (((x) >> 16) & 0xff)
+#define ARM_CPU_ARMV4       0x01
+#define ARM_CPU_ARMV5TE     0x05
+
+/* ARM Core ID (no rev.) */
+#define ARM_CPU_CID(x)      (((x) >>  4) & 0x0fffffff)
+#define ARM_CPU_FA526       0x6601526 /* ARMv4 */
+#define ARM_CPU_FA626       0x6601626 /* ARMv4 */
+#define ARM_CPU_FA606TE     0x6605606 /* ARMv5TE, no MMU */
+#define ARM_CPU_FA616TE     0x6605616 /* ARMv5TE */
+#define ARM_CPU_FA626TE     0x6605626 /* ARMv5TE */
+#define ARM_CPU_FA726TE     0x6605726 /* ARMv5TE */
+
+#ifdef CONFIG_ARCH_CPU_INIT
+int arch_cpu_init(void)
+{
+       unsigned int id;
+
+       __asm__ __volatile__ (
+               "mrc p15, 0, %0, c0, c0, 0\n"
+               : "=r"(id) /* output */
+               : /* input */
+               : "r0" /* clobber list */
+       );
+
+       switch (ARM_CPU_CID(id)) {
+       case ARM_CPU_FA606TE: /* FA606TE */
+               gd->arch.has_mmu = 0;
+               break;
+       default:
+               gd->arch.has_mmu = 1;
+               break;
+       }
+
+       gd->arch.cpu_id = id;
+
+       return 0;
+}
+#endif    /* #ifdef CONFIG_ARCH_CPU_INIT */
+
+#ifdef CONFIG_DISPLAY_CPUINFO
+int print_cpuinfo(void)
+{
+       char cpu_name[32];
+       uint vid = ARM_CPU_VID(gd->arch.cpu_id);
+       uint pid = ARM_CPU_PID(gd->arch.cpu_id);
+
+       switch (vid) {
+       case ARM_CPU_FARADAY: /* Faraday */
+               switch (ARM_CPU_ISA(gd->arch.cpu_id)) {
+               case ARM_CPU_ARMV5TE:
+                       sprintf(cpu_name, "FA%xTE", pid);
+                       break;
+               default:
+                       sprintf(cpu_name, "FA%x", pid);
+                       break;
+               }
+               break;
+       case ARM_CPU_ARM: /* ARM */
+               if ((pid & 0xff0) == 0xc00)
+                       sprintf(cpu_name, "Cortex-A%u", (pid & 0x00f));
+               else if (pid >= 0xa00)
+                       sprintf(cpu_name, "ARM%x", 0x1000 + (pid - 0xa00));
+               else
+                       sprintf(cpu_name, "ARM%x", pid);
+               break;
+       default:
+               sprintf(cpu_name, "Unknown");
+               break;
+       }
+
+       printf("CPU:   %s\n", cpu_name);
+
+       return 0;
+}
+#endif /* CONFIG_DISPLAY_CPUINFO */
+
+int cleanup_before_linux(void)
+{
+       /*
+        * this function is called just before we call linux
+        * it prepares the processor for linux
+        *
+        * we turn off caches etc ...
+        */
+#ifndef CONFIG_SPL_BUILD
+       disable_interrupts();
+#endif
+
+       /*
+        * Turn off I-cache and invalidate it
+        */
+       icache_disable();
+       invalidate_icache_all();
+
+       /*
+        * Turn off D-cache
+        * dcache_disable() in turn flushes the d-cache and disables MMU
+        */
+       dcache_disable();
+
+       /*
+        * After D-cache is flushed and before it is disabled there may
+        * be some new valid entries brought into the cache. We are sure
+        * that these lines are not dirty and will not affect our execution.
+        * So just invalidate the entire d-cache again to avoid coherency
+        * problems for kernel
+        */
+       invalidate_dcache_all();
+
+       return 0;
+}
+
+void reset_cpu(unsigned long ignored)
+{
+#ifdef CONFIG_FTWDT010_BASE
+       struct ftwdt010_wdt *regs = (void __iomem *)CONFIG_FTWDT010_BASE;
+
+       /* Disable WDT */
+       writel(0, &regs->wdcr);
+       /* Timeout in 1000 ticks */
+       writel(1000, &regs->wdload);
+       /* Enable WDT with system reset */
+       writel(FTWDT010_WDCR_ENABLE | FTWDT010_WDCR_RST, &regs->wdcr);
+#endif
+}
+
+/*
+ * This arch_preboot_os() overrides the weak function
+ * in "cmd_bootm.c".
+ */
+void arch_preboot_os(void)
+{
+       cleanup_before_linux();
+}
+
+/*
+ * This enable_caches() overrides the weak function
+ * in "arch/arm/lib/cache.c".
+ */
+void enable_caches(void)
+{
+       icache_enable();
+       dcache_enable();
+
+       if (mmu_enabled())
+               puts("MMU:   on\n");
+       else
+               puts("MMU:   off\n");
+}
diff --git a/arch/arm/cpu/faraday/start.S b/arch/arm/cpu/faraday/start.S
new file mode 100644
index 0000000..32ce92e
--- /dev/null
+++ b/arch/arm/cpu/faraday/start.S
@@ -0,0 +1,407 @@
+/*
+ * u-boot - Startup Code for Faraday CPU-core
+ *
+ * Base is arch/arm/cpu/arm926ejs/start.S
+ *
+ * (C) Copyright 2013 Faraday Technology
+ * Kuo-Jung Su <dant...@gmail.com>
+ *
+ * SPDX-License-Identifier:    GPL-2.0+
+ */
+
+#include <asm-offsets.h>
+#include <config.h>
+#include <common.h>
+#include <version.h>
+
+/*
+ *************************************************************************
+ *
+ * Jump vector table as in table 3.1 in [1]
+ *
+ *************************************************************************
+ */
+
+
+#ifdef CONFIG_SYS_DV_NOR_BOOT_CFG
+.globl _start
+_start:
+.globl _NOR_BOOT_CFG
+_NOR_BOOT_CFG:
+       .word   CONFIG_SYS_DV_NOR_BOOT_CFG
+       b       reset
+#else
+.globl _start
+_start:
+       b       reset
+#endif
+#ifdef CONFIG_SPL_BUILD
+/* No exception handlers in preloader */
+       ldr     pc, _hang
+       ldr     pc, _hang
+       ldr     pc, _hang
+       ldr     pc, _hang
+       ldr     pc, _hang
+       ldr     pc, _hang
+       ldr     pc, _hang
+
+_hang:
+       .word   do_hang
+/* pad to 64 byte boundary */
+       .word   0x12345678
+       .word   0x12345678
+       .word   0x12345678
+       .word   0x12345678
+       .word   0x12345678
+       .word   0x12345678
+       .word   0x12345678
+#else
+       ldr     pc, _undefined_instruction
+       ldr     pc, _software_interrupt
+       ldr     pc, _prefetch_abort
+       ldr     pc, _data_abort
+       ldr     pc, _not_used
+       ldr     pc, _irq
+       ldr     pc, _fiq
+
+_undefined_instruction:
+       .word undefined_instruction
+_software_interrupt:
+       .word software_interrupt
+_prefetch_abort:
+       .word prefetch_abort
+_data_abort:
+       .word data_abort
+_not_used:
+       .word not_used
+_irq:
+       .word irq
+_fiq:
+       .word fiq
+
+#endif /* CONFIG_SPL_BUILD */
+       .balignl 16,0xdeadbeef
+
+
+/*
+ *************************************************************************
+ *
+ * Startup Code (reset vector)
+ *
+ * do important init only if we don't start from memory!
+ * setup Memory and board specific bits prior to relocation.
+ * relocate armboot to ram
+ * setup stack
+ *
+ *************************************************************************
+ */
+
+.globl _TEXT_BASE
+_TEXT_BASE:
+#if defined(CONFIG_SPL_BUILD) && defined(CONFIG_SPL_TEXT_BASE)
+       .word   CONFIG_SPL_TEXT_BASE
+#else
+       .word   CONFIG_SYS_TEXT_BASE
+#endif
+
+/*
+ * These are defined in the board-specific linker script.
+ * Subtracting _start from them lets the linker put their
+ * relative position in the executable instead of leaving
+ * them null.
+ */
+.globl _bss_start_ofs
+_bss_start_ofs:
+       .word __bss_start - _start
+
+.globl _bss_end_ofs
+_bss_end_ofs:
+       .word __bss_end - _start
+
+.globl _end_ofs
+_end_ofs:
+       .word _end - _start
+
+#ifdef CONFIG_USE_IRQ
+/* IRQ stack memory (calculated at run-time) */
+.globl IRQ_STACK_START
+IRQ_STACK_START:
+       .word   0x0badc0de
+
+/* IRQ stack memory (calculated at run-time) */
+.globl FIQ_STACK_START
+FIQ_STACK_START:
+       .word 0x0badc0de
+#endif
+
+/* IRQ stack memory (calculated at run-time) + 8 bytes */
+.globl IRQ_STACK_START_IN
+IRQ_STACK_START_IN:
+       .word   0x0badc0de
+
+/*
+ * the actual reset code
+ */
+
+reset:
+       /*
+        * set the cpu to SVC32 mode
+        */
+       mrs     r0,cpsr
+       bic     r0,r0,#0x1f
+       orr     r0,r0,#0xd3
+       msr     cpsr,r0
+
+       /*
+        * we do sys-critical inits only at reboot,
+        * not when booting from ram!
+        */
+#ifndef CONFIG_SKIP_LOWLEVEL_INIT
+       bl      cpu_init_crit
+#endif
+
+       /*
+        * With the following bootstrap relocation, we can flawlessly
+        * boot from either ROM or NOR flash.
+        */
+       adr r0, _start      /* r0 <- current position of code   */
+       ldr r1, _TEXT_BASE  /* test if we run from flash or RAM */
+       teq r0, r1          /* don't reloc during debug         */
+       bleq _main
+       ldr r2, _end_ofs    /* r2 <- size of u-boot             */
+       add r2, r0, r2      /* r2 <- source end address         */
+
+reloc_loop:
+       ldmia r0!, {r3-r10} /* copy from source address [r0]    */
+       stmia r1!, {r3-r10} /* copy to   target address [r1]    */
+       cmp r0, r2          /* until source end address [r2]    */
+       blo reloc_loop
+
+       ldr pc, =_main
+
+/*----------------------------------------------------------------------*/
+
+       .globl  c_runtime_cpu_setup
+c_runtime_cpu_setup:
+       mov pc, lr
+
+/*
+ *************************************************************************
+ *
+ * CPU_init_critical registers
+ *
+ * setup important registers
+ * setup memory timing
+ *
+ *************************************************************************
+ */
+#ifndef CONFIG_SKIP_LOWLEVEL_INIT
+cpu_init_crit:
+       /*
+        * flush D cache before disabling it
+        */
+       mov     r0, #0
+       mcr     p15, 0, r0, c7, c14, 0  /* clean & invalidate D cache */
+       mcr     p15, 0, r0, c8, c7, 0   /* invalidate TLB */
+       mcr     p15, 0, r0, c7, c5, 0   /* invalidate I Cache */
+       mcr     p15, 0, r0, c7, c10, 4  /* drain write buffer */
+
+       /*
+        * disable MMU and D cache
+        * enable I cache if CONFIG_SYS_ICACHE_OFF is not defined
+        */
+       mrc     p15, 0, r0, c1, c0, 0
+       bic     r0, r0, #0x00000300     /* clear bits 9:8 (---- --RS) */
+       bic     r0, r0, #0x00000087     /* clear bits 7, 2:0 (B--- -CAM) */
+#ifdef CONFIG_SYS_EXCEPTION_VECTORS_HIGH
+       orr     r0, r0, #0x00002000     /* set bit 13 (--V- ----) */
+#else
+       bic     r0, r0, #0x00002000     /* clear bit 13 (--V- ----) */
+#endif
+       orr     r0, r0, #0x00000002     /* set bit 2 (A) Align */
+#ifndef CONFIG_SYS_ICACHE_OFF
+       orr     r0, r0, #0x00001000     /* set bit 12 (I) I-Cache */
+#endif
+       mcr     p15, 0, r0, c1, c0, 0
+
+       /*
+        * Go setup Memory and board specific bits prior to relocation.
+        */
+       mov     ip, lr          /* preserve link reg across call */
+       bl      lowlevel_init   /* go setup pll,mux,memory */
+       mov     lr, ip          /* restore link */
+       mov     pc, lr          /* back to my caller */
+#endif /* CONFIG_SKIP_LOWLEVEL_INIT */
+
+#ifndef CONFIG_SPL_BUILD
+/*
+ *************************************************************************
+ *
+ * Interrupt handling
+ *
+ *************************************************************************
+ */
+
+@
+@ IRQ stack frame.
+@
+#define S_FRAME_SIZE   72
+
+#define S_OLD_R0       68
+#define S_PSR          64
+#define S_PC           60
+#define S_LR           56
+#define S_SP           52
+
+#define S_IP           48
+#define S_FP           44
+#define S_R10          40
+#define S_R9           36
+#define S_R8           32
+#define S_R7           28
+#define S_R6           24
+#define S_R5           20
+#define S_R4           16
+#define S_R3           12
+#define S_R2           8
+#define S_R1           4
+#define S_R0           0
+
+#define MODE_SVC 0x13
+#define I_BIT   0x80
+
+/*
+ * use bad_save_user_regs for abort/prefetch/undef/swi ...
+ * use irq_save_user_regs / irq_restore_user_regs for IRQ/FIQ handling
+ */
+
+       .macro  bad_save_user_regs
+       @ carve out a frame on current user stack
+       sub     sp, sp, #S_FRAME_SIZE
+       stmia   sp, {r0 - r12}  @ Save user registers (now in svc mode) r0-r12
+       ldr     r2, IRQ_STACK_START_IN
+       @ get values for "aborted" pc and cpsr (into parm regs)
+       ldmia   r2, {r2 - r3}
+       add     r0, sp, #S_FRAME_SIZE           @ grab pointer to old stack
+       add     r5, sp, #S_SP
+       mov     r1, lr
+       stmia   r5, {r0 - r3}   @ save sp_SVC, lr_SVC, pc, cpsr
+       mov     r0, sp          @ save current stack into r0 (param register)
+       .endm
+
+       .macro  irq_save_user_regs
+       sub     sp, sp, #S_FRAME_SIZE
+       stmia   sp, {r0 - r12}                  @ Calling r0-r12
+       @ !!!! R8 NEEDS to be saved !!!! a reserved stack spot would be good.
+       add     r8, sp, #S_PC
+       stmdb   r8, {sp, lr}^           @ Calling SP, LR
+       str     lr, [r8, #0]            @ Save calling PC
+       mrs     r6, spsr
+       str     r6, [r8, #4]            @ Save CPSR
+       str     r0, [r8, #8]            @ Save OLD_R0
+       mov     r0, sp
+       .endm
+
+       .macro  irq_restore_user_regs
+       ldmia   sp, {r0 - lr}^                  @ Calling r0 - lr
+       mov     r0, r0
+       ldr     lr, [sp, #S_PC]                 @ Get PC
+       add     sp, sp, #S_FRAME_SIZE
+       subs    pc, lr, #4              @ return & move spsr_svc into cpsr
+       .endm
+
+       .macro get_bad_stack
+       ldr     r13, IRQ_STACK_START_IN         @ setup our mode stack
+
+       str     lr, [r13]       @ save caller lr in position 0 of saved stack
+       mrs     lr, spsr        @ get the spsr
+       str     lr, [r13, #4]   @ save spsr in position 1 of saved stack
+       mov     r13, #MODE_SVC  @ prepare SVC-Mode
+       @ msr   spsr_c, r13
+       msr     spsr, r13       @ switch modes, make sure moves will execute
+       mov     lr, pc          @ capture return pc
+       movs    pc, lr          @ jump to next instruction & switch modes.
+       .endm
+
+       .macro get_irq_stack                    @ setup IRQ stack
+       ldr     sp, IRQ_STACK_START
+       .endm
+
+       .macro get_fiq_stack                    @ setup FIQ stack
+       ldr     sp, FIQ_STACK_START
+       .endm
+#endif /* CONFIG_SPL_BUILD */
+
+/*
+ * exception handlers
+ */
+#ifdef CONFIG_SPL_BUILD
+       .align  5
+do_hang:
+       ldr     sp, _TEXT_BASE                  /* switch to abort stack */
+1:
+       bl      1b                              /* hang and never return */
+#else  /* !CONFIG_SPL_BUILD */
+       .align  5
+undefined_instruction:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_undefined_instruction
+
+       .align  5
+software_interrupt:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_software_interrupt
+
+       .align  5
+prefetch_abort:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_prefetch_abort
+
+       .align  5
+data_abort:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_data_abort
+
+       .align  5
+not_used:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_not_used
+
+#ifdef CONFIG_USE_IRQ
+
+       .align  5
+irq:
+       get_irq_stack
+       irq_save_user_regs
+       bl      do_irq
+       irq_restore_user_regs
+
+       .align  5
+fiq:
+       get_fiq_stack
+       /* someone ought to write a more efficient fiq_save_user_regs */
+       irq_save_user_regs
+       bl      do_fiq
+       irq_restore_user_regs
+
+#else
+
+       .align  5
+irq:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_irq
+
+       .align  5
+fiq:
+       get_bad_stack
+       bad_save_user_regs
+       bl      do_fiq
+
+#endif
+#endif /* CONFIG_SPL_BUILD */
--
1.7.9.5

_______________________________________________
U-Boot mailing list
U-Boot@lists.denx.de
http://lists.denx.de/mailman/listinfo/u-boot

Reply via email to