Author: imp
Date: Fri Jul 27 18:33:09 2018
New Revision: 336773
URL: https://svnweb.freebsd.org/changeset/base/336773
Log:
  Remove xscale support

  The OLD XSCALE stuff hasn't been useful in a while. The original
  committer (cognet@) was the only one that had boards for it. He's
  blessed this removal. Newer XSCALE (GUMSTIX) is for hardware that's
  quite old. After discussion on arm@, it was clear there was no
  support for keeping it.

  Differential Revision: https://reviews.freebsd.org/D16313

Deleted:
  head/lib/libc/arm/string/memcpy_xscale.S
  head/lib/libpmc/pmc.xscale.3
  head/share/man/man4/man4.arm/npe.4
  head/sys/arm/arm/bcopyinout_xscale.S
  head/sys/arm/arm/cpufunc_asm_xscale.S
  head/sys/arm/arm/cpufunc_asm_xscale_c3.S
  head/sys/arm/conf/CRB
  head/sys/arm/conf/GUMSTIX
  head/sys/arm/conf/GUMSTIX-QEMU
  head/sys/arm/conf/GUMSTIX.hints
  head/sys/arm/xscale/i8134x/crb_machdep.c
  head/sys/arm/xscale/i8134x/files.crb
  head/sys/arm/xscale/i8134x/files.i81342
  head/sys/arm/xscale/i8134x/i80321_timer.c
  head/sys/arm/xscale/i8134x/i80321_wdog.c
  head/sys/arm/xscale/i8134x/i80321reg.h
  head/sys/arm/xscale/i8134x/i80321var.h
  head/sys/arm/xscale/i8134x/i81342.c
  head/sys/arm/xscale/i8134x/i81342_mcu.c
  head/sys/arm/xscale/i8134x/i81342_pci.c
  head/sys/arm/xscale/i8134x/i81342_space.c
  head/sys/arm/xscale/i8134x/i81342reg.h
  head/sys/arm/xscale/i8134x/i81342var.h
  head/sys/arm/xscale/i8134x/iq81342_7seg.c
  head/sys/arm/xscale/i8134x/iq81342reg.h
  head/sys/arm/xscale/i8134x/iq81342var.h
  head/sys/arm/xscale/i8134x/obio.c
  head/sys/arm/xscale/i8134x/obiovar.h
  head/sys/arm/xscale/i8134x/std.crb
  head/sys/arm/xscale/i8134x/std.i81342
  head/sys/arm/xscale/i8134x/uart_bus_i81342.c
  head/sys/arm/xscale/i8134x/uart_cpu_i81342.c
  head/sys/arm/xscale/pxa/files.pxa
  head/sys/arm/xscale/pxa/if_smc_smi.c
  head/sys/arm/xscale/pxa/pxa_gpio.c
  head/sys/arm/xscale/pxa/pxa_icu.c
  head/sys/arm/xscale/pxa/pxa_machdep.c
  head/sys/arm/xscale/pxa/pxa_obio.c
  head/sys/arm/xscale/pxa/pxa_smi.c
  head/sys/arm/xscale/pxa/pxa_space.c
  head/sys/arm/xscale/pxa/pxa_timer.c
  head/sys/arm/xscale/pxa/pxareg.h
  head/sys/arm/xscale/pxa/pxavar.h
  head/sys/arm/xscale/pxa/std.pxa
  head/sys/arm/xscale/pxa/uart_bus_pxa.c
  head/sys/arm/xscale/pxa/uart_cpu_pxa.c
  head/sys/arm/xscale/std.xscale
  head/sys/dev/hwpmc/hwpmc_xscale.h
Modified:
  head/lib/libc/arm/string/memcpy.S
  head/lib/libpmc/Makefile
  head/lib/libpmc/libpmc.c
  head/lib/libpmc/pmc.3
  head/share/man/man4/man4.arm/Makefile
  head/share/mk/bsd.cpu.mk
  head/sys/arm/arm/bcopyinout.S
  head/sys/arm/arm/cpufunc.c
  head/sys/arm/arm/dump_machdep.c
  head/sys/arm/arm/elf_trampoline.c
  head/sys/arm/arm/exception.S
  head/sys/arm/arm/identcpu-v4.c
  head/sys/arm/arm/pmap-v4.c
  head/sys/arm/arm/trap-v4.c
  head/sys/arm/arm/vm_machdep.c
  head/sys/arm/conf/NOTES
  head/sys/arm/include/armreg.h
  head/sys/arm/include/cpu-v4.h
  head/sys/arm/include/cpufunc.h
  head/sys/arm/include/intr.h
  head/sys/arm/include/md_var.h
  head/sys/arm/include/pmap-v4.h
  head/sys/arm/include/pmc_mdep.h
  head/sys/arm/include/pte-v4.h
  head/sys/conf/Makefile.arm
  head/sys/conf/files.arm
  head/sys/conf/options.arm
  head/sys/dev/hwpmc/pmc_events.h
  head/sys/sys/pmc.h

Modified: head/lib/libc/arm/string/memcpy.S
==============================================================================
--- head/lib/libc/arm/string/memcpy.S Fri Jul 27 18:31:30 2018 (r336772)
+++ head/lib/libc/arm/string/memcpy.S Fri Jul 27 18:33:09 2018 (r336773)
@@ -2,8 +2,4 @@
 #include <machine/asm.h>
 __FBSDID("$FreeBSD$");
 
-#if !defined(_ARM_ARCH_5E) || defined(_STANDALONE)
 #include "memcpy_arm.S"
-#else
-#include "memcpy_xscale.S"
-#endif

Modified: head/lib/libpmc/Makefile
==============================================================================
---
head/lib/libpmc/Makefile Fri Jul 27 18:31:30 2018 (r336772) +++ head/lib/libpmc/Makefile Fri Jul 27 18:33:09 2018 (r336773) @@ -75,7 +75,6 @@ MAN+= pmc.tsc.3 MAN+= pmc.ucf.3 MAN+= pmc.westmere.3 MAN+= pmc.westmereuc.3 -MAN+= pmc.xscale.3 MLINKS+= \ pmc_allocate.3 pmc_release.3 \ Modified: head/lib/libpmc/libpmc.c ============================================================================== --- head/lib/libpmc/libpmc.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/lib/libpmc/libpmc.c Fri Jul 27 18:33:09 2018 (r336773) @@ -59,10 +59,6 @@ static int tsc_allocate_pmc(enum pmc_event _pe, char * struct pmc_op_pmcallocate *_pmc_config); #endif #if defined(__arm__) -#if defined(__XSCALE__) -static int xscale_allocate_pmc(enum pmc_event _pe, char *_ctrspec, - struct pmc_op_pmcallocate *_pmc_config); -#endif static int armv7_allocate_pmc(enum pmc_event _pe, char *_ctrspec, struct pmc_op_pmcallocate *_pmc_config); #endif @@ -140,7 +136,6 @@ struct pmc_class_descr { PMC_CLASSDEP_TABLE(iaf, IAF); PMC_CLASSDEP_TABLE(k8, K8); -PMC_CLASSDEP_TABLE(xscale, XSCALE); PMC_CLASSDEP_TABLE(armv7, ARMV7); PMC_CLASSDEP_TABLE(armv8, ARMV8); PMC_CLASSDEP_TABLE(mips24k, MIPS24K); @@ -186,7 +181,6 @@ static const struct pmc_event_descr cortex_a57_event_t } PMC_MDEP_TABLE(k8, K8, PMC_CLASS_SOFT, PMC_CLASS_TSC); -PMC_MDEP_TABLE(xscale, XSCALE, PMC_CLASS_SOFT, PMC_CLASS_XSCALE); PMC_MDEP_TABLE(cortex_a8, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a9, ARMV7, PMC_CLASS_SOFT, PMC_CLASS_ARMV7); PMC_MDEP_TABLE(cortex_a53, ARMV8, PMC_CLASS_SOFT, PMC_CLASS_ARMV8); @@ -224,9 +218,6 @@ PMC_CLASS_TABLE_DESC(k8, K8, k8, k8); PMC_CLASS_TABLE_DESC(tsc, TSC, tsc, tsc); #endif #if defined(__arm__) -#if defined(__XSCALE__) -PMC_CLASS_TABLE_DESC(xscale, XSCALE, xscale, xscale); -#endif PMC_CLASS_TABLE_DESC(cortex_a8, ARMV7, cortex_a8, armv7); PMC_CLASS_TABLE_DESC(cortex_a9, ARMV7, cortex_a9, armv7); #endif @@ -757,29 +748,7 @@ soft_allocate_pmc(enum pmc_event pe, char *ctrspec, } #if defined(__arm__) -#if defined(__XSCALE__) -static struct pmc_event_alias xscale_aliases[] = { - EV_ALIAS("branches", "BRANCH_RETIRED"), - EV_ALIAS("branch-mispredicts", "BRANCH_MISPRED"), - EV_ALIAS("dc-misses", "DC_MISS"), - EV_ALIAS("ic-misses", "IC_MISS"), - EV_ALIAS("instructions", "INSTR_RETIRED"), - EV_ALIAS(NULL, NULL) -}; -static int -xscale_allocate_pmc(enum pmc_event pe, char *ctrspec __unused, - struct pmc_op_pmcallocate *pmc_config __unused) -{ - switch (pe) { - default: - break; - } - - return (0); -} -#endif - static struct pmc_event_alias cortex_a8_aliases[] = { EV_ALIAS("dc-misses", "L1_DCACHE_REFILL"), EV_ALIAS("ic-misses", "L1_ICACHE_REFILL"), @@ -1237,10 +1206,6 @@ pmc_event_names_of_class(enum pmc_class cl, const char ev = k8_event_table; count = PMC_EVENT_TABLE_SIZE(k8); break; - case PMC_CLASS_XSCALE: - ev = xscale_event_table; - count = PMC_EVENT_TABLE_SIZE(xscale); - break; case PMC_CLASS_ARMV7: switch (cpu_info.pm_cputype) { default: @@ -1482,12 +1447,6 @@ pmc_init(void) PMC_MDEP_INIT(generic); break; #if defined(__arm__) -#if defined(__XSCALE__) - case PMC_CPU_INTEL_XSCALE: - PMC_MDEP_INIT(xscale); - pmc_class_table[n] = &xscale_class_table_descr; - break; -#endif case PMC_CPU_ARMV7_CORTEX_A8: PMC_MDEP_INIT(cortex_a8); pmc_class_table[n] = &cortex_a8_class_table_descr; @@ -1616,9 +1575,6 @@ _pmc_name_of_event(enum pmc_event pe, enum pmc_cputype if (pe >= PMC_EV_K8_FIRST && pe <= PMC_EV_K8_LAST) { ev = k8_event_table; evfence = k8_event_table + PMC_EVENT_TABLE_SIZE(k8); - } else if (pe >= PMC_EV_XSCALE_FIRST && pe 
<= PMC_EV_XSCALE_LAST) { - ev = xscale_event_table; - evfence = xscale_event_table + PMC_EVENT_TABLE_SIZE(xscale); } else if (pe >= PMC_EV_ARMV7_FIRST && pe <= PMC_EV_ARMV7_LAST) { switch (cpu) { case PMC_CPU_ARMV7_CORTEX_A8: Modified: head/lib/libpmc/pmc.3 ============================================================================== --- head/lib/libpmc/pmc.3 Fri Jul 27 18:31:30 2018 (r336772) +++ head/lib/libpmc/pmc.3 Fri Jul 27 18:33:09 2018 (r336773) @@ -545,7 +545,6 @@ API is .Xr pmc.tsc 3 , .Xr pmc.westmere 3 , .Xr pmc.westmereuc 3 , -.Xr pmc.xscale 3 , .Xr pmc_allocate 3 , .Xr pmc_attach 3 , .Xr pmc_capabilities 3 , Modified: head/share/man/man4/man4.arm/Makefile ============================================================================== --- head/share/man/man4/man4.arm/Makefile Fri Jul 27 18:31:30 2018 (r336772) +++ head/share/man/man4/man4.arm/Makefile Fri Jul 27 18:33:09 2018 (r336773) @@ -16,7 +16,6 @@ MAN= \ imx6_snvs.4 \ imx_wdog.4 \ mge.4 \ - npe.4 \ ti_adc.4 MLINKS= cgem.4 if_cgem.4 Modified: head/share/mk/bsd.cpu.mk ============================================================================== --- head/share/mk/bsd.cpu.mk Fri Jul 27 18:31:30 2018 (r336772) +++ head/share/mk/bsd.cpu.mk Fri Jul 27 18:33:09 2018 (r336773) @@ -104,11 +104,7 @@ _CPUCFLAGS = -march=${CPUTYPE} . elif ${MACHINE_CPUARCH} == "amd64" _CPUCFLAGS = -march=${CPUTYPE} . elif ${MACHINE_CPUARCH} == "arm" -. if ${CPUTYPE} == "xscale" -#XXX: gcc doesn't seem to like -mcpu=xscale, and dies while rebuilding itself -#_CPUCFLAGS = -mcpu=xscale -_CPUCFLAGS = -march=armv5te -D__XSCALE__ -. elif ${CPUTYPE:M*soft*} != "" +. if ${CPUTYPE:M*soft*} != "" _CPUCFLAGS = -mfloat-abi=softfp . elif ${CPUTYPE} == "cortexa" _CPUCFLAGS = -march=armv7 -mfpu=vfp Modified: head/sys/arm/arm/bcopyinout.S ============================================================================== --- head/sys/arm/arm/bcopyinout.S Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/bcopyinout.S Fri Jul 27 18:33:09 2018 (r336773) @@ -47,9 +47,6 @@ .word _C_LABEL(_min_memcpy_size) __FBSDID("$FreeBSD$"); -#ifdef _ARM_ARCH_5E -#include <arm/arm/bcopyinout_xscale.S> -#else .text .align 2 @@ -559,7 +556,6 @@ ENTRY(copyout) RET END(copyout) -#endif /* * int badaddr_read_1(const uint8_t *src, uint8_t *dest) Modified: head/sys/arm/arm/cpufunc.c ============================================================================== --- head/sys/arm/arm/cpufunc.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/cpufunc.c Fri Jul 27 18:33:09 2018 (r336773) @@ -61,10 +61,6 @@ __FBSDID("$FreeBSD$"); #include <machine/cpufunc.h> -#if defined(CPU_XSCALE_81342) -#include <arm/xscale/i8134x/i81342reg.h> -#endif - /* PRIMARY CACHE VARIABLES */ int arm_picache_size; int arm_picache_line_size; @@ -254,109 +250,6 @@ struct cpu_functions pj4bv7_cpufuncs = { }; #endif /* CPU_MV_PJ4B */ -#if defined(CPU_XSCALE_PXA2X0) - -struct cpu_functions xscale_cpufuncs = { - /* CPU functions */ - - xscale_cpwait, /* cpwait */ - - /* MMU functions */ - - xscale_control, /* control */ - xscale_setttb, /* setttb */ - - /* TLB functions */ - - armv4_tlb_flushID, /* tlb_flushID */ - xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushD, /* tlb_flushD */ - armv4_tlb_flushD_SE, /* tlb_flushD_SE */ - - /* Cache operations */ - - xscale_cache_syncI_rng, /* icache_sync_range */ - - xscale_cache_purgeD, /* dcache_wbinv_all */ - xscale_cache_purgeD_rng, /* dcache_wbinv_range */ - xscale_cache_flushD_rng, /* dcache_inv_range */ - xscale_cache_cleanD_rng, /* dcache_wb_range */ - - 
xscale_cache_flushID, /* idcache_inv_all */ - xscale_cache_purgeID, /* idcache_wbinv_all */ - xscale_cache_purgeID_rng, /* idcache_wbinv_range */ - cpufunc_nullop, /* l2cache_wbinv_all */ - (void *)cpufunc_nullop, /* l2cache_wbinv_range */ - (void *)cpufunc_nullop, /* l2cache_inv_range */ - (void *)cpufunc_nullop, /* l2cache_wb_range */ - (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ - - /* Other functions */ - - armv4_drain_writebuf, /* drain_writebuf */ - - xscale_cpu_sleep, /* sleep */ - - /* Soft functions */ - - xscale_context_switch, /* context_switch */ - - xscale_setup /* cpu setup */ -}; -#endif -/* CPU_XSCALE_PXA2X0 */ - -#ifdef CPU_XSCALE_81342 -struct cpu_functions xscalec3_cpufuncs = { - /* CPU functions */ - - xscale_cpwait, /* cpwait */ - - /* MMU functions */ - - xscale_control, /* control */ - xscalec3_setttb, /* setttb */ - - /* TLB functions */ - - armv4_tlb_flushID, /* tlb_flushID */ - xscale_tlb_flushID_SE, /* tlb_flushID_SE */ - armv4_tlb_flushD, /* tlb_flushD */ - armv4_tlb_flushD_SE, /* tlb_flushD_SE */ - - /* Cache operations */ - - xscalec3_cache_syncI_rng, /* icache_sync_range */ - - xscalec3_cache_purgeD, /* dcache_wbinv_all */ - xscalec3_cache_purgeD_rng, /* dcache_wbinv_range */ - xscale_cache_flushD_rng, /* dcache_inv_range */ - xscalec3_cache_cleanD_rng, /* dcache_wb_range */ - - xscale_cache_flushID, /* idcache_inv_all */ - xscalec3_cache_purgeID, /* idcache_wbinv_all */ - xscalec3_cache_purgeID_rng, /* idcache_wbinv_range */ - xscalec3_l2cache_purge, /* l2cache_wbinv_all */ - xscalec3_l2cache_purge_rng, /* l2cache_wbinv_range */ - xscalec3_l2cache_flush_rng, /* l2cache_inv_range */ - xscalec3_l2cache_clean_rng, /* l2cache_wb_range */ - (void *)cpufunc_nullop, /* l2cache_drain_writebuf */ - - /* Other functions */ - - armv4_drain_writebuf, /* drain_writebuf */ - - xscale_cpu_sleep, /* sleep */ - - /* Soft functions */ - - xscalec3_context_switch, /* context_switch */ - - xscale_setup /* cpu setup */ -}; -#endif /* CPU_XSCALE_81342 */ - - #if defined(CPU_FA526) struct cpu_functions fa526_cpufuncs = { /* CPU functions */ @@ -462,9 +355,7 @@ u_int cpu_reset_needs_v4_MMU_disable; /* flag used in #if defined(CPU_ARM9) || \ defined (CPU_ARM9E) || \ defined(CPU_ARM1176) || \ - defined(CPU_XSCALE_PXA2X0) || \ defined(CPU_FA526) || defined(CPU_MV_PJ4B) || \ - defined(CPU_XSCALE_81342) || \ defined(CPU_CORTEXA) || defined(CPU_KRAIT) /* Global cache line sizes, use 32 as default */ @@ -589,7 +480,7 @@ get_cachetype_cp15(void) arm_dcache_align_mask = arm_dcache_align - 1; } } -#endif /* ARM9 || XSCALE */ +#endif /* ARM9 */ /* * Cannot panic here as we may not have a console yet ... @@ -697,29 +588,6 @@ set_cpufuncs(void) } #endif /* CPU_FA526 */ -#if defined(CPU_XSCALE_81342) - if (cputype == CPU_ID_81342) { - cpufuncs = xscalec3_cpufuncs; - cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ - get_cachetype_cp15(); - pmap_pte_init_xscale(); - goto out; - } -#endif /* CPU_XSCALE_81342 */ -#ifdef CPU_XSCALE_PXA2X0 - /* ignore core revision to test PXA2xx CPUs */ - if ((cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA250 || - (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA27X || - (cputype & ~CPU_ID_XSCALE_COREREV_MASK) == CPU_ID_PXA210) { - - cpufuncs = xscale_cpufuncs; - cpu_reset_needs_v4_MMU_disable = 1; /* XScale needs it */ - get_cachetype_cp15(); - pmap_pte_init_xscale(); - - goto out; - } -#endif /* CPU_XSCALE_PXA2X0 */ /* * Bzzzz. And the answer was ... 
*/ @@ -932,71 +800,3 @@ fa526_setup(void) cpu_control(0xffffffff, cpuctrl); } #endif /* CPU_FA526 */ - -#if defined(CPU_XSCALE_PXA2X0) || defined(CPU_XSCALE_81342) -void -xscale_setup(void) -{ - uint32_t auxctl; - int cpuctrl, cpuctrlmask; - - /* - * The XScale Write Buffer is always enabled. Our option - * is to enable/disable coalescing. Note that bits 6:3 - * must always be enabled. - */ - - cpuctrl = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE - | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE - | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE - | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_LABT_ENABLE - | CPU_CONTROL_BPRD_ENABLE; - cpuctrlmask = CPU_CONTROL_MMU_ENABLE | CPU_CONTROL_32BP_ENABLE - | CPU_CONTROL_32BD_ENABLE | CPU_CONTROL_SYST_ENABLE - | CPU_CONTROL_IC_ENABLE | CPU_CONTROL_DC_ENABLE - | CPU_CONTROL_WBUF_ENABLE | CPU_CONTROL_ROM_ENABLE - | CPU_CONTROL_BEND_ENABLE | CPU_CONTROL_AFLT_ENABLE - | CPU_CONTROL_LABT_ENABLE | CPU_CONTROL_BPRD_ENABLE - | CPU_CONTROL_CPCLK | CPU_CONTROL_VECRELOC | \ - CPU_CONTROL_L2_ENABLE; - -#ifndef ARM32_DISABLE_ALIGNMENT_FAULTS - cpuctrl |= CPU_CONTROL_AFLT_ENABLE; -#endif - -#ifdef __ARMEB__ - cpuctrl |= CPU_CONTROL_BEND_ENABLE; -#endif - - if (vector_page == ARM_VECTORS_HIGH) - cpuctrl |= CPU_CONTROL_VECRELOC; -#ifdef CPU_XSCALE_CORE3 - cpuctrl |= CPU_CONTROL_L2_ENABLE; -#endif - - /* Clear out the cache */ - cpu_idcache_wbinv_all(); - - /* - * Set the control register. Note that bits 6:3 must always - * be set to 1. - */ -/* cpu_control(cpuctrlmask, cpuctrl);*/ - cpu_control(0xffffffff, cpuctrl); - - /* Make sure write coalescing is turned on */ - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" - : "=r" (auxctl)); -#ifdef XSCALE_NO_COALESCE_WRITES - auxctl |= XSCALE_AUXCTL_K; -#else - auxctl &= ~XSCALE_AUXCTL_K; -#endif -#ifdef CPU_XSCALE_CORE3 - auxctl |= XSCALE_AUXCTL_LLR; - auxctl |= XSCALE_AUXCTL_MD_MASK; -#endif - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" - : : "r" (auxctl)); -} -#endif /* CPU_XSCALE_PXA2X0 */ Modified: head/sys/arm/arm/dump_machdep.c ============================================================================== --- head/sys/arm/arm/dump_machdep.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/dump_machdep.c Fri Jul 27 18:33:09 2018 (r336773) @@ -63,9 +63,6 @@ dumpsys_wbinv_all(void) * part of stopping. 
*/ dcache_wbinv_poc_all(); -#ifdef __XSCALE__ - xscale_cache_clean_minidata(); -#endif } void Modified: head/sys/arm/arm/elf_trampoline.c ============================================================================== --- head/sys/arm/arm/elf_trampoline.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/elf_trampoline.c Fri Jul 27 18:33:09 2018 (r336773) @@ -70,16 +70,6 @@ extern void fa526_idcache_wbinv_all(void); #elif defined(CPU_ARM9E) #define cpu_idcache_wbinv_all armv5_ec_idcache_wbinv_all extern void armv5_ec_idcache_wbinv_all(void); -#elif defined(CPU_XSCALE_PXA2X0) -#define cpu_idcache_wbinv_all xscale_cache_purgeID -extern void xscale_cache_purgeID(void); -#elif defined(CPU_XSCALE_81342) -#define cpu_idcache_wbinv_all xscalec3_cache_purgeID -extern void xscalec3_cache_purgeID(void); -#endif -#ifdef CPU_XSCALE_81342 -#define cpu_l2cache_wbinv_all xscalec3_l2cache_purge -extern void xscalec3_l2cache_purge(void); #elif defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) #define cpu_l2cache_wbinv_all sheeva_l2cache_wbinv_all extern void sheeva_l2cache_wbinv_all(void); Modified: head/sys/arm/arm/exception.S ============================================================================== --- head/sys/arm/arm/exception.S Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/exception.S Fri Jul 27 18:33:09 2018 (r336773) @@ -321,10 +321,6 @@ END(exception_exit) * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(prefetch_abort_entry) -#ifdef __XSCALE__ - nop /* Make absolutely sure any pending */ - nop /* imprecise aborts have occurred. */ -#endif sub lr, lr, #4 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. */ adr lr, exception_exit /* Return from handler via standard */ @@ -341,10 +337,6 @@ END(prefetch_abort_entry) * on exit (without transitioning back through the abort mode stack). */ ASENTRY_NP(data_abort_entry) -#ifdef __XSCALE__ - nop /* Make absolutely sure any pending */ - nop /* imprecise aborts have occurred. */ -#endif sub lr, lr, #8 /* Adjust the lr. Transition to scv32 */ PUSHFRAMEINSVC /* mode stack, build trapframe there. 
*/ adr lr, exception_exit /* Exception exit routine */ Modified: head/sys/arm/arm/identcpu-v4.c ============================================================================== --- head/sys/arm/arm/identcpu-v4.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/identcpu-v4.c Fri Jul 27 18:33:09 2018 (r336773) @@ -65,60 +65,6 @@ static const char * const generic_steppings[16] = { "rev 12", "rev 13", "rev 14", "rev 15", }; -static const char * const xscale_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step C-0", - "step D-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80219_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i80321_steppings[16] = { - "step A-0", "step B-0", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -static const char * const i81342_steppings[16] = { - "step A-0", "rev 1", "rev 2", "rev 3", - "rev 4", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA2[15]0 */ -static const char * const pxa2x0_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step B-2", "step C-0", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Steppings for PXA255/26x. - * rev 5: PXA26x B0, rev 6: PXA255 A0 - */ -static const char * const pxa255_steppings[16] = { - "rev 0", "rev 1", "rev 2", "step A-0", - "rev 4", "step B-0", "step A-0", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - -/* Stepping for PXA27x */ -static const char * const pxa27x_steppings[16] = { - "step A-0", "step A-1", "step B-0", "step B-1", - "step C-0", "rev 5", "rev 6", "rev 7", - "rev 8", "rev 9", "rev 10", "rev 11", - "rev 12", "rev 13", "rev 14", "rev 15", -}; - struct cpuidtab { u_int32_t cpuid; enum cpu_class cpu_class; @@ -158,41 +104,6 @@ const struct cpuidtab cpuids[] = { { CPU_ID_ARM1026EJS, CPU_CLASS_ARM10EJ, "ARM1026EJ-S", generic_steppings }, - { CPU_ID_80200, CPU_CLASS_XSCALE, "i80200", - xscale_steppings }, - - { CPU_ID_80321_400, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { CPU_ID_80321_600, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - { CPU_ID_80321_400_B0, CPU_CLASS_XSCALE, "i80321 400MHz", - i80321_steppings }, - { CPU_ID_80321_600_B0, CPU_CLASS_XSCALE, "i80321 600MHz", - i80321_steppings }, - - { CPU_ID_81342, CPU_CLASS_XSCALE, "i81342", - i81342_steppings }, - - { CPU_ID_80219_400, CPU_CLASS_XSCALE, "i80219 400MHz", - i80219_steppings }, - { CPU_ID_80219_600, CPU_CLASS_XSCALE, "i80219 600MHz", - i80219_steppings }, - - { CPU_ID_PXA27X, CPU_CLASS_XSCALE, "PXA27x", - pxa27x_steppings }, - { CPU_ID_PXA250A, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210A, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250B, CPU_CLASS_XSCALE, "PXA250", - pxa2x0_steppings }, - { CPU_ID_PXA210B, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_PXA250C, CPU_CLASS_XSCALE, "PXA255", - pxa255_steppings }, - { CPU_ID_PXA210C, CPU_CLASS_XSCALE, "PXA210", - pxa2x0_steppings }, - { CPU_ID_MV88FR131, CPU_CLASS_MARVELL, "Feroceon 88FR131", generic_steppings }, @@ -214,7 +125,6 @@ 
const struct cpu_classtab cpu_classes[] = { { "ARM9EJ-S", "CPU_ARM9E" }, /* CPU_CLASS_ARM9EJS */ { "ARM10E", "CPU_ARM10" }, /* CPU_CLASS_ARM10E */ { "ARM10EJ", "CPU_ARM10" }, /* CPU_CLASS_ARM10EJ */ - { "XScale", "CPU_XSCALE_..." }, /* CPU_CLASS_XSCALE */ { "Marvell", "CPU_MARVELL" }, /* CPU_CLASS_MARVELL */ }; @@ -313,13 +223,9 @@ identify_arm_cpu(void) case CPU_CLASS_ARM9EJS: case CPU_CLASS_ARM10E: case CPU_CLASS_ARM10EJ: - case CPU_CLASS_XSCALE: case CPU_CLASS_MARVELL: print_enadis(ctrl & CPU_CONTROL_DC_ENABLE, "DC"); print_enadis(ctrl & CPU_CONTROL_IC_ENABLE, "IC"); -#ifdef CPU_XSCALE_81342 - print_enadis(ctrl & CPU_CONTROL_L2_ENABLE, "L2"); -#endif #if defined(SOC_MV_KIRKWOOD) || defined(SOC_MV_DISCOVERY) i = sheeva_control_ext(0, 0); print_enadis(i & MV_WA_ENABLE, "WA"); Modified: head/sys/arm/arm/pmap-v4.c ============================================================================== --- head/sys/arm/arm/pmap-v4.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/pmap-v4.c Fri Jul 27 18:33:09 2018 (r336773) @@ -406,10 +406,6 @@ static struct rwlock pvh_global_lock; void pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offset_t a_offs, vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#if ARM_MMU_XSCALE == 1 -void pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt); -#endif /* * This list exists for the benefit of pmap_map_chunk(). It keeps track @@ -501,177 +497,7 @@ pmap_pte_init_generic(void) #endif /* ARM_MMU_GENERIC != 0 */ -#if ARM_MMU_XSCALE == 1 -#if (ARM_NMMUS > 1) || defined (CPU_XSCALE_CORE3) -static u_int xscale_use_minidata; -#endif - -void -pmap_pte_init_xscale(void) -{ - uint32_t auxctl; - int write_through = 0; - - pte_l1_s_cache_mode = L1_S_B|L1_S_C|L1_S_XSCALE_P; - pte_l1_s_cache_mask = L1_S_CACHE_MASK_xscale; - - pte_l2_l_cache_mode = L2_B|L2_C; - pte_l2_l_cache_mask = L2_L_CACHE_MASK_xscale; - - pte_l2_s_cache_mode = L2_B|L2_C; - pte_l2_s_cache_mask = L2_S_CACHE_MASK_xscale; - - pte_l1_s_cache_mode_pt = L1_S_C; - pte_l2_l_cache_mode_pt = L2_C; - pte_l2_s_cache_mode_pt = L2_C; -#ifdef XSCALE_CACHE_READ_WRITE_ALLOCATE - /* - * The XScale core has an enhanced mode where writes that - * miss the cache cause a cache line to be allocated. This - * is significantly faster than the traditional, write-through - * behavior of this case. - */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_X); - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_X); - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_X); -#endif /* XSCALE_CACHE_READ_WRITE_ALLOCATE */ -#ifdef XSCALE_CACHE_WRITE_THROUGH - /* - * Some versions of the XScale core have various bugs in - * their cache units, the work-around for which is to run - * the cache in write-through mode. Unfortunately, this - * has a major (negative) impact on performance. So, we - * go ahead and run fast-and-loose, in the hopes that we - * don't line up the planets in a way that will trip the - * bugs. - * - * However, we give you the option to be slow-but-correct. - */ - write_through = 1; -#elif defined(XSCALE_CACHE_WRITE_BACK) - /* force write back cache mode */ - write_through = 0; -#elif defined(CPU_XSCALE_PXA2X0) - /* - * Intel PXA2[15]0 processors are known to have a bug in - * write-back cache on revision 4 and earlier (stepping - * A[01] and B[012]). Fixed for C0 and later. 
- */ - { - uint32_t id, type; - - id = cpu_ident(); - type = id & ~(CPU_ID_XSCALE_COREREV_MASK|CPU_ID_REVISION_MASK); - - if (type == CPU_ID_PXA250 || type == CPU_ID_PXA210) { - if ((id & CPU_ID_REVISION_MASK) < 5) { - /* write through for stepping A0-1 and B0-2 */ - write_through = 1; - } - } - } -#endif /* XSCALE_CACHE_WRITE_THROUGH */ - - if (write_through) { - pte_l1_s_cache_mode = L1_S_C; - pte_l2_l_cache_mode = L2_C; - pte_l2_s_cache_mode = L2_C; - } - -#if (ARM_NMMUS > 1) - xscale_use_minidata = 1; -#endif - - pte_l2_s_prot_u = L2_S_PROT_U_xscale; - pte_l2_s_prot_w = L2_S_PROT_W_xscale; - pte_l2_s_prot_mask = L2_S_PROT_MASK_xscale; - - pte_l1_s_proto = L1_S_PROTO_xscale; - pte_l1_c_proto = L1_C_PROTO_xscale; - pte_l2_s_proto = L2_S_PROTO_xscale; - -#ifdef CPU_XSCALE_CORE3 - pmap_copy_page_func = pmap_copy_page_generic; - pmap_copy_page_offs_func = pmap_copy_page_offs_generic; - pmap_zero_page_func = pmap_zero_page_generic; - xscale_use_minidata = 0; - /* Make sure it is L2-cachable */ - pte_l1_s_cache_mode |= L1_S_XSCALE_TEX(TEX_XSCALE_T); - pte_l1_s_cache_mode_pt = pte_l1_s_cache_mode &~ L1_S_XSCALE_P; - pte_l2_l_cache_mode |= L2_XSCALE_L_TEX(TEX_XSCALE_T) ; - pte_l2_l_cache_mode_pt = pte_l1_s_cache_mode; - pte_l2_s_cache_mode |= L2_XSCALE_T_TEX(TEX_XSCALE_T); - pte_l2_s_cache_mode_pt = pte_l2_s_cache_mode; - -#else - pmap_copy_page_func = pmap_copy_page_xscale; - pmap_copy_page_offs_func = pmap_copy_page_offs_xscale; - pmap_zero_page_func = pmap_zero_page_xscale; -#endif - - /* - * Disable ECC protection of page table access, for now. - */ - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl &= ~XSCALE_AUXCTL_P; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} - /* - * xscale_setup_minidata: - * - * Set up the mini-data cache clean area. We require the - * caller to allocate the right amount of physically and - * virtually contiguous space. - */ -extern vm_offset_t xscale_minidata_clean_addr; -extern vm_size_t xscale_minidata_clean_size; /* already initialized */ -void -xscale_setup_minidata(vm_offset_t l1pt, vm_offset_t va, vm_paddr_t pa) -{ - pd_entry_t *pde = (pd_entry_t *) l1pt; - pt_entry_t *pte; - vm_size_t size; - uint32_t auxctl; - - xscale_minidata_clean_addr = va; - - /* Round it to page size. */ - size = (xscale_minidata_clean_size + L2_S_OFFSET) & L2_S_FRAME; - - for (; size != 0; - va += L2_S_SIZE, pa += L2_S_SIZE, size -= L2_S_SIZE) { - pte = (pt_entry_t *) kernel_pt_lookup( - pde[L1_IDX(va)] & L1_C_ADDR_MASK); - if (pte == NULL) - panic("xscale_setup_minidata: can't find L2 table for " - "VA 0x%08x", (u_int32_t) va); - pte[l2pte_index(va)] = - L2_S_PROTO | pa | L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - } - - /* - * Configure the mini-data cache for write-back with - * read/write-allocate. - * - * NOTE: In order to reconfigure the mini-data cache, we must - * make sure it contains no valid data! In order to do that, - * we must issue a global data cache invalidate command! - * - * WE ASSUME WE ARE RUNNING UN-CACHED WHEN THIS ROUTINE IS CALLED! - * THIS IS VERY IMPORTANT! - */ - - /* Invalidate data and mini-data. */ - __asm __volatile("mcr p15, 0, %0, c7, c6, 0" : : "r" (0)); - __asm __volatile("mrc p15, 0, %0, c1, c0, 1" : "=r" (auxctl)); - auxctl = (auxctl & ~XSCALE_AUXCTL_MD_MASK) | XSCALE_AUXCTL_MD_WB_RWA; - __asm __volatile("mcr p15, 0, %0, c1, c0, 1" : : "r" (auxctl)); -} -#endif - -/* * Allocate an L1 translation table for the specified pmap. * This is called at pmap creation time. 
*/ @@ -3955,7 +3781,7 @@ pmap_remove(pmap_t pm, vm_offset_t sva, vm_offset_t ev * StrongARM accesses to non-cached pages are non-burst making writing * _any_ bulk data very slow. */ -#if ARM_MMU_GENERIC != 0 || defined(CPU_XSCALE_CORE3) +#if ARM_MMU_GENERIC != 0 void pmap_zero_page_generic(vm_paddr_t phys, int off, int size) { @@ -3984,78 +3810,7 @@ pmap_zero_page_generic(vm_paddr_t phys, int off, int s } #endif /* ARM_MMU_GENERIC != 0 */ -#if ARM_MMU_XSCALE == 1 -void -pmap_zero_page_xscale(vm_paddr_t phys, int off, int size) -{ - - if (_arm_bzero && size >= _min_bzero_size && - _arm_bzero((void *)(phys + off), size, IS_PHYSICAL) == 0) - return; - - mtx_lock(&cmtx); - /* - * Hook in the page, zero it, and purge the cache for that - * zeroed page. Invalidate the TLB as needed. - */ - *cdst_pte = L2_S_PROTO | phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - if (off || size != PAGE_SIZE) - bzero((void *)(cdstp + off), size); - else - bzero_page(cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - /* - * Change the PTEs for the specified kernel mappings such that they - * will use the mini data cache instead of the main data cache. - */ -void -pmap_use_minicache(vm_offset_t va, vm_size_t size) -{ - struct l2_bucket *l2b; - pt_entry_t *ptep, *sptep, pte; - vm_offset_t next_bucket, eva; - -#if (ARM_NMMUS > 1) || defined(CPU_XSCALE_CORE3) - if (xscale_use_minidata == 0) - return; -#endif - - eva = va + size; - - while (va < eva) { - next_bucket = L2_NEXT_BUCKET(va); - if (next_bucket > eva) - next_bucket = eva; - - l2b = pmap_get_l2_bucket(kernel_pmap, va); - - sptep = ptep = &l2b->l2b_kva[l2pte_index(va)]; - - while (va < next_bucket) { - pte = *ptep; - if (!l2pte_minidata(pte)) { - cpu_dcache_wbinv_range(va, PAGE_SIZE); - cpu_tlb_flushD_SE(va); - *ptep = pte & ~L2_B; - } - ptep++; - va += PAGE_SIZE; - } - PTE_SYNC_RANGE(sptep, (u_int)(ptep - sptep)); - } - cpu_cpwait(); -} -#endif /* ARM_MMU_XSCALE == 1 */ - -/* * pmap_zero_page zeros the specified hardware page by mapping * the page into KVM and using bzero to clear its contents. */ @@ -4185,7 +3940,7 @@ pmap_clean_page(struct pv_entry *pv, boolean_t is_src) * hook points. The same comment regarding cachability as in * pmap_zero_page also applies here. */ -#if ARM_MMU_GENERIC != 0 || defined (CPU_XSCALE_CORE3) +#if ARM_MMU_GENERIC != 0 void pmap_copy_page_generic(vm_paddr_t src, vm_paddr_t dst) { @@ -4251,72 +4006,6 @@ pmap_copy_page_offs_generic(vm_paddr_t a_phys, vm_offs cpu_l2cache_wbinv_range(cdstp + b_offs, cnt); } #endif /* ARM_MMU_GENERIC != 0 */ - -#if ARM_MMU_XSCALE == 1 -void -pmap_copy_page_xscale(vm_paddr_t src, vm_paddr_t dst) -{ -#if 0 - /* XXX: Only needed for pmap_clean_page(), which is commented out. */ - struct vm_page *src_pg = PHYS_TO_VM_PAGE(src); -#endif - - /* - * Clean the source page. Hold the source page's lock for - * the duration of the copy so that no other mappings can - * be created while we have a potentially aliased mapping. - */ -#if 0 - /* - * XXX: Not needed while we call cpu_dcache_wbinv_all() in - * pmap_copy_page(). - */ - (void) pmap_clean_page(TAILQ_FIRST(&src_pg->md.pv_list), TRUE); -#endif - /* - * Map the pages into the page hook points, copy them, and purge - * the cache for the appropriate page. Invalidate the TLB - * as required. 
- */ - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | src | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | dst | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); /* mini-data */ - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy_page(csrcp, cdstp); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} - -void -pmap_copy_page_offs_xscale(vm_paddr_t a_phys, vm_offset_t a_offs, - vm_paddr_t b_phys, vm_offset_t b_offs, int cnt) -{ - - mtx_lock(&cmtx); - *csrc_pte = L2_S_PROTO | a_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_READ) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(csrc_pte); - *cdst_pte = L2_S_PROTO | b_phys | - L2_S_PROT(PTE_KERNEL, VM_PROT_WRITE) | - L2_C | L2_XSCALE_T_TEX(TEX_XSCALE_X); - PTE_SYNC(cdst_pte); - cpu_tlb_flushD_SE(csrcp); - cpu_tlb_flushD_SE(cdstp); - cpu_cpwait(); - bcopy((char *)csrcp + a_offs, (char *)cdstp + b_offs, cnt); - mtx_unlock(&cmtx); - xscale_cache_clean_minidata(); -} -#endif /* ARM_MMU_XSCALE == 1 */ void pmap_copy_page(vm_page_t src, vm_page_t dst) Modified: head/sys/arm/arm/trap-v4.c ============================================================================== --- head/sys/arm/arm/trap-v4.c Fri Jul 27 18:31:30 2018 (r336772) +++ head/sys/arm/arm/trap-v4.c Fri Jul 27 18:33:09 2018 (r336773) @@ -514,13 +514,6 @@ dab_align(struct trapframe *tf, u_int fsr, u_int far, * If pcb_onfault is set, flag the fault and return to the handler. * If the fault occurred in user mode, give the process a SIGBUS. * - * Note: On XScale, FAULT_BUSERR_0, FAULT_BUSERR_1, and FAULT_BUSERR_2 - * can be flagged as imprecise in the FSR. This causes a real headache - * since some of the machine state is lost. In this case, tf->tf_pc - * may not actually point to the offending instruction. In fact, if - * we've taken a double abort fault, it generally points somewhere near - * the top of "data_abort_entry" in exception.S. - * * In all other cases, these data aborts are considered fatal. */ static int @@ -528,52 +521,6 @@ dab_buserr(struct trapframe *tf, u_int fsr, u_int far, struct ksig *ksig) { struct pcb *pcb = td->td_pcb; - -#ifdef __XSCALE__ - if ((fsr & FAULT_IMPRECISE) != 0 && - (tf->tf_spsr & PSR_MODE) == PSR_ABT32_MODE) { - /* - * Oops, an imprecise, double abort fault. We've lost the - * r14_abt/spsr_abt values corresponding to the original - * abort, and the spsr saved in the trapframe indicates - * ABT mode. - */ - tf->tf_spsr &= ~PSR_MODE; - - /* - * We use a simple heuristic to determine if the double abort *** DIFF OUTPUT TRUNCATED AT 1000 LINES *** _______________________________________________ svn-src-head@freebsd.org mailing list https://lists.freebsd.org/mailman/listinfo/svn-src-head To unsubscribe, send any mail to "svn-src-head-unsubscr...@freebsd.org"