Module Name:    src
Committed By:   matt
Date:           Sat Feb 27 07:58:53 UTC 2010

Modified Files:
        src/sys/arch/mips/conf [matt-nb5-mips64]: files.mips
        src/sys/arch/mips/include [matt-nb5-mips64]: cpu.h locore.h pmap.h
        src/sys/arch/mips/mips [matt-nb5-mips64]: mipsX_subr.S mips_machdep.c
            pmap.c pmap_tlb.c
Added Files:
        src/sys/arch/mips/mips [matt-nb5-mips64]: mips_fixup.c

Log Message:
Add mipsXX_tlb_enter, which sets or modifies a specific TLB entry with a
new mapping (useful for wired TLB entries).
Add mips_fixup_exceptions, which walks the exception vectors and allows
any cpu_info references found there to be rewritten into a more
MP-friendly form.
Define a common fixup method that uses a wired TLB entry at -PAGE_SIZE,
allowing direct loads via a negative offset from the zero register.
Rename the variable pmap_tlb_info to pmap_tlb0_info.
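
For illustration, a minimal sketch of how the new entry points fit
together.  The function below is hypothetical and not part of this commit;
the real callers live in the MD start-up code, and the headers are assumed
to provide the declarations used:

#include <sys/param.h>
#include <mips/locore.h>
#include <mips/mips3_pte.h>

static void
example_wire_and_fixup(size_t slot, vaddr_t va, paddr_t pa)
{
	/* Set (or overwrite) one specific TLB entry -- handy for wired slots. */
	const uint32_t pte = MIPS3_PG_G | MIPS3_PG_V | MIPS3_PG_D |
	    mips3_paddr_to_tlbpfn(pa);
	tlb_enter(slot, va, pte);

	/*
	 * Walk the exception vectors; the common callback rewrites each
	 * lui/load pair referencing cpu_info_store into a single load with
	 * a negative offset from $zero, resolved through a wired mapping
	 * it enters at -PAGE_SIZE.
	 */
	(void)mips_fixup_exceptions(mips_fixup_zero_relative);
}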


To generate a diff of this commit:
cvs rdiff -u -r1.58.24.8 -r1.58.24.9 src/sys/arch/mips/conf/files.mips
cvs rdiff -u -r1.90.16.22 -r1.90.16.23 src/sys/arch/mips/include/cpu.h
cvs rdiff -u -r1.78.36.1.2.15 -r1.78.36.1.2.16 \
    src/sys/arch/mips/include/locore.h
cvs rdiff -u -r1.54.26.10 -r1.54.26.11 src/sys/arch/mips/include/pmap.h
cvs rdiff -u -r1.26.36.1.2.26 -r1.26.36.1.2.27 \
    src/sys/arch/mips/mips/mipsX_subr.S
cvs rdiff -u -r0 -r1.1.2.1 src/sys/arch/mips/mips/mips_fixup.c
cvs rdiff -u -r1.205.4.1.2.1.2.36 -r1.205.4.1.2.1.2.37 \
    src/sys/arch/mips/mips/mips_machdep.c
cvs rdiff -u -r1.179.16.18 -r1.179.16.19 src/sys/arch/mips/mips/pmap.c
cvs rdiff -u -r1.1.2.4 -r1.1.2.5 src/sys/arch/mips/mips/pmap_tlb.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Modified files:

Index: src/sys/arch/mips/conf/files.mips
diff -u src/sys/arch/mips/conf/files.mips:1.58.24.8 src/sys/arch/mips/conf/files.mips:1.58.24.9
--- src/sys/arch/mips/conf/files.mips:1.58.24.8	Sat Feb  6 18:18:01 2010
+++ src/sys/arch/mips/conf/files.mips	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-#	$NetBSD: files.mips,v 1.58.24.8 2010/02/06 18:18:01 cliff Exp $
+#	$NetBSD: files.mips,v 1.58.24.9 2010/02/27 07:58:52 matt Exp $
 #
 
 defflag	opt_cputype.h		NOFPU FPEMUL
@@ -39,6 +39,7 @@
 file	arch/mips/mips/pmap_tlb.c
 file	arch/mips/mips/trap.c			# trap handlers
 file	arch/mips/mips/syscall.c		# syscall entries
+file	arch/mips/mips/mips_fixup.c		mips3 | mips4 | mips32 | mips64
 file	arch/mips/mips/mips_machdep.c
 file	arch/mips/mips/mips_softint.c
 file	arch/mips/mips/sig_machdep.c		# signal delivery

Index: src/sys/arch/mips/include/cpu.h
diff -u src/sys/arch/mips/include/cpu.h:1.90.16.22 src/sys/arch/mips/include/cpu.h:1.90.16.23
--- src/sys/arch/mips/include/cpu.h:1.90.16.22	Thu Feb 25 05:24:53 2010
+++ src/sys/arch/mips/include/cpu.h	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: cpu.h,v 1.90.16.22 2010/02/25 05:24:53 matt Exp $	*/
+/*	$NetBSD: cpu.h,v 1.90.16.23 2010/02/27 07:58:52 matt Exp $	*/
 
 /*-
  * Copyright (c) 1992, 1993
@@ -144,6 +144,7 @@
 	 * Per-cpu pmap information
 	 */
 	uint32_t ci_ksp_tlb_slot;	/* reserved tlb entry for kernel stack */
+	int ci_tlb_slot;		/* reserved tlb entry for cpu_info */
 	struct pmap_tlb_info *ci_tlb_info;
 	struct segtab *ci_pmap_segbase;
 	vaddr_t ci_pmap_srcbase;	/* starting VA of ephemeral src space */

Index: src/sys/arch/mips/include/locore.h
diff -u src/sys/arch/mips/include/locore.h:1.78.36.1.2.15 src/sys/arch/mips/include/locore.h:1.78.36.1.2.16
--- src/sys/arch/mips/include/locore.h:1.78.36.1.2.15	Thu Feb 25 05:45:12 2010
+++ src/sys/arch/mips/include/locore.h	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/* $NetBSD: locore.h,v 1.78.36.1.2.15 2010/02/25 05:45:12 matt Exp $ */
+/* $NetBSD: locore.h,v 1.78.36.1.2.16 2010/02/27 07:58:52 matt Exp $ */
 
 /*
  * This file should not be included by MI code!!!
@@ -42,8 +42,13 @@
 uint32_t mips_cp0_status_read(void);
 void	mips_cp0_status_write(uint32_t);
 
-void softint_process(uint32_t);
-void softint_fast_dispatch(struct lwp *, int);
+void	softint_process(uint32_t);
+void	softint_fast_dispatch(struct lwp *, int);
+
+typedef bool (*mips_fixup_callback_t)(int32_t, uint32_t [2]);
+ 
+bool	mips_fixup_exceptions(mips_fixup_callback_t);
+bool	mips_fixup_zero_relative(int32_t, uint32_t [2]);
 
 #ifdef MIPS1
 void	mips1_tlb_set_asid(uint32_t);
@@ -53,11 +58,12 @@
 void	mips1_tlb_invalidate_addr(vaddr_t);
 u_int	mips1_tlb_record_asids(u_long *, uint32_t);
 int	mips1_tlb_update(vaddr_t, uint32_t);
+void	mips1_tlb_enter(size_t, vaddr_t, uint32_t);
 void	mips1_tlb_read_indexed(size_t, struct tlbmask *);
 void	mips1_wbflush(void);
 void	mips1_lwp_trampoline(void);
 void	mips1_setfunc_trampoline(void);
-void	mips1_cpu_switch_resume(void);
+void	mips1_cpu_switch_resume(struct lwp *);
 
 uint32_t tx3900_cp0_config_read(void);
 #endif
@@ -70,12 +76,13 @@
 void	mips3_tlb_invalidate_addr(vaddr_t);
 u_int	mips3_tlb_record_asids(u_long *, uint32_t);
 int	mips3_tlb_update(vaddr_t, uint32_t);
+void	mips3_tlb_enter(size_t, vaddr_t, uint32_t);
 void	mips3_tlb_read_indexed(size_t, struct tlbmask *);
 void	mips3_tlb_write_indexed_VPS(size_t, struct tlbmask *);
 void	mips3_wbflush(void);
 void	mips3_lwp_trampoline(void);
 void	mips3_setfunc_trampoline(void);
-void	mips3_cpu_switch_resume(void);
+void	mips3_cpu_switch_resume(struct lwp *);
 void	mips3_pagezero(void *dst);
 
 #ifdef MIPS3_5900
@@ -86,12 +93,13 @@
 void	mips5900_tlb_invalidate_addr(vaddr_t);
 u_int	mips5900_tlb_record_asids(u_long *, uint32_t);
 int	mips5900_tlb_update(vaddr_t, uint32_t);
+void	mips5900_tlb_enter(size_t, vaddr_t, uint32_t);
 void	mips5900_tlb_read_indexed(size_t, struct tlbmask *);
 void	mips5900_tlb_write_indexed_VPS(size_t, struct tlbmask *);
 void	mips5900_wbflush(void);
 void	mips5900_lwp_trampoline(void);
 void	mips5900_setfunc_trampoline(void);
-void	mips5900_cpu_switch_resume(void);
+void	mips5900_cpu_switch_resume(struct lwp *);
 void	mips5900_pagezero(void *dst);
 #endif
 #endif
@@ -104,12 +112,13 @@
 void	mips32_tlb_invalidate_addr(vaddr_t);
 u_int	mips32_tlb_record_asids(u_long *, uint32_t);
 int	mips32_tlb_update(vaddr_t, uint32_t);
+void	mips32_tlb_enter(size_t, vaddr_t, uint32_t);
 void	mips32_tlb_read_indexed(size_t, struct tlbmask *);
 void	mips32_tlb_write_indexed_VPS(size_t, struct tlbmask *);
 void	mips32_wbflush(void);
 void	mips32_lwp_trampoline(void);
 void	mips32_setfunc_trampoline(void);
-void	mips32_cpu_switch_resume(void);
+void	mips32_cpu_switch_resume(struct lwp *);
 #endif
 
 #ifdef MIPS64
@@ -120,12 +129,13 @@
 void	mips64_tlb_invalidate_addr(vaddr_t);
 u_int	mips64_tlb_record_asids(u_long *, uint32_t);
 int	mips64_tlb_update(vaddr_t, uint32_t);
+void	mips64_tlb_enter(size_t, vaddr_t, uint32_t);
 void	mips64_tlb_read_indexed(size_t, struct tlbmask *);
 void	mips64_tlb_write_indexed_VPS(size_t, struct tlbmask *);
 void	mips64_wbflush(void);
 void	mips64_lwp_trampoline(void);
 void	mips64_setfunc_trampoline(void);
-void	mips64_cpu_switch_resume(void);
+void	mips64_cpu_switch_resume(struct lwp *);
 void	mips64_pagezero(void *dst);
 #endif
 
@@ -295,15 +305,16 @@
  * locore function, and macros which jump through it.
  */
 typedef struct  {
-	void (*ljv_tlb_set_asid)(uint32_t pid);
-	void (*ljv_tlb_invalidate_asids)(uint32_t, uint32_t);
-	void (*ljv_tlb_invalidate_addr)(vaddr_t);
-	void (*ljv_tlb_invalidate_globals)(void);
-	void (*ljv_tlb_invalidate_all)(void);
-	u_int (*ljv_tlb_record_asids)(u_long *, uint32_t);
-	int  (*ljv_tlb_update)(vaddr_t, uint32_t);
-	void (*ljv_tlb_read_indexed)(size_t, struct tlbmask *);
-	void (*ljv_wbflush)(void);
+	void	(*ljv_tlb_set_asid)(uint32_t pid);
+	void	(*ljv_tlb_invalidate_asids)(uint32_t, uint32_t);
+	void	(*ljv_tlb_invalidate_addr)(vaddr_t);
+	void	(*ljv_tlb_invalidate_globals)(void);
+	void	(*ljv_tlb_invalidate_all)(void);
+	u_int	(*ljv_tlb_record_asids)(u_long *, uint32_t);
+	int	(*ljv_tlb_update)(vaddr_t, uint32_t);
+	void	(*ljv_tlb_enter)(size_t, vaddr_t, uint32_t);
+	void	(*ljv_tlb_read_indexed)(size_t, struct tlbmask *);
+	void	(*ljv_wbflush)(void);
 } mips_locore_jumpvec_t;
 
 void	mips_set_wbflush(void (*)(void));
@@ -313,13 +324,13 @@
 void	logstacktrace(void);
 
 struct locoresw {
-	uintptr_t lsw_cpu_switch_resume;
-	uintptr_t lsw_lwp_trampoline;
-	void (*lsw_cpu_idle)(void);
-	uintptr_t lsw_setfunc_trampoline;
-	void (*lsw_boot_secondary_processors)(void);
-	int (*lsw_send_ipi)(struct cpu_info *, int);
-	void (*lsw_cpu_offline_md)(void);
+	void		(*lsw_cpu_switch_resume)(struct lwp *);
+	uintptr_t	lsw_lwp_trampoline;
+	void		(*lsw_cpu_idle)(void);
+	uintptr_t	lsw_setfunc_trampoline;
+	void		(*lsw_boot_secondary_processors)(void);
+	int		(*lsw_send_ipi)(struct cpu_info *, int);
+	void		(*lsw_cpu_offline_md)(void);
 };
 
 struct mips_vmfreelist {
@@ -342,6 +353,7 @@
 #define tlb_invalidate_all	mips1_tlb_invalidate_all
 #define tlb_record_asids	mips1_tlb_record_asids
 #define tlb_update		mips1_tlb_update
+#define tlb_enter		mips1_tlb_enter
 #define tlb_read_indexed	mips1_tlb_read_indexed
 #define wbflush			mips1_wbflush
 #define lwp_trampoline		mips1_lwp_trampoline
@@ -354,6 +366,7 @@
 #define tlb_invalidate_all	mips3_tlb_invalidate_all
 #define tlb_record_asids	mips3_tlb_record_asids
 #define tlb_update		mips3_tlb_update
+#define tlb_enter		mips3_tlb_enter
 #define tlb_read_indexed	mips3_tlb_read_indexed
 #define tlb_write_indexed_VPS	mips3_tlb_write_indexed_VPS
 #define lwp_trampoline		mips3_lwp_trampoline
@@ -367,6 +380,7 @@
 #define tlb_invalidate_all	mips32_tlb_invalidate_all
 #define tlb_record_asids	mips32_tlb_record_asids
 #define tlb_update		mips32_tlb_update
+#define tlb_enter		mips32_tlb_enter
 #define tlb_read_indexed	mips32_tlb_read_indexed
 #define tlb_write_indexed_VPS	mips32_tlb_write_indexed_VPS
 #define lwp_trampoline		mips32_lwp_trampoline
@@ -381,6 +395,7 @@
 #define tlb_invalidate_all	mips64_tlb_invalidate_all
 #define tlb_record_asids	mips64_tlb_record_asids
 #define tlb_update		mips64_tlb_update
+#define tlb_enter		mips64_tlb_enter
 #define tlb_read_indexed	mips64_tlb_read_indexed
 #define tlb_write_indexed_VPS	mips64_tlb_write_indexed_VPS
 #define lwp_trampoline		mips64_lwp_trampoline
@@ -394,6 +409,7 @@
 #define tlb_invalidate_all	mips5900_tlb_invalidate_all
 #define tlb_record_asids	mips5900_tlb_record_asids
 #define tlb_update		mips5900_tlb_update
+#define tlb_enter		mips5900_tlb_enter
 #define tlb_read_indexed	mips5900_tlb_read_indexed
 #define tlb_write_indexed_VPS	mips5900_tlb_write_indexed_VPS
 #define lwp_trampoline		mips5900_lwp_trampoline
@@ -407,6 +423,7 @@
 #define tlb_invalidate_all	(*mips_locore_jumpvec.ljv_tlb_invalidate_all)
 #define tlb_record_asids	(*mips_locore_jumpvec.ljv_tlb_record_asids)
 #define tlb_update		(*mips_locore_jumpvec.ljv_tlb_update)
+#define tlb_enter		(*mips_locore_jumpvec.ljv_tlb_enter)
 #define tlb_read_indexed	(*mips_locore_jumpvec.ljv_tlb_read_indexed)
 #define wbflush			(*mips_locore_jumpvec.ljv_wbflush)
 #define lwp_trampoline		mips_locoresw.lsw_lwp_trampoline

Index: src/sys/arch/mips/include/pmap.h
diff -u src/sys/arch/mips/include/pmap.h:1.54.26.10 src/sys/arch/mips/include/pmap.h:1.54.26.11
--- src/sys/arch/mips/include/pmap.h:1.54.26.10	Thu Feb 25 05:53:23 2010
+++ src/sys/arch/mips/include/pmap.h	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.h,v 1.54.26.10 2010/02/25 05:53:23 matt Exp $	*/
+/*	$NetBSD: pmap.h,v 1.54.26.11 2010/02/27 07:58:52 matt Exp $	*/
 
 /*
  * Copyright (c) 1992, 1993
@@ -210,7 +210,7 @@
 };
 
 extern struct pmap_kernel kernel_pmap_store;
-extern struct pmap_tlb_info pmap_tlb_info;
+extern struct pmap_tlb_info pmap_tlb0_info;
 extern paddr_t mips_avail_start;
 extern paddr_t mips_avail_end;
 extern vaddr_t mips_virtual_end;

Index: src/sys/arch/mips/mips/mipsX_subr.S
diff -u src/sys/arch/mips/mips/mipsX_subr.S:1.26.36.1.2.26 src/sys/arch/mips/mips/mipsX_subr.S:1.26.36.1.2.27
--- src/sys/arch/mips/mips/mipsX_subr.S:1.26.36.1.2.26	Thu Feb 25 05:45:12 2010
+++ src/sys/arch/mips/mips/mipsX_subr.S	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: mipsX_subr.S,v 1.26.36.1.2.26 2010/02/25 05:45:12 matt Exp $	*/
+/*	$NetBSD: mipsX_subr.S,v 1.26.36.1.2.27 2010/02/27 07:58:52 matt Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -497,6 +497,7 @@
 #ifndef _LP64
 	nop					#0f
 #endif
+	.p2align 4
 MIPSX(kernelfault):
 	j	_C_LABEL(MIPSX(kernel_tlb_miss)) #10: kernel exception
 	 nop					#11: branch delay slot
@@ -2572,6 +2573,103 @@
 	COP0_SYNC				# XXXX - not executed!!
 END(MIPSX(tlb_invalidate_all))
 
+/*
+ * mipsX_tlb_enter(size_t tlb_index, vaddr_t va, uint32_t pte);
+ */
+LEAF(MIPSX(tlb_enter))
+	.set	noat
+	mfc0	ta0, MIPS_COP_0_TLB_HI		# save EntryHi
+
+	and	a3, a1, MIPS3_PG_ODDPG		# select odd page bit
+	xor	a3, a1				# clear it.
+	mtc0	a3, MIPS_COP_0_TLB_HI		# set the VA for tlbp
+	COP0_SYNC
+	nop
+	nop
+
+	and	t2, a2, MIPS3_PG_G		# make prototype tlb_lo0
+	and	t3, a2, MIPS3_PG_G		# make prototype tlb_lo1
+
+	tlbp					# is va in TLB?
+	nop
+	nop
+	nop
+
+	mfc0	v0, MIPS_COP_0_TLB_INDEX	# was it in the TLB?
+	bltz	v0, 1f				# nope
+	 nop
+
+	/*
+	 * Get the existing tlb_lo's because we need to replace one of them.
+	 */
+	mfc0	t2, MIPS_COP_0_TLB_LO0		# save for update
+	mfc0	t3, MIPS_COP_0_TLB_LO1		# save for update
+
+	/*
+	 * If it's already where we want, no reason to invalidate it.
+	 */
+	beq	v0, a0, 2f			# already where we want it?
+	 nop					
+
+	/*
+	 * Make a fake KSEG0 address so the conflicting entry can be invalidated.
+	 */
+	sll	t1, 1 + PGSHIFT			# make a fake addr for the entry
+	lui	v1, %hi(MIPS_KSEG0_START)
+	or	t1, v1
+	mtc0	t1, MIPS_COP_0_TLB_HI
+	COP0_SYNC
+	nop
+	nop
+
+	and	t0, a2, MIPS3_PG_G		# make prototype tlb_lo
+	mtc0	t0, MIPS_COP_0_TLB_LO0		# use an invalid tlb_lo
+	COP0_SYNC
+	mtc0	t0, MIPS_COP_0_TLB_LO1		# use an invalid tlb_lo
+	COP0_SYNC
+
+	tlbwi					# now write the invalid TLB
+	COP0_SYNC
+
+	mtc0	a3, MIPS_COP_0_TLB_HI		# restore the addr for new TLB
+	COP0_SYNC
+	nop
+	nop
+1:
+	mtc0	a0, MIPS_COP_0_TLB_INDEX	# set the index
+	COP0_SYNC
+
+2:
+	and	v1, a1, MIPS3_PG_ODDPG		# odd or even page
+	sll	v1, 31 - PGSHIFT		# move to MSB
+	sra	v1, 31				# v1 a mask (0/~0 = even/odd)
+	not	v0, v1				# v0 a mask (~0/0 = even/odd)
+
+	and	ta2, t2, v1
+	and	ta3, a2, v0
+	or	t2, ta2, ta3			# t2 = (v1 & t2) | (~v1 & a2)
+	and	ta2, t3, v0
+	and	ta3, a2, v1
+	or	t3, ta2, ta3			# t3 = (~v1 & t3) | (v1 & a2)
+
+	mtc0	t2, MIPS_COP_0_TLB_LO0		# set tlb_lo0 (even)
+	COP0_SYNC
+	mtc0	t3, MIPS_COP_0_TLB_LO1		# set tlb_lo1 (odd)
+	COP0_SYNC
+
+	tlbwi					# enter it into the TLB
+	nop
+	nop
+	nop
+
+	mtc0	ta0, MIPS_COP_0_TLB_HI		# restore EntryHi (saved in ta0)
+	COP0_SYNC
+
+	j	ra
+	 nop
+	.set	at
+END(MIPSX(tlb_enter))
+
 #ifdef USE_64BIT_INSTRUCTIONS
 LEAF(MIPSX(pagezero))
 	li	a1, PAGE_SIZE >> 6
@@ -2595,6 +2693,19 @@
 
 	.rdata
 
+	.globl _C_LABEL(MIPSX(locore_vec))
+_C_LABEL(MIPSX(locore_vec)):
+	PTR_WORD _C_LABEL(MIPSX(tlb_set_asid))
+	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_asids))
+	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_addr))
+	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_globals))
+	PTR_WORD _C_LABEL(MIPSX(tlb_invalidate_all))
+	PTR_WORD _C_LABEL(MIPSX(tlb_record_asids))
+	PTR_WORD _C_LABEL(MIPSX(tlb_update))
+	PTR_WORD _C_LABEL(MIPSX(tlb_enter))
+	PTR_WORD _C_LABEL(MIPSX(tlb_read_indexed))
+	PTR_WORD _C_LABEL(MIPSX(wbflush))
+
 	.globl _C_LABEL(MIPSX(locoresw))
 _C_LABEL(MIPSX(locoresw)):
 	PTR_WORD _C_LABEL(MIPSX(cpu_switch_resume))
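
The branch-free odd/even merge in the new tlb_enter above is easier to
follow in C.  An illustrative rendering of the same logic (the helper and
its name are mine, not from the source; MIPS3_PG_ODDPG selects the
odd-page bit of the VA):

#include <sys/param.h>
#include <mips/mips3_pte.h>

static inline void
example_merge_tlb_lo(vaddr_t va, uint32_t pte, uint32_t *lo0, uint32_t *lo1)
{
	/* mask is ~0 for an odd page, 0 for an even page (sra trick in asm) */
	const uint32_t mask = (va & MIPS3_PG_ODDPG) ? ~0u : 0u;

	*lo0 = (mask & *lo0) | (~mask & pte);	/* even VA: lo0 takes pte */
	*lo1 = (~mask & *lo1) | (mask & pte);	/* odd VA: lo1 takes pte */
}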

Index: src/sys/arch/mips/mips/mips_machdep.c
diff -u src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.36 src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.37
--- src/sys/arch/mips/mips/mips_machdep.c:1.205.4.1.2.1.2.36	Thu Feb 25 05:45:12 2010
+++ src/sys/arch/mips/mips/mips_machdep.c	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.36 2010/02/25 05:45:12 matt Exp $	*/
+/*	$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.37 2010/02/27 07:58:52 matt Exp $	*/
 
 /*
  * Copyright 2002 Wasabi Systems, Inc.
@@ -112,7 +112,7 @@
 
 #include <sys/cdefs.h>			/* RCS ID & Copyright macro defns */
 
-__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.36 2010/02/25 05:45:12 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: mips_machdep.c,v 1.205.4.1.2.1.2.37 2010/02/27 07:58:52 matt Exp $");
 
 #include "opt_cputype.h"
 #include "opt_compat_netbsd32.h"
@@ -138,6 +138,7 @@
 #include <sys/cpu.h>
 #include <sys/atomic.h>
 #include <sys/ucontext.h>
+#include <sys/bitops.h>
 
 #include <mips/kcore.h>
 
@@ -221,12 +222,18 @@
 	.mips_fpu_id = 0xffffffff,
 };
 
-struct cpu_info cpu_info_store = {
+struct cpu_info cpu_info_store
+#ifdef MULTIPROCESSOR
+	__section(".data1")
+	__aligned(1LU << ilog2((2*sizeof(struct cpu_info)-1)))
+#endif
+    = {
 	.ci_curlwp = &lwp0,
 	.ci_fpcurlwp = &lwp0,
-	.ci_tlb_info = &pmap_tlb_info,
+	.ci_tlb_info = &pmap_tlb0_info,
 	.ci_pmap_segbase = (void *)(MIPS_KSEG2_START + 0x1eadbeef),
 	.ci_cpl = IPL_HIGH,
+	.ci_tlb_slot = -1,
 };
 
 struct	user *proc0paddr;
@@ -524,21 +531,11 @@
 /*
  * MIPS-I locore function vector
  */
-static const mips_locore_jumpvec_t mips1_locore_vec = {
-	.ljv_tlb_set_asid		= mips1_tlb_set_asid,
-	.ljv_tlb_invalidate_addr	= mips1_tlb_invalidate_addr,
-	.ljv_tlb_invalidate_all		= mips1_tlb_invalidate_all,
-	.ljv_tlb_invalidate_asids	= mips1_tlb_invalidate_asids,
-	.ljv_tlb_invalidate_globals	= mips1_tlb_invalidate_globals,
-	.ljv_tlb_record_asids		= mips1_tlb_record_asids,
-	.ljv_tlb_update			= mips1_tlb_update,
-	.ljv_tlb_read_indexed		= mips1_tlb_read_indexed,
-	.ljv_wbflush			= mips1_wbflush,
-};
 
 static void
 mips1_vector_init(void)
 {
+	extern const mips_locore_jumpvec_t mips1_locore_vec;
 	extern char mips1_utlb_miss[], mips1_utlb_miss_end[];
 	extern char mips1_exception[], mips1_exception_end[];
 
@@ -569,25 +566,10 @@
 #endif /* MIPS1 */
 
 #if defined(MIPS3)
-#ifndef MIPS3_5900	/* XXX */
-/*
- * MIPS III locore function vector
- */
-static const mips_locore_jumpvec_t mips3_locore_vec = {
-	.ljv_tlb_set_asid		= mips3_tlb_set_asid,
-	.ljv_tlb_invalidate_addr	= mips3_tlb_invalidate_addr,
-	.ljv_tlb_invalidate_all		= mips3_tlb_invalidate_all,
-	.ljv_tlb_invalidate_asids	= mips3_tlb_invalidate_asids,
-	.ljv_tlb_invalidate_globals	= mips3_tlb_invalidate_globals,
-	.ljv_tlb_record_asids		= mips3_tlb_record_asids,
-	.ljv_tlb_update			= mips3_tlb_update,
-	.ljv_tlb_read_indexed		= mips3_tlb_read_indexed,
-	.ljv_wbflush			= mips3_wbflush,
-};
-
 static void
 mips3_vector_init(void)
 {
+	extern const mips_locore_jumpvec_t mips3_locore_vec;
 	/* r4000 exception handler address and end */
 	extern char mips3_exception[], mips3_exception_end[];
 
@@ -629,28 +611,13 @@
 	/* Clear BEV in SR so we start handling our own exceptions */
 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
 }
-#endif /* !MIPS3_5900 */
+#endif /* MIPS3 */
 
 #if defined(MIPS3_5900)	/* XXX */
-/*
- * MIPS R5900 locore function vector.
- * Same as MIPS32 - all MMU registers are 32bit.
- */
-static const mips_locore_jumpvec_t r5900_locore_vec = {
-	.ljv_tlb_set_asid		= mips5900_tlb_set_asid,
-	.ljv_tlb_invalidate_addr	= mips5900_tlb_invalidate_addr,
-	.ljv_tlb_invalidate_all		= mips5900_tlb_invalidate_all,
-	.ljv_tlb_invalidate_asids	= mips5900_tlb_invalidate_asids,
-	.ljv_tlb_invalidate_globals	= mips5900_tlb_invalidate_globals,
-	.ljv_tlb_record_asids		= mips5900_tlb_record_asids,
-	.ljv_tlb_update			= mips5900_tlb_update,
-	.ljv_tlb_read_indexed		= mips5900_tlb_read_indexed,
-	.ljv_wbflush			= mips5900_wbflush,
-};
-
 static void
 r5900_vector_init(void)
 {
+	extern const mips_locore_jumpvec_t r5900_locore_vec;
 	extern char mips5900_exception[], mips5900_exception_end[];
 	extern char mips5900_tlb_miss[], mips5900_tlb_miss_end[];
 	size_t esz = mips5900_exception_end - mips5900_exception;
@@ -681,27 +648,14 @@
 	mips_cp0_status_write(mips_cp0_status_read() & ~MIPS_SR_BEV);
 }
 #endif /* MIPS3_5900 */
-#endif /* MIPS3 */
 
 #if defined(MIPS32)
-/*
- * MIPS32 locore function vector
- */
-static const mips_locore_jumpvec_t mips32_locore_vec = {
-	.ljv_tlb_set_asid		= mips32_tlb_set_asid,
-	.ljv_tlb_invalidate_addr	= mips32_tlb_invalidate_addr,
-	.ljv_tlb_invalidate_all		= mips32_tlb_invalidate_all,
-	.ljv_tlb_invalidate_asids	= mips32_tlb_invalidate_asids,
-	.ljv_tlb_invalidate_globals	= mips32_tlb_invalidate_globals,
-	.ljv_tlb_record_asids		= mips32_tlb_record_asids,
-	.ljv_tlb_update			= mips32_tlb_update,
-	.ljv_tlb_read_indexed		= mips32_tlb_read_indexed,
-	.ljv_wbflush			= mips32_wbflush,
-};
-
 static void
 mips32_vector_init(void)
 {
+	/* MIPS32 locore function vector */
+	extern const mips_locore_jumpvec_t mips32_locore_vec;
+
 	/* r4000 exception handler address */
 	extern char mips32_exception[];
 
@@ -748,24 +702,12 @@
 #endif /* MIPS32 */
 
 #if defined(MIPS64)
-/*
- * MIPS64 locore function vector
- */
-const mips_locore_jumpvec_t mips64_locore_vec = {
-	.ljv_tlb_set_asid		= mips64_tlb_set_asid,
-	.ljv_tlb_invalidate_addr	= mips64_tlb_invalidate_addr,
-	.ljv_tlb_invalidate_all		= mips64_tlb_invalidate_all,
-	.ljv_tlb_invalidate_asids	= mips64_tlb_invalidate_asids,
-	.ljv_tlb_invalidate_globals	= mips64_tlb_invalidate_globals,
-	.ljv_tlb_record_asids		= mips64_tlb_record_asids,
-	.ljv_tlb_update			= mips64_tlb_update,
-	.ljv_tlb_read_indexed		= mips64_tlb_read_indexed,
-	.ljv_wbflush			= mips64_wbflush,
-};
-
 static void
 mips64_vector_init(void)
 {
+	/* MIPS64 locore function vector */
+	extern const mips_locore_jumpvec_t mips64_locore_vec;
+
 	/* r4000 exception handler address */
 	extern char mips64_exception[];
 
@@ -980,7 +922,7 @@
 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_4K);
 		mips3_cp0_wired_write(0);
 		mips5900_tlb_invalidate_all();
-		mips3_cp0_wired_write(pmap_tlb_info.ti_wired);
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 		r5900_vector_init();
 		mips_locoresw = mips5900_locoresw;
 #else /* MIPS3_5900 */
@@ -992,7 +934,7 @@
 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_4K);
 		mips3_cp0_wired_write(0);
 		mips3_tlb_invalidate_all();
-		mips3_cp0_wired_write(pmap_tlb_info.ti_wired);
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 		mips3_vector_init();
 		mips_locoresw = mips3_locoresw;
 #endif /* MIPS3_5900 */
@@ -1004,7 +946,7 @@
 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_4K);
 		mips3_cp0_wired_write(0);
 		mips32_tlb_invalidate_all();
-		mips3_cp0_wired_write(pmap_tlb_info.ti_wired);
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 		mips32_vector_init();
 		mips_locoresw = mips32_locoresw;
 		break;
@@ -1015,7 +957,7 @@
 		mips3_cp0_pg_mask_write(MIPS3_PG_SIZE_4K);
 		mips3_cp0_wired_write(0);
 		mips64_tlb_invalidate_all();
-		mips3_cp0_wired_write(pmap_tlb_info.ti_wired);
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 		mips64_vector_init();
 		mips_locoresw = mips64_locoresw;
 		break;
@@ -2483,8 +2425,8 @@
 void
 cpu_boot_secondary_processors(void)
 {
-	if (pmap_tlb_info.ti_wired != MIPS3_TLB_WIRED_UPAGES)
-		mips3_cp0_wired_write(pmap_tlb_info.ti_wired);
+	if (pmap_tlb0_info.ti_wired != MIPS3_TLB_WIRED_UPAGES)
+		mips3_cp0_wired_write(pmap_tlb0_info.ti_wired);
 
 	(*mips_locoresw.lsw_boot_secondary_processors)();
 }

Index: src/sys/arch/mips/mips/pmap.c
diff -u src/sys/arch/mips/mips/pmap.c:1.179.16.18 src/sys/arch/mips/mips/pmap.c:1.179.16.19
--- src/sys/arch/mips/mips/pmap.c:1.179.16.18	Thu Feb 25 05:53:23 2010
+++ src/sys/arch/mips/mips/pmap.c	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.179.16.18 2010/02/25 05:53:23 matt Exp $	*/
+/*	$NetBSD: pmap.c,v 1.179.16.19 2010/02/27 07:58:52 matt Exp $	*/
 
 /*-
  * Copyright (c) 1998, 2001 The NetBSD Foundation, Inc.
@@ -67,7 +67,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.18 2010/02/25 05:53:23 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.179.16.19 2010/02/27 07:58:52 matt Exp $");
 
 /*
  *	Manages physical address maps.
@@ -464,7 +464,7 @@
 {
 	vsize_t bufsz;
 
-	pmap_tlb_info_init(&pmap_tlb_info);	/* init the lock */
+	pmap_tlb_info_init(&pmap_tlb0_info);		/* init the lock */
 
 	/*
 	 * Compute the number of pages kmem_map will have.

Index: src/sys/arch/mips/mips/pmap_tlb.c
diff -u src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.4 src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.5
--- src/sys/arch/mips/mips/pmap_tlb.c:1.1.2.4	Thu Feb 25 05:53:23 2010
+++ src/sys/arch/mips/mips/pmap_tlb.c	Sat Feb 27 07:58:52 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap_tlb.c,v 1.1.2.4 2010/02/25 05:53:23 matt Exp $	*/
+/*	$NetBSD: pmap_tlb.c,v 1.1.2.5 2010/02/27 07:58:52 matt Exp $	*/
 
 /*-
  * Copyright (c) 2010 The NetBSD Foundation, Inc.
@@ -31,7 +31,7 @@
 
 #include <sys/cdefs.h>
 
-__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.4 2010/02/25 05:53:23 matt Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap_tlb.c,v 1.1.2.5 2010/02/27 07:58:52 matt Exp $");
 
 /*
  * Manages address spaces in a TLB.
@@ -142,14 +142,8 @@
 #include <mips/pte.h>
 
 static kmutex_t pmap_tlb0_mutex __aligned(32);
-#ifdef MULTIPROCESSOR
-static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
-	[0] = &pmap_tlb_info,
-};
-static u_int pmap_ntlbs = 1;
-#endif
 
-struct pmap_tlb_info pmap_tlb_info = {
+struct pmap_tlb_info pmap_tlb0_info = {
 	.ti_asid_hint = 1,
 	.ti_asid_mask = MIPS_TLB_NUM_PIDS - 1,
 	.ti_asid_max = MIPS_TLB_NUM_PIDS - 1,
@@ -164,6 +158,12 @@
 #endif
 };
 
+#ifdef MULTIPROCESSOR
+static struct pmap_tlb_info *pmap_tlbs[MAXCPUS] = {
+	[0] = &pmap_tlb0_info,
+};
+static u_int pmap_ntlbs = 1;
+#endif
 #define	__BITMAP_SET(bm, n) \
 	((bm)[(n) / (8*sizeof(bm[0]))] |= 1LU << ((n) % (8*sizeof(bm[0]))))
 #define	__BITMAP_CLR(bm, n) \
@@ -215,9 +215,24 @@
 pmap_tlb_info_init(struct pmap_tlb_info *ti)
 {
 #ifdef MULTIPROCESSOR
-	if (ti == &pmap_tlb_info) {
+	if (ti == &pmap_tlb0_info) {
+#endif /* MULTIPROCESSOR */
+		KASSERT(ti == &pmap_tlb0_info);
 		mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
+		if (!CPUISMIPSNN) {
+			ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
+			ti->ti_asids_free = ti->ti_asid_max;
+			ti->ti_asid_mask = ti->ti_asid_max;
+			/*
+			 * Now figure out what mask we need to focus on
+			 * asid_max.
+			 */
+			while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) {
+				ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
+			}
+		}
 		return;
+#ifdef MULTIPROCESSOR
 	}
 
 	KASSERT(pmap_tlbs[pmap_ntlbs] == NULL);
@@ -225,29 +240,19 @@
 	ti->ti_lock = mutex_obj_alloc(MUTEX_DEFAULT, IPL_SCHED);
 	ti->ti_asid_bitmap[0] = 1;
 	ti->ti_asid_hint = 1;
-	ti->ti_asid_max = pmap_tlbs[0]->ti_asid_max;
-	ti->ti_asid_mask = pmap_tlbs[0]->ti_asid_mask;
+	ti->ti_asid_max = pmap_tlb0_info.ti_asid_max;
+	ti->ti_asid_mask = pmap_tlb0_info.ti_asid_mask;
 	ti->ti_asids_free = ti->ti_asid_max;
 	ti->ti_tlbinvop = TLBINV_NOBODY,
 	ti->ti_victim = NULL;
 	ti->ti_cpu_mask = 0;
 	ti->ti_index = pmap_ntlbs++;
-	ti->ti_wired = 0;
+	/*
+	 * If we are reserving a tlb slot for mapping cpu_info,
+	 * allocate it now.
+	 */
+	ti->ti_wired = (cpu_info_store.ci_tlb_slot >= 0);
 	pmap_tlbs[ti->ti_index] = ti;
-#else
-	KASSERT(ti == &pmap_tlb_info);
-	mutex_init(ti->ti_lock, MUTEX_DEFAULT, IPL_SCHED);
-	if (!CPUISMIPSNN) {
-		ti->ti_asid_max = mips_options.mips_num_tlb_entries - 1;
-		ti->ti_asids_free = ti->ti_asid_max;
-		ti->ti_asid_mask = ti->ti_asid_max;
-		/*
-		 * Now figure out what mask we need to focus on asid_max.
-		 */
-		while ((ti->ti_asid_mask + 1) & ti->ti_asid_mask) {
-			ti->ti_asid_mask |= ti->ti_asid_mask >> 1;
-		}
-	}
 #endif /* MULTIPROCESSOR */
 }
 
@@ -265,7 +270,13 @@
 	ci->ci_tlb_info = ti;
 	ci->ci_ksp_tlb_slot = ti->ti_wired++;
 	/*
-	 * Mark the kernel as active and "onproc" for this cpu.
+	 * If we need a tlb slot for mapping cpu_info, use 0.  If we don't
+	 * need one then ci_tlb_slot will be -1, and so will ci->ci_tlb_slot
+	 */
+	ci->ci_tlb_slot = -(cpu_info_store.ci_tlb_slot < 0);
+	/*
+	 * Mark the kernel as active and "onproc" for this cpu.  We assume
+	 * we are the only CPU running so atomic ops are not needed.
 	 */
 	pmap_kernel()->pm_active |= cpu_mask;
 	pmap_kernel()->pm_onproc |= cpu_mask;

Added files:

Index: src/sys/arch/mips/mips/mips_fixup.c
diff -u /dev/null src/sys/arch/mips/mips/mips_fixup.c:1.1.2.1
--- /dev/null	Sat Feb 27 07:58:53 2010
+++ src/sys/arch/mips/mips/mips_fixup.c	Sat Feb 27 07:58:52 2010
@@ -0,0 +1,173 @@
+/*-
+ * Copyright (c) 2010 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Matt Thomas of 3am Software Foundry.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ *    notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ *    notice, this list of conditions and the following disclaimer in the
+ *    documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <sys/cdefs.h>
+
+__KERNEL_RCSID(0, "$NetBSD: mips_fixup.c,v 1.1.2.1 2010/02/27 07:58:52 matt Exp $");
+
+#include <sys/param.h>
+
+#include <uvm/uvm_extern.h>
+
+#include <mips/locore.h>
+#include <mips/cache.h>
+#include <mips/mips3_pte.h>
+
+#define	INSN_LUI_P(insn)	(((insn) >> 26) == 017)
+#define	INSN_LW_P(insn)		(((insn) >> 26) == 043)
+#define	INSN_LD_P(insn)		(((insn) >> 26) == 067)
+
+#define INSN_LOAD_P(insn)	(INSN_LD_P(insn) || INSN_LW_P(insn))
+
+bool
+mips_fixup_exceptions(mips_fixup_callback_t callback)
+{
+	uint32_t * const start = (uint32_t *)MIPS_KSEG0_START;
+	uint32_t * const end = start + (5 * 128) / sizeof(uint32_t);
+	const int32_t addr = (intptr_t)&cpu_info_store;
+	const size_t size = sizeof(cpu_info_store);
+	uint32_t new_insns[2];
+	uint32_t *lui_insnp = NULL;
+	bool fixed = false;
+	size_t lui_reg = 0;
+	/*
+	 * If this was allocated so that bit 15 of the value/address is 1, then
+	 * %hi will add 1 to the immediate (or 0x10000 to the value loaded)
+	 * to compensate for using a negative offset for the lower half of
+	 * the value.
+	 */
+	const int32_t upper_addr = (addr & ~0xffff) + ((addr << 1) & 0x10000);
+
+	KASSERT((addr & ~0xfff) == ((addr + size - 1) & ~0xfff));
+
+	for (uint32_t *insnp = start; insnp < end; insnp++) {
+		const uint32_t insn = *insnp;
+		if (INSN_LUI_P(insn)) {
+			const int32_t offset = insn << 16;
+			lui_reg = (insn >> 16) & 31;
+#ifdef DEBUG
+			printf("%s: %#x: insn %08x: lui r%zu, %%hi(%#x)", 
+			    __func__, (int32_t)(intptr_t)insnp,
+			    insn, lui_reg, offset);
+#endif
+			if (upper_addr == offset) {
+				lui_insnp = insnp;
+#ifdef DEBUG
+				printf(" (maybe)");
+#endif
+			} else {
+				lui_insnp = NULL;
+			}
+#ifdef DEBUG
+			printf("\n");
+#endif
+		} else if (lui_insnp != NULL && INSN_LOAD_P(insn)) {
+			size_t base = (insn >> 21) & 31;
+			size_t rt = (insn >> 16) & 31;
+			int32_t load_addr = upper_addr + (int16_t)insn;
+			if (addr <= load_addr
+			    && load_addr < addr + size
+			    && base == lui_reg
+			    && rt == lui_reg) {
+#ifdef DEBUG
+				printf("%s: %#x: insn %08x: %s r%zu, %%lo(%08x)(r%zu)\n", 
+				    __func__, (int32_t)(intptr_t)insnp,
+				    insn, INSN_LW_P(insn) ? "lw" : "ld",
+				    rt, load_addr, base);
+#endif
+				new_insns[0] = *lui_insnp;
+				new_insns[1] = *insnp;
+				if ((callback)(load_addr, new_insns)) {
+					*lui_insnp = new_insns[0];
+					*insnp = new_insns[1];
+					fixed = true;
+				}
+				lui_insnp = NULL;
+			} else if (rt == lui_reg) {
+				lui_insnp = NULL;
+			}
+		}
+	}
+
+	if (fixed)
+		mips_icache_sync_range((vaddr_t)start, end - start);
+		
+	return fixed;
+}
+
+bool
+mips_fixup_zero_relative(int32_t load_addr, uint32_t new_insns[2])
+{
+	struct cpu_info * const ci = curcpu();
+	struct pmap_tlb_info * const ti = ci->ci_tlb_info;
+
+	KASSERT(MIPS_KSEG0_P(load_addr));
+	KASSERT(!MIPS_CACHE_VIRTUAL_ALIAS);
+#ifdef MULTIPROCESSOR
+	KASSERT(CPU_IS_PRIMARY(ci));
+#endif
+	KASSERT((intptr_t)ci <= load_addr);
+	KASSERT(load_addr < (intptr_t)(ci + 1));
+
+	/*
+	 * Use the load instruction as a prototype, rewriting it to use $0
+	 * as the base register with the new negative offset.  The second
+	 * instruction becomes a NOP.
+	 */
+	new_insns[0] =
+	    (new_insns[1] & (0xfc1f0000|PAGE_MASK)) | (0xffff & ~PAGE_MASK);
+	new_insns[1] = 0;
+#ifdef DEBUG
+	printf("%s: %08x: insn#1 %08x: %s r%u, %d(r%u)\n", 
+	    __func__, (int32_t)load_addr, new_insns[0],
+	    INSN_LW_P(new_insns[0]) ? "lw" : "ld",
+	    (new_insns[0] >> 16) & 31,
+	    (int16_t)new_insns[0],
+	    (new_insns[0] >> 21) & 31);
+#endif
+	/*
+	 * Construct the TLB_LO entry needed to map cpu_info_store.
+	 */
+	const uint32_t tlb_lo = MIPS3_PG_G|MIPS3_PG_V|MIPS3_PG_D
+	    | mips3_paddr_to_tlbpfn(MIPS_KSEG0_TO_PHYS(trunc_page(load_addr)));
+
+	/*
+	 * Now allocate a TLB entry in the primary TLB for the mapping and
+	 * enter the mapping into the TLB.
+	 */
+	TLBINFO_LOCK(ti);
+	if (ci->ci_tlb_slot < 0) {
+		ci->ci_tlb_slot = ti->ti_wired++;
+		mips3_cp0_wired_write(ti->ti_wired);
+		tlb_enter(ci->ci_tlb_slot, -PAGE_SIZE, tlb_lo);
+	}
+	TLBINFO_UNLOCK(ti);
+
+	return true;
+}
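
To make the instruction rewrite above concrete, here is a stand-alone
sketch of the encoding step that mips_fixup_zero_relative applies to the
load (the sample instruction word, the 4 KB PAGE_MASK, and the in-page
offset 0x123 are made up for illustration; the companion lui becomes a
nop):

#include <inttypes.h>
#include <stdio.h>

#define PAGE_MASK	0xfff			/* assumed: 4 KB pages */

int
main(void)
{
	/* "lw k1, 0x123(k1)": opcode 043 (lw), base $27, rt $27, imm 0x0123 */
	uint32_t lw = (043u << 26) | (27u << 21) | (27u << 16) | 0x0123;

	/*
	 * Keep the opcode, rt, and in-page offset bits, clear the base
	 * field (so it becomes $zero), and force the upper offset bits on,
	 * so the 16-bit immediate sign-extends to -PAGE_SIZE + 0x123.
	 */
	uint32_t fixed = (lw & (0xfc1f0000 | PAGE_MASK)) | (0xffff & ~PAGE_MASK);
	int off = (int)(fixed & 0xffff) - ((fixed & 0x8000) ? 0x10000 : 0);

	/* prints: before 8f7b0123 after 8c1bf123 offset -3805 */
	printf("before %08" PRIx32 " after %08" PRIx32 " offset %d\n",
	    lw, fixed, off);
	return 0;
}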
