Module Name:    src
Committed By:   mrg
Date:           Wed Mar 10 06:57:23 UTC 2010

Modified Files:
        src/sys/arch/sparc64/sparc64: db_interface.c locore.s pmap.c

Log Message:
XXX: a workaround we'd like to remove when pmap / uvm locking is cleaned up:

- rename pseg_get() and pseg_set() to pseg_get_real() and pseg_set_real().
- if USE_LOCKSAFE_PSEG_GETSET is defined, which it currently is by default,
  define pseg_[gs]et() in terms of functions that take a new pseg_lock
  mutex at IPL_VM while calling into the real functions.

this seems to avoid the pseg_set() crashes we've seen:
  1 - "spare needed" failures, even when pseg_get() had just worked for this pmap
  2 - the 2nd ldxa via ASI_PHYS_CACHED in pseg_set() loads garbage
      into %o4, and causes the 3rd ldxa to fault
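
For readers who only want the shape of the change, here is a condensed
sketch of the lock-safe wrapper pattern, lifted from the pmap.c hunk
below (lock_available is pmap.c's existing flag that becomes true once
mutexes may be used; pseg_set_locksafe() is analogous, see the full diff):

    #define USE_LOCKSAFE_PSEG_GETSET
    #if defined(USE_LOCKSAFE_PSEG_GETSET)

    /* spin mutex, initialised at IPL_VM alongside pmap_lock */
    static kmutex_t pseg_lock;

    static __inline__ int64_t
    pseg_get_locksafe(struct pmap *pm, vaddr_t va)
    {
            int64_t rv;
            bool took_lock = lock_available;   /* skip until the mutex exists */

            if (__predict_true(took_lock))
                    mutex_enter(&pseg_lock);
            rv = pseg_get_real(pm, va);        /* the renamed assembly routine */
            if (__predict_true(took_lock))
                    mutex_exit(&pseg_lock);
            return rv;
    }

    /* callers throughout pmap.c keep using the old names */
    #define pseg_get(pm, va)               pseg_get_locksafe(pm, va)
    #define pseg_set(pm, va, data, ptp)    pseg_set_locksafe(pm, va, data, ptp)

    #else /* USE_LOCKSAFE_PSEG_GETSET */

    #define pseg_get(pm, va)               pseg_get_real(pm, va)
    #define pseg_set(pm, va, data, ptp)    pseg_set_real(pm, va, data, ptp)

    #endif /* USE_LOCKSAFE_PSEG_GETSET */

Only the definitions change; every existing pseg_get()/pseg_set() call
site picks up the serialized path automatically.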


To generate a diff of this commit:
cvs rdiff -u -r1.123 -r1.124 src/sys/arch/sparc64/sparc64/db_interface.c
cvs rdiff -u -r1.326 -r1.327 src/sys/arch/sparc64/sparc64/locore.s
cvs rdiff -u -r1.258 -r1.259 src/sys/arch/sparc64/sparc64/pmap.c

Please note that diffs are not public domain; they are subject to the
copyright notices on the relevant files.

Index: src/sys/arch/sparc64/sparc64/db_interface.c
diff -u src/sys/arch/sparc64/sparc64/db_interface.c:1.123 src/sys/arch/sparc64/sparc64/db_interface.c:1.124
--- src/sys/arch/sparc64/sparc64/db_interface.c:1.123	Sat Mar  6 08:08:29 2010
+++ src/sys/arch/sparc64/sparc64/db_interface.c	Wed Mar 10 06:57:22 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: db_interface.c,v 1.123 2010/03/06 08:08:29 mrg Exp $ */
+/*	$NetBSD: db_interface.c,v 1.124 2010/03/10 06:57:22 mrg Exp $ */
 
 /*
  * Copyright (c) 1996-2002 Eduardo Horvath.  All rights reserved.
@@ -34,7 +34,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.123 2010/03/06 08:08:29 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: db_interface.c,v 1.124 2010/03/10 06:57:22 mrg Exp $");
 
 #include "opt_ddb.h"
 #include "opt_multiprocessor.h"
@@ -650,7 +650,8 @@
 	}
 }
 
-int64_t pseg_get(struct pmap *, vaddr_t);
+/* XXX no locking; shouldn't matter */
+int64_t pseg_get_real(struct pmap *, vaddr_t);
 
 void
 db_dump_pmap(struct pmap *pm)
@@ -706,7 +707,7 @@
 	if (have_addr) {
 		/* lookup an entry for this VA */
 		
-		if ((data = pseg_get(pmap_kernel(), (vaddr_t)addr))) {
+		if ((data = pseg_get_real(pmap_kernel(), (vaddr_t)addr))) {
 			db_printf("pmap_kernel(%p)->pm_segs[%lx][%lx][%lx]=>%qx\n",
 				  (void *)(uintptr_t)addr, (u_long)va_to_seg(addr),
 				  (u_long)va_to_dir(addr), (u_long)va_to_pte(addr),

Index: src/sys/arch/sparc64/sparc64/locore.s
diff -u src/sys/arch/sparc64/sparc64/locore.s:1.326 src/sys/arch/sparc64/sparc64/locore.s:1.327
--- src/sys/arch/sparc64/sparc64/locore.s:1.326	Mon Mar  8 08:59:06 2010
+++ src/sys/arch/sparc64/sparc64/locore.s	Wed Mar 10 06:57:22 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: locore.s,v 1.326 2010/03/08 08:59:06 mrg Exp $	*/
+/*	$NetBSD: locore.s,v 1.327 2010/03/10 06:57:22 mrg Exp $	*/
 
 /*
  * Copyright (c) 2006-2010 Matthew R. Green
@@ -6568,14 +6568,15 @@
 	retl
 	 mov	%o4, %g1		! Restore g1
 #endif
+
 /*
- * extern int64_t pseg_get(struct pmap *pm, vaddr_t addr);
+ * extern int64_t pseg_get_real(struct pmap *pm, vaddr_t addr);
  *
  * Return TTE at addr in pmap.  Uses physical addressing only.
  * pmap->pm_physaddr must by the physical address of pm_segs
  *
  */
-ENTRY(pseg_get)
+ENTRY(pseg_get_real)
 !	flushw			! Make sure we don't have stack probs & lose hibits of %o
 	ldx	[%o0 + PM_PHYS], %o2			! pmap->pm_segs
 
@@ -6637,13 +6638,13 @@
 /*
  * In 32-bit mode:
  *
- * extern int pseg_set(struct pmap* %o0, vaddr_t addr %o1, int64_t tte %o2:%o3,
- *			 paddr_t spare %o4:%o5);
+ * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
+ *			    int64_t tte %o2:%o3, paddr_t spare %o4:%o5);
  *
  * In 64-bit mode:
  *
- * extern int pseg_set(struct pmap* %o0, vaddr_t addr %o1, int64_t tte %o2,
- *			paddr_t spare %o3);
+ * extern int pseg_set_real(struct pmap* %o0, vaddr_t addr %o1,
+ *			    int64_t tte %o2, paddr_t spare %o3);
  *
  * Set a pseg entry to a particular TTE value.  Return values are:
  *
@@ -6673,7 +6674,7 @@
  * The counters are 32 bit or 64 bit wide, depending on the kernel type we are
  * running!
  */
-ENTRY(pseg_set)
+ENTRY(pseg_set_real)
 #ifndef _LP64
 	sllx	%o4, 32, %o4				! Put args into 64-bit format
 	sllx	%o2, 32, %o2				! Shift to high 32-bits

Index: src/sys/arch/sparc64/sparc64/pmap.c
diff -u src/sys/arch/sparc64/sparc64/pmap.c:1.258 src/sys/arch/sparc64/sparc64/pmap.c:1.259
--- src/sys/arch/sparc64/sparc64/pmap.c:1.258	Mon Mar  8 08:59:06 2010
+++ src/sys/arch/sparc64/sparc64/pmap.c	Wed Mar 10 06:57:22 2010
@@ -1,4 +1,4 @@
-/*	$NetBSD: pmap.c,v 1.258 2010/03/08 08:59:06 mrg Exp $	*/
+/*	$NetBSD: pmap.c,v 1.259 2010/03/10 06:57:22 mrg Exp $	*/
 /*
  *
  * Copyright (C) 1996-1999 Eduardo Horvath.
@@ -26,7 +26,7 @@
  */
 
 #include <sys/cdefs.h>
-__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.258 2010/03/08 08:59:06 mrg Exp $");
+__KERNEL_RCSID(0, "$NetBSD: pmap.c,v 1.259 2010/03/10 06:57:22 mrg Exp $");
 
 #undef	NO_VCACHE /* Don't forget the locked TLB in dostart */
 #define	HWREF
@@ -79,8 +79,8 @@
 paddr_t cpu0paddr;		/* contigious phys memory preallocated for cpus */
 
 /* These routines are in assembly to allow access thru physical mappings */
-extern int64_t pseg_get(struct pmap *, vaddr_t);
-extern int pseg_set(struct pmap *, vaddr_t, int64_t, paddr_t);
+extern int64_t pseg_get_real(struct pmap *, vaddr_t);
+extern int pseg_set_real(struct pmap *, vaddr_t, int64_t, paddr_t);
 
 /*
  * Diatribe on ref/mod counting:
@@ -350,6 +350,53 @@
 };
 
 /*
+ * This probably shouldn't be necessary, but it stops USIII machines from
+ * breaking in general, and not just for MULTIPROCESSOR.
+ */
+#define USE_LOCKSAFE_PSEG_GETSET
+#if defined(USE_LOCKSAFE_PSEG_GETSET)
+
+static kmutex_t pseg_lock;
+
+static __inline__ int64_t
+pseg_get_locksafe(struct pmap *pm, vaddr_t va)
+{
+	int64_t rv;
+	bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
+
+	if (__predict_true(took_lock))
+		mutex_enter(&pseg_lock);
+	rv = pseg_get_real(pm, va);
+	if (__predict_true(took_lock))
+		mutex_exit(&pseg_lock);
+	return rv;
+}
+
+static __inline__ int
+pseg_set_locksafe(struct pmap *pm, vaddr_t va, int64_t data, paddr_t ptp)
+{
+	int rv;
+	bool took_lock = lock_available /*&& pm == pmap_kernel()*/;
+
+	if (__predict_true(took_lock))
+		mutex_enter(&pseg_lock);
+	rv = pseg_set_real(pm, va, data, ptp);
+	if (__predict_true(took_lock))
+		mutex_exit(&pseg_lock);
+	return rv;
+}
+
+#define pseg_get(pm, va)		pseg_get_locksafe(pm, va)
+#define pseg_set(pm, va, data, ptp)	pseg_set_locksafe(pm, va, data, ptp)
+
+#else /* USE_LOCKSAFE_PSEG_GETSET */
+
+#define pseg_get(pm, va)		pseg_get_real(pm, va)
+#define pseg_set(pm, va, data, ptp)	pseg_set_real(pm, va, data, ptp)
+
+#endif /* USE_LOCKSAFE_PSEG_GETSET */
+
+/*
  * Enter a TTE into the kernel pmap only.  Don't do anything else.
  *
  * Use only during bootstrapping since it does no locking and
@@ -1247,6 +1294,9 @@
 	vm_num_phys = avail_end - avail_start;
 
 	mutex_init(&pmap_lock, MUTEX_DEFAULT, IPL_NONE);
+#if defined(USE_LOCKSAFE_PSEG_GETSET)
+	mutex_init(&pseg_lock, MUTEX_SPIN, IPL_VM);
+#endif
 	lock_available = true;
 }
 
