I've committed a patch to upgrade libgo to the current version of the
master Go library.  As usual, this mail message only includes the
changes to files that are specific to gccgo.  Bootstrapped and ran Go
testsuite on x86_64-unknown-linux-gnu.  Committed to mainline.

Ian

diff -r 1faae2fad711 libgo/MERGE
--- a/libgo/MERGE	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/MERGE	Fri Dec 21 16:57:00 2012 -0800
@@ -1,4 +1,4 @@
-c031aa767edf
+6fdc1974457c
 
 The first line of this file holds the Mercurial revision number of the
 last merge done from the master library sources.
diff -r 1faae2fad711 libgo/Makefile.am
--- a/libgo/Makefile.am	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/Makefile.am	Fri Dec 21 16:57:00 2012 -0800
@@ -502,6 +502,7 @@
 	runtime/go-unwind.c \
 	runtime/chan.c \
 	runtime/cpuprof.c \
+	runtime/env_posix.c \
 	runtime/lfstack.c \
 	$(runtime_lock_files) \
 	runtime/mcache.c \
@@ -1657,6 +1658,13 @@
 syscall_lsf_file =
 endif
 
+# GNU/Linux specific utimesnano support.
+if LIBGO_IS_LINUX
+syscall_utimesnano_file = go/syscall/libcall_linux_utimesnano.go
+else
+syscall_utimesnano_file = go/syscall/libcall_posix_utimesnano.go
+endif
+
 go_base_syscall_files = \
 	go/syscall/env_unix.go \
 	go/syscall/syscall_errno.go \
@@ -1679,6 +1687,7 @@
 	$(syscall_uname_file) \
 	$(syscall_netlink_file) \
 	$(syscall_lsf_file) \
+	$(syscall_utimesnano_file) \
 	$(GO_LIBCALL_OS_FILE) \
 	$(GO_LIBCALL_OS_ARCH_FILE) \
 	$(GO_SYSCALL_OS_FILE) \
diff -r 1faae2fad711 libgo/go/syscall/env_plan9.go
--- a/libgo/go/syscall/env_plan9.go	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/go/syscall/env_plan9.go	Fri Dec 21 16:57:00 2012 -0800
@@ -12,14 +12,17 @@
 )
 
 var (
-	// envOnce guards initialization by copyenv, which populates env.
+	// envOnce guards copyenv, which populates env.
 	envOnce sync.Once
 
 	// envLock guards env.
 	envLock sync.RWMutex
 
 	// env maps from an environment variable to its value.
-	env map[string]string
+	env = make(map[string]string)
+
+	errZeroLengthKey = errors.New("zero length key")
+	errShortWrite    = errors.New("i/o count too small")
 )
 
 func readenv(key string) (string, error) {
@@ -47,12 +50,18 @@
 		return err
 	}
 	defer Close(fd)
-	_, err = Write(fd, []byte(value))
-	return err
+	b := []byte(value)
+	n, err := Write(fd, b)
+	if err != nil {
+		return err
+	}
+	if n != len(b) {
+		return errShortWrite
+	}
+	return nil
 }
 
 func copyenv() {
-	env = make(map[string]string)
 	fd, err := Open("/env", O_RDONLY)
 	if err != nil {
 		return
@@ -72,7 +81,6 @@
 }
 
 func Getenv(key string) (value string, found bool) {
-	envOnce.Do(copyenv)
 	if len(key) == 0 {
 		return "", false
 	}
@@ -80,17 +88,20 @@
 	envLock.RLock()
 	defer envLock.RUnlock()
 
-	v, ok := env[key]
-	if !ok {
+	if v, ok := env[key]; ok {
+		return v, true
+	}
+	v, err := readenv(key)
+	if err != nil {
 		return "", false
 	}
+	env[key] = v
 	return v, true
 }
 
 func Setenv(key, value string) error {
-	envOnce.Do(copyenv)
 	if len(key) == 0 {
-		return errors.New("zero length key")
+		return errZeroLengthKey
 	}
 
 	envLock.Lock()
@@ -105,8 +116,6 @@
 }
 
 func Clearenv() {
-	envOnce.Do(copyenv) // prevent copyenv in Getenv/Setenv
-
 	envLock.Lock()
 	defer envLock.Unlock()
 
@@ -115,9 +124,10 @@
 }
 
 func Environ() []string {
-	envOnce.Do(copyenv)
 	envLock.RLock()
 	defer envLock.RUnlock()
+
+	envOnce.Do(copyenv)
 	a := make([]string, len(env))
 	i := 0
 	for k, v := range env {
diff -r 1faae2fad711 libgo/go/syscall/libcall_linux_utimesnano.go
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libgo/go/syscall/libcall_linux_utimesnano.go	Fri Dec 21 16:57:00 2012 -0800
@@ -0,0 +1,29 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// GNU/Linux version of UtimesNano.
+
+package syscall
+
+import "unsafe"
+
+//sys	utimensat(dirfd int, path string, times *[2]Timespec, flags int) (err error)
+//utimensat(dirfd int, path *byte, times *[2]Timespec, flags int) int
+func UtimesNano(path string, ts []Timespec) (err error) {
+	if len(ts) != 2 {
+		return EINVAL
+	}
+	err = utimensat(_AT_FDCWD, path, (*[2]Timespec)(unsafe.Pointer(&ts[0])), 0)
+	if err != ENOSYS {
+		return err
+	}
+	// If the utimensat syscall isn't available (utimensat was added to Linux
+	// in 2.6.22, released 8 July 2007), then fall back to utimes.
+	var tv [2]Timeval
+	for i := 0; i < 2; i++ {
+		tv[i].Sec = Timeval_sec_t(ts[i].Sec)
+		tv[i].Usec = Timeval_usec_t(ts[i].Nsec / 1000)
+	}
+	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
diff -r 1faae2fad711 libgo/go/syscall/libcall_posix_utimesnano.go
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libgo/go/syscall/libcall_posix_utimesnano.go	Fri Dec 21 16:57:00 2012 -0800
@@ -0,0 +1,24 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// General POSIX version of UtimesNano.
+
+package syscall
+
+import "unsafe"
+
+func UtimesNano(path string, ts []Timespec) error {
+	// TODO: The BSDs can do utimensat with SYS_UTIMENSAT, but it
+	// isn't supported by Darwin, so this uses utimes instead.
+	if len(ts) != 2 {
+		return EINVAL
+	}
+	// Not as efficient as it could be because Timespec and
+	// Timeval have different types in the different OSes
+	tv := [2]Timeval{
+		NsecToTimeval(TimespecToNsec(ts[0])),
+		NsecToTimeval(TimespecToNsec(ts[1])),
+	}
+	return utimes(path, (*[2]Timeval)(unsafe.Pointer(&tv[0])))
+}
diff -r 1faae2fad711 libgo/go/syscall/syscall.go
--- a/libgo/go/syscall/syscall.go	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/go/syscall/syscall.go	Fri Dec 21 16:57:00 2012 -0800
@@ -3,10 +3,15 @@
 // license that can be found in the LICENSE file.
 
 // Package syscall contains an interface to the low-level operating system
-// primitives.  The details vary depending on the underlying system.
-// Its primary use is inside other packages that provide a more portable
-// interface to the system, such as "os", "time" and "net".  Use those
-// packages rather than this one if you can.
+// primitives.  The details vary depending on the underlying system, and
+// by default, godoc will display the syscall documentation for the current
+// system.  If you want godoc to display syscall documentation for another
+// system, set $GOOS and $GOARCH to the desired system.  For example, if
+// you want to view documentation for freebsd/arm on linux/amd64, set $GOOS
+// to freebsd and $GOARCH to arm.
+// The primary use of syscall is inside other packages that provide a more
+// portable interface to the system, such as "os", "time" and "net".  Use
+// those packages rather than this one if you can.
 // For details of the functions and data types in this package consult
 // the manuals for the appropriate operating system.
 // These calls return err == nil to indicate success; otherwise
diff -r 1faae2fad711 libgo/merge.sh
--- a/libgo/merge.sh	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/merge.sh	Fri Dec 21 16:57:00 2012 -0800
@@ -163,7 +163,7 @@
   done
 done
 
-runtime="chan.c cpuprof.c lock_futex.c lock_sema.c mcache.c mcentral.c mfinal.c mfixalloc.c mgc0.c mheap.c msize.c panic.c print.c proc.c race.h runtime.c runtime.h signal_unix.c malloc.h malloc.goc mprof.goc parfor.c runtime1.goc sema.goc sigqueue.goc string.goc time.goc"
+runtime="chan.c cpuprof.c env_posix.c lock_futex.c lock_sema.c mcache.c mcentral.c mfinal.c mfixalloc.c mgc0.c mgc0.h mheap.c msize.c panic.c print.c proc.c race.h runtime.c runtime.h signal_unix.c malloc.h malloc.goc mprof.goc parfor.c runtime1.goc sema.goc sigqueue.goc string.goc time.goc"
 for f in $runtime; do
   merge_c $f $f
 done
diff -r 1faae2fad711 libgo/runtime/env_posix.c
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libgo/runtime/env_posix.c	Fri Dec 21 16:57:00 2012 -0800
@@ -0,0 +1,37 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// +build darwin freebsd linux netbsd openbsd windows
+
+#include "runtime.h"
+#include "array.h"
+
+extern Slice syscall_Envs asm ("syscall.Envs");
+
+const byte*
+runtime_getenv(const char *s)
+{
+	int32 i, j, len;
+	const byte *v, *bs;
+	String* envv;
+	int32 envc;
+
+	bs = (const byte*)s;
+	len = runtime_findnull(bs);
+	envv = (String*)syscall_Envs.__values;
+	envc = syscall_Envs.__count;
+	for(i=0; i<envc; i++){
+		if(envv[i].len <= len)
+			continue;
+		v = (const byte*)envv[i].str;
+		for(j=0; j<len; j++)
+			if(bs[j] != v[j])
+				goto nomatch;
+		if(v[len] != '=')
+			goto nomatch;
+		return v+len+1;
+	nomatch:;
+	}
+	return nil;
+}
diff -r 1faae2fad711 libgo/runtime/go-trampoline.c
--- a/libgo/runtime/go-trampoline.c	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/go-trampoline.c	Fri Dec 21 16:57:00 2012 -0800
@@ -106,8 +106,8 @@
    no other references to it.  */
 
 void
-runtime_trampoline_scan (void (*addroot) (byte *, uintptr))
+runtime_trampoline_scan (void (*addroot) (Obj))
 {
   if (trampoline_page != NULL)
-    addroot ((byte *) &trampoline_page, sizeof trampoline_page);
+    addroot ((Obj){(byte *) &trampoline_page, sizeof trampoline_page, 0});
 }
diff -r 1faae2fad711 libgo/runtime/malloc.goc
--- a/libgo/runtime/malloc.goc	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/malloc.goc	Fri Dec 21 16:57:00 2012 -0800
@@ -491,7 +491,7 @@
 static Lock settype_lock;
 
 void
-runtime_settype_flush(M *m, bool sysalloc)
+runtime_settype_flush(M *mp, bool sysalloc)
 {
 	uintptr *buf, *endbuf;
 	uintptr size, ofs, j, t;
@@ -503,8 +503,8 @@
 	uintptr typ, p;
 	MSpan *s;
 
-	buf = m->settype_buf;
-	endbuf = buf + m->settype_bufsize;
+	buf = mp->settype_buf;
+	endbuf = buf + mp->settype_bufsize;
 
 	runtime_lock(&settype_lock);
 	while(buf < endbuf) {
@@ -602,7 +602,7 @@
 	}
 	runtime_unlock(&settype_lock);
 
-	m->settype_bufsize = 0;
+	mp->settype_bufsize = 0;
 }
 
 // It is forbidden to use this function if it is possible that
@@ -610,7 +610,7 @@
 void
 runtime_settype(void *v, uintptr t)
 {
-	M *m1;
+	M *mp;
 	uintptr *buf;
 	uintptr i;
 	MSpan *s;
@@ -618,16 +618,16 @@
 	if(t == 0)
 		runtime_throw("settype: zero type");
 
-	m1 = runtime_m();
-	buf = m1->settype_buf;
-	i = m1->settype_bufsize;
+	mp = runtime_m();
+	buf = mp->settype_buf;
+	i = mp->settype_bufsize;
 	buf[i+0] = (uintptr)v;
 	buf[i+1] = t;
 	i += 2;
-	m1->settype_bufsize = i;
+	mp->settype_bufsize = i;
 
-	if(i == nelem(m1->settype_buf)) {
-		runtime_settype_flush(m1, false);
+	if(i == nelem(mp->settype_buf)) {
+		runtime_settype_flush(mp, false);
 	}
 
 	if(DebugTypeAtBlockEnd) {
diff -r 1faae2fad711 libgo/runtime/malloc.h
--- a/libgo/runtime/malloc.h	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/malloc.h	Fri Dec 21 16:57:00 2012 -0800
@@ -468,17 +468,25 @@
 	FlagNoGC = 1<<2,	// must not free or scan for pointers
 };
 
+typedef struct Obj Obj;
+struct Obj
+{
+	byte	*p;	// data pointer
+	uintptr	n;	// size of data in bytes
+	uintptr	ti;	// type info
+};
+
 void	runtime_MProf_Malloc(void*, uintptr);
 void	runtime_MProf_Free(void*, uintptr);
 void	runtime_MProf_GC(void);
-void	runtime_MProf_Mark(void (*addroot)(byte *, uintptr));
+void	runtime_MProf_Mark(void (*addroot)(Obj));
 int32	runtime_gcprocs(void);
 void	runtime_helpgc(int32 nproc);
 void	runtime_gchelper(void);
 
 struct __go_func_type;
 bool	runtime_getfinalizer(void *p, bool del, void (**fn)(void*), const struct __go_func_type **ft);
-void	runtime_walkfintab(void (*fn)(void*), void (*scan)(byte *, uintptr));
+void	runtime_walkfintab(void (*fn)(void*), void (*scan)(Obj));
 
 enum
 {
@@ -494,3 +502,6 @@
 void	runtime_gc_m_ptr(Eface*);
 
 void	runtime_memorydump(void);
+
+void	runtime_time_scan(void (*)(Obj));
+void	runtime_trampoline_scan(void (*)(Obj));
diff -r 1faae2fad711 libgo/runtime/mfinal.c
--- a/libgo/runtime/mfinal.c	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/mfinal.c	Fri Dec 21 16:57:00 2012 -0800
@@ -193,7 +193,7 @@
 }
 
 void
-runtime_walkfintab(void (*fn)(void*), void (*addroot)(byte *, uintptr))
+runtime_walkfintab(void (*fn)(void*), void (*addroot)(Obj))
 {
 	void **key;
 	void **ekey;
@@ -206,8 +206,8 @@
 		for(; key < ekey; key++)
 			if(*key != nil && *key != ((void*)-1))
 				fn(*key);
-		addroot((byte*)&fintab[i].fkey, sizeof(void*));
-		addroot((byte*)&fintab[i].val, sizeof(void*));
+		addroot((Obj){(byte*)&fintab[i].fkey, sizeof(void*), 0});
+		addroot((Obj){(byte*)&fintab[i].val, sizeof(void*), 0});
 		runtime_unlock(&fintab[i]);
 	}
 }
diff -r 1faae2fad711 libgo/runtime/mgc0.c
--- a/libgo/runtime/mgc0.c	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/mgc0.c	Fri Dec 21 16:57:00 2012 -0800
@@ -9,6 +9,7 @@
 #include "runtime.h"
 #include "arch.h"
 #include "malloc.h"
+#include "mgc0.h"
 #include "race.h"
 
 #ifdef USING_SPLIT_STACK
@@ -24,11 +25,13 @@
 enum {
 	Debug = 0,
 	DebugMark = 0,  // run second pass to check mark
-	DataBlock = 8*1024,
 
 	// Four bits per word (see #defines below).
 	wordsPerBitmapWord = sizeof(void*)*8/4,
 	bitShift = sizeof(void*)*8/4,
+
+	handoffThreshold = 4,
+	IntermediateBufferCapacity = 64,
 };
 
 // Bits in per-word bitmap.
@@ -81,12 +84,16 @@
 
 static int32 gctrace;
 
+// The size of Workbuf is N*PageSize.
 typedef struct Workbuf Workbuf;
 struct Workbuf
 {
-	LFNode node; // must be first
+#define SIZE (2*PageSize-sizeof(LFNode)-sizeof(uintptr))
+	LFNode  node; // must be first
 	uintptr nobj;
-	byte *obj[512-(sizeof(LFNode)+sizeof(uintptr))/sizeof(byte*)];
+	Obj     obj[SIZE/sizeof(Obj) - 1];
+	uint8   _padding[SIZE%sizeof(Obj) + sizeof(Obj)];
+#undef SIZE
 };
 
 typedef struct Finalizer Finalizer;
@@ -120,13 +127,6 @@
 static void	putempty(Workbuf*);
 static Workbuf* handoff(Workbuf*);
 
-typedef struct GcRoot GcRoot;
-struct GcRoot
-{
-	byte *p;
-	uintptr n;
-};
-
 static struct {
 	uint64	full;  // lock-free list of full blocks
 	uint64	empty; // lock-free list of empty blocks
@@ -143,77 +143,122 @@
 	byte	*chunk;
 	uintptr	nchunk;
 
-	GcRoot	*roots;
+	Obj	*roots;
 	uint32	nroot;
 	uint32	rootcap;
 } work;
 
-// scanblock scans a block of n bytes starting at pointer b for references
-// to other objects, scanning any it finds recursively until there are no
-// unscanned objects left.  Instead of using an explicit recursion, it keeps
-// a work list in the Workbuf* structures and loops in the main function
-// body.  Keeping an explicit work list is easier on the stack allocator and
-// more efficient.
+enum {
+	// TODO(atom): to be expanded in a next CL
+	GC_DEFAULT_PTR = GC_NUM_INSTR,
+};
+
+// PtrTarget and BitTarget are structures used by intermediate buffers.
+// The intermediate buffers hold GC data before it
+// is moved/flushed to the work buffer (Workbuf).
+// The size of an intermediate buffer is very small,
+// such as 32 or 64 elements.
+struct PtrTarget
+{
+	void *p;
+	uintptr ti;
+};
+
+struct BitTarget
+{
+	void *p;
+	uintptr ti;
+	uintptr *bitp, shift;
+};
+
+struct BufferList
+{
+	struct PtrTarget ptrtarget[IntermediateBufferCapacity];
+	struct BitTarget bittarget[IntermediateBufferCapacity];
+	struct BufferList *next;
+};
+static struct BufferList *bufferList;
+
+static Lock lock;
+
+// flushptrbuf moves data from the PtrTarget buffer to the work buffer.
+// The PtrTarget buffer contains blocks irrespective of whether the blocks have been marked or scanned,
+// while the work buffer contains blocks which have been marked
+// and are prepared to be scanned by the garbage collector.
+//
+// _wp, _wbuf, _nobj are input/output parameters and are specifying the work buffer.
+// bitbuf holds temporary data generated by this function.
+//
+// A simplified drawing explaining how the todo-list moves from a structure to another:
+//
+//     scanblock
+//  (find pointers)
+//    Obj ------> PtrTarget (pointer targets)
+//     ↑          |
+//     |          | flushptrbuf (1st part,
+//     |          | find block start)
+//     |          ↓
+//     `--------- BitTarget (pointer targets and the corresponding locations in bitmap)
+//  flushptrbuf
+//  (2nd part, mark and enqueue)
 static void
-scanblock(byte *b, uintptr n)
+flushptrbuf(struct PtrTarget *ptrbuf, uintptr n, Obj **_wp, Workbuf **_wbuf, uintptr *_nobj, struct BitTarget *bitbuf)
 {
-	byte *obj, *arena_start, *arena_used, *p;
-	void **vp;
-	uintptr size, *bitp, bits, shift, i, j, x, xbits, off, nobj, nproc;
+	byte *p, *arena_start, *obj;
+	uintptr size, *bitp, bits, shift, j, x, xbits, off, nobj, ti;
 	MSpan *s;
 	PageID k;
-	void **wp;
+	Obj *wp;
 	Workbuf *wbuf;
-	bool keepworking;
+	struct PtrTarget *ptrbuf_end;
+	struct BitTarget *bitbufpos, *bt;
 
-	if((intptr)n < 0) {
-		runtime_printf("scanblock %p %D\n", b, (int64)n);
-		runtime_throw("scanblock");
+	arena_start = runtime_mheap.arena_start;
+
+	wp = *_wp;
+	wbuf = *_wbuf;
+	nobj = *_nobj;
+
+	ptrbuf_end = ptrbuf + n;
+
+	// If buffer is nearly full, get a new one.
+	if(wbuf == nil || nobj+n >= nelem(wbuf->obj)) {
+		if(wbuf != nil)
+			wbuf->nobj = nobj;
+		wbuf = getempty(wbuf);
+		wp = wbuf->obj;
+		nobj = 0;
+
+		if(n >= nelem(wbuf->obj))
+			runtime_throw("ptrbuf has to be smaller than WorkBuf");
 	}
 
-	// Memory arena parameters.
-	arena_start = runtime_mheap.arena_start;
-	arena_used = runtime_mheap.arena_used;
-	nproc = work.nproc;
+	// TODO(atom): This block is a branch of an if-then-else statement.
+	//             The single-threaded branch may be added in a next CL.
+	{
+		// Multi-threaded version.
 
-	wbuf = nil;  // current work buffer
-	wp = nil;  // storage for next queued pointer (write pointer)
-	nobj = 0;  // number of queued objects
+		bitbufpos = bitbuf;
 
-	// Scanblock helpers pass b==nil.
-	// Procs needs to return to make more
-	// calls to scanblock.  But if work.nproc==1 then
-	// might as well process blocks as soon as we
-	// have them.
-	keepworking = b == nil || work.nproc == 1;
+		while(ptrbuf < ptrbuf_end) {
+			obj = ptrbuf->p;
+			ti = ptrbuf->ti;
+			ptrbuf++;
 
-	// Align b to a word boundary.
-	off = (uintptr)b & (PtrSize-1);
-	if(off != 0) {
-		b += PtrSize - off;
-		n -= PtrSize - off;
-	}
-
-	for(;;) {
-		// Each iteration scans the block b of length n, queueing pointers in
-		// the work buffer.
-		if(Debug > 1)
-			runtime_printf("scanblock %p %D\n", b, (int64)n);
-
-		vp = (void**)b;
-		n >>= (2+PtrSize/8);  /* n /= PtrSize (4 or 8) */
-		for(i=0; i<(uintptr)n; i++) {
-			obj = (byte*)vp[i];
-
-			// Words outside the arena cannot be pointers.
-			if((byte*)obj < arena_start || (byte*)obj >= arena_used)
-				continue;
+			// obj belongs to interval [mheap.arena_start, mheap.arena_used).
+			if(Debug > 1) {
+				if(obj < runtime_mheap.arena_start || obj >= runtime_mheap.arena_used)
+					runtime_throw("object is outside of mheap");
+			}
 
 			// obj may be a pointer to a live object.
 			// Try to find the beginning of the object.
 
 			// Round down to word boundary.
-			obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
+			if(((uintptr)obj & ((uintptr)PtrSize-1)) != 0) {
+				obj = (void*)((uintptr)obj & ~((uintptr)PtrSize-1));
+				ti = 0;
+			}
 
 			// Find bits for this word.
 			off = (uintptr*)obj - (uintptr*)arena_start;
@@ -226,6 +271,8 @@
 			if((bits & (bitAllocated|bitBlockBoundary)) != 0)
 				goto found;
 
+			ti = 0;
+
 			// Pointing just past the beginning?
 			// Scan backward a little to find a block boundary.
 			for(j=shift; j-->0; ) {
@@ -246,13 +293,13 @@
 			s = runtime_mheap.map[x];
 			if(s == nil || k < s->start || k - s->start >= s->npages || s->state != MSpanInUse)
 				continue;
-			p =  (byte*)((uintptr)s->start<<PageShift);
+			p = (byte*)((uintptr)s->start<<PageShift);
 			if(s->sizeclass == 0) {
 				obj = p;
 			} else {
 				if((byte*)obj >= (byte*)s->limit)
 					continue;
-				size = runtime_class_to_size[s->sizeclass];
+				size = s->elemsize;
 				int32 i = ((byte*)obj - p)/size;
 				obj = p+i*size;
 			}
@@ -265,81 +312,203 @@
 			bits = xbits >> shift;
 
 		found:
-			// If another proc wants a pointer, give it some.
-			if(work.nwait > 0 && nobj > 4 && work.full == 0) {
-				wbuf->nobj = nobj;
-				wbuf = handoff(wbuf);
-				nobj = wbuf->nobj;
-				wp = (void**)(wbuf->obj + nobj);
-			}
-
 			// Now we have bits, bitp, and shift correct for
 			// obj pointing at the base of the object.
 			// Only care about allocated and not marked.
 			if((bits & (bitAllocated|bitMarked)) != bitAllocated)
 				continue;
-			if(nproc == 1)
-				*bitp |= bitMarked<<shift;
-			else {
-				for(;;) {
-					x = *bitp;
-					if(x & (bitMarked<<shift))
-						goto continue_obj;
-					if(runtime_casp((void**)bitp, (void*)x, (void*)(x|(bitMarked<<shift))))
-						break;
-				}
-			}
+
+			*bitbufpos = (struct BitTarget){obj, ti, bitp, shift};
+			bitbufpos++;
+		}
+
+		runtime_lock(&lock);
+		for(bt=bitbuf; bt<bitbufpos; bt++){
+			xbits = *bt->bitp;
+			bits = xbits >> bt->shift;
+			if((bits & bitMarked) != 0)
+				continue;
+
+			// Mark the block
+			*bt->bitp = xbits | (bitMarked << bt->shift);
 
 			// If object has no pointers, don't need to scan further.
 			if((bits & bitNoPointers) != 0)
 				continue;
 
+			obj = bt->p;
+
+			// Ask span about size class.
+			// (Manually inlined copy of MHeap_Lookup.)
+			x = (uintptr)obj >> PageShift;
+			if(sizeof(void*) == 8)
+				x -= (uintptr)arena_start>>PageShift;
+			s = runtime_mheap.map[x];
+
 			PREFETCH(obj);
 
-			// If buffer is full, get a new one.
-			if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
-				if(wbuf != nil)
-					wbuf->nobj = nobj;
-				wbuf = getempty(wbuf);
-				wp = (void**)(wbuf->obj);
-				nobj = 0;
-			}
-			*wp++ = obj;
+			*wp = (Obj){obj, s->elemsize, bt->ti};
+			wp++;
 			nobj++;
-		continue_obj:;
+		}
+		runtime_unlock(&lock);
+
+		// If another proc wants a pointer, give it some.
+		if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
+			wbuf->nobj = nobj;
+			wbuf = handoff(wbuf);
+			nobj = wbuf->nobj;
+			wp = wbuf->obj + nobj;
+		}
+	}
+
+	*_wp = wp;
+	*_wbuf = wbuf;
+	*_nobj = nobj;
+}
+
+// Program that scans the whole block and treats every block element as a potential pointer
+static uintptr defaultProg[2] = {PtrSize, GC_DEFAULT_PTR};
+
+// scanblock scans a block of n bytes starting at pointer b for references
+// to other objects, scanning any it finds recursively until there are no
+// unscanned objects left.  Instead of using an explicit recursion, it keeps
+// a work list in the Workbuf* structures and loops in the main function
+// body.  Keeping an explicit work list is easier on the stack allocator and
+// more efficient.
+//
+// wbuf: current work buffer
+// wp:   storage for next queued pointer (write pointer)
+// nobj: number of queued objects
+static void
+scanblock(Workbuf *wbuf, Obj *wp, uintptr nobj, bool keepworking)
+{
+	byte *b, *arena_start, *arena_used;
+	uintptr n, i, end_b;
+	void *obj;
+
+	// TODO(atom): to be expanded in a next CL
+	struct Frame {uintptr count, b; uintptr *loop_or_ret;};
+	struct Frame stack_top;
+
+	uintptr *pc;
+
+	struct BufferList *scanbuffers;
+	struct PtrTarget *ptrbuf, *ptrbuf_end;
+	struct BitTarget *bitbuf;
+
+	struct PtrTarget *ptrbufpos;
+
+	// End of local variable declarations.
+
+	if(sizeof(Workbuf) % PageSize != 0)
+		runtime_throw("scanblock: size of Workbuf is suboptimal");
+
+	// Memory arena parameters.
+	arena_start = runtime_mheap.arena_start;
+	arena_used = runtime_mheap.arena_used;
+
+	// Allocate ptrbuf, bitbuf
+	{
+		runtime_lock(&lock);
+
+		if(bufferList == nil) {
+			bufferList = runtime_SysAlloc(sizeof(*bufferList));
+			bufferList->next = nil;
+		}
+		scanbuffers = bufferList;
+		bufferList = bufferList->next;
+
+		ptrbuf = &scanbuffers->ptrtarget[0];
+		ptrbuf_end = &scanbuffers->ptrtarget[0] + nelem(scanbuffers->ptrtarget);
+		bitbuf = &scanbuffers->bittarget[0];
+
+		runtime_unlock(&lock);
+	}
+
+	ptrbufpos = ptrbuf;
+
+	goto next_block;
+
+	for(;;) {
+		// Each iteration scans the block b of length n, queueing pointers in
+		// the work buffer.
+		if(Debug > 1) {
+			runtime_printf("scanblock %p %D\n", b, (int64)n);
 		}
 
+		// TODO(atom): to be replaced in a next CL
+		pc = defaultProg;
+
+		pc++;
+		stack_top.b = (uintptr)b;
+
+		end_b = (uintptr)b + n - PtrSize;
+
+	next_instr:
+		// TODO(atom): to be expanded in a next CL
+		switch(pc[0]) {
+		case GC_DEFAULT_PTR:
+			while(true) {
+				i = stack_top.b;
+				if(i > end_b)
+					goto next_block;
+				stack_top.b += PtrSize;
+
+				obj = *(byte**)i;
+				if((byte*)obj >= arena_start && (byte*)obj < arena_used) {
+					*ptrbufpos = (struct PtrTarget){obj, 0};
+					ptrbufpos++;
+					if(ptrbufpos == ptrbuf_end)
+						goto flush_buffers;
+				}
+			}
+
+		default:
+			runtime_throw("scanblock: invalid GC instruction");
+			return;
+		}
+
+	flush_buffers:
+		flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
+		ptrbufpos = ptrbuf;
+		goto next_instr;
+
+	next_block:
 		// Done scanning [b, b+n).  Prepare for the next iteration of
-		// the loop by setting b and n to the parameters for the next block.
+		// the loop by setting b, n to the parameters for the next block.
+
+		if(nobj == 0) {
+			flushptrbuf(ptrbuf, ptrbufpos-ptrbuf, &wp, &wbuf, &nobj, bitbuf);
+			ptrbufpos = ptrbuf;
+
+			if(nobj == 0) {
+				if(!keepworking) {
+					if(wbuf)
+						putempty(wbuf);
+					goto endscan;
+				}
+				// Emptied our buffer: refill.
+				wbuf = getfull(wbuf);
+				if(wbuf == nil)
+					goto endscan;
+				nobj = wbuf->nobj;
+				wp = wbuf->obj + wbuf->nobj;
+			}
+		}
 
 		// Fetch b from the work buffer.
-		if(nobj == 0) {
-			if(!keepworking) {
-				if(wbuf)
-					putempty(wbuf);
-				return;
-			}
-			// Emptied our buffer: refill.
-			wbuf = getfull(wbuf);
-			if(wbuf == nil)
-				return;
-			nobj = wbuf->nobj;
-			wp = (void**)(wbuf->obj + wbuf->nobj);
-		}
-		b = *--wp;
+		--wp;
+		b = wp->p;
+		n = wp->n;
 		nobj--;
+	}
 
-		// Ask span about size class.
-		// (Manually inlined copy of MHeap_Lookup.)
-		x = (uintptr)b>>PageShift;
-		if(sizeof(void*) == 8)
-			x -= (uintptr)arena_start>>PageShift;
-		s = runtime_mheap.map[x];
-		if(s->sizeclass == 0)
-			n = s->npages<<PageShift;
-		else
-			n = runtime_class_to_size[s->sizeclass];
-	}
+endscan:
+	runtime_lock(&lock);
+	scanbuffers->next = bufferList;
+	bufferList = scanbuffers;
+	runtime_unlock(&lock);
 }
 
 // debug_scanblock is the debug copy of scanblock.
@@ -386,13 +555,12 @@
 			continue;
 
 		p =  (byte*)((uintptr)s->start<<PageShift);
+		size = s->elemsize;
 		if(s->sizeclass == 0) {
 			obj = p;
-			size = (uintptr)s->npages<<PageShift;
 		} else {
 			if((byte*)obj >= (byte*)s->limit)
 				continue;
-			size = runtime_class_to_size[s->sizeclass];
 			int32 i = ((byte*)obj - p)/size;
 			obj = p+i*size;
 		}
@@ -421,11 +589,74 @@
 	}
 }
 
+// Append obj to the work buffer.
+// _wbuf, _wp, _nobj are input/output parameters and are specifying the work buffer.
+static void
+enqueue(Obj obj, Workbuf **_wbuf, Obj **_wp, uintptr *_nobj)
+{
+	uintptr nobj, off;
+	Obj *wp;
+	Workbuf *wbuf;
+
+	if(Debug > 1)
+		runtime_printf("append obj(%p %D %p)\n", obj.p, (int64)obj.n, obj.ti);
+
+	// Align obj.b to a word boundary.
+	off = (uintptr)obj.p & (PtrSize-1);
+	if(off != 0) {
+		obj.p += PtrSize - off;
+		obj.n -= PtrSize - off;
+		obj.ti = 0;
+	}
+
+	if(obj.p == nil || obj.n == 0)
+		return;
+
+	// Load work buffer state
+	wp = *_wp;
+	wbuf = *_wbuf;
+	nobj = *_nobj;
+
+	// If another proc wants a pointer, give it some.
+	if(work.nwait > 0 && nobj > handoffThreshold && work.full == 0) {
+		wbuf->nobj = nobj;
+		wbuf = handoff(wbuf);
+		nobj = wbuf->nobj;
+		wp = wbuf->obj + nobj;
+	}
+
+	// If buffer is full, get a new one.
+	if(wbuf == nil || nobj >= nelem(wbuf->obj)) {
+		if(wbuf != nil)
+			wbuf->nobj = nobj;
+		wbuf = getempty(wbuf);
+		wp = wbuf->obj;
+		nobj = 0;
+	}
+
+	*wp = obj;
+	wp++;
+	nobj++;
+
+	// Save work buffer state
+	*_wp = wp;
+	*_wbuf = wbuf;
+	*_nobj = nobj;
+}
+
 static void
 markroot(ParFor *desc, uint32 i)
 {
+	Obj *wp;
+	Workbuf *wbuf;
+	uintptr nobj;
+
 	USED(&desc);
-	scanblock(work.roots[i].p, work.roots[i].n);
+	wp = nil;
+	wbuf = nil;
+	nobj = 0;
+	enqueue(work.roots[i], &wbuf, &wp, &nobj);
+	scanblock(wbuf, wp, nobj, false);
 }
 
 // Get an empty work buffer off the work.empty list,
@@ -520,25 +751,24 @@
 }
 
 static void
-addroot(byte *p, uintptr n)
+addroot(Obj obj)
 {
 	uint32 cap;
-	GcRoot *new;
+	Obj *new;
 
 	if(work.nroot >= work.rootcap) {
-		cap = PageSize/sizeof(GcRoot);
+		cap = PageSize/sizeof(Obj);
 		if(cap < 2*work.rootcap)
 			cap = 2*work.rootcap;
-		new = (GcRoot*)runtime_SysAlloc(cap*sizeof(GcRoot));
+		new = (Obj*)runtime_SysAlloc(cap*sizeof(Obj));
 		if(work.roots != nil) {
-			runtime_memmove(new, work.roots, work.rootcap*sizeof(GcRoot));
-			runtime_SysFree(work.roots, work.rootcap*sizeof(GcRoot));
+			runtime_memmove(new, work.roots, work.rootcap*sizeof(Obj));
+			runtime_SysFree(work.roots, work.rootcap*sizeof(Obj));
 		}
 		work.roots = new;
 		work.rootcap = cap;
 	}
-	work.roots[work.nroot].p = p;
-	work.roots[work.nroot].n = n;
+	work.roots[work.nroot] = obj;
 	work.nroot++;
 }
 
@@ -582,11 +812,11 @@
 		}
 	}
 	if(sp != nil) {
-		addroot(sp, spsize);
+		addroot((Obj){sp, spsize, 0});
 		while((sp = __splitstack_find(next_segment, next_sp,
 					      &spsize, &next_segment,
 					      &next_sp, &initial_sp)) != nil)
-			addroot(sp, spsize);
+			addroot((Obj){sp, spsize, 0});
 	}
 #else
 	M *mp;
@@ -608,9 +838,9 @@
 	}
 	top = (byte*)gp->gcinitial_sp + gp->gcstack_size;
 	if(top > bottom)
-		addroot(bottom, top - bottom);
+		addroot((Obj){bottom, top - bottom, 0});
 	else
-		addroot(top, bottom - top);
+		addroot((Obj){top, bottom - top, 0});
 #endif
 }
 
@@ -624,7 +854,7 @@
 		runtime_throw("mark - finalizer inconsistency");
 
 	// do not mark the finalizer block itself.  just mark the things it points at.
-	addroot(v, size);
+	addroot((Obj){v, size, 0});
 }
 
 static struct root_list* roots;
@@ -656,15 +886,15 @@
 			void *decl = pr->decl;
 			if(decl == nil)
 				break;
-			addroot(decl, pr->size);
+			addroot((Obj){decl, pr->size, 0});
 			pr++;
 		}
 	}
 
-	addroot((byte*)&runtime_m0, sizeof runtime_m0);
-	addroot((byte*)&runtime_g0, sizeof runtime_g0);
-	addroot((byte*)&runtime_allg, sizeof runtime_allg);
-	addroot((byte*)&runtime_allm, sizeof runtime_allm);
+	addroot((Obj){(byte*)&runtime_m0, sizeof runtime_m0, 0});
+	addroot((Obj){(byte*)&runtime_g0, sizeof runtime_g0, 0});
+	addroot((Obj){(byte*)&runtime_allg, sizeof runtime_allg, 0});
+	addroot((Obj){(byte*)&runtime_allm, sizeof runtime_allm, 0});
 	runtime_MProf_Mark(addroot);
 	runtime_time_scan(addroot);
 	runtime_trampoline_scan(addroot);
@@ -680,12 +910,14 @@
 				break;
 			case MTypes_Words:
 			case MTypes_Bytes:
-				addroot((byte*)&s->types.data, sizeof(void*));
+				// TODO(atom): consider using defaultProg instead of 0
+				addroot((Obj){(byte*)&s->types.data, sizeof(void*), 0});
 				break;
 			}
 		}
 	}
 
+	// stacks
 	for(gp=runtime_allg; gp!=nil; gp=gp->alllink) {
 		switch(gp->status){
 		default:
@@ -709,9 +941,9 @@
 	runtime_walkfintab(addfinroots, addroot);
 
 	for(fb=allfin; fb; fb=fb->alllink)
-		addroot((byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]));
+		addroot((Obj){(byte*)fb->fin, fb->cnt*sizeof(fb->fin[0]), 0});
 
-	addroot((byte*)&work, sizeof work);
+	addroot((Obj){(byte*)&work, sizeof work, 0});
 }
 
 static bool
@@ -955,8 +1187,9 @@
 {
 	// parallel mark for over gc roots
 	runtime_parfordo(work.markfor);
+
 	// help other threads scan secondary blocks
-	scanblock(nil, 0);
+	scanblock(nil, nil, 0, true);
 
 	if(DebugMark) {
 		// wait while the main thread executes mark(debug_scanblock)
@@ -983,16 +1216,16 @@
 static void
 stealcache(void)
 {
-	M *m;
+	M *mp;
 
-	for(m=runtime_allm; m; m=m->alllink)
-		runtime_MCache_ReleaseAll(m->mcache);
+	for(mp=runtime_allm; mp; mp=mp->alllink)
+		runtime_MCache_ReleaseAll(mp->mcache);
 }
 
 static void
 cachestats(GCStats *stats)
 {
-	M *m;
+	M *mp;
 	MCache *c;
 	uint32 i;
 	uint64 stacks_inuse;
@@ -1003,17 +1236,17 @@
 		runtime_memclr((byte*)stats, sizeof(*stats));
 	stacks_inuse = 0;
 	stacks_sys = runtime_stacks_sys;
-	for(m=runtime_allm; m; m=m->alllink) {
-		c = m->mcache;
+	for(mp=runtime_allm; mp; mp=mp->alllink) {
+		c = mp->mcache;
 		runtime_purgecachedstats(c);
-		// stacks_inuse += m->stackalloc->inuse;
-		// stacks_sys += m->stackalloc->sys;
+		// stacks_inuse += mp->stackalloc->inuse;
+		// stacks_sys += mp->stackalloc->sys;
 		if(stats) {
-			src = (uint64*)&m->gcstats;
+			src = (uint64*)&mp->gcstats;
 			dst = (uint64*)stats;
 			for(i=0; i<sizeof(*stats)/sizeof(uint64); i++)
 				dst[i] += src[i];
-			runtime_memclr((byte*)&m->gcstats, sizeof(m->gcstats));
+			runtime_memclr((byte*)&mp->gcstats, sizeof(mp->gcstats));
 		}
 		for(i=0; i<nelem(c->local_by_size); i++) {
 			mstats.by_size[i].nmalloc += c->local_by_size[i].nmalloc;
@@ -1100,7 +1333,7 @@
 	int64 t0, t1, t2, t3;
 	uint64 heap0, heap1, obj0, obj1;
 	GCStats stats;
-	M *m1;
+	M *mp;
 	uint32 i;
 
 	runtime_semacquire(&runtime_worldsema);
@@ -1116,8 +1349,8 @@
 	m->gcing = 1;
 	runtime_stoptheworld();
 
-	for(m1=runtime_allm; m1; m1=m1->alllink)
-		runtime_settype_flush(m1, false);
+	for(mp=runtime_allm; mp; mp=mp->alllink)
+		runtime_settype_flush(mp, false);
 
 	heap0 = 0;
 	obj0 = 0;
@@ -1127,26 +1360,27 @@
 		obj0 = mstats.nmalloc - mstats.nfree;
 	}
 
+	m->locks++;	// disable gc during mallocs in parforalloc
+	if(work.markfor == nil)
+		work.markfor = runtime_parforalloc(MaxGcproc);
+	if(work.sweepfor == nil)
+		work.sweepfor = runtime_parforalloc(MaxGcproc);
+	m->locks--;
+
 	work.nwait = 0;
 	work.ndone = 0;
 	work.debugmarkdone = 0;
 	work.nproc = runtime_gcprocs();
 	addroots();
-	m->locks++;	// disable gc during mallocs in parforalloc
-	if(work.markfor == nil)
-		work.markfor = runtime_parforalloc(MaxGcproc);
 	runtime_parforsetup(work.markfor, work.nproc, work.nroot, nil, false, markroot);
-	if(work.sweepfor == nil)
-		work.sweepfor = runtime_parforalloc(MaxGcproc);
 	runtime_parforsetup(work.sweepfor, work.nproc, runtime_mheap.nspan, nil, true, sweepspan);
-	m->locks--;
 	if(work.nproc > 1) {
 		runtime_noteclear(&work.alldone);
 		runtime_helpgc(work.nproc);
 	}
 
 	runtime_parfordo(work.markfor);
-	scanblock(nil, 0);
+	scanblock(nil, nil, 0, true);
 
 	if(DebugMark) {
 		for(i=0; i<work.nroot; i++)
diff -r 1faae2fad711 libgo/runtime/mgc0.h
--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/libgo/runtime/mgc0.h	Fri Dec 21 16:57:00 2012 -0800
@@ -0,0 +1,42 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector (GC)
+
+// GC instruction opcodes.
+//
+// The opcode of an instruction is followed by zero or more
+// arguments to the instruction.
+//
+// Meaning of arguments:
+//   off      Offset (in bytes) from the start of the current object
+//   objgc    Pointer to GC info of an object
+//   len      Length of an array
+//   elemsize Size (in bytes) of an element
+//   size     Size (in bytes)
+enum {
+	GC_END,         // End of object, loop or subroutine. Args: none
+	GC_PTR,         // A typed pointer. Args: (off, objgc)
+	GC_APTR,        // Pointer to an arbitrary object. Args: (off)
+	GC_ARRAY_START, // Start an array with a fixed length. Args: (off, len, elemsize)
+	GC_ARRAY_NEXT,  // The next element of an array. Args: none
+	GC_CALL,        // Call a subroutine. Args: (off, objgc)
+	GC_MAP_PTR,     // Go map. Args: (off, MapType*)
+	GC_STRING,      // Go string. Args: (off)
+	GC_EFACE,       // interface{}. Args: (off)
+	GC_IFACE,       // interface{...}. Args: (off)
+	GC_SLICE,       // Go slice. Args: (off, objgc)
+	GC_REGION,      // A region/part of the current object. Args: (off, size, objgc)
+
+	GC_NUM_INSTR,   // Number of instruction opcodes
+};
+
+enum {
+	// Size of GC's fixed stack.
+	//
+	// The current GC implementation permits:
+	//  - at most 1 stack allocation because of GC_CALL
+	//  - at most GC_STACK_CAPACITY allocations because of GC_ARRAY_START
+	GC_STACK_CAPACITY = 8,	
+};
diff -r 1faae2fad711 libgo/runtime/mprof.goc
--- a/libgo/runtime/mprof.goc	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/mprof.goc	Fri Dec 21 16:57:00 2012 -0800
@@ -362,13 +362,13 @@
 }
 
 void
-runtime_MProf_Mark(void (*addroot)(byte *, uintptr))
+runtime_MProf_Mark(void (*addroot)(Obj))
 {
 	// buckhash is not allocated via mallocgc.
-	addroot((byte*)&mbuckets, sizeof mbuckets);
-	addroot((byte*)&bbuckets, sizeof bbuckets);
-	addroot((byte*)&addrhash, sizeof addrhash);
-	addroot((byte*)&addrfree, sizeof addrfree);
+	addroot((Obj){(byte*)&mbuckets, sizeof mbuckets, 0});
+	addroot((Obj){(byte*)&bbuckets, sizeof bbuckets, 0});
+	addroot((Obj){(byte*)&addrhash, sizeof addrhash, 0});
+	addroot((Obj){(byte*)&addrfree, sizeof addrfree, 0});
 }
 
 // Must match BlockProfileRecord in debug.go.
@@ -412,18 +412,18 @@
 
 func ThreadCreateProfile(p Slice) (n int, ok bool) {
 	TRecord *r;
-	M *first, *m;
+	M *first, *mp;
 	
 	first = runtime_atomicloadp(&runtime_allm);
 	n = 0;
-	for(m=first; m; m=m->alllink)
+	for(mp=first; mp; mp=mp->alllink)
 		n++;
 	ok = false;
 	if(n <= p.__count) {
 		ok = true;
 		r = (TRecord*)p.__values;
-		for(m=first; m; m=m->alllink) {
-			runtime_memmove(r->stk, m->createstack, sizeof r->stk);
+		for(mp=first; mp; mp=mp->alllink) {
+			runtime_memmove(r->stk, mp->createstack, sizeof r->stk);
 			r++;
 		}
 	}
@@ -471,11 +471,11 @@
 }
 
 static void
-saveg(G *g, TRecord *r)
+saveg(G *gp, TRecord *r)
 {
 	int32 n;
 
-	if(g == runtime_g())
+	if(gp == runtime_g())
 		n = runtime_callers(0, r->stk, nelem(r->stk));
 	else {
 		// FIXME: Not implemented.
diff -r 1faae2fad711 libgo/runtime/proc.c
--- a/libgo/runtime/proc.c	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/proc.c	Fri Dec 21 16:57:00 2012 -0800
@@ -555,13 +555,13 @@
 static void
 schedunlock(void)
 {
-	M *m;
+	M *mp;
 
-	m = mwakeup;
+	mp = mwakeup;
 	mwakeup = nil;
 	runtime_unlock(&runtime_sched);
-	if(m != nil)
-		runtime_notewakeup(&m->havenextg);
+	if(mp != nil)
+		runtime_notewakeup(&mp->havenextg);
 }
 
 void
diff -r 1faae2fad711 libgo/runtime/runtime.c
--- a/libgo/runtime/runtime.c	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/runtime.c	Fri Dec 21 16:57:00 2012 -0800
@@ -79,33 +79,6 @@
 	syscall_Envs.__capacity = n;
 }
 
-const byte*
-runtime_getenv(const char *s)
-{
-	int32 i, j, len;
-	const byte *v, *bs;
-	String* envv;
-	int32 envc;
-
-	bs = (const byte*)s;
-	len = runtime_findnull(bs);
-	envv = (String*)syscall_Envs.__values;
-	envc = syscall_Envs.__count;
-	for(i=0; i<envc; i++){
-		if(envv[i].len <= len)
-			continue;
-		v = (const byte*)envv[i].str;
-		for(j=0; j<len; j++)
-			if(bs[j] != v[j])
-				goto nomatch;
-		if(v[len] != '=')
-			goto nomatch;
-		return v+len+1;
-	nomatch:;
-	}
-	return nil;
-}
-
 int32
 runtime_atoi(const byte *p)
 {
diff -r 1faae2fad711 libgo/runtime/runtime.h
--- a/libgo/runtime/runtime.h	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/runtime.h	Fri Dec 21 16:57:00 2012 -0800
@@ -599,9 +599,6 @@
 	UseSpanType = 1,
 };
 
-void	runtime_time_scan(void (*)(byte*, uintptr));
-void	runtime_trampoline_scan(void (*)(byte *, uintptr));
-
 void	runtime_setsig(int32, bool, bool);
 #define runtime_setitimer setitimer
 
diff -r 1faae2fad711 libgo/runtime/time.goc
--- a/libgo/runtime/time.goc	Fri Dec 21 14:20:47 2012 -0800
+++ b/libgo/runtime/time.goc	Fri Dec 21 16:57:00 2012 -0800
@@ -255,7 +255,7 @@
 }
 
 void
-runtime_time_scan(void (*addroot)(byte*, uintptr))
+runtime_time_scan(void (*addroot)(Obj))
 {
-	addroot((byte*)&timers, sizeof timers);
+	addroot((Obj){(byte*)&timers, sizeof timers, 0});
 }

Reply via email to