Since the gofrontend uses conservative garbage collection on the
stack, pointers that appeared dead can be revived while scanning the
stack.  This can cause the allocation counts seen when sweeping a span
to increase.  We already accept that.  This patch changes the code to
update the counters when we find a discrepancy.  It also fixes the
free index, and changes the span allocation code to retry if it gets a
span that has become full.  This gives us slightly more correct
numbers in MemStats, helps avoid a rare failure in TestReadMemStats,
and helps avoid some very rare crashes.  Bootstrapped and ran Go
testsuite on x86_64-pc-linux-gnu.  Committed to mainline.
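
To illustrate the idea outside the runtime, here is a minimal
standalone sketch (hypothetical names, not the actual runtime code):
with conservative stack scanning, the number of objects found marked
during sweep can exceed the allocation count recorded when the span
was last swept, so the freed count has to be computed as a signed
value, which is what the patch does with nfreedSigned.

package main

import "fmt"

// freedDelta is a hypothetical helper mirroring the patch's
// nfreedSigned computation: it can be negative when conservative
// scanning revived objects that previously looked dead.
func freedDelta(prevAllocCount, nalloc uint16) int {
        return int(prevAllocCount) - int(nalloc)
}

func main() {
        fmt.Println(freedDelta(10, 7))  // 3 objects actually freed
        fmt.Println(freedDelta(10, 12)) // -2: two "dead" objects were revived
}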

Ian
Index: gcc/go/gofrontend/MERGE
===================================================================
--- gcc/go/gofrontend/MERGE     (revision 264290)
+++ gcc/go/gofrontend/MERGE     (working copy)
@@ -1,4 +1,4 @@
-f2cd046a4e0d681c3d21ee547b437d3eab8af268
+82d7205ba9e5c1fe38fd24f89a45caf2e974975b
 
 The first line of this file holds the git revision number of the last
 merge done from the gofrontend repository.
Index: libgo/go/runtime/mcentral.go
===================================================================
--- libgo/go/runtime/mcentral.go        (revision 264245)
+++ libgo/go/runtime/mcentral.go        (working copy)
@@ -56,6 +56,15 @@ retry:
                        c.empty.insertBack(s)
                        unlock(&c.lock)
                        s.sweep(true)
+
+                       // With gccgo's conservative GC, the returned span may
+                       // now be full. See the comments in mspan.sweep.
+                       if uintptr(s.allocCount) == s.nelems {
+                               s.freeindex = s.nelems
+                               lock(&c.lock)
+                               goto retry
+                       }
+
                        goto havespan
                }
                if s.sweepgen == sg-1 {
Index: libgo/go/runtime/mgcsweep.go
===================================================================
--- libgo/go/runtime/mgcsweep.go        (revision 264245)
+++ libgo/go/runtime/mgcsweep.go        (working copy)
@@ -296,7 +296,7 @@ func (s *mspan) sweep(preserve bool) boo
        }
        nfreed := s.allocCount - nalloc
 
-       // This test is not reliable with gccgo, because of
+       // This check is not reliable with gccgo, because of
        // conservative stack scanning. The test boils down to
        // checking that no new bits have been set in gcmarkBits since
        // the span was added to the sweep count. New bits are set by
@@ -309,16 +309,23 @@ func (s *mspan) sweep(preserve bool) boo
        // check to be inaccurate, and it will keep an object live
        // unnecessarily, but provided the pointer is not really live
        // it is not otherwise a problem. So we disable the test for gccgo.
-       if false && nalloc > s.allocCount {
-               print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
-               throw("sweep increased allocation count")
+       nfreedSigned := int(nfreed)
+       if nalloc > s.allocCount {
+               // print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n")
+               // throw("sweep increased allocation count")
+
+               // For gccgo, adjust the freed count as a signed number.
+               nfreedSigned = int(s.allocCount) - int(nalloc)
+               if uintptr(nalloc) == s.nelems {
+                       s.freeindex = s.nelems
+               }
        }
 
        s.allocCount = nalloc
        wasempty := s.nextFreeIndex() == s.nelems
        s.freeindex = 0 // reset allocation index to start of span.
        if trace.enabled {
-               getg().m.p.ptr().traceReclaimed += uintptr(nfreed) * s.elemsize
+               getg().m.p.ptr().traceReclaimed += uintptr(nfreedSigned) * s.elemsize
        }
 
        // gcmarkBits becomes the allocBits.
@@ -334,7 +341,7 @@ func (s *mspan) sweep(preserve bool) boo
        // But we need to set it before we make the span available for allocation
        // (return it to heap or mcentral), because allocation code assumes that a
        // span is already swept if available for allocation.
-       if freeToHeap || nfreed == 0 {
+       if freeToHeap || nfreedSigned <= 0 {
                // The span must be in our exclusive ownership until we update sweepgen,
                // check for potential races.
                if s.state != mSpanInUse || s.sweepgen != sweepgen-1 {
@@ -347,8 +354,11 @@ func (s *mspan) sweep(preserve bool) boo
                atomic.Store(&s.sweepgen, sweepgen)
        }
 
-       if nfreed > 0 && spc.sizeclass() != 0 {
-               c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreed)
+       if spc.sizeclass() != 0 {
+               c.local_nsmallfree[spc.sizeclass()] += uintptr(nfreedSigned)
+       }
+
+       if nfreedSigned > 0 && spc.sizeclass() != 0 {
                res = mheap_.central[spc].mcentral.freeSpan(s, preserve, wasempty)
                // MCentral_FreeSpan updates sweepgen
        } else if freeToHeap {
