// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Garbage collector: sweeping

package runtime

import "unsafe"

var sweep sweepdata

// State of background sweep.
type sweepdata struct {
	lock    mutex
	g       *g
	parked  bool
	started bool

	spanidx uint32 // background sweeper position

	nbgsweep    uint32 // number of spans swept by the background sweeper
	npausesweep uint32 // number of spans swept while the world was stopped
}

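// finishsweep_m ensures that all spans have been swept: it sweeps any
// remaining unswept spans itself and waits for concurrent sweeps to finish.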
//go:nowritebarrier
func finishsweep_m() {
	// The world is stopped so we should be able to complete the sweeps
	// quickly.
	for sweepone() != ^uintptr(0) {
		sweep.npausesweep++
	}

	// There may be some other spans being swept concurrently that
	// we need to wait for. If finishsweep_m is done with the world stopped,
	// this code is not required.
	sg := mheap_.sweepgen
	for _, s := range work.spans {
		if s.sweepgen != sg && s.state == _MSpanInUse {
			mSpan_EnsureSwept(s)
		}
	}
}

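// bgsweep is the background sweeper goroutine. It sweeps spans one at a
// time, yielding between spans, and parks itself when there is nothing
// left to sweep.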
func bgsweep(c chan int) {
	sweep.g = getg()

	lock(&sweep.lock)
	sweep.parked = true
	c <- 1 // signal the creator that the sweeper is initialized
	goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)

	for {
		for gosweepone() != ^uintptr(0) {
			sweep.nbgsweep++
			Gosched()
		}
		lock(&sweep.lock)
		if !gosweepdone() {
			// This can happen if a GC runs between
			// gosweepone returning ^0 above
			// and the lock being acquired.
			unlock(&sweep.lock)
			continue
		}
		sweep.parked = true
		goparkunlock(&sweep.lock, "GC sweep wait", traceEvGoBlock, 1)
	}
}

// sweepone sweeps one span and returns the number of pages returned to the
// heap, or ^uintptr(0) if there was nothing to sweep.
//go:nowritebarrier
func sweepone() uintptr {
	_g_ := getg()

	// Increment locks to ensure that the goroutine is not preempted
	// in the middle of the sweep, which would leave the span in an
	// inconsistent state for the next GC.
	_g_.m.locks++
	sg := mheap_.sweepgen
	for {
		idx := xadd(&sweep.spanidx, 1) - 1
		if idx >= uint32(len(work.spans)) {
			mheap_.sweepdone = 1
			_g_.m.locks--
			return ^uintptr(0)
		}
		s := work.spans[idx]
		if s.state != _MSpanInUse {
			s.sweepgen = sg
			continue
		}
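		// Sweep generation:
		// if sweepgen == h.sweepgen-2, the span needs sweeping;
		// if sweepgen == h.sweepgen-1, the span is being swept;
		// if sweepgen == h.sweepgen, the span is swept and ready to use.
		// Claim the span for sweeping by advancing it from sg-2 to sg-1.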
		if s.sweepgen != sg-2 || !cas(&s.sweepgen, sg-2, sg-1) {
			continue
		}
		npages := s.npages
		if !mSpan_Sweep(s, false) {
			npages = 0
		}
		_g_.m.locks--
		return npages
	}
}

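// gosweepone sweeps one span on the system stack and returns the number of
// pages returned to the heap, or ^uintptr(0) if there was nothing to sweep.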
//go:nowritebarrier
func gosweepone() uintptr {
	var ret uintptr
	systemstack(func() {
		ret = sweepone()
	})
	return ret
}

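// gosweepdone reports whether all spans have been swept.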
//go:nowritebarrier
func gosweepdone() bool {
	return mheap_.sweepdone != 0
}

// mSpan_EnsureSwept returns only when span s has been swept.
//go:nowritebarrier
func mSpan_EnsureSwept(s *mspan) {
	// Caller must disable preemption.
	// Otherwise when this function returns the span can become unswept again
	// (if GC is triggered on another goroutine).
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_EnsureSwept: m is not locked")
	}

	sg := mheap_.sweepgen
	if atomicload(&s.sweepgen) == sg {
		return
	}
	// The caller must be sure that the span is a MSpanInUse span.
	if cas(&s.sweepgen, sg-2, sg-1) {
		mSpan_Sweep(s, false)
		return
	}
	// Unfortunate condition: the span is being swept by someone else and we
	// have no efficient way to wait, so spin until the sweep completes.
	for atomicload(&s.sweepgen) != sg {
		osyield()
	}
}

// mSpan_Sweep frees or collects finalizers for blocks not marked in the mark phase.
// It clears the mark bits in preparation for the next GC round.
// Returns true if the span was returned to the heap.
// If preserve=true, don't return the span to the heap or relink it in the
// MCentral lists; the caller takes care of it.
//TODO go:nowritebarrier
func mSpan_Sweep(s *mspan, preserve bool) bool {
	// It's critical that we enter this function with preemption disabled:
	// GC must not start while we are in the middle of this function.
	_g_ := getg()
	if _g_.m.locks == 0 && _g_.m.mallocing == 0 && _g_ != _g_.m.g0 {
		throw("MSpan_Sweep: m is not locked")
	}
	sweepgen := mheap_.sweepgen
	if s.state != _MSpanInUse || s.sweepgen != sweepgen-1 {
		print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
		throw("MSpan_Sweep: bad span state")
	}

	if trace.enabled {
		traceGCSweepStart()
	}

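	// Credit the pages in this span toward the proportional-sweep
	// accounting consumed by deductSweepCredit.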
	xadd64(&mheap_.pagesSwept, int64(s.npages))

	cl := s.sizeclass
	size := s.elemsize
	res := false
	nfree := 0

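	// head and end accumulate the list of freed small objects,
	// which is handed to mCentral_FreeSpan below.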
	var head, end gclinkptr

	c := _g_.m.mcache
	freeToHeap := false

	// Mark any free objects in this span so we don't collect them.
	sstart := uintptr(s.start << _PageShift)
	for link := s.freelist; link.ptr() != nil; link = link.ptr().next {
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Free list is corrupted.
			dumpFreeList(s)
			throw("free list corrupted")
		}
		heapBitsForAddr(uintptr(link)).setMarkedNonAtomic()
	}

	// Unlink & free special records for any objects we're about to free.
	specialp := &s.specials
	special := *specialp
	for special != nil {
		// A finalizer can be set for an inner byte of an object; find the object's beginning.
		p := uintptr(s.start<<_PageShift) + uintptr(special.offset)/size*size
		hbits := heapBitsForAddr(p)
		if !hbits.isMarked() {
			// Find the exact byte for which the special was set up
			// (as opposed to the object beginning).
			p := uintptr(s.start<<_PageShift) + uintptr(special.offset)
			// About to free the object: splice out the special record.
			y := special
			special = special.next
			*specialp = special
			if !freespecial(y, unsafe.Pointer(p), size, false) {
				// Stop freeing of the object if it has a finalizer.
				hbits.setMarkedNonAtomic()
			}
		} else {
			// The object is still live: keep the special record.
			specialp = &special.next
			special = *specialp
		}
	}

	// Sweep through n objects of given size starting at p.
	// This thread owns the span now, so it can manipulate
	// the block bitmap without atomic operations.

	size, n, _ := s.layout()
	heapBitsSweepSpan(s.base(), size, n, func(p uintptr) {
		// At this point we know that we are looking at a garbage object
		// that needs to be collected.
		if debug.allocfreetrace != 0 {
			tracefree(unsafe.Pointer(p), size)
		}

		// Reset to allocated+noscan.
		if cl == 0 {
			// Free large span.
			if preserve {
				throw("can't preserve large span")
			}
			heapBitsForSpan(p).initSpan(s.layout())
			s.needzero = 1

			// It is important to set sweepgen before returning the span to the heap.
			atomicstore(&s.sweepgen, sweepgen)

			// Free the span after heapBitsSweepSpan
			// returns, since it's not done with the span.
			freeToHeap = true
		} else {
			// Free small object.
			if size > 2*ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = uintptrMask & 0xdeaddeaddeaddead // mark as "needs to be zeroed"
			} else if size > ptrSize {
				*(*uintptr)(unsafe.Pointer(p + ptrSize)) = 0
			}
			if head.ptr() == nil {
				head = gclinkptr(p)
			} else {
				end.ptr().next = gclinkptr(p)
			}
			end = gclinkptr(p)
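			// Poison the tail's next pointer; it is fixed up when another
			// object is appended or when the list is spliced back into the
			// span's free list by mCentral_FreeSpan.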
			end.ptr().next = gclinkptr(0x0bade5)
			nfree++
		}
	})

	// We need to set s.sweepgen = h.sweepgen only when all blocks are swept,
	// because of the potential for a concurrent free/SetFinalizer.
	// But we need to set it before we make the span available for allocation
	// (return it to heap or mcentral), because allocation code assumes that a
	// span is already swept if available for allocation.
	//
	// TODO(austin): Clean this up by consolidating atomicstore in
	// large span path above with this.
	if !freeToHeap && nfree == 0 {
		// The span must be in our exclusive ownership until we update sweepgen;
		// check for potential races.
		if s.state != _MSpanInUse || s.sweepgen != sweepgen-1 {
			print("MSpan_Sweep: state=", s.state, " sweepgen=", s.sweepgen, " mheap.sweepgen=", sweepgen, "\n")
			throw("MSpan_Sweep: bad span state after sweep")
		}
		atomicstore(&s.sweepgen, sweepgen)
	}
	if nfree > 0 {
		c.local_nsmallfree[cl] += uintptr(nfree)
		res = mCentral_FreeSpan(&mheap_.central[cl].mcentral, s, int32(nfree), head, end, preserve)
		// MCentral_FreeSpan updates sweepgen
	} else if freeToHeap {
		// Free large span to heap

		// NOTE(rsc,dvyukov): The original implementation of efence
		// in CL 22060046 used SysFree instead of SysFault, so that
		// the operating system would eventually give the memory
		// back to us again, so that an efence program could run
		// longer without running out of memory. Unfortunately,
		// calling SysFree here without any kind of adjustment of the
		// heap data structures means that when the memory does
		// come back to us, we have the wrong metadata for it, either in
		// the MSpan structures or in the garbage collection bitmap.
		// Using SysFault here means that the program will run out of
		// memory fairly quickly in efence mode, but at least it won't
		// have mysterious crashes due to confused memory reuse.
		// It should be possible to switch back to SysFree if we also
		// implement and then call some kind of MHeap_DeleteSpan.
		if debug.efence > 0 {
			s.limit = 0 // prevent mlookup from finding this span
			sysFault(unsafe.Pointer(uintptr(s.start<<_PageShift)), size)
		} else {
			mHeap_Free(&mheap_, s, 1)
		}
		c.local_nlargefree++
		c.local_largefree += size
		res = true
	}
	if trace.enabled {
		traceGCSweepDone()
	}
	return res
}

// deductSweepCredit deducts sweep credit for allocating a span of
// size spanBytes. This must be performed *before* the span is
// allocated to ensure the system has enough credit. If necessary, it
// performs sweeping to prevent going into debt. If the caller will
// also sweep pages (e.g., for a large allocation), it can pass a
// non-zero callerSweepPages to leave that many pages unswept.
//
// deductSweepCredit is the core of the "proportional sweep" system.
// It uses statistics gathered by the garbage collector to perform
// enough sweeping so that all pages are swept during the concurrent
// sweep phase between GC cycles.
//
// mheap_ must NOT be locked.
func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) {
	if mheap_.sweepPagesPerByte == 0 {
		// Proportional sweep is done or disabled.
		return
	}

	// Account for this span allocation.
	spanBytesAlloc := xadd64(&mheap_.spanBytesAlloc, int64(spanBytes))

	// Fix debt if necessary: sweep until the pages swept so far cover the
	// pages owed for the span bytes allocated so far, leaving up to
	// callerSweepPages pages for the caller to sweep itself.
	pagesOwed := int64(mheap_.sweepPagesPerByte * float64(spanBytesAlloc))
	for pagesOwed-int64(atomicload64(&mheap_.pagesSwept)) > int64(callerSweepPages) {
		if gosweepone() == ^uintptr(0) {
			mheap_.sweepPagesPerByte = 0
			break
		}
	}
}

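// dumpFreeList prints span s's free list for debugging. It is called when
// the free list is found to be corrupted.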
func dumpFreeList(s *mspan) {
	printlock()
	print("runtime: free list of span ", s, ":\n")
	sstart := uintptr(s.start << _PageShift)
	link := s.freelist
	for i := 0; i < int(s.npages*_PageSize/s.elemsize); i++ {
		if i != 0 {
			print(" -> ")
		}
		print(hex(link))
		if link.ptr() == nil {
			break
		}
		if uintptr(link) < sstart || s.limit <= uintptr(link) {
			// Bad link. Stop walking before we crash.
			print(" (BAD)")
			break
		}
		link = link.ptr().next
	}
	print("\n")
	printunlock()
}