| // Copyright 2013 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| package sync |
| |
| import ( |
| "internal/race" |
| "runtime" |
| "sync/atomic" |
| "unsafe" |
| ) |
| |
| // A Pool is a set of temporary objects that may be individually saved and |
| // retrieved. |
| // |
| // Any item stored in the Pool may be removed automatically at any time without |
| // notification. If the Pool holds the only reference when this happens, the |
| // item might be deallocated. |
| // |
| // A Pool is safe for use by multiple goroutines simultaneously. |
| // |
| // Pool's purpose is to cache allocated but unused items for later reuse, |
| // relieving pressure on the garbage collector. That is, it makes it easy to |
| // build efficient, thread-safe free lists. However, it is not suitable for all |
| // free lists. |
| // |
| // An appropriate use of a Pool is to manage a group of temporary items |
| // silently shared among and potentially reused by concurrent independent |
| // clients of a package. Pool provides a way to amortize allocation overhead |
| // across many clients. |
| // |
| // An example of good use of a Pool is in the fmt package, which maintains a |
| // dynamically-sized store of temporary output buffers. The store scales under |
| // load (when many goroutines are actively printing) and shrinks when |
| // quiescent. |
| // |
| // On the other hand, a free list maintained as part of a short-lived object is |
| // not a suitable use for a Pool, since the overhead does not amortize well in |
| // that scenario. It is more efficient to have such objects implement their own |
| // free list. |
| // |
| // A Pool must not be copied after first use. |
| type Pool struct { |
| noCopy noCopy |
| |
| local unsafe.Pointer // local fixed-size per-P pool, actual type is [P]poolLocal |
| localSize uintptr // size of the local array |
| |
| victim unsafe.Pointer // local from previous cycle |
	victimSize uintptr // size of the victim array
| |
| // New optionally specifies a function to generate |
| // a value when Get would otherwise return nil. |
| // It may not be changed concurrently with calls to Get. |
| New func() interface{} |
| } |
| |
| // Local per-P Pool appendix. |
| type poolLocalInternal struct { |
| private interface{} // Can be used only by the respective P. |
| shared poolChain // Local P can pushHead/popHead; any P can popTail. |
| } |
| |
| type poolLocal struct { |
| poolLocalInternal |
| |
	// Prevents false sharing on widespread platforms with
	// 128 mod (cache line size) = 0.
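	//
	// For illustration (an assumption about a common layout, not a
	// guarantee): on 64-bit platforms poolLocalInternal is typically 32
	// bytes (a 16-byte interface value plus two poolChain pointers), so
	// pad is 128-32 = 96 bytes and each poolLocal occupies exactly 128
	// bytes, keeping entries for adjacent Ps on distinct cache lines.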
| pad [128 - unsafe.Sizeof(poolLocalInternal{})%128]byte |
| } |
| |
| // from runtime |
| func fastrand() uint32 |
| |
| var poolRaceHash [128]uint64 |
| |
| // poolRaceAddr returns an address to use as the synchronization point |
| // for race detector logic. We don't use the actual pointer stored in x |
| // directly, for fear of conflicting with other synchronization on that address. |
| // Instead, we hash the pointer to get an index into poolRaceHash. |
| // See discussion on golang.org/cl/31589. |
| func poolRaceAddr(x interface{}) unsafe.Pointer { |
| ptr := uintptr((*[2]unsafe.Pointer)(unsafe.Pointer(&x))[1]) |
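	// Multiplicative hash of the pointer's low 32 bits; 0x85ebca6b is a
	// mixing constant (also used by murmur3's finalizer) that spreads
	// nearby pointers across the 128 slots of poolRaceHash.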
| h := uint32((uint64(uint32(ptr)) * 0x85ebca6b) >> 16) |
| return unsafe.Pointer(&poolRaceHash[h%uint32(len(poolRaceHash))]) |
| } |
| |
| // Put adds x to the pool. |
| func (p *Pool) Put(x interface{}) { |
| if x == nil { |
| return |
| } |
| if race.Enabled { |
| if fastrand()%4 == 0 { |
			// Randomly drop x on the floor.
| return |
| } |
| race.ReleaseMerge(poolRaceAddr(x)) |
| race.Disable() |
| } |
| l, _ := p.pin() |
| if l.private == nil { |
| l.private = x |
| x = nil |
| } |
| if x != nil { |
| l.shared.pushHead(x) |
| } |
| runtime_procUnpin() |
| if race.Enabled { |
| race.Enable() |
| } |
| } |
| |
| // Get selects an arbitrary item from the Pool, removes it from the |
| // Pool, and returns it to the caller. |
| // Get may choose to ignore the pool and treat it as empty. |
| // Callers should not assume any relation between values passed to Put and |
| // the values returned by Get. |
| // |
| // If Get would otherwise return nil and p.New is non-nil, Get returns |
| // the result of calling p.New. |
| func (p *Pool) Get() interface{} { |
| if race.Enabled { |
| race.Disable() |
| } |
| l, pid := p.pin() |
| x := l.private |
| l.private = nil |
| if x == nil { |
| // Try to pop the head of the local shard. We prefer |
| // the head over the tail for temporal locality of |
| // reuse. |
| x, _ = l.shared.popHead() |
| if x == nil { |
| x = p.getSlow(pid) |
| } |
| } |
| runtime_procUnpin() |
| if race.Enabled { |
| race.Enable() |
| if x != nil { |
| race.Acquire(poolRaceAddr(x)) |
| } |
| } |
| if x == nil && p.New != nil { |
| x = p.New() |
| } |
| return x |
| } |
| |
| func (p *Pool) getSlow(pid int) interface{} { |
| // See the comment in pin regarding ordering of the loads. |
| size := atomic.LoadUintptr(&p.localSize) // load-acquire |
| locals := p.local // load-consume |
| // Try to steal one element from other procs. |
| for i := 0; i < int(size); i++ { |
| l := indexLocal(locals, (pid+i+1)%int(size)) |
| if x, _ := l.shared.popTail(); x != nil { |
| return x |
| } |
| } |
| |
| // Try the victim cache. We do this after attempting to steal |
| // from all primary caches because we want objects in the |
| // victim cache to age out if at all possible. |
| size = atomic.LoadUintptr(&p.victimSize) |
| if uintptr(pid) >= size { |
| return nil |
| } |
| locals = p.victim |
| l := indexLocal(locals, pid) |
| if x := l.private; x != nil { |
| l.private = nil |
| return x |
| } |
| for i := 0; i < int(size); i++ { |
| l := indexLocal(locals, (pid+i)%int(size)) |
| if x, _ := l.shared.popTail(); x != nil { |
| return x |
| } |
| } |
| |
	// Mark the victim cache as empty so that future gets don't
	// bother with it.
| atomic.StoreUintptr(&p.victimSize, 0) |
| |
| return nil |
| } |
| |
// pin pins the current goroutine to P, disables preemption, and
// returns the poolLocal for the P together with the P's id.
// The caller must call runtime_procUnpin() when done with the pool.
| func (p *Pool) pin() (*poolLocal, int) { |
| pid := runtime_procPin() |
	// In pinSlow we store to local and then to localSize; here we load in the opposite order.
	// Since we've disabled preemption, GC cannot happen in between.
	// Thus here we must observe a local array at least as large as localSize.
	// Observing a newer/larger local is fine: we are guaranteed to observe its zero-initialized contents.
| s := atomic.LoadUintptr(&p.localSize) // load-acquire |
| l := p.local // load-consume |
| if uintptr(pid) < s { |
| return indexLocal(l, pid), pid |
| } |
| return p.pinSlow() |
| } |
| |
| func (p *Pool) pinSlow() (*poolLocal, int) { |
| // Retry under the mutex. |
	// We cannot lock the mutex while pinned.
| runtime_procUnpin() |
| allPoolsMu.Lock() |
| defer allPoolsMu.Unlock() |
| pid := runtime_procPin() |
| // poolCleanup won't be called while we are pinned. |
| s := p.localSize |
| l := p.local |
| if uintptr(pid) < s { |
| return indexLocal(l, pid), pid |
| } |
| if p.local == nil { |
| allPools = append(allPools, p) |
| } |
| // If GOMAXPROCS changes between GCs, we re-allocate the array and lose the old one. |
| size := runtime.GOMAXPROCS(0) |
| local := make([]poolLocal, size) |
| atomic.StorePointer(&p.local, unsafe.Pointer(&local[0])) // store-release |
| atomic.StoreUintptr(&p.localSize, uintptr(size)) // store-release |
| return &local[pid], pid |
| } |
| |
| func poolCleanup() { |
| // This function is called with the world stopped, at the beginning of a garbage collection. |
| // It must not allocate and probably should not call any runtime functions. |
| |
| // Because the world is stopped, no pool user can be in a |
| // pinned section (in effect, this has all Ps pinned). |
| |
| // Drop victim caches from all pools. |
| for _, p := range oldPools { |
| p.victim = nil |
| p.victimSize = 0 |
| } |
| |
| // Move primary cache to victim cache. |
| for _, p := range allPools { |
| p.victim = p.local |
| p.victimSize = p.localSize |
| p.local = nil |
| p.localSize = 0 |
| } |
| |
| // The pools with non-empty primary caches now have non-empty |
| // victim caches and no pools have primary caches. |
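	//
	// The net effect is a two-generation scheme: an object that is Put
	// into a pool and never retrieved survives one GC (its shard moves
	// from local to victim) and is dropped at the next one.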
| oldPools, allPools = allPools, nil |
| } |
| |
| var ( |
| allPoolsMu Mutex |
| |
| // allPools is the set of pools that have non-empty primary |
| // caches. Protected by either 1) allPoolsMu and pinning or 2) |
| // STW. |
| allPools []*Pool |
| |
| // oldPools is the set of pools that may have non-empty victim |
| // caches. Protected by STW. |
| oldPools []*Pool |
| ) |
| |
| func init() { |
| runtime_registerPoolCleanup(poolCleanup) |
| } |
| |
| func indexLocal(l unsafe.Pointer, i int) *poolLocal { |
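	// Equivalent to &local[i] for the []poolLocal allocated in pinSlow,
	// expressed with unsafe pointer arithmetic.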
| lp := unsafe.Pointer(uintptr(l) + uintptr(i)*unsafe.Sizeof(poolLocal{})) |
| return (*poolLocal)(lp) |
| } |
| |
| // Implemented in runtime. |
| func runtime_registerPoolCleanup(cleanup func()) |
| func runtime_procPin() int |
| func runtime_procUnpin() |