| // Copyright 2010 The Go Authors. All rights reserved. |
| // Use of this source code is governed by a BSD-style |
| // license that can be found in the LICENSE file. |
| |
| // Export guts for testing. |
| |
| package runtime |
| |
| import ( |
| "runtime/internal/atomic" |
| "runtime/internal/sys" |
| "unsafe" |
| ) |
| |
| //var Fadd64 = fadd64 |
| //var Fsub64 = fsub64 |
| //var Fmul64 = fmul64 |
| //var Fdiv64 = fdiv64 |
| //var F64to32 = f64to32 |
| //var F32to64 = f32to64 |
| //var Fcmp64 = fcmp64 |
| //var Fintto64 = fintto64 |
| //var F64toint = f64toint |
| //var Sqrt = sqrt |
| |
| var Entersyscall = entersyscall |
| var Exitsyscall = exitsyscall |
| var LockedOSThread = lockedOSThread |
| var Xadduintptr = atomic.Xadduintptr |
| |
| var FuncPC = funcPC |
| |
| var Fastlog2 = fastlog2 |
| |
| var Atoi = atoi |
| var Atoi32 = atoi32 |
| |
| type LFNode struct { |
| Next uint64 |
| Pushcnt uintptr |
| } |
| |
| func LFStackPush(head *uint64, node *LFNode) { |
| (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node))) |
| } |
| |
| func LFStackPop(head *uint64) *LFNode { |
| return (*LFNode)(unsafe.Pointer((*lfstack)(head).pop())) |
| } |
| |
| func GCMask(x interface{}) (ret []byte) { |
| return nil |
| } |
| |
| func RunSchedLocalQueueTest() { |
| _p_ := new(p) |
| gs := make([]g, len(_p_.runq)) |
| for i := 0; i < len(_p_.runq); i++ { |
| if g, _ := runqget(_p_); g != nil { |
| throw("runq is not empty initially") |
| } |
| for j := 0; j < i; j++ { |
| runqput(_p_, &gs[i], false) |
| } |
| for j := 0; j < i; j++ { |
| if g, _ := runqget(_p_); g != &gs[i] { |
| print("bad element at iter ", i, "/", j, "\n") |
| throw("bad element") |
| } |
| } |
| if g, _ := runqget(_p_); g != nil { |
| throw("runq is not empty afterwards") |
| } |
| } |
| } |
| |
| func RunSchedLocalQueueStealTest() { |
| p1 := new(p) |
| p2 := new(p) |
| gs := make([]g, len(p1.runq)) |
| for i := 0; i < len(p1.runq); i++ { |
| for j := 0; j < i; j++ { |
| gs[j].sig = 0 |
| runqput(p1, &gs[j], false) |
| } |
| gp := runqsteal(p2, p1, true) |
| s := 0 |
| if gp != nil { |
| s++ |
| gp.sig++ |
| } |
| for { |
| gp, _ = runqget(p2) |
| if gp == nil { |
| break |
| } |
| s++ |
| gp.sig++ |
| } |
| for { |
| gp, _ = runqget(p1) |
| if gp == nil { |
| break |
| } |
| gp.sig++ |
| } |
| for j := 0; j < i; j++ { |
| if gs[j].sig != 1 { |
| print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n") |
| throw("bad element") |
| } |
| } |
| if s != i/2 && s != i/2+1 { |
| print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n") |
| throw("bad steal") |
| } |
| } |
| } |
| |
| func RunSchedLocalQueueEmptyTest(iters int) { |
| // Test that runq is not spuriously reported as empty. |
| // Runq emptiness affects scheduling decisions and spurious emptiness |
| // can lead to underutilization (both runnable Gs and idle Ps coexist |
| // for arbitrary long time). |
| done := make(chan bool, 1) |
| _p_ := new(p) |
| gs := make([]g, 2) |
| ready := new(uint32) |
| for i := 0; i < iters; i++ { |
| *ready = 0 |
| next0 := (i & 1) == 0 |
| next1 := (i & 2) == 0 |
| runqput(_p_, &gs[0], next0) |
| go func(done chan bool, p *p, ready *uint32, next0, next1 bool) { |
| for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; { |
| } |
| if runqempty(p) { |
| println("next:", next0, next1) |
| throw("queue is empty") |
| } |
| done <- true |
| }(done, _p_, ready, next0, next1) |
| for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; { |
| } |
| runqput(_p_, &gs[1], next1) |
| runqget(_p_) |
| <-done |
| runqget(_p_) |
| } |
| } |
| |
| var ( |
| StringHash = stringHash |
| BytesHash = bytesHash |
| Int32Hash = int32Hash |
| Int64Hash = int64Hash |
| MemHash = memhash |
| MemHash32 = memhash32 |
| MemHash64 = memhash64 |
| EfaceHash = efaceHash |
| IfaceHash = ifaceHash |
| ) |
| |
| var UseAeshash = &useAeshash |
| |
| func MemclrBytes(b []byte) { |
| s := (*slice)(unsafe.Pointer(&b)) |
| memclrNoHeapPointers(s.array, uintptr(s.len)) |
| } |
| |
| var HashLoad = &hashLoad |
| |
| // entry point for testing |
| //func GostringW(w []uint16) (s string) { |
| // s = gostringw(&w[0]) |
| // return |
| //} |
| |
| type Uintreg sys.Uintreg |
| |
| var Open = open |
| var Close = closefd |
| var Read = read |
| var Write = write |
| |
| func Envs() []string { return envs } |
| func SetEnvs(e []string) { envs = e } |
| |
| //var BigEndian = sys.BigEndian |
| |
| // For benchmarking. |
| |
| func BenchSetType(n int, x interface{}) { |
| e := *efaceOf(&x) |
| t := e._type |
| var size uintptr |
| var p unsafe.Pointer |
| switch t.kind & kindMask { |
| case kindPtr: |
| t = (*ptrtype)(unsafe.Pointer(t)).elem |
| size = t.size |
| p = e.data |
| case kindSlice: |
| slice := *(*struct { |
| ptr unsafe.Pointer |
| len, cap uintptr |
| })(e.data) |
| t = (*slicetype)(unsafe.Pointer(t)).elem |
| size = t.size * slice.len |
| p = slice.ptr |
| } |
| allocSize := roundupsize(size) |
| systemstack(func() { |
| for i := 0; i < n; i++ { |
| heapBitsSetType(uintptr(p), allocSize, size, t) |
| } |
| }) |
| } |
| |
| const PtrSize = sys.PtrSize |
| |
| var ForceGCPeriod = &forcegcperiod |
| |
| // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises |
| // the "environment" traceback level, so later calls to |
| // debug.SetTraceback (e.g., from testing timeouts) can't lower it. |
| func SetTracebackEnv(level string) { |
| setTraceback(level) |
| traceback_env = traceback_cache |
| } |
| |
| var ReadUnaligned32 = readUnaligned32 |
| var ReadUnaligned64 = readUnaligned64 |
| |
| func CountPagesInUse() (pagesInUse, counted uintptr) { |
| stopTheWorld("CountPagesInUse") |
| |
| pagesInUse = uintptr(mheap_.pagesInUse) |
| |
| for _, s := range mheap_.allspans { |
| if s.state == mSpanInUse { |
| counted += s.npages |
| } |
| } |
| |
| startTheWorld() |
| |
| return |
| } |
| |
| func Fastrand() uint32 { return fastrand() } |
| func Fastrandn(n uint32) uint32 { return fastrandn(n) } |
| |
| type ProfBuf profBuf |
| |
| func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf { |
| return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags)) |
| } |
| |
| func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) { |
| (*profBuf)(p).write(tag, now, hdr, stk) |
| } |
| |
| const ( |
| ProfBufBlocking = profBufBlocking |
| ProfBufNonBlocking = profBufNonBlocking |
| ) |
| |
| func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) { |
| return (*profBuf)(p).read(profBufReadMode(mode)) |
| } |
| |
| func (p *ProfBuf) Close() { |
| (*profBuf)(p).close() |
| } |
| |
| // ReadMemStatsSlow returns both the runtime-computed MemStats and |
| // MemStats accumulated by scanning the heap. |
| func ReadMemStatsSlow() (base, slow MemStats) { |
| stopTheWorld("ReadMemStatsSlow") |
| |
| // Run on the system stack to avoid stack growth allocation. |
| systemstack(func() { |
| // Make sure stats don't change. |
| getg().m.mallocing++ |
| |
| readmemstats_m(&base) |
| |
| // Initialize slow from base and zero the fields we're |
| // recomputing. |
| slow = base |
| slow.Alloc = 0 |
| slow.TotalAlloc = 0 |
| slow.Mallocs = 0 |
| slow.Frees = 0 |
| var bySize [_NumSizeClasses]struct { |
| Mallocs, Frees uint64 |
| } |
| |
| // Add up current allocations in spans. |
| for _, s := range mheap_.allspans { |
| if s.state != mSpanInUse { |
| continue |
| } |
| if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 { |
| slow.Mallocs++ |
| slow.Alloc += uint64(s.elemsize) |
| } else { |
| slow.Mallocs += uint64(s.allocCount) |
| slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize) |
| bySize[sizeclass].Mallocs += uint64(s.allocCount) |
| } |
| } |
| |
| // Add in frees. readmemstats_m flushed the cached stats, so |
| // these are up-to-date. |
| var smallFree uint64 |
| slow.Frees = mheap_.nlargefree |
| for i := range mheap_.nsmallfree { |
| slow.Frees += mheap_.nsmallfree[i] |
| bySize[i].Frees = mheap_.nsmallfree[i] |
| bySize[i].Mallocs += mheap_.nsmallfree[i] |
| smallFree += mheap_.nsmallfree[i] * uint64(class_to_size[i]) |
| } |
| slow.Frees += memstats.tinyallocs |
| slow.Mallocs += slow.Frees |
| |
| slow.TotalAlloc = slow.Alloc + mheap_.largefree + smallFree |
| |
| for i := range slow.BySize { |
| slow.BySize[i].Mallocs = bySize[i].Mallocs |
| slow.BySize[i].Frees = bySize[i].Frees |
| } |
| |
| getg().m.mallocing-- |
| }) |
| |
| startTheWorld() |
| return |
| } |
| |
| // BlockOnSystemStack switches to the system stack, prints "x\n" to |
| // stderr, and blocks in a stack containing |
| // "runtime.blockOnSystemStackInternal". |
| func BlockOnSystemStack() { |
| systemstack(blockOnSystemStackInternal) |
| } |
| |
| func blockOnSystemStackInternal() { |
| print("x\n") |
| lock(&deadlock) |
| lock(&deadlock) |
| } |
| |
| type RWMutex struct { |
| rw rwmutex |
| } |
| |
| func (rw *RWMutex) RLock() { |
| rw.rw.rlock() |
| } |
| |
| func (rw *RWMutex) RUnlock() { |
| rw.rw.runlock() |
| } |
| |
| func (rw *RWMutex) Lock() { |
| rw.rw.lock() |
| } |
| |
| func (rw *RWMutex) Unlock() { |
| rw.rw.unlock() |
| } |
| |
| func MapBucketsCount(m map[int]int) int { |
| h := *(**hmap)(unsafe.Pointer(&m)) |
| return 1 << h.B |
| } |
| |
| func MapBucketsPointerIsNil(m map[int]int) bool { |
| h := *(**hmap)(unsafe.Pointer(&m)) |
| return h.buckets == nil |
| } |
| |
| func LockOSCounts() (external, internal uint32) { |
| g := getg() |
| if g.m.lockedExt+g.m.lockedInt == 0 { |
| if g.lockedm != 0 { |
| panic("lockedm on non-locked goroutine") |
| } |
| } else { |
| if g.lockedm == 0 { |
| panic("nil lockedm on locked goroutine") |
| } |
| } |
| return g.m.lockedExt, g.m.lockedInt |
| } |