// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Export guts for testing.

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

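// Soft-float arithmetic routines from softfloat64.go, exported for
// testing the runtime's software floating-point support.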
var Fadd64 = fadd64
var Fsub64 = fsub64
var Fmul64 = fmul64
var Fdiv64 = fdiv64
var F64to32 = f64to32
var F32to64 = f32to64
var Fcmp64 = fcmp64
var Fintto64 = fintto64
var F64toint = f64toint
var Sqrt = sqrt

var Entersyscall = entersyscall
var Exitsyscall = exitsyscall
var LockedOSThread = lockedOSThread
var Xadduintptr = atomic.Xadduintptr

var FuncPC = funcPC

var Fastlog2 = fastlog2

var Atoi = atoi
var Atoi32 = atoi32

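// LFNode is the exported form of lfnode, for testing the runtime's
// lock-free stack.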
type LFNode struct {
	Next    uint64
	Pushcnt uintptr
}

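// LFStackPush pushes node onto the lock-free stack rooted at head.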
func LFStackPush(head *uint64, node *LFNode) {
	lfstackpush(head, (*lfnode)(unsafe.Pointer(node)))
}

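// LFStackPop pops the most recently pushed node from the lock-free
// stack rooted at head, or returns nil if the stack is empty.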
func LFStackPop(head *uint64) *LFNode {
	return (*LFNode)(unsafe.Pointer(lfstackpop(head)))
}

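// GCMask returns the GC pointer mask for the object stored in x,
// computed by getgcmask on the system stack.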
func GCMask(x interface{}) (ret []byte) {
	systemstack(func() {
		ret = getgcmask(x)
	})
	return
}

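// RunSchedLocalQueueTest exercises a P's local run queue: for each
// possible queue length i it checks that the queue starts empty,
// performs i runqput calls, verifies that i runqget calls return the
// queued G, and checks that the queue is empty again afterwards.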
func RunSchedLocalQueueTest() {
	_p_ := new(p)
	gs := make([]g, len(_p_.runq))
	for i := 0; i < len(_p_.runq); i++ {
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty initially")
		}
		for j := 0; j < i; j++ {
			runqput(_p_, &gs[i], false)
		}
		for j := 0; j < i; j++ {
			if g, _ := runqget(_p_); g != &gs[i] {
				print("bad element at iter ", i, "/", j, "\n")
				throw("bad element")
			}
		}
		if g, _ := runqget(_p_); g != nil {
			throw("runq is not empty afterwards")
		}
	}
}

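// RunSchedLocalQueueStealTest exercises runqsteal: for each possible
// queue length i it fills p1's run queue, steals into p2, and checks
// that every element is dequeued exactly once and that roughly half
// (i/2 or i/2+1) of the elements were stolen.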
func RunSchedLocalQueueStealTest() {
	p1 := new(p)
	p2 := new(p)
	gs := make([]g, len(p1.runq))
	for i := 0; i < len(p1.runq); i++ {
		for j := 0; j < i; j++ {
			gs[j].sig = 0
			runqput(p1, &gs[j], false)
		}
		gp := runqsteal(p2, p1, true)
		s := 0
		if gp != nil {
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p2)
			if gp == nil {
				break
			}
			s++
			gp.sig++
		}
		for {
			gp, _ = runqget(p1)
			if gp == nil {
				break
			}
			gp.sig++
		}
		for j := 0; j < i; j++ {
			if gs[j].sig != 1 {
				print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
				throw("bad element")
			}
		}
		if s != i/2 && s != i/2+1 {
			print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
			throw("bad steal")
		}
	}
}

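// RunSchedLocalQueueEmptyTest races runqput against runqempty for
// iters iterations to check that a non-empty run queue is never
// spuriously reported as empty.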
func RunSchedLocalQueueEmptyTest(iters int) {
	// Test that runq is not spuriously reported as empty.
	// Runq emptiness affects scheduling decisions, and spurious emptiness
	// can lead to underutilization (runnable Gs and idle Ps can coexist
	// for an arbitrarily long time).
	done := make(chan bool, 1)
	p := new(p)
	gs := make([]g, 2)
	ready := new(uint32)
	for i := 0; i < iters; i++ {
		*ready = 0
		next0 := (i & 1) == 0
		next1 := (i & 2) == 0
		runqput(p, &gs[0], next0)
		go func() {
			// Announce readiness, then spin until both goroutines are
			// ready, so that runqput and runqempty race as closely as
			// possible.
			for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
			}
			if runqempty(p) {
				println("next:", next0, next1)
				throw("queue is empty")
			}
			done <- true
		}()
		for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
		}
		runqput(p, &gs[1], next1)
		runqget(p)
		<-done
		runqget(p)
	}
}

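// Runtime hash functions, exported for testing.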
var StringHash = stringHash
var BytesHash = bytesHash
var Int32Hash = int32Hash
var Int64Hash = int64Hash
var EfaceHash = efaceHash
var IfaceHash = ifaceHash

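// MemclrBytes zeroes b using memclrNoHeapPointers, which is safe here
// because a byte slice's elements never contain pointers.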
func MemclrBytes(b []byte) {
	s := (*slice)(unsafe.Pointer(&b))
	memclrNoHeapPointers(s.array, uintptr(s.len))
}

var HashLoad = &hashLoad

// GostringW is an entry point for testing gostringw; the slice must
// contain a NUL terminator.
func GostringW(w []uint16) (s string) {
	systemstack(func() {
		s = gostringw(&w[0])
	})
	return
}

type Uintreg sys.Uintreg

var Open = open
var Close = closefd
var Read = read
var Write = write

func Envs() []string     { return envs }
func SetEnvs(e []string) { envs = e }

var BigEndian = sys.BigEndian

// For benchmarking.

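// BenchSetType benchmarks heapBitsSetType by writing the heap bitmap
// for the value stored in x (which must be a pointer or a slice) n
// times.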
func BenchSetType(n int, x interface{}) {
	e := *efaceOf(&x)
	t := e._type
	var size uintptr
	var p unsafe.Pointer
	switch t.kind & kindMask {
	case kindPtr:
		t = (*ptrtype)(unsafe.Pointer(t)).elem
		size = t.size
		p = e.data
	case kindSlice:
		slice := *(*struct {
			ptr      unsafe.Pointer
			len, cap uintptr
		})(e.data)
		t = (*slicetype)(unsafe.Pointer(t)).elem
		size = t.size * slice.len
		p = slice.ptr
	}
	allocSize := roundupsize(size)
	systemstack(func() {
		for i := 0; i < n; i++ {
			heapBitsSetType(uintptr(p), allocSize, size, t)
		}
	})
}

const PtrSize = sys.PtrSize

var ForceGCPeriod = &forcegcperiod

// SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
// the "environment" traceback level, so later calls to
// debug.SetTraceback (e.g., from testing timeouts) can't lower it.
func SetTracebackEnv(level string) {
	setTraceback(level)
	traceback_env = traceback_cache
}

var ReadUnaligned32 = readUnaligned32
var ReadUnaligned64 = readUnaligned64

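// CountPagesInUse returns the heap's pagesInUse counter together with
// an independent count obtained by walking all in-use spans, so tests
// can check that the two agree. It stops the world while counting.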
func CountPagesInUse() (pagesInUse, counted uintptr) {
	stopTheWorld("CountPagesInUse")

	pagesInUse = uintptr(mheap_.pagesInUse)

	for _, s := range mheap_.allspans {
		if s.state == mSpanInUse {
			counted += s.npages
		}
	}

	startTheWorld()

	return
}
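
// A sketch of how a test in package runtime_test might drive the hooks
// above; the test names and exact assertions are hypothetical, not part
// of this file:
//
//	func TestSchedLocalQueue(t *testing.T) {
//		runtime.RunSchedLocalQueueTest()
//	}
//
//	func TestCountPagesInUse(t *testing.T) {
//		pagesInUse, counted := runtime.CountPagesInUse()
//		if pagesInUse != counted {
//			t.Fatalf("mheap_.pagesInUse = %d, but direct count = %d", pagesInUse, counted)
//		}
//	}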