|  | // Copyright 2009 The Go Authors. All rights reserved. | 
|  | // Use of this source code is governed by a BSD-style | 
|  | // license that can be found in the LICENSE file. | 
|  |  | 
|  | package runtime | 
|  |  | 
|  | import ( | 
|  | "runtime/internal/math" | 
|  | "runtime/internal/sys" | 
|  | "unsafe" | 
|  | ) | 
|  |  | 
// slice is the runtime's representation of a slice header:
// a pointer to the backing array plus the current length and capacity.
// NOTE(review): the field order presumably must match the compiler's
// layout of slice values — confirm before reordering.
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}
|  |  | 
// A notInHeapSlice is a slice backed by go:notinheap memory.
// It mirrors the layout of slice, but its array pointer is typed
// *notInHeap so the memory is never treated as ordinary heap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}
|  |  | 
// panicmakeslicelen reports a make([]T, len) call whose length is out of range.
func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}
|  |  | 
// panicmakeslicecap reports a make([]T, len, cap) call whose capacity is out of range.
func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}
|  |  | 
// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
// It returns the pointer to the new backing array; any tail beyond the
// copied elements is zeroed.
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		// tolen exceeds the known-good fromlen, so its byte size must be
		// validated: multiplication overflow, the allocation limit, and a
		// negative length (which became a huge uintptr in the comparison above).
		var overflow bool
		tomem, overflow = math.MulUintptr(et.size, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.size * uintptr(fromlen)
	} else {
		// fromlen is a known good length, equal to or greater than tolen,
		// thereby making tolen a good slice length too as from and to slices
		// have the same element width.
		tomem = et.size * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.ptrdata == 0 {
		// Pointer-free elements: allocate without zeroing, then clear only
		// the tail that the copy below will not overwrite.
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem)
		}
	}

	// Instrument the read of the source range before the copy.
	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}
|  |  | 
// makeslice implements make([]T, len, cap): it validates the requested
// length and capacity against the element size and allocation limit,
// then returns a pointer to zeroed storage for cap elements of type et.
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.size, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		// The inner mem/overflow deliberately shadow the outer ones:
		// they are recomputed for len just to pick the right panic.
		mem, overflow := math.MulUintptr(et.size, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}
|  |  | 
|  | func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer { | 
|  | len := int(len64) | 
|  | if int64(len) != len64 { | 
|  | panicmakeslicelen() | 
|  | } | 
|  |  | 
|  | cap := int(cap64) | 
|  | if int64(cap) != cap64 { | 
|  | panicmakeslicecap() | 
|  | } | 
|  |  | 
|  | return makeslice(et, len, cap) | 
|  | } | 
|  |  | 
// growslice handles slice growth during append.
// It is passed the slice element type, the old slice, and the desired new minimum capacity,
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the old slice's length,
// NOT to the new requested capacity.
// This is for codegen convenience. The old slice's length is used immediately
// to calculate where to write new values during an append.
// TODO: When the old backend is gone, reconsider this decision.
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
func growslice(et *_type, old slice, cap int) slice {
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
	}
	if msanenabled {
		msanread(old.array, uintptr(old.len*int(et.size)))
	}

	// The compiler guarantees cap >= old.cap; a smaller cap here means
	// memory corruption or a miscompiled caller.
	if cap < old.cap {
		panic(errorString("growslice: cap out of range"))
	}

	if et.size == 0 {
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), old.len, cap}
	}

	// Growth policy: jump straight to cap if it exceeds double the old
	// capacity; otherwise double small slices and grow large ones in
	// 25% increments until cap is reached.
	newcap := old.cap
	doublecap := newcap + newcap
	if cap > doublecap {
		newcap = cap
	} else {
		// NOTE(review): the threshold tests old.len rather than old.cap —
		// presumably intentional upstream, but worth confirming.
		if old.len < 1024 {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < cap {
				newcap += newcap / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = cap
			}
		}
	}

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.size.
	// For 1 we don't need any division/multiplication.
	// For sys.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	// In every case newcap is recomputed from the rounded-up allocation
	// size so the returned slice exposes the full usable capacity.
	switch {
	case et.size == 1:
		lenmem = uintptr(old.len)
		newlenmem = uintptr(cap)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.size == sys.PtrSize:
		lenmem = uintptr(old.len) * sys.PtrSize
		newlenmem = uintptr(cap) * sys.PtrSize
		capmem = roundupsize(uintptr(newcap) * sys.PtrSize)
		overflow = uintptr(newcap) > maxAlloc/sys.PtrSize
		newcap = int(capmem / sys.PtrSize)
	case isPowerOfTwo(et.size):
		var shift uintptr
		if sys.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.Ctz64(uint64(et.size))) & 63
		} else {
			shift = uintptr(sys.Ctz32(uint32(et.size))) & 31
		}
		lenmem = uintptr(old.len) << shift
		newlenmem = uintptr(cap) << shift
		capmem = roundupsize(uintptr(newcap) << shift)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
	default:
		lenmem = uintptr(old.len) * et.size
		newlenmem = uintptr(cap) * et.size
		capmem, overflow = math.MulUintptr(et.size, uintptr(newcap))
		capmem = roundupsize(capmem)
		newcap = int(capmem / et.size)
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32bit architectures with this example program:
	//
	// type T [1<<27 + 1]int64
	//
	// var d T
	// var s []T
	//
	// func main() {
	//   s = append(s, d, d, d, d)
	//   print(len(s), "\n")
	// }
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: cap out of range"))
	}

	var p unsafe.Pointer
	if et.ptrdata == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
		// Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in old.array since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			// The barrier range stops at the last pointer word of the last
			// element, hence lenmem-et.size+et.ptrdata.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(old.array), lenmem-et.size+et.ptrdata)
		}
	}
	memmove(p, old.array, lenmem)

	return slice{p, old.len, newcap}
}
|  |  | 
// isPowerOfTwo reports whether x is a power of two.
// The classic x&(x-1) == 0 test alone incorrectly accepts zero, so it is
// paired with an explicit x != 0 guard. (The caller in growslice never
// passes 0 — et.size == 0 returns earlier — so this only hardens the
// helper for future callers.)
func isPowerOfTwo(x uintptr) bool {
	return x != 0 && x&(x-1) == 0
}
|  |  | 
|  | func slicecopy(toPtr unsafe.Pointer, toLen int, fmPtr unsafe.Pointer, fmLen int, width uintptr) int { | 
|  | if fmLen == 0 || toLen == 0 { | 
|  | return 0 | 
|  | } | 
|  |  | 
|  | n := fmLen | 
|  | if toLen < n { | 
|  | n = toLen | 
|  | } | 
|  |  | 
|  | if width == 0 { | 
|  | return n | 
|  | } | 
|  |  | 
|  | if raceenabled { | 
|  | callerpc := getcallerpc() | 
|  | pc := funcPC(slicecopy) | 
|  | racereadrangepc(fmPtr, uintptr(n*int(width)), callerpc, pc) | 
|  | racewriterangepc(toPtr, uintptr(n*int(width)), callerpc, pc) | 
|  | } | 
|  | if msanenabled { | 
|  | msanread(fmPtr, uintptr(n*int(width))) | 
|  | msanwrite(toPtr, uintptr(n*int(width))) | 
|  | } | 
|  |  | 
|  | size := uintptr(n) * width | 
|  | if size == 1 { // common case worth about 2x to do here | 
|  | // TODO: is this still worth it with new memmove impl? | 
|  | *(*byte)(toPtr) = *(*byte)(fmPtr) // known to be a byte pointer | 
|  | } else { | 
|  | memmove(toPtr, fmPtr, size) | 
|  | } | 
|  | return n | 
|  | } | 
|  |  | 
|  | func slicestringcopy(toPtr *byte, toLen int, fm string) int { | 
|  | if len(fm) == 0 || toLen == 0 { | 
|  | return 0 | 
|  | } | 
|  |  | 
|  | n := len(fm) | 
|  | if toLen < n { | 
|  | n = toLen | 
|  | } | 
|  |  | 
|  | if raceenabled { | 
|  | callerpc := getcallerpc() | 
|  | pc := funcPC(slicestringcopy) | 
|  | racewriterangepc(unsafe.Pointer(toPtr), uintptr(n), callerpc, pc) | 
|  | } | 
|  | if msanenabled { | 
|  | msanwrite(unsafe.Pointer(toPtr), uintptr(n)) | 
|  | } | 
|  |  | 
|  | memmove(unsafe.Pointer(toPtr), stringStructOf(&fm).str, uintptr(n)) | 
|  | return n | 
|  | } |