// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"internal/abi"
	"internal/goarch"
	"runtime/internal/math"
	"runtime/internal/sys"
	"unsafe"
)

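// slice is the runtime representation of a Go slice. For example
// (illustrative only), after
//
//	s := make([]int, 2, 5)
//
// s is a slice value whose array field points at a block of 5 ints,
// with len == 2 and cap == 5.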
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by runtime/internal/sys.NotInHeap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

func panicmakeslicelen() {
	panic(errorString("makeslice: len out of range"))
}

func panicmakeslicecap() {
	panic(errorString("makeslice: cap out of range"))
}

// makeslicecopy allocates a slice of "tolen" elements of type "et",
// then copies "fromlen" elements of type "et" into that new allocation from "from".
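//
// For example, the compiler may lower the common make-and-copy pattern
//
//	to := make([]T, len(from))
//	copy(to, from)
//
// into a single makeslicecopy call (a sketch of the intent; the exact
// lowering is a compiler detail).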
func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer {
	var tomem, copymem uintptr
	if uintptr(tolen) > uintptr(fromlen) {
		var overflow bool
		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen))
		if overflow || tomem > maxAlloc || tolen < 0 {
			panicmakeslicelen()
		}
		copymem = et.Size_ * uintptr(fromlen)
	} else {
		// fromlen is a known good length that is equal to or greater than tolen,
		// thereby making tolen a good slice length too, as the from and to slices
		// have the same element width.
		tomem = et.Size_ * uintptr(tolen)
		copymem = tomem
	}

	var to unsafe.Pointer
	if et.PtrBytes == 0 {
		to = mallocgc(tomem, nil, false)
		if copymem < tomem {
			memclrNoHeapPointers(add(to, copymem), tomem-copymem)
		}
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		to = mallocgc(tomem, et, true)
		if copymem > 0 && writeBarrier.enabled {
			// Only shade the pointers in from since we know the destination slice
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(to), uintptr(from), copymem, et)
		}
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(makeslicecopy)
		racereadrangepc(from, copymem, callerpc, pc)
	}
	if msanenabled {
		msanread(from, copymem)
	}
	if asanenabled {
		asanread(from, copymem)
	}

	memmove(to, from, copymem)

	return to
}

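// makeslice allocates the backing store for a slice of cap elements of
// type et, validating len and cap. The compiler lowers make([]T, len, cap)
// expressions that need a heap allocation to calls of this function.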
func makeslice(et *_type, len, cap int) unsafe.Pointer {
	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap))
	if overflow || mem > maxAlloc || len < 0 || len > cap {
		// NOTE: Produce a 'len out of range' error instead of a
		// 'cap out of range' error when someone does make([]T, bignumber).
		// 'cap out of range' is true too, but since the cap is only being
		// supplied implicitly, saying len is clearer.
		// See golang.org/issue/4085.
		mem, overflow := math.MulUintptr(et.Size_, uintptr(len))
		if overflow || mem > maxAlloc || len < 0 {
			panicmakeslicelen()
		}
		panicmakeslicecap()
	}

	return mallocgc(mem, et, true)
}

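// makeslice64 is like makeslice but takes 64-bit len and cap, checking
// that both fit in an int before delegating to makeslice.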
func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer {
	len := int(len64)
	if int64(len) != len64 {
		panicmakeslicelen()
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panicmakeslicecap()
	}

	return makeslice(et, len, cap)
}

// growslice allocates new backing store for a slice.
//
// arguments:
//
//	oldPtr = pointer to the slice's backing array
//	newLen = new length (= oldLen + num)
//	oldCap = original slice's capacity
//	num = number of elements being added
//	et = element type
//
// return values:
//
//	newPtr = pointer to the new backing store
//	newLen = same value as the argument
//	newCap = capacity of the new backing store
//
// Requires that uint(newLen) > uint(oldCap).
// Assumes the original slice length is newLen - num.
//
// A new backing store is allocated with space for at least newLen elements.
// Existing entries [0, oldLen) are copied over to the new backing store.
// Added entries [oldLen, newLen) are not initialized by growslice
// (although for pointer-containing element types, they are zeroed). They
// must be initialized by the caller.
// Trailing entries [newLen, newCap) are zeroed.
//
// growslice's odd calling convention makes the generated code that calls
// this function simpler. In particular, it accepts and returns the
// new length so that the old length is not live (does not need to be
// spilled/restored) and the new length is returned (also does not need
// to be spilled/restored).
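//
// For example (a rough sketch of how the compiler might lower
// s = append(s, e); the exact lowering is a compiler detail):
//
//	newLen := len(s) + 1
//	if uint(newLen) > uint(cap(s)) {
//		s = growslice(s.ptr, newLen, cap(s), 1, et) // s.ptr: the backing-array pointer
//	}
//	s.len = newLen
//	s[newLen-1] = e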
func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice {
	oldLen := newLen - num
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice))
	}
	if msanenabled {
		msanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}
	if asanenabled {
		asanread(oldPtr, uintptr(oldLen*int(et.Size_)))
	}

	if newLen < 0 {
		panic(errorString("growslice: len out of range"))
	}

	if et.Size_ == 0 {
		// append should not create a slice with a nil pointer but non-zero len.
		// We assume that append doesn't need to preserve oldPtr in this case.
		return slice{unsafe.Pointer(&zerobase), newLen, newLen}
	}

	newcap := nextslicecap(newLen, oldCap)

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	// Specialize for common values of et.Size_.
	// For 1 we don't need any division/multiplication.
	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant.
	// For powers of 2, use a variable shift.
	noscan := et.PtrBytes == 0
	switch {
	case et.Size_ == 1:
		lenmem = uintptr(oldLen)
		newlenmem = uintptr(newLen)
		capmem = roundupsize(uintptr(newcap), noscan)
		overflow = uintptr(newcap) > maxAlloc
		newcap = int(capmem)
	case et.Size_ == goarch.PtrSize:
		lenmem = uintptr(oldLen) * goarch.PtrSize
		newlenmem = uintptr(newLen) * goarch.PtrSize
		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize
		newcap = int(capmem / goarch.PtrSize)
	case isPowerOfTwo(et.Size_):
		var shift uintptr
		if goarch.PtrSize == 8 {
			// Mask shift for better code generation.
			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63
		} else {
			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31
		}
		lenmem = uintptr(oldLen) << shift
		newlenmem = uintptr(newLen) << shift
		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
		overflow = uintptr(newcap) > (maxAlloc >> shift)
		newcap = int(capmem >> shift)
		capmem = uintptr(newcap) << shift
	default:
		lenmem = uintptr(oldLen) * et.Size_
		newlenmem = uintptr(newLen) * et.Size_
		capmem, overflow = math.MulUintptr(et.Size_, uintptr(newcap))
		capmem = roundupsize(capmem, noscan)
		newcap = int(capmem / et.Size_)
		capmem = uintptr(newcap) * et.Size_
	}

	// The check of overflow in addition to capmem > maxAlloc is needed
	// to prevent an overflow which can be used to trigger a segfault
	// on 32-bit architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if overflow || capmem > maxAlloc {
		panic(errorString("growslice: len out of range"))
	}

	var p unsafe.Pointer
	if et.PtrBytes == 0 {
		p = mallocgc(capmem, nil, false)
		// The append() that calls growslice is going to overwrite from oldLen to newLen.
		// Only clear the part that will not be overwritten.
		// The reflect_growslice() that calls growslice will manually clear
		// the region not cleared here.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if lenmem > 0 && writeBarrier.enabled {
			// Only shade the pointers in oldPtr since we know the destination slice p
			// only contains nil pointers because it has been cleared during alloc.
			//
			// It's safe to pass a type to this function as an optimization because
			// from and to only ever refer to memory representing whole values of
			// type et. See the comment on bulkBarrierPreWrite.
			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et)
		}
	}
	memmove(p, oldPtr, lenmem)

	return slice{p, newLen, newcap}
}

// nextslicecap computes an appropriate new slice capacity for a slice
// growing to newLen elements from a current capacity of oldCap.
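//
// For example (values that follow directly from the rules below):
//
//	nextslicecap(5, 4)       == 8    // small slice: double the capacity
//	nextslicecap(1000, 100)  == 1000 // newLen exceeds double the capacity: jump to newLen
//	nextslicecap(4097, 4096) == 5312 // large slice: grow by roughly 1.3x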
func nextslicecap(newLen, oldCap int) int {
	newcap := oldCap
	doublecap := newcap + newcap
	if newLen > doublecap {
		return newLen
	}

	const threshold = 256
	if oldCap < threshold {
		return doublecap
	}
	for {
		// Transition from growing 2x for small slices
		// to growing 1.25x for large slices. This formula
		// gives a smooth-ish transition between the two.
		newcap += (newcap + 3*threshold) >> 2

		// We need to check `newcap >= newLen` and whether `newcap` overflowed.
		// newLen is guaranteed to be larger than zero, so if newcap overflows
		// to a negative value, `uint(newcap) > uint(newLen)` still holds.
		// This allows checking for both with the same comparison.
		if uint(newcap) >= uint(newLen) {
			break
		}
	}

	// Set newcap to the requested cap when
	// the newcap calculation overflowed.
	if newcap <= 0 {
		return newLen
	}
	return newcap
}

//go:linkname reflect_growslice reflect.growslice
func reflect_growslice(et *_type, old slice, num int) slice {
	// Semantically equivalent to slices.Grow, except that the caller
	// is responsible for ensuring that old.len+num > old.cap.
	num -= old.cap - old.len // preserve memory of old[old.len:old.cap]
	new := growslice(old.array, old.cap+num, old.cap, num, et)
	// growslice does not zero out new[old.cap:new.len] since it assumes that
	// the memory will be overwritten by an append() that called growslice.
	// Since the caller of reflect_growslice is not append(),
	// zero out this region before returning the slice to the reflect package.
	if et.PtrBytes == 0 {
		oldcapmem := uintptr(old.cap) * et.Size_
		newlenmem := uintptr(new.len) * et.Size_
		memclrNoHeapPointers(add(new.array, oldcapmem), newlenmem-oldcapmem)
	}
	new.len = old.len // preserve the old length
	return new
}

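// isPowerOfTwo reports whether x is a power of two. Note that it also
// reports true for x == 0; callers here only pass nonzero element sizes.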
func isPowerOfTwo(x uintptr) bool {
	return x&(x-1) == 0
}

// slicecopy is used to copy from a string or slice of pointerless elements into a slice.
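//
// For example, both of the following may be lowered by the compiler to a
// slicecopy call (a sketch of the intent; the exact lowering is a
// compiler detail):
//
//	n := copy(dst, src)     // dst, src []byte
//	m := copy(dst, "hello") // copying from a string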
func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int {
	if fromLen == 0 || toLen == 0 {
		return 0
	}

	n := fromLen
	if toLen < n {
		n = toLen
	}

	if width == 0 {
		return n
	}

	size := uintptr(n) * width
	if raceenabled {
		callerpc := getcallerpc()
		pc := abi.FuncPCABIInternal(slicecopy)
		racereadrangepc(fromPtr, size, callerpc, pc)
		racewriterangepc(toPtr, size, callerpc, pc)
	}
	if msanenabled {
		msanread(fromPtr, size)
		msanwrite(toPtr, size)
	}
	if asanenabled {
		asanread(fromPtr, size)
		asanwrite(toPtr, size)
	}

	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(toPtr) = *(*byte)(fromPtr) // known to be a byte pointer
	} else {
		memmove(toPtr, fromPtr, size)
	}
	return n
}

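// bytealg_MakeNoZero allocates a []byte of the given length without
// zeroing its contents; callers must overwrite the entire slice before
// reading from it.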
//go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero
func bytealg_MakeNoZero(len int) []byte {
	if uintptr(len) > maxAlloc {
		panicmakeslicelen()
	}
	return unsafe.Slice((*byte)(mallocgc(uintptr(len), nil, false)), len)
}