// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"unsafe"
)

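// slice is the runtime representation of a Go slice.
// Its field layout is assumed to match the compiler's slice layout
// and reflect.SliceHeader.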
type slice struct {
	array unsafe.Pointer
	len   int
	cap   int
}

// A notInHeapSlice is a slice backed by go:notinheap memory.
type notInHeapSlice struct {
	array *notInHeap
	len   int
	cap   int
}

// maxElems is a lookup table containing the maximum capacity for a slice.
// The index is the size of the slice element.
var maxElems = [...]uintptr{
	^uintptr(0),
	_MaxMem / 1, _MaxMem / 2, _MaxMem / 3, _MaxMem / 4,
	_MaxMem / 5, _MaxMem / 6, _MaxMem / 7, _MaxMem / 8,
	_MaxMem / 9, _MaxMem / 10, _MaxMem / 11, _MaxMem / 12,
	_MaxMem / 13, _MaxMem / 14, _MaxMem / 15, _MaxMem / 16,
	_MaxMem / 17, _MaxMem / 18, _MaxMem / 19, _MaxMem / 20,
	_MaxMem / 21, _MaxMem / 22, _MaxMem / 23, _MaxMem / 24,
	_MaxMem / 25, _MaxMem / 26, _MaxMem / 27, _MaxMem / 28,
	_MaxMem / 29, _MaxMem / 30, _MaxMem / 31, _MaxMem / 32,
}

// maxSliceCap returns the maximum capacity for a slice whose elements
// are elemsize bytes each.
func maxSliceCap(elemsize uintptr) uintptr {
	if elemsize < uintptr(len(maxElems)) {
		return maxElems[elemsize]
	}
	return _MaxMem / elemsize
}

func makeslice(et *_type, len, cap int) slice {
	// NOTE: The len > maxElements check here is not strictly necessary,
	// but it produces a 'len out of range' error instead of a 'cap out of range' error
	// when someone does make([]T, bignumber). 'cap out of range' is true too,
	// but since the cap is only being supplied implicitly, saying len is clearer.
	// See issue 4085.
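	//
	// For example (an illustrative sketch; with a constant length the
	// compiler reports the error at compile time instead):
	//
	//	n := int64(1) << 62
	//	_ = make([]byte, n)    // panics: makeslice: len out of range
	//	_ = make([]byte, 0, n) // panics: makeslice: cap out of range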
	maxElements := maxSliceCap(et.size)
	if len < 0 || uintptr(len) > maxElements {
		panic(errorString("makeslice: len out of range"))
	}

	if cap < len || uintptr(cap) > maxElements {
		panic(errorString("makeslice: cap out of range"))
	}

	p := mallocgc(et.size*uintptr(cap), et, true)
	return slice{p, len, cap}
}

func makeslice64(et *_type, len64, cap64 int64) slice {
	len := int(len64)
	if int64(len) != len64 {
		panic(errorString("makeslice: len out of range"))
	}

	cap := int(cap64)
	if int64(cap) != cap64 {
		panic(errorString("makeslice: cap out of range"))
	}

	return makeslice(et, len, cap)
}

// growslice handles slice growth during append.
// It is passed the slice element type, the old slice, and the desired new minimum capacity,
// and it returns a new slice with at least that capacity, with the old data
// copied into it.
// The new slice's length is set to the old slice's length,
// NOT to the new requested capacity.
// This is for codegen convenience. The old slice's length is used immediately
// to calculate where to write new values during an append.
// TODO: When the old backend is gone, reconsider this decision.
// The SSA backend might prefer the new length or to return only ptr/cap and save stack space.
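//
// For example, s = append(s, x) compiles into code roughly like the
// following, where T stands for the element type's descriptor
// (an illustrative sketch, not the exact generated code):
//
//	if len(s)+1 > cap(s) {
//		s = growslice(T, s, len(s)+1)
//	}
//	s = s[:len(s)+1]
//	s[len(s)-1] = x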
func growslice(et *_type, old slice, cap int) slice {
	if raceenabled {
		callerpc := getcallerpc()
		racereadrangepc(old.array, uintptr(old.len*int(et.size)), callerpc, funcPC(growslice))
	}
	if msanenabled {
		msanread(old.array, uintptr(old.len*int(et.size)))
	}

	if et.size == 0 {
		if cap < old.cap {
			panic(errorString("growslice: cap out of range"))
		}
		// append should not create a slice with nil pointer but non-zero len.
		// We assume that append doesn't need to preserve old.array in this case.
		return slice{unsafe.Pointer(&zerobase), old.len, cap}
	}

	newcap := old.cap
	doublecap := newcap + newcap
	if cap > doublecap {
		newcap = cap
	} else {
		if old.len < 1024 {
			newcap = doublecap
		} else {
			// Check 0 < newcap to detect overflow
			// and prevent an infinite loop.
			for 0 < newcap && newcap < cap {
				newcap += newcap / 4
			}
			// Set newcap to the requested cap when
			// the newcap calculation overflowed.
			if newcap <= 0 {
				newcap = cap
			}
		}
	}
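
	// For example (illustrative): appending one element to a slice with
	// old.len = old.cap = 512 doubles newcap to 1024, while old.len =
	// old.cap = 1200 gives newcap = 1200 + 1200/4 = 1500. The final
	// capacity may be larger still, because capmem is rounded up to an
	// allocator size class by roundupsize below.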

	var overflow bool
	var lenmem, newlenmem, capmem uintptr
	const ptrSize = unsafe.Sizeof((*byte)(nil))
	switch et.size {
	case 1:
		lenmem = uintptr(old.len)
		newlenmem = uintptr(cap)
		capmem = roundupsize(uintptr(newcap))
		overflow = uintptr(newcap) > _MaxMem
		newcap = int(capmem)
	case ptrSize:
		lenmem = uintptr(old.len) * ptrSize
		newlenmem = uintptr(cap) * ptrSize
		capmem = roundupsize(uintptr(newcap) * ptrSize)
		overflow = uintptr(newcap) > _MaxMem/ptrSize
		newcap = int(capmem / ptrSize)
	default:
		lenmem = uintptr(old.len) * et.size
		newlenmem = uintptr(cap) * et.size
		capmem = roundupsize(uintptr(newcap) * et.size)
		overflow = uintptr(newcap) > maxSliceCap(et.size)
		newcap = int(capmem / et.size)
	}

	// The overflow check (uintptr(newcap) > maxSliceCap(et.size)),
	// in addition to capmem > _MaxMem, is needed to prevent an integer
	// overflow that can be used to trigger a segfault on 32-bit
	// architectures with this example program:
	//
	//	type T [1<<27 + 1]int64
	//
	//	var d T
	//	var s []T
	//
	//	func main() {
	//		s = append(s, d, d, d, d)
	//		print(len(s), "\n")
	//	}
	if cap < old.cap || overflow || capmem > _MaxMem {
		panic(errorString("growslice: cap out of range"))
	}

	var p unsafe.Pointer
	if et.kind&kindNoPointers != 0 {
		p = mallocgc(capmem, nil, false)
		memmove(p, old.array, lenmem)
		// The append() that calls growslice is going to overwrite from old.len to cap (which will be the new length).
		// Only clear the part that will not be overwritten.
		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem)
	} else {
		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory.
		p = mallocgc(capmem, et, true)
		if !writeBarrier.enabled {
			memmove(p, old.array, lenmem)
		} else {
			for i := uintptr(0); i < lenmem; i += et.size {
				typedmemmove(et, add(p, i), add(old.array, i))
			}
		}
	}

	return slice{p, old.len, newcap}
}
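// slicecopy copies min(fm.len, to.len) elements of width bytes each
// from fm to to and returns that count.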
func slicecopy(to, fm slice, width uintptr) int {
	if fm.len == 0 || to.len == 0 {
		return 0
	}

	n := fm.len
	if to.len < n {
		n = to.len
	}

	if width == 0 {
		return n
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(slicecopy)
		racewriterangepc(to.array, uintptr(n*int(width)), callerpc, pc)
		racereadrangepc(fm.array, uintptr(n*int(width)), callerpc, pc)
	}
	if msanenabled {
		msanwrite(to.array, uintptr(n*int(width)))
		msanread(fm.array, uintptr(n*int(width)))
	}

	size := uintptr(n) * width
	if size == 1 { // common case worth about 2x to do here
		// TODO: is this still worth it with new memmove impl?
		*(*byte)(to.array) = *(*byte)(fm.array) // known to be a byte pointer
	} else {
		memmove(to.array, fm.array, size)
	}
	return n
}
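// slicestringcopy copies min(len(fm), len(to)) bytes from the string fm
// into the byte slice to and returns the number of bytes copied.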
func slicestringcopy(to []byte, fm string) int {
	if len(fm) == 0 || len(to) == 0 {
		return 0
	}

	n := len(fm)
	if len(to) < n {
		n = len(to)
	}

	if raceenabled {
		callerpc := getcallerpc()
		pc := funcPC(slicestringcopy)
		racewriterangepc(unsafe.Pointer(&to[0]), uintptr(n), callerpc, pc)
	}
	if msanenabled {
		msanwrite(unsafe.Pointer(&to[0]), uintptr(n))
	}

	memmove(unsafe.Pointer(&to[0]), stringStructOf(&fm).str, uintptr(n))
	return n
}