| // Code generated by mkmalloc.go; DO NOT EDIT. |
| |
| package runtime |
| |
| import ( |
| "internal/goarch" |
| "internal/runtime/sys" |
| "unsafe" |
| ) |
| |
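| // mallocgcSmallScanNoHeaderSC1 is the generated mallocgc fast path for small, |
| // pointer-bearing (scannable) objects with no malloc header in size class 1 |
| // (8-byte objects). The SC2-SC16 variants below share this body with the |
| // size-class constants inlined. |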
| func mallocgcSmallScanNoHeaderSC1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
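| // Any malloc can trigger sweeping, which can in turn queue finalizers; |
| // record that lock-rank edge up front. |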
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 1 |
| |
| const elemsize = 8 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
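| // Inlined nextFreeFast: consult the span's 64-bit allocCache bitmap for the |
| // next free slot, falling back to c.nextFree below when the cache misses. |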
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*8 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
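| // initHeapBits already set the pointer bit for every 8-byte slot in this |
| // span, so there are no heap bits to write; just count the scannable bytes. |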
| c.scanAlloc += 8 |
| } else { |
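| // Inlined heapSetTypeNoHeader/writeHeapBitsSmall: construct the object's |
| // pointer bitmap from the type's GC mask and store it in the span's heap bits. |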
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(8)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 8 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
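| // dataSize is relied upon to be an exact multiple of typ.Size_, which saves |
| // masking src back down to dataSize after the replication loop. |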
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
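| // At most one uintptr's worth of bitmap bits is written, so this is a single |
| // store unless the object's bits straddle a word boundary (power-of-two |
| // element sizes never do). |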
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
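| // Two stores: the bitmap bits are split at the word boundary. |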
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
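| // Single store. |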
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
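| // Ensure the stores above that zero x and write its heap bits are visible |
| // before the caller can publish x to the garbage collector; on weakly |
| // ordered machines the GC could otherwise see uninitialized memory or |
| // stale heap bits. |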
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
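| // GC is active: allocate the object black so the mark phase treats it as live. |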
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
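| // Outside the mark phase, track freeindex for the conservative scanner, |
| // which uses freeIndexForScan to ignore slots that were never allocated. |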
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
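| // mallocgcSmallScanNoHeaderSC2 is the size class 2 (16-byte) variant of the fast path above. |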
| func mallocgcSmallScanNoHeaderSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 2 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(16)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 16 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
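| // mallocgcSmallScanNoHeaderSC3 is the size class 3 (24-byte) variant of the fast path above. |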
| func mallocgcSmallScanNoHeaderSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 3 |
| |
| const elemsize = 24 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(24)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 24 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
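| // mallocgcSmallScanNoHeaderSC4 is the size class 4 (32-byte) variant of the fast path above. |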
| func mallocgcSmallScanNoHeaderSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 4 |
| |
| const elemsize = 32 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(32)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 32 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
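| // mallocgcSmallScanNoHeaderSC5 is the size class 5 (48-byte) variant of the fast path above. |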
| func mallocgcSmallScanNoHeaderSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 5 |
| |
| const elemsize = 48 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(48)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 48 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
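| // mallocgcSmallScanNoHeaderSC6 is the size class 6 (64-byte) variant of the fast path above. |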
| func mallocgcSmallScanNoHeaderSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 6 |
| |
| const elemsize = 64 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(64)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 64 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
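| // mallocgcSmallScanNoHeaderSC7 is the size class 7 (80-byte) variant of the fast path above. |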
| func mallocgcSmallScanNoHeaderSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 7 |
| |
| const elemsize = 80 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(80)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 80 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
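| // mallocgcSmallScanNoHeaderSC8 is the size class 8 (96-byte) variant of the fast path above. |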
| func mallocgcSmallScanNoHeaderSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 8 |
| |
| const elemsize = 96 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(96)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 96 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
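| // mallocgcSmallScanNoHeaderSC9 is the size class 9 (112-byte) variant of the fast path above. |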
| func mallocgcSmallScanNoHeaderSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 9 |
| |
| const elemsize = 112 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(112)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 112 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
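| // mallocgcSmallScanNoHeaderSC10 is the size class 10 (128-byte) variant of the fast path above. |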
| func mallocgcSmallScanNoHeaderSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 10 |
| |
| const elemsize = 128 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(128)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 128 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
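| // mallocgcSmallScanNoHeaderSC11 is the size class 11 (144-byte) variant of the fast path above. |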
| func mallocgcSmallScanNoHeaderSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 11 |
| |
| const elemsize = 144 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(144)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 144 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
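| // mallocgcSmallScanNoHeaderSC12 is the size class 12 (160-byte) variant of the fast path above. |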
| func mallocgcSmallScanNoHeaderSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 12 |
| |
| const elemsize = 160 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*160 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(160)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 160 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
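| // mallocgcSmallScanNoHeaderSC13 is the size class 13 (176-byte) variant of the fast path above. |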
| func mallocgcSmallScanNoHeaderSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 13 |
| |
| const elemsize = 176 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*176 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(176)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 176 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
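| // mallocgcSmallScanNoHeaderSC14 is the size class 14 (192-byte) variant of the fast path above. |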
| func mallocgcSmallScanNoHeaderSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 14 |
| |
| const elemsize = 192 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
| nextFreeFastResult = gclinkptr(uintptr(result)*192 + span.base()) |
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(192)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 192 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
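| // mallocgcSmallScanNoHeaderSC15 is the size class 15 (208-byte) variant of the fast path above. |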
| func mallocgcSmallScanNoHeaderSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 15 |
| |
| const elemsize = 208 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
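// Inlined nextFreeFast: consult the span's 64-bit allocCache
// bitmap for the next free object. If the fast path fails,
// nextFreeFastResult stays 0 and we fall back to c.nextFree below.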
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
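// If freeidx lands on a 64-object boundary the allocCache must be
// refilled first, so take the slow path unless this is the very
// end of the span.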
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*208 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(208)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 208 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
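// Write src into the span's packed heap bitmap: o is the object's
// word offset within the span, i selects the bitmap word and j the
// bit offset within it.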
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
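// The object's bits straddle a bitmap word boundary: split the
// write across two words.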
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
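// The object's bits fit in a single bitmap word.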
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
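// Publish the object: make the zeroing and heap-bit stores above
// visible before a pointer to x can escape.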
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
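// The GC is running, so allocate the object black: mark it now so
// it is not freed this cycle.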
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
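// Keep freeIndexForScan in sync so the conservative scanner only
// treats slots below this index as allocated.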
| span.freeIndexForScan = span.freeindex |
| } |
| |
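// Heap profiling: count the allocation against the sampling
// budget and record a sample once it is spent (or the profiling
// rate has changed).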
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 16 |
| |
| const elemsize = 224 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*224 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(224)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 224 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 17 |
| |
| const elemsize = 240 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*240 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(240)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 240 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 18 |
| |
| const elemsize = 256 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*256 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(256)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 256 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 19 |
| |
| const elemsize = 288 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*288 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(288)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 288 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 20 |
| |
| const elemsize = 320 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*320 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(320)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 320 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 21 |
| |
| const elemsize = 352 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*352 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(352)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 352 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 22 |
| |
| const elemsize = 384 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*384 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(384)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 384 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 23 |
| |
| const elemsize = 416 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*416 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(416)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 416 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 24 |
| |
| const elemsize = 448 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*448 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(448)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 448 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 25 |
| |
| const elemsize = 480 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*480 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(480)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 480 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallScanNoHeaderSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 26 |
| |
| const elemsize = 512 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallScanNoHeader(size, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(0) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*512 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| if goarch.PtrSize == 8 && sizeclass == 1 { |
| |
| c.scanAlloc += 8 |
| } else { |
| dataSize := size |
| x := uintptr(x) |
| |
| if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(512)) { |
| throw("tried to write heap bits, but no heap bits in span") |
| } |
| |
| src0 := readUintptr(getGCMask(typ)) |
| |
| const elemsize = 512 |
| |
| scanSize := typ.PtrBytes |
| src := src0 |
| if typ.Size_ == goarch.PtrSize { |
| src = (1 << (dataSize / goarch.PtrSize)) - 1 |
| } else { |
| |
| if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { |
| throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") |
| } |
| for i := typ.Size_; i < dataSize; i += typ.Size_ { |
| src |= src0 << (i / goarch.PtrSize) |
| scanSize += typ.Size_ |
| } |
| } |
| |
| dstBase, _ := spanHeapBitsRange(span.base(), pageSize, elemsize) |
| dst := unsafe.Pointer(dstBase) |
| o := (x - span.base()) / goarch.PtrSize |
| i := o / ptrBits |
| j := o % ptrBits |
| const bits uintptr = elemsize / goarch.PtrSize |
| |
| const bitsIsPowerOfTwo = bits&(bits-1) == 0 |
| if bits > ptrBits || (!bitsIsPowerOfTwo && j+bits > ptrBits) { |
| |
| bits0 := ptrBits - j |
| bits1 := bits - bits0 |
| dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) |
| dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) |
| *dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) |
| *dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) |
| } else { |
| |
| dst := (*uintptr)(add(dst, i*goarch.PtrSize)) |
| *dst = (*dst)&^(((1<<(min(bits, ptrBits)))-1)<<j) | (src << j) |
| } |
| |
| const doubleCheck = false |
| if doubleCheck { |
| writeHeapBitsDoubleCheck(span, x, dataSize, src, src0, i, j, bits, typ) |
| } |
| if doubleCheckHeapSetType { |
| doubleCheckHeapType(x, dataSize, typ, nil, span) |
| } |
| c.scanAlloc += scanSize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocTiny1(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 1 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
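// Align the tiny offset for the conservative alignment implied by
// the constant allocation size.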
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
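// Conservatively align 12-byte objects to 8 bytes on 32-bit
// systems so a leading 64-bit field can be accessed atomically
// (see go.dev/issue/37262).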
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
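// The object fits into the current tiny block.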
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
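// No new block was allocated on this path, so elemsize is treated
// as 0 in the epilogue below and the assist-credit adjustment
// folds away.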
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
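// Inlined nextFreeFast for the tiny span class: nelems is the
// number of 16-byte slots in an 8 KiB span after reserving room
// for the inline mark bits.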
| const nbytes = 8192 |
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
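// Unconditionally zero the 16-byte block.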
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
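// Adopt the new block as the cache's tiny block if it leaves more
// free space behind than the current one.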
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
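// With the race detector enabled the block is never adopted as
// c.tiny (see the !raceenabled check above), so each allocation
// owns the whole block; move the pointer to the block's end so
// adjacent allocations remain distinguishable.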
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocTiny2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 2 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocTiny3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 3 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocTiny4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 4 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
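// mallocTiny5 is the specialized mallocgc fast path for 5-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.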
| func mallocTiny5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 5 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
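// mallocTiny6 is the specialized mallocgc fast path for 6-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.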
| func mallocTiny6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 6 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
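// mallocTiny7 is the specialized mallocgc fast path for 7-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.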
| func mallocTiny7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 7 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
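// mallocTiny8 is the specialized mallocgc fast path for 8-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.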
| func mallocTiny8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 8 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
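// mallocTiny9 is the specialized mallocgc fast path for 9-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.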
| func mallocTiny9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 9 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
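// mallocTiny10 is the specialized mallocgc fast path for 10-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.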
| func mallocTiny10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 10 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
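// mallocTiny11 is the specialized mallocgc fast path for 11-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.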
| func mallocTiny11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 11 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
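// mallocTiny12 is the specialized mallocgc fast path for 12-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.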
| func mallocTiny12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 12 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
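// mallocTiny13 is the specialized mallocgc fast path for 13-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.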
| func mallocTiny13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 13 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
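// mallocTiny14 is the specialized mallocgc fast path for 14-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.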
| func mallocTiny14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 14 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
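// mallocTiny15 is the specialized mallocgc fast path for 15-byte noscan
// allocations; see mallocTiny3 for commentary on the tiny allocator.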
| func mallocTiny15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const constsize = 15 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckTiny(constsize, typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| c := getMCache(mp) |
| off := c.tinyoffset |
| |
| if constsize&7 == 0 { |
| off = alignUp(off, 8) |
| } else if goarch.PtrSize == 4 && constsize == 12 { |
| |
| off = alignUp(off, 8) |
| } else if constsize&3 == 0 { |
| off = alignUp(off, 4) |
| } else if constsize&1 == 0 { |
| off = alignUp(off, 2) |
| } |
| if off+constsize <= maxTinySize && c.tiny != 0 { |
| |
| x := unsafe.Pointer(c.tiny + off) |
| c.tinyoffset = off + constsize |
| c.tinyAllocs++ |
| mp.mallocing = 0 |
| releasem(mp) |
| const elemsize = 0 |
| { |
| |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| } |
| |
| checkGCTrigger := false |
| span := c.alloc[tinySpanClass] |
| |
| const nbytes = 8192 |
	const nelems = uint16((nbytes - unsafe.Sizeof(spanInlineMarkBits{})) / 16)
	var nextFreeFastResult gclinkptr
	if span.allocCache != 0 {
		theBit := sys.TrailingZeros64(span.allocCache)
		result := span.freeindex + uint16(theBit)
		if result < nelems {
			freeidx := result + 1
			if !(freeidx%64 == 0 && freeidx != nelems) {
				span.allocCache >>= uint(theBit + 1)
				span.freeindex = freeidx
				span.allocCount++
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
			}
		}
	}
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(tinySpanClass) |
| } |
| x := unsafe.Pointer(v) |
| (*[2]uint64)(x)[0] = 0 |
| (*[2]uint64)(x)[1] = 0 |
| |
| if !raceenabled && (constsize < c.tinyoffset || c.tiny == 0) { |
| |
| c.tiny = uintptr(x) |
| c.tinyoffset = constsize |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| |
| if raceenabled { |
| |
| x = add(x, elemsize-constsize) |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
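// mallocgcSmallNoScanSC2 is the specialized mallocgc fast path for noscan
// heap allocations in size class 2 (16-byte objects). The publication
// barrier and GC bookkeeping mirror the mallocTinyN variants above.
//
// The span class packs the size class with a noscan bit in the low bit, so
// here spc = 2<<1 | 1 = 5. The allocCache fast path scans a 64-slot window
// of the span's free bitmap: for example, allocCache = 0b...1000 means the
// slots at freeindex, freeindex+1, and freeindex+2 are taken,
// TrailingZeros64 returns 3, so the slot at freeindex+3 is handed out and
// the cache shifts right by 4.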
| func mallocgcSmallNoScanSC2(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 2 |
| |
| const elemsize = 16 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*16 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
	if needzero && span.needzero != 0 {
		// Zero the object only if the caller needs zeroed memory and the
		// span may still hold stale data.
		memclrNoHeapPointers(x, elemsize)
	}
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
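// mallocgcSmallNoScanSC3 is the specialized mallocgc fast path for noscan
// heap allocations in size class 3 (24-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.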
| func mallocgcSmallNoScanSC3(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 3 |
| |
| const elemsize = 24 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*24 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
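// mallocgcSmallNoScanSC4 is the specialized mallocgc fast path for noscan
// heap allocations in size class 4 (32-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.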
| func mallocgcSmallNoScanSC4(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 4 |
| |
| const elemsize = 32 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*32 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
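// mallocgcSmallNoScanSC5 is the specialized mallocgc fast path for noscan
// heap allocations in size class 5 (48-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.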
| func mallocgcSmallNoScanSC5(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 5 |
| |
| const elemsize = 48 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*48 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
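// mallocgcSmallNoScanSC6 is the specialized mallocgc fast path for noscan
// heap allocations in size class 6 (64-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.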
| func mallocgcSmallNoScanSC6(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 6 |
| |
| const elemsize = 64 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*64 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
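// mallocgcSmallNoScanSC7 is the specialized mallocgc fast path for noscan
// heap allocations in size class 7 (80-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.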
| func mallocgcSmallNoScanSC7(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 7 |
| |
| const elemsize = 80 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*80 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
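// mallocgcSmallNoScanSC8 is the specialized mallocgc fast path for noscan
// heap allocations in size class 8 (96-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.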
| func mallocgcSmallNoScanSC8(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 8 |
| |
| const elemsize = 96 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*96 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
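// mallocgcSmallNoScanSC9 is the specialized mallocgc fast path for noscan
// heap allocations in size class 9 (112-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.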
| func mallocgcSmallNoScanSC9(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 9 |
| |
| const elemsize = 112 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*112 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
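// mallocgcSmallNoScanSC10 is the specialized mallocgc fast path for noscan
// heap allocations in size class 10 (128-byte objects); see
// mallocgcSmallNoScanSC2 for commentary.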
| func mallocgcSmallNoScanSC10(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 10 |
| |
| const elemsize = 128 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
				nextFreeFastResult = gclinkptr(uintptr(result)*128 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC11(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 11 |
| |
| const elemsize = 144 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
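// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.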
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*144 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
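// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.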
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
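// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.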
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC12(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 12 |
| |
| const elemsize = 160 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
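// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.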
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*160 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
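// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.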
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
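// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.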
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC13(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 13 |
| |
| const elemsize = 176 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
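// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.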
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*176 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
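// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.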
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
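// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.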
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC14(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 14 |
| |
| const elemsize = 192 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
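// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.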
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*192 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
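// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.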
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
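// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.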
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC15(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 15 |
| |
| const elemsize = 208 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
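// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.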
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*208 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
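// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.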
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
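// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.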
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC16(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 16 |
| |
| const elemsize = 224 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
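// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.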
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*224 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
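// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.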
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
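// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.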
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC17(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 17 |
| |
| const elemsize = 240 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
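// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.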
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*240 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
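// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.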
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
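// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.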
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC18(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 18 |
| |
| const elemsize = 256 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
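// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.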
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*256 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
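// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.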
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
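// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.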
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC19(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 19 |
| |
| const elemsize = 288 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
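// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.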
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*288 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
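// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.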
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
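// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.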
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC20(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 20 |
| |
| const elemsize = 320 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
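// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.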
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*320 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
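// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.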
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
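// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.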
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC21(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 21 |
| |
| const elemsize = 352 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
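// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.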
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*352 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
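// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.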
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
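// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.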
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC22(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 22 |
| |
| const elemsize = 384 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
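// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.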
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*384 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
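// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.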
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
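// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.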
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC23(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 23 |
| |
| const elemsize = 416 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
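// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.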
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*416 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
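// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.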
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
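// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.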
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC24(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 24 |
| |
| const elemsize = 448 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
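// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.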
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*448 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
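// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.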
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
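// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.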
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC25(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 25 |
| |
| const elemsize = 480 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
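// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.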
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*480 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
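// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.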
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
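// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.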
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |
| |
| func mallocgcSmallNoScanSC26(size uintptr, typ *_type, needzero bool) unsafe.Pointer { |
| if doubleCheckMalloc { |
| if gcphase == _GCmarktermination { |
| throw("mallocgc called with gcphase == _GCmarktermination") |
| } |
| } |
| |
| lockRankMayQueueFinalizer() |
| |
| if debug.malloc { |
| if x := preMallocgcDebug(size, typ); x != nil { |
| return x |
| } |
| } |
| |
| if gcBlackenEnabled != 0 { |
| deductAssistCredit(size) |
| } |
| |
| const sizeclass = 26 |
| |
| const elemsize = 512 |
| |
| mp := acquirem() |
| if doubleCheckMalloc { |
| doubleCheckSmallNoScan(typ, mp) |
| } |
| mp.mallocing = 1 |
| |
| checkGCTrigger := false |
| c := getMCache(mp) |
| const spc = spanClass(sizeclass<<1) | spanClass(1) |
| span := c.alloc[spc] |
| |
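// Fast path: inlined nextFreeFast. Scan the span's allocCache bitmap for the next free slot; if none is usable here, fall back to c.nextFree below.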
| var nextFreeFastResult gclinkptr |
| if span.allocCache != 0 { |
| theBit := sys.TrailingZeros64(span.allocCache) |
| result := span.freeindex + uint16(theBit) |
| if result < span.nelems { |
| freeidx := result + 1 |
| if !(freeidx%64 == 0 && freeidx != span.nelems) { |
| span.allocCache >>= uint(theBit + 1) |
| span.freeindex = freeidx |
| span.allocCount++ |
nextFreeFastResult = gclinkptr(uintptr(result)*512 + span.base())
| } |
| } |
| } |
| v := nextFreeFastResult |
| if v == 0 { |
| v, span, checkGCTrigger = c.nextFree(spc) |
| } |
| x := unsafe.Pointer(v) |
| if needzero && span.needzero != 0 { |
| memclrNoHeapPointers(x, elemsize) |
| } |
| |
| publicationBarrier() |
| |
| if writeBarrier.enabled { |
| |
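// The GC is marking: allocate the new object black (gcmarknewobject) so it is treated as live this cycle.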
| gcmarknewobject(span, uintptr(x)) |
| } else { |
| |
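// The GC is not marking: sync freeIndexForScan so the conservative scanner sees this slot as allocated.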
| span.freeIndexForScan = span.freeindex |
| } |
| |
| c.nextSample -= int64(elemsize) |
| if c.nextSample < 0 || MemProfileRate != c.memProfRate { |
| profilealloc(mp, x, elemsize) |
| } |
| mp.mallocing = 0 |
| releasem(mp) |
| |
| if checkGCTrigger { |
| if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { |
| gcStart(t) |
| } |
| } |
| if gcBlackenEnabled != 0 && elemsize != 0 { |
| if assistG := getg().m.curg; assistG != nil { |
| assistG.gcAssistBytes -= int64(elemsize - size) |
| } |
| } |
| |
| if debug.malloc { |
| postMallocgcDebug(x, elemsize, typ) |
| } |
| return x |
| } |