// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import (
	"runtime/internal/sys"
	"unsafe"
)
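
// mapaccess1_fast32 is the lookup fast path for maps with 32-bit keys,
// used when only the value is needed (v := m[k]). It returns a pointer to
// the value slot for key, or a pointer to the shared zero value if the key
// is not present. It never returns nil.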
func mapaccess1_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0])
}
}
}
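
// mapaccess2_fast32 is like mapaccess1_fast32 but also reports whether the
// key is present, backing the comma-ok form v, ok := m[k].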
func mapaccess2_fast32(t *maptype, h *hmap, key uint32) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast32))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
return add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize)), true
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
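
// mapaccess1_fast64 is the 64-bit-key counterpart of mapaccess1_fast32.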
func mapaccess1_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0])
}
}
}
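
// mapaccess2_fast64 is the 64-bit-key counterpart of mapaccess2_fast32.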
func mapaccess2_fast64(t *maptype, h *hmap, key uint64) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_fast64))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
var b *bmap
if h.B == 0 {
// One-bucket table. No need to hash.
b = (*bmap)(h.buckets)
} else {
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b = (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
return add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize)), true
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
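
// mapaccess1_faststr is the lookup fast path for maps with string keys.
// For maps with a single bucket it skips hashing entirely: short keys are
// compared directly, while long keys are first screened by length and by
// their first and last 4 bytes so that at most one full memequal is needed.
// Otherwise it hashes and probes buckets in the usual way.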
func mapaccess1_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess1_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0])
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
continue
}
// check last 4 bytes
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != bucketCnt {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize))
}
}
return unsafe.Pointer(&zeroVal[0])
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x != top {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
}
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0])
}
}
}
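
// mapaccess2_faststr is like mapaccess1_faststr but also reports whether
// the key is present.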
func mapaccess2_faststr(t *maptype, h *hmap, ky string) (unsafe.Pointer, bool) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racereadpc(unsafe.Pointer(h), callerpc, funcPC(mapaccess2_faststr))
}
if h == nil || h.count == 0 {
return unsafe.Pointer(&zeroVal[0]), false
}
if h.flags&hashWriting != 0 {
throw("concurrent map read and map write")
}
key := stringStructOf(&ky)
if h.B == 0 {
// One-bucket table.
b := (*bmap)(h.buckets)
if key.len < 32 {
// short key, doing lots of comparisons is ok
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
// long key, try not to do more comparisons than necessary
keymaybe := uintptr(bucketCnt)
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x == empty {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
// check first 4 bytes
if *((*[4]byte)(key.str)) != *((*[4]byte)(k.str)) {
continue
}
// check last 4 bytes
if *((*[4]byte)(add(key.str, uintptr(key.len)-4))) != *((*[4]byte)(add(k.str, uintptr(key.len)-4))) {
continue
}
if keymaybe != bucketCnt {
// Two keys are potential matches. Use hash to distinguish them.
goto dohash
}
keymaybe = i
}
if keymaybe != bucketCnt {
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+keymaybe*2*sys.PtrSize))
if memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+keymaybe*uintptr(t.valuesize)), true
}
}
return unsafe.Pointer(&zeroVal[0]), false
}
dohash:
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
m := uintptr(1)<<h.B - 1
b := (*bmap)(add(h.buckets, (hash&m)*uintptr(t.bucketsize)))
if c := h.oldbuckets; c != nil {
if !h.sameSizeGrow() {
// There used to be half as many buckets; mask down one more power of two.
m >>= 1
}
oldb := (*bmap)(add(c, (hash&m)*uintptr(t.bucketsize)))
if !evacuated(oldb) {
b = oldb
}
}
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
x := *((*uint8)(add(unsafe.Pointer(b), i))) // b.tophash[i] without the bounds check
if x != top {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str == key.str || memequal(k.str, key.str, uintptr(key.len)) {
return add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize)), true
}
}
b = b.overflow(t)
if b == nil {
return unsafe.Pointer(&zeroVal[0]), false
}
}
}
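
// mapassign_fast32 is the assignment fast path for maps with 32-bit keys.
// It returns a pointer to the value slot for key, inserting the key and
// growing the map if necessary; the caller stores the value through the
// returned pointer.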
func mapassign_fast32(t *maptype, h *hmap, key uint32) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*4)
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
}
continue
}
k := *((*uint32)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*4)
}
// store new key/value at insert position
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
}
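
// mapassign_fast32ptr is the variant of mapassign_fast32 used when the
// 32-bit key is itself a pointer; the key is handled as an unsafe.Pointer
// so that it stays visible to the garbage collector while the assignment
// is in progress.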
func mapassign_fast32ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast32))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*4)
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
}
continue
}
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*4)))
if k != key {
continue
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*4+i*uintptr(t.valuesize))
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*4)
}
// store new key/value at insert position
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
}
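
// mapassign_fast64 is the 64-bit-key counterpart of mapassign_fast32.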
func mapassign_fast64(t *maptype, h *hmap, key uint64) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*8)
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
}
continue
}
k := *((*uint64)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*8)
}
// store new key/value at insert position
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
}
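
// mapassign_fast64ptr is the 64-bit counterpart of mapassign_fast32ptr,
// used for maps whose keys are pointer values.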
func mapassign_fast64ptr(t *maptype, h *hmap, key unsafe.Pointer) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_fast64))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*8)
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
}
continue
}
k := *((*unsafe.Pointer)(add(unsafe.Pointer(b), dataOffset+i*8)))
if k != key {
continue
}
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*8+i*uintptr(t.valuesize))
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*8)
}
// store new key/value at insert position
typedmemmove(t.key, insertk, unsafe.Pointer(&key))
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
}
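
// mapassign_faststr is the assignment fast path for maps with string keys.
// It returns a pointer to the value slot for ky, inserting the key and
// growing the map if necessary.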
func mapassign_faststr(t *maptype, h *hmap, ky string) unsafe.Pointer {
if h == nil {
panic(plainError("assignment to entry in nil map"))
}
if raceenabled {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapassign_faststr))
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
key := stringStructOf(&ky)
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapassign.
h.flags |= hashWriting
if h.buckets == nil {
h.buckets = newarray(t.bucket, 1)
}
again:
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
var inserti *uint8
var insertk unsafe.Pointer
var val unsafe.Pointer
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
if b.tophash[i] == empty && inserti == nil {
inserti = &b.tophash[i]
insertk = add(unsafe.Pointer(b), dataOffset+i*uintptr(t.keysize))
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*uintptr(t.keysize)+i*uintptr(t.valuesize))
}
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
continue
}
// already have a mapping for key. Update it.
val = add(unsafe.Pointer(b), dataOffset+bucketCnt*2*sys.PtrSize+i*uintptr(t.valuesize))
goto done
}
ovf := b.overflow(t)
if ovf == nil {
break
}
b = ovf
}
// Did not find mapping for key. Allocate new cell & add entry.
// If we hit the max load factor or we have too many overflow buckets,
// and we're not already in the middle of growing, start growing.
if !h.growing() && (overLoadFactor(int64(h.count), h.B) || tooManyOverflowBuckets(h.noverflow, h.B)) {
hashGrow(t, h)
goto again // Growing the table invalidates everything, so try again
}
if inserti == nil {
// all current buckets are full, allocate a new one.
newb := h.newoverflow(t, b)
inserti = &newb.tophash[0]
insertk = add(unsafe.Pointer(newb), dataOffset)
val = add(insertk, bucketCnt*2*sys.PtrSize)
}
// store new key/value at insert position
*((*stringStruct)(insertk)) = *key
*inserti = top
h.count++
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
return val
}
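
// mapdelete_fast32 is the deletion fast path for maps with 32-bit keys.
// Deleting from a nil or empty map is a no-op.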
func mapdelete_fast32(t *maptype, h *hmap, key uint32) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast32))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := (*uint32)(add(unsafe.Pointer(b), dataOffset+i*4))
if key != *k {
continue
}
typedmemclr(t.key, unsafe.Pointer(k))
v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*4 + i*uintptr(t.valuesize))
typedmemclr(t.elem, v)
b.tophash[i] = empty
h.count--
goto done
}
b = b.overflow(t)
if b == nil {
goto done
}
}
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
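
// mapdelete_fast64 is the 64-bit-key counterpart of mapdelete_fast32.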
func mapdelete_fast64(t *maptype, h *hmap, key uint64) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_fast64))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
hash := t.key.alg.hash(noescape(unsafe.Pointer(&key)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := (*uint64)(add(unsafe.Pointer(b), dataOffset+i*8))
if key != *k {
continue
}
typedmemclr(t.key, unsafe.Pointer(k))
v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*8 + i*uintptr(t.valuesize))
typedmemclr(t.elem, v)
b.tophash[i] = empty
h.count--
goto done
}
b = b.overflow(t)
if b == nil {
goto done
}
}
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}
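
// mapdelete_faststr is the deletion fast path for maps with string keys.
// Deleting from a nil or empty map is a no-op.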
func mapdelete_faststr(t *maptype, h *hmap, ky string) {
if raceenabled && h != nil {
callerpc := getcallerpc(unsafe.Pointer(&t))
racewritepc(unsafe.Pointer(h), callerpc, funcPC(mapdelete_faststr))
}
if h == nil || h.count == 0 {
return
}
if h.flags&hashWriting != 0 {
throw("concurrent map writes")
}
key := stringStructOf(&ky)
hash := t.key.alg.hash(noescape(unsafe.Pointer(&ky)), uintptr(h.hash0))
// Set hashWriting after calling alg.hash for consistency with mapdelete
h.flags |= hashWriting
bucket := hash & (uintptr(1)<<h.B - 1)
if h.growing() {
growWork(t, h, bucket)
}
b := (*bmap)(unsafe.Pointer(uintptr(h.buckets) + bucket*uintptr(t.bucketsize)))
top := uint8(hash >> (sys.PtrSize*8 - 8))
if top < minTopHash {
top += minTopHash
}
for {
for i := uintptr(0); i < bucketCnt; i++ {
if b.tophash[i] != top {
continue
}
k := (*stringStruct)(add(unsafe.Pointer(b), dataOffset+i*2*sys.PtrSize))
if k.len != key.len {
continue
}
if k.str != key.str && !memequal(k.str, key.str, uintptr(key.len)) {
continue
}
typedmemclr(t.key, unsafe.Pointer(k))
v := unsafe.Pointer(uintptr(unsafe.Pointer(b)) + dataOffset + bucketCnt*2*sys.PtrSize + i*uintptr(t.valuesize))
typedmemclr(t.elem, v)
b.tophash[i] = empty
h.count--
goto done
}
b = b.overflow(t)
if b == nil {
goto done
}
}
done:
if h.flags&hashWriting == 0 {
throw("concurrent map writes")
}
h.flags &^= hashWriting
}