// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

// These functions cannot have go:noescape annotations,
// because while ptr does not escape, new does.
// If new is marked as not escaping, the compiler will make incorrect
// escape analysis decisions about the pointer value being stored.
// Instead, these are wrappers around the actual atomics (casp1 and so on)
// that use noescape to convey which arguments do not escape.
//
// Additionally, these functions must update the shadow heap for
// write barrier checking.

| 19 | //go:nosplit |
| 20 | func atomicstorep(ptr unsafe.Pointer, new unsafe.Pointer) { |
| 21 | atomicstorep1(noescape(ptr), new) |
| 22 | writebarrierptr_nostore((*uintptr)(ptr), uintptr(new)) |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 23 | } |
| 24 | |
| 25 | //go:nosplit |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 26 | func casp(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { |
| 27 | if !casp1((*unsafe.Pointer)(noescape(unsafe.Pointer(ptr))), noescape(old), new) { |
| 28 | return false |
| 29 | } |
| 30 | writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 31 | return true |
| 32 | } |
| 33 | |
// Like above, but implemented in terms of sync/atomic's uintptr operations.
// We cannot just call the runtime routines, because the race detector expects
// to be able to intercept the sync/atomic forms but not the runtime forms.

| 38 | //go:linkname sync_atomic_StoreUintptr sync/atomic.StoreUintptr |
| 39 | func sync_atomic_StoreUintptr(ptr *uintptr, new uintptr) |
| 40 | |
| 41 | //go:linkname sync_atomic_StorePointer sync/atomic.StorePointer |
| 42 | //go:nosplit |
| 43 | func sync_atomic_StorePointer(ptr *unsafe.Pointer, new unsafe.Pointer) { |
| 44 | sync_atomic_StoreUintptr((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) |
| 45 | atomicstorep1(noescape(unsafe.Pointer(ptr)), new) |
| 46 | writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 47 | } |
| 48 | |
| 49 | //go:linkname sync_atomic_SwapUintptr sync/atomic.SwapUintptr |
| 50 | func sync_atomic_SwapUintptr(ptr *uintptr, new uintptr) uintptr |
| 51 | |
| 52 | //go:linkname sync_atomic_SwapPointer sync/atomic.SwapPointer |
| 53 | //go:nosplit |
| 54 | func sync_atomic_SwapPointer(ptr unsafe.Pointer, new unsafe.Pointer) unsafe.Pointer { |
| 55 | old := unsafe.Pointer(sync_atomic_SwapUintptr((*uintptr)(noescape(ptr)), uintptr(new))) |
| 56 | writebarrierptr_nostore((*uintptr)(ptr), uintptr(new)) |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 57 | return old |
| 58 | } |
| 59 | |
| 60 | //go:linkname sync_atomic_CompareAndSwapUintptr sync/atomic.CompareAndSwapUintptr |
| 61 | func sync_atomic_CompareAndSwapUintptr(ptr *uintptr, old, new uintptr) bool |
| 62 | |
| 63 | //go:linkname sync_atomic_CompareAndSwapPointer sync/atomic.CompareAndSwapPointer |
| 64 | //go:nosplit |
| 65 | func sync_atomic_CompareAndSwapPointer(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool { |
| 66 | if !sync_atomic_CompareAndSwapUintptr((*uintptr)(noescape(unsafe.Pointer(ptr))), uintptr(old), uintptr(new)) { |
| 67 | return false |
| 68 | } |
| 69 | writebarrierptr_nostore((*uintptr)(unsafe.Pointer(ptr)), uintptr(new)) |
Russ Cox | 7b4df8f | 2014-12-22 22:50:42 -0500 | [diff] [blame] | 70 | return true |
| 71 | } |