Péter Szabó | 12206f6 | 2009-11-30 12:10:56 -0800 | [diff] [blame] | 1 | // Copyright 2009 The Go Authors. All rights reserved. |
| 2 | // Use of this source code is governed by a BSD-style |
| 3 | // license that can be found in the LICENSE file. |
| 4 | |
| 5 | package sync |
| 6 | |
Dmitriy Vyukov | 53390c8 | 2012-10-07 22:07:03 +0400 | [diff] [blame] | 7 | import ( |
Dmitry Vyukov | 7b767f4 | 2015-09-23 10:03:54 +0200 | [diff] [blame] | 8 | "internal/race" |
Dmitriy Vyukov | 53390c8 | 2012-10-07 22:07:03 +0400 | [diff] [blame] | 9 | "sync/atomic" |
| 10 | "unsafe" |
| 11 | ) |
Russ Cox | 12b7875 | 2011-02-25 14:29:47 -0500 | [diff] [blame] | 12 | |
Ian Lance Taylor | 09ebbf4 | 2017-06-15 16:42:08 -0700 | [diff] [blame] | 13 | // There is a modified copy of this file in runtime/rwmutex.go. |
| 14 | // If you make any changes here, see if you should make them there. |
| 15 | |
// A RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single writer.
// The zero value for a RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might
// call Lock, no goroutine should expect to be able to acquire a read lock
// until the initial read lock is released. In particular, this prohibits
// recursive read locking. This is to ensure that the lock eventually becomes
// available; a blocked Lock call excludes new readers from acquiring the
// lock.
type RWMutex struct {
	w           Mutex  // held if there are pending writers
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers; offset by -rwmutexMaxReaders while a writer is pending (see Lock)
	readerWait  int32  // number of departing readers a pending writer must still wait for
}
| 35 | |
// rwmutexMaxReaders bounds the number of concurrent readers. Lock subtracts
// it from readerCount to signal a pending writer; because real reader counts
// are far below 1<<30, the true count stays recoverable by adding it back.
const rwmutexMaxReaders = 1 << 30
| 37 | |
Michael Pratt | b4f3d52 | 2020-11-17 16:47:08 -0500 | [diff] [blame] | 38 | // Happens-before relationships are indicated to the race detector via: |
| 39 | // - Unlock -> Lock: readerSem |
| 40 | // - Unlock -> RLock: readerSem |
| 41 | // - RUnlock -> Lock: writerSem |
| 42 | // |
| 43 | // The methods below temporarily disable handling of race synchronization |
| 44 | // events in order to provide the more precise model above to the race |
| 45 | // detector. |
| 46 | // |
| 47 | // For example, atomic.AddInt32 in RLock should not appear to provide |
| 48 | // acquire-release semantics, which would incorrectly synchronize racing |
| 49 | // readers, thus potentially missing races. |
| 50 | |
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock. See the
// documentation on the RWMutex type.
func (rw *RWMutex) RLock() {
	if race.Enabled {
		// Read w.state while the race detector is still enabled so
		// unsynchronized uses of rw itself are reported.
		_ = rw.w.state
		race.Disable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending (readerCount is offset by
		// -rwmutexMaxReaders); wait for it.
		runtime_SemacquireMutex(&rw.readerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		// Establish Unlock -> RLock happens-before via readerSem,
		// per the model documented above.
		race.Acquire(unsafe.Pointer(&rw.readerSem))
	}
}
| 70 | |
// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if race.Enabled {
		// Read w.state while the race detector is still enabled so
		// unsynchronized uses of rw itself are reported.
		_ = rw.w.state
		// Establish RUnlock -> Lock happens-before via writerSem,
		// pairing with the Acquire in Lock.
		race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
		race.Disable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		// r < 0 means either a writer is pending or this RWMutex was
		// not read-locked; rUnlockSlow distinguishes the two.
		// Outlined slow-path to allow the fast-path to be inlined
		rw.rUnlockSlow(r)
	}
	if race.Enabled {
		race.Enable()
	}
}
| 89 | |
// rUnlockSlow handles the RUnlock slow path: either a writer is pending, or
// the RWMutex was not read-locked. r is the readerCount value just returned
// by the decrement in RUnlock (always negative here).
func (rw *RWMutex) rUnlockSlow(r int32) {
	// r+1 == 0: readerCount went from 0 to -1 with no pending writer.
	// r+1 == -rwmutexMaxReaders: same underflow while a writer is pending
	// (count was exactly the -rwmutexMaxReaders offset).
	if r+1 == 0 || r+1 == -rwmutexMaxReaders {
		race.Enable()
		throw("sync: RUnlock of unlocked RWMutex")
	}
	// A writer is pending.
	if atomic.AddInt32(&rw.readerWait, -1) == 0 {
		// The last reader unblocks the writer.
		runtime_Semrelease(&rw.writerSem, false, 1)
	}
}
| 101 | |
// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
func (rw *RWMutex) Lock() {
	if race.Enabled {
		// Read w.state while the race detector is still enabled so
		// unsynchronized uses of rw itself are reported.
		_ = rw.w.state
		race.Disable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer by driving readerCount
	// negative; adding the offset back recovers r, the number of readers
	// active at the instant of the announcement.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
	// Wait for active readers. The second condition rechecks readerWait
	// because departing readers may have already decremented it to 0
	// between the two atomics.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_SemacquireMutex(&rw.writerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		// Establish Unlock -> Lock and RUnlock -> Lock happens-before
		// via the two semaphores, per the model documented above.
		race.Acquire(unsafe.Pointer(&rw.readerSem))
		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
}
| 124 | |
// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) a RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	if race.Enabled {
		// Read w.state while the race detector is still enabled so
		// unsynchronized uses of rw itself are reported.
		_ = rw.w.state
		// Establish Unlock -> Lock/RLock happens-before via readerSem.
		race.Release(unsafe.Pointer(&rw.readerSem))
		race.Disable()
	}

	// Announce to readers there is no active writer; r is the number of
	// readers that queued up while the writer held the lock.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
	if r >= rwmutexMaxReaders {
		// readerCount was not offset negative, so no writer held the lock.
		race.Enable()
		throw("sync: Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		runtime_Semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if race.Enabled {
		race.Enable()
	}
}
Gustavo Niemeyer | 05b1dbd | 2011-02-16 14:11:07 -0500 | [diff] [blame] | 154 | |
| 155 | // RLocker returns a Locker interface that implements |
| 156 | // the Lock and Unlock methods by calling rw.RLock and rw.RUnlock. |
| 157 | func (rw *RWMutex) RLocker() Locker { |
| 158 | return (*rlocker)(rw) |
| 159 | } |
| 160 | |
| 161 | type rlocker RWMutex |
| 162 | |
| 163 | func (r *rlocker) Lock() { (*RWMutex)(r).RLock() } |
| 164 | func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() } |