// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync

import (
	"internal/race"
	"sync/atomic"
	"unsafe"
)

// There is a modified copy of this file in runtime/rwmutex.go.
// If you make any changes here, see if you should make them there.

// A RWMutex is a reader/writer mutual exclusion lock.
// The lock can be held by an arbitrary number of readers or a single writer.
// The zero value for a RWMutex is an unlocked mutex.
//
// A RWMutex must not be copied after first use.
//
// If a goroutine holds a RWMutex for reading and another goroutine might
// call Lock, no goroutine should expect to be able to acquire a read lock
// until the initial read lock is released. In particular, this prohibits
// recursive read locking. This is to ensure that the lock eventually becomes
// available; a blocked Lock call excludes new readers from acquiring the
// lock.
type RWMutex struct {
	w           Mutex  // held if there are pending writers
	writerSem   uint32 // semaphore for writers to wait for completing readers
	readerSem   uint32 // semaphore for readers to wait for completing writers
	readerCount int32  // number of pending readers
	readerWait  int32  // number of departing readers
}

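// rwmutexMaxReaders is the largest number of concurrent readers the
// readerCount encoding supports. Lock subtracts it from readerCount,
// driving the count negative so RLock can detect a pending writer;
// Unlock adds it back once the writer is done.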
const rwmutexMaxReaders = 1 << 30

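// The function below is an illustrative sketch, not part of the sync API:
// it shows the intended pairing of RLock/RUnlock around reads and
// Lock/Unlock around writes of shared state. The names illustrateRWMutex,
// cache, read, and write are invented for the example. Note that read
// must not acquire the read lock again (directly or indirectly) while it
// holds it: once a writer is blocked in Lock, a recursive RLock would
// deadlock.
func illustrateRWMutex() {
	var mu RWMutex
	cache := make(map[string]int)

	// Any number of readers may hold the read lock at the same time.
	read := func(key string) (int, bool) {
		mu.RLock()
		defer mu.RUnlock()
		v, ok := cache[key]
		return v, ok
	}

	// A writer excludes both readers and other writers.
	write := func(key string, v int) {
		mu.Lock()
		defer mu.Unlock()
		cache[key] = v
	}

	write("answer", 42)
	read("answer")
}
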
// RLock locks rw for reading.
//
// It should not be used for recursive read locking; a blocked Lock
// call excludes new readers from acquiring the lock. See the
// documentation on the RWMutex type.
func (rw *RWMutex) RLock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	if atomic.AddInt32(&rw.readerCount, 1) < 0 {
		// A writer is pending, wait for it.
		runtime_SemacquireMutex(&rw.readerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
	}
}

// RUnlock undoes a single RLock call;
// it does not affect other simultaneous readers.
// It is a run-time error if rw is not locked for reading
// on entry to RUnlock.
func (rw *RWMutex) RUnlock() {
	if race.Enabled {
		_ = rw.w.state
		race.ReleaseMerge(unsafe.Pointer(&rw.writerSem))
		race.Disable()
	}
	if r := atomic.AddInt32(&rw.readerCount, -1); r < 0 {
		// Outlined slow-path to allow the fast-path to be inlined
		rw.rUnlockSlow(r)
	}
	if race.Enabled {
		race.Enable()
	}
}

func (rw *RWMutex) rUnlockSlow(r int32) {
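	// r+1 == 0 means RUnlock was called with no readers and no pending
	// writer; r+1 == -rwmutexMaxReaders means no readers remained but a
	// writer is pending. Either way rw was not locked for reading.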
	if r+1 == 0 || r+1 == -rwmutexMaxReaders {
		race.Enable()
		throw("sync: RUnlock of unlocked RWMutex")
	}
	// A writer is pending.
	if atomic.AddInt32(&rw.readerWait, -1) == 0 {
		// The last reader unblocks the writer.
		runtime_Semrelease(&rw.writerSem, false, 1)
	}
}

// Lock locks rw for writing.
// If the lock is already locked for reading or writing,
// Lock blocks until the lock is available.
func (rw *RWMutex) Lock() {
	if race.Enabled {
		_ = rw.w.state
		race.Disable()
	}
	// First, resolve competition with other writers.
	rw.w.Lock()
	// Announce to readers there is a pending writer.
	r := atomic.AddInt32(&rw.readerCount, -rwmutexMaxReaders) + rwmutexMaxReaders
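	// r is the number of readers that were still active at the moment of
	// the announcement; each of them will take the slow path in RUnlock
	// and decrement readerWait.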
	// Wait for active readers.
	if r != 0 && atomic.AddInt32(&rw.readerWait, r) != 0 {
		runtime_SemacquireMutex(&rw.writerSem, false, 0)
	}
	if race.Enabled {
		race.Enable()
		race.Acquire(unsafe.Pointer(&rw.readerSem))
		race.Acquire(unsafe.Pointer(&rw.writerSem))
	}
}

// Unlock unlocks rw for writing. It is a run-time error if rw is
// not locked for writing on entry to Unlock.
//
// As with Mutexes, a locked RWMutex is not associated with a particular
// goroutine. One goroutine may RLock (Lock) a RWMutex and then
// arrange for another goroutine to RUnlock (Unlock) it.
func (rw *RWMutex) Unlock() {
	if race.Enabled {
		_ = rw.w.state
		race.Release(unsafe.Pointer(&rw.readerSem))
		race.Disable()
	}

	// Announce to readers there is no active writer.
	r := atomic.AddInt32(&rw.readerCount, rwmutexMaxReaders)
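	// r is the number of readers that arrived while the writer held the
	// lock; each of them is blocked (or about to block) on readerSem.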
	if r >= rwmutexMaxReaders {
		race.Enable()
		throw("sync: Unlock of unlocked RWMutex")
	}
	// Unblock blocked readers, if any.
	for i := 0; i < int(r); i++ {
		runtime_Semrelease(&rw.readerSem, false, 0)
	}
	// Allow other writers to proceed.
	rw.w.Unlock()
	if race.Enabled {
		race.Enable()
	}
}

// RLocker returns a Locker interface that implements
// the Lock and Unlock methods by calling rw.RLock and rw.RUnlock.
func (rw *RWMutex) RLocker() Locker {
	return (*rlocker)(rw)
}

type rlocker RWMutex

func (r *rlocker) Lock()   { (*RWMutex)(r).RLock() }
func (r *rlocker) Unlock() { (*RWMutex)(r).RUnlock() }
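
// The function below is an illustrative sketch, not part of the sync API:
// it shows a typical reason to use RLocker. NewCond accepts any Locker,
// so a Cond built with NewCond(rw.RLocker()) releases and reacquires only
// rw's read lock inside Wait. The names awaitReady and ready are invented
// for the example; c is assumed to have been created with
// NewCond(rw.RLocker()), and the writer that sets *ready does so under
// rw.Lock and then calls c.Broadcast.
func awaitReady(rw *RWMutex, c *Cond, ready *bool) {
	rw.RLock()
	for !*ready {
		// Wait unlocks c.L (rw's read side), blocks, and relocks it on wakeup.
		c.Wait()
	}
	// The shared state guarded by rw can be read here.
	rw.RUnlock()
}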