// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package sync_test

import (
	"runtime"
	. "sync"
	"sync/atomic"
	"testing"
)

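// testWaitGroup uses wg1 and wg2 as a two-phase barrier: each goroutine
// checks in on wg1 and then blocks on wg2, letting the test verify that
// Wait does not return early and that Done eventually releases every waiter.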
func testWaitGroup(t *testing.T, wg1 *WaitGroup, wg2 *WaitGroup) {
	n := 16
	wg1.Add(n)
	wg2.Add(n)
	exited := make(chan bool, n)
	for i := 0; i != n; i++ {
		go func() {
			wg1.Done()
			wg2.Wait()
			exited <- true
		}()
	}
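	// All goroutines have called wg1.Done, but none may have passed wg2.Wait yet.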
	wg1.Wait()
	for i := 0; i != n; i++ {
		select {
		case <-exited:
			t.Fatal("WaitGroup released group too soon")
		default:
		}
		wg2.Done()
	}
	for i := 0; i != n; i++ {
		<-exited // Will block if barrier fails to unlock someone.
	}
}

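// TestWaitGroup checks blocking and release behavior using a pair of WaitGroups.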
func TestWaitGroup(t *testing.T) {
	wg1 := &WaitGroup{}
	wg2 := &WaitGroup{}

	// Run the same test a few times to ensure the WaitGroups are left in a reusable state.
	for i := 0; i != 8; i++ {
		testWaitGroup(t, wg1, wg2)
	}
}

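// TestWaitGroupMisuse verifies that calling Done more times than Add panics
// with the "sync: negative WaitGroup count" message.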
func TestWaitGroupMisuse(t *testing.T) {
	defer func() {
		err := recover()
		if err != "sync: negative WaitGroup count" {
			t.Fatalf("Unexpected panic: %#v", err)
		}
	}()
	wg := &WaitGroup{}
	wg.Add(1)
	wg.Done()
	wg.Done()
	t.Fatal("Should panic")
}

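// BenchmarkWaitGroupUncontended measures Add/Done/Wait on a WaitGroup that is
// private to each goroutine, so the counter is never shared.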
func BenchmarkWaitGroupUncontended(b *testing.B) {
	type PaddedWaitGroup struct {
		WaitGroup
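		// The padding is intended to keep each goroutine's WaitGroup on its
		// own cache line and avoid false sharing.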
		pad [128]uint8
	}
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	for p := 0; p < procs; p++ {
		go func() {
			var wg PaddedWaitGroup
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					wg.Add(1)
					wg.Done()
					wg.Wait()
				}
			}
			c <- true
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

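// benchmarkWaitGroupAddDone measures contended Add/Done on a single shared
// WaitGroup; localWork adds per-iteration computation between the two calls
// to vary how tightly the goroutines hammer the counter.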
func benchmarkWaitGroupAddDone(b *testing.B, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	var wg WaitGroup
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					wg.Add(1)
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
					wg.Done()
				}
			}
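			// Send foo so the compiler cannot optimize the local work away.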
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkWaitGroupAddDone(b *testing.B) {
	benchmarkWaitGroupAddDone(b, 0)
}

func BenchmarkWaitGroupAddDoneWork(b *testing.B) {
	benchmarkWaitGroupAddDone(b, 100)
}

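// benchmarkWaitGroupWait measures repeated Wait calls on a shared WaitGroup
// whose counter has already reached zero, i.e. the uncontended fast path;
// localWork adds per-iteration computation between calls.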
func benchmarkWaitGroupWait(b *testing.B, localWork int) {
	const CallsPerSched = 1000
	procs := runtime.GOMAXPROCS(-1)
	N := int32(b.N / CallsPerSched)
	c := make(chan bool, procs)
	var wg WaitGroup
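	// Drive the counter to zero up front so every Wait below returns immediately.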
	wg.Add(procs)
	for p := 0; p < procs; p++ {
		go wg.Done()
	}
	for p := 0; p < procs; p++ {
		go func() {
			foo := 0
			for atomic.AddInt32(&N, -1) >= 0 {
				runtime.Gosched()
				for g := 0; g < CallsPerSched; g++ {
					wg.Wait()
					for i := 0; i < localWork; i++ {
						foo *= 2
						foo /= 2
					}
				}
			}
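			// Send foo so the compiler cannot optimize the local work away.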
			c <- foo == 42
		}()
	}
	for p := 0; p < procs; p++ {
		<-c
	}
}

func BenchmarkWaitGroupWait(b *testing.B) {
	benchmarkWaitGroupWait(b, 0)
}

func BenchmarkWaitGroupWaitWork(b *testing.B) {
	benchmarkWaitGroupWait(b, 100)
}