syncmap: add benchmark for Range

Also adjust BenchmarkAdversarialDelete to be more adversarial: instead of
restoring the key just deleted, store a distinct new key each time so the
map's contents keep churning.

Updates golang/go#18177

Change-Id: Id01ed1077a0447dcfc6ea3929c22baaddbc9d6ee
Reviewed-on: https://go-review.googlesource.com/37151
Reviewed-by: Russ Cox <rsc@golang.org>
diff --git a/syncmap/map_bench_test.go b/syncmap/map_bench_test.go
index ac06274..b95cd00 100644
--- a/syncmap/map_bench_test.go
+++ b/syncmap/map_bench_test.go
@@ -145,8 +145,28 @@
 	})
 }
 
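+// BenchmarkRange benchmarks a full traversal of the map via Range.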
+func BenchmarkRange(b *testing.B) {
+	const mapSize = 1 << 10
+
+	benchMap(b, bench{
+		setup: func(_ *testing.B, m mapInterface) {
+			for i := 0; i < mapSize; i++ {
+				m.Store(i, i)
+			}
+		},
+
+		perG: func(b *testing.B, pb *testing.PB, i int, m mapInterface) {
+			for ; pb.Next(); i++ {
+				m.Range(func(_, _ interface{}) bool { return true })
+			}
+		},
+	})
+}
+
 // BenchmarkAdversarialAlloc tests performance when we store a new value
-// immediately whenever the map is promoted to clean.
+// immediately whenever the map is promoted to clean and otherwise load a
+// unique, missing key.
 //
 // This forces the Load calls to always acquire the map's mutex.
 func BenchmarkAdversarialAlloc(b *testing.B) {
@@ -165,8 +185,8 @@
 	})
 }
 
-// BenchmarkAdversarialDelete tests performance when we delete and restore a
-// value immediately after a large map has been promoted.
+// BenchmarkAdversarialDelete tests performance when we periodically delete
+// one key and add a different one in a large map.
 //
 // This forces the Load calls to always acquire the map's mutex and periodically
 // makes a full copy of the map despite changing only one entry.
@@ -191,7 +211,7 @@
 						return false
 					})
 					m.Delete(key)
-					m.Store(key, key)
+					m.Store(i, i)
 				}
 			}
 		},
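
For readers of this hunk in isolation: the benchmarks plug into a small
harness defined elsewhere in map_bench_test.go. The sketch below is a
minimal reconstruction of that harness from the way the diff uses it;
mapInterface, bench, and benchMap are inferred from usage rather than
copied from the real file, and sync.Map (the standard-library descendant
of this package) stands in for the set of implementations the real
harness exercises.

package syncmap_test

import (
	"sync"
	"sync/atomic"
	"testing"
)

// mapInterface is the subset of the concurrent-map API that the benchmarks
// above exercise; the signatures are inferred from usage in the diff.
type mapInterface interface {
	Load(key interface{}) (value interface{}, ok bool)
	Store(key, value interface{})
	Delete(key interface{})
	Range(f func(key, value interface{}) bool)
}

// bench pairs a one-time setup step with a per-goroutine benchmark body.
type bench struct {
	setup func(*testing.B, mapInterface)
	perG  func(b *testing.B, pb *testing.PB, i int, m mapInterface)
}

// benchMap runs bm against a map implementation. The real harness runs
// several implementations as sub-benchmarks; this sketch uses sync.Map alone.
func benchMap(b *testing.B, bm bench) {
	m := mapInterface(new(sync.Map))
	if bm.setup != nil {
		bm.setup(b, m)
	}
	b.ResetTimer()

	var goroutines int64
	b.RunParallel(func(pb *testing.PB) {
		// Hand each goroutine a disjoint starting key so that bodies like
		// BenchmarkAdversarialDelete's Store(i, i) do not collide across
		// goroutines.
		start := int(atomic.AddInt64(&goroutines, 1)-1) * b.N
		bm.perG(b, pb, start, m)
	})
}

With a harness along these lines, the new BenchmarkRange measures the cost
of one full Range traversal of a 1<<10-entry map per iteration.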