runtime, time: remove old timer code

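The old implementation kept timers in a fixed array of 64 timersBucket
heaps, each serviced by its own timerproc goroutine. Now that timers
live on per-P heaps and are driven by the scheduler, drop the oldTimers
scaffolding, the timersBucket machinery, timerproc, and the *Old
variants of the timer functions.
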
Updates #6239
Updates #27707

Change-Id: I65e6471829c9de4677d3ac78ef6cd7aa0a1fc4cb
Reviewed-on: https://go-review.googlesource.com/c/go/+/171884
Reviewed-by: Brad Fitzpatrick <bradfitz@golang.org>
Reviewed-by: Michael Knyszek <mknyszek@google.com>
Reviewed-by: Emmanuel Odeke <emm.odeke@gmail.com>
diff --git a/src/runtime/proc.go b/src/runtime/proc.go
index c648195..d264e1d 100644
--- a/src/runtime/proc.go
+++ b/src/runtime/proc.go
@@ -4410,44 +4410,23 @@
 	}
 
 	// Maybe jump time forward for playground.
-	if oldTimers {
-		gp := timejumpOld()
-		if gp != nil {
-			casgstatus(gp, _Gwaiting, _Grunnable)
-			globrunqput(gp)
-			_p_ := pidleget()
-			if _p_ == nil {
-				throw("checkdead: no p for timer")
+	_p_ := timejump()
+	if _p_ != nil {
+		for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
+			if (*pp).ptr() == _p_ {
+				*pp = _p_.link
+				break
 			}
-			mp := mget()
-			if mp == nil {
-				// There should always be a free M since
-				// nothing is running.
-				throw("checkdead: no m for timer")
-			}
-			mp.nextp.set(_p_)
-			notewakeup(&mp.park)
-			return
 		}
-	} else {
-		_p_ := timejump()
-		if _p_ != nil {
-			for pp := &sched.pidle; *pp != 0; pp = &(*pp).ptr().link {
-				if (*pp).ptr() == _p_ {
-					*pp = _p_.link
-					break
-				}
-			}
-			mp := mget()
-			if mp == nil {
-				// There should always be a free M since
-				// nothing is running.
-				throw("checkdead: no m for timer")
-			}
-			mp.nextp.set(_p_)
-			notewakeup(&mp.park)
-			return
+		mp := mget()
+		if mp == nil {
+			// There should always be a free M since
+			// nothing is running.
+			throw("checkdead: no m for timer")
 		}
+		mp.nextp.set(_p_)
+		notewakeup(&mp.park)
+		return
 	}
 
 	// There are no goroutines running, so we can look at the P's.
diff --git a/src/runtime/time.go b/src/runtime/time.go
index ed044b3..a7d14cf 100644
--- a/src/runtime/time.go
+++ b/src/runtime/time.go
@@ -7,24 +7,17 @@
 package runtime
 
 import (
-	"internal/cpu"
 	"runtime/internal/atomic"
 	"runtime/internal/sys"
 	"unsafe"
 )
 
-// Temporary scaffolding while the new timer code is added.
-const oldTimers = false
-
 // Package time knows the layout of this structure.
 // If this struct changes, adjust ../time/sleep.go:/runtimeTimer.
 type timer struct {
-	tb *timersBucket // the bucket the timer lives in (oldTimers)
-	i  int           // heap index (oldTimers)
-
 	// If this timer is on a heap, which P's heap it is on.
 	// puintptr rather than *p to match uintptr in the versions
-	// of this struct defined in other packages. (!oldTimers)
+	// of this struct defined in other packages.
 	pp puintptr
 
 	// Timer wakes up at when, and then at when+period, ... (period > 0 only)
@@ -36,55 +29,13 @@
 	arg    interface{}
 	seq    uintptr
 
-	// What to set the when field to in timerModifiedXX status. (!oldTimers)
+	// What to set the when field to in timerModifiedXX status.
 	nextwhen int64
 
-	// The status field holds one of the values below. (!oldTimers)
+	// The status field holds one of the values below.
 	status uint32
 }
 
-// timersLen is the length of timers array.
-//
-// Ideally, this would be set to GOMAXPROCS, but that would require
-// dynamic reallocation
-//
-// The current value is a compromise between memory usage and performance
-// that should cover the majority of GOMAXPROCS values used in the wild.
-const timersLen = 64
-
-// timers contains "per-P" timer heaps.
-//
-// Timers are queued into timersBucket associated with the current P,
-// so each P may work with its own timers independently of other P instances.
-//
-// Each timersBucket may be associated with multiple P
-// if GOMAXPROCS > timersLen.
-var timers [timersLen]struct {
-	timersBucket
-
-	// The padding should eliminate false sharing
-	// between timersBucket values.
-	pad [cpu.CacheLinePadSize - unsafe.Sizeof(timersBucket{})%cpu.CacheLinePadSize]byte
-}
-
-func (t *timer) assignBucket() *timersBucket {
-	id := uint8(getg().m.p.ptr().id) % timersLen
-	t.tb = &timers[id].timersBucket
-	return t.tb
-}
-
-//go:notinheap
-type timersBucket struct {
-	lock         mutex
-	gp           *g
-	created      bool
-	sleeping     bool
-	rescheduling bool
-	sleepUntil   int64
-	waitnote     note
-	t            []*timer
-}
-
 // Code outside this file has to be careful in using a timer value.
 //
 // The pp, status, and nextwhen fields may only be used by code in this file.
@@ -226,11 +177,6 @@
 // timeSleep puts the current goroutine to sleep for at least ns nanoseconds.
 //go:linkname timeSleep time.Sleep
 func timeSleep(ns int64) {
-	if oldTimers {
-		timeSleepOld(ns)
-		return
-	}
-
 	if ns <= 0 {
 		return
 	}
@@ -257,30 +203,6 @@
 	return true
 }
 
-func timeSleepOld(ns int64) {
-	if ns <= 0 {
-		return
-	}
-
-	gp := getg()
-	t := gp.timer
-	if t == nil {
-		t = new(timer)
-		gp.timer = t
-	}
-	*t = timer{}
-	t.when = nanotime() + ns
-	t.f = goroutineReady
-	t.arg = gp
-	tb := t.assignBucket()
-	lock(&tb.lock)
-	if !tb.addtimerLocked(t) {
-		unlock(&tb.lock)
-		badTimer()
-	}
-	goparkunlock(&tb.lock, waitReasonSleep, traceEvGoSleep, 3)
-}
-
 // startTimer adds t to the timer heap.
 //go:linkname startTimer time.startTimer
 func startTimer(t *timer) {
@@ -318,11 +240,6 @@
 // That avoids the risk of changing the when field of a timer in some P's heap,
 // which could cause the heap to become unsorted.
 func addtimer(t *timer) {
-	if oldTimers {
-		addtimerOld(t)
-		return
-	}
-
 	// when must never be negative; otherwise runtimer will overflow
 	// during its delta calculation and never expire other runtime timers.
 	if t.when < 0 {
@@ -370,59 +287,11 @@
 	return siftupTimer(pp.timers, i)
 }
 
-func addtimerOld(t *timer) {
-	tb := t.assignBucket()
-	lock(&tb.lock)
-	ok := tb.addtimerLocked(t)
-	unlock(&tb.lock)
-	if !ok {
-		badTimer()
-	}
-}
-
-// Add a timer to the heap and start or kick timerproc if the new timer is
-// earlier than any of the others.
-// Timers are locked.
-// Returns whether all is well: false if the data structure is corrupt
-// due to user-level races.
-func (tb *timersBucket) addtimerLocked(t *timer) bool {
-	// when must never be negative; otherwise timerproc will overflow
-	// during its delta calculation and never expire other runtime timers.
-	if t.when < 0 {
-		t.when = 1<<63 - 1
-	}
-	t.i = len(tb.t)
-	tb.t = append(tb.t, t)
-	if !siftupTimer(tb.t, t.i) {
-		return false
-	}
-	if t.i == 0 {
-		// siftup moved to top: new earliest deadline.
-		if tb.sleeping && tb.sleepUntil > t.when {
-			tb.sleeping = false
-			notewakeup(&tb.waitnote)
-		}
-		if tb.rescheduling {
-			tb.rescheduling = false
-			goready(tb.gp, 0)
-		}
-		if !tb.created {
-			tb.created = true
-			go timerproc(tb)
-		}
-	}
-	return true
-}
-
 // deltimer deletes the timer t. It may be on some other P, so we can't
 // actually remove it from the timers heap. We can only mark it as deleted.
 // It will be removed in due course by the P whose heap it is on.
 // Reports whether the timer was removed before it was run.
 func deltimer(t *timer) bool {
-	if oldTimers {
-		return deltimerOld(t)
-	}
-
 	for {
 		switch s := atomic.Load(&t.status); s {
 		case timerWaiting, timerModifiedLater:
@@ -513,62 +382,9 @@
 	return ok
 }
 
-func deltimerOld(t *timer) bool {
-	if t.tb == nil {
-		// t.tb can be nil if the user created a timer
-		// directly, without invoking startTimer e.g
-		//    time.Ticker{C: c}
-		// In this case, return early without any deletion.
-		// See Issue 21874.
-		return false
-	}
-
-	tb := t.tb
-
-	lock(&tb.lock)
-	removed, ok := tb.deltimerLocked(t)
-	unlock(&tb.lock)
-	if !ok {
-		badTimer()
-	}
-	return removed
-}
-
-func (tb *timersBucket) deltimerLocked(t *timer) (removed, ok bool) {
-	// t may not be registered anymore and may have
-	// a bogus i (typically 0, if generated by Go).
-	// Verify it before proceeding.
-	i := t.i
-	last := len(tb.t) - 1
-	if i < 0 || i > last || tb.t[i] != t {
-		return false, true
-	}
-	if i != last {
-		tb.t[i] = tb.t[last]
-		tb.t[i].i = i
-	}
-	tb.t[last] = nil
-	tb.t = tb.t[:last]
-	ok = true
-	if i != last {
-		if !siftupTimer(tb.t, i) {
-			ok = false
-		}
-		if !siftdownTimer(tb.t, i) {
-			ok = false
-		}
-	}
-	return true, ok
-}
-
 // modtimer modifies an existing timer.
 // This is called by the netpoll code.
 func modtimer(t *timer, when, period int64, f func(interface{}, uintptr), arg interface{}, seq uintptr) {
-	if oldTimers {
-		modtimerOld(t, when, period, f, arg, seq)
-		return
-	}
-
 	if when < 0 {
 		when = maxWhen
 	}
@@ -652,35 +468,11 @@
 	}
 }
 
-func modtimerOld(t *timer, when, period int64, f func(interface{}, uintptr), arg interface{}, seq uintptr) {
-	tb := t.tb
-
-	lock(&tb.lock)
-	_, ok := tb.deltimerLocked(t)
-	if ok {
-		t.when = when
-		t.period = period
-		t.f = f
-		t.arg = arg
-		t.seq = seq
-		ok = tb.addtimerLocked(t)
-	}
-	unlock(&tb.lock)
-	if !ok {
-		badTimer()
-	}
-}
-
 // resettimer resets an existing inactive timer to turn it into an active timer,
 // with a new time for when the timer should fire.
 // This should be called instead of addtimer if the timer value has been,
 // or may have been, used previously.
 func resettimer(t *timer, when int64) {
-	if oldTimers {
-		resettimerOld(t, when)
-		return
-	}
-
 	if when < 0 {
 		when = maxWhen
 	}
@@ -727,82 +519,6 @@
 	}
 }
 
-func resettimerOld(t *timer, when int64) {
-	t.when = when
-	addtimer(t)
-}
-
-// Timerproc runs the time-driven events.
-// It sleeps until the next event in the tb heap.
-// If addtimer inserts a new earlier event, it wakes timerproc early.
-func timerproc(tb *timersBucket) {
-	tb.gp = getg()
-	for {
-		lock(&tb.lock)
-		tb.sleeping = false
-		now := nanotime()
-		delta := int64(-1)
-		for {
-			if len(tb.t) == 0 {
-				delta = -1
-				break
-			}
-			t := tb.t[0]
-			delta = t.when - now
-			if delta > 0 {
-				break
-			}
-			ok := true
-			if t.period > 0 {
-				// leave in heap but adjust next time to fire
-				t.when += t.period * (1 + -delta/t.period)
-				if !siftdownTimer(tb.t, 0) {
-					ok = false
-				}
-			} else {
-				// remove from heap
-				last := len(tb.t) - 1
-				if last > 0 {
-					tb.t[0] = tb.t[last]
-					tb.t[0].i = 0
-				}
-				tb.t[last] = nil
-				tb.t = tb.t[:last]
-				if last > 0 {
-					if !siftdownTimer(tb.t, 0) {
-						ok = false
-					}
-				}
-				t.i = -1 // mark as removed
-			}
-			f := t.f
-			arg := t.arg
-			seq := t.seq
-			unlock(&tb.lock)
-			if !ok {
-				badTimer()
-			}
-			if raceenabled {
-				raceacquire(unsafe.Pointer(t))
-			}
-			f(arg, seq)
-			lock(&tb.lock)
-		}
-		if delta < 0 || faketime > 0 {
-			// No timers left - put goroutine to sleep.
-			tb.rescheduling = true
-			goparkunlock(&tb.lock, waitReasonTimerGoroutineIdle, traceEvGoBlock, 1)
-			continue
-		}
-		// At least one timer pending. Sleep until then.
-		tb.sleeping = true
-		tb.sleepUntil = now + delta
-		noteclear(&tb.waitnote)
-		unlock(&tb.lock)
-		notetsleepg(&tb.waitnote, delta)
-	}
-}
-
 // cleantimers cleans up the head of the timer queue. This speeds up
 // programs that create and delete timers; leaving them in the heap
 // slows down addtimer. Reports whether no timer problems were found.
@@ -1199,55 +915,9 @@
 	return minP
 }
 
-func timejumpOld() *g {
-	if faketime == 0 {
-		return nil
-	}
-
-	for i := range timers {
-		lock(&timers[i].lock)
-	}
-	gp := timejumpLocked()
-	for i := range timers {
-		unlock(&timers[i].lock)
-	}
-
-	return gp
-}
-
-func timejumpLocked() *g {
-	// Determine a timer bucket with minimum when.
-	var minT *timer
-	for i := range timers {
-		tb := &timers[i]
-		if !tb.created || len(tb.t) == 0 {
-			continue
-		}
-		t := tb.t[0]
-		if minT == nil || t.when < minT.when {
-			minT = t
-		}
-	}
-	if minT == nil || minT.when <= faketime {
-		return nil
-	}
-
-	faketime = minT.when
-	tb := minT.tb
-	if !tb.rescheduling {
-		return nil
-	}
-	tb.rescheduling = false
-	return tb.gp
-}
-
 // timeSleepUntil returns the time when the next timer should fire.
 // This is only called by sysmon.
 func timeSleepUntil() int64 {
-	if oldTimers {
-		return timeSleepUntilOld()
-	}
-
 	next := int64(maxWhen)
 
 	// Prevent allp slice changes. This is like retake.
@@ -1298,27 +968,6 @@
 	return next
 }
 
-func timeSleepUntilOld() int64 {
-	next := int64(1<<63 - 1)
-
-	// Determine minimum sleepUntil across all the timer buckets.
-	//
-	// The function can not return a precise answer,
-	// as another timer may pop in as soon as timers have been unlocked.
-	// So lock the timers one by one instead of all at once.
-	for i := range timers {
-		tb := &timers[i]
-
-		lock(&tb.lock)
-		if tb.sleeping && tb.sleepUntil < next {
-			next = tb.sleepUntil
-		}
-		unlock(&tb.lock)
-	}
-
-	return next
-}
-
 // Heap maintenance algorithms.
 // These algorithms check for slice index errors manually.
 // Slice index error can happen if the program is using racy
@@ -1326,9 +975,6 @@
 // it will cause the program to crash with a mysterious
 // "panic holding locks" message. Instead, we panic while not
 // holding a lock.
-// The races can occur despite the bucket locks because assignBucket
-// itself is called without locks, so racy calls can cause a timer to
-// change buckets while executing these functions.
 
 func siftupTimer(t []*timer, i int) bool {
 	if i >= len(t) {
@@ -1342,12 +988,10 @@
 			break
 		}
 		t[i] = t[p]
-		t[i].i = i
 		i = p
 	}
 	if tmp != t[i] {
 		t[i] = tmp
-		t[i].i = i
 	}
 	return true
 }
@@ -1385,12 +1029,10 @@
 			break
 		}
 		t[i] = t[c]
-		t[i].i = i
 		i = c
 	}
 	if tmp != t[i] {
 		t[i] = tmp
-		t[i].i = i
 	}
 	return true
 }
diff --git a/src/runtime/trace.go b/src/runtime/trace.go
index 65d9a38..9aa9fac 100644
--- a/src/runtime/trace.go
+++ b/src/runtime/trace.go
@@ -54,7 +54,7 @@
 	traceEvGoInSyscall       = 32 // denotes that goroutine is in syscall when tracing starts [timestamp, goroutine id]
 	traceEvHeapAlloc         = 33 // memstats.heap_live change [timestamp, heap_alloc]
 	traceEvNextGC            = 34 // memstats.next_gc change [timestamp, next_gc]
-	traceEvTimerGoroutine    = 35 // denotes timer goroutine [timer goroutine id]
+	traceEvTimerGoroutine    = 35 // not currently used; previously denoted timer goroutine [timer goroutine id]
 	traceEvFutileWakeup      = 36 // denotes that the previous wakeup of this goroutine was futile [timestamp]
 	traceEvString            = 37 // string dictionary entry [ID, length, string]
 	traceEvGoStartLocal      = 38 // goroutine starts running on the same P as the last event [timestamp, goroutine id]
@@ -416,13 +416,6 @@
 		var data []byte
 		data = append(data, traceEvFrequency|0<<traceArgCountShift)
 		data = traceAppend(data, uint64(freq))
-		for i := range timers {
-			tb := &timers[i]
-			if tb.gp != nil {
-				data = append(data, traceEvTimerGoroutine|0<<traceArgCountShift)
-				data = traceAppend(data, uint64(tb.gp.goid))
-			}
-		}
 		// This will emit a bunch of full buffers, we will pick them up
 		// on the next iteration.
 		trace.stackTab.dump()
diff --git a/src/time/sleep.go b/src/time/sleep.go
index adce860..4e61d0a 100644
--- a/src/time/sleep.go
+++ b/src/time/sleep.go
@@ -11,10 +11,7 @@
 // Interface to timers implemented in package runtime.
 // Must be in sync with ../runtime/time.go:/^type timer
 type runtimeTimer struct {
-	tb uintptr
-	i  int
-	pp uintptr
-
+	pp       uintptr
 	when     int64
 	period   int64
 	f        func(interface{}, uintptr) // NOTE: must not be closure