...

Source file src/time/sleep_test.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package time_test
     6  
     7  import (
     8  	"errors"
     9  	"fmt"
    10  	"internal/testenv"
    11  	"math/rand"
    12  	"runtime"
    13  	"strings"
    14  	"sync"
    15  	"sync/atomic"
    16  	"testing"
    17  	. "time"
    18  	_ "unsafe" // for go:linkname
    19  )
    20  
    21  // newTimerFunc simulates NewTimer using AfterFunc,
    22  // but this version will not hit the special cases for channels
    23  // that are used when calling NewTimer.
    24  // This makes it easy to test both paths.
    25  func newTimerFunc(d Duration) *Timer {
    26  	c := make(chan Time, 1)
    27  	t := AfterFunc(d, func() { c <- Now() })
    28  	t.C = c
    29  	return t
    30  }
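
A minimal sketch, not part of sleep_test.go, of how a test can be parameterized over both constructors so the NewTimer and AfterFunc paths are exercised by the same body; this mirrors the impl=chan/impl=func subtests used later in this file, and testBothTimerImpls is a hypothetical helper name:

	func testBothTimerImpls(t *testing.T, f func(*testing.T, func(Duration) *Timer)) {
		// Run the same test body against the runtime-backed NewTimer and
		// the AfterFunc-based newTimerFunc so both code paths are covered.
		t.Run("impl=chan", func(t *testing.T) { f(t, NewTimer) })
		t.Run("impl=func", func(t *testing.T) { f(t, newTimerFunc) })
	}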
    31  
    32  // haveHighResSleep is true if the system supports at least ~1ms sleeps.
    33  //
    34  //go:linkname haveHighResSleep runtime.haveHighResSleep
    35  var haveHighResSleep bool
    36  
    37  // adjustDelay returns an adjusted delay based on the system sleep resolution.
    38  // The Go runtime uses different Windows timers for time.Now and sleeping.
    39  // These can tick at different frequencies and can arrive out of sync.
    40  // The effect can be seen, for example, when time.Sleep(100ms) is actually
    41  // shorter than 100ms when measured as the difference between time.Now before
    42  // and after the time.Sleep call. This was observed on Windows XP SP3 (windows/386).
    43  func adjustDelay(t *testing.T, delay Duration) Duration {
    44  	if haveHighResSleep {
    45  		return delay
    46  	}
    47  	t.Log("adjusting delay for low resolution sleep")
    48  	switch runtime.GOOS {
    49  	case "windows":
    50  		return delay - 17*Millisecond
    51  	default:
    52  		t.Fatal("adjustDelay unimplemented on " + runtime.GOOS)
    53  		return 0
    54  	}
    55  }
    56  
    57  func TestSleep(t *testing.T) {
    58  	const delay = 100 * Millisecond
    59  	go func() {
    60  		Sleep(delay / 2)
    61  		Interrupt()
    62  	}()
    63  	start := Now()
    64  	Sleep(delay)
    65  	delayadj := adjustDelay(t, delay)
    66  	duration := Since(start)
    67  	if duration < delayadj {
    68  		t.Fatalf("Sleep(%s) slept for only %s", delay, duration)
    69  	}
    70  }
    71  
    72  // Test the basic function calling behavior. Correct queuing
    73  // behavior is tested elsewhere, since After and AfterFunc share
    74  // the same code.
    75  func TestAfterFunc(t *testing.T) {
    76  	i := 10
    77  	c := make(chan bool)
    78  	var f func()
    79  	f = func() {
    80  		i--
    81  		if i >= 0 {
    82  			AfterFunc(0, f)
    83  			Sleep(1 * Second)
    84  		} else {
    85  			c <- true
    86  		}
    87  	}
    88  
    89  	AfterFunc(0, f)
    90  	<-c
    91  }
    92  
    93  func TestTickerStress(t *testing.T) {
    94  	var stop atomic.Bool
    95  	go func() {
    96  		for !stop.Load() {
    97  			runtime.GC()
    98  			// Yield so that the OS can wake up the timer thread,
    99  			// so that it can generate channel sends for the main goroutine,
   100  // which will eventually set stop to true for us.
   101  			Sleep(Nanosecond)
   102  		}
   103  	}()
   104  	ticker := NewTicker(1)
   105  	for i := 0; i < 100; i++ {
   106  		<-ticker.C
   107  	}
   108  	ticker.Stop()
   109  	stop.Store(true)
   110  }
   111  
   112  func TestTickerConcurrentStress(t *testing.T) {
   113  	var stop atomic.Bool
   114  	go func() {
   115  		for !stop.Load() {
   116  			runtime.GC()
   117  			// Yield so that the OS can wake up the timer thread,
   118  			// so that it can generate channel sends for the main goroutine,
   119  // which will eventually set stop to true for us.
   120  			Sleep(Nanosecond)
   121  		}
   122  	}()
   123  	ticker := NewTicker(1)
   124  	var wg sync.WaitGroup
   125  	for i := 0; i < 10; i++ {
   126  		wg.Add(1)
   127  		go func() {
   128  			defer wg.Done()
   129  			for i := 0; i < 100; i++ {
   130  				<-ticker.C
   131  			}
   132  		}()
   133  	}
   134  	wg.Wait()
   135  	ticker.Stop()
   136  	stop.Store(true)
   137  }
   138  
   139  func TestAfterFuncStarvation(t *testing.T) {
   140  	// Start two goroutines ping-ponging on a channel send.
   141  	// At any given time, at least one of these goroutines is runnable:
   142  	// if the channel buffer is full, the receiver is runnable,
   143  	// and if it is not full, the sender is runnable.
   144  	//
   145  	// In addition, the AfterFunc callback should become runnable after
   146  	// the indicated delay.
   147  	//
   148  	// Even if GOMAXPROCS=1, we expect the runtime to eventually schedule
   149  	// the AfterFunc goroutine instead of the runnable channel goroutine.
   150  	// However, in https://go.dev/issue/65178 this was observed to live-lock
   151  	// on wasip1/wasm and js/wasm after <10000 runs.
   152  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(1))
   153  
   154  	var (
   155  		wg   sync.WaitGroup
   156  		stop atomic.Bool
   157  		c    = make(chan bool, 1)
   158  	)
   159  
   160  	wg.Add(2)
   161  	go func() {
   162  		for !stop.Load() {
   163  			c <- true
   164  		}
   165  		close(c)
   166  		wg.Done()
   167  	}()
   168  	go func() {
   169  		for range c {
   170  		}
   171  		wg.Done()
   172  	}()
   173  
   174  	AfterFunc(1*Microsecond, func() { stop.Store(true) })
   175  	wg.Wait()
   176  }
   177  
   178  func benchmark(b *testing.B, bench func(*testing.PB)) {
   179  // Create an equal number of garbage timers on each P before starting
   180  	// the benchmark.
   181  	var wg sync.WaitGroup
   182  	garbageAll := make([][]*Timer, runtime.GOMAXPROCS(0))
   183  	for i := range garbageAll {
   184  		wg.Add(1)
   185  		go func(i int) {
   186  			defer wg.Done()
   187  			garbage := make([]*Timer, 1<<15)
   188  			for j := range garbage {
   189  				garbage[j] = AfterFunc(Hour, nil)
   190  			}
   191  			garbageAll[i] = garbage
   192  		}(i)
   193  	}
   194  	wg.Wait()
   195  
   196  	b.ResetTimer()
   197  	b.RunParallel(bench)
   198  	b.StopTimer()
   199  
   200  	for _, garbage := range garbageAll {
   201  		for _, t := range garbage {
   202  			t.Stop()
   203  		}
   204  	}
   205  }
   206  
   207  func BenchmarkAfterFunc1000(b *testing.B) {
   208  	benchmark(b, func(pb *testing.PB) {
   209  		for pb.Next() {
   210  			n := 1000
   211  			c := make(chan bool)
   212  			var f func()
   213  			f = func() {
   214  				n--
   215  				if n >= 0 {
   216  					AfterFunc(0, f)
   217  				} else {
   218  					c <- true
   219  				}
   220  			}
   221  			AfterFunc(0, f)
   222  			<-c
   223  		}
   224  	})
   225  }
   226  
   227  func BenchmarkAfter(b *testing.B) {
   228  	benchmark(b, func(pb *testing.PB) {
   229  		for pb.Next() {
   230  			<-After(1)
   231  		}
   232  	})
   233  }
   234  
   235  func BenchmarkStop(b *testing.B) {
   236  	b.Run("impl=chan", func(b *testing.B) {
   237  		benchmark(b, func(pb *testing.PB) {
   238  			for pb.Next() {
   239  				NewTimer(1 * Second).Stop()
   240  			}
   241  		})
   242  	})
   243  	b.Run("impl=func", func(b *testing.B) {
   244  		benchmark(b, func(pb *testing.PB) {
   245  			for pb.Next() {
   246  				newTimerFunc(1 * Second).Stop()
   247  			}
   248  		})
   249  	})
   250  }
   251  
   252  func BenchmarkSimultaneousAfterFunc1000(b *testing.B) {
   253  	benchmark(b, func(pb *testing.PB) {
   254  		for pb.Next() {
   255  			n := 1000
   256  			var wg sync.WaitGroup
   257  			wg.Add(n)
   258  			for range n {
   259  				AfterFunc(0, wg.Done)
   260  			}
   261  			wg.Wait()
   262  		}
   263  	})
   264  }
   265  
   266  func BenchmarkStartStop1000(b *testing.B) {
   267  	benchmark(b, func(pb *testing.PB) {
   268  		for pb.Next() {
   269  			const N = 1000
   270  			timers := make([]*Timer, N)
   271  			for i := range timers {
   272  				timers[i] = AfterFunc(Hour, nil)
   273  			}
   274  
   275  			for i := range timers {
   276  				timers[i].Stop()
   277  			}
   278  		}
   279  	})
   280  }
   281  
   282  func BenchmarkReset(b *testing.B) {
   283  	b.Run("impl=chan", func(b *testing.B) {
   284  		benchmark(b, func(pb *testing.PB) {
   285  			t := NewTimer(Hour)
   286  			for pb.Next() {
   287  				t.Reset(Hour)
   288  			}
   289  			t.Stop()
   290  		})
   291  	})
   292  	b.Run("impl=func", func(b *testing.B) {
   293  		benchmark(b, func(pb *testing.PB) {
   294  			t := newTimerFunc(Hour)
   295  			for pb.Next() {
   296  				t.Reset(Hour)
   297  			}
   298  			t.Stop()
   299  		})
   300  	})
   301  }
   302  
   303  func BenchmarkSleep1000(b *testing.B) {
   304  	benchmark(b, func(pb *testing.PB) {
   305  		for pb.Next() {
   306  			const N = 1000
   307  			var wg sync.WaitGroup
   308  			wg.Add(N)
   309  			for range N {
   310  				go func() {
   311  					Sleep(Nanosecond)
   312  					wg.Done()
   313  				}()
   314  			}
   315  			wg.Wait()
   316  		}
   317  	})
   318  }
   319  
   320  func TestAfter(t *testing.T) {
   321  	const delay = 100 * Millisecond
   322  	start := Now()
   323  	end := <-After(delay)
   324  	delayadj := adjustDelay(t, delay)
   325  	if duration := Since(start); duration < delayadj {
   326  		t.Fatalf("After(%s) slept for only %d ns", delay, duration)
   327  	}
   328  	if min := start.Add(delayadj); end.Before(min) {
   329  		t.Fatalf("After(%s) expect >= %s, got %s", delay, min, end)
   330  	}
   331  }
   332  
   333  func TestAfterTick(t *testing.T) {
   334  	t.Parallel()
   335  	const Count = 10
   336  	Delta := 100 * Millisecond
   337  	if testing.Short() {
   338  		Delta = 10 * Millisecond
   339  	}
   340  	t0 := Now()
   341  	for i := 0; i < Count; i++ {
   342  		<-After(Delta)
   343  	}
   344  	t1 := Now()
   345  	d := t1.Sub(t0)
   346  	target := Delta * Count
   347  	if d < target*9/10 {
   348  		t.Fatalf("%d ticks of %s too fast: took %s, expected %s", Count, Delta, d, target)
   349  	}
   350  	if !testing.Short() && d > target*30/10 {
   351  		t.Fatalf("%d ticks of %s too slow: took %s, expected %s", Count, Delta, d, target)
   352  	}
   353  }
   354  
   355  func TestAfterStop(t *testing.T) {
   356  	t.Run("impl=chan", func(t *testing.T) {
   357  		testAfterStop(t, NewTimer)
   358  	})
   359  	t.Run("impl=func", func(t *testing.T) {
   360  		testAfterStop(t, newTimerFunc)
   361  	})
   362  }
   363  
   364  func testAfterStop(t *testing.T, newTimer func(Duration) *Timer) {
   365  	// We want to test that we stop a timer before it runs.
   366  	// We also want to test that it didn't run after a longer timer.
   367  	// Since we don't want the test to run for too long, we don't
   368  	// want to use lengthy times. That makes the test inherently flaky.
   369  	// So only report an error if it fails five times in a row.
   370  
   371  	var errs []string
   372  	logErrs := func() {
   373  		for _, e := range errs {
   374  			t.Log(e)
   375  		}
   376  	}
   377  
   378  	for i := 0; i < 5; i++ {
   379  		AfterFunc(100*Millisecond, func() {})
   380  		t0 := newTimer(50 * Millisecond)
   381  		c1 := make(chan bool, 1)
   382  		t1 := AfterFunc(150*Millisecond, func() { c1 <- true })
   383  		c2 := After(200 * Millisecond)
   384  		if !t0.Stop() {
   385  			errs = append(errs, "failed to stop event 0")
   386  			continue
   387  		}
   388  		if !t1.Stop() {
   389  			errs = append(errs, "failed to stop event 1")
   390  			continue
   391  		}
   392  		<-c2
   393  		select {
   394  		case <-t0.C:
   395  			errs = append(errs, "event 0 was not stopped")
   396  			continue
   397  		case <-c1:
   398  			errs = append(errs, "event 1 was not stopped")
   399  			continue
   400  		default:
   401  		}
   402  		if t1.Stop() {
   403  			errs = append(errs, "Stop returned true twice")
   404  			continue
   405  		}
   406  
   407  		// Test passed, so all done.
   408  		if len(errs) > 0 {
   409  			t.Logf("saw %d errors, ignoring to avoid flakiness", len(errs))
   410  			logErrs()
   411  		}
   412  
   413  		return
   414  	}
   415  
   416  	t.Errorf("saw %d errors", len(errs))
   417  	logErrs()
   418  }
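
The Stop checks in testAfterStop rely on Stop's documented result: it reports whether the call prevented the timer from firing. A minimal sketch of the traditional stop-and-drain idiom built on that contract, assuming the pre-Go 1.23 asynchronous channel behavior and this file's dot-imported time package (stopAndDrain is a hypothetical name):

	func stopAndDrain(t *Timer) {
		// If Stop returns false, the timer already fired or was stopped
		// earlier, so a value may be buffered in t.C; drain it without
		// blocking before the timer is reused.
		if !t.Stop() {
			select {
			case <-t.C:
			default:
			}
		}
	}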
   419  
   420  func TestAfterQueuing(t *testing.T) {
   421  	t.Run("impl=chan", func(t *testing.T) {
   422  		testAfterQueuing(t, After)
   423  	})
   424  	t.Run("impl=func", func(t *testing.T) {
   425  		testAfterQueuing(t, func(d Duration) <-chan Time { return newTimerFunc(d).C })
   426  	})
   427  }
   428  
   429  func testAfterQueuing(t *testing.T, after func(Duration) <-chan Time) {
   430  	// This test flakes out on some systems,
   431  	// so we'll try it a few times before declaring it a failure.
   432  	const attempts = 5
   433  	err := errors.New("!=nil")
   434  	for i := 0; i < attempts && err != nil; i++ {
   435  		delta := Duration(20+i*50) * Millisecond
   436  		if err = testAfterQueuing1(delta, after); err != nil {
   437  			t.Logf("attempt %v failed: %v", i, err)
   438  		}
   439  	}
   440  	if err != nil {
   441  		t.Fatal(err)
   442  	}
   443  }
   444  
   445  var slots = []int{5, 3, 6, 6, 6, 1, 1, 2, 7, 9, 4, 8, 0}
   446  
   447  type afterResult struct {
   448  	slot int
   449  	t    Time
   450  }
   451  
   452  func await(slot int, result chan<- afterResult, ac <-chan Time) {
   453  	result <- afterResult{slot, <-ac}
   454  }
   455  
   456  func testAfterQueuing1(delta Duration, after func(Duration) <-chan Time) error {
   457  	// make the result channel buffered because we don't want
   458  	// to depend on channel queuing semantics that might
   459  	// possibly change in the future.
   460  	result := make(chan afterResult, len(slots))
   461  
   462  	t0 := Now()
   463  	for _, slot := range slots {
   464  		go await(slot, result, After(Duration(slot)*delta))
   465  	}
   466  	var order []int
   467  	var times []Time
   468  	for range slots {
   469  		r := <-result
   470  		order = append(order, r.slot)
   471  		times = append(times, r.t)
   472  	}
   473  	for i := range order {
   474  		if i > 0 && order[i] < order[i-1] {
   475  			return fmt.Errorf("After calls returned out of order: %v", order)
   476  		}
   477  	}
   478  	for i, t := range times {
   479  		dt := t.Sub(t0)
   480  		target := Duration(order[i]) * delta
   481  		if dt < target-delta/2 || dt > target+delta*10 {
   482  			return fmt.Errorf("After(%s) arrived at %s, expected [%s,%s]", target, dt, target-delta/2, target+delta*10)
   483  		}
   484  	}
   485  	return nil
   486  }
   487  
   488  func TestTimerStopStress(t *testing.T) {
   489  	if testing.Short() {
   490  		return
   491  	}
   492  	t.Parallel()
   493  	for i := 0; i < 100; i++ {
   494  		go func(i int) {
   495  			timer := AfterFunc(2*Second, func() {
   496  				t.Errorf("timer %d was not stopped", i)
   497  			})
   498  			Sleep(1 * Second)
   499  			timer.Stop()
   500  		}(i)
   501  	}
   502  	Sleep(3 * Second)
   503  }
   504  
   505  func TestSleepZeroDeadlock(t *testing.T) {
   506  // Sleep(0) used to hang; the sequence of events was as follows.
   507  // Sleep(0) sets G's status to Gwaiting, but then immediately returns, leaving that status in place.
   508  	// Then the goroutine calls e.g. new and falls down into the scheduler due to pending GC.
   509  	// After the GC nobody wakes up the goroutine from Gwaiting status.
   510  	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(4))
   511  	c := make(chan bool)
   512  	go func() {
   513  		for i := 0; i < 100; i++ {
   514  			runtime.GC()
   515  		}
   516  		c <- true
   517  	}()
   518  	for i := 0; i < 100; i++ {
   519  		Sleep(0)
   520  		tmp := make(chan bool, 1)
   521  		tmp <- true
   522  		<-tmp
   523  	}
   524  	<-c
   525  }
   526  
   527  func testReset(d Duration) error {
   528  	t0 := NewTimer(2 * d)
   529  	Sleep(d)
   530  	if !t0.Reset(3 * d) {
   531  		return errors.New("resetting unfired timer returned false")
   532  	}
   533  	Sleep(2 * d)
   534  	select {
   535  	case <-t0.C:
   536  		return errors.New("timer fired early")
   537  	default:
   538  	}
   539  	Sleep(2 * d)
   540  	select {
   541  	case <-t0.C:
   542  	default:
   543  		return errors.New("reset timer did not fire")
   544  	}
   545  
   546  	if t0.Reset(50 * Millisecond) {
   547  		return errors.New("resetting expired timer returned true")
   548  	}
   549  	return nil
   550  }
   551  
   552  func TestReset(t *testing.T) {
   553  	// We try to run this test with increasingly larger multiples
   554  // until one works, so that slow, loaded hardware isn't as flaky,
   555  	// but without slowing down fast machines unnecessarily.
   556  	//
   557  	// (maxDuration is several orders of magnitude longer than we
   558  	// expect this test to actually take on a fast, unloaded machine.)
   559  	d := 1 * Millisecond
   560  	const maxDuration = 10 * Second
   561  	for {
   562  		err := testReset(d)
   563  		if err == nil {
   564  			break
   565  		}
   566  		d *= 2
   567  		if d > maxDuration {
   568  			t.Error(err)
   569  		}
   570  		t.Logf("%v; trying duration %v", err, d)
   571  	}
   572  }
   573  
   574  // Test that sleeping (via Sleep or Timer) for an interval so large it
   575  // overflows does not result in a short sleep duration. Nor does it interfere
   576  // with execution of other timers. If it does, timers in this or subsequent
   577  // tests may not fire.
   578  func TestOverflowSleep(t *testing.T) {
   579  	const big = Duration(int64(1<<63 - 1))
   580  
   581  	go func() {
   582  		Sleep(big)
   583  		// On failure, this may return after the test has completed, so
   584  		// we need to panic instead.
   585  		panic("big sleep returned")
   586  	}()
   587  
   588  	select {
   589  	case <-After(big):
   590  		t.Fatalf("big timeout fired")
   591  	case <-After(25 * Millisecond):
   592  		// OK
   593  	}
   594  
   595  	const neg = Duration(-1 << 63)
   596  	Sleep(neg) // Returns immediately.
   597  	select {
   598  	case <-After(neg):
   599  		// OK
   600  	case <-After(1 * Second):
   601  		t.Fatalf("negative timeout didn't fire")
   602  	}
   603  }
   604  
   605  // Test that a panic while deleting a timer does not leave
   606  // the timers mutex held, deadlocking a ticker.Stop in a defer.
   607  func TestIssue5745(t *testing.T) {
   608  	ticker := NewTicker(Hour)
   609  	defer func() {
   610  		// would deadlock here before the fix due to
   611  // the lock taken before the segfault.
   612  		ticker.Stop()
   613  
   614  		if r := recover(); r == nil {
   615  			t.Error("Expected panic, but none happened.")
   616  		}
   617  	}()
   618  
   619  	// cause a panic due to a segfault
   620  	var timer *Timer
   621  	timer.Stop()
   622  	t.Error("Should be unreachable.")
   623  }
   624  
   625  func TestOverflowPeriodRuntimeTimer(t *testing.T) {
   626  	// This may hang forever if timers are broken. See comment near
   627  	// the end of CheckRuntimeTimerOverflow in internal_test.go.
   628  	CheckRuntimeTimerPeriodOverflow()
   629  }
   630  
   631  func checkZeroPanicString(t *testing.T) {
   632  	e := recover()
   633  	s, _ := e.(string)
   634  	if want := "called on uninitialized Timer"; !strings.Contains(s, want) {
   635  		t.Errorf("panic = %v; want substring %q", e, want)
   636  	}
   637  }
   638  
   639  func TestZeroTimerResetPanics(t *testing.T) {
   640  	defer checkZeroPanicString(t)
   641  	var tr Timer
   642  	tr.Reset(1)
   643  }
   644  
   645  func TestZeroTimerStopPanics(t *testing.T) {
   646  	defer checkZeroPanicString(t)
   647  	var tr Timer
   648  	tr.Stop()
   649  }
   650  
   651  // Test that zero duration timers aren't missed by the scheduler. Regression test for issue 44868.
   652  func TestZeroTimer(t *testing.T) {
   653  	t.Run("impl=chan", func(t *testing.T) {
   654  		testZeroTimer(t, NewTimer)
   655  	})
   656  	t.Run("impl=func", func(t *testing.T) {
   657  		testZeroTimer(t, newTimerFunc)
   658  	})
   659  	t.Run("impl=cache", func(t *testing.T) {
   660  		timer := newTimerFunc(Hour)
   661  		testZeroTimer(t, func(d Duration) *Timer {
   662  			timer.Reset(d)
   663  			return timer
   664  		})
   665  	})
   666  }
   667  
   668  func testZeroTimer(t *testing.T, newTimer func(Duration) *Timer) {
   669  	if testing.Short() {
   670  		t.Skip("-short")
   671  	}
   672  
   673  	for i := 0; i < 1000000; i++ {
   674  		s := Now()
   675  		ti := newTimer(0)
   676  		<-ti.C
   677  		if diff := Since(s); diff > 2*Second {
   678  			t.Errorf("Expected time to get value from Timer channel in less than 2 sec, took %v", diff)
   679  		}
   680  	}
   681  }
   682  
   683  // Test that rapidly moving a timer earlier doesn't cause it to get dropped.
   684  // Issue 47329.
   685  func TestTimerModifiedEarlier(t *testing.T) {
   686  	if runtime.GOOS == "plan9" && runtime.GOARCH == "arm" {
   687  		testenv.SkipFlaky(t, 50470)
   688  	}
   689  
   690  	past := Until(Unix(0, 0))
   691  	count := 1000
   692  	fail := 0
   693  	for i := 0; i < count; i++ {
   694  		timer := newTimerFunc(Hour)
   695  		for j := 0; j < 10; j++ {
   696  			if !timer.Stop() {
   697  				<-timer.C
   698  			}
   699  			timer.Reset(past)
   700  		}
   701  
   702  		deadline := NewTimer(10 * Second)
   703  		defer deadline.Stop()
   704  		now := Now()
   705  		select {
   706  		case <-timer.C:
   707  			if since := Since(now); since > 8*Second {
   708  				t.Errorf("timer took too long (%v)", since)
   709  				fail++
   710  			}
   711  		case <-deadline.C:
   712  			t.Error("deadline expired")
   713  		}
   714  	}
   715  
   716  	if fail > 0 {
   717  		t.Errorf("%d failures", fail)
   718  	}
   719  }
   720  
   721  // Test that rapidly moving timers earlier and later doesn't cause
   722  // some of the sleep times to be lost.
   723  // Issue 47762
   724  func TestAdjustTimers(t *testing.T) {
   725  	var rnd = rand.New(rand.NewSource(Now().UnixNano()))
   726  
   727  	timers := make([]*Timer, 100)
   728  	states := make([]int, len(timers))
   729  	indices := rnd.Perm(len(timers))
   730  
   731  	for len(indices) != 0 {
   732  		var ii = rnd.Intn(len(indices))
   733  		var i = indices[ii]
   734  
   735  		var timer = timers[i]
   736  		var state = states[i]
   737  		states[i]++
   738  
   739  		switch state {
   740  		case 0:
   741  			timers[i] = newTimerFunc(0)
   742  
   743  		case 1:
   744  			<-timer.C // Timer is now idle.
   745  
   746  		// Reset to various long durations, which we'll cancel.
   747  		case 2:
   748  			if timer.Reset(1 * Minute) {
   749  				panic("shouldn't be active (1)")
   750  			}
   751  		case 4:
   752  			if timer.Reset(3 * Minute) {
   753  				panic("shouldn't be active (3)")
   754  			}
   755  		case 6:
   756  			if timer.Reset(2 * Minute) {
   757  				panic("shouldn't be active (2)")
   758  			}
   759  
   760  		// Stop and drain a long-duration timer.
   761  		case 3, 5, 7:
   762  			if !timer.Stop() {
   763  				t.Logf("timer %d state %d Stop returned false", i, state)
   764  				<-timer.C
   765  			}
   766  
   767  		// Start a short-duration timer we expect to select without blocking.
   768  		case 8:
   769  			if timer.Reset(0) {
   770  				t.Fatal("timer.Reset returned true")
   771  			}
   772  		case 9:
   773  			now := Now()
   774  			<-timer.C
   775  			dur := Since(now)
   776  			if dur > 750*Millisecond {
   777  				t.Errorf("timer %d took %v to complete", i, dur)
   778  			}
   779  
   780  		// Timer is done. Swap with tail and remove.
   781  		case 10:
   782  			indices[ii] = indices[len(indices)-1]
   783  			indices = indices[:len(indices)-1]
   784  		}
   785  	}
   786  }
   787  
   788  func TestStopResult(t *testing.T) {
   789  	testStopResetResult(t, true)
   790  }
   791  
   792  func TestResetResult(t *testing.T) {
   793  	testStopResetResult(t, false)
   794  }
   795  
   796  // Test that, when racing between running a timer and stopping a timer, Stop
   797  // consistently indicates whether a value can be read from the channel.
   798  // Issue #69312.
   799  func testStopResetResult(t *testing.T, testStop bool) {
   800  	for _, name := range []string{"0", "1", "2"} {
   801  		t.Run("asynctimerchan="+name, func(t *testing.T) {
   802  			testStopResetResultGODEBUG(t, testStop, name)
   803  		})
   804  	}
   805  }
   806  
   807  func testStopResetResultGODEBUG(t *testing.T, testStop bool, godebug string) {
   808  	t.Setenv("GODEBUG", "asynctimerchan="+godebug)
   809  
   810  	stopOrReset := func(timer *Timer) bool {
   811  		if testStop {
   812  			return timer.Stop()
   813  		} else {
   814  			return timer.Reset(1 * Hour)
   815  		}
   816  	}
   817  
   818  	start := make(chan struct{})
   819  	var wg sync.WaitGroup
   820  	const N = 1000
   821  	wg.Add(N)
   822  	for range N {
   823  		go func() {
   824  			defer wg.Done()
   825  			<-start
   826  			for j := 0; j < 100; j++ {
   827  				timer1 := NewTimer(1 * Millisecond)
   828  				timer2 := NewTimer(1 * Millisecond)
   829  				select {
   830  				case <-timer1.C:
   831  					if !stopOrReset(timer2) {
   832  						// The test fails if this
   833  						// channel read times out.
   834  						<-timer2.C
   835  					}
   836  				case <-timer2.C:
   837  					if !stopOrReset(timer1) {
   838  						// The test fails if this
   839  						// channel read times out.
   840  						<-timer1.C
   841  					}
   842  				}
   843  			}
   844  		}()
   845  	}
   846  	close(start)
   847  	wg.Wait()
   848  }
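
For context, the t.Setenv call above pins the timer-channel implementation through the asynctimerchan GODEBUG setting, and the parent test iterates over the values "0", "1", and "2". A minimal sketch of doing the same in an isolated test, assuming the same dot-imported package (TestWithAsyncTimerChan is a hypothetical name, and "1" is just one of the values exercised above):

	func TestWithAsyncTimerChan(t *testing.T) {
		// Pin the implementation for this test and its subtests,
		// then run an ordinary timer through it.
		t.Setenv("GODEBUG", "asynctimerchan=1")

		tm := NewTimer(Millisecond)
		defer tm.Stop()
		<-tm.C
	}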
   849  
   850  // Test having a large number of goroutines wake up a ticker simultaneously.
   851  // This used to trigger a crash when run under x/tools/cmd/stress.
   852  func TestMultiWakeupTicker(t *testing.T) {
   853  	if testing.Short() {
   854  		t.Skip("-short")
   855  	}
   856  
   857  	goroutines := runtime.GOMAXPROCS(0)
   858  	timer := NewTicker(Microsecond)
   859  	var wg sync.WaitGroup
   860  	wg.Add(goroutines)
   861  	for range goroutines {
   862  		go func() {
   863  			defer wg.Done()
   864  			for range 100000 {
   865  				select {
   866  				case <-timer.C:
   867  				case <-After(Millisecond):
   868  				}
   869  			}
   870  		}()
   871  	}
   872  	wg.Wait()
   873  }
   874  
   875  // Test having a large number of goroutines wake up a timer simultaneously.
   876  // This used to trigger a crash when run under x/tools/cmd/stress.
   877  func TestMultiWakeupTimer(t *testing.T) {
   878  	if testing.Short() {
   879  		t.Skip("-short")
   880  	}
   881  
   882  	goroutines := runtime.GOMAXPROCS(0)
   883  	timer := NewTimer(Nanosecond)
   884  	var wg sync.WaitGroup
   885  	wg.Add(goroutines)
   886  	for range goroutines {
   887  		go func() {
   888  			defer wg.Done()
   889  			for range 10000 {
   890  				select {
   891  				case <-timer.C:
   892  				default:
   893  				}
   894  				timer.Reset(Nanosecond)
   895  			}
   896  		}()
   897  	}
   898  	wg.Wait()
   899  }
   900  
   901  // Benchmark timer latency when the thread that creates the timer is busy with
   902  // other work and the timers must be serviced by other threads.
   903  // https://golang.org/issue/38860
   904  func BenchmarkParallelTimerLatency(b *testing.B) {
   905  	gmp := runtime.GOMAXPROCS(0)
   906  	if gmp < 2 || runtime.NumCPU() < gmp {
   907  		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
   908  	}
   909  
   910  	// allocate memory now to avoid GC interference later.
   911  	timerCount := gmp - 1
   912  	stats := make([]struct {
   913  		sum   float64
   914  		max   Duration
   915  		count int64
   916  		_     [5]int64 // cache line padding
   917  	}, timerCount)
   918  
   919  	// Ensure the time to start new threads to service timers will not pollute
   920  	// the results.
   921  	warmupScheduler(gmp)
   922  
   923  // Note that, other than the AfterFunc calls this benchmark is measuring, it
   924  	// avoids using any other timers. In particular, the main goroutine uses
   925  // doWork to spin for some durations because, up through Go 1.15, if all
   926  	// threads are idle sysmon could leave deep sleep when we wake.
   927  
   928  	// Ensure sysmon is in deep sleep.
   929  	doWork(30 * Millisecond)
   930  
   931  	b.ResetTimer()
   932  
   933  	const delay = Millisecond
   934  	var wg sync.WaitGroup
   935  	var count int32
   936  	for i := 0; i < b.N; i++ {
   937  		wg.Add(timerCount)
   938  		atomic.StoreInt32(&count, 0)
   939  		for j := 0; j < timerCount; j++ {
   940  			j := j
   941  			expectedWakeup := Now().Add(delay)
   942  			AfterFunc(delay, func() {
   943  				late := Since(expectedWakeup)
   944  				if late < 0 {
   945  					late = 0
   946  				}
   947  				stats[j].count++
   948  				stats[j].sum += float64(late.Nanoseconds())
   949  				if late > stats[j].max {
   950  					stats[j].max = late
   951  				}
   952  				atomic.AddInt32(&count, 1)
   953  				for atomic.LoadInt32(&count) < int32(timerCount) {
   954  					// spin until all timers fired
   955  				}
   956  				wg.Done()
   957  			})
   958  		}
   959  
   960  		for atomic.LoadInt32(&count) < int32(timerCount) {
   961  			// spin until all timers fired
   962  		}
   963  		wg.Wait()
   964  
   965  		// Spin for a bit to let the other scheduler threads go idle before the
   966  		// next round.
   967  		doWork(Millisecond)
   968  	}
   969  	var total float64
   970  	var samples float64
   971  	max := Duration(0)
   972  	for _, s := range stats {
   973  		if s.max > max {
   974  			max = s.max
   975  		}
   976  		total += s.sum
   977  		samples += float64(s.count)
   978  	}
   979  	b.ReportMetric(0, "ns/op")
   980  	b.ReportMetric(total/samples, "avg-late-ns")
   981  	b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
   982  }
   983  
   984  // Benchmark timer latency with staggered wakeup times and varying CPU bound
   985  // workloads. https://golang.org/issue/38860
   986  func BenchmarkStaggeredTickerLatency(b *testing.B) {
   987  	gmp := runtime.GOMAXPROCS(0)
   988  	if gmp < 2 || runtime.NumCPU() < gmp {
   989  		b.Skip("skipping with GOMAXPROCS < 2 or NumCPU < GOMAXPROCS")
   990  	}
   991  
   992  	const delay = 3 * Millisecond
   993  
   994  	for _, dur := range []Duration{300 * Microsecond, 2 * Millisecond} {
   995  		b.Run(fmt.Sprintf("work-dur=%s", dur), func(b *testing.B) {
   996  			for tickersPerP := 1; tickersPerP < int(delay/dur)+1; tickersPerP++ {
   997  				tickerCount := gmp * tickersPerP
   998  				b.Run(fmt.Sprintf("tickers-per-P=%d", tickersPerP), func(b *testing.B) {
   999  					// allocate memory now to avoid GC interference later.
  1000  					stats := make([]struct {
  1001  						sum   float64
  1002  						max   Duration
  1003  						count int64
  1004  						_     [5]int64 // cache line padding
  1005  					}, tickerCount)
  1006  
  1007  					// Ensure the time to start new threads to service timers
  1008  					// will not pollute the results.
  1009  					warmupScheduler(gmp)
  1010  
  1011  					b.ResetTimer()
  1012  
  1013  					var wg sync.WaitGroup
  1014  					wg.Add(tickerCount)
  1015  					for j := 0; j < tickerCount; j++ {
  1016  						j := j
  1017  						doWork(delay / Duration(gmp))
  1018  						expectedWakeup := Now().Add(delay)
  1019  						ticker := NewTicker(delay)
  1020  						go func(c int, ticker *Ticker, firstWake Time) {
  1021  							defer ticker.Stop()
  1022  
  1023  							for ; c > 0; c-- {
  1024  								<-ticker.C
  1025  								late := Since(expectedWakeup)
  1026  								if late < 0 {
  1027  									late = 0
  1028  								}
  1029  								stats[j].count++
  1030  								stats[j].sum += float64(late.Nanoseconds())
  1031  								if late > stats[j].max {
  1032  									stats[j].max = late
  1033  								}
  1034  								expectedWakeup = expectedWakeup.Add(delay)
  1035  								doWork(dur)
  1036  							}
  1037  							wg.Done()
  1038  						}(b.N, ticker, expectedWakeup)
  1039  					}
  1040  					wg.Wait()
  1041  
  1042  					var total float64
  1043  					var samples float64
  1044  					max := Duration(0)
  1045  					for _, s := range stats {
  1046  						if s.max > max {
  1047  							max = s.max
  1048  						}
  1049  						total += s.sum
  1050  						samples += float64(s.count)
  1051  					}
  1052  					b.ReportMetric(0, "ns/op")
  1053  					b.ReportMetric(total/samples, "avg-late-ns")
  1054  					b.ReportMetric(float64(max.Nanoseconds()), "max-late-ns")
  1055  				})
  1056  			}
  1057  		})
  1058  	}
  1059  }
  1060  
  1061  // warmupScheduler ensures the scheduler has at least targetThreadCount threads
  1062  // in its thread pool.
  1063  func warmupScheduler(targetThreadCount int) {
  1064  	var wg sync.WaitGroup
  1065  	var count int32
  1066  	for i := 0; i < targetThreadCount; i++ {
  1067  		wg.Add(1)
  1068  		go func() {
  1069  			atomic.AddInt32(&count, 1)
  1070  			for atomic.LoadInt32(&count) < int32(targetThreadCount) {
  1071  				// spin until all threads started
  1072  			}
  1073  
  1074  			// spin a bit more to ensure they are all running on separate CPUs.
  1075  			doWork(Millisecond)
  1076  			wg.Done()
  1077  		}()
  1078  	}
  1079  	wg.Wait()
  1080  }
  1081  
  1082  func doWork(dur Duration) {
  1083  	start := Now()
  1084  	for Since(start) < dur {
  1085  	}
  1086  }
  1087  
  1088  func BenchmarkAdjustTimers10000(b *testing.B) {
  1089  	benchmark(b, func(pb *testing.PB) {
  1090  		for pb.Next() {
  1091  			const n = 10000
  1092  			timers := make([]*Timer, 0, n)
  1093  			for range n {
  1094  				t := AfterFunc(Hour, func() {})
  1095  				timers = append(timers, t)
  1096  			}
  1097  			timers[n-1].Reset(Nanosecond)
  1098  			Sleep(Microsecond)
  1099  			for _, t := range timers {
  1100  				t.Stop()
  1101  			}
  1102  		}
  1103  	})
  1104  }
  1105  
