// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

package runtime_test

import (
	"fmt"
	"math/bits"
	"math/rand"
	"os"
	"reflect"
	"runtime"
	"runtime/debug"
	"slices"
	"strings"
	"sync"
	"sync/atomic"
	"testing"
	"time"
	"unsafe"
)

func TestGcSys(t *testing.T) {
	t.Skip("skipping known-flaky test; golang.org/issue/37331")
	if os.Getenv("GOGC") == "off" {
		t.Skip("skipping test; GOGC=off in environment")
	}
	got := runTestProg(t, "testprog", "GCSys")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

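// TestGcDeepNesting checks that pointers stored at the bottom of a
// deeply nested array type survive a collection.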
func TestGcDeepNesting(t *testing.T) {
	type T [2][2][2][2][2][2][2][2][2][2]*int
	a := new(T)

	// Prevent the compiler from applying escape analysis.
	// This makes sure new(T) is allocated on the heap, not on the stack.
	t.Logf("%p", a)

	a[0][0][0][0][0][0][0][0][0][0] = new(int)
	*a[0][0][0][0][0][0][0][0][0][0] = 13
	runtime.GC()
	if *a[0][0][0][0][0][0][0][0][0][0] != 13 {
		t.Fail()
	}
}

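// TestGcMapIndirection repeatedly inserts large keys into a map with a
// low GOGC setting, exercising GC of indirectly stored map keys.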
func TestGcMapIndirection(t *testing.T) {
	defer debug.SetGCPercent(debug.SetGCPercent(1))
	runtime.GC()
	type T struct {
		a [256]int
	}
	m := make(map[T]T)
	for i := 0; i < 2000; i++ {
		var a T
		a.a[0] = i
		m[a] = T{}
	}
}

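// TestGcArraySlice checks that a slice referring to another object's
// array field keeps that data intact across collections.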
func TestGcArraySlice(t *testing.T) {
	type X struct {
		buf     [1]byte
		nextbuf []byte
		next    *X
	}
	var head *X
	for i := 0; i < 10; i++ {
		p := &X{}
		p.buf[0] = 42
		p.next = head
		if head != nil {
			p.nextbuf = head.buf[:]
		}
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.next {
		if p.buf[0] != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

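// TestGcRescan builds a list whose nodes are reachable both through an
// embedded struct and through an outer pointer, and checks that
// repeated collections do not corrupt reachable data.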
func TestGcRescan(t *testing.T) {
	type X struct {
		c     chan error
		nextx *X
	}
	type Y struct {
		X
		nexty *Y
		p     *int
	}
	var head *Y
	for i := 0; i < 10; i++ {
		p := &Y{}
		p.c = make(chan error)
		if head != nil {
			p.nextx = &head.X
		}
		p.nexty = head
		p.p = new(int)
		*p.p = 42
		head = p
		runtime.GC()
	}
	for p := head; p != nil; p = p.nexty {
		if *p.p != 42 {
			t.Fatal("corrupted heap")
		}
	}
}

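// TestGcLastTime checks that MemStats.LastGC falls within the window
// bracketing an explicit runtime.GC call.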
func TestGcLastTime(t *testing.T) {
	ms := new(runtime.MemStats)
	t0 := time.Now().UnixNano()
	runtime.GC()
	t1 := time.Now().UnixNano()
	runtime.ReadMemStats(ms)
	last := int64(ms.LastGC)
	if t0 > last || last > t1 {
		t.Fatalf("bad last GC time: got %v, want [%v, %v]", last, t0, t1)
	}
	pause := ms.PauseNs[(ms.NumGC+255)%256]
	// Due to timer granularity, pause can actually be 0 on Windows
	// or on virtualized environments.
	if pause == 0 {
		t.Logf("last GC pause was 0")
	} else if pause > 10e9 {
		t.Logf("bad last GC pause: got %v, want [0, 10e9]", pause)
	}
}

var hugeSink any

func TestHugeGCInfo(t *testing.T) {
	// The test ensures the compiler can emit GC info for huge types.
	// hugeSink is always nil at run time; the body only has to compile.
	if hugeSink != nil {
		// 400MB on 32-bit platforms, 4TB on 64-bit platforms.
		const n = (400 << 20) + (unsafe.Sizeof(uintptr(0))-4)<<40
		hugeSink = new([n]*byte)
		hugeSink = new([n]uintptr)
		hugeSink = new(struct {
			x float64
			y [n]*byte
			z []string
		})
		hugeSink = new(struct {
			x float64
			y [n]uintptr
			z []string
		})
	}
}

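// TestPeriodicGC checks that the runtime's periodic forced GC runs
// when the force-GC period elapses without other collections.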
func TestPeriodicGC(t *testing.T) {
	if runtime.GOARCH == "wasm" {
		t.Skip("no sysmon on wasm yet")
	}

	// Make sure we're not in the middle of a GC.
	runtime.GC()

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)

	// Make periodic GC run continuously.
	orig := *runtime.ForceGCPeriod
	*runtime.ForceGCPeriod = 0

	// Let some periodic GCs happen. In a heavily loaded system,
	// it's possible these will be delayed, so this is designed to
	// succeed quickly if things are working, but to give it some
	// slack if things are slow.
	var numGCs uint32
	const want = 2
	for i := 0; i < 200 && numGCs < want; i++ {
		time.Sleep(5 * time.Millisecond)

		// Test that periodic GC actually happened.
		runtime.ReadMemStats(&ms2)
		numGCs = ms2.NumGC - ms1.NumGC
	}
	*runtime.ForceGCPeriod = orig

	if numGCs < want {
		t.Fatalf("no periodic GC: got %v GCs, want >= 2", numGCs)
	}
}

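// TestGcZombieReporting checks that the GC reports pointers to freed
// objects ("zombies") in a subprocess built for that purpose.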
func TestGcZombieReporting(t *testing.T) {
	// This test is somewhat sensitive to how the allocator works.
	// Pointers in the zombies slice may cross spans, so run the
	// subprocess with invalidptr=0 to avoid tripping the bad-pointer
	// check before the zombie check reports the free object.
	got := runTestProg(t, "testprog", "GCZombie", "GODEBUG=invalidptr=0")
	want := "found pointer to free object"
	if !strings.Contains(got, want) {
		t.Fatalf("expected %q in output, but got %q", want, got)
	}
}

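// TestGCTestMoveStackOnNextCall checks that the runtime test hook
// actually moves the goroutine stack on the next function call.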
func TestGCTestMoveStackOnNextCall(t *testing.T) {
	t.Parallel()
	var onStack int
	// GCTestMoveStackOnNextCall can fail in rare cases if there's
	// a preemption. This won't happen many times in quick
	// succession, so just retry a few times.
	for retry := 0; retry < 5; retry++ {
		runtime.GCTestMoveStackOnNextCall()
		if moveStackCheck(t, &onStack, uintptr(unsafe.Pointer(&onStack))) {
			// Passed.
			return
		}
	}
	t.Fatal("stack did not move")
}

// This must not be inlined because the point is to force a stack
// growth check and move the stack.
//
//go:noinline
func moveStackCheck(t *testing.T, new *int, old uintptr) bool {
	// new should have been updated by the stack move to point to the
	// new location of the stack variable; old records where it was.
	//
	// Capture new's value now, before doing anything that could
	// further move the stack.
	new2 := uintptr(unsafe.Pointer(new))

	t.Logf("old stack pointer %x, new stack pointer %x", old, new2)
	if new2 == old {
		// Check that we didn't screw up the test's escape analysis.
		if cls := runtime.GCTestPointerClass(unsafe.Pointer(new)); cls != "stack" {
			t.Fatalf("test bug: new (%#x) should be a stack pointer, not %s", new2, cls)
		}
		// The stack did not move; report failure so the caller retries.
		return false
	}
	return true
}

func TestGCTestMoveStackRepeatedly(t *testing.T) {
	// Move the stack repeatedly to make sure we're not doubling
	// it each time.
	for i := 0; i < 100; i++ {
		runtime.GCTestMoveStackOnNextCall()
		moveStack1(false)
	}
}

//go:noinline
func moveStack1(x bool) {
	// Make sure this function doesn't get auto-nosplit.
	if x {
		println("x")
	}
}

func TestGCTestIsReachable(t *testing.T) {
	var all, half []unsafe.Pointer
	var want uint64
	for i := 0; i < 16; i++ {
		// The tiny allocator muddies things, so we use a
		// scannable type.
		p := unsafe.Pointer(new(*int))
		all = append(all, p)
		if i%2 == 0 {
			half = append(half, p)
			want |= 1 << i
		}
	}

	got := runtime.GCTestIsReachable(all...)
	if got&want != want {
		// This is a serious bug: an object is live (due to the
		// KeepAlive call below) but isn't reported as such.
		t.Fatalf("live object not in reachable set; want %b, got %b", want, got)
	}
	if bits.OnesCount64(got&^want) > 1 {
		// Conservative scanning of stack frames can occasionally
		// cause a dead object to be retained, so allow a slop of
		// one unintentionally retained object before declaring
		// this a failure.
		t.Fatalf("dead object in reachable set; want %b, got %b", want, got)
	}
	runtime.KeepAlive(half)
}

var pointerClassBSS *int
var pointerClassData = 42

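// TestGCTestPointerClass checks that the runtime correctly classifies
// pointers as stack, heap, bss, data, or other.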
func TestGCTestPointerClass(t *testing.T) {
	t.Parallel()
	check := func(p unsafe.Pointer, want string) {
		t.Helper()
		got := runtime.GCTestPointerClass(p)
		if got != want {
			// Convert the pointer to a uintptr to avoid
			// escaping it in the error message.
			t.Errorf("for %#x, want class %s, got %s", uintptr(p), want, got)
		}
	}
	var onStack int
	var notOnStack int
	check(unsafe.Pointer(&onStack), "stack")
	check(unsafe.Pointer(runtime.Escape(&notOnStack)), "heap")
	check(unsafe.Pointer(&pointerClassBSS), "bss")
	check(unsafe.Pointer(&pointerClassData), "data")
	check(nil, "other")
}

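// BenchmarkAllocation measures the throughput of small-object
// allocation across GOMAXPROCS goroutines.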
func BenchmarkAllocation(b *testing.B) {
	type T struct {
		x, y *byte
	}
	ngo := runtime.GOMAXPROCS(0)
	work := make(chan bool, b.N+ngo)
	result := make(chan *T)
	for i := 0; i < b.N; i++ {
		work <- true
	}
	for i := 0; i < ngo; i++ {
		work <- false
	}
	for i := 0; i < ngo; i++ {
		go func() {
			var x *T
			for <-work {
				for i := 0; i < 1000; i++ {
					x = &T{}
				}
			}
			result <- x
		}()
	}
	for i := 0; i < ngo; i++ {
		<-result
	}
}

func TestPrintGC(t *testing.T) {
	if testing.Short() {
		t.Skip("Skipping in short mode")
	}
	defer runtime.GOMAXPROCS(runtime.GOMAXPROCS(2))
	done := make(chan bool)
	go func() {
		for {
			select {
			case <-done:
				return
			default:
				runtime.GC()
			}
		}
	}()
	for i := 0; i < 1e4; i++ {
		func() {
			defer print("")
		}()
	}
	close(done)
}

func testTypeSwitch(x any) error {
	switch y := x.(type) {
	case nil:
		// ok
	case error:
		return y
	}
	return nil
}

func testAssert(x any) error {
	if y, ok := x.(error); ok {
		return y
	}
	return nil
}

func testAssertVar(x any) error {
	var y, ok = x.(error)
	if ok {
		return y
	}
	return nil
}

var a bool

//go:noinline
func testIfaceEqual(x any) {
	if x == "abc" {
		a = true
	}
}

func TestPageAccounting(t *testing.T) {
	// Grow the heap in 64 KB chunks so the runtime's running count
	// of pages in use is updated many times, then compare it with a
	// direct count of the pages in spans that are in use.
	const blockSize = 64 << 10
	blocks := make([]*[blockSize]byte, (64<<20)/blockSize)
	for i := range blocks {
		blocks[i] = new([blockSize]byte)
	}

	// Check that the running page count matches the direct count.
	pagesInUse, counted := runtime.CountPagesInUse()
	if pagesInUse != counted {
		t.Fatalf("mheap_.pagesInUse is %d, but direct count is %d", pagesInUse, counted)
	}
}

func init() {
	// Enable ReadMemStats' double-check mode.
	*runtime.DoubleCheckReadMemStats = true
}

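// TestReadMemStats cross-checks ReadMemStats against a slower path
// that recomputes the same statistics directly.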
func TestReadMemStats(t *testing.T) {
	base, slow := runtime.ReadMemStatsSlow()
	if base != slow {
		logDiff(t, "MemStats", reflect.ValueOf(base), reflect.ValueOf(slow))
		t.Fatal("memstats mismatch")
	}
}

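// logDiff recursively walks two values of the same type and logs any
// fields or elements that differ.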
func logDiff(t *testing.T, prefix string, got, want reflect.Value) {
	typ := got.Type()
	switch typ.Kind() {
	case reflect.Array, reflect.Slice:
		if got.Len() != want.Len() {
			t.Logf("len(%s): got %v, want %v", prefix, got, want)
			return
		}
		for i := 0; i < got.Len(); i++ {
			logDiff(t, fmt.Sprintf("%s[%d]", prefix, i), got.Index(i), want.Index(i))
		}
	case reflect.Struct:
		for i := 0; i < typ.NumField(); i++ {
			gf, wf := got.Field(i), want.Field(i)
			logDiff(t, prefix+"."+typ.Field(i).Name, gf, wf)
		}
	case reflect.Map:
		t.Fatal("not implemented: logDiff for map")
	default:
		if got.Interface() != want.Interface() {
			t.Logf("%s: got %v, want %v", prefix, got, want)
		}
	}
}

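// BenchmarkReadMemStats measures the cost of ReadMemStats with a
// roughly 100 MB live heap.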
func BenchmarkReadMemStats(b *testing.B) {
	var ms runtime.MemStats
	const heapSize = 100 << 20
	x := make([]*[1024]byte, heapSize/1024)
	for i := range x {
		x[i] = new([1024]byte)
	}

	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		runtime.ReadMemStats(&ms)
	}

	runtime.KeepAlive(x)
}

func applyGCLoad(b *testing.B) func() {
	// Apply load with maxProcs-1 goroutines and leave the remaining
	// one to run the benchmark itself. This requires more than one
	// P, so skip the benchmark on a single-processor configuration.
	maxProcs := runtime.GOMAXPROCS(-1)
	if maxProcs == 1 {
		b.Skip("This benchmark can only be run with GOMAXPROCS > 1")
	}

	// Set up the allocation workload: a 16-ary tree of pointerful
	// nodes, rebuilt continuously so there is always work for the GC.
	type node struct {
		children [16]*node
	}
	var buildTree func(depth int) *node
	buildTree = func(depth int) *node {
		tree := new(node)
		if depth != 0 {
			for i := range tree.children {
				tree.children[i] = buildTree(depth - 1)
			}
		}
		return tree
	}

	// Apply the load.
	done := make(chan struct{})
	var wg sync.WaitGroup
	for i := 0; i < maxProcs-1; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			var hold *node
		loop:
			for {
				hold = buildTree(5)
				select {
				case <-done:
					break loop
				default:
				}
			}
			runtime.KeepAlive(hold)
		}()
	}
	return func() {
		close(done)
		wg.Wait()
	}
}

func BenchmarkReadMemStatsLatency(b *testing.B) {
	stop := applyGCLoad(b)

	// Record the latency of each ReadMemStats call.
	latencies := make([]time.Duration, 0, 1024)

	// Hit ReadMemStats continuously and measure how long each
	// call takes under load.
	b.ResetTimer()
	var ms runtime.MemStats
	for i := 0; i < b.N; i++ {
		// Sleep for a bit, otherwise we're just going to keep
		// stopping the world and no one will get to do anything.
		time.Sleep(100 * time.Millisecond)
		start := time.Now()
		runtime.ReadMemStats(&ms)
		latencies = append(latencies, time.Since(start))
	}
	// Make sure to stop the timer before we wait! The load created
	// above is very heavyweight and not easy to stop, so we could
	// end up confusing the benchmarking framework for small b.N.
	b.StopTimer()
	stop()

	// Disable the default */op metrics: ns/op doesn't mean anything
	// here because it is an average, and the sleep in the loop above
	// skews it significantly.
	b.ReportMetric(0, "ns/op")
	b.ReportMetric(0, "B/op")
	b.ReportMetric(0, "allocs/op")

	// Sort the latencies and report percentiles.
	slices.Sort(latencies)
	b.ReportMetric(float64(latencies[len(latencies)*50/100]), "p50-ns")
	b.ReportMetric(float64(latencies[len(latencies)*90/100]), "p90-ns")
	b.ReportMetric(float64(latencies[len(latencies)*99/100]), "p99-ns")
}

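// TestUserForcedGC checks that runtime.GC forces a collection and that
// the collection is counted in NumForcedGC.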
func TestUserForcedGC(t *testing.T) {
	// Disable automatic GC so the only collection is the forced one below.
	defer debug.SetGCPercent(debug.SetGCPercent(-1))

	var ms1, ms2 runtime.MemStats
	runtime.ReadMemStats(&ms1)
	runtime.GC()
	runtime.ReadMemStats(&ms2)
	if ms1.NumGC == ms2.NumGC {
		t.Fatalf("runtime.GC() did not trigger GC")
	}
	if ms1.NumForcedGC == ms2.NumForcedGC {
		t.Fatalf("runtime.GC() was not accounted in NumForcedGC")
	}
}

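// writeBarrierBenchmark runs f while a background goroutine keeps the
// GC running continuously, so the write barrier stays enabled.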
func writeBarrierBenchmark(b *testing.B, f func()) {
	runtime.GC()
	var ms runtime.MemStats
	runtime.ReadMemStats(&ms)

	// Keep GC running continuously during the benchmark, which in
	// turn keeps the write barrier on continuously.
	var stop uint32
	done := make(chan bool)
	go func() {
		for atomic.LoadUint32(&stop) == 0 {
			runtime.GC()
		}
		close(done)
	}()
	defer func() {
		atomic.StoreUint32(&stop, 1)
		<-done
	}()

	b.ResetTimer()
	f()
	b.StopTimer()
}

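// BenchmarkWriteBarrier measures the cost of individual pointer writes
// while the write barrier is enabled.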
func BenchmarkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large tree both so the GC runs for a while and
	// so we have a data structure to manipulate the pointers of.
	type node struct {
		l, r *node
	}
	var wbRoots []*node
	var mkTree func(level int) *node
	mkTree = func(level int) *node {
		if level == 0 {
			return nil
		}
		n := &node{mkTree(level - 1), mkTree(level - 1)}
		if level == 10 {
			// Seed the GC with enough early pointers that it
			// has work to do beyond just the top of the tree.
			wbRoots = append(wbRoots, n)
		}
		return n
	}
	const depth = 22 // 2^22 nodes of 16 bytes each: 64 MB
	root := mkTree(22)

	writeBarrierBenchmark(b, func() {
		var stack [depth]*node
		tos := -1

		// There are two write barriers per iteration, so i += 2.
		for i := 0; i < b.N; i += 2 {
			if tos == -1 {
				stack[0] = root
				tos = 0
			}

			// Perform one step of reversing the tree.
			n := stack[tos]
			if n.l == nil {
				tos--
			} else {
				n.l, n.r = n.r, n.l
				stack[tos] = n.l
				stack[tos+1] = n.r
				tos++
			}

			if i%(1<<12) == 0 {
				// Avoid non-preemptible loops.
				runtime.Gosched()
			}
		}
	})

	runtime.KeepAlive(wbRoots)
}

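// BenchmarkBulkWriteBarrier measures the cost of the bulk write
// barrier taken by copy when moving many pointers at once.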
func BenchmarkBulkWriteBarrier(b *testing.B) {
	if runtime.GOMAXPROCS(-1) < 2 {
		// We don't want GC to take our time.
		b.Skip("need GOMAXPROCS >= 2")
	}

	// Construct a large set of objects we can copy around.
	const heapSize = 64 << 20
	type obj [16]*byte
	ptrs := make([]*obj, heapSize/unsafe.Sizeof(obj{}))
	for i := range ptrs {
		ptrs[i] = new(obj)
	}

	writeBarrierBenchmark(b, func() {
		const blockSize = 1024
		var pos int
		for i := 0; i < b.N; i += blockSize {
			// Rotate the block by one element.
			block := ptrs[pos : pos+blockSize]
			first := block[0]
			copy(block, block[1:])
			block[blockSize-1] = first

			pos += blockSize
			if pos+blockSize > len(ptrs) {
				pos = 0
			}

			runtime.Gosched()
		}
	})

	runtime.KeepAlive(ptrs)
}

func BenchmarkScanStackNoLocals(b *testing.B) {
	var ready sync.WaitGroup
	teardown := make(chan bool)
	for j := 0; j < 10; j++ {
		ready.Add(1)
		go func() {
			x := 100000
			countpwg(&x, &ready, teardown)
		}()
	}
	ready.Wait()
	b.ResetTimer()
	for i := 0; i < b.N; i++ {
		b.StartTimer()
		runtime.GC()
		runtime.GC()
		b.StopTimer()
	}
	close(teardown)
}

func BenchmarkMSpanCountAlloc(b *testing.B) {
	// Allocate one dummy mspan for the whole benchmark.
	s := runtime.AllocMSpan()
	defer runtime.FreeMSpan(s)

	// n is the number of bytes to benchmark against.
	// n must always be a multiple of 8, since gcBits is
	// always rounded up to 8 bytes.
	for _, n := range []int{8, 16, 32, 64, 128} {
		b.Run(fmt.Sprintf("bits=%d", n*8), func(b *testing.B) {
			// Initialize a new byte slice with pseudo-random data.
			bits := make([]byte, n)
			rand.Read(bits)

			b.ResetTimer()
			for i := 0; i < b.N; i++ {
				runtime.MSpanCountAlloc(s, bits)
			}
		})
	}
}

func countpwg(n *int, ready *sync.WaitGroup, teardown chan bool) {
	if *n == 0 {
		ready.Done()
		<-teardown
		return
	}
	*n--
	countpwg(n, ready, teardown)
}

func TestMemoryLimit(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimit")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMemoryLimitNoGCPercent(t *testing.T) {
	if testing.Short() {
		t.Skip("stress test that takes time to run")
	}
	if runtime.NumCPU() < 4 {
		t.Skip("want at least 4 CPUs for this test")
	}
	got := runTestProg(t, "testprog", "GCMemoryLimitNoGCPercent")
	want := "OK\n"
	if got != want {
		t.Fatalf("expected %q, but got %q", want, got)
	}
}

func TestMyGenericFunc(t *testing.T) {
	runtime.MyGenericFunc[int]()
}