Source file src/runtime/mprof.go (package runtime)
1 // Copyright 2009 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Malloc profiling.
6 // Patterned after tcmalloc's algorithms; shorter code.
7
8 package runtime
9
10 import (
11 "internal/abi"
12 "internal/goarch"
13 "internal/profilerecord"
14 "internal/runtime/atomic"
15 "runtime/internal/sys"
16 "unsafe"
17 )
18
19 // Locks protecting the profile data structures below.
20 var (
21 // profInsertLock protects changes to the start of all *bucket linked lists
22 profInsertLock mutex
23 // profBlockLock protects the contents of every blockRecord struct
24 profBlockLock mutex
25 // profMemActiveLock protects the active field of every memRecord struct
26 profMemActiveLock mutex
27 // profMemFutureLock is a set of locks that protect the respective elements
28 // of the future array of every memRecord struct
29 profMemFutureLock [len(memRecord{}.future)]mutex
30 )
31
32
33
34
35 const (
36 // profile types
37 memProfile bucketType = 1 + iota
38 blockProfile
39 mutexProfile
40
41 // size of bucket hash table
42 buckHashSize = 179999
43
44 // maxSkip is the largest skip value that callers of saveblockevent
45 // may pass. Extra space is needed because, with frame pointer
46 // unwinding, the stack is recorded as "physical" frames and inlined
47 // ("logical") frames are only expanded at some point after the stack
48 // has been collected, so skipping logical frames happens after the
49 // fact. This should be at least as large as the largest skip value
50 // used for profiling; otherwise stacks may be truncated
51 // inconsistently.
52 maxSkip = 5
53
54 // maxProfStackDepth is the highest valid value for debug.profstackdepth.
55 // It's used by bucket.stk to bound the stack slice.
56 maxProfStackDepth = 1024
57 maxProfStackDepth = 1024
58 )
59
60 type bucketType int
61
62 // A bucket holds per-call-stack profiling information.
63 //
64 // This struct defines the bucket header. It is followed in memory by
65 // the stack words (nstk uintptrs) and then the record data, either a
66 // memRecord or a blockRecord, depending on typ; see the mp and bp
67 // methods below.
68 //
69 // Buckets are looked up by hashing the call stack into the buckhash
70 // table; next chains buckets within a hash slot and allnext chains
71 // all buckets of the same profile type.
72 //
73 // With the exception of the next field, the fields of a bucket are
74 // immutable once created, so concurrent readers all see the same values.
75 type bucket struct {
76 _ sys.NotInHeap
77 next *bucket
78 allnext *bucket
79 typ bucketType
80 hash uintptr
81 size uintptr
82 nstk uintptr
83 }
84
85 // A memRecord is the bucket data for a bucket of type memProfile,
86 // part of the memory profile.
87 type memRecord struct {
88 // The profile for a given call stack is accumulated in stages so that
89 // the published numbers never show an allocation without also giving
90 // the garbage collector a chance to observe its free. Reporting
91 // allocations as soon as they happen would skew the profile toward
92 // objects that are about to be freed.
93 //
94 // Events are therefore written into one of the future cycles
95 // (allocations two GC cycles ahead of the published cycle, frees one
96 // cycle ahead; see mProf_Malloc and mProf_Free) and a future cycle is
97 // folded into active by mProf_FlushLocked only after a full mark and
98 // sweep has had a chance to record both sides of each allocation.
99 //
100 // active is the currently published profile; MemProfile and the
101 // runtime/pprof heap profile are built from active only.
132 active memRecordCycle
133 // future records the events for cycles that have not been published
134 // yet. mProf_Malloc adds to future[(cycle+2)%len(future)],
135 // mProf_Free adds to future[(cycle+1)%len(future)], and
136 // mProf_FlushLocked folds future[cycle%len(future)] into active
137 // once that cycle completes. Each element is protected by the
138 // corresponding profMemFutureLock.
144 future [3]memRecordCycle
145 }
146
147 // memRecordCycle is the totals for a single heap profile cycle.
148 type memRecordCycle struct {
149 allocs, frees uintptr
150 alloc_bytes, free_bytes uintptr
151 }
152
153 // add accumulates b into a. It does not zero b.
154 func (a *memRecordCycle) add(b *memRecordCycle) {
155 a.allocs += b.allocs
156 a.frees += b.frees
157 a.alloc_bytes += b.alloc_bytes
158 a.free_bytes += b.free_bytes
159 }
160
161 // A blockRecord is the bucket data for a bucket of type blockProfile,
162 // which is used in blocking and mutex profiles.
163 type blockRecord struct {
164 count float64
165 cycles int64
166 }
167
168 var (
169 mbuckets atomic.UnsafePointer
170 bbuckets atomic.UnsafePointer
171 xbuckets atomic.UnsafePointer
172 buckhash atomic.UnsafePointer
173
174 mProfCycle mProfCycleHolder
175 )
176
177 type buckhashArray [buckHashSize]atomic.UnsafePointer
178
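// mProfCycleWrap bounds the stored heap profile cycle count. It is a
// multiple of len(memRecord{}.future), so wrapping the count preserves the
// future-array index derived from it, and it is small enough that the count,
// shifted left by one to make room for the flushed bit, still fits in a uint32.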
179 const mProfCycleWrap = uint32(len(memRecord{}.future)) * (2 << 24)
180
181 // mProfCycleHolder holds the global heap profile cycle number (wrapped
182 // at mProfCycleWrap, stored starting at bit 1), and a flag (stored at
183 // bit 0) indicating whether the cycle has been flushed yet. Updates
184 // are lock-free, using CAS on the combined value.
185 type mProfCycleHolder struct {
186 value atomic.Uint32
187 }
188
189 // read returns the current cycle count.
190 func (c *mProfCycleHolder) read() (cycle uint32) {
191 v := c.value.Load()
192 cycle = v >> 1
193 return cycle
194 }
195
196 // setFlushed sets the flushed flag. It returns the current cycle count
197 // and the flag's old value.
198 func (c *mProfCycleHolder) setFlushed() (cycle uint32, alreadyFlushed bool) {
199 for {
200 prev := c.value.Load()
201 cycle = prev >> 1
202 alreadyFlushed = (prev & 0x1) != 0
203 next := prev | 0x1
204 if c.value.CompareAndSwap(prev, next) {
205 return cycle, alreadyFlushed
206 }
207 }
208 }
209
210 // increment increases the cycle count by one, wrapping the value at
211 // mProfCycleWrap. It clears the flushed flag.
212 func (c *mProfCycleHolder) increment() {
213 // The cycle count is wrapped explicitly at mProfCycleWrap, which is a
214 // multiple of len(memRecord{}.future), so the future-array index
215 // derived from the count stays continuous across the wrap.
216 for {
217 prev := c.value.Load()
218 cycle := prev >> 1
219 cycle = (cycle + 1) % mProfCycleWrap
220 next := cycle << 1
221 if c.value.CompareAndSwap(prev, next) {
222 break
223 }
224 }
225 }
226
227 // newBucket allocates a bucket with the given type and number of stack entries.
228 func newBucket(typ bucketType, nstk int) *bucket {
229 size := unsafe.Sizeof(bucket{}) + uintptr(nstk)*unsafe.Sizeof(uintptr(0))
230 switch typ {
231 default:
232 throw("invalid profile bucket type")
233 case memProfile:
234 size += unsafe.Sizeof(memRecord{})
235 case blockProfile, mutexProfile:
236 size += unsafe.Sizeof(blockRecord{})
237 }
238
239 b := (*bucket)(persistentalloc(size, 0, &memstats.buckhash_sys))
240 b.typ = typ
241 b.nstk = uintptr(nstk)
242 return b
243 }
244
245
246 // stk returns the slice in b holding the stack.
247 func (b *bucket) stk() []uintptr {
248 stk := (*[maxProfStackDepth]uintptr)(add(unsafe.Pointer(b), unsafe.Sizeof(*b)))
249 if b.nstk > maxProfStackDepth {
250
251 throw("bad profile stack count")
252 }
253 return stk[:b.nstk:b.nstk]
254 }
255
256 // mp returns the memRecord associated with the memProfile bucket b.
257 func (b *bucket) mp() *memRecord {
258 if b.typ != memProfile {
259 throw("bad use of bucket.mp")
260 }
261 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
262 return (*memRecord)(data)
263 }
264
265 // bp returns the blockRecord associated with the blockProfile bucket b.
266 func (b *bucket) bp() *blockRecord {
267 if b.typ != blockProfile && b.typ != mutexProfile {
268 throw("bad use of bucket.bp")
269 }
270 data := add(unsafe.Pointer(b), unsafe.Sizeof(*b)+b.nstk*unsafe.Sizeof(uintptr(0)))
271 return (*blockRecord)(data)
272 }
273
274 // stkbucket returns the bucket for stk, allocating a new bucket if needed.
275 func stkbucket(typ bucketType, size uintptr, stk []uintptr, alloc bool) *bucket {
276 bh := (*buckhashArray)(buckhash.Load())
277 if bh == nil {
278 lock(&profInsertLock)
279 // check again under the lock
280 bh = (*buckhashArray)(buckhash.Load())
281 if bh == nil {
282 bh = (*buckhashArray)(sysAlloc(unsafe.Sizeof(buckhashArray{}), &memstats.buckhash_sys))
283 if bh == nil {
284 throw("runtime: cannot allocate memory")
285 }
286 buckhash.StoreNoWB(unsafe.Pointer(bh))
287 }
288 unlock(&profInsertLock)
289 }
290
291 // Hash stack.
292 var h uintptr
293 for _, pc := range stk {
294 h += pc
295 h += h << 10
296 h ^= h >> 6
297 }
298
299 h += size
300 h += h << 10
301 h ^= h >> 6
302
303 h += h << 3
304 h ^= h >> 11
305
306 i := int(h % buckHashSize)
307
308 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
309 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
310 return b
311 }
312 }
313
314 if !alloc {
315 return nil
316 }
317
318 lock(&profInsertLock)
319
320 for b := (*bucket)(bh[i].Load()); b != nil; b = b.next {
321 if b.typ == typ && b.hash == h && b.size == size && eqslice(b.stk(), stk) {
322 unlock(&profInsertLock)
323 return b
324 }
325 }
326
327 // Create new bucket.
328 b := newBucket(typ, len(stk))
329 copy(b.stk(), stk)
330 b.hash = h
331 b.size = size
332
333 var allnext *atomic.UnsafePointer
334 if typ == memProfile {
335 allnext = &mbuckets
336 } else if typ == mutexProfile {
337 allnext = &xbuckets
338 } else {
339 allnext = &bbuckets
340 }
341
342 b.next = (*bucket)(bh[i].Load())
343 b.allnext = (*bucket)(allnext.Load())
344
345 bh[i].StoreNoWB(unsafe.Pointer(b))
346 allnext.StoreNoWB(unsafe.Pointer(b))
347
348 unlock(&profInsertLock)
349 return b
350 }
351
352 func eqslice(x, y []uintptr) bool {
353 if len(x) != len(y) {
354 return false
355 }
356 for i, xi := range x {
357 if xi != y[i] {
358 return false
359 }
360 }
361 return true
362 }
363
364 // mProf_NextCycle publishes the next heap profile cycle and creates a
365 // fresh heap profile cycle. This operation is fast and can be done
366 // during STW. The caller must call mProf_Flush before calling
367 // mProf_NextCycle again.
368 //
369 // This is called by mark termination during STW so allocations and
370 // frees after the world is started again count towards a new heap
371 // profiling cycle.
372 func mProf_NextCycle() {
373 mProfCycle.increment()
374 }
375
376
377 // mProf_Flush flushes the events from the current heap profiling
378 // cycle into the active profile. After this it is safe to start a new
379 // heap profiling cycle with mProf_NextCycle.
380 //
381 // This is called by GC after mark termination starts the world. In contrast
382 // with mProf_NextCycle, this is somewhat expensive, but safe to do concurrently.
383 func mProf_Flush() {
384 cycle, alreadyFlushed := mProfCycle.setFlushed()
385 if alreadyFlushed {
386 return
387 }
388
389 index := cycle % uint32(len(memRecord{}.future))
390 lock(&profMemActiveLock)
391 lock(&profMemFutureLock[index])
392 mProf_FlushLocked(index)
393 unlock(&profMemFutureLock[index])
394 unlock(&profMemActiveLock)
395 }
396
397 // mProf_FlushLocked flushes the events from the heap profiling cycle at
398 // index into the active profile. The caller must hold the lock for the
399 // active profile (profMemActiveLock) and the lock for the heap profiling
400 // cycle at index (profMemFutureLock[index]).
401 func mProf_FlushLocked(index uint32) {
402 assertLockHeld(&profMemActiveLock)
403 assertLockHeld(&profMemFutureLock[index])
404 head := (*bucket)(mbuckets.Load())
405 for b := head; b != nil; b = b.allnext {
406 mp := b.mp()
407
408
409
410 mpc := &mp.future[index]
411 mp.active.add(mpc)
412 *mpc = memRecordCycle{}
413 }
414 }
415
416 // mProf_PostSweep records that all sweep frees for this GC cycle have
417 // completed. This has the effect of publishing the heap profile snapshot
418 // as of the last mark termination without advancing the heap profile
419 // cycle.
420 func mProf_PostSweep() {
421 // Flush cycle C+1 to the active profile so everything as of the last
422 // mark termination becomes visible. *Don't* advance the cycle, since
423 // we're still accumulating allocs in cycle C+2, which have to become
424 // C+1 in the next mark termination and so on. (Here C is the currently
425 // published cycle read below.)
426 cycle := mProfCycle.read() + 1
427
428 index := cycle % uint32(len(memRecord{}.future))
429 lock(&profMemActiveLock)
430 lock(&profMemFutureLock[index])
431 mProf_FlushLocked(index)
432 unlock(&profMemFutureLock[index])
433 unlock(&profMemActiveLock)
434 }
435
436 // Called by malloc to record a profiled block.
437 func mProf_Malloc(mp *m, p unsafe.Pointer, size uintptr) {
438 if mp.profStack == nil {
439 // mp.profStack is nil if we happen to sample an allocation during the
440 // initialization of mp. This case is rare, so we just ignore such
441 // allocations. Change MemProfileRate to 1 if you need to reproduce such
442 // cases for testing purposes.
443 return
444 }
445
446
447 nstk := callers(4, mp.profStack[:debug.profstackdepth])
448 index := (mProfCycle.read() + 2) % uint32(len(memRecord{}.future))
449
450 b := stkbucket(memProfile, size, mp.profStack[:nstk], true)
451 mr := b.mp()
452 mpc := &mr.future[index]
453
454 lock(&profMemFutureLock[index])
455 mpc.allocs++
456 mpc.alloc_bytes += size
457 unlock(&profMemFutureLock[index])
458
459 // Setprofilebucket locks a bunch of other mutexes, so we call it outside of
460 // the profiler locks. This reduces potential contention and chances of
461 // deadlocks. Since the object must be alive during the call to
462 // mProf_Malloc, it's fine to do this non-atomically.
463 systemstack(func() {
464 setprofilebucket(p, b)
465 })
466 }
467
468 // Called when freeing a profiled block.
469 func mProf_Free(b *bucket, size uintptr) {
470 index := (mProfCycle.read() + 1) % uint32(len(memRecord{}.future))
471
472 mp := b.mp()
473 mpc := &mp.future[index]
474
475 lock(&profMemFutureLock[index])
476 mpc.frees++
477 mpc.free_bytes += size
478 unlock(&profMemFutureLock[index])
479 }
480
481 var blockprofilerate uint64
482
483 // SetBlockProfileRate controls the fraction of goroutine blocking events
484 // that are reported in the blocking profile. The profiler aims to sample
485 // an average of one blocking event per rate nanoseconds spent blocked.
486 //
487 // To include every blocking event in the profile, pass rate = 1.
488 // To turn off profiling entirely, pass rate <= 0.
489 func SetBlockProfileRate(rate int) {
490 var r int64
491 if rate <= 0 {
492 r = 0
493 } else if rate == 1 {
494 r = 1
495 } else {
496
497 r = int64(float64(rate) * float64(ticksPerSecond()) / (1000 * 1000 * 1000))
498 if r == 0 {
499 r = 1
500 }
501 }
502
503 atomic.Store64(&blockprofilerate, uint64(r))
504 }
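// A minimal usage sketch (not part of the original file; it assumes imports
// of runtime, runtime/pprof, and os, and the rate value is arbitrary):
//
//	runtime.SetBlockProfileRate(10_000) // sample ~1 blocking event per 10µs blocked
//	defer runtime.SetBlockProfileRate(0)
//	// ... run the workload, then write the accumulated profile:
//	pprof.Lookup("block").WriteTo(os.Stderr, 1)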
505
506 func blockevent(cycles int64, skip int) {
507 if cycles <= 0 {
508 cycles = 1
509 }
510
511 rate := int64(atomic.Load64(&blockprofilerate))
512 if blocksampled(cycles, rate) {
513 saveblockevent(cycles, rate, skip+1, blockProfile)
514 }
515 }
516
517 // blocksampled returns true for all events where cycles >= rate. Shorter
518 // events have a cycles/rate random chance of returning true.
519 func blocksampled(cycles, rate int64) bool {
520 if rate <= 0 || (rate > cycles && cheaprand64()%rate > cycles) {
521 return false
522 }
523 return true
524 }
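// Illustrative numbers (an example, not from the original source): with
// rate = 10000 cycles, an event that blocked for 2500 cycles passes
// blocksampled with probability roughly 2500/10000 = 25%; when it does,
// saveBlockEventStack below counts it with weight rate/cycles = 4 (and
// records rate cycles rather than cycles), so the profile's expected
// totals stay unbiased.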
525
526 // saveblockevent records a profile event of the type specified by which.
527 // cycles is the quantity associated with this event and rate is the sampling
528 // rate, used to adjust the cycles value in the manner determined by the
529 // profile type. skip is the number of frames to omit from the traceback,
530 // which is recorded from the stack of the goroutine associated with the
531 // current m (gp.m.curg), or from the current stack if there is no such
532 // goroutine.
533 func saveblockevent(cycles, rate int64, skip int, which bucketType) {
534 if debug.profstackdepth == 0 {
535 // The call stack buffers are only allocated when profstackdepth is
536 // nonzero, so there is nowhere to record a stack; drop the event.
537 return
538 }
539 if skip > maxSkip {
540 print("requested skip=", skip)
541 throw("invalid skip value")
542 }
543 gp := getg()
544 mp := acquirem()
545
546 var nstk int
547 if tracefpunwindoff() || gp.m.hasCgoOnStack() {
548 if gp.m.curg == nil || gp.m.curg == gp {
549 nstk = callers(skip, mp.profStack)
550 } else {
551 nstk = gcallers(gp.m.curg, skip, mp.profStack)
552 }
553 } else {
554 if gp.m.curg == nil || gp.m.curg == gp {
555 if skip > 0 {
556 // The skip value is calibrated for the callers-based path above. Frame
557 // pointer unwinding already starts one frame higher (at this function's
558 // caller, via the saved return address), so drop one from skip to
559 // compensate; the remaining frames are skipped during inline expansion
560 // in fpTracebackPartialExpand.
561 skip -= 1
562 }
563 nstk = fpTracebackPartialExpand(skip, unsafe.Pointer(getfp()), mp.profStack)
564 } else {
565 mp.profStack[0] = gp.m.curg.sched.pc
566 nstk = 1 + fpTracebackPartialExpand(skip, unsafe.Pointer(gp.m.curg.sched.bp), mp.profStack[1:])
567 }
568 }
569
570 saveBlockEventStack(cycles, rate, mp.profStack[:nstk], which)
571 releasem(mp)
572 }
573
574 // fpTracebackPartialExpand records a call stack obtained starting from fp.
575 // This function will skip the given number of frames, properly accounting for
576 // inlining, and save the remaining frames as "physical" return addresses. The
577 // consumer should later expand inline frames (e.g. via CallersFrames).
578 func fpTracebackPartialExpand(skip int, fp unsafe.Pointer, pcBuf []uintptr) int {
579 var n int
580 lastFuncID := abi.FuncIDNormal
581 skipOrAdd := func(retPC uintptr) bool {
582 if skip > 0 {
583 skip--
584 } else if n < len(pcBuf) {
585 pcBuf[n] = retPC
586 n++
587 }
588 return n < len(pcBuf)
589 }
590 for n < len(pcBuf) && fp != nil {
591
592 pc := *(*uintptr)(unsafe.Pointer(uintptr(fp) + goarch.PtrSize))
593
594 if skip > 0 {
595 callPC := pc - 1
596 fi := findfunc(callPC)
597 u, uf := newInlineUnwinder(fi, callPC)
598 for ; uf.valid(); uf = u.next(uf) {
599 sf := u.srcFunc(uf)
600 if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(lastFuncID) {
601
602 } else if more := skipOrAdd(uf.pc + 1); !more {
603 return n
604 }
605 lastFuncID = sf.funcID
606 }
607 } else {
608
609
610 pcBuf[n] = pc
611 n++
612 }
613
614
615 fp = unsafe.Pointer(*(*uintptr)(fp))
616 }
617 return n
618 }
619
620 // The machinery below implements the mutex profile for contention on
621 // runtime-internal locks (lock/unlock on runtime.mutex values), which
622 // cannot go through the user-level sync.Mutex path.
623 //
624 // lockTimer times an individual lock acquisition. It samples two
625 // different clocks at (possibly) two different rates: nanotime, at the
626 // lower of gTrackingPeriod and the mutex profile rate, feeding the
627 // runtime's reported total mutex wait time via mLockProfile.waitTime;
628 // and cputicks, at the mutex profile rate, feeding the mutex profile.
629 //
630 // mLockProfile buffers at most one pending contention event per M.
631 // recordLock chooses (by a duration-weighted random choice) which event
632 // to keep when several compete, recordUnlock captures the stack once the
633 // contended lock is released, and store flushes the event into the
634 // profile buckets when the M drops its last lock. Contention that cannot
635 // be attributed to a stack is reported against _LostContendedRuntimeLock.
678 type lockTimer struct {
679 lock *mutex
680 timeRate int64
681 timeStart int64
682 tickStart int64
683 }
684
685 func (lt *lockTimer) begin() {
686 rate := int64(atomic.Load64(&mutexprofilerate))
687
688 lt.timeRate = gTrackingPeriod
689 if rate != 0 && rate < lt.timeRate {
690 lt.timeRate = rate
691 }
692 if int64(cheaprand())%lt.timeRate == 0 {
693 lt.timeStart = nanotime()
694 }
695
696 if rate > 0 && int64(cheaprand())%rate == 0 {
697 lt.tickStart = cputicks()
698 }
699 }
700
701 func (lt *lockTimer) end() {
702 gp := getg()
703
704 if lt.timeStart != 0 {
705 nowTime := nanotime()
706 gp.m.mLockProfile.waitTime.Add((nowTime - lt.timeStart) * lt.timeRate)
707 }
708
709 if lt.tickStart != 0 {
710 nowTick := cputicks()
711 gp.m.mLockProfile.recordLock(nowTick-lt.tickStart, lt.lock)
712 }
713 }
714
715 type mLockProfile struct {
716 waitTime atomic.Int64
717 stack []uintptr
718 pending uintptr
719 cycles int64
720 cyclesLost int64
721 disabled bool
722 }
723
724 func (prof *mLockProfile) recordLock(cycles int64, l *mutex) {
725 if cycles <= 0 {
726 return
727 }
728
729 if prof.disabled {
730 // We're currently flushing this M's profile (see store), so new
731 // contention cannot be recorded without risking reentrancy; account
732 // for it as lost.
733 prof.cyclesLost += cycles
734 return
735 }
736
737 if uintptr(unsafe.Pointer(l)) == prof.pending {
738 // The same lock is already pending (possibly from a different
739 // critical section); fold the new contention into it.
740 prof.cycles += cycles
741 return
742 }
743
744 if prev := prof.cycles; prev > 0 {
745 // Only one stack can be pending per M. Use a duration-weighted random
746 // choice to decide whether the previously pending event or this one is
747 // kept; the loser's cycles are accounted as lost.
748 prevScore := uint64(cheaprand64()) % uint64(prev)
749 thisScore := uint64(cheaprand64()) % uint64(cycles)
750 if prevScore > thisScore {
751 prof.cyclesLost += cycles
752 return
753 } else {
754 prof.cyclesLost += prev
755 }
756 }
757
758
759
760
761 prof.pending = uintptr(unsafe.Pointer(l))
762 prof.cycles = cycles
763 }
764
765 // recordUnlock considers the lock being released: if it is the one whose
766 // contention is pending, the call stack is captured now, and once this M
767 // holds no other runtime locks the pending event is flushed to the profile.
768 func (prof *mLockProfile) recordUnlock(l *mutex) {
769 if uintptr(unsafe.Pointer(l)) == prof.pending {
770 prof.captureStack()
771 }
772 if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.cycles != 0 {
773 prof.store()
774 }
775 }
776
777 func (prof *mLockProfile) captureStack() {
778 if debug.profstackdepth == 0 {
779
780
781 return
782 }
783
784 skip := 3
785 if staticLockRanking {
786 // When static lock ranking is enabled, the unlock path runs through an
787 // extra wrapper function before reaching this code, so one additional
788 // frame has to be skipped to attribute the contention to the caller of
789 // unlock.
795 skip += 1
796 }
797 prof.pending = 0
798
799 prof.stack[0] = logicalStackSentinel
800 if debug.runtimeContentionStacks.Load() == 0 {
801 prof.stack[1] = abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum
802 prof.stack[2] = 0
803 return
804 }
805
806 var nstk int
807 gp := getg()
808 sp := getcallersp()
809 pc := getcallerpc()
810 systemstack(func() {
811 var u unwinder
812 u.initAt(pc, sp, 0, gp, unwindSilentErrors|unwindJumpStack)
813 nstk = 1 + tracebackPCs(&u, skip, prof.stack[1:])
814 })
815 if nstk < len(prof.stack) {
816 prof.stack[nstk] = 0
817 }
818 }
819
820 func (prof *mLockProfile) store() {
821 // Report any contention we experience within this function as "lost"; it's
822 // important that the act of reporting a contention event not lead to a
823 // reportable contention event. This also means we can use prof.stack
824 // without copying, since it won't change during this function.
825 mp := acquirem()
826 prof.disabled = true
827
828 nstk := int(debug.profstackdepth)
829 for i := 0; i < nstk; i++ {
830 if pc := prof.stack[i]; pc == 0 {
831 nstk = i
832 break
833 }
834 }
835
836 cycles, lost := prof.cycles, prof.cyclesLost
837 prof.cycles, prof.cyclesLost = 0, 0
838
839 rate := int64(atomic.Load64(&mutexprofilerate))
840 saveBlockEventStack(cycles, rate, prof.stack[:nstk], mutexProfile)
841 if lost > 0 {
842 lostStk := [...]uintptr{
843 logicalStackSentinel,
844 abi.FuncPCABIInternal(_LostContendedRuntimeLock) + sys.PCQuantum,
845 }
846 saveBlockEventStack(lost, rate, lostStk[:], mutexProfile)
847 }
848
849 prof.disabled = false
850 releasem(mp)
851 }
852
853 func saveBlockEventStack(cycles, rate int64, stk []uintptr, which bucketType) {
854 b := stkbucket(which, 0, stk, true)
855 bp := b.bp()
856
857 lock(&profBlockLock)
858 // We want to up-scale the count and cycles according to the
859 // probability that the event was sampled. For block profile events,
860 // the sample probability is 1 if cycles >= rate, and cycles / rate
861 // otherwise. For mutex profile events, the sample probability is 1 / rate.
862 // We scale the events by 1 / (probability the event was sampled).
863 if which == blockProfile && cycles < rate {
864 // cycles < rate: scale the event up by rate/cycles to undo the sampling probability.
865 bp.count += float64(rate) / float64(cycles)
866 bp.cycles += rate
867 } else if which == mutexProfile {
868 bp.count += float64(rate)
869 bp.cycles += rate * cycles
870 } else {
871 bp.count++
872 bp.cycles += cycles
873 }
874 unlock(&profBlockLock)
875 }
876
877 var mutexprofilerate uint64
878
879 // SetMutexProfileFraction controls the fraction of mutex contention events
880 // that are reported in the mutex profile. On average 1/rate events are
881 // reported. The previous rate is returned.
882 //
883 // To turn off profiling entirely, pass rate 0.
884 // To just read the current rate, pass rate < 0.
885 // (For n>1 the details of sampling may change.)
886 func SetMutexProfileFraction(rate int) int {
887 if rate < 0 {
888 return int(mutexprofilerate)
889 }
890 old := mutexprofilerate
891 atomic.Store64(&mutexprofilerate, uint64(rate))
892 return int(old)
893 }
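// A minimal usage sketch (not part of the original file; it assumes imports
// of runtime, runtime/pprof, and os, and the fraction of 5 is arbitrary):
//
//	prev := runtime.SetMutexProfileFraction(5) // report ~1 in 5 contention events
//	defer runtime.SetMutexProfileFraction(prev)
//	// ... run the workload, then write the accumulated profile:
//	pprof.Lookup("mutex").WriteTo(os.Stderr, 1)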
894
895
896 func mutexevent(cycles int64, skip int) {
897 if cycles < 0 {
898 cycles = 0
899 }
900 rate := int64(atomic.Load64(&mutexprofilerate))
901 if rate > 0 && cheaprand64()%rate == 0 {
902 saveblockevent(cycles, rate, skip+1, mutexProfile)
903 }
904 }
905
906
907
908 // A StackRecord describes a single execution stack.
909 type StackRecord struct {
910 Stack0 [32]uintptr
911 }
912
913 // Stack returns the stack trace associated with the record,
914 // a prefix of r.Stack0.
915 func (r *StackRecord) Stack() []uintptr {
916 for i, v := range r.Stack0 {
917 if v == 0 {
918 return r.Stack0[0:i]
919 }
920 }
921 return r.Stack0[0:]
922 }
923
924 // MemProfileRate controls the fraction of memory allocations
925 // that are recorded and reported in the memory profile.
926 // The profiler aims to sample an average of
927 // one allocation per MemProfileRate bytes allocated.
928 //
929 // To include every allocated block in the profile, set MemProfileRate to 1.
930 // To turn off profiling entirely, set MemProfileRate to 0.
931 //
932 // The tools that process the memory profiles assume that the
933 // profile rate is constant across the lifetime of the program
934 // and equal to the current value. Programs that change the
935 // memory profiling rate should do so just once, as early as
936 // possible in the execution of the program (for example,
937 // at the beginning of main).
938 var MemProfileRate int = 512 * 1024
939
940
941 // disableMemoryProfiling is set by the linker if runtime.MemProfile
942 // is not used and the link type guarantees nobody else could use it
943 // elsewhere.
944 var disableMemoryProfiling bool
945
946 // A MemProfileRecord describes the live objects allocated
947 // by a particular call sequence (stack trace).
948 type MemProfileRecord struct {
949 AllocBytes, FreeBytes int64
950 AllocObjects, FreeObjects int64
951 Stack0 [32]uintptr
952 }
953
954 // InUseBytes returns the number of bytes in use (AllocBytes - FreeBytes).
955 func (r *MemProfileRecord) InUseBytes() int64 { return r.AllocBytes - r.FreeBytes }
956
957 // InUseObjects returns the number of objects in use (AllocObjects - FreeObjects).
958 func (r *MemProfileRecord) InUseObjects() int64 {
959 return r.AllocObjects - r.FreeObjects
960 }
961
962 // Stack returns the stack trace associated with the record,
963 // a prefix of r.Stack0.
964 func (r *MemProfileRecord) Stack() []uintptr {
965 for i, v := range r.Stack0 {
966 if v == 0 {
967 return r.Stack0[0:i]
968 }
969 }
970 return r.Stack0[0:]
971 }
972
973 // MemProfile returns a profile of memory allocated and freed per allocation
974 // site.
975 //
976 // MemProfile returns n, the number of records in the current memory profile.
977 // If len(p) >= n, MemProfile copies the profile into p and returns n, true.
978 // If len(p) < n, MemProfile does not change p and returns n, false.
979 //
980 // If inuseZero is true, the profile includes allocation records
981 // where r.AllocBytes > 0 but r.AllocBytes == r.FreeBytes.
982 // These are sites where memory was allocated, but it has all
983 // been released back to the runtime.
984 //
985 // The returned profile may be up to two garbage collection cycles old.
986 // This is to avoid skewing the profile toward allocations; because
987 // allocations happen in real time but frees are delayed until the garbage
988 // collector performs sweeping, the profile only accounts for allocations
989 // that have had a chance to be freed by the garbage collector.
990 //
991 // Most clients should use the runtime/pprof package or
992 // the testing package's -test.memprofile flag instead
993 // of calling MemProfile directly.
994 func MemProfile(p []MemProfileRecord, inuseZero bool) (n int, ok bool) {
995 return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
996 copyMemProfileRecord(&p[0], r)
997 p = p[1:]
998 })
999 }
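// A sketch of calling MemProfile directly (not part of the original file;
// assumes imports of runtime and fmt; most programs should use runtime/pprof
// instead). The loop grows the slice until one call captures every record:
//
//	var rec []runtime.MemProfileRecord
//	for {
//		n, ok := runtime.MemProfile(rec, false)
//		if ok {
//			rec = rec[:n]
//			break
//		}
//		rec = make([]runtime.MemProfileRecord, n+50) // headroom for new records
//	}
//	for _, r := range rec {
//		fmt.Printf("%d bytes in use at %v\n", r.InUseBytes(), r.Stack())
//	}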
1000
1001
1002 // memProfileInternal returns the number of records n in the profile. If
1003 // there are at most size records, copyFn is invoked for each record and
1004 // ok returns true.
1011 func memProfileInternal(size int, inuseZero bool, copyFn func(profilerecord.MemProfileRecord)) (n int, ok bool) {
1012 cycle := mProfCycle.read()
1013 // If we're between mProf_NextCycle and mProf_Flush, take care
1014 // of flushing to the active profile so we only have to look
1015 // at the active profile below.
1016 index := cycle % uint32(len(memRecord{}.future))
1017 lock(&profMemActiveLock)
1018 lock(&profMemFutureLock[index])
1019 mProf_FlushLocked(index)
1020 unlock(&profMemFutureLock[index])
1021 clear := true
1022 head := (*bucket)(mbuckets.Load())
1023 for b := head; b != nil; b = b.allnext {
1024 mp := b.mp()
1025 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
1026 n++
1027 }
1028 if mp.active.allocs != 0 || mp.active.frees != 0 {
1029 clear = false
1030 }
1031 }
1032 if clear {
1033 // Absolutely no data means the profile has never been flushed
1034 // (e.g. no GC has completed yet). Flush every future cycle into
1035 // active so the caller still sees the allocations made so far,
1036 // then re-count the records.
1037 n = 0
1038 for b := head; b != nil; b = b.allnext {
1039 mp := b.mp()
1040 for c := range mp.future {
1041 lock(&profMemFutureLock[c])
1042 mp.active.add(&mp.future[c])
1043 mp.future[c] = memRecordCycle{}
1044 unlock(&profMemFutureLock[c])
1045 }
1046 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
1047 n++
1048 }
1049 }
1050 }
1051 if n <= size {
1052 ok = true
1053 for b := head; b != nil; b = b.allnext {
1054 mp := b.mp()
1055 if inuseZero || mp.active.alloc_bytes != mp.active.free_bytes {
1056 r := profilerecord.MemProfileRecord{
1057 AllocBytes: int64(mp.active.alloc_bytes),
1058 FreeBytes: int64(mp.active.free_bytes),
1059 AllocObjects: int64(mp.active.allocs),
1060 FreeObjects: int64(mp.active.frees),
1061 Stack: b.stk(),
1062 }
1063 copyFn(r)
1064 }
1065 }
1066 }
1067 unlock(&profMemActiveLock)
1068 return
1069 }
1070
1071 func copyMemProfileRecord(dst *MemProfileRecord, src profilerecord.MemProfileRecord) {
1072 dst.AllocBytes = src.AllocBytes
1073 dst.FreeBytes = src.FreeBytes
1074 dst.AllocObjects = src.AllocObjects
1075 dst.FreeObjects = src.FreeObjects
1076 if raceenabled {
1077 racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(MemProfile))
1078 }
1079 if msanenabled {
1080 msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
1081 }
1082 if asanenabled {
1083 asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
1084 }
1085 i := copy(dst.Stack0[:], src.Stack)
1086 clear(dst.Stack0[i:])
1087 }
1088
1089
1090 func pprof_memProfileInternal(p []profilerecord.MemProfileRecord, inuseZero bool) (n int, ok bool) {
1091 return memProfileInternal(len(p), inuseZero, func(r profilerecord.MemProfileRecord) {
1092 p[0] = r
1093 p = p[1:]
1094 })
1095 }
1096
1097 func iterate_memprof(fn func(*bucket, uintptr, *uintptr, uintptr, uintptr, uintptr)) {
1098 lock(&profMemActiveLock)
1099 head := (*bucket)(mbuckets.Load())
1100 for b := head; b != nil; b = b.allnext {
1101 mp := b.mp()
1102 fn(b, b.nstk, &b.stk()[0], b.size, mp.active.allocs, mp.active.frees)
1103 }
1104 unlock(&profMemActiveLock)
1105 }
1106
1107 // BlockProfileRecord describes blocking events originated
1108 // at a particular call sequence (stack trace).
1109 type BlockProfileRecord struct {
1110 Count int64
1111 Cycles int64
1112 StackRecord
1113 }
1114
1115 // BlockProfile returns n, the number of records in the current blocking profile.
1116 // If len(p) >= n, BlockProfile copies the profile into p and returns n, true.
1117 // If len(p) < n, BlockProfile does not change p and returns n, false.
1118 //
1119 // Most clients should use the runtime/pprof package or
1120 // the testing package's -test.blockprofile flag instead
1121 // of calling BlockProfile directly.
1122 func BlockProfile(p []BlockProfileRecord) (n int, ok bool) {
1123 var m int
1124 n, ok = blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
1125 copyBlockProfileRecord(&p[m], r)
1126 m++
1127 })
1128 if ok {
1129 expandFrames(p[:n])
1130 }
1131 return
1132 }
1133
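// expandFrames rewrites each stack in p, which may still be in the compressed
// form produced by frame pointer unwinding (inlined calls folded into a single
// physical frame), into fully expanded logical frames via CallersFrames,
// re-encoding the result into the fixed-size Stack0 array.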
1134 func expandFrames(p []BlockProfileRecord) {
1135 expandedStack := makeProfStack()
1136 for i := range p {
1137 cf := CallersFrames(p[i].Stack())
1138 j := 0
1139 for j < len(expandedStack) {
1140 f, more := cf.Next()
1141 // f.PC is the address of the call instruction; store PC+1 so that
1142 // consumers expecting return addresses (which subtract 1 before
1143 // symbolizing) resolve back to the same call.
1143 expandedStack[j] = f.PC + 1
1144 j++
1145 if !more {
1146 break
1147 }
1148 }
1149 k := copy(p[i].Stack0[:], expandedStack[:j])
1150 clear(p[i].Stack0[k:])
1151 }
1152 }
1153
1154 // blockProfileInternal returns the number of records n in the profile. If
1155 // there are at most size records, copyFn is invoked for each record, and
1156 // ok returns true.
1157 func blockProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
1158 lock(&profBlockLock)
1159 head := (*bucket)(bbuckets.Load())
1160 for b := head; b != nil; b = b.allnext {
1161 n++
1162 }
1163 if n <= size {
1164 ok = true
1165 for b := head; b != nil; b = b.allnext {
1166 bp := b.bp()
1167 r := profilerecord.BlockProfileRecord{
1168 Count: int64(bp.count),
1169 Cycles: bp.cycles,
1170 Stack: b.stk(),
1171 }
1172
1173
1174 if r.Count == 0 {
1175 r.Count = 1
1176 }
1177 copyFn(r)
1178 }
1179 }
1180 unlock(&profBlockLock)
1181 return
1182 }
1183
1184 // copyBlockProfileRecord copies the sample values and call stack from
1185 // src to dst. The call stack is copied as-is; the caller is responsible
1186 // for expanding inline frames as needed (see expandFrames).
1187 func copyBlockProfileRecord(dst *BlockProfileRecord, src profilerecord.BlockProfileRecord) {
1188 dst.Count = src.Count
1189 dst.Cycles = src.Cycles
1190 if raceenabled {
1191 racewriterangepc(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0), getcallerpc(), abi.FuncPCABIInternal(BlockProfile))
1192 }
1193 if msanenabled {
1194 msanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
1195 }
1196 if asanenabled {
1197 asanwrite(unsafe.Pointer(&dst.Stack0[0]), unsafe.Sizeof(dst.Stack0))
1198 }
1199 // The call stack is copied as-is; if it was collected with frame
1200 // pointer unwinding it may still be in compressed form (inlined
1201 // calls not yet expanded). BlockProfile and MutexProfile expand
1202 // inline frames afterwards, in a batch, via expandFrames.
1203 i := copy(dst.Stack0[:], src.Stack)
1204 clear(dst.Stack0[i:])
1205 }
1206
1207
1208 func pprof_blockProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
1209 return blockProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
1210 p[0] = r
1211 p = p[1:]
1212 })
1213 }
1214
1215 // MutexProfile returns n, the number of records in the current mutex profile.
1216 // If len(p) >= n, MutexProfile copies the profile into p and returns n, true.
1217 // Otherwise, MutexProfile does not change p, and returns n, false.
1218 //
1219 // Most clients should use the runtime/pprof package
1220 // instead of calling MutexProfile directly.
1221 func MutexProfile(p []BlockProfileRecord) (n int, ok bool) {
1222 var m int
1223 n, ok = mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
1224 copyBlockProfileRecord(&p[m], r)
1225 m++
1226 })
1227 if ok {
1228 expandFrames(p[:n])
1229 }
1230 return
1231 }
1232
1233 // mutexProfileInternal returns the number of records n in the profile. If
1234 // there are at most size records, copyFn is invoked for each record, and
1235 // ok returns true.
1236 func mutexProfileInternal(size int, copyFn func(profilerecord.BlockProfileRecord)) (n int, ok bool) {
1237 lock(&profBlockLock)
1238 head := (*bucket)(xbuckets.Load())
1239 for b := head; b != nil; b = b.allnext {
1240 n++
1241 }
1242 if n <= size {
1243 ok = true
1244 for b := head; b != nil; b = b.allnext {
1245 bp := b.bp()
1246 r := profilerecord.BlockProfileRecord{
1247 Count: int64(bp.count),
1248 Cycles: bp.cycles,
1249 Stack: b.stk(),
1250 }
1251 copyFn(r)
1252 }
1253 }
1254 unlock(&profBlockLock)
1255 return
1256 }
1257
1258
1259 func pprof_mutexProfileInternal(p []profilerecord.BlockProfileRecord) (n int, ok bool) {
1260 return mutexProfileInternal(len(p), func(r profilerecord.BlockProfileRecord) {
1261 p[0] = r
1262 p = p[1:]
1263 })
1264 }
1265
1266 // ThreadCreateProfile returns n, the number of records in the thread creation profile.
1267 // If len(p) >= n, ThreadCreateProfile copies the profile into p and returns n, true.
1268 // If len(p) < n, ThreadCreateProfile does not change p and returns n, false.
1269 //
1270 // Most clients should use the runtime/pprof package instead
1271 // of calling ThreadCreateProfile directly.
1272 func ThreadCreateProfile(p []StackRecord) (n int, ok bool) {
1273 return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
1274 i := copy(p[0].Stack0[:], r.Stack)
1275 clear(p[0].Stack0[i:])
1276 p = p[1:]
1277 })
1278 }
1279
1280 // threadCreateProfileInternal returns the number of records n in the profile.
1281 // If there are at most size records, copyFn is invoked for each record, and
1282 // ok returns true.
1283 func threadCreateProfileInternal(size int, copyFn func(profilerecord.StackRecord)) (n int, ok bool) {
1284 first := (*m)(atomic.Loadp(unsafe.Pointer(&allm)))
1285 for mp := first; mp != nil; mp = mp.alllink {
1286 n++
1287 }
1288 if n <= size {
1289 ok = true
1290 for mp := first; mp != nil; mp = mp.alllink {
1291 r := profilerecord.StackRecord{Stack: mp.createstack[:]}
1292 copyFn(r)
1293 }
1294 }
1295 return
1296 }
1297
1298
1299 func pprof_threadCreateInternal(p []profilerecord.StackRecord) (n int, ok bool) {
1300 return threadCreateProfileInternal(len(p), func(r profilerecord.StackRecord) {
1301 p[0] = r
1302 p = p[1:]
1303 })
1304 }
1305
1306
1307 func pprof_goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1308 return goroutineProfileWithLabels(p, labels)
1309 }
1310
1311 // labels may be nil. If labels is non-nil, it must have the same length as p.
1312 func goroutineProfileWithLabels(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1313 if labels != nil && len(labels) != len(p) {
1314 labels = nil
1315 }
1316
1317 return goroutineProfileWithLabelsConcurrent(p, labels)
1318 }
1319
1320 var goroutineProfile = struct {
1321 sema uint32
1322 active bool
1323 offset atomic.Int64
1324 records []profilerecord.StackRecord
1325 labels []unsafe.Pointer
1326 }{
1327 sema: 1,
1328 }
1329
1330 // goroutineProfileState tracks, per goroutine, how far along the current
1331 // goroutine profile is in recording that goroutine's stack. The profiler
1332 // stops the world, records its own stack and marks every other goroutine
1333 // as "absent", and then restarts the world. Any goroutine that is about
1334 // to run (or that the profiler visits directly) is first moved from Absent
1335 // to InProgress and then to Satisfied, so each stack is recorded exactly
1336 // once and reflects the moment of the stop-the-world, even though most of
1337 // the recording happens concurrently with the program.
1341 type goroutineProfileState uint32
1342
1343 const (
1344 goroutineProfileAbsent goroutineProfileState = iota
1345 goroutineProfileInProgress
1346 goroutineProfileSatisfied
1347 )
1348
1349 type goroutineProfileStateHolder atomic.Uint32
1350
1351 func (p *goroutineProfileStateHolder) Load() goroutineProfileState {
1352 return goroutineProfileState((*atomic.Uint32)(p).Load())
1353 }
1354
1355 func (p *goroutineProfileStateHolder) Store(value goroutineProfileState) {
1356 (*atomic.Uint32)(p).Store(uint32(value))
1357 }
1358
1359 func (p *goroutineProfileStateHolder) CompareAndSwap(old, new goroutineProfileState) bool {
1360 return (*atomic.Uint32)(p).CompareAndSwap(uint32(old), uint32(new))
1361 }
1362
1363 func goroutineProfileWithLabelsConcurrent(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1364 if len(p) == 0 {
1365 // An empty slice can't hold any records; skip the stop-the-world
1366 // work entirely and just report how many goroutines there are, so
1367 // callers can size their slice and try again.
1369 return int(gcount()), false
1370 }
1371
1372 semacquire(&goroutineProfile.sema)
1373
1374 ourg := getg()
1375
1376 pcbuf := makeProfStack()
1377 stw := stopTheWorld(stwGoroutineProfile)
1378
1379
1380
1381
1382
1383
1384
1385 n = int(gcount())
1386 if fingStatus.Load()&fingRunningFinalizer != 0 {
1387 n++
1388 }
1389
1390 if n > len(p) {
1391
1392
1393
1394 startTheWorld(stw)
1395 semrelease(&goroutineProfile.sema)
1396 return n, false
1397 }
1398
1399
1400 sp := getcallersp()
1401 pc := getcallerpc()
1402 systemstack(func() {
1403 saveg(pc, sp, ourg, &p[0], pcbuf)
1404 })
1405 if labels != nil {
1406 labels[0] = ourg.labels
1407 }
1408 ourg.goroutineProfiled.Store(goroutineProfileSatisfied)
1409 goroutineProfile.offset.Store(1)
1410
1411
1412
1413
1414
1415
1416 goroutineProfile.active = true
1417 goroutineProfile.records = p
1418 goroutineProfile.labels = labels
1419
1420
1421
1422 if fing != nil {
1423 fing.goroutineProfiled.Store(goroutineProfileSatisfied)
1424 if readgstatus(fing) != _Gdead && !isSystemGoroutine(fing, false) {
1425 doRecordGoroutineProfile(fing, pcbuf)
1426 }
1427 }
1428 startTheWorld(stw)
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441 forEachGRace(func(gp1 *g) {
1442 tryRecordGoroutineProfile(gp1, pcbuf, Gosched)
1443 })
1444
1445 stw = stopTheWorld(stwGoroutineProfileCleanup)
1446 endOffset := goroutineProfile.offset.Swap(0)
1447 goroutineProfile.active = false
1448 goroutineProfile.records = nil
1449 goroutineProfile.labels = nil
1450 startTheWorld(stw)
1451
1452
1453
1454 forEachGRace(func(gp1 *g) {
1455 gp1.goroutineProfiled.Store(goroutineProfileAbsent)
1456 })
1457
1458 if raceenabled {
1459 raceacquire(unsafe.Pointer(&labelSync))
1460 }
1461
1462 if n != int(endOffset) {
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472 }
1473
1474 semrelease(&goroutineProfile.sema)
1475 return n, true
1476 }
1477
1478
1479
1480
1481
1482 func tryRecordGoroutineProfileWB(gp1 *g) {
1483 if getg().m.p.ptr() == nil {
1484 throw("no P available, write barriers are forbidden")
1485 }
1486 tryRecordGoroutineProfile(gp1, nil, osyield)
1487 }
1488
1489 // tryRecordGoroutineProfile ensures that gp1 has the appropriate
1490 // representation in the current goroutine profile: either that it should
1491 // not be profiled, or that a snapshot of its call stack and labels is now in the profile.
1492 func tryRecordGoroutineProfile(gp1 *g, pcbuf []uintptr, yield func()) {
1493 if readgstatus(gp1) == _Gdead {
1494
1495
1496
1497
1498 return
1499 }
1500 if isSystemGoroutine(gp1, true) {
1501
1502
1503 return
1504 }
1505
1506 for {
1507 prev := gp1.goroutineProfiled.Load()
1508 if prev == goroutineProfileSatisfied {
1509
1510
1511 break
1512 }
1513 if prev == goroutineProfileInProgress {
1514
1515
1516 yield()
1517 continue
1518 }
1519
1520
1521
1522
1523
1524
1525 mp := acquirem()
1526 if gp1.goroutineProfiled.CompareAndSwap(goroutineProfileAbsent, goroutineProfileInProgress) {
1527 doRecordGoroutineProfile(gp1, pcbuf)
1528 gp1.goroutineProfiled.Store(goroutineProfileSatisfied)
1529 }
1530 releasem(mp)
1531 }
1532 }
1533
1534 // doRecordGoroutineProfile writes gp1's call stack and labels into the
1535 // in-progress goroutine profile. gp1 must not be running: it is either
1536 // examined while the world is stopped, or its goroutineProfiled state
1537 // has been moved to InProgress so it will not be resumed until the
1538 // record is complete.
1541 func doRecordGoroutineProfile(gp1 *g, pcbuf []uintptr) {
1542 if readgstatus(gp1) == _Grunning {
1543 print("doRecordGoroutineProfile gp1=", gp1.goid, "\n")
1544 throw("cannot read stack of running goroutine")
1545 }
1546
1547 offset := int(goroutineProfile.offset.Add(1)) - 1
1548
1549 if offset >= len(goroutineProfile.records) {
1550
1551
1552
1553 return
1554 }
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &goroutineProfile.records[offset], pcbuf) })
1565
1566 if goroutineProfile.labels != nil {
1567 goroutineProfile.labels[offset] = gp1.labels
1568 }
1569 }
1570
1571 func goroutineProfileWithLabelsSync(p []profilerecord.StackRecord, labels []unsafe.Pointer) (n int, ok bool) {
1572 gp := getg()
1573
1574 isOK := func(gp1 *g) bool {
1575
1576
1577 return gp1 != gp && readgstatus(gp1) != _Gdead && !isSystemGoroutine(gp1, false)
1578 }
1579
1580 pcbuf := makeProfStack()
1581 stw := stopTheWorld(stwGoroutineProfile)
1582
1583
1584 n = 1
1585 forEachGRace(func(gp1 *g) {
1586 if isOK(gp1) {
1587 n++
1588 }
1589 })
1590
1591 if n <= len(p) {
1592 ok = true
1593 r, lbl := p, labels
1594
1595
1596 sp := getcallersp()
1597 pc := getcallerpc()
1598 systemstack(func() {
1599 saveg(pc, sp, gp, &r[0], pcbuf)
1600 })
1601 r = r[1:]
1602
1603
1604 if labels != nil {
1605 lbl[0] = gp.labels
1606 lbl = lbl[1:]
1607 }
1608
1609
1610 forEachGRace(func(gp1 *g) {
1611 if !isOK(gp1) {
1612 return
1613 }
1614
1615 if len(r) == 0 {
1616
1617
1618 return
1619 }
1620
1621
1622
1623
1624 systemstack(func() { saveg(^uintptr(0), ^uintptr(0), gp1, &r[0], pcbuf) })
1625 if labels != nil {
1626 lbl[0] = gp1.labels
1627 lbl = lbl[1:]
1628 }
1629 r = r[1:]
1630 })
1631 }
1632
1633 if raceenabled {
1634 raceacquire(unsafe.Pointer(&labelSync))
1635 }
1636
1637 startTheWorld(stw)
1638 return n, ok
1639 }
1640
1641 // GoroutineProfile returns n, the number of records in the active goroutine stack profile.
1642 // If len(p) >= n, GoroutineProfile copies the profile into p and returns n, true.
1643 // If len(p) < n, GoroutineProfile does not change p and returns n, false.
1644 //
1645 // Most clients should use the runtime/pprof package instead
1646 // of calling GoroutineProfile directly.
1647 func GoroutineProfile(p []StackRecord) (n int, ok bool) {
1648 records := make([]profilerecord.StackRecord, len(p))
1649 n, ok = goroutineProfileInternal(records)
1650 if !ok {
1651 return
1652 }
1653 for i, mr := range records[0:n] {
1654 l := copy(p[i].Stack0[:], mr.Stack)
1655 clear(p[i].Stack0[l:])
1656 }
1657 return
1658 }
1659
1660 func goroutineProfileInternal(p []profilerecord.StackRecord) (n int, ok bool) {
1661 return goroutineProfileWithLabels(p, nil)
1662 }
1663
1664 func saveg(pc, sp uintptr, gp *g, r *profilerecord.StackRecord, pcbuf []uintptr) {
1665 // To keep the allocation for r.Stack as small as possible, the stack is
1666 // first recorded into pcbuf (a scratch buffer) and only then copied into
1667 // a slice of exactly the right length. Callers usually pass a shared
1668 // pcbuf so that repeated calls reuse one buffer; if it is nil, a fresh
1669 // buffer is allocated here.
1675 if pcbuf == nil {
1676 pcbuf = makeProfStack()
1677 }
1678
1679 var u unwinder
1680 u.initAt(pc, sp, 0, gp, unwindSilentErrors)
1681 n := tracebackPCs(&u, 0, pcbuf)
1682 r.Stack = make([]uintptr, n)
1683 copy(r.Stack, pcbuf)
1684 }
1685
1686 // Stack formats a stack trace of the calling goroutine into buf
1687 // and returns the number of bytes written to buf.
1688 // If all is true, Stack formats stack traces of all other goroutines
1689 // into buf after the trace for the current goroutine.
1690 func Stack(buf []byte, all bool) int {
1691 var stw worldStop
1692 if all {
1693 stw = stopTheWorld(stwAllGoroutinesStack)
1694 }
1695
1696 n := 0
1697 if len(buf) > 0 {
1698 gp := getg()
1699 sp := getcallersp()
1700 pc := getcallerpc()
1701 systemstack(func() {
1702 g0 := getg()
1703 // Force traceback=1 to override GOTRACEBACK setting,
1704 // so that Stack's results are consistent.
1705 // GOTRACEBACK is only about crash dumps.
1706 g0.m.traceback = 1
1707 g0.writebuf = buf[0:0:len(buf)]
1708 goroutineheader(gp)
1709 traceback(pc, sp, 0, gp)
1710 if all {
1711 tracebackothers(gp)
1712 }
1713 g0.m.traceback = 0
1714 n = len(g0.writebuf)
1715 g0.writebuf = nil
1716 })
1717 }
1718
1719 if all {
1720 startTheWorld(stw)
1721 }
1722 return n
1723 }
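// A minimal usage sketch (not part of the original file; assumes imports of
// runtime and os): dump all goroutine stacks, growing the buffer until the
// whole trace fits (Stack truncates when buf is too small).
//
//	buf := make([]byte, 1<<16)
//	for {
//		n := runtime.Stack(buf, true)
//		if n < len(buf) {
//			os.Stderr.Write(buf[:n])
//			break
//		}
//		buf = make([]byte, 2*len(buf))
//	}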
1724