Source file src/runtime/export_test.go
1 // Copyright 2010 The Go Authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style
3 // license that can be found in the LICENSE file.
4
5 // Export guts for testing.
6
7 package runtime
8
9 import (
10 "internal/abi"
11 "internal/goarch"
12 "internal/goos"
13 "internal/runtime/atomic"
14 "runtime/internal/sys"
15 "unsafe"
16 )
17
18 var Fadd64 = fadd64
19 var Fsub64 = fsub64
20 var Fmul64 = fmul64
21 var Fdiv64 = fdiv64
22 var F64to32 = f64to32
23 var F32to64 = f32to64
24 var Fcmp64 = fcmp64
25 var Fintto64 = fintto64
26 var F64toint = f64toint
27
28 var Entersyscall = entersyscall
29 var Exitsyscall = exitsyscall
30 var LockedOSThread = lockedOSThread
31 var Xadduintptr = atomic.Xadduintptr
32
33 var ReadRandomFailed = &readRandomFailed
34
35 var Fastlog2 = fastlog2
36
37 var Atoi = atoi
38 var Atoi32 = atoi32
39 var ParseByteCount = parseByteCount
40
41 var Nanotime = nanotime
42 var NetpollBreak = netpollBreak
43 var Usleep = usleep
44
45 var PhysPageSize = physPageSize
46 var PhysHugePageSize = physHugePageSize
47
48 var NetpollGenericInit = netpollGenericInit
49
50 var Memmove = memmove
51 var MemclrNoHeapPointers = memclrNoHeapPointers
52
53 var CgoCheckPointer = cgoCheckPointer
54
55 const CrashStackImplemented = crashStackImplemented
56
57 const TracebackInnerFrames = tracebackInnerFrames
58 const TracebackOuterFrames = tracebackOuterFrames
59
60 var MapKeys = keys
61 var MapValues = values
62
63 var LockPartialOrder = lockPartialOrder
64
65 type TimeTimer = timeTimer
66
67 type LockRank lockRank
68
69 func (l LockRank) String() string {
70 return lockRank(l).String()
71 }
72
73 const PreemptMSupported = preemptMSupported
74
75 type LFNode struct {
76 Next uint64
77 Pushcnt uintptr
78 }
79
80 func LFStackPush(head *uint64, node *LFNode) {
81 (*lfstack)(head).push((*lfnode)(unsafe.Pointer(node)))
82 }
83
84 func LFStackPop(head *uint64) *LFNode {
85 return (*LFNode)((*lfstack)(head).pop())
86 }
87 func LFNodeValidate(node *LFNode) {
88 lfnodeValidate((*lfnode)(unsafe.Pointer(node)))
89 }
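// A minimal usage sketch (hypothetical; not taken from the runtime's own
// tests): nodes for the lock-free stack must live outside the GC'd heap,
// so this sketch gets one from PersistentAlloc, exported further down in
// this file.
//
//	var head uint64
//	n := (*LFNode)(PersistentAlloc(unsafe.Sizeof(LFNode{})))
//	n.Pushcnt = 1
//	LFNodeValidate(n)
//	LFStackPush(&head, n)
//	if got := LFStackPop(&head); got != n {
//		panic("unexpected node popped")
//	}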
90
91 func Netpoll(delta int64) {
92 systemstack(func() {
93 netpoll(delta)
94 })
95 }
96
97 func GCMask(x any) (ret []byte) {
98 systemstack(func() {
99 ret = getgcmask(x)
100 })
101 return
102 }
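// For illustration (hypothetical type): the returned mask records, word by
// word, which words of the object the GC treats as pointers.
//
//	type S struct {
//		P *int
//		N uintptr
//	}
//	mask := GCMask(new(S)) // first word marked as a pointer, second not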
103
104 func RunSchedLocalQueueTest() {
105 pp := new(p)
106 gs := make([]g, len(pp.runq))
107 Escape(gs)
108 for i := 0; i < len(pp.runq); i++ {
109 if g, _ := runqget(pp); g != nil {
110 throw("runq is not empty initially")
111 }
112 for j := 0; j < i; j++ {
113 runqput(pp, &gs[i], false)
114 }
115 for j := 0; j < i; j++ {
116 if g, _ := runqget(pp); g != &gs[i] {
117 print("bad element at iter ", i, "/", j, "\n")
118 throw("bad element")
119 }
120 }
121 if g, _ := runqget(pp); g != nil {
122 throw("runq is not empty afterwards")
123 }
124 }
125 }
126
127 func RunSchedLocalQueueStealTest() {
128 p1 := new(p)
129 p2 := new(p)
130 gs := make([]g, len(p1.runq))
131 Escape(gs)
132 for i := 0; i < len(p1.runq); i++ {
133 for j := 0; j < i; j++ {
134 gs[j].sig = 0
135 runqput(p1, &gs[j], false)
136 }
137 gp := runqsteal(p2, p1, true)
138 s := 0
139 if gp != nil {
140 s++
141 gp.sig++
142 }
143 for {
144 gp, _ = runqget(p2)
145 if gp == nil {
146 break
147 }
148 s++
149 gp.sig++
150 }
151 for {
152 gp, _ = runqget(p1)
153 if gp == nil {
154 break
155 }
156 gp.sig++
157 }
158 for j := 0; j < i; j++ {
159 if gs[j].sig != 1 {
160 print("bad element ", j, "(", gs[j].sig, ") at iter ", i, "\n")
161 throw("bad element")
162 }
163 }
164 if s != i/2 && s != i/2+1 {
165 print("bad steal ", s, ", want ", i/2, " or ", i/2+1, ", iter ", i, "\n")
166 throw("bad steal")
167 }
168 }
169 }
170
171 func RunSchedLocalQueueEmptyTest(iters int) {
172 // Test that runq is not spuriously reported as empty.
173 // Runq emptiness affects scheduling decisions and spurious emptiness
174 // can lead to underutilization (both runnable Gs and idle Ps while
175 // runq is not empty).
176 done := make(chan bool, 1)
177 p := new(p)
178 gs := make([]g, 2)
179 Escape(gs)
180 ready := new(uint32)
181 for i := 0; i < iters; i++ {
182 *ready = 0
183 next0 := (i & 1) == 0
184 next1 := (i & 2) == 0
185 runqput(p, &gs[0], next0)
186 go func() {
187 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
188 }
189 if runqempty(p) {
190 println("next:", next0, next1)
191 throw("queue is empty")
192 }
193 done <- true
194 }()
195 for atomic.Xadd(ready, 1); atomic.Load(ready) != 2; {
196 }
197 runqput(p, &gs[1], next1)
198 runqget(p)
199 <-done
200 runqget(p)
201 }
202 }
203
204 var (
205 StringHash = stringHash
206 BytesHash = bytesHash
207 Int32Hash = int32Hash
208 Int64Hash = int64Hash
209 MemHash = memhash
210 MemHash32 = memhash32
211 MemHash64 = memhash64
212 EfaceHash = efaceHash
213 IfaceHash = ifaceHash
214 )
215
216 var UseAeshash = &useAeshash
217
218 func MemclrBytes(b []byte) {
219 s := (*slice)(unsafe.Pointer(&b))
220 memclrNoHeapPointers(s.array, uintptr(s.len))
221 }
222
223 const HashLoad = hashLoad
224
225
226 func GostringW(w []uint16) (s string) {
227 systemstack(func() {
228 s = gostringw(&w[0])
229 })
230 return
231 }
232
233 var Open = open
234 var Close = closefd
235 var Read = read
236 var Write = write
237
238 func Envs() []string { return envs }
239 func SetEnvs(e []string) { envs = e }
240
241 const PtrSize = goarch.PtrSize
242
243 var ForceGCPeriod = &forcegcperiod
244
245 // SetTracebackEnv is like runtime/debug.SetTraceback, but it raises
246 // the "environment" traceback level, so later calls to
247 // debug.SetTraceback (e.g., from testing timeouts) can't lower it.
248 func SetTracebackEnv(level string) {
249 setTraceback(level)
250 traceback_env = traceback_cache
251 }
252
253 var ReadUnaligned32 = readUnaligned32
254 var ReadUnaligned64 = readUnaligned64
255
256 func CountPagesInUse() (pagesInUse, counted uintptr) {
257 stw := stopTheWorld(stwForTestCountPagesInUse)
258
259 pagesInUse = mheap_.pagesInUse.Load()
260
261 for _, s := range mheap_.allspans {
262 if s.state.get() == mSpanInUse {
263 counted += s.npages
264 }
265 }
266
267 startTheWorld(stw)
268
269 return
270 }
271
272 func Fastrand() uint32 { return uint32(rand()) }
273 func Fastrand64() uint64 { return rand() }
274 func Fastrandn(n uint32) uint32 { return randn(n) }
275
276 type ProfBuf profBuf
277
278 func NewProfBuf(hdrsize, bufwords, tags int) *ProfBuf {
279 return (*ProfBuf)(newProfBuf(hdrsize, bufwords, tags))
280 }
281
282 func (p *ProfBuf) Write(tag *unsafe.Pointer, now int64, hdr []uint64, stk []uintptr) {
283 (*profBuf)(p).write(tag, now, hdr, stk)
284 }
285
286 const (
287 ProfBufBlocking = profBufBlocking
288 ProfBufNonBlocking = profBufNonBlocking
289 )
290
291 func (p *ProfBuf) Read(mode profBufReadMode) ([]uint64, []unsafe.Pointer, bool) {
292 return (*profBuf)(p).read(mode)
293 }
294
295 func (p *ProfBuf) Close() {
296 (*profBuf)(p).close()
297 }
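// A minimal usage sketch (hypothetical sizes): a buffer with a two-word
// header, 100 data words, and one tag slot, written once and drained in
// non-blocking mode.
//
//	b := NewProfBuf(2, 100, 1)
//	var tag unsafe.Pointer
//	b.Write(&tag, Nanotime(), []uint64{1, 2}, []uintptr{100, 200})
//	data, tags, eof := b.Read(ProfBufNonBlocking)
//	_, _, _ = data, tags, eof
//	b.Close()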
298
299 type CPUStats = cpuStats
300
301 func ReadCPUStats() CPUStats {
302 return work.cpuStats
303 }
304
305 func ReadMetricsSlow(memStats *MemStats, samplesp unsafe.Pointer, len, cap int) {
306 stw := stopTheWorld(stwForTestReadMetricsSlow)
307
308
309
310 metricsLock()
311 initMetrics()
312
313 systemstack(func() {
314
315
316 getg().racectx = getg().m.curg.racectx
317
318
319
320
321
322
323 readMetricsLocked(samplesp, len, cap)
324
325
326
327
328
329 readmemstats_m(memStats)
330
331
332
333
334 readMetricsLocked(samplesp, len, cap)
335
336
337 getg().racectx = 0
338 })
339 metricsUnlock()
340
341 startTheWorld(stw)
342 }
343
344 var DoubleCheckReadMemStats = &doubleCheckReadMemStats
345
346 // ReadMemStatsSlow returns both the runtime-computed MemStats and
347 // MemStats accumulated by scanning the heap, for cross-checking in tests.
348 func ReadMemStatsSlow() (base, slow MemStats) {
349 stw := stopTheWorld(stwForTestReadMemStatsSlow)
350
351
352 systemstack(func() {
353
354 getg().m.mallocing++
355
356 readmemstats_m(&base)
357
358
359
360 slow = base
361 slow.Alloc = 0
362 slow.TotalAlloc = 0
363 slow.Mallocs = 0
364 slow.Frees = 0
365 slow.HeapReleased = 0
366 var bySize [_NumSizeClasses]struct {
367 Mallocs, Frees uint64
368 }
369
370
371 for _, s := range mheap_.allspans {
372 if s.state.get() != mSpanInUse {
373 continue
374 }
375 if s.isUnusedUserArenaChunk() {
376 continue
377 }
378 if sizeclass := s.spanclass.sizeclass(); sizeclass == 0 {
379 slow.Mallocs++
380 slow.Alloc += uint64(s.elemsize)
381 } else {
382 slow.Mallocs += uint64(s.allocCount)
383 slow.Alloc += uint64(s.allocCount) * uint64(s.elemsize)
384 bySize[sizeclass].Mallocs += uint64(s.allocCount)
385 }
386 }
387
388
389 var m heapStatsDelta
390 memstats.heapStats.unsafeRead(&m)
391
392
393 var smallFree uint64
394 for i := 0; i < _NumSizeClasses; i++ {
395 slow.Frees += m.smallFreeCount[i]
396 bySize[i].Frees += m.smallFreeCount[i]
397 bySize[i].Mallocs += m.smallFreeCount[i]
398 smallFree += m.smallFreeCount[i] * uint64(class_to_size[i])
399 }
400 slow.Frees += m.tinyAllocCount + m.largeFreeCount
401 slow.Mallocs += slow.Frees
402
403 slow.TotalAlloc = slow.Alloc + m.largeFree + smallFree
404
405 for i := range slow.BySize {
406 slow.BySize[i].Mallocs = bySize[i].Mallocs
407 slow.BySize[i].Frees = bySize[i].Frees
408 }
409
410 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
411 chunk := mheap_.pages.tryChunkOf(i)
412 if chunk == nil {
413 continue
414 }
415 pg := chunk.scavenged.popcntRange(0, pallocChunkPages)
416 slow.HeapReleased += uint64(pg) * pageSize
417 }
418 for _, p := range allp {
419 pg := sys.OnesCount64(p.pcache.scav)
420 slow.HeapReleased += uint64(pg) * pageSize
421 }
422
423 getg().m.mallocing--
424 })
425
426 startTheWorld(stw)
427 return
428 }
429
430 // ShrinkStackAndVerifyFramePointers attempts to shrink the stack of the
431 // current goroutine (with stack copy poisoning enabled) and then walks the
432 // new stack's frame pointers to verify that unwinding still works.
433 func ShrinkStackAndVerifyFramePointers() {
434 before := stackPoisonCopy
435 defer func() { stackPoisonCopy = before }()
436 stackPoisonCopy = 1
437
438 gp := getg()
439 systemstack(func() {
440 shrinkstack(gp)
441 })
442
443
444 FPCallers(make([]uintptr, 1024))
445 }
446
447 // BlockOnSystemStack switches to the system stack, prints "x" to
448 // stderr, and blocks in a stack containing
449 // "runtime.blockOnSystemStackInternal".
450 func BlockOnSystemStack() {
451 systemstack(blockOnSystemStackInternal)
452 }
453
454 func blockOnSystemStackInternal() {
455 print("x\n")
456 lock(&deadlock)
457 lock(&deadlock)
458 }
459
460 type RWMutex struct {
461 rw rwmutex
462 }
463
464 func (rw *RWMutex) Init() {
465 rw.rw.init(lockRankTestR, lockRankTestRInternal, lockRankTestW)
466 }
467
468 func (rw *RWMutex) RLock() {
469 rw.rw.rlock()
470 }
471
472 func (rw *RWMutex) RUnlock() {
473 rw.rw.runlock()
474 }
475
476 func (rw *RWMutex) Lock() {
477 rw.rw.lock()
478 }
479
480 func (rw *RWMutex) Unlock() {
481 rw.rw.unlock()
482 }
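// A minimal usage sketch: the wrapper is driven like a sync.RWMutex in tests,
// except that Init must be called first so the test lock ranks are wired up.
//
//	var rw RWMutex
//	rw.Init()
//	rw.RLock()
//	rw.RUnlock()
//	rw.Lock()
//	rw.Unlock()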
483
484 const RuntimeHmapSize = unsafe.Sizeof(hmap{})
485
486 func MapBucketsCount(m map[int]int) int {
487 h := *(**hmap)(unsafe.Pointer(&m))
488 return 1 << h.B
489 }
490
491 func MapBucketsPointerIsNil(m map[int]int) bool {
492 h := *(**hmap)(unsafe.Pointer(&m))
493 return h.buckets == nil
494 }
495
496 func OverLoadFactor(count int, B uint8) bool {
497 return overLoadFactor(count, B)
498 }
499
500 func LockOSCounts() (external, internal uint32) {
501 gp := getg()
502 if gp.m.lockedExt+gp.m.lockedInt == 0 {
503 if gp.lockedm != 0 {
504 panic("lockedm on non-locked goroutine")
505 }
506 } else {
507 if gp.lockedm == 0 {
508 panic("nil lockedm on locked goroutine")
509 }
510 }
511 return gp.m.lockedExt, gp.m.lockedInt
512 }
513
514
515 func TracebackSystemstack(stk []uintptr, i int) int {
516 if i == 0 {
517 pc, sp := getcallerpc(), getcallersp()
518 var u unwinder
519 u.initAt(pc, sp, 0, getg(), unwindJumpStack)
520 return tracebackPCs(&u, 0, stk)
521 }
522 n := 0
523 systemstack(func() {
524 n = TracebackSystemstack(stk, i-1)
525 })
526 return n
527 }
528
529 func KeepNArenaHints(n int) {
530 hint := mheap_.arenaHints
531 for i := 1; i < n; i++ {
532 hint = hint.next
533 if hint == nil {
534 return
535 }
536 }
537 hint.next = nil
538 }
539
540 // MapNextArenaHint reserves a page at the next arena growth hint,
541 // preventing the arena from growing there, and returns the range of
542 // addresses that are no longer viable.
543 //
544 // This may fail to reserve memory. If it fails, it still returns the
545 // address range it attempted to reserve.
546 func MapNextArenaHint() (start, end uintptr, ok bool) {
547 hint := mheap_.arenaHints
548 addr := hint.addr
549 if hint.down {
550 start, end = addr-heapArenaBytes, addr
551 addr -= physPageSize
552 } else {
553 start, end = addr, addr+heapArenaBytes
554 }
555 got := sysReserve(unsafe.Pointer(addr), physPageSize)
556 ok = (addr == uintptr(got))
557 if !ok {
558
559
560 sysFreeOS(got, physPageSize)
561 }
562 return
563 }
564
565 func GetNextArenaHint() uintptr {
566 return mheap_.arenaHints.addr
567 }
568
569 type G = g
570
571 type Sudog = sudog
572
573 func Getg() *G {
574 return getg()
575 }
576
577 func Goid() uint64 {
578 return getg().goid
579 }
580
581 func GIsWaitingOnMutex(gp *G) bool {
582 return readgstatus(gp) == _Gwaiting && gp.waitreason.isMutexWait()
583 }
584
585 var CasGStatusAlwaysTrack = &casgstatusAlwaysTrack
586
587
588 func PanicForTesting(b []byte, i int) byte {
589 return unexportedPanicForTesting(b, i)
590 }
591
592
593 func unexportedPanicForTesting(b []byte, i int) byte {
594 return b[i]
595 }
596
597 func G0StackOverflow() {
598 systemstack(func() {
599 g0 := getg()
600 sp := getcallersp()
601 // The stack bounds for the g0 stack are not always precise.
602 // Use an artificially small stack to trigger a stack overflow
603 // without actually running out of the system stack (which could segfault).
604 g0.stack.lo = sp - 4096 - stackSystem
605 g0.stackguard0 = g0.stack.lo + stackGuard
606 g0.stackguard1 = g0.stackguard0
607
608 stackOverflow(nil)
609 })
610 }
611
612 func stackOverflow(x *byte) {
613 var buf [256]byte
614 stackOverflow(&buf[0])
615 }
616
617 func MapTombstoneCheck(m map[int]int) {
618 // Make sure emptyOne and emptyRest are distributed correctly.
619 // We should have a series of filled and emptyOne cells, followed by
620 // a series of emptyRest cells.
621 h := *(**hmap)(unsafe.Pointer(&m))
622 i := any(m)
623 t := *(**maptype)(unsafe.Pointer(&i))
624
625 for x := 0; x < 1<<h.B; x++ {
626 b0 := (*bmap)(add(h.buckets, uintptr(x)*uintptr(t.BucketSize)))
627 n := 0
628 for b := b0; b != nil; b = b.overflow(t) {
629 for i := 0; i < abi.MapBucketCount; i++ {
630 if b.tophash[i] != emptyRest {
631 n++
632 }
633 }
634 }
635 k := 0
636 for b := b0; b != nil; b = b.overflow(t) {
637 for i := 0; i < abi.MapBucketCount; i++ {
638 if k < n && b.tophash[i] == emptyRest {
639 panic("early emptyRest")
640 }
641 if k >= n && b.tophash[i] != emptyRest {
642 panic("late non-emptyRest")
643 }
644 if k == n-1 && b.tophash[i] == emptyOne {
645 panic("last non-emptyRest entry is emptyOne")
646 }
647 k++
648 }
649 }
650 }
651 }
652
653 func RunGetgThreadSwitchTest() {
654 // Test that getg works correctly with thread switch.
655 // With gccgo, if getg is generated inline, the backend
656 // may cache the address of the TLS variable, which
657 // will become invalid after a thread switch. This test
658 // checks that the bad caching doesn't happen.
659
660 ch := make(chan int)
661 go func(ch chan int) {
662 ch <- 5
663 LockOSThread()
664 }(ch)
665
666 g1 := getg()
667
668
669
670
671
672 <-ch
673
674 g2 := getg()
675 if g1 != g2 {
676 panic("g1 != g2")
677 }
678
679
680
681 g3 := getg()
682 if g1 != g3 {
683 panic("g1 != g3")
684 }
685 }
686
687 const (
688 PageSize = pageSize
689 PallocChunkPages = pallocChunkPages
690 PageAlloc64Bit = pageAlloc64Bit
691 PallocSumBytes = pallocSumBytes
692 )
693
694
695 type PallocSum pallocSum
696
697 func PackPallocSum(start, max, end uint) PallocSum { return PallocSum(packPallocSum(start, max, end)) }
698 func (m PallocSum) Start() uint { return pallocSum(m).start() }
699 func (m PallocSum) Max() uint { return pallocSum(m).max() }
700 func (m PallocSum) End() uint { return pallocSum(m).end() }
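// A small worked example (arbitrary values): a summary packs the length of
// the free run at the start of a region, the longest free run anywhere in
// it, and the free run at its end.
//
//	s := PackPallocSum(1, 16, 4)
//	_ = s.Start() // 1
//	_ = s.Max()   // 16
//	_ = s.End()   // 4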
701
702
703 type PallocBits pallocBits
704
705 func (b *PallocBits) Find(npages uintptr, searchIdx uint) (uint, uint) {
706 return (*pallocBits)(b).find(npages, searchIdx)
707 }
708 func (b *PallocBits) AllocRange(i, n uint) { (*pallocBits)(b).allocRange(i, n) }
709 func (b *PallocBits) Free(i, n uint) { (*pallocBits)(b).free(i, n) }
710 func (b *PallocBits) Summarize() PallocSum { return PallocSum((*pallocBits)(b).summarize()) }
711 func (b *PallocBits) PopcntRange(i, n uint) uint { return (*pageBits)(b).popcntRange(i, n) }
712
713
714
715 func SummarizeSlow(b *PallocBits) PallocSum {
716 var start, most, end uint
717
718 const N = uint(len(b)) * 64
719 for start < N && (*pageBits)(b).get(start) == 0 {
720 start++
721 }
722 for end < N && (*pageBits)(b).get(N-end-1) == 0 {
723 end++
724 }
725 run := uint(0)
726 for i := uint(0); i < N; i++ {
727 if (*pageBits)(b).get(i) == 0 {
728 run++
729 } else {
730 run = 0
731 }
732 most = max(most, run)
733 }
734 return PackPallocSum(start, most, end)
735 }
736
737
738 func FindBitRange64(c uint64, n uint) uint { return findBitRange64(c, n) }
739
740
741
742 func DiffPallocBits(a, b *PallocBits) []BitRange {
743 ba := (*pageBits)(a)
744 bb := (*pageBits)(b)
745
746 var d []BitRange
747 base, size := uint(0), uint(0)
748 for i := uint(0); i < uint(len(ba))*64; i++ {
749 if ba.get(i) != bb.get(i) {
750 if size == 0 {
751 base = i
752 }
753 size++
754 } else {
755 if size != 0 {
756 d = append(d, BitRange{base, size})
757 }
758 size = 0
759 }
760 }
761 if size != 0 {
762 d = append(d, BitRange{base, size})
763 }
764 return d
765 }
766
767
768
769
770 func StringifyPallocBits(b *PallocBits, r BitRange) string {
771 str := ""
772 for j := r.I; j < r.I+r.N; j++ {
773 if (*pageBits)(b).get(j) != 0 {
774 str += "1"
775 } else {
776 str += "0"
777 }
778 }
779 return str
780 }
781
782
783 type PallocData pallocData
784
785 func (d *PallocData) FindScavengeCandidate(searchIdx uint, min, max uintptr) (uint, uint) {
786 return (*pallocData)(d).findScavengeCandidate(searchIdx, min, max)
787 }
788 func (d *PallocData) AllocRange(i, n uint) { (*pallocData)(d).allocRange(i, n) }
789 func (d *PallocData) ScavengedSetRange(i, n uint) {
790 (*pallocData)(d).scavenged.setRange(i, n)
791 }
792 func (d *PallocData) PallocBits() *PallocBits {
793 return (*PallocBits)(&(*pallocData)(d).pallocBits)
794 }
795 func (d *PallocData) Scavenged() *PallocBits {
796 return (*PallocBits)(&(*pallocData)(d).scavenged)
797 }
798
799
800 func FillAligned(x uint64, m uint) uint64 { return fillAligned(x, m) }
801
802
803 type PageCache pageCache
804
805 const PageCachePages = pageCachePages
806
807 func NewPageCache(base uintptr, cache, scav uint64) PageCache {
808 return PageCache(pageCache{base: base, cache: cache, scav: scav})
809 }
810 func (c *PageCache) Empty() bool { return (*pageCache)(c).empty() }
811 func (c *PageCache) Base() uintptr { return (*pageCache)(c).base }
812 func (c *PageCache) Cache() uint64 { return (*pageCache)(c).cache }
813 func (c *PageCache) Scav() uint64 { return (*pageCache)(c).scav }
814 func (c *PageCache) Alloc(npages uintptr) (uintptr, uintptr) {
815 return (*pageCache)(c).alloc(npages)
816 }
817 func (c *PageCache) Flush(s *PageAlloc) {
818 cp := (*pageCache)(c)
819 sp := (*pageAlloc)(s)
820
821 systemstack(func() {
822
823
824 lock(sp.mheapLock)
825 cp.flush(sp)
826 unlock(sp.mheapLock)
827 })
828 }
829
830
831 type ChunkIdx chunkIdx
832
833
834
835 type PageAlloc pageAlloc
836
837 func (p *PageAlloc) Alloc(npages uintptr) (uintptr, uintptr) {
838 pp := (*pageAlloc)(p)
839
840 var addr, scav uintptr
841 systemstack(func() {
842
843
844 lock(pp.mheapLock)
845 addr, scav = pp.alloc(npages)
846 unlock(pp.mheapLock)
847 })
848 return addr, scav
849 }
850 func (p *PageAlloc) AllocToCache() PageCache {
851 pp := (*pageAlloc)(p)
852
853 var c PageCache
854 systemstack(func() {
855
856
857 lock(pp.mheapLock)
858 c = PageCache(pp.allocToCache())
859 unlock(pp.mheapLock)
860 })
861 return c
862 }
863 func (p *PageAlloc) Free(base, npages uintptr) {
864 pp := (*pageAlloc)(p)
865
866 systemstack(func() {
867
868
869 lock(pp.mheapLock)
870 pp.free(base, npages)
871 unlock(pp.mheapLock)
872 })
873 }
874 func (p *PageAlloc) Bounds() (ChunkIdx, ChunkIdx) {
875 return ChunkIdx((*pageAlloc)(p).start), ChunkIdx((*pageAlloc)(p).end)
876 }
877 func (p *PageAlloc) Scavenge(nbytes uintptr) (r uintptr) {
878 pp := (*pageAlloc)(p)
879 systemstack(func() {
880 r = pp.scavenge(nbytes, nil, true)
881 })
882 return
883 }
884 func (p *PageAlloc) InUse() []AddrRange {
885 ranges := make([]AddrRange, 0, len(p.inUse.ranges))
886 for _, r := range p.inUse.ranges {
887 ranges = append(ranges, AddrRange{r})
888 }
889 return ranges
890 }
891
892
893 func (p *PageAlloc) PallocData(i ChunkIdx) *PallocData {
894 ci := chunkIdx(i)
895 return (*PallocData)((*pageAlloc)(p).tryChunkOf(ci))
896 }
897
898
899 type AddrRange struct {
900 addrRange
901 }
902
903
904 func MakeAddrRange(base, limit uintptr) AddrRange {
905 return AddrRange{makeAddrRange(base, limit)}
906 }
907
908
909 func (a AddrRange) Base() uintptr {
910 return a.addrRange.base.addr()
911 }
912
913
914 func (a AddrRange) Limit() uintptr {
915 return a.addrRange.limit.addr()
916 }
917
918
919 func (a AddrRange) Equals(b AddrRange) bool {
920 return a == b
921 }
922
923
924 func (a AddrRange) Size() uintptr {
925 return a.addrRange.size()
926 }
927
928
929
930
931
932 var testSysStat = &memstats.other_sys
933
934
935 type AddrRanges struct {
936 addrRanges
937 mutable bool
938 }
939
940
941
942
943
944
945
946
947
948
949 func NewAddrRanges() AddrRanges {
950 r := addrRanges{}
951 r.init(testSysStat)
952 return AddrRanges{r, true}
953 }
954
955
956
957
958
959
960 func MakeAddrRanges(a ...AddrRange) AddrRanges {
961
962
963
964
965
966 ranges := make([]addrRange, 0, len(a))
967 total := uintptr(0)
968 for _, r := range a {
969 ranges = append(ranges, r.addrRange)
970 total += r.Size()
971 }
972 return AddrRanges{addrRanges{
973 ranges: ranges,
974 totalBytes: total,
975 sysStat: testSysStat,
976 }, false}
977 }
978
979
980
981 func (a *AddrRanges) Ranges() []AddrRange {
982 result := make([]AddrRange, 0, len(a.addrRanges.ranges))
983 for _, r := range a.addrRanges.ranges {
984 result = append(result, AddrRange{r})
985 }
986 return result
987 }
988
989
990
991 func (a *AddrRanges) FindSucc(base uintptr) int {
992 return a.findSucc(base)
993 }
994
995
996
997
998
999 func (a *AddrRanges) Add(r AddrRange) {
1000 if !a.mutable {
1001 throw("attempt to mutate immutable AddrRanges")
1002 }
1003 a.add(r.addrRange)
1004 }
1005
1006
1007 func (a *AddrRanges) TotalBytes() uintptr {
1008 return a.addrRanges.totalBytes
1009 }
1010
1011
1012 type BitRange struct {
1013 I, N uint
1014 }
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030 func NewPageAlloc(chunks, scav map[ChunkIdx][]BitRange) *PageAlloc {
1031 p := new(pageAlloc)
1032
1033
1034 p.init(new(mutex), testSysStat, true)
1035 lockInit(p.mheapLock, lockRankMheap)
1036 for i, init := range chunks {
1037 addr := chunkBase(chunkIdx(i))
1038
1039
1040 systemstack(func() {
1041 lock(p.mheapLock)
1042 p.grow(addr, pallocChunkBytes)
1043 unlock(p.mheapLock)
1044 })
1045
1046
1047 ci := chunkIndex(addr)
1048 chunk := p.chunkOf(ci)
1049
1050
1051 chunk.scavenged.clearRange(0, pallocChunkPages)
1052
1053
1054
1055
1056 p.scav.index.alloc(ci, pallocChunkPages)
1057 p.scav.index.free(ci, 0, pallocChunkPages)
1058
1059
1060 if scav != nil {
1061 if scvg, ok := scav[i]; ok {
1062 for _, s := range scvg {
1063
1064
1065 if s.N != 0 {
1066 chunk.scavenged.setRange(s.I, s.N)
1067 }
1068 }
1069 }
1070 }
1071
1072
1073 for _, s := range init {
1074
1075
1076 if s.N != 0 {
1077 chunk.allocRange(s.I, s.N)
1078
1079
1080 p.scav.index.alloc(ci, s.N)
1081 }
1082 }
1083
1084
1085 systemstack(func() {
1086 lock(p.mheapLock)
1087 p.update(addr, pallocChunkPages, false, false)
1088 unlock(p.mheapLock)
1089 })
1090 }
1091
1092 return (*PageAlloc)(p)
1093 }
1094
1095
1096
1097
1098 func FreePageAlloc(pp *PageAlloc) {
1099 p := (*pageAlloc)(pp)
1100
1101
1102 if pageAlloc64Bit != 0 {
1103 for l := 0; l < summaryLevels; l++ {
1104 sysFreeOS(unsafe.Pointer(&p.summary[l][0]), uintptr(cap(p.summary[l]))*pallocSumBytes)
1105 }
1106 } else {
1107 resSize := uintptr(0)
1108 for _, s := range p.summary {
1109 resSize += uintptr(cap(s)) * pallocSumBytes
1110 }
1111 sysFreeOS(unsafe.Pointer(&p.summary[0][0]), alignUp(resSize, physPageSize))
1112 }
1113
1114
1115 sysFreeOS(unsafe.Pointer(&p.scav.index.chunks[0]), uintptr(cap(p.scav.index.chunks))*unsafe.Sizeof(atomicScavChunkData{}))
1116
1117
1118
1119
1120
1121 gcController.mappedReady.Add(-int64(p.summaryMappedReady))
1122 testSysStat.add(-int64(p.summaryMappedReady))
1123
1124
1125 for i := range p.chunks {
1126 if x := p.chunks[i]; x != nil {
1127 p.chunks[i] = nil
1128
1129 sysFree(unsafe.Pointer(x), unsafe.Sizeof(*p.chunks[0]), testSysStat)
1130 }
1131 }
1132 }
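// A minimal usage sketch (hypothetical): tests typically build a small page
// allocator this way, allocate from it, and tear it down with FreePageAlloc.
// BaseChunkIdx is defined just below.
//
//	p := NewPageAlloc(map[ChunkIdx][]BitRange{
//		BaseChunkIdx: {}, // one chunk mapped, no pages pre-allocated
//	}, nil)
//	defer FreePageAlloc(p)
//	addr, scav := p.Alloc(1)
//	_, _ = addr, scav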
1133
1134
1135
1136
1137
1138
1139
1140 var BaseChunkIdx = func() ChunkIdx {
1141 var prefix uintptr
1142 if pageAlloc64Bit != 0 {
1143 prefix = 0xc000
1144 } else {
1145 prefix = 0x100
1146 }
1147 baseAddr := prefix * pallocChunkBytes
1148 if goos.IsAix != 0 {
1149 baseAddr += arenaBaseOffset
1150 }
1151 return ChunkIdx(chunkIndex(baseAddr))
1152 }()
1153
1154
1155
1156 func PageBase(c ChunkIdx, pageIdx uint) uintptr {
1157 return chunkBase(chunkIdx(c)) + uintptr(pageIdx)*pageSize
1158 }
1159
1160 type BitsMismatch struct {
1161 Base uintptr
1162 Got, Want uint64
1163 }
1164
1165 func CheckScavengedBitsCleared(mismatches []BitsMismatch) (n int, ok bool) {
1166 ok = true
1167
1168
1169 systemstack(func() {
1170 getg().m.mallocing++
1171
1172
1173 lock(&mheap_.lock)
1174 chunkLoop:
1175 for i := mheap_.pages.start; i < mheap_.pages.end; i++ {
1176 chunk := mheap_.pages.tryChunkOf(i)
1177 if chunk == nil {
1178 continue
1179 }
1180 for j := 0; j < pallocChunkPages/64; j++ {
1181
1182
1183
1184
1185
1186 want := chunk.scavenged[j] &^ chunk.pallocBits[j]
1187 got := chunk.scavenged[j]
1188 if want != got {
1189 ok = false
1190 if n >= len(mismatches) {
1191 break chunkLoop
1192 }
1193 mismatches[n] = BitsMismatch{
1194 Base: chunkBase(i) + uintptr(j)*64*pageSize,
1195 Got: got,
1196 Want: want,
1197 }
1198 n++
1199 }
1200 }
1201 }
1202 unlock(&mheap_.lock)
1203
1204 getg().m.mallocing--
1205 })
1206 return
1207 }
1208
1209 func PageCachePagesLeaked() (leaked uintptr) {
1210 stw := stopTheWorld(stwForTestPageCachePagesLeaked)
1211
1212
1213 deadp := allp[len(allp):cap(allp)]
1214 for _, p := range deadp {
1215
1216
1217 if p != nil {
1218 leaked += uintptr(sys.OnesCount64(p.pcache.cache))
1219 }
1220 }
1221
1222 startTheWorld(stw)
1223 return
1224 }
1225
1226 type Mutex = mutex
1227
1228 var Lock = lock
1229 var Unlock = unlock
1230
1231 var MutexContended = mutexContended
1232
1233 func SemRootLock(addr *uint32) *mutex {
1234 root := semtable.rootFor(addr)
1235 return &root.lock
1236 }
1237
1238 var Semacquire = semacquire
1239 var Semrelease1 = semrelease1
1240
1241 func SemNwait(addr *uint32) uint32 {
1242 root := semtable.rootFor(addr)
1243 return root.nwait.Load()
1244 }
1245
1246 const SemTableSize = semTabSize
1247
1248
1249 type SemTable struct {
1250 semTable
1251 }
1252
1253
1254 func (t *SemTable) Enqueue(addr *uint32) {
1255 s := acquireSudog()
1256 s.releasetime = 0
1257 s.acquiretime = 0
1258 s.ticket = 0
1259 t.semTable.rootFor(addr).queue(addr, s, false)
1260 }
1261
1262
1263
1264
1265 func (t *SemTable) Dequeue(addr *uint32) bool {
1266 s, _, _ := t.semTable.rootFor(addr).dequeue(addr)
1267 if s != nil {
1268 releaseSudog(s)
1269 return true
1270 }
1271 return false
1272 }
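// A minimal usage sketch: enqueue a dummy waiter on an address, then dequeue
// it again; a second Dequeue finds the root empty.
//
//	var tab SemTable
//	var addr uint32
//	tab.Enqueue(&addr)
//	if !tab.Dequeue(&addr) {
//		panic("expected a waiter")
//	}
//	if tab.Dequeue(&addr) {
//		panic("expected no waiter")
//	}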
1273
1274
1275 type MSpan mspan
1276
1277
1278 func AllocMSpan() *MSpan {
1279 var s *mspan
1280 systemstack(func() {
1281 lock(&mheap_.lock)
1282 s = (*mspan)(mheap_.spanalloc.alloc())
1283 unlock(&mheap_.lock)
1284 })
1285 return (*MSpan)(s)
1286 }
1287
1288
1289 func FreeMSpan(s *MSpan) {
1290 systemstack(func() {
1291 lock(&mheap_.lock)
1292 mheap_.spanalloc.free(unsafe.Pointer(s))
1293 unlock(&mheap_.lock)
1294 })
1295 }
1296
1297 func MSpanCountAlloc(ms *MSpan, bits []byte) int {
1298 s := (*mspan)(ms)
1299 s.nelems = uint16(len(bits) * 8)
1300 s.gcmarkBits = (*gcBits)(unsafe.Pointer(&bits[0]))
1301 result := s.countAlloc()
1302 s.gcmarkBits = nil
1303 return result
1304 }
1305
1306 const (
1307 TimeHistSubBucketBits = timeHistSubBucketBits
1308 TimeHistNumSubBuckets = timeHistNumSubBuckets
1309 TimeHistNumBuckets = timeHistNumBuckets
1310 TimeHistMinBucketBits = timeHistMinBucketBits
1311 TimeHistMaxBucketBits = timeHistMaxBucketBits
1312 )
1313
1314 type TimeHistogram timeHistogram
1315
1316
1317
1318
1319
1320 func (th *TimeHistogram) Count(bucket, subBucket int) (uint64, bool) {
1321 t := (*timeHistogram)(th)
1322 if bucket < 0 {
1323 return t.underflow.Load(), false
1324 }
1325 i := bucket*TimeHistNumSubBuckets + subBucket
1326 if i >= len(t.counts) {
1327 return t.overflow.Load(), false
1328 }
1329 return t.counts[i].Load(), true
1330 }
1331
1332 func (th *TimeHistogram) Record(duration int64) {
1333 (*timeHistogram)(th).record(duration)
1334 }
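// A minimal usage sketch: record a duration, then scan the buckets with
// Count. Count reports ok=false when asked for the underflow bucket
// (bucket < 0) or for an index past the last regular bucket.
//
//	var h TimeHistogram
//	h.Record(100)
//	total := uint64(0)
//	for b := 0; b < TimeHistNumBuckets; b++ {
//		for sb := 0; sb < TimeHistNumSubBuckets; sb++ {
//			if c, ok := h.Count(b, sb); ok {
//				total += c
//			}
//		}
//	}
//	// total now includes the one recorded duration.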
1335
1336 var TimeHistogramMetricsBuckets = timeHistogramMetricsBuckets
1337
1338 func SetIntArgRegs(a int) int {
1339 lock(&finlock)
1340 old := intArgRegs
1341 if a >= 0 {
1342 intArgRegs = a
1343 }
1344 unlock(&finlock)
1345 return old
1346 }
1347
1348 func FinalizerGAsleep() bool {
1349 return fingStatus.Load()&fingWait != 0
1350 }
1351
1352
1353
1354
1355 var GCTestMoveStackOnNextCall = gcTestMoveStackOnNextCall
1356
1357
1358
1359 func GCTestIsReachable(ptrs ...unsafe.Pointer) (mask uint64) {
1360 return gcTestIsReachable(ptrs...)
1361 }
1362
1363
1364
1365
1366
1367
1368
1369 func GCTestPointerClass(p unsafe.Pointer) string {
1370 return gcTestPointerClass(p)
1371 }
1372
1373 const Raceenabled = raceenabled
1374
1375 const (
1376 GCBackgroundUtilization = gcBackgroundUtilization
1377 GCGoalUtilization = gcGoalUtilization
1378 DefaultHeapMinimum = defaultHeapMinimum
1379 MemoryLimitHeapGoalHeadroomPercent = memoryLimitHeapGoalHeadroomPercent
1380 MemoryLimitMinHeapGoalHeadroom = memoryLimitMinHeapGoalHeadroom
1381 )
1382
1383 type GCController struct {
1384 gcControllerState
1385 }
1386
1387 func NewGCController(gcPercent int, memoryLimit int64) *GCController {
1388 // Force the controller to escape. We're going to
1389 // do 64-bit atomics on it, and if it gets stack-allocated
1390 // on a 32-bit architecture, it may not be aligned
1391 // appropriately.
1392 g := Escape(new(GCController))
1393 g.gcControllerState.test = true
1394 g.init(int32(gcPercent), memoryLimit)
1395 return g
1396 }
1397
1398 func (c *GCController) StartCycle(stackSize, globalsSize uint64, scannableFrac float64, gomaxprocs int) {
1399 trigger, _ := c.trigger()
1400 if c.heapMarked > trigger {
1401 trigger = c.heapMarked
1402 }
1403 c.maxStackScan.Store(stackSize)
1404 c.globalsScan.Store(globalsSize)
1405 c.heapLive.Store(trigger)
1406 c.heapScan.Add(int64(float64(trigger-c.heapMarked) * scannableFrac))
1407 c.startCycle(0, gomaxprocs, gcTrigger{kind: gcTriggerHeap})
1408 }
1409
1410 func (c *GCController) AssistWorkPerByte() float64 {
1411 return c.assistWorkPerByte.Load()
1412 }
1413
1414 func (c *GCController) HeapGoal() uint64 {
1415 return c.heapGoal()
1416 }
1417
1418 func (c *GCController) HeapLive() uint64 {
1419 return c.heapLive.Load()
1420 }
1421
1422 func (c *GCController) HeapMarked() uint64 {
1423 return c.heapMarked
1424 }
1425
1426 func (c *GCController) Triggered() uint64 {
1427 return c.triggered
1428 }
1429
1430 type GCControllerReviseDelta struct {
1431 HeapLive int64
1432 HeapScan int64
1433 HeapScanWork int64
1434 StackScanWork int64
1435 GlobalsScanWork int64
1436 }
1437
1438 func (c *GCController) Revise(d GCControllerReviseDelta) {
1439 c.heapLive.Add(d.HeapLive)
1440 c.heapScan.Add(d.HeapScan)
1441 c.heapScanWork.Add(d.HeapScanWork)
1442 c.stackScanWork.Add(d.StackScanWork)
1443 c.globalsScanWork.Add(d.GlobalsScanWork)
1444 c.revise()
1445 }
1446
1447 func (c *GCController) EndCycle(bytesMarked uint64, assistTime, elapsed int64, gomaxprocs int) {
1448 c.assistTime.Store(assistTime)
1449 c.endCycle(elapsed, gomaxprocs, false)
1450 c.resetLive(bytesMarked)
1451 c.commit(false)
1452 }
1453
1454 func (c *GCController) AddIdleMarkWorker() bool {
1455 return c.addIdleMarkWorker()
1456 }
1457
1458 func (c *GCController) NeedIdleMarkWorker() bool {
1459 return c.needIdleMarkWorker()
1460 }
1461
1462 func (c *GCController) RemoveIdleMarkWorker() {
1463 c.removeIdleMarkWorker()
1464 }
1465
1466 func (c *GCController) SetMaxIdleMarkWorkers(max int32) {
1467 c.setMaxIdleMarkWorkers(max)
1468 }
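// A minimal usage sketch (arbitrary values): one simulated GC cycle driven
// through the pacer wrapper.
//
//	c := NewGCController(100, 1<<62)
//	c.StartCycle(1<<20, 1<<10, 0.5, 4)
//	c.Revise(GCControllerReviseDelta{HeapLive: 1 << 20, HeapScan: 1 << 19})
//	c.EndCycle(2<<20, 0, 1e9, 4)
//	_ = c.HeapGoal()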
1469
1470 var alwaysFalse bool
1471 var escapeSink any
1472
1473 func Escape[T any](x T) T {
1474 if alwaysFalse {
1475 escapeSink = x
1476 }
1477 return x
1478 }
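// Escape is how these helpers defeat escape analysis when a test needs a
// value to be heap-allocated, e.g.
//
//	gs := make([]g, len(pp.runq))
//	Escape(gs) // force gs to escape to the heap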
1479
1480
1481 func Acquirem() {
1482 acquirem()
1483 }
1484
1485 func Releasem() {
1486 releasem(getg().m)
1487 }
1488
1489 var Timediv = timediv
1490
1491 type PIController struct {
1492 piController
1493 }
1494
1495 func NewPIController(kp, ti, tt, min, max float64) *PIController {
1496 return &PIController{piController{
1497 kp: kp,
1498 ti: ti,
1499 tt: tt,
1500 min: min,
1501 max: max,
1502 }}
1503 }
1504
1505 func (c *PIController) Next(input, setpoint, period float64) (float64, bool) {
1506 return c.piController.next(input, setpoint, period)
1507 }
1508
1509 const (
1510 CapacityPerProc = capacityPerProc
1511 GCCPULimiterUpdatePeriod = gcCPULimiterUpdatePeriod
1512 )
1513
1514 type GCCPULimiter struct {
1515 limiter gcCPULimiterState
1516 }
1517
1518 func NewGCCPULimiter(now int64, gomaxprocs int32) *GCCPULimiter {
1519 // Force the limiter to escape. We're going to
1520 // do 64-bit atomics on it, and if it gets stack-allocated
1521 // on a 32-bit architecture, it may not be aligned
1522 // appropriately.
1523 l := Escape(new(GCCPULimiter))
1524 l.limiter.test = true
1525 l.limiter.resetCapacity(now, gomaxprocs)
1526 return l
1527 }
1528
1529 func (l *GCCPULimiter) Fill() uint64 {
1530 return l.limiter.bucket.fill
1531 }
1532
1533 func (l *GCCPULimiter) Capacity() uint64 {
1534 return l.limiter.bucket.capacity
1535 }
1536
1537 func (l *GCCPULimiter) Overflow() uint64 {
1538 return l.limiter.overflow
1539 }
1540
1541 func (l *GCCPULimiter) Limiting() bool {
1542 return l.limiter.limiting()
1543 }
1544
1545 func (l *GCCPULimiter) NeedUpdate(now int64) bool {
1546 return l.limiter.needUpdate(now)
1547 }
1548
1549 func (l *GCCPULimiter) StartGCTransition(enableGC bool, now int64) {
1550 l.limiter.startGCTransition(enableGC, now)
1551 }
1552
1553 func (l *GCCPULimiter) FinishGCTransition(now int64) {
1554 l.limiter.finishGCTransition(now)
1555 }
1556
1557 func (l *GCCPULimiter) Update(now int64) {
1558 l.limiter.update(now)
1559 }
1560
1561 func (l *GCCPULimiter) AddAssistTime(t int64) {
1562 l.limiter.addAssistTime(t)
1563 }
1564
1565 func (l *GCCPULimiter) ResetCapacity(now int64, nprocs int32) {
1566 l.limiter.resetCapacity(now, nprocs)
1567 }
1568
1569 const ScavengePercent = scavengePercent
1570
1571 type Scavenger struct {
1572 Sleep func(int64) int64
1573 Scavenge func(uintptr) (uintptr, int64)
1574 ShouldStop func() bool
1575 GoMaxProcs func() int32
1576
1577 released atomic.Uintptr
1578 scavenger scavengerState
1579 stop chan<- struct{}
1580 done <-chan struct{}
1581 }
1582
1583 func (s *Scavenger) Start() {
1584 if s.Sleep == nil || s.Scavenge == nil || s.ShouldStop == nil || s.GoMaxProcs == nil {
1585 panic("must populate all stubs")
1586 }
1587
1588
1589 s.scavenger.sleepStub = s.Sleep
1590 s.scavenger.scavenge = s.Scavenge
1591 s.scavenger.shouldStop = s.ShouldStop
1592 s.scavenger.gomaxprocs = s.GoMaxProcs
1593
1594
1595 stop := make(chan struct{})
1596 s.stop = stop
1597 done := make(chan struct{})
1598 s.done = done
1599 go func() {
1600
1601 s.scavenger.init()
1602 s.scavenger.park()
1603 for {
1604 select {
1605 case <-stop:
1606 close(done)
1607 return
1608 default:
1609 }
1610 released, workTime := s.scavenger.run()
1611 if released == 0 {
1612 s.scavenger.park()
1613 continue
1614 }
1615 s.released.Add(released)
1616 s.scavenger.sleep(workTime)
1617 }
1618 }()
1619 if !s.BlockUntilParked(1e9 /* 1 second */) {
1620 panic("timed out waiting for scavenger to get ready")
1621 }
1622 }
1623
1624
1625
1626
1627
1628
1629
1630 func (s *Scavenger) BlockUntilParked(timeout int64) bool {
1631
1632
1633
1634
1635
1636 start := nanotime()
1637 for nanotime()-start < timeout {
1638 lock(&s.scavenger.lock)
1639 parked := s.scavenger.parked
1640 unlock(&s.scavenger.lock)
1641 if parked {
1642 return true
1643 }
1644 Gosched()
1645 }
1646 return false
1647 }
1648
1649
1650 func (s *Scavenger) Released() uintptr {
1651 return s.released.Load()
1652 }
1653
1654
1655 func (s *Scavenger) Wake() {
1656 s.scavenger.wake()
1657 }
1658
1659
1660
1661 func (s *Scavenger) Stop() {
1662 lock(&s.scavenger.lock)
1663 parked := s.scavenger.parked
1664 unlock(&s.scavenger.lock)
1665 if !parked {
1666 panic("tried to clean up scavenger that is not parked")
1667 }
1668 close(s.stop)
1669 s.Wake()
1670 <-s.done
1671 }
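// A minimal usage sketch (hypothetical stubs): populate all four stubs, start
// the scavenger, wake it, wait for it to park again, and shut it down.
//
//	s := &Scavenger{
//		Sleep:      func(d int64) int64 { return d },
//		Scavenge:   func(n uintptr) (uintptr, int64) { return 0, 10 },
//		ShouldStop: func() bool { return true },
//		GoMaxProcs: func() int32 { return 1 },
//	}
//	s.Start()
//	s.Wake()
//	if !s.BlockUntilParked(1e9) {
//		panic("scavenger did not park")
//	}
//	_ = s.Released()
//	s.Stop()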
1672
1673 type ScavengeIndex struct {
1674 i scavengeIndex
1675 }
1676
1677 func NewScavengeIndex(min, max ChunkIdx) *ScavengeIndex {
1678 s := new(ScavengeIndex)
1679
1680
1681
1682
1683
1684
1685
1686
1687
1688
1689
1690 s.i.chunks = make([]atomicScavChunkData, max)
1691 s.i.min.Store(uintptr(min))
1692 s.i.max.Store(uintptr(max))
1693 s.i.minHeapIdx.Store(uintptr(min))
1694 s.i.test = true
1695 return s
1696 }
1697
1698 func (s *ScavengeIndex) Find(force bool) (ChunkIdx, uint) {
1699 ci, off := s.i.find(force)
1700 return ChunkIdx(ci), off
1701 }
1702
1703 func (s *ScavengeIndex) AllocRange(base, limit uintptr) {
1704 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1705 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1706
1707 if sc == ec {
1708
1709 s.i.alloc(sc, ei+1-si)
1710 } else {
1711
1712 s.i.alloc(sc, pallocChunkPages-si)
1713 for c := sc + 1; c < ec; c++ {
1714 s.i.alloc(c, pallocChunkPages)
1715 }
1716 s.i.alloc(ec, ei+1)
1717 }
1718 }
1719
1720 func (s *ScavengeIndex) FreeRange(base, limit uintptr) {
1721 sc, ec := chunkIndex(base), chunkIndex(limit-1)
1722 si, ei := chunkPageIndex(base), chunkPageIndex(limit-1)
1723
1724 if sc == ec {
1725
1726 s.i.free(sc, si, ei+1-si)
1727 } else {
1728
1729 s.i.free(sc, si, pallocChunkPages-si)
1730 for c := sc + 1; c < ec; c++ {
1731 s.i.free(c, 0, pallocChunkPages)
1732 }
1733 s.i.free(ec, 0, ei+1)
1734 }
1735 }
1736
1737 func (s *ScavengeIndex) ResetSearchAddrs() {
1738 for _, a := range []*atomicOffAddr{&s.i.searchAddrBg, &s.i.searchAddrForce} {
1739 addr, marked := a.Load()
1740 if marked {
1741 a.StoreUnmark(addr, addr)
1742 }
1743 a.Clear()
1744 }
1745 s.i.freeHWM = minOffAddr
1746 }
1747
1748 func (s *ScavengeIndex) NextGen() {
1749 s.i.nextGen()
1750 }
1751
1752 func (s *ScavengeIndex) SetEmpty(ci ChunkIdx) {
1753 s.i.setEmpty(chunkIdx(ci))
1754 }
1755
1756 func CheckPackScavChunkData(gen uint32, inUse, lastInUse uint16, flags uint8) bool {
1757 sc0 := scavChunkData{
1758 gen: gen,
1759 inUse: inUse,
1760 lastInUse: lastInUse,
1761 scavChunkFlags: scavChunkFlags(flags),
1762 }
1763 scp := sc0.pack()
1764 sc1 := unpackScavChunkData(scp)
1765 return sc0 == sc1
1766 }
1767
1768 const GTrackingPeriod = gTrackingPeriod
1769
1770 var ZeroBase = unsafe.Pointer(&zerobase)
1771
1772 const UserArenaChunkBytes = userArenaChunkBytes
1773
1774 type UserArena struct {
1775 arena *userArena
1776 }
1777
1778 func NewUserArena() *UserArena {
1779 return &UserArena{newUserArena()}
1780 }
1781
1782 func (a *UserArena) New(out *any) {
1783 i := efaceOf(out)
1784 typ := i._type
1785 if typ.Kind_&abi.KindMask != abi.Pointer {
1786 panic("new result of non-ptr type")
1787 }
1788 typ = (*ptrtype)(unsafe.Pointer(typ)).Elem
1789 i.data = a.arena.new(typ)
1790 }
1791
1792 func (a *UserArena) Slice(sl any, cap int) {
1793 a.arena.slice(sl, cap)
1794 }
1795
1796 func (a *UserArena) Free() {
1797 a.arena.free()
1798 }
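// A minimal usage sketch (hypothetical type T): New takes a *any holding a
// nil pointer of the desired type and replaces it with an arena-backed value.
//
//	type T struct{ X int }
//	a := NewUserArena()
//	var x any = (*T)(nil)
//	a.New(&x)
//	x.(*T).X = 42
//	a.Free()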
1799
1800 func GlobalWaitingArenaChunks() int {
1801 n := 0
1802 systemstack(func() {
1803 lock(&mheap_.lock)
1804 for s := mheap_.userArena.quarantineList.first; s != nil; s = s.next {
1805 n++
1806 }
1807 unlock(&mheap_.lock)
1808 })
1809 return n
1810 }
1811
1812 func UserArenaClone[T any](s T) T {
1813 return arena_heapify(s).(T)
1814 }
1815
1816 var AlignUp = alignUp
1817
1818 func BlockUntilEmptyFinalizerQueue(timeout int64) bool {
1819 return blockUntilEmptyFinalizerQueue(timeout)
1820 }
1821
1822 func FrameStartLine(f *Frame) int {
1823 return f.startLine
1824 }
1825
1826 // PersistentAlloc allocates some memory that lives outside the Go heap.
1827 // This memory will never be freed; use sparingly.
1828 func PersistentAlloc(n uintptr) unsafe.Pointer {
1829 return persistentalloc(n, 0, &memstats.other_sys)
1830 }
1831
1832 // FPCallers works like Callers and uses frame pointer unwinding to populate
1833 // pcBuf with the return addresses of the physical frames on the stack.
1834 func FPCallers(pcBuf []uintptr) int {
1835 return fpTracebackPCs(unsafe.Pointer(getfp()), pcBuf)
1836 }
1837
1838 const FramePointerEnabled = framepointer_enabled
1839
1840 var (
1841 IsPinned = isPinned
1842 GetPinCounter = pinnerGetPinCounter
1843 )
1844
1845 func SetPinnerLeakPanic(f func()) {
1846 pinnerLeakPanic = f
1847 }
1848 func GetPinnerLeakPanic() func() {
1849 return pinnerLeakPanic
1850 }
1851
1852 var testUintptr uintptr
1853
1854 func MyGenericFunc[T any]() {
1855 systemstack(func() {
1856 testUintptr = 4
1857 })
1858 }
1859
1860 func UnsafePoint(pc uintptr) bool {
1861 fi := findfunc(pc)
1862 v := pcdatavalue(fi, abi.PCDATA_UnsafePoint, pc)
1863 switch v {
1864 case abi.UnsafePointUnsafe:
1865 return true
1866 case abi.UnsafePointSafe:
1867 return false
1868 case abi.UnsafePointRestart1, abi.UnsafePointRestart2, abi.UnsafePointRestartAtEntry:
1869
1870
1871 return false
1872 default:
1873 var buf [20]byte
1874 panic("invalid unsafe point code " + string(itoa(buf[:], uint64(v))))
1875 }
1876 }
1877
1878 type TraceMap struct {
1879 traceMap
1880 }
1881
1882 func (m *TraceMap) PutString(s string) (uint64, bool) {
1883 return m.traceMap.put(unsafe.Pointer(unsafe.StringData(s)), uintptr(len(s)))
1884 }
1885
1886 func (m *TraceMap) Reset() {
1887 m.traceMap.reset()
1888 }
1889