Source file: src/runtime/race.go
//go:build race

package runtime

import (
	"internal/abi"
	"unsafe"
)

// Public race detection API, present only in -race builds. The bodyless
// declarations below are implemented in assembly.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
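
// The count returned above is the number of race reports produced so far.
// A test could, for instance, snapshot it around a suspect operation
// (sketch; exercise is a hypothetical placeholder):
//
//	before := runtime.RaceErrors()
//	exercise()
//	if runtime.RaceErrors() > before {
//		// a race was reported while exercise ran
//	}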

// RaceAcquire, RaceRelease and RaceReleaseMerge establish happens-before
// edges between goroutines for the race detector, mirroring acquire/release
// semantics on the given address. They let code that is ordered by some
// external mechanism tell the detector about that ordering.

// RaceAcquire performs an acquire operation on addr: it synchronizes with
// prior RaceRelease and RaceReleaseMerge calls on the same address.
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

// RaceRelease performs a release operation on addr that a later RaceAcquire
// on the same address can synchronize with.
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also merges in the effect of
// previous release operations on addr (release-merge semantics in the race
// runtime).
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
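
// Sketch of the intended annotation pattern for externally synchronized
// accesses. handOff and receive are placeholders for whatever real
// mechanism (a futex, a pipe, ...) actually orders the two accesses:
//
//	// goroutine 1
//	data = 42
//	runtime.RaceRelease(unsafe.Pointer(&data))
//	handOff()
//
//	// goroutine 2
//	receive()
//	runtime.RaceAcquire(unsafe.Pointer(&data))
//	_ = data // not reported as a race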

// RaceDisable disables handling of race events in the current goroutine.
// Calls nest: handling is re-enabled only after a matching number of
// RaceEnable calls.
func RaceDisable() {
	gp := getg()
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
	}
	gp.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine,
// undoing one prior RaceDisable.
func RaceEnable() {
	gp := getg()
	gp.raceignore--
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
	}
}
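
// Sketch of how the pair above is meant to be used. updateStatsRacily is a
// hypothetical function whose deliberately unsynchronized accesses should
// not be reported; because the calls nest, this is safe in re-entrant code:
//
//	runtime.RaceDisable()
//	updateStatsRacily()
//	runtime.RaceEnable()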

// Private interface for the runtime.

const raceenabled = true

// For the functions below that accept callerpc and pc: callerpc is a return
// PC of the function that calls this function, and pc is the start PC of
// the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// For composite objects we have to read every address,
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.Size_, callerpc, pc)
	} else {
		// For non-composite objects reading just the start address
		// is enough, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// For composite objects we have to report every address,
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.Size_, callerpc, pc)
	} else {
		// For non-composite objects reporting just the start address
		// is enough, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
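
// raceReadObjectPC and raceWriteObjectPC are used elsewhere in the runtime
// (the channel implementation, for example) to report a typed read or write
// of a whole object on behalf of the caller identified by callerpc/pc.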

func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// racecallback is invoked (via racecallbackthunk in assembly) when the race
// runtime needs services from Go, such as symbolizing a PC or a data address.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}
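
// The race runtime hands racecallback a command plus a context struct; the
// Go side fills in the requested information (function/file/line for code,
// base/size for data) and sets res to 1 when it has an answer.
// raceGetProcCmd never reaches this function because racecallbackthunk
// answers it directly in assembly, hence the throw above.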

// raceSymbolizeCode reports the function, file and line for ctx.pc back to
// the race runtime. If pc sits inside an inlined call stack, the innermost
// reportable frame is returned and ctx.pc is advanced so that the remaining
// frames can be symbolized on subsequent callbacks.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	if fi.valid() {
		u, uf := newInlineUnwinder(fi, pc)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
				// Ignore inlined wrapper frames; report the frame
				// they were inlined into instead.
				continue
			}

			name := sf.name()
			file, line := u.fileLine(uf)
			if line == 0 {
				// Skip frames without line information.
				continue
			}
			ctx.fn = &bytes(name)[0]
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0]
			ctx.off = pc - fi.entry()
			ctx.res = 1
			if u.isInlined(uf) {
				// Leave ctx.pc at the next inline frame so the
				// outer frames are symbolized on later callbacks.
				uf = u.next(uf)
				ctx.pc = uf.pc
			}
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		// The address points into a heap object; report its base and size.
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}
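
// The race runtime uses this information to describe the memory involved in
// a report (e.g. a heap block of a given size). Only heap objects are
// resolved here; global addresses are presumably described by the race
// runtime itself from the shadow mapping set up in raceinit.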

// Race runtime functions called via racecall.

var __tsan_init byte

var __tsan_fini byte

var __tsan_proc_create byte

var __tsan_proc_destroy byte

var __tsan_map_shadow byte

var __tsan_finalizer_goroutine byte

var __tsan_go_start byte

var __tsan_go_end byte

var __tsan_malloc byte

var __tsan_free byte

var __tsan_acquire byte

var __tsan_release byte

var __tsan_release_acquire byte

var __tsan_release_merge byte

var __tsan_go_ignore_sync_begin byte

var __tsan_go_ignore_sync_end byte

var __tsan_report_count byte
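
// Each variable above is a dummy byte whose address is the entry point of
// the corresponding function in the ThreadSanitizer runtime linked into
// -race binaries; the symbols are resolved at link time. Go never calls
// them directly: their addresses are passed to racecall, as in
//
//	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)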

// start/end of global data (data+bss) covered by the race shadow.
var racedatastart uintptr
var racedataend uintptr

// start/end of the heap addresses covered by the race shadow.
var racearenastart uintptr
var racearenaend uintptr
func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall calls the race runtime function fn (one of the __tsan_* entry
// points above) with up to four arguments. It is implemented in assembly.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether addr is covered by the race shadow, i.e.
// whether it lies in the heap arena or in the global data/bss range.
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}
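
// The synchronization annotations below (raceacquire, racerelease and
// friends) bail out for addresses that fail this check, since the race
// runtime only tracks memory for which shadow has been mapped.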

// raceinit initializes the race runtime and returns the race contexts for
// the initial goroutine and the initial P.
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// The race runtime is built on top of libc; on most systems cgo is
	// required so that libc gets initialized.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round the data segment to page boundaries, because it is passed to
	// the race runtime for shadow mapping.
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}
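
// Sketch of the bounds computation above: start is the minimum and end the
// maximum over the module's data, noptrdata, bss and noptrbss sections, so
// a single __tsan_map_shadow call covers every global, and alignUp rounds
// the size up to a multiple of _PageSize.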

// racefini shuts down the race runtime at program exit.
func racefini() {
	// racefini can only be called once: __tsan_fini eventually runs the C
	// exit path, which must not execute twice. If the lock is already
	// held, the first caller is assumed to be exiting the program, so
	// later callers simply block here forever.
	lock(&raceFiniLock)

	// __tsan_fini runs C atexit handlers and C++ destructors, which can in
	// principle call back into Go. Tell the scheduler we are entering
	// external code.
	entersyscall()

	// We are entering external code that may call ExitProcess on Windows.
	osPreemptExtEnter(getg().m)

	racecall(&__tsan_fini, 0, 0, 0, 0)
}

// raceproccreate creates a race runtime context for a new P.
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

// raceprocdestroy releases the race runtime context of a P.
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

// racemapshadow asks the race runtime to map shadow memory for a newly
// allocated heap region and extends the recorded arena bounds.
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

// racegostart records the creation of a new goroutine at pc and returns its
// race context. The creation is credited to the current user goroutine if
// there is one, otherwise to the current (system) goroutine.
func racegostart(pc uintptr) uintptr {
	gp := getg()
	var spawng *g
	if gp.m.curg != nil {
		spawng = gp.m.curg
	} else {
		spawng = gp
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

// racegoend records the end of the current goroutine.
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

// racectxend records the end of the goroutine owning racectx.
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}
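
// These hooks presumably track the goroutine lifecycle in the scheduler: a
// new g gets its racectx from racegostart at creation time, and
// racegoend/racectxend close the context when a goroutine exits, racectxend
// allowing that to happen after the g struct itself has been recycled.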

// racewriterangepc reports a write of the range [addr, addr+sz) on behalf
// of the caller identified by callpc/pc.
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call came from manually instrumented runtime code running
		// on g0 or gsignal; not interesting to the race detector.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

// racereadrangepc reports a read of the range [addr, addr+sz) on behalf of
// the caller identified by callpc/pc.
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call came from manually instrumented runtime code running
		// on g0 or gsignal; not interesting to the race detector.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}
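
// When callpc is supplied, the range access is bracketed by
// racefuncenter/racefuncexit so that the resulting report attributes the
// access to the caller's frame rather than to runtime internals.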

// Internal happens-before annotations used by runtime synchronization
// primitives (channels, semaphores, and so on). The *g variants act on
// behalf of gp; all of them respect RaceDisable (raceignore) and skip
// addresses that have no race shadow.

func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

// raceacquirectx is like raceacquireg, but for a bare race context rather
// than a goroutine.
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

// racefingo marks the current goroutine as the finalizer goroutine.
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}
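
// Marking the finalizer goroutine separately presumably lets the race
// runtime apply its special handling of finalizers, which run concurrently
// with the rest of the program, when it reports races involving them.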

// The abigen_sync_atomic_* declarations below mirror the sync/atomic API.
// They have no Go bodies: the race-aware implementations are provided in
// assembly and linked to the corresponding sync/atomic functions. The
// abigen_ prefix suggests these declarations exist so the toolchain
// generates ABI wrappers for those assembly implementations.

func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)

func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)

func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)

func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)

func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)

func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)

func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)

func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)

func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)

func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)

func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)