Source file src/runtime/race.go

// Copyright 2012 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:build race

package runtime

import (
	"internal/abi"
	"unsafe"
)

// Public race detection API, present iff built with -race.

func RaceRead(addr unsafe.Pointer)
func RaceWrite(addr unsafe.Pointer)
func RaceReadRange(addr unsafe.Pointer, len int)
func RaceWriteRange(addr unsafe.Pointer, len int)

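// A minimal sketch of how these annotations might be used (illustrative only,
// not part of this file): code that writes memory through a helper the
// compiler cannot instrument can report the access itself. cFill is a
// hypothetical uninstrumented writer.
//
//	// p points at n bytes about to be written by an uninstrumented helper.
//	func fill(p unsafe.Pointer, n int) {
//		runtime.RaceWriteRange(p, n) // tell the detector about the write
//		cFill(p, n)
//	}
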
func RaceErrors() int {
	var n uint64
	racecall(&__tsan_report_count, uintptr(unsafe.Pointer(&n)), 0, 0, 0)
	return int(n)
}
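
// For example, a test harness might poll RaceErrors to fail fast once a
// report has been produced (a sketch; checkNoRaces is hypothetical):
//
//	func checkNoRaces(t *testing.T) {
//		if n := runtime.RaceErrors(); n > 0 {
//			t.Fatalf("race detector reported %d errors", n)
//		}
//	}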

// RaceAcquire/RaceRelease/RaceReleaseMerge establish happens-before relations
// between goroutines. These inform the race detector about actual synchronization
// that it can't see for some reason (e.g. synchronization within RaceDisable/RaceEnable
// sections of code).
// RaceAcquire establishes a happens-before relation with the preceding
// RaceReleaseMerge on addr up to and including the last RaceRelease on addr.
// In terms of the C memory model (C11 §5.1.2.4, §7.17.3),
// RaceAcquire is equivalent to atomic_load(memory_order_acquire).
//
//go:nosplit
func RaceAcquire(addr unsafe.Pointer) {
	raceacquire(addr)
}

// RaceRelease performs a release operation on addr that
// can synchronize with a later RaceAcquire on addr.
//
// In terms of the C memory model, RaceRelease is equivalent to
// atomic_store(memory_order_release).
//
//go:nosplit
func RaceRelease(addr unsafe.Pointer) {
	racerelease(addr)
}

// RaceReleaseMerge is like RaceRelease, but also establishes a happens-before
// relation with the preceding RaceRelease or RaceReleaseMerge on addr.
//
// In terms of the C memory model, RaceReleaseMerge is equivalent to
// atomic_exchange(memory_order_release).
//
//go:nosplit
func RaceReleaseMerge(addr unsafe.Pointer) {
	racereleasemerge(addr)
}
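
// As an illustration (a sketch, not part of this file), a notification built
// on a primitive the detector cannot see could annotate its happens-before
// edge explicitly; storeUninstrumented and loadUninstrumented are hypothetical:
//
//	var done uint32
//
//	func publish() {
//		runtime.RaceRelease(unsafe.Pointer(&done))
//		storeUninstrumented(&done, 1) // assumed invisible to the detector
//	}
//
//	func wait() {
//		for loadUninstrumented(&done) == 0 {
//		}
//		runtime.RaceAcquire(unsafe.Pointer(&done))
//	}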

// RaceDisable disables handling of race synchronization events in the current goroutine.
// Handling is re-enabled with RaceEnable. RaceDisable/RaceEnable can be nested.
// Non-synchronization events (memory accesses, function entry/exit) still affect
// the race detector.
//
//go:nosplit
func RaceDisable() {
	gp := getg()
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_begin, gp.racectx, 0, 0, 0)
	}
	gp.raceignore++
}

// RaceEnable re-enables handling of race events in the current goroutine.
//
//go:nosplit
func RaceEnable() {
	gp := getg()
	gp.raceignore--
	if gp.raceignore == 0 {
		racecall(&__tsan_go_ignore_sync_end, gp.racectx, 0, 0, 0)
	}
}
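
// A sketch of the intended bracketing (illustrative; doRacyByDesign is a
// hypothetical function whose internal synchronization would otherwise
// confuse the detector):
//
//	runtime.RaceDisable()
//	doRacyByDesign() // sync events here are ignored; memory accesses are not
//	runtime.RaceEnable()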

// Private interface for the runtime.

const raceenabled = true

// For all functions accepting callerpc and pc,
// callerpc is a return PC of the function that calls this function,
// pc is start PC of the function that calls this function.
func raceReadObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// for composite objects we have to read every address
		// because a write might happen to any subobject.
		racereadrangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can read just the start
		// address, as any write must write the first byte.
		racereadpc(addr, callerpc, pc)
	}
}

func raceWriteObjectPC(t *_type, addr unsafe.Pointer, callerpc, pc uintptr) {
	kind := t.Kind_ & abi.KindMask
	if kind == abi.Array || kind == abi.Struct {
		// for composite objects we have to write every address
		// because a write might happen to any subobject.
		racewriterangepc(addr, t.Size_, callerpc, pc)
	} else {
		// for non-composite objects we can write just the start
		// address, as any write must write the first byte.
		racewritepc(addr, callerpc, pc)
	}
}
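
// To illustrate the callerpc/pc convention above (a sketch of a hypothetical
// caller, not part of this file): a routine instrumenting a typed copy on
// behalf of its own caller would pass its caller's return PC plus its own
// entry PC:
//
//	func copyObject(t *_type, dst, src unsafe.Pointer) {
//		callerpc := getcallerpc()
//		pc := abi.FuncPCABIInternal(copyObject)
//		raceWriteObjectPC(t, dst, callerpc, pc)
//		raceReadObjectPC(t, src, callerpc, pc)
//		// ... perform the copy ...
//	}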

//go:noescape
func racereadpc(addr unsafe.Pointer, callpc, pc uintptr)

//go:noescape
func racewritepc(addr unsafe.Pointer, callpc, pc uintptr)

type symbolizeCodeContext struct {
	pc   uintptr
	fn   *byte
	file *byte
	line uintptr
	off  uintptr
	res  uintptr
}

var qq = [...]byte{'?', '?', 0}
var dash = [...]byte{'-', 0}

const (
	raceGetProcCmd = iota
	raceSymbolizeCodeCmd
	raceSymbolizeDataCmd
)

// Callback from C into Go, runs on g0.
func racecallback(cmd uintptr, ctx unsafe.Pointer) {
	switch cmd {
	case raceGetProcCmd:
		throw("should have been handled by racecallbackthunk")
	case raceSymbolizeCodeCmd:
		raceSymbolizeCode((*symbolizeCodeContext)(ctx))
	case raceSymbolizeDataCmd:
		raceSymbolizeData((*symbolizeDataContext)(ctx))
	default:
		throw("unknown command")
	}
}

// raceSymbolizeCode reads ctx.pc and populates the rest of *ctx with
// information about the code at that pc.
//
// The race detector has already subtracted 1 from pcs, so they point to the last
// byte of call instructions (including calls to runtime.racewrite and friends).
//
// If the incoming pc is part of an inlined function, *ctx is populated
// with information about the inlined function, and on return ctx.pc is set
// to a pc in the logically containing function. (The race detector should call this
// function again with that pc.)
//
// If the incoming pc is not part of an inlined function, the return pc is unchanged.
func raceSymbolizeCode(ctx *symbolizeCodeContext) {
	pc := ctx.pc
	fi := findfunc(pc)
	if fi.valid() {
		u, uf := newInlineUnwinder(fi, pc)
		for ; uf.valid(); uf = u.next(uf) {
			sf := u.srcFunc(uf)
			if sf.funcID == abi.FuncIDWrapper && u.isInlined(uf) {
				// Ignore wrappers, unless we're at the outermost frame of u.
				// A non-inlined wrapper frame always means we have a physical
				// frame consisting entirely of wrappers, in which case we'll
				// take an outermost wrapper over nothing.
				continue
			}

			name := sf.name()
			file, line := u.fileLine(uf)
			if line == 0 {
				// Failure to symbolize
				continue
			}
			ctx.fn = &bytes(name)[0] // assume NUL-terminated
			ctx.line = uintptr(line)
			ctx.file = &bytes(file)[0] // assume NUL-terminated
			ctx.off = pc - fi.entry()
			ctx.res = 1
			if u.isInlined(uf) {
				// Set ctx.pc to the "caller" so the race detector calls this again
				// to further unwind.
				uf = u.next(uf)
				ctx.pc = uf.pc
			}
			return
		}
	}
	ctx.fn = &qq[0]
	ctx.file = &dash[0]
	ctx.line = 0
	ctx.off = ctx.pc
	ctx.res = 1
}
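
// The resulting contract with the detector can be pictured as a loop like the
// following (a Go-flavored sketch of the C side; emitFrame is hypothetical):
//
//	for {
//		ctx := &symbolizeCodeContext{pc: pc}
//		raceSymbolizeCode(ctx) // fills fn/file/line for the innermost frame
//		emitFrame(ctx)         // record one frame of the report
//		if ctx.pc == pc {
//			break // not inlined; done at this pc
//		}
//		pc = ctx.pc // inlined: ask again with the logical caller's pc
//	}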

type symbolizeDataContext struct {
	addr  uintptr
	heap  uintptr
	start uintptr
	size  uintptr
	name  *byte
	file  *byte
	line  uintptr
	res   uintptr
}

func raceSymbolizeData(ctx *symbolizeDataContext) {
	if base, span, _ := findObject(ctx.addr, 0, 0); base != 0 {
		// TODO: Does this need to handle malloc headers?
		ctx.heap = 1
		ctx.start = base
		ctx.size = span.elemsize
		ctx.res = 1
	}
}

// Race runtime functions called via runtime·racecall.
//
//go:linkname __tsan_init __tsan_init
var __tsan_init byte

//go:linkname __tsan_fini __tsan_fini
var __tsan_fini byte

//go:linkname __tsan_proc_create __tsan_proc_create
var __tsan_proc_create byte

//go:linkname __tsan_proc_destroy __tsan_proc_destroy
var __tsan_proc_destroy byte

//go:linkname __tsan_map_shadow __tsan_map_shadow
var __tsan_map_shadow byte

//go:linkname __tsan_finalizer_goroutine __tsan_finalizer_goroutine
var __tsan_finalizer_goroutine byte

//go:linkname __tsan_go_start __tsan_go_start
var __tsan_go_start byte

//go:linkname __tsan_go_end __tsan_go_end
var __tsan_go_end byte

//go:linkname __tsan_malloc __tsan_malloc
var __tsan_malloc byte

//go:linkname __tsan_free __tsan_free
var __tsan_free byte

//go:linkname __tsan_acquire __tsan_acquire
var __tsan_acquire byte

//go:linkname __tsan_release __tsan_release
var __tsan_release byte

//go:linkname __tsan_release_acquire __tsan_release_acquire
var __tsan_release_acquire byte

//go:linkname __tsan_release_merge __tsan_release_merge
var __tsan_release_merge byte

//go:linkname __tsan_go_ignore_sync_begin __tsan_go_ignore_sync_begin
var __tsan_go_ignore_sync_begin byte

//go:linkname __tsan_go_ignore_sync_end __tsan_go_ignore_sync_end
var __tsan_go_ignore_sync_end byte

//go:linkname __tsan_report_count __tsan_report_count
var __tsan_report_count byte

// Mimic what cmd/cgo would do.
//
//go:cgo_import_static __tsan_init
//go:cgo_import_static __tsan_fini
//go:cgo_import_static __tsan_proc_create
//go:cgo_import_static __tsan_proc_destroy
//go:cgo_import_static __tsan_map_shadow
//go:cgo_import_static __tsan_finalizer_goroutine
//go:cgo_import_static __tsan_go_start
//go:cgo_import_static __tsan_go_end
//go:cgo_import_static __tsan_malloc
//go:cgo_import_static __tsan_free
//go:cgo_import_static __tsan_acquire
//go:cgo_import_static __tsan_release
//go:cgo_import_static __tsan_release_acquire
//go:cgo_import_static __tsan_release_merge
//go:cgo_import_static __tsan_go_ignore_sync_begin
//go:cgo_import_static __tsan_go_ignore_sync_end
//go:cgo_import_static __tsan_report_count

// These are called from race_amd64.s.
//
//go:cgo_import_static __tsan_read
//go:cgo_import_static __tsan_read_pc
//go:cgo_import_static __tsan_read_range
//go:cgo_import_static __tsan_write
//go:cgo_import_static __tsan_write_pc
//go:cgo_import_static __tsan_write_range
//go:cgo_import_static __tsan_func_enter
//go:cgo_import_static __tsan_func_exit

//go:cgo_import_static __tsan_go_atomic32_load
//go:cgo_import_static __tsan_go_atomic64_load
//go:cgo_import_static __tsan_go_atomic32_store
//go:cgo_import_static __tsan_go_atomic64_store
//go:cgo_import_static __tsan_go_atomic32_exchange
//go:cgo_import_static __tsan_go_atomic64_exchange
//go:cgo_import_static __tsan_go_atomic32_fetch_add
//go:cgo_import_static __tsan_go_atomic64_fetch_add
//go:cgo_import_static __tsan_go_atomic32_fetch_and
//go:cgo_import_static __tsan_go_atomic64_fetch_and
//go:cgo_import_static __tsan_go_atomic32_fetch_or
//go:cgo_import_static __tsan_go_atomic64_fetch_or
//go:cgo_import_static __tsan_go_atomic32_compare_exchange
//go:cgo_import_static __tsan_go_atomic64_compare_exchange

// start/end of global data (data+bss).
var racedatastart uintptr
var racedataend uintptr

// start/end of heap for race_amd64.s
var racearenastart uintptr
var racearenaend uintptr

func racefuncenter(callpc uintptr)
func racefuncenterfp(fp uintptr)
func racefuncexit()
func raceread(addr uintptr)
func racewrite(addr uintptr)
func racereadrange(addr, size uintptr)
func racewriterange(addr, size uintptr)
func racereadrangepc1(addr, size, pc uintptr)
func racewriterangepc1(addr, size, pc uintptr)
func racecallbackthunk(uintptr)

// racecall allows calling an arbitrary function fn from the C race runtime
// with up to 4 uintptr arguments.
func racecall(fn *byte, arg0, arg1, arg2, arg3 uintptr)

// isvalidaddr reports whether the address has shadow (i.e. heap or data/bss).
//
//go:nosplit
func isvalidaddr(addr unsafe.Pointer) bool {
	return racearenastart <= uintptr(addr) && uintptr(addr) < racearenaend ||
		racedatastart <= uintptr(addr) && uintptr(addr) < racedataend
}

//go:nosplit
func raceinit() (gctx, pctx uintptr) {
	lockInit(&raceFiniLock, lockRankRaceFini)

	// On most machines, cgo is required to initialize libc, which is used by the race runtime.
	if !iscgo && GOOS != "darwin" {
		throw("raceinit: race build must use cgo")
	}

	racecall(&__tsan_init, uintptr(unsafe.Pointer(&gctx)), uintptr(unsafe.Pointer(&pctx)), abi.FuncPCABI0(racecallbackthunk), 0)

	// Round the data segment to page boundaries, because it's used in mmap().
	start := ^uintptr(0)
	end := uintptr(0)
	if start > firstmoduledata.noptrdata {
		start = firstmoduledata.noptrdata
	}
	if start > firstmoduledata.data {
		start = firstmoduledata.data
	}
	if start > firstmoduledata.noptrbss {
		start = firstmoduledata.noptrbss
	}
	if start > firstmoduledata.bss {
		start = firstmoduledata.bss
	}
	if end < firstmoduledata.enoptrdata {
		end = firstmoduledata.enoptrdata
	}
	if end < firstmoduledata.edata {
		end = firstmoduledata.edata
	}
	if end < firstmoduledata.enoptrbss {
		end = firstmoduledata.enoptrbss
	}
	if end < firstmoduledata.ebss {
		end = firstmoduledata.ebss
	}
	size := alignUp(end-start, _PageSize)
	racecall(&__tsan_map_shadow, start, size, 0, 0)
	racedatastart = start
	racedataend = start + size

	return
}

//go:nosplit
func racefini() {
	// racefini() can only be called once to avoid races.
	// This eventually (via __tsan_fini) calls C.exit which has
	// undefined behavior if called more than once. If the lock is
	// already held, it's assumed that the first caller exits the program
	// so other calls can hang forever without an issue.
	lock(&raceFiniLock)

	// __tsan_fini will run C atexit functions and C++ destructors,
	// which can theoretically call back into Go.
	// Tell the scheduler we're entering external code.
	entersyscall()

	// We're entering external code that may call ExitProcess on
	// Windows.
	osPreemptExtEnter(getg().m)

	racecall(&__tsan_fini, 0, 0, 0, 0)
}

//go:nosplit
func raceproccreate() uintptr {
	var ctx uintptr
	racecall(&__tsan_proc_create, uintptr(unsafe.Pointer(&ctx)), 0, 0, 0)
	return ctx
}

//go:nosplit
func raceprocdestroy(ctx uintptr) {
	racecall(&__tsan_proc_destroy, ctx, 0, 0, 0)
}

//go:nosplit
func racemapshadow(addr unsafe.Pointer, size uintptr) {
	if racearenastart == 0 {
		racearenastart = uintptr(addr)
	}
	if racearenaend < uintptr(addr)+size {
		racearenaend = uintptr(addr) + size
	}
	racecall(&__tsan_map_shadow, uintptr(addr), size, 0, 0)
}

//go:nosplit
func racemalloc(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_malloc, 0, 0, uintptr(p), sz)
}

//go:nosplit
func racefree(p unsafe.Pointer, sz uintptr) {
	racecall(&__tsan_free, uintptr(p), sz, 0, 0)
}

//go:nosplit
func racegostart(pc uintptr) uintptr {
	gp := getg()
	var spawng *g
	if gp.m.curg != nil {
		spawng = gp.m.curg
	} else {
		spawng = gp
	}

	var racectx uintptr
	racecall(&__tsan_go_start, spawng.racectx, uintptr(unsafe.Pointer(&racectx)), pc, 0)
	return racectx
}

//go:nosplit
func racegoend() {
	racecall(&__tsan_go_end, getg().racectx, 0, 0, 0)
}

//go:nosplit
func racectxend(racectx uintptr) {
	racecall(&__tsan_go_end, racectx, 0, 0, 0)
}

//go:nosplit
func racewriterangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racewriterangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func racereadrangepc(addr unsafe.Pointer, sz, callpc, pc uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		// The call is coming from manual instrumentation of Go code running on g0/gsignal.
		// Not interesting.
		return
	}
	if callpc != 0 {
		racefuncenter(callpc)
	}
	racereadrangepc1(uintptr(addr), sz, pc)
	if callpc != 0 {
		racefuncexit()
	}
}

//go:nosplit
func raceacquire(addr unsafe.Pointer) {
	raceacquireg(getg(), addr)
}

//go:nosplit
func raceacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func raceacquirectx(racectx uintptr, addr unsafe.Pointer) {
	if !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_acquire, racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racerelease(addr unsafe.Pointer) {
	racereleaseg(getg(), addr)
}

//go:nosplit
func racereleaseg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleaseacquire(addr unsafe.Pointer) {
	racereleaseacquireg(getg(), addr)
}

//go:nosplit
func racereleaseacquireg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_acquire, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racereleasemerge(addr unsafe.Pointer) {
	racereleasemergeg(getg(), addr)
}

//go:nosplit
func racereleasemergeg(gp *g, addr unsafe.Pointer) {
	if getg().raceignore != 0 || !isvalidaddr(addr) {
		return
	}
	racecall(&__tsan_release_merge, gp.racectx, uintptr(addr), 0, 0)
}

//go:nosplit
func racefingo() {
	racecall(&__tsan_finalizer_goroutine, getg().racectx, 0, 0, 0)
}

// The declarations below generate ABI wrappers for functions
// implemented in assembly in this package but declared in another
// package.

//go:linkname abigen_sync_atomic_LoadInt32 sync/atomic.LoadInt32
func abigen_sync_atomic_LoadInt32(addr *int32) (val int32)

//go:linkname abigen_sync_atomic_LoadInt64 sync/atomic.LoadInt64
func abigen_sync_atomic_LoadInt64(addr *int64) (val int64)

//go:linkname abigen_sync_atomic_LoadUint32 sync/atomic.LoadUint32
func abigen_sync_atomic_LoadUint32(addr *uint32) (val uint32)

//go:linkname abigen_sync_atomic_LoadUint64 sync/atomic.LoadUint64
func abigen_sync_atomic_LoadUint64(addr *uint64) (val uint64)

//go:linkname abigen_sync_atomic_LoadUintptr sync/atomic.LoadUintptr
func abigen_sync_atomic_LoadUintptr(addr *uintptr) (val uintptr)

//go:linkname abigen_sync_atomic_LoadPointer sync/atomic.LoadPointer
func abigen_sync_atomic_LoadPointer(addr *unsafe.Pointer) (val unsafe.Pointer)

//go:linkname abigen_sync_atomic_StoreInt32 sync/atomic.StoreInt32
func abigen_sync_atomic_StoreInt32(addr *int32, val int32)

//go:linkname abigen_sync_atomic_StoreInt64 sync/atomic.StoreInt64
func abigen_sync_atomic_StoreInt64(addr *int64, val int64)

//go:linkname abigen_sync_atomic_StoreUint32 sync/atomic.StoreUint32
func abigen_sync_atomic_StoreUint32(addr *uint32, val uint32)

//go:linkname abigen_sync_atomic_StoreUint64 sync/atomic.StoreUint64
func abigen_sync_atomic_StoreUint64(addr *uint64, val uint64)

//go:linkname abigen_sync_atomic_SwapInt32 sync/atomic.SwapInt32
func abigen_sync_atomic_SwapInt32(addr *int32, new int32) (old int32)

//go:linkname abigen_sync_atomic_SwapInt64 sync/atomic.SwapInt64
func abigen_sync_atomic_SwapInt64(addr *int64, new int64) (old int64)

//go:linkname abigen_sync_atomic_SwapUint32 sync/atomic.SwapUint32
func abigen_sync_atomic_SwapUint32(addr *uint32, new uint32) (old uint32)

//go:linkname abigen_sync_atomic_SwapUint64 sync/atomic.SwapUint64
func abigen_sync_atomic_SwapUint64(addr *uint64, new uint64) (old uint64)

//go:linkname abigen_sync_atomic_AddInt32 sync/atomic.AddInt32
func abigen_sync_atomic_AddInt32(addr *int32, delta int32) (new int32)

//go:linkname abigen_sync_atomic_AddUint32 sync/atomic.AddUint32
func abigen_sync_atomic_AddUint32(addr *uint32, delta uint32) (new uint32)

//go:linkname abigen_sync_atomic_AddInt64 sync/atomic.AddInt64
func abigen_sync_atomic_AddInt64(addr *int64, delta int64) (new int64)

//go:linkname abigen_sync_atomic_AddUint64 sync/atomic.AddUint64
func abigen_sync_atomic_AddUint64(addr *uint64, delta uint64) (new uint64)

//go:linkname abigen_sync_atomic_AddUintptr sync/atomic.AddUintptr
func abigen_sync_atomic_AddUintptr(addr *uintptr, delta uintptr) (new uintptr)

//go:linkname abigen_sync_atomic_AndInt32 sync/atomic.AndInt32
func abigen_sync_atomic_AndInt32(addr *int32, mask int32) (old int32)

//go:linkname abigen_sync_atomic_AndUint32 sync/atomic.AndUint32
func abigen_sync_atomic_AndUint32(addr *uint32, mask uint32) (old uint32)

//go:linkname abigen_sync_atomic_AndInt64 sync/atomic.AndInt64
func abigen_sync_atomic_AndInt64(addr *int64, mask int64) (old int64)

//go:linkname abigen_sync_atomic_AndUint64 sync/atomic.AndUint64
func abigen_sync_atomic_AndUint64(addr *uint64, mask uint64) (old uint64)

//go:linkname abigen_sync_atomic_AndUintptr sync/atomic.AndUintptr
func abigen_sync_atomic_AndUintptr(addr *uintptr, mask uintptr) (old uintptr)

//go:linkname abigen_sync_atomic_OrInt32 sync/atomic.OrInt32
func abigen_sync_atomic_OrInt32(addr *int32, mask int32) (old int32)

//go:linkname abigen_sync_atomic_OrUint32 sync/atomic.OrUint32
func abigen_sync_atomic_OrUint32(addr *uint32, mask uint32) (old uint32)

//go:linkname abigen_sync_atomic_OrInt64 sync/atomic.OrInt64
func abigen_sync_atomic_OrInt64(addr *int64, mask int64) (old int64)

//go:linkname abigen_sync_atomic_OrUint64 sync/atomic.OrUint64
func abigen_sync_atomic_OrUint64(addr *uint64, mask uint64) (old uint64)

//go:linkname abigen_sync_atomic_OrUintptr sync/atomic.OrUintptr
func abigen_sync_atomic_OrUintptr(addr *uintptr, mask uintptr) (old uintptr)

//go:linkname abigen_sync_atomic_CompareAndSwapInt32 sync/atomic.CompareAndSwapInt32
func abigen_sync_atomic_CompareAndSwapInt32(addr *int32, old, new int32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapInt64 sync/atomic.CompareAndSwapInt64
func abigen_sync_atomic_CompareAndSwapInt64(addr *int64, old, new int64) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint32 sync/atomic.CompareAndSwapUint32
func abigen_sync_atomic_CompareAndSwapUint32(addr *uint32, old, new uint32) (swapped bool)

//go:linkname abigen_sync_atomic_CompareAndSwapUint64 sync/atomic.CompareAndSwapUint64
func abigen_sync_atomic_CompareAndSwapUint64(addr *uint64, old, new uint64) (swapped bool)