Text file src/internal/runtime/atomic/atomic_386.s

Documentation: internal/runtime/atomic

// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"
#include "funcdata.h"

// bool Cas(int32 *val, int32 old, int32 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	}else
//		return 0;
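//
// The compare-and-swap is a single instruction: LOCK CMPXCHGL
// compares AX (old) with *ptr and, if they match, stores CX (new)
// and sets ZF; otherwise it loads *ptr into AX and clears ZF.
// SETEQ then materializes ZF as the boolean result.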
TEXT ·Cas(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

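// int32, uint32, int, uint, and uintptr are all 4 bytes on 386, so the
// typed variants below simply tail-jump to the generic 32-bit routines
// (and the 64-bit variants to their 64-bit counterparts). CasRel is a
// release-ordered CAS; a LOCK'd instruction on x86 is already
// sequentially consistent, so it too aliases Cas.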
TEXT ·Casint32(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·Casint64(SB), NOSPLIT, $0-21
	JMP	·Cas64(SB)

TEXT ·Casuintptr(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·CasRel(SB), NOSPLIT, $0-13
	JMP	·Cas(SB)

TEXT ·Loaduintptr(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Loaduint(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Storeint32(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·Storeint64(SB), NOSPLIT, $0-12
	JMP	·Store64(SB)

TEXT ·Storeuintptr(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·Xadduintptr(SB), NOSPLIT, $0-12
	JMP	·Xadd(SB)

TEXT ·Loadint32(SB), NOSPLIT, $0-8
	JMP	·Load(SB)

TEXT ·Loadint64(SB), NOSPLIT, $0-12
	JMP	·Load64(SB)

TEXT ·Xaddint32(SB), NOSPLIT, $0-12
	JMP	·Xadd(SB)

TEXT ·Xaddint64(SB), NOSPLIT, $0-20
	JMP	·Xadd64(SB)

// bool ·Cas64(uint64 *val, uint64 old, uint64 new)
// Atomically:
//	if(*val == old){
//		*val = new;
//		return 1;
//	} else {
//		return 0;
//	}
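//
// ptr must be 8-byte aligned: Go's 64-bit atomics require 64-bit
// alignment on 386 (see the bug note in sync/atomic), which the
// TESTL $7 check below enforces by panicking. CMPXCHG8B compares
// DX:AX with the 8-byte operand and, if equal, stores CX:BX there
// and sets ZF, which SETEQ copies out as the result.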
TEXT ·Cas64(SB), NOSPLIT, $0-21
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVL	old_lo+4(FP), AX
	MOVL	old_hi+8(FP), DX
	MOVL	new_lo+12(FP), BX
	MOVL	new_hi+16(FP), CX
	LOCK
	CMPXCHG8B	0(BP)
	SETEQ	ret+20(FP)
	RET

// bool Casp1(void **p, void *old, void *new)
// Atomically:
//	if(*p == old){
//		*p = new;
//		return 1;
//	}else
//		return 0;
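//
// Casp1 is Cas specialized to pointer values. It performs no write
// barrier; the caller (see the runtime's atomic_pointer.go) is
// responsible for any barrier the GC requires.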
TEXT ·Casp1(SB), NOSPLIT, $0-13
	MOVL	ptr+0(FP), BX
	MOVL	old+4(FP), AX
	MOVL	new+8(FP), CX
	LOCK
	CMPXCHGL	CX, 0(BX)
	SETEQ	ret+12(FP)
	RET

// uint32 Xadd(uint32 volatile *val, int32 delta)
// Atomically:
//	*val += delta;
//	return *val;
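//
// LOCK XADDL stores AX+*val to *val and leaves the old *val in AX;
// adding the saved delta (CX) back to AX reproduces the new value,
// which is what Xadd returns.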
TEXT ·Xadd(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	delta+4(FP), AX
	MOVL	AX, CX
	LOCK
	XADDL	AX, 0(BX)
	ADDL	CX, AX
	MOVL	AX, ret+8(FP)
	RET

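// uint64 Xadd64(uint64 volatile *val, int64 delta)
// Atomically:
//	*val += delta;
//	return *val;
// 386 has no 64-bit XADD, so the sum is computed in registers and
// published with a CMPXCHG8B retry loop: a failed compare reloads
// DX:AX with the current *val and the loop tries again.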
TEXT ·Xadd64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XADDQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// DI:SI = delta
	MOVL	delta_lo+4(FP), SI
	MOVL	delta_hi+8(FP), DI
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
addloop:
	// CX:BX = DX:AX (*addr) + DI:SI (delta)
	MOVL	AX, BX
	MOVL	DX, CX
	ADDL	SI, BX
	ADCL	DI, CX

	// if *addr == DX:AX {
	//	*addr = CX:BX
	// } else {
	//	DX:AX = *addr
	// }
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)

	JNZ	addloop

	// success
	// return CX:BX
	MOVL	BX, ret_lo+12(FP)
	MOVL	CX, ret_hi+16(FP)
	RET

// uint8 Xchg8(uint8 *ptr, uint8 new)
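// XCHG with a memory operand asserts LOCK implicitly, so no prefix is
// needed; the exchange is atomic and acts as a full memory barrier.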
TEXT ·Xchg8(SB), NOSPLIT, $0-9
	MOVL	ptr+0(FP), BX
	MOVB	new+4(FP), AX
	XCHGB	AX, 0(BX)
	MOVB	AX, ret+8(FP)
	RET

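// uint32 Xchg(uint32 volatile *ptr, uint32 new)
// Atomically:
//	old = *ptr;
//	*ptr = new;
//	return old;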
TEXT ·Xchg(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	new+4(FP), AX
	XCHGL	AX, 0(BX)
	MOVL	AX, ret+8(FP)
	RET

TEXT ·Xchgint32(SB), NOSPLIT, $0-12
	JMP	·Xchg(SB)

TEXT ·Xchgint64(SB), NOSPLIT, $0-20
	JMP	·Xchg64(SB)

TEXT ·Xchguintptr(SB), NOSPLIT, $0-12
	JMP	·Xchg(SB)

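// uint64 Xchg64(uint64 volatile *ptr, uint64 new)
// As with Xadd64, there is no 64-bit exchange instruction on 386, so
// the new value is installed with a CMPXCHG8B loop and the old value
// is returned in DX:AX.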
TEXT ·Xchg64(SB), NOSPLIT, $0-20
	NO_LOCAL_POINTERS
	// no XCHGQ so use CMPXCHG8B loop
	MOVL	ptr+0(FP), BP
	TESTL	$7, BP
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// CX:BX = new
	MOVL	new_lo+4(FP), BX
	MOVL	new_hi+8(FP), CX
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
swaploop:
	// if *addr == DX:AX
	//	*addr = CX:BX
	// else
	//	DX:AX = *addr
	// all in one instruction
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	swaploop

	// success
	// return DX:AX
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

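// void StorepNoWB(void **ptr, void *val)
// Store val into *ptr with no write barrier; callers must guarantee
// one is not needed. Here and in Store, XCHGL is used instead of MOVL
// because the implicitly LOCK'd exchange doubles as a full memory
// fence, making the store sequentially consistent.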
TEXT ·StorepNoWB(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

TEXT ·Store(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), AX
	XCHGL	AX, 0(BX)
	RET

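// Release-ordered stores need nothing extra on x86: under the TSO
// memory model a store never moves ahead of earlier loads or stores,
// and the XCHGL in Store is a full barrier besides, so the Rel
// variants alias the plain stores.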
TEXT ·StoreRel(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

TEXT ·StoreReluintptr(SB), NOSPLIT, $0-8
	JMP	·Store(SB)

// uint64 atomicload64(uint64 volatile* addr);
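//
// addr must be 8-byte aligned, as for Cas64. An aligned 8-byte MOVQ
// through an MMX register is a single atomic load on any CPU new
// enough to have MMX; EMMS then clears the MMX state so that later
// x87 floating-point code still works.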
TEXT ·Load64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	MOVQ	(AX), M0
	MOVQ	M0, ret+4(FP)
	EMMS
	RET

// void ·Store64(uint64 volatile* addr, uint64 v);
TEXT ·Store64(SB), NOSPLIT, $0-12
	NO_LOCAL_POINTERS
	MOVL	ptr+0(FP), AX
	TESTL	$7, AX
	JZ	2(PC)
	CALL	·panicUnaligned(SB)
	// MOVQ and EMMS were introduced on the Pentium MMX.
	MOVQ	val+4(FP), M0
	MOVQ	M0, (AX)
	EMMS
	// This is essentially a no-op, but it provides required memory fencing.
	// It can be replaced with MFENCE, but MFENCE was introduced only on the Pentium4 (SSE2).
	XORL	AX, AX
	LOCK
	XADDL	AX, (SP)
	RET

// void	·Or8(byte volatile*, byte);
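//
// No old value is returned, so a single LOCK'd instruction suffices;
// the value-returning variants (And32, Or32, And64, Or64 below) need
// a CAS loop instead.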
TEXT ·Or8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ORB	BX, (AX)
	RET

// void	·And8(byte volatile*, byte);
TEXT ·And8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), AX
	MOVB	val+4(FP), BX
	LOCK
	ANDB	BX, (AX)
	RET

TEXT ·Store8(SB), NOSPLIT, $0-5
	MOVL	ptr+0(FP), BX
	MOVB	val+4(FP), AX
	XCHGB	AX, 0(BX)
	RET

// func Or(addr *uint32, v uint32)
TEXT ·Or(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ORL	BX, (AX)
	RET

// func And(addr *uint32, v uint32)
TEXT ·And(SB), NOSPLIT, $0-8
	MOVL	ptr+0(FP), AX
	MOVL	val+4(FP), BX
	LOCK
	ANDL	BX, (AX)
	RET

// func And32(addr *uint32, v uint32) old uint32
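//
// x86 has no instruction that atomically ANDs and returns the old
// value, so And32 loads *addr into AX, computes AX&v in DX, and
// publishes it with LOCK CMPXCHGL; if another writer got there first
// the compare fails and the loop retries. On success AX still holds
// the old value, which is returned. Or32 below has the same shape
// with ORL.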
TEXT ·And32(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ANDL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+8(FP)
	RET

// func Or32(addr *uint32, v uint32) old uint32
TEXT ·Or32(SB), NOSPLIT, $0-12
	MOVL	ptr+0(FP), BX
	MOVL	val+4(FP), CX
casloop:
	MOVL	CX, DX
	MOVL	(BX), AX
	ORL	AX, DX
	LOCK
	CMPXCHGL	DX, (BX)
	JNZ	casloop
	MOVL	AX, ret+8(FP)
	RET

// func And64(addr *uint64, v uint64) old uint64
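//
// The same CAS-loop shape as And32, widened to 64 bits with
// CMPXCHG8B: the mask is applied to each 32-bit half and the old
// value is returned in DX:AX.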
TEXT ·And64(SB), NOSPLIT, $0-20
	MOVL	ptr+0(FP), BP
	// DI:SI = v
	MOVL	val_lo+4(FP), SI
	MOVL	val_hi+8(FP), DI
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
casloop:
	// CX:BX = DX:AX (*addr) & DI:SI (mask)
	MOVL	AX, BX
	MOVL	DX, CX
	ANDL	SI, BX
	ANDL	DI, CX
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	casloop
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET


// func Or64(addr *uint64, v uint64) old uint64
TEXT ·Or64(SB), NOSPLIT, $0-20
	MOVL	ptr+0(FP), BP
	// DI:SI = v
	MOVL	val_lo+4(FP), SI
	MOVL	val_hi+8(FP), DI
	// DX:AX = *addr
	MOVL	0(BP), AX
	MOVL	4(BP), DX
casloop:
	// CX:BX = DX:AX (*addr) | DI:SI (mask)
	MOVL	AX, BX
	MOVL	DX, CX
	ORL	SI, BX
	ORL	DI, CX
	LOCK
	CMPXCHG8B	0(BP)
	JNZ	casloop
	MOVL	AX, ret_lo+12(FP)
	MOVL	DX, ret_hi+16(FP)
	RET

// func Anduintptr(addr *uintptr, v uintptr) old uintptr
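//
// uintptr is 32 bits on 386, so the uintptr variants alias the
// 32-bit routines.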
TEXT ·Anduintptr(SB), NOSPLIT, $0-12
	JMP	·And32(SB)

// func Oruintptr(addr *uintptr, v uintptr) old uintptr
TEXT ·Oruintptr(SB), NOSPLIT, $0-12
	JMP	·Or32(SB)
