// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "textflag.h"

// func Store(ptr *uint32, val uint32)
// Atomically store val into *ptr.
TEXT ·Store(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVWZ	val+8(FP), R3
	MOVW	R3, 0(R2)
	SYNC	// serialize so the store is globally visible before later accesses
	RET
14
// func Store8(ptr *uint8, val uint8)
// Atomically store the byte val into *ptr.
TEXT ·Store8(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVB	val+8(FP), R3
	MOVB	R3, 0(R2)
	SYNC	// serialize so the store is globally visible before later accesses
	RET
22
// func Store64(ptr *uint64, val uint64)
// Atomically store val into *ptr.
TEXT ·Store64(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC	// serialize so the store is globally visible before later accesses
	RET
30
// func StorepNoWB(ptr unsafe.Pointer, val unsafe.Pointer)
// Atomically store the pointer val into *ptr, without a write barrier.
// The caller is responsible for any GC write-barrier obligations.
TEXT ·StorepNoWB(SB), NOSPLIT, $0
	MOVD	ptr+0(FP), R2
	MOVD	val+8(FP), R3
	MOVD	R3, 0(R2)
	SYNC	// serialize so the store is globally visible before later accesses
	RET
38
// func Cas(ptr *uint32, old, new uint32) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas(SB), NOSPLIT, $0-17
	MOVD	ptr+0(FP), R3
	MOVWZ	old+8(FP), R4
	MOVWZ	new+12(FP), R5
	CS	R4, R5, 0(R3)	// if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas_fail	// condition code is nonzero when the compare failed
	MOVB	$1, ret+16(FP)
	RET
cas_fail:
	MOVB	$0, ret+16(FP)
	RET
58
// func Cas64(ptr *uint64, old, new uint64) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
TEXT ·Cas64(SB), NOSPLIT, $0-25
	MOVD	ptr+0(FP), R3
	MOVD	old+8(FP), R4
	MOVD	new+16(FP), R5
	CSG	R4, R5, 0(R3)	// if (R4 == 0(R3)) then 0(R3)= R5
	BNE	cas64_fail	// condition code is nonzero when the compare failed
	MOVB	$1, ret+24(FP)
	RET
cas64_fail:
	MOVB	$0, ret+24(FP)
	RET
78
// func Casint32(ptr *int32, old, new int32) bool
// int32 has the same representation as uint32; tail-call the uint32 version.
TEXT ·Casint32(SB), NOSPLIT, $0-17
	BR	·Cas(SB)
82
// func Casint64(ptr *int64, old, new int64) bool
// int64 has the same representation as uint64; tail-call the uint64 version.
TEXT ·Casint64(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)
86
// func Casuintptr(ptr *uintptr, old, new uintptr) bool
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Casuintptr(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)
90
// func CasRel(ptr *uint32, old, new uint32) bool
// Release-ordered CAS; the plain Cas is already fully ordered here.
TEXT ·CasRel(SB), NOSPLIT, $0-17
	BR	·Cas(SB)
94
// func Loaduintptr(ptr *uintptr) uintptr
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Loaduintptr(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
98
// func Loaduint(ptr *uint) uint
// uint is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Loaduint(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
102
// func Storeint32(ptr *int32, new int32)
// int32 has the same representation as uint32; tail-call the uint32 version.
TEXT ·Storeint32(SB), NOSPLIT, $0-12
	BR	·Store(SB)
106
// func Storeint64(ptr *int64, new int64)
// int64 has the same representation as uint64; tail-call the uint64 version.
TEXT ·Storeint64(SB), NOSPLIT, $0-16
	BR	·Store64(SB)
110
// func Storeuintptr(ptr *uintptr, new uintptr)
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Storeuintptr(SB), NOSPLIT, $0-16
	BR	·Store64(SB)
114
// func Loadint32(ptr *int32) int32
// int32 has the same representation as uint32; tail-call the uint32 version.
TEXT ·Loadint32(SB), NOSPLIT, $0-12
	BR	·Load(SB)
118
// func Loadint64(ptr *int64) int64
// int64 has the same representation as uint64; tail-call the uint64 version.
TEXT ·Loadint64(SB), NOSPLIT, $0-16
	BR	·Load64(SB)
122
// func Xadduintptr(ptr *uintptr, delta uintptr) uintptr
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Xadduintptr(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)
126
// func Xaddint32(ptr *int32, delta int32) int32
// int32 has the same representation as uint32; tail-call the uint32 version.
TEXT ·Xaddint32(SB), NOSPLIT, $0-20
	BR	·Xadd(SB)
130
// func Xaddint64(ptr *int64, delta int64) int64
// int64 has the same representation as uint64; tail-call the uint64 version.
TEXT ·Xaddint64(SB), NOSPLIT, $0-24
	BR	·Xadd64(SB)
134
// func Casp1(ptr *unsafe.Pointer, old, new unsafe.Pointer) bool
// Atomically:
//	if *ptr == old {
//		*ptr = new
//		return 1
//	} else {
//		return 0
//	}
// Pointers are 64 bits on s390x; tail-call the uint64 version.
TEXT ·Casp1(SB), NOSPLIT, $0-25
	BR	·Cas64(SB)
145
// func Xadd(ptr *uint32, delta int32) uint32
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	delta+8(FP), R5
	MOVW	(R4), R3
repeat:
	ADD	R5, R3, R6
	CS	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CS reloaded R3 with the current value; retry
	MOVW	R6, ret+16(FP)	// return the updated value
	RET
160
// func Xadd64(ptr *uint64, delta int64) uint64
// Atomically:
//	*ptr += delta
//	return *ptr
TEXT ·Xadd64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	delta+8(FP), R5
	MOVD	(R4), R3
repeat:
	ADD	R5, R3, R6
	CSG	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CSG reloaded R3 with the current value; retry
	MOVD	R6, ret+16(FP)	// return the updated value
	RET
172
// func Xchg(ptr *uint32, new uint32) uint32
// Atomically store new into *ptr and return the previous value.
TEXT ·Xchg(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	new+8(FP), R3
	MOVW	(R4), R6
repeat:
	CS	R6, R3, (R4)	// if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat	// on failure CS reloaded R6 with the current value; retry
	MOVW	R6, ret+16(FP)	// return the old value
	RET
183
// func Xchg64(ptr *uint64, new uint64) uint64
// Atomically store new into *ptr and return the previous value.
TEXT ·Xchg64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	new+8(FP), R3
	MOVD	(R4), R6
repeat:
	CSG	R6, R3, (R4)	// if R6==(R4) then (R4)=R3 else R6=(R4)
	BNE	repeat	// on failure CSG reloaded R6 with the current value; retry
	MOVD	R6, ret+16(FP)	// return the old value
	RET
194
// func Xchgint32(ptr *int32, new int32) int32
// int32 has the same representation as uint32; tail-call the uint32 version.
TEXT ·Xchgint32(SB), NOSPLIT, $0-20
	BR	·Xchg(SB)
198
// func Xchgint64(ptr *int64, new int64) int64
// int64 has the same representation as uint64; tail-call the uint64 version.
TEXT ·Xchgint64(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)
202
// func Xchguintptr(ptr *uintptr, new uintptr) uintptr
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Xchguintptr(SB), NOSPLIT, $0-24
	BR	·Xchg64(SB)
206
// func Or8(ptr *uint8, val uint8)
// Atomically: *ptr |= val.
TEXT ·Or8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes so we
	// need to align ptr down to a word boundary and create a mask
	// containing val to OR with the entire word atomically.
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5	// R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3	// R3 = floor(ptr, 4) = ptr &^ 3
	SLW	R5, R4	// R4 = uint32(val) << R5
	LAO	R4, R6, 0(R3)	// R6 = *R3; *R3 |= R4; (atomic)
	RET
220
// func And8(ptr *uint8, val uint8)
// Atomically: *ptr &= val.
TEXT ·And8(SB), NOSPLIT, $0-9
	MOVD	ptr+0(FP), R3
	MOVBZ	val+8(FP), R4
	// We don't have atomic operations that work on individual bytes so we
	// need to align ptr down to a word boundary and create a mask
	// containing val to AND with the entire word atomically.
	ORW	$~0xff, R4	// R4 = uint32(val) | 0xffffff00, so other bytes are preserved
	MOVD	$(3<<3), R5
	RXSBG	$59, $60, $3, R3, R5	// R5 = 24 - ((ptr % 4) * 8) = ((ptr & 3) << 3) ^ (3 << 3)
	ANDW	$~3, R3	// R3 = floor(ptr, 4) = ptr &^ 3
	RLL	R5, R4, R4	// R4 = rotl(R4, R5)
	LAN	R4, R6, 0(R3)	// R6 = *R3; *R3 &= R4; (atomic)
	RET
235
// func Or(ptr *uint32, val uint32)
// Atomically: *ptr |= val.
TEXT ·Or(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAO	R4, R6, 0(R3)	// R6 = *R3; *R3 |= R4; (atomic)
	RET
242
// func And(ptr *uint32, val uint32)
// Atomically: *ptr &= val.
TEXT ·And(SB), NOSPLIT, $0-12
	MOVD	ptr+0(FP), R3
	MOVW	val+8(FP), R4
	LAN	R4, R6, 0(R3)	// R6 = *R3; *R3 &= R4; (atomic)
	RET
249
// func Or32(ptr *uint32, val uint32) old uint32
// Atomically: *ptr |= val; return the previous value of *ptr.
TEXT ·Or32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	val+8(FP), R5
	MOVW	(R4), R3
repeat:
	OR	R5, R3, R6
	CS	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CS reloaded R3 with the current value; retry
	MOVW	R3, ret+16(FP)	// return the old value
	RET
261
// func And32(ptr *uint32, val uint32) old uint32
// Atomically: *ptr &= val; return the previous value of *ptr.
TEXT ·And32(SB), NOSPLIT, $0-20
	MOVD	ptr+0(FP), R4
	MOVW	val+8(FP), R5
	MOVW	(R4), R3
repeat:
	AND	R5, R3, R6
	CS	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CS reloaded R3 with the current value; retry
	MOVW	R3, ret+16(FP)	// return the old value
	RET
273
// func Or64(ptr *uint64, val uint64) old uint64
// Atomically: *ptr |= val; return the previous value of *ptr.
TEXT ·Or64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	val+8(FP), R5
	MOVD	(R4), R3
repeat:
	OR	R5, R3, R6
	CSG	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CSG reloaded R3 with the current value; retry
	MOVD	R3, ret+16(FP)	// return the old value
	RET
285
// func And64(ptr *uint64, val uint64) old uint64
// Atomically: *ptr &= val; return the previous value of *ptr.
TEXT ·And64(SB), NOSPLIT, $0-24
	MOVD	ptr+0(FP), R4
	MOVD	val+8(FP), R5
	MOVD	(R4), R3
repeat:
	AND	R5, R3, R6
	CSG	R3, R6, (R4)	// if R3==(R4) then (R4)=R6 else R3=(R4)
	BNE	repeat	// on failure CSG reloaded R3 with the current value; retry
	MOVD	R3, ret+16(FP)	// return the old value
	RET
297
// func Anduintptr(addr *uintptr, v uintptr) old uintptr
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Anduintptr(SB), NOSPLIT, $0-24
	BR	·And64(SB)
301
// func Oruintptr(addr *uintptr, v uintptr) old uintptr
// uintptr is 64 bits on s390x; tail-call the uint64 version.
TEXT ·Oruintptr(SB), NOSPLIT, $0-24
	BR	·Or64(SB)