// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|32|16|8) ...) => (ADD ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Select0 (Add32carry <t> x y)) => (ADD <t.FieldType(0)> x y)
(Select1 (Add32carry <t> x y)) => (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
(Add32withcarry <t> x y c) => (ADD c (ADD <t> x y))
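// Illustration only (not a rewrite rule): the carry out of an unsigned
// 32-bit add is 1 exactly when the sum wraps, i.e. when x+y < x
// (unsigned), which is what SGTU x (ADD x y) computes. E.g. with
// x = 0xFFFFFFFF and y = 2, ADD yields 1 and SGTU 0xFFFFFFFF 1 = 1,
// the carry bit.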

(Sub(Ptr|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Select0 (Sub32carry <t> x y)) => (SUB <t.FieldType(0)> x y)
(Select1 (Sub32carry <t> x y)) => (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
(Sub32withcarry <t> x y c) => (SUB (SUB <t> x y) c)
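// Likewise for subtraction (illustration only): the borrow of x-y is 1
// exactly when the difference wraps upward, i.e. when x-y > x (unsigned),
// which SGTU (SUB x y) x computes. E.g. with x = 0 and y = 1, SUB yields
// 0xFFFFFFFF and SGTU 0xFFFFFFFF 0 = 1, the borrow bit.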

(Mul(32|16|8) ...) => (MUL ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)

(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
(Mul32uhilo ...) => (MULTU ...)

(Div32 x y) => (Select1 (DIV x y))
(Div32u x y) => (Select1 (DIVU x y))
(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod32 x y) => (Select0 (DIV x y))
(Mod32u x y) => (Select0 (DIVU x y))
(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))

// math package intrinsics
(Abs ...) => (ABSD ...)

// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
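// Worked example (illustration only): rewriting (x+y)/2 as (x-y)/2 + y
// keeps the intermediate value within 32 bits. With x = 0xFFFFFFFE and
// y = 2, the sum x+y would wrap to 0, but (x-y)>>1 + y =
// 0x7FFFFFFE + 2 = 0x80000000, the correct average.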

(And(32|16|8) ...) => (AND ...)
(Or(32|16|8) ...) => (OR ...)
(Xor(32|16|8) ...) => (XOR ...)

// constant shifts
// generic opt rewrites all constant shifts to shift by Const64
(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
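// Illustration only: for sub-word right shifts, SLLconst by 16 (or 24)
// first moves the 16-bit (8-bit) value to the top of the register, so the
// following SRAconst/SRLconst by c+16 (c+24) shifts by c while seeing the
// correct sign/zero bits, regardless of what the register's upper bits
// held.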

// large constant shifts
(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])

// large constant signed right shift: only the sign bit survives, so shift by 31
(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])

// shifts
// hardware instruction uses only the low 5 bits of the shift
// we compare to 32 to ensure Go semantics for large shifts
(Lsh32x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh32x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh32x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh16x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh16x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh16x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Lsh8x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Lsh8x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Lsh8x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32Ux32 <t> x y) => (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh32Ux16 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh32Ux8 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh16Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh16Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh16Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh8Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
(Rsh8Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
(Rsh8Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))

(Rsh32x32 x y) => (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
(Rsh32x16 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh32x8 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))

(Rsh8x32 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
(Rsh8x16 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
(Rsh8x8 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
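// Illustration only: CMOVZ a b c evaluates to a when c != 0 and to b when
// c == 0, and SGTUconst [32] y is 1 while y < 32 (unsigned). So the
// unsigned shifts above keep the SLL/SRL result for in-range shift counts
// and substitute 0 once y >= 32, matching Go's shift semantics. Signed
// right shifts have no zero fallback; the shift amount itself is clamped
// to 31, which leaves just the sign bit.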

// rotates
(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
(RotateLeft64 <t> x (MOVWconst [c])) => (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))

// unary ops
(Neg(32|16|8) ...) => (NEG ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(32|16|8) x) => (NORconst [0] x)

(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)

// TODO: optimize this case?
(Ctz32NonZero ...) => (Ctz32 ...)

// count trailing zeros
// 32 - CLZ(x&-x - 1)
(Ctz32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
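// Worked example (illustration only): x & -x isolates the lowest set bit,
// and subtracting 1 turns it into a mask of the trailing zeros. For
// x = 0b01101000: x & -x = 0b1000, minus 1 gives 0b0111, CLZ of that is
// 29, and 32-29 = 3, the number of trailing zeros.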

// bit length
(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
(Const(32|64)F ...) => (MOV(F|D)const ...)
(ConstNil) => (MOVWconst [0])
(ConstBool [t]) => (MOVWconst [b2i32(t)])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)

(Signmask x) => (SRAconst x [31])
(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
(Slicemask <t> x) => (SRAconst (NEG <t> x) [31])
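// Illustration only: Signmask yields 0 or -1 by smearing the sign bit
// across the word (arithmetic shift by 31). Zeromask yields -1 for any
// nonzero x by negating the 0/1 result of SGTU x 0. Slicemask combines
// the two for non-negative slice lengths: NEG sets the sign bit for any
// positive x, and SRAconst by 31 then smears it, giving -1 for nonzero
// lengths and 0 for length 0.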

// float-int conversion
(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (Copy ...)

// comparisons
(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) => (SGTUconst [1] (XOR x y))
(EqPtr x y) => (SGTUconst [1] (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
(Less32 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
(Less32U x y) => (SGTU y x)

(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) => (XORconst [1] (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (XORconst [1] (SGTU x y))

(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)

(Addr {sym} base) => (MOVWaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVWaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVWaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float32bits, Float32frombits}.
(MOVWload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (MOVWfpgp val)
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// zero instructions
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVWconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVWconst [0])
		(MOVBstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVWconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVWconst [0])
		(MOVHstore [0] ptr (MOVWconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVWconst [0])
		(MOVBstore [2] ptr (MOVWconst [0])
			(MOVBstore [1] ptr (MOVWconst [0])
				(MOVBstore [0] ptr (MOVWconst [0]) mem))))
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVWconst [0])
		(MOVBstore [1] ptr (MOVWconst [0])
			(MOVBstore [0] ptr (MOVWconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVWconst [0])
		(MOVHstore [2] ptr (MOVWconst [0])
			(MOVHstore [0] ptr (MOVWconst [0]) mem)))
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVWconst [0])
		(MOVWstore [0] ptr (MOVWconst [0]) mem))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVWconst [0])
		(MOVWstore [4] ptr (MOVWconst [0])
			(MOVWstore [0] ptr (MOVWconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [12] ptr (MOVWconst [0])
		(MOVWstore [8] ptr (MOVWconst [0])
			(MOVWstore [4] ptr (MOVWconst [0])
				(MOVWstore [0] ptr (MOVWconst [0]) mem))))

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 16 || t.Alignment()%4 != 0) =>
	(LoweredZero [int32(t.Alignment())]
		ptr
		(ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
		mem)
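// Illustration only: LoweredZero's second argument is the address of the
// last unit to clear (ptr + s - moveSize, where moveSize is the store
// width implied by the alignment), so the emitted loop can advance the
// pointer one store at a time and stop once it passes that address.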

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHUload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBUload [1] src mem)
		(MOVBstore dst (MOVBUload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBUload [3] src mem)
		(MOVBstore [2] dst (MOVBUload [2] src mem)
			(MOVBstore [1] dst (MOVBUload [1] src mem)
				(MOVBstore dst (MOVBUload src mem) mem))))
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVBstore [1] dst (MOVBUload [1] src mem)
			(MOVBstore dst (MOVBUload src mem) mem)))
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [12] dst (MOVWload [12] src mem)
		(MOVWstore [8] dst (MOVWload [8] src mem)
			(MOVWstore [4] dst (MOVWload [4] src mem)
				(MOVWstore dst (MOVWload src mem) mem))))


// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
	(LoweredMove [int32(t.Alignment())]
		dst
		src
		(ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
		mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)

(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)

(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
(AtomicAdd32 ...) => (LoweredAtomicAdd ...)

(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr))) mem)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8))))
(AtomicAnd8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr)))
		(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
			(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3] ptr))))) mem)
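// Illustration only: for a byte at address ptr on a little-endian machine,
// ptr &^ 3 is the containing aligned word and (ptr & 3) * 8 is the bit
// offset of that byte within the word. AtomicAnd8 must also OR the shifted
// value with 0xFF masks for the other three byte lanes so the word-wide
// AND leaves them unchanged. The big-endian rules below are the same
// except that ptr^3 flips the byte's position within the word.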

// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr)))) mem)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8))))
(AtomicAnd8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
		(OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
			(SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr))))
		(NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
			(MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
				(ANDconst <typ.UInt32> [3]
					(XORconst <typ.UInt32> [3] ptr)))))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
(AtomicOr32 ...) => (LoweredAtomicOr ...)


// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVWconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NE cond yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTUzero x) yes no) => (NE x yes no)
(EQ (SGTUzero x) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGTzero x) yes no) => (GTZ x yes no)
(EQ (SGTzero x) yes no) => (LEZ x yes no)
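// Illustration only: these rules work because the comparison pseudo-ops
// have simple integer meanings: SGTUconst [1] x is 1 >u x, i.e. x == 0;
// SGTUzero x is x != 0; SGTconst [0] x is x < 0; and SGTzero x is x > 0.
// Branching on them therefore maps directly onto the EQ/NE/LTZ/GEZ/GTZ/LEZ
// block forms, inverting the branch where the test is negated.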

// fold offset into address
(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)

// fold address into load/store
(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem)
(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem)
(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem)
(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem)
(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem)
(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem)
(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem)

(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem)
(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem)
(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem)
(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem)
(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem)

(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem)
(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem)
(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
	(MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
	(MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x

// store zero
(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)

// sign extended loads
// Note: The combined instruction must end up in the same block
// as the original load. If not, we end up making a value with
// memory type live in two different blocks, which can lead to
// multiple memory values alive simultaneously.
// Make sure we don't combine these ops if the load has another use.
// This prevents a single load from being split into multiple loads
// which then might return different values. See test/atomicload.go.
(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)

// fold extensions and ANDs together
(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
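// Illustration only: ANDconst [c] x already clears every bit outside the
// mask, so a following zero-extension is redundant and the mask can simply
// be narrowed. A sign-extension can only be dropped when the mask leaves
// the bit that would be copied (0x80 or 0x8000) clear, since the result is
// then known to be non-negative.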

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// If a register move has only 1 use, just use the same register without
// emitting an instruction.
// MOVWnop doesn't emit an instruction; it exists only to pin the type.
(MOVWreg x) && x.Uses == 1 => (MOVWnop x)

// TODO: we should be able to get rid of MOVWnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVWnop (MOVWconst [c])) => (MOVWconst [c])

// fold constant into arithmetic ops
(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB x (MOVWconst [c])) => (SUBconst [c] x)
(AND x (MOVWconst [c])) => (ANDconst [c] x)
(OR x (MOVWconst [c])) => (ORconst [c] x)
(XOR x (MOVWconst [c])) => (XORconst [c] x)
(NOR x (MOVWconst [c])) => (NORconst [c] x)

(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
(SRA x (MOVWconst [c])) => (SRAconst x [c&31])

(SGT (MOVWconst [c]) x) => (SGTconst [c] x)
(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x)
(SGT x (MOVWconst [0])) => (SGTzero x)
(SGTU x (MOVWconst [0])) => (SGTUzero x)

// mul with constant
(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
(Select1 (MULTU (MOVWconst [1]) x )) => x
(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
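// Illustration only: for c = 1<<k, the 64-bit product c*x has low word
// (Select1) x << k and high word (Select0) the bits shifted out, i.e.
// x >> (32-k) unsigned. E.g. c = 8 (k = 3): low = x << 3,
// high = x >> 29.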

(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
(MUL (MOVWconst [1]) x ) => x
(MUL (MOVWconst [-1]) x ) => (NEG x)
(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)

// generic simplifications
(ADD x (NEG y)) => (SUB x y)
(SUB x x) => (MOVWconst [0])
(SUB (MOVWconst [0]) x) => (NEG x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVWconst [0])

// miscellaneous patterns generated by dec64
(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR <x.Type> x y))
(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR <x.Type> x y))
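// Illustration only: SGTUconst [1] x is x == 0, so ANDing two of them
// tests x == 0 && y == 0, which is the same as (x|y) == 0. Likewise
// SGTUzero x is x != 0, and the OR of two is (x|y) != 0. dec64 generates
// these shapes when it compares 64-bit values as pairs of 32-bit halves.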

// remove redundant *const ops
(ADDconst [0] x) => x
(SUBconst [0] x) => x
(ANDconst [0] _) => (MOVWconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVWconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)

// generic constant folding
(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)])
(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))])
(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)])
(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))])
(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c/d])
(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c%d])
(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
(NEG (MOVWconst [c])) => (MOVWconst [-c])
(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
(MOVWreg (MOVWconst [c])) => (MOVWconst [c])

// constant comparisons
(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1])
(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1])
(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0])
(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1])
(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0])
(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1])
(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1])
(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])

// absorb constants into branches
(EQ (MOVWconst [0]) yes no) => (First yes no)
(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVWconst [0]) yes no) => (First no yes)
(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes)

// conditional move
(CMOVZ _ f (MOVWconst [0])) => f
(CMOVZ a _ (MOVWconst [c])) && c!=0 => a
(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0])
(CMOVZzero a (MOVWconst [c])) && c!=0 => a
(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c)
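// Illustration only: CMOVZzero a c behaves like CMOVZ with an implicit
// zero second operand (a if c != 0, 0 if c == 0), so folding the constant
// frees one register operand -- presumably the zero can come from the
// hardware zero register, though the actual operand encoding is defined in
// the ops file.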

// atomic
(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem)
(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem)