// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADD ...)
(Add(32|64)F ...) => (FADD(S|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
(Sub(32|64)F ...) => (FSUB(S|D) ...)

(Mul64 ...) => (MUL ...)
(Mul(32|16|8) ...) => (MULW ...)
(Mul(32|64)F ...) => (FMUL(S|D) ...)

(Hmul64 ...) => (MULH ...)
(Hmul64u ...) => (UMULH ...)
(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
(Select0 (Mul64uhilo x y)) => (UMULH x y)
(Select1 (Mul64uhilo x y)) => (MUL x y)

(Div64 [false] x y) => (DIV x y)
(Div32 [false] x y) => (DIVW x y)
(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
(Div64u ...) => (UDIV ...)
(Div32u ...) => (UDIVW ...)
(Div32F ...) => (FDIVS ...)
(Div64F ...) => (FDIVD ...)

(Mod64 x y) => (MOD x y)
(Mod32 x y) => (MODW x y)
(Mod64u ...) => (UMOD ...)
(Mod32u ...) => (UMODW ...)
(Mod(16|8) x y) => (MODW (SignExt(16|8)to32 x) (SignExt(16|8)to32 y))
(Mod(16|8)u x y) => (UMODW (ZeroExt(16|8)to32 x) (ZeroExt(16|8)to32 y))

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
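// E.g. with x = y = 1<<63, x+y wraps to 0, while (x-y)/2 + y = 1<<63 is the
// correct average; the rewrite relies on the x >= y precondition noted above.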

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// unary ops
(Neg(64|32|16|8) ...) => (NEG ...)
(Neg(32|64)F ...) => (FNEG(S|D) ...)
(Com(64|32|16|8) ...) => (MVN ...)

// math package intrinsics
(Abs ...) => (FABSD ...)
(Sqrt ...) => (FSQRTD ...)
(Ceil ...) => (FRINTPD ...)
(Floor ...) => (FRINTMD ...)
(Round ...) => (FRINTAD ...)
(RoundToEven ...) => (FRINTND ...)
(Trunc ...) => (FRINTZD ...)
(FMA x y z) => (FMADDD z x y)

(Sqrt32 ...) => (FSQRTS ...)

(Min(64|32)F ...) => (FMIN(D|S) ...)
(Max(64|32)F ...) => (FMAX(D|S) ...)

// lowering rotates
// we do rotate detection in the generic rules; if the rules below need to change, check the generic rules first.
(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLL <t> x (ANDconst <typ.Int64> [7] y)) (SRL <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEG <typ.Int64> y))))
(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
(RotateLeft16 <t> x y) => (RORW <t> (ORshiftLL <typ.UInt32> (ZeroExt16to32 x) (ZeroExt16to32 x) [16]) (NEG <typ.Int64> y))
(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
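// A minimal Go sketch of source that reaches these rules (assuming the
// generic rules have already recognized the rotate pattern):
//
//	import "math/bits"
//
//	func rot(x uint32, k int) uint32 {
//		// lowers to RORW with a negated amount, since ROR rotates right
//		return bits.RotateLeft32(x, k)
//	}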

(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)

(Ctz64 <t> x) => (CLZ (RBIT <t> x))
(Ctz32 <t> x) => (CLZW (RBITW <t> x))
(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
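// Setting a bit just above the operand width (0x10000 for Ctz16, 0x100 for
// Ctz8) bounds the result: a zero input then yields 16 or 8 instead of the
// 32 that CLZW(RBITW(0)) would produce.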

(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))

// Load args directly into the register class where they will be used.
(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)
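// An illustrative Go sketch of code these rules target:
//
//	u := math.Float64bits(f)     // lowers to FMOVDfpgp
//	g := math.Float64frombits(u) // lowers to FMOVDgpfp
//
// When such a value round-trips through memory, the store/load pairs above
// collapse into a single register move.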

(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
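// E.g. BitLen64(1) = 64 - CLZ(1) = 64 - 63 = 1, and BitLen64(0) = 64 - 64 = 0.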

(Bswap64 ...) => (REV ...)
(Bswap32 ...) => (REVW ...)
(Bswap16 ...) => (REV16W ...)

(BitRev64 ...) => (RBIT ...)
(BitRev32 ...) => (RBITW ...)
(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))

// UMOD is translated into a UREM instruction, and UREM is in turn expanded
// into UDIV and MSUB instructions. If there is already an identical UDIV just
// before or after the UREM (as in quo, rem := z/y, z%y), the second UDIV is
// redundant. This rule exposes the extra UDIV so the CSE pass can remove it.
(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
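// Illustrative Go source that benefits:
//
//	quo, rem := z/y, z%y // one UDIV remains; the UDIV feeding MSUB is CSE'd with quo's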

// 64-bit addition with carry.
(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))

// 64-bit subtraction with borrowing.
(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XOR (MOVDconst [1]) x)

// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
// The rules for rotates with non-constant shifts are derived from the rules
// below; if these rules change, please update the rotate rules as well.

// Check shiftIsBounded first: if the shift amount is proven to be in range,
// we can do the shift directly.
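// An illustrative Go sketch (names are placeholders): in
//
//	func shl(x uint64, s uint) uint64 { return x << s }
//
// s is not provably < 64, so the unbounded rules below guard the SLL with
// CMP $64 + CSEL to produce 0 for large shifts, as Go semantics require.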
// left shift
(Lsh(64|32|16|8)x64 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x32 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x16 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)
(Lsh(64|32|16|8)x8 <t> x y) && shiftIsBounded(v) => (SLL <t> x y)

// signed right shift
(Rsh64x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> x y)
(Rsh32x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt32to64 x) y)
(Rsh16x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt16to64 x) y)
(Rsh8x(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRA <t> (SignExt8to64 x) y)

// unsigned right shift
(Rsh64Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> x y)
(Rsh32Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt32to64 x) y)
(Rsh16Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8) <t> x y) && shiftIsBounded(v) => (SRL <t> (ZeroExt8to64 x) y)

// shift value may be out of range; use CMP + CSEL instead
(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh64x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh64Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y)))

(Rsh64x64 x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh64x(32|16|8) x y) && !shiftIsBounded(v) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh32x64 x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh32x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh16x64 x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh16x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

(Rsh8x64 x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
(Rsh8x(32|16|8) x y) && !shiftIsBounded(v) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] ((ZeroExt32to64|ZeroExt16to64|ZeroExt8to64) y))))

// constants
(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
(Const(32|64)F [val]) => (FMOV(S|D)const [float64(val)])
(ConstNil) => (MOVDconst [0])
(ConstBool [t]) => (MOVDconst [b2i(t)])

(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (SCVTFWS ...)
(Cvt32to64F ...) => (SCVTFWD ...)
(Cvt64to32F ...) => (SCVTFS ...)
(Cvt64to64F ...) => (SCVTFD ...)
(Cvt32Uto32F ...) => (UCVTFWS ...)
(Cvt32Uto64F ...) => (UCVTFWD ...)
(Cvt64Uto32F ...) => (UCVTFS ...)
(Cvt64Uto64F ...) => (UCVTFD ...)
(Cvt32Fto32 ...) => (FCVTZSSW ...)
(Cvt64Fto32 ...) => (FCVTZSDW ...)
(Cvt32Fto64 ...) => (FCVTZSS ...)
(Cvt64Fto64 ...) => (FCVTZSD ...)
(Cvt32Fto32U ...) => (FCVTZUSW ...)
(Cvt64Fto32U ...) => (FCVTZUDW ...)
(Cvt32Fto64U ...) => (FCVTZUS ...)
(Cvt64Fto64U ...) => (FCVTZUD ...)
(Cvt32Fto64F ...) => (FCVTSD ...)
(Cvt64Fto32F ...) => (FCVTDS ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round32F ...) => (LoweredRound32F ...)
(Round64F ...) => (LoweredRound64F ...)

// comparisons
(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Eq32 x y) => (Equal (CMPW x y))
(Eq64 x y) => (Equal (CMP x y))
(EqPtr x y) => (Equal (CMP x y))
(Eq32F x y) => (Equal (FCMPS x y))
(Eq64F x y) => (Equal (FCMPD x y))

(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Neq32 x y) => (NotEqual (CMPW x y))
(Neq64 x y) => (NotEqual (CMP x y))
(NeqPtr x y) => (NotEqual (CMP x y))
(Neq(32|64)F x y) => (NotEqual (FCMP(S|D) x y))

(Less(8|16) x y) => (LessThan (CMPW (SignExt(8|16)to32 x) (SignExt(8|16)to32 y)))
(Less32 x y) => (LessThan (CMPW x y))
(Less64 x y) => (LessThan (CMP x y))

// Set condition flags for floating-point comparisons "x < y"
// and "x <= y". If either or both operands are NaN, all three of
// (x < y), (x == y) and (x > y) are false, and the ARM manual
// specifies that FCMP sets PSTATE.<N,Z,C,V> to (0, 0, 1, 1) in
// that case.
(Less32F x y) => (LessThanF (FCMPS x y))
(Less64F x y) => (LessThanF (FCMPD x y))

// For an unsigned integer x, the following rules are useful when combining branches:
// 0 <  x  =>  x != 0
// x <= 0  =>  x == 0
// x <  1  =>  x == 0
// 1 <= x  =>  x != 0
(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
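// E.g. the Go test x >= 1 on an unsigned x matches the 1 <= x form above and
// becomes x != 0, which the branch rules below can absorb into an NZ block.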

(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Less32U x y) => (LessThanU (CMPW x y))
(Less64U x y) => (LessThanU (CMP x y))

(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
(Leq32 x y) => (LessEqual (CMPW x y))
(Leq64 x y) => (LessEqual (CMP x y))

// Refer to the comments for op Less64F above.
(Leq32F x y) => (LessEqualF (FCMPS x y))
(Leq64F x y) => (LessEqualF (FCMPD x y))

(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
(Leq32U x y) => (LessEqualU (CMPW x y))
(Leq64U x y) => (LessEqualU (CMP x y))

// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))

// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))

(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDconst [off] ptr)

(Addr {sym} base) => (MOVDaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVDaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVDaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (FMOVSstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (FMOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVDconst [0])
		(MOVHstore ptr (MOVDconst [0]) mem))
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVDconst [0])
		(MOVWstore ptr (MOVDconst [0]) mem))
(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVDstore [3] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVDstore [5] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVDstore [6] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVDstore [7] ptr (MOVDconst [0])
		(MOVDstore ptr (MOVDconst [0]) mem))
(Zero [16] ptr mem) =>
	(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)

(Zero [32] ptr mem) =>
	(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))

(Zero [48] ptr mem) =>
	(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
			(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))

(Zero [64] ptr mem) =>
	(STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
		(STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
			(STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
				(STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))

// strip off fractional word zeroing
(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
	(Zero [8]
		(OffPtr <ptr.Type> ptr [s-8])
		(Zero [s-s%16] ptr mem))
(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
	(Zero [16]
		(OffPtr <ptr.Type> ptr [s-16])
		(Zero [s-s%16] ptr mem))
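// E.g. Zero [23]: 23%16 = 7 <= 8, so it becomes Zero [8] at offset 15 on top
// of Zero [16], with the two stores overlapping by one byte.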

// medium zeroing uses a duff device
// 4, 16, and 64 are magic constants, see runtime/mkduff.go
(Zero [s] ptr mem)
	&& s%16 == 0 && s > 64 && s <= 16*64 =>
	(DUFFZERO [4 * (64 - s/16)] ptr mem)
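// E.g. s = 128 needs 8 sixteen-byte blocks, so we enter duffzero at offset
// 4 * (64 - 128/16) = 224, leaving 8 of its 4-byte zeroing blocks to run
// (4 bytes per block and 64 blocks, per the magic constants above).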

// large zeroing uses a loop
(Zero [s] ptr mem)
	&& s%16 == 0 && s > 16*64 =>
	(LoweredZero
		ptr
		(ADDconst <ptr.Type> [s-16] ptr)
		mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [11] dst src mem) =>
	(MOVDstore [3] dst (MOVDload [3] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [13] dst src mem) =>
	(MOVDstore [5] dst (MOVDload [5] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [14] dst src mem) =>
	(MOVDstore [6] dst (MOVDload [6] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [15] dst src mem) =>
	(MOVDstore [7] dst (MOVDload [7] src mem)
		(MOVDstore dst (MOVDload src mem) mem))
(Move [16] dst src mem) =>
	(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)

(Move [s] dst src mem) && s > 16 && s <= 24 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
(Move [s] dst src mem) && s > 24 && s <= 32 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))
(Move [s] dst src mem) && s > 32 && s <= 40 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 40 && s <= 48 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
			(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem)))
(Move [s] dst src mem) && s > 48 && s <= 56 =>
	(MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))
(Move [s] dst src mem) && s > 56 && s <= 64 =>
	(STP [int32(s-16)] dst (Select0 <typ.UInt64> (LDP [int32(s-16)] src mem)) (Select1 <typ.UInt64> (LDP [int32(s-16)] src mem))
		(STP [32] dst (Select0 <typ.UInt64> (LDP [32] src mem)) (Select1 <typ.UInt64> (LDP [32] src mem))
			(STP [16] dst (Select0 <typ.UInt64> (LDP [16] src mem)) (Select1 <typ.UInt64> (LDP [16] src mem))
				(STP dst (Select0 <typ.UInt64> (LDP src mem)) (Select1 <typ.UInt64> (LDP src mem)) mem))))

// strip off fractional word move
(Move [s] dst src mem) && s%16 != 0 && s%16 <= 8 && s > 64 =>
	(Move [8]
		(OffPtr <dst.Type> dst [s-8])
		(OffPtr <src.Type> src [s-8])
		(Move [s-s%16] dst src mem))
(Move [s] dst src mem) && s%16 != 0 && s%16 > 8 && s > 64 =>
	(Move [16]
		(OffPtr <dst.Type> dst [s-16])
		(OffPtr <src.Type> src [s-16])
		(Move [s-s%16] dst src mem))

// medium move uses a duff device
(Move [s] dst src mem)
	&& s > 64 && s <= 16*64 && s%16 == 0
	&& logLargeCopy(v, s) =>
	(DUFFCOPY [8 * (64 - s/16)] dst src mem)
// 8 is the number of bytes each two-instruction block encodes to:
//
//	LDP.P	16(R16), (R26, R27)
//	STP.P	(R26, R27), 16(R17)
//
// and 64 is the number of these blocks. See runtime/duff_arm64.s:duffcopy.
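// E.g. s = 256 copies 16 blocks, entering duffcopy at byte offset
// 8 * (64 - 256/16) = 8 * 48 = 384.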

// large move uses a loop
(Move [s] dst src mem)
	&& s%16 == 0 && s > 16*64
	&& logLargeCopy(v, s) =>
	(LoweredMove
		dst
		src
		(ADDconst <src.Type> src [s-16])
		mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
(IsInBounds idx len) => (LessThanU (CMP idx len))
(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

// Absorb pseudo-ops into blocks.
(If (Equal cc) yes no) => (EQ cc yes no)
(If (NotEqual cc) yes no) => (NE cc yes no)
(If (LessThan cc) yes no) => (LT cc yes no)
(If (LessThanU cc) yes no) => (ULT cc yes no)
(If (LessEqual cc) yes no) => (LE cc yes no)
(If (LessEqualU cc) yes no) => (ULE cc yes no)
(If (GreaterThan cc) yes no) => (GT cc yes no)
(If (GreaterThanU cc) yes no) => (UGT cc yes no)
(If (GreaterEqual cc) yes no) => (GE cc yes no)
(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
(If (LessThanF cc) yes no) => (FLT cc yes no)
(If (LessEqualF cc) yes no) => (FLE cc yes no)
(If (GreaterThanF cc) yes no) => (FGT cc yes no)
(If (GreaterEqualF cc) yes no) => (FGE cc yes no)

(If cond yes no) => (TBNZ [0] cond yes no)

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVDaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// atomic intrinsics
// Note: these ops do not accept an offset.
(AtomicLoad8 ...) => (LDARB ...)
(AtomicLoad32 ...) => (LDARW ...)
(AtomicLoad64 ...) => (LDAR ...)
(AtomicLoadPtr ...) => (LDAR ...)

(AtomicStore8 ...) => (STLRB ...)
(AtomicStore32 ...) => (STLRW ...)
(AtomicStore64 ...) => (STLR ...)
(AtomicStorePtrNoWB ...) => (STLR ...)

(AtomicExchange(8|32|64) ...) => (LoweredAtomicExchange(8|32|64) ...)
(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)

(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
(AtomicExchange(8|32|64)Variant ...) => (LoweredAtomicExchange(8|32|64)Variant ...)
(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)

// Return old contents.
(AtomicAnd(64|32|8)value ...) => (LoweredAtomicAnd(64|32|8) ...)
(AtomicOr(64|32|8)value ...) => (LoweredAtomicOr(64|32|8) ...)
(AtomicAnd(64|32|8)valueVariant ...) => (LoweredAtomicAnd(64|32|8)Variant ...)
(AtomicOr(64|32|8)valueVariant ...) => (LoweredAtomicOr(64|32|8)Variant ...)

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier (0xe is the ST option)
(PubBarrier mem) => (DMB [0xe] mem)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Optimizations

// Absorb boolean tests into block
(NZ (Equal cc) yes no) => (EQ cc yes no)
(NZ (NotEqual cc) yes no) => (NE cc yes no)
(NZ (LessThan cc) yes no) => (LT cc yes no)
(NZ (LessThanU cc) yes no) => (ULT cc yes no)
(NZ (LessEqual cc) yes no) => (LE cc yes no)
(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
(NZ (GreaterThan cc) yes no) => (GT cc yes no)
(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
(NZ (LessThanF cc) yes no) => (FLT cc yes no)
(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)

(TBNZ [0] (Equal cc) yes no) => (EQ cc yes no)
(TBNZ [0] (NotEqual cc) yes no) => (NE cc yes no)
(TBNZ [0] (LessThan cc) yes no) => (LT cc yes no)
(TBNZ [0] (LessThanU cc) yes no) => (ULT cc yes no)
(TBNZ [0] (LessEqual cc) yes no) => (LE cc yes no)
(TBNZ [0] (LessEqualU cc) yes no) => (ULE cc yes no)
(TBNZ [0] (GreaterThan cc) yes no) => (GT cc yes no)
(TBNZ [0] (GreaterThanU cc) yes no) => (UGT cc yes no)
(TBNZ [0] (GreaterEqual cc) yes no) => (GE cc yes no)
(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
(TBNZ [0] (LessThanF cc) yes no) => (FLT cc yes no)
(TBNZ [0] (LessEqualF cc) yes no) => (FLE cc yes no)
(TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no)
(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TST x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTconst [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTW x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (TSTWconst [int32(c)] y) yes no)

// For conditional instructions such as CSET, CSEL.
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] z:(AND x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TST x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTWconst [int32(c)] y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPWconst [0] z:(AND x y))) && z.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTW x y))
((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (CMPconst [0] x:(ANDconst [c] y))) && x.Uses == 1 =>
	((Equal|NotEqual|LessThan|LessEqual|GreaterThan|GreaterEqual) (TSTconst [c] y))

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNconst [c] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNWconst [int32(c)] y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN x y) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW x y) yes no)

// CMP(x,-y) -> CMN(x,y) is only valid for unordered comparison, since y may be -1<<63
((EQ|NE) (CMP x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMN x y) yes no)
((Equal|NotEqual) (CMP x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMN x y))

// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered comparison, since y may be -1<<31
((EQ|NE) (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => ((EQ|NE) (CMNW x y) yes no)
((Equal|NotEqual) (CMPW x z:(NEG y))) && z.Uses == 1 => ((Equal|NotEqual) (CMNW x y))
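// Why only EQ/NE: CMN sets flags from an addition while CMP x (NEG y) sets
// them from a subtraction. The Z flag agrees, but C and V can differ (e.g.
// when y is -1<<63 and NEG y overflows), so ordered results are not safe.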

// For conditional instructions such as CSET, CSEL.
// TODO: add support for LE, GT, overflow needs to be considered.
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNconst [c] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] x:(ADDconst [c] y))) && x.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNWconst [int32(c)] y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(ADD x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW x y))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MADD a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMN a (MUL <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPconst [0] z:(MSUB a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMP a (MUL <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MADDW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMNW a (MULW <x.Type> x y)))
((Equal|NotEqual|LessThan|GreaterEqual) (CMPWconst [0] z:(MSUBW a x y))) && z.Uses == 1 => ((Equal|NotEqual|LessThanNoov|GreaterEqualNoov) (CMPW a (MULW <x.Type> x y)))

((CMPconst|CMNconst) [c] y) && c < 0 && c != -1<<63 => ((CMNconst|CMPconst) [-c] y)
((CMPWconst|CMNWconst) [c] y) && c < 0 && c != -1<<31 => ((CMNWconst|CMPWconst) [-c] y)

((EQ|NE) (CMPconst [0] x) yes no) => ((Z|NZ) x yes no)
((EQ|NE) (CMPWconst [0] x) yes no) => ((ZW|NZW) x yes no)

((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMN a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMP a (MUL <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMNW a (MULW <x.Type> x y)) yes no)
((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => ((EQ|NE|LTnoov|LEnoov|GTnoov|GEnoov) (CMPW a (MULW <x.Type> x y)) yes no)

// Absorb bit-tests into block
(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)

// Test sign-bit for signed comparisons against zero
(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)

// fold offset into address
(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
	(MOVDaddr [int32(off1)+off2] {sym} ptr)

// fold address into load/store.
// Do not fold global variable accesses in -dynlink mode, where they will
// be rewritten to use the GOT via REGTMP, which currently cannot handle
// large offsets.
(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+int32(off2)] {sym} ptr mem)
(LDP [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+int32(off2)] {sym} ptr mem)
(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+int32(off2)] {sym} ptr mem)
(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+int32(off2)] {sym} ptr mem)

// register indexed load
(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)

(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)

// shifted register indexed load
(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)

(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)

(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+int32(off2)] {sym} ptr val mem)

// register indexed store
(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)

// shifted register indexed store
(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)

(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)

(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(LDP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(LDP [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)

// replace a load from the same location as a preceding store with zero/sign
// extension (or a copy, in the full-width case)
// these seem to interact badly with other rules, resulting in slower code
948//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
949//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
950//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
951//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
952//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
953//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
954//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
955//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
956//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
957//(LDP [off] {sym} ptr (STP [off2] {sym2} ptr2 x y _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x y

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
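// A MOVBstore writes only the low 8 bits of its value (MOVHstore the low 16,
// MOVWstore the low 32), so a preceding sign or zero extension of the stored
// value cannot change the stored bytes and can be dropped.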

// if a register move has only 1 use, just use the same register without emitting an instruction
// MOVDnop doesn't emit an instruction; it exists only to ensure the type.
(MOVDreg x) && x.Uses == 1 => (MOVDnop x)

// TODO: we should be able to get rid of MOVDnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVDnop (MOVDconst [c])) => (MOVDconst [c])

// fold constant into arithmetic ops
(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
(SUB x (MOVDconst [c])) => (SUBconst [c] x)
(AND x (MOVDconst [c])) => (ANDconst [c] x)
(OR x (MOVDconst [c])) => (ORconst [c] x)
(XOR x (MOVDconst [c])) => (XORconst [c] x)
(TST x (MOVDconst [c])) => (TSTconst [c] x)
(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
(CMN x (MOVDconst [c])) => (CMNconst [c] x)
(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
(EON x (MOVDconst [c])) => (XORconst [^c] x)
(ORN x (MOVDconst [c])) => (ORconst [^c] x)
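// For the last three, note that BIC x c = x & ^c, EON x c = x ^ ^c, and
// ORN x c = x | ^c, so complementing the constant at rewrite time reduces
// them to the plain AND/XOR/OR immediate forms.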

(SLL x (MOVDconst [c])) => (SLLconst x [c&63])
(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
(SLL x (ANDconst [63] y)) => (SLL x y)
(SRL x (ANDconst [63] y)) => (SRL x y)
(SRA x (ANDconst [63] y)) => (SRA x y)
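// The last three rules can drop the explicit "& 63" because ARM64 variable
// shifts (LSLV/LSRV/ASRV) already use the shift amount modulo the register width.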

(CMP x (MOVDconst [c])) => (CMPconst [c] x)
(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))

(ROR x (MOVDconst [c])) => (RORconst x [c&63])
(RORW x (MOVDconst [c])) => (RORWconst x [c&31])

(ADDSflags x (MOVDconst [c])) => (ADDSconstflags [c] x)

(ADDconst [c] y) && c < 0 => (SUBconst [-c] y)

// Canonicalize the order of arguments to comparisons - helps with CSE.
((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))

// mul-neg => mneg
(NEG (MUL x y)) => (MNEG x y)
(NEG (MULW x y)) && v.Type.Size() <= 4 => (MNEGW x y)
(MUL (NEG x) y) => (MNEG x y)
(MULW (NEG x) y) => (MNEGW x y)

// madd/msub
(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)

(ADD a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
(SUB a l:(MULW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
(ADD a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MSUBW a x y)
(SUB a l:(MNEGW x y)) && v.Type.Size() <= 4 && l.Uses==1 && clobber(l) => (MADDW a x y)
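// e.g. a + x*y becomes a single MADD (and a - x*y a single MSUB) when the
// product has no other uses.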

// madd/msub can't take constant arguments, so do a bit of reordering if a non-constant is available.
// Note: don't reorder arithmetic concerning pointers, as we must ensure that
// no intermediate computations are invalid pointers.
(ADD <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (ADD <v.Type> a m))
(ADD <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (ADD <v.Type> a m))
(SUB <t> a p:(ADDconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (SUBconst [c] (SUB <v.Type> a m))
(SUB <t> a p:(SUBconst [c] m:((MUL|MULW|MNEG|MNEGW) _ _))) && p.Uses==1 && m.Uses==1 && !t.IsPtrShaped() => (ADDconst [c] (SUB <v.Type> a m))
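// e.g. a + (m + c) reassociates to (a + m) + c, so the inner ADD can then be
// merged with the multiply by the madd/msub rules above.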

// optimize ADCSflags, SBCSflags and friends
(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)

// mul by constant
(MUL _ (MOVDconst [0])) => (MOVDconst [0])
(MUL x (MOVDconst [1])) => x

(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MULW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg x)

(MUL x (MOVDconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}
(MULW x (MOVDconst [c])) && v.Type.Size() <= 4 && canMulStrengthReduce32(config, int32(c)) => {mulStrengthReduce32(v, x, int32(c))}

// mneg by constant
(MNEG x (MOVDconst [-1])) => x
(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
(MNEG x (MOVDconst [1])) => (NEG x)
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MNEG x (MOVDconst [c])) && isPowerOfTwo(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
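// e.g. c = 12: 12%3 == 0 and 12/3 = 4 is a power of two, so
// -(x*12) becomes (x - x<<2) << 2, i.e. (-3x) << 2.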

(MNEGW x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg x)
(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
(MNEGW x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (NEG <x.Type> x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c) >= 3 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MNEGW x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c) >= 7 => (MOVWUreg (NEG <x.Type> (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)])))
(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2])))
(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))))
(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3])))
(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (NEG <x.Type> (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))))

(MADD a x (MOVDconst [-1])) => (SUB a x)
(MADD a _ (MOVDconst [0])) => a
(MADD a x (MOVDconst [1])) => (ADD a x)
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MADD a (MOVDconst [-1]) x) => (SUB a x)
(MADD a (MOVDconst [0]) _) => a
(MADD a (MOVDconst [1]) x) => (ADD a x)
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c) => (ADDshiftLL a x [log64(c)])
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MADD a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
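// e.g. a + x*8 becomes (ADDshiftLL a x [3]), i.e. a + x<<3, with no multiply.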

(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
(MADDW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
(MADDW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MADDW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (SUB <a.Type> a x))
(MADDW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (ADD <a.Type> a x))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (ADDshiftLL <a.Type> a x [log64(c)]))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (ADD <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MADDW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (SUB <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MSUB a x (MOVDconst [-1])) => (ADD a x)
(MSUB a _ (MOVDconst [0])) => a
(MSUB a x (MOVDconst [1])) => (SUB a x)
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a x (MOVDconst [c])) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MSUB a (MOVDconst [-1]) x) => (ADD a x)
(MSUB a (MOVDconst [0]) _) => a
(MSUB a (MOVDconst [1]) x) => (SUB a x)
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c) => (SUBshiftLL a x [log64(c)])
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
(MSUB a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])

(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => (MOVWUreg a)
(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MSUBW a x (MOVDconst [c])) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (MOVWUreg (ADD <a.Type> a x))
(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => (MOVWUreg a)
(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (MOVWUreg (SUB <a.Type> a x))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c) => (MOVWUreg (SUBshiftLL <a.Type> a x [log64(c)]))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c-1) && int32(c)>=3 => (MOVWUreg (SUB <a.Type> a (ADDshiftLL <x.Type> x x [log64(c-1)])))
(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo(c+1) && int32(c)>=7 => (MOVWUreg (ADD <a.Type> a (SUBshiftLL <x.Type> x x [log64(c+1)])))
(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo(c/3) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)]))
(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo(c/5) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)]))
(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo(c/7) && is32Bit(c) => (MOVWUreg (ADDshiftLL <a.Type> a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)]))
(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo(c/9) && is32Bit(c) => (MOVWUreg (SUBshiftLL <a.Type> a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)]))

// div by constant
(UDIV x (MOVDconst [1])) => x
(UDIV x (MOVDconst [c])) && isPowerOfTwo(c) => (SRLconst [log64(c)] x)
(UDIVW x (MOVDconst [c])) && uint32(c)==1 => (MOVWUreg x)
(UDIVW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (SRLconst [log64(c)] (MOVWUreg <v.Type> x))
(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
(UMOD x (MOVDconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)
(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
(UMODW x (MOVDconst [c])) && isPowerOfTwo(c) && is32Bit(c) => (ANDconst [c-1] x)
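// e.g. unsigned x/8 becomes x>>3 and unsigned x%8 becomes x&7.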

// generic simplifications
(ADD x (NEG y)) => (SUB x y)
(SUB x (NEG y)) => (ADD x y)
(SUB x x) => (MOVDconst [0])
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVDconst [0])
(BIC x x) => (MOVDconst [0])
(EON x x) => (MOVDconst [-1])
(ORN x x) => (MOVDconst [-1])
(AND x (MVN y)) => (BIC x y)
(XOR x (MVN y)) => (EON x y)
(OR x (MVN y)) => (ORN x y)
(MVN (XOR x y)) => (EON x y)
(NEG (SUB x y)) => (SUB y x)
(NEG (NEG x)) => x
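// The MVN absorptions use x & ^y = BIC, x ^ ^y = EON, and x | ^y = ORN,
// and the NEG rule uses -(x - y) = y - x.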

(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
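// CSINC/CSINV/CSNEG evaluate to x when the condition holds, and to a+1, ^a,
// or -a respectively when it does not, so the ADDconst/MVN/NEG folds into the
// conditional instruction itself.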

(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
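// Using x - (y - z) = (x + z) - y and (x - y) - z = x - (y + z).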

// remove redundant *const ops
(ADDconst [0] x) => x
(SUBconst [0] x) => x
(ANDconst [0] _) => (MOVDconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVDconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (MVN x)

// generic constant folding
(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(c*d))])
(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(uint32(-c*d))])
(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
(MADDW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MULW <x.Type> x y)))
(MSUBW (MOVDconst [c]) x y) => (MOVWUreg (ADDconst <x.Type> [c] (MNEGW <x.Type> x y)))
(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (ADDconst <a.Type> [c*d] a))
(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (MOVWUreg (SUBconst <a.Type> [c*d] a))
(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)/int32(d)))])
(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(int32(c)%int32(d)))])
(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
(MVN (MOVDconst [c])) => (MOVDconst [^c])
(NEG (MOVDconst [c])) => (MOVDconst [-c])
(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
(MOVDreg (MOVDconst [c])) => (MOVDconst [c])

// constant comparisons
(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])

// other known comparisons
(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
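// subFlags64(0,1) is the flag state of 0-1, i.e. "less than" both signed and
// unsigned. A zero-extended byte is at most 0xff, so comparing it against a
// larger constant has a statically known result; likewise for the other widths.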

// absorb flag constants into branches
(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)

(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)

(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)

(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)

(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)

(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)

(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)

(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)

(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)

(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)

(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)

(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)

(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)

(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)

(Z (MOVDconst [0]) yes no) => (First yes no)
(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
(NZ (MOVDconst [0]) yes no) => (First no yes)
(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)

// absorb InvertFlags into branches
(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)

// absorb InvertFlags into conditional instructions
(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp)
(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)

// absorb flag constants into boolean values
(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
(LessThanNoov (FlagConstant [fc])) => (MOVDconst [b2i(fc.ltNoov())])
(GreaterEqualNoov (FlagConstant [fc])) => (MOVDconst [b2i(fc.geNoov())])

// absorb InvertFlags into boolean values
(Equal (InvertFlags x)) => (Equal x)
(NotEqual (InvertFlags x)) => (NotEqual x)
(LessThan (InvertFlags x)) => (GreaterThan x)
(LessThanU (InvertFlags x)) => (GreaterThanU x)
(GreaterThan (InvertFlags x)) => (LessThan x)
(GreaterThanU (InvertFlags x)) => (LessThanU x)
(LessEqual (InvertFlags x)) => (GreaterEqual x)
(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
(GreaterEqual (InvertFlags x)) => (LessEqual x)
(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
(LessThanF (InvertFlags x)) => (GreaterThanF x)
(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
(GreaterThanF (InvertFlags x)) => (LessThanF x)
(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
(LessThanNoov (InvertFlags x)) => (CSEL0 [OpARM64NotEqual] (GreaterEqualNoov <typ.Bool> x) x)
(GreaterEqualNoov (InvertFlags x)) => (CSINC [OpARM64NotEqual] (LessThanNoov <typ.Bool> x) (MOVDconst [0]) x)

// Don't bother extending if we're not using the higher bits.
(MOV(B|BU)reg x) && v.Type.Size() <= 1 => x
(MOV(H|HU)reg x) && v.Type.Size() <= 2 => x
(MOV(W|WU)reg x) && v.Type.Size() <= 4 => x

// omit sign extension
(MOVWreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffff80000000) == 0 => (ANDconst <t> x [c])
(MOVHreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffff8000) == 0 => (ANDconst <t> x [c])
(MOVBreg <t> (ANDconst x [c])) && uint64(c) & uint64(0xffffffffffffff80) == 0 => (ANDconst <t> x [c])
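// If the mask already clears the narrow type's sign bit and everything above
// it, the AND result is a non-negative value that is its own sign extension,
// so the MOV(W|H|B)reg is redundant.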

// absorb flag constants into conditional instructions
(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])

// absorb flags back into boolean CSEL
(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
	(CSEL [boolval.Op] x y flagArg(boolval))
(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
	(CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
	(CSEL0 [boolval.Op] x flagArg(boolval))
(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
	(CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))

// absorb shifts into ops
(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c])
(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
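// ARM64 data-processing instructions accept a shifted register operand
// (e.g. ADD Rd, Rn, Rm, LSL #c), so a shift whose only use feeds one of these
// ops folds into it for free.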

// prefer *const ops to *shift ops
(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst <x.Type> x [d]))
(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))

// constant folding in *shift ops
(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)])
(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)])
(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])

// simplification with *shift ops
(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
(ANDshiftRO y:(RORconst x [c]) x [c]) => y
(ORshiftLL y:(SLLconst x [c]) x [c]) => y
(ORshiftRL y:(SRLconst x [c]) x [c]) => y
(ORshiftRA y:(SRAconst x [c]) x [c]) => y
(ORshiftRO y:(RORconst x [c]) x [c]) => y
(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])

// rev16w | rev16
// ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16; "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8); "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
	&& uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
	=> (REV16W x)

// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8); "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	&& (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
	=> (REV16 x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8); "|" can also be "^" or "+".
((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
	&& (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
	=> (REV16 (ANDconst <x.Type> [0xffffffff] x))

// Extract from reg pair
(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)

(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
	=> (EXTRWconst [32-c] x2 x)
( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
	=> (EXTRWconst [32-c] x2 x)
(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
	=> (EXTRWconst [32-c] x2 x)
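// EXTR Rd, Rn, Rm, #lsb extracts 64 bits starting at bit lsb of the
// concatenation Rn:Rm, i.e. (Rn << (64-lsb)) | (Rm >> lsb); EXTRW is the
// 32-bit analogue. This funnel shift matches the OR-of-opposite-shifts
// patterns above.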

// Rewrite special pairs of shifts to AND.
// On ARM64 the bitmask can fit into an instruction.
(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
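// e.g. c = 56: (x << 56) >> 56 (logical) keeps just the low byte, so it
// becomes (ANDconst [0xff] x).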

// Special case for setting bits to 1. An example is math.Copysign(c, -1).
(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
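// If c2|c1 == ^0, every bit cleared by the AND is set again by the OR,
// so the AND is redundant.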

// If the shift amount is at least the data size (32, 16, or 8), the zero-extended result is constant 0.
(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])

// After zero extension, the upper (64 - datasize(32|16|8)) bits are zero, so a right
// shift by at least the data size can be optimized to constant 0.
(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])

// bitfield ops

// sbfiz
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// int64(x << lc)
(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
// int64(x) << lc
(SLLconst [lc] (MOVWreg x)) => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
(SLLconst [lc] (MOVHreg x)) => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
(SLLconst [lc] (MOVBreg x)) => (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
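// SBFIZ [lsb, width] sign-extends the low width bits of the source and shifts
// the result left by lsb. e.g. (x << 4) >> 2 (arithmetic) keeps the low 60
// bits of x, so it becomes (SBFIZ [armBFAuxInt(2, 60)] x).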

// sbfx
// (x << lc) >> rc
(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// int64(x) >> rc
(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
// merge sbfx and sign-extension into sbfx
(MOVWreg (SBFX [bfc] x)) && bfc.width() <= 32 => (SBFX [bfc] x)
(MOVHreg (SBFX [bfc] x)) && bfc.width() <= 16 => (SBFX [bfc] x)
(MOVBreg (SBFX [bfc] x)) && bfc.width() <= 8 => (SBFX [bfc] x)

// sbfiz/sbfx combinations: merge shifts into bitfield ops
(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.lsb()
	=> (SBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (SBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
(SBFX [bfc] s:(SLLconst [sc] x))
	&& s.Uses == 1
	&& sc <= bfc.lsb()
	=> (SBFX [armBFAuxInt(bfc.lsb() - sc, bfc.width())] x)
(SBFX [bfc] s:(SLLconst [sc] x))
	&& s.Uses == 1
	&& sc > bfc.lsb()
	=> (SBFIZ [armBFAuxInt(sc - bfc.lsb(), bfc.width() - (sc-bfc.lsb()))] x)

// ubfiz
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
// uint64(x) << lc
(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
// uint64(x << lc)
(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)

// merge ANDconst into ubfiz
// (x & ac) << sc
(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x << sc) & ac
(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
	=> (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)

// ubfx
// (x << lc) >> rc
(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
// uint64(x) >> rc
(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
// uint64(x >> rc)
(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
// merge ANDconst into ubfx
// (x >> sc) & ac
(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
// (x & ac) >> sc
(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
	=> (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
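// UBFX [lsb, width] extracts width bits starting at bit lsb, zero-extended;
// e.g. (x >> 8) & 0xff becomes (UBFX [armBFAuxInt(8, 8)] x).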
// merge ANDconst and ubfx into ubfx
(ANDconst [c] (UBFX [bfc] x)) && isARM64BFMask(0, c, 0) =>
	(UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), arm64BFWidth(c, 0)))] x)
(UBFX [bfc] (ANDconst [c] x)) && isARM64BFMask(0, c, 0) && bfc.lsb() + bfc.width() <= arm64BFWidth(c, 0) =>
	(UBFX [bfc] x)
// merge ubfx and zero-extension into ubfx
(MOVWUreg (UBFX [bfc] x)) && bfc.width() <= 32 => (UBFX [bfc] x)
(MOVHUreg (UBFX [bfc] x)) && bfc.width() <= 16 => (UBFX [bfc] x)
(MOVBUreg (UBFX [bfc] x)) && bfc.width() <= 8 => (UBFX [bfc] x)

// Extracting bits from across a zero-extension boundary.
(UBFX [bfc] e:(MOVWUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 32
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 32-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVHUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 16
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 16-bfc.lsb()))] x)
(UBFX [bfc] e:(MOVBUreg x))
	&& e.Uses == 1
	&& bfc.lsb() < 8
	=> (UBFX [armBFAuxInt(bfc.lsb(), min(bfc.width(), 8-bfc.lsb()))] x)

// ubfiz/ubfx combinations: merge shifts into bitfield ops
(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.width()
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFX [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.width()+bfc.lsb() < 64
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width())] x)
(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.width()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()+sc, bfc.width()-sc)] x)
// ((x << c1) >> c2) >> c3
(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.lsb()
	=> (UBFIZ [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFX [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)
// ((x << c1) << c2) >> c3
(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.lsb()
	=> (ANDconst [1<<uint(bfc.width())-1] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.lsb()
	=> (UBFX [armBFAuxInt(bfc.lsb()-sc, bfc.width())] x)
(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.lsb()
	&& sc < bfc.lsb()+bfc.width()
	=> (UBFIZ [armBFAuxInt(sc-bfc.lsb(), bfc.lsb()+bfc.width()-sc)] x)

// bfi
(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
	&& ac == ^((1<<uint(bfc.width())-1) << uint(bfc.lsb()))
	=> (BFI [bfc] y x)
(ORshiftLL [s] (ANDconst [xc] x) (ANDconst [yc] y))
	&& xc == ^(yc << s) // opposite masks
	&& yc & (yc+1) == 0 // power of 2 minus 1
	&& yc > 0 // not 0, not all 64 bits (there are better rewrites in that case)
	&& s+log64(yc+1) <= 64 // shifted mask doesn't overflow
	=> (BFI [armBFAuxInt(s, log64(yc+1))] x y)
(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
	&& lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
	=> (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
// bfxil
(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.width())-1)
	=> (BFXIL [bfc] y x)
(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.width()
	=> (BFXIL [bfc] y x)
(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
	=> (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
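// BFI [lsb, width] dst src inserts the low width bits of src into dst at bit
// lsb; BFXIL [lsb, width] dst src copies width bits of src starting at bit lsb
// into the low bits of dst. The remaining bits of dst are preserved either way.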

// FP simplification
(FNEGS (FMULS x y)) => (FNMULS x y)
(FNEGD (FMULD x y)) => (FNMULD x y)
(FMULS (FNEGS x) y) => (FNMULS x y)
(FMULD (FNEGD x) y) => (FNMULD x y)
(FNEGS (FNMULS x y)) => (FMULS x y)
(FNEGD (FNMULD x y)) => (FMULD x y)
(FNMULS (FNEGS x) y) => (FMULS x y)
(FNMULD (FNEGD x) y) => (FMULD x y)

(FADDS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
(FADDD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
(FSUBS a (FMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
(FSUBD a (FMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
(FSUBS (FMULS x y) a) && a.Block.Func.useFMA(v) => (FNMSUBS a x y)
(FSUBD (FMULD x y) a) && a.Block.Func.useFMA(v) => (FNMSUBD a x y)
(FADDS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMSUBS a x y)
(FADDD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMSUBD a x y)
(FSUBS a (FNMULS x y)) && a.Block.Func.useFMA(v) => (FMADDS a x y)
(FSUBD a (FNMULD x y)) && a.Block.Func.useFMA(v) => (FMADDD a x y)
(FSUBS (FNMULS x y) a) && a.Block.Func.useFMA(v) => (FNMADDS a x y)
(FSUBD (FNMULD x y) a) && a.Block.Func.useFMA(v) => (FNMADDD a x y)

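// Fold loads from read-only symbols into constants at compile time.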
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int8(read8(sym, int64(off))))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])

// Prefetch instructions (aux is the option: 0 - PLDL1KEEP; 1 - PLDL1STRM)
(PrefetchCache addr mem) => (PRFM [0] addr mem)
(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)

// Arch-specific inlining for small or disjoint runtime.memmove
(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(s1, s2, s3, call)
	=> (Move [sz] dst src mem)

// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)

((REV|REVW) ((REV|REVW) p)) => p

// internal/runtime/math.MulUintptr intrinsics

(Select0 (Mul64uover x y)) => (MUL x y)
(Select1 (Mul64uover x y)) => (NotEqual (CMPconst (UMULH <typ.UInt64> x y) [0]))
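// The overflow result of a 64x64-bit unsigned multiply is set exactly when
// the high half of the full product (UMULH) is nonzero.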