// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) ...) => (MULV ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Select0 (Mul64uhilo x y)) => (MULHVU x y)
(Select1 (Mul64uhilo x y)) => (MULV x y)
(Select0 (Mul64uover x y)) => (MULV x y)
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))
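// Mul64uhilo yields both halves of the full 128-bit product: Select0 is the
// high 64 bits (MULHVU) and Select1 the low 64 bits (MULV). For Mul64uover,
// the product overflows 64 bits exactly when the high half is nonzero, hence
// the SGTU comparison against zero.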

(Hmul64 ...) => (MULHV ...)
(Hmul64u ...) => (MULHVU ...)
(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])
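// There is no dedicated 32-bit high-multiply, so Hmul32 forms the full
// 64-bit product of the sign- (or zero-) extended operands and takes the
// upper half with a 32-bit shift.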

(Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...)
(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (REMV x y)
(Mod64u ...) => (REMVU ...)
(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))

(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))
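// The carry-out falls out of unsigned compares: with s = x+y, the first add
// carried iff s < x (SGTU x s), and adding c (0 or 1) carried iff s+c < s.
// ORing the two 0/1 results gives the carry. Sub64borrow below mirrors this
// for the borrow.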

(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))

// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
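// Computing the average this way avoids the 65-bit intermediate sum: x-y
// cannot wrap when x >= y. E.g. x=10, y=4: (10-4)/2 + 4 = 7 = (10+4)/2.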

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// The hardware instruction uses only the low 6 bits of the shift amount;
// we compare against 64 to implement Go semantics for large shifts.
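// Each Lsh rule below is a branch-free form of
//	result = (y < 64) ? (x << y) : 0
// SGTU yields the 0/1 bound check and MASKEQZ zeroes the shifted value when
// the check fails; the unsigned Rsh rules follow the same pattern with SRLV.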
(Lsh64x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh64x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh32x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh16x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh8x32 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh64Ux64 <t> x y) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh64Ux32 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh32Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh16Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh8Ux32 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

// bitfield ops

// bstrpickv
// (x << lc) >> rc
(SRLVconst [rc] (SLLVconst [lc] x)) && lc <= rc => (BSTRPICKV [rc-lc + ((64-lc)-1)<<6] x)
// uint64(x) >> rc
(SRLVconst [rc] (MOVWUreg x)) && rc < 32 => (BSTRPICKV [rc + 31<<6] x)
(SRLVconst [rc] (MOVHUreg x)) && rc < 16 => (BSTRPICKV [rc + 15<<6] x)
(SRLVconst [rc] (MOVBUreg x)) && rc < 8 => (BSTRPICKV [rc + 7<<6] x)
// uint64(x >> rc)
(MOVWUreg (SRLVconst [rc] x)) && rc < 32 => (BSTRPICKV [rc + (31+rc)<<6] x)
(MOVHUreg (SRLVconst [rc] x)) && rc < 16 => (BSTRPICKV [rc + (15+rc)<<6] x)
(MOVBUreg (SRLVconst [rc] x)) && rc < 8 => (BSTRPICKV [rc + (7+rc)<<6] x)
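// In each BSTRPICKV the auxint packs the selected bit range as msb<<6 + lsb;
// e.g. [rc + 31<<6] extracts bits 31..rc, which is exactly uint64(x)>>rc for
// a zero-extended 32-bit x.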

// rotates
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLLV <t> x (ANDconst <typ.Int64> [7] y)) (SRLV <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEGV <typ.Int64> y))))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft16 <t> x y) => (ROTR <t> (OR <typ.UInt32> (ZeroExt16to32 x) (SLLVconst <t> (ZeroExt16to32 x) [16])) (NEGV <typ.Int64> y))
(RotateLeft32 x y) => (ROTR x (NEGV <y.Type> y))
(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))
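// Only rotate-right instructions exist, so left rotations negate the amount:
// rotl(x, y) = rotr(x, -y mod width), and ROTR/ROTRV use only the low bits
// of the amount. RotateLeft16 first doubles x into a 32-bit word (x | x<<16)
// so that the bits ROTR wraps around are the right ones.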

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(BitLen64 <t> x) => (NEGV <t> (SUBVconst <t> [64] (CLZV <t> x)))
(BitLen32 <t> x) => (NEGV <t> (SUBVconst <t> [32] (CLZW <t> x)))
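// BitLen(x) = width - CLZ(x): SUBVconst computes CLZ(x)-width and NEGV
// negates it. E.g. for BitLen64(1), CLZV yields 63 and the result is
// -(63-64) = 1.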
(Bswap(16|32|64) ...) => (REVB(2H|2W|V) ...)
(BitRev8 ...) => (BITREV4B ...)
(BitRev16 <t> x) => (REVB2H (BITREV4B <t> x))
(BitRev32 ...) => (BITREVW ...)
(BitRev64 ...) => (BITREVV ...)
(Ctz(32|64)NonZero ...) => (Ctz(32|64) ...)
(Ctz(32|64) ...) => (CTZ(W|V) ...)

(PopCount64 <t> x) => (MOVVfpgp <t> (VPCNT64 <typ.Float64> (MOVVgpfp <typ.Float64> x)))
(PopCount32 <t> x) => (MOVWfpgp <t> (VPCNT32 <typ.Float32> (MOVWgpfp <typ.Float32> x)))
(PopCount16 <t> x) => (MOVWfpgp <t> (VPCNT16 <typ.Float32> (MOVWgpfp <typ.Float32> (ZeroExt16to32 x))))

// math package intrinsics
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
(Abs ...) => (ABSD ...)
(Copysign ...) => (FCOPYSGD ...)

(Min(64|32)F ...) => (FMIN(D|F) ...)
(Max(64|32)F ...) => (FMAX(D|F) ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
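// Slicemask must be 0 for x == 0 and all ones for x > 0. NEGV makes the
// sign bit track "x != 0", and the arithmetic right shift by 63 broadcasts
// that bit across the word.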

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// comparisons
(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
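// There is no direct equality instruction, so x == y is computed as
// (x ^ y) < 1 unsigned: the XOR is zero exactly when the operands are
// equal, and SGTU against constant 1 converts that to a 0/1 boolean.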

(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVVconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVHstore ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) => (MOVWstore ptr (MOVVconst [0]) mem)
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) => (MOVVstore ptr (MOVVconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVWstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVVstore [5] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVVstore [6] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVVstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [16] ptr mem) =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))

// strip off fractional word zeroing
(Zero [s] ptr mem) && s%8 != 0 && s > 16 =>
	(Zero [s%8]
		(OffPtr <ptr.Type> ptr [s-s%8])
		(Zero [s-s%8] ptr mem))
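// E.g. Zero [23] becomes a Zero [16] of the aligned body followed by a
// 7-byte Zero at offset 16, so only the tail needs the small-size rules
// above.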

// medium zeroing uses a duff device
(Zero [s] ptr mem)
	&& s%8 == 0 && s > 16 && s <= 8*128
	&& !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)

// large zeroing uses a loop
(Zero [s] ptr mem)
	&& s%8 == 0 && s > 8*128 =>
	(LoweredZero
		ptr
		(ADDVconst <ptr.Type> ptr [s-8])
		mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVVstore dst (MOVVload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [11] dst src mem) =>
	(MOVWstore [7] dst (MOVWload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [13] dst src mem) =>
	(MOVVstore [5] dst (MOVVload [5] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [14] dst src mem) =>
	(MOVVstore [6] dst (MOVVload [6] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [15] dst src mem) =>
	(MOVVstore [7] dst (MOVVload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [16] dst src mem) =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))

// strip off fractional word move
(Move [s] dst src mem) && s%8 != 0 && s > 16 =>
	(Move [s%8]
		(OffPtr <dst.Type> dst [s-s%8])
		(OffPtr <src.Type> src [s-s%8])
		(Move [s-s%8] dst src mem))

// medium move uses a duff device
(Move [s] dst src mem)
	&& s%8 == 0 && s > 16 && s <= 8*128
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
//	MOVV	(R20), R30
//	ADDV	$8, R20
//	MOVV	R30, (R21)
//	ADDV	$8, R21
// and 128 is the number of such blocks. See runtime/duff_loong64.s:duffcopy.

// large move uses a loop
(Move [s] dst src mem)
	&& s%8 == 0 && s > 1024 && logLargeCopy(v, s) =>
	(LoweredMove
		dst
		src
		(ADDVconst <src.Type> src [s-8])
		mem)


// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStore(8|32|64)Variant ...) => (LoweredAtomicStore(8|32|64)Variant ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicExchange8Variant ...) => (LoweredAtomicExchange8Variant ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

// Loong64's 32-bit atomic operation instructions ll.w and amcasw both sign-extend
// their result, so the input parameters need to be sign-extended to 64 bits;
// otherwise the subsequent comparison operations may not produce the expected results.
//
(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
(AtomicCompareAndSwap32Variant ptr old new mem) => (LoweredAtomicCas32Variant ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64Variant ...) => (LoweredAtomicCas64Variant ...)

// Atomic memory logical operations (old style).
//
// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
//
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(NORconst [0] <typ.UInt32> (SLLV <typ.UInt32> (XORconst <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr)))) mem)

(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) mem)
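// Worked example (byte lanes are little-endian): AtomicAnd8 of val = 0xF0
// with ptr&3 == 1 uses the 32-bit mask ^((0xF0^0xff) << 8) = 0xfffff0ff,
// which ANDs 0xF0 into byte 1 and leaves the other three bytes unchanged.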

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Atomic memory logical operations (new style).
(AtomicAnd(64|32)value ...) => (LoweredAtomicAnd(64|32)value ...)
(AtomicOr(64|32)value ...) => (LoweredAtomicOr(64|32)value ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
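// Branch-free select: MASKEQZ yields x when cond is nonzero (else 0) and
// MASKNEZ yields y when cond is zero (else 0), so exactly one operand of
// the OR is live and the result is cond != 0 ? x : y.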

// c > d-x => x > d-c
(SGT (MOVVconst [c]) (NEGV (SUBVconst [d] x))) && is32Bit(d-c) => (SGT x (MOVVconst [d-c]))

(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
// Do not fold global variable accesses in -dynlink mode, where they will be
// rewritten to use the GOT via REGTMP, which currently cannot handle large offsets.
(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)

(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// register indexed load
(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem)
(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVFloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVFloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVDloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)

// register indexed store
(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem)
(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
(MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(MOVFstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVFstore [int32(c)] ptr val mem)
(MOVFstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVFstore [int32(c)] idx val mem)
(MOVDstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)

// register indexed store zero
(MOVVstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVstorezeroidx ptr idx mem)
(MOVWstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
(MOVHstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
(MOVBstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
(MOVVstoreidx ptr idx (MOVVconst [0]) mem) => (MOVVstorezeroidx ptr idx mem)
(MOVWstoreidx ptr idx (MOVVconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
(MOVHstoreidx ptr idx (MOVVconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
(MOVBstoreidx ptr idx (MOVVconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
(MOVVstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVstorezero [int32(c)] ptr mem)
(MOVVstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVVstorezero [int32(c)] idx mem)
(MOVWstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
(MOVWstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
(MOVHstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
(MOVHstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
(MOVBstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
(MOVBstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVVnop doesn't emit an instruction; it exists only to ensure the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])

// If the shift amount is at least the data size (32, 16, or 8), the result is constant 0.
(MOVWUreg (SLLVconst [lc] x)) && lc >= 32 => (MOVVconst [0])
(MOVHUreg (SLLVconst [lc] x)) && lc >= 16 => (MOVVconst [0])
(MOVBUreg (SLLVconst [lc] x)) && lc >= 8 => (MOVVconst [0])

// After zero extension, the upper (64 - data size) bits are zero, so shifting
// right by the data size or more yields constant 0.
(SRLVconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVVconst [0])
(SRLVconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVVconst [0])
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])

// mul by constant
(MULV x (MOVVconst [-1])) => (NEGV x)
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x
(MULV x (MOVVconst [c])) && isPowerOfTwo(c) => (SLLVconst [log64(c)] x)

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
(REMVU _ (MOVVconst [1])) => (MOVVconst [0]) // mod
(REMVU x (MOVVconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
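// Only the unsigned forms are strength-reduced here, since shifting and
// masking round differently for negative operands: e.g. x/8 becomes x>>3
// (log64(8) = 3) and x%8 becomes x&7 only for unsigned x.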

// FMA
(FMA ...) => (FMADDD ...)
((ADD|SUB)F (MULF x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)F x y z)
((ADD|SUB)D (MULD x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)D x y z)
// z - xy -> -(xy - z)
(SUBF z (MULF x y)) && z.Block.Func.useFMA(v) => (FNMSUBF x y z)
(SUBD z (MULD x y)) && z.Block.Func.useFMA(v) => (FNMSUBD x y z)
// z + (-xy) -> -(xy - z)
// z - (-xy) -> xy + z
((ADD|SUB)F z (NEGF (MULF x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)F x y z)
((ADD|SUB)D z (NEGD (MULD x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)D x y z)
// -xy - z -> -(xy + z)
(SUBF (NEGF (MULF x y)) z) && z.Block.Func.useFMA(v) => (FNMADDF x y z)
(SUBD (NEGD (MULD x y)) z) && z.Block.Func.useFMA(v) => (FNMADDD x y z)

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVVconst [0])

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c != 0 => x

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d]) // mod
(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])

(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// SGT/SGTU with known outcomes.
(SGT x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

(EQ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQ (SGTUconst [c] y) yes no)
(NE (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NE (SGTUconst [c] y) yes no)
(EQ (SUBV x y) yes no) => (BEQ x y yes no)
(NE (SUBV x y) yes no) => (BNE x y yes no)
(EQ (SGT x y) yes no) => (BGE y x yes no)
(NE (SGT x y) yes no) => (BLT y x yes no)
(EQ (SGTU x y) yes no) => (BGEU y x yes no)
(NE (SGTU x y) yes no) => (BLTU y x yes no)

// absorb constants into branches
(EQ (MOVVconst [0]) yes no) => (First yes no)
(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVVconst [0]) yes no) => (First no yes)
(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)

// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)