// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) ...) => (MULV ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)
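// Select0 of Mul64uhilo is the high 64 bits of the full product and Select1
// the low 64 bits; Mul64uover overflows iff the high word is nonzero.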
(Select0 (Mul64uhilo x y)) => (MULHVU x y)
(Select1 (Mul64uhilo x y)) => (MULV x y)
(Select0 (Mul64uover x y)) => (MULV x y)
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))

(Hmul64 ...) => (MULHV ...)
(Hmul64u ...) => (MULHVU ...)
(Hmul32 x y) => (SRAVconst (MULV (SignExt32to64 x) (SignExt32to64 y)) [32])
(Hmul32u x y) => (SRLVconst (MULV (ZeroExt32to64 x) (ZeroExt32to64 y)) [32])

(Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...)
(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (REMV x y)
(Mod64u ...) => (REMVU ...)
(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))

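// 64-bit add with carry: Select0 is the low 64 bits of x+y+c (c is 0 or 1).
// For Select1, the carry out is set iff x > x+y (unsigned), or iff the
// partial sum s wrapped again when c was added.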
(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))

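// 64-bit subtract with borrow: Select1 is the borrow out, set iff the
// partial difference s = x-y exceeds x (i.e. y > x), or iff subtracting the
// borrow c wrapped s.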
(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// The 64-bit hardware shifts use only the low 6 bits of the shift amount
// (the 32-bit ones only the low 5), so we compare the amount against the
// operand width to preserve Go semantics for large shifts.
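// In the unbounded cases below, (SGTU bound y) is 1 exactly when y < bound,
// and MASKEQZ zeroes the shifted value when that condition is 0.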

// left shift
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)

(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

// unsigned right shift
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV x y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL x y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV (ZeroExt8to64 x) y)

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x y) (SGTU (MOVVconst <typ.UInt64> [32]) y))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) y) (SGTU (MOVVconst <typ.UInt64> [64]) y))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y)))

// signed right shift
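// In the unbounded signed cases the amount is saturated rather than masked:
// when y exceeds the bound, NEGV(SGTU ...) is all ones, so the OR forces the
// amount to the maximum and SRAV/SRA fill with the sign bit.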
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV x y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA x y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt16to64 x) y)
(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV (SignExt8to64 x) y)

(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64 y)))

(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

// bitfield ops

// bstrpickv
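// BSTRPICKV extracts bits [msb:lsb] of its operand and zero-extends the
// result; the auxint packs the range as msb<<6 + lsb.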
// (x << lc) >> rc
(SRLVconst [rc] (SLLVconst [lc] x)) && lc <= rc => (BSTRPICKV [rc-lc + ((64-lc)-1)<<6] x)
// uint64(x) >> rc
(SRLVconst [rc] (MOVWUreg x)) && rc < 32 => (BSTRPICKV [rc + 31<<6] x)
(SRLVconst [rc] (MOVHUreg x)) && rc < 16 => (BSTRPICKV [rc + 15<<6] x)
(SRLVconst [rc] (MOVBUreg x)) && rc < 8 => (BSTRPICKV [rc + 7<<6] x)
// uint64(x >> rc)
(MOVWUreg (SRLVconst [rc] x)) && rc < 32 => (BSTRPICKV [rc + (31+rc)<<6] x)
(MOVHUreg (SRLVconst [rc] x)) && rc < 16 => (BSTRPICKV [rc + (15+rc)<<6] x)
(MOVBUreg (SRLVconst [rc] x)) && rc < 8 => (BSTRPICKV [rc + (7+rc)<<6] x)

// rotates
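// Loong64 has only 32- and 64-bit rotate-right instructions (ROTR/ROTRV).
// Left rotates become right rotates by the negated amount; RotateLeft8 is
// assembled from shifts, and RotateLeft16 duplicates the halfword into both
// halves of a 32-bit word so ROTR can rotate it.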
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLLV <t> x (ANDconst <typ.Int64> [7] y)) (SRLV <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEGV <typ.Int64> y))))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft16 <t> x y) => (ROTR <t> (OR <typ.UInt32> (ZeroExt16to32 x) (SLLVconst <t> (ZeroExt16to32 x) [16])) (NEGV <typ.Int64> y))
(RotateLeft32 x y) => (ROTR x (NEGV <y.Type> y))
(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(BitLen64 <t> x) => (NEGV <t> (SUBVconst <t> [64] (CLZV <t> x)))
(BitLen32 <t> x) => (NEGV <t> (SUBVconst <t> [32] (CLZW <t> x)))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(Bswap(16|32|64) ...) => (REVB(2H|2W|V) ...)
(BitRev8 ...) => (BITREV4B ...)
(BitRev16 <t> x) => (REVB2H (BITREV4B <t> x))
(BitRev32 ...) => (BITREVW ...)
(BitRev64 ...) => (BITREVV ...)
(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
(Ctz(32|64) ...) => (CTZ(W|V) ...)
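// For the narrow Ctz ops, OR in a guard bit just above the data so that CTZV
// returns the type's width when the input is zero.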
(Ctz16 x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<16])))
(Ctz8 x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<8])))

(PopCount64 <t> x) => (MOVVfpgp <t> (VPCNT64 <typ.Float64> (MOVVgpfp <typ.Float64> x)))
(PopCount32 <t> x) => (MOVWfpgp <t> (VPCNT32 <typ.Float32> (MOVWgpfp <typ.Float32> x)))
(PopCount16 <t> x) => (MOVWfpgp <t> (VPCNT16 <typ.Float32> (MOVWgpfp <typ.Float32> (ZeroExt16to32 x))))

// math package intrinsics
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
(Abs ...) => (ABSD ...)
(Copysign ...) => (FCOPYSGD ...)

(Min(64|32)F ...) => (FMIN(D|F) ...)
(Max(64|32)F ...) => (FMAX(D|F) ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// comparisons
(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
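// In-between sizes are handled with pairs of stores that may overlap or be
// unaligned (e.g. Zero [7] stores 4 bytes at offsets 0 and 3).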
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVVconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVHstore ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) => (MOVWstore ptr (MOVVconst [0]) mem)
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) => (MOVVstore ptr (MOVVconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVWstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVVstore [5] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVVstore [6] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVVstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [16] ptr mem) =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))

// strip off fractional word zeroing
(Zero [s] ptr mem) && s%8 != 0 && s > 16 =>
	(Zero [s%8]
		(OffPtr <ptr.Type> ptr [s-s%8])
		(Zero [s-s%8] ptr mem))

// medium zeroing uses a duff device
(Zero [s] ptr mem)
	&& s%8 == 0 && s > 16 && s <= 8*128 =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)
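// 8 and 128 are magic constants: 8 is the size in bytes of one zeroing block
// in duffzero and 128 the number of blocks, so the auxint is the offset to
// jump to within the routine. See runtime/duff_loong64.s:duffzero.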

// large zeroing uses a loop
(Zero [s] ptr mem)
	&& s%8 == 0 && s > 8*128 =>
	(LoweredZero
		ptr
		(ADDVconst <ptr.Type> ptr [s-8])
		mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVVstore dst (MOVVload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [11] dst src mem) =>
	(MOVWstore [7] dst (MOVWload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [13] dst src mem) =>
	(MOVVstore [5] dst (MOVVload [5] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [14] dst src mem) =>
	(MOVVstore [6] dst (MOVVload [6] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [15] dst src mem) =>
	(MOVVstore [7] dst (MOVVload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [16] dst src mem) =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))

// strip off fractional word move
(Move [s] dst src mem) && s%8 != 0 && s > 16 =>
	(Move [s%8]
		(OffPtr <dst.Type> dst [s-s%8])
		(OffPtr <src.Type> src [s-s%8])
		(Move [s-s%8] dst src mem))

// medium move uses a duff device
(Move [s] dst src mem)
	&& s%8 == 0 && s > 16 && s <= 8*128
	&& logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
//	MOVV	(R20), R30
//	ADDV	$8, R20
//	MOVV	R30, (R21)
//	ADDV	$8, R21
// and 128 is the number of such blocks. See runtime/duff_loong64.s:duffcopy.

// large move uses a loop
(Move [s] dst src mem)
	&& s%8 == 0 && s > 1024 && logLargeCopy(v, s) =>
	(LoweredMove
		dst
		src
		(ADDVconst <src.Type> src [s-8])
		mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect
// the store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStore(8|32|64)Variant ...) => (LoweredAtomicStore(8|32|64)Variant ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicExchange8Variant ...) => (LoweredAtomicExchange8Variant ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

// Loong64's 32-bit atomic operation instructions ll.w and amcasw are both
// sign-extended, so the input parameters need to be sign-extended to 64 bits;
// otherwise the subsequent comparison operations may not produce the expected
// results.
//
(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
(AtomicCompareAndSwap32Variant ptr old new mem) => (LoweredAtomicCas32Variant ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64Variant ...) => (LoweredAtomicCas64Variant ...)

// Atomic memory logical operations (old style).
//
// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
//
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(NORconst [0] <typ.UInt32> (SLLV <typ.UInt32> (XORconst <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr)))) mem)

(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) mem)

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// Atomic memory logical operations (new style).
(AtomicAnd(64|32)value ...) => (LoweredAtomicAnd(64|32)value ...)
(AtomicOr(64|32)value ...) => (LoweredAtomicOr(64|32)value ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NE (MOVBUreg <typ.UInt64> cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

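// CondSelect: MASKEQZ keeps x when cond is nonzero, MASKNEZ keeps y when
// cond is zero; OR merges the two, one of which is always zero.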
(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))

// c > d-x => x > d-c
(SGT (MOVVconst [c]) (NEGV (SUBVconst [d] x))) && is32Bit(d-c) => (SGT x (MOVVconst [d-c]))

(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
// Do not fold global variable accesses in -dynlink mode, where they will be
// rewritten to use the GOT via REGTMP, which currently cannot handle large
// offsets.
(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {sym} ptr mem)

(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)

(MOV(B|H|W|V)storezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V)storezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// register indexed load
(MOVVload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx ptr idx mem)
(MOVWUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
(MOVWload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
(MOVHUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
(MOVHload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
(MOVBUload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
(MOVBload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
(MOVFload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx ptr idx mem)
(MOVDload [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
(MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVFloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVFloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVDloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)

// register indexed store
(MOVVstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr idx val mem)
(MOVWstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
(MOVHstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
(MOVBstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
(MOVFstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr idx val mem)
(MOVDstore [off] {sym} (ADDV ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
(MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
(MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(MOVFstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVFstore [int32(c)] ptr val mem)
(MOVFstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVFstore [int32(c)] idx val mem)
(MOVDstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)

// register indexed store zero
(MOVVstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVVstorezeroidx ptr idx mem)
(MOVWstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
(MOVHstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
(MOVBstorezero [off] {sym} (ADDV ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
(MOVVstoreidx ptr idx (MOVVconst [0]) mem) => (MOVVstorezeroidx ptr idx mem)
(MOVWstoreidx ptr idx (MOVVconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
(MOVHstoreidx ptr idx (MOVVconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
(MOVBstoreidx ptr idx (MOVVconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
(MOVVstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVstorezero [int32(c)] ptr mem)
(MOVVstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVVstorezero [int32(c)] idx mem)
(MOVWstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
(MOVWstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
(MOVHstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
(MOVHstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
(MOVBstorezeroidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
(MOVBstorezeroidx (MOVVconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)

// If a register move has only 1 use, just use the same register without
// emitting an instruction.
// MOVVnop doesn't emit an instruction; it exists only to pin down the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLL _ (MOVVconst [c])) && uint64(c) >= 32 => (MOVVconst [0])
(SLLV _ (MOVVconst [c])) && uint64(c) >= 64 => (MOVVconst [0])
(SRL _ (MOVVconst [c])) && uint64(c) >= 32 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c) >= 64 => (MOVVconst [0])
(SRA x (MOVVconst [c])) && uint64(c) >= 32 => (SRAconst x [31])
(SRAV x (MOVVconst [c])) && uint64(c) >= 64 => (SRAVconst x [63])
(SLL x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SLLconst x [c])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRL x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SRLconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRA x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SRAconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
(ROTR x (MOVVconst [c])) => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])

// SLLV/SRLV/SRAV only consider the bottom 6 bits of y; similarly, SLL/SRL/SRA
// only consider the bottom 5 bits of y.
(SLL x (ANDconst [31] y)) => (SLL x y)
(SRL x (ANDconst [31] y)) => (SRL x y)
(SRA x (ANDconst [31] y)) => (SRA x y)
(SLLV x (ANDconst [63] y)) => (SLLV x y)
(SRLV x (ANDconst [63] y)) => (SRLV x y)
(SRAV x (ANDconst [63] y)) => (SRAV x y)

// Avoid unnecessary zero and sign extension when right shifting.
(SRLVconst [rc] (MOVWUreg y)) && rc >= 0 && rc <= 31 => (SRLconst [int64(rc)] y)
(SRAVconst [rc] (MOVWreg y)) && rc >= 0 && rc <= 31 => (SRAconst [int64(rc)] y)

// Replace right shifts that exceed the width of the signed type.
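// A sign-extended value shifted right by at least its width leaves only
// copies of the sign bit. For 8- and 16-bit values this is computed as
// (x << (64-width)) >> 63; for 32-bit values SRAconst [31] suffices.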
(SRAVconst <t> [rc] (MOVBreg y)) && rc >= 8 => (SRAVconst [63] (SLLVconst <t> [56] y))
(SRAVconst <t> [rc] (MOVHreg y)) && rc >= 16 => (SRAVconst [63] (SLLVconst <t> [48] y))
(SRAVconst <t> [rc] (MOVWreg y)) && rc >= 32 => (SRAconst [31] y)

// If the left-shift amount is at least the data size (32, 16, 8), the
// zero-extended result is constant 0.
(MOVWUreg (SLLVconst [lc] x)) && lc >= 32 => (MOVVconst [0])
(MOVHUreg (SLLVconst [lc] x)) && lc >= 16 => (MOVVconst [0])
(MOVBUreg (SLLVconst [lc] x)) && lc >= 8 => (MOVVconst [0])

// After zero extension, the upper (64-datasize(32|16|8)) bits are zero, so
// right shifts by at least the data size yield constant 0.
(SRLVconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVVconst [0])
(SRLVconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVVconst [0])
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])

// mul by constant
(MULV x (MOVVconst [-1])) => (NEGV x)
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x
(MULV x (MOVVconst [c])) && isPowerOfTwo(c) => (SLLVconst [log64(c)] x)

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
(REMVU _ (MOVVconst [1])) => (MOVVconst [0])                          // mod
(REMVU x (MOVVconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x)    // mod

// FMA
(FMA ...) => (FMADDD ...)
((ADD|SUB)F (MULF x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)F x y z)
((ADD|SUB)D (MULD x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)D x y z)
// z - xy -> -(xy - z)
(SUBF z (MULF x y)) && z.Block.Func.useFMA(v) => (FNMSUBF x y z)
(SUBD z (MULD x y)) && z.Block.Func.useFMA(v) => (FNMSUBD x y z)
// z + (-xy) -> -(xy - z)
// z - (-xy) -> xy + z
((ADD|SUB)F z (NEGF (MULF x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)F x y z)
((ADD|SUB)D z (NEGD (MULD x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)D x y z)
// -xy - z -> -(xy + z)
(SUBF (NEGF (MULF x y)) z) && z.Block.Func.useFMA(v) => (FNMADDF x y z)
(SUBD (NEGD (MULD x y)) z) && z.Block.Func.useFMA(v) => (FNMADDD x y z)

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x (NEGV y)) => (ADDV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVVconst [0])
(ORN x (MOVVconst [-1])) => x
(AND x (NORconst [0] y)) => (ANDN x y)
(OR x (NORconst [0] y)) => (ORN x y)

// Fold negation into subtraction.
(NEGV (SUBV x y)) => (SUBV y x)
(NEGV <t> s:(ADDVconst [c] (SUBV x y))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] (SUBV <t> y x))

// Double negation.
(NEGV (NEGV x)) => x
// Fold NEGV into ADDVconst. Take care to keep c in 12 bit range.
(NEGV <t> s:(ADDVconst [c] (NEGV x))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] x)

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c != 0 => x

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
(DIVV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(REMV (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d])                        // mod
(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])

(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)

// Prefetch instructions (hint specified using aux field)
// For PRELD{,X}, the value of hint indicates:
//	hint=0 is defined as load prefetch to L1-cache
//	hint=2 is defined as load prefetch to L3-cache
// The PrefetchCacheStreamed implementation prefetches 512 bytes of data
// into L3. The aux field is defined as follows:
//	bit[4:0]:
//		$hint parameter of PRELDX instruction
//	bit[41:5]:
//		$n parameter of PRELDX instruction, bit[0] of $n is the address
//		sequence, bits[11:1] is the block size, bits[20:12] is the block
//		num, bits[36:21] is the stride; for more details about $n, refer
//		to src/cmd/internal/obj/loong64/doc.go
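// In the PRELDX rule below, $n = (512<<1)|(1<<12): block size 512 bytes in
// bits[11:1] and block num 1 in bits[20:12], shifted left 5 past the hint
// bits, with hint=2 selecting the L3 cache.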
(PrefetchCache addr mem) => (PRELD addr mem [0])
(PrefetchCacheStreamed addr mem) => (PRELDX addr mem [(((512 << 1) + (1 << 12)) << 5) + 2])

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// SGT/SGTU with known outcomes.
(SGT x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

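// SGTUconst's immediate is a signed 12-bit field, hence the [-2048, 2047]
// bound below.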
(EQ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQ (SGTUconst [c] y) yes no)
(NE (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NE (SGTUconst [c] y) yes no)
(EQ (SUBV x y) yes no) => (BEQ x y yes no)
(NE (SUBV x y) yes no) => (BNE x y yes no)
(EQ (SGT x y) yes no) => (BGE y x yes no)
(NE (SGT x y) yes no) => (BLT y x yes no)
(EQ (SGTU x y) yes no) => (BGEU y x yes no)
(NE (SGTU x y) yes no) => (BLTU y x yes no)

// absorb constants into branches
(EQ (MOVVconst [0]) yes no) => (First yes no)
(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVVconst [0]) yes no) => (First no yes)
(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)

// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)