// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Mul64uhilo ...) => (MULVU ...)
(Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
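// Note: MULVU yields the pair (hi, lo) as (Select0, Select1). Mul64uover's
// result is the low word, and its overflow flag is "hi != 0", computed above
// as the unsigned comparison SGTU(hi, 0).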

(Hmul64 x y) => (Select0 (MULV x y))
(Hmul64u x y) => (Select0 (MULVU x y))
(Hmul32 x y) => (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
(Hmul32u x y) => (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])

(Div64 x y) => (Select1 (DIVV x y))
(Div64u x y) => (Select1 (DIVVU x y))
(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (Select0 (DIVV x y))
(Mod64u x y) => (Select0 (DIVVU x y))
(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))

(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))

(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))

// math package intrinsics
(Abs ...) => (ABSD ...)

// (x + y) / 2 with x>=y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
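// Rewriting this way avoids computing x+y, which can wrap past 64 bits; x-y
// cannot, since x >= y. e.g. x = 6, y = 2: (6-2)/2 + 2 = 4 = (6+2)/2.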

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// hardware instruction uses only the low 6 bits of the shift
// we compare to 64 to ensure Go semantics for large shifts
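// e.g. in Lsh64x64 below, SGTU(64, y) is 1 when y < 64 and 0 otherwise; NEGV
// turns that into an all-ones (or all-zeros) mask, so the AND passes the
// shifted value through for small y and yields 0 for y >= 64.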
(Lsh64x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
(Lsh64x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh64x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))

(Lsh32x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
(Lsh32x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh32x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))

(Lsh16x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
(Lsh16x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh16x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))

(Lsh8x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
(Lsh8x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
(Lsh8x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))

(Rsh64Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
(Rsh64Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
(Rsh64Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))

(Rsh32Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
(Rsh32Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
(Rsh32Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))

(Rsh16Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
(Rsh16Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Rsh16Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))

(Rsh8Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
(Rsh8Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
(Rsh8Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))

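// signed right shifts must produce the sign bit, not 0, for counts >= 64, so
// instead of masking the result the count itself is clamped: NEGV(SGTU(y, 63))
// is all ones when y > 63, and ORing that into y gives -1, whose low 6 bits
// (all the hardware uses) are 63.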
(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))

// rotates
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft32 <t> x (MOVVconst [c])) => (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
(RotateLeft64 <t> x (MOVVconst [c])) => (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
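// e.g. RotateLeft8 with c = 3 becomes (x << 3) | (x >> 5); the &7 masks keep
// both shift counts in range even for negative or oversized constants.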

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
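// Slicemask(x) is 0 when x == 0 and all ones when x > 0: NEGV(x) is negative
// for any nonzero length, and the arithmetic shift by 63 smears its sign bit
// across the whole word.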

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (Copy ...)

// comparisons
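// x == y iff x^y == 0 iff 1 >u (x^y), so equality compiles to an unsigned
// compare of the XOR against 1 (SGTU is unsigned greater-than).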
(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _) && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) =>
	(MOVBstore [1] ptr (MOVVconst [0])
		(MOVBstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore ptr (MOVVconst [0]) mem)
(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] ptr (MOVVconst [0])
		(MOVHstore [0] ptr (MOVVconst [0]) mem))
(Zero [4] ptr mem) =>
	(MOVBstore [3] ptr (MOVVconst [0])
		(MOVBstore [2] ptr (MOVVconst [0])
			(MOVBstore [1] ptr (MOVVconst [0])
				(MOVBstore [0] ptr (MOVVconst [0]) mem))))
(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore ptr (MOVVconst [0]) mem)
(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] ptr (MOVVconst [0])
		(MOVWstore [0] ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] ptr (MOVVconst [0])
		(MOVHstore [4] ptr (MOVVconst [0])
			(MOVHstore [2] ptr (MOVVconst [0])
				(MOVHstore [0] ptr (MOVVconst [0]) mem))))

(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVBstore [1] ptr (MOVVconst [0])
			(MOVBstore [0] ptr (MOVVconst [0]) mem)))
(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVHstore [2] ptr (MOVVconst [0])
			(MOVHstore [0] ptr (MOVVconst [0]) mem)))
(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVWstore [4] ptr (MOVVconst [0])
			(MOVWstore [0] ptr (MOVVconst [0]) mem)))
(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore [0] ptr (MOVVconst [0]) mem))
(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] ptr (MOVVconst [0])
		(MOVVstore [8] ptr (MOVVconst [0])
			(MOVVstore [0] ptr (MOVVconst [0]) mem)))

// medium zeroing uses a duff device
// 8 and 128 are magic constants, see runtime/mkduff.go
(Zero [s] {t} ptr mem)
	&& s%8 == 0 && s > 24 && s <= 8*128
	&& t.Alignment()%8 == 0 && !config.noDuffDevice =>
	(DUFFZERO [8 * (128 - s/8)] ptr mem)
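// e.g. Zero [32] enters duffzero at offset 8*(128-32/8) = 992, skipping the
// first 124 of the 128 word-zeroing steps so that exactly four stores run.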

// large or unaligned zeroing uses a loop
(Zero [s] {t} ptr mem)
	&& (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
	(LoweredZero [t.Alignment()]
		ptr
		(ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
		mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore dst (MOVHload src mem) mem)
(Move [2] dst src mem) =>
	(MOVBstore [1] dst (MOVBload [1] src mem)
		(MOVBstore dst (MOVBload src mem) mem))
(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore dst (MOVWload src mem) mem)
(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [2] dst (MOVHload [2] src mem)
		(MOVHstore dst (MOVHload src mem) mem))
(Move [4] dst src mem) =>
	(MOVBstore [3] dst (MOVBload [3] src mem)
		(MOVBstore [2] dst (MOVBload [2] src mem)
			(MOVBstore [1] dst (MOVBload [1] src mem)
				(MOVBstore dst (MOVBload src mem) mem))))
(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore dst (MOVVload src mem) mem)
(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [4] dst (MOVWload [4] src mem)
		(MOVWstore dst (MOVWload src mem) mem))
(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [6] dst (MOVHload [6] src mem)
		(MOVHstore [4] dst (MOVHload [4] src mem)
			(MOVHstore [2] dst (MOVHload [2] src mem)
				(MOVHstore dst (MOVHload src mem) mem))))

(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBload [2] src mem)
		(MOVBstore [1] dst (MOVBload [1] src mem)
			(MOVBstore dst (MOVBload src mem) mem)))
(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
	(MOVHstore [4] dst (MOVHload [4] src mem)
		(MOVHstore [2] dst (MOVHload [2] src mem)
			(MOVHstore dst (MOVHload src mem) mem)))
(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
	(MOVWstore [8] dst (MOVWload [8] src mem)
		(MOVWstore [4] dst (MOVWload [4] src mem)
			(MOVWstore dst (MOVWload src mem) mem)))
(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
	(MOVVstore [16] dst (MOVVload [16] src mem)
		(MOVVstore [8] dst (MOVVload [8] src mem)
			(MOVVstore dst (MOVVload src mem) mem)))

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// medium move uses a duff device
(Move [s] {t} dst src mem)
	&& s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
	&& !config.noDuffDevice && logLargeCopy(v, s) =>
	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
// 16 and 128 are magic constants. 16 is the number of bytes to encode:
//	MOVV	(R1), R23
//	ADDV	$8, R1
//	MOVV	R23, (R2)
//	ADDV	$8, R2
// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.
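// e.g. Move [24] enters duffcopy at offset 16*(128-24/8) = 2000, running only
// the last three of the 128 blocks above and copying 3*8 = 24 bytes.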

// large or unaligned move uses a loop
(Move [s] {t} dst src mem)
	&& s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
	(LoweredMove [t.Alignment()]
		dst
		src
		(ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
		mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
(AtomicOr8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3] ptr))) mem)
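// e.g. for a ptr with ptr&3 == 1, the aligned word is at ptr&^3 and the byte
// lives at bit offset (ptr&3)*8 = 8 within it; the SLLVconst [3] computes that
// *8 as a shift, and val is moved there before the 32-bit OR.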

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, (uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
(AtomicAnd8 ptr val mem) && !config.BigEndian =>
	(LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
		(OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3] ptr)))
		(NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
			(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3] ptr))))) mem)

// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val) << (((ptr^3) & 3) * 8))
(AtomicOr8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicOr32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3]
					(XORconst <typ.UInt64> [3] ptr)))) mem)

// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, (uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
(AtomicAnd8 ptr val mem) && config.BigEndian =>
	(LoweredAtomicAnd32 (AND <typ.UInt32Ptr> (MOVVconst [^3]) ptr)
		(OR <typ.UInt64> (SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3]
					(XORconst <typ.UInt64> [3] ptr))))
		(NORconst [0] <typ.UInt64> (SLLV <typ.UInt64>
			(MOVVconst [0xff]) (SLLVconst <typ.UInt64> [3]
				(ANDconst <typ.UInt64> [3]
					(XORconst <typ.UInt64> [3] ptr)))))) mem)
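// on big-endian targets byte 0 is the most significant byte of its 32-bit
// word, so the in-word bit offset of the byte at ptr is ((ptr^3)&3)*8 rather
// than (ptr&3)*8; the XORconst [3] terms above perform that flip. The NORconst
// term in the And8 lowerings builds the mask that leaves the other three
// bytes unchanged.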

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32 ...) => (LoweredAtomicOr32 ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
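// IsSliceInBounds is idx <= len, i.e. !(idx >u len), hence the XOR with 1.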

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NE cond yes no)

// Write barrier.
(WB ...) => (LoweredWB ...)

(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)

// Optimizations

// Absorb boolean tests into block
(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDload [off1+int32(off2)] {sym} ptr mem)

(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)

(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
	&& canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
	&& (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
	(MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

// store zero
(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// if a register move has only 1 use, just use the same register without emitting an instruction
// MOVVnop doesn't emit an instruction; it exists only to pin down the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])

(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// mul by constant
(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
(Select1 (MULVU x (MOVVconst [1]))) => x
(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
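// e.g. x * 8 strength-reduces to x << 3 via the power-of-two rule above.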

// div by constant
(Select1 (DIVVU x (MOVVconst [1]))) => x
(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
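// e.g. unsigned x / 8 becomes x >> 3, and x % 8 becomes x & 7.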

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR x x) => x
(XOR x x) => (MOVVconst [0])

// remove redundant *const ops
(ADDVconst [0] x) => x
(SUBVconst [0] x) => x
(ANDconst [0] _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst [0] x) => x
(ORconst [-1] _) => (MOVVconst [-1])
(XORconst [0] x) => x
(XORconst [-1] x) => (NORconst [0] x)
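// x ^ -1 is ^x, and NOR with zero computes exactly that bitwise complement.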

// generic constant folding
(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d])
(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod
(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// absorb constants into branches
(EQ (MOVVconst [0]) yes no) => (First yes no)
(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NE (MOVVconst [0]) yes no) => (First no yes)
(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)

// SGT/SGTU with known outcomes.
(SGT x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// fold readonly sym load
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])