...

Text file src/cmd/compile/internal/ssa/_gen/RISCV64.rules

     1// Copyright 2016 The Go Authors. All rights reserved.
     2// Use of this source code is governed by a BSD-style
     3// license that can be found in the LICENSE file.
     4
     5// Lowering arithmetic
     6(Add(Ptr|64|32|16|8) ...) => (ADD ...)
     7(Add(64|32)F ...) => (FADD(D|S) ...)
     8
     9(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
    10(Sub(64|32)F ...) => (FSUB(D|S) ...)
    11
    12(Mul64 ...) => (MUL  ...)
    13(Mul64uhilo ...) => (LoweredMuluhilo ...)
    14(Mul64uover ...) => (LoweredMuluover ...)
    15(Mul32 ...) => (MULW ...)
    16(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
    17(Mul8 x y)  => (MULW (SignExt8to32 x)  (SignExt8to32 y))
    18(Mul(64|32)F ...) => (FMUL(D|S) ...)
    19
    20(Div(64|32)F ...) => (FDIV(D|S) ...)
    21
    22(Div64 x y [false])  => (DIV x y)
    23(Div64u ...) => (DIVU ...)
    24(Div32 x y [false])  => (DIVW x y)
    25(Div32u ...) => (DIVUW ...)
    26(Div16 x y [false])  => (DIVW  (SignExt16to32 x) (SignExt16to32 y))
    27(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
    28(Div8 x y)   => (DIVW  (SignExt8to32 x)  (SignExt8to32 y))
    29(Div8u x y)  => (DIVUW (ZeroExt8to32 x)  (ZeroExt8to32 y))
    30
    31(Hmul64 ...)  => (MULH  ...)
    32(Hmul64u ...) => (MULHU ...)
    33(Hmul32 x y)  => (SRAI [32] (MUL  (SignExt32to64 x) (SignExt32to64 y)))
    34(Hmul32u x y) => (SRLI [32] (MUL  (ZeroExt32to64 x) (ZeroExt32to64 y)))
    35
    36(Select0 (Add64carry x y c)) => (ADD (ADD <typ.UInt64> x y) c)
    37(Select1 (Add64carry x y c)) =>
    38	(OR (SLTU <typ.UInt64> s:(ADD <typ.UInt64> x y) x) (SLTU <typ.UInt64> (ADD <typ.UInt64> s c) s))
    39
    40(Select0 (Sub64borrow x y c)) => (SUB (SUB <typ.UInt64> x y) c)
    41(Select1 (Sub64borrow x y c)) =>
    42	(OR (SLTU <typ.UInt64> x s:(SUB <typ.UInt64> x y)) (SLTU <typ.UInt64> s (SUB <typ.UInt64> s c)))
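       // As a rough Go sketch (c is assumed to be 0 or 1), the carry/borrow
       // lowerings above compute:
       //
       //	s := x + y
       //	sum := s + c
       //	carry := b2u(s < x) | b2u(sum < s)   // Select1 of Add64carry
       //
       //	d := x - y
       //	diff := d - c
       //	borrow := b2u(x < d) | b2u(d < diff) // Select1 of Sub64borrow
       //
       // where b2u is an illustrative helper mapping false/true to 0/1.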
    43
    44// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
    45(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
    46
    47(Mod64 x y [false])  => (REM x y)
    48(Mod64u ...) => (REMU  ...)
    49(Mod32 x y [false])  => (REMW x y)
    50(Mod32u ...) => (REMUW ...)
    51(Mod16 x y [false])  => (REMW  (SignExt16to32 x) (SignExt16to32 y))
    52(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
    53(Mod8 x y)   => (REMW  (SignExt8to32  x) (SignExt8to32  y))
    54(Mod8u x y)  => (REMUW (ZeroExt8to32  x) (ZeroExt8to32  y))
    55
    56(And(64|32|16|8) ...) => (AND ...)
    57(Or(64|32|16|8) ...) => (OR ...)
    58(Xor(64|32|16|8) ...) => (XOR ...)
    59
    60(Neg(64|32|16|8) ...) => (NEG ...)
    61(Neg(64|32)F ...) => (FNEG(D|S) ...)
    62
    63(Com(64|32|16|8) ...) => (NOT ...)
    64
    65(Sqrt ...) => (FSQRTD ...)
    66(Sqrt32 ...) => (FSQRTS ...)
    67
    68(Copysign ...) => (FSGNJD ...)
    69
    70(Abs ...) => (FABSD ...)
    71
    72(FMA ...) => (FMADDD ...)
    73
    74(Min(64|32)F ...) => (LoweredFMIN(D|S) ...)
    75(Max(64|32)F ...) => (LoweredFMAX(D|S) ...)
    76
    77// Sign and zero extension.
    78
    79(SignExt8to16  ...) => (MOVBreg ...)
    80(SignExt8to32  ...) => (MOVBreg ...)
    81(SignExt8to64  ...) => (MOVBreg ...)
    82(SignExt16to32 ...) => (MOVHreg ...)
    83(SignExt16to64 ...) => (MOVHreg ...)
    84(SignExt32to64 ...) => (MOVWreg ...)
    85
    86(ZeroExt8to16  ...) => (MOVBUreg ...)
    87(ZeroExt8to32  ...) => (MOVBUreg ...)
    88(ZeroExt8to64  ...) => (MOVBUreg ...)
    89(ZeroExt16to32 ...) => (MOVHUreg ...)
    90(ZeroExt16to64 ...) => (MOVHUreg ...)
    91(ZeroExt32to64 ...) => (MOVWUreg ...)
    92
    93(Cvt32to32F ...) => (FCVTSW ...)
    94(Cvt32to64F ...) => (FCVTDW ...)
    95(Cvt64to32F ...) => (FCVTSL ...)
    96(Cvt64to64F ...) => (FCVTDL ...)
    97
    98(Cvt32Fto32 ...) => (FCVTWS ...)
    99(Cvt32Fto64 ...) => (FCVTLS ...)
   100(Cvt64Fto32 ...) => (FCVTWD ...)
   101(Cvt64Fto64 ...) => (FCVTLD ...)
   102
   103(Cvt32Fto64F ...) => (FCVTDS ...)
   104(Cvt64Fto32F ...) => (FCVTSD ...)
   105
   106(CvtBoolToUint8 ...) => (Copy ...)
   107
   108(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
   109
   110(Slicemask <t> x) => (SRAI [63] (NEG <t> x))
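       // For a non-negative length x, NEG x has its sign bit set exactly when
       // x != 0, so the arithmetic shift by 63 broadcasts that bit; roughly, in Go:
       //
       //	mask := (-int64(x)) >> 63 // 0 if x == 0, -1 (all ones) otherwise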
   111
   112// Truncations
   113// We ignore the unused high parts of registers, so truncates are just copies.
   114(Trunc16to8  ...) => (Copy ...)
   115(Trunc32to8  ...) => (Copy ...)
   116(Trunc32to16 ...) => (Copy ...)
   117(Trunc64to8  ...) => (Copy ...)
   118(Trunc64to16 ...) => (Copy ...)
   119(Trunc64to32 ...) => (Copy ...)
   120
   121// Shifts
   122
    123// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
    124// always be 0.
   125//
   126// Breaking down the operation:
   127//
   128// (SLL x y) generates x << (y & 63).
   129//
   130// If y < 64, this is the value we want. Otherwise, we want zero.
   131//
   132// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
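       // A rough Go sketch of the unbounded 64-bit case below:
       //
       //	var mask uint64
       //	if y < 64 {
       //		mask = ^uint64(0) // all ones: keep the shifted value
       //	}
       //	result := (x << (y & 63)) & mask // x << (y & 63) is what SLL computes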
   133(Lsh8x8   <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   134(Lsh8x16  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   135(Lsh8x32  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   136(Lsh8x64  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg8  <t> (SLTIU <t> [64] y)))
   137(Lsh16x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   138(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   139(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   140(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
   141(Lsh32x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   142(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   143(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   144(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
   145(Lsh64x8  <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   146(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   147(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   148(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
   149
   150(Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLL x y)
   151(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   152(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   153(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
   154
    155// SRL only considers the bottom 6 bits of y; similarly, SRLW only considers
    156// the bottom 5 bits of y. Ensure that the result is always zero if the shift
    157// exceeds the maximum value. See Lsh above for a detailed description.
   158(Rsh8Ux8   <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   159(Rsh8Ux16  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   160(Rsh8Ux32  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   161(Rsh8Ux64  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt8to64  x) y) (Neg8  <t> (SLTIU <t> [64] y)))
   162(Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   163(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   164(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   165(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
   166(Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt8to64  y))))
   167(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt16to64 y))))
   168(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] (ZeroExt32to64 y))))
   169(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRLW <t>  x                y) (Neg32 <t> (SLTIU <t> [32] y)))
   170(Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64  y))))
   171(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
   172(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
   173(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (AND (SRL  <t>  x                y) (Neg64 <t> (SLTIU <t> [64] y)))
   174
   175(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRL  (ZeroExt8to64  x) y)
   176(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  (ZeroExt16to64 x) y)
   177(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLW x                 y)
   178(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL  x                 y)
   179
    180// SRA only considers the bottom 6 bits of y; similarly, SRAW only considers
    181// the bottom 5 bits. If y is greater than the maximum value (either 63 or 31
    182// depending on the instruction), the result of the shift should be either 0
    183// or -1 based on the sign bit of x.
    184//
    185// We implement this by forcing the maximum shift amount whenever y exceeds the maximum value.
    186//
    187// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
    188// us with -1 (0xffff...) if y >= 64. Similarly, we OR (uint64(y < 32) - 1) into y
    189// before passing it to SRAW.
    190//
    191// We don't need to sign-extend the OR result, as it will be at least 8 bits,
    192// more than the 5 or 6 bits SRAW and SRA care about.
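       // A rough Go sketch of the unbounded 64-bit case below (x is signed):
       //
       //	shift := y
       //	if y >= 64 {
       //		shift = 63 // saturate: an arithmetic shift by 63 gives 0 or -1
       //	}
       //	result := x >> shift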
   193(Rsh8x8   <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   194(Rsh8x16  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   195(Rsh8x32  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   196(Rsh8x64  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt8to64  x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   197(Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   198(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   199(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   200(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   201(Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt8to64  y)))))
   202(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt16to64 y)))))
   203(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] (ZeroExt32to64 y)))))
   204(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRAW <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [32] y))))
   205(Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64  y)))))
   206(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
   207(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
   208(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRA  <t> x                 (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
   209
   210(Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRA  (SignExt8to64  x) y)
   211(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA  (SignExt16to64 x) y)
   212(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW  x                y)
   213(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA   x                y)
   214
   215// Rotates.
   216(RotateLeft8  <t> x y) => (OR (SLL  <t> x (ANDI [7]  <y.Type> y)) (SRL <t> (ZeroExt8to64  x) (ANDI [7]  <y.Type> (NEG <y.Type> y))))
   217(RotateLeft16 <t> x y) => (OR (SLL  <t> x (ANDI [15] <y.Type> y)) (SRL <t> (ZeroExt16to64 x) (ANDI [15] <y.Type> (NEG <y.Type> y))))
   218(RotateLeft32 ...) => (ROLW ...)
   219(RotateLeft64 ...) => (ROL  ...)
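       // The 8- and 16-bit rotates above use the usual OR-of-shifts identity;
       // roughly, in Go, for the 8-bit case:
       //
       //	rot := (x << (y & 7)) | (x >> ((-y) & 7)) // x is a uint8, y is unsigned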
   220
   221// Count trailing zeros (note that these will only be emitted for rva22u64 and above).
   222(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
   223(Ctz64 ...) => (CTZ  ...)
   224(Ctz32 ...) => (CTZW ...)
   225(Ctz16 x) => (CTZW (ORI <typ.UInt32> [1<<16] x))
   226(Ctz8  x) => (CTZW (ORI <typ.UInt32> [1<<8]  x))
   227
   228// Bit length (note that these will only be emitted for rva22u64 and above).
   229(BitLen64 <t> x) => (SUB (MOVDconst [64]) (CLZ  <t> x))
   230(BitLen32 <t> x) => (SUB (MOVDconst [32]) (CLZW <t> x))
   231(BitLen16 x) => (BitLen64 (ZeroExt16to64 x))
   232(BitLen8  x) => (BitLen64 (ZeroExt8to64 x))
   233
   234// Byte swap (note that these will only be emitted for rva22u64 and above).
   235(Bswap64 ...) => (REV8 ...)
   236(Bswap32 <t> x) => (SRLI [32] (REV8 <t> x))
   237(Bswap16 <t> x) => (SRLI [48] (REV8 <t> x))
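       // REV8 reverses all eight bytes of the 64-bit register, so the swapped
       // 16- or 32-bit value ends up in the upper bits and is shifted back down;
       // roughly, in Go, for the 32-bit case:
       //
       //	swapped32 := uint32(bits.ReverseBytes64(uint64(x)) >> 32) // math/bits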
   238
   239// Population count (note that these will be emitted with guards for rva20u64).
   240(PopCount64 ...) => (CPOP  ...)
   241(PopCount32 ...) => (CPOPW ...)
   242(PopCount16 x) => (CPOP (ZeroExt16to64 x))
   243(PopCount8  x) => (CPOP (ZeroExt8to64  x))
   244
   245(Less64  ...) => (SLT  ...)
   246(Less32  x y) => (SLT  (SignExt32to64 x) (SignExt32to64 y))
   247(Less16  x y) => (SLT  (SignExt16to64 x) (SignExt16to64 y))
   248(Less8   x y) => (SLT  (SignExt8to64  x) (SignExt8to64  y))
   249(Less64U ...) => (SLTU ...)
   250(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
   251(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
   252(Less8U  x y) => (SLTU (ZeroExt8to64  x) (ZeroExt8to64  y))
   253(Less(64|32)F ...) => (FLT(D|S) ...)
   254
   255// Convert x <= y to !(y > x).
   256(Leq(64|32|16|8)  x y) => (Not (Less(64|32|16|8)  y x))
   257(Leq(64|32|16|8)U x y) => (Not (Less(64|32|16|8)U y x))
   258(Leq(64|32)F ...) => (FLE(D|S) ...)
   259
   260(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
   261(Eq64  x y) => (SEQZ (SUB <x.Type> x y))
   262(Eq32  x y) &&  x.Type.IsSigned() => (SEQZ (SUB <x.Type> (SignExt32to64 x) (SignExt32to64 y)))
   263(Eq32  x y) && !x.Type.IsSigned() => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
   264(Eq16  x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
   265(Eq8   x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64  x) (ZeroExt8to64  y)))
   266(Eq(64|32)F ...) => (FEQ(D|S) ...)
   267
   268(NeqPtr x y) => (Not (EqPtr x y))
   269(Neq64  x y) => (Not (Eq64  x y))
   270(Neq32  x y) => (Not (Eq32  x y))
   271(Neq16  x y) => (Not (Eq16  x y))
   272(Neq8   x y) => (Not (Eq8   x y))
   273(Neq(64|32)F ...) => (FNE(D|S) ...)
   274
   275// Loads
   276(Load <t> ptr mem) &&  t.IsBoolean()                   => (MOVBUload ptr mem)
   277(Load <t> ptr mem) && ( is8BitInt(t) &&  t.IsSigned()) => (MOVBload  ptr mem)
   278(Load <t> ptr mem) && ( is8BitInt(t) && !t.IsSigned()) => (MOVBUload ptr mem)
   279(Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload  ptr mem)
   280(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
   281(Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload  ptr mem)
   282(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
   283(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t))      => (MOVDload  ptr mem)
   284(Load <t> ptr mem) &&  is32BitFloat(t)                 => (FMOVWload ptr mem)
   285(Load <t> ptr mem) &&  is64BitFloat(t)                 => (FMOVDload ptr mem)
   286
   287// Stores
   288(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
   289(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
   290(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
   291(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVDstore ptr val mem)
   292(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (FMOVWstore ptr val mem)
   293(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (FMOVDstore ptr val mem)
   294
    295// We need to fold MOVaddr into the load/store ops so that the live variable analysis
    296// knows what variables are being read/written by the ops.
   297(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
   298	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   299	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   300	(MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
   301
   302(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
   303	is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
   304	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   305	(MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
   306
   307(MOV(B|H|W|D)storezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
   308	canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) &&
   309	(base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
   310	(MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
   311
   312(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
   313	(MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
   314
   315(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
   316	(MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
   317
   318(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
   319	(MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
   320
   321// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
   322// with OffPtr -> ADDI.
   323(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
   324
   325// Small zeroing
   326(Zero [0] _ mem) => mem
   327(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
   328(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
   329	(MOVHstore ptr (MOVDconst [0]) mem)
   330(Zero [2] ptr mem) =>
   331	(MOVBstore [1] ptr (MOVDconst [0])
   332		(MOVBstore ptr (MOVDconst [0]) mem))
   333(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
   334	(MOVWstore ptr (MOVDconst [0]) mem)
   335(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
   336	(MOVHstore [2] ptr (MOVDconst [0])
   337		(MOVHstore ptr (MOVDconst [0]) mem))
   338(Zero [4] ptr mem) =>
   339	(MOVBstore [3] ptr (MOVDconst [0])
   340		(MOVBstore [2] ptr (MOVDconst [0])
   341			(MOVBstore [1] ptr (MOVDconst [0])
   342				(MOVBstore ptr (MOVDconst [0]) mem))))
   343(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
   344	(MOVDstore ptr (MOVDconst [0]) mem)
   345(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
   346	(MOVWstore [4] ptr (MOVDconst [0])
   347		(MOVWstore ptr (MOVDconst [0]) mem))
   348(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
   349	(MOVHstore [6] ptr (MOVDconst [0])
   350		(MOVHstore [4] ptr (MOVDconst [0])
   351			(MOVHstore [2] ptr (MOVDconst [0])
   352				(MOVHstore ptr (MOVDconst [0]) mem))))
   353
   354(Zero [3] ptr mem) =>
   355	(MOVBstore [2] ptr (MOVDconst [0])
   356		(MOVBstore [1] ptr (MOVDconst [0])
   357			(MOVBstore ptr (MOVDconst [0]) mem)))
   358(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
   359	(MOVHstore [4] ptr (MOVDconst [0])
   360		(MOVHstore [2] ptr (MOVDconst [0])
   361			(MOVHstore ptr (MOVDconst [0]) mem)))
   362(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
   363	(MOVWstore [8] ptr (MOVDconst [0])
   364		(MOVWstore [4] ptr (MOVDconst [0])
   365			(MOVWstore ptr (MOVDconst [0]) mem)))
   366(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
   367	(MOVDstore [8] ptr (MOVDconst [0])
   368		(MOVDstore ptr (MOVDconst [0]) mem))
   369(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
   370	(MOVDstore [16] ptr (MOVDconst [0])
   371		(MOVDstore [8] ptr (MOVDconst [0])
   372			(MOVDstore ptr (MOVDconst [0]) mem)))
   373(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
   374	(MOVDstore [24] ptr (MOVDconst [0])
   375		(MOVDstore [16] ptr (MOVDconst [0])
   376			(MOVDstore [8] ptr (MOVDconst [0])
   377				(MOVDstore ptr (MOVDconst [0]) mem))))
   378
   379// Medium 8-aligned zeroing uses a Duff's device
   380// 8 and 128 are magic constants, see runtime/mkduff.go
   381(Zero [s] {t} ptr mem)
   382	&& s%8 == 0 && s <= 8*128
   383	&& t.Alignment()%8 == 0 =>
   384	(DUFFZERO [8 * (128 - s/8)] ptr mem)
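       // The AuxInt is the entry offset into the runtime's duffzero routine:
       // s/8 doubleword stores are needed, so the first 128 - s/8 iterations are
       // skipped (each iteration presumably occupying 8 bytes of code). For
       // example, s = 256 gives an offset of 8 * (128 - 32) = 768.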
   385
   386// Generic zeroing uses a loop
   387(Zero [s] {t} ptr mem) =>
   388	(LoweredZero [t.Alignment()]
   389		ptr
   390		(ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
   391		mem)
   392
   393// Checks
   394(IsNonNil ...) => (SNEZ ...)
   395(IsInBounds ...) => (Less64U ...)
   396(IsSliceInBounds ...) => (Leq64U ...)
   397
   398// Trivial lowering
   399(NilCheck ...) => (LoweredNilCheck ...)
   400(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
   401(GetCallerSP ...) => (LoweredGetCallerSP ...)
   402(GetCallerPC ...) => (LoweredGetCallerPC ...)
   403
   404// Write barrier.
   405(WB ...) => (LoweredWB ...)
   406
   407// Publication barrier as intrinsic
   408(PubBarrier ...) => (LoweredPubBarrier ...)
   409
   410(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
   411(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
   412(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
   413
   414// Small moves
   415(Move [0] _ _ mem) => mem
   416(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
   417(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
   418	(MOVHstore dst (MOVHload src mem) mem)
   419(Move [2] dst src mem) =>
   420	(MOVBstore [1] dst (MOVBload [1] src mem)
   421		(MOVBstore dst (MOVBload src mem) mem))
   422(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
   423	(MOVWstore dst (MOVWload src mem) mem)
   424(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
   425	(MOVHstore [2] dst (MOVHload [2] src mem)
   426		(MOVHstore dst (MOVHload src mem) mem))
   427(Move [4] dst src mem) =>
   428	(MOVBstore [3] dst (MOVBload [3] src mem)
   429		(MOVBstore [2] dst (MOVBload [2] src mem)
   430			(MOVBstore [1] dst (MOVBload [1] src mem)
   431				(MOVBstore dst (MOVBload src mem) mem))))
   432(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
   433	(MOVDstore dst (MOVDload src mem) mem)
   434(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
   435	(MOVWstore [4] dst (MOVWload [4] src mem)
   436		(MOVWstore dst (MOVWload src mem) mem))
   437(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
   438	(MOVHstore [6] dst (MOVHload [6] src mem)
   439		(MOVHstore [4] dst (MOVHload [4] src mem)
   440			(MOVHstore [2] dst (MOVHload [2] src mem)
   441				(MOVHstore dst (MOVHload src mem) mem))))
   442
   443(Move [3] dst src mem) =>
   444	(MOVBstore [2] dst (MOVBload [2] src mem)
   445		(MOVBstore [1] dst (MOVBload [1] src mem)
   446			(MOVBstore dst (MOVBload src mem) mem)))
   447(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
   448	(MOVHstore [4] dst (MOVHload [4] src mem)
   449		(MOVHstore [2] dst (MOVHload [2] src mem)
   450			(MOVHstore dst (MOVHload src mem) mem)))
   451(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
   452	(MOVWstore [8] dst (MOVWload [8] src mem)
   453		(MOVWstore [4] dst (MOVWload [4] src mem)
   454			(MOVWstore dst (MOVWload src mem) mem)))
   455(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
   456	(MOVDstore [8] dst (MOVDload [8] src mem)
   457		(MOVDstore dst (MOVDload src mem) mem))
   458(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
   459	(MOVDstore [16] dst (MOVDload [16] src mem)
   460		(MOVDstore [8] dst (MOVDload [8] src mem)
   461			(MOVDstore dst (MOVDload src mem) mem)))
   462(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
   463	(MOVDstore [24] dst (MOVDload [24] src mem)
   464		(MOVDstore [16] dst (MOVDload [16] src mem)
   465			(MOVDstore [8] dst (MOVDload [8] src mem)
   466				(MOVDstore dst (MOVDload src mem) mem))))
   467
   468// Medium 8-aligned move uses a Duff's device
   469// 16 and 128 are magic constants, see runtime/mkduff.go
   470(Move [s] {t} dst src mem)
   471	&& s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
   472	&& logLargeCopy(v, s) =>
   473	(DUFFCOPY [16 * (128 - s/8)] dst src mem)
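       // As with DUFFZERO above, the AuxInt skips the 128 - s/8 unneeded
       // iterations of the runtime's duffcopy routine (each presumably 16 bytes
       // of code), e.g. s = 256 gives 16 * (128 - 32) = 1536.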
   474
   475// Generic move uses a loop
   476(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
   477	(LoweredMove [t.Alignment()]
   478		dst
   479		src
   480		(ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
   481		mem)
   482
   483// Boolean ops; 0=false, 1=true
   484(AndB ...) => (AND ...)
   485(OrB  ...) => (OR  ...)
   486(EqB  x y) => (SEQZ (SUB <typ.Bool> x y))
   487(NeqB x y) => (SNEZ (SUB <typ.Bool> x y))
   488(Not  ...) => (SEQZ ...)
   489
   490// Lowering pointer arithmetic
   491// TODO: Special handling for SP offsets, like ARM
   492(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
   493(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
   494(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
   495
   496(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
   497(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
   498(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
   499(ConstNil) => (MOVDconst [0])
   500(ConstBool [val]) => (MOVDconst [int64(b2i(val))])
   501
   502(Addr {sym} base) => (MOVaddr {sym} [0] base)
   503(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVaddr {sym} (SPanchored base mem))
   504(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVaddr {sym} base)
   505
   506// Calls
   507(StaticCall  ...) => (CALLstatic  ...)
   508(ClosureCall ...) => (CALLclosure ...)
   509(InterCall   ...) => (CALLinter   ...)
   510(TailCall ...) => (CALLtail ...)
   511
   512// Atomic Intrinsics
   513(AtomicLoad(Ptr|64|32|8)  ...) => (LoweredAtomicLoad(64|64|32|8) ...)
   514(AtomicStore(PtrNoWB|64|32|8) ...) => (LoweredAtomicStore(64|64|32|8) ...)
   515(AtomicAdd(64|32) ...) => (LoweredAtomicAdd(64|32) ...)
   516
   517// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
   518(AtomicAnd8 ptr val mem) =>
   519	(LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
   520		(NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
   521			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
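       // A rough Go sketch of the 32-bit mask built above (val is the
       // zero-extended byte):
       //
       //	shift := (ptr & 3) * 8                   // bit offset of the byte within its aligned word
       //	mask := ^((uint32(val) ^ 0xff) << shift) // val's byte in place, all ones elsewhere
       //	// ANDing the word at ptr &^ 3 with mask therefore only affects the addressed byte.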
   522
   523(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
   524
   525(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
   526(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
   527
   528(AtomicExchange(64|32) ...) => (LoweredAtomicExchange(64|32) ...)
   529
   530// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
   531(AtomicOr8 ptr val mem) =>
   532	(LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
   533		(SLL <typ.UInt32> (ZeroExt8to32 val)
   534			(SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
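       // Likewise, a rough Go sketch of the 32-bit OR operand built above:
       //
       //	shift := (ptr & 3) * 8
       //	orWord := uint32(val) << shift // val's byte in place, zero bits elsewhere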
   535
   536(AtomicOr32  ...) => (LoweredAtomicOr32  ...)
   537
   538// Conditional branches
   539(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
   540
   541// Optimizations
   542
   543// Absorb SEQZ/SNEZ into branch.
   544(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
   545(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
   546(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
   547(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
   548
   549// Remove redundant NEG from BEQZ/BNEZ.
   550(BEQZ (NEG x) yes no) => (BEQZ x yes no)
   551(BNEZ (NEG x) yes no) => (BNEZ x yes no)
   552
   553// Negate comparison with FNES/FNED.
   554(BEQZ (FNES <t> x y) yes no) => (BNEZ (FEQS <t> x y) yes no)
   555(BNEZ (FNES <t> x y) yes no) => (BEQZ (FEQS <t> x y) yes no)
   556(BEQZ (FNED <t> x y) yes no) => (BNEZ (FEQD <t> x y) yes no)
   557(BNEZ (FNED <t> x y) yes no) => (BEQZ (FEQD <t> x y) yes no)
   558
   559// Convert BEQZ/BNEZ into more optimal branch conditions.
   560(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
   561(BNEZ (SUB x y) yes no) => (BNE x y yes no)
   562(BEQZ (SLT x y) yes no) => (BGE x y yes no)
   563(BNEZ (SLT x y) yes no) => (BLT x y yes no)
   564(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
   565(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
   566(BEQZ (SLTI [x] y) yes no) => (BGE y (MOVDconst [x]) yes no)
   567(BNEZ (SLTI [x] y) yes no) => (BLT y (MOVDconst [x]) yes no)
   568(BEQZ (SLTIU [x] y) yes no) => (BGEU y (MOVDconst [x]) yes no)
   569(BNEZ (SLTIU [x] y) yes no) => (BLTU y (MOVDconst [x]) yes no)
   570
    571// Convert branches that compare against zero to the more optimal branch-zero forms.
   572(BEQ  (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
   573(BEQ  cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
   574(BNE  (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
   575(BNE  cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
   576(BLT  (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
   577(BLT  cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
   578(BLTU (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
   579(BGE  (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
   580(BGE  cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
   581(BGEU (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
   582
   583// Remove redundant NEG from SEQZ/SNEZ.
   584(SEQZ (NEG x)) => (SEQZ x)
   585(SNEZ (NEG x)) => (SNEZ x)
   586
   587// Remove redundant SEQZ/SNEZ.
   588(SEQZ (SEQZ x)) => (SNEZ x)
   589(SEQZ (SNEZ x)) => (SEQZ x)
   590(SNEZ (SEQZ x)) => (SEQZ x)
   591(SNEZ (SNEZ x)) => (SNEZ x)
   592
   593// Store zero.
   594(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
   595(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
   596(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
   597(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
   598
   599// Boolean ops are already extended.
   600(MOVBUreg x:((FLES|FLTS|FEQS|FNES) _ _)) => x
   601(MOVBUreg x:((FLED|FLTD|FEQD|FNED) _ _)) => x
   602(MOVBUreg x:((SEQZ|SNEZ) _)) => x
   603(MOVBUreg x:((SLT|SLTU) _ _)) => x
   604
   605// Avoid extending when already sufficiently masked.
   606(MOVBreg  x:(ANDI [c] y)) && c >= 0 && int64(int8(c)) == c => x
   607(MOVHreg  x:(ANDI [c] y)) && c >= 0 && int64(int16(c)) == c => x
   608(MOVWreg  x:(ANDI [c] y)) && c >= 0 && int64(int32(c)) == c => x
   609(MOVBUreg x:(ANDI [c] y)) && c >= 0 && int64(uint8(c)) == c => x
   610(MOVHUreg x:(ANDI [c] y)) && c >= 0 && int64(uint16(c)) == c => x
   611(MOVWUreg x:(ANDI [c] y)) && c >= 0 && int64(uint32(c)) == c => x
   612
   613// Combine masking and zero extension.
   614(MOVBUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint8(c))] x)
   615(MOVHUreg (ANDI [c] x)) && c < 0 => (ANDI [int64(uint16(c))] x)
   616(MOVWUreg (ANDI [c] x)) && c < 0 => (AND (MOVDconst [int64(uint32(c))]) x)
   617
   618// Combine negation and sign extension.
   619(MOVWreg (NEG x)) => (NEGW x)
   620
   621// Avoid sign/zero extension for consts.
   622(MOVBreg  (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
   623(MOVHreg  (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
   624(MOVWreg  (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
   625(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
   626(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
   627(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
   628
   629// Avoid sign/zero extension after properly typed load.
   630(MOVBreg  x:(MOVBload  _ _)) => (MOVDreg x)
   631(MOVHreg  x:(MOVBload  _ _)) => (MOVDreg x)
   632(MOVHreg  x:(MOVBUload _ _)) => (MOVDreg x)
   633(MOVHreg  x:(MOVHload  _ _)) => (MOVDreg x)
   634(MOVWreg  x:(MOVBload  _ _)) => (MOVDreg x)
   635(MOVWreg  x:(MOVBUload _ _)) => (MOVDreg x)
   636(MOVWreg  x:(MOVHload  _ _)) => (MOVDreg x)
   637(MOVWreg  x:(MOVHUload _ _)) => (MOVDreg x)
   638(MOVWreg  x:(MOVWload  _ _)) => (MOVDreg x)
   639(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
   640(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
   641(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
   642(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
   643(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
   644(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
   645
   646// Avoid zero extension after properly typed atomic operation.
   647(MOVBUreg x:(Select0 (LoweredAtomicLoad8 _ _))) => (MOVDreg x)
   648(MOVBUreg x:(Select0 (LoweredAtomicCas32 _ _ _ _))) => (MOVDreg x)
   649(MOVBUreg x:(Select0 (LoweredAtomicCas64 _ _ _ _))) => (MOVDreg x)
   650
   651// Avoid sign extension after word arithmetic.
   652(MOVWreg x:(ADDIW   _)) => (MOVDreg x)
   653(MOVWreg x:(SUBW  _ _)) => (MOVDreg x)
   654(MOVWreg x:(NEGW    _)) => (MOVDreg x)
   655(MOVWreg x:(MULW  _ _)) => (MOVDreg x)
   656(MOVWreg x:(DIVW  _ _)) => (MOVDreg x)
   657(MOVWreg x:(DIVUW _ _)) => (MOVDreg x)
   658(MOVWreg x:(REMW  _ _)) => (MOVDreg x)
   659(MOVWreg x:(REMUW _ _)) => (MOVDreg x)
   660(MOVWreg x:(ROLW  _ _)) => (MOVDreg x)
   661(MOVWreg x:(RORW  _ _)) => (MOVDreg x)
   662(MOVWreg x:(RORIW   _)) => (MOVDreg x)
   663
   664// Fold double extensions.
   665(MOVBreg  x:(MOVBreg  _)) => (MOVDreg x)
   666(MOVHreg  x:(MOVBreg  _)) => (MOVDreg x)
   667(MOVHreg  x:(MOVBUreg _)) => (MOVDreg x)
   668(MOVHreg  x:(MOVHreg  _)) => (MOVDreg x)
   669(MOVWreg  x:(MOVBreg  _)) => (MOVDreg x)
   670(MOVWreg  x:(MOVBUreg _)) => (MOVDreg x)
   671(MOVWreg  x:(MOVHreg  _)) => (MOVDreg x)
   672(MOVWreg  x:(MOVWreg  _)) => (MOVDreg x)
   673(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
   674(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
   675(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
   676(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
   677(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
   678(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
   679
   680// Do not extend before store.
   681(MOVBstore [off] {sym} ptr (MOVBreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   682(MOVBstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   683(MOVBstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVBstore [off] {sym} ptr x mem)
   684(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   685(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   686(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
   687(MOVHstore [off] {sym} ptr (MOVHreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   688(MOVHstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVHstore [off] {sym} ptr x mem)
   689(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   690(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
   691(MOVWstore [off] {sym} ptr (MOVWreg  x) mem) => (MOVWstore [off] {sym} ptr x mem)
   692(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
   693
   694// Replace extend after load with alternate load where possible.
   695(MOVBreg  <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload  <t> [off] {sym} ptr mem)
   696(MOVHreg  <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload  <t> [off] {sym} ptr mem)
   697(MOVWreg  <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload  <t> [off] {sym} ptr mem)
   698(MOVBUreg <t> x:(MOVBload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
   699(MOVHUreg <t> x:(MOVHload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
   700(MOVWUreg <t> x:(MOVWload  [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
   701
    702// If a register move has only one use, just use the same register without emitting an instruction.
    703// MOVDnop does not emit an instruction; it only ensures the type.
   704(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
   705
    706// TODO: we should be able to get rid of MOVDnop altogether.
   707// But for now, this is enough to get rid of lots of them.
   708(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
   709
   710// Avoid unnecessary zero and sign extension when right shifting.
   711(SRAI <t> [x] (MOVWreg  y)) && x >= 0 && x <= 31 => (SRAIW <t> [int64(x)] y)
   712(SRLI <t> [x] (MOVWUreg y)) && x >= 0 && x <= 31 => (SRLIW <t> [int64(x)] y)
   713
   714// Replace right shifts that exceed size of signed type.
   715(SRAI <t> [x] (MOVBreg y)) && x >=  8 => (SRAI  [63] (SLLI <t> [56] y))
   716(SRAI <t> [x] (MOVHreg y)) && x >= 16 => (SRAI  [63] (SLLI <t> [48] y))
   717(SRAI <t> [x] (MOVWreg y)) && x >= 32 => (SRAIW [31] y)
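       // Shifting a narrow signed value right by its width or more leaves only
       // the sign; e.g. for the 8-bit case above, roughly, in Go:
       //
       //	result := (int64(y) << 56) >> 63 // 0 if the int8 is non-negative, -1 if negative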
   718
   719// Eliminate right shifts that exceed size of unsigned type.
   720(SRLI <t> [x] (MOVBUreg y)) && x >=  8 => (MOVDconst <t> [0])
   721(SRLI <t> [x] (MOVHUreg y)) && x >= 16 => (MOVDconst <t> [0])
   722(SRLI <t> [x] (MOVWUreg y)) && x >= 32 => (MOVDconst <t> [0])
   723
   724// Fold constant into immediate instructions where possible.
   725(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
   726(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
   727(OR  (MOVDconst [val]) x) && is32Bit(val) => (ORI  [val] x)
   728(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
   729(ROL  x (MOVDconst [val])) => (RORI  [int64(int8(-val)&63)] x)
   730(ROLW x (MOVDconst [val])) => (RORIW [int64(int8(-val)&31)] x)
   731(ROR  x (MOVDconst [val])) => (RORI  [int64(val&63)] x)
   732(RORW x (MOVDconst [val])) => (RORIW [int64(val&31)] x)
   733(SLL  x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
   734(SRL  x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
   735(SLLW x (MOVDconst [val])) => (SLLIW [int64(val&31)] x)
   736(SRLW x (MOVDconst [val])) => (SRLIW [int64(val&31)] x)
   737(SRA  x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
   738(SRAW x (MOVDconst [val])) => (SRAIW [int64(val&31)] x)
   739(SLT  x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTI  [val] x)
   740(SLTU x (MOVDconst [val])) && val >= -2048 && val <= 2047 => (SLTIU [val] x)
   741
   742// Replace negated left rotation with right rotation.
   743(ROL  x (NEG y)) => (ROR  x y)
   744(ROLW x (NEG y)) => (RORW x y)
   745
   746// generic simplifications
   747(ADD x (NEG y)) => (SUB x y)
   748(SUB x (NEG y)) => (ADD x y)
   749(SUB x x) => (MOVDconst [0])
   750(AND x x) => x
   751(OR  x x) => x
   752(ORN x x) => (MOVDconst [-1])
   753(XOR x x) => (MOVDconst [0])
   754
   755// Convert const subtraction into ADDI with negative immediate, where possible.
   756(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
   757(SUB <t> (MOVDconst [val]) y) && is32Bit(-val) => (NEG (ADDI <t> [-val] y))
   758
   759// Subtraction of zero.
   760(SUB  x (MOVDconst [0])) => x
   761(SUBW x (MOVDconst [0])) => (ADDIW [0] x)
   762
   763// Subtraction from zero.
   764(SUB  (MOVDconst [0]) x) => (NEG x)
   765(SUBW (MOVDconst [0]) x) => (NEGW x)
   766
   767// Fold negation into subtraction.
   768(NEG (SUB x y)) => (SUB y x)
   769(NEG <t> s:(ADDI [val] (SUB x y))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] (SUB <t> y x))
   770
   771// Double negation.
   772(NEG (NEG x)) => x
   773(NEG <t> s:(ADDI [val] (NEG x))) && s.Uses == 1 && is32Bit(-val) => (ADDI [-val] x)
   774
   775// Addition of zero or two constants.
   776(ADDI [0] x) => x
   777(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])
   778
   779// ANDI with all zeros, all ones or two constants.
   780(ANDI [0]  x) => (MOVDconst [0])
   781(ANDI [-1] x) => x
   782(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])
   783
    784// ORI with all zeros, all ones or two constants.
   785(ORI [0]  x) => x
   786(ORI [-1] x) => (MOVDconst [-1])
   787(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])
   788
   789// Combine operations with immediate.
   790(ADDI [x] (ADDI [y] z)) && is32Bit(x + y) => (ADDI [x + y] z)
   791(ANDI [x] (ANDI [y] z)) => (ANDI [x & y] z)
   792(ORI  [x] (ORI  [y] z)) => (ORI  [x | y] z)
   793
   794// Negation of a constant.
   795(NEG  (MOVDconst [x])) => (MOVDconst [-x])
   796(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
   797
   798// Shift of a constant.
   799(SLLI [x] (MOVDconst [y])) && is32Bit(y << uint32(x)) => (MOVDconst [y << uint32(x)])
   800(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> uint32(x))])
   801(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> uint32(x)])
   802
   803// SLTI/SLTIU with constants.
   804(SLTI  [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
   805(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])
   806
   807// SLTI/SLTIU with known outcomes.
   808(SLTI  [x] (ANDI [y] _)) && y >= 0 && int64(y) < int64(x) => (MOVDconst [1])
   809(SLTIU [x] (ANDI [y] _)) && y >= 0 && uint64(y) < uint64(x) => (MOVDconst [1])
   810(SLTI  [x] (ORI  [y] _)) && y >= 0 && int64(y) >= int64(x) => (MOVDconst [0])
   811(SLTIU [x] (ORI  [y] _)) && y >= 0 && uint64(y) >= uint64(x) => (MOVDconst [0])
   812
   813// SLT/SLTU with known outcomes.
   814(SLT  x x) => (MOVDconst [0])
   815(SLTU x x) => (MOVDconst [0])
   816
   817// Deadcode for LoweredMuluhilo
   818(Select0 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MULHU x y)
   819(Select1 m:(LoweredMuluhilo x y)) && m.Uses == 1 => (MUL x y)
   820
   821(FADD(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FMADD(S|D) x y a)
   822(FSUB(S|D) a (FMUL(S|D) x y)) && a.Block.Func.useFMA(v) => (FNMSUB(S|D) x y a)
   823(FSUB(S|D) (FMUL(S|D) x y) a) && a.Block.Func.useFMA(v) => (FMSUB(S|D) x y a)
   824
   825// Merge negation into fused multiply-add and multiply-subtract.
   826//
   827// Key:
   828//
   829//   [+ -](x * y [+ -] z).
   830//    _ N         A S
   831//                D U
   832//                D B
   833//
   834// Note: multiplication commutativity handled by rule generator.
   835(F(MADD|NMADD|MSUB|NMSUB)S neg:(FNEGS x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)S x y z)
   836(F(MADD|NMADD|MSUB|NMSUB)S x y neg:(FNEGS z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)S x y z)
   837(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMSUB|MSUB|NMADD|MADD)D x y z)
   838(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
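       // For example, in the first rule above: FMADDS computes x*y + z, so a
       // negated first operand gives (-x)*y + z = -(x*y) + z, which is exactly
       // what FNMSUBS x y z computes.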
   839
   840//
   841// Optimisations for rva22u64 and above.
   842//
   843
   844// Combine left shift and addition.
   845(ADD (SLLI [1] x) y) && buildcfg.GORISCV64 >= 22 => (SH1ADD x y)
   846(ADD (SLLI [2] x) y) && buildcfg.GORISCV64 >= 22 => (SH2ADD x y)
   847(ADD (SLLI [3] x) y) && buildcfg.GORISCV64 >= 22 => (SH3ADD x y)
   848
   849// Integer minimum and maximum.
   850(Min64  x y) && buildcfg.GORISCV64 >= 22 => (MIN  x y)
   851(Max64  x y) && buildcfg.GORISCV64 >= 22 => (MAX  x y)
   852(Min64u x y) && buildcfg.GORISCV64 >= 22 => (MINU x y)
   853(Max64u x y) && buildcfg.GORISCV64 >= 22 => (MAXU x y)
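       // SH1ADD/SH2ADD/SH3ADD (Zba) compute (x << n) + y in one instruction;
       // a typical use is indexed addressing, roughly, in Go:
       //
       //	addr := base + idx*8 // becomes SH3ADD idx base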
