src/cmd/compile/internal/ssa/_gen/LOONG64.rules

// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
(Add(32|64)F ...) => (ADD(F|D) ...)

(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
(Sub(32|64)F ...) => (SUB(F|D) ...)

(Mul(64|32|16|8) ...) => (MULV ...)
(Mul(32|64)F ...) => (MUL(F|D) ...)
(Select0 (Mul64uhilo x y)) => (MULHVU x y)
(Select1 (Mul64uhilo x y)) => (MULV x y)
(Select0 (Mul64uover x y)) => (MULV x y)
(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (MULHVU x y) (MOVVconst <typ.UInt64> [0]))

// 32-bit x 32-bit multiply producing a full 64-bit result
(MULV r:(MOVWUreg x) s:(MOVWUreg y)) && r.Uses == 1 && s.Uses == 1 => (MULWVWU x y)
(MULV r:(MOVWreg  x) s:(MOVWreg  y)) && r.Uses == 1 && s.Uses == 1 =>  (MULWVW x y)

(Hmul64 ...)  => (MULHV  ...)
(Hmul64u ...) => (MULHVU ...)
(Hmul32 ...)  => (MULH  ...)
(Hmul32u ...) => (MULHU ...)

(Div64 x y) => (DIVV x y)
(Div64u ...) => (DIVVU ...)
(Div32 x y) => (DIVV (SignExt32to64 x) (SignExt32to64 y))
(Div32u x y) => (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Div16 x y) => (DIVV (SignExt16to64 x) (SignExt16to64 y))
(Div16u x y) => (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Div8 x y) => (DIVV (SignExt8to64 x) (SignExt8to64 y))
(Div8u x y) => (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y))
(Div(32|64)F ...) => (DIV(F|D) ...)

(Mod64 x y) => (REMV x y)
(Mod64u ...) => (REMVU ...)
(Mod32 x y) => (REMV (SignExt32to64 x) (SignExt32to64 y))
(Mod32u x y) => (REMVU (ZeroExt32to64 x) (ZeroExt32to64 y))
(Mod16 x y) => (REMV (SignExt16to64 x) (SignExt16to64 y))
(Mod16u x y) => (REMVU (ZeroExt16to64 x) (ZeroExt16to64 y))
(Mod8 x y) => (REMV (SignExt8to64 x) (SignExt8to64 y))
(Mod8u x y) => (REMVU (ZeroExt8to64 x) (ZeroExt8to64 y))

(Select0 <t> (Add64carry x y c)) => (ADDV (ADDV <t> x y) c)
(Select1 <t> (Add64carry x y c)) =>
	(OR (SGTU <t> x s:(ADDV <t> x y)) (SGTU <t> s (ADDV <t> s c)))

(Select0 <t> (Sub64borrow x y c)) => (SUBV (SUBV <t> x y) c)
(Select1 <t> (Sub64borrow x y c)) =>
	(OR (SGTU <t> s:(SUBV <t> x y) x) (SGTU <t> (SUBV <t> s c) s))

// (x + y) / 2 with x >= y => (x - y) / 2 + y
(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
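// Worked example (illustrative only): with x=9 and y=5 (so x >= y),
// (9+5)/2 = 7 and (9-5)/2 + 5 = 2 + 5 = 7; the rewritten form keeps the
// intermediate value in range even when x+y would overflow 64 bits.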

(And(64|32|16|8) ...) => (AND ...)
(Or(64|32|16|8) ...) => (OR ...)
(Xor(64|32|16|8) ...) => (XOR ...)

// shifts
// The 64-bit shift instructions use only the low 6 bits of the shift amount
// (the 32-bit variants use only the low 5 bits), so we compare the amount
// against 64 (or 32) to preserve Go semantics for large shifts.

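// Sketch of the unbounded lowerings below, as Go pseudocode for a 64-bit
// left shift (illustrative only):
//
//	var r uint64 = 0
//	if y < 64 {        // the SGTU (MOVVconst [64]) y guard
//		r = x << y // result kept only when the guard holds (MASKEQZ)
//	}
//
// Signed right shifts cannot mask the result to zero; instead the shift
// amount is saturated to 63 (the OR/NEGV/SGTU combination below), so that
// shifting by 64 or more fills the result with copies of the sign bit.
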
// left shift
(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLL x y)
(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLLV x y)
(Lsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SLLV x y)

(Lsh64x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Lsh64x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh64x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh64x8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

(Lsh32x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [32])                y))
(Lsh32x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Lsh32x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Lsh32x8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLL <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64  y)))

(Lsh16x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Lsh16x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh16x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh16x8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

(Lsh8x64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Lsh8x32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Lsh8x16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Lsh8x8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SLLV <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

// unsigned right shift
(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV  x                 y)
(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRL   x                 y)
(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRLV  (ZeroExt16to64 x) y)
(Rsh8Ux(64|32|16|8)  x y) && shiftIsBounded(v) => (SRLV  (ZeroExt8to64  x) y)

(Rsh64Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Rsh64Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh64Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh64Ux8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

(Rsh32Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x                y)  (SGTU (MOVVconst <typ.UInt64> [32])                y))
(Rsh32Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt32to64 y)))
(Rsh32Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt16to64 y)))
(Rsh32Ux8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRL <t> x (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [32]) (ZeroExt8to64  y)))

(Rsh16Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x)                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Rsh16Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh16Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh16Ux8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

(Rsh8Ux64 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x)                y)  (SGTU (MOVVconst <typ.UInt64> [64])                y))
(Rsh8Ux32 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y)))
(Rsh8Ux16 <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y)))
(Rsh8Ux8  <t> x y) && !shiftIsBounded(v) => (MASKEQZ (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64  y)) (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64  y)))

// signed right shift
(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV   x                y)
(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRA    x                y)
(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAV  (SignExt16to64 x) y)
(Rsh8x(64|32|16|8)  x y) && shiftIsBounded(v) => (SRAV  (SignExt8to64  x) y)

(Rsh64x64 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh64x32 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh64x16 <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh64x8  <t> x y) && !shiftIsBounded(v) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64  y)))

(Rsh32x64 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [31]))) y))
(Rsh32x32 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt32to64 y)))
(Rsh32x16 <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt16to64 y)))
(Rsh32x8  <t> x y) && !shiftIsBounded(v) => (SRA x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (MOVVconst <typ.UInt64> [31]))) (ZeroExt8to64  y)))

(Rsh16x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh16x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh16x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh16x8  <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64  y)))

(Rsh8x64 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
(Rsh8x32 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
(Rsh8x16 <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
(Rsh8x8  <t> x y) && !shiftIsBounded(v) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64  y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64  y)))

// revb2h
// ((x>>8) | (x<<8)) => (REVB2H x), where x has type uint16
((OR|XOR|ADDV) <typ.UInt16> (SRLVconst [8] <typ.UInt16> x) (SLLVconst [8] <typ.UInt16> x)) => (REVB2H x)
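// For example, x = 0xAABB: x>>8 = 0x00AA and x<<8 = 0xBB00 (truncated to
// 16 bits), so their OR is 0xBBAA, the two bytes swapped, which is exactly
// what REVB2H computes per 16-bit lane.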

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), where x has type uint32
((OR|XOR|ADDV) (SRLconst [8] (ANDconst [c1] x)) (SLLconst [8] (ANDconst [c2] x)))
	&& uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
	=> (REVB2H x)

// revb4h
// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), where x has type uint64
((OR|XOR|ADDV) (SRLVconst [8] (AND (MOVVconst [c1]) x)) (SLLVconst [8] (AND (MOVVconst [c2]) x)))
	&& uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff
	=> (REVB4H x)

// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), where x has type uint64
((OR|XOR|ADDV) (SRLVconst [8] (AND (MOVVconst [c1]) x)) (SLLVconst [8] (ANDconst [c2] x)))
	&& uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff
	=> (REVB4H (ANDconst <x.Type> [0xffffffff] x))

// bitfield ops

// bstrpickv
// (x << lc) >> rc
(SRLVconst [rc] (SLLVconst [lc] x)) && lc <= rc => (BSTRPICKV [rc-lc + ((64-lc)-1)<<6] x)
// uint64(x) >> rc
(SRLVconst [rc] (MOVWUreg x)) && rc < 32 => (BSTRPICKV [rc + 31<<6] x)
(SRLVconst [rc] (MOVHUreg x)) && rc < 16 => (BSTRPICKV [rc + 15<<6] x)
(SRLVconst [rc] (MOVBUreg x)) && rc < 8 => (BSTRPICKV [rc + 7<<6] x)
// uint64(x >> rc)
(MOVWUreg (SRLVconst [rc] x)) && rc < 32 => (BSTRPICKV [rc + (31+rc)<<6] x)
(MOVHUreg (SRLVconst [rc] x)) && rc < 16 => (BSTRPICKV [rc + (15+rc)<<6] x)
(MOVBUreg (SRLVconst [rc] x)) && rc < 8 => (BSTRPICKV [rc + (7+rc)<<6] x)
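// As used above, the BSTRPICKV aux value packs msb<<6 + lsb. For example,
// (x << 8) >> 16 has lc=8, rc=16, so lsb = rc-lc = 8 and msb = (64-lc)-1 = 55:
// the rewrite extracts the bit field x[55:8], zero-extended.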

// rotates
(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
(RotateLeft8 <t> x y) => (OR <t> (SLLV <t> x (ANDconst <typ.Int64> [7] y)) (SRLV <t> (ZeroExt8to64 x) (ANDconst <typ.Int64> [7] (NEGV <typ.Int64> y))))
(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
(RotateLeft16 <t> x y) => (ROTR <t> (OR <typ.UInt32> (ZeroExt16to32 x) (SLLVconst <t> (ZeroExt16to32 x) [16])) (NEGV <typ.Int64> y))
(RotateLeft32 x y) => (ROTR  x (NEGV <y.Type> y))
(RotateLeft64 x y) => (ROTRV x (NEGV <y.Type> y))
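// LoongArch only provides a rotate-right instruction, so a rotate-left is
// lowered as a rotate-right by the negated amount: rotl(x, n) == rotr(x, -n
// mod 32) for 32-bit x, e.g. rotating left by 1 equals rotating right by 31.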

// unary ops
(Neg(64|32|16|8) ...) => (NEGV ...)
(Neg(32|64)F ...) => (NEG(F|D) ...)

(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)

(BitLen64 <t> x) => (NEGV <t> (SUBVconst <t> [64] (CLZV <t> x)))
(BitLen32 <t> x) => (NEGV <t> (SUBVconst <t> [32] (CLZW <t> x)))
(BitLen(16|8) x) => (BitLen64 (ZeroExt(16|8)to64 x))
(Bswap(16|32|64) ...) => (REVB(2H|2W|V) ...)
(BitRev8 ...) => (BITREV4B ...)
(BitRev16 <t> x) => (REVB2H (BITREV4B <t> x))
(BitRev32 ...) => (BITREVW ...)
(BitRev64 ...) => (BITREVV ...)
(Ctz(64|32|16|8)NonZero ...) => (Ctz64 ...)
(Ctz(32|64) ...) => (CTZ(W|V) ...)
(Ctz16 x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<16])))
(Ctz8  x) => (CTZV (OR <typ.UInt64> x (MOVVconst [1<<8])))
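// The OR plants a guard bit just above the narrow operand so a zero input
// still has a defined answer: Ctz16(0) scans past 16 zero bits, hits the
// planted 1<<16, and returns 16, as Go's semantics require.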

(PopCount64 <t> x) => (MOVVfpgp <t> (VPCNT64 <typ.Float64> (MOVVgpfp <typ.Float64> x)))
(PopCount32 <t> x) => (MOVWfpgp <t> (VPCNT32 <typ.Float32> (MOVWgpfp <typ.Float32> x)))
(PopCount16 <t> x) => (MOVWfpgp <t> (VPCNT16 <typ.Float32> (MOVWgpfp <typ.Float32> (ZeroExt16to32 x))))

// math package intrinsics
(Sqrt ...) => (SQRTD ...)
(Sqrt32 ...) => (SQRTF ...)
(Abs ...) => (ABSD ...)
(Copysign ...) => (FCOPYSGD ...)

(Min(64|32)F ...) => (FMIN(D|F) ...)
(Max(64|32)F ...) => (FMAX(D|F) ...)

// boolean ops -- booleans are represented with 0=false, 1=true
(AndB ...) => (AND ...)
(OrB ...) => (OR ...)
(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
(NeqB ...) => (XOR ...)
(Not x) => (XORconst [1] x)

// constants
(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
(ConstNil) => (MOVVconst [0])
(ConstBool [t]) => (MOVVconst [int64(b2i(t))])

(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
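// Slicemask must produce all ones when x > 0 and zero when x == 0 (x is a
// length and never negative): NEGV sets the sign bit exactly when x > 0,
// and the arithmetic shift by 63 broadcasts that bit across the word.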

// truncations
// Because we ignore high parts of registers, truncates are just copies.
(Trunc16to8 ...) => (Copy ...)
(Trunc32to8 ...) => (Copy ...)
(Trunc32to16 ...) => (Copy ...)
(Trunc64to8 ...) => (Copy ...)
(Trunc64to16 ...) => (Copy ...)
(Trunc64to32 ...) => (Copy ...)

// Zero-/Sign-extensions
(ZeroExt8to16 ...) => (MOVBUreg ...)
(ZeroExt8to32 ...) => (MOVBUreg ...)
(ZeroExt16to32 ...) => (MOVHUreg ...)
(ZeroExt8to64 ...) => (MOVBUreg ...)
(ZeroExt16to64 ...) => (MOVHUreg ...)
(ZeroExt32to64 ...) => (MOVWUreg ...)

(SignExt8to16 ...) => (MOVBreg ...)
(SignExt8to32 ...) => (MOVBreg ...)
(SignExt16to32 ...) => (MOVHreg ...)
(SignExt8to64 ...) => (MOVBreg ...)
(SignExt16to64 ...) => (MOVHreg ...)
(SignExt32to64 ...) => (MOVWreg ...)

// float <=> int conversion
(Cvt32to32F ...) => (MOVWF ...)
(Cvt32to64F ...) => (MOVWD ...)
(Cvt64to32F ...) => (MOVVF ...)
(Cvt64to64F ...) => (MOVVD ...)
(Cvt32Fto32 ...) => (TRUNCFW ...)
(Cvt64Fto32 ...) => (TRUNCDW ...)
(Cvt32Fto64 ...) => (TRUNCFV ...)
(Cvt64Fto64 ...) => (TRUNCDV ...)
(Cvt32Fto64F ...) => (MOVFD ...)
(Cvt64Fto32F ...) => (MOVDF ...)

(CvtBoolToUint8 ...) => (Copy ...)

(Round(32|64)F ...) => (LoweredRound(32|64)F ...)

// comparisons
(Eq8 x y)  => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))

(Neq8 x y)  => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
(Neq16 x y) => (SGTU (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)) (MOVVconst [0]))
(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))

(Less8 x y)  => (SGT (SignExt8to64 y) (SignExt8to64 x))
(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
(Less64 x y) => (SGT y x)
(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN

(Less8U x y)  => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
(Less64U x y) => (SGTU y x)

(Leq8 x y)  => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN

(Leq8U x y)  => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))

(OffPtr [off] ptr:(SP)) => (MOVVaddr [int32(off)] ptr)
(OffPtr [off] ptr) => (ADDVconst [off] ptr)

(Addr {sym} base) => (MOVVaddr {sym} base)
(LocalAddr <t> {sym} base mem) && t.Elem().HasPointers() => (MOVVaddr {sym} (SPanchored base mem))
(LocalAddr <t> {sym} base _)  && !t.Elem().HasPointers() => (MOVVaddr {sym} base)

// loads
(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t)  &&  t.IsSigned()) => (MOVBload ptr mem)
(Load <t> ptr mem) && (is8BitInt(t)  && !t.IsSigned()) => (MOVBUload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) &&  t.IsSigned()) => (MOVHload ptr mem)
(Load <t> ptr mem) && (is16BitInt(t) && !t.IsSigned()) => (MOVHUload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) &&  t.IsSigned()) => (MOVWload ptr mem)
(Load <t> ptr mem) && (is32BitInt(t) && !t.IsSigned()) => (MOVWUload ptr mem)
(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)

// stores
(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 && !t.IsFloat() => (MOVWstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 && !t.IsFloat() => (MOVVstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 4 &&  t.IsFloat() => (MOVFstore ptr val mem)
(Store {t} ptr val mem) && t.Size() == 8 &&  t.IsFloat() => (MOVDstore ptr val mem)

// zeroing
(Zero [0] _ mem) => mem
(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
(Zero [2] ptr mem) => (MOVHstore ptr (MOVVconst [0]) mem)
(Zero [3] ptr mem) =>
	(MOVBstore [2] ptr (MOVVconst [0])
		(MOVHstore ptr (MOVVconst [0]) mem))
(Zero [4] {t} ptr mem) => (MOVWstore ptr (MOVVconst [0]) mem)
(Zero [5] ptr mem) =>
	(MOVBstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [6] ptr mem) =>
	(MOVHstore [4] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [7] ptr mem) =>
	(MOVWstore [3] ptr (MOVVconst [0])
		(MOVWstore ptr (MOVVconst [0]) mem))
(Zero [8] {t} ptr mem) => (MOVVstore ptr (MOVVconst [0]) mem)
(Zero [9] ptr mem) =>
	(MOVBstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [10] ptr mem) =>
	(MOVHstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [11] ptr mem) =>
	(MOVWstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [12] ptr mem) =>
	(MOVWstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [13] ptr mem) =>
	(MOVVstore [5] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [14] ptr mem) =>
	(MOVVstore [6] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [15] ptr mem) =>
	(MOVVstore [7] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
(Zero [16] ptr mem) =>
	(MOVVstore [8] ptr (MOVVconst [0])
		(MOVVstore ptr (MOVVconst [0]) mem))
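// Note that several of the odd sizes above deliberately overlap stores:
// for example Zero [7] writes 4 zero bytes at offset 3 on top of 4 zero
// bytes at offset 0, covering bytes 0-6 with two stores instead of three.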

(Zero [s] ptr mem) && s > 16 && s < 192 => (LoweredZero [s] ptr mem)
(Zero [s] ptr mem) && s >= 192 => (LoweredZeroLoop [s] ptr mem)

// moves
(Move [0] _ _ mem) => mem
(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
(Move [3] dst src mem) =>
	(MOVBstore [2] dst (MOVBUload [2] src mem)
		(MOVHstore dst (MOVHUload src mem) mem))
(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
(Move [5] dst src mem) =>
	(MOVBstore [4] dst (MOVBUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [6] dst src mem) =>
	(MOVHstore [4] dst (MOVHUload [4] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [7] dst src mem) =>
	(MOVWstore [3] dst (MOVWUload [3] src mem)
		(MOVWstore dst (MOVWUload src mem) mem))
(Move [8] dst src mem) => (MOVVstore dst (MOVVload src mem) mem)
(Move [9] dst src mem) =>
	(MOVBstore [8] dst (MOVBUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [10] dst src mem) =>
	(MOVHstore [8] dst (MOVHUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [11] dst src mem) =>
	(MOVWstore [7] dst (MOVWload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [12] dst src mem) =>
	(MOVWstore [8] dst (MOVWUload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [13] dst src mem) =>
	(MOVVstore [5] dst (MOVVload [5] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [14] dst src mem) =>
	(MOVVstore [6] dst (MOVVload [6] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [15] dst src mem) =>
	(MOVVstore [7] dst (MOVVload [7] src mem)
		(MOVVstore dst (MOVVload src mem) mem))
(Move [16] dst src mem) =>
	(MOVVstore [8] dst (MOVVload [8] src mem)
		(MOVVstore dst (MOVVload src mem) mem))

(Move [s] dst src mem) && s > 16 && s < 192 && logLargeCopy(v, s) => (LoweredMove [s] dst src mem)
(Move [s] dst src mem) && s >= 192 && logLargeCopy(v, s) => (LoweredMoveLoop [s] dst src mem)

// float <=> int register moves, with no conversion.
// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
(MOVVload  [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (MOVVfpgp val)
(MOVDload  [off] {sym} ptr (MOVVstore [off] {sym} ptr val _)) => (MOVVgpfp val)
(MOVWUload [off] {sym} ptr (MOVFstore [off] {sym} ptr val _)) => (ZeroExt32to64 (MOVWfpgp <typ.Float32> val))
(MOVFload  [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (MOVWgpfp val)

// If the memory load and store operations use the same ptr, they are combined into a direct move operation between registers.
(MOV(V|W|H|B)load [off] {sym} ptr (MOV(V|W|H|B)store [off] {sym} ptr x _)) => (MOV(V|W|H|B)reg x)
(MOV(W|H|B)Uload  [off] {sym} ptr (MOV(W|H|B)store   [off] {sym} ptr x _)) => (MOV(W|H|B)Ureg  x)

// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
(MOVVstore [off] {sym} ptr (MOVVfpgp val) mem) => (MOVDstore [off] {sym} ptr val mem)
(MOVDstore [off] {sym} ptr (MOVVgpfp val) mem) => (MOVVstore [off] {sym} ptr val mem)
(MOVWstore [off] {sym} ptr (MOVWfpgp val) mem) => (MOVFstore [off] {sym} ptr val mem)
(MOVFstore [off] {sym} ptr (MOVWgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)

// calls
(StaticCall ...) => (CALLstatic ...)
(ClosureCall ...) => (CALLclosure ...)
(InterCall ...) => (CALLinter ...)
(TailCall ...) => (CALLtail ...)

// atomic intrinsics
(AtomicLoad(8|32|64)   ...) => (LoweredAtomicLoad(8|32|64)  ...)
(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)

(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64)  ...)
(AtomicStore(8|32|64)Variant ...) => (LoweredAtomicStore(8|32|64)Variant  ...)
(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)

(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
(AtomicExchange8Variant  ...) => (LoweredAtomicExchange8Variant  ...)

(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)

// Loong64's 32-bit atomic operation instructions ll.w and amcasw are both
// sign-extended, so the input parameters need to be sign-extended to 64 bits;
// otherwise, the subsequent comparison operations may not produce the
// expected results.
//
(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
(AtomicCompareAndSwap32Variant ptr old new mem) => (LoweredAtomicCas32Variant ptr (SignExt32to64 old) new mem)
(AtomicCompareAndSwap64Variant ...) => (LoweredAtomicCas64Variant ...)

// Atomic memory logical operations (old style).
//
// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
// AtomicOr8(ptr,val)  => LoweredAtomicOr32(ptr&^3, uint32(val) << ((ptr & 3) * 8))
//
(AtomicAnd8 ptr val mem) =>
	(LoweredAtomicAnd32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(NORconst [0] <typ.UInt32> (SLLV <typ.UInt32> (XORconst <typ.UInt32> [0xff] (ZeroExt8to32 val))
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr)))) mem)

(AtomicOr8 ptr val mem) =>
	(LoweredAtomicOr32 (AND <typ.Uintptr> (MOVVconst [^3]) ptr)
		(SLLV <typ.UInt32> (ZeroExt8to32 val)
			(SLLVconst <typ.UInt64> [3] (ANDconst <typ.UInt64> [3] ptr))) mem)
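// Worked example of the byte-lane math above: for a ptr with ptr&3 == 2, the
// containing word is at ptr&^3, the lane shift is (ptr&3)*8 == 16 (the
// SLLVconst [3] multiplies by 8), and AtomicOr8 ORs uint32(val)<<16 into
// that word. AtomicAnd8's mask keeps the other three lanes at 0xff so the
// 32-bit AND leaves them unchanged.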

(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
(AtomicOr32  ...) => (LoweredAtomicOr32  ...)

// Atomic memory logical operations (new style).
(AtomicAnd(64|32)value ...) => (LoweredAtomicAnd(64|32)value ...)
(AtomicOr(64|32)value  ...) => (LoweredAtomicOr(64|32)value  ...)

// checks
(NilCheck ...) => (LoweredNilCheck ...)
(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
(IsInBounds idx len) => (SGTU len idx)
(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))

// pseudo-ops
(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
(GetCallerSP ...) => (LoweredGetCallerSP ...)
(GetCallerPC ...) => (LoweredGetCallerPC ...)

(If cond yes no) => (NEZ (MOVBUreg <typ.UInt64> cond) yes no)
(MOVBUreg x:((SGT|SGTU) _ _)) => x
(MOVBUreg x:(XOR (MOVVconst [1]) ((SGT|SGTU) _ _))) => x

(JumpTable idx) => (JUMPTABLE {makeJumpTableSym(b)} idx (MOVVaddr <typ.Uintptr> {makeJumpTableSym(b)} (SB)))

// Write barrier.
(WB ...) => (LoweredWB ...)

// Publication barrier as intrinsic
(PubBarrier ...) => (LoweredPubBarrier ...)

(PanicBounds ...) => (LoweredPanicBoundsRR ...)
(LoweredPanicBoundsRR [kind] x (MOVVconst [c]) mem) => (LoweredPanicBoundsRC [kind] x {PanicBoundsC{C:c}} mem)
(LoweredPanicBoundsRR [kind] (MOVVconst [c]) y mem) => (LoweredPanicBoundsCR [kind] {PanicBoundsC{C:c}} y mem)
(LoweredPanicBoundsRC [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:c, Cy:p.C}} mem)
(LoweredPanicBoundsCR [kind] {p} (MOVVconst [c]) mem) => (LoweredPanicBoundsCC [kind] {PanicBoundsCC{Cx:p.C, Cy:c}} mem)

(CondSelect <t> x y cond) => (OR (MASKEQZ <t> x cond) (MASKNEZ <t> y cond))
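// That is, CondSelect(x, y, cond) == (cond != 0 ? x : y): MASKEQZ passes x
// through only when cond is nonzero, MASKNEZ passes y through only when
// cond is zero, and the OR merges the two disjoint results.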

// c > d-x => x > d-c
(SGT (MOVVconst [c]) (NEGV (SUBVconst [d] x))) && is32Bit(d-c) => (SGT x (MOVVconst [d-c]))

(SGT  (MOVVconst [c]) x) && is32Bit(c) => (SGTconst  [c] x)
(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)

// fold offset into address
(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)

// fold address into load/store
// Do not fold global variable accesses in -dynlink mode, where they will be
// rewritten to use the GOT via REGTMP, which currently cannot handle large offsets.
(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {sym} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
	&& (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {sym} ptr val mem)

(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|BU|H|HU|W|WU|V|F|D)load [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)

(MOV(B|H|W|V|F|D)store [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
	&& is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_dynlink) =>
	(MOV(B|H|W|V|F|D)store [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)

// don't extend after proper load
(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVVreg x)

// fold double extensions
(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)

// don't extend before store
(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)

// register indexed load
(MOVVload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx  ptr                                idx  mem)
(MOVVload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVVloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVWUload [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr                                idx  mem)
(MOVWUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVWload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx  ptr                                idx  mem)
(MOVWload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVHUload [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr                                idx  mem)
(MOVHUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVHload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx  ptr                                idx  mem)
(MOVHload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVBUload [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr                                idx  mem)
(MOVBUload [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVBload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx  ptr                                idx  mem)
(MOVBload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVFload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx  ptr                                idx  mem)
(MOVFload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVFloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVDload  [off] {sym} (ADDV                ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx  ptr                                idx  mem)
(MOVDload  [off] {sym} (ADDshiftLLV [shift] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx  ptr (SLLVconst <typ.Int64> [shift] idx) mem)
(MOVVloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVVloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVVload [int32(c)] ptr mem)
(MOVWUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
(MOVWloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVWloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
(MOVHUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
(MOVHloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVHloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
(MOVBUloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBUloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
(MOVBloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVBloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
(MOVFloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVFloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVFload [int32(c)] ptr mem)
(MOVDloadidx ptr (MOVVconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
(MOVDloadidx (MOVVconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)

// register indexed store
(MOVVstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr                                idx  val mem)
(MOVVstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVVstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVWstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr                                idx  val mem)
(MOVWstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVHstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr                                idx  val mem)
(MOVHstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVBstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr                                idx  val mem)
(MOVBstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVFstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr                                idx  val mem)
(MOVFstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVFstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVDstore [off] {sym} (ADDV                ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr                                idx  val mem)
(MOVDstore [off] {sym} (ADDshiftLLV [shift] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr (SLLVconst <typ.Int64> [shift] idx) val mem)
(MOVVstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVVstore [int32(c)] ptr val mem)
(MOVVstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVVstore [int32(c)] idx val mem)
(MOVWstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
(MOVWstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
(MOVHstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
(MOVHstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
(MOVBstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
(MOVBstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
(MOVFstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVFstore [int32(c)] ptr val mem)
(MOVFstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVFstore [int32(c)] idx val mem)
(MOVDstoreidx ptr (MOVVconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
(MOVDstoreidx (MOVVconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)

// If a register move has only 1 use, just use the same register without emitting an instruction.
// MOVVnop doesn't emit an instruction; it exists only to pin the type.
(MOVVreg x) && x.Uses == 1 => (MOVVnop x)

// TODO: we should be able to get rid of MOVVnop altogether.
// But for now, this is enough to get rid of lots of them.
(MOVVnop (MOVVconst [c])) => (MOVVconst [c])

// fold constant into arithmetic ops
(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
(OR  x (MOVVconst [c])) && is32Bit(c) => (ORconst  [c] x)
(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)

(SLL _ (MOVVconst [c])) && uint64(c) >= 32 => (MOVVconst [0])
(SLLV _ (MOVVconst [c])) && uint64(c) >= 64 => (MOVVconst [0])
(SRL _ (MOVVconst [c])) && uint64(c) >= 32 => (MOVVconst [0])
(SRLV _ (MOVVconst [c])) && uint64(c) >= 64 => (MOVVconst [0])
(SRA x (MOVVconst [c])) && uint64(c) >= 32 => (SRAconst x [31])
(SRAV x (MOVVconst [c])) && uint64(c) >= 64 => (SRAVconst x [63])
(SLL x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SLLconst x [c])
(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
(SRL x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SRLconst x [c])
(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
(SRA x (MOVVconst [c])) && uint64(c) >= 0 && uint64(c) <= 31 => (SRAconst x [c])
(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
(ROTR x (MOVVconst [c]))  => (ROTRconst x [c&31])
(ROTRV x (MOVVconst [c])) => (ROTRVconst x [c&63])

// SLLV/SRLV/SRAV only consider the bottom 6 bits of y; similarly, SLL/SRL/SRA
// only consider the bottom 5 bits of y.
(SLL x (ANDconst [31] y)) => (SLL x y)
(SRL x (ANDconst [31] y)) => (SRL x y)
(SRA x (ANDconst [31] y)) => (SRA x y)
(SLLV x (ANDconst [63] y)) => (SLLV x y)
(SRLV x (ANDconst [63] y)) => (SRLV x y)
(SRAV x (ANDconst [63] y)) => (SRAV x y)

// Avoid unnecessary zero and sign extension when right shifting.
(SRLVconst [rc] (MOVWUreg y)) && rc >= 0 && rc <= 31 => (SRLconst [int64(rc)] y)
(SRAVconst [rc] (MOVWreg y)) && rc >= 0 && rc <= 31 => (SRAconst [int64(rc)] y)

// Replace right shifts that exceed the size of the signed type.
(SRAVconst <t> [rc] (MOVBreg y)) && rc >=  8 => (SRAVconst [63] (SLLVconst <t> [56] y))
(SRAVconst <t> [rc] (MOVHreg y)) && rc >= 16 => (SRAVconst [63] (SLLVconst <t> [48] y))
(SRAVconst <t> [rc] (MOVWreg y)) && rc >= 32 => (SRAconst [31] y)

// If the shift amount is at least the data size (32, 16, or 8), the result is constant 0.
(MOVWUreg (SLLVconst [lc] x)) && lc >= 32 => (MOVVconst [0])
(MOVHUreg (SLLVconst [lc] x)) && lc >= 16 => (MOVVconst [0])
(MOVBUreg (SLLVconst [lc] x)) && lc >= 8 => (MOVVconst [0])

// After zero extension, the upper (64 - data size) bits are zero, so a right
// shift by at least the data size also yields constant 0.
(SRLVconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVVconst [0])
(SRLVconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVVconst [0])
(SRLVconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVVconst [0])

// (x + x) << c -> x << c+1
((SLLV|SLL)const <t> [c] (ADDV x x)) && c < t.Size() * 8 - 1  => ((SLLV|SLL)const [c+1] x)
((SLLV|SLL)const <t> [c] (ADDV x x)) && c >= t.Size() * 8 - 1 => (MOVVconst [0])

// mul by constant
(MULV _ (MOVVconst [0])) => (MOVVconst [0])
(MULV x (MOVVconst [1])) => x

(MULV  x (MOVVconst [c])) && canMulStrengthReduce(config, c) => {mulStrengthReduce(v, x, c)}

(ADDV x0 x1:(SLLVconst [c] y)) && x1.Uses == 1 && c > 0 && c <= 4 => (ADDshiftLLV x0 y [c])

// fold constant in ADDshift op
(ADDshiftLLV x (MOVVconst [c]) [d]) && is12Bit(c<<d) => (ADDVconst x [c<<d])

// div by constant
(DIVVU x (MOVVconst [1])) => x
(DIVVU x (MOVVconst [c])) && isPowerOfTwo(c) => (SRLVconst [log64(c)] x)
(REMVU _ (MOVVconst [1])) => (MOVVconst [0])                       // mod
(REMVU x (MOVVconst [c])) && isPowerOfTwo(c) => (ANDconst [c-1] x) // mod
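// E.g. for unsigned x, x/8 becomes x>>3 (SRLVconst [3], since log64(8) == 3)
// and x%8 becomes x&7 (ANDconst [7]).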

// FMA
(FMA ...) => (FMADDD ...)
((ADD|SUB)F (MULF x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)F x y z)
((ADD|SUB)D (MULD x y) z) && z.Block.Func.useFMA(v) => (FM(ADD|SUB)D x y z)
// z - xy -> -(xy - z)
(SUBF z (MULF x y)) && z.Block.Func.useFMA(v) => (FNMSUBF x y z)
(SUBD z (MULD x y)) && z.Block.Func.useFMA(v) => (FNMSUBD x y z)
// z + (-xy) -> -(xy - z)
// z - (-xy) -> xy + z
((ADD|SUB)F z (NEGF (MULF x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)F x y z)
((ADD|SUB)D z (NEGD (MULD x y))) && z.Block.Func.useFMA(v) => (F(NMSUB|MADD)D x y z)
// -xy - z -> -(xy + z)
(SUBF (NEGF (MULF x y)) z) && z.Block.Func.useFMA(v) => (FNMADDF x y z)
(SUBD (NEGD (MULD x y)) z) && z.Block.Func.useFMA(v) => (FNMADDD x y z)

// generic simplifications
(ADDV x (NEGV y)) => (SUBV x y)
(SUBV x (NEGV y)) => (ADDV x y)
(SUBV x x) => (MOVVconst [0])
(SUBV (MOVVconst [0]) x) => (NEGV x)
(AND x x) => x
(OR  x x) => x
(XOR x x) => (MOVVconst [0])
(ORN x (MOVVconst [-1])) => x
(AND x (NORconst [0] y)) => (ANDN x y)
(OR x (NORconst [0] y)) => (ORN x y)

// Fold negation into subtraction.
(NEGV (SUBV x y)) => (SUBV y x)
(NEGV <t> s:(ADDVconst [c] (SUBV x y))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] (SUBV <t> y x))

// Double negation.
(NEGV (NEGV x)) => x
// Fold NEGV into ADDVconst. Take care to keep c in the 12-bit range.
(NEGV <t> s:(ADDVconst [c] (NEGV x))) && s.Uses == 1 && is12Bit(-c) => (ADDVconst [-c] x)

// remove redundant *const ops
(ADDVconst [0]  x) => x
(SUBVconst [0]  x) => x
(ANDconst [0]  _) => (MOVVconst [0])
(ANDconst [-1] x) => x
(ORconst  [0]  x) => x
(ORconst  [-1] _) => (MOVVconst [-1])
(XORconst [0]  x) => x
(XORconst [-1] x) => (NORconst [0] x)
(MASKEQZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKNEZ (MOVVconst [0]) cond) => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c == 0 => (MOVVconst [0])
(MASKEQZ x (MOVVconst [c])) && c != 0 => x

// generic constant folding
(ADDVconst [c] (MOVVconst [d]))  => (MOVVconst [c+d])
(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
(SUBVconst [c] (MOVVconst [d]))  => (MOVVconst [d-c])
(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
(SUBV (MOVVconst [c]) (NEGV (SUBVconst [d] x))) => (ADDVconst [c-d] x)
(ADDVconst [c] x) && is32Bit(c) && c&0xffff == 0 && c != 0 => (ADDV16const [c] x)
(SLLVconst [c] (MOVVconst [d]))  => (MOVVconst [d<<uint64(c)])
(SRLVconst [c] (MOVVconst [d]))  => (MOVVconst [int64(uint64(d)>>uint64(c))])
(SRAVconst [c] (MOVVconst [d]))  => (MOVVconst [d>>uint64(c)])
(MULV (MOVVconst [c]) (MOVVconst [d])) => (MOVVconst [c*d])
(DIVV  (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c/d])
(DIVVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
(REMV  (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [c%d])   // mod
(REMVU (MOVVconst [c]) (MOVVconst [d])) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
(NEGV (MOVVconst [c])) => (MOVVconst [-c])
(MOVBreg  (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
(MOVHreg  (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
(MOVWreg  (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
(MOVVreg  (MOVVconst [c])) => (MOVVconst [c])

(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)

// Avoid extending when already sufficiently masked.
(MOVBreg  x:(ANDconst [c] y)) && c >= 0 && int64(int8(c)) == c => x
(MOVHreg  x:(ANDconst [c] y)) && c >= 0 && int64(int16(c)) == c => x
(MOVWreg  x:(ANDconst [c] y)) && c >= 0 && int64(int32(c)) == c => x
(MOVBUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint8(c)) == c => x
(MOVHUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint16(c)) == c => x
(MOVWUreg x:(ANDconst [c] y)) && c >= 0 && int64(uint32(c)) == c => x

// Prefetch instructions (hint specified using aux field)
// For PRELD{,X}, the value of hint indicates:
//    hint=0 is defined as load prefetch to L1-cache
//    hint=2 is defined as load prefetch to L3-cache
// The PrefetchCacheStreamed implementation prefetches 512 bytes of data
// into L3. The aux field is encoded as follows:
//    bit[4:0]:
//       $hint parameter of PRELDX instruction
//    bit[41:5]:
//       $n parameter of PRELDX instruction; bit[0] of $n is the address
//       sequence, bits[11:1] is the block size, bits[20:12] is the block
//       num, bits[36:21] is the stride; for more details about $n, refer
//       to src/cmd/internal/obj/loong64/doc.go
(PrefetchCache addr mem)         => (PRELD  addr mem [0])
(PrefetchCacheStreamed addr mem) => (PRELDX addr mem [(((512 << 1) + (1 << 12)) << 5) + 2])
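// Working through the PrefetchCacheStreamed aux value: the low 5 bits hold
// hint = 2 (L3), and n = (512<<1) + (1<<12) = 0x1400 sets address-sequence
// bit[0] = 0, block size 512 in bits[11:1], block num 1 in bits[20:12], and
// stride 0, i.e. one 512-byte block prefetched into L3.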

// constant comparisons
(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])

// other known comparisons
(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])

// SGT/SGTU with known outcomes.
(SGT  x x) => (MOVVconst [0])
(SGTU x x) => (MOVVconst [0])

// Optimizations

// Absorb boolean tests into block
(NEZ (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
(NEZ (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
(EQZ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
(EQZ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
(NEZ (XORconst [1] cmp:(SGT _ _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTconst _)) yes no) => (EQZ cmp yes no)
(NEZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGT _ _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTconst _)) yes no) => (NEZ cmp yes no)
(EQZ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NEZ cmp yes no)
(NEZ (SGTUconst [1] x) yes no) => (EQZ x yes no)
(EQZ (SGTUconst [1] x) yes no) => (NEZ x yes no)
(NEZ (SGTU x (MOVVconst [0])) yes no) => (NEZ x yes no)
(EQZ (SGTU x (MOVVconst [0])) yes no) => (EQZ x yes no)
(NEZ (SGTconst [0] x) yes no) => (LTZ x yes no)
(EQZ (SGTconst [0] x) yes no) => (GEZ x yes no)
(NEZ (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
(EQZ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)

// Convert EQZ/NEZ into more optimal branch conditions.
(EQZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (EQZ (SGTUconst [c] y) yes no)
(NEZ (SGTU (MOVVconst [c]) y) yes no) && c >= -2048 && c <= 2047 => (NEZ (SGTUconst [c] y) yes no)
(EQZ (SUBV x y) yes no) => (BEQ x y yes no)
(NEZ (SUBV x y) yes no) => (BNE x y yes no)
(EQZ (SGT x y) yes no) => (BGE y x yes no)
(NEZ (SGT x y) yes no) => (BLT y x yes no)
(EQZ (SGTU x y) yes no) => (BGEU y x yes no)
(NEZ (SGTU x y) yes no) => (BLTU y x yes no)
(EQZ (SGTconst [c] y) yes no) => (BGE y (MOVVconst [c]) yes no)
(NEZ (SGTconst [c] y) yes no) => (BLT y (MOVVconst [c]) yes no)
(EQZ (SGTUconst [c] y) yes no) => (BGEU y (MOVVconst [c]) yes no)
(NEZ (SGTUconst [c] y) yes no) => (BLTU y (MOVVconst [c]) yes no)

// absorb constants into branches
(EQZ  (MOVVconst [0]) yes no) => (First yes no)
(EQZ  (MOVVconst [c]) yes no) && c != 0 => (First no yes)
(NEZ  (MOVVconst [0]) yes no) => (First no yes)
(NEZ  (MOVVconst [c]) yes no) && c != 0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c <  0 => (First yes no)
(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
(LEZ (MOVVconst [c]) yes no) && c >  0 => (First no yes)
(GTZ (MOVVconst [c]) yes no) && c >  0 => (First yes no)
(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
(GEZ (MOVVconst [c]) yes no) && c <  0 => (First no yes)

// absorb NEGV into branches
(EQZ (NEGV x) yes no) => (EQZ x yes no)
(NEZ (NEGV x) yes no) => (NEZ x yes no)

// Convert branch with zero to more optimal branch zero.
(BEQ  (MOVVconst [0]) cond yes no) => (EQZ cond yes no)
(BEQ  cond (MOVVconst [0]) yes no) => (EQZ cond yes no)
(BNE  (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
(BNE  cond (MOVVconst [0]) yes no) => (NEZ cond yes no)
(BLT  (MOVVconst [0]) cond yes no) => (GTZ cond yes no)
(BLT  cond (MOVVconst [0]) yes no) => (LTZ cond yes no)
(BLTU (MOVVconst [0]) cond yes no) => (NEZ cond yes no)
(BGE  (MOVVconst [0]) cond yes no) => (LEZ cond yes no)
(BGE  cond (MOVVconst [0]) yes no) => (GEZ cond yes no)
(BGEU (MOVVconst [0]) cond yes no) => (EQZ cond yes no)

// Arch-specific inlining for small or disjoint runtime.memmove
// Match post-lowering calls, register version.
(SelectN [0] call:(CALLstatic {sym} dst src (MOVVconst [sz]) mem))
	&& sz >= 0
	&& isSameCall(sym, "runtime.memmove")
	&& call.Uses == 1
	&& isInlinableMemmove(dst, src, sz, config)
	&& clobber(call)
	=> (Move [sz] dst src mem)

// fold readonly sym load
(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int8(read8(sym, int64(off))))])
(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int16(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))])
