diff --git a/src/cmd/compile/internal/ssa/_gen/386.rules b/src/cmd/compile/internal/ssa/_gen/386.rules
index 88074e5fd08130..db16ab0961face 100644
--- a/src/cmd/compile/internal/ssa/_gen/386.rules
+++ b/src/cmd/compile/internal/ssa/_gen/386.rules
@@ -394,7 +394,7 @@
 (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
 
 // fold constants into instructions
-(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+(ADDL x (MOVLconst <t> [c])) && !t.IsPtr() => (ADDLconst [c] x)
 (ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
 (ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
 
diff --git a/src/cmd/compile/internal/ssa/_gen/AMD64.rules b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
index d58a34630b3374..06e48a18589ccd 100644
--- a/src/cmd/compile/internal/ssa/_gen/AMD64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/AMD64.rules
@@ -746,7 +746,7 @@
 // (SETEQF x) => (ANDQ (SETEQ x) (SETORD x))
 
 // fold constants into instructions
-(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x)
+(ADDQ x (MOVQconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDQconst [int32(c)] x)
 (ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
 (ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
 
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM.rules b/src/cmd/compile/internal/ssa/_gen/ARM.rules
index d8fbf41754e74f..9ea9f9674a2977 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM.rules
@@ -481,7 +481,7 @@
 (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x)
 
 // fold constant into arithmetic ops
-(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
 (SUB (MOVWconst [c]) x) => (RSBconst [c] x)
 (SUB x (MOVWconst [c])) => (SUBconst [c] x)
 (RSB (MOVWconst [c]) x) => (SUBconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/ARM64.rules b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
index 0ae02f5de370c8..cf43542615cf70 100644
--- a/src/cmd/compile/internal/ssa/_gen/ARM64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/ARM64.rules
@@ -1241,7 +1241,7 @@
 (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
 
 // fold constant into arithmetic ops
-(ADD x (MOVDconst [c])) => (ADDconst [c] x)
+(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
 (SUB x (MOVDconst [c])) => (SUBconst [c] x)
 (AND x (MOVDconst [c])) => (ANDconst [c] x)
 (OR x (MOVDconst [c])) => (ORconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
index 7e445e506e5548..08b94b7c07376a 100644
--- a/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/LOONG64.rules
@@ -571,7 +571,7 @@
 (MOVVreg x) && x.Uses == 1 => (MOVVnop x)
 
 // fold constant into arithmetic ops
-(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
 (SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
 (AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
 (OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS.rules b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
index 9cd5a1618e0b03..b74ab7b609744e 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS.rules
@@ -567,7 +567,7 @@
 (MOVWnop (MOVWconst [c])) => (MOVWconst [c])
 
 // fold constant into arithmetic ops
-(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(ADD x (MOVWconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
 (SUB x (MOVWconst [c])) => (SUBconst [c] x)
 (AND x (MOVWconst [c])) => (ANDconst [c] x)
 (OR x (MOVWconst [c])) => (ORconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
index b0d0dd8e665dbc..e5cfd90e82f4ad 100644
--- a/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/MIPS64.rules
@@ -567,7 +567,7 @@
 (MOVVnop (MOVVconst [c])) => (MOVVconst [c])
 
 // fold constant into arithmetic ops
-(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(ADDV x (MOVVconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDVconst [c] x)
 (SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
 (AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
 (OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/PPC64.rules b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
index 16cf91c7b6a4d2..ba7347f31da4fb 100644
--- a/src/cmd/compile/internal/ssa/_gen/PPC64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/PPC64.rules
@@ -670,7 +670,7 @@
 
 // Arithmetic constant ops
-(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [c] x)
 (ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
 (ADDconst [0] x) => x
 (SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
 
diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
index 802b1dd1fd6439..378b8c06f86266 100644
--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
@@ -757,7 +757,7 @@
 (MOVDnop (MOVDconst [c])) => (MOVDconst [c])
 
 // Fold constant into immediate instructions where possible.
-(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
+(ADD (MOVDconst <t> [val]) x) && is32Bit(val) && !t.IsPtr() => (ADDI [val] x)
 (AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
 (OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
 (XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
diff --git a/src/cmd/compile/internal/ssa/_gen/S390X.rules b/src/cmd/compile/internal/ssa/_gen/S390X.rules
index 9495010a736bad..4502a573842373 100644
--- a/src/cmd/compile/internal/ssa/_gen/S390X.rules
+++ b/src/cmd/compile/internal/ssa/_gen/S390X.rules
@@ -632,7 +632,7 @@
 (BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)
 
 // Fold constants into instructions.
-(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x)
+(ADD x (MOVDconst <t> [c])) && is32Bit(c) && !t.IsPtr() => (ADDconst [int32(c)] x)
 (ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
 (SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
 
diff --git a/src/cmd/compile/internal/ssa/_gen/Wasm.rules b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
index e31808ebe175a3..91a9fc5e4a9772 100644
--- a/src/cmd/compile/internal/ssa/_gen/Wasm.rules
+++ b/src/cmd/compile/internal/ssa/_gen/Wasm.rules
@@ -372,7 +372,7 @@
 (I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
 (I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
 
-(I64Add x (I64Const [y])) => (I64AddConst [y] x)
+(I64Add x (I64Const <t> [y])) && !t.IsPtr() => (I64AddConst [y] x)
 (I64AddConst [0] x) => x
 
 (I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x)
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
index 064173a946bbf9..f658d9380a29d7 100644
--- a/src/cmd/compile/internal/ssa/rewrite386.go
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -726,7 +726,8 @@ func rewriteValue386_Op386ADCL(v *Value) bool {
 func rewriteValue386_Op386ADDL(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADDL x (MOVLconst [c]))
+	// match: (ADDL x (MOVLconst <t> [c]))
+	// cond: !t.IsPtr()
 	// result: (ADDLconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -734,7 +735,11 @@ func rewriteValue386_Op386ADDL(v *Value) bool {
 			if v_1.Op != Op386MOVLconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt32(v_1.AuxInt)
+			if !(!t.IsPtr()) {
+				continue
+			}
 			v.reset(Op386ADDLconst)
 			v.AuxInt = int32ToAuxInt(c)
 			v.AddArg(x)
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
index d0982ce17bd67e..41cf6b93525ce5 100644
--- a/src/cmd/compile/internal/ssa/rewriteAMD64.go
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -1792,8 +1792,8 @@ func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
 func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADDQ x (MOVQconst [c]))
-	// cond: is32Bit(c)
+	// match: (ADDQ x (MOVQconst <t> [c]))
+	// cond: is32Bit(c) && !t.IsPtr()
 	// result: (ADDQconst [int32(c)] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1801,8 +1801,9 @@ func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
 			if v_1.Op != OpAMD64MOVQconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
-			if !(is32Bit(c)) {
+			if !(is32Bit(c) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpAMD64ADDQconst)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
index 6ea1a7e4b26afa..896ea502233843 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -1315,7 +1315,8 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
 	b := v.Block
-	// match: (ADD x (MOVWconst [c]))
+	// match: (ADD x (MOVWconst <t> [c]))
+	// cond: !t.IsPtr()
 	// result: (ADDconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1323,7 +1324,11 @@ func rewriteValueARM_OpARMADD(v *Value) bool {
 			if v_1.Op != OpARMMOVWconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt32(v_1.AuxInt)
+			if !(!t.IsPtr()) {
+				continue
+			}
 			v.reset(OpARMADDconst)
 			v.AuxInt = int32ToAuxInt(c)
 			v.AddArg(x)
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
index 6f02b507e823f8..7cc7a2a4242761 100644
--- a/src/cmd/compile/internal/ssa/rewriteARM64.go
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -1178,7 +1178,8 @@ func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool {
 func rewriteValueARM64_OpARM64ADD(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADD x (MOVDconst [c]))
+	// match: (ADD x (MOVDconst <t> [c]))
+	// cond: !t.IsPtr()
 	// result: (ADDconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1186,7 +1187,11 @@ func rewriteValueARM64_OpARM64ADD(v *Value) bool {
 			if v_1.Op != OpARM64MOVDconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
+			if !(!t.IsPtr()) {
+				continue
+			}
 			v.reset(OpARM64ADDconst)
 			v.AuxInt = int64ToAuxInt(c)
 			v.AddArg(x)
diff --git a/src/cmd/compile/internal/ssa/rewriteLOONG64.go b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
index 3c783a3037bbde..9008923c80b92e 100644
--- a/src/cmd/compile/internal/ssa/rewriteLOONG64.go
+++ b/src/cmd/compile/internal/ssa/rewriteLOONG64.go
@@ -1354,8 +1354,8 @@ func rewriteValueLOONG64_OpIsSliceInBounds(v *Value) bool {
 func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADDV x (MOVVconst [c]))
-	// cond: is32Bit(c)
+	// match: (ADDV x (MOVVconst <t> [c]))
+	// cond: is32Bit(c) && !t.IsPtr()
 	// result: (ADDVconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -1363,8 +1363,9 @@ func rewriteValueLOONG64_OpLOONG64ADDV(v *Value) bool {
 			if v_1.Op != OpLOONG64MOVVconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
-			if !(is32Bit(c)) {
+			if !(is32Bit(c) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpLOONG64ADDVconst)
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
index a8cda7644a7c1f..4d56908b30f8e9 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -2025,7 +2025,8 @@ func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
 func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADD x (MOVWconst [c]))
+	// match: (ADD x (MOVWconst <t> [c]))
+	// cond: !t.IsPtr()
 	// result: (ADDconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -2033,7 +2034,11 @@ func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
 			if v_1.Op != OpMIPSMOVWconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt32(v_1.AuxInt)
+			if !(!t.IsPtr()) {
+				continue
+			}
 			v.reset(OpMIPSADDconst)
 			v.AuxInt = int32ToAuxInt(c)
 			v.AddArg(x)
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
index 82d52f0b3f3479..8b01407e01b3dc 100644
--- a/src/cmd/compile/internal/ssa/rewriteMIPS64.go
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -2298,8 +2298,8 @@ func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
 func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADDV x (MOVVconst [c]))
-	// cond: is32Bit(c)
+	// match: (ADDV x (MOVVconst <t> [c]))
+	// cond: is32Bit(c) && !t.IsPtr()
 	// result: (ADDVconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -2307,8 +2307,9 @@ func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
 			if v_1.Op != OpMIPS64MOVVconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
-			if !(is32Bit(c)) {
+			if !(is32Bit(c) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpMIPS64ADDVconst)
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
index a0d4b54c7ab0f0..bcd0a9ab4ebc18 100644
--- a/src/cmd/compile/internal/ssa/rewritePPC64.go
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -3811,8 +3811,8 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
 		}
 		break
 	}
-	// match: (ADD x (MOVDconst [c]))
-	// cond: is32Bit(c)
+	// match: (ADD x (MOVDconst <t> [c]))
+	// cond: is32Bit(c) && !t.IsPtr()
 	// result: (ADDconst [c] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -3820,8 +3820,9 @@ func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
 			if v_1.Op != OpPPC64MOVDconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
-			if !(is32Bit(c)) {
+			if !(is32Bit(c) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpPPC64ADDconst)
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
index 66a6967db4cd0a..68851272166e20 100644
--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -3150,17 +3150,18 @@ func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
 func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADD (MOVDconst [val]) x)
-	// cond: is32Bit(val)
+	// match: (ADD (MOVDconst <t> [val]) x)
+	// cond: is32Bit(val) && !t.IsPtr()
 	// result: (ADDI [val] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
 			if v_0.Op != OpRISCV64MOVDconst {
 				continue
 			}
+			t := v_0.Type
 			val := auxIntToInt64(v_0.AuxInt)
 			x := v_1
-			if !(is32Bit(val)) {
+			if !(is32Bit(val) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpRISCV64ADDI)
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
index b766156b437051..e8cc88d655faf0 100644
--- a/src/cmd/compile/internal/ssa/rewriteS390X.go
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -5286,8 +5286,8 @@ func rewriteValueS390X_OpRsh8x8(v *Value) bool {
 func rewriteValueS390X_OpS390XADD(v *Value) bool {
 	v_1 := v.Args[1]
 	v_0 := v.Args[0]
-	// match: (ADD x (MOVDconst [c]))
-	// cond: is32Bit(c)
+	// match: (ADD x (MOVDconst <t> [c]))
+	// cond: is32Bit(c) && !t.IsPtr()
 	// result: (ADDconst [int32(c)] x)
 	for {
 		for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
@@ -5295,8 +5295,9 @@ func rewriteValueS390X_OpS390XADD(v *Value) bool {
 			if v_1.Op != OpS390XMOVDconst {
 				continue
 			}
+			t := v_1.Type
 			c := auxIntToInt64(v_1.AuxInt)
-			if !(is32Bit(c)) {
+			if !(is32Bit(c) && !t.IsPtr()) {
 				continue
 			}
 			v.reset(OpS390XADDconst)
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
index bb35d8e663ce93..6f83aea13afc5a 100644
--- a/src/cmd/compile/internal/ssa/rewriteWasm.go
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -3620,14 +3620,19 @@ func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
 		v.AddArg2(y, v0)
 		return true
 	}
-	// match: (I64Add x (I64Const [y]))
+	// match: (I64Add x (I64Const <t> [y]))
+	// cond: !t.IsPtr()
 	// result: (I64AddConst [y] x)
 	for {
 		x := v_0
 		if v_1.Op != OpWasmI64Const {
 			break
		}
+		t := v_1.Type
 		y := auxIntToInt64(v_1.AuxInt)
+		if !(!t.IsPtr()) {
+			break
+		}
 		v.reset(OpWasmI64AddConst)
 		v.AuxInt = int64ToAuxInt(y)
 		v.AddArg(x)
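Reviewer note, not part of the patch: each pair of hunks above is two views of one change. In the rules DSL, `<t>` binds the type of the operand it annotates, and each `&&` clause becomes a guard in the generated matcher, which is why every generated file binds `t := v_1.Type` (or `v_0.Type` on RISCV64, where the constant is the first operand) from the *constant*, not from `x`. As I read the change, the intent of `!t.IsPtr()` is that a pointer-typed constant must stay an explicit SSA operand: folded into the immediate of an ADDconst-style op, it would no longer be visible as a pointer to later pointer-aware passes. The sketch below mirrors the shape of the generated matchers; `Op`, `Type`, `Value`, and `rewriteADD` are mock stand-ins for illustration, not the real cmd/compile/internal/ssa API.

package main

import "fmt"

// Mock stand-ins for the real types in cmd/compile/internal/ssa and
// cmd/compile/internal/types; names and fields are hypothetical.
type Type struct{ ptr bool }

func (t *Type) IsPtr() bool { return t.ptr }

type Op int

const (
	OpArg Op = iota
	OpADD
	OpMOVDconst
	OpADDconst
)

type Value struct {
	Op     Op
	Type   *Type
	AuxInt int64
	Args   []*Value
}

// rewriteADD mirrors the generated matcher for
//	(ADD x (MOVDconst <t> [c])) && !t.IsPtr() => (ADDconst [c] x)
// including the two-iteration scan that tries both argument orders,
// since ADD is commutative.
func rewriteADD(v *Value) bool {
	v_0, v_1 := v.Args[0], v.Args[1]
	for i := 0; i <= 1; i, v_0, v_1 = i+1, v_1, v_0 {
		x := v_0
		if v_1.Op != OpMOVDconst {
			continue
		}
		t := v_1.Type // the <t> binding: the constant's type, not x's
		c := v_1.AuxInt
		if !(!t.IsPtr()) { // the "&& !t.IsPtr()" guard, as rulegen emits it
			continue
		}
		v.Op = OpADDconst // v.reset(OpADDconst) in the real code
		v.AuxInt = c
		v.Args = []*Value{x}
		return true
	}
	return false
}

func main() {
	i64 := &Type{ptr: false}
	ptr := &Type{ptr: true}
	x := &Value{Op: OpArg, Type: i64}

	intAdd := &Value{Op: OpADD, Type: i64, Args: []*Value{x, {Op: OpMOVDconst, Type: i64, AuxInt: 8}}}
	// The problematic shape: the constant operand is the pointer and x is
	// the integer offset, so folding would hide the pointer in an immediate.
	ptrAdd := &Value{Op: OpADD, Type: ptr, Args: []*Value{x, {Op: OpMOVDconst, Type: ptr, AuxInt: 8}}}

	fmt.Println(rewriteADD(intAdd)) // true: folded to ADDconst [8] x
	fmt.Println(rewriteADD(ptrAdd)) // false: pointer constant stays a value
}

Running the sketch prints true for the integer add (folded) and false for the pointer add (left alone), matching the new behavior of every rewriteValue*ADD function in the diff.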