[LLVM][tests/CodeGen/AArch64] Convert instances of ConstantExpr based splats to use splat().

This is mostly NFC but some output does change due to consistently
inserting into poison rather than undef and using i64 as the index
type for inserts.
paulwalker-arm committed Feb 27, 2024
1 parent 19cec9c commit d6ff986
Showing 17 changed files with 216 additions and 216 deletions.
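
For reference, the change swaps the verbose ConstantExpr splat idiom for the splat() constant shorthand. A minimal before/after sketch follows; the value names %v and %r, the <vscale x 4 x i32> type and the constant 3 are illustrative only, not taken from any particular test:

; Before: splat spelled as an insertelement into poison/undef plus a zero-index shufflevector
%r = add <vscale x 4 x i32> %v, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 3, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer)
; After: the equivalent splat constant
%r = add <vscale x 4 x i32> %v, splat (i32 3)
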
@@ -40,11 +40,11 @@ entry:
%7 = fmul fast <vscale x 2 x double> %2, %0
%8 = fmul fast <vscale x 2 x double> %3, %1
%9 = fsub fast <vscale x 2 x double> %7, %8
- %10 = fmul fast <vscale x 2 x double> %9, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 3.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
- %11 = fmul fast <vscale x 2 x double> %6, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 1.100000e+01, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+ %10 = fmul fast <vscale x 2 x double> %9, splat (double 3.000000e+00)
+ %11 = fmul fast <vscale x 2 x double> %6, splat (double 1.100000e+01)
%12 = fadd fast <vscale x 2 x double> %10, %11
- %13 = fmul fast <vscale x 2 x double> %9, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 1.100000e+01, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
- %14 = fmul fast <vscale x 2 x double> %6, shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 3.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer)
+ %13 = fmul fast <vscale x 2 x double> %9, splat (double 1.100000e+01)
+ %14 = fmul fast <vscale x 2 x double> %6, splat (double 3.000000e+00)
%15 = fsub fast <vscale x 2 x double> %13, %14
%interleaved.vec = tail call <vscale x 4 x double> @llvm.experimental.vector.interleave2.nxv4f64(<vscale x 2 x double> %15, <vscale x 2 x double> %12)
ret <vscale x 4 x double> %interleaved.vec
llvm/test/CodeGen/AArch64/dag-combine-concat-vectors.ll (2 changes: 1 addition & 1 deletion)
@@ -63,7 +63,7 @@ define fastcc i8 @allocno_reload_assign() {
br label %1

1: ; preds = %1, %0
- call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x ptr> zeroinitializer, i32 0, <vscale x 16 x i1> xor (<vscale x 16 x i1> shufflevector (<vscale x 16 x i1> icmp eq (<vscale x 16 x ptr> insertelement (<vscale x 16 x ptr> poison, ptr null, i64 0), <vscale x 16 x ptr> zeroinitializer), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer)))
+ call void @llvm.masked.scatter.nxv16i8.nxv16p0(<vscale x 16 x i8> zeroinitializer, <vscale x 16 x ptr> zeroinitializer, i32 0, <vscale x 16 x i1> xor (<vscale x 16 x i1> shufflevector (<vscale x 16 x i1> icmp eq (<vscale x 16 x ptr> insertelement (<vscale x 16 x ptr> poison, ptr null, i64 0), <vscale x 16 x ptr> zeroinitializer), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i1> splat (i1 true)))
br label %1
}

llvm/test/CodeGen/AArch64/fold-int-pow2-with-fmul-or-fdiv.ll (6 changes: 3 additions & 3 deletions)
@@ -611,9 +611,9 @@ define <vscale x 4 x float> @fdiv_pow2_nx4xfloat(<vscale x 4 x i32> %i) "target-
; CHECK-NEXT: ucvtf z0.s, p0/m, z0.s
; CHECK-NEXT: fdivr z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: ret
- %p2 = shl <vscale x 4 x i32> shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> poison, i32 1, i64 0), <vscale x 4 x i32> poison, <vscale x 4 x i32> zeroinitializer), %i
+ %p2 = shl <vscale x 4 x i32> splat (i32 1), %i
%p2_f = uitofp <vscale x 4 x i32> %p2 to <vscale x 4 x float>
- %r = fdiv <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 9.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer), %p2_f
+ %r = fdiv <vscale x 4 x float> splat (float 9.000000e+00), %p2_f
ret <vscale x 4 x float> %r
}

@@ -626,6 +626,6 @@ define <vscale x 2 x double> @scalable2(<vscale x 2 x i64> %0) "target-features"
; CHECK-NEXT: fdivr z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: ret
%2 = uitofp <vscale x 2 x i64> %0 to <vscale x 2 x double>
- %3 = fdiv <vscale x 2 x double> shufflevector (<vscale x 2 x double> insertelement (<vscale x 2 x double> poison, double 1.000000e+00, i64 0), <vscale x 2 x double> poison, <vscale x 2 x i32> zeroinitializer), %2
+ %3 = fdiv <vscale x 2 x double> splat (double 1.000000e+00), %2
ret <vscale x 2 x double> %3
}
llvm/test/CodeGen/AArch64/sve-calling-convention-mixed.ll (2 changes: 1 addition & 1 deletion)
@@ -724,7 +724,7 @@ define void @verify_all_operands_are_initialised() {
; CHECK-NEXT: add sp, sp, #16
; CHECK-NEXT: ldp x29, x30, [sp], #16 // 16-byte Folded Reload
; CHECK-NEXT: ret
- call void @func_f8_and_v0_passed_via_memory(float 0.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, <vscale x 4 x float> shufflevector (<vscale x 4 x float> insertelement (<vscale x 4 x float> poison, float 9.000000e+00, i64 0), <vscale x 4 x float> poison, <vscale x 4 x i32> zeroinitializer))
+ call void @func_f8_and_v0_passed_via_memory(float 0.0, float 1.0, float 2.0, float 3.0, float 4.0, float 5.0, float 6.0, float 7.0, float 8.0, <vscale x 4 x float> splat (float 9.000000e+00))
ret void
}

llvm/test/CodeGen/AArch64/sve-expand-div.ll (16 changes: 8 additions & 8 deletions)
@@ -16,7 +16,7 @@ define <vscale x 16 x i8> @sdiv_i8(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: lsr z1.b, z0.b, #7
; CHECK-NEXT: add z0.b, z0.b, z1.b
; CHECK-NEXT: ret
- %div = sdiv <vscale x 16 x i8> %a, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 3, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer)
+ %div = sdiv <vscale x 16 x i8> %a, splat (i8 3)
ret <vscale x 16 x i8> %div
}

@@ -30,7 +30,7 @@ define <vscale x 8 x i16> @sdiv_i16(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: lsr z1.h, z0.h, #15
; CHECK-NEXT: add z0.h, z0.h, z1.h
; CHECK-NEXT: ret
- %div = sdiv <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 3, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
+ %div = sdiv <vscale x 8 x i16> %a, splat (i16 3)
ret <vscale x 8 x i16> %div
}

@@ -45,7 +45,7 @@ define <vscale x 4 x i32> @sdiv_i32(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: lsr z1.s, z0.s, #31
; CHECK-NEXT: add z0.s, z0.s, z1.s
; CHECK-NEXT: ret
- %div = sdiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 3, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer)
+ %div = sdiv <vscale x 4 x i32> %a, splat (i32 3)
ret <vscale x 4 x i32> %div
}

@@ -60,7 +60,7 @@ define <vscale x 2 x i64> @sdiv_i64(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: lsr z1.d, z0.d, #63
; CHECK-NEXT: add z0.d, z0.d, z1.d
; CHECK-NEXT: ret
- %div = sdiv <vscale x 2 x i64> %a, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 3, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer)
+ %div = sdiv <vscale x 2 x i64> %a, splat (i64 3)
ret <vscale x 2 x i64> %div
}

@@ -76,7 +76,7 @@ define <vscale x 16 x i8> @udiv_i8(<vscale x 16 x i8> %a) #0 {
; CHECK-NEXT: umulh z0.b, p0/m, z0.b, z1.b
; CHECK-NEXT: lsr z0.b, z0.b, #1
; CHECK-NEXT: ret
- %div = udiv <vscale x 16 x i8> %a, shufflevector (<vscale x 16 x i8> insertelement (<vscale x 16 x i8> undef, i8 3, i32 0), <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer)
+ %div = udiv <vscale x 16 x i8> %a, splat (i8 3)
ret <vscale x 16 x i8> %div
}

@@ -89,7 +89,7 @@ define <vscale x 8 x i16> @udiv_i16(<vscale x 8 x i16> %a) #0 {
; CHECK-NEXT: umulh z0.h, p0/m, z0.h, z1.h
; CHECK-NEXT: lsr z0.h, z0.h, #1
; CHECK-NEXT: ret
- %div = udiv <vscale x 8 x i16> %a, shufflevector (<vscale x 8 x i16> insertelement (<vscale x 8 x i16> undef, i16 3, i32 0), <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer)
+ %div = udiv <vscale x 8 x i16> %a, splat (i16 3)
ret <vscale x 8 x i16> %div
}

@@ -103,7 +103,7 @@ define <vscale x 4 x i32> @udiv_i32(<vscale x 4 x i32> %a) #0 {
; CHECK-NEXT: umulh z0.s, p0/m, z0.s, z1.s
; CHECK-NEXT: lsr z0.s, z0.s, #1
; CHECK-NEXT: ret
- %div = udiv <vscale x 4 x i32> %a, shufflevector (<vscale x 4 x i32> insertelement (<vscale x 4 x i32> undef, i32 3, i32 0), <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer)
+ %div = udiv <vscale x 4 x i32> %a, splat (i32 3)
ret <vscale x 4 x i32> %div
}

@@ -117,7 +117,7 @@ define <vscale x 2 x i64> @udiv_i64(<vscale x 2 x i64> %a) #0 {
; CHECK-NEXT: umulh z0.d, p0/m, z0.d, z1.d
; CHECK-NEXT: lsr z0.d, z0.d, #1
; CHECK-NEXT: ret
- %div = udiv <vscale x 2 x i64> %a, shufflevector (<vscale x 2 x i64> insertelement (<vscale x 2 x i64> undef, i64 3, i32 0), <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer)
+ %div = udiv <vscale x 2 x i64> %a, splat (i64 3)
ret <vscale x 2 x i64> %div
}

llvm/test/CodeGen/AArch64/sve-fp-int-min-max.ll (6 changes: 3 additions & 3 deletions)
@@ -33,11 +33,11 @@ define i64 @scalable_int_min_max(ptr %arg, ptr %arg1, <vscale x 2 x ptr> %i37, <
entry:
%i56 = getelementptr inbounds float, ptr %arg, i64 0
%i57 = load <vscale x 2 x float>, ptr %i56, align 4
- %i58 = fmul <vscale x 2 x float> %i57, shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> poison, float 0x401D41D420000000, i64 0), <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer)
- %i59 = fadd <vscale x 2 x float> %i58, shufflevector (<vscale x 2 x float> insertelement (<vscale x 2 x float> poison, float 1.023500e+03, i64 0), <vscale x 2 x float> poison, <vscale x 2 x i32> zeroinitializer)
+ %i58 = fmul <vscale x 2 x float> %i57, splat (float 0x401D41D420000000)
+ %i59 = fadd <vscale x 2 x float> %i58, splat (float 1.023500e+03)
%i60 = fptosi <vscale x 2 x float> %i59 to <vscale x 2 x i32>
%i61 = tail call <vscale x 2 x i32> @llvm.smax.nxv2i32(<vscale x 2 x i32> %i60, <vscale x 2 x i32> zeroinitializer)
- %i62 = tail call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> %i61, <vscale x 2 x i32> shufflevector (<vscale x 2 x i32> insertelement (<vscale x 2 x i32> poison, i32 1023, i64 0), <vscale x 2 x i32> poison, <vscale x 2 x i32> zeroinitializer))
+ %i62 = tail call <vscale x 2 x i32> @llvm.smin.nxv2i32(<vscale x 2 x i32> %i61, <vscale x 2 x i32> splat (i32 1023))
%i63 = icmp ne <vscale x 2 x i32> %i62, zeroinitializer
%i64 = getelementptr float, ptr %arg1, i64 0
%i65 = tail call <vscale x 2 x float> @llvm.masked.load.nxv2f32.p0(ptr %i64, i32 4, <vscale x 2 x i1> %i63, <vscale x 2 x float> poison)
llvm/test/CodeGen/AArch64/sve-gather-scatter-dag-combine.ll (12 changes: 6 additions & 6 deletions)
@@ -95,7 +95,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_zext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = zext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
- %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
+ %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
ret <vscale x 16 x i8> %wide.masked.gather
}

@@ -121,7 +121,7 @@ define <vscale x 16 x i8> @narrow_i64_gather_index_i8_sext(ptr %out, ptr %in, <v
%wide.load = load <vscale x 16 x i8>, ptr %2, align 1
%3 = sext <vscale x 16 x i8> %wide.load to <vscale x 16 x i64>
%4 = getelementptr inbounds i8, ptr %in, <vscale x 16 x i64> %3
- %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> shufflevector (<vscale x 16 x i1> insertelement (<vscale x 16 x i1> poison, i1 true, i32 0), <vscale x 16 x i1> poison, <vscale x 16 x i32> zeroinitializer), <vscale x 16 x i8> undef)
+ %wide.masked.gather = call <vscale x 16 x i8> @llvm.masked.gather.nxv16i8.nxv16p0(<vscale x 16 x ptr> %4, i32 1, <vscale x 16 x i1> splat (i1 true), <vscale x 16 x i8> undef)
ret <vscale x 16 x i8> %wide.masked.gather
}

@@ -141,7 +141,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_zext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = zext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
- %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
+ %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
ret <vscale x 8 x i16> %wide.masked.gather
}

@@ -161,7 +161,7 @@ define <vscale x 8 x i16> @narrow_i64_gather_index_i16_sext(ptr %out, ptr %in, <
%wide.load = load <vscale x 8 x i16>, ptr %2, align 1
%3 = sext <vscale x 8 x i16> %wide.load to <vscale x 8 x i64>
%4 = getelementptr inbounds i16, ptr %in, <vscale x 8 x i64> %3
- %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> shufflevector (<vscale x 8 x i1> insertelement (<vscale x 8 x i1> poison, i1 true, i32 0), <vscale x 8 x i1> poison, <vscale x 8 x i32> zeroinitializer), <vscale x 8 x i16> undef)
+ %wide.masked.gather = call <vscale x 8 x i16> @llvm.masked.gather.nxv8i16.nxv8p0(<vscale x 8 x ptr> %4, i32 1, <vscale x 8 x i1> splat (i1 true), <vscale x 8 x i16> undef)
ret <vscale x 8 x i16> %wide.masked.gather
}

@@ -177,7 +177,7 @@ define <vscale x 4 x i32> @no_narrow_i64_gather_index_i32(ptr %out, ptr %in, <vs
%wide.load = load <vscale x 4 x i32>, ptr %2, align 1
%3 = zext <vscale x 4 x i32> %wide.load to <vscale x 4 x i64>
%4 = getelementptr inbounds i32, ptr %in, <vscale x 4 x i64> %3
- %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> shufflevector (<vscale x 4 x i1> insertelement (<vscale x 4 x i1> poison, i1 true, i32 0), <vscale x 4 x i1> poison, <vscale x 4 x i32> zeroinitializer), <vscale x 4 x i32> undef)
+ %wide.masked.gather = call <vscale x 4 x i32> @llvm.masked.gather.nxv4i32.nxv4p0(<vscale x 4 x ptr> %4, i32 1, <vscale x 4 x i1> splat (i1 true), <vscale x 4 x i32> undef)
ret <vscale x 4 x i32> %wide.masked.gather
}

@@ -192,7 +192,7 @@ define <vscale x 2 x i64> @no_narrow_i64_gather_index_i64(ptr %out, ptr %in, <vs
%2 = bitcast ptr %1 to ptr
%wide.load = load <vscale x 2 x i64>, ptr %2, align 1
%3 = getelementptr inbounds i64, ptr %in, <vscale x 2 x i64> %wide.load
- %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> shufflevector (<vscale x 2 x i1> insertelement (<vscale x 2 x i1> poison, i1 true, i32 0), <vscale x 2 x i1> poison, <vscale x 2 x i32> zeroinitializer), <vscale x 2 x i64> undef)
+ %wide.masked.gather = call <vscale x 2 x i64> @llvm.masked.gather.nxv2i64.nxv2p0(<vscale x 2 x ptr> %3, i32 1, <vscale x 2 x i1> splat (i1 true), <vscale x 2 x i64> undef)
ret <vscale x 2 x i64> %wide.masked.gather
}
