From 7fe3388c58e78ec4154d45f25ecb189332f01a00 Mon Sep 17 00:00:00 2001
From: Sanjay Patel
Date: Wed, 8 Feb 2017 15:22:52 +0000
Subject: [PATCH] [x86] add AVX512vl target for more coverage; NFC

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294462 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/X86/vselect-pcmp.ll | 169 ++++++++++++++++++++++---------
 1 file changed, 122 insertions(+), 47 deletions(-)

diff --git a/test/CodeGen/X86/vselect-pcmp.ll b/test/CodeGen/X86/vselect-pcmp.ll
index 4b39503e85aa..34411a64131c 100644
--- a/test/CodeGen/X86/vselect-pcmp.ll
+++ b/test/CodeGen/X86/vselect-pcmp.ll
@@ -1,7 +1,8 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX1
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX2
-; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX1
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx2 | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX12 --check-prefix=AVX2
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512f | FileCheck %s --check-prefix=AVX --check-prefix=AVX12F --check-prefix=AVX512 --check-prefix=AVX512F
+; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=avx512vl | FileCheck %s --check-prefix=AVX --check-prefix=AVX512 --check-prefix=AVX512VL

 ; The condition vector for BLENDV* only cares about the sign bit of each element.
 ; So in these tests, if we generate BLENDV*, we should be able to remove the redundant cmp op.
@@ -23,62 +24,99 @@ define <16 x i8> @signbit_sel_v16i8(<16 x i8> %x, <16 x i8> %y, <16 x i8> %mask)

 ; Sorry 16-bit, you're not important enough to support?
 define <8 x i16> @signbit_sel_v8i16(<8 x i16> %x, <8 x i16> %y, <8 x i16> %mask) {
-; AVX-LABEL: signbit_sel_v8i16:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vpandn %xmm1, %xmm2, %xmm1
-; AVX-NEXT: vpand %xmm2, %xmm0, %xmm0
-; AVX-NEXT: vpor %xmm1, %xmm0, %xmm0
-; AVX-NEXT: retq
+; AVX12F-LABEL: signbit_sel_v8i16:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT: vpandn %xmm1, %xmm2, %xmm1
+; AVX12F-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX12F-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v8i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtw %xmm2, %xmm3, %xmm2
+; AVX512VL-NEXT: vpandnq %xmm1, %xmm2, %xmm1
+; AVX512VL-NEXT: vpand %xmm2, %xmm0, %xmm0
+; AVX512VL-NEXT: vpor %xmm1, %xmm0, %xmm0
+; AVX512VL-NEXT: retq
   %tr = icmp slt <8 x i16> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x i16> %x, <8 x i16> %y
   ret <8 x i16> %z
 }

 define <4 x i32> @signbit_sel_v4i32(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
-; AVX-LABEL: signbit_sel_v4i32:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX12F-LABEL: signbit_sel_v4i32:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vpblendmd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x i32> %x, <4 x i32> %y
   ret <4 x i32> %z
 }

 define <2 x i64> @signbit_sel_v2i64(<2 x i64> %x, <2 x i64> %y, <2 x i64> %mask) {
-; AVX-LABEL: signbit_sel_v2i64:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX12F-LABEL: signbit_sel_v2i64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v2i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vpblendmq %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <2 x i64> %mask, zeroinitializer
   %z = select <2 x i1> %tr, <2 x i64> %x, <2 x i64> %y
   ret <2 x i64> %z
 }

 define <4 x float> @signbit_sel_v4f32(<4 x float> %x, <4 x float> %y, <4 x i32> %mask) {
-; AVX-LABEL: signbit_sel_v4f32:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX12F-LABEL: signbit_sel_v4f32:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT: vpcmpgtd %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT: vblendvps %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmps %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x float> %x, <4 x float> %y
   ret <4 x float> %z
 }

 define <2 x double> @signbit_sel_v2f64(<2 x double> %x, <2 x double> %y, <2 x i64> %mask) {
-; AVX-LABEL: signbit_sel_v2f64:
-; AVX: # BB#0:
-; AVX-NEXT: vpxor %xmm3, %xmm3, %xmm3
-; AVX-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
-; AVX-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
-; AVX-NEXT: retq
+; AVX12F-LABEL: signbit_sel_v2f64:
+; AVX12F: # BB#0:
+; AVX12F-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX12F-NEXT: vpcmpgtq %xmm2, %xmm3, %xmm2
+; AVX12F-NEXT: vblendvpd %xmm2, %xmm0, %xmm1, %xmm0
+; AVX12F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v2f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtq %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmpd %xmm0, %xmm1, %xmm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <2 x i64> %mask, zeroinitializer
   %z = select <2 x i1> %tr, <2 x double> %x, <2 x double> %y
   ret <2 x double> %z
@@ -106,12 +144,12 @@ define <32 x i8> @signbit_sel_v32i8(<32 x i8> %x, <32 x i8> %y, <32 x i8> %mask)
 ; AVX2-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 ;
-; AVX512F-LABEL: signbit_sel_v32i8:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpxor %ymm3, %ymm3, %ymm3
-; AVX512F-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
-; AVX512F-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
-; AVX512F-NEXT: retq
+; AVX512-LABEL: signbit_sel_v32i8:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512-NEXT: vpcmpgtb %ymm2, %ymm3, %ymm2
+; AVX512-NEXT: vpblendvb %ymm2, %ymm0, %ymm1, %ymm0
+; AVX512-NEXT: retq
   %tr = icmp slt <32 x i8> %mask, zeroinitializer
   %z = select <32 x i1> %tr, <32 x i8> %x, <32 x i8> %y
   ret <32 x i8> %z
@@ -149,6 +187,15 @@ define <16 x i16> @signbit_sel_v16i16(<16 x i16> %x, <16 x i16> %y, <16 x i16> %
 ; AVX512F-NEXT: vpand %ymm2, %ymm0, %ymm0
 ; AVX512F-NEXT: vpor %ymm1, %ymm0, %ymm0
 ; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v16i16:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtw %ymm2, %ymm3, %ymm2
+; AVX512VL-NEXT: vpandnq %ymm1, %ymm2, %ymm1
+; AVX512VL-NEXT: vpand %ymm2, %ymm0, %ymm0
+; AVX512VL-NEXT: vpor %ymm1, %ymm0, %ymm0
+; AVX512VL-NEXT: retq
   %tr = icmp slt <16 x i16> %mask, zeroinitializer
   %z = select <16 x i1> %tr, <16 x i16> %x, <16 x i16> %y
   ret <16 x i16> %z
@@ -182,6 +229,13 @@ define <8 x i32> @signbit_sel_v8i32(<8 x i32> %x, <8 x i32> %y, <8 x i32> %mask)
 ; AVX512F-NEXT: vpblendmd %zmm0, %zmm1, %zmm0 {%k1}
 ; AVX512F-NEXT: # kill: %YMM0 %YMM0 %ZMM0
 ; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v8i32:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtd %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vpblendmd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <8 x i32> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x i32> %x, <8 x i32> %y
   ret <8 x i32> %z
@@ -211,6 +265,13 @@ define <4 x i64> @signbit_sel_v4i64(<4 x i64> %x, <4 x i64> %y, <4 x i64> %mask)
 ; AVX512F-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4i64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vpblendmq %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <4 x i64> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x i64> %x, <4 x i64> %y
   ret <4 x i64> %z
@@ -240,6 +301,13 @@ define <4 x double> @signbit_sel_v4f64(<4 x double> %x, <4 x double> %y, <4 x i6
 ; AVX512F-NEXT: vpcmpgtq %ymm2, %ymm3, %ymm2
 ; AVX512F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %ymm3, %ymm3, %ymm3
+; AVX512VL-NEXT: vpcmpgtq %ymm2, %ymm3, %k1
+; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <4 x i64> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
   ret <4 x double> %z
@@ -274,6 +342,13 @@ define <4 x double> @signbit_sel_v4f64_small_mask(<4 x double> %x, <4 x double>
 ; AVX512F-NEXT: vpmovsxdq %xmm2, %ymm2
 ; AVX512F-NEXT: vblendvpd %ymm2, %ymm0, %ymm1, %ymm0
 ; AVX512F-NEXT: retq
+;
+; AVX512VL-LABEL: signbit_sel_v4f64_small_mask:
+; AVX512VL: # BB#0:
+; AVX512VL-NEXT: vpxor %xmm3, %xmm3, %xmm3
+; AVX512VL-NEXT: vpcmpgtd %xmm2, %xmm3, %k1
+; AVX512VL-NEXT: vblendmpd %ymm0, %ymm1, %ymm0 {%k1}
+; AVX512VL-NEXT: retq
   %tr = icmp slt <4 x i32> %mask, zeroinitializer
   %z = select <4 x i1> %tr, <4 x double> %x, <4 x double> %y
   ret <4 x double> %z
@@ -306,12 +381,12 @@ define <8 x double> @signbit_sel_v8f64(<8 x double> %x, <8 x double> %y, <8 x i6
 ; AVX2-NEXT: vblendvpd %ymm5, %ymm1, %ymm3, %ymm1
 ; AVX2-NEXT: retq
 ;
-; AVX512F-LABEL: signbit_sel_v8f64:
-; AVX512F: # BB#0:
-; AVX512F-NEXT: vpxord %zmm3, %zmm3, %zmm3
-; AVX512F-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
-; AVX512F-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
-; AVX512F-NEXT: retq
+; AVX512-LABEL: signbit_sel_v8f64:
+; AVX512: # BB#0:
+; AVX512-NEXT: vpxord %zmm3, %zmm3, %zmm3
+; AVX512-NEXT: vpcmpgtq %zmm2, %zmm3, %k1
+; AVX512-NEXT: vblendmpd %zmm0, %zmm1, %zmm0 {%k1}
+; AVX512-NEXT: retq
   %tr = icmp slt <8 x i64> %mask, zeroinitializer
   %z = select <8 x i1> %tr, <8 x double> %x, <8 x double> %y
   ret <8 x double> %z
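
Every function in the file exercises the same IR shape: a vector select whose condition depends only on the sign bit of %mask. A minimal standalone sketch of that shape, mirroring signbit_sel_v4i32 from the test (the file name signbit-example.ll and function name below are illustrative, not part of the patch):

; signbit-example.ll -- check the lowering with, e.g.:
;   llc < signbit-example.ll -mtriple=x86_64-unknown-unknown -mattr=avx512vl
define <4 x i32> @signbit_select_example(<4 x i32> %x, <4 x i32> %y, <4 x i32> %mask) {
  ; "icmp slt ... zeroinitializer" is true exactly where the sign bit of %mask is set
  %cond = icmp slt <4 x i32> %mask, zeroinitializer
  ; per the checks above, AVX/AVX2/AVX512F lower this select to vpcmpgtd + vblendvps,
  ; while AVX512VL compares into a %k mask register and uses vpblendmd
  %sel = select <4 x i1> %cond, <4 x i32> %x, <4 x i32> %y
  ret <4 x i32> %sel
}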