diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -24,11 +24,7 @@
 // CHECK-LABEL: @fixed_caller(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[CASTSCALABLESVE]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
 //
 fixed_int32_t fixed_caller(fixed_int32_t x) {
   return sizeless_callee(x);
@@ -36,9 +32,7 @@
 // CHECK-LABEL: @fixed_callee(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[X:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[X]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[X_COERCE:%.*]]
 //
 fixed_int32_t fixed_callee(fixed_int32_t x) {
   return x;
@@ -47,12 +41,9 @@
 // CHECK-LABEL: @sizeless_caller(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[COERCE1:%.*]] = alloca <16 x i32>, align 16
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[X:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    [[CALL:%.*]] = call <vscale x 4 x i32> @fixed_callee(<vscale x 4 x i32> [[CASTSCALABLESVE]])
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <16 x i32>* [[COERCE1]] to <vscale x 4 x i32>*
-// CHECK-NEXT:    store <vscale x 4 x i32> [[CALL]], <vscale x 4 x i32>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, [[TBAA6:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 4 x i32> [[X:%.*]], <vscale x 4 x i32>* [[TMP0]], align 16
+// CHECK-NEXT:    [[TMP1:%.*]] = load <16 x i32>, <16 x i32>* [[COERCE1]], align 16, !tbaa [[TBAA6:![0-9]+]]
 // CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TMP1]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE2]]
 //
@@ -66,15 +57,9 @@
 // CHECK-LABEL: @call_int32_ff(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[OP2:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP2_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP2]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[CASTSCALABLESVE2]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2_COERCE:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_ff(svbool_t pg, fixed_int32_t op1, fixed_int32_t op2) {
   return svsel(pg, op1, op2);
@@ -82,15 +67,9 @@
 // CHECK-LABEL: @call_float64_ff(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[OP2:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP2_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE2:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP2]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[CASTSCALABLESVE2]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE3:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE3]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2_COERCE:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_ff(svbool_t pg, fixed_float64_t op1, fixed_float64_t op2) {
   return svsel(pg, op1, op2);
@@ -106,20 +85,20 @@
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
-// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP22]], <8 x i8>* [[OP2_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP2:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP2]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP4:%.*]] = bitcast <8 x i8>* [[OP2_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP4]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP6:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1> [[TMP5]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP6]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP7:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP7]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP8:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -135,13 +114,9 @@
 // CHECK-LABEL: @call_int32_fs(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[OP1]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[CASTSCALABLESVE]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1_COERCE:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_fs(svbool_t pg, fixed_int32_t op1, svint32_t op2) {
   return svsel(pg, op1, op2);
@@ -149,13 +124,9 @@
 // CHECK-LABEL: @call_float64_fs(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[OP1:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[OP1_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[OP1]], i64 0)
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
-// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[CASTSCALABLESVE]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE1:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE1]]
+// CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1_COERCE:%.*]], <vscale x 2 x double> [[OP2:%.*]])
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_fs(svbool_t pg, fixed_float64_t op1, svfloat64_t op2) {
   return svsel(pg, op1, op2);
@@ -169,14 +140,14 @@
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, [[TBAA6]]
-// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT:    store <8 x i8> [[OP11]], <8 x i8>* [[OP1_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[OP1_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP3:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP3]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP4:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP4]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP5:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -194,9 +165,7 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv4i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 4 x i32> @llvm.aarch64.sve.sel.nxv4i32(<vscale x 4 x i1> [[TMP0]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TMP1]]
 //
 fixed_int32_t call_int32_ss(svbool_t pg, svint32_t op1, svint32_t op2) {
   return svsel(pg, op1, op2);
@@ -206,9 +175,7 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i1> @llvm.aarch64.sve.convert.from.svbool.nxv2i1(<vscale x 16 x i1> [[PG:%.*]])
 // CHECK-NEXT:    [[TMP1:%.*]] = call <vscale x 2 x double> @llvm.aarch64.sve.sel.nxv2f64(<vscale x 2 x i1> [[TMP0]], <vscale x 2 x double> [[OP1:%.*]], <vscale x 2 x double> [[OP2:%.*]])
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TMP1]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TMP1]]
 //
 fixed_float64_t call_float64_ss(svbool_t pg, svfloat64_t op1, svfloat64_t op2) {
   return svsel(pg, op1, op2);
@@ -219,9 +186,9 @@
 // CHECK-NEXT:    [[SAVED_CALL_RVALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, [[TBAA9]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]], align 16, !tbaa [[TBAA9]]
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_CALL_RVALUE]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -13,9 +13,7 @@
 // CHECK-LABEL: @to_svint32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE_COERCE:%.*]]
 //
 svint32_t to_svint32_t(fixed_int32_t type) {
   return type;
@@ -23,9 +21,7 @@
 // CHECK-LABEL: @from_svint32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 4 x i32> [[TYPE:%.*]]
 //
 fixed_int32_t from_svint32_t(svint32_t type) {
   return type;
@@ -33,9 +29,7 @@
 // CHECK-LABEL: @to_svfloat64_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[TYPE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE_COERCE:%.*]]
 //
 svfloat64_t to_svfloat64_t(fixed_float64_t type) {
   return type;
@@ -43,9 +37,7 @@
 // CHECK-LABEL: @from_svfloat64_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <8 x double> @llvm.experimental.vector.extract.v8f64.nxv2f64(<vscale x 2 x double> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 2 x double> @llvm.experimental.vector.insert.nxv2f64.v8f64(<vscale x 2 x double> undef, <8 x double> [[CASTFIXEDSVE]], i64 0)
-// CHECK-NEXT:    ret <vscale x 2 x double> [[CASTSCALABLESVE]]
+// CHECK-NEXT:    ret <vscale x 2 x double> [[TYPE:%.*]]
 //
 fixed_float64_t from_svfloat64_t(svfloat64_t type) {
   return type;
@@ -57,10 +49,10 @@
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
 // CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, [[TBAA6:!tbaa !.*]]
-// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT:    store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret <vscale x 16 x i1> [[TMP2]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
@@ -71,9 +63,9 @@
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
 // CHECK-NEXT:    [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, [[TBAA9:!tbaa !.*]]
+// CHECK-NEXT:    store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT:    [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to <8 x i8>*
-// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
 // CHECK-NEXT:    store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
 // CHECK-NEXT:    [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
@@ -85,7 +77,7 @@
 // CHECK-LABEL: @to_svint32_t__from_gnu_int32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
@@ -96,7 +88,7 @@
 // CHECK-LABEL: @from_svint32_t__to_gnu_int32_t(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[CASTFIXEDSVE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE:%.*]], i64 0)
-// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    store <16 x i32> [[CASTFIXEDSVE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret void
 //
 gnu_int32_t from_svint32_t__to_gnu_int32_t(svint32_t type) {
@@ -105,7 +97,7 @@
 // CHECK-LABEL: @to_fixed_int32_t__from_gnu_int32_t(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    [[TYPE:%.*]] = load <16 x i32>, <16 x i32>* [[TMP0:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    [[CASTSCALABLESVE:%.*]] = call <vscale x 4 x i32> @llvm.experimental.vector.insert.nxv4i32.v16i32(<vscale x 4 x i32> undef, <16 x i32> [[TYPE]], i64 0)
 // CHECK-NEXT:    ret <vscale x 4 x i32> [[CASTSCALABLESVE]]
 //
@@ -116,7 +108,7 @@
 // CHECK-LABEL: @from_fixed_int32_t__to_gnu_int32_t(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[TYPE:%.*]] = call <16 x i32> @llvm.experimental.vector.extract.v16i32.nxv4i32(<vscale x 4 x i32> [[TYPE_COERCE:%.*]], i64 0)
-// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, [[TBAA6]]
+// CHECK-NEXT:    store <16 x i32> [[TYPE]], <16 x i32>* [[AGG_RESULT:%.*]], align 16, !tbaa [[TBAA6]]
 // CHECK-NEXT:    ret void
 //
 gnu_int32_t from_fixed_int32_t__to_gnu_int32_t(fixed_int32_t type) {
diff --git a/llvm/lib/Analysis/InstructionSimplify.cpp b/llvm/lib/Analysis/InstructionSimplify.cpp
--- a/llvm/lib/Analysis/InstructionSimplify.cpp
+++ b/llvm/lib/Analysis/InstructionSimplify.cpp
@@ -5625,6 +5625,19 @@
     break;
   }
+  case Intrinsic::experimental_vector_extract: {
+    Type *ReturnType = F->getReturnType();
+
+    // (extract_vector (insert_vector _, X, 0), 0) -> X
+    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
+    Value *X = nullptr;
+    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
+                       m_Value(), m_Value(X), m_Zero())) &&
+        IdxN == 0 && X->getType() == ReturnType)
+      return X;
+
+    break;
+  }
   default:
     break;
   }
@@ -5720,6 +5733,21 @@
     return nullptr;
   }
+  case Intrinsic::experimental_vector_insert: {
+    Value *SubVec = Call->getArgOperand(1);
+    Value *Idx = Call->getArgOperand(2);
+    Type *ReturnType = F->getReturnType();
+
+    // (insert_vector _, (extract_vector X, 0), 0) -> X
+    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
+    Value *X = nullptr;
+    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
+                          m_Value(X), m_Zero())) &&
+        IdxN == 0 && X->getType() == ReturnType)
+      return X;
+
+    return nullptr;
+  }
   default:
     return nullptr;
   }
diff --git a/llvm/test/Transforms/InstSimplify/extract-vector.ll b/llvm/test/Transforms/InstSimplify/extract-vector.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/extract-vector.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <16 x i8> @redundant_insert_extract_chain(<16 x i8> %x) {
+; CHECK-LABEL: @redundant_insert_extract_chain(
+; CHECK-NEXT:    ret <16 x i8> [[X:%.*]]
+;
+  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
+  %extracted = call <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
+  ret <16 x i8> %extracted
+}
+
+define <8 x i8> @non_redundant_insert_extract_chain(<16 x i8> %x) {
+; CHECK-LABEL: @non_redundant_insert_extract_chain(
+; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> [[X:%.*]], i64 0)
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> [[INSERTED]], i64 0)
+; CHECK-NEXT:    ret <8 x i8> [[EXTRACTED]]
+;
+  %inserted = call <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8> undef, <16 x i8> %x, i64 0)
+  %extracted = call <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8> %inserted, i64 0)
+  ret <8 x i8> %extracted
+}
+
+declare <16 x i8> @llvm.experimental.vector.extract.v16i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <8 x i8> @llvm.experimental.vector.extract.v8i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <vscale x 32 x i8> @llvm.experimental.vector.insert.nxv32i8.v16i8(<vscale x 32 x i8>, <16 x i8>, i64)
diff --git a/llvm/test/Transforms/InstSimplify/insert-vector.ll b/llvm/test/Transforms/InstSimplify/insert-vector.ll
new file mode 100644
--- /dev/null
+++ b/llvm/test/Transforms/InstSimplify/insert-vector.ll
@@ -0,0 +1,26 @@
+; NOTE: Assertions have been autogenerated by utils/update_test_checks.py
+; RUN: opt < %s -instsimplify -S | FileCheck %s
+
+define <vscale x 16 x i8> @redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 16 x i8> %x) {
+; CHECK-LABEL: @redundant_extract_insert_chain(
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[X:%.*]]
+;
+  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8> %x, i64 0)
+  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
+  ret <vscale x 16 x i8> %inserted
+}
+
+define <vscale x 16 x i8> @non_redundant_extract_insert_chain(<vscale x 16 x i1> %pg, <vscale x 32 x i8> %x) {
+; CHECK-LABEL: @non_redundant_extract_insert_chain(
+; CHECK-NEXT:    [[EXTRACTED:%.*]] = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> [[X:%.*]], i64 0)
+; CHECK-NEXT:    [[INSERTED:%.*]] = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> [[EXTRACTED]], i64 0)
+; CHECK-NEXT:    ret <vscale x 16 x i8> [[INSERTED]]
+;
+  %extracted = call <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8> %x, i64 0)
+  %inserted = call <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8> undef, <32 x i8> %extracted, i64 0)
+  ret <vscale x 16 x i8> %inserted
+}
+
+declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv16i8(<vscale x 16 x i8>, i64)
+declare <32 x i8> @llvm.experimental.vector.extract.v32i8.nxv32i8(<vscale x 32 x i8>, i64)
+declare <vscale x 16 x i8> @llvm.experimental.vector.insert.nxv16i8.v32i8(<vscale x 16 x i8>, <32 x i8>, i64)
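Illustration (not part of the patch): the new folds are element-type agnostic, so the same collapse happens for any insert/extract pair that round-trips a subvector through offset 0 with matching types. The standalone sketch below uses arbitrarily chosen function names and a <4 x float>/<vscale x 4 x float> pairing, and follows the same RUN pattern as the new tests; it is an assumption-laden example, not additional test coverage proposed for the patch.

; Illustrative sketch only: both functions should simplify to a plain return
; under -instsimplify, because the subvector is written to and read back from
; offset 0 with types that satisfy the X->getType() == ReturnType check.
; RUN: opt < %s -instsimplify -S | FileCheck %s

define <4 x float> @fold_extract_of_insert(<4 x float> %v) {
; CHECK-LABEL: @fold_extract_of_insert(
; CHECK-NEXT:    ret <4 x float> %v
  ; Insert the fixed-width vector at offset 0 of an undef scalable vector,
  ; then extract the same fixed-width type from offset 0 again.
  %ins = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %v, i64 0)
  %ext = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> %ins, i64 0)
  ret <4 x float> %ext
}

define <vscale x 4 x float> @fold_insert_of_extract(<vscale x 4 x float> %v) {
; CHECK-LABEL: @fold_insert_of_extract(
; CHECK-NEXT:    ret <vscale x 4 x float> %v
  ; Extract the low fixed-width part, then insert it back at offset 0 of undef;
  ; the insert's result type matches %v, so the chain folds to %v.
  %ext = call <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float> %v, i64 0)
  %ins = call <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float> undef, <4 x float> %ext, i64 0)
  ret <vscale x 4 x float> %ins
}

declare <4 x float> @llvm.experimental.vector.extract.v4f32.nxv4f32(<vscale x 4 x float>, i64)
declare <vscale x 4 x float> @llvm.experimental.vector.insert.nxv4f32.v4f32(<vscale x 4 x float>, <4 x float>, i64)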