diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-bitcast.c
@@ -191,32 +191,32 @@
 // CHECK-128-LABEL: @read_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 2
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-256-LABEL: @read_bool(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <4 x i8>, align 16
+// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <4 x i8>, align 4
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-256-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[SAVED_VALUE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <4 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_bool(struct struct_bool *s) {
@@ -225,30 +225,30 @@
 // CHECK-128-LABEL: @write_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-256-LABEL: @write_bool(
 // CHECK-256-NEXT: entry:
-// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-256-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 4
+// CHECK-256-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 4, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-256-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <4 x i8>*
-// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-256-NEXT: [[TMP0:%.*]] = load <4 x i8>, <4 x i8>* [[CASTFIXEDSVE]], align 4, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-256-NEXT: store <4 x i8> [[TMP0]], <4 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-256-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[X:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [[STRUCT_STRUCT_BOOL:%.*]], %struct.struct_bool* [[S:%.*]], i64 0, i32 1, i64 0
 // CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[ARRAYIDX]], align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-call.c
@@ -77,31 +77,31 @@
 // CHECK-LABEL: @call_bool_ff(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_VALUE3:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_VALUE5:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[OP2:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE3:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE5:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[OP2]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 16
-// CHECK-NEXT: [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[OP2_COERCE:%.*]], <vscale x 16 x i1>* [[TMP1]], align 8
+// CHECK-NEXT: [[OP22:%.*]] = load <8 x i8>, <8 x i8>* [[OP2]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[SAVED_VALUE3]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP22]], <8 x i8>* [[SAVED_VALUE3]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[CASTFIXEDSVE4:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE3]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE4]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE4]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[TMP4:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1> [[TMP3]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_VALUE5]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP4]], <vscale x 16 x i1>* [[SAVED_VALUE5]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-NEXT: [[CASTFIXEDSVE6:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE5]] to <8 x i8>*
-// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE6]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP5:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE6]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: store <8 x i8> [[TMP5]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT: [[TMP6:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP6]]
 //
 fixed_bool_t call_bool_ff(svbool_t pg, fixed_bool_t op1, fixed_bool_t op2) {
@@ -134,23 +134,23 @@
 // CHECK-LABEL: @call_bool_fs(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[OP1:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE2:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[OP1]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[OP1_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT: [[OP11:%.*]] = load <8 x i8>, <8 x i8>* [[OP1]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT: store <8 x i8> [[OP11]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[TMP2:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[TMP1]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_VALUE2]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP2]], <vscale x 16 x i1>* [[SAVED_VALUE2]], align 8, !tbaa [[TBAA9]]
 // CHECK-NEXT: [[CASTFIXEDSVE3:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE2]] to <8 x i8>*
-// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE3]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP3:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE3]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: store <8 x i8> [[TMP3]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
 //
 fixed_bool_t call_bool_fs(svbool_t pg, fixed_bool_t op1, svbool_t op2) {
@@ -183,15 +183,15 @@
 // CHECK-LABEL: @call_bool_ss(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
 // CHECK-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i1> @llvm.aarch64.sve.sel.nxv16i1(<vscale x 16 x i1> [[PG:%.*]], <vscale x 16 x i1> [[OP1:%.*]], <vscale x 16 x i1> [[OP2:%.*]])
-// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TMP0]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9]]
 // CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
 //
 fixed_bool_t call_bool_ss(svbool_t pg, svbool_t op1, svbool_t op2) {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-cast.c
@@ -45,15 +45,15 @@
 // CHECK-LABEL: @to_svbool_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE:%.*]] = alloca <8 x i8>, align 16
-// CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <8 x i8>, align 16
+// CHECK-NEXT: [[TYPE:%.*]] = alloca <8 x i8>, align 8
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-NEXT: [[TMP0:%.*]] = bitcast <8 x i8>* [[TYPE]] to <vscale x 16 x i1>*
-// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 16
-// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 16, !tbaa [[TBAA6:![0-9]+]]
-// CHECK-NEXT: store <8 x i8> [[TYPE1]], <8 x i8>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: [[TMP1:%.*]] = bitcast <8 x i8>* [[TYPE_ADDR]] to <vscale x 16 x i1>*
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[TMP1]], align 16, !tbaa [[TBAA6]]
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
+// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE_COERCE:%.*]], <vscale x 16 x i1>* [[TMP0]], align 8
+// CHECK-NEXT: [[TYPE1:%.*]] = load <8 x i8>, <8 x i8>* [[TYPE]], align 8, !tbaa [[TBAA6:![0-9]+]]
+// CHECK-NEXT: store <8 x i8> [[TYPE1]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t to_svbool_t(fixed_bool_t type) {
   return type;
@@ -61,15 +61,15 @@
 // CHECK-LABEL: @from_svbool_t(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TYPE_ADDR:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[TYPE_ADDR]], align 16, !tbaa [[TBAA9:![0-9]+]]
-// CHECK-NEXT: [[TMP0:%.*]] = bitcast <vscale x 16 x i1>* [[TYPE_ADDR]] to <8 x i8>*
-// CHECK-NEXT: [[TMP1:%.*]] = load <8 x i8>, <8 x i8>* [[TMP0]], align 16, !tbaa [[TBAA6]]
+// CHECK-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-NEXT: store <vscale x 16 x i1> [[TYPE:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
+// CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-NEXT: [[RETVAL_0__SROA_CAST:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to <8 x i8>*
-// CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 16
-// CHECK-NEXT: [[TMP2:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
-// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP2]]
+// CHECK-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[RETVAL_0__SROA_CAST]], align 8
+// CHECK-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 8
+// CHECK-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 fixed_bool_t from_svbool_t(svbool_t type) {
   return type;
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-codegen.c
@@ -92,7 +92,7 @@
 // CHECK-NEXT: [[RETVAL:%.*]] = alloca <8 x i8>, align 2
 // CHECK-NEXT: [[ARR:%.*]] = alloca [3 x <8 x i8>], align 2
 // CHECK-NEXT: [[PARR:%.*]] = alloca <8 x i8>*, align 8
-// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 16
+// CHECK-NEXT: [[RETVAL_COERCE:%.*]] = alloca <vscale x 16 x i1>, align 2
 // CHECK-NEXT: [[ARRAYIDX:%.*]] = getelementptr inbounds [3 x <8 x i8>], [3 x <8 x i8>]* [[ARR]], i64 0, i64 0
 // CHECK-NEXT: store <8 x i8>* [[ARRAYIDX]], <8 x i8>** [[PARR]], align 8
 // CHECK-NEXT: [[TMP0:%.*]] = load <8 x i8>*, <8 x i8>** [[PARR]], align 8
@@ -100,8 +100,8 @@
 // CHECK-NEXT: store <8 x i8> [[TMP1]], <8 x i8>* [[RETVAL]], align 2
 // CHECK-NEXT: [[TMP2:%.*]] = bitcast <vscale x 16 x i1>* [[RETVAL_COERCE]] to i8*
 // CHECK-NEXT: [[TMP3:%.*]] = bitcast <8 x i8>* [[RETVAL]] to i8*
-// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 16 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
-// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 16
+// CHECK-NEXT: call void @llvm.memcpy.p0i8.p0i8.i64(i8* align 2 [[TMP2]], i8* align 2 [[TMP3]], i64 8, i1 false)
+// CHECK-NEXT: [[TMP4:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[RETVAL_COERCE]], align 2
 // CHECK-NEXT: ret <vscale x 16 x i1> [[TMP4]]
 //
 fixed_bool_t address_of_array_idx() {
diff --git a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
--- a/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
+++ b/clang/test/CodeGen/attr-arm-sve-vector-bits-globals.c
@@ -49,19 +49,19 @@
 // CHECK-128-LABEL: @write_global_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 2
+// CHECK-128-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <2 x i8>*
-// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret void
 //
 // CHECK-512-LABEL: @write_global_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 16
-// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA9:![0-9]+]]
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <vscale x 16 x i1>, align 8
+// CHECK-512-NEXT: store <vscale x 16 x i1> [[V:%.*]], <vscale x 16 x i1>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA9:![0-9]+]]
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <vscale x 16 x i1>* [[SAVED_VALUE]] to <8 x i8>*
-// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret void
 //
@@ -101,20 +101,20 @@
 // CHECK-128-LABEL: @read_global_bool(
 // CHECK-128-NEXT: entry:
-// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 16
+// CHECK-128-NEXT: [[SAVED_VALUE:%.*]] = alloca <2 x i8>, align 2
 // CHECK-128-NEXT: [[TMP0:%.*]] = load <2 x i8>, <2 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: store <2 x i8> [[TMP0]], <2 x i8>* [[SAVED_VALUE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <2 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-128-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 2, !tbaa [[TBAA6]]
 // CHECK-128-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 // CHECK-512-LABEL: @read_global_bool(
 // CHECK-512-NEXT: entry:
-// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 16
+// CHECK-512-NEXT: [[SAVED_VALUE:%.*]] = alloca <8 x i8>, align 8
 // CHECK-512-NEXT: [[TMP0:%.*]] = load <8 x i8>, <8 x i8>* @global_bool, align 2, !tbaa [[TBAA6]]
-// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: store <8 x i8> [[TMP0]], <8 x i8>* [[SAVED_VALUE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: [[CASTFIXEDSVE:%.*]] = bitcast <8 x i8>* [[SAVED_VALUE]] to <vscale x 16 x i1>*
-// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 16, !tbaa [[TBAA6]]
+// CHECK-512-NEXT: [[TMP1:%.*]] = load <vscale x 16 x i1>, <vscale x 16 x i1>* [[CASTFIXEDSVE]], align 8, !tbaa [[TBAA6]]
 // CHECK-512-NEXT: ret <vscale x 16 x i1> [[TMP1]]
 //
 svbool_t read_global_bool() { return global_bool; }
diff --git a/clang/test/CodeGen/builtins-ppc-pair-mma.c b/clang/test/CodeGen/builtins-ppc-pair-mma.c
--- a/clang/test/CodeGen/builtins-ppc-pair-mma.c
+++ b/clang/test/CodeGen/builtins-ppc-pair-mma.c
@@ -1112,10 +1112,10 @@
 // CHECK-LABEL: @test71(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 128
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 1024
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <256 x i1>* [[TMP0]] to i8*
 // CHECK-NEXT: [[TMP2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 128
+// CHECK-NEXT: [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 1024
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast <256 x i1>* [[TMP3]] to i8*
 // CHECK-NEXT: tail call void @llvm.ppc.vsx.stxvp(<256 x i1> [[TMP2]], i8* [[TMP4]])
 // CHECK-NEXT: ret void
@@ -1304,10 +1304,10 @@
 // CHECK-LABEL: @test83(
 // CHECK-NEXT: entry:
-// CHECK-NEXT: [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 128
+// CHECK-NEXT: [[TMP0:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VPP:%.*]], i64 1024
 // CHECK-NEXT: [[TMP1:%.*]] = bitcast <256 x i1>* [[TMP0]] to i8*
 // CHECK-NEXT: [[TMP2:%.*]] = tail call <256 x i1> @llvm.ppc.vsx.lxvp(i8* [[TMP1]])
-// CHECK-NEXT: [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 128
+// CHECK-NEXT: [[TMP3:%.*]] = getelementptr <256 x i1>, <256 x i1>* [[VP2:%.*]], i64 1024
 // CHECK-NEXT: [[TMP4:%.*]] = bitcast <256 x i1>* [[TMP3]] to i8*
 // CHECK-NEXT: tail call void @llvm.ppc.vsx.stxvp(<256 x i1> [[TMP2]], i8* [[TMP4]])
 // CHECK-NEXT: ret void
diff --git a/llvm/lib/IR/DataLayout.cpp b/llvm/lib/IR/DataLayout.cpp
--- a/llvm/lib/IR/DataLayout.cpp
+++ b/llvm/lib/IR/DataLayout.cpp
@@ -800,15 +800,11 @@
     // By default, use natural alignment for vector types. This is consistent
     // with what clang and llvm-gcc do.
-    // TODO: This should probably not be using the alloc size.
-    unsigned Alignment =
-        getTypeAllocSize(cast<VectorType>(Ty)->getElementType());
+    //
     // We're only calculating a natural alignment, so it doesn't have to be
     // based on the full size for scalable vectors. Using the minimum element
    // count should be enough here.
-    Alignment *= cast<VectorType>(Ty)->getElementCount().getKnownMinValue();
-    Alignment = PowerOf2Ceil(Alignment);
-    return Align(Alignment);
+    return Align(PowerOf2Ceil(getTypeStoreSize(Ty).getKnownMinSize()));
   }
   case Type::X86_AMXTyID:
     return Align(64);
diff --git a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
--- a/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
+++ b/llvm/lib/Target/AArch64/AArch64SVEInstrInfo.td
@@ -2120,9 +2120,6 @@
   }

   defm Pat_Store_P16 : unpred_store_predicate<nxv16i1, STR_PXI>;
-  defm Pat_Store_P8  : unpred_store_predicate<nxv8i1, STR_PXI>;
-  defm Pat_Store_P4  : unpred_store_predicate<nxv4i1, STR_PXI>;
-  defm Pat_Store_P2  : unpred_store_predicate<nxv2i1, STR_PXI>;

   multiclass unpred_load_predicate<ValueType Ty, Instruction Load> {
     def _fi : Pat<(Ty (load (am_sve_fi GPR64sp:$base, simm9:$offset))),
@@ -2133,9 +2130,6 @@
   }

   defm Pat_Load_P16 : unpred_load_predicate<nxv16i1, LDR_PXI>;
-  defm Pat_Load_P8  : unpred_load_predicate<nxv8i1, LDR_PXI>;
-  defm Pat_Load_P4  : unpred_load_predicate<nxv4i1, LDR_PXI>;
-  defm Pat_Load_P2  : unpred_load_predicate<nxv2i1, LDR_PXI>;

   multiclass ld1 {
diff --git a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
--- a/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
+++ b/llvm/test/CodeGen/AArch64/GlobalISel/ret-vec-promote.ll
@@ -7,7 +7,7 @@
   ; CHECK: bb.1 (%ir-block.0):
   ; CHECK: liveins: $x0
   ; CHECK: [[COPY:%[0-9]+]]:_(p0) = COPY $x0
-  ; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s1>) = G_LOAD [[COPY]](p0) :: (load (<4 x s1>) from %ir.v, align 4)
+  ; CHECK: [[LOAD:%[0-9]+]]:_(<4 x s1>) = G_LOAD [[COPY]](p0) :: (load (<4 x s1>) from %ir.v)
   ; CHECK: [[ANYEXT:%[0-9]+]]:_(<4 x s16>) = G_ANYEXT [[LOAD]](<4 x s1>)
   ; CHECK: $d0 = COPY [[ANYEXT]](<4 x s16>)
   ; CHECK: RET_ReallyLR implicit $d0
diff --git a/llvm/test/CodeGen/AArch64/spillfill-sve.ll b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
--- a/llvm/test/CodeGen/AArch64/spillfill-sve.ll
+++ b/llvm/test/CodeGen/AArch64/spillfill-sve.ll
@@ -404,8 +404,8 @@ define void @fill_nxv16i1() {
 ; CHECK-LABEL: fill_nxv16i1
-; CHECK-DAG: ldr p{{[01]}}, [sp, #8, mul vl]
-; CHECK-DAG: ldr p{{[01]}}, [sp]
+; CHECK-DAG: ldr p{{[01]}}, [sp, #7, mul vl]
+; CHECK-DAG: ldr p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
   load volatile <vscale x 16 x i1>, <vscale x 16 x i1>* %local0
@@ -413,81 +413,15 @@
   ret void
 }

-define void @fill_nxv8i1() {
-; CHECK-LABEL: fill_nxv8i1
-; CHECK-DAG: ldr p{{[01]}}, [sp, #4, mul vl]
-; CHECK-DAG: ldr p{{[01]}}, [sp]
-  %local0 = alloca <vscale x 8 x i1>
-  %local1 = alloca <vscale x 8 x i1>
-  load volatile <vscale x 8 x i1>, <vscale x 8 x i1>* %local0
-  load volatile <vscale x 8 x i1>, <vscale x 8 x i1>* %local1
-  ret void
-}
-
-define void @fill_nxv4i1() {
-; CHECK-LABEL: fill_nxv4i1
-; CHECK-DAG: ldr p{{[01]}}, [sp, #6, mul vl]
-; CHECK-DAG: ldr p{{[01]}}, [sp, #4, mul vl]
-  %local0 = alloca <vscale x 4 x i1>
-  %local1 = alloca <vscale x 4 x i1>
-  load volatile <vscale x 4 x i1>, <vscale x 4 x i1>* %local0
-  load volatile <vscale x 4 x i1>, <vscale x 4 x i1>* %local1
-  ret void
-}
-
-define void @fill_nxv2i1() {
-; CHECK-LABEL: fill_nxv2i1
-; CHECK-DAG: ldr p{{[01]}}, [sp, #7, mul vl]
-; CHECK-DAG: ldr p{{[01]}}, [sp, #6, mul vl]
-  %local0 = alloca <vscale x 2 x i1>
-  %local1 = alloca <vscale x 2 x i1>
-  load volatile <vscale x 2 x i1>, <vscale x 2 x i1>* %local0
-  load volatile <vscale x 2 x i1>, <vscale x 2 x i1>* %local1
-  ret void
-}
-
 ; Predicate spills

 define void @spill_nxv16i1(<vscale x 16 x i1> %v0, <vscale x 16 x i1> %v1) {
 ; CHECK-LABEL: spill_nxv16i1
-; CHECK-DAG: str p{{[01]}}, [sp, #8, mul vl]
-; CHECK-DAG: str p{{[01]}}, [sp]
+; CHECK-DAG: str p{{[01]}}, [sp, #7, mul vl]
+; CHECK-DAG: str p{{[01]}}, [sp, #6, mul vl]
   %local0 = alloca <vscale x 16 x i1>
   %local1 = alloca <vscale x 16 x i1>
   store volatile <vscale x 16 x i1> %v0, <vscale x 16 x i1>* %local0
   store volatile <vscale x 16 x i1> %v1, <vscale x 16 x i1>* %local1
   ret void
 }
-
-define void @spill_nxv8i1(<vscale x 8 x i1> %v0, <vscale x 8 x i1> %v1) {
-; CHECK-LABEL: spill_nxv8i1
-; CHECK-DAG: str p{{[01]}}, [sp, #4, mul vl]
-; CHECK-DAG: str p{{[01]}}, [sp]
-  %local0 = alloca <vscale x 8 x i1>
-  %local1 = alloca <vscale x 8 x i1>
-  store volatile <vscale x 8 x i1> %v0, <vscale x 8 x i1>* %local0
-  store volatile <vscale x 8 x i1> %v1, <vscale x 8 x i1>* %local1
-  ret void
-}
-
-define void @spill_nxv4i1(<vscale x 4 x i1> %v0, <vscale x 4 x i1> %v1) {
-; CHECK-LABEL: spill_nxv4i1
-; CHECK-DAG: str p{{[01]}}, [sp, #6, mul vl]
-; CHECK-DAG: str p{{[01]}}, [sp, #4, mul vl]
-  %local0 = alloca <vscale x 4 x i1>
-  %local1 = alloca <vscale x 4 x i1>
-  store volatile <vscale x 4 x i1> %v0, <vscale x 4 x i1>* %local0
-  store volatile <vscale x 4 x i1> %v1, <vscale x 4 x i1>* %local1
-  ret void
-}
-
-define void @spill_nxv2i1(<vscale x 2 x i1> %v0, <vscale x 2 x i1> %v1) {
-; CHECK-LABEL: spill_nxv2i1
-; CHECK-DAG: str p{{[01]}}, [sp, #7, mul vl]
-; CHECK-DAG: str p{{[01]}}, [sp, #6, mul vl]
-  %local0 = alloca <vscale x 2 x i1>
-  %local1 = alloca <vscale x 2 x i1>
-  store volatile <vscale x 2 x i1> %v0, <vscale x 2 x i1>* %local0
-  store volatile <vscale x 2 x i1> %v1, <vscale x 2 x i1>* %local1
-  ret void
-}
diff --git a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
--- a/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
+++ b/llvm/test/CodeGen/AArch64/sve-calling-convention-byref.ll
@@ -43,22 +43,22 @@
 ; p3 = %p3
 ; x0 = &%p4
 ; x1 = &%p5
-define aarch64_sve_vector_pcs <vscale x 4 x i1> @callee_with_many_svepred_arg(<vscale x 4 x i1> %p0, <vscale x 4 x i1> %p1, <vscale x 4 x i1> %p2, <vscale x 4 x i1> %p3, <vscale x 4 x i1> %p4, <vscale x 4 x i1> %p5) {
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @callee_with_many_svepred_arg(<vscale x 16 x i1> %p0, <vscale x 16 x i1> %p1, <vscale x 16 x i1> %p2, <vscale x 16 x i1> %p3, <vscale x 16 x i1> %p4, <vscale x 16 x i1> %p5) {
 ; CHECK: name: callee_with_many_svepred_arg
 ; CHECK-DAG: [[BASE:%[0-9]+]]:gpr64common = COPY $x1
 ; CHECK-DAG: [[RES:%[0-9]+]]:ppr = LDR_PXI [[BASE]], 0
 ; CHECK-DAG: $p0 = COPY [[RES]]
 ; CHECK: RET_ReallyLR implicit $p0
-  ret <vscale x 4 x i1> %p5
+  ret <vscale x 16 x i1> %p5
 }

 ; Test that p4 and p5 are passed by reference.
-define aarch64_sve_vector_pcs <vscale x 4 x i1> @caller_with_many_svepred_arg(<vscale x 4 x i1> %p) {
+define aarch64_sve_vector_pcs <vscale x 16 x i1> @caller_with_many_svepred_arg(<vscale x 16 x i1> %p) {
 ; CHECK: name: caller_with_many_svepred_arg
 ; CHECK: stack:
-; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 1, alignment: 4,
+; CHECK: - { id: 0, name: '', type: default, offset: 0, size: 2, alignment: 2,
 ; CHECK-NEXT: stack-id: scalable-vector
-; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 1, alignment: 4,
+; CHECK: - { id: 1, name: '', type: default, offset: 0, size: 2, alignment: 2,
 ; CHECK-NEXT: stack-id: scalable-vector
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.0, 0
 ; CHECK-DAG: STR_PXI %{{[0-9]+}}, %stack.1, 0
@@ -68,8 +68,8 @@
 ; CHECK-DAG: $x1 = COPY [[BASE2]]
 ; CHECK-NEXT: BL @callee_with_many_svepred_arg
 ; CHECK: RET_ReallyLR implicit $p0
-  %ret = call aarch64_sve_vector_pcs <vscale x 4 x i1> @callee_with_many_svepred_arg(<vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p, <vscale x 4 x i1> %p)
-  ret <vscale x 4 x i1> %ret
+  %ret = call aarch64_sve_vector_pcs <vscale x 16 x i1> @callee_with_many_svepred_arg(<vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p, <vscale x 16 x i1> %p)
+  ret <vscale x 16 x i1> %ret
 }

 ; Test that z8 and z9, passed by reference, are loaded from a location that is passed on the stack.
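To make the DataLayout.cpp change above concrete: the natural alignment of a vector type is now derived from its store size rather than from element count times element alloc size. The following is a minimal standalone sketch, plain C++ with invented helper names rather than LLVM API, written only to illustrate the two formulas quoted in the hunk above.

#include <cstdint>
#include <cstdio>

// Smallest power of two >= V (V > 0); mirrors what llvm::PowerOf2Ceil computes.
static uint64_t powerOf2Ceil(uint64_t V) {
  uint64_t P = 1;
  while (P < V)
    P <<= 1;
  return P;
}

// Old rule: PowerOf2Ceil(min element count * alloc size of the element).
// For <vscale x N x i1> the element alloc size is 1 byte, so N i1s cost N bytes.
static uint64_t oldNaturalAlign(uint64_t MinElts, uint64_t EltAllocBytes) {
  return powerOf2Ceil(MinElts * EltAllocBytes);
}

// New rule: PowerOf2Ceil(min store size in bytes), i.e.
// PowerOf2Ceil(ceil(N bits / 8)) for an <vscale x N x i1> predicate.
static uint64_t newNaturalAlign(uint64_t MinBits) {
  return powerOf2Ceil((MinBits + 7) / 8);
}

int main() {
  // <vscale x 16 x i1> (svbool_t): old 16, new 2. This matches the CHECK-128
  // "align 16" -> "align 2" updates in the clang tests and the predicate spill
  // slots in spillfill-sve.ll packing down to adjacent "mul vl" offsets.
  printf("nxv16i1: old %llu new %llu\n",
         (unsigned long long)oldNaturalAlign(16, 1),
         (unsigned long long)newNaturalAlign(16));
  // <256 x i1> (PowerPC register pair): old 256, new 32. The alloc size drops
  // from 256 to 32 bytes, which is why the getelementptr indices in
  // builtins-ppc-pair-mma.c change from 128 to 1024 for the same byte offset.
  printf("v256i1: old %llu new %llu\n",
         (unsigned long long)oldNaturalAlign(256, 1),
         (unsigned long long)newNaturalAlign(256));
  return 0;
}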
diff --git a/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll b/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
--- a/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
+++ b/llvm/test/CodeGen/AMDGPU/lower-kernargs.ll
@@ -913,33 +913,35 @@
 define amdgpu_kernel void @kern_realign_i1_v3i1(i1 %arg0, <3 x i1> %arg1) #0 {
 ; HSA-LABEL: @kern_realign_i1_v3i1(
-; HSA-NEXT: [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(8) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
+; HSA-NEXT: [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(4) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
 ; HSA-NEXT: [[ARG0_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 0
 ; HSA-NEXT: [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
 ; HSA-NEXT: [[TMP1:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 16, !invariant.load !0
 ; HSA-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i1
-; HSA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 4
+; HSA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 0
 ; HSA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
-; HSA-NEXT: [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
-; HSA-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i3
-; HSA-NEXT: [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP4]] to <3 x i1>
+; HSA-NEXT: [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 16, !invariant.load !0
+; HSA-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 8
+; HSA-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i3
+; HSA-NEXT: [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP5]] to <3 x i1>
 ; HSA-NEXT: store volatile i1 [[TMP2]], i1 addrspace(1)* undef, align 1
-; HSA-NEXT: store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 4
+; HSA-NEXT: store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 1
 ; HSA-NEXT: ret void
 ;
 ; MESA-LABEL: @kern_realign_i1_v3i1(
-; MESA-NEXT: [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(44) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
+; MESA-NEXT: [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT:%.*]] = call nonnull align 16 dereferenceable(40) i8 addrspace(4)* @llvm.amdgcn.kernarg.segment.ptr()
 ; MESA-NEXT: [[ARG0_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 36
 ; MESA-NEXT: [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
 ; MESA-NEXT: [[TMP1:%.*]] = load i32, i32 addrspace(4)* [[ARG0_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
 ; MESA-NEXT: [[TMP2:%.*]] = trunc i32 [[TMP1]] to i1
-; MESA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 40
+; MESA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN:%.*]] = getelementptr inbounds i8, i8 addrspace(4)* [[KERN_REALIGN_I1_V3I1_KERNARG_SEGMENT]], i64 36
 ; MESA-NEXT: [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST:%.*]] = bitcast i8 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN]] to i32 addrspace(4)*
-; MESA-NEXT: [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 8, !invariant.load !0
-; MESA-NEXT: [[TMP4:%.*]] = trunc i32 [[TMP3]] to i3
-; MESA-NEXT: [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP4]] to <3 x i1>
+; MESA-NEXT: [[TMP3:%.*]] = load i32, i32 addrspace(4)* [[ARG1_KERNARG_OFFSET_ALIGN_DOWN_CAST]], align 4, !invariant.load !0
+; MESA-NEXT: [[TMP4:%.*]] = lshr i32 [[TMP3]], 8
+; MESA-NEXT: [[TMP5:%.*]] = trunc i32 [[TMP4]] to i3
+; MESA-NEXT: [[ARG1_LOAD:%.*]] = bitcast i3 [[TMP5]] to <3 x i1>
 ; MESA-NEXT: store volatile i1 [[TMP2]], i1 addrspace(1)* undef, align 1
-; MESA-NEXT: store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 4
+; MESA-NEXT: store volatile <3 x i1> [[ARG1_LOAD]], <3 x i1> addrspace(1)* undef, align 1
 ; MESA-NEXT: ret void
 ;
 store volatile i1 %arg0, i1 addrspace(1)* undef
diff --git a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
--- a/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
+++ b/llvm/test/CodeGen/NVPTX/f16x2-instructions.ll
@@ -479,8 +479,9 @@
 ; CHECK-NOF16-DAG: setp.neu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.neu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_une(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp une <2 x half> %a, %b
@@ -500,8 +501,9 @@
 ; CHECK-NOF16-DAG: setp.equ.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.equ.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ueq(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ueq <2 x half> %a, %b
@@ -521,8 +523,9 @@
 ; CHECK-NOF16-DAG: setp.gtu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.gtu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ugt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ugt <2 x half> %a, %b
@@ -542,8 +545,9 @@
 ; CHECK-NOF16-DAG: setp.geu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.geu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_uge(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp uge <2 x half> %a, %b
@@ -563,8 +567,9 @@
 ; CHECK-NOF16-DAG: setp.ltu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.ltu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ult(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ult <2 x half> %a, %b
@@ -584,8 +589,9 @@
 ; CHECK-NOF16-DAG: setp.leu.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.leu.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ule(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ule <2 x half> %a, %b
@@ -606,8 +612,9 @@
 ; CHECK-NOF16-DAG: setp.nan.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.nan.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_uno(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp uno <2 x half> %a, %b
@@ -627,8 +634,9 @@
 ; CHECK-NOF16-DAG: setp.ne.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.ne.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_one(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp one <2 x half> %a, %b
@@ -648,8 +656,9 @@
 ; CHECK-NOF16-DAG: setp.eq.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.eq.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_oeq(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp oeq <2 x half> %a, %b
@@ -669,8 +678,9 @@
 ; CHECK-NOF16-DAG: setp.gt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.gt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ogt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ogt <2 x half> %a, %b
@@ -690,8 +700,9 @@
 ; CHECK-NOF16-DAG: setp.ge.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.ge.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_oge(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp oge <2 x half> %a, %b
@@ -711,8 +722,9 @@
 ; CHECK-NOF16-DAG: setp.lt.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.lt.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_olt(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp olt <2 x half> %a, %b
@@ -732,8 +744,9 @@
 ; CHECK-NOF16-DAG: setp.le.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.le.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ole(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ole <2 x half> %a, %b
@@ -753,8 +766,9 @@
 ; CHECK-NOF16-DAG: setp.num.f32 [[P0:%p[0-9]+]], [[FA0]], [[FB0]]
 ; CHECK-NOF16-DAG: setp.num.f32 [[P1:%p[0-9]+]], [[FA1]], [[FB1]]
 ; CHECK-DAG: selp.u16 [[R0:%rs[0-9]+]], -1, 0, [[P0]];
+; CHECK-NEXT: st.param.b8 [func_retval0+0], [[R0]];
 ; CHECK-DAG: selp.u16 [[R1:%rs[0-9]+]], -1, 0, [[P1]];
-; CHECK-NEXT: st.param.v2.b8 [func_retval0+0], {[[R0]], [[R1]]};
+; CHECK-NEXT: st.param.b8 [func_retval0+1], [[R1]];
 ; CHECK-NEXT: ret;
 define <2 x i1> @test_fcmp_ord(<2 x half> %a, <2 x half> %b) #0 {
   %r = fcmp ord <2 x half> %a, %b
diff --git a/llvm/test/CodeGen/NVPTX/param-load-store.ll b/llvm/test/CodeGen/NVPTX/param-load-store.ll
--- a/llvm/test/CodeGen/NVPTX/param-load-store.ll
+++ b/llvm/test/CodeGen/NVPTX/param-load-store.ll
@@ -66,20 +66,20 @@
 }

 ; Make sure that i1 loads are vectorized as i8 loads, respecting each element alignment.
-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v3i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v3i1_param_0[4]
+; CHECK-NEXT: .param .align 1 .b8 test_v3i1_param_0[1]
 ; CHECK-DAG: ld.param.u8 [[E2:%rs[0-9]+]], [test_v3i1_param_0+2];
-; CHECK-DAG: ld.param.v2.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]]}, [test_v3i1_param_0]
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK-DAG: st.param.v2.b8 [param0+0], {[[E0]], [[E1]]};
+; CHECK-DAG: ld.param.u8 [[E0:%rs[0-9]+]], [test_v3i1_param_0]
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK-DAG: st.param.b8 [param0+0], [[E0]];
 ; CHECK-DAG: st.param.b8 [param0+2], [[E2]];
-; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v3i1,
-; CHECK-DAG: ld.param.v2.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
 ; CHECK-DAG: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
-; CHECK-DAG: st.param.v2.b8 [func_retval0+0], {[[RE0]], [[RE1]]}
+; CHECK-DAG: st.param.b8 [func_retval0+0], [[RE0]]
 ; CHECK-DAG: st.param.b8 [func_retval0+2], [[RE2]];
 ; CHECK-NEXT: ret;
 define <3 x i1> @test_v3i1(<3 x i1> %a) {
@@ -87,37 +87,43 @@
   ret <3 x i1> %r;
 }

-; CHECK: .func (.param .align 4 .b8 func_retval0[4])
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v4i1(
-; CHECK-NEXT: .param .align 4 .b8 test_v4i1_param_0[4]
-; CHECK: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v4i1_param_0]
-; CHECK: .param .align 4 .b8 param0[4];
-; CHECK: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
-; CHECK: .param .align 4 .b8 retval0[4];
+; CHECK-NEXT: .param .align 1 .b8 test_v4i1_param_0[1]
+; CHECK: ld.param.u8 [[E0:%rs[0-9]+]], [test_v4i1_param_0]
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK: st.param.b8 [param0+0], [[E0]];
+; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK: test_v4i1,
-; CHECK: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
-; CHECK: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]};
+; CHECK: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
+; CHECK: ld.param.b8 [[RE1:%rs[0-9]+]], [retval0+1];
+; CHECK: ld.param.b8 [[RE2:%rs[0-9]+]], [retval0+2];
+; CHECK: ld.param.b8 [[RE3:%rs[0-9]+]], [retval0+3];
+; CHECK: st.param.b8 [func_retval0+0], [[RE0]];
+; CHECK: st.param.b8 [func_retval0+1], [[RE1]];
+; CHECK: st.param.b8 [func_retval0+2], [[RE2]];
+; CHECK: st.param.b8 [func_retval0+3], [[RE3]];
 ; CHECK-NEXT: ret;
 define <4 x i1> @test_v4i1(<4 x i1> %a) {
   %r = tail call <4 x i1> @test_v4i1(<4 x i1> %a);
   ret <4 x i1> %r;
 }

-; CHECK: .func (.param .align 8 .b8 func_retval0[8])
+; CHECK: .func (.param .align 1 .b8 func_retval0[1])
 ; CHECK-LABEL: test_v5i1(
-; CHECK-NEXT: .param .align 8 .b8 test_v5i1_param_0[8]
+; CHECK-NEXT: .param .align 1 .b8 test_v5i1_param_0[1]
 ; CHECK-DAG: ld.param.u8 [[E4:%rs[0-9]+]], [test_v5i1_param_0+4];
-; CHECK-DAG: ld.param.v4.u8 {[[E0:%rs[0-9]+]], [[E1:%rs[0-9]+]], [[E2:%rs[0-9]+]], [[E3:%rs[0-9]+]]}, [test_v5i1_param_0]
-; CHECK: .param .align 8 .b8 param0[8];
-; CHECK-DAG: st.param.v4.b8 [param0+0], {[[E0]], [[E1]], [[E2]], [[E3]]};
+; CHECK-DAG: ld.param.u8 [[E0:%rs[0-9]+]], [test_v5i1_param_0]
+; CHECK: .param .align 1 .b8 param0[1];
+; CHECK-DAG: st.param.b8 [param0+0], [[E0]];
 ; CHECK-DAG: st.param.b8 [param0+4], [[E4]];
-; CHECK: .param .align 8 .b8 retval0[8];
+; CHECK: .param .align 1 .b8 retval0[1];
 ; CHECK: call.uni (retval0),
 ; CHECK-NEXT: test_v5i1,
-; CHECK-DAG: ld.param.v4.b8 {[[RE0:%rs[0-9]+]], [[RE1:%rs[0-9]+]], [[RE2:%rs[0-9]+]], [[RE3:%rs[0-9]+]]}, [retval0+0];
+; CHECK-DAG: ld.param.b8 [[RE0:%rs[0-9]+]], [retval0+0];
 ; CHECK-DAG: ld.param.b8 [[RE4:%rs[0-9]+]], [retval0+4];
-; CHECK-DAG: st.param.v4.b8 [func_retval0+0], {[[RE0]], [[RE1]], [[RE2]], [[RE3]]}
+; CHECK-DAG: st.param.b8 [func_retval0+0], [[RE0]]
 ; CHECK-DAG: st.param.b8 [func_retval0+4], [[RE4]];
 ; CHECK-NEXT: ret;
 define <5 x i1> @test_v5i1(<5 x i1> %a) {
diff --git a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
--- a/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
+++ b/llvm/test/CodeGen/RISCV/rvv/fixed-vectors-unaligned.ll
@@ -84,9 +84,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a0, sp, 14
+; RV32-NEXT: addi a0, sp, 15
 ; RV32-NEXT: vse1.v v25, (a0)
-; RV32-NEXT: lbu a0, 14(sp)
+; RV32-NEXT: lbu a0, 15(sp)
 ; RV32-NEXT: andi a1, a0, 1
 ; RV32-NEXT: beqz a1, .LBB4_2
 ; RV32-NEXT: # %bb.1: # %cond.load
@@ -131,9 +131,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a0, sp, 14
+; RV64-NEXT: addi a0, sp, 15
 ; RV64-NEXT: vse1.v v25, (a0)
-; RV64-NEXT: lbu a0, 14(sp)
+; RV64-NEXT: lbu a0, 15(sp)
 ; RV64-NEXT: andi a1, a0, 1
 ; RV64-NEXT: beqz a1, .LBB4_2
 ; RV64-NEXT: # %bb.1: # %cond.load
@@ -184,9 +184,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a0, sp, 14
+; RV32-NEXT: addi a0, sp, 15
 ; RV32-NEXT: vse1.v v25, (a0)
-; RV32-NEXT: lbu a0, 14(sp)
+; RV32-NEXT: lbu a0, 15(sp)
 ; RV32-NEXT: andi a1, a0, 1
 ; RV32-NEXT: vsetivli zero, 2, e32, m1, ta, mu
 ; RV32-NEXT: vmv.v.i v25, 0
@@ -231,9 +231,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a0, sp, 14
+; RV64-NEXT: addi a0, sp, 15
 ; RV64-NEXT: vse1.v v25, (a0)
-; RV64-NEXT: lbu a0, 14(sp)
+; RV64-NEXT: lbu a0, 15(sp)
 ; RV64-NEXT: andi a1, a0, 1
 ; RV64-NEXT: beqz a1, .LBB5_2
 ; RV64-NEXT: # %bb.1: # %cond.load
@@ -284,9 +284,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a0, sp, 12
+; RV32-NEXT: addi a0, sp, 15
 ; RV32-NEXT: vse1.v v25, (a0)
-; RV32-NEXT: lbu a0, 12(sp)
+; RV32-NEXT: lbu a0, 15(sp)
 ; RV32-NEXT: andi a1, a0, 1
 ; RV32-NEXT: bnez a1, .LBB6_5
 ; RV32-NEXT: # %bb.1: # %else
@@ -361,9 +361,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a0, sp, 12
+; RV64-NEXT: addi a0, sp, 15
 ; RV64-NEXT: vse1.v v25, (a0)
-; RV64-NEXT: lbu a0, 12(sp)
+; RV64-NEXT: lbu a0, 15(sp)
 ; RV64-NEXT: andi a1, a0, 1
 ; RV64-NEXT: bnez a1, .LBB6_5
 ; RV64-NEXT: # %bb.1: # %else
@@ -444,9 +444,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a0, sp, 14
+; RV32-NEXT: addi a0, sp, 15
 ; RV32-NEXT: vse1.v v25, (a0)
-; RV32-NEXT: lbu a0, 14(sp)
+; RV32-NEXT: lbu a0, 15(sp)
 ; RV32-NEXT: andi a1, a0, 1
 ; RV32-NEXT: bnez a1, .LBB7_3
 ; RV32-NEXT: # %bb.1: # %else
@@ -489,9 +489,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a0, sp, 14
+; RV64-NEXT: addi a0, sp, 15
 ; RV64-NEXT: vse1.v v25, (a0)
-; RV64-NEXT: lbu a0, 14(sp)
+; RV64-NEXT: lbu a0, 15(sp)
 ; RV64-NEXT: andi a1, a0, 1
 ; RV64-NEXT: bnez a1, .LBB7_3
 ; RV64-NEXT: # %bb.1: # %else
@@ -543,9 +543,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a2, sp, 14
+; RV32-NEXT: addi a2, sp, 15
 ; RV32-NEXT: vse1.v v25, (a2)
-; RV32-NEXT: lbu a2, 14(sp)
+; RV32-NEXT: lbu a2, 15(sp)
 ; RV32-NEXT: andi a3, a2, 1
 ; RV32-NEXT: beqz a3, .LBB8_2
 ; RV32-NEXT: # %bb.1: # %cond.load
@@ -603,9 +603,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a2, sp, 14
+; RV64-NEXT: addi a2, sp, 15
 ; RV64-NEXT: vse1.v v25, (a2)
-; RV64-NEXT: lbu a2, 14(sp)
+; RV64-NEXT: lbu a2, 15(sp)
 ; RV64-NEXT: andi a3, a2, 1
 ; RV64-NEXT: beqz a3, .LBB8_2
 ; RV64-NEXT: # %bb.1: # %cond.load
@@ -671,9 +671,9 @@
 ; RV32-NEXT: vslideup.vi v26, v25, 0
 ; RV32-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV32-NEXT: vmsne.vi v25, v26, 0
-; RV32-NEXT: addi a1, sp, 14
+; RV32-NEXT: addi a1, sp, 15
 ; RV32-NEXT: vse1.v v25, (a1)
-; RV32-NEXT: lbu a1, 14(sp)
+; RV32-NEXT: lbu a1, 15(sp)
 ; RV32-NEXT: andi a2, a1, 1
 ; RV32-NEXT: bnez a2, .LBB9_3
 ; RV32-NEXT: # %bb.1: # %else
@@ -714,9 +714,9 @@
 ; RV64-NEXT: vslideup.vi v26, v25, 0
 ; RV64-NEXT: vsetivli zero, 8, e8, mf2, ta, mu
 ; RV64-NEXT: vmsne.vi v25, v26, 0
-; RV64-NEXT: addi a1, sp, 14
+; RV64-NEXT: addi a1, sp, 15
 ; RV64-NEXT: vse1.v v25, (a1)
-; RV64-NEXT: lbu a1, 14(sp)
+; RV64-NEXT: lbu a1, 15(sp)
 ; RV64-NEXT: andi a2, a1, 1
 ; RV64-NEXT: bnez a2, .LBB9_3
 ; RV64-NEXT: # %bb.1: # %else
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
--- a/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-ldst.ll
@@ -820,8 +820,8 @@
 ; CHECK-LE: @ %bb.0: @ %entry
 ; CHECK-LE-NEXT: .save {r7, lr}
 ; CHECK-LE-NEXT: push {r7, lr}
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vldrh.s32 q0, [r1]
 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
 ; CHECK-LE-NEXT: @ implicit-def: $q0
@@ -888,7 +888,7 @@
 ; CHECK-LE-NEXT: itt mi
 ; CHECK-LE-NEXT: vmovmi r1, s7
 ; CHECK-LE-NEXT: strmi r1, [r0, #12]
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: pop {r7, pc}
 ; CHECK-LE-NEXT: .LBB18_6: @ %cond.load
 ; CHECK-LE-NEXT: vldr.16 s0, [r2]
@@ -911,8 +911,8 @@
 ; CHECK-BE: @ %bb.0: @ %entry
 ; CHECK-BE-NEXT: .save {r7, lr}
 ; CHECK-BE-NEXT: push {r7, lr}
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: vldrh.s32 q0, [r1]
 ; CHECK-BE-NEXT: vcmp.s32 gt, q0, zr
 ; CHECK-BE-NEXT: @ implicit-def: $q0
@@ -979,7 +979,7 @@
 ; CHECK-BE-NEXT: itt ne
 ; CHECK-BE-NEXT: vmovne r1, s7
 ; CHECK-BE-NEXT: strne r1, [r0, #12]
-; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: add sp, #4
 ; CHECK-BE-NEXT: pop {r7, pc}
 ; CHECK-BE-NEXT: .LBB18_6: @ %cond.load
 ; CHECK-BE-NEXT: vldr.16 s0, [r2]
@@ -1011,8 +1011,8 @@
 ; CHECK-LE: @ %bb.0: @ %entry
 ; CHECK-LE-NEXT: .save {r7, lr}
 ; CHECK-LE-NEXT: push {r7, lr}
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vldrh.s32 q0, [r1]
 ; CHECK-LE-NEXT: vcmp.s32 gt, q0, zr
 ; CHECK-LE-NEXT: @ implicit-def: $q0
@@ -1079,7 +1079,7 @@
 ; CHECK-LE-NEXT: itt mi
 ; CHECK-LE-NEXT: vmovmi r1, s7
 ; CHECK-LE-NEXT: strmi r1, [r0, #12]
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: pop {r7, pc}
 ; CHECK-LE-NEXT: .LBB19_6: @ %cond.load
 ; CHECK-LE-NEXT: vldr.16 s0, [r2]
@@ -1102,8 +1102,8 @@
 ; CHECK-BE: @ %bb.0: @ %entry
 ; CHECK-BE-NEXT: .save {r7, lr}
 ; CHECK-BE-NEXT: push {r7, lr}
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: vldrh.s32 q0, [r1]
 ; CHECK-BE-NEXT: vcmp.s32 gt, q0, zr
 ; CHECK-BE-NEXT: @ implicit-def: $q0
@@ -1170,7 +1170,7 @@
 ; CHECK-BE-NEXT: itt ne
 ; CHECK-BE-NEXT: vmovne r1, s7
 ; CHECK-BE-NEXT: strne r1, [r0, #12]
-; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: add sp, #4
 ; CHECK-BE-NEXT: pop {r7, pc}
 ; CHECK-BE-NEXT: .LBB19_6: @ %cond.load
 ; CHECK-BE-NEXT: vldr.16 s0, [r2]
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
--- a/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-load.ll
@@ -557,8 +557,8 @@
 define arm_aapcs_vfpcc <8 x i16> @masked_v8i16_align1_undef(<8 x i16> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1_undef:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT: @ implicit-def: $q0
 ; CHECK-LE-NEXT: vmrs r1, p0
@@ -620,13 +620,13 @@
 ; CHECK-LE-NEXT: itt mi
 ; CHECK-LE-NEXT: ldrhmi r0, [r0, #14]
 ; CHECK-LE-NEXT: vmovmi.16 q0[7], r0
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8i16_align1_undef:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: vrev64.16 q1, q0
 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT: @ implicit-def: $q1
@@ -690,7 +690,7 @@
 ; CHECK-BE-NEXT: ldrhne r0, [r0, #14]
 ; CHECK-BE-NEXT: vmovne.16 q1[7], r0
 ; CHECK-BE-NEXT: vrev64.16 q0, q1
-; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: add sp, #4
 ; CHECK-BE-NEXT: bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
@@ -1433,8 +1433,8 @@
 define arm_aapcs_vfpcc <8 x half> @masked_v8f16_align1_undef(<8 x half> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8f16_align1_undef:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #40
-; CHECK-LE-NEXT: sub sp, #40
+; CHECK-LE-NEXT: .pad #36
+; CHECK-LE-NEXT: sub sp, #36
 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT: @ implicit-def: $q0
 ; CHECK-LE-NEXT: vmrs r1, p0
@@ -1488,7 +1488,7 @@
 ; CHECK-LE-NEXT: lsls r1, r1, #24
 ; CHECK-LE-NEXT: bmi .LBB45_16
 ; CHECK-LE-NEXT: .LBB45_8: @ %else20
-; CHECK-LE-NEXT: add sp, #40
+; CHECK-LE-NEXT: add sp, #36
 ; CHECK-LE-NEXT: bx lr
 ; CHECK-LE-NEXT: .LBB45_9: @ %cond.load
 ; CHECK-LE-NEXT: ldrh r2, [r0]
@@ -1546,13 +1546,13 @@
 ; CHECK-LE-NEXT: strh.w r0, [sp]
 ; CHECK-LE-NEXT: vldr.16 s4, [sp]
 ; CHECK-LE-NEXT: vins.f16 s3, s4
-; CHECK-LE-NEXT: add sp, #40
+; CHECK-LE-NEXT: add sp, #36
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8f16_align1_undef:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #40
-; CHECK-BE-NEXT: sub sp, #40
+; CHECK-BE-NEXT: .pad #36
+; CHECK-BE-NEXT: sub sp, #36
 ; CHECK-BE-NEXT: vrev64.16 q1, q0
 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT: @ implicit-def: $q1
@@ -1613,7 +1613,7 @@
 ; CHECK-BE-NEXT: vins.f16 s7, s0
 ; CHECK-BE-NEXT: .LBB45_9: @ %else20
 ; CHECK-BE-NEXT: vrev64.16 q0, q1
-; CHECK-BE-NEXT: add sp, #40
+; CHECK-BE-NEXT: add sp, #36
 ; CHECK-BE-NEXT: bx lr
 ; CHECK-BE-NEXT: .LBB45_10: @ %cond.load
 ; CHECK-BE-NEXT: ldrh r2, [r0]
diff --git a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
--- a/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-masked-store.ll
@@ -185,8 +185,8 @@
 define arm_aapcs_vfpcc void @masked_v8i16_align1(<8 x i16> *%dest, <8 x i16> %a) {
 ; CHECK-LE-LABEL: masked_v8i16_align1:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vcmp.s16 gt, q0, zr
 ; CHECK-LE-NEXT: vmrs r1, p0
 ; CHECK-LE-NEXT: and r2, r1, #1
@@ -247,13 +247,13 @@
 ; CHECK-LE-NEXT: itt mi
 ; CHECK-LE-NEXT: vmovmi.u16 r1, q0[7]
 ; CHECK-LE-NEXT: strhmi r1, [r0, #14]
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8i16_align1:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: vrev64.16 q1, q0
 ; CHECK-BE-NEXT: vcmp.s16 gt, q1, zr
 ; CHECK-BE-NEXT: vmrs r1, p0
@@ -315,7 +315,7 @@
 ; CHECK-BE-NEXT: itt ne
 ; CHECK-BE-NEXT: vmovne.u16 r1, q1[7]
 ; CHECK-BE-NEXT: strhne r1, [r0, #14]
-; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: add sp, #4
 ; CHECK-BE-NEXT: bx lr
 entry:
   %c = icmp sgt <8 x i16> %a, zeroinitializer
@@ -646,8 +646,8 @@
 define arm_aapcs_vfpcc void @masked_v8f16_align1(<8 x half> *%dest, <8 x half> %a, <8 x i16> %b) {
 ; CHECK-LE-LABEL: masked_v8f16_align1:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #40
-; CHECK-LE-NEXT: sub sp, #40
+; CHECK-LE-NEXT: .pad #36
+; CHECK-LE-NEXT: sub sp, #36
 ; CHECK-LE-NEXT: vcmp.i16 ne, q1, zr
 ; CHECK-LE-NEXT: movs r2, #0
 ; CHECK-LE-NEXT: vmrs r1, p0
@@ -700,7 +700,7 @@
 ; CHECK-LE-NEXT: lsls r1, r1, #24
 ; CHECK-LE-NEXT: bmi .LBB16_16
 ; CHECK-LE-NEXT: .LBB16_8: @ %else14
-; CHECK-LE-NEXT: add sp, #40
+; CHECK-LE-NEXT: add sp, #36
 ; CHECK-LE-NEXT: bx lr
 ; CHECK-LE-NEXT: .LBB16_9: @ %cond.store
 ; CHECK-LE-NEXT: vstr.16 s0, [sp, #28]
@@ -752,13 +752,13 @@
 ; CHECK-LE-NEXT: vstr.16 s0, [sp]
 ; CHECK-LE-NEXT: ldrh.w r1, [sp]
 ; CHECK-LE-NEXT: strh r1, [r0, #14]
-; CHECK-LE-NEXT: add sp, #40
+; CHECK-LE-NEXT: add sp, #36
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: masked_v8f16_align1:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #40
-; CHECK-BE-NEXT: sub sp, #40
+; CHECK-BE-NEXT: .pad #36
+; CHECK-BE-NEXT: sub sp, #36
 ; CHECK-BE-NEXT: vrev64.16 q2, q1
 ; CHECK-BE-NEXT: vrev64.16 q1, q0
 ; CHECK-BE-NEXT: vcmp.i16 ne, q2, zr
@@ -813,7 +813,7 @@
 ; CHECK-BE-NEXT: lsls r1, r1, #31
 ; CHECK-BE-NEXT: bne .LBB16_16
 ; CHECK-BE-NEXT: .LBB16_8: @ %else14
-; CHECK-BE-NEXT: add sp, #40
+; CHECK-BE-NEXT: add sp, #36
 ; CHECK-BE-NEXT: bx lr
 ; CHECK-BE-NEXT: .LBB16_9: @ %cond.store
 ; CHECK-BE-NEXT: vstr.16 s4, [sp, #28]
@@ -865,7 +865,7 @@
 ; CHECK-BE-NEXT: vstr.16 s0, [sp]
 ; CHECK-BE-NEXT: ldrh.w r1, [sp]
 ; CHECK-BE-NEXT: strh r1, [r0, #14]
-; CHECK-BE-NEXT: add sp, #40
+; CHECK-BE-NEXT: add sp, #36
 ; CHECK-BE-NEXT: bx lr
 entry:
   %c = icmp ugt <8 x i16> %b, zeroinitializer
diff --git a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
--- a/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
+++ b/llvm/test/CodeGen/Thumb2/mve-pred-bitcast.ll
@@ -56,8 +56,8 @@
 define arm_aapcs_vfpcc <8 x i16> @bitcast_to_v8i1(i8 %b, <8 x i16> %a) {
 ; CHECK-LE-LABEL: bitcast_to_v8i1:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: uxtb r0, r0
 ; CHECK-LE-NEXT: vmov.i8 q1, #0x0
 ; CHECK-LE-NEXT: vmov.i8 q2, #0xff
@@ -82,13 +82,13 @@
 ; CHECK-LE-NEXT: vcmp.i16 ne, q1, zr
 ; CHECK-LE-NEXT: vmov.i32 q1, #0x0
 ; CHECK-LE-NEXT: vpsel q0, q0, q1
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_to_v8i1:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: uxtb r0, r0
 ; CHECK-BE-NEXT: vmov.i8 q1, #0x0
 ; CHECK-BE-NEXT: rbit r0, r0
@@ -118,7 +118,7 @@
 ; CHECK-BE-NEXT: vrev32.16 q0, q0
 ; CHECK-BE-NEXT: vpsel q1, q1, q0
 ; CHECK-BE-NEXT: vrev64.16 q0, q1
-; CHECK-BE-NEXT: add sp, #8
+; CHECK-BE-NEXT: add sp, #4
 ; CHECK-BE-NEXT: bx lr
 entry:
   %c = bitcast i8 %b to <8 x i1>
@@ -129,45 +129,29 @@
 define arm_aapcs_vfpcc <16 x i8> @bitcast_to_v16i1(i16 %b, <16 x i8> %a) {
 ; CHECK-LE-LABEL: bitcast_to_v16i1:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .save {r4, r6, r7, lr}
-; CHECK-LE-NEXT: push {r4, r6, r7, lr}
-; CHECK-LE-NEXT: .setfp r7, sp, #8
-; CHECK-LE-NEXT: add r7, sp, #8
-; CHECK-LE-NEXT: .pad #16
-; CHECK-LE-NEXT: sub sp, #16
-; CHECK-LE-NEXT: mov r4, sp
-; CHECK-LE-NEXT: bfc r4, #0, #4
-; CHECK-LE-NEXT: mov sp, r4
-; CHECK-LE-NEXT: sub.w r4, r7, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vmsr p0, r0
 ; CHECK-LE-NEXT: vmov.i32 q1, #0x0
 ; CHECK-LE-NEXT: vpsel q0, q0, q1
-; CHECK-LE-NEXT: mov sp, r4
-; CHECK-LE-NEXT: pop {r4, r6, r7, pc}
+; CHECK-LE-NEXT: add sp, #4
+; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_to_v16i1:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .save {r4, r6, r7, lr}
-; CHECK-BE-NEXT: push {r4, r6, r7, lr}
-; CHECK-BE-NEXT: .setfp r7, sp, #8
-; CHECK-BE-NEXT: add r7, sp, #8
-; CHECK-BE-NEXT: .pad #16
-; CHECK-BE-NEXT: sub sp, #16
-; CHECK-BE-NEXT: mov r4, sp
-; CHECK-BE-NEXT: bfc r4, #0, #4
-; CHECK-BE-NEXT: mov sp, r4
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: uxth r0, r0
 ; CHECK-BE-NEXT: vrev64.8 q1, q0
 ; CHECK-BE-NEXT: rbit r0, r0
 ; CHECK-BE-NEXT: vmov.i32 q0, #0x0
-; CHECK-BE-NEXT: sub.w r4, r7, #8
 ; CHECK-BE-NEXT: vrev32.8 q0, q0
 ; CHECK-BE-NEXT: lsrs r0, r0, #16
 ; CHECK-BE-NEXT: vmsr p0, r0
 ; CHECK-BE-NEXT: vpsel q1, q1, q0
 ; CHECK-BE-NEXT: vrev64.8 q0, q1
-; CHECK-BE-NEXT: mov sp, r4
-; CHECK-BE-NEXT: pop {r4, r6, r7, pc}
+; CHECK-BE-NEXT: add sp, #4
+; CHECK-BE-NEXT: bx lr
 entry:
   %c = bitcast i16 %b to <16 x i1>
   %s = select <16 x i1> %c, <16 x i8> %a, <16 x i8> zeroinitializer
@@ -266,8 +250,8 @@
 define arm_aapcs_vfpcc i8 @bitcast_from_v8i1(<8 x i16> %a) {
 ; CHECK-LE-LABEL: bitcast_from_v8i1:
 ; CHECK-LE: @ %bb.0: @ %entry
-; CHECK-LE-NEXT: .pad #8
-; CHECK-LE-NEXT: sub sp, #8
+; CHECK-LE-NEXT: .pad #4
+; CHECK-LE-NEXT: sub sp, #4
 ; CHECK-LE-NEXT: vcmp.i16 eq, q0, zr
 ; CHECK-LE-NEXT: vmrs r1, p0
 ; CHECK-LE-NEXT: and r0, r1, #1
@@ -296,13 +280,13 @@
 ; CHECK-LE-NEXT: rsbs r1, r1, #0
 ; CHECK-LE-NEXT: bfi r0, r1, #7, #1
 ; CHECK-LE-NEXT: uxtb r0, r0
-; CHECK-LE-NEXT: add sp, #8
+; CHECK-LE-NEXT: add sp, #4
 ; CHECK-LE-NEXT: bx lr
 ;
 ; CHECK-BE-LABEL: bitcast_from_v8i1:
 ; CHECK-BE: @ %bb.0: @ %entry
-; CHECK-BE-NEXT: .pad #8
-; CHECK-BE-NEXT: sub sp, #8
+; CHECK-BE-NEXT: .pad #4
+; CHECK-BE-NEXT: sub sp, #4
 ; CHECK-BE-NEXT: vrev64.16 q1, q0
 ; CHECK-BE-NEXT: vcmp.i16 eq, q1, zr
 ; CHECK-BE-NEXT:
vmrs r1, p0 @@ -332,7 +316,7 @@ ; CHECK-BE-NEXT: rsbs r1, r1, #0 ; CHECK-BE-NEXT: bfi r0, r1, #7, #1 ; CHECK-BE-NEXT: uxtb r0, r0 -; CHECK-BE-NEXT: add sp, #8 +; CHECK-BE-NEXT: add sp, #4 ; CHECK-BE-NEXT: bx lr entry: %c = icmp eq <8 x i16> %a, zeroinitializer @@ -343,41 +327,25 @@ define arm_aapcs_vfpcc i16 @bitcast_from_v16i1(<16 x i8> %a) { ; CHECK-LE-LABEL: bitcast_from_v16i1: ; CHECK-LE: @ %bb.0: @ %entry -; CHECK-LE-NEXT: .save {r4, r6, r7, lr} -; CHECK-LE-NEXT: push {r4, r6, r7, lr} -; CHECK-LE-NEXT: .setfp r7, sp, #8 -; CHECK-LE-NEXT: add r7, sp, #8 -; CHECK-LE-NEXT: .pad #16 -; CHECK-LE-NEXT: sub sp, #16 -; CHECK-LE-NEXT: mov r4, sp -; CHECK-LE-NEXT: bfc r4, #0, #4 -; CHECK-LE-NEXT: mov sp, r4 +; CHECK-LE-NEXT: .pad #4 +; CHECK-LE-NEXT: sub sp, #4 ; CHECK-LE-NEXT: vcmp.i8 eq, q0, zr -; CHECK-LE-NEXT: sub.w r4, r7, #8 ; CHECK-LE-NEXT: vmrs r0, p0 ; CHECK-LE-NEXT: uxth r0, r0 -; CHECK-LE-NEXT: mov sp, r4 -; CHECK-LE-NEXT: pop {r4, r6, r7, pc} +; CHECK-LE-NEXT: add sp, #4 +; CHECK-LE-NEXT: bx lr ; ; CHECK-BE-LABEL: bitcast_from_v16i1: ; CHECK-BE: @ %bb.0: @ %entry -; CHECK-BE-NEXT: .save {r4, r6, r7, lr} -; CHECK-BE-NEXT: push {r4, r6, r7, lr} -; CHECK-BE-NEXT: .setfp r7, sp, #8 -; CHECK-BE-NEXT: add r7, sp, #8 -; CHECK-BE-NEXT: .pad #16 -; CHECK-BE-NEXT: sub sp, #16 -; CHECK-BE-NEXT: mov r4, sp -; CHECK-BE-NEXT: bfc r4, #0, #4 -; CHECK-BE-NEXT: mov sp, r4 +; CHECK-BE-NEXT: .pad #4 +; CHECK-BE-NEXT: sub sp, #4 ; CHECK-BE-NEXT: vrev64.8 q1, q0 -; CHECK-BE-NEXT: sub.w r4, r7, #8 ; CHECK-BE-NEXT: vcmp.i8 eq, q1, zr ; CHECK-BE-NEXT: vmrs r0, p0 ; CHECK-BE-NEXT: rbit r0, r0 ; CHECK-BE-NEXT: lsrs r0, r0, #16 -; CHECK-BE-NEXT: mov sp, r4 -; CHECK-BE-NEXT: pop {r4, r6, r7, pc} +; CHECK-BE-NEXT: add sp, #4 +; CHECK-BE-NEXT: bx lr entry: %c = icmp eq <16 x i8> %a, zeroinitializer %b = bitcast <16 x i1> %c to i16 diff --git a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll --- a/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll +++ b/llvm/test/CodeGen/X86/avx512-extract-subvector-load-store.ll @@ -18,7 +18,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1 ; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 ; AVX512NOTDQ-NEXT: vmovdqa64 %xmm2, %xmm2 {%k1} {z} @@ -47,7 +48,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1 ; AVX512NOTDQ-NEXT: vpcmpeqd %xmm2, %xmm2, %xmm2 ; AVX512NOTDQ-NEXT: vmovdqa64 %xmm2, %xmm2 {%k1} {z} @@ -678,7 +680,8 @@ ; ; AVX512NOTDQ-LABEL: load_v2i1_broadcast_1_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $1, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftlw $15, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0 @@ -770,7 +773,8 @@ ; ; AVX512NOTDQ-LABEL: load_v4i1_broadcast_2_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $2, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftlw $15, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0 @@ -794,7 +798,8 @@ ; ; AVX512NOTDQ-LABEL: 
load_v4i1_broadcast_3_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $3, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftlw $15, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0 @@ -818,7 +823,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftlw $15, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0 @@ -843,7 +849,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_4_v2i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $4, %k0, %k1 ; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 ; AVX512NOTDQ-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} @@ -869,7 +876,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v1i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $7, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftlw $15, %k0, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $15, %k0, %k0 @@ -894,7 +902,8 @@ ; ; AVX512NOTDQ-LABEL: load_v8i1_broadcast_7_v2i1_store: ; AVX512NOTDQ: # %bb.0: -; AVX512NOTDQ-NEXT: kmovw (%rdi), %k0 +; AVX512NOTDQ-NEXT: movzbl (%rdi), %eax +; AVX512NOTDQ-NEXT: kmovd %eax, %k0 ; AVX512NOTDQ-NEXT: kshiftrw $6, %k0, %k1 ; AVX512NOTDQ-NEXT: vpcmpeqd %xmm0, %xmm0, %xmm0 ; AVX512NOTDQ-NEXT: vmovdqa64 %xmm0, %xmm0 {%k1} {z} diff --git a/llvm/test/CodeGen/X86/avx512-mask-op.ll b/llvm/test/CodeGen/X86/avx512-mask-op.ll --- a/llvm/test/CodeGen/X86/avx512-mask-op.ll +++ b/llvm/test/CodeGen/X86/avx512-mask-op.ll @@ -459,13 +459,13 @@ ; ; X86-LABEL: conv1: ; X86: ## %bb.0: ## %entry -; X86-NEXT: subl $12, %esp -; X86-NEXT: .cfi_def_cfa_offset 16 +; X86-NEXT: pushl %eax +; X86-NEXT: .cfi_def_cfa_offset 8 ; X86-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-NEXT: movb $-1, (%eax) -; X86-NEXT: movb $-2, (%esp) +; X86-NEXT: movb $-2, {{[0-9]+}}(%esp) ; X86-NEXT: movb $-2, %al -; X86-NEXT: addl $12, %esp +; X86-NEXT: popl %ecx ; X86-NEXT: retl entry: store <8 x i1> , <8 x i1>* %R @@ -2291,7 +2291,8 @@ define <8 x i64> @load_8i1(<8 x i1>* %a) { ; KNL-LABEL: load_8i1: ; KNL: ## %bb.0: -; KNL-NEXT: kmovw (%rdi), %k1 +; KNL-NEXT: movzbl (%rdi), %eax +; KNL-NEXT: kmovw %eax, %k1 ; KNL-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: retq ; @@ -2303,7 +2304,8 @@ ; ; AVX512BW-LABEL: load_8i1: ; AVX512BW: ## %bb.0: -; AVX512BW-NEXT: kmovw (%rdi), %k1 +; AVX512BW-NEXT: movzbl (%rdi), %eax +; AVX512BW-NEXT: kmovd %eax, %k1 ; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; AVX512BW-NEXT: retq ; @@ -2363,7 +2365,8 @@ define <2 x i16> @load_2i1(<2 x i1>* %a) { ; KNL-LABEL: load_2i1: ; KNL: ## %bb.0: -; KNL-NEXT: kmovw (%rdi), %k1 +; KNL-NEXT: movzbl (%rdi), %eax +; KNL-NEXT: kmovw %eax, %k1 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vpmovdw %zmm0, %ymm0 ; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 @@ -2378,7 +2381,8 @@ ; ; AVX512BW-LABEL: load_2i1: ; AVX512BW: ## %bb.0: -; AVX512BW-NEXT: kmovw (%rdi), %k0 +; AVX512BW-NEXT: movzbl (%rdi), %eax +; AVX512BW-NEXT: kmovd %eax, %k0 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0 ; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; 
AVX512BW-NEXT: vzeroupper @@ -2407,7 +2411,8 @@ define <4 x i16> @load_4i1(<4 x i1>* %a) { ; KNL-LABEL: load_4i1: ; KNL: ## %bb.0: -; KNL-NEXT: kmovw (%rdi), %k1 +; KNL-NEXT: movzbl (%rdi), %eax +; KNL-NEXT: kmovw %eax, %k1 ; KNL-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z} ; KNL-NEXT: vpmovdw %zmm0, %ymm0 ; KNL-NEXT: ## kill: def $xmm0 killed $xmm0 killed $ymm0 @@ -2422,7 +2427,8 @@ ; ; AVX512BW-LABEL: load_4i1: ; AVX512BW: ## %bb.0: -; AVX512BW-NEXT: kmovw (%rdi), %k0 +; AVX512BW-NEXT: movzbl (%rdi), %eax +; AVX512BW-NEXT: kmovd %eax, %k0 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0 ; AVX512BW-NEXT: ## kill: def $xmm0 killed $xmm0 killed $zmm0 ; AVX512BW-NEXT: vzeroupper diff --git a/llvm/test/CodeGen/X86/avx512-select.ll b/llvm/test/CodeGen/X86/avx512-select.ll --- a/llvm/test/CodeGen/X86/avx512-select.ll +++ b/llvm/test/CodeGen/X86/avx512-select.ll @@ -153,8 +153,10 @@ ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-AVX512F-NEXT: kmovw (%ecx), %k0 -; X86-AVX512F-NEXT: kmovw (%eax), %k1 +; X86-AVX512F-NEXT: movzbl (%ecx), %ecx +; X86-AVX512F-NEXT: kmovw %ecx, %k0 +; X86-AVX512F-NEXT: movzbl (%eax), %eax +; X86-AVX512F-NEXT: kmovw %eax, %k1 ; X86-AVX512F-NEXT: korw %k1, %k0, %k0 ; X86-AVX512F-NEXT: kmovw %k0, %eax ; X86-AVX512F-NEXT: # kill: def $al killed $al killed $eax @@ -162,8 +164,10 @@ ; ; X64-AVX512F-LABEL: select05_mem: ; X64-AVX512F: # %bb.0: -; X64-AVX512F-NEXT: kmovw (%rsi), %k0 -; X64-AVX512F-NEXT: kmovw (%rdi), %k1 +; X64-AVX512F-NEXT: movzbl (%rsi), %eax +; X64-AVX512F-NEXT: kmovw %eax, %k0 +; X64-AVX512F-NEXT: movzbl (%rdi), %eax +; X64-AVX512F-NEXT: kmovw %eax, %k1 ; X64-AVX512F-NEXT: korw %k1, %k0, %k0 ; X64-AVX512F-NEXT: kmovw %k0, %eax ; X64-AVX512F-NEXT: # kill: def $al killed $al killed $eax @@ -173,8 +177,10 @@ ; X86-AVX512BW: # %bb.0: ; X86-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-AVX512BW-NEXT: kmovw (%ecx), %k0 -; X86-AVX512BW-NEXT: kmovw (%eax), %k1 +; X86-AVX512BW-NEXT: movzbl (%ecx), %ecx +; X86-AVX512BW-NEXT: kmovd %ecx, %k0 +; X86-AVX512BW-NEXT: movzbl (%eax), %eax +; X86-AVX512BW-NEXT: kmovd %eax, %k1 ; X86-AVX512BW-NEXT: korw %k1, %k0, %k0 ; X86-AVX512BW-NEXT: kmovd %k0, %eax ; X86-AVX512BW-NEXT: # kill: def $al killed $al killed $eax @@ -182,8 +188,10 @@ ; ; X64-AVX512BW-LABEL: select05_mem: ; X64-AVX512BW: # %bb.0: -; X64-AVX512BW-NEXT: kmovw (%rsi), %k0 -; X64-AVX512BW-NEXT: kmovw (%rdi), %k1 +; X64-AVX512BW-NEXT: movzbl (%rsi), %eax +; X64-AVX512BW-NEXT: kmovd %eax, %k0 +; X64-AVX512BW-NEXT: movzbl (%rdi), %eax +; X64-AVX512BW-NEXT: kmovd %eax, %k1 ; X64-AVX512BW-NEXT: korw %k1, %k0, %k0 ; X64-AVX512BW-NEXT: kmovd %k0, %eax ; X64-AVX512BW-NEXT: # kill: def $al killed $al killed $eax @@ -220,8 +228,10 @@ ; X86-AVX512F: # %bb.0: ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %eax ; X86-AVX512F-NEXT: movl {{[0-9]+}}(%esp), %ecx -; X86-AVX512F-NEXT: kmovw (%ecx), %k0 -; X86-AVX512F-NEXT: kmovw (%eax), %k1 +; X86-AVX512F-NEXT: movzbl (%ecx), %ecx +; X86-AVX512F-NEXT: kmovw %ecx, %k0 +; X86-AVX512F-NEXT: movzbl (%eax), %eax +; X86-AVX512F-NEXT: kmovw %eax, %k1 ; X86-AVX512F-NEXT: kandw %k1, %k0, %k0 ; X86-AVX512F-NEXT: kmovw %k0, %eax ; X86-AVX512F-NEXT: # kill: def $al killed $al killed $eax @@ -229,8 +239,10 @@ ; ; X64-AVX512F-LABEL: select06_mem: ; X64-AVX512F: # %bb.0: -; X64-AVX512F-NEXT: kmovw (%rsi), %k0 -; X64-AVX512F-NEXT: kmovw (%rdi), %k1 +; X64-AVX512F-NEXT: movzbl (%rsi), %eax +; X64-AVX512F-NEXT: kmovw %eax, %k0 
+; X64-AVX512F-NEXT: movzbl (%rdi), %eax
+; X64-AVX512F-NEXT: kmovw %eax, %k1
 ; X64-AVX512F-NEXT: kandw %k1, %k0, %k0
 ; X64-AVX512F-NEXT: kmovw %k0, %eax
 ; X64-AVX512F-NEXT: # kill: def $al killed $al killed $eax
@@ -240,8 +252,10 @@
 ; X86-AVX512BW: # %bb.0:
 ; X86-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %eax
 ; X86-AVX512BW-NEXT: movl {{[0-9]+}}(%esp), %ecx
-; X86-AVX512BW-NEXT: kmovw (%ecx), %k0
-; X86-AVX512BW-NEXT: kmovw (%eax), %k1
+; X86-AVX512BW-NEXT: movzbl (%ecx), %ecx
+; X86-AVX512BW-NEXT: kmovd %ecx, %k0
+; X86-AVX512BW-NEXT: movzbl (%eax), %eax
+; X86-AVX512BW-NEXT: kmovd %eax, %k1
 ; X86-AVX512BW-NEXT: kandw %k1, %k0, %k0
 ; X86-AVX512BW-NEXT: kmovd %k0, %eax
 ; X86-AVX512BW-NEXT: # kill: def $al killed $al killed $eax
@@ -249,8 +263,10 @@
 ;
 ; X64-AVX512BW-LABEL: select06_mem:
 ; X64-AVX512BW: # %bb.0:
-; X64-AVX512BW-NEXT: kmovw (%rsi), %k0
-; X64-AVX512BW-NEXT: kmovw (%rdi), %k1
+; X64-AVX512BW-NEXT: movzbl (%rsi), %eax
+; X64-AVX512BW-NEXT: kmovd %eax, %k0
+; X64-AVX512BW-NEXT: movzbl (%rdi), %eax
+; X64-AVX512BW-NEXT: kmovd %eax, %k1
 ; X64-AVX512BW-NEXT: kandw %k1, %k0, %k0
 ; X64-AVX512BW-NEXT: kmovd %k0, %eax
 ; X64-AVX512BW-NEXT: # kill: def $al killed $al killed $eax
diff --git a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
--- a/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
+++ b/llvm/test/CodeGen/X86/bitcast-vector-bool.ll
@@ -478,19 +478,13 @@
 ;
 ; AVX512-LABEL: bitcast_v32i16_to_v2i16:
 ; AVX512: # %bb.0:
-; AVX512-NEXT: pushq %rbp
-; AVX512-NEXT: movq %rsp, %rbp
-; AVX512-NEXT: andq $-32, %rsp
-; AVX512-NEXT: subq $32, %rsp
 ; AVX512-NEXT: vpmovw2m %zmm0, %k0
-; AVX512-NEXT: kmovd %k0, (%rsp)
-; AVX512-NEXT: vmovdqa (%rsp), %xmm0
+; AVX512-NEXT: kmovd %k0, -{{[0-9]+}}(%rsp)
+; AVX512-NEXT: vmovdqa -{{[0-9]+}}(%rsp), %xmm0
 ; AVX512-NEXT: vmovd %xmm0, %ecx
 ; AVX512-NEXT: vpextrw $1, %xmm0, %eax
 ; AVX512-NEXT: addl %ecx, %eax
 ; AVX512-NEXT: # kill: def $ax killed $ax killed $eax
-; AVX512-NEXT: movq %rbp, %rsp
-; AVX512-NEXT: popq %rbp
 ; AVX512-NEXT: vzeroupper
 ; AVX512-NEXT: retq
   %1 = icmp slt <32 x i16> %a0, zeroinitializer
diff --git a/llvm/test/CodeGen/X86/load-local-v3i129.ll b/llvm/test/CodeGen/X86/load-local-v3i129.ll
--- a/llvm/test/CodeGen/X86/load-local-v3i129.ll
+++ b/llvm/test/CodeGen/X86/load-local-v3i129.ll
@@ -4,8 +4,6 @@
 define void @_start() {
 ; CHECK-LABEL: _start:
 ; CHECK: # %bb.0: # %Entry
-; CHECK-NEXT: pushq %rax
-; CHECK-NEXT: .cfi_def_cfa_offset 16
 ; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rax
 ; CHECK-NEXT: movq -{{[0-9]+}}(%rsp), %rcx
 ; CHECK-NEXT: shrdq $2, %rcx, %rax
@@ -16,8 +14,6 @@
 ; CHECK-NEXT: movq %rax, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: orq $-2, -{{[0-9]+}}(%rsp)
 ; CHECK-NEXT: movq $-1, -{{[0-9]+}}(%rsp)
-; CHECK-NEXT: popq %rax
-; CHECK-NEXT: .cfi_def_cfa_offset 8
 ; CHECK-NEXT: retq
 Entry:
   %y = alloca <3 x i129>, align 4
diff --git a/llvm/test/CodeGen/X86/pr41619.ll b/llvm/test/CodeGen/X86/pr41619.ll
--- a/llvm/test/CodeGen/X86/pr41619.ll
+++ b/llvm/test/CodeGen/X86/pr41619.ll
@@ -1,6 +1,6 @@
 ; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx2 | FileCheck %s --check-prefixes=CHECK,AVX
-; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK,AVX512
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx2 | FileCheck %s --check-prefixes=CHECK
+; RUN: llc < %s -mtriple=x86_64-apple-macosx10.14.0 -mattr=avx512bw | FileCheck %s --check-prefixes=CHECK
 
 define void @foo(double %arg) {
 ; CHECK-LABEL: foo:
@@ -29,23 +29,11 @@
 
 ; This used to crash with mask registers on avx512bw targets.
 define i32 @bar(double %blah) nounwind {
-; AVX-LABEL: bar:
-; AVX: ## %bb.0:
-; AVX-NEXT: pushq %rbp
-; AVX-NEXT: movq %rsp, %rbp
-; AVX-NEXT: andq $-32, %rsp
-; AVX-NEXT: subq $32, %rsp
-; AVX-NEXT: vmovq %xmm0, %rax
-; AVX-NEXT: ## kill: def $eax killed $eax killed $rax
-; AVX-NEXT: movq %rbp, %rsp
-; AVX-NEXT: popq %rbp
-; AVX-NEXT: retq
-;
-; AVX512-LABEL: bar:
-; AVX512: ## %bb.0:
-; AVX512-NEXT: vmovq %xmm0, %rax
-; AVX512-NEXT: ## kill: def $eax killed $eax killed $rax
-; AVX512-NEXT: retq
+; CHECK-LABEL: bar:
+; CHECK: ## %bb.0:
+; CHECK-NEXT: vmovq %xmm0, %rax
+; CHECK-NEXT: ## kill: def $eax killed $eax killed $rax
+; CHECK-NEXT: retq
   %z = bitcast double %blah to i64
   %y = trunc i64 %z to i32
   %a = bitcast i32 %y to <32 x i1>
diff --git a/llvm/test/CodeGen/X86/vector-sext.ll b/llvm/test/CodeGen/X86/vector-sext.ll
--- a/llvm/test/CodeGen/X86/vector-sext.ll
+++ b/llvm/test/CodeGen/X86/vector-sext.ll
@@ -1382,13 +1382,23 @@
 ; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0]
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: load_sext_2i1_to_2i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_2i1_to_2i64:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: movzbl (%rdi), %eax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_2i1_to_2i64:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
 ;
 ; X86-SSE2-LABEL: load_sext_2i1_to_2i64:
 ; X86-SSE2: # %bb.0: # %entry
@@ -1619,13 +1629,23 @@
 ; AVX2-NEXT: vpinsrd $3, %eax, %xmm0, %xmm0
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: load_sext_4i1_to_4i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
-; AVX512-NEXT: vzeroupper
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_4i1_to_4i32:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: movzbl (%rdi), %eax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512F-NEXT: vzeroupper
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_4i1_to_4i32:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
+; AVX512BW-NEXT: vzeroupper
+; AVX512BW-NEXT: retq
 ;
 ; X86-SSE2-LABEL: load_sext_4i1_to_4i32:
 ; X86-SSE2: # %bb.0: # %entry
@@ -1882,12 +1902,21 @@
 ; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: load_sext_4i1_to_4i64:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_4i1_to_4i64:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: movzbl (%rdi), %eax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_4i1_to_4i64:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogq $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
 ;
 ; X86-SSE2-LABEL: load_sext_4i1_to_4i64:
 ; X86-SSE2: # %bb.0: # %entry
@@ -2105,7 +2134,8 @@
 define <8 x i16> @load_sext_8i1_to_8i16(<8 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_8i1_to_8i16:
 ; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movzbl (%rdi), %eax
+; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2115,7 +2145,8 @@
 ;
 ; AVX1-LABEL: load_sext_8i1_to_8i16:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: movzbl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2133,7 +2164,8 @@
 ;
 ; AVX512F-LABEL: load_sext_8i1_to_8i16:
 ; AVX512F: # %bb.0: # %entry
-; AVX512F-NEXT: kmovw (%rdi), %k1
+; AVX512F-NEXT: movzbl (%rdi), %eax
+; AVX512F-NEXT: kmovw %eax, %k1
 ; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
 ; AVX512F-NEXT: vpmovdw %zmm0, %ymm0
 ; AVX512F-NEXT: # kill: def $xmm0 killed $xmm0 killed $ymm0
@@ -2142,7 +2174,8 @@
 ;
 ; AVX512BW-LABEL: load_sext_8i1_to_8i16:
 ; AVX512BW: # %bb.0: # %entry
-; AVX512BW-NEXT: kmovw (%rdi), %k0
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k0
 ; AVX512BW-NEXT: vpmovm2w %k0, %zmm0
 ; AVX512BW-NEXT: # kill: def $xmm0 killed $xmm0 killed $zmm0
 ; AVX512BW-NEXT: vzeroupper
@@ -2151,7 +2184,8 @@
 ; X86-SSE-LABEL: load_sext_8i1_to_8i16:
 ; X86-SSE: # %bb.0: # %entry
 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movzbl (%eax), %eax
+; X86-SSE-NEXT: movd %eax, %xmm0
 ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128]
@@ -2316,7 +2350,8 @@
 define <8 x i32> @load_sext_8i1_to_8i32(<8 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_8i1_to_8i32:
 ; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movzbl (%rdi), %eax
+; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
 ; SSE-NEXT: movdqa %xmm1, %xmm0
@@ -2329,8 +2364,9 @@
 ;
 ; AVX1-LABEL: load_sext_8i1_to_8i32:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovss {{.*#+}} xmm0 = mem[0],zero,zero,zero
-; AVX1-NEXT: vpermilps {{.*#+}} xmm0 = xmm0[0,0,0,0]
+; AVX1-NEXT: movzbl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
+; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
 ; AVX1-NEXT: vandps {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %ymm0, %ymm0
 ; AVX1-NEXT: vpcmpeqd {{\.?LCPI[0-9]+_[0-9]+}}(%rip), %xmm0, %xmm1
@@ -2347,17 +2383,27 @@
 ; AVX2-NEXT: vpcmpeqd %ymm1, %ymm0, %ymm0
 ; AVX2-NEXT: retq
 ;
-; AVX512-LABEL: load_sext_8i1_to_8i32:
-; AVX512: # %bb.0: # %entry
-; AVX512-NEXT: kmovw (%rdi), %k1
-; AVX512-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
-; AVX512-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
-; AVX512-NEXT: retq
+; AVX512F-LABEL: load_sext_8i1_to_8i32:
+; AVX512F: # %bb.0: # %entry
+; AVX512F-NEXT: movzbl (%rdi), %eax
+; AVX512F-NEXT: kmovw %eax, %k1
+; AVX512F-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512F-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512F-NEXT: retq
+;
+; AVX512BW-LABEL: load_sext_8i1_to_8i32:
+; AVX512BW: # %bb.0: # %entry
+; AVX512BW-NEXT: movzbl (%rdi), %eax
+; AVX512BW-NEXT: kmovd %eax, %k1
+; AVX512BW-NEXT: vpternlogd $255, %zmm0, %zmm0, %zmm0 {%k1} {z}
+; AVX512BW-NEXT: # kill: def $ymm0 killed $ymm0 killed $zmm0
+; AVX512BW-NEXT: retq
 ;
 ; X86-SSE-LABEL: load_sext_8i1_to_8i32:
 ; X86-SSE: # %bb.0: # %entry
 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movzbl (%eax), %eax
+; X86-SSE-NEXT: movd %eax, %xmm0
 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8]
 ; X86-SSE-NEXT: movdqa %xmm1, %xmm0
@@ -2443,7 +2489,8 @@
 define <16 x i8> @load_sext_16i1_to_16i8(<16 x i1> *%ptr) nounwind readnone {
 ; SSE2-LABEL: load_sext_16i1_to_16i8:
 ; SSE2: # %bb.0: # %entry
-; SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE2-NEXT: movzwl (%rdi), %eax
+; SSE2-NEXT: movd %eax, %xmm0
 ; SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
 ; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2454,7 +2501,8 @@
 ;
 ; SSSE3-LABEL: load_sext_16i1_to_16i8:
 ; SSSE3: # %bb.0: # %entry
-; SSSE3-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSSE3-NEXT: movzwl (%rdi), %eax
+; SSSE3-NEXT: movd %eax, %xmm0
 ; SSSE3-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; SSSE3-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; SSSE3-NEXT: pand %xmm1, %xmm0
@@ -2463,7 +2511,8 @@
 ;
 ; SSE41-LABEL: load_sext_16i1_to_16i8:
 ; SSE41: # %bb.0: # %entry
-; SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE41-NEXT: movzwl (%rdi), %eax
+; SSE41-NEXT: movd %eax, %xmm0
 ; SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; SSE41-NEXT: pand %xmm1, %xmm0
@@ -2472,7 +2521,8 @@
 ;
 ; AVX1-LABEL: load_sext_16i1_to_16i8:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: movzwl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; AVX1-NEXT: vmovddup {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX1-NEXT: # xmm1 = mem[0,0]
@@ -2482,7 +2532,8 @@
 ;
 ; AVX2-LABEL: load_sext_16i1_to_16i8:
 ; AVX2: # %bb.0: # %entry
-; AVX2-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX2-NEXT: movzwl (%rdi), %eax
+; AVX2-NEXT: vmovd %eax, %xmm0
 ; AVX2-NEXT: vpshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; AVX2-NEXT: vpbroadcastq {{.*#+}} xmm1 = [9241421688590303745,9241421688590303745]
 ; AVX2-NEXT: vpand %xmm1, %xmm0, %xmm0
@@ -2508,7 +2559,8 @@
 ; X86-SSE2-LABEL: load_sext_16i1_to_16i8:
 ; X86-SSE2: # %bb.0: # %entry
 ; X86-SSE2-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE2-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE2-NEXT: movzwl (%eax), %eax
+; X86-SSE2-NEXT: movd %eax, %xmm0
 ; X86-SSE2-NEXT: punpcklbw {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3,4,4,5,5,6,6,7,7]
 ; X86-SSE2-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,1,1,4,5,6,7]
 ; X86-SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1]
@@ -2520,7 +2572,8 @@
 ; X86-SSE41-LABEL: load_sext_16i1_to_16i8:
 ; X86-SSE41: # %bb.0: # %entry
 ; X86-SSE41-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE41-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE41-NEXT: movzwl (%eax), %eax
+; X86-SSE41-NEXT: movd %eax, %xmm0
 ; X86-SSE41-NEXT: pshufb {{.*#+}} xmm0 = xmm0[0,0,0,0,0,0,0,0,1,1,1,1,1,1,1,1]
 ; X86-SSE41-NEXT: movdqa {{.*#+}} xmm1 = [1,2,4,8,16,32,64,128,1,2,4,8,16,32,64,128]
 ; X86-SSE41-NEXT: pand %xmm1, %xmm0
@@ -2535,7 +2588,8 @@
 define <16 x i16> @load_sext_16i1_to_16i16(<16 x i1> *%ptr) {
 ; SSE-LABEL: load_sext_16i1_to_16i16:
 ; SSE: # %bb.0: # %entry
-; SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; SSE-NEXT: movzwl (%rdi), %eax
+; SSE-NEXT: movd %eax, %xmm0
 ; SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
@@ -2549,7 +2603,8 @@
 ;
 ; AVX1-LABEL: load_sext_16i1_to_16i16:
 ; AVX1: # %bb.0: # %entry
-; AVX1-NEXT: vmovd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; AVX1-NEXT: movzwl (%rdi), %eax
+; AVX1-NEXT: vmovd %eax, %xmm0
 ; AVX1-NEXT: vpshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[0,0,0,0]
 ; AVX1-NEXT: vinsertf128 $1, %xmm0, %ymm0, %ymm0
@@ -2585,7 +2640,8 @@
 ; X86-SSE-LABEL: load_sext_16i1_to_16i16:
 ; X86-SSE: # %bb.0: # %entry
 ; X86-SSE-NEXT: movl {{[0-9]+}}(%esp), %eax
-; X86-SSE-NEXT: movd {{.*#+}} xmm0 = mem[0],zero,zero,zero
+; X86-SSE-NEXT: movzwl (%eax), %eax
+; X86-SSE-NEXT: movd %eax, %xmm0
 ; X86-SSE-NEXT: pshuflw {{.*#+}} xmm0 = xmm0[0,0,0,0,4,5,6,7]
 ; X86-SSE-NEXT: pshufd {{.*#+}} xmm1 = xmm0[0,0,0,0]
 ; X86-SSE-NEXT: movdqa {{.*#+}} xmm2 = [1,2,4,8,16,32,64,128]
diff --git a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
--- a/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
+++ b/llvm/test/Transforms/InstCombine/abs-intrinsic.ll
@@ -402,7 +402,7 @@
 define <3 x i82> @srem_by_2(<3 x i82> %x, <3 x i82>* %p) {
 ; CHECK-LABEL: @srem_by_2(
 ; CHECK-NEXT: [[S:%.*]] = srem <3 x i82> [[X:%.*]],
-; CHECK-NEXT: store <3 x i82> [[S]], <3 x i82>* [[P:%.*]], align 64
+; CHECK-NEXT: store <3 x i82> [[S]], <3 x i82>* [[P:%.*]], align 32
 ; CHECK-NEXT: [[R:%.*]] = and <3 x i82> [[X]],
 ; CHECK-NEXT: ret <3 x i82> [[R]]
 ;
diff --git a/llvm/test/Transforms/InstCombine/icmp-vec.ll b/llvm/test/Transforms/InstCombine/icmp-vec.ll
--- a/llvm/test/Transforms/InstCombine/icmp-vec.ll
+++ b/llvm/test/Transforms/InstCombine/icmp-vec.ll
@@ -506,7 +506,7 @@
 define i1 @not_cast_ne-1_uses(<3 x i2> %x, <3 x i2>* %p) {
 ; CHECK-LABEL: @not_cast_ne-1_uses(
 ; CHECK-NEXT: [[NOT:%.*]] = xor <3 x i2> [[X:%.*]],
-; CHECK-NEXT: store <3 x i2> [[NOT]], <3 x i2>* [[P:%.*]], align 4
+; CHECK-NEXT: store <3 x i2> [[NOT]], <3 x i2>* [[P:%.*]], align 1
 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <3 x i2> [[X]] to i6
 ; CHECK-NEXT: [[R:%.*]] = icmp ne i6 [[TMP1]], 0
 ; CHECK-NEXT: ret i1 [[R]]
@@ -553,7 +553,7 @@
 define i1 @eq_cast_eq-1_use1(<2 x i4> %x, <2 x i4> %y, <2 x i1>* %p) {
 ; CHECK-LABEL: @eq_cast_eq-1_use1(
 ; CHECK-NEXT: [[IC:%.*]] = icmp sgt <2 x i4> [[X:%.*]], [[Y:%.*]]
-; CHECK-NEXT: store <2 x i1> [[IC]], <2 x i1>* [[P:%.*]], align 2
+; CHECK-NEXT: store <2 x i1> [[IC]], <2 x i1>* [[P:%.*]], align 1
 ; CHECK-NEXT: [[B:%.*]] = bitcast <2 x i1> [[IC]] to i2
 ; CHECK-NEXT: [[R:%.*]] = icmp eq i2 [[B]], -1
 ; CHECK-NEXT: ret i1 [[R]]
diff --git a/llvm/test/Transforms/InstCombine/select-min-max.ll b/llvm/test/Transforms/InstCombine/select-min-max.ll
--- a/llvm/test/Transforms/InstCombine/select-min-max.ll
+++ b/llvm/test/Transforms/InstCombine/select-min-max.ll
@@ -48,7 +48,7 @@
 define <3 x i5> @umax_umax_common_op_11(i1 %cond, <3 x i5> %x, <3 x i5> %y, <3 x i5> %z, <3 x i5>* %p) {
 ; CHECK-LABEL: @umax_umax_common_op_11(
 ; CHECK-NEXT: [[M2:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[Y:%.*]], <3 x i5> [[Z:%.*]])
-; CHECK-NEXT: store <3 x i5> [[M2]], <3 x i5>* [[P:%.*]], align 4
+; CHECK-NEXT: store <3 x i5> [[M2]], <3 x i5>* [[P:%.*]], align 2
 ; CHECK-NEXT: [[MINMAXOP:%.*]] = select i1 [[COND:%.*]], <3 x i5> [[X:%.*]], <3 x i5> [[Y]]
 ; CHECK-NEXT: [[SEL:%.*]] = call <3 x i5> @llvm.umax.v3i5(<3 x i5> [[MINMAXOP]], <3 x i5> [[Z]])
 ; CHECK-NEXT: ret <3 x i5> [[SEL]]
diff --git a/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll b/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
--- a/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
+++ b/llvm/test/Transforms/InstCombine/shufflevec-bitcast.ll
@@ -197,7 +197,7 @@
 ; CHECK-LABEL: @shuf_bitcast_insert_use2(
 ; CHECK-NEXT: [[I:%.*]] = insertelement <2 x i8> [[V:%.*]], i8 [[X:%.*]], i32 0
 ; CHECK-NEXT: [[TMP1:%.*]] = bitcast <4 x i4>* [[P:%.*]] to <2 x i8>*
-; CHECK-NEXT: store <2 x i8> [[I]], <2 x i8>* [[TMP1]], align 4
+; CHECK-NEXT: store <2 x i8> [[I]], <2 x i8>* [[TMP1]], align 2
 ; CHECK-NEXT: [[R:%.*]] = bitcast i8 [[X]] to <2 x i4>
 ; CHECK-NEXT: ret <2 x i4> [[R]]
 ;
diff --git a/llvm/test/Transforms/SROA/vector-promotion-different-size.ll b/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
--- a/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
+++ b/llvm/test/Transforms/SROA/vector-promotion-different-size.ll
@@ -3,7 +3,7 @@
 
 define <4 x i1> @vector_bitcast() {
   ; CHECK-LABEL: @vector_bitcast
-  ; CHECK: alloca i1
+  ; CHECK: alloca <3 x i1>
 
   %a = alloca <3 x i1>
   store <3 x i1> , <3 x i1>* %a
diff --git a/llvm/test/Transforms/VectorCombine/load-insert-store.ll b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
--- a/llvm/test/Transforms/VectorCombine/load-insert-store.ll
+++ b/llvm/test/Transforms/VectorCombine/load-insert-store.ll
@@ -64,7 +64,7 @@
 define void @insert_store_v9i4(<9 x i4>* %q, i4 zeroext %s) {
 ; CHECK-LABEL: @insert_store_v9i4(
 ; CHECK-NEXT: entry:
-; CHECK-NEXT: [[TMP0:%.*]] = load <9 x i4>, <9 x i4>* [[Q:%.*]], align 16
+; CHECK-NEXT: [[TMP0:%.*]] = load <9 x i4>, <9 x i4>* [[Q:%.*]], align 8
 ; CHECK-NEXT: [[VECINS:%.*]] = insertelement <9 x i4> [[TMP0]], i4 [[S:%.*]], i32 3
 ; CHECK-NEXT: store <9 x i4> [[VECINS]], <9 x i4>* [[Q]], align 1
 ; CHECK-NEXT: ret void