Index: llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/load-addressing-modes.mir @@ -286,8 +286,9 @@ ; CHECK-LABEL: name: mul_not_pow_2 ; CHECK: liveins: $x0, $x1, $d2 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 7 - ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 7 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr) ; CHECK: $d2 = COPY [[LDRDroX]] @@ -317,8 +318,9 @@ ; CHECK-LABEL: name: mul_wrong_pow_2 ; CHECK: liveins: $x0, $x1, $d2 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 16 - ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[MOVi64imm]], [[COPY]], $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 16 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[MADDXrrr:%[0-9]+]]:gpr64 = MADDXrrr [[SUBREG_TO_REG]], [[COPY]], $xzr ; CHECK: [[COPY1:%[0-9]+]]:gpr64sp = COPY $x1 ; CHECK: [[LDRDroX:%[0-9]+]]:fpr64 = LDRDroX [[COPY1]], [[MADDXrrr]], 0, 0 :: (load 8 from %ir.addr) ; CHECK: $d2 = COPY [[LDRDroX]] Index: llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/opt-fold-compare.mir @@ -44,10 +44,10 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = 
COPY $w1 - ; CHECK: [[MOVwzr:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: $wzr = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVwzr]], 1, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 1, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -76,10 +76,10 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 ; CHECK: $wzr = ADDSWrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 1, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 1, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -108,11 +108,11 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[MOVi32imm]], [[COPY1]], implicit-def $nzcv + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY1]], implicit-def $nzcv ; CHECK: $wzr = SUBSWrr [[COPY]], [[SUBSWrr]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 11, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv ; CHECK: 
$w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -141,11 +141,11 @@ ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[MOVi32imm]], [[COPY]], implicit-def $nzcv + ; CHECK: [[COPY2:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBSWrr:%[0-9]+]]:gpr32 = SUBSWrr [[COPY2]], [[COPY]], implicit-def $nzcv ; CHECK: $wzr = SUBSWrr [[SUBSWrr]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 11, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY2]], 11, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -174,10 +174,11 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi64imm1:%[0-9]+]]:gpr64 = MOVi64imm 1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: $xzr = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[MOVi64imm1]], [[MOVi64imm]], 1, implicit $nzcv + ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 1, implicit $nzcv ; CHECK: $x0 = COPY [[CSELXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 @@ -206,10 +207,11 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi64imm1:%[0-9]+]]:gpr64 = MOVi64imm 1 + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY 
$xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 ; CHECK: $xzr = ADDSXrr [[COPY]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[MOVi64imm1]], [[MOVi64imm]], 1, implicit $nzcv + ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 1, implicit $nzcv ; CHECK: $x0 = COPY [[CSELXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 @@ -238,11 +240,12 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi64imm1:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[MOVi64imm]], [[COPY1]], implicit-def $nzcv + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY1]], implicit-def $nzcv ; CHECK: $xzr = SUBSXrr [[COPY]], [[SUBSXrr]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[MOVi64imm1]], [[MOVi64imm]], 11, implicit $nzcv + ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv ; CHECK: $x0 = COPY [[CSELXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 @@ -271,11 +274,12 @@ ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $x1 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi64imm1:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: [[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[MOVi64imm]], [[COPY]], implicit-def $nzcv + ; CHECK: [[COPY2:%[0-9]+]]:gpr64 = COPY $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: 
[[SUBSXrr:%[0-9]+]]:gpr64 = SUBSXrr [[COPY2]], [[COPY]], implicit-def $nzcv ; CHECK: $xzr = SUBSXrr [[SUBSXrr]], [[COPY1]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[MOVi64imm1]], [[MOVi64imm]], 11, implicit $nzcv + ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY2]], 11, implicit $nzcv ; CHECK: $x0 = COPY [[CSELXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 @@ -302,10 +306,10 @@ ; CHECK-LABEL: name: tst_s32 ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: $wzr = ANDSWrr [[MOVi32imm]], [[COPY]], implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 0, implicit $nzcv + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: $wzr = ANDSWrr [[COPY1]], [[COPY]], implicit-def $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 0, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -333,10 +337,11 @@ ; CHECK-LABEL: name: tst_s64 ; CHECK: liveins: $x0, $x1 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x1 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[MOVi64imm1:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: $xzr = ANDSXrr [[MOVi64imm]], [[COPY]], implicit-def $nzcv - ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[MOVi64imm1]], [[MOVi64imm]], 0, implicit $nzcv + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: $xzr = ANDSXrr [[COPY1]], [[COPY]], implicit-def $nzcv + ; CHECK: [[CSELXr:%[0-9]+]]:gpr64 = CSELXr [[SUBREG_TO_REG]], [[COPY1]], 0, implicit $nzcv ; CHECK: $x0 = COPY [[CSELXr]] ; CHECK: RET_ReallyLR implicit $x0 %0:gpr(s64) = COPY $x0 @@ -364,11 
+369,11 @@ ; CHECK-LABEL: name: no_tst_unsigned_compare ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[MOVi32imm]], [[COPY]] + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]] ; CHECK: $wzr = SUBSWri [[ANDWrr]], 0, 0, implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 8, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 8, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 @@ -396,11 +401,11 @@ ; CHECK-LABEL: name: no_tst_nonzero ; CHECK: liveins: $w0, $w1 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w1 - ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[MOVi32imm1:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[MOVi32imm]], [[COPY]] + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[ANDWrr:%[0-9]+]]:gpr32common = ANDWrr [[COPY1]], [[COPY]] ; CHECK: $wzr = SUBSWri [[ANDWrr]], 42, 0, implicit-def $nzcv - ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm1]], [[MOVi32imm]], 8, implicit $nzcv + ; CHECK: [[CSELWr:%[0-9]+]]:gpr32 = CSELWr [[MOVi32imm]], [[COPY1]], 8, implicit $nzcv ; CHECK: $w0 = COPY [[CSELWr]] ; CHECK: RET_ReallyLR implicit $w0 %0:gpr(s32) = COPY $w0 Index: llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select-atomicrmw.mir @@ -29,9 +29,10 @@ ; CHECK-LABEL: name: atomicrmw_xchg_i64 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: 
[[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr64 = SWPX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: $x0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[SWPX:%[0-9]+]]:gpr64 = SWPX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic 8 on %ir.addr) + ; CHECK: $x0 = COPY [[SWPX]] %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 1 %2:gpr(s64) = G_ATOMICRMW_XCHG %0, %1 :: (load store monotonic 8 on %ir.addr) @@ -48,9 +49,10 @@ ; CHECK-LABEL: name: atomicrmw_add_i64 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr64 = LDADDX [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: $x0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[LDADDX:%[0-9]+]]:gpr64 = LDADDX [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic 8 on %ir.addr) + ; CHECK: $x0 = COPY [[LDADDX]] %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 1 %2:gpr(s64) = G_ATOMICRMW_ADD %0, %1 :: (load store monotonic 8 on %ir.addr) @@ -67,9 +69,9 @@ ; CHECK-LABEL: name: atomicrmw_add_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDADDALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4 on %ir.addr) @@ -87,9 +89,9 @@ ; CHECK-LABEL: name: atomicrmw_sub_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: 
[[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDADDALW [[CST]], [[COPY]] :: (load store seq_cst 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDADDALW:%[0-9]+]]:gpr32 = LDADDALW [[MOVi32imm]], [[COPY]] :: (load store seq_cst 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDADDALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_ADD %0, %1 :: (load store seq_cst 4 on %ir.addr) @@ -107,10 +109,10 @@ ; CHECK-LABEL: name: atomicrmw_and_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[CST2:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[CST]] - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDCLRAW [[CST2]], [[COPY]] :: (load store acquire 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[ORNWrr:%[0-9]+]]:gpr32 = ORNWrr $wzr, [[MOVi32imm]] + ; CHECK: [[LDCLRAW:%[0-9]+]]:gpr32 = LDCLRAW [[ORNWrr]], [[COPY]] :: (load store acquire 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDCLRAW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_AND %0, %1 :: (load store acquire 4 on %ir.addr) @@ -128,9 +130,9 @@ ; CHECK-LABEL: name: atomicrmw_or_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSETLW [[CST]], [[COPY]] :: (load store release 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDSETLW:%[0-9]+]]:gpr32 = LDSETLW [[MOVi32imm]], [[COPY]] :: (load store release 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDSETLW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_OR %0, %1 :: (load store release 4 on %ir.addr) @@ -148,9 +150,9 @@ ; CHECK-LABEL: name: atomicrmw_xor_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: 
[[RES:%[0-9]+]]:gpr32 = LDEORALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDEORALW:%[0-9]+]]:gpr32 = LDEORALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDEORALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_XOR %0, %1 :: (load store acq_rel 4 on %ir.addr) @@ -168,9 +170,9 @@ ; CHECK-LABEL: name: atomicrmw_min_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMINALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDSMINALW:%[0-9]+]]:gpr32 = LDSMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDSMINALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_MIN %0, %1 :: (load store acq_rel 4 on %ir.addr) @@ -188,9 +190,9 @@ ; CHECK-LABEL: name: atomicrmw_max_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDSMAXALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDSMAXALW:%[0-9]+]]:gpr32 = LDSMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDSMAXALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_MAX %0, %1 :: (load store acq_rel 4 on %ir.addr) @@ -208,9 +210,9 @@ ; CHECK-LABEL: name: atomicrmw_umin_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMINALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = 
MOVi32imm 1 + ; CHECK: [[LDUMINALW:%[0-9]+]]:gpr32 = LDUMINALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDUMINALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_UMIN %0, %1 :: (load store acq_rel 4 on %ir.addr) @@ -228,9 +230,9 @@ ; CHECK-LABEL: name: atomicrmw_umax_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = LDUMAXALW [[CST]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[LDUMAXALW:%[0-9]+]]:gpr32 = LDUMAXALW [[MOVi32imm]], [[COPY]] :: (load store acq_rel 4 on %ir.addr) + ; CHECK: $w0 = COPY [[LDUMAXALW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 1 %2:gpr(s32) = G_ATOMICRMW_UMAX %0, %1 :: (load store acq_rel 4 on %ir.addr) Index: llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select-cmp.mir @@ -58,8 +58,9 @@ ; CHECK-LABEL: name: cmp_imm_out_of_range ; CHECK: liveins: $x0 ; CHECK: [[COPY:%[0-9]+]]:gpr64 = COPY $x0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 13132 - ; CHECK: $xzr = SUBSXrr [[COPY]], [[MOVi64imm]], implicit-def $nzcv + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 13132 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: $xzr = SUBSXrr [[COPY]], [[SUBREG_TO_REG]], implicit-def $nzcv ; CHECK: [[CSINCWr:%[0-9]+]]:gpr32 = CSINCWr $wzr, $wzr, 1, implicit $nzcv ; CHECK: $w0 = COPY [[CSINCWr]] ; CHECK: RET_ReallyLR implicit $w0 Index: llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir +++ 
llvm/test/CodeGen/AArch64/GlobalISel/select-cmpxchg.mir @@ -19,10 +19,10 @@ ; CHECK-LABEL: name: cmpxchg_i32 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CMP:%[0-9]+]]:gpr32 = COPY $wzr - ; CHECK: [[CST:%[0-9]+]]:gpr32 = MOVi32imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr32 = CASW [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 4 on %ir.addr) - ; CHECK: $w0 = COPY [[RES]] + ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY $wzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[CASW:%[0-9]+]]:gpr32 = CASW [[COPY1]], [[MOVi32imm]], [[COPY]] :: (load store monotonic 4 on %ir.addr) + ; CHECK: $w0 = COPY [[CASW]] %0:gpr(p0) = COPY $x0 %1:gpr(s32) = G_CONSTANT i32 0 %2:gpr(s32) = G_CONSTANT i32 1 @@ -41,10 +41,11 @@ ; CHECK-LABEL: name: cmpxchg_i64 ; CHECK: [[COPY:%[0-9]+]]:gpr64sp = COPY $x0 - ; CHECK: [[CMP:%[0-9]+]]:gpr64 = COPY $xzr - ; CHECK: [[CST:%[0-9]+]]:gpr64 = MOVi64imm 1 - ; CHECK: [[RES:%[0-9]+]]:gpr64 = CASX [[CMP]], [[CST]], [[COPY]] :: (load store monotonic 8 on %ir.addr) - ; CHECK: $x0 = COPY [[RES]] + ; CHECK: [[COPY1:%[0-9]+]]:gpr64 = COPY $xzr + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[CASX:%[0-9]+]]:gpr64 = CASX [[COPY1]], [[SUBREG_TO_REG]], [[COPY]] :: (load store monotonic 8 on %ir.addr) + ; CHECK: $x0 = COPY [[CASX]] %0:gpr(p0) = COPY $x0 %1:gpr(s64) = G_CONSTANT i64 0 %2:gpr(s64) = G_CONSTANT i64 1 Index: llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select-imm.mir @@ -43,8 +43,9 @@ liveins: $w0, $w1 ; CHECK-LABEL: name: imm_s64_gpr - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 1234 - ; CHECK: $x0 = COPY [[MOVi64imm]] + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 1234 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64all = SUBREG_TO_REG 0, 
[[MOVi32imm]], %subreg.sub_32 + ; CHECK: $x0 = COPY [[SUBREG_TO_REG]] %0(s64) = G_CONSTANT i64 1234 $x0 = COPY %0(s64) ... Index: llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select-scalar-shift-imm.mir @@ -130,8 +130,9 @@ ; CHECK-LABEL: name: lshr_32_notimm64 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 8 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[MOVi64imm]], 8000 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 8 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[SUBREG_TO_REG]], 8000 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[ANDXri]].sub_32 ; CHECK: [[LSRVWr:%[0-9]+]]:gpr32 = LSRVWr [[COPY]], [[COPY1]] ; CHECK: $w0 = COPY [[LSRVWr]] @@ -154,8 +155,9 @@ ; CHECK-LABEL: name: ashr_32_notimm64 ; CHECK: [[COPY:%[0-9]+]]:gpr32 = COPY $w0 - ; CHECK: [[MOVi64imm:%[0-9]+]]:gpr64 = MOVi64imm 8 - ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[MOVi64imm]], 8000 + ; CHECK: [[MOVi32imm:%[0-9]+]]:gpr32 = MOVi32imm 8 + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gpr64 = SUBREG_TO_REG 0, [[MOVi32imm]], %subreg.sub_32 + ; CHECK: [[ANDXri:%[0-9]+]]:gpr64sp = ANDXri [[SUBREG_TO_REG]], 8000 ; CHECK: [[COPY1:%[0-9]+]]:gpr32 = COPY [[ANDXri]].sub_32 ; CHECK: [[ASRVWr:%[0-9]+]]:gpr32 = ASRVWr [[COPY]], [[COPY1]] ; CHECK: $w0 = COPY [[ASRVWr]] Index: llvm/test/CodeGen/AArch64/GlobalISel/select.mir =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/select.mir +++ llvm/test/CodeGen/AArch64/GlobalISel/select.mir @@ -108,7 +108,8 @@ # CHECK: body: # CHECK: %0:gpr64 = COPY $x0 -# CHECK: %1:gpr64 = MOVi64imm 10000 +# CHECK: %3:gpr32 = MOVi32imm 10000 +# CHECK: %1:gpr64 = SUBREG_TO_REG 0, %3, 
%subreg.sub_32 # CHECK: %2:gpr64 = ADDXrr %0, %1 body: | bb.0: Index: llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll =================================================================== --- llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll +++ llvm/test/CodeGen/AArch64/GlobalISel/swifterror.ll @@ -9,7 +9,7 @@ define float @foo(%swift_error** swifterror %error_ptr_ref) { ; CHECK-LABEL: foo: ; CHECK: mov [[ID:w[0-9]+]], #1 -; CHECK: mov x0, #16 +; CHECK: mov w0, #16 ; CHECK: malloc ; CHECK: strb [[ID]], [x0, #8] ; CHECK: mov x21, x0 @@ -99,7 +99,7 @@ ; CHECK-LABEL: foo_if: ; CHECK: cbz w0 ; CHECK: mov [[ID:w[0-9]+]], #1 -; CHECK: mov x0, #16 +; CHECK: mov w0, #16 ; CHECK: malloc ; CHECK: strb [[ID]], [x0, #8] ; CHECK: mov x21, x0 @@ -127,7 +127,7 @@ define float @foo_loop(%swift_error** swifterror %error_ptr_ref, i32 %cc, float %cc2) { ; CHECK-LABEL: foo_loop: ; CHECK: cbz -; CHECK: mov x0, #16 +; CHECK: mov w0, #16 ; CHECK: malloc ; CHECK: mov x21, x0 ; CHECK: strb w{{.*}}, [x0, #8] @@ -165,7 +165,7 @@ ; CHECK-LABEL: foo_sret: ; CHECK: mov [[SRET:x[0-9]+]], x8 ; CHECK: mov [[ID:w[0-9]+]], #1 -; CHECK: mov x0, #16 +; CHECK: mov w0, #16 ; CHECK: malloc ; CHECK: strb [[ID]], [x0, #8] ; CHECK: str w{{.*}}, [{{.*}}[[SRET]], #4] @@ -221,7 +221,7 @@ define float @foo_vararg(%swift_error** swifterror %error_ptr_ref, ...) { ; CHECK-LABEL: foo_vararg: ; CHECK-DAG: mov [[ID:w[0-9]+]], #1 -; CHECK: mov x0, #16 +; CHECK: mov w0, #16 ; CHECK: malloc ; CHECK-DAG: strb [[ID]], [x0, #8] @@ -333,14 +333,14 @@ ; CHECK: mov x27, x7 ; CHECK: mov x28, x21 ; Setup call. 
-; CHECK: mov x0, #1 -; CHECK: mov x1, #2 -; CHECK: mov x2, #3 -; CHECK: mov x3, #4 -; CHECK: mov x4, #5 -; CHECK: mov x5, #6 -; CHECK: mov x6, #7 -; CHECK: mov x7, #8 +; CHECK: mov w0, #1 +; CHECK: mov w1, #2 +; CHECK: mov w2, #3 +; CHECK: mov w3, #4 +; CHECK: mov w4, #5 +; CHECK: mov w5, #6 +; CHECK: mov w6, #7 +; CHECK: mov w7, #8 ; CHECK: str xzr, [sp] ; CHECK: mov x21, xzr ; CHECK: bl _params_in_reg2 @@ -398,14 +398,14 @@ ; CHECK: mov x27, x7 ; CHECK: mov x28, x21 ; Setup call arguments. -; CHECK: mov x0, #1 -; CHECK: mov x1, #2 -; CHECK: mov x2, #3 -; CHECK: mov x3, #4 -; CHECK: mov x4, #5 -; CHECK: mov x5, #6 -; CHECK: mov x6, #7 -; CHECK: mov x7, #8 +; CHECK: mov w0, #1 +; CHECK: mov w1, #2 +; CHECK: mov w2, #3 +; CHECK: mov w3, #4 +; CHECK: mov w4, #5 +; CHECK: mov w5, #6 +; CHECK: mov w6, #7 +; CHECK: mov w7, #8 ; CHECK: mov x21, xzr ; CHECK: bl _params_in_reg2 ; Store swifterror %error_ptr_ref. @@ -433,14 +433,14 @@ ; Save swifterror %err. ; CHECK: mov x19, x21 ; Setup call. -; CHECK: mov x0, #1 -; CHECK: mov x1, #2 -; CHECK: mov x2, #3 -; CHECK: mov x3, #4 -; CHECK: mov x4, #5 -; CHECK: mov x5, #6 -; CHECK: mov x6, #7 -; CHECK: mov x7, #8 +; CHECK: mov w0, #1 +; CHECK: mov w1, #2 +; CHECK: mov w2, #3 +; CHECK: mov w3, #4 +; CHECK: mov w4, #5 +; CHECK: mov w5, #6 +; CHECK: mov w6, #7 +; CHECK: mov w7, #8 ; ... setup call with swiferror %error_ptr_ref. 
; CHECK: ldr x21, [sp, #8] ; CHECK: bl _params_in_reg2 Index: llvm/test/CodeGen/X86/GlobalISel/add-ext.ll =================================================================== --- llvm/test/CodeGen/X86/GlobalISel/add-ext.ll +++ llvm/test/CodeGen/X86/GlobalISel/add-ext.ll @@ -202,16 +202,19 @@ ; CHECK: # %bb.0: ; CHECK-NEXT: # kill: def $esi killed $esi def $rsi ; CHECK-NEXT: leal 1(%rsi), %eax +; CHECK-NEXT: movl %eax, %eax ; CHECK-NEXT: movq $4, %rcx ; CHECK-NEXT: imulq %rcx, %rax ; CHECK-NEXT: addq %rdi, %rax ; CHECK-NEXT: leal 2(%rsi), %edx +; CHECK-NEXT: movl %edx, %edx ; CHECK-NEXT: imulq %rcx, %rdx ; CHECK-NEXT: addq %rdi, %rdx ; CHECK-NEXT: movl (%rdx), %edx ; CHECK-NEXT: addl (%rax), %edx -; CHECK-NEXT: imulq %rcx, %rsi -; CHECK-NEXT: leaq (%rdi,%rsi), %rax +; CHECK-NEXT: movl %esi, %eax +; CHECK-NEXT: imulq %rcx, %rax +; CHECK-NEXT: addq %rdi, %rax ; CHECK-NEXT: movl %edx, (%rax) ; CHECK-NEXT: retq Index: llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir =================================================================== --- llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir +++ llvm/test/CodeGen/X86/GlobalISel/select-ext-x86-64.mir @@ -136,7 +136,8 @@ ; ALL-LABEL: name: anyext_s64_from_s8 ; ALL: [[COPY:%[0-9]+]]:gr64_with_sub_8bit = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr8 = COPY [[COPY]].sub_8bit - ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_8bit + ; ALL: [[MOVZX32rr8_:%[0-9]+]]:gr32 = MOVZX32rr8 [[COPY1]] + ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOVZX32rr8_]], %subreg.sub_32bit ; ALL: $rax = COPY [[SUBREG_TO_REG]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi @@ -161,7 +162,8 @@ ; ALL-LABEL: name: anyext_s64_from_s16 ; ALL: [[COPY:%[0-9]+]]:gr64 = COPY $rdi ; ALL: [[COPY1:%[0-9]+]]:gr16 = COPY [[COPY]].sub_16bit - ; ALL: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY1]], %subreg.sub_16bit + ; ALL: [[MOVZX32rr16_:%[0-9]+]]:gr32 = MOVZX32rr16 [[COPY1]] + ; ALL: 
[[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOVZX32rr16_]], %subreg.sub_32bit ; ALL: $rax = COPY [[SUBREG_TO_REG]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi Index: llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir =================================================================== --- llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir +++ llvm/test/CodeGen/X86/GlobalISel/x86_64-select-zext.mir @@ -351,7 +351,8 @@ ; CHECK-LABEL: name: zext_i32_to_i64 ; CHECK: liveins: $edi ; CHECK: [[COPY:%[0-9]+]]:gr32 = COPY $edi - ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[COPY]], %subreg.sub_32bit + ; CHECK: [[MOV32rr:%[0-9]+]]:gr32 = MOV32rr [[COPY]] + ; CHECK: [[SUBREG_TO_REG:%[0-9]+]]:gr64 = SUBREG_TO_REG 0, [[MOV32rr]], %subreg.sub_32bit ; CHECK: $rax = COPY [[SUBREG_TO_REG]] ; CHECK: RET 0, implicit $rax %0:gpr(s32) = COPY $edi Index: llvm/test/TableGen/GlobalISelEmitterSubreg.td =================================================================== --- llvm/test/TableGen/GlobalISelEmitterSubreg.td +++ llvm/test/TableGen/GlobalISelEmitterSubreg.td @@ -117,3 +117,21 @@ // CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/0, /*RC DRegs*/1, // CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/1, /*RC DRegs*/1, // CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/2, /*RC SRegs*/0, + +// Test that we can import SUBREG_TO_REG +def : Pat<(i32 (zext SOP:$src)), + (SUBREG_TO_REG (i64 0), (SUBSOME_INSN SOP:$src), sub0)>; +// CHECK-LABEL: (zext:{ *:[i32] } SOP:{ *:[i16] }:$src) => (SUBREG_TO_REG:{ *:[i32] } 0:{ *:[i64] }, (SUBSOME_INSN:{ *:[i16] } SOP:{ *:[i16] }:$src), sub0:{ *:[i32] }) +// CHECK-NEXT: GIR_MakeTempReg, /*TempRegID*/0, /*TypeID*/GILLT_s16, +// CHECK-NEXT: GIR_BuildMI, /*InsnID*/1, /*Opcode*/MyTarget::SUBSOME_INSN, +// CHECK-NEXT: GIR_AddTempRegister, /*InsnID*/1, /*TempRegID*/0, /*TempRegFlags*/RegState::Define, +// CHECK-NEXT: GIR_Copy, /*NewInsnID*/1, /*OldInsnID*/0, /*OpIdx*/1, // src +// CHECK-NEXT: 
GIR_ConstrainSelectedInstOperands, /*InsnID*/1, +// CHECK-NEXT: GIR_BuildMI, /*InsnID*/0, /*Opcode*/TargetOpcode::SUBREG_TO_REG, +// CHECK-NEXT: GIR_Copy, /*NewInsnID*/0, /*OldInsnID*/0, /*OpIdx*/0, // dst +// CHECK-NEXT: GIR_AddImm, /*InsnID*/0, /*Imm*/0, +// CHECK-NEXT: GIR_AddTempRegister, /*InsnID*/0, /*TempRegID*/0, /*TempRegFlags*/0, +// CHECK-NEXT: GIR_AddImm, /*InsnID*/0, /*Imm*/1, +// CHECK-NEXT: GIR_EraseFromParent, /*InsnID*/0, +// CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/0, /*RC DRegs*/1, +// CHECK-NEXT: GIR_ConstrainOperandRC, /*InsnID*/0, /*Op*/2, /*RC SRegs*/0, Index: llvm/utils/TableGen/GlobalISelEmitter.cpp =================================================================== --- llvm/utils/TableGen/GlobalISelEmitter.cpp +++ llvm/utils/TableGen/GlobalISelEmitter.cpp @@ -3169,8 +3169,14 @@ /// \p SubRegNode, and the subregister index defined by \p SubRegIdxNode. /// If no register class is found, return None. Optional + inferSuperRegisterClassForNode(const TypeSetByHwMode &Ty, + TreePatternNode *SuperRegNode, + TreePatternNode *SubRegIdxNode); + + /// Infer a CodeGenRegisterClass which supports \p Ty and \p SubRegIdxNode. + /// Return None if no such class exists. + Optional inferSuperRegisterClass(const TypeSetByHwMode &Ty, - TreePatternNode *SuperRegNode, TreePatternNode *SubRegIdxNode); /// Return the CodeGenRegisterClass associated with \p Leaf if it has one. 
@@ -3864,8 +3870,9 @@ if (!SubClass) return failedImport( "Cannot infer register class from INSERT_SUBREG operand #1"); - Optional<const CodeGenRegisterClass *> SuperClass = inferSuperRegisterClass( - Dst->getExtType(0), Dst->getChild(0), Dst->getChild(2)); + Optional<const CodeGenRegisterClass *> SuperClass = + inferSuperRegisterClassForNode(Dst->getExtType(0), Dst->getChild(0), + Dst->getChild(2)); if (!SuperClass) return failedImport( "Cannot infer register class for INSERT_SUBREG operand #0"); @@ -3880,6 +3887,26 @@ return InsertPtOrError.get(); } + // Similar to INSERT_SUBREG, we also have to handle SUBREG_TO_REG as a + // subinstruction. + if (Target.getInstruction(Dst->getOperator()).TheDef->getName() == + "SUBREG_TO_REG") { + auto SubClass = inferRegClassFromPattern(Dst->getChild(1)); + if (!SubClass) + return failedImport( + "Cannot infer register class from SUBREG_TO_REG child #1"); + auto SuperClass = inferSuperRegisterClass(Dst->getExtType(0), + Dst->getChild(2)); + if (!SuperClass) + return failedImport( + "Cannot infer register class for SUBREG_TO_REG operand #0"); + M.insertAction<ConstrainOperandToRegClassAction>( + InsertPt, DstMIBuilder.getInsnID(), 0, **SuperClass); + M.insertAction<ConstrainOperandToRegClassAction>( + InsertPt, DstMIBuilder.getInsnID(), 2, **SubClass); + return InsertPtOrError.get(); +} + M.insertAction<ConstrainOperandsAction>(InsertPt, DstMIBuilder.getInsnID()); return InsertPtOrError.get(); @@ -4128,23 +4155,11 @@ Optional<const CodeGenRegisterClass *> GlobalISelEmitter::inferSuperRegisterClass(const TypeSetByHwMode &Ty, - TreePatternNode *SuperRegNode, TreePatternNode *SubRegIdxNode) { - // Check if we already have a defined register class for the super register - // node. If we do, then we should preserve that rather than inferring anything - // from the subregister index node. We can assume that whoever wrote the - // pattern in the first place made sure that the super register and - // subregister are compatible. 
- if (Optional<const CodeGenRegisterClass *> SuperRegisterClass = - inferRegClassFromPattern(SuperRegNode)) - return SuperRegisterClass; - + assert(SubRegIdxNode && "Expected subregister index node!"); // We need a ValueTypeByHwMode for getSuperRegForSubReg. if (!Ty.isValueTypeByHwMode(false)) return None; - - // We don't know anything about the super register. Try to use the subregister - // index to infer an appropriate register class. if (!SubRegIdxNode->isLeaf()) return None; DefInit *SubRegInit = dyn_cast<DefInit>(SubRegIdxNode->getLeafValue()); @@ -4161,6 +4176,22 @@ return *RC; } +Optional<const CodeGenRegisterClass *> +GlobalISelEmitter::inferSuperRegisterClassForNode( + const TypeSetByHwMode &Ty, TreePatternNode *SuperRegNode, + TreePatternNode *SubRegIdxNode) { + assert(SuperRegNode && "Expected super register node!"); + // Check if we already have a defined register class for the super register + // node. If we do, then we should preserve that rather than inferring anything + // from the subregister index node. We can assume that whoever wrote the + // pattern in the first place made sure that the super register and + // subregister are compatible. + if (Optional<const CodeGenRegisterClass *> SuperRegisterClass = + inferRegClassFromPattern(SuperRegNode)) + return *SuperRegisterClass; + return inferSuperRegisterClass(Ty, SubRegIdxNode); +} + Expected<RuleMatcher> GlobalISelEmitter::runOnPattern(const PatternToMatch &P) { // Keep track of the matchers and actions to emit. 
int Score = P.getPatternComplexity(CGP); @@ -4281,8 +4312,8 @@ if (DstIOpRec == nullptr) return failedImport("EXTRACT_SUBREG operand #0 isn't a register class"); } else if (DstI.TheDef->getName() == "INSERT_SUBREG") { - auto MaybeSuperClass = - inferSuperRegisterClass(VTy, Dst->getChild(0), Dst->getChild(2)); + auto MaybeSuperClass = inferSuperRegisterClassForNode( + VTy, Dst->getChild(0), Dst->getChild(2)); if (!MaybeSuperClass) return failedImport( "Cannot infer register class for INSERT_SUBREG operand #0"); @@ -4295,6 +4326,17 @@ OM.addPredicate<RegisterBankOperandMatcher>(**MaybeSuperClass); ++OpIdx; continue; + } else if (DstI.TheDef->getName() == "SUBREG_TO_REG") { + auto MaybeRegClass = inferSuperRegisterClass(VTy, Dst->getChild(2)); + if (!MaybeRegClass) + return failedImport( + "Cannot infer register class for SUBREG_TO_REG operand #0"); + OperandMatcher &OM = InsnMatcher.getOperand(OpIdx); + OM.setSymbolicName(DstIOperand.Name); + M.defineOperand(OM.getSymbolicName(), OM); + OM.addPredicate<RegisterBankOperandMatcher>(**MaybeRegClass); + ++OpIdx; + continue; } else if (DstIOpRec->isSubClassOf("RegisterOperand")) DstIOpRec = DstIOpRec->getValueAsDef("RegClass"); else if (!DstIOpRec->isSubClassOf("RegisterClass")) @@ -4393,7 +4435,7 @@ if (!SubClass) return failedImport( "Cannot infer register class from INSERT_SUBREG operand #1"); - auto SuperClass = inferSuperRegisterClass( + auto SuperClass = inferSuperRegisterClassForNode( Src->getExtType(0), Dst->getChild(0), Dst->getChild(2)); if (!SuperClass) return failedImport( @@ -4405,6 +4447,30 @@ return std::move(M); } + if (DstI.TheDef->getName() == "SUBREG_TO_REG") { + // We need to constrain the destination and subregister source. + assert(Src->getExtTypes().size() == 1 && + "Expected Src of SUBREG_TO_REG to have one result type"); + + // Attempt to infer the subregister source from the first child. If it has + // an explicitly given register class, we'll use that. Otherwise, we will + // fail. 
+ auto SubClass = inferRegClassFromPattern(Dst->getChild(1)); + if (!SubClass) + return failedImport( + "Cannot infer register class from SUBREG_TO_REG child #1"); + // We don't have a child to look at that might have a super register node. + auto SuperClass = + inferSuperRegisterClass(Src->getExtType(0), Dst->getChild(2)); + if (!SuperClass) + return failedImport( + "Cannot infer register class for SUBREG_TO_REG operand #0"); + M.addAction<ConstrainOperandToRegClassAction>(0, 0, **SuperClass); + M.addAction<ConstrainOperandToRegClassAction>(0, 2, **SubClass); + ++NumPatternImported; + return std::move(M); + } + M.addAction<ConstrainOperandsAction>(0); // We're done with this pattern! It's eligible for GISel emission; return it.