Index: lib/CodeGen/MachineInstr.cpp
===================================================================
--- lib/CodeGen/MachineInstr.cpp
+++ lib/CodeGen/MachineInstr.cpp
@@ -935,7 +935,7 @@
       continue;
     if (MOReg == Reg || (TRI && TargetRegisterInfo::isPhysicalRegister(MOReg) &&
                          TargetRegisterInfo::isPhysicalRegister(Reg) &&
-                         TRI->isSubRegister(MOReg, Reg)))
+                         TRI->isSubRegister(Reg, MOReg)))
       if (!isKill || MO.isKill())
         return i;
   }
Index: test/CodeGen/AArch64/post-ra-machine-sink.mir
===================================================================
--- test/CodeGen/AArch64/post-ra-machine-sink.mir
+++ test/CodeGen/AArch64/post-ra-machine-sink.mir
@@ -211,10 +211,10 @@
 # CHECK-LABEL: bb.0:
 # CHECK-NOT: renamable $w19 = COPY $w0, implicit-def $x19
 # CHECK-LABEL: bb.2:
-# CHECK: $w1 = ADDWrr $w1, $w0, implicit $x0
+# CHECK: $w1 = ADDWrr $w1, $w0, implicit killed $x0
 # CHECK-LABEL: bb.3:
 # CHECK: liveins: $x1, $w0
-# CHECK: renamable $w19 = COPY killed $w0, implicit-def $x19
+# CHECK: renamable $w19 = COPY $w0, implicit-def $x19
 name: sinkcopy8
 tracksRegLiveness: true
 body: |
Index: test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
===================================================================
--- test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
+++ test/CodeGen/AMDGPU/optimize-if-exec-masking.mir
@@ -131,6 +131,26 @@
     ret void
   }
 
+  define amdgpu_kernel void @if_and_xor_read_exec_copy_subreg(i32 %z, i32 %v) #0 {
+  main_body:
+    %id = call i32 @llvm.amdgcn.workitem.id.x()
+    %cc = icmp eq i32 %id, 0
+    %0 = call { i1, i64 } @llvm.amdgcn.if(i1 %cc)
+    %1 = extractvalue { i1, i64 } %0, 0
+    %2 = extractvalue { i1, i64 } %0, 1
+    br i1 %1, label %if, label %end
+
+  if:                                               ; preds = %main_body
+    %v.if = load volatile i32, i32 addrspace(1)* undef
+    br label %end
+
+  end:                                              ; preds = %if, %main_body
+    %r = phi i32 [ 4, %main_body ], [ %v.if, %if ]
+    call void @llvm.amdgcn.end.cf(i64 %2)
+    store i32 %r, i32 addrspace(1)* undef
+    ret void
+  }
+
   ; Function Attrs: nounwind readnone
   declare i32 @llvm.amdgcn.workitem.id.x() #1
 
@@ -731,3 +751,61 @@
 
     S_ENDPGM
 ...
+---
+# A read from exec copy subreg prevents optimization
+# CHECK-LABEL: name: if_and_xor_read_exec_copy_subreg{{$}}
+# CHECK: $sgpr0_sgpr1 = COPY $exec
+# CHECK-NEXT: $sgpr4 = S_MOV_B32 $sgpr1
+name: if_and_xor_read_exec_copy_subreg
+alignment: 0
+exposesReturnsTwice: false
+legalized: false
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+  - { reg: '$vgpr0' }
+frameInfo:
+  isFrameAddressTaken: false
+  isReturnAddressTaken: false
+  hasStackMap: false
+  hasPatchPoint: false
+  stackSize: 0
+  offsetAdjustment: 0
+  maxAlignment: 0
+  adjustsStack: false
+  hasCalls: false
+  maxCallFrameSize: 0
+  hasOpaqueSPAdjustment: false
+  hasVAStart: false
+  hasMustTailInVarArgFunc: false
+body: |
+  bb.0.main_body:
+    liveins: $vgpr0
+
+    $sgpr0_sgpr1 = COPY $exec
+    $sgpr4 = S_MOV_B32 $sgpr1
+    $vcc = V_CMP_EQ_I32_e64 0, killed $vgpr0, implicit $exec
+    $vgpr0 = V_MOV_B32_e32 4, implicit $exec
+    $sgpr2_sgpr3 = S_AND_B64 $sgpr0_sgpr1, killed $vcc, implicit-def $scc
+    $sgpr0_sgpr1 = S_XOR_B64 $sgpr2_sgpr3, killed $sgpr0_sgpr1, implicit-def $scc
+    $exec = S_MOV_B64_term killed $sgpr2_sgpr3
+    SI_MASK_BRANCH %bb.2, implicit $exec
+    S_BRANCH %bb.1
+
+  bb.1.if:
+    liveins: $sgpr0_sgpr1
+
+    $sgpr7 = S_MOV_B32 61440
+    $sgpr6 = S_MOV_B32 -1
+    $vgpr0 = BUFFER_LOAD_DWORD_OFFSET $sgpr4_sgpr5_sgpr6_sgpr7, 0, 0, 0, 0, 0, implicit $exec :: (volatile load 4 from `i32 addrspace(1)* undef`)
+
+  bb.2.end:
+    liveins: $vgpr0, $sgpr0_sgpr1
+
+    $exec = S_OR_B64 $exec, killed $sgpr0_sgpr1, implicit-def $scc
+    $sgpr3 = S_MOV_B32 61440
+    $sgpr2 = S_MOV_B32 -1
+    BUFFER_STORE_DWORD_OFFSET killed $vgpr0, $sgpr0_sgpr1_sgpr2_sgpr3, 0, 0, 0, 0, 0, implicit $exec :: (store 4 into `i32 addrspace(1)* undef`)
+    S_ENDPGM
+...
Index: test/CodeGen/ARM/deps-fix.ll
===================================================================
--- test/CodeGen/ARM/deps-fix.ll
+++ test/CodeGen/ARM/deps-fix.ll
@@ -5,8 +5,7 @@
 
 ; CHECK: fun_a
 define <4 x float> @fun_a(<4 x float> %in, <4 x float> %x, float %y) nounwind {
-; CHECK: vext
-; CHECK: vext
+; CHECK: vorr
 ; CHECK: vadd.f32
   %1 = insertelement <4 x float> %in, float %y, i32 0
   %2 = fadd <4 x float> %1, %x
Index: test/CodeGen/Mips/atomic64.ll
===================================================================
--- test/CodeGen/Mips/atomic64.ll
+++ test/CodeGen/Mips/atomic64.ll
@@ -85,7 +85,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: daddu $3, $2, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB0_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB0_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -103,7 +103,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: daddu $3, $2, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB0_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB0_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -245,7 +245,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: dsubu $3, $2, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB1_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB1_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -263,7 +263,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: dsubu $3, $2, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB1_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB1_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -405,7 +405,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: and $3, $2, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB2_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB2_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -423,7 +423,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: and $3, $2, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB2_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB2_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -565,7 +565,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: or $3, $2, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB3_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB3_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -583,7 +583,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: or $3, $2, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB3_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB3_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -725,7 +725,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: xor $3, $2, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB4_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB4_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -743,7 +743,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: xor $3, $2, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB4_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB4_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -889,7 +889,7 @@
 ; MIPS64R6-NEXT: and $3, $2, $4
 ; MIPS64R6-NEXT: nor $3, $zero, $3
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB5_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB5_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jrc $ra
@@ -908,7 +908,7 @@
 ; MIPS64R6O0-NEXT: and $3, $2, $4
 ; MIPS64R6O0-NEXT: nor $3, $zero, $3
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB5_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB5_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 8($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -1062,7 +1062,7 @@
 ; MIPS64R6-NEXT: lld $2, 0($1)
 ; MIPS64R6-NEXT: move $3, $4
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB6_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB6_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: # %bb.2: # %entry
 ; MIPS64R6-NEXT: jr $ra
@@ -1083,7 +1083,7 @@
 ; MIPS64R6O0-NEXT: lld $2, 0($1)
 ; MIPS64R6O0-NEXT: move $3, $4
 ; MIPS64R6O0-NEXT: scd $3, 0($1)
-; MIPS64R6O0-NEXT: beqzc $3, .LBB6_1
+; MIPS64R6O0-NEXT: beqc $3, $zero, .LBB6_1
 ; MIPS64R6O0-NEXT: # %bb.2: # %entry
 ; MIPS64R6O0-NEXT: sd $25, 0($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: daddiu $sp, $sp, 16
@@ -1259,7 +1259,7 @@
 ; MIPS64R6-NEXT: # in Loop: Header=BB7_1 Depth=1
 ; MIPS64R6-NEXT: move $3, $5
 ; MIPS64R6-NEXT: scd $3, 0($1)
-; MIPS64R6-NEXT: beqzc $3, .LBB7_1
+; MIPS64R6-NEXT: beqc $3, $zero, .LBB7_1
 ; MIPS64R6-NEXT: nop
 ; MIPS64R6-NEXT: .LBB7_3: # %entry
 ; MIPS64R6-NEXT: jr $ra
@@ -1285,7 +1285,7 @@
 ; MIPS64R6O0-NEXT: # in Loop: Header=BB7_1 Depth=1
 ; MIPS64R6O0-NEXT: move $7, $5
 ; MIPS64R6O0-NEXT: scd $7, 0($1)
-; MIPS64R6O0-NEXT: beqzc $7, .LBB7_1
+; MIPS64R6O0-NEXT: beqc $7, $zero, .LBB7_1
 ; MIPS64R6O0-NEXT: .LBB7_3: # %entry
 ; MIPS64R6O0-NEXT: sd $2, 24($sp) # 8-byte Folded Spill
 ; MIPS64R6O0-NEXT: move $2, $6
Index: test/CodeGen/Mips/compactbranches/compact-branches-64.ll
===================================================================
--- test/CodeGen/Mips/compactbranches/compact-branches-64.ll
+++ test/CodeGen/Mips/compactbranches/compact-branches-64.ll
@@ -139,7 +139,7 @@
 ; CHECK: jalrc $25
   %call = tail call i64 @k()
   %cmp = icmp eq i64 %call, 0
-; CHECK: bnezc
+; CHECK: bnec
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry
@@ -159,7 +159,7 @@
 ; CHECK: jalrc $25
   %call = tail call i64 @k()
   %cmp = icmp eq i64 %call, 0
-; CHECK: beqzc
+; CHECK: beqc
   br i1 %cmp, label %if.end, label %if.then
 
 if.then:                                          ; preds = %entry
@@ -180,7 +180,7 @@
 ; CHECK: jalrc $25
   %call = call i64 @k()
   %cmp = icmp ne i64 %call, 0
-; CHECK: beqzc
+; CHECK: beqc
   br i1 %cmp, label %if.then, label %if.end
 
 if.then:                                          ; preds = %entry