diff --git a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
--- a/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
+++ b/llvm/lib/CodeGen/SelectionDAG/LegalizeIntegerTypes.cpp
@@ -5076,6 +5076,17 @@
     return;
   }
 
+  // If HiCmp simplified to LHSHi != RHSHi, then we don't need a select and can
+  // use an OR instead.
+  if (HiCmp.getOpcode() == ISD::SETCC &&
+      cast<CondCodeSDNode>(HiCmp.getOperand(2))->get() == ISD::SETNE &&
+      ((HiCmp.getOperand(0) == LHSHi && HiCmp.getOperand(1) == RHSHi) ||
+       (HiCmp.getOperand(0) == RHSHi && HiCmp.getOperand(1) == LHSHi))) {
+    NewLHS = DAG.getNode(ISD::OR, dl, LoCmp.getValueType(), LoCmp, HiCmp);
+    NewRHS = SDValue();
+    return;
+  }
+
   NewLHS = TLI.SimplifySetCC(getSetCCResultType(HiVT), LHSHi, RHSHi,
                              ISD::SETEQ, false, DagCombineInfo, dl);
   if (!NewLHS.getNode())
diff --git a/llvm/test/CodeGen/RISCV/forced-atomics.ll b/llvm/test/CodeGen/RISCV/forced-atomics.ll
--- a/llvm/test/CodeGen/RISCV/forced-atomics.ll
+++ b/llvm/test/CodeGen/RISCV/forced-atomics.ll
@@ -2700,26 +2700,20 @@
 ; RV32-NEXT:    call __atomic_compare_exchange_8@plt
 ; RV32-NEXT:    lw a1, 4(sp)
 ; RV32-NEXT:    lw a4, 0(sp)
-; RV32-NEXT:    bnez a0, .LBB51_6
+; RV32-NEXT:    bnez a0, .LBB51_4
 ; RV32-NEXT:  .LBB51_2: # %atomicrmw.start
 ; RV32-NEXT:    # =>This Inner Loop Header: Depth=1
-; RV32-NEXT:    beqz a1, .LBB51_4
-; RV32-NEXT:  # %bb.3: # %atomicrmw.start
-; RV32-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32-NEXT:    snez a0, a1
+; RV32-NEXT:    sltiu a2, a4, 2
+; RV32-NEXT:    xori a2, a2, 1
+; RV32-NEXT:    or a0, a2, a0
 ; RV32-NEXT:    mv a2, a4
 ; RV32-NEXT:    bnez a0, .LBB51_1
-; RV32-NEXT:    j .LBB51_5
-; RV32-NEXT:  .LBB51_4: # in Loop: Header=BB51_2 Depth=1
-; RV32-NEXT:    sltiu a0, a4, 2
-; RV32-NEXT:    xori a0, a0, 1
-; RV32-NEXT:    mv a2, a4
-; RV32-NEXT:    bnez a0, .LBB51_1
-; RV32-NEXT:  .LBB51_5: # %atomicrmw.start
+; RV32-NEXT:  # %bb.3: # %atomicrmw.start
 ; RV32-NEXT:    # in Loop: Header=BB51_2 Depth=1
 ; RV32-NEXT:    li a2, 1
 ; RV32-NEXT:    j .LBB51_1
-; RV32-NEXT:  .LBB51_6: # %atomicrmw.end
+; RV32-NEXT:  .LBB51_4: # %atomicrmw.end
 ; RV32-NEXT:    mv a0, a4
 ; RV32-NEXT:    lw ra, 12(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    lw s0, 8(sp) # 4-byte Folded Reload
diff --git a/llvm/test/CodeGen/RISCV/fpclamptosat.ll b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
--- a/llvm/test/CodeGen/RISCV/fpclamptosat.ll
+++ b/llvm/test/CodeGen/RISCV/fpclamptosat.ll
@@ -1320,44 +1320,37 @@
 ; RV32IF-NEXT:  .LBB20_2:
 ; RV32IF-NEXT:    seqz a2, a0
 ; RV32IF-NEXT:  .LBB20_3: # %entry
-; RV32IF-NEXT:    lw a3, 12(sp)
-; RV32IF-NEXT:    xori a4, a0, 1
-; RV32IF-NEXT:    or a4, a4, a1
-; RV32IF-NEXT:    seqz a4, a4
-; RV32IF-NEXT:    addi a4, a4, -1
-; RV32IF-NEXT:    and a2, a4, a2
-; RV32IF-NEXT:    neg a4, a2
-; RV32IF-NEXT:    bnez a2, .LBB20_5
+; RV32IF-NEXT:    xori a3, a0, 1
+; RV32IF-NEXT:    or a3, a3, a1
+; RV32IF-NEXT:    seqz a3, a3
+; RV32IF-NEXT:    addi a3, a3, -1
+; RV32IF-NEXT:    and a3, a3, a2
+; RV32IF-NEXT:    neg a2, a3
+; RV32IF-NEXT:    bnez a3, .LBB20_5
 ; RV32IF-NEXT:  # %bb.4: # %entry
 ; RV32IF-NEXT:    li a0, 1
 ; RV32IF-NEXT:  .LBB20_5: # %entry
-; RV32IF-NEXT:    lw a5, 8(sp)
-; RV32IF-NEXT:    and a2, a4, a1
-; RV32IF-NEXT:    and a1, a4, a3
-; RV32IF-NEXT:    beqz a2, .LBB20_8
+; RV32IF-NEXT:    lw a3, 8(sp)
+; RV32IF-NEXT:    lw a4, 12(sp)
+; RV32IF-NEXT:    and a5, a2, a1
+; RV32IF-NEXT:    beqz a5, .LBB20_7
 ; RV32IF-NEXT:  # %bb.6: # %entry
-; RV32IF-NEXT:    sgtz a3, a2
-; RV32IF-NEXT:    and a4, a4, a5
-; RV32IF-NEXT:    bnez a1, .LBB20_9
+; RV32IF-NEXT:    sgtz a1, a5
+; RV32IF-NEXT:    j .LBB20_8
 ; RV32IF-NEXT:  .LBB20_7:
-; RV32IF-NEXT:    snez a5, a4
-; RV32IF-NEXT:    or a0, a0, a2
+; RV32IF-NEXT:    snez a1, a0
+; RV32IF-NEXT:  .LBB20_8: # %entry
+; RV32IF-NEXT:    and a4, a2, a4
+; RV32IF-NEXT:    or a0, a0, a5
+; RV32IF-NEXT:    and a2, a2, a3
 ; RV32IF-NEXT:    bnez a0, .LBB20_10
-; RV32IF-NEXT:    j .LBB20_11
-; RV32IF-NEXT:  .LBB20_8:
-; RV32IF-NEXT:    snez a3, a0
-; RV32IF-NEXT:    and a4, a4, a5
-; RV32IF-NEXT:    beqz a1, .LBB20_7
-; RV32IF-NEXT:  .LBB20_9: # %entry
-; RV32IF-NEXT:    snez a5, a1
-; RV32IF-NEXT:    or a0, a0, a2
-; RV32IF-NEXT:    beqz a0, .LBB20_11
+; RV32IF-NEXT:  # %bb.9:
+; RV32IF-NEXT:    or a0, a2, a4
+; RV32IF-NEXT:    snez a1, a0
 ; RV32IF-NEXT:  .LBB20_10: # %entry
-; RV32IF-NEXT:    mv a5, a3
-; RV32IF-NEXT:  .LBB20_11: # %entry
-; RV32IF-NEXT:    neg a2, a5
-; RV32IF-NEXT:    and a0, a2, a4
-; RV32IF-NEXT:    and a1, a2, a1
+; RV32IF-NEXT:    neg a1, a1
+; RV32IF-NEXT:    and a0, a1, a2
+; RV32IF-NEXT:    and a1, a1, a4
 ; RV32IF-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IF-NEXT:    addi sp, sp, 32
 ; RV32IF-NEXT:    ret
@@ -1406,44 +1399,37 @@
 ; RV32IFD-NEXT:  .LBB20_2:
 ; RV32IFD-NEXT:    seqz a2, a0
 ; RV32IFD-NEXT:  .LBB20_3: # %entry
-; RV32IFD-NEXT:    lw a3, 12(sp)
-; RV32IFD-NEXT:    xori a4, a0, 1
-; RV32IFD-NEXT:    or a4, a4, a1
-; RV32IFD-NEXT:    seqz a4, a4
-; RV32IFD-NEXT:    addi a4, a4, -1
-; RV32IFD-NEXT:    and a2, a4, a2
-; RV32IFD-NEXT:    neg a4, a2
-; RV32IFD-NEXT:    bnez a2, .LBB20_5
+; RV32IFD-NEXT:    xori a3, a0, 1
+; RV32IFD-NEXT:    or a3, a3, a1
+; RV32IFD-NEXT:    seqz a3, a3
+; RV32IFD-NEXT:    addi a3, a3, -1
+; RV32IFD-NEXT:    and a3, a3, a2
+; RV32IFD-NEXT:    neg a2, a3
+; RV32IFD-NEXT:    bnez a3, .LBB20_5
 ; RV32IFD-NEXT:  # %bb.4: # %entry
 ; RV32IFD-NEXT:    li a0, 1
 ; RV32IFD-NEXT:  .LBB20_5: # %entry
-; RV32IFD-NEXT:    lw a5, 8(sp)
-; RV32IFD-NEXT:    and a2, a4, a1
-; RV32IFD-NEXT:    and a1, a4, a3
-; RV32IFD-NEXT:    beqz a2, .LBB20_8
+; RV32IFD-NEXT:    lw a3, 8(sp)
+; RV32IFD-NEXT:    lw a4, 12(sp)
+; RV32IFD-NEXT:    and a5, a2, a1
+; RV32IFD-NEXT:    beqz a5, .LBB20_7
 ; RV32IFD-NEXT:  # %bb.6: # %entry
-; RV32IFD-NEXT:    sgtz a3, a2
-; RV32IFD-NEXT:    and a4, a4, a5
-; RV32IFD-NEXT:    bnez a1, .LBB20_9
+; RV32IFD-NEXT:    sgtz a1, a5
+; RV32IFD-NEXT:    j .LBB20_8
 ; RV32IFD-NEXT:  .LBB20_7:
-; RV32IFD-NEXT:    snez a5, a4
-; RV32IFD-NEXT:    or a0, a0, a2
+; RV32IFD-NEXT:    snez a1, a0
+; RV32IFD-NEXT:  .LBB20_8: # %entry
+; RV32IFD-NEXT:    and a4, a2, a4
+; RV32IFD-NEXT:    or a0, a0, a5
+; RV32IFD-NEXT:    and a2, a2, a3
 ; RV32IFD-NEXT:    bnez a0, .LBB20_10
-; RV32IFD-NEXT:    j .LBB20_11
-; RV32IFD-NEXT:  .LBB20_8:
-; RV32IFD-NEXT:    snez a3, a0
-; RV32IFD-NEXT:    and a4, a4, a5
-; RV32IFD-NEXT:    beqz a1, .LBB20_7
-; RV32IFD-NEXT:  .LBB20_9: # %entry
-; RV32IFD-NEXT:    snez a5, a1
-; RV32IFD-NEXT:    or a0, a0, a2
-; RV32IFD-NEXT:    beqz a0, .LBB20_11
+; RV32IFD-NEXT:  # %bb.9:
+; RV32IFD-NEXT:    or a0, a2, a4
+; RV32IFD-NEXT:    snez a1, a0
 ; RV32IFD-NEXT:  .LBB20_10: # %entry
-; RV32IFD-NEXT:    mv a5, a3
-; RV32IFD-NEXT:  .LBB20_11: # %entry
-; RV32IFD-NEXT:    neg a2, a5
-; RV32IFD-NEXT:    and a0, a2, a4
-; RV32IFD-NEXT:    and a1, a2, a1
+; RV32IFD-NEXT:    neg a1, a1
+; RV32IFD-NEXT:    and a0, a1, a2
+; RV32IFD-NEXT:    and a1, a1, a4
 ; RV32IFD-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32IFD-NEXT:    addi sp, sp, 32
 ; RV32IFD-NEXT:    ret
@@ -1602,44 +1588,37 @@
 ; RV32-NEXT:  .LBB23_2:
 ; RV32-NEXT:    seqz a2, a0
 ; RV32-NEXT:  .LBB23_3: # %entry
-; RV32-NEXT:    lw a3, 12(sp)
-; RV32-NEXT:    xori a4, a0, 1
-; RV32-NEXT:    or a4, a4, a1
-; RV32-NEXT:    seqz a4, a4
-; RV32-NEXT:    addi a4, a4, -1
-; RV32-NEXT:    and a2, a4, a2
-; RV32-NEXT:    neg a4, a2
-; RV32-NEXT:    bnez a2, .LBB23_5
+; RV32-NEXT:    xori a3, a0, 1
+; RV32-NEXT:    or a3, a3, a1
+; RV32-NEXT:    seqz a3, a3
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a3, a3, a2
+; RV32-NEXT:    neg a2, a3
+; RV32-NEXT:    bnez a3, .LBB23_5
 ; RV32-NEXT:  # %bb.4: # %entry
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB23_5: # %entry
-; RV32-NEXT:    lw a5, 8(sp)
-; RV32-NEXT:    and a2, a4, a1
-; RV32-NEXT:    and a1, a4, a3
-; RV32-NEXT:    beqz a2, .LBB23_8
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    lw a4, 12(sp)
+; RV32-NEXT:    and a5, a2, a1
+; RV32-NEXT:    beqz a5, .LBB23_7
 ; RV32-NEXT:  # %bb.6: # %entry
-; RV32-NEXT:    sgtz a3, a2
-; RV32-NEXT:    and a4, a4, a5
-; RV32-NEXT:    bnez a1, .LBB23_9
+; RV32-NEXT:    sgtz a1, a5
+; RV32-NEXT:    j .LBB23_8
 ; RV32-NEXT:  .LBB23_7:
-; RV32-NEXT:    snez a5, a4
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    snez a1, a0
+; RV32-NEXT:  .LBB23_8: # %entry
+; RV32-NEXT:    and a4, a2, a4
+; RV32-NEXT:    or a0, a0, a5
+; RV32-NEXT:    and a2, a2, a3
 ; RV32-NEXT:    bnez a0, .LBB23_10
-; RV32-NEXT:    j .LBB23_11
-; RV32-NEXT:  .LBB23_8:
-; RV32-NEXT:    snez a3, a0
-; RV32-NEXT:    and a4, a4, a5
-; RV32-NEXT:    beqz a1, .LBB23_7
-; RV32-NEXT:  .LBB23_9: # %entry
-; RV32-NEXT:    snez a5, a1
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    beqz a0, .LBB23_11
+; RV32-NEXT:  # %bb.9:
+; RV32-NEXT:    or a0, a2, a4
+; RV32-NEXT:    snez a1, a0
 ; RV32-NEXT:  .LBB23_10: # %entry
-; RV32-NEXT:    mv a5, a3
-; RV32-NEXT:  .LBB23_11: # %entry
-; RV32-NEXT:    neg a2, a5
-; RV32-NEXT:    and a0, a2, a4
-; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    neg a1, a1
+; RV32-NEXT:    and a0, a1, a2
+; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
@@ -1865,44 +1844,37 @@
 ; RV32-NEXT:  .LBB26_2:
 ; RV32-NEXT:    seqz a2, a0
 ; RV32-NEXT:  .LBB26_3: # %entry
-; RV32-NEXT:    lw a3, 12(sp)
-; RV32-NEXT:    xori a4, a0, 1
-; RV32-NEXT:    or a4, a4, a1
-; RV32-NEXT:    seqz a4, a4
-; RV32-NEXT:    addi a4, a4, -1
-; RV32-NEXT:    and a2, a4, a2
-; RV32-NEXT:    neg a4, a2
-; RV32-NEXT:    bnez a2, .LBB26_5
+; RV32-NEXT:    xori a3, a0, 1
+; RV32-NEXT:    or a3, a3, a1
+; RV32-NEXT:    seqz a3, a3
+; RV32-NEXT:    addi a3, a3, -1
+; RV32-NEXT:    and a3, a3, a2
+; RV32-NEXT:    neg a2, a3
+; RV32-NEXT:    bnez a3, .LBB26_5
 ; RV32-NEXT:  # %bb.4: # %entry
 ; RV32-NEXT:    li a0, 1
 ; RV32-NEXT:  .LBB26_5: # %entry
-; RV32-NEXT:    lw a5, 8(sp)
-; RV32-NEXT:    and a2, a4, a1
-; RV32-NEXT:    and a1, a4, a3
-; RV32-NEXT:    beqz a2, .LBB26_8
+; RV32-NEXT:    lw a3, 8(sp)
+; RV32-NEXT:    lw a4, 12(sp)
+; RV32-NEXT:    and a5, a2, a1
+; RV32-NEXT:    beqz a5, .LBB26_7
 ; RV32-NEXT:  # %bb.6: # %entry
-; RV32-NEXT:    sgtz a3, a2
-; RV32-NEXT:    and a4, a4, a5
-; RV32-NEXT:    bnez a1, .LBB26_9
+; RV32-NEXT:    sgtz a1, a5
+; RV32-NEXT:    j .LBB26_8
 ; RV32-NEXT:  .LBB26_7:
-; RV32-NEXT:    snez a5, a4
-; RV32-NEXT:    or a0, a0, a2
+; RV32-NEXT:    snez a1, a0
+; RV32-NEXT:  .LBB26_8: # %entry
+; RV32-NEXT:    and a4, a2, a4
+; RV32-NEXT:    or a0, a0, a5
+; RV32-NEXT:    and a2, a2, a3
 ; RV32-NEXT:    bnez a0, .LBB26_10
-; RV32-NEXT:    j .LBB26_11
-; RV32-NEXT:  .LBB26_8:
-; RV32-NEXT:    snez a3, a0
-; RV32-NEXT:    and a4, a4, a5
-; RV32-NEXT:    beqz a1, .LBB26_7
-; RV32-NEXT:  .LBB26_9: # %entry
-; RV32-NEXT:    snez a5, a1
-; RV32-NEXT:    or a0, a0, a2
-; RV32-NEXT:    beqz a0, .LBB26_11
+; RV32-NEXT:  # %bb.9:
+; RV32-NEXT:    or a0, a2, a4
+; RV32-NEXT:    snez a1, a0
 ; RV32-NEXT:  .LBB26_10: # %entry
-; RV32-NEXT:    mv a5, a3
-; RV32-NEXT:  .LBB26_11: # %entry
-; RV32-NEXT:    neg a2, a5
-; RV32-NEXT:    and a0, a2, a4
-; RV32-NEXT:    and a1, a2, a1
+; RV32-NEXT:    neg a1, a1
+; RV32-NEXT:    and a0, a1, a2
+; RV32-NEXT:    and a1, a1, a4
 ; RV32-NEXT:    lw ra, 28(sp) # 4-byte Folded Reload
 ; RV32-NEXT:    addi sp, sp, 32
 ; RV32-NEXT:    ret
diff --git a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
--- a/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
+++ b/llvm/test/CodeGen/RISCV/lack-of-signed-truncation-check.ll
@@ -353,15 +353,10 @@
 ; RV32I-NEXT:    add a2, a0, a2
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    beq a0, a1, .LBB10_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltiu a0, a0, -1
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB10_2:
-; RV32I-NEXT:    lui a0, 1048560
-; RV32I-NEXT:    sltu a0, a2, a0
+; RV32I-NEXT:    lui a1, 1048560
+; RV32I-NEXT:    sltu a1, a2, a1
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64I-LABEL: add_ultcmp_i64_i16:
@@ -399,14 +394,9 @@
 ; RV32I-NEXT:    addi a2, a0, -128
 ; RV32I-NEXT:    sltu a0, a2, a0
 ; RV32I-NEXT:    add a0, a1, a0
-; RV32I-NEXT:    addi a0, a0, -1
-; RV32I-NEXT:    li a1, -1
-; RV32I-NEXT:    beq a0, a1, .LBB11_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    sltiu a0, a0, -1
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB11_2:
-; RV32I-NEXT:    sltiu a0, a2, -256
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    sltiu a1, a2, -256
+; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64-LABEL: add_ultcmp_i64_i8:
@@ -614,14 +604,11 @@
 ; RV32I:       # %bb.0:
 ; RV32I-NEXT:    addi a2, a0, 128
 ; RV32I-NEXT:    sltu a0, a2, a0
-; RV32I-NEXT:    add a1, a1, a0
-; RV32I-NEXT:    beqz a1, .LBB18_2
-; RV32I-NEXT:  # %bb.1:
-; RV32I-NEXT:    snez a0, a1
-; RV32I-NEXT:    ret
-; RV32I-NEXT:  .LBB18_2:
-; RV32I-NEXT:    sltiu a0, a2, 256
-; RV32I-NEXT:    xori a0, a0, 1
+; RV32I-NEXT:    add a0, a1, a0
+; RV32I-NEXT:    snez a0, a0
+; RV32I-NEXT:    sltiu a1, a2, 256
+; RV32I-NEXT:    xori a1, a1, 1
+; RV32I-NEXT:    or a0, a1, a0
 ; RV32I-NEXT:    ret
 ;
 ; RV64-LABEL: add_ugecmp_i64_i8:
diff --git a/llvm/test/CodeGen/VE/Scalar/br_cc.ll b/llvm/test/CodeGen/VE/Scalar/br_cc.ll
--- a/llvm/test/CodeGen/VE/Scalar/br_cc.ll
+++ b/llvm/test/CodeGen/VE/Scalar/br_cc.ll
@@ -551,13 +551,13 @@
 ; CHECK-LABEL: br_cc_u128_imm:
 ; CHECK:       # %bb.0:
 ; CHECK-NEXT:    or %s2, 0, (0)1
-; CHECK-NEXT:    cmps.l %s3, %s1, (0)1
-; CHECK-NEXT:    or %s4, 0, (0)1
-; CHECK-NEXT:    cmov.l.ne %s4, (63)0, %s3
+; CHECK-NEXT:    cmps.l %s1, %s1, (0)1
+; CHECK-NEXT:    or %s3, 0, (0)1
+; CHECK-NEXT:    cmov.l.ne %s3, (63)0, %s1
 ; CHECK-NEXT:    cmpu.l %s0, %s0, (58)0
 ; CHECK-NEXT:    cmov.l.gt %s2, (63)0, %s0
-; CHECK-NEXT:    cmov.l.eq %s4, %s2, %s1
-; CHECK-NEXT:    brne.w 0, %s4, .LBB{{[0-9]+}}_2
+; CHECK-NEXT:    or %s0, %s2, %s3
+; CHECK-NEXT:    brne.w 0, %s0, .LBB{{[0-9]+}}_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
@@ -883,15 +883,14 @@
 define void @br_cc_imm_u128(i128 %0) {
 ; CHECK-LABEL: br_cc_imm_u128:
 ; CHECK:       # %bb.0:
-; CHECK-NEXT:    cmps.l %s2, %s1, (0)0
+; CHECK-NEXT:    cmps.l %s1, %s1, (0)0
+; CHECK-NEXT:    or %s2, 0, (0)1
 ; CHECK-NEXT:    or %s3, 0, (0)1
-; CHECK-NEXT:    or %s4, 0, (0)1
-; CHECK-NEXT:    cmov.l.ne %s4, (63)0, %s2
+; CHECK-NEXT:    cmov.l.ne %s3, (63)0, %s1
 ; CHECK-NEXT:    cmpu.l %s0, %s0, (58)1
-; CHECK-NEXT:    cmov.l.lt %s3, (63)0, %s0
-; CHECK-NEXT:    cmpu.l %s0, %s1, (0)0
-; CHECK-NEXT:    cmov.l.eq %s4, %s3, %s0
-; CHECK-NEXT:    brne.w 0, %s4, .LBB{{[0-9]+}}_2
+; CHECK-NEXT:    cmov.l.lt %s2, (63)0, %s0
+; CHECK-NEXT:    or %s0, %s2, %s3
+; CHECK-NEXT:    brne.w 0, %s0, .LBB{{[0-9]+}}_2
 ; CHECK-NEXT:  # %bb.1:
 ; CHECK-NEXT:    #APP
 ; CHECK-NEXT:    nop
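
Why the OR is sound: the generic expansion here materializes
select(LHSHi == RHSHi, LoCmp, HiCmp). Under the new guard, HiCmp is exactly
the value of (LHSHi != RHSHi), so the select only returns HiCmp when HiCmp is
true, and the whole expression collapses to LoCmp | HiCmp. Below is a minimal
standalone C++ sketch of that identity (illustrative only, not LLVM code; both
helper names are invented for this example):

    #include <cassert>
    #include <cstdint>

    // Generic expansion: use the low-half comparison when the high halves
    // are equal, otherwise use the high-half comparison result.
    static bool expandWithSelect(uint32_t LHSHi, uint32_t RHSHi, bool LoCmp,
                                 bool HiCmp) {
      return LHSHi == RHSHi ? LoCmp : HiCmp;
    }

    // Folded form emitted by the patch.
    static bool expandWithOr(bool LoCmp, bool HiCmp) {
      return LoCmp || HiCmp;
    }

    int main() {
      // Exhaustively verify the identity under the patch's precondition
      // that HiCmp is exactly (LHSHi != RHSHi).
      for (int Bits = 0; Bits < 8; ++Bits) {
        uint32_t LHSHi = Bits & 1;
        uint32_t RHSHi = (Bits >> 1) & 1;
        bool LoCmp = ((Bits >> 2) & 1) != 0;
        bool HiCmp = LHSHi != RHSHi;
        assert(expandWithSelect(LHSHi, RHSHi, LoCmp, HiCmp) ==
               expandWithOr(LoCmp, HiCmp));
      }
      return 0;
    }

The effect is visible throughout the test updates above: the cmov.l.eq in the
VE output and the branchy select diamonds in the RISC-V output are replaced by
a single or of the two comparison results.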