diff --git a/llvm/test/Transforms/InstCombine/smax-icmp.ll b/llvm/test/Transforms/InstCombine/smax-icmp.ll --- a/llvm/test/Transforms/InstCombine/smax-icmp.ll +++ b/llvm/test/Transforms/InstCombine/smax-icmp.ll @@ -232,3 +232,596 @@ ret i1 %cmp2 } +declare void @use(i1 %c) + +define void @eq_smax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_smax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge 
i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @eq_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_smax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 
%cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @slt_smax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @slt_smax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; 
CHECK-NEXT: ret void +; + %cmp = icmp slt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @slt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @slt_smax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; 
CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp slt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sle_smax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sle_smax_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 
[[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sle i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sle_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sle_smax_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; 
CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sle i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sgt_smax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sgt_smax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 
[[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sgt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sgt_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sgt_smax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; 
CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sgt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sge_smax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sge_smax_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[X]], i32 
[[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sge_smax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sge_smax_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = 
icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 
%cmp10) + ret void +end: + ret void +} + +declare i32 @llvm.smax.i32(i32, i32) diff --git a/llvm/test/Transforms/InstCombine/smin-icmp.ll b/llvm/test/Transforms/InstCombine/smin-icmp.ll --- a/llvm/test/Transforms/InstCombine/smin-icmp.ll +++ b/llvm/test/Transforms/InstCombine/smin-icmp.ll @@ -331,3 +331,967 @@ ret i1 %cmp2 } +declare void @use(i1 %c) + +define void @eq_smin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_smin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + 
%cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @eq_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_smin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end 
+if: + %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @slt_smin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @slt_smin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: 
call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp slt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @slt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @slt_smin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 
[[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp slt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sle_smin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sle_smin_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: 
[[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sle i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sle_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sle_smin_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = 
icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sle i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sgt_smin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sgt_smin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: 
call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sgt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sgt_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sgt_smin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp sgt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 
[[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sgt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sge_smin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @sge_smin_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; 
CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @sge_smin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; 
CHECK-LABEL: @sge_smin_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp slt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.smin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp sge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.smin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call 
void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +declare void @use_v2i1(<2 x i1> %c) + +; icmp pred smin(X, Y), X +define void @eq_smin_v2i32(<2 x i32> %x, <2 x i32> %y) { +; CHECK-LABEL: @eq_smin_v2i32( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[X:%.*]], <2 x i32> [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp sgt <2 x i32> [[X]], [[Y]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> ) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sle <2 x i32> [[X]], [[Y]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], [[X]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule <2 x i32> [[COND]], [[X]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], [[X]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge <2 x i32> [[COND]], [[X]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp sle <2 x i32> [[X]], [[Y]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp sgt <2 x i32> [[X]], [[Y]] +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> %x, <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp 
ugt <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, %x + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where C1 == C2 +define void @eq_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @eq_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[Y]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> ) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp sgt <2 x i32> [[Y]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp slt <2 x i32> [[Y]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x 
i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where C1 < C2 +define void @slt_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @slt_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> ) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> ) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> zeroinitializer) +; CHECK-NEXT: call void @use_v2i1(<2 x i1> ) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void 
@use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where C1 <= C2 +define void @sle_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @sle_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void 
@use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where C1 > C2 +define void @sgt_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @sgt_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void 
@use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where C1 >= C2 +define void @sge_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @sge_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne 
<2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; icmp pred smin(C1, Y), C2 where (icmp pred' C1, C2) is not a constant splat for all pred' +define void @unknown_smin_v2i32_constant(<2 x i32> %y) { +; CHECK-LABEL: @unknown_smin_v2i32_constant( +; CHECK-NEXT: [[COND:%.*]] = call <2 x i32> @llvm.smin.v2i32(<2 x i32> [[Y:%.*]], <2 x i32> ) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp slt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sgt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ult <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> 
[[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp ugt <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne <2 x i32> [[COND]], +; CHECK-NEXT: call void @use_v2i1(<2 x i1> [[CMP10]]) +; CHECK-NEXT: ret void +; + %cond = call <2 x i32> @llvm.smin.v2i32(<2 x i32> , <2 x i32> %y) + %cmp1 = icmp slt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp1) + %cmp2 = icmp sle <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp2) + %cmp3 = icmp sgt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp3) + %cmp4 = icmp sge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp4) + %cmp5 = icmp ult <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp5) + %cmp6 = icmp ule <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp6) + %cmp7 = icmp ugt <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp7) + %cmp8 = icmp uge <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp8) + %cmp9 = icmp eq <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp9) + %cmp10 = icmp ne <2 x i32> %cond, + call void @use_v2i1(<2 x i1> %cmp10) + ret void +} + +; Test cases from PR62898 + +define i1 @smin_or_bitwise(i32 %x) { +; CHECK-LABEL: @smin_or_bitwise( +; CHECK-NEXT: [[COND:%.*]] = tail call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 1) +; CHECK-NEXT: [[LOBIT:%.*]] = or i32 [[COND]], [[X]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[LOBIT]], 0 +; CHECK-NEXT: ret i1 [[TOBOOL]] +; + %cond = tail call i32 @llvm.smin.i32(i32 %x, i32 1) + %lobit = or i32 %cond, %x + %tobool = icmp slt i32 %lobit, 0 + ret i1 %tobool +} + +define i1 @smin_and_bitwise(i32 %x) { +; CHECK-LABEL: @smin_and_bitwise( +; CHECK-NEXT: [[COND:%.*]] = tail call i32 @llvm.smin.i32(i32 [[X:%.*]], i32 1) +; CHECK-NEXT: [[LOBIT:%.*]] = and i32 [[COND]], [[X]] +; CHECK-NEXT: [[TOBOOL:%.*]] = icmp slt i32 [[LOBIT]], 0 +; CHECK-NEXT: ret i1 [[TOBOOL]] +; + %cond = tail call i32 
@llvm.smin.i32(i32 %x, i32 1) + %lobit = and i32 %cond, %x + %tobool = icmp slt i32 %lobit, 0 + ret i1 %tobool +} + +declare i32 @llvm.smin.i32(i32, i32) +declare <2 x i32> @llvm.smin.v2i32(<2 x i32>, <2 x i32>) diff --git a/llvm/test/Transforms/InstCombine/umax-icmp.ll b/llvm/test/Transforms/InstCombine/umax-icmp.ll --- a/llvm/test/Transforms/InstCombine/umax-icmp.ll +++ b/llvm/test/Transforms/InstCombine/umax-icmp.ll @@ -232,3 +232,596 @@ ret i1 %cmp2 } +declare void @use(i1 %c) + +define void @eq_umax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_umax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 
%x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @eq_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_umax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 
[[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ult_umax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ult_umax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = 
icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ult i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ult_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ult_umax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], 
[[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ult i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ule_umax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ule_umax_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void 
@use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ule i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ule_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ule_umax_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) 
+; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ule i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ugt_umax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ugt_umax_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 
[[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ugt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ugt_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ugt_umax_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 
@llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ugt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @uge_umax_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @uge_umax_contextual( +; CHECK-NEXT: 
[[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp uge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + 
call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @uge_umax_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @uge_umax_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umax.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp uge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umax.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 
%cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +declare i32 @llvm.umax.i32(i32, i32) diff --git a/llvm/test/Transforms/InstCombine/umin-icmp.ll b/llvm/test/Transforms/InstCombine/umin-icmp.ll --- a/llvm/test/Transforms/InstCombine/umin-icmp.ll +++ b/llvm/test/Transforms/InstCombine/umin-icmp.ll @@ -232,3 +232,596 @@ ret i1 %cmp2 } +declare void @use(i1 %c) + +define void @eq_umin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_umin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + 
br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @eq_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @eq_umin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp eq i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: 
[[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp eq i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ult_umin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ult_umin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] 
+; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ult i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ult_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ult_umin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void 
@use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ult i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ule_umin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ule_umin_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: 
[[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ule i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ule_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ule_umin_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = 
icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ule i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ugt_umin_contextual(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ugt_umin_contextual( +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label [[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: 
call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ugt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @ugt_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @ugt_umin_contextual_commuted( +; CHECK-NEXT: [[CMP:%.*]] = icmp ugt i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP]], label [[IF:%.*]], label 
[[END:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp ugt i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +define void @uge_umin_contextual(i32 %x, i32 %y, i32 
%z) { +; CHECK-LABEL: @uge_umin_contextual( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[X]], i32 [[Y:%.*]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; + %cmp = icmp uge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %x, i32 %y) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z + call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call 
void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +; Same pattern as @uge_umin_contextual but with the umin operands commuted: +; %x, known to be uge %z on the 'if' path, is the SECOND operand of the umin. +; NOTE(review): the CHECK lines appear autogenerated (update_test_checks.py +; capture style) -- regenerate them rather than hand-editing. +define void @uge_umin_contextual_commuted(i32 %x, i32 %y, i32 %z) { +; CHECK-LABEL: @uge_umin_contextual_commuted( +; CHECK-NEXT: [[CMP_NOT:%.*]] = icmp ult i32 [[X:%.*]], [[Z:%.*]] +; CHECK-NEXT: br i1 [[CMP_NOT]], label [[END:%.*]], label [[IF:%.*]] +; CHECK: if: +; CHECK-NEXT: [[COND:%.*]] = call i32 @llvm.umin.i32(i32 [[Y:%.*]], i32 [[X]]) +; CHECK-NEXT: [[CMP1:%.*]] = icmp slt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP1]]) +; CHECK-NEXT: [[CMP2:%.*]] = icmp sle i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP2]]) +; CHECK-NEXT: [[CMP3:%.*]] = icmp sgt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP3]]) +; CHECK-NEXT: [[CMP4:%.*]] = icmp sge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP4]]) +; CHECK-NEXT: [[CMP5:%.*]] = icmp ult i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP5]]) +; CHECK-NEXT: [[CMP6:%.*]] = icmp ule i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP6]]) +; CHECK-NEXT: [[CMP7:%.*]] = icmp ugt i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP7]]) +; CHECK-NEXT: [[CMP8:%.*]] = icmp uge i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP8]]) +; CHECK-NEXT: [[CMP9:%.*]] = icmp eq i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP9]]) +; CHECK-NEXT: [[CMP10:%.*]] = icmp ne i32 [[COND]], [[Z]] +; CHECK-NEXT: call void @use(i1 [[CMP10]]) +; CHECK-NEXT: ret void +; CHECK: end: +; CHECK-NEXT: ret void +; +; %x uge %z holds inside %if, but %y is unconstrained, so none of the compares +; of umin(%y, %x) against %z can be folded; the checks keep all ten icmps. + %cmp = icmp uge i32 %x, %z + br i1 %cmp, label %if, label %end +if: + %cond = call i32 @llvm.umin.i32(i32 %y, i32 %x) + %cmp1 = icmp slt i32 %cond, %z + call void @use(i1 %cmp1) + %cmp2 = icmp sle i32 %cond, %z + call void @use(i1 %cmp2) + %cmp3 = icmp sgt i32 %cond, %z + call void @use(i1 %cmp3) + %cmp4 = icmp sge i32 %cond, %z + call void @use(i1 %cmp4) + %cmp5 = icmp ult i32 %cond, %z + call void @use(i1 %cmp5) + %cmp6 = icmp ule i32 %cond, %z 
+ call void @use(i1 %cmp6) + %cmp7 = icmp ugt i32 %cond, %z + call void @use(i1 %cmp7) + %cmp8 = icmp uge i32 %cond, %z + call void @use(i1 %cmp8) + %cmp9 = icmp eq i32 %cond, %z + call void @use(i1 %cmp9) + %cmp10 = icmp ne i32 %cond, %z + call void @use(i1 %cmp10) + ret void +end: + ret void +} + +declare i32 @llvm.umin.i32(i32, i32)