Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -8133,9 +8133,8 @@
   // Only do this if the inner op has one use since this will just increases
   // register pressure for no benefit.
   if (Opc != AMDGPUISD::FMIN_LEGACY && Opc != AMDGPUISD::FMAX_LEGACY &&
-      !VT.isVector() && VT != MVT::f64 &&
+      !VT.isVector() && VT.isSimple() && VT != MVT::f64 && VT != MVT::i8 &&
       ((VT != MVT::f16 && VT != MVT::i16) || Subtarget->hasMin3Max3_16())) {
     // max(max(a, b), c) -> max3(a, b, c)
     // min(min(a, b), c) -> min3(a, b, c)
Index: test/CodeGen/AMDGPU/fmin3.ll
===================================================================
--- test/CodeGen/AMDGPU/fmin3.ll
+++ test/CodeGen/AMDGPU/fmin3.ll
@@ -118,7 +118,33 @@
   ret <2 x half> %res
 }
 
+; GCN-LABEL: {{^}}test_fmin3_olt_0_f64:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @test_fmin3_olt_0_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) #0 {
+  %a = load volatile double, double addrspace(1)* %aptr, align 4
+  %b = load volatile double, double addrspace(1)* %bptr, align 4
+  %c = load volatile double, double addrspace(1)* %cptr, align 4
+  %f0 = call double @llvm.minnum.f64(double %a, double %b)
+  %f1 = call double @llvm.minnum.f64(double %f0, double %c)
+  store double %f1, double addrspace(1)* %out, align 4
+  ret void
+}
+
+; Commute operand of second fmin
+; GCN-LABEL: {{^}}test_fmin3_olt_1_f64:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @test_fmin3_olt_1_f64(double addrspace(1)* %out, double addrspace(1)* %aptr, double addrspace(1)* %bptr, double addrspace(1)* %cptr) #0 {
+  %a = load volatile double, double addrspace(1)* %aptr, align 4
+  %b = load volatile double, double addrspace(1)* %bptr, align 4
+  %c = load volatile double, double addrspace(1)* %cptr, align 4
+  %f0 = call double @llvm.minnum.f64(double %a, double %b)
+  %f1 = call double @llvm.minnum.f64(double %c, double %f0)
+  store double %f1, double addrspace(1)* %out, align 4
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #1
+declare double @llvm.minnum.f64(double, double) #1
 declare float @llvm.minnum.f32(float, float) #1
 declare half @llvm.minnum.f16(half, half) #1
 declare <2 x half> @llvm.minnum.v2f16(<2 x half>, <2 x half>)
Index: test/CodeGen/AMDGPU/min3.ll
===================================================================
--- test/CodeGen/AMDGPU/min3.ll
+++ test/CodeGen/AMDGPU/min3.ll
@@ -158,6 +158,178 @@
   ret void
 }
 
+; GCN-LABEL: {{^}}v_test_imin3_slt_i8:
+; SI: v_min3_i32
+
+; VI: v_min_i16
+; VI: v_min_i16
+
+; GFX9: v_min3_i16
+define amdgpu_kernel void @v_test_imin3_slt_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr, i8 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i8, i8 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i8, i8 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i8, i8 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
+  %a = load i8, i8 addrspace(1)* %gep0
+  %b = load i8, i8 addrspace(1)* %gep1
+  %c = load i8, i8 addrspace(1)* %gep2
+  %icmp0 = icmp slt i8 %a, %b
+  %i0 = select i1 %icmp0, i8 %a, i8 %b
+  %icmp1 = icmp slt i8 %i0, %c
+  %i1 = select i1 %icmp1, i8 %i0, i8 %c
+  store i8 %i1, i8 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umin3_ult_i8:
+; SI: v_min3_u32
+
+; VI: v_min_u16
+; VI: v_min_u16
+
+; GFX9: v_min3_u16
+define amdgpu_kernel void @v_test_umin3_ult_i8(i8 addrspace(1)* %out, i8 addrspace(1)* %aptr, i8 addrspace(1)* %bptr, i8 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i8, i8 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i8, i8 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i8, i8 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i8, i8 addrspace(1)* %out, i32 %tid
+  %a = load i8, i8 addrspace(1)* %gep0
+  %b = load i8, i8 addrspace(1)* %gep1
+  %c = load i8, i8 addrspace(1)* %gep2
+  %icmp0 = icmp ult i8 %a, %b
+  %i0 = select i1 %icmp0, i8 %a, i8 %b
+  %icmp1 = icmp ult i8 %i0, %c
+  %i1 = select i1 %icmp1, i8 %i0, i8 %c
+  store i8 %i1, i8 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_imin3_slt_i7:
+; SI: v_min3_i32
+
+; VI: v_min_i16
+; VI: v_min_i16
+
+; GFX9: v_min3_i16
+define amdgpu_kernel void @v_test_imin3_slt_i7(i7 addrspace(1)* %out, i7 addrspace(1)* %aptr, i7 addrspace(1)* %bptr, i7 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i7, i7 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i7, i7 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i7, i7 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i7, i7 addrspace(1)* %out, i32 %tid
+  %a = load i7, i7 addrspace(1)* %gep0
+  %b = load i7, i7 addrspace(1)* %gep1
+  %c = load i7, i7 addrspace(1)* %gep2
+  %icmp0 = icmp slt i7 %a, %b
+  %i0 = select i1 %icmp0, i7 %a, i7 %b
+  %icmp1 = icmp slt i7 %i0, %c
+  %i1 = select i1 %icmp1, i7 %i0, i7 %c
+  store i7 %i1, i7 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umin3_ult_i7:
+; SI: v_min3_u32
+
+; VI: v_min_u16
+; VI: v_min_u16
+
+; GFX9: v_min3_u16
+define amdgpu_kernel void @v_test_umin3_ult_i7(i7 addrspace(1)* %out, i7 addrspace(1)* %aptr, i7 addrspace(1)* %bptr, i7 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i7, i7 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i7, i7 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i7, i7 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i7, i7 addrspace(1)* %out, i32 %tid
+  %a = load i7, i7 addrspace(1)* %gep0
+  %b = load i7, i7 addrspace(1)* %gep1
+  %c = load i7, i7 addrspace(1)* %gep2
+  %icmp0 = icmp ult i7 %a, %b
+  %i0 = select i1 %icmp0, i7 %a, i7 %b
+  %icmp1 = icmp ult i7 %i0, %c
+  %i1 = select i1 %icmp1, i7 %i0, i7 %c
+  store i7 %i1, i7 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_imin3_slt_i33:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @v_test_imin3_slt_i33(i33 addrspace(1)* %out, i33 addrspace(1)* %aptr, i33 addrspace(1)* %bptr, i33 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i33, i33 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i33, i33 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i33, i33 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i33, i33 addrspace(1)* %out, i32 %tid
+  %a = load i33, i33 addrspace(1)* %gep0
+  %b = load i33, i33 addrspace(1)* %gep1
+  %c = load i33, i33 addrspace(1)* %gep2
+  %icmp0 = icmp slt i33 %a, %b
+  %i0 = select i1 %icmp0, i33 %a, i33 %b
+  %icmp1 = icmp slt i33 %i0, %c
+  %i1 = select i1 %icmp1, i33 %i0, i33 %c
+  store i33 %i1, i33 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umin3_ult_i33:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @v_test_umin3_ult_i33(i33 addrspace(1)* %out, i33 addrspace(1)* %aptr, i33 addrspace(1)* %bptr, i33 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i33, i33 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i33, i33 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i33, i33 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i33, i33 addrspace(1)* %out, i32 %tid
+  %a = load i33, i33 addrspace(1)* %gep0
+  %b = load i33, i33 addrspace(1)* %gep1
+  %c = load i33, i33 addrspace(1)* %gep2
+  %icmp0 = icmp ult i33 %a, %b
+  %i0 = select i1 %icmp0, i33 %a, i33 %b
+  %icmp1 = icmp ult i33 %i0, %c
+  %i1 = select i1 %icmp1, i33 %i0, i33 %c
+  store i33 %i1, i33 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_imin3_slt_i64:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @v_test_imin3_slt_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i64, i64 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
+  %a = load i64, i64 addrspace(1)* %gep0
+  %b = load i64, i64 addrspace(1)* %gep1
+  %c = load i64, i64 addrspace(1)* %gep2
+  %icmp0 = icmp slt i64 %a, %b
+  %i0 = select i1 %icmp0, i64 %a, i64 %b
+  %icmp1 = icmp slt i64 %i0, %c
+  %i1 = select i1 %icmp1, i64 %i0, i64 %c
+  store i64 %i1, i64 addrspace(1)* %outgep
+  ret void
+}
+
+; GCN-LABEL: {{^}}v_test_umin3_ult_i64:
+; GCN-NOT: v_min3
+define amdgpu_kernel void @v_test_umin3_ult_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %aptr, i64 addrspace(1)* %bptr, i64 addrspace(1)* %cptr) #0 {
+  %tid = call i32 @llvm.amdgcn.workitem.id.x()
+  %gep0 = getelementptr i64, i64 addrspace(1)* %aptr, i32 %tid
+  %gep1 = getelementptr i64, i64 addrspace(1)* %bptr, i32 %tid
+  %gep2 = getelementptr i64, i64 addrspace(1)* %cptr, i32 %tid
+  %outgep = getelementptr i64, i64 addrspace(1)* %out, i32 %tid
+  %a = load i64, i64 addrspace(1)* %gep0
+  %b = load i64, i64 addrspace(1)* %gep1
+  %c = load i64, i64 addrspace(1)* %gep2
+  %icmp0 = icmp ult i64 %a, %b
+  %i0 = select i1 %icmp0, i64 %a, i64 %b
+  %icmp1 = icmp ult i64 %i0, %c
+  %i1 = select i1 %icmp1, i64 %i0, i64 %c
+  store i64 %i1, i64 addrspace(1)* %outgep
+  ret void
+}
+
 declare i32 @llvm.amdgcn.workitem.id.x() #1
 
 attributes #0 = { nounwind }