Index: lib/Target/R600/SIInstrInfo.h =================================================================== --- lib/Target/R600/SIInstrInfo.h +++ lib/Target/R600/SIInstrInfo.h @@ -114,7 +114,7 @@ // register. If there is no hardware instruction that can store to \p // DstRC, then AMDGPU::COPY is returned. unsigned getMovOpcode(const TargetRegisterClass *DstRC) const; - unsigned commuteOpcode(unsigned Opcode) const; + unsigned commuteOpcode(const MachineInstr &MI) const; MachineInstr *commuteInstruction(MachineInstr *MI, bool NewMI = false) const override; Index: lib/Target/R600/SIInstrInfo.cpp =================================================================== --- lib/Target/R600/SIInstrInfo.cpp +++ lib/Target/R600/SIInstrInfo.cpp @@ -418,7 +418,116 @@ } } -unsigned SIInstrInfo::commuteOpcode(unsigned Opcode) const { +// FIXME: This should be generated from an InstrMapping. The problem is I don't +// see how it can be used to generate a mapping both ways without deciding one +// of the compares is the "commuted" version. +static unsigned commuteCompareOpcode(unsigned Opcode, unsigned Size) { + // TODO: Handle cmps / cmpx / cmpsx variants. + + static const unsigned cmpTable32[][2] = { + // Signed compares. + { AMDGPU::V_CMP_F_I32_e64, AMDGPU::V_CMP_F_I32_e64 }, // same + { AMDGPU::V_CMP_LT_I32_e64, AMDGPU::V_CMP_GT_I32_e64 }, + { AMDGPU::V_CMP_EQ_I32_e64, AMDGPU::V_CMP_EQ_I32_e64 }, // same + { AMDGPU::V_CMP_LE_I32_e64, AMDGPU::V_CMP_GE_I32_e64 }, + { AMDGPU::V_CMP_GT_I32_e64, AMDGPU::V_CMP_LT_I32_e64 }, + { AMDGPU::V_CMP_NE_I32_e64, AMDGPU::V_CMP_NE_I32_e64 }, // same + { AMDGPU::V_CMP_GE_I32_e64, AMDGPU::V_CMP_LE_I32_e64 }, + { AMDGPU::V_CMP_T_I32_e64, AMDGPU::V_CMP_T_I32_e64 }, // same + + // Unsigned compares. + { AMDGPU::V_CMP_F_U32_e64, AMDGPU::V_CMP_F_U32_e64 }, // same + { AMDGPU::V_CMP_LT_U32_e64, AMDGPU::V_CMP_GT_U32_e64 }, + { AMDGPU::V_CMP_EQ_U32_e64, AMDGPU::V_CMP_EQ_U32_e64 }, // same + { AMDGPU::V_CMP_LE_U32_e64, AMDGPU::V_CMP_GE_U32_e64 }, + { AMDGPU::V_CMP_GT_U32_e64, AMDGPU::V_CMP_LT_U32_e64 }, + { AMDGPU::V_CMP_NE_U32_e64, AMDGPU::V_CMP_NE_U32_e64 }, // same + { AMDGPU::V_CMP_GE_U32_e64, AMDGPU::V_CMP_LE_U32_e64 }, + { AMDGPU::V_CMP_T_U32_e64, AMDGPU::V_CMP_T_U32_e64 }, // same + + // FP compares. + { AMDGPU::V_CMP_F_F32_e64, AMDGPU::V_CMP_F_F32_e64 }, // same + { AMDGPU::V_CMP_LT_F32_e64, AMDGPU::V_CMP_GT_F32_e64 }, + { AMDGPU::V_CMP_EQ_F32_e64, AMDGPU::V_CMP_EQ_F32_e64 }, // same + { AMDGPU::V_CMP_LE_F32_e64, AMDGPU::V_CMP_GE_F32_e64 }, + { AMDGPU::V_CMP_GT_F32_e64, AMDGPU::V_CMP_LT_F32_e64 }, + { AMDGPU::V_CMP_LG_F32_e64, AMDGPU::V_CMP_LG_F32_e64 }, // same + { AMDGPU::V_CMP_GE_F32_e64, AMDGPU::V_CMP_LE_F32_e64 }, + { AMDGPU::V_CMP_O_F32_e64, AMDGPU::V_CMP_O_F32_e64 }, // same + { AMDGPU::V_CMP_U_F32_e64, AMDGPU::V_CMP_U_F32_e64 }, // same + { AMDGPU::V_CMP_NGE_F32_e64, AMDGPU::V_CMP_NLE_F32_e64 }, + { AMDGPU::V_CMP_NLG_F32_e64, AMDGPU::V_CMP_NLG_F32_e64 }, // same + { AMDGPU::V_CMP_NGT_F32_e64, AMDGPU::V_CMP_NLT_F32_e64 }, + { AMDGPU::V_CMP_NLE_F32_e64, AMDGPU::V_CMP_NGE_F32_e64 }, + { AMDGPU::V_CMP_NEQ_F32_e64, AMDGPU::V_CMP_NEQ_F32_e64 }, // same + { AMDGPU::V_CMP_NLT_F32_e64, AMDGPU::V_CMP_NGT_F32_e64 }, + { AMDGPU::V_CMP_TRU_F32_e64, AMDGPU::V_CMP_TRU_F32_e64 } // same + }; + + static const unsigned cmpTable64[][2] = { + // Signed compares. 
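+  // Mirrors cmpTable32 above: each row pairs an opcode with the opcode that
+  // yields the same result once src0 and src1 are swapped.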
+ { AMDGPU::V_CMP_F_I64_e64, AMDGPU::V_CMP_F_I64_e64 }, // same + { AMDGPU::V_CMP_LT_I64_e64, AMDGPU::V_CMP_GT_I64_e64 }, + { AMDGPU::V_CMP_EQ_I64_e64, AMDGPU::V_CMP_EQ_I64_e64 }, // same + { AMDGPU::V_CMP_LE_I64_e64, AMDGPU::V_CMP_GE_I64_e64 }, + { AMDGPU::V_CMP_GT_I64_e64, AMDGPU::V_CMP_LT_I64_e64 }, + { AMDGPU::V_CMP_NE_I64_e64, AMDGPU::V_CMP_NE_I64_e64 }, // same + { AMDGPU::V_CMP_GE_I64_e64, AMDGPU::V_CMP_LE_I64_e64 }, + { AMDGPU::V_CMP_T_I64_e64, AMDGPU::V_CMP_T_I64_e64 }, // same + + // Unsigned compares. + { AMDGPU::V_CMP_F_U64_e64, AMDGPU::V_CMP_F_U64_e64 }, // same + { AMDGPU::V_CMP_LT_U64_e64, AMDGPU::V_CMP_GT_U64_e64 }, + { AMDGPU::V_CMP_EQ_U64_e64, AMDGPU::V_CMP_EQ_U64_e64 }, // same + { AMDGPU::V_CMP_LE_U64_e64, AMDGPU::V_CMP_GE_U64_e64 }, + { AMDGPU::V_CMP_GT_U64_e64, AMDGPU::V_CMP_LT_U64_e64 }, + { AMDGPU::V_CMP_NE_U64_e64, AMDGPU::V_CMP_NE_U64_e64 }, // same + { AMDGPU::V_CMP_GE_U64_e64, AMDGPU::V_CMP_LE_U64_e64 }, + { AMDGPU::V_CMP_T_U64_e64, AMDGPU::V_CMP_T_U64_e64 }, // same + + // FP compares. + { AMDGPU::V_CMP_F_F64_e64, AMDGPU::V_CMP_F_F64_e64 }, // same + { AMDGPU::V_CMP_LT_F64_e64, AMDGPU::V_CMP_GT_F64_e64 }, + { AMDGPU::V_CMP_EQ_F64_e64, AMDGPU::V_CMP_EQ_F64_e64 }, // same + { AMDGPU::V_CMP_LE_F64_e64, AMDGPU::V_CMP_GE_F64_e64 }, + { AMDGPU::V_CMP_GT_F64_e64, AMDGPU::V_CMP_LT_F64_e64 }, + { AMDGPU::V_CMP_LG_F64_e64, AMDGPU::V_CMP_LG_F64_e64 }, // same + { AMDGPU::V_CMP_GE_F64_e64, AMDGPU::V_CMP_LE_F64_e64 }, + { AMDGPU::V_CMP_O_F64_e64, AMDGPU::V_CMP_O_F64_e64 }, // same + { AMDGPU::V_CMP_U_F64_e64, AMDGPU::V_CMP_U_F64_e64 }, // same + { AMDGPU::V_CMP_NGE_F64_e64, AMDGPU::V_CMP_NLE_F64_e64 }, + { AMDGPU::V_CMP_NLG_F64_e64, AMDGPU::V_CMP_NLG_F64_e64 }, // same + { AMDGPU::V_CMP_NGT_F64_e64, AMDGPU::V_CMP_NLT_F64_e64 }, + { AMDGPU::V_CMP_NLE_F64_e64, AMDGPU::V_CMP_NGE_F64_e64 }, + { AMDGPU::V_CMP_NEQ_F64_e64, AMDGPU::V_CMP_NEQ_F64_e64 }, // same + { AMDGPU::V_CMP_NLT_F64_e64, AMDGPU::V_CMP_NGT_F64_e64 }, + { AMDGPU::V_CMP_TRU_F64_e64, AMDGPU::V_CMP_TRU_F64_e64 } // same + }; + + if (Size == 4) { + for (const auto& Entry : cmpTable32) { + if (Entry[0] == Opcode) + return Entry[1]; + } + } else if (Size == 8) { + for (const auto& Entry : cmpTable64) { + if (Entry[0] == Opcode) + return Entry[1]; + } + } + + llvm_unreachable("unhandled comparison"); +} + +unsigned SIInstrInfo::commuteOpcode(const MachineInstr &MI) const { + const unsigned Opcode = MI.getOpcode(); + + if (MI.isCompare()) { + int Src0Idx = AMDGPU::getNamedOperandIdx(Opcode, AMDGPU::OpName::src0); + unsigned Size = getOpSize(MI, Src0Idx); + return commuteCompareOpcode(Opcode, Size); + } + int NewOpc; // Try to map original to commuted opcode @@ -792,7 +901,7 @@ } if (MI) - MI->setDesc(get(commuteOpcode(MI->getOpcode()))); + MI->setDesc(get(commuteOpcode(*MI))); return MI; } Index: lib/Target/R600/SIInstructions.td =================================================================== --- lib/Target/R600/SIInstructions.td +++ lib/Target/R600/SIInstructions.td @@ -501,6 +501,7 @@ let isCompare = 1 in { +let isCommutable = 1 in { defm V_CMP_F_F32 : VOPC_F32 , "v_cmp_f_f32">; defm V_CMP_LT_F32 : VOPC_F32 , "v_cmp_lt_f32", COND_OLT>; defm V_CMP_EQ_F32 : VOPC_F32 , "v_cmp_eq_f32", COND_OEQ>; @@ -517,6 +518,7 @@ defm V_CMP_NEQ_F32 : VOPC_F32 , "v_cmp_neq_f32", COND_UNE>; defm V_CMP_NLT_F32 : VOPC_F32 , "v_cmp_nlt_f32", COND_UGE>; defm V_CMP_TRU_F32 : VOPC_F32 , "v_cmp_tru_f32">; +} // End isCommutable = 1 let hasSideEffects = 1 in { @@ -539,6 +541,7 @@ } // End hasSideEffects = 1 +let isCommutable = 1 in { defm 
V_CMP_F_F64 : VOPC_F64 , "v_cmp_f_f64">; defm V_CMP_LT_F64 : VOPC_F64 , "v_cmp_lt_f64", COND_OLT>; defm V_CMP_EQ_F64 : VOPC_F64 , "v_cmp_eq_f64", COND_OEQ>; @@ -555,6 +558,7 @@ defm V_CMP_NEQ_F64 : VOPC_F64 , "v_cmp_neq_f64", COND_UNE>; defm V_CMP_NLT_F64 : VOPC_F64 , "v_cmp_nlt_f64", COND_UGE>; defm V_CMP_TRU_F64 : VOPC_F64 , "v_cmp_tru_f64">; +} // End isCommutable = 1 let hasSideEffects = 1 in { @@ -657,6 +661,7 @@ } // End SubtargetPredicate = isSICI +let isCommutable = 1 in { defm V_CMP_F_I32 : VOPC_I32 , "v_cmp_f_i32">; defm V_CMP_LT_I32 : VOPC_I32 , "v_cmp_lt_i32", COND_SLT>; defm V_CMP_EQ_I32 : VOPC_I32 , "v_cmp_eq_i32", COND_EQ>; @@ -665,6 +670,7 @@ defm V_CMP_NE_I32 : VOPC_I32 , "v_cmp_ne_i32", COND_NE>; defm V_CMP_GE_I32 : VOPC_I32 , "v_cmp_ge_i32", COND_SGE>; defm V_CMP_T_I32 : VOPC_I32 , "v_cmp_t_i32">; +} // End isCommutable = 1 let hasSideEffects = 1 in { @@ -679,6 +685,7 @@ } // End hasSideEffects = 1 +let isCommutable = 1 in { defm V_CMP_F_I64 : VOPC_I64 , "v_cmp_f_i64">; defm V_CMP_LT_I64 : VOPC_I64 , "v_cmp_lt_i64", COND_SLT>; defm V_CMP_EQ_I64 : VOPC_I64 , "v_cmp_eq_i64", COND_EQ>; @@ -687,6 +694,7 @@ defm V_CMP_NE_I64 : VOPC_I64 , "v_cmp_ne_i64", COND_NE>; defm V_CMP_GE_I64 : VOPC_I64 , "v_cmp_ge_i64", COND_SGE>; defm V_CMP_T_I64 : VOPC_I64 , "v_cmp_t_i64">; +} // End isCommutable = 1 let hasSideEffects = 1 in { @@ -701,6 +709,7 @@ } // End hasSideEffects = 1 +let isCommutable = 1 in { defm V_CMP_F_U32 : VOPC_I32 , "v_cmp_f_u32">; defm V_CMP_LT_U32 : VOPC_I32 , "v_cmp_lt_u32", COND_ULT>; defm V_CMP_EQ_U32 : VOPC_I32 , "v_cmp_eq_u32", COND_EQ>; @@ -709,6 +718,7 @@ defm V_CMP_NE_U32 : VOPC_I32 , "v_cmp_ne_u32", COND_NE>; defm V_CMP_GE_U32 : VOPC_I32 , "v_cmp_ge_u32", COND_UGE>; defm V_CMP_T_U32 : VOPC_I32 , "v_cmp_t_u32">; +} // End isCommutable = 1 let hasSideEffects = 1 in { @@ -723,6 +733,7 @@ } // End hasSideEffects = 1 +let isCommutable = 1 in { defm V_CMP_F_U64 : VOPC_I64 , "v_cmp_f_u64">; defm V_CMP_LT_U64 : VOPC_I64 , "v_cmp_lt_u64", COND_ULT>; defm V_CMP_EQ_U64 : VOPC_I64 , "v_cmp_eq_u64", COND_EQ>; @@ -731,6 +742,7 @@ defm V_CMP_NE_U64 : VOPC_I64 , "v_cmp_ne_u64", COND_NE>; defm V_CMP_GE_U64 : VOPC_I64 , "v_cmp_ge_u64", COND_UGE>; defm V_CMP_T_U64 : VOPC_I64 , "v_cmp_t_u64">; +} // End isCommutable = 1 let hasSideEffects = 1 in { Index: test/CodeGen/R600/commute-compares.ll =================================================================== --- /dev/null +++ test/CodeGen/R600/commute-compares.ll @@ -0,0 +1,697 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s + +declare i32 @llvm.r600.read.tidig.x() #0 + +; -------------------------------------------------------------------------------- +; i32 compares +; -------------------------------------------------------------------------------- + +; GCN-LABEL: {{^}}commute_eq_64_i32: +; GCN: v_cmp_eq_i32_e32 vcc, 64, v{{[0-9]+}} +define void @commute_eq_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp eq i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ne_64_i32: +; GCN: v_cmp_ne_i32_e32 vcc, 64, v{{[0-9]+}} +define void @commute_ne_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr 
i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ne i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; FIXME: Why isn't this being folded as a constant? +; GCN-LABEL: {{^}}commute_ne_litk_i32: +; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x3039 +; GCN: v_cmp_ne_i32_e32 vcc, [[K]], v{{[0-9]+}} +define void @commute_ne_litk_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ne i32 %val, 12345 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ugt_64_i32: +; GCN: v_cmp_lt_u32_e32 vcc, 64, v{{[0-9]+}} +define void @commute_ugt_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ugt i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uge_64_i32: +; GCN: v_cmp_lt_u32_e32 vcc, 63, v{{[0-9]+}} +define void @commute_uge_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp uge i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ult_64_i32: +; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}} +define void @commute_ult_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ult i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ule_63_i32: +; GCN: v_cmp_gt_u32_e32 vcc, 64, v{{[0-9]+}} +define void @commute_ule_63_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ule i32 %val, 63 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; FIXME: Undo canonicalization to gt (x + 1) since it doesn't use the inline imm + +; GCN-LABEL: {{^}}commute_ule_64_i32: +; GCN: v_mov_b32_e32 [[K:v[0-9]+]], 0x41{{$}} +; GCN: v_cmp_gt_u32_e32 vcc, [[K]], v{{[0-9]+}} +define void @commute_ule_64_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp ule i32 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sgt_neg1_i32: +; GCN: v_cmp_lt_i32_e32 vcc, -1, v{{[0-9]+}} 
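+; (sgt %val, -1 commutes to lt with the inline immediate -1 as src0)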
+define void @commute_sgt_neg1_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp sgt i32 %val, -1 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sge_neg2_i32: +; GCN: v_cmp_lt_i32_e32 vcc, -3, v{{[0-9]+}} +define void @commute_sge_neg2_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp sge i32 %val, -2 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_slt_neg16_i32: +; GCN: v_cmp_gt_i32_e32 vcc, -16, v{{[0-9]+}} +define void @commute_slt_neg16_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp slt i32 %val, -16 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sle_5_i32: +; GCN: v_cmp_gt_i32_e32 vcc, 6, v{{[0-9]+}} +define void @commute_sle_5_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i32 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i32 addrspace(1)* %gep.in + %cmp = icmp sle i32 %val, 5 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; -------------------------------------------------------------------------------- +; i64 compares +; -------------------------------------------------------------------------------- + +; GCN-LABEL: {{^}}commute_eq_64_i64: +; GCN: v_cmp_eq_i64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_eq_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp eq i64 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ne_64_i64: +; GCN: v_cmp_ne_i64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ne_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp ne i64 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ugt_64_i64: +; GCN: v_cmp_lt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ugt_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp ugt i64 %val, 64 + %ext = sext i1 %cmp to i32 + 
store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uge_64_i64: +; GCN: v_cmp_lt_u64_e32 vcc, 63, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_uge_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp uge i64 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ult_64_i64: +; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ult_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp ult i64 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ule_63_i64: +; GCN: v_cmp_gt_u64_e32 vcc, 64, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ule_63_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp ule i64 %val, 63 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; FIXME: Undo canonicalization to gt (x + 1) since it doesn't use the inline imm + +; GCN-LABEL: {{^}}commute_ule_64_i64: +; GCN-DAG: s_movk_i32 s[[KLO:[0-9]+]], 0x41{{$}} +; GCN: v_cmp_gt_u64_e32 vcc, s{{\[}}[[KLO]]:{{[0-9]+\]}}, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ule_64_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp ule i64 %val, 64 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sgt_neg1_i64: +; GCN: v_cmp_lt_i64_e32 vcc, -1, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_sgt_neg1_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp sgt i64 %val, -1 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sge_neg2_i64: +; GCN: v_cmp_lt_i64_e32 vcc, -3, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_sge_neg2_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp sge i64 %val, -2 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_slt_neg16_i64: +; GCN: v_cmp_gt_i64_e32 vcc, -16, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_slt_neg16_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, 
i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp slt i64 %val, -16 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_sle_5_i64: +; GCN: v_cmp_gt_i64_e32 vcc, 6, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_sle_5_i64(i32 addrspace(1)* %out, i64 addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr i64 addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load i64 addrspace(1)* %gep.in + %cmp = icmp sle i64 %val, 5 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; -------------------------------------------------------------------------------- +; f32 compares +; -------------------------------------------------------------------------------- + + +; GCN-LABEL: {{^}}commute_oeq_2.0_f32: +; GCN: v_cmp_eq_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_oeq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp oeq float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + + +; GCN-LABEL: {{^}}commute_ogt_2.0_f32: +; GCN: v_cmp_lt_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ogt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ogt float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_oge_2.0_f32: +; GCN: v_cmp_le_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_oge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp oge float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_olt_2.0_f32: +; GCN: v_cmp_gt_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_olt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp olt float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ole_2.0_f32: +; GCN: v_cmp_ge_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ole_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ole float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_one_2.0_f32: +; GCN: v_cmp_lg_f32_e32 vcc, 2.0, v{{[0-9]+}} 
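+; ('one' lowers to lg, which is symmetric, so only the operands swap)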
+define void @commute_one_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp one float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ord_2.0_f32: +; GCN: v_cmp_o_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]] +define void @commute_ord_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ord float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ueq_2.0_f32: +; GCN: v_cmp_nlg_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ueq_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ueq float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ugt_2.0_f32: +; GCN: v_cmp_nge_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ugt_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ugt float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uge_2.0_f32: +; GCN: v_cmp_ngt_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_uge_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp uge float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ult_2.0_f32: +; GCN: v_cmp_nle_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ult_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ult float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ule_2.0_f32: +; GCN: v_cmp_nlt_f32_e32 vcc, 2.0, v{{[0-9]+}} +define void @commute_ule_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp ule float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_une_2.0_f32: +; GCN: v_cmp_neq_f32_e32 vcc, 
2.0, v{{[0-9]+}} +define void @commute_une_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp une float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uno_2.0_f32: +; GCN: v_cmp_u_f32_e32 vcc, [[REG:v[0-9]+]], [[REG]] +define void @commute_uno_2.0_f32(i32 addrspace(1)* %out, float addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr float addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load float addrspace(1)* %gep.in + %cmp = fcmp uno float %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; -------------------------------------------------------------------------------- +; f64 compares +; -------------------------------------------------------------------------------- + + +; GCN-LABEL: {{^}}commute_oeq_2.0_f64: +; GCN: v_cmp_eq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_oeq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp oeq double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + + +; GCN-LABEL: {{^}}commute_ogt_2.0_f64: +; GCN: v_cmp_lt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ogt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ogt double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_oge_2.0_f64: +; GCN: v_cmp_le_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_oge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp oge double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_olt_2.0_f64: +; GCN: v_cmp_gt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_olt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp olt double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ole_2.0_f64: +; GCN: v_cmp_ge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ole_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = 
getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ole double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_one_2.0_f64: +; GCN: v_cmp_lg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_one_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp one double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ord_2.0_f64: +; GCN: v_cmp_o_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]] +define void @commute_ord_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ord double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ueq_2.0_f64: +; GCN: v_cmp_nlg_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ueq_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ueq double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ugt_2.0_f64: +; GCN: v_cmp_nge_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ugt_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ugt double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uge_2.0_f64: +; GCN: v_cmp_ngt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_uge_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp uge double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ult_2.0_f64: +; GCN: v_cmp_nle_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ult_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ult double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_ule_2.0_f64: +; GCN: v_cmp_nlt_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_ule_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + 
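+  ; ule(x, 2.0) is !(x > 2.0), so swapping operands gives nlt with 2.0 as src0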
%tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp ule double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_une_2.0_f64: +; GCN: v_cmp_neq_f64_e32 vcc, 2.0, v{{\[[0-9]+:[0-9]+\]}} +define void @commute_une_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp une double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +; GCN-LABEL: {{^}}commute_uno_2.0_f64: +; GCN: v_cmp_u_f64_e32 vcc, [[REG:v\[[0-9]+:[0-9]+\]]], [[REG]] +define void @commute_uno_2.0_f64(i32 addrspace(1)* %out, double addrspace(1)* %in) #1 { + %tid = call i32 @llvm.r600.read.tidig.x() #0 + %gep.in = getelementptr double addrspace(1)* %in, i32 %tid + %gep.out = getelementptr i32 addrspace(1)* %out, i32 %tid + %val = load double addrspace(1)* %gep.in + %cmp = fcmp uno double %val, 2.0 + %ext = sext i1 %cmp to i32 + store i32 %ext, i32 addrspace(1)* %gep.out + ret void +} + +attributes #0 = { nounwind readnone } +attributes #1 = { nounwind } Index: test/CodeGen/R600/fceil64.ll =================================================================== --- test/CodeGen/R600/fceil64.ll +++ test/CodeGen/R600/fceil64.ll @@ -17,13 +17,13 @@ ; SI: s_lshr_b64 ; SI: s_not_b64 ; SI: s_and_b64 -; SI: cmp_lt_i32 +; SI: cmp_gt_i32 ; SI: cndmask_b32 ; SI: cndmask_b32 -; SI: cmp_gt_i32 +; SI: cmp_lt_i32 ; SI: cndmask_b32 ; SI: cndmask_b32 -; SI-DAG: v_cmp_gt_f64 +; SI-DAG: v_cmp_lt_f64 ; SI-DAG: v_cmp_lg_f64 ; SI: s_and_b64 ; SI: v_cndmask_b32 Index: test/CodeGen/R600/ffloor.f64.ll =================================================================== --- test/CodeGen/R600/ffloor.f64.ll +++ test/CodeGen/R600/ffloor.f64.ll @@ -18,13 +18,13 @@ ; SI: s_lshr_b64 ; SI: s_not_b64 ; SI: s_and_b64 -; SI: cmp_lt_i32 +; SI: cmp_gt_i32 ; SI: cndmask_b32 ; SI: cndmask_b32 -; SI: cmp_gt_i32 +; SI: cmp_lt_i32 ; SI: cndmask_b32 ; SI: cndmask_b32 -; SI-DAG: v_cmp_lt_f64 +; SI-DAG: v_cmp_gt_f64 ; SI-DAG: v_cmp_lg_f64 ; SI-DAG: s_and_b64 ; SI-DAG: v_cndmask_b32 Index: test/CodeGen/R600/ftrunc.f64.ll =================================================================== --- test/CodeGen/R600/ftrunc.f64.ll +++ test/CodeGen/R600/ftrunc.f64.ll @@ -27,12 +27,12 @@ ; SI: s_and_b32 s{{[0-9]+}}, s{{[0-9]+}}, 0x80000000 ; SI: s_add_i32 s{{[0-9]+}}, [[SEXP]], 0xfffffc01 ; SI: s_lshr_b64 -; SI: cmp_lt_i32 +; SI: cmp_gt_i32 ; SI: s_not_b64 ; SI: s_and_b64 ; SI: cndmask_b32 ; SI: cndmask_b32 -; SI: cmp_gt_i32 +; SI: cmp_lt_i32 ; SI: cndmask_b32 ; SI: cndmask_b32 ; SI: s_endpgm Index: test/CodeGen/R600/i1-copy-phi.ll =================================================================== --- test/CodeGen/R600/i1-copy-phi.ll +++ test/CodeGen/R600/i1-copy-phi.ll @@ -6,7 +6,7 @@ ; SI: s_and_saveexec_b64 ; SI: s_xor_b64 ; SI: v_mov_b32_e32 [[REG]], -1{{$}} -; SI: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[REG]], 0 +; SI: v_cmp_ne_i32_e32 vcc, 0, [[REG]] ; SI: s_and_saveexec_b64 ; SI: s_xor_b64 ; SI: s_endpgm Index: test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll =================================================================== --- 
test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll +++ test/CodeGen/R600/llvm.AMDGPU.div_fmas.ll @@ -78,7 +78,7 @@ } ; GCN-LABEL: {{^}}test_div_fmas_f32_cond_to_vcc: -; SI: v_cmp_eq_i32_e64 vcc, s{{[0-9]+}}, 0 +; SI: v_cmp_eq_i32_e64 vcc, 0, s{{[0-9]+}} ; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} define void @test_div_fmas_f32_cond_to_vcc(float addrspace(1)* %out, float %a, float %b, float %c, i32 %i) nounwind { %cmp = icmp eq i32 %i, 0 @@ -110,8 +110,8 @@ ; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4{{$}} ; SI-DAG: buffer_load_dword [[C:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:8{{$}} -; SI-DAG: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0 -; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], s{{[0-9]+}}, 0 +; SI-DAG: v_cmp_eq_i32_e32 [[CMP0:vcc]], 0, v{{[0-9]+}} +; SI-DAG: v_cmp_ne_i32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 0, s{{[0-9]+}} ; SI: s_and_b64 vcc, [[CMP0]], [[CMP1]] ; SI: v_div_fmas_f32 {{v[0-9]+}}, [[A]], [[B]], [[C]] ; SI: s_endpgm @@ -136,17 +136,17 @@ } ; GCN-LABEL: {{^}}test_div_fmas_f32_i1_phi_vcc: -; SI: v_cmp_eq_i32_e64 [[CMPTID:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0 -; SI: s_and_saveexec_b64 [[CMPTID]], [[CMPTID]] -; SI: s_xor_b64 [[CMPTID]], exec, [[CMPTID]] +; SI: v_cmp_eq_i32_e32 vcc, 0, v{{[0-9]+}} +; SI: s_and_saveexec_b64 [[SAVE:s\[[0-9]+:[0-9]+\]]], vcc +; SI: s_xor_b64 [[SAVE]], exec, [[SAVE]] ; SI: buffer_load_dword [[LOAD:v[0-9]+]] -; SI: v_cmp_ne_i32_e64 [[CMPLOAD:s\[[0-9]+:[0-9]+\]]], [[LOAD]], 0 -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, [[CMPLOAD]] +; SI: v_cmp_ne_i32_e32 vcc, 0, [[LOAD]] +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, 0, -1, vcc ; SI: BB9_2: -; SI: s_or_b64 exec, exec, [[CMPTID]] +; SI: s_or_b64 exec, exec, [[SAVE]] ; SI: v_cmp_ne_i32_e32 vcc, 0, v0 ; SI: v_div_fmas_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}} ; SI: buffer_store_dword Index: test/CodeGen/R600/llvm.round.f64.ll =================================================================== --- test/CodeGen/R600/llvm.round.f64.ll +++ test/CodeGen/R600/llvm.round.f64.ll @@ -21,7 +21,7 @@ ; SI-DAG: v_cmp_eq_i32 ; SI-DAG: s_mov_b32 [[BFIMASK:s[0-9]+]], 0x7fffffff -; SI-DAG: v_cmp_lt_i32_e64 +; SI-DAG: v_cmp_gt_i32_e64 ; SI-DAG: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[BFIMASK]] ; SI-DAG: v_cmp_gt_i32_e64 Index: test/CodeGen/R600/llvm.round.ll =================================================================== --- test/CodeGen/R600/llvm.round.ll +++ test/CodeGen/R600/llvm.round.ll @@ -9,7 +9,7 @@ ; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]] ; SI: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]] ; SI: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]] -; SI: v_cmp_ge_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SUB]]|, 0.5 +; SI: v_cmp_le_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], 0.5, |[[SUB]]| ; SI: v_cndmask_b32_e64 [[SEL:v[0-9]+]], 0, [[VX]], [[CMP]] ; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]] ; SI: buffer_store_dword [[RESULT]] Index: test/CodeGen/R600/or.ll =================================================================== --- test/CodeGen/R600/or.ll +++ test/CodeGen/R600/or.ll @@ -155,7 +155,7 @@ ; FUNC-LABEL: {{^}}or_i1: ; EG: OR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}} -; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}], s[{{[0-9]+:[0-9]+}}] +; SI: s_or_b64 s[{{[0-9]+:[0-9]+}}], vcc, s[{{[0-9]+:[0-9]+}}] define void @or_i1(float addrspace(1)* %out, float addrspace(1)* %in0, float addrspace(1)* %in1) { %a = load float addrspace(1)* %in0 %b = 
load float addrspace(1)* %in1 Index: test/CodeGen/R600/setcc-opt.ll =================================================================== --- test/CodeGen/R600/setcc-opt.ll +++ test/CodeGen/R600/setcc-opt.ll @@ -40,7 +40,7 @@ ; FUNC-LABEL: {{^}}sext_bool_icmp_eq_1: ; GCN: v_cmp_eq_i32_e32 vcc, ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc -; GCN-NEXT: v_cmp_eq_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}} +; GCN-NEXT: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}} ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1, ; GCN-NEXT: buffer_store_byte [[TMP]] ; GCN-NEXT: s_endpgm @@ -56,7 +56,7 @@ ; FUNC-LABEL: {{^}}sext_bool_icmp_ne_1: ; GCN: v_cmp_ne_i32_e32 vcc, ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, -1, vcc -; GCN-NEXT: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[TMP]], 1{{$}} +; GCN-NEXT: v_cmp_ne_i32_e32 vcc, 1, [[TMP]]{{$}} ; GCN-NEXT: v_cndmask_b32_e64 [[TMP:v[0-9]+]], 0, 1, ; GCN-NEXT: buffer_store_byte [[TMP]] ; GCN-NEXT: s_endpgm @@ -129,8 +129,8 @@ ; VI-DAG: s_load_dword [[A:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x2c ; VI-DAG: s_load_dword [[B:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0x30 ; GCN: v_mov_b32_e32 [[VB:v[0-9]+]], [[B]] -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[VB]], 2{{$}} -; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]] +; GCN: v_cmp_ne_i32_e32 vcc, 2, [[VB]]{{$}} +; GCN: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc ; GCN: buffer_store_byte ; GCN: s_endpgm define void @sext_bool_icmp_ne_k(i1 addrspace(1)* %out, i32 %a, i32 %b) nounwind { @@ -144,7 +144,7 @@ ; FUNC-LABEL: {{^}}cmp_zext_k_i8max: ; GCN: buffer_load_ubyte [[B:v[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0 offset:44 ; GCN: v_mov_b32_e32 [[K255:v[0-9]+]], 0xff{{$}} -; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K255]] +; GCN: v_cmp_ne_i32_e32 vcc, [[K255]], [[B]] ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc ; GCN-NEXT: buffer_store_byte [[RESULT]] ; GCN: s_endpgm @@ -157,8 +157,8 @@ ; FUNC-LABEL: {{^}}cmp_sext_k_neg1: ; GCN: buffer_load_sbyte [[B:v[0-9]+]] -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}} -; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]] +; GCN: v_cmp_ne_i32_e32 vcc, -1, [[B]]{{$}} +; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc ; GCN-NEXT: buffer_store_byte [[RESULT]] ; GCN: s_endpgm define void @cmp_sext_k_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %b.ptr) nounwind { @@ -171,7 +171,7 @@ ; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_sext_arg: ; GCN: s_load_dword [[B:s[0-9]+]] -; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[B]], -1{{$}} +; GCN: v_cmp_ne_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -1, [[B]] ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[CMP]] ; GCN-NEXT: buffer_store_byte [[RESULT]] ; GCN: s_endpgm @@ -189,7 +189,7 @@ ; FUNC-LABEL: {{^}}cmp_sext_k_neg1_i8_arg: ; GCN-DAG: buffer_load_ubyte [[B:v[0-9]+]] ; GCN-DAG: v_mov_b32_e32 [[K:v[0-9]+]], 0xff{{$}} -; GCN: v_cmp_ne_i32_e32 vcc, [[B]], [[K]]{{$}} +; GCN: v_cmp_ne_i32_e32 vcc, [[K]], [[B]]{{$}} ; GCN-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, vcc ; GCN-NEXT: buffer_store_byte [[RESULT]] ; GCN: s_endpgm Index: test/CodeGen/R600/sgpr-control-flow.ll =================================================================== --- test/CodeGen/R600/sgpr-control-flow.ll +++ test/CodeGen/R600/sgpr-control-flow.ll @@ -64,15 +64,15 @@ ; SI-LABEL: {{^}}sgpr_if_else_valu_cmp_phi_br: ; SI: buffer_load_dword [[AVAL:v[0-9]+]] -; SI: v_cmp_lt_i32_e64 [[CMP_IF:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0 +; SI: v_cmp_gt_i32_e32 [[CMP_IF:vcc]], 0, [[AVAL]] ; SI: 
v_cndmask_b32_e64 [[V_CMP:v[0-9]+]], 0, -1, [[CMP_IF]] ; SI: BB2_1: ; SI: buffer_load_dword [[AVAL:v[0-9]+]] -; SI: v_cmp_eq_i32_e64 [[CMP_ELSE:s\[[0-9]+:[0-9]+\]]], [[AVAL]], 0 +; SI: v_cmp_eq_i32_e32 [[CMP_ELSE:vcc]], 0, [[AVAL]] ; SI: v_cndmask_b32_e64 [[V_CMP]], 0, -1, [[CMP_ELSE]] -; SI: v_cmp_ne_i32_e64 [[CMP_CMP:s\[[0-9]+:[0-9]+\]]], [[V_CMP]], 0 +; SI: v_cmp_ne_i32_e32 [[CMP_CMP:vcc]], 0, [[V_CMP]] ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP_CMP]] ; SI: buffer_store_dword [[RESULT]] define void @sgpr_if_else_valu_cmp_phi_br(i32 addrspace(1)* %out, i32 addrspace(1)* %a, i32 addrspace(1)* %b) { Index: test/CodeGen/R600/trunc-cmp-constant.ll =================================================================== --- test/CodeGen/R600/trunc-cmp-constant.ll +++ test/CodeGen/R600/trunc-cmp-constant.ll @@ -4,8 +4,8 @@ ; FUNC-LABEL {{^}}sextload_i1_to_i32_trunc_cmp_eq_0: ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]] ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]] -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}} -; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, s{{\[[0-9]+:[0-9]+\]}}, -1{{$}} +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}} +; SI: s_xor_b64 s{{\[[0-9]+:[0-9]+\]}}, vcc, -1{{$}} ; SI: v_cndmask_b32_e64 ; SI: buffer_store_byte define void @sextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind { @@ -20,8 +20,8 @@ ; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_eq_0: ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]] ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]] -; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}} -; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1 +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}} +; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1 ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]] ; SI-NEXT: buffer_store_byte [[RESULT]] define void @zextload_i1_to_i32_trunc_cmp_eq_0(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind { @@ -117,8 +117,8 @@ ; FUNC-LABEL: {{^}}zextload_i1_to_i32_trunc_cmp_ne_1: ; SI: buffer_load_ubyte [[LOAD:v[0-9]+]] ; SI: v_and_b32_e32 [[TMP:v[0-9]+]], 1, [[LOAD]] -; SI: v_cmp_eq_i32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], [[TMP]], 1{{$}} -; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], [[CMP0]], -1 +; SI: v_cmp_eq_i32_e32 vcc, 1, [[TMP]]{{$}} +; SI-NEXT: s_xor_b64 [[NEG:s\[[0-9]+:[0-9]+\]]], vcc, -1 ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, 1, [[NEG]] ; SI-NEXT: buffer_store_byte [[RESULT]] define void @zextload_i1_to_i32_trunc_cmp_ne_1(i1 addrspace(1)* %out, i1 addrspace(1)* %in) nounwind { @@ -157,7 +157,7 @@ ; FUNC-LABEL: {{^}}masked_load_i1_to_i32_trunc_cmp_ne_neg1: ; SI: buffer_load_sbyte [[LOAD:v[0-9]+]] -; SI: v_cmp_ne_i32_e64 {{s\[[0-9]+:[0-9]+\]}}, [[LOAD]], -1{{$}} +; SI: v_cmp_ne_i32_e32 vcc, -1, [[LOAD]]{{$}} ; SI-NEXT: v_cndmask_b32_e64 ; SI-NEXT: buffer_store_byte define void @masked_load_i1_to_i32_trunc_cmp_ne_neg1(i1 addrspace(1)* %out, i8 addrspace(1)* %in) nounwind { Index: test/CodeGen/R600/trunc.ll =================================================================== --- test/CodeGen/R600/trunc.ll +++ test/CodeGen/R600/trunc.ll @@ -73,8 +73,8 @@ ; SI-LABEL: {{^}}s_trunc_i64_to_i1: ; SI: s_load_dwordx2 s{{\[}}[[SLO:[0-9]+]]:{{[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0xb ; SI: v_and_b32_e64 [[MASKED:v[0-9]+]], 1, s[[SLO]] -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1 -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]] +; SI: v_cmp_eq_i32_e32 vcc, 1, [[MASKED]] +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc 
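+; (the e32 encoding writes vcc implicitly, so no SGPR pair is needed for the result)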
define void @s_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 %x) { %trunc = trunc i64 %x to i1 %sel = select i1 %trunc, i32 63, i32 -12 @@ -85,8 +85,8 @@ ; SI-LABEL: {{^}}v_trunc_i64_to_i1: ; SI: buffer_load_dwordx2 v{{\[}}[[VLO:[0-9]+]]:{{[0-9]+\]}} ; SI: v_and_b32_e32 [[MASKED:v[0-9]+]], 1, v[[VLO]] -; SI: v_cmp_eq_i32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], [[MASKED]], 1 -; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, [[CMP]] +; SI: v_cmp_eq_i32_e32 vcc, 1, [[MASKED]] +; SI: v_cndmask_b32_e64 {{v[0-9]+}}, -12, 63, vcc define void @v_trunc_i64_to_i1(i32 addrspace(1)* %out, i64 addrspace(1)* %in) { %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone %gep = getelementptr i64 addrspace(1)* %in, i32 %tid Index: test/CodeGen/R600/valu-i1.ll =================================================================== --- test/CodeGen/R600/valu-i1.ll +++ test/CodeGen/R600/valu-i1.ll @@ -42,8 +42,8 @@ } ; SI-LABEL: @simple_test_v_if -; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0 -; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]] +; SI: v_cmp_ne_i32_e32 vcc, 0, v{{[0-9]+}} +; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]] ; SI: ; BB#1 @@ -68,8 +68,8 @@ } ; SI-LABEL: @simple_test_v_loop -; SI: v_cmp_ne_i32_e64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], v{{[0-9]+}}, 0 -; SI: s_and_saveexec_b64 [[BR_SREG]], [[BR_SREG]] +; SI: v_cmp_ne_i32_e32 vcc, 0, v{{[0-9]+}} +; SI: s_and_saveexec_b64 [[BR_SREG:s\[[0-9]+:[0-9]+\]]], vcc ; SI: s_xor_b64 [[BR_SREG]], exec, [[BR_SREG]] ; SI: s_cbranch_execz BB2_2 @@ -111,8 +111,8 @@ ; Branch to exit if uniformly not taken ; SI: ; BB#0: ; SI: buffer_load_dword [[VBOUND:v[0-9]+]] -; SI: v_cmp_gt_i32_e64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]] -; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG]], [[OUTER_CMP_SREG]] +; SI: v_cmp_lt_i32_e32 vcc +; SI: s_and_saveexec_b64 [[OUTER_CMP_SREG:s\[[0-9]+:[0-9]+\]]], vcc ; SI: s_xor_b64 [[OUTER_CMP_SREG]], exec, [[OUTER_CMP_SREG]] ; SI: s_cbranch_execz BB3_2 @@ -125,8 +125,8 @@ ; SI: BB3_3: ; SI: buffer_load_dword [[B:v[0-9]+]] ; SI: buffer_load_dword [[A:v[0-9]+]] -; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], [[A]], -1 -; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_1:s\[[0-9]+:[0-9]+\]]], [[B]], -1 +; SI-DAG: v_cmp_ne_i32_e64 [[NEG1_CHECK_0:s\[[0-9]+:[0-9]+\]]], -1, [[A]] +; SI-DAG: v_cmp_ne_i32_e32 [[NEG1_CHECK_1:vcc]], -1, [[B]] ; SI: s_and_b64 [[ORNEG1:s\[[0-9]+:[0-9]+\]]], [[NEG1_CHECK_1]], [[NEG1_CHECK_0]] ; SI: s_and_saveexec_b64 [[ORNEG1]], [[ORNEG1]] ; SI: s_xor_b64 [[ORNEG1]], exec, [[ORNEG1]] Index: test/CodeGen/R600/xor.ll =================================================================== --- test/CodeGen/R600/xor.ll +++ test/CodeGen/R600/xor.ll @@ -40,8 +40,8 @@ ; FUNC-LABEL: {{^}}xor_i1: ; EG: XOR_INT {{\** *}}T{{[0-9]+\.[XYZW], PV\.[XYZW], PS}} -; SI-DAG: v_cmp_ge_f32_e64 [[CMP0:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 0 -; SI-DAG: v_cmp_ge_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], {{v[0-9]+}}, 1.0 +; SI-DAG: v_cmp_le_f32_e32 [[CMP0:vcc]], 0, {{v[0-9]+}} +; SI-DAG: v_cmp_le_f32_e64 [[CMP1:s\[[0-9]+:[0-9]+\]]], 1.0, {{v[0-9]+}} ; SI: s_xor_b64 [[XOR:s\[[0-9]+:[0-9]+\]]], [[CMP0]], [[CMP1]] ; SI: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], {{v[0-9]+}}, {{v[0-9]+}}, [[XOR]] ; SI: buffer_store_dword [[RESULT]]
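--
Reviewer aside, not part of the patch: the table-lookup idiom used by commuteCompareOpcode above is easy to sanity-check in isolation. Below is a minimal standalone C++ sketch; the Opcode enumerators and table contents are hypothetical stand-ins for the AMDGPU::V_CMP_* enums, not the real values.

#include <cassert>

// Hypothetical stand-ins for the AMDGPU::V_CMP_* opcode enumerators.
enum Opcode : unsigned { CMP_LT_I32, CMP_GT_I32, CMP_EQ_I32, CMP_NE_I32 };

// Same layout as cmpTable32/cmpTable64 in the patch: row[0] is the original
// opcode, row[1] is the opcode that gives the same result once src0 and src1
// are swapped. Symmetric predicates map to themselves.
static const unsigned CommuteTable[][2] = {
  { CMP_LT_I32, CMP_GT_I32 },
  { CMP_GT_I32, CMP_LT_I32 },
  { CMP_EQ_I32, CMP_EQ_I32 }, // same
  { CMP_NE_I32, CMP_NE_I32 }, // same
};

static unsigned commuteCompareOpcode(unsigned Opc) {
  // Linear scan, as in the patch; the tables are small and this runs rarely.
  for (const auto &Row : CommuteTable)
    if (Row[0] == Opc)
      return Row[1];
  assert(0 && "unhandled comparison");
  return Opc;
}

int main() {
  // lt(a, b) == gt(b, a): commuting the operands must flip the predicate.
  assert(commuteCompareOpcode(CMP_LT_I32) == CMP_GT_I32);
  assert(commuteCompareOpcode(CMP_GT_I32) == CMP_LT_I32);
  // eq is symmetric, so the opcode is unchanged.
  assert(commuteCompareOpcode(CMP_EQ_I32) == CMP_EQ_I32);
  return 0;
}

Note that both directions are stored as explicit rows (lt -> gt and gt -> lt), which is what lets a single forward scan serve either orientation; that is exactly the difficulty the FIXME notes for expressing this as a one-way InstrMapping.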