Index: lib/Target/AMDGPU/AMDGPUIntrinsics.td
===================================================================
--- lib/Target/AMDGPU/AMDGPUIntrinsics.td
+++ lib/Target/AMDGPU/AMDGPUIntrinsics.td
@@ -19,7 +19,6 @@
   def int_AMDGPU_arl : Intrinsic<[llvm_i32_ty], [llvm_float_ty], [IntrNoMem]>;
   def int_AMDGPU_cndlt : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
   def int_AMDGPU_div : Intrinsic<[llvm_float_ty], [llvm_float_ty, llvm_float_ty], [IntrNoMem]>;
-  def int_AMDGPU_fract : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>], [IntrNoMem]>;
   def int_AMDGPU_clamp : Intrinsic<[llvm_anyfloat_ty], [LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>], [IntrNoMem]>;
 
   // This is named backwards (instead of rsq_legacy) so we don't have
Index: lib/Target/AMDGPU/R600ISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/R600ISelLowering.cpp
+++ lib/Target/AMDGPU/R600ISelLowering.cpp
@@ -851,9 +851,6 @@
     case Intrinsic::AMDGPU_rsq:
       // XXX - I'm assuming SI's RSQ_LEGACY matches R600's behavior.
       return DAG.getNode(AMDGPUISD::RSQ_LEGACY, DL, VT, Op.getOperand(1));
-
-    case AMDGPUIntrinsic::AMDGPU_fract:
-      return DAG.getNode(AMDGPUISD::FRACT, DL, VT, Op.getOperand(1));
     }
     // break out of case ISD::INTRINSIC_WO_CHAIN in switch(Op.getOpcode())
     break;
Index: lib/Target/AMDGPU/R600Instructions.td
===================================================================
--- lib/Target/AMDGPU/R600Instructions.td
+++ lib/Target/AMDGPU/R600Instructions.td
@@ -733,6 +733,7 @@
   [(set i32:$dst, (selectcc f32:$src0, f32:$src1, -1, 0, COND_UNE_NE))]
 >;
 
+// FIXME: Need combine for AMDGPUfract
 def FRACT : R600_1OP_Helper <0x10, "FRACT", AMDGPUfract>;
 def TRUNC : R600_1OP_Helper <0x11, "TRUNC", ftrunc>;
 def CEIL : R600_1OP_Helper <0x12, "CEIL", fceil>;
Index: lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- lib/Target/AMDGPU/SIISelLowering.cpp
+++ lib/Target/AMDGPU/SIISelLowering.cpp
@@ -1360,9 +1360,6 @@
                        Op.getOperand(2),
                        Op.getOperand(3));
 
-  case AMDGPUIntrinsic::AMDGPU_fract: // Legacy name.
-    return DAG.getNode(ISD::FSUB, DL, VT, Op.getOperand(1),
-                       DAG.getNode(ISD::FFLOOR, DL, VT, Op.getOperand(1)));
   case AMDGPUIntrinsic::SI_fs_constant: {
     SDValue M0 = copyToM0(DAG, DAG.getEntryNode(), DL, Op.getOperand(3));
     SDValue Glue = M0.getValue(1);
Index: test/CodeGen/AMDGPU/fract.f64.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/fract.f64.ll
@@ -0,0 +1,66 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+
+declare double @llvm.fabs.f64(double) #0
+declare double @llvm.floor.f64(double) #0
+
+; FUNC-LABEL: {{^}}fract_f64:
+; GCN: v_fract_f64_e32 [[FRC:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
+; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
+; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
+; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
+; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
+; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
+; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
+; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
+; CI: buffer_store_dwordx2 [[FRC]]
+define void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+  %x = load double, double addrspace(1)* %src
+  %floor.x = call double @llvm.floor.f64(double %x)
+  %fract = fsub double %x, %floor.x
+  store double %fract, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}fract_f64_neg:
+; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
+; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
+; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
+; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
+; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
+; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
+; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
+; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
+; CI: buffer_store_dwordx2 [[FRC]]
+define void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+  %x = load double, double addrspace(1)* %src
+  %neg.x = fsub double -0.0, %x
+  %floor.neg.x = call double @llvm.floor.f64(double %neg.x)
+  %fract = fsub double %neg.x, %floor.neg.x
+  store double %fract, double addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}fract_f64_neg_abs:
+; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]|
+; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
+; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
+; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
+; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
+; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
+; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
+; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
+; CI: buffer_store_dwordx2 [[FRC]]
+define void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) #1 {
+  %x = load double, double addrspace(1)* %src
+  %abs.x = call double @llvm.fabs.f64(double %x)
+  %neg.abs.x = fsub double -0.0, %abs.x
+  %floor.neg.abs.x = call double @llvm.floor.f64(double %neg.abs.x)
+  %fract = fsub double %neg.abs.x, %floor.neg.abs.x
+  store double %fract, double addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
Index: test/CodeGen/AMDGPU/fract.ll
===================================================================
--- /dev/null
+++ test/CodeGen/AMDGPU/fract.ll
@@ -0,0 +1,58 @@
+; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
+; XUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
+
+declare float @llvm.fabs.f32(float) #0
+declare float @llvm.floor.f32(float) #0
+
+; FUNC-LABEL: {{^}}fract_f32:
+; CI: v_fract_f32_e32 [[RESULT:v[0-9]+]], [[INPUT:v[0-9]+]]
+; SI: v_floor_f32_e32 [[FLR:v[0-9]+]], [[INPUT:v[0-9]+]]
+; SI: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[FLR]], [[INPUT]]
+; GCN: buffer_store_dword [[RESULT]]
+
+; XEG: FRACT
+define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+  %x = load float, float addrspace(1)* %src
+  %floor.x = call float @llvm.floor.f32(float %x)
+  %fract = fsub float %x, %floor.x
+  store float %fract, float addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}fract_f32_neg:
+; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -[[INPUT:v[0-9]+]]
+; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -[[INPUT:v[0-9]+]]
+; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[INPUT]], [[FLR]]
+; GCN: buffer_store_dword [[RESULT]]
+
+; XEG: FRACT
+define void @fract_f32_neg(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+  %x = load float, float addrspace(1)* %src
+  %x.neg = fsub float -0.0, %x
+  %floor.x.neg = call float @llvm.floor.f32(float %x.neg)
+  %fract = fsub float %x.neg, %floor.x.neg
+  store float %fract, float addrspace(1)* %out
+  ret void
+}
+
+; FUNC-LABEL: {{^}}fract_f32_neg_abs:
+; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT:v[0-9]+]]|
+; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -|[[INPUT:v[0-9]+]]|
+; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT]]|, [[FLR]]
+; GCN: buffer_store_dword [[RESULT]]
+
+; XEG: FRACT
+define void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %src) #1 {
+  %x = load float, float addrspace(1)* %src
+  %abs.x = call float @llvm.fabs.f32(float %x)
+  %neg.abs.x = fsub float -0.0, %abs.x
+  %floor.neg.abs.x = call float @llvm.floor.f32(float %neg.abs.x)
+  %fract = fsub float %neg.abs.x, %floor.neg.abs.x
+  store float %fract, float addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind readnone }
+attributes #1 = { nounwind }
Index: test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll
===================================================================
--- test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll
+++ /dev/null
@@ -1,60 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-
-declare double @llvm.fabs.f64(double %Val)
-declare double @llvm.AMDGPU.fract.f64(double) nounwind readnone
-
-; FUNC-LABEL: {{^}}fract_f64:
-; GCN: v_fract_f64_e32 [[FRC:v\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
-; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
-; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
-; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
-; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
-; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
-; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
-; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
-; CI: buffer_store_dwordx2 [[FRC]]
-define void @fract_f64(double addrspace(1)* %out, double addrspace(1)* %src) nounwind {
-  %val = load double, double addrspace(1)* %src, align 4
-  %fract = call double @llvm.AMDGPU.fract.f64(double %val) nounwind readnone
-  store double %fract, double addrspace(1)* %out, align 4
-  ret void
-}
-
-; FUNC-LABEL: {{^}}fract_f64_neg:
-; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]
-; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
-; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
-; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
-; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
-; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
-; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
-; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
-; CI: buffer_store_dwordx2 [[FRC]]
-define void @fract_f64_neg(double addrspace(1)* %out, double addrspace(1)* %src) nounwind {
-  %val = load double, double addrspace(1)* %src, align 4
-  %neg = fsub double 0.0, %val
-  %fract = call double @llvm.AMDGPU.fract.f64(double %neg) nounwind readnone
-  store double %fract, double addrspace(1)* %out, align 4
-  ret void
-}
-
-; FUNC-LABEL: {{^}}fract_f64_neg_abs:
-; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]|
-; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1
-; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff
-; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]]
-; SI: v_cmp_class_f64_e64 [[COND:s\[[0-9]+:[0-9]+\]]], v{{\[}}[[LO]]:[[HI]]], 3
-; SI: v_cndmask_b32_e64 v[[RESLO:[0-9]+]], v[[MINLO]], v[[LO]], [[COND]]
-; SI: v_cndmask_b32_e64 v[[RESHI:[0-9]+]], v[[MINHI]], v[[HI]], [[COND]]
-; SI: buffer_store_dwordx2 v{{\[}}[[RESLO]]:[[RESHI]]]
-; CI: buffer_store_dwordx2 [[FRC]]
-define void @fract_f64_neg_abs(double addrspace(1)* %out, double addrspace(1)* %src) nounwind {
-  %val = load double, double addrspace(1)* %src, align 4
-  %abs = call double @llvm.fabs.f64(double %val)
-  %neg = fsub double 0.0, %abs
-  %fract = call double @llvm.AMDGPU.fract.f64(double %neg) nounwind readnone
-  store double %fract, double addrspace(1)* %out, align 4
-  ret void
-}
Index: test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll
===================================================================
--- test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll
+++ /dev/null
@@ -1,49 +0,0 @@
-; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=SI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=bonaire -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=GCN -check-prefix=CI -check-prefix=FUNC %s
-; RUN: llc -march=r600 -mcpu=cypress -verify-machineinstrs -enable-unsafe-fp-math < %s | FileCheck -check-prefix=EG -check-prefix=FUNC %s
-
-declare float @llvm.fabs.f32(float %Val)
-declare float @llvm.AMDGPU.fract.f32(float) nounwind readnone
-
-; FUNC-LABEL: {{^}}fract_f32:
-; CI: v_fract_f32_e32 [[RESULT:v[0-9]+]], [[INPUT:v[0-9]+]]
-; SI: v_floor_f32_e32 [[FLR:v[0-9]+]], [[INPUT:v[0-9]+]]
-; SI: v_subrev_f32_e32 [[RESULT:v[0-9]+]], [[FLR]], [[INPUT]]
-; GCN: buffer_store_dword [[RESULT]]
-; EG: FRACT
-define void @fract_f32(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
-  %val = load float, float addrspace(1)* %src, align 4
-  %fract = call float @llvm.AMDGPU.fract.f32(float %val) nounwind readnone
-  store float %fract, float addrspace(1)* %out, align 4
-  ret void
-}
-
-; FUNC-LABEL: {{^}}fract_f32_neg:
-; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -[[INPUT:v[0-9]+]]
-; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -[[INPUT:v[0-9]+]]
-; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -[[INPUT]], [[FLR]]
-; GCN: buffer_store_dword [[RESULT]]
-; EG: FRACT
-define void @fract_f32_neg(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
-  %val = load float, float addrspace(1)* %src, align 4
-  %neg = fsub float 0.0, %val
-  %fract = call float @llvm.AMDGPU.fract.f32(float %neg) nounwind readnone
-  store float %fract, float addrspace(1)* %out, align 4
-  ret void
-}
-
-; FUNC-LABEL: {{^}}fract_f32_neg_abs:
-; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT:v[0-9]+]]|
-; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -|[[INPUT:v[0-9]+]]|
-; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT]]|, [[FLR]]
-; GCN: buffer_store_dword [[RESULT]]
-; EG: FRACT
-define void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %src) nounwind {
-  %val = load float, float addrspace(1)* %src, align 4
-  %abs = call float @llvm.fabs.f32(float %val)
-  %neg = fsub float 0.0, %abs
-  %fract = call float @llvm.AMDGPU.fract.f32(float %neg) nounwind readnone
-  store float %fract, float addrspace(1)* %out, align 4
-  ret void
-}
Index: test/CodeGen/AMDGPU/si-spill-cf.ll
===================================================================
--- test/CodeGen/AMDGPU/si-spill-cf.ll
+++ test/CodeGen/AMDGPU/si-spill-cf.ll
@@ -256,7 +256,8 @@
 ENDIF2582:                                        ; preds = %ELSE2584, %IF2565
   %213 = fadd float %1, undef
   %214 = fadd float 0.000000e+00, %213
-  %215 = call float @llvm.AMDGPU.fract.f32(float %214)
+  %floor = call float @llvm.floor.f32(float %214)
+  %215 = fsub float %214, %floor
   br i1 undef, label %IF2589, label %ELSE2590
 
 IF2589:                                           ; preds = %ENDIF2582
@@ -480,7 +481,7 @@
 declare float @llvm.SI.load.const(<16 x i8>, i32) #1
 
 ; Function Attrs: nounwind readnone
-declare float @llvm.AMDGPU.fract.f32(float) #1
+declare float @llvm.floor.f32(float) #1
 
 ; Function Attrs: nounwind readnone
 declare float @llvm.sqrt.f32(float) #1
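
Usage note (illustrative, not part of the patch): with llvm.AMDGPU.fract removed, code that previously called the intrinsic is expected to emit the plain floor/subtract pattern instead, as the si-spill-cf.ll hunk above does. A minimal IR sketch, assuming a float value %x is already available; the value names are arbitrary:

  %x.floor = call float @llvm.floor.f32(float %x)  ; floor(x)
  %x.fract = fsub float %x, %x.floor               ; fract(x) = x - floor(x)

Per the new fract.ll checks, this pattern is selected to v_fract_f32 on CI and later, while SI keeps the v_floor_f32 + v_subrev_f32 sequence.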