Index: llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
===================================================================
--- llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
+++ llvm/trunk/include/llvm/IR/IntrinsicsAMDGPU.td
@@ -552,6 +552,14 @@
   Intrinsic<[llvm_i64_ty], [llvm_anyfloat_ty, LLVMMatchType<0>, llvm_i32_ty],
             [IntrNoMem, IntrConvergent]>;
 
+def int_amdgcn_readfirstlane :
+  GCCBuiltin<"__builtin_amdgcn_readfirstlane">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
+def int_amdgcn_readlane :
+  GCCBuiltin<"__builtin_amdgcn_readlane">,
+  Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty], [IntrNoMem, IntrConvergent]>;
+
 //===----------------------------------------------------------------------===//
 // CI+ Intrinsics
 //===----------------------------------------------------------------------===//
Index: llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SIInstructions.td
@@ -1066,9 +1066,9 @@
 def V_READFIRSTLANE_B32 : VOP1 <
   0x00000002,
   (outs SReg_32:$vdst),
-  (ins VS_32:$src0),
+  (ins VGPR_32:$src0),
   "v_readfirstlane_b32 $vdst, $src0",
-  []
+  [(set i32:$vdst, (int_amdgcn_readfirstlane i32:$src0))]
 > {
   let isConvergent = 1;
 }
@@ -1447,8 +1447,9 @@
   vop3 <0x001, 0x289>,
   "v_readlane_b32",
   (outs SReg_32:$vdst),
-  (ins VS_32:$src0, SCSrc_32:$src1),
-  "v_readlane_b32 $vdst, $src0, $src1"
+  (ins VGPR_32:$src0, SCSrc_32:$src1),
+  "v_readlane_b32 $vdst, $src0, $src1",
+  [(set i32:$vdst, (int_amdgcn_readlane i32:$src0, i32:$src1))]
 >;
 
 defm V_WRITELANE_B32 : VOP2SI_3VI_m <
Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readfirstlane.ll
@@ -0,0 +1,35 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
+
+declare i32 @llvm.amdgcn.readfirstlane(i32) #0
+
+; CHECK-LABEL: {{^}}test_readfirstlane:
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, v{{[0-9]+}}
+define void @test_readfirstlane(i32 addrspace(1)* %out, i32 %src) #1 {
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %src)
+  store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readfirstlane_imm:
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
+define void @test_readfirstlane_imm(i32 addrspace(1)* %out) #1 {
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 32)
+  store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; TODO: m0 should be folded.
+; CHECK-LABEL: {{^}}test_readfirstlane_m0:
+; CHECK: s_mov_b32 m0, -1
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
+; CHECK: v_readfirstlane_b32 s{{[0-9]+}}, [[VVAL]]
+define void @test_readfirstlane_m0(i32 addrspace(1)* %out) #1 {
+  %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
+  %readfirstlane = call i32 @llvm.amdgcn.readfirstlane(i32 %m0)
+  store i32 %readfirstlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+attributes #0 = { nounwind readnone convergent }
+attributes #1 = { nounwind }
Index: llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/llvm.amdgcn.readlane.ll
@@ -0,0 +1,43 @@
+; RUN: llc -mtriple=amdgcn--amdhsa -mcpu=fiji -verify-machineinstrs < %s | FileCheck %s
+
+declare i32 @llvm.amdgcn.readlane(i32, i32) #0
+
+; CHECK-LABEL: {{^}}test_readlane_sreg:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, s{{[0-9]+}}
+define void @test_readlane_sreg(i32 addrspace(1)* %out, i32 %src0, i32 %src1) #1 {
+  %readlane = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 %src1)
+  store i32 %readlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_imm_sreg:
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], 32
+; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
+define void @test_readlane_imm_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
+  %readlane = call i32 @llvm.amdgcn.readlane(i32 32, i32 %src1)
+  store i32 %readlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; TODO: m0 should be folded.
+; CHECK-LABEL: {{^}}test_readlane_m0_sreg:
+; CHECK: s_mov_b32 m0, -1
+; CHECK: v_mov_b32_e32 [[VVAL:v[0-9]]], m0
+; CHECK: v_readlane_b32 s{{[0-9]+}}, [[VVAL]], s{{[0-9]+}}
+define void @test_readlane_m0_sreg(i32 addrspace(1)* %out, i32 %src1) #1 {
+  %m0 = call i32 asm "s_mov_b32 m0, -1", "={M0}"()
+  %readlane = call i32 @llvm.amdgcn.readlane(i32 %m0, i32 %src1)
+  store i32 %readlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+; CHECK-LABEL: {{^}}test_readlane_imm:
+; CHECK: v_readlane_b32 s{{[0-9]+}}, v{{[0-9]+}}, 32
+define void @test_readlane_imm(i32 addrspace(1)* %out, i32 %src0) #1 {
+  %readlane = call i32 @llvm.amdgcn.readlane(i32 %src0, i32 32) #0
+  store i32 %readlane, i32 addrspace(1)* %out, align 4
+  ret void
+}
+
+attributes #0 = { nounwind readnone convergent }
+attributes #1 = { nounwind }
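
Usage note (not part of the patch): a minimal sketch combining the two intrinsics, modeled on the tests above. The function name is illustrative, and the lane index is assumed to be wave-uniform, since the v_readlane_b32 pattern reads it from a scalar source operand (SCSrc_32). The GCCBuiltin names declared above (__builtin_amdgcn_readfirstlane, __builtin_amdgcn_readlane) are the intended source-level entry points, assuming matching frontend builtin declarations exist.

; Illustrative sketch only, using the same declarations and attributes as the tests.
declare i32 @llvm.amdgcn.readfirstlane(i32) #0
declare i32 @llvm.amdgcn.readlane(i32, i32) #0

define void @readlane_sketch(i32 addrspace(1)* %out, i32 %val, i32 %lane) #1 {
  ; Value held by the first active lane, returned as a wave-uniform i32.
  %first = call i32 @llvm.amdgcn.readfirstlane(i32 %val)
  ; Value held by the lane selected by %lane (assumed wave-uniform here).
  %picked = call i32 @llvm.amdgcn.readlane(i32 %val, i32 %lane)
  %sum = add i32 %first, %picked
  store i32 %sum, i32 addrspace(1)* %out, align 4
  ret void
}

attributes #0 = { nounwind readnone convergent }
attributes #1 = { nounwind }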