Index: llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
+++ llvm/trunk/lib/Target/AMDGPU/AMDGPUISelDAGToDAG.cpp
@@ -978,8 +978,6 @@
   // default case
-  // FIXME: This is broken on SI where we still need to check if the base
-  // pointer is positive here.
   Base = Addr;
   Offset0 = CurDAG->getTargetConstant(0, DL, MVT::i8);
   Offset1 = CurDAG->getTargetConstant(1, DL, MVT::i8);
Index: llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/DSInstructions.td
@@ -728,7 +728,9 @@
   (i1 0))
 >;
 
-let OtherPredicates = [LDSRequiresM0Init] in {
+// v2i32 loads are split into i32 loads on SI during lowering, due to a bug
+// related to bounds checking.
+let OtherPredicates = [LDSRequiresM0Init, isCIVI] in {
 def : DS64Bit4ByteAlignedReadPat;
 def : DS64Bit4ByteAlignedWritePat;
 }
Index: llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
+++ llvm/trunk/lib/Target/AMDGPU/SIISelLowering.cpp
@@ -6292,6 +6292,17 @@
     if (NumElements > 2)
       return SplitVectorLoad(Op, DAG);
+
+    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+    // address is negative, then the instruction is incorrectly treated as
+    // out-of-bounds even if base + offsets is in bounds. Split vectorized
+    // loads here to avoid emitting ds_read2_b32. We may re-combine the
+    // load later in the SILoadStoreOptimizer.
+    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+        NumElements == 2 && MemVT.getStoreSize() == 8 &&
+        Load->getAlignment() < 8) {
+      return SplitVectorLoad(Op, DAG);
+    }
   }
   return SDValue();
 }
@@ -6694,6 +6705,18 @@
     if (NumElements > 2)
       return SplitVectorStore(Op, DAG);
+
+    // SI has a hardware bug in the LDS / GDS bounds checking: if the base
+    // address is negative, then the instruction is incorrectly treated as
+    // out-of-bounds even if base + offsets is in bounds. Split vectorized
+    // stores here to avoid emitting ds_write2_b32. We may re-combine the
+    // store later in the SILoadStoreOptimizer.
+    if (Subtarget->getGeneration() == AMDGPUSubtarget::SOUTHERN_ISLANDS &&
+        NumElements == 2 && VT.getStoreSize() == 8 &&
+        Store->getAlignment() < 8) {
+      return SplitVectorStore(Op, DAG);
+    }
+
     return SDValue();
   } else {
     llvm_unreachable("unhandled address space");
Index: llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll
===================================================================
--- llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll
+++ llvm/trunk/test/CodeGen/AMDGPU/lds-bounds.ll
@@ -0,0 +1,129 @@
+; RUN: llc -march=amdgcn -mcpu=tahiti -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,SI %s
+; RUN: llc -march=amdgcn -mcpu=hawaii -verify-machineinstrs < %s | FileCheck -check-prefixes=GCN,NOSI %s
+
+@compute_lds = external addrspace(3) global [512 x i32], align 16
+
+; GCN-LABEL: {{^}}store_aligned:
+; GCN: ds_write_b64
+define amdgpu_cs void @store_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+  %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr, align 8
+  store i32 43, i32 addrspace(3)* %ptr.gep.1
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_aligned:
+; GCN: ds_read_b64
+define amdgpu_cs <2 x float> @load_aligned(i32 addrspace(3)* %ptr) #0 {
+entry:
+  %ptr.gep.1 = getelementptr i32, i32 addrspace(3)* %ptr, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr, align 8
+  %v.1 = load i32, i32 addrspace(3)* %ptr.gep.1
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_const_idx:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_const_idx() #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_const_idx:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_const_idx() #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 3
+  %ptr.b = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 4
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case1:
+; SI: ds_write_b32
+; SI: ds_write_b32
+; NOSI: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case1(i32 %idx) #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case1:
+; SI: ds_read_b32
+; SI: ds_read_b32
+; NOSI: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case1(i32 %idx) #0 {
+entry:
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+
+; GCN-LABEL: {{^}}store_global_var_idx_case2:
+; GCN: ds_write2_b32
+define amdgpu_cs void @store_global_var_idx_case2(i32 %idx) #0 {
+entry:
+  %idx.and = and i32 %idx, 255
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  store i32 42, i32 addrspace(3)* %ptr.a
+  store i32 43, i32 addrspace(3)* %ptr.b
+  ret void
+}
+
+
+; GCN-LABEL: {{^}}load_global_var_idx_case2:
+; GCN: ds_read2_b32
+define amdgpu_cs <2 x float> @load_global_var_idx_case2(i32 %idx) #0 {
+entry:
+  %idx.and = and i32 %idx, 255
+  %ptr.a = getelementptr [512 x i32], [512 x i32] addrspace(3)* @compute_lds, i32 0, i32 %idx.and
+  %ptr.b = getelementptr i32, i32 addrspace(3)* %ptr.a, i32 1
+
+  %v.0 = load i32, i32 addrspace(3)* %ptr.a
+  %v.1 = load i32, i32 addrspace(3)* %ptr.b
+
+  %r.0 = insertelement <2 x i32> undef, i32 %v.0, i32 0
+  %r.1 = insertelement <2 x i32> %r.0, i32 %v.1, i32 1
+  %bc = bitcast <2 x i32> %r.1 to <2 x float>
+  ret <2 x float> %bc
+}
+
+attributes #0 = { nounwind }
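
For reference, the most direct case the new LowerLOAD/LowerSTORE checks target is an explicit under-aligned <2 x i32> LDS access. The following is a minimal sketch, not part of the patch: the function name is hypothetical and the RUN lines and check prefixes of lds-bounds.ll above are assumed. Since the sign of the incoming base pointer is unknown, SI is expected to keep the split (two ds_read_b32), analogous to load_global_var_idx_case1, while CI+ may still use ds_read2_b32.

; GCN-LABEL: {{^}}load_v2i32_align4:
; SI: ds_read_b32
; SI: ds_read_b32
; NOSI: ds_read2_b32
define amdgpu_cs <2 x float> @load_v2i32_align4(<2 x i32> addrspace(3)* %ptr) {
entry:
  ; Under-aligned 8-byte LDS load: on SI the new lowering splits this into
  ; two i32 loads so that ds_read2_b32 is not emitted for a base address
  ; that might be negative.
  %v = load <2 x i32>, <2 x i32> addrspace(3)* %ptr, align 4
  %bc = bitcast <2 x i32> %v to <2 x float>
  ret <2 x float> %bc
}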