Index: lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
===================================================================
--- lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
+++ lib/Target/AMDGPU/AMDGPUTargetTransformInfo.cpp
@@ -108,6 +108,37 @@
   MVT::SimpleValueType SLT = LT.second.getScalarType().SimpleTy;
 
   switch (ISD) {
+  case ISD::SHL:
+  case ISD::SRL:
+  case ISD::SRA: {
+    if (SLT == MVT::i64)
+      return get64BitInstrCost() * LT.first * NElts;
+
+    // i32
+    return getFullRateInstrCost() * LT.first * NElts;
+  }
+  case ISD::ADD:
+  case ISD::SUB:
+  case ISD::AND:
+  case ISD::OR:
+  case ISD::XOR: {
+    if (SLT == MVT::i64) {
+      // 64-bit add/sub/and/or/xor are typically split into 2 VALU instructions.
+      return 2 * getFullRateInstrCost() * LT.first * NElts;
+    }
+
+    return LT.first * NElts * getFullRateInstrCost();
+  }
+  case ISD::MUL: {
+    const int QuarterRateCost = getQuarterRateInstrCost();
+    if (SLT == MVT::i64) {
+      const int FullRateCost = getFullRateInstrCost();
+      return (4 * QuarterRateCost + (2 * 2) * FullRateCost) * LT.first * NElts;
+    }
+
+    // i32
+    return QuarterRateCost * NElts * LT.first;
+  }
   case ISD::FADD:
   case ISD::FSUB:
   case ISD::FMUL:
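
Review note (not part of the diff): the hunk leans on small rate helpers from
AMDGPUTargetTransformInfo.h. A minimal sketch of their assumed shapes,
consistent with the costs the new tests expect (full rate = 1, half rate = 2,
quarter rate = 3, with get64BitInstrCost keyed off the half-rate-64-ops
subtarget feature); the in-tree definitions may differ in detail:

    // Sketch only: assumed helper shapes, not the verbatim in-tree code.
    static int getFullRateInstrCost() {
      return TargetTransformInfo::TCC_Basic;      // 1
    }
    static int getHalfRateInstrCost() {
      return 2 * TargetTransformInfo::TCC_Basic;  // 2
    }
    static int getQuarterRateInstrCost() {
      return 3 * TargetTransformInfo::TCC_Basic;  // 3
    }
    int AMDGPUTTIImpl::get64BitInstrCost() {
      // Subtargets with +half-rate-64-ops run 64-bit ALU ops at half rate;
      // everything else falls back to quarter rate.
      return ST->hasHalfRate64Ops() ? getHalfRateInstrCost()
                                    : getQuarterRateInstrCost();
    }
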
Index: test/Analysis/CostModel/AMDGPU/add-sub.ll
===================================================================
--- /dev/null
+++ test/Analysis/CostModel/AMDGPU/add-sub.ll
@@ -0,0 +1,138 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck %s
+
+; CHECK: 'add_i32'
+; CHECK: estimated cost of 1 for {{.*}} add i32
+define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %add = add i32 %vec, %b
+  store i32 %add, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i32'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
+define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %add = add <2 x i32> %vec, %b
+  store <2 x i32> %add, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i32'
+; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
+define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+  %add = add <3 x i32> %vec, %b
+  store <3 x i32> %add, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i32'
+; CHECK: estimated cost of 4 for {{.*}} add <4 x i32>
+define void @add_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+  %add = add <4 x i32> %vec, %b
+  store <4 x i32> %add, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i64'
+; CHECK: estimated cost of 2 for {{.*}} add i64
+define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %add = add i64 %vec, %b
+  store i64 %add, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i64'
+; CHECK: estimated cost of 4 for {{.*}} add <2 x i64>
+define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %add = add <2 x i64> %vec, %b
+  store <2 x i64> %add, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v3i64'
+; CHECK: estimated cost of 6 for {{.*}} add <3 x i64>
+define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+  %add = add <3 x i64> %vec, %b
+  store <3 x i64> %add, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v4i64'
+; CHECK: estimated cost of 8 for {{.*}} add <4 x i64>
+define void @add_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+  %add = add <4 x i64> %vec, %b
+  store <4 x i64> %add, <4 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v16i64'
+; CHECK: estimated cost of 32 for {{.*}} add <16 x i64>
+define void @add_v16i64(<16 x i64> addrspace(1)* %out, <16 x i64> addrspace(1)* %vaddr, <16 x i64> %b) #0 {
+  %vec = load <16 x i64>, <16 x i64> addrspace(1)* %vaddr
+  %add = add <16 x i64> %vec, %b
+  store <16 x i64> %add, <16 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_i16'
+; CHECK: estimated cost of 1 for {{.*}} add i16
+define void @add_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %add = add i16 %vec, %b
+  store i16 %add, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'add_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} add <2 x i16>
+define void @add_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %add = add <2 x i16> %vec, %b
+  store <2 x i16> %add, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i32'
+; CHECK: estimated cost of 1 for {{.*}} sub i32
+define void @sub_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %sub = sub i32 %vec, %b
+  store i32 %sub, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_i64'
+; CHECK: estimated cost of 2 for {{.*}} sub i64
+define void @sub_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %sub = sub i64 %vec, %b
+  store i64 %sub, i64 addrspace(1)* %out
+  ret void
+}
+; CHECK: 'sub_i16'
+; CHECK: estimated cost of 1 for {{.*}} sub i16
+define void @sub_i16(i16 addrspace(1)* %out, i16 addrspace(1)* %vaddr, i16 %b) #0 {
+  %vec = load i16, i16 addrspace(1)* %vaddr
+  %sub = sub i16 %vec, %b
+  store i16 %sub, i16 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'sub_v2i16'
+; CHECK: estimated cost of 2 for {{.*}} sub <2 x i16>
+define void @sub_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> addrspace(1)* %vaddr, <2 x i16> %b) #0 {
+  %vec = load <2 x i16>, <2 x i16> addrspace(1)* %vaddr
+  %sub = sub <2 x i16> %vec, %b
+  store <2 x i16> %sub, <2 x i16> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
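
Review note: the expectations above follow directly from the formula in the
hunk. Per legalized lane, i64 add/sub costs 2 full-rate instructions (on GCN
this is typically a 32-bit add plus an add-with-carry), while i32 and the
promoted i16 cases cost 1. So add <16 x i64> comes out to 2 * 16 = 32, and
add <2 x i16> to 1 * 2 = 2.
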
Index: test/Analysis/CostModel/AMDGPU/add.ll
===================================================================
--- test/Analysis/CostModel/AMDGPU/add.ll
+++ /dev/null
@@ -1,56 +0,0 @@
-; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
-
-; CHECK: 'add_i32'
-; CHECK: estimated cost of 1 for {{.*}} add i32
-define void @add_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
-  %vec = load i32, i32 addrspace(1)* %vaddr
-  %add = add i32 %vec, %b
-  store i32 %add, i32 addrspace(1)* %out
-  ret void
-}
-
-; CHECK: 'add_v2i32'
-; CHECK: estimated cost of 2 for {{.*}} add <2 x i32>
-define void @add_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
-  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
-  %add = add <2 x i32> %vec, %b
-  store <2 x i32> %add, <2 x i32> addrspace(1)* %out
-  ret void
-}
-
-; CHECK: 'add_v3i32'
-; CHECK: estimated cost of 3 for {{.*}} add <3 x i32>
-define void @add_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
-  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
-  %add = add <3 x i32> %vec, %b
-  store <3 x i32> %add, <3 x i32> addrspace(1)* %out
-  ret void
-}
-
-; CHECK: 'add_i64'
-; CHECK: estimated cost of 1 for {{.*}} add i64
-define void @add_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
-  %vec = load i64, i64 addrspace(1)* %vaddr
-  %add = add i64 %vec, %b
-  store i64 %add, i64 addrspace(1)* %out
-  ret void
-}
-
-; CHECK: 'add_v2i64'
-; CHECK: estimated cost of 2 for {{.*}} add <2 x i64>
-define void @add_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
-  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
-  %add = add <2 x i64> %vec, %b
-  store <2 x i64> %add, <2 x i64> addrspace(1)* %out
-  ret void
-}
-
-; CHECK: 'add_v3i64'
-; CHECK: estimated cost of 3 for {{.*}} add <3 x i64>
-define void @add_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
-  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
-  %add = add <3 x i64> %vec, %b
-  store <3 x i64> %add, <3 x i64> addrspace(1)* %out
-  ret void
-}
-
Index: test/Analysis/CostModel/AMDGPU/bit-ops.ll
===================================================================
--- /dev/null
+++ test/Analysis/CostModel/AMDGPU/bit-ops.ll
@@ -0,0 +1,59 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'or_i32'
+; CHECK: estimated cost of 1 for {{.*}} or i32
+define void @or_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %or = or i32 %vec, %b
+  store i32 %or, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'or_i64'
+; CHECK: estimated cost of 2 for {{.*}} or i64
+define void @or_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %or = or i64 %vec, %b
+  store i64 %or, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'xor_i32'
+; CHECK: estimated cost of 1 for {{.*}} xor i32
+define void @xor_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %xor = xor i32 %vec, %b
+  store i32 %xor, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'xor_i64'
+; CHECK: estimated cost of 2 for {{.*}} xor i64
+define void @xor_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %xor = xor i64 %vec, %b
+  store i64 %xor, i64 addrspace(1)* %out
+  ret void
+}
+
+
+; CHECK: 'and_i32'
+; CHECK: estimated cost of 1 for {{.*}} and i32
+define void @and_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %and = and i32 %vec, %b
+  store i32 %and, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'and_i64'
+; CHECK: estimated cost of 2 for {{.*}} and i64
+define void @and_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %and = and i64 %vec, %b
+  store i64 %and, i64 addrspace(1)* %out
+  ret void
+}
+
+
+attributes #0 = { nounwind }
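
Review note: none of this is AMDGPU-specific API; client passes see these
numbers through the generic TargetTransformInfo interface, which is what lets
the new costs feed into unrolling and vectorization decisions. A hypothetical
snippet (not part of the patch) showing how a legacy-pass-manager pass would
query them, where F is some Function in an AMDGPU module:

    // Hypothetical illustration of a client-side query, not in-tree code.
    const TargetTransformInfo &TTI =
        getAnalysis<TargetTransformInfoWrapperPass>().getTTI(F);
    Type *I64Ty = Type::getInt64Ty(F.getContext());
    // With this patch applied, expected to be 2: an i64 'and' is modeled as
    // splitting into 2 full-rate VALU instructions.
    int AndCost = TTI.getArithmeticInstrCost(Instruction::And, I64Ty);
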
Index: test/Analysis/CostModel/AMDGPU/mul.ll
===================================================================
--- /dev/null
+++ test/Analysis/CostModel/AMDGPU/mul.ll
@@ -0,0 +1,85 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa < %s | FileCheck %s
+
+; CHECK: 'mul_i32'
+; CHECK: estimated cost of 3 for {{.*}} mul i32
+define void @mul_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %mul = mul i32 %vec, %b
+  store i32 %mul, i32 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v2i32'
+; CHECK: estimated cost of 6 for {{.*}} mul <2 x i32>
+define void @mul_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> addrspace(1)* %vaddr, <2 x i32> %b) #0 {
+  %vec = load <2 x i32>, <2 x i32> addrspace(1)* %vaddr
+  %mul = mul <2 x i32> %vec, %b
+  store <2 x i32> %mul, <2 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v3i32'
+; CHECK: estimated cost of 9 for {{.*}} mul <3 x i32>
+define void @mul_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> addrspace(1)* %vaddr, <3 x i32> %b) #0 {
+  %vec = load <3 x i32>, <3 x i32> addrspace(1)* %vaddr
+  %mul = mul <3 x i32> %vec, %b
+  store <3 x i32> %mul, <3 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v4i32'
+; CHECK: estimated cost of 12 for {{.*}} mul <4 x i32>
+define void @mul_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> addrspace(1)* %vaddr, <4 x i32> %b) #0 {
+  %vec = load <4 x i32>, <4 x i32> addrspace(1)* %vaddr
+  %mul = mul <4 x i32> %vec, %b
+  store <4 x i32> %mul, <4 x i32> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_i64'
+; CHECK: estimated cost of 16 for {{.*}} mul i64
+define void @mul_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %mul = mul i64 %vec, %b
+  store i64 %mul, i64 addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v2i64'
+; CHECK: estimated cost of 32 for {{.*}} mul <2 x i64>
+define void @mul_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> addrspace(1)* %vaddr, <2 x i64> %b) #0 {
+  %vec = load <2 x i64>, <2 x i64> addrspace(1)* %vaddr
+  %mul = mul <2 x i64> %vec, %b
+  store <2 x i64> %mul, <2 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v3i64'
+; CHECK: estimated cost of 48 for {{.*}} mul <3 x i64>
+define void @mul_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> addrspace(1)* %vaddr, <3 x i64> %b) #0 {
+  %vec = load <3 x i64>, <3 x i64> addrspace(1)* %vaddr
+  %mul = mul <3 x i64> %vec, %b
+  store <3 x i64> %mul, <3 x i64> addrspace(1)* %out
+  ret void
+}
+
+; CHECK: 'mul_v4i64'
+; CHECK: estimated cost of 64 for {{.*}} mul <4 x i64>
+define void @mul_v4i64(<4 x i64> addrspace(1)* %out, <4 x i64> addrspace(1)* %vaddr, <4 x i64> %b) #0 {
+  %vec = load <4 x i64>, <4 x i64> addrspace(1)* %vaddr
+  %mul = mul <4 x i64> %vec, %b
+  store <4 x i64> %mul, <4 x i64> addrspace(1)* %out
+  ret void
+}
+
+
+; CHECK: 'mul_v8i64'
+; CHECK: estimated cost of 128 for {{.*}} mul <8 x i64>
+define void @mul_v8i64(<8 x i64> addrspace(1)* %out, <8 x i64> addrspace(1)* %vaddr, <8 x i64> %b) #0 {
+  %vec = load <8 x i64>, <8 x i64> addrspace(1)* %vaddr
+  %mul = mul <8 x i64> %vec, %b
+  store <8 x i64> %mul, <8 x i64> addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
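
Review note: the i64 mul estimate of 16 above is exactly the formula from the
hunk: 4 quarter-rate 32-bit multiplies (4 * 3 = 12) plus 2 * 2 full-rate
operations for combining the partial products (4 * 1 = 4, presumably two
add-plus-carry pairs), so 12 + 4 = 16 per scalar i64, scaled linearly for
vectors, e.g. <4 x i64> gives 4 * 16 = 64.
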
Index: test/Analysis/CostModel/AMDGPU/shifts.ll
===================================================================
--- /dev/null
+++ test/Analysis/CostModel/AMDGPU/shifts.ll
@@ -0,0 +1,61 @@
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=+half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=FAST64 %s
+; RUN: opt -cost-model -analyze -mtriple=amdgcn-unknown-amdhsa -mattr=-half-rate-64-ops < %s | FileCheck -check-prefix=ALL -check-prefix=SLOW64 %s
+
+; ALL: 'shl_i32'
+; ALL: estimated cost of 1 for {{.*}} shl i32
+define void @shl_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %shl = shl i32 %vec, %b
+  store i32 %shl, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'shl_i64'
+; FAST64: estimated cost of 2 for {{.*}} shl i64
+; SLOW64: estimated cost of 3 for {{.*}} shl i64
+define void @shl_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %shl = shl i64 %vec, %b
+  store i64 %shl, i64 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'lshr_i32'
+; ALL: estimated cost of 1 for {{.*}} lshr i32
+define void @lshr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %lshr = lshr i32 %vec, %b
+  store i32 %lshr, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'lshr_i64'
+; FAST64: estimated cost of 2 for {{.*}} lshr i64
+; SLOW64: estimated cost of 3 for {{.*}} lshr i64
+define void @lshr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %lshr = lshr i64 %vec, %b
+  store i64 %lshr, i64 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'ashr_i32'
+; ALL: estimated cost of 1 for {{.*}} ashr i32
+define void @ashr_i32(i32 addrspace(1)* %out, i32 addrspace(1)* %vaddr, i32 %b) #0 {
+  %vec = load i32, i32 addrspace(1)* %vaddr
+  %ashr = ashr i32 %vec, %b
+  store i32 %ashr, i32 addrspace(1)* %out
+  ret void
+}
+
+; ALL: 'ashr_i64'
+; FAST64: estimated cost of 2 for {{.*}} ashr i64
+; SLOW64: estimated cost of 3 for {{.*}} ashr i64
+define void @ashr_i64(i64 addrspace(1)* %out, i64 addrspace(1)* %vaddr, i64 %b) #0 {
+  %vec = load i64, i64 addrspace(1)* %vaddr
+  %ashr = ashr i64 %vec, %b
+  store i64 %ashr, i64 addrspace(1)* %out
+  ret void
+}
+
+attributes #0 = { nounwind }
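
Review note: the FAST64/SLOW64 split tracks get64BitInstrCost() directly,
with +half-rate-64-ops a 64-bit shift is costed at the half rate of 2,
otherwise at the quarter rate of 3, while 32-bit shifts stay full rate at 1.
GCN has native 64-bit shift instructions, so unlike add/sub there is no
2x split factor here.
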
Index: test/Transforms/IndVarSimplify/AMDGPU/lit.local.cfg
===================================================================
--- /dev/null
+++ test/Transforms/IndVarSimplify/AMDGPU/lit.local.cfg
@@ -0,0 +1,2 @@
+if 'AMDGPU' not in config.root.targets:
+    config.unsupported = True
Index: test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
===================================================================
--- /dev/null
+++ test/Transforms/IndVarSimplify/AMDGPU/no-widen-to-i64.ll
@@ -0,0 +1,98 @@
+; RUN: opt -S -mtriple=amdgcn-unknown-amdhsa -indvars %s | FileCheck %s
+
+; Bug 21148
+
+; Induction variables should not be widened to 64-bit integers,
+; despite i64 being a legal type.
+;
+; Basic arithmetic on a 64-bit integer is either twice as expensive
+; as the equivalent 32-bit operation, or is split into 2 32-bit
+; components.
+
+target datalayout = "e-p:32:32-p1:64:64-p2:64:64-p3:32:32-p4:64:64-p5:32:32-p24:64:64-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024-v2048:2048-n32:64"
+
+; CHECK-LABEL: @indvar_32_bit(
+; CHECK-NOT: sext i32
+; CHECK: phi i32
+define void @indvar_32_bit(i32 %n, i32* nocapture %output) {
+entry:
+  %cmp5 = icmp sgt i32 %n, 0
+  br i1 %cmp5, label %for.body.preheader, label %for.end
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.06 = phi i32 [ 0, %for.body.preheader ], [ %add, %for.body ]
+  %mul = mul nsw i32 %i.06, %i.06
+  %tmp0 = sext i32 %i.06 to i64
+  %arrayidx = getelementptr inbounds i32, i32* %output, i64 %tmp0
+  store i32 %mul, i32* %arrayidx, align 4
+  %add = add nsw i32 %i.06, 3
+  %cmp = icmp slt i32 %add, %n
+  br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:                                 ; preds = %for.body
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  ret void
+}
+
+; CHECK-LABEL: @no_promote_i32(
+; CHECK-NOT: sext i32
+; CHECK: br
+; CHECK-NOT: shl i64
+; CHECK-NOT: ashr i64
+; CHECK-NOT: mul nsw i64
+; CHECK-NOT: add nsw i64
+define void @no_promote_i32(i32 addrspace(1)* %out, i32 %a, i32 %b) {
+entry:
+  br label %for.body
+
+for.body:
+  %inc = phi i32 [ 0, %entry ], [ %inc.i, %for.body ]
+  %tmp0 = add i32 %a, %inc
+  %shl = shl i32 %inc, 8
+  %shr = ashr exact i32 %shl, 8
+  %mul = mul nsw i32 %shr, %a
+  %add = add nsw i32 %mul, %b
+  %tmp1 = sext i32 %add to i64
+  %arrayidx1 = getelementptr inbounds i32, i32 addrspace(1)* %out, i64 %tmp1
+  store i32 %tmp0, i32 addrspace(1)* %arrayidx1, align 4
+  %inc.i = add nsw i32 %inc, 1
+  %cmp = icmp slt i32 %inc.i, 16
+  br i1 %cmp, label %for.body, label %for.end
+
+for.end:
+  ret void
+}
+
+; FIXME: This should really be promoted to i64, since it will need to
+; be legalized anyway.
+
+; CHECK-LABEL: @indvar_48_bit(
+define void @indvar_48_bit(i48 %n, i48* nocapture %output) {
+entry:
+  %cmp5 = icmp sgt i48 %n, 0
+  br i1 %cmp5, label %for.body.preheader, label %for.end
+
+for.body.preheader:                               ; preds = %entry
+  br label %for.body
+
+for.body:                                         ; preds = %for.body.preheader, %for.body
+  %i.06 = phi i48 [ 0, %for.body.preheader ], [ %add, %for.body ]
+  %mul = mul nsw i48 %i.06, %i.06
+  %tmp0 = sext i48 %i.06 to i64
+  %arrayidx = getelementptr inbounds i48, i48* %output, i64 %tmp0
+  store i48 %mul, i48* %arrayidx, align 4
+  %add = add nsw i48 %i.06, 3
+  %cmp = icmp slt i48 %add, %n
+  br i1 %cmp, label %for.body, label %for.end.loopexit
+
+for.end.loopexit:                                 ; preds = %for.body
+  br label %for.end
+
+for.end:                                          ; preds = %for.end.loopexit, %entry
+  ret void
+}
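
Review note: to exercise the new tests locally, the usual lit invocation
works, assuming a build with the AMDGPU target enabled (paths adjusted to
your layout, run from the build directory):

    ./bin/llvm-lit -v ../llvm/test/Analysis/CostModel/AMDGPU/ \
        ../llvm/test/Transforms/IndVarSimplify/AMDGPU/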