Index: flang/lib/Optimizer/CodeGen/CodeGen.cpp
===================================================================
--- flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -141,12 +141,9 @@
                               mlir::Type resultTy,
                               mlir::ConversionPatternRewriter &rewriter,
                               unsigned boxValue) const {
-    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
-    mlir::LLVM::ConstantOp cValuePos =
-        genConstantOffset(loc, rewriter, boxValue);
     auto pty = mlir::LLVM::LLVMPointerType::get(resultTy);
     auto p = rewriter.create(
-        loc, pty, box, mlir::ValueRange{c0, cValuePos});
+        loc, pty, box, llvm::ArrayRef{0, boxValue});
     return rewriter.create(loc, resultTy, p);
   }
 
@@ -156,26 +153,21 @@
   getDimsFromBox(mlir::Location loc, llvm::ArrayRef retTys,
                  mlir::Value box, mlir::Value dim,
                  mlir::ConversionPatternRewriter &rewriter) const {
-    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
-    mlir::LLVM::ConstantOp cDims =
-        genConstantOffset(loc, rewriter, kDimsPosInBox);
     mlir::LLVM::LoadOp l0 =
-        loadFromOffset(loc, box, c0, cDims, dim, 0, retTys[0], rewriter);
+        loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 0, retTys[0], rewriter);
     mlir::LLVM::LoadOp l1 =
-        loadFromOffset(loc, box, c0, cDims, dim, 1, retTys[1], rewriter);
+        loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 1, retTys[1], rewriter);
     mlir::LLVM::LoadOp l2 =
-        loadFromOffset(loc, box, c0, cDims, dim, 2, retTys[2], rewriter);
+        loadFromOffset(loc, box, 0, kDimsPosInBox, dim, 2, retTys[2], rewriter);
     return {l0.getResult(), l1.getResult(), l2.getResult()};
   }
 
   mlir::LLVM::LoadOp
-  loadFromOffset(mlir::Location loc, mlir::Value a, mlir::LLVM::ConstantOp c0,
-                 mlir::LLVM::ConstantOp cDims, mlir::Value dim, int off,
-                 mlir::Type ty,
+  loadFromOffset(mlir::Location loc, mlir::Value a, int32_t c0, int32_t cDims,
+                 mlir::Value dim, int off, mlir::Type ty,
                  mlir::ConversionPatternRewriter &rewriter) const {
     auto pty = mlir::LLVM::LLVMPointerType::get(ty);
-    mlir::LLVM::ConstantOp c = genConstantOffset(loc, rewriter, off);
-    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, c);
+    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, a, c0, cDims, dim, off);
     return rewriter.create(loc, ty, p);
   }
 
@@ -183,33 +175,25 @@
   loadStrideFromBox(mlir::Location loc, mlir::Value box, unsigned dim,
                     mlir::ConversionPatternRewriter &rewriter) const {
     auto idxTy = lowerTy().indexType();
-    auto c0 = genConstantOffset(loc, rewriter, 0);
-    auto cDims = genConstantOffset(loc, rewriter, kDimsPosInBox);
     auto dimValue = genConstantIndex(loc, idxTy, rewriter, dim);
-    return loadFromOffset(loc, box, c0, cDims, dimValue, kDimStridePos, idxTy,
-                          rewriter);
+    return loadFromOffset(loc, box, 0, kDimsPosInBox, dimValue, kDimStridePos,
+                          idxTy, rewriter);
   }
 
   /// Read base address from a fir.box. Returned address has type ty.
   mlir::Value
   loadBaseAddrFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                       mlir::ConversionPatternRewriter &rewriter) const {
-    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
-    mlir::LLVM::ConstantOp cAddr =
-        genConstantOffset(loc, rewriter, kAddrPosInBox);
     auto pty = mlir::LLVM::LLVMPointerType::get(ty);
-    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cAddr);
+    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, 0, kAddrPosInBox);
     return rewriter.create(loc, ty, p);
   }
 
   mlir::Value
   loadElementSizeFromBox(mlir::Location loc, mlir::Type ty, mlir::Value box,
                          mlir::ConversionPatternRewriter &rewriter) const {
-    mlir::LLVM::ConstantOp c0 = genConstantOffset(loc, rewriter, 0);
-    mlir::LLVM::ConstantOp cElemLen =
-        genConstantOffset(loc, rewriter, kElemLenPosInBox);
     auto pty = mlir::LLVM::LLVMPointerType::get(ty);
-    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, c0, cElemLen);
+    mlir::LLVM::GEPOp p = genGEP(loc, pty, rewriter, box, 0, kElemLenPosInBox);
     return rewriter.create(loc, ty, p);
   }
 
@@ -262,7 +246,7 @@
   mlir::LLVM::GEPOp genGEP(mlir::Location loc, mlir::Type ty,
                            mlir::ConversionPatternRewriter &rewriter,
                            mlir::Value base, ARGS... args) const {
-    llvm::SmallVector cv = {args...};
+    llvm::SmallVector cv = {args...};
    return rewriter.create(loc, ty, base, cv);
   }
 
@@ -1016,9 +1000,8 @@
 computeDerivedTypeSize(mlir::Location loc, mlir::Type ptrTy, mlir::Type idxTy,
                        mlir::ConversionPatternRewriter &rewriter) {
   auto nullPtr = rewriter.create(loc, ptrTy);
-  mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
-  llvm::SmallVector args = {one};
-  auto gep = rewriter.create(loc, ptrTy, nullPtr, args);
+  auto gep = rewriter.create(
+      loc, ptrTy, nullPtr, llvm::ArrayRef{1});
   return rewriter.create(loc, idxTy, gep);
 }
 
@@ -1255,10 +1238,8 @@
       auto ptrTy = mlir::LLVM::LLVMPointerType::get(
           this->lowerTy().convertType(boxEleTy));
       auto nullPtr = rewriter.create(loc, ptrTy);
-      auto one =
-          genConstantIndex(loc, this->lowerTy().offsetType(), rewriter, 1);
-      auto gep = rewriter.create(loc, ptrTy, nullPtr,
-                                 mlir::ValueRange{one});
+      auto gep = rewriter.create(
+          loc, ptrTy, nullPtr, llvm::ArrayRef{1});
       auto eleSize = rewriter.create(
           loc, this->lowerTy().indexType(), gep);
       return {eleSize,
@@ -1414,7 +1395,7 @@
                     mlir::ValueRange cstInteriorIndices,
                     mlir::ValueRange componentIndices,
                     llvm::Optional substringOffset) const {
-    llvm::SmallVector gepArgs{outerOffset};
+    llvm::SmallVector gepArgs{outerOffset};
     mlir::Type resultTy =
         base.getType().cast().getElementType();
     // Fortran is column major, llvm GEP is row major: reverse the indices here.
@@ -1454,12 +1435,12 @@
         if (gepArgs.size() != 1)
           fir::emitFatalError(loc,
                               "corrupted substring GEP in fir.embox/fir.rebox");
-        mlir::Type outterOffsetTy = gepArgs[0].getType();
+        mlir::Type outterOffsetTy = gepArgs[0].get().getType();
         mlir::Value cast =
             this->integerCast(loc, rewriter, outterOffsetTy,
                               *substringOffset);
-        gepArgs[0] = rewriter.create(loc, outterOffsetTy,
-                                     gepArgs[0], cast);
+        gepArgs[0] = rewriter.create(
+            loc, outterOffsetTy, gepArgs[0].get(), cast);
       }
     }
     resultTy = mlir::LLVM::LLVMPointerType::get(resultTy);
@@ -2157,8 +2138,7 @@
     auto sliceOps = coor.slice().begin();
     mlir::Value one = genConstantIndex(loc, idxTy, rewriter, 1);
     mlir::Value prevExt = one;
-    mlir::Value zero = genConstantIndex(loc, idxTy, rewriter, 0);
-    mlir::Value offset = zero;
+    mlir::Value offset = genConstantIndex(loc, idxTy, rewriter, 0);
     const bool isShifted = !coor.shift().empty();
     const bool isSliced = !coor.slice().empty();
     const bool baseIsBoxed = coor.memref().getType().isa();
@@ -2219,7 +2199,7 @@
           loadBaseAddrFromBox(loc, baseTy, operands[0], rewriter);
       mlir::Type voidPtrTy = getVoidPtrType();
       base = rewriter.create(loc, voidPtrTy, base);
-      llvm::SmallVector args{offset};
+      llvm::SmallVector args{offset};
       auto addr = rewriter.create(loc, voidPtrTy, base,
                                   args);
       if (coor.subcomponent().empty()) {
@@ -2228,7 +2208,7 @@
       }
       auto casted = rewriter.create(loc, baseTy, addr);
       args.clear();
-      args.push_back(zero);
+      args.push_back(0);
       if (!coor.lenParams().empty()) {
         // If type parameters are present, then we don't want to use a GEPOp
         // as below, as the LLVM struct type cannot be statically defined.
@@ -2276,7 +2256,7 @@
       auto newTy = mlir::LLVM::LLVMPointerType::get(eleTy);
       base = rewriter.create(loc, newTy, operands[0]);
     }
-    llvm::SmallVector args = {offset};
+    llvm::SmallVector args = {offset};
     for (auto i = coor.subcomponentOffset(); i != coor.indicesOffset(); ++i)
       args.push_back(operands[i]);
     rewriter.replaceOpWithNewOp(coor, ty, base, args);
@@ -2307,10 +2287,7 @@
 
     // Complex type - basically, extract the real or imaginary part
     if (fir::isa_complex(objectTy)) {
-      mlir::LLVM::ConstantOp c0 =
-          genConstantIndex(loc, lowerTy().indexType(), rewriter, 0);
-      llvm::SmallVector offs = {c0, operands[1]};
-      mlir::Value gep = genGEP(loc, ty, rewriter, base, offs);
+      mlir::Value gep = genGEP(loc, ty, rewriter, base, 0, operands[1]);
       rewriter.replaceOp(coor, gep);
       return mlir::success();
     }
@@ -2429,8 +2406,6 @@
     // coordinate_of %box, %idx1, %idx2 : !fir.ref
     // 2.4. TODO: Either document or disable any other case that the following
     // implementation might convert.
- mlir::LLVM::ConstantOp c0 = - genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); mlir::Value resultAddr = loadBaseAddrFromBox(loc, getBaseAddrTypeFromBox(boxBaseAddr.getType()), boxBaseAddr, rewriter); @@ -2458,9 +2433,9 @@ } auto voidPtrBase = rewriter.create(loc, voidPtrTy, resultAddr); - llvm::SmallVector args = {off}; - resultAddr = rewriter.create(loc, voidPtrTy, - voidPtrBase, args); + resultAddr = rewriter.create( + loc, voidPtrTy, voidPtrBase, + llvm::ArrayRef{off}); i += arrTy.getDimension() - 1; cpnTy = arrTy.getEleTy(); } else if (auto recTy = cpnTy.dyn_cast()) { @@ -2469,12 +2444,11 @@ mlir::Value nxtOpnd = operands[i]; auto memObj = rewriter.create(loc, recRefTy, resultAddr); - llvm::SmallVector args = {c0, nxtOpnd}; cpnTy = recTy.getType(getFieldNumber(recTy, nxtOpnd)); auto llvmCurrentObjTy = lowerTy().convertType(cpnTy); auto gep = rewriter.create( loc, mlir::LLVM::LLVMPointerType::get(llvmCurrentObjTy), memObj, - args); + llvm::ArrayRef{0, nxtOpnd}); resultAddr = rewriter.create(loc, voidPtrTy, gep); } else { @@ -2529,11 +2503,9 @@ loc, "fir.coordinate_of with a dynamic element size is unsupported"); if (hasKnownShape || columnIsDeferred) { - llvm::SmallVector offs; + llvm::SmallVector offs; if (hasKnownShape && hasSubdimension) { - mlir::LLVM::ConstantOp c0 = - genConstantIndex(loc, lowerTy().indexType(), rewriter, 0); - offs.push_back(c0); + offs.push_back(0); } llvm::Optional dims; llvm::SmallVector arrIdx; Index: flang/test/Fir/alloc.fir =================================================================== --- flang/test/Fir/alloc.fir +++ flang/test/Fir/alloc.fir @@ -48,14 +48,14 @@ } // CHECK-LABEL: define ptr @allocmem_scalar_char( -// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i8], ptr null, i64 1) to i64)) +// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i8], ptr null, i32 1) to i64)) func.func @allocmem_scalar_char() -> !fir.heap> { %1 = fir.allocmem !fir.char<1,10> return %1 : !fir.heap> } // CHECK-LABEL: define ptr @allocmem_scalar_char_kind( -// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i64 1) to i64)) +// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i32 1) to i64)) func.func @allocmem_scalar_char_kind() -> !fir.heap> { %1 = fir.allocmem !fir.char<2,10> return %1 : !fir.heap> @@ -131,14 +131,14 @@ } // CHECK-LABEL: define ptr @allocmem_array_of_nonchar( -// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x i32]], ptr null, i64 1) to i64)) +// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x i32]], ptr null, i32 1) to i64)) func.func @allocmem_array_of_nonchar() -> !fir.heap> { %1 = fir.allocmem !fir.array<3x3xi32> return %1 : !fir.heap> } // CHECK-LABEL: define ptr @allocmem_array_of_char( -// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x [10 x i8]]], ptr null, i64 1) to i64)) +// CHECK: call ptr @malloc(i64 ptrtoint (ptr getelementptr ([3 x [3 x [10 x i8]]], ptr null, i32 1) to i64)) func.func @allocmem_array_of_char() -> !fir.heap>> { %1 = fir.allocmem !fir.array<3x3x!fir.char<1,10>> return %1 : !fir.heap>> @@ -175,7 +175,7 @@ // CHECK-LABEL: define ptr @allocmem_dynarray_of_nonchar( // CHECK-SAME: i64 %[[extent:.*]]) -// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x i32], ptr null, i64 1) to i64), %[[extent]] +// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x i32], ptr null, i32 1) to i64), %[[extent]] // CHECK: call ptr @malloc(i64 %[[prod1]]) 
func.func @allocmem_dynarray_of_nonchar(%e: index) -> !fir.heap> { %1 = fir.allocmem !fir.array<3x?xi32>, %e @@ -213,7 +213,7 @@ // CHECK-LABEL: define ptr @allocmem_dynarray_of_char( // CHECK-SAME: i64 %[[extent:.*]]) -// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i64 1) to i64), %[[extent]] +// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i32 1) to i64), %[[extent]] // CHECK: call ptr @malloc(i64 %[[prod1]]) func.func @allocmem_dynarray_of_char(%e : index) -> !fir.heap>> { %1 = fir.allocmem !fir.array<3x?x!fir.char<2,10>>, %e @@ -222,7 +222,7 @@ // CHECK-LABEL: define ptr @allocmem_dynarray_of_char2( // CHECK-SAME: i64 %[[extent:.*]]) -// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i64 1) to i64), %[[extent]] +// CHECK: %[[prod1:.*]] = mul i64 ptrtoint (ptr getelementptr ([10 x i16], ptr null, i32 1) to i64), %[[extent]] // CHECK: %[[prod2:.*]] = mul i64 %[[prod1]], %[[extent]] // CHECK: call ptr @malloc(i64 %[[prod2]]) func.func @allocmem_dynarray_of_char2(%e : index) -> !fir.heap>> { @@ -316,7 +316,7 @@ // CHECK-LABEL: define ptr @allocmem_array_with_holes_char( // CHECK-SAME: i64 %[[e:.*]]) -// CHECK: %[[mul:.*]] = mul i64 mul (i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i64 1) to i64), i64 4), %[[e]] +// CHECK: %[[mul:.*]] = mul i64 mul (i64 ptrtoint (ptr getelementptr ([3 x [10 x i16]], ptr null, i32 1) to i64), i64 4), %[[e]] // CHECK: call ptr @malloc(i64 %[[mul]]) func.func @allocmem_array_with_holes_char(%e: index) -> !fir.heap>> { %1 = fir.allocmem !fir.array<3x?x4x!fir.char<2,10>>, %e Index: flang/test/Fir/boxproc.fir =================================================================== --- flang/test/Fir/boxproc.fir +++ flang/test/Fir/boxproc.fir @@ -3,7 +3,7 @@ // CHECK-LABEL: define void @_QPtest_proc_dummy() // CHECK: %[[VAL_0:.*]] = alloca i32, i64 1, align 4 // CHECK: %[[VAL_1:.*]] = alloca { ptr }, i64 1, align 8 -// CHECK: %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i64 0, i32 0 +// CHECK: %[[VAL_2:.*]] = getelementptr { ptr }, ptr %[[VAL_1]], i32 0, i32 0 // CHECK: store ptr %[[VAL_0]], ptr %[[VAL_2]], align 8 // CHECK: store i32 1, ptr %[[VAL_0]], align 4 // CHECK: %[[VAL_3:.*]] = alloca [32 x i8], i64 1, align 1 @@ -64,7 +64,7 @@ // CHECK: %[[VAL_0:.*]] = alloca [40 x i8], i64 1, align 1 // CHECK: %[[VAL_1:.*]] = alloca [10 x i8], i64 1, align 1 // CHECK: %[[VAL_2:.*]] = alloca { { ptr, i64 } }, i64 1, align 8 -// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i64 0, i32 0 +// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0 // CHECK: %[[VAL_5:.*]] = insertvalue { ptr, i64 } undef, ptr %[[VAL_1]], 0 // CHECK: %[[VAL_6:.*]] = insertvalue { ptr, i64 } %[[VAL_5]], i64 10, 1 // CHECK: store { ptr, i64 } %[[VAL_6]], ptr %[[VAL_3]], align 8 @@ -73,7 +73,7 @@ // CHECK: %[[VAL_11:.*]] = phi // CHECK: %[[VAL_13:.*]] = phi // CHECK: %[[VAL_15:.*]] = icmp sgt i64 %[[VAL_13]], 0 -// CHECK: %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i64 0, i64 %[[VAL_11]] +// CHECK: %[[VAL_18:.*]] = getelementptr [10 x [1 x i8]], ptr %[[VAL_1]], i32 0, i64 %[[VAL_11]] // CHECK: store [1 x i8] c" ", ptr %[[VAL_18]], align 1 // CHECK: %[[VAL_20:.*]] = alloca [32 x i8], i64 1, align 1 // CHECK: call void @llvm.init.trampoline(ptr %[[VAL_20]], ptr @_QFtest_proc_dummy_charPgen_message, ptr %[[VAL_2]]) @@ -89,7 +89,7 @@ // CHECK-LABEL: define { ptr, i64 } 
@_QFtest_proc_dummy_charPgen_message(ptr // CHECK-SAME: %[[VAL_0:.*]], i64 %[[VAL_1:.*]], ptr nest %[[VAL_2:.*]]) -// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i64 0, i32 0 +// CHECK: %[[VAL_3:.*]] = getelementptr { { ptr, i64 } }, ptr %[[VAL_2]], i32 0, i32 0 // CHECK: %[[VAL_4:.*]] = load { ptr, i64 }, ptr %[[VAL_3]], align 8 // CHECK: %[[VAL_5:.*]] = extractvalue { ptr, i64 } %[[VAL_4]], 0 // CHECK: %[[VAL_6:.*]] = extractvalue { ptr, i64 } %[[VAL_4]], 1 Index: flang/test/Fir/convert-to-llvm.fir =================================================================== --- flang/test/Fir/convert-to-llvm.fir +++ flang/test/Fir/convert-to-llvm.fir @@ -216,7 +216,7 @@ // CHECK-LABEL: llvm.func @test_alloc_and_freemem_several() { // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr> -// CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr>, i64) -> !llvm.ptr> +// CHECK: [[PTR:%.*]] = llvm.getelementptr [[NULL]][{{.*}}] : (!llvm.ptr>) -> !llvm.ptr> // CHECK: [[N:%.*]] = llvm.ptrtoint [[PTR]] : !llvm.ptr> to i64 // CHECK: [[MALLOC:%.*]] = llvm.call @malloc([[N]]) // CHECK: [[B1:%.*]] = llvm.bitcast [[MALLOC]] : !llvm.ptr to !llvm.ptr> @@ -883,8 +883,7 @@ // CHECK-LABEL: llvm.func @extract_rank( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i32 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 3] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: llvm.return %[[RANK]] : i32 @@ -899,8 +898,7 @@ // CHECK-LABEL: llvm.func @extract_addr( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr> +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr> // CHECK: %[[ADDR:.*]] = llvm.load %[[GEP]] : !llvm.ptr> // CHECK: llvm.return %[[ADDR]] : !llvm.ptr @@ -910,24 +908,20 @@ func.func @extract_dims(%arg0: !fir.box>) -> index { %c1 = arith.constant 0 : i32 - %cast = fir.convert %arg0 : (!fir.box>) -> !fir.box> + %cast = fir.convert %arg0 : (!fir.box>) -> !fir.box> %0:3 = fir.box_dims %cast, %c1 : (!fir.box>, i32) -> (index, index, index) return %0 : index } // CHECK-LABEL: llvm.func @extract_dims( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i64 -// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[CAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr, i64, i32, i8, i8, i8, i8)>> to !llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>> // CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C0_2]]] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32, i32, i32) -> !llvm.ptr +// CHECK: %[[CAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr, i64, i32, i8, i8, i8, i8)>> to !llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x 
array<3 x i64>>)>> +// CHECK: %[[GEP0:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 0] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr // CHECK: %[[LOAD0:.*]] = llvm.load %[[GEP0]] : !llvm.ptr -// CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32 -// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C1]]] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32, i32, i32) -> !llvm.ptr +// CHECK: %[[GEP1:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 1] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr // CHECK: %[[LOAD1:.*]] = llvm.load %[[GEP1]] : !llvm.ptr -// CHECK: %[[C2:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK: %[[GEP2:.*]] = llvm.getelementptr %[[CAST]][%[[C0]], 7, %[[C0_1]], %[[C2]]] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32, i32, i32) -> !llvm.ptr +// CHECK: %[[GEP2:.*]] = llvm.getelementptr %[[CAST]][0, 7, %[[C0]], 2] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr // CHECK: %[[LOAD2:.*]] = llvm.load %[[GEP2]] : !llvm.ptr // CHECK: llvm.return %[[LOAD0]] : i64 @@ -942,8 +936,7 @@ // CHECK-LABEL: llvm.func @extract_elesize( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i32 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 1] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 1] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[ELE_SIZE:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: llvm.return %[[ELE_SIZE]] : i32 @@ -959,8 +952,7 @@ // CHECK-LABEL: llvm.func @box_isarray( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 3] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 3] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[RANK:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: %[[C0_ISARRAY:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[IS_ARRAY:.*]] = llvm.icmp "ne" %[[RANK]], %[[C0_ISARRAY]] : i32 @@ -979,8 +971,7 @@ // CHECK-LABEL: llvm.func @box_isalloc( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 5] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(2 : i32) : i32 // CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32 @@ -1001,8 +992,7 @@ // CHECK-LABEL: llvm.func @box_isptr( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> i1 -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 5] : 
(!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 5] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[ATTR:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: %[[ATTR_ISALLOC:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[AND:.*]] = llvm.and %[[ATTR]], %[[ATTR_ISALLOC]] : i32 @@ -1489,8 +1479,7 @@ // CHECK-LABEL: llvm.func @box_tdesc( // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) { -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][%[[C0]], 4] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i32) -> !llvm.ptr +// CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ARG0]][0, 4] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK: %[[LOAD:.*]] = llvm.load %[[GEP]] : !llvm.ptr // CHECK: %{{.*}} = llvm.inttoptr %[[LOAD]] : i{{.*}} to !llvm.ptr @@ -1653,9 +1642,9 @@ // CHECK: %{{.*}} = llvm.insertvalue %[[TYPE_CODE_I8]], %{{.*}}[4 : i32] : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)> // CHECK: %[[F18ADDENDUM:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK: %[[F18ADDENDUM_I8:.*]] = llvm.trunc %[[F18ADDENDUM]] : i32 to i8 -// CHECK: %{{.*}} = llvm.insertvalue %[[F18ADDENDUM_I8]], %18[6 : i32] : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)> +// CHECK: %{{.*}} = llvm.insertvalue %[[F18ADDENDUM_I8]], %17[6 : i32] : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)> // CHECK: %[[TDESC:.*]] = llvm.mlir.addressof @_QMtest_dinitE.dt.tseq : !llvm.ptr -// CHECK: %[[TDESC_CAST:.*]] = llvm.bitcast %22 : !llvm.ptr to !llvm.ptr +// CHECK: %[[TDESC_CAST:.*]] = llvm.bitcast %21 : !llvm.ptr to !llvm.ptr // CHECK: %{{.*}} = llvm.insertvalue %[[TDESC_CAST]], %{{.*}}[7 : i32] : !llvm.struct<(ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i{{.*}}>)> // ----- @@ -1797,7 +1786,7 @@ // integer::n,sh1,sh2 // double precision::arr(sh1:n,sh2:n) // call xb(arr(2:n,4:n)) -// end subroutine +// end subroutine // ``` // N is the upperbound, sh1 and sh2 are the shifts or lowerbounds @@ -1815,7 +1804,7 @@ %box = fircg.ext_embox %arr(%n1, %n2) origin %sh1, %sh2[%c2, %N, %c1, %c4, %N, %c1] : (!fir.ref>, index, index, index, index, index, index, index, index, index, index) -> !fir.box> fir.call @_QPxb(%box) : (!fir.box>) -> () return -} +} func.func private @_QPxb(!fir.box>) // CHECK-LABEL: llvm.func @_QPsb( @@ -1932,10 +1921,9 @@ // CHECK: %[[BOX6:.*]] = llvm.insertvalue %[[F18ADDENDUM_I8]], %[[BOX5]][6 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64 +// CHECK: %[[ELEM_LEN_I64:.*]] = llvm.sext %[[ELEM_LEN]] : i32 to i64 // CHECK: %[[ELE_TYPE:.*]] = llvm.mlir.null : !llvm.ptr> -// CHECK: %[[C1_0:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][%[[C1_0]]] : (!llvm.ptr>, i64) -> !llvm.ptr> +// CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][1] : (!llvm.ptr>) -> !llvm.ptr> // CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = 
llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr> to i64 // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %3, %30 : i64 // CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[C10]], %[[C1]] : i64 @@ -2034,15 +2022,12 @@ // CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK: %[[IDX:.*]] = llvm.sub %[[C0]], %[[C1]] : i64 // CHECK: %[[DIFF0:.*]] = llvm.mul %[[IDX]], %[[C1]] : i64 -// CHECK: %[[C0_2:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[DIMOFFSET:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[STRIDPOS:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_2]], 7, %[[DIMOFFSET]], %[[STRIDPOS]]] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK: %[[GEPSTRIDE:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIMOFFSET]], 2] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK: %[[LOADEDSTRIDE:.*]] = llvm.load %[[GEPSTRIDE]] : !llvm.ptr // CHECK: %[[SC:.*]] = llvm.mul %[[DIFF0]], %[[LOADEDSTRIDE]] : i64 // CHECK: %[[OFFSET:.*]] = llvm.add %[[SC]], %[[C0_1]] : i64 -// CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][%[[C0_3]], 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr> +// CHECK: %[[GEPADDR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr> // CHECK: %[[LOADEDADDR:.*]] = llvm.load %[[GEPADDR]] : !llvm.ptr> // CHECK: %[[LOADEDADDRBITCAST:.*]] = llvm.bitcast %[[LOADEDADDR]] : !llvm.ptr to !llvm.ptr // CHECK: %[[GEPADDROFFSET:.*]] = llvm.getelementptr %[[LOADEDADDRBITCAST]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr @@ -2212,17 +2197,12 @@ //CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8 //CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> //CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64 -//CHECK: %[[GEP_ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -//CHECK: %[[LB1_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32 -//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_1]], 7, %[[DIM1]], %[[LB1_IDX]]] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +//CHECK: %[[DIM1_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM1]], 2] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr //CHECK: %[[DIM1_STRIDE:.*]] = llvm.load %[[DIM1_STRIDE_REF]] : !llvm.ptr //CHECK: %[[DIM2:.*]] = llvm.mlir.constant(1 : i64) : i64 -//CHECK: %[[GEP_ZERO_2:.*]] = llvm.mlir.constant(0 : i32) : i32 -//CHECK: %[[STRIDE2_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32 -//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][%[[GEP_ZERO_2]], 7, %[[DIM2]], %[[STRIDE2_IDX]]] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +//CHECK: %[[DIM2_STRIDE_REF:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM2]], 2] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr //CHECK: %[[DIM2_STRIDE:.*]] = llvm.load %[[DIM2_STRIDE_REF]] : !llvm.ptr -//CHECK: %[[ZERO_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = 
llvm.getelementptr %[[ARG0]][%[[ZERO_1]], 0] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>, i32) -> !llvm.ptr> +//CHECK: %[[SOURCE_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr, i64, i32, i8, i8, i8, i8, array<2 x array<3 x i64>>)>>) -> !llvm.ptr> //CHECK: %[[SOURCE_ARRAY:.*]] = llvm.load %[[SOURCE_ARRAY_PTR]] : !llvm.ptr> //CHECK: %[[ZERO_ELEMS:.*]] = llvm.mlir.constant(0 : i64) : i64 //CHECK: %[[SOURCE_ARRAY_I8PTR:.*]] = llvm.bitcast %[[SOURCE_ARRAY]] : !llvm.ptr to !llvm.ptr @@ -2285,12 +2265,9 @@ //CHECK: %[[ADDENDUM_I8:.*]] = llvm.trunc %[[ADDENDUM]] : i32 to i8 //CHECK: %[[RBOX_TMP6:.*]] = llvm.insertvalue %[[ADDENDUM_I8]], %[[RBOX_TMP5]][6 : i32] : !llvm.struct<(ptr, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>)> //CHECK: %[[DIM1:.*]] = llvm.mlir.constant(0 : i64) : i64 -//CHECK: %[[ZERO_3:.*]] = llvm.mlir.constant(0 : i32) : i32 -//CHECK: %[[STRIDE_IDX:.*]] = llvm.mlir.constant(2 : i32) : i32 -//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_3]], 7, %[[DIM1]], %[[STRIDE_IDX]]] : (!llvm.ptr)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i32, i64, i32) -> !llvm.ptr +//CHECK: %[[SRC_STRIDE_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 7, %[[DIM1]], 2] : (!llvm.ptr)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i64) -> !llvm.ptr //CHECK: %[[SRC_STRIDE:.*]] = llvm.load %[[SRC_STRIDE_PTR]] : !llvm.ptr -//CHECK: %[[ZERO_4:.*]] = llvm.mlir.constant(0 : i32) : i32 -//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][%[[ZERO_4]], 0] : (!llvm.ptr)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i32) -> !llvm.ptr)>>> +//CHECK: %[[SRC_ARRAY_PTR:.*]] = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr)>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>) -> !llvm.ptr)>>> //CHECK: %[[SRC_ARRAY:.*]] = llvm.load %[[SRC_ARRAY_PTR]] : !llvm.ptr)>>> //CHECK: %[[ZERO_6:.*]] = llvm.mlir.constant(0 : i64) : i64 //CHECK: %[[SRC_CAST:.*]] = llvm.bitcast %[[SRC_ARRAY]] : !llvm.ptr)>> to !llvm.ptr)>> @@ -2330,8 +2307,7 @@ } // CHECK-LABEL: llvm.func @coordinate_ref_complex // CHECK-SAME: %[[ARG0:.*]]: !llvm.ptr> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %{{.*}} = llvm.getelementptr %[[ARG0]][%[[C0]], 0] : (!llvm.ptr>, i64) -> !llvm.ptr +// CHECK: %{{.*}} = llvm.getelementptr %[[ARG0]][0, 0] : (!llvm.ptr>) -> !llvm.ptr // CHECK-NEXT: llvm.return // ----- @@ -2344,8 +2320,7 @@ } // CHECK-LABEL: llvm.func @coordinate_box_complex // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>> -// CHECK: %[[C0:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %{{.*}} = llvm.getelementptr %[[BOX]][%[[C0]], 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>, i64) -> !llvm.ptr +// CHECK: %{{.*}} = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}})>>) -> !llvm.ptr // CHECK-NEXT: llvm.return // ----- @@ -2362,13 +2337,11 @@ // CHECK-LABEL: llvm.func @coordinate_box_derived_1 // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>) // CHECK: %[[COORDINATE:.*]] = llvm.mlir.constant(1 : i32) : i32 -// CHECK: %[[C0_3:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[C0_1]], 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, 
i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>, i32) -> !llvm.ptr>> +// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>) -> !llvm.ptr>> // CHECK: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr>> // CHECK: %[[DERIVED_CAST:.*]] = llvm.bitcast %[[DERIVED_VAL]] : !llvm.ptr> to !llvm.ptr> -// CHECK: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST]][%[[C0_3]], 1] : (!llvm.ptr>, i64) -> !llvm.ptr -// CHECK: %[[CAST_TO_I8_PTR:.*]] = llvm.bitcast %7 : !llvm.ptr to !llvm.ptr +// CHECK: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST]][0, 1] : (!llvm.ptr>) -> !llvm.ptr +// CHECK: %[[CAST_TO_I8_PTR:.*]] = llvm.bitcast %[[SUBOBJECT_ADDR]] : !llvm.ptr to !llvm.ptr // CHECK: %{{.*}} = llvm.bitcast %[[CAST_TO_I8_PTR]] : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.return @@ -2384,15 +2357,13 @@ // CHECK-SAME: (%[[BOX:.*]]: !llvm.ptr, i32)>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>) // CHECK-NEXT: %[[C0_0:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK-NEXT: %[[C1:.*]] = llvm.mlir.constant(1 : i32) : i32 -// CHECK-NEXT: %[[C0_3:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK-NEXT: %[[C0_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[C0_1]], 0] : (!llvm.ptr, i32)>>, i{{.*}}, i{{.*}}32, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>, i32) -> !llvm.ptr, i32)>>> +// CHECK: %[[DERIVED_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr, i32)>>, i{{.*}}, i{{.*}}32, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, ptr, array<1 x i64>)>>) -> !llvm.ptr, i32)>>> // CHECK-NEXT: %[[DERIVED_VAL:.*]] = llvm.load %[[DERIVED_ADDR]] : !llvm.ptr, i32)>>> // CHECK-NEXT: %[[DERIVED_CAST_I8_PTR:.*]] = llvm.bitcast %[[DERIVED_VAL]] : !llvm.ptr, i32)>> to !llvm.ptr, i32)>> -// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST_I8_PTR]][%[[C0_3]], 0] : (!llvm.ptr, i32)>>, i64) -> !llvm.ptr> +// CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR:.*]] = llvm.getelementptr %[[DERIVED_CAST_I8_PTR]][0, 0] : (!llvm.ptr, i32)>>) -> !llvm.ptr> // CHECK-NEXT: %[[ANOTHER_DERIVED_ADDR_AS_VOID_PTR:.*]] = llvm.bitcast %[[ANOTHER_DERIVED_ADDR]] : !llvm.ptr> to !llvm.ptr // CHECK-NEXT: %[[ANOTHER_DERIVED_RECAST:.*]] = llvm.bitcast %[[ANOTHER_DERIVED_ADDR_AS_VOID_PTR]] : !llvm.ptr to !llvm.ptr> -// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ANOTHER_DERIVED_RECAST]][%[[C0_3]], 1] : (!llvm.ptr>, i64) -> !llvm.ptr +// CHECK-NEXT: %[[SUBOBJECT_ADDR:.*]] = llvm.getelementptr %[[ANOTHER_DERIVED_RECAST]][0, 1] : (!llvm.ptr>) -> !llvm.ptr // CHECK-NEXT: %[[SUBOBJECT_AS_VOID_PTR:.*]] = llvm.bitcast %[[SUBOBJECT_ADDR]] : !llvm.ptr to !llvm.ptr // CHECK-NEXT: %{{.*}} = llvm.bitcast %[[SUBOBJECT_AS_VOID_PTR]] : !llvm.ptr to !llvm.ptr // CHECK-NEXT: llvm.return @@ -2412,21 +2383,13 @@ // CHECK-LABEL: llvm.func @coordinate_box_array_1d // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>> // CHECK-SAME: %[[COORDINATE:.*]]: i64 -// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64 // There's only one box here. Its index is `0`. Generate it. 
-// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr>> +// CHECK: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr>> // CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr>> // CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Same as [[BOX_IDX]], just recreated. -// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// Index of the array that contains the CFI_dim_t objects -// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32 // Index of the 1st CFI_dim_t object (corresonds the the 1st dimension) // CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Index of the memory stride within a CFI_dim_t object -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr // CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] : i64 // CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64 @@ -2443,22 +2406,12 @@ // CHECK-LABEL: llvm.func @coordinate_of_box_dynamic_array_1d // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>> // CHECK-SAME: %[[COORDINATE:.*]]: i64 -// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64 -// There's only one box here. Its index is `0`. Generate it. -// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[BOX_1ST_ELEM_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32) -> !llvm.ptr> +// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>) -> !llvm.ptr> // CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr> // CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Same as [[BOX_IDX]], just recreated. 
-// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// Index of the array that contains the CFI_dim_t objects -// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32 // Index of the 1st CFI_dim_t object (corresonds the the 1st dimension) // CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Index of the memory stride within a CFI_dim_t object -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr // CHECK-NEXT: %[[BYTE_OFFSET:.*]] = llvm.mul %[[COORDINATE]], %[[DIM_1_MEM_STRIDE_VAL]] : i64 // CHECK-NEXT: %[[SUBOJECT_OFFSET:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64 @@ -2477,34 +2430,18 @@ // CHECK-LABEL: llvm.func @coordinate_box_array_2d // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>> // CHECK-SAME: %[[COORDINATE_1:.*]]: i64, %[[COORDINATE_2:.*]]: i64) -// CHECK-NEXT: %{{.*}} = llvm.mlir.constant(0 : i64) : i64 -// There's only one box here. Its index is `0`. Generate it. -// CHECK-NEXT: %[[BOX_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[BOX_1ST_ELEM_IDX:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX]], 0] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32) -> !llvm.ptr>>> +// CHECK-NEXT: %[[ARRAY_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 0] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>) -> !llvm.ptr>>> // CHECK-NEXT: %[[ARRAY_OBJECT:.*]] = llvm.load %[[ARRAY_ADDR]] : !llvm.ptr>>> // CHECK-NEXT: %[[OFFSET_INIT:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Same as [[BOX_IDX]], just recreated. 
-// CHECK-NEXT: %[[BOX_IDX_1:.*]] = llvm.mlir.constant(0 : i32) : i32 -// Index of the array that contains the CFI_dim_t objects -// CHECK-NEXT: %[[CFI_DIM_IDX:.*]] = llvm.mlir.constant(7 : i32) : i32 // Index of the 1st CFI_dim_t object (corresonds the the 1st dimension) // CHECK-NEXT: %[[DIM_1_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64 -// Index of the memory stride within a CFI_dim_t object -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_1]], 7, %[[DIM_1_IDX]], %[[DIM_1_MEM_STRIDE]]] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK-NEXT: %[[DIM_1_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_1_IDX]], 2] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK-NEXT: %[[DIM_1_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_1_MEM_STRIDE_ADDR]] : !llvm.ptr // CHECK-NEXT: %[[BYTE_OFFSET_1:.*]] = llvm.mul %[[COORDINATE_1]], %[[DIM_1_MEM_STRIDE_VAL]] : i64 // CHECK-NEXT: %[[SUBOBJECT_OFFSET_1:.*]] = llvm.add %[[BYTE_OFFSET]], %[[OFFSET_INIT]] : i64 -// Same as [[BOX_IDX]], just recreated. -// CHECK-NEXT: %[[BOX_IDX_2:.*]] = llvm.mlir.constant(0 : i32) : i32 -// Index of the array that contains the CFI_dim_t objects (same as CFI_DIM_IDX, just recreated) -// CHECK-NEXT: %[[CFI_DIM_IDX_1:.*]] = llvm.mlir.constant(7 : i32) : i32 // Index of the 1st CFI_dim_t object (corresonds the the 2nd dimension) // CHECK-NEXT: %[[DIM_2_IDX:.*]] = llvm.mlir.constant(1 : i64) : i64 -// Index of the memory stride within a CFI_dim_t object -// CHECK-NEXT: %[[DIM_2_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][%[[BOX_IDX_2]], 7, %[[DIM_2_IDX]], %[[DIM_2_MEM_STRIDE]]] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK-NEXT: %[[DIM_2_MEM_STRIDE_ADDR:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_2_IDX]], 2] : (!llvm.ptr>>, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)>>, i64) -> !llvm.ptr // CHECK-NEXT: %[[DIM_2_MEM_STRIDE_VAL:.*]] = llvm.load %[[DIM_2_MEM_STRIDE_ADDR]] : !llvm.ptr // CHECK-NEXT: %[[BYTE_OFFSET_2:.*]] = llvm.mul %[[COORDINATE_2]], %[[DIM_2_MEM_STRIDE_VAL]] : i64 // CHECK-NEXT: %[[SUBOBJECT_OFFSET_2:.*]] = llvm.add %[[BYTE_OFFSET_2]], %[[SUBOBJECT_OFFSET_1]] : i64 @@ -2526,23 +2463,18 @@ // CHECK-LABEL: llvm.func @coordinate_box_derived_inside_array( // CHECK-SAME: %[[BOX:.*]]: !llvm.ptr>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, // CHECK-SAME: %[[COORDINATE_1:.*]]: i64) { -// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[VAL_5:.*]] = llvm.mlir.constant(0 : i32) : i32 -// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[BOX]]{{\[}}%[[VAL_4]], 0] : (!llvm.ptr>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i32) -> !llvm.ptr>>> +// CHECK: %[[VAL_6:.*]] = llvm.getelementptr %[[BOX]]{{\[}}0, 0] : (!llvm.ptr>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>) -> !llvm.ptr>>> // CHECK: %[[ARRAY:.*]] = llvm.load %[[VAL_6]] : !llvm.ptr>>> // CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_9:.*]] = llvm.mlir.constant(0 : i32) 
: i32 // CHECK: %[[DIM_IDX:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[DIM_MEM_STRIDE:.*]] = llvm.mlir.constant(2 : i32) : i32 -// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[BOX]][%[[VAL_9]], 7, %[[DIM_IDX]], %[[DIM_MEM_STRIDE]]] : (!llvm.ptr>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i32, i64, i32) -> !llvm.ptr +// CHECK: %[[VAL_13:.*]] = llvm.getelementptr %[[BOX]][0, 7, %[[DIM_IDX]], 2] : (!llvm.ptr>>, i64, i32, i8, i8, i8, i8, array<1 x array<3 x i64>>, ptr, array<1 x i64>)>>, i64) -> !llvm.ptr // CHECK: %[[VAL_14:.*]] = llvm.load %[[VAL_13]] : !llvm.ptr // CHECK: %[[VAL_15:.*]] = llvm.mul %[[COORDINATE_1]], %[[VAL_14]] : i64 // CHECK: %[[OFFSET:.*]] = llvm.add %[[VAL_15]], %[[VAL_8]] : i64 // CHECK: %[[VAL_17:.*]] = llvm.bitcast %[[ARRAY]] : !llvm.ptr>> to !llvm.ptr // CHECK: %[[VAL_18:.*]] = llvm.getelementptr %[[VAL_17]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr // CHECK: %[[DERIVED:.*]] = llvm.bitcast %[[VAL_18]] : !llvm.ptr to !llvm.ptr> -// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[DERIVED]][%[[VAL_3]], 1] : (!llvm.ptr>, i64) -> !llvm.ptr +// CHECK: %[[VAL_20:.*]] = llvm.getelementptr %[[DERIVED]][0, 1] : (!llvm.ptr>) -> !llvm.ptr // CHECK: %[[VAL_21:.*]] = llvm.bitcast %[[VAL_20]] : !llvm.ptr to !llvm.ptr // CHECK: %[[VAL_22:.*]] = llvm.bitcast %[[VAL_21]] : !llvm.ptr to !llvm.ptr // CHECK: llvm.return @@ -2572,8 +2504,7 @@ // CHECK-LABEL: llvm.func @coordinate_array_known_size_1d( // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr>, // CHECK-SAME: %[[VAL_1:.*]]: i64) { -// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr +// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, %[[VAL_1]]] : (!llvm.ptr>, i64) -> !llvm.ptr // CHECK: llvm.return // CHECK: } @@ -2587,8 +2518,7 @@ // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr>>, // CHECK-SAME: %[[VAL_1:.*]]: i64, // CHECK-SAME: %[[VAL_2:.*]]: i64) { -// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_3]], %[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr>>, i64, i64, i64) -> !llvm.ptr +// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, %[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr>>, i64, i64) -> !llvm.ptr // CHECK: llvm.return // CHECK: } @@ -2601,8 +2531,7 @@ // CHECK-LABEL: llvm.func @coordinate_array_known_size_2d_get_array( // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr>>, // CHECK-SAME: %[[VAL_1:.*]]: i64) { -// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]][%[[VAL_2]], %[[VAL_1]]] : (!llvm.ptr>>, i64, i64) -> !llvm.ptr> +// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]][0, %[[VAL_1]]] : (!llvm.ptr>>, i64) -> !llvm.ptr> // CHECK: llvm.return // CHECK: } @@ -2616,8 +2545,7 @@ } // CHECK-LABEL: llvm.func @coordinate_ref_derived( // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr>) { -// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], 1] : (!llvm.ptr>, i64) -> !llvm.ptr +// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 1] : (!llvm.ptr>) -> !llvm.ptr // CHECK: llvm.return // CHECK: } @@ -2631,8 +2559,7 @@ } // CHECK-LABEL: llvm.func @coordinate_ref_derived_nested( // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr, i32)>>) { -// CHECK: %[[VAL_3:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_4:.*]] = llvm.getelementptr 
%[[VAL_0]]{{\[}}%[[VAL_3]], 0, 1] : (!llvm.ptr, i32)>>, i64) -> !llvm.ptr +// CHECK: %[[VAL_4:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0, 1] : (!llvm.ptr, i32)>>) -> !llvm.ptr // CHECK: llvm.return // CHECK: } @@ -2661,8 +2588,7 @@ } // CHECK-LABEL: llvm.func @test_coordinate_of_tuple( // CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr)>>) { -// CHECK: %[[VAL_2:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_2]], 0] : (!llvm.ptr)>>, i64) -> !llvm.ptr +// CHECK: %[[VAL_3:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr)>>) -> !llvm.ptr // CHECK: llvm.return // CHECK: } Index: flang/test/Fir/embox.fir =================================================================== --- flang/test/Fir/embox.fir +++ flang/test/Fir/embox.fir @@ -42,7 +42,7 @@ // CHECK: %[[a5:.*]] = insertvalue { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } // CHECK-SAME: { ptr undef, i64 4, i32 20180515, i8 1, i8 9, i8 0, i8 0, [1 x [3 x i64]] // CHECK-SAME: [i64 1, i64 5, i64 mul -// CHECK-SAME: (i64 ptrtoint (ptr getelementptr (%_QFtest_dt_sliceTt, ptr null, i64 1) to i64), i64 2)]] } +// CHECK-SAME: (i64 ptrtoint (ptr getelementptr (%_QFtest_dt_sliceTt, ptr null, i32 1) to i64), i64 2)]] } // CHECK-SAME: , ptr %[[a4]], 0 // CHECK: store { ptr, i64, i32, i8, i8, i8, i8, [1 x [3 x i64]] } %[[a5]], ptr %[[a1]], align 8, Index: flang/test/Fir/field-index.fir =================================================================== --- flang/test/Fir/field-index.fir +++ flang/test/Fir/field-index.fir @@ -10,7 +10,7 @@ // CHECK-SAME: (ptr %[[arg0:.*]]) func.func @simple_field(%arg0: !fir.ref>) -> i32 { %1 = fir.field_index i, !fir.type - // CHECK: %[[GEP:.*]] = getelementptr %a, ptr %[[arg0]], i64 0, i32 1 + // CHECK: %[[GEP:.*]] = getelementptr %a, ptr %[[arg0]], i32 0, i32 1 %2 = fir.coordinate_of %arg0, %1 : (!fir.ref>, !fir.field) -> !fir.ref // CHECK: load i32, ptr %[[GEP]] %3 = fir.load %2 : !fir.ref @@ -22,7 +22,7 @@ func.func @derived_field(%arg0: !fir.ref}>>) -> i32 { %1 = fir.field_index some_b, !fir.type}> %2 = fir.field_index i, !fir.type - // CHECK: %[[GEP:.*]] = getelementptr %c, ptr %[[arg0]], i64 0, i32 1, i32 1 + // CHECK: %[[GEP:.*]] = getelementptr %c, ptr %[[arg0]], i32 0, i32 1, i32 1 %3 = fir.coordinate_of %arg0, %1, %2 : (!fir.ref}>>, !fir.field, !fir.field) -> !fir.ref // CHECK: load i32, ptr %[[GEP]] %4 = fir.load %3 : !fir.ref Index: flang/test/Fir/loop10.fir =================================================================== --- flang/test/Fir/loop10.fir +++ flang/test/Fir/loop10.fir @@ -13,7 +13,7 @@ // CHECK-DAG: %[[COL:.*]] = phi i64 {{.*}} [ 11, // CHECK: icmp sgt i64 %[[COL]], 0 fir.do_loop %jv = %c0 to %c10 step %c1 { - // CHECK: getelementptr {{.*}} %[[ADDR]], i64 0, i64 %[[R]], i64 %[[C]] + // CHECK: getelementptr {{.*}} %[[ADDR]], i32 0, i64 %[[R]], i64 %[[C]] %ptr = fir.coordinate_of %addr, %jv, %iv : (!fir.ref>, index, index) -> !fir.ref %c22 = arith.constant 22 : i32 // CHECK: store i32 22, Index: flang/test/Fir/rebox-susbtring.fir =================================================================== --- flang/test/Fir/rebox-susbtring.fir +++ flang/test/Fir/rebox-susbtring.fir @@ -19,10 +19,9 @@ // Only test the computation of the base address offset computation accounting for the substring // CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[VAL_30:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_37:.*]] = llvm.getelementptr 
%[[VAL_0]]{{\[}}%[[VAL_7]], 0] : (!llvm.ptr<[[char20_descriptor_t]]>)>>, i32) -> !llvm.ptr>> +// CHECK: %[[VAL_37:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr<[[char20_descriptor_t]]>)>>) -> !llvm.ptr>> // CHECK: %[[VAL_38:.*]] = llvm.load %[[VAL_37]] : !llvm.ptr>> // CHECK: %[[VAL_39:.*]] = llvm.bitcast %[[VAL_38]] : !llvm.ptr> to !llvm.ptr> // CHECK: %[[VAL_40:.*]] = llvm.getelementptr %[[VAL_39]]{{\[}}%[[VAL_30]], %[[VAL_4]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr @@ -52,11 +51,9 @@ // Only test the computation of the base address offset computation accounting for the substring of the component // CHECK: %[[VAL_1:.*]] = llvm.mlir.constant(1 : i32) : i32 -// CHECK: %[[VAL_4:.*]] = llvm.mlir.constant(1 : i64) : i64 -// CHECK: %[[VAL_17:.*]] = llvm.mlir.constant(0 : i32) : i32 // CHECK: %[[VAL_21:.*]] = llvm.mlir.constant(0 : i64) : i64 -// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}%[[VAL_17]], 0] : (!llvm.ptr<[[struct_t_descriptor:.*]]>, i32) -> !llvm.ptr> +// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_0]]{{\[}}0, 0] : (!llvm.ptr<[[struct_t_descriptor:.*]]>) -> !llvm.ptr> // CHECK: %[[VAL_31:.*]] = llvm.load %[[VAL_30]] : !llvm.ptr> // CHECK: %[[VAL_32:.*]] = llvm.bitcast %[[VAL_31]] : !llvm.ptr<[[struct_t]]> to !llvm.ptr<[[struct_t]]> // CHECK: %[[VAL_33:.*]] = llvm.getelementptr %[[VAL_32]]{{\[}}%[[VAL_21]], 1, %[[VAL_4]]] : (!llvm.ptr<[[struct_t]]>, i64, i64) -> !llvm.ptr Index: flang/test/Lower/complex-part.f90 =================================================================== --- flang/test/Lower/complex-part.f90 +++ flang/test/Lower/complex-part.f90 @@ -8,5 +8,5 @@ ! Verify that the offset in the struct does not regress from i32. ! CHECK-LABEL: define void @_QQmain() -! CHECK: getelementptr { float, float }, ptr %{{[0-9]+}}, i64 0, i32 0 +! CHECK: getelementptr { float, float }, ptr %{{[0-9]+}}, i32 0, i32 0 Index: mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp =================================================================== --- mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp +++ mlir/lib/Conversion/AsyncToLLVM/AsyncToLLVM.cpp @@ -567,10 +567,8 @@ // %Size = getelementptr %T* null, int 1 // %SizeI = ptrtoint %T* %Size to i64 auto nullPtr = rewriter.create(loc, storagePtrType); - auto one = rewriter.create( - loc, i64, rewriter.getI64IntegerAttr(1)); auto gep = rewriter.create(loc, storagePtrType, nullPtr, - one.getResult()); + ArrayRef{1}); return rewriter.create(loc, i64, gep); }; Index: mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp =================================================================== --- mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp +++ mlir/lib/Conversion/GPUCommon/GPUOpsLowering.cpp @@ -82,12 +82,7 @@ // Rewrite workgroup memory attributions to addresses of global buffers. 
rewriter.setInsertionPointToStart(&gpuFuncOp.front()); unsigned numProperArguments = gpuFuncOp.getNumArguments(); - auto i32Type = IntegerType::get(rewriter.getContext(), 32); - Value zero = nullptr; - if (!workgroupBuffers.empty()) - zero = rewriter.create(loc, i32Type, - rewriter.getI32IntegerAttr(0)); for (const auto &en : llvm::enumerate(workgroupBuffers)) { LLVM::GlobalOp global = en.value(); Value address = rewriter.create(loc, global); @@ -95,7 +90,7 @@ global.getType().cast().getElementType(); Value memory = rewriter.create( loc, LLVM::LLVMPointerType::get(elementType, global.getAddrSpace()), - address, ArrayRef{zero, zero}); + address, ArrayRef{0, 0}); // Build a memref descriptor pointing to the buffer to plug with the // existing memref infrastructure. This may use more registers than @@ -170,7 +165,6 @@ mlir::Type llvmI8 = typeConverter->convertType(rewriter.getI8Type()); mlir::Type i8Ptr = LLVM::LLVMPointerType::get(llvmI8); - mlir::Type llvmIndex = typeConverter->convertType(rewriter.getIndexType()); mlir::Type llvmI32 = typeConverter->convertType(rewriter.getI32Type()); mlir::Type llvmI64 = typeConverter->convertType(rewriter.getI64Type()); // Note: this is the GPUModule op, not the ModuleOp that surrounds it @@ -226,10 +220,8 @@ // Get a pointer to the format string's first element and pass it to printf() Value globalPtr = rewriter.create(loc, global); - Value zero = rewriter.create( - loc, llvmIndex, rewriter.getIntegerAttr(llvmIndex, 0)); Value stringStart = rewriter.create( - loc, i8Ptr, globalPtr, mlir::ValueRange({zero, zero})); + loc, i8Ptr, globalPtr, ArrayRef{0, 0}); Value stringLen = rewriter.create( loc, llvmI64, rewriter.getI64IntegerAttr(formatStringSize)); @@ -289,7 +281,6 @@ mlir::Type llvmI8 = typeConverter->convertType(rewriter.getIntegerType(8)); mlir::Type i8Ptr = LLVM::LLVMPointerType::get(llvmI8, addressSpace); - mlir::Type llvmIndex = typeConverter->convertType(rewriter.getIndexType()); // Note: this is the GPUModule op, not the ModuleOp that surrounds it // This ensures that global constants and declarations are placed within @@ -325,10 +316,8 @@ // Get a pointer to the format string's first element Value globalPtr = rewriter.create(loc, global); - Value zero = rewriter.create( - loc, llvmIndex, rewriter.getIntegerAttr(llvmIndex, 0)); Value stringStart = rewriter.create( - loc, i8Ptr, globalPtr, mlir::ValueRange({zero, zero})); + loc, i8Ptr, globalPtr, ArrayRef{0, 0}); // Construct arguments and function call auto argsRange = adaptor.args(); Index: mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp =================================================================== --- mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp +++ mlir/lib/Conversion/GPUCommon/GPUToLLVMConversion.cpp @@ -653,17 +653,14 @@ loc, llvmInt32Type, builder.getI32IntegerAttr(numArguments)); auto arrayPtr = builder.create(loc, llvmPointerPointerType, arraySize, /*alignment=*/0); - auto zero = builder.create(loc, llvmInt32Type, - builder.getI32IntegerAttr(0)); for (const auto &en : llvm::enumerate(arguments)) { - auto index = builder.create( - loc, llvmInt32Type, builder.getI32IntegerAttr(en.index())); auto fieldPtr = builder.create( loc, LLVM::LLVMPointerType::get(argumentTypes[en.index()]), structPtr, - ArrayRef{zero, index.getResult()}); + ArrayRef{0, en.index()}); builder.create(loc, en.value(), fieldPtr); - auto elementPtr = builder.create(loc, llvmPointerPointerType, - arrayPtr, index.getResult()); + auto elementPtr = + builder.create(loc, llvmPointerPointerType, arrayPtr, + 
ArrayRef{en.index()}); auto casted = builder.create(loc, llvmPointerType, fieldPtr); builder.create(loc, casted, elementPtr); @@ -811,8 +808,8 @@ Type elementPtrType = getElementPtrType(memRefType); Value nullPtr = rewriter.create(loc, elementPtrType); - Value gepPtr = rewriter.create(loc, elementPtrType, nullPtr, - ArrayRef{numElements}); + Value gepPtr = + rewriter.create(loc, elementPtrType, nullPtr, numElements); auto sizeBytes = rewriter.create(loc, getIndexType(), gepPtr); Index: mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp =================================================================== --- mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp +++ mlir/lib/Conversion/LLVMCommon/MemRefBuilder.cpp @@ -138,7 +138,6 @@ auto arrayPtrTy = LLVM::LLVMPointerType::get(arrayTy); // Copy size values to stack-allocated memory. - auto zero = createIndexAttrConstant(builder, loc, indexType, 0); auto one = createIndexAttrConstant(builder, loc, indexType, 1); auto sizes = builder.create( loc, arrayTy, value, @@ -149,7 +148,7 @@ // Load an return size value of interest. auto resultPtr = builder.create(loc, indexPtrTy, sizesPtr, - ValueRange({zero, pos})); + ArrayRef{0, pos}); return builder.create(loc, resultPtr); } @@ -402,10 +401,8 @@ Value elementPtrPtr = builder.create(loc, elemPtrPtrType, memRefDescPtr); - Value one = - createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 1); Value alignedGep = builder.create( - loc, elemPtrPtrType, elementPtrPtr, ValueRange({one})); + loc, elemPtrPtrType, elementPtrPtr, ArrayRef{1}); return builder.create(loc, alignedGep); } @@ -417,10 +414,8 @@ Value elementPtrPtr = builder.create(loc, elemPtrPtrType, memRefDescPtr); - Value one = - createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 1); Value alignedGep = builder.create( - loc, elemPtrPtrType, elementPtrPtr, ValueRange({one})); + loc, elemPtrPtrType, elementPtrPtr, ArrayRef{1}); builder.create(loc, alignedPtr, alignedGep); } @@ -431,10 +426,8 @@ Value elementPtrPtr = builder.create(loc, elemPtrPtrType, memRefDescPtr); - Value two = - createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 2); Value offsetGep = builder.create( - loc, elemPtrPtrType, elementPtrPtr, ValueRange({two})); + loc, elemPtrPtrType, elementPtrPtr, ArrayRef{2}); offsetGep = builder.create( loc, LLVM::LLVMPointerType::get(typeConverter.getIndexType()), offsetGep); return builder.create(loc, offsetGep); @@ -447,10 +440,8 @@ Value elementPtrPtr = builder.create(loc, elemPtrPtrType, memRefDescPtr); - Value two = - createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 2); Value offsetGep = builder.create( - loc, elemPtrPtrType, elementPtrPtr, ValueRange({two})); + loc, elemPtrPtrType, elementPtrPtr, ArrayRef{2}); offsetGep = builder.create( loc, LLVM::LLVMPointerType::get(typeConverter.getIndexType()), offsetGep); builder.create(loc, offset, offsetGep); @@ -467,21 +458,16 @@ Value structPtr = builder.create(loc, structPtrTy, memRefDescPtr); - Type int32Type = typeConverter.convertType(builder.getI32Type()); - Value zero = - createIndexAttrConstant(builder, loc, typeConverter.getIndexType(), 0); - Value three = builder.create(loc, int32Type, - builder.getI32IntegerAttr(3)); return builder.create(loc, LLVM::LLVMPointerType::get(indexTy), - structPtr, ValueRange({zero, three})); + structPtr, ArrayRef{0, 3}); } Value UnrankedMemRefDescriptor::size(OpBuilder &builder, Location loc, LLVMTypeConverter &typeConverter, Value sizeBasePtr, Value index) { Type indexPtrTy = 
LLVM::LLVMPointerType::get(typeConverter.getIndexType()); - Value sizeStoreGep = builder.create(loc, indexPtrTy, sizeBasePtr, - ValueRange({index})); + Value sizeStoreGep = + builder.create(loc, indexPtrTy, sizeBasePtr, index); return builder.create(loc, sizeStoreGep); } @@ -490,8 +476,8 @@ Value sizeBasePtr, Value index, Value size) { Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType()); - Value sizeStoreGep = builder.create(loc, indexPtrTy, sizeBasePtr, - ValueRange({index})); + Value sizeStoreGep = + builder.create(loc, indexPtrTy, sizeBasePtr, index); builder.create(loc, size, sizeStoreGep); } @@ -499,8 +485,7 @@ LLVMTypeConverter &typeConverter, Value sizeBasePtr, Value rank) { Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType()); - return builder.create(loc, indexPtrTy, sizeBasePtr, - ValueRange({rank})); + return builder.create(loc, indexPtrTy, sizeBasePtr, rank); } Value UnrankedMemRefDescriptor::stride(OpBuilder &builder, Location loc, @@ -508,8 +493,8 @@ Value strideBasePtr, Value index, Value stride) { Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType()); - Value strideStoreGep = builder.create( - loc, indexPtrTy, strideBasePtr, ValueRange({index})); + Value strideStoreGep = + builder.create(loc, indexPtrTy, strideBasePtr, index); return builder.create(loc, strideStoreGep); } @@ -518,7 +503,7 @@ Value strideBasePtr, Value index, Value stride) { Type indexPtrTy = LLVM::LLVMPointerType::get(typeConverter.getIndexType()); - Value strideStoreGep = builder.create( - loc, indexPtrTy, strideBasePtr, ValueRange({index})); + Value strideStoreGep = + builder.create(loc, indexPtrTy, strideBasePtr, index); builder.create(loc, stride, strideStoreGep); } Index: mlir/lib/Conversion/LLVMCommon/Pattern.cpp =================================================================== --- mlir/lib/Conversion/LLVMCommon/Pattern.cpp +++ mlir/lib/Conversion/LLVMCommon/Pattern.cpp @@ -163,8 +163,8 @@ // Buffer size in bytes. Type elementPtrType = getElementPtrType(memRefType); Value nullPtr = rewriter.create(loc, elementPtrType); - Value gepPtr = rewriter.create(loc, elementPtrType, nullPtr, - ArrayRef{runningStride}); + Value gepPtr = rewriter.create( + loc, elementPtrType, nullPtr, runningStride); sizeBytes = rewriter.create(loc, getIndexType(), gepPtr); } @@ -178,9 +178,8 @@ auto convertedPtrType = LLVM::LLVMPointerType::get(typeConverter->convertType(type)); auto nullPtr = rewriter.create(loc, convertedPtrType); - auto gep = rewriter.create( - loc, convertedPtrType, nullPtr, - ArrayRef{createIndexConstant(rewriter, loc, 1)}); + auto gep = rewriter.create(loc, convertedPtrType, nullPtr, + ArrayRef{1}); return rewriter.create(loc, getIndexType(), gep); } Index: mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp =================================================================== --- mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp +++ mlir/lib/Conversion/MemRefToLLVM/MemRefToLLVM.cpp @@ -389,19 +389,15 @@ // Get pointer to offset field of memref descriptor. 
Type indexPtrTy = LLVM::LLVMPointerType::get( getTypeConverter()->getIndexType(), addressSpace); - Value two = rewriter.create( - loc, typeConverter->convertType(rewriter.getI32Type()), - rewriter.getI32IntegerAttr(2)); Value offsetPtr = rewriter.create( - loc, indexPtrTy, scalarMemRefDescPtr, - ValueRange({createIndexConstant(rewriter, loc, 0), two})); + loc, indexPtrTy, scalarMemRefDescPtr, ArrayRef{0, 2}); // The size value that we have to extract can be obtained using GEPop with // `dimOp.index() + 1` index argument. Value idxPlusOne = rewriter.create( loc, createIndexConstant(rewriter, loc, 1), adaptor.getIndex()); - Value sizePtr = rewriter.create(loc, indexPtrTy, offsetPtr, - ValueRange({idxPlusOne})); + Value sizePtr = + rewriter.create(loc, indexPtrTy, offsetPtr, idxPlusOne); return rewriter.create(loc, sizePtr); } @@ -664,11 +660,9 @@ Type elementType = typeConverter->convertType(type.getElementType()); Type elementPtrType = LLVM::LLVMPointerType::get(elementType, memSpace); - SmallVector operands; - operands.insert(operands.end(), type.getRank() + 1, - createIndexConstant(rewriter, loc, 0)); - auto gep = - rewriter.create(loc, elementPtrType, addressOf, operands); + auto gep = rewriter.create( + loc, elementPtrType, addressOf, + SmallVector(type.getRank() + 1, 0)); // We do not expect the memref obtained using `memref.get_global` to be // ever deallocated. Set the allocated pointer to be known bad value to @@ -1286,8 +1280,8 @@ // Copy size from shape to descriptor. Type llvmIndexPtrType = LLVM::LLVMPointerType::get(indexType); - Value sizeLoadGep = rewriter.create( - loc, llvmIndexPtrType, shapeOperandPtr, ValueRange{indexArg}); + Value sizeLoadGep = rewriter.create(loc, llvmIndexPtrType, + shapeOperandPtr, indexArg); Value size = rewriter.create(loc, sizeLoadGep); UnrankedMemRefDescriptor::setSize(rewriter, loc, *getTypeConverter(), targetSizesBase, indexArg, size); Index: mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp =================================================================== --- mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp +++ mlir/lib/Dialect/LLVMIR/IR/LLVMDialect.cpp @@ -2987,12 +2987,9 @@ // Get the pointer to the first character in the global string. 
Value globalPtr = builder.create(loc, global); - Value cst0 = builder.create( - loc, IntegerType::get(ctx, 64), - builder.getIntegerAttr(builder.getIndexType(), 0)); return builder.create( loc, LLVM::LLVMPointerType::get(IntegerType::get(ctx, 8)), globalPtr, - ValueRange{cst0, cst0}); + ArrayRef{0, 0}); } bool mlir::LLVM::satisfiesLLVMModule(Operation *op) { Index: mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir =================================================================== --- mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir +++ mlir/test/Conversion/AsyncToLLVM/convert-runtime-to-llvm.mlir @@ -10,8 +10,7 @@ // CHECK-LABEL: @create_value func.func @create_value() { // CHECK: %[[NULL:.*]] = llvm.mlir.null : !llvm.ptr - // CHECK: %[[ONE:.*]] = llvm.mlir.constant(1 : i64) : i64 - // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][%[[ONE]]] + // CHECK: %[[OFFSET:.*]] = llvm.getelementptr %[[NULL]][1] // CHECK: %[[SIZE:.*]] = llvm.ptrtoint %[[OFFSET]] // CHECK: %[[VALUE:.*]] = call @mlirAsyncRuntimeCreateValue(%[[SIZE]]) %0 = async.runtime.create : !async.value Index: mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir =================================================================== --- mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir +++ mlir/test/Conversion/GPUCommon/lower-launch-func-to-gpu-runtime-calls.mlir @@ -32,8 +32,7 @@ // CHECK-DAG: [[C256:%.*]] = llvm.mlir.constant(256 : i32) : i32 // CHECK-DAG: [[C8:%.*]] = llvm.mlir.constant(8 : index) : i64 // CHECK: [[ADDRESSOF:%.*]] = llvm.mlir.addressof @[[GLOBAL]] - // CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) - // CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}[[C0]], [[C0]]] + // CHECK: [[BINARY:%.*]] = llvm.getelementptr [[ADDRESSOF]]{{\[}}0, 0] // CHECK-SAME: -> !llvm.ptr // CHECK: [[MODULE:%.*]] = llvm.call @mgpuModuleLoad([[BINARY]]) Index: mlir/test/Conversion/GPUCommon/memory-attrbution.mlir =================================================================== --- mlir/test/Conversion/GPUCommon/memory-attrbution.mlir +++ mlir/test/Conversion/GPUCommon/memory-attrbution.mlir @@ -67,14 +67,12 @@ // ROCDL-SAME: { gpu.func @workgroup(%arg0: f32) workgroup(%arg1: memref<4xf32, 3>) { // Get the address of the first element in the global array. - // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> - // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] + // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0] // NVVM-SAME: !llvm.ptr - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> - // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] + // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0] // ROCDL-SAME: !llvm.ptr // Populate the memref descriptor. @@ -130,14 +128,12 @@ // ROCDL-LABEL: llvm.func @workgroup3d gpu.func @workgroup3d(%arg0: f32) workgroup(%arg1: memref<4x2x6xf32, 3>) { // Get the address of the first element in the global array. 
- // NVVM: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // NVVM: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> - // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] + // NVVM: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0] // NVVM-SAME: !llvm.ptr - // ROCDL: %[[c0:.*]] = llvm.mlir.constant(0 : i32) : i32 // ROCDL: %[[addr:.*]] = llvm.mlir.addressof @[[$buffer]] : !llvm.ptr, 3> - // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][%[[c0]], %[[c0]]] + // ROCDL: %[[raw:.*]] = llvm.getelementptr %[[addr]][0, 0] // ROCDL-SAME: !llvm.ptr // Populate the memref descriptor. Index: mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir =================================================================== --- mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir +++ mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-hip.mlir @@ -12,8 +12,7 @@ // CHECK: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK-NEXT: %[[DESC0:.*]] = llvm.call @__ockl_printf_begin(%0) : (i64) -> i64 // CHECK-NEXT: %[[FORMATSTR:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL0]] : !llvm.ptr> - // CHECK-NEXT: %[[CST1:.*]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][%[[CST1]], %[[CST1]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr + // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][0, 0] : (!llvm.ptr>) -> !llvm.ptr // CHECK-NEXT: %[[FORMATLEN:.*]] = llvm.mlir.constant(14 : i64) : i64 // CHECK-NEXT: %[[ISLAST:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK-NEXT: %[[ISNTLAST:.*]] = llvm.mlir.constant(0 : i32) : i32 @@ -29,8 +28,7 @@ // CHECK: %[[CST0:.*]] = llvm.mlir.constant(0 : i64) : i64 // CHECK-NEXT: %[[DESC0:.*]] = llvm.call @__ockl_printf_begin(%0) : (i64) -> i64 // CHECK-NEXT: %[[FORMATSTR:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL1]] : !llvm.ptr> - // CHECK-NEXT: %[[CST1:.*]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][%[[CST1]], %[[CST1]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr + // CHECK-NEXT: %[[FORMATSTART:.*]] = llvm.getelementptr %[[FORMATSTR]][0, 0] : (!llvm.ptr>) -> !llvm.ptr // CHECK-NEXT: %[[FORMATLEN:.*]] = llvm.mlir.constant(11 : i64) : i64 // CHECK-NEXT: %[[ISLAST:.*]] = llvm.mlir.constant(1 : i32) : i32 // CHECK-NEXT: %[[ISNTLAST:.*]] = llvm.mlir.constant(0 : i32) : i32 Index: mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir =================================================================== --- mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir +++ mlir/test/Conversion/GPUToROCDL/gpu-to-rocdl-opencl.mlir @@ -7,8 +7,7 @@ // CHECK: (%[[ARG0:.*]]: i32) gpu.func @test_printf(%arg0: i32) { // CHECK: %[[IMM0:.*]] = llvm.mlir.addressof @[[$PRINT_GLOBAL]] : !llvm.ptr, 4> - // CHECK-NEXT: %[[IMM1:.*]] = llvm.mlir.constant(0 : i64) : i64 - // CHECK-NEXT: %[[IMM2:.*]] = llvm.getelementptr %[[IMM0]][%[[IMM1]], %[[IMM1]]] : (!llvm.ptr, 4>, i64, i64) -> !llvm.ptr + // CHECK-NEXT: %[[IMM2:.*]] = llvm.getelementptr %[[IMM0]][0, 0] : (!llvm.ptr, 4>) -> !llvm.ptr // CHECK-NEXT: %{{.*}} = llvm.call @printf(%[[IMM2]], %[[ARG0]]) : (!llvm.ptr, i32) -> i32 gpu.printf "Hello: %d\n" %arg0 : i32 gpu.return Index: mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir =================================================================== --- mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir +++ mlir/test/Conversion/MemRefToLLVM/convert-dynamic-memref-ops.mlir @@ -387,12 +387,11 @@ // CHECK: %{{.*}}, %[[IDXarg:.*]]: 
index func.func @memref_dim_with_dyn_index(%arg : memref<3x?xf32>, %idx : index) -> index { // CHECK-DAG: %[[IDX:.*]] = builtin.unrealized_conversion_cast %[[IDXarg]] - // CHECK-DAG: %[[C0:.*]] = llvm.mlir.constant(0 : index) : i64 // CHECK-DAG: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK-DAG: %[[SIZES:.*]] = llvm.extractvalue %{{.*}}[3] : ![[DESCR_TY:.*]] // CHECK-DAG: %[[SIZES_PTR:.*]] = llvm.alloca %[[C1]] x !llvm.array<2 x i64> : (i64) -> !llvm.ptr> // CHECK-DAG: llvm.store %[[SIZES]], %[[SIZES_PTR]] : !llvm.ptr> - // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][%[[C0]], %[[IDX]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr + // CHECK-DAG: %[[RESULT_PTR:.*]] = llvm.getelementptr %[[SIZES_PTR]][0, %[[IDX]]] : (!llvm.ptr>, i64) -> !llvm.ptr // CHECK-DAG: %[[RESULT:.*]] = llvm.load %[[RESULT_PTR]] : !llvm.ptr %result = memref.dim %arg, %idx : memref<3x?xf32> return %result : index @@ -454,9 +453,8 @@ // CHECK: [[BASE_PTR_PTR:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr to !llvm.ptr> // CHECK: [[BASE_PTR:%.*]] = llvm.load [[BASE_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[DESCRIPTOR]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]] -// CHECK-SAME: : (!llvm.ptr>, i64) -> !llvm.ptr> +// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}1] +// CHECK-SAME: : (!llvm.ptr>) -> !llvm.ptr> // CHECK: [[ALIGNED_PTR:%.*]] = llvm.load [[ALIGNED_PTR_PTR]] : !llvm.ptr> // CHECK: [[OUT_1:%.*]] = llvm.insertvalue [[BASE_PTR]], [[OUT_0]][0] : [[TY]] // CHECK: [[OUT_2:%.*]] = llvm.insertvalue [[ALIGNED_PTR]], [[OUT_1]][1] : [[TY]] @@ -498,20 +496,17 @@ // CHECK-SAME: !llvm.ptr to !llvm.ptr> // CHECK: llvm.store [[ALLOC_PTR]], [[BASE_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR_:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C1:%.*]] = llvm.mlir.constant(1 : index) : i64 -// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}[[C1]]] +// CHECK: [[ALIGNED_PTR_PTR:%.*]] = llvm.getelementptr [[BASE_PTR_PTR_]]{{\[}}1] // CHECK: llvm.store [[ALIGN_PTR]], [[ALIGNED_PTR_PTR]] : !llvm.ptr> // CHECK: [[BASE_PTR_PTR__:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] : !llvm.ptr to !llvm.ptr> -// CHECK: [[C2:%.*]] = llvm.mlir.constant(2 : index) : i64 -// CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}[[C2]]] +// CHECK: [[OFFSET_PTR_:%.*]] = llvm.getelementptr [[BASE_PTR_PTR__]]{{\[}}2] // CHECK: [[OFFSET_PTR:%.*]] = llvm.bitcast [[OFFSET_PTR_]] // CHECK: llvm.store [[OFFSET]], [[OFFSET_PTR]] : !llvm.ptr // Iterate over shape operand in reverse order and set sizes and strides. 
// CHECK: [[STRUCT_PTR:%.*]] = llvm.bitcast [[UNDERLYING_DESC]] // CHECK-SAME: !llvm.ptr to !llvm.ptr, ptr, i64, i64)>> -// CHECK: [[C0:%.*]] = llvm.mlir.constant(0 : index) : i64 -// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}[[C0]], 3] +// CHECK: [[SIZES_PTR:%.*]] = llvm.getelementptr [[STRUCT_PTR]]{{\[}}0, 3] // CHECK: [[STRIDES_PTR:%.*]] = llvm.getelementptr [[SIZES_PTR]]{{\[}}[[RANK]]] // CHECK: [[SHAPE_IN_PTR:%.*]] = llvm.extractvalue [[SHAPE]][1] : [[SHAPE_TY]] // CHECK: [[C1_:%.*]] = llvm.mlir.constant(1 : index) : i64 Index: mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir =================================================================== --- mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir +++ mlir/test/Conversion/MemRefToLLVM/memref-to-llvm.mlir @@ -570,11 +570,9 @@ // CHECK: %[[ZERO_D_DESC:.*]] = llvm.bitcast %[[RANKED_DESC]] // CHECK-SAME: : !llvm.ptr to !llvm.ptr, ptr, i64)>> -// CHECK: %[[C0_:.*]] = llvm.mlir.constant(0 : index) : i64 - // CHECK: %[[OFFSET_PTR:.*]] = llvm.getelementptr %[[ZERO_D_DESC]]{{\[}} -// CHECK-SAME: %[[C0_]], 2] : (!llvm.ptr, ptr, -// CHECK-SAME: i64)>>, i64) -> !llvm.ptr +// CHECK-SAME: 0, 2] : (!llvm.ptr, ptr, +// CHECK-SAME: i64)>>) -> !llvm.ptr // CHECK: %[[C1:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[INDEX_INC:.*]] = llvm.add %[[C1]], %{{.*}} : i64 @@ -636,8 +634,7 @@ // CHECK: %[[DIM:.*]] = llvm.mlir.constant(2 : index) : i64 // CHECK: %[[STRIDE:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv0 : !llvm.ptr> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64 - // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]]] : (!llvm.ptr>, i64, i64) -> !llvm.ptr + // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0] : (!llvm.ptr>) -> !llvm.ptr // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64 // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> @@ -657,8 +654,7 @@ // CHECK: %[[DIM1:.*]] = llvm.mlir.constant(3 : index) : i64 // CHECK: %[[STRIDE1:.*]] = llvm.mlir.constant(1 : index) : i64 // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv2 : !llvm.ptr>> - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64 - // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]], %[[ZERO]], %[[ZERO]]] : (!llvm.ptr>>, i64, i64, i64) -> !llvm.ptr + // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0, 0, 0] : (!llvm.ptr>>) -> !llvm.ptr // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64 // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> @@ -682,8 +678,7 @@ // CHECK-LABEL: func @get_gv3_memref func.func @get_gv3_memref() { // CHECK: %[[ADDR:.*]] = llvm.mlir.addressof @gv3 : !llvm.ptr - // CHECK: %[[ZERO:.*]] = llvm.mlir.constant(0 : index) : i64 - // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][%[[ZERO]]] : (!llvm.ptr, i64) -> !llvm.ptr + // CHECK: %[[GEP:.*]] = llvm.getelementptr %[[ADDR]][0] : (!llvm.ptr) -> !llvm.ptr // CHECK: %[[DEADBEEF:.*]] = llvm.mlir.constant(3735928559 : index) : i64 // CHECK: %[[DEADBEEFPTR:.*]] = llvm.inttoptr %[[DEADBEEF]] : i64 to !llvm.ptr // CHECK: llvm.mlir.undef : !llvm.struct<(ptr, ptr, i64)> @@ -1029,8 +1024,7 @@ // CHECK: [[EXTRACT0:%.*]] = llvm.extractvalue {{%.*}}[3, 0] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, 
array<1 x i64>)> // CHECK: [[MUL:%.*]] = llvm.mul [[ONE]], [[EXTRACT0]] : i64 // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr - // CHECK: [[ONE2:%.*]] = llvm.mlir.constant(1 : index) : i64 - // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][[[ONE2]]] : (!llvm.ptr, i64) -> !llvm.ptr + // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64 // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL]], [[PTRTOINT]] : i64 // CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<1 x i64>, array<1 x i64>)> @@ -1058,8 +1052,7 @@ // CHECK: [[EXTRACT1:%.*]] = llvm.extractvalue {{%.*}}[3, 1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> // CHECK: [[MUL2:%.*]] = llvm.mul [[MUL1]], [[EXTRACT1]] : i64 // CHECK: [[NULL:%.*]] = llvm.mlir.null : !llvm.ptr - // CHECK: [[ONE2:%.*]] = llvm.mlir.constant(1 : index) : i64 - // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][[[ONE2]]] : (!llvm.ptr, i64) -> !llvm.ptr + // CHECK: [[GEP:%.*]] = llvm.getelementptr [[NULL]][1] : (!llvm.ptr) -> !llvm.ptr // CHECK: [[PTRTOINT:%.*]] = llvm.ptrtoint [[GEP]] : !llvm.ptr to i64 // CHECK: [[SIZE:%.*]] = llvm.mul [[MUL2]], [[PTRTOINT]] : i64 // CHECK: [[EXTRACT1P:%.*]] = llvm.extractvalue {{%.*}}[1] : !llvm.struct<(ptr, ptr, i64, array<2 x i64>, array<2 x i64>)> Index: mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll =================================================================== --- mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll +++ mlir/test/Target/LLVMIR/Import/incorrect-constant-caching.ll @@ -8,16 +8,12 @@ ; only wrote minimum level of checks. %my_struct = type {i32, i8*} -; CHECK: llvm.mlir.constant(0 : i32) : i32 -; CHECK: llvm.mlir.constant(0 : i32) : i32 ; CHECK: llvm.mlir.addressof @str1 : !llvm.ptr> ; CHECK: llvm.getelementptr ; CHECK: llvm.mlir.constant(7 : i32) : i32 ; CHECK: llvm.mlir.undef : !llvm.struct<"my_struct", (i32, ptr)> ; CHECK: llvm.insertvalue ; CHECK: llvm.insertvalue -; CHECK: llvm.mlir.constant(0 : i32) : i32 -; CHECK: llvm.mlir.constant(0 : i32) : i32 ; CHECK: llvm.mlir.addressof @str0 : !llvm.ptr> ; CHECK: llvm.getelementptr ; CHECK: llvm.mlir.constant(8 : i32) : i32
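The pattern applied throughout these hunks is uniform: instead of materializing llvm.mlir.constant ops and feeding them to llvm.getelementptr as SSA operands, constant indices are passed to the GEPOp builder directly, so only genuinely dynamic indices remain Values and the now-redundant constant ops (and their FileCheck lines) disappear. A minimal before/after sketch of that shape follows; it assumes the GEPOp builder overload taking llvm::ArrayRef<mlir::LLVM::GEPArg> (a GEPArg holding either a constant int32_t index or an mlir::Value) and pre-opaque typed LLVM pointers. The helper name, parameters, and includes are illustrative, not code taken from this patch.

#include "mlir/Dialect/LLVMIR/LLVMDialect.h"
#include "mlir/Transforms/DialectConversion.h"

using namespace mlir;

// Illustrative only: load one index-typed field out of a descriptor struct.
// `descriptorPtr`, `indexPtrTy`, and `dim` are placeholder names.
static Value loadSizeField(Location loc, ConversionPatternRewriter &rewriter,
                           Type indexPtrTy, Value descriptorPtr, Value dim) {
  // Old style: every index, even a literal 0 or 3, was first created as an
  // llvm.mlir.constant and passed through a ValueRange:
  //   Value zero = rewriter.create<LLVM::ConstantOp>(
  //       loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(0));
  //   Value three = rewriter.create<LLVM::ConstantOp>(
  //       loc, rewriter.getI32Type(), rewriter.getI32IntegerAttr(3));
  //   Value gep = rewriter.create<LLVM::GEPOp>(
  //       loc, indexPtrTy, descriptorPtr, ValueRange{zero, three, dim});

  // New style: constant indices are plain integers in the GEP argument list;
  // only the genuinely dynamic index stays an SSA value.
  Value gep = rewriter.create<LLVM::GEPOp>(
      loc, indexPtrTy, descriptorPtr, ArrayRef<LLVM::GEPArg>{0, 3, dim});
  // Relies on typed pointers: the load result type is inferred from the
  // element type of `indexPtrTy`.
  return rewriter.create<LLVM::LoadOp>(loc, gep);
}

Besides shortening the lowering code, the constant indices become part of the GEP operation itself and print inline (e.g. llvm.getelementptr %ptr[0, 0]), which is why the test updates above simply drop the llvm.mlir.constant(0 : ...) CHECK lines and match the bracketed literal indices instead.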