diff --git a/flang/include/flang/Optimizer/Dialect/FIROps.h b/flang/include/flang/Optimizer/Dialect/FIROps.h --- a/flang/include/flang/Optimizer/Dialect/FIROps.h +++ b/flang/include/flang/Optimizer/Dialect/FIROps.h @@ -38,6 +38,10 @@ mlir::OpAsmParser::OperandType &selector, mlir::Type &type); +static constexpr llvm::StringRef getNormalizedLowerBoundAttrName() { + return "normalized.lb"; +} + } // namespace fir #define GET_OP_CLASSES diff --git a/flang/lib/Optimizer/CodeGen/CodeGen.cpp b/flang/lib/Optimizer/CodeGen/CodeGen.cpp --- a/flang/lib/Optimizer/CodeGen/CodeGen.cpp +++ b/flang/lib/Optimizer/CodeGen/CodeGen.cpp @@ -1809,11 +1809,11 @@ mlir::LogicalResult matchAndRewrite(fir::cg::XEmboxOp xbox, OpAdaptor adaptor, mlir::ConversionPatternRewriter &rewriter) const override { - auto [boxTy, dest, eleSize] = consDescriptorPrefix( - xbox, rewriter, xbox.getOutRank(), - adaptor.getOperands().drop_front(xbox.lenParamOffset())); - // Generate the triples in the dims field of the descriptor mlir::ValueRange operands = adaptor.getOperands(); + auto [boxTy, dest, eleSize] = + consDescriptorPrefix(xbox, rewriter, xbox.getOutRank(), + operands.drop_front(xbox.lenParamOffset())); + // Generate the triples in the dims field of the descriptor auto i64Ty = mlir::IntegerType::get(xbox.getContext(), 64); mlir::Value base = operands[0]; assert(!xbox.shape().empty() && "must have a shape"); @@ -1832,40 +1832,50 @@ llvm::SmallVector gepArgs; unsigned constRows = 0; mlir::Value ptrOffset = zero; - if (auto memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType())) - if (auto seqTy = memEleTy.dyn_cast()) { - mlir::Type seqEleTy = seqTy.getEleTy(); - // Adjust the element scaling factor if the element is a dependent type. 
- if (fir::hasDynamicSize(seqEleTy)) { - if (fir::isa_char(seqEleTy)) { - assert(xbox.lenParams().size() == 1); - prevPtrOff = integerCast(loc, rewriter, i64Ty, - operands[xbox.lenParamOffset()]); - } else if (seqEleTy.isa()) { - TODO(loc, "generate call to calculate size of PDT"); - } else { - return rewriter.notifyMatchFailure(xbox, "unexpected dynamic type"); - } - } else { - constRows = seqTy.getConstantRows(); - } + mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); + assert(memEleTy.isa()); + auto seqTy = memEleTy.cast(); + mlir::Type seqEleTy = seqTy.getEleTy(); + // Adjust the element scaling factor if the element is a dependent type. + if (fir::hasDynamicSize(seqEleTy)) { + if (auto charTy = seqEleTy.dyn_cast()) { + assert(xbox.lenParams().size() == 1); + mlir::LLVM::ConstantOp charSize = genConstantIndex( + loc, i64Ty, rewriter, lowerTy().characterBitsize(charTy) / 8); + mlir::Value castedLen = + integerCast(loc, rewriter, i64Ty, operands[xbox.lenParamOffset()]); + auto byteOffset = + rewriter.create(loc, i64Ty, charSize, castedLen); + prevPtrOff = integerCast(loc, rewriter, i64Ty, byteOffset); + } else if (seqEleTy.isa()) { + // prevPtrOff = ; + TODO(loc, "generate call to calculate size of PDT"); + } else { + fir::emitFatalError(loc, "unexpected dynamic type"); } + } else { + constRows = seqTy.getConstantRows(); + } - bool hasSubcomp = !xbox.subcomponent().empty(); + const bool hasSubcomp = !xbox.subcomponent().empty(); + const bool hasSubstr = !xbox.substr().empty(); mlir::Value stepExpr; if (hasSubcomp) { // We have a subcomponent. The step value needs to be the number of // bytes per element (which is a derived type). 
- mlir::Type ty0 = base.getType(); - [[maybe_unused]] auto ptrTy = ty0.dyn_cast(); - assert(ptrTy && "expected pointer type"); - mlir::Type memEleTy = fir::dyn_cast_ptrEleTy(xbox.memref().getType()); - assert(memEleTy && "expected fir pointer type"); - auto seqTy = memEleTy.dyn_cast(); - assert(seqTy && "expected sequence type"); - mlir::Type seqEleTy = seqTy.getEleTy(); auto eleTy = mlir::LLVM::LLVMPointerType::get(convertType(seqEleTy)); stepExpr = computeDerivedTypeSize(loc, eleTy, i64Ty, rewriter); + } else if (hasSubstr) { + // We have a substring. The step value needs to be the number of bytes + // per CHARACTER element. + auto charTy = seqEleTy.cast(); + if (fir::hasDynamicSize(charTy)) { + stepExpr = prevPtrOff; + } else { + stepExpr = genConstantIndex(loc, i64Ty, rewriter, + charTy.getLen() * + lowerTy().characterBitsize(charTy) / 8); + } } // Process the array subspace arguments (shape, shift, etc.), if any, @@ -1899,13 +1909,19 @@ } } if (!skipNext) { - // store lower bound (normally 0) + // Lower bound is normalized to 0 for BIND(C) interoperability. mlir::Value lb = zero; - if (eleTy.isa() || eleTy.isa()) { + bool isaPointerOrAllocatable = + eleTy.isa() || eleTy.isa(); + // Lower bound defaults to 1 for POINTER, ALLOCATABLE, and + // denormalized descriptors. + if (isaPointerOrAllocatable || !normalizedLowerBound(xbox)) lb = one; - if (hasShift) - lb = operands[shiftOffset]; - } + // If there is a shifted origin and this is not a normalized descriptor + // then use the value from the shift op as the lower bound. + if (hasShift && + (isaPointerOrAllocatable || !normalizedLowerBound(xbox))) + lb = operands[shiftOffset]; dest = insertLowerBound(rewriter, loc, dest, descIdx, lb); // store extent @@ -1916,8 +1932,7 @@ dest = insertExtent(rewriter, loc, dest, descIdx, extent); // store step (scaled by shaped extent) - - mlir::Value step = hasSubcomp ? stepExpr : prevDim; + mlir::Value step = (hasSubcomp || hasSubstr) ? 
stepExpr : prevDim; if (hasSlice) step = rewriter.create(loc, i64Ty, step, operands[sliceOffset + 2]); @@ -1939,7 +1954,7 @@ if (hasSlice) sliceOffset += 3; } - if (hasSlice || hasSubcomp || !xbox.substr().empty()) { + if (hasSlice || hasSubcomp || hasSubstr) { llvm::SmallVector args = {base, ptrOffset}; args.append(gepArgs.rbegin(), gepArgs.rend()); if (hasSubcomp) { @@ -1954,7 +1969,7 @@ xbox.subcomponent().size()); } base = rewriter.create(loc, base.getType(), args); - if (!xbox.substr().empty()) + if (hasSubstr) base = shiftSubstringBase(rewriter, loc, base, operands[xbox.substrOffset()]); } @@ -1966,6 +1981,13 @@ rewriter.replaceOp(xbox, result); return success(); } + + /// Return true if `xbox` has a normalized lower bounds attribute. A box value + /// that is neither a POINTER nor an ALLOCATABLE should be normalized to a + /// zero origin lower bound for interoperability with BIND(C). + inline static bool normalizedLowerBound(fir::cg::XEmboxOp xbox) { + return xbox->hasAttr(fir::getNormalizedLowerBoundAttrName()); + } }; /// Create a new box given a box reference. 
diff --git a/flang/test/Fir/convert-to-llvm.fir b/flang/test/Fir/convert-to-llvm.fir --- a/flang/test/Fir/convert-to-llvm.fir +++ b/flang/test/Fir/convert-to-llvm.fir @@ -1670,7 +1670,7 @@ // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C0]], %[[C0]] : i64 // CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[ONE]] : i64 // CHECK: %[[PTR_OFFSET:.*]] = llvm.add %[[DIM_OFFSET]], %[[ZERO]] : i64 -// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> +// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[C0]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> // CHECK: %[[EXTENT0:.*]] = llvm.sub %[[C0]], %[[C0]] : i64 // CHECK: %[[EXTENT1:.*]] = llvm.add %[[EXTENT0]], %[[C0]] : i64 // CHECK: %[[EXTENT2:.*]] = llvm.sdiv %[[EXTENT1]], %[[C0]] : i64 @@ -1771,7 +1771,7 @@ // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C2]], %[[SH1]] : i64 // CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[ONE]] : i64 // CHECK: %[[PTR_OFFSET:.*]] = llvm.add %[[DIM_OFFSET]], %[[ZERO]] : i64 -// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> +// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[SH1]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> // CHECK: %[[EXTENT0:.*]] = llvm.sub %[[ARG0]], %[[C2]] : i64 // CHECK: %[[EXTENT1:.*]] = llvm.add %[[EXTENT0]], %[[C1]] : i64 // CHECK: %[[EXTENT2:.*]] = llvm.sdiv %[[EXTENT1]], %[[C1]] : i64 @@ -1785,7 +1785,7 @@ // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %[[C4]], %[[SH2]] : i64 // CHECK: %[[DIM_OFFSET:.*]] = llvm.mul %[[ADJUSTED_OFFSET]], %[[PREV_PTROFF]] : i64 // CHECK: 
%[[PTR_OFFSET0:.*]] = llvm.add %[[DIM_OFFSET]], %[[PTR_OFFSET]] : i64 -// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX9]][7 : i32, 1 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> +// CHECK: %[[BOX10:.*]] = llvm.insertvalue %[[SH2]], %[[BOX9]][7 : i32, 1 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<2 x array<3 x i64>>)> // CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[N]], %[[C4]] : i64 // CHECK: %[[EXT_ADD:.*]] = llvm.add %[[EXT_SUB]], %[[C1]] : i64 // CHECK: %[[EXT_SDIV:.*]] = llvm.sdiv %[[EXT_ADD]], %[[C1]] : i64 @@ -1853,7 +1853,7 @@ // CHECK: %[[GEP_DTYPE_SIZE:.*]] = llvm.getelementptr %[[ELE_TYPE]][%[[C1_0]]] : (!llvm.ptr>, i64) -> !llvm.ptr> // CHECK: %[[PTRTOINT_DTYPE_SIZE:.*]] = llvm.ptrtoint %[[GEP_DTYPE_SIZE]] : !llvm.ptr> to i64 // CHECK: %[[ADJUSTED_OFFSET:.*]] = llvm.sub %3, %30 : i64 -// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ZERO]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> +// CHECK: %[[BOX7:.*]] = llvm.insertvalue %[[ONE]], %[[BOX6]][7 : i32, 0 : i32, 0 : i32] : !llvm.struct<(ptr, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, i{{.*}}, array<1 x array<3 x i64>>)> // CHECK: %[[EXT_SUB:.*]] = llvm.sub %[[C10]], %[[C1]] : i64 // CHECK: %[[EXT_ADD:.*]] = llvm.add %[[EXT_SUB]], %[[C2]] : i64 // CHECK: %[[EXT_SDIV:.*]] = llvm.sdiv %[[EXT_ADD]], %[[C2]] : i64