Index: flang/lib/Optimizer/CodeGen/CodeGen.cpp
===================================================================
--- flang/lib/Optimizer/CodeGen/CodeGen.cpp
+++ flang/lib/Optimizer/CodeGen/CodeGen.cpp
@@ -2073,13 +2073,8 @@
     const bool isSliced = !coor.slice().empty();
     const bool baseIsBoxed = coor.memref().getType().isa<fir::BoxType>();
 
-    auto indexOps = coor.indices().begin();
-    auto shapeOps = coor.shape().begin();
-    auto shiftOps = coor.shift().begin();
-    auto sliceOps = coor.slice().begin();
     // For each dimension of the array, generate the offset calculation.
-    for (unsigned i = 0; i < rank;
-         ++i, ++indexOps, ++shapeOps, ++shiftOps, sliceOps += 3) {
+    for (unsigned i = 0; i < rank; ++i) {
       mlir::Value index = integerCast(loc, rewriter, idxTy,
                                       operands[coor.indicesOffset() + i]);
       mlir::Value lb = isShifted ? integerCast(loc, rewriter, idxTy,
@@ -2090,10 +2085,11 @@
       // Compute zero based index in dimension i of the element, applying
       // potential triplets and lower bounds.
      if (isSliced) {
-        mlir::Value ub = *(sliceOps + 1);
+        mlir::Value ub = operands[coor.sliceOffset() + i + 1];
         normalSlice = !mlir::isa_and_nonnull<fir::UndefOp>(ub.getDefiningOp());
         if (normalSlice)
-          step = integerCast(loc, rewriter, idxTy, *(sliceOps + 2));
+          step = integerCast(loc, rewriter, idxTy,
+                             operands[coor.sliceOffset() + i + 2]);
       }
       auto idx = rewriter.create<mlir::LLVM::SubOp>(loc, idxTy, index, lb);
       mlir::Value diff =
Index: flang/test/Fir/convert-to-llvm.fir
===================================================================
--- flang/test/Fir/convert-to-llvm.fir
+++ flang/test/Fir/convert-to-llvm.fir
@@ -2054,6 +2054,90 @@
 // CHECK: %[[BITCAST:.*]] = llvm.bitcast %[[ARG0]] : !llvm.ptr> to !llvm.ptr
 // CHECK: %{{.*}} = llvm.getelementptr %[[BITCAST]][%[[OFFSET]]] : (!llvm.ptr, i64) -> !llvm.ptr

+// Conversion with index type shape and slice
+
+func.func @ext_array_coor5(%arg0: !fir.ref>, %idx1 : index, %idx2 : index, %idx3 : index, %idx4 : index, %idx5 : index) {
+  %1 = fircg.ext_array_coor %arg0(%idx1)[%idx2, %idx3, %idx4]<%idx5> : (!fir.ref>, index, index, index, index, index) -> !fir.ref
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor5(
+// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr, %[[VAL_1:.*]]: i64, %[[VAL_2:.*]]: i64, %[[VAL_3:.*]]: i64, %[[VAL_4:.*]]: i64, %[[VAL_5:.*]]: i64) {
+// CHECK: %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_8:.*]] = llvm.sub %[[VAL_5]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_9:.*]] = llvm.mul %[[VAL_8]], %[[VAL_4]] : i64
+// CHECK: %[[VAL_10:.*]] = llvm.sub %[[VAL_2]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_11:.*]] = llvm.add %[[VAL_9]], %[[VAL_10]] : i64
+// CHECK: %[[VAL_12:.*]] = llvm.mul %[[VAL_11]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_13:.*]] = llvm.add %[[VAL_12]], %[[VAL_7]] : i64
+// CHECK: %[[VAL_14:.*]] = llvm.mul %[[VAL_6]], %[[VAL_1]] : i64
+// CHECK: %[[VAL_15:.*]] = llvm.bitcast %[[VAL_0]] : !llvm.ptr to !llvm.ptr
+// CHECK: %[[VAL_16:.*]] = llvm.getelementptr %[[VAL_15]][%[[VAL_13]]] : (!llvm.ptr, i64) -> !llvm.ptr
+// CHECK: }
+
+// Conversion for 3-d array
+
+func.func @ext_array_coor6(%arg0: !fir.ref>, %idx1 : index, %idx2 : index, %idx3 : index, %idx4 : index, %idx5 : index) {
+  %1 = fircg.ext_array_coor %arg0(%idx1, %idx1, %idx1)[%idx2, %idx3, %idx4, %idx2, %idx3, %idx4, %idx2, %idx3, %idx4]<%idx5, %idx5, %idx5> : (!fir.ref>, index, index, index, index, index, index, index, index, index, index, index, index, index, index, index) -> !fir.ref
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor6(
+// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr, %[[VAL_1:.*]]: i64, %[[VAL_2:.*]]: i64, %[[VAL_3:.*]]: i64, %[[VAL_4:.*]]: i64, %[[VAL_5:.*]]: i64) {
+// CHECK: %[[VAL_6:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_8:.*]] = llvm.sub %[[VAL_5]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_9:.*]] = llvm.mul %[[VAL_8]], %[[VAL_4]] : i64
+// CHECK: %[[VAL_10:.*]] = llvm.sub %[[VAL_2]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_11:.*]] = llvm.add %[[VAL_9]], %[[VAL_10]] : i64
+// CHECK: %[[VAL_12:.*]] = llvm.mul %[[VAL_11]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_13:.*]] = llvm.add %[[VAL_12]], %[[VAL_7]] : i64
+// CHECK: %[[VAL_14:.*]] = llvm.mul %[[VAL_6]], %[[VAL_1]] : i64
+// CHECK: %[[VAL_15:.*]] = llvm.sub %[[VAL_5]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_16:.*]] = llvm.mul %[[VAL_15]], %[[VAL_2]] : i64
+// CHECK: %[[VAL_17:.*]] = llvm.sub %[[VAL_3]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_18:.*]] = llvm.add %[[VAL_16]], %[[VAL_17]] : i64
+// CHECK: %[[VAL_19:.*]] = llvm.mul %[[VAL_18]], %[[VAL_14]] : i64
+// CHECK: %[[VAL_20:.*]] = llvm.add %[[VAL_19]], %[[VAL_13]] : i64
+// CHECK: %[[VAL_21:.*]] = llvm.mul %[[VAL_14]], %[[VAL_1]] : i64
+// CHECK: %[[VAL_22:.*]] = llvm.sub %[[VAL_5]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_23:.*]] = llvm.mul %[[VAL_22]], %[[VAL_3]] : i64
+// CHECK: %[[VAL_24:.*]] = llvm.sub %[[VAL_4]], %[[VAL_6]] : i64
+// CHECK: %[[VAL_25:.*]] = llvm.add %[[VAL_23]], %[[VAL_24]] : i64
+// CHECK: %[[VAL_26:.*]] = llvm.mul %[[VAL_25]], %[[VAL_21]] : i64
+// CHECK: %[[VAL_27:.*]] = llvm.add %[[VAL_26]], %[[VAL_20]] : i64
+// CHECK: %[[VAL_28:.*]] = llvm.mul %[[VAL_21]], %[[VAL_1]] : i64
+// CHECK: %[[VAL_29:.*]] = llvm.bitcast %[[VAL_0]] : !llvm.ptr to !llvm.ptr
+// CHECK: %[[VAL_30:.*]] = llvm.getelementptr %[[VAL_29]][%[[VAL_27]]] : (!llvm.ptr, i64) -> !llvm.ptr
+// CHECK: llvm.return
+// CHECK: }
+
+// Conversion for derived type with type param
+
+func.func @ext_array_coor_dt_slice(%arg0: !fir.ref>>, %idx1 : index, %idx2 : index, %idx3 : index, %idx4 : index, %idx5 : index) {
+  %1 = fir.field_index i, !fir.type<_QFtest_dt_sliceTt{i:i32,j:i32}>
+  %2 = fircg.ext_array_coor %arg0(%idx1)[%idx2, %idx3, %idx4] path %1 <%idx5>: (!fir.ref>>, index, index, index, index, !fir.field, index) -> !fir.ref>
+  return
+}
+
+// CHECK-LABEL: llvm.func @ext_array_coor_dt_slice(
+// CHECK-SAME: %[[VAL_0:.*]]: !llvm.ptr>>, %[[VAL_1:.*]]: i64, %[[VAL_2:.*]]: i64, %[[VAL_3:.*]]: i64, %[[VAL_4:.*]]: i64, %[[VAL_5:.*]]: i64) {
+// CHECK: %[[VAL_6:.*]] = llvm.mlir.constant(0 : i32) : i32
+// CHECK: %[[VAL_7:.*]] = llvm.mlir.constant(1 : i64) : i64
+// CHECK: %[[VAL_8:.*]] = llvm.mlir.constant(0 : i64) : i64
+// CHECK: %[[VAL_9:.*]] = llvm.sub %[[VAL_5]], %[[VAL_7]] : i64
+// CHECK: %[[VAL_10:.*]] = llvm.mul %[[VAL_9]], %[[VAL_4]] : i64
+// CHECK: %[[VAL_11:.*]] = llvm.sub %[[VAL_2]], %[[VAL_7]] : i64
+// CHECK: %[[VAL_12:.*]] = llvm.add %[[VAL_10]], %[[VAL_11]] : i64
+// CHECK: %[[VAL_13:.*]] = llvm.mul %[[VAL_12]], %[[VAL_7]] : i64
+// CHECK: %[[VAL_14:.*]] = llvm.add %[[VAL_13]], %[[VAL_8]] : i64
+// CHECK: %[[VAL_15:.*]] = llvm.mul %[[VAL_7]], %[[VAL_1]] : i64
+// CHECK: %[[VAL_16:.*]] = llvm.bitcast %[[VAL_0]] : !llvm.ptr>> to !llvm.ptr>
+// CHECK: %[[VAL_17:.*]] = llvm.getelementptr %[[VAL_16]][%[[VAL_14]], 0] : (!llvm.ptr>, i64) -> !llvm.ptr>
+// CHECK: llvm.return
+// CHECK: }
+
 // -----

 // Check `fircg.ext_rebox` conversion to LLVM IR dialect
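
Note on the expected output: each dimension in the CHECK lines above contributes the same chain of values — subtract the lower bound from the index (llvm.sub), multiply by the slice step (llvm.mul), add the slice lower-bound adjustment (llvm.sub then llvm.add), scale by the running stride (llvm.mul), accumulate into the offset (llvm.add), and multiply the stride by that dimension's extent (llvm.mul). The standalone C++ sketch below reproduces that arithmetic on plain integers for a column-major array. It is illustrative only: Dim, flatElementOffset, and the 10x10 example values are invented for the sketch and are not flang APIs or part of this patch.

// Standalone illustration (not flang code): flat element offset of a sliced,
// column-major array.  Each loop iteration mirrors the per-dimension pattern
// in the CHECK lines: (index - lb) * step + (sliceLb - lb), scaled by the
// running stride, which is then multiplied by the dimension's extent.
#include <cassert>
#include <cstdint>
#include <vector>

struct Dim {
  int64_t extent;  // shape operand for this dimension
  int64_t lb;      // declared lower bound (1 when there is no shift)
  int64_t sliceLb; // slice triple: lower bound
  int64_t step;    // slice triple: stride
};

// For the unshifted case used here (lb == 1), `indices` are 1-based positions
// relative to the slice, innermost dimension first.
int64_t flatElementOffset(const std::vector<Dim> &dims,
                          const std::vector<int64_t> &indices) {
  assert(dims.size() == indices.size());
  int64_t offset = 0; // the llvm.mlir.constant(0 : i64) seed
  int64_t stride = 1; // the llvm.mlir.constant(1 : i64) seed (prevExt)
  for (size_t i = 0; i < dims.size(); ++i) {
    // llvm.sub, llvm.mul, llvm.sub, llvm.add for dimension i.
    int64_t zeroBased = (indices[i] - dims[i].lb) * dims[i].step +
                        (dims[i].sliceLb - dims[i].lb);
    // llvm.mul by the running stride and llvm.add into the offset.
    offset += zeroBased * stride;
    // llvm.mul: next stride = current stride * extent (column-major order).
    stride *= dims[i].extent;
  }
  return offset;
}

int main() {
  // A 10x10 array sliced as a(2:10:2, 1:10:1); element (3, 4) of the slice
  // is array element (6, 4), i.e. flat offset 5 + 3 * 10 = 35.
  std::vector<Dim> dims = {{10, 1, 2, 2}, {10, 1, 1, 1}};
  assert(flatElementOffset(dims, {3, 4}) == 35);
  return 0;
}

In this sketch the running stride corresponds to the prevExt values in the checks (%[[VAL_14]], %[[VAL_21]], %[[VAL_28]] in @ext_array_coor6), and the accumulated offset corresponds to the index finally passed to llvm.getelementptr.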