diff --git a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
--- a/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
+++ b/mlir/lib/Analysis/DataFlow/IntegerRangeAnalysis.cpp
@@ -128,6 +128,11 @@
     ArrayRef<IntegerValueRangeLattice *> argLattices, unsigned firstIndex) {
   if (auto inferrable = dyn_cast<InferIntRangeInterface>(op)) {
     LLVM_DEBUG(llvm::dbgs() << "Inferring ranges for " << *op << "\n");
+    // If the lattice on any operand is uninitialized, bail out.
+    if (llvm::any_of(op->getOperands(), [&](Value value) {
+          return getLatticeElementFor(op, value)->getValue().isUninitialized();
+        }))
+      return;
     SmallVector<ConstantIntRanges> argRanges(
         llvm::map_range(op->getOperands(), [&](Value value) {
           return getLatticeElementFor(op, value)->getValue().getValue();
diff --git a/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir b/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
--- a/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
+++ b/mlir/test/Dialect/Arith/unsigned-when-equivalent.mlir
@@ -105,3 +105,12 @@
   %cmp = arith.cmpi slt, %cst_0, %cst_0 : vector<1xi32>
   return
 }
+
+// CHECK-LABEL: @gpu_func
+func.func @gpu_func(%arg0: memref<2x32xf32>, %arg1: memref<2x32xf32>, %arg2: memref<32xf32>, %arg3: f32, %arg4: !gpu.async.token, %arg5: index, %arg6: index) -> memref<2x32xf32> {
+  %c1 = arith.constant 1 : index
+  %2 = gpu.launch async [%arg4] blocks(%arg7, %arg8, %arg9) in (%arg13 = %c1, %arg14 = %c1, %arg15 = %c1) threads(%arg10, %arg11, %arg12) in (%arg16 = %c1, %arg17 = %c1, %arg18 = %c1) {
+    gpu.terminator
+  }
+  return %arg1 : memref<2x32xf32>
+}