diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -74,7 +74,7 @@
          match(II, m_Intrinsic<Intrinsic::x86_cast_tile_to_vector>(m_Value()));
 }
 
-static bool isAMXInstrinsic(User *I) {
+static bool isAMXInstrinsic(Value *I) {
   auto *II = dyn_cast<IntrinsicInst>(I);
   if (!II)
     return false;
@@ -908,6 +908,96 @@
   return true;
 }
 
+// %43 = call <256 x i32> @llvm.x86.cast.tile.to.vector.v256i32(x86_amx %42)
+// store <256 x i32> %43, <256 x i32>* %p, align 64
+// -->
+// call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %p,
+//                                           i64 64, x86_amx %42)
+static void combineCastStore(IntrinsicInst *Cast, StoreInst *ST) {
+  Value *Tile = Cast->getOperand(0);
+  // TODO: If Tile is a cast intrinsic or a phi node, just return.
+  if (!isAMXInstrinsic(Tile))
+    return;
+  auto *II = cast<IntrinsicInst>(Tile);
+  // Tile is output from AMX intrinsic. The first operand of the
+  // intrinsic is row, the second operand of the intrinsic is column.
+  Value *Row = II->getOperand(0);
+  Value *Col = II->getOperand(1);
+  IRBuilder<> Builder(ST);
+  // Use the maximum column as stride. It must be the same as the
+  // load stride.
+  Value *Stride = Builder.getInt64(64);
+  Value *I8Ptr =
+      Builder.CreateBitCast(ST->getOperand(1), Builder.getInt8PtrTy());
+  std::array<Value *, 5> Args = {Row, Col, I8Ptr, Stride, Tile};
+  Builder.CreateIntrinsic(Intrinsic::x86_tilestored64_internal, None, Args);
+}
+
+// %65 = load <256 x i32>, <256 x i32>* %p, align 64
+// %66 = call x86_amx @llvm.x86.cast.vector.to.tile(<256 x i32> %65)
+// -->
+// %66 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
+//                                                   i8* %p, i64 64)
+static void combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) {
+  Value *Row = nullptr, *Col = nullptr;
+  Use &U = *(Cast->use_begin());
+  unsigned OpNo = U.getOperandNo();
+  auto *II = cast<IntrinsicInst>(U.getUser());
+  if (!isAMXInstrinsic(II))
+    return;
+  std::tie(Row, Col) = getShape(II, OpNo);
+  IRBuilder<> Builder(Cast);
+  // Use the maximum column as stride.
+  Value *Stride = Builder.getInt64(64);
+  Value *I8Ptr =
+      Builder.CreateBitCast(LD->getOperand(0), Builder.getInt8PtrTy());
+  std::array<Value *, 4> Args = {Row, Col, I8Ptr, Stride};
+
+  Value *NewInst =
+      Builder.CreateIntrinsic(Intrinsic::x86_tileloadd64_internal, None, Args);
+  Cast->replaceAllUsesWith(NewInst);
+}
+
+static bool combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
+  bool Change = false;
+  for (auto *Cast : Casts) {
+    IntrinsicInst *II = dyn_cast<IntrinsicInst>(Cast);
+    // %43 = call <256 x i32> @llvm.x86.cast.tile.to.vector(x86_amx %42)
+    // store <256 x i32> %43, <256 x i32>* %p, align 64
+    // -->
+    // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %p,
+    //                                           i64 64, x86_amx %42)
+    if (II->getIntrinsicID() == Intrinsic::x86_cast_tile_to_vector) {
+      SmallVector<Instruction *, 2> DeadStores;
+      for (User *U : Cast->users()) {
+        StoreInst *Store = dyn_cast<StoreInst>(U);
+        if (!Store)
+          continue;
+        combineCastStore(cast<IntrinsicInst>(Cast), Store);
+        DeadStores.push_back(Store);
+        Change = true;
+      }
+      for (auto *Store : DeadStores)
+        Store->eraseFromParent();
+    } else { // x86_cast_vector_to_tile
+      SmallVector<Instruction *, 2> DeadLoads;
+      LoadInst *Load = dyn_cast<LoadInst>(Cast->getOperand(0));
+      if (!Load || !Load->hasOneUse())
+        continue;
+      // %65 = load <256 x i32>, <256 x i32>* %p, align 64
+      // %66 = call x86_amx @llvm.x86.cast.vector.to.tile(<256 x i32> %65)
+      // -->
+      // %66 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
+      //                                                   i8* %p, i64 64)
+      combineLoadCast(cast<IntrinsicInst>(Cast), Load);
+      // Set the operand to null so that the load instruction can be erased.
+      Cast->setOperand(0, nullptr);
+      Load->eraseFromParent();
+    }
+  }
+  return Change;
+}
+
 bool X86LowerAMXCast::combineAMXcast(TargetLibraryInfo *TLI) {
   bool Change = false;
   // Collect tile cast instruction.
@@ -949,17 +1039,22 @@
   Convert(Vec2TileInsts, Intrinsic::x86_cast_tile_to_vector);
   Convert(Tile2VecInsts, Intrinsic::x86_cast_vector_to_tile);
 
+  SmallVector<Instruction *, 8> LiveCasts;
   auto EraseInst = [&](SmallVectorImpl<Instruction *> &Insts) {
     for (auto *Inst : Insts) {
       if (Inst->use_empty()) {
         Inst->eraseFromParent();
         Change = true;
+      } else {
+        LiveCasts.push_back(Inst);
       }
     }
   };
 
   EraseInst(Vec2TileInsts);
   EraseInst(Tile2VecInsts);
+  Change |= combineLdSt(LiveCasts);
+  EraseInst(LiveCasts);
 
   // Handle the A->B->A cast, and there is an intervening PHI node.
   for (BasicBlock &BB : Func) {
diff --git a/llvm/test/CodeGen/X86/AMX/amx-combine.ll b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
--- a/llvm/test/CodeGen/X86/AMX/amx-combine.ll
+++ b/llvm/test/CodeGen/X86/AMX/amx-combine.ll
@@ -3,12 +3,9 @@
 
 define void @combine_store(<256 x i32> *%p) {
 ; CHECK-LABEL: @combine_store(
-; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
 ; CHECK-NEXT:    [[T1:%.*]] = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64, x86_amx [[T1]])
-; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1]], align 1024
-; CHECK-NEXT:    store <256 x i32> [[TMP3]], <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[P:%.*]] to i8*
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP1]], i64 64, x86_amx [[T1]])
 ; CHECK-NEXT:    ret void
 ;
   %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
@@ -24,7 +21,8 @@
 ; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
 ; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64, x86_amx [[T1]])
 ; CHECK-NEXT:    [[TMP3:%.*]] = load <256 x i32>, <256 x i32>* [[TMP1]], align 1024
-; CHECK-NEXT:    store <256 x i32> [[TMP3]], <256 x i32>* [[P:%.*]], align 64
+; CHECK-NEXT:    [[TMP4:%.*]] = bitcast <256 x i32>* [[P:%.*]] to i8*
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[TMP4]], i64 64, x86_amx [[T1]])
 ; CHECK-NEXT:    ret <256 x i32> [[TMP3]]
 ;
   %t1 = call x86_amx @llvm.x86.tilezero.internal(i16 16, i16 64)
@@ -35,12 +33,9 @@
 
 define void @combine_load(<256 x i32> *%p, i8 *%p2) {
 ; CHECK-LABEL: @combine_load(
-; CHECK-NEXT:    [[TMP1:%.*]] = alloca <256 x i32>, align 64
-; CHECK-NEXT:    [[T1:%.*]] = load <256 x i32>, <256 x i32>* [[P:%.*]], align 64
-; CHECK-NEXT:    [[TMP2:%.*]] = bitcast <256 x i32>* [[TMP1]] to i8*
-; CHECK-NEXT:    store <256 x i32> [[T1]], <256 x i32>* [[TMP1]], align 1024
-; CHECK-NEXT:    [[TMP3:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* [[TMP2]], i64 64)
-; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[P2:%.*]], i64 64, x86_amx [[TMP3]])
+; CHECK-NEXT:    [[TMP1:%.*]] = bitcast <256 x i32>* [[P:%.*]] to i8*
+; CHECK-NEXT:    [[TMP2:%.*]] = call x86_amx @llvm.x86.tileloadd64.internal(i16 16, i16 64, i8* [[TMP1]], i64 64)
+; CHECK-NEXT:    call void @llvm.x86.tilestored64.internal(i16 16, i16 64, i8* [[P2:%.*]], i64 64, x86_amx [[TMP2]])
 ; CHECK-NEXT:    ret void
 ;
   %t1 = load <256 x i32>, <256 x i32>* %p, align 64