diff --git a/llvm/lib/Target/X86/X86LowerAMXType.cpp b/llvm/lib/Target/X86/X86LowerAMXType.cpp
--- a/llvm/lib/Target/X86/X86LowerAMXType.cpp
+++ b/llvm/lib/Target/X86/X86LowerAMXType.cpp
@@ -703,6 +703,9 @@
 public:
   X86LowerAMXCast(Function &F) : Func(F) {}
 
+  void combineCastStore(IntrinsicInst *Cast, StoreInst *ST);
+  void combineLoadCast(IntrinsicInst *Cast, LoadInst *LD);
+  bool combineLdSt(SmallVectorImpl<Instruction *> &Casts);
   bool combineAMXcast(TargetLibraryInfo *TLI);
   bool transformAMXCast(IntrinsicInst *AMXCast);
   bool transformAllAMXCast();
@@ -913,7 +916,7 @@
 // -->
 // call void @llvm.x86.tilestored64.internal(i16 %row, i16 %col, i8* %p,
 //                                           i64 64, x86_amx %42)
-static void combineCastStore(IntrinsicInst *Cast, StoreInst *ST) {
+void X86LowerAMXCast::combineCastStore(IntrinsicInst *Cast, StoreInst *ST) {
   Value *Tile = Cast->getOperand(0);
   // TODO: If it is cast intrinsic or phi node, we can propagate the
   // shape information through def-use chain.
@@ -939,7 +942,7 @@
 // -->
 // %66 = call x86_amx @llvm.x86.tileloadd64.internal(i16 %row, i16 %col,
 //                                                   i8* %p, i64 64)
-static void combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) {
+void X86LowerAMXCast::combineLoadCast(IntrinsicInst *Cast, LoadInst *LD) {
   Value *Row = nullptr, *Col = nullptr;
   Use &U = *(Cast->use_begin());
   unsigned OpNo = U.getOperandNo();
@@ -961,7 +964,7 @@
   Cast->replaceAllUsesWith(NewInst);
 }
 
-static bool combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
+bool X86LowerAMXCast::combineLdSt(SmallVectorImpl<Instruction *> &Casts) {
   bool Change = false;
   for (auto *Cast : Casts) {
     auto *II = cast<IntrinsicInst>(Cast);