Index: llvm/lib/Bitcode/Reader/BitcodeReader.cpp
===================================================================
--- llvm/lib/Bitcode/Reader/BitcodeReader.cpp
+++ llvm/lib/Bitcode/Reader/BitcodeReader.cpp
@@ -4970,18 +4970,19 @@
     InstructionList.push_back(I);
     break;
   }
-  case bitc::FUNC_CODE_INST_LOAD: { // LOAD: [opty, op, align, vol]
+  case bitc::FUNC_CODE_INST_LOAD: {
+    // LOAD: [opty, op, align, vol, [hasPtrProv, [PtrProv]]]
     unsigned OpNum = 0;
     Value *Op;
     if (getValueTypePair(Record, OpNum, NextValueNo, Op, &FullTy) ||
-        (OpNum + 2 != Record.size() && OpNum + 3 != Record.size()))
+        (OpNum + 2 > Record.size()))
       return error("Invalid record");

     if (!isa<PointerType>(Op->getType()))
       return error("Load operand is not a pointer type");

     Type *Ty = nullptr;
-    if (OpNum + 3 == Record.size()) {
+    if (OpNum + 3 <= Record.size()) {
       FullTy = getFullyStructuredTypeByID(Record[OpNum++]);
       Ty = flattenPointerTypes(FullTy);
     } else
@@ -4998,23 +4999,38 @@
       return error("load of unsized type");
     if (!Align)
       Align = TheModule->getDataLayout().getABITypeAlign(Ty);
-    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align);
+    bool IsVolatile = Record[OpNum + 1];
+    OpNum += 2;
+    Value *PtrProvenance = nullptr;
+    if (OpNum < Record.size()) {
+      // Do we have a ptr_provenance?
+      if (Record[OpNum++]) {
+        if (getValueTypePair(Record, OpNum, NextValueNo, PtrProvenance))
+          return error("Invalid ptr_provenance operand");
+      }
+    }
+    if (OpNum != Record.size())
+      return error("Invalid record");
+    I = new LoadInst(Ty, Op, "", IsVolatile, *Align);
+    if (PtrProvenance)
+      cast<LoadInst>(I)->setNoaliasProvenanceOperand(PtrProvenance);
     InstructionList.push_back(I);
     break;
   }
   case bitc::FUNC_CODE_INST_LOADATOMIC: {
-    // LOADATOMIC: [opty, op, align, vol, ordering, ssid]
+    // LOADATOMIC: [opty, op, align, vol, ordering, ssid,
+    //              [hasPtrProv, [PtrProv]]]
     unsigned OpNum = 0;
     Value *Op;
     if (getValueTypePair(Record, OpNum, NextValueNo, Op, &FullTy) ||
-        (OpNum + 4 != Record.size() && OpNum + 5 != Record.size()))
+        (OpNum + 4 > Record.size()))
       return error("Invalid record");

     if (!isa<PointerType>(Op->getType()))
       return error("Load operand is not a pointer type");

     Type *Ty = nullptr;
-    if (OpNum + 5 == Record.size()) {
+    if (OpNum + 5 <= Record.size()) {
       FullTy = getFullyStructuredTypeByID(Record[OpNum++]);
       Ty = flattenPointerTypes(FullTy);
     } else
@@ -5037,12 +5053,27 @@
       return Err;
     if (!Align)
       return error("Alignment missing from atomic load");
-    I = new LoadInst(Ty, Op, "", Record[OpNum + 1], *Align, Ordering, SSID);
+    bool IsVolatile = Record[OpNum + 1];
+    OpNum += 4;
+    Value *PtrProvenance = nullptr;
+    if (OpNum < Record.size()) {
+      // Do we have a ptr_provenance?
+      if (Record[OpNum++]) {
+        if (getValueTypePair(Record, OpNum, NextValueNo, PtrProvenance))
+          return error("Invalid ptr_provenance operand");
+      }
+    }
+    if (OpNum != Record.size())
+      return error("Invalid record");
+    I = new LoadInst(Ty, Op, "", IsVolatile, *Align, Ordering, SSID);
+    if (PtrProvenance)
+      cast<LoadInst>(I)->setNoaliasProvenanceOperand(PtrProvenance);
     InstructionList.push_back(I);
     break;
   }
   case bitc::FUNC_CODE_INST_STORE:
-  case bitc::FUNC_CODE_INST_STORE_OLD: { // STORE2:[ptrty, ptr, val, align, vol]
+  case bitc::FUNC_CODE_INST_STORE_OLD: {
+    // STORE2:[ptrty, ptr, val, align, vol, [hasPtrProv, [PtrProv]]]
     unsigned OpNum = 0;
     Value *Val, *Ptr;
     Type *FullTy;
@@ -5051,7 +5082,7 @@
             ? getValueTypePair(Record, OpNum, NextValueNo, Val)
             : popValue(Record, OpNum, NextValueNo,
                        getPointerElementFlatType(FullTy), Val)) ||
-        OpNum + 2 != Record.size())
+        OpNum + 2 > Record.size())
       return error("Invalid record");

     if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
@@ -5064,13 +5095,28 @@
       return error("store of unsized type");
     if (!Align)
       Align = TheModule->getDataLayout().getABITypeAlign(Val->getType());
-    I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align);
+    bool IsVolatile = Record[OpNum + 1];
+    OpNum += 2;
+    Value *PtrProvenance = nullptr;
+    if (OpNum < Record.size()) {
+      // Do we have a ptr_provenance?
+      if (Record[OpNum++]) {
+        if (getValueTypePair(Record, OpNum, NextValueNo, PtrProvenance))
+          return error("Invalid ptr_provenance operand");
+      }
+    }
+    if (OpNum != Record.size())
+      return error("Invalid record");
+    I = new StoreInst(Val, Ptr, IsVolatile, *Align);
+    if (PtrProvenance)
+      cast<StoreInst>(I)->setNoaliasProvenanceOperand(PtrProvenance);
     InstructionList.push_back(I);
     break;
   }
   case bitc::FUNC_CODE_INST_STOREATOMIC:
   case bitc::FUNC_CODE_INST_STOREATOMIC_OLD: {
-    // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid]
+    // STOREATOMIC: [ptrty, ptr, val, align, vol, ordering, ssid,
+    //               [hasPtrProv, [PtrProv]]]
     unsigned OpNum = 0;
     Value *Val, *Ptr;
     Type *FullTy;
@@ -5080,7 +5126,7 @@
             ? getValueTypePair(Record, OpNum, NextValueNo, Val)
             : popValue(Record, OpNum, NextValueNo,
                        getPointerElementFlatType(FullTy), Val)) ||
-        OpNum + 4 != Record.size())
+        OpNum + 4 > Record.size())
       return error("Invalid record");

     if (Error Err = typeCheckLoadStoreInst(Val->getType(), Ptr->getType()))
@@ -5099,7 +5145,21 @@
       return Err;
     if (!Align)
       return error("Alignment missing from atomic store");
-    I = new StoreInst(Val, Ptr, Record[OpNum + 1], *Align, Ordering, SSID);
+    bool IsVolatile = Record[OpNum + 1];
+    OpNum += 4;
+    Value *PtrProvenance = nullptr;
+    if (OpNum < Record.size()) {
+      // Do we have a ptr_provenance?
+      if (Record[OpNum++]) {
+        if (getValueTypePair(Record, OpNum, NextValueNo, PtrProvenance))
+          return error("Invalid ptr_provenance operand");
+      }
+    }
+    if (OpNum != Record.size())
+      return error("Invalid record");
+    I = new StoreInst(Val, Ptr, IsVolatile, *Align, Ordering, SSID);
+    if (PtrProvenance)
+      cast<StoreInst>(I)->setNoaliasProvenanceOperand(PtrProvenance);
     InstructionList.push_back(I);
     break;
   }
Index: llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
===================================================================
--- llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
+++ llvm/lib/Bitcode/Writer/BitcodeWriter.cpp
@@ -3057,7 +3057,8 @@
       pushValueAndType(I.getOperand(0), InstID, Vals);
     } else {
       Code = bitc::FUNC_CODE_INST_LOAD;
-      if (!pushValueAndType(I.getOperand(0), InstID, Vals)) // ptr
+      if (!pushValueAndType(I.getOperand(0), InstID, Vals) &&
+          !cast<LoadInst>(I).hasNoaliasProvenanceOperand()) // ptr
        AbbrevToUse = FUNCTION_INST_LOAD_ABBREV;
     }
     Vals.push_back(VE.getTypeID(I.getType()));
@@ -3067,6 +3068,15 @@
       Vals.push_back(getEncodedOrdering(cast<LoadInst>(I).getOrdering()));
       Vals.push_back(getEncodedSyncScopeID(cast<LoadInst>(I).getSyncScopeID()));
     }
+    if (cast<LoadInst>(I).hasNoaliasProvenanceOperand()) {
+      Vals.push_back(true); // ptr_provenance present
+      pushValueAndType(cast<LoadInst>(I).getNoaliasProvenanceOperand(), InstID,
+                       Vals); // ptrty + ptr_provenance
+    } else {
+      // No ptr_provenance - nothing to emit for now. If more arguments are
+      // emitted after this in the future, do not forget to emit a 'false':
+      // Vals.push_back(false);
+    }
     break;
   case Instruction::Store:
     if (cast<StoreInst>(I).isAtomic())
@@ -3082,6 +3092,15 @@
       Vals.push_back(
           getEncodedSyncScopeID(cast<StoreInst>(I).getSyncScopeID()));
     }
+    if (cast<StoreInst>(I).hasNoaliasProvenanceOperand()) {
+      Vals.push_back(true); // ptr_provenance present
+      pushValueAndType(cast<StoreInst>(I).getNoaliasProvenanceOperand(), InstID,
+                       Vals); // ptrty + ptr_provenance
+    } else {
+      // No ptr_provenance - nothing to emit for now. If more arguments are
+      // emitted after this in the future, do not forget to emit a 'false':
+      // Vals.push_back(false);
+    }
     break;
   case Instruction::AtomicCmpXchg:
     Code = bitc::FUNC_CODE_INST_CMPXCHG;
Index: llvm/test/Bitcode/compatibility.ll
===================================================================
--- llvm/test/Bitcode/compatibility.ll
+++ llvm/test/Bitcode/compatibility.ll
@@ -828,12 +828,26 @@
   %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16
   ; CHECK: %ld.3 = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, align 16

+  %ld.1p = load atomic i32, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  ; CHECK: %ld.1p = load atomic i32, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  %ld.2p = load atomic volatile i32, i32* %word acquire, ptr_provenance i32* %word, align 8
+  ; CHECK: %ld.2p = load atomic volatile i32, i32* %word acquire, ptr_provenance i32* %word, align 8
+  %ld.3p = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, ptr_provenance i32* %word, align 16
+  ; CHECK: %ld.3p = load atomic volatile i32, i32* %word syncscope("singlethread") seq_cst, ptr_provenance i32* %word, align 16
+
   store atomic i32 23, i32* %word monotonic, align 4
   ; CHECK: store atomic i32 23, i32* %word monotonic, align 4
   store atomic volatile i32 24, i32* %word monotonic, align 4
   ; CHECK: store atomic volatile i32 24, i32* %word monotonic, align 4
   store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
   ; CHECK: store atomic volatile i32 25, i32* %word syncscope("singlethread") monotonic, align 4
+
+  store atomic i32 26, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  ; CHECK: store atomic i32 26, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  store atomic volatile i32 27, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  ; CHECK: store atomic volatile i32 27, i32* %word monotonic, ptr_provenance i32* %word, align 4
+  store atomic volatile i32 28, i32* %word syncscope("singlethread") monotonic, ptr_provenance i32* %word, align 4
+  ; CHECK: store atomic volatile i32 28, i32* %word syncscope("singlethread") monotonic, ptr_provenance i32* %word, align 4

   ret void
 }
@@ -1388,11 +1402,21 @@
   load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
   ; CHECK: load volatile i32*, i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9

+  load i32*, i32** %base, ptr_provenance i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load i32*, i32** %base, ptr_provenance i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  load volatile i32*, i32** %base, ptr_provenance i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+  ; CHECK: load volatile i32*, i32** %base, ptr_provenance i32** %base, align 8, !invariant.load !7, !nontemporal !8, !nonnull !7, !dereferenceable !9, !dereferenceable_or_null !9
+
   store i32* null, i32** %base, align 4, !nontemporal !8
   ; CHECK: store i32* null, i32** %base, align 4, !nontemporal !8
   store volatile i32* null, i32** %base, align 4, !nontemporal !8
   ; CHECK: store volatile i32* null, i32** %base, align 4, !nontemporal !8
+
+  store i32* null, i32** %base, ptr_provenance i32** %base, align 4, !nontemporal !8
+  ; CHECK: store i32* null, i32** %base, ptr_provenance i32** %base, align 4, !nontemporal !8
+  store volatile i32* null, i32** %base, ptr_provenance i32** %base, align 4, !nontemporal !8
+  ; CHECK: store volatile i32* null, i32** %base, ptr_provenance i32** %base, align 4, !nontemporal !8
+
   ret void
 }
Index: llvm/test/Bitcode/loadstore_ptr_provenance.ll
===================================================================
--- llvm/test/Bitcode/loadstore_ptr_provenance.ll
+++ llvm/test/Bitcode/loadstore_ptr_provenance.ll
@@ -1,7 +1,6 @@
 ; RUN: opt --verify -S < %s | FileCheck %s
-; Activate when bitcode support is added:
-; R U N: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
-; R U N: verify-uselistorder < %s
+; RUN: llvm-as < %s | llvm-dis | llvm-as | llvm-dis | FileCheck %s
+; RUN: verify-uselistorder < %s

 define i32 @f(i32* %p, i32* %q, i32* %word, i32** %base) {
 ; CHECK: define i32 @f(i32* %p, i32* %q, i32* %word, i32** %base) {
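
Note: with the reader and writer changes above, a load or store carrying a ptr_provenance operand now survives the llvm-as/llvm-dis round trip that loadstore_ptr_provenance.ll re-enables. As a minimal sketch of the IR being encoded (the function and value names below are illustrative, not taken from the patch's tests):

  define i32 @sample(i32* %p, i32* %prov) {
    %v = load i32, i32* %p, ptr_provenance i32* %prov, align 4
    store i32 %v, i32* %p, ptr_provenance i32* %prov, align 4
    ret i32 %v
  }

Because the writer appends the trailing hasPtrProv flag only when a provenance operand is present, records without one keep their pre-existing layout, and the reader's relaxed size checks (`>` instead of `!=`) continue to accept bitcode produced before this change.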