Index: llvm/trunk/include/llvm/TableGen/Record.h
===================================================================
--- llvm/trunk/include/llvm/TableGen/Record.h
+++ llvm/trunk/include/llvm/TableGen/Record.h
@@ -788,7 +788,7 @@
 
   // Fold - If possible, fold this to a simpler init. Return this if not
   // possible to fold.
-  Init *Fold(Record *CurRec) const;
+  Init *Fold(Record *CurRec, bool IsFinal = false) const;
 
   Init *resolveReferences(Resolver &R) const override;
 
@@ -1497,6 +1497,9 @@
 
   /// If there are any field references that refer to fields
   /// that have been filled in, we can propagate the values now.
+  ///
+  /// This is a final resolve: any error messages, e.g. due to undefined
+  /// !cast references, are generated now.
   void resolveReferences();
 
   /// Apply the resolver to the name of the record as well as to the
@@ -1786,6 +1789,7 @@
 /// Init::resolveReferences.
 class Resolver {
   Record *CurRec;
+  bool IsFinal = false;
 
 public:
   explicit Resolver(Record *CurRec) : CurRec(CurRec) {}
@@ -1801,6 +1805,13 @@
   // result in a ? (UnsetInit). This behavior is used to represent instruction
   // encodings by keeping references to unset variables within a record.
   virtual bool keepUnsetBits() const { return false; }
+
+  // Whether this is the final resolve step before adding a record to the
+  // RecordKeeper. Error reporting during resolve and related constant folding
+  // should only happen when this is true.
+  bool isFinal() const { return IsFinal; }
+
+  void setFinal(bool Final) { IsFinal = Final; }
 };
 
 /// Resolve arbitrary mappings.
@@ -1861,7 +1872,10 @@
   DenseSet<Init *> Shadowed;
 
 public:
-  explicit ShadowResolver(Resolver &R) : Resolver(R.getCurrentRecord()), R(R) {}
+  explicit ShadowResolver(Resolver &R)
+      : Resolver(R.getCurrentRecord()), R(R) {
+    setFinal(R.isFinal());
+  }
 
   void addShadow(Init *Key) { Shadowed.insert(Key); }
 
Index: llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
===================================================================
--- llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
+++ llvm/trunk/include/llvm/Target/TargetSelectionDAG.td
@@ -1132,27 +1132,27 @@
 
 multiclass binary_atomic_op_ord {
   def #NAME#_monotonic : PatFrag<(ops node:$ptr, node:$val),
-      (!cast(#NAME) node:$ptr, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingMonotonic = 1;
   }
   def #NAME#_acquire : PatFrag<(ops node:$ptr, node:$val),
-      (!cast(#NAME) node:$ptr, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingAcquire = 1;
   }
   def #NAME#_release : PatFrag<(ops node:$ptr, node:$val),
-      (!cast(#NAME) node:$ptr, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingRelease = 1;
   }
   def #NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$val),
-      (!cast(#NAME) node:$ptr, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingAcquireRelease = 1;
   }
   def #NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$val),
-      (!cast(#NAME) node:$ptr, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingSequentiallyConsistent = 1;
   }
@@ -1160,27 +1160,27 @@
 
 multiclass ternary_atomic_op_ord {
   def #NAME#_monotonic : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
-      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingMonotonic = 1;
   }
   def #NAME#_acquire : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
-      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingAcquire = 1;
   }
   def #NAME#_release : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
-      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingRelease = 1;
   }
   def #NAME#_acq_rel : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
-      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingAcquireRelease = 1;
   }
   def #NAME#_seq_cst : PatFrag<(ops node:$ptr, node:$cmp, node:$val),
-      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
+      (!cast(#NAME) node:$ptr, node:$cmp, node:$val)> {
     let IsAtomic = 1;
     let IsAtomicOrderingSequentiallyConsistent = 1;
   }
Index: llvm/trunk/lib/TableGen/Record.cpp
===================================================================
--- llvm/trunk/lib/TableGen/Record.cpp
+++ llvm/trunk/lib/TableGen/Record.cpp
@@ -194,7 +194,7 @@
 
 std::string RecordRecTy::getAsString() const {
   if (NumClasses == 1)
-    return getClasses()[0]->getName();
+    return getClasses()[0]->getNameInitAsString();
 
   std::string Str = "{";
   bool First = true;
@@ -202,7 +202,7 @@
     if (!First)
      Str += ", ";
     First = false;
-    Str += R->getName();
+    Str += R->getNameInitAsString();
   }
   Str += "}";
   return Str;
@@ -700,7 +700,7 @@
   ProfileUnOpInit(ID, getOpcode(), getOperand(), getType());
 }
 
-Init *UnOpInit::Fold(Record *CurRec) const {
+Init *UnOpInit::Fold(Record *CurRec, bool IsFinal) const {
   switch (getOpcode()) {
   case CAST:
     if (isa<StringRecTy>(getType())) {
@@ -715,12 +715,34 @@
     } else if (isa<RecordRecTy>(getType())) {
       if (StringInit *Name = dyn_cast<StringInit>(LHS)) {
         assert(CurRec && "NULL pointer");
-        if (Record *D = (CurRec->getRecords()).getDef(Name->getValue()))
-          return DefInit::get(D);
+        Record *D;
 
-        PrintFatalError(CurRec->getLoc(),
-                        Twine("Undefined reference to record: '") +
-                        Name->getValue() + "'\n");
+        // Self-references are allowed, but their resolution is delayed until
+        // the final resolve to ensure that we get the correct type for them.
+        if (Name == CurRec->getNameInit()) {
+          if (!IsFinal)
+            break;
+          D = CurRec;
+        } else {
+          D = CurRec->getRecords().getDef(Name->getValue());
+          if (!D) {
+            if (IsFinal)
+              PrintFatalError(CurRec->getLoc(),
+                              Twine("Undefined reference to record: '") +
+                              Name->getValue() + "'\n");
+            break;
+          }
+        }
+
+        DefInit *DI = DefInit::get(D);
+        if (!DI->getType()->typeIsA(getType())) {
+          PrintFatalError(CurRec->getLoc(),
+                          Twine("Expected type '") +
+                          getType()->getAsString() + "', got '" +
+                          DI->getType()->getAsString() + "' in: " +
+                          getAsString() + "\n");
+        }
+        return DI;
       }
     }
 
@@ -762,9 +784,9 @@
 Init *UnOpInit::resolveReferences(Resolver &R) const {
   Init *lhs = LHS->resolveReferences(R);
 
-  if (LHS != lhs)
+  if (LHS != lhs || (R.isFinal() && getOpcode() == CAST))
     return (UnOpInit::get(getOpcode(), lhs, getType()))
-        ->Fold(R.getCurrentRecord());
+        ->Fold(R.getCurrentRecord(), R.isFinal());
   return const_cast<UnOpInit *>(this);
 }
 
@@ -1904,6 +1926,7 @@
 
 void Record::resolveReferences() {
   RecordResolver R(*this);
+  R.setFinal(true);
   resolveReferences(R);
 }
 
Index: llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
===================================================================
--- llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
+++ llvm/trunk/lib/Target/AArch64/AArch64InstrFormats.td
@@ -5931,10 +5931,10 @@
 
 multiclass SIMDThreeScalarHSTied opc, string asm,
                                  SDPatternOperator OpNode = null_frag> {
   def v1i32: BaseSIMDThreeScalarTied;
   def v1i16: BaseSIMDThreeScalarTied;
 }
@@ -8789,7 +8789,7 @@
 
 let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
 class BaseSIMDLdR opcode, bit S, bits<2> size, string asm,
-                  Operand listtype>
+                  DAGOperand listtype>
   : BaseSIMDLdStSingle<1, R, opcode, asm, "\t$Vt, [$Rn]", "",
                        (outs listtype:$Vt), (ins GPR64sp:$Rn), []> {
@@ -8801,7 +8801,7 @@
 }
 let mayLoad = 1, mayStore = 0, hasSideEffects = 0 in
 class BaseSIMDLdRPost opcode, bit S, bits<2> size,
-                      string asm, Operand listtype, Operand GPR64pi>
+                      string asm, DAGOperand listtype, DAGOperand GPR64pi>
   : BaseSIMDLdStSingle<1, R, opcode, asm, "\t$Vt, [$Rn], $Xm",
                        "$Rn = $wback",
                        (outs GPR64sp:$wback, listtype:$Vt),
@@ -8859,46 +8859,46 @@
 
 multiclass SIMDLdR opcode, bit S, string asm, string Count,
                    int Offset1, int Offset2, int Offset4, int Offset8> {
   def v8b : BaseSIMDLdR<0, R, opcode, S, 0b00, asm,
-                        !cast("VecList" # Count # "8b")>;
+                        !cast("VecList" # Count # "8b")>;
   def v16b: BaseSIMDLdR<1, R, opcode, S, 0b00, asm,
-                        !cast("VecList" # Count #"16b")>;
+                        !cast("VecList" # Count #"16b")>;
   def v4h : BaseSIMDLdR<0, R, opcode, S, 0b01, asm,
-                        !cast("VecList" # Count #"4h")>;
+                        !cast("VecList" # Count #"4h")>;
   def v8h : BaseSIMDLdR<1, R, opcode, S, 0b01, asm,
-                        !cast("VecList" # Count #"8h")>;
+                        !cast("VecList" # Count #"8h")>;
   def v2s : BaseSIMDLdR<0, R, opcode, S, 0b10, asm,
-                        !cast("VecList" # Count #"2s")>;
+                        !cast("VecList" # Count #"2s")>;
   def v4s : BaseSIMDLdR<1, R, opcode, S, 0b10, asm,
-                        !cast("VecList" # Count #"4s")>;
+                        !cast("VecList" # Count #"4s")>;
   def v1d : BaseSIMDLdR<0, R, opcode, S, 0b11, asm,
-                        !cast("VecList" # Count #"1d")>;
+                        !cast("VecList" # Count #"1d")>;
   def v2d : BaseSIMDLdR<1, R, opcode, S, 0b11, asm,
-                        !cast("VecList" # Count #"2d")>;
+                        !cast("VecList" # Count #"2d")>;
   def v8b_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b00, asm,
-                                 !cast("VecList" # Count # "8b"),
-                                 !cast("GPR64pi" # Offset1)>;
+                                 !cast("VecList" # Count # "8b"),
+                                 !cast("GPR64pi" # Offset1)>;
   def v16b_POST: BaseSIMDLdRPost<1, R, opcode, S, 0b00, asm,
-                                 !cast("VecList" # Count # "16b"),
-                                 !cast("GPR64pi" # Offset1)>;
+                                 !cast("VecList" # Count # "16b"),
+                                 !cast("GPR64pi" # Offset1)>;
   def v4h_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b01, asm,
-                                 !cast("VecList" # Count # "4h"),
-                                 !cast("GPR64pi" # Offset2)>;
+                                 !cast("VecList" # Count # "4h"),
+                                 !cast("GPR64pi" # Offset2)>;
   def v8h_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b01, asm,
-                                 !cast("VecList" # Count # "8h"),
-                                 !cast("GPR64pi" # Offset2)>;
+                                 !cast("VecList" # Count # "8h"),
+                                 !cast("GPR64pi" # Offset2)>;
   def v2s_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b10, asm,
-                                 !cast("VecList" # Count # "2s"),
-                                 !cast("GPR64pi" # Offset4)>;
+                                 !cast("VecList" # Count # "2s"),
+                                 !cast("GPR64pi" # Offset4)>;
   def v4s_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b10, asm,
-                                 !cast("VecList" # Count # "4s"),
-                                 !cast("GPR64pi" # Offset4)>;
+                                 !cast("VecList" # Count # "4s"),
+                                 !cast("GPR64pi" # Offset4)>;
   def v1d_POST : BaseSIMDLdRPost<0, R, opcode, S, 0b11, asm,
-                                 !cast("VecList" # Count # "1d"),
-                                 !cast("GPR64pi" # Offset8)>;
+                                 !cast("VecList" # Count # "1d"),
+                                 !cast("GPR64pi" # Offset8)>;
   def v2d_POST : BaseSIMDLdRPost<1, R, opcode, S, 0b11, asm,
-                                 !cast("VecList" # Count # "2d"),
-                                 !cast("GPR64pi" # Offset8)>;
+                                 !cast("VecList" # Count # "2d"),
+                                 !cast("GPR64pi" # Offset8)>;
 
   defm : SIMDLdrAliases;
   defm : SIMDLdrAliases;
@@ -9300,9 +9300,9 @@
 
 let Predicates = [HasNEON, HasRDM] in {
 class BaseSIMDThreeSameVectorTiedR0 size, bits<5> opcode,
-                                    RegisterOperand regtype, string asm,
+                                    RegisterOperand regtype, string asm,
                                     string kind, list pattern>
-  : BaseSIMDThreeSameVectorTied {
 }
 multiclass SIMDThreeSameVectorSQRDMLxHTiedHS opc, string asm,
@@ -9311,7 +9311,7 @@
                               [(set (v4i16 V64:$dst),
                                     (Accum (v4i16 V64:$Rd),
                                            (v4i16 (int_aarch64_neon_sqrdmulh (v4i16 V64:$Rn),
-                                                                             (v4i16 V64:$Rm)))))]>;
+                                                                             (v4i16 V64:$Rm)))))]>;
   def v8i16 : BaseSIMDThreeSameVectorTiedR0<1, U, 0b01, opc, V128, asm, ".8h",
                               [(set (v8i16 V128:$dst),
                                     (Accum (v8i16 V128:$Rd),
@@ -9375,28 +9375,28 @@
     let Inst{21} = idx{0};
   }
 
-  // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
+  // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
   // an intermediate EXTRACT_SUBREG would be untyped.
-  // FIXME: direct EXTRACT_SUBREG from v2i32 to i32 is illegal, that's why we
+  // FIXME: direct EXTRACT_SUBREG from v2i32 to i32 is illegal, that's why we
   // got it lowered here as (i32 vector_extract (v4i32 insert_subvector(..)))
   def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
-                        (i32 (vector_extract
+                        (i32 (vector_extract
                                 (v4i32 (insert_subvector
-                                        (undef),
-                                        (v2i32 (int_aarch64_neon_sqrdmulh
+                                        (undef),
+                                        (v2i32 (int_aarch64_neon_sqrdmulh
                                                   (v2i32 V64:$Rn),
-                                                  (v2i32 (AArch64duplane32
+                                                  (v2i32 (AArch64duplane32
                                                            (v4i32 V128:$Rm),
                                                            VectorIndexS:$idx)))),
                                         (i32 0))),
                                 (i64 0))))),
             (EXTRACT_SUBREG
                 (v2i32 (!cast(NAME # v2i32_indexed)
-                           (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
-                                                 FPR32Op:$Rd,
-                                                 ssub)),
+                           (v2i32 (INSERT_SUBREG (v2i32 (IMPLICIT_DEF)),
+                                                 FPR32Op:$Rd,
+                                                 ssub)),
                            V64:$Rn,
-                           V128:$Rm,
+                           V128:$Rm,
                            VectorIndexS:$idx)),
                 ssub)>;
@@ -9417,26 +9417,26 @@
 
   // FIXME: it would be nice to use the scalar (v1i32) instruction here, but
   // an intermediate EXTRACT_SUBREG would be untyped.
   def : Pat<(i32 (Accum (i32 FPR32Op:$Rd),
-                        (i32 (vector_extract
-                                (v4i32 (int_aarch64_neon_sqrdmulh
+                        (i32 (vector_extract
                                (v4i32 (int_aarch64_neon_sqrdmulh
                                          (v4i32 V128:$Rn),
-                                         (v4i32 (AArch64duplane32
+                                         (v4i32 (AArch64duplane32
                                                   (v4i32 V128:$Rm),
                                                   VectorIndexS:$idx)))),
                                (i64 0))))),
             (EXTRACT_SUBREG
                 (v4i32 (!cast(NAME # v4i32_indexed)
-                           (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
-                                                 FPR32Op:$Rd,
-                                                 ssub)),
+                           (v4i32 (INSERT_SUBREG (v4i32 (IMPLICIT_DEF)),
+                                                 FPR32Op:$Rd,
+                                                 ssub)),
                            V128:$Rn,
-                           V128:$Rm,
+                           V128:$Rm,
                            VectorIndexS:$idx)),
                 ssub)>;
 
   def i16_indexed : BaseSIMDIndexedTied<1, U, 1, 0b01, opc,
                                         FPR16Op, FPR16Op, V128_lo,
-                                        VectorIndexH, asm, ".h", "", "", ".h",
+                                        VectorIndexH, asm, ".h", "", "", ".h",
                                         []> {
     bits<3> idx;
     let Inst{11} = idx{2};
@@ -9930,7 +9930,7 @@
 
   let Predicates = [HasLSE];
 }
 
-multiclass LDOPregister opc, string op, bits<1> Acq, bits<1> Rel,
+multiclass LDOPregister opc, string op, bits<1> Acq, bits<1> Rel,
                         string order> {
   let Sz = 0b00, Acq = Acq, Rel = Rel, opc = opc in
   def B : BaseLDOPregister;
@@ -9947,15 +9947,15 @@
 
 let Predicates = [HasLSE] in
 multiclass LDOPregister_patterns_ord_dag {
-  def : Pat<(!cast(op#"_"#size#"_monotonic") GPR64sp:$Rn, SrcRHS),
+  def : Pat<(!cast(op#"_"#size#"_monotonic") GPR64sp:$Rn, SrcRHS),
            (!cast(inst # suffix) DstRHS, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_acquire") GPR64sp:$Rn, SrcRHS),
+  def : Pat<(!cast(op#"_"#size#"_acquire") GPR64sp:$Rn, SrcRHS),
            (!cast(inst # "A" # suffix) DstRHS, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_release") GPR64sp:$Rn, SrcRHS),
+  def : Pat<(!cast(op#"_"#size#"_release") GPR64sp:$Rn, SrcRHS),
            (!cast(inst # "L" # suffix) DstRHS, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_acq_rel") GPR64sp:$Rn, SrcRHS),
+  def : Pat<(!cast(op#"_"#size#"_acq_rel") GPR64sp:$Rn, SrcRHS),
            (!cast(inst # "AL" # suffix) DstRHS, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_seq_cst") GPR64sp:$Rn, SrcRHS),
+  def : Pat<(!cast(op#"_"#size#"_seq_cst") GPR64sp:$Rn, SrcRHS),
            (!cast(inst # "AL" # suffix) DstRHS, GPR64sp:$Rn)>;
 }
@@ -9994,15 +9994,15 @@
 
 let Predicates = [HasLSE] in
 multiclass CASregister_patterns_ord_dag {
-  def : Pat<(!cast(op#"_"#size#"_monotonic") GPR64sp:$Rn, OLD, NEW),
+  def : Pat<(!cast(op#"_"#size#"_monotonic") GPR64sp:$Rn, OLD, NEW),
           (!cast(inst # suffix) OLD, NEW, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_acquire") GPR64sp:$Rn, OLD, NEW),
+  def : Pat<(!cast(op#"_"#size#"_acquire") GPR64sp:$Rn, OLD, NEW),
          (!cast(inst # "A" # suffix) OLD, NEW, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_release") GPR64sp:$Rn, OLD, NEW),
+  def : Pat<(!cast(op#"_"#size#"_release") GPR64sp:$Rn, OLD, NEW),
          (!cast(inst # "L" # suffix) OLD, NEW, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_acq_rel") GPR64sp:$Rn, OLD, NEW),
+  def : Pat<(!cast(op#"_"#size#"_acq_rel") GPR64sp:$Rn, OLD, NEW),
          (!cast(inst # "AL" # suffix) OLD, NEW, GPR64sp:$Rn)>;
-  def : Pat<(!cast(op#"_"#size#"_seq_cst") GPR64sp:$Rn, OLD, NEW),
+  def : Pat<(!cast(op#"_"#size#"_seq_cst") GPR64sp:$Rn, OLD, NEW),
          (!cast(inst # "AL" # suffix) OLD, NEW, GPR64sp:$Rn)>;
 }
Index: llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
===================================================================
--- llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
+++ llvm/trunk/lib/Target/AMDGPU/SMInstructions.td
@@ -500,7 +500,7 @@
 
 class SMRD_Pattern_ci : GCNPat <
   (smrd_load (SMRDImm32 i64:$sbase, i32:$offset)),
-  (vt (!cast(Instr#"_IMM_ci") $sbase, $offset, 0))> {
+  (vt (!cast(Instr#"_IMM_ci") $sbase, $offset, 0))> {
   let OtherPredicates = [isCIOnly];
 }
 
Index: llvm/trunk/lib/Target/SystemZ/SystemZOperands.td
===================================================================
--- llvm/trunk/lib/Target/SystemZ/SystemZOperands.td
+++ llvm/trunk/lib/Target/SystemZ/SystemZOperands.td
@@ -115,13 +115,13 @@
 class BDMode
   : AddressingMode("ADDR"##bitsize),
-                   !cast("disp"##dispsize##"imm"##bitsize))>;
+                   !cast("disp"##dispsize##"imm"##bitsize))>;
 
 // An addressing mode with a base, displacement and index.
 class BDXMode
   : AddressingMode("ADDR"##bitsize),
-                   !cast("disp"##dispsize##"imm"##bitsize),
+                   !cast("disp"##dispsize##"imm"##bitsize),
                    !cast("ADDR"##bitsize))>;
 
 // A BDMode paired with an immediate length operand of LENSIZE bits.
@@ -130,21 +130,21 @@
   : AddressingMode("ADDR"##bitsize),
-                   !cast("disp"##dispsize##"imm"##bitsize),
-                   !cast("imm"##bitsize))>;
+                   !cast("disp"##dispsize##"imm"##bitsize),
+                   !cast("imm"##bitsize))>;
 
 // A BDMode paired with a register length operand.
 class BDRMode
   : AddressingMode("ADDR"##bitsize),
-                   !cast("disp"##dispsize##"imm"##bitsize),
+                   !cast("disp"##dispsize##"imm"##bitsize),
                    !cast("GR"##bitsize))>;
 
 // An addressing mode with a base, displacement and a vector index.
 class BDVMode
   : AddressOperand("ADDR"##bitsize),
-                   !cast("disp"##dispsize##"imm"##bitsize),
+                   !cast("disp"##dispsize##"imm"##bitsize),
                    !cast("VR128"))>;
 
 //===----------------------------------------------------------------------===//
Index: llvm/trunk/test/TableGen/cast-typeerror.td
===================================================================
--- llvm/trunk/test/TableGen/cast-typeerror.td
+++ llvm/trunk/test/TableGen/cast-typeerror.td
@@ -10,5 +10,5 @@
   B b = !cast(name);
 }
 
-// CHECK: error: Invalid value of type 'A' is found when setting 'b' of type 'B'
+// CHECK: error: Expected type 'B', got 'A' in: !cast("A0")
 def Test : C<"A0">;
Index: llvm/trunk/test/TableGen/self-reference.td
===================================================================
--- llvm/trunk/test/TableGen/self-reference.td
+++ llvm/trunk/test/TableGen/self-reference.td
@@ -16,6 +16,14 @@
 // CHECK: dag q = (ops C0);
 // CHECK: }
 
+// CHECK: def D0 {
+// CHECK:   D d = D0;
+// CHECK: }
+
+// CHECK: def E0 {
+// CHECK:   E e = E0;
+// CHECK: }
+
 def ops;
 
 class A {
@@ -42,3 +50,26 @@
 }
 
 def C0 : C<"C0">;
+
+// Explore some unused corner cases.
+//
+// A self-reference within a class may seem icky, but it unavoidably falls out
+// orthogonally of having forward class declarations and late resolve of self
+// references.
+class D {
+  D d = !cast(self);
+}
+
+def D0 : D<"D0">;
+
+class E {
+  E e = x;
+}
+
+// Putting the !cast directly in the def should work as well: we shouldn't
+// depend on implementation details of when exactly the record is looked up.
+//
+// Note the difference between !cast("E0") and plain E0: the latter wouldn't
+// work here because E0 does not yet have E as a superclass while the template
+// arguments are being parsed.
+def E0 : E("E0")>;
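
As a quick sketch of the behavior this patch enables (not part of the change itself; the class and def names below are hypothetical, modeled on the self-reference.td test above): a record may now name itself in a !cast, because the cast is kept unresolved until the final resolve, at which point the record's full type is known:

    class SelfRef<string name> {
      SelfRef ref = !cast<SelfRef>(name);  // stays unresolved until the final resolve
    }

    // Resolves to "SelfRef ref = Foo;" once Foo is complete.
    def Foo : SelfRef<"Foo">;

A cast to a name that never becomes a record still fails, but only at the final resolve ("Undefined reference to record: ..."), and a cast whose target exists but does not derive from the requested class now trips the new dynamic type check ("Expected type ..., got ... in: ...").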