Index: llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp =================================================================== --- llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp +++ llvm/lib/Target/X86/AsmParser/X86AsmParser.cpp @@ -835,8 +835,6 @@ InlineAsmIdentifierInfo &Info, bool IsUnevaluatedOperand, SMLoc &End); - std::unique_ptr<X86Operand> ParseMemOperand(unsigned SegReg, SMLoc MemStart); - bool ParseIntelMemoryOperandSize(unsigned &Size); std::unique_ptr<X86Operand> CreateMemForInlineAsm(unsigned SegReg, const MCExpr *Disp, unsigned BaseReg, @@ -1936,38 +1934,8 @@ std::unique_ptr<X86Operand> X86AsmParser::ParseATTOperand() { MCAsmParser &Parser = getParser(); switch (getLexer().getKind()) { - default: - // Parse a memory operand with no segment register. - return ParseMemOperand(0, Parser.getTok().getLoc()); - case AsmToken::Percent: { - // Read the register. - unsigned RegNo; - SMLoc Start, End; - if (ParseRegister(RegNo, Start, End)) return nullptr; - if (RegNo == X86::EIZ || RegNo == X86::RIZ) { - Error(Start, "%eiz and %riz can only be used as index registers", - SMRange(Start, End)); - return nullptr; - } - if (RegNo == X86::RIP) { - Error(Start, "%rip can only be used as a base register", - SMRange(Start, End)); - return nullptr; - } - - // If this is a segment register followed by a ':', then this is the start - // of a memory reference, otherwise this is a normal register reference. - if (getLexer().isNot(AsmToken::Colon)) - return X86Operand::CreateReg(RegNo, Start, End); - - if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(RegNo)) - return ErrorOperand(Start, "invalid segment register"); - - getParser().Lex(); // Eat the colon. - return ParseMemOperand(RegNo, Start); - } case AsmToken::Dollar: { - // $42 -> immediate. + // $42 or $ID -> immediate. 
SMLoc Start = Parser.getTok().getLoc(), End; Parser.Lex(); const MCExpr *Val; @@ -1975,10 +1943,251 @@ return nullptr; return X86Operand::CreateImm(Val, Start, End); } - case AsmToken::LCurly:{ + case AsmToken::LCurly: { SMLoc Start = Parser.getTok().getLoc(); return ParseRoundingModeOp(Start); } + default: { + // This is a memory operand or a register. We have some parsing complications + // as a '(' may be part of an immediate expression or the addressing mode + // block. This is complicated by the possibility for an assembler-level + // variable to refer to a register as well as an immediate expression. + + // Cases with relevant parentheses explicitly listed: + // 1. seg : imm (base-index-scale-expr) + // 2. seg : (imm) (base-index-scale-expr) + // 3. seg : (base-index-scale-expr) + // 4. imm (base-index-scale-expr) + // 5. (imm) (base-index-scale-expr) + // 6. (base-index-scale-expr) + // 7. imm + // 8. (imm) + // 9. reg + + // Check for a segment prefix. + SMLoc StartLoc = Parser.getTok().getLoc(), EndLoc; + SMLoc Loc; + const MCExpr *Imm = nullptr; + unsigned SegReg = 0; + if (getLexer().isNot(AsmToken::LParen)) { + // Parse Potential Imm. This is either an expression or a register. + if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Imm, EndLoc)) + return nullptr; + // Parse out Register. + if (auto *RE = dyn_cast<X86MCExpr>(Imm)) { + // Segment Register. Reset Imm. + Imm = nullptr; + unsigned Reg = RE->getRegNo(); + // Sanity check register. 
+ if (Reg == X86::EIZ || Reg == X86::RIZ) { + Error(Loc, "%eiz and %riz can only be used as index registers", + SMRange(Loc, EndLoc)); + return nullptr; + } + if (Reg == X86::RIP) { + Error(Loc, "%rip can only be used as a base register", + SMRange(Loc, EndLoc)); + return nullptr; + } + // Return non-segment register or segment register without ':' + // immediately + if (Parser.parseOptionalToken(AsmToken::Colon)) { + if (!X86MCRegisterClasses[X86::SEGMENT_REGRegClassID].contains(Reg)) + return ErrorOperand(Loc, "invalid segment register"); + SegReg = Reg; + } else // Not a Segment Modifier. Return now. + return X86Operand::CreateReg(Reg, Loc, EndLoc); + } + } + + // Current position (*) in remaining memory operands: + // 1. seg : * imm (base-index-scale-expr) + // 2. seg : *(imm) (base-index-scale-expr) + // 3. seg : *(base-index-scale-expr) + // 4. imm *(base-index-scale-expr) + // 5. *(imm) (base-index-scale-expr) + // 6. *(base-index-scale-expr) + // 7. imm * + // 8. *(imm) + // 9. ALREADY RETURNED + + // If we haven't parsed an immediate expression yet, check if we're in cases + // 4 or 6 by checking if the first object + // after the parenthesis is a register (or an identifier referring to a + // register) and parse imm-expr or default to 0 as appropriate. 
+ + if (!Imm) { + bool AtMemOperand = false; + if (getLexer().is(AsmToken::LParen)) { + AsmToken Buf[2]; + auto TokCount = getLexer().peekTokens(Buf, true); + if (TokCount >= 1) { + StringRef Id; + switch (Buf[0].getKind()) { + case AsmToken::Percent: + case AsmToken::Comma: + AtMemOperand = true; + break; + case AsmToken::At: + case AsmToken::Dollar: + if ((TokCount > 1) && + (Buf[1].is(AsmToken::Identifier) || + Buf[1].is(AsmToken::String)) && + (Buf[0].getLoc().getPointer() + 1 == + Buf[1].getLoc().getPointer())) + Id = StringRef(Buf[0].getLoc().getPointer(), + Buf[1].getIdentifier().size() + 1); + break; + case AsmToken::Identifier: + case AsmToken::String: + Id = Buf[0].getIdentifier(); + break; + default: + break; + } + if (!Id.empty()) { + // Lookup id to see if it's a register + MCSymbol *Sym = getContext().getOrCreateSymbol(Id); + if (Sym->isVariable()) { + auto V = Sym->getVariableValue(/*SetUsed*/ false); + AtMemOperand = isa<X86MCExpr>(V); + } + } + } + } + // Parse immediate if we're not at the mem operand yet. + if (!AtMemOperand) { + if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(Imm, EndLoc)) + return nullptr; + assert(!isa<X86MCExpr>(Imm) && + "Expected non-register to be parsed here."); + } else { + // Imm is implicitly zero if we haven't parsed it yet. + Imm = MCConstantExpr::create(0, Parser.getContext()); + } + } + + // At this point we are either at the end of the operand or at the start of + // the base-index-scale-expr. If we're not at a "(" this is an immediate + // expression. + + if (!parseOptionalToken(AsmToken::LParen)) { + if (SegReg == 0) + return X86Operand::CreateMem(getPointerWidth(), Imm, StartLoc, EndLoc); + return X86Operand::CreateMem(getPointerWidth(), SegReg, Imm, 0, 0, 1, + StartLoc, EndLoc); + } + + // If we reached here, then eat the ( of the memory operand. Process + // the rest of the memory operand. 
+ unsigned BaseReg = 0, IndexReg = 0, Scale = 1; + SMLoc BaseLoc = getLexer().getLoc(); + const MCExpr *E; + + // Parse BaseReg if one is defined. + if (getLexer().isNot(AsmToken::Comma) && + getLexer().isNot(AsmToken::RParen)) { + if (Parser.parseExpression(E, EndLoc) || !isa<X86MCExpr>(E)) + return nullptr; + + // Sanity check register. + BaseReg = cast<X86MCExpr>(E)->getRegNo(); + if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) { + Error(BaseLoc, "eiz and riz can only be used as index registers", + SMRange(Loc, EndLoc)); + return nullptr; + } + } + + if (parseOptionalToken(AsmToken::Comma)) { + // Following the comma we should have either an index register, or a scale + // value. We don't support the later form, but we want to parse it + // correctly. + // + // Note that even though it would be completely consistent to support + // syntax like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for + // this. + if (getLexer().isNot(AsmToken::RParen)) { + if (Parser.parseTokenLoc(Loc) || Parser.parseExpression(E, EndLoc)) + return nullptr; + + if (!isa<X86MCExpr>(E)) { + // We've parsed an unexpected Scale Value instead of an index + // register. Interpret it as an absolute. + int64_t ScaleVal; + if (!E->evaluateAsAbsolute(ScaleVal, + getStreamer().getAssemblerPtr())) { + Error(Loc, "expected absolute expression"); + return nullptr; + } + if (ScaleVal != 1) + Warning(Loc, "scale factor without index register is ignored"); + Scale = 1; + } else { // IndexReg Found. + IndexReg = cast<X86MCExpr>(E)->getRegNo(); + + if (BaseReg == X86::RIP) { + Error(Loc, "%rip as base register can not have an index register"); + return nullptr; + } + if (IndexReg == X86::RIP) { + Error(Loc, "%rip is not allowed as an index register"); + return nullptr; + } + + if (parseOptionalToken(AsmToken::Comma)) { + // Parse the scale amount: + // ::= ',' [scale-expression] + + // A scale amount without an index is ignored. 
+ if (getLexer().isNot(AsmToken::RParen)) { + int64_t ScaleVal; + if (Parser.parseTokenLoc(Loc) || + Parser.parseAbsoluteExpression(ScaleVal)) { + Error(Loc, "expected scale expression"); + return nullptr; + } + // Validate the scale amount. + if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) && + ScaleVal != 1) { + Error(Loc, "scale factor in 16-bit address must be 1"); + return nullptr; + } + if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && + ScaleVal != 8) { + Error(Loc, "scale factor in address must be 1, 2, 4 or 8"); + return nullptr; + } + Scale = (unsigned)ScaleVal; + } + } + } + } + } + + // Ok, we've eaten the memory operand, verify we have a ')' and eat it too. + if (parseToken(AsmToken::RParen, "unexpected token in memory operand")) + return nullptr; + + // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" -> + // "outb %al, %dx". Out doesn't take a memory form, but this is a widely + // documented form in various unofficial manuals, so a lot of code uses it. + if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && SegReg == 0 && + isa<MCConstantExpr>(Imm) && cast<MCConstantExpr>(Imm)->getValue() == 0) + return X86Operand::CreateDXReg(BaseLoc, BaseLoc); + + StringRef ErrMsg; + if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(), + ErrMsg)) { + Error(BaseLoc, ErrMsg); + return nullptr; + } + + if (SegReg || BaseReg || IndexReg) + return X86Operand::CreateMem(getPointerWidth(), SegReg, Imm, BaseReg, + IndexReg, Scale, StartLoc, EndLoc); + return X86Operand::CreateMem(getPointerWidth(), Imm, StartLoc, EndLoc); + } } } @@ -2087,180 +2296,6 @@ return false; } -/// ParseMemOperand: segment: disp(basereg, indexreg, scale). The '%ds:' prefix -/// has already been parsed if present. 
-std::unique_ptr<X86Operand> X86AsmParser::ParseMemOperand(unsigned SegReg, - SMLoc MemStart) { - - MCAsmParser &Parser = getParser(); - // We have to disambiguate a parenthesized expression "(4+5)" from the start - // of a memory operand with a missing displacement "(%ebx)" or "(,%eax)". The - // only way to do this without lookahead is to eat the '(' and see what is - // after it. - const MCExpr *Disp = MCConstantExpr::create(0, getParser().getContext()); - if (getLexer().isNot(AsmToken::LParen)) { - SMLoc ExprEnd; - if (getParser().parseExpression(Disp, ExprEnd)) return nullptr; - // Disp may be a variable, handle register values. - if (auto *RE = dyn_cast<X86MCExpr>(Disp)) - return X86Operand::CreateReg(RE->getRegNo(), MemStart, ExprEnd); - - // After parsing the base expression we could either have a parenthesized - // memory address or not. If not, return now. If so, eat the (. - if (getLexer().isNot(AsmToken::LParen)) { - // Unless we have a segment register, treat this as an immediate. - if (SegReg == 0) - return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, ExprEnd); - return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1, - MemStart, ExprEnd); - } - - // Eat the '('. - Parser.Lex(); - } else { - // Okay, we have a '('. We don't know if this is an expression or not, but - // so we have to eat the ( to see beyond it. - SMLoc LParenLoc = Parser.getTok().getLoc(); - Parser.Lex(); // Eat the '('. - - if (getLexer().is(AsmToken::Percent) || getLexer().is(AsmToken::Comma)) { - // Nothing to do here, fall into the code below with the '(' part of the - // memory operand consumed. - } else { - SMLoc ExprEnd; - getLexer().UnLex(AsmToken(AsmToken::LParen, "(")); - - // It must be either an parenthesized expression, or an expression that - // begins from a parenthesized expression, parse it now. 
Example: (1+2) or - // (1+2)+3 - if (getParser().parseExpression(Disp, ExprEnd)) - return nullptr; - - // After parsing the base expression we could either have a parenthesized - // memory address or not. If not, return now. If so, eat the (. - if (getLexer().isNot(AsmToken::LParen)) { - // Unless we have a segment register, treat this as an immediate. - if (SegReg == 0) - return X86Operand::CreateMem(getPointerWidth(), Disp, LParenLoc, - ExprEnd); - return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, 0, 0, 1, - MemStart, ExprEnd); - } - - // Eat the '('. - Parser.Lex(); - } - } - - // If we reached here, then we just ate the ( of the memory operand. Process - // the rest of the memory operand. - unsigned BaseReg = 0, IndexReg = 0, Scale = 1; - SMLoc IndexLoc, BaseLoc; - - if (getLexer().is(AsmToken::Percent)) { - SMLoc StartLoc, EndLoc; - BaseLoc = Parser.getTok().getLoc(); - if (ParseRegister(BaseReg, StartLoc, EndLoc)) return nullptr; - if (BaseReg == X86::EIZ || BaseReg == X86::RIZ) { - Error(StartLoc, "eiz and riz can only be used as index registers", - SMRange(StartLoc, EndLoc)); - return nullptr; - } - } - - if (getLexer().is(AsmToken::Comma)) { - Parser.Lex(); // Eat the comma. - IndexLoc = Parser.getTok().getLoc(); - - // Following the comma we should have either an index register, or a scale - // value. We don't support the later form, but we want to parse it - // correctly. - // - // Not that even though it would be completely consistent to support syntax - // like "1(%eax,,1)", the assembler doesn't. Use "eiz" or "riz" for this. 
- if (getLexer().is(AsmToken::Percent)) { - SMLoc L; - if (ParseRegister(IndexReg, L, L)) - return nullptr; - if (BaseReg == X86::RIP) { - Error(IndexLoc, "%rip as base register can not have an index register"); - return nullptr; - } - if (IndexReg == X86::RIP) { - Error(IndexLoc, "%rip is not allowed as an index register"); - return nullptr; - } - - if (getLexer().isNot(AsmToken::RParen)) { - // Parse the scale amount: - // ::= ',' [scale-expression] - if (parseToken(AsmToken::Comma, "expected comma in scale expression")) - return nullptr; - - if (getLexer().isNot(AsmToken::RParen)) { - SMLoc Loc = Parser.getTok().getLoc(); - - int64_t ScaleVal; - if (getParser().parseAbsoluteExpression(ScaleVal)){ - Error(Loc, "expected scale expression"); - return nullptr; - } - - // Validate the scale amount. - if (X86MCRegisterClasses[X86::GR16RegClassID].contains(BaseReg) && - ScaleVal != 1) { - Error(Loc, "scale factor in 16-bit address must be 1"); - return nullptr; - } - if (ScaleVal != 1 && ScaleVal != 2 && ScaleVal != 4 && - ScaleVal != 8) { - Error(Loc, "scale factor in address must be 1, 2, 4 or 8"); - return nullptr; - } - Scale = (unsigned)ScaleVal; - } - } - } else if (getLexer().isNot(AsmToken::RParen)) { - // A scale amount without an index is ignored. - // index. - SMLoc Loc = Parser.getTok().getLoc(); - - int64_t Value; - if (getParser().parseAbsoluteExpression(Value)) - return nullptr; - - if (Value != 1) - Warning(Loc, "scale factor without index register is ignored"); - Scale = 1; - } - } - - // Ok, we've eaten the memory operand, verify we have a ')' and eat it too. - SMLoc MemEnd = Parser.getTok().getEndLoc(); - if (parseToken(AsmToken::RParen, "unexpected token in memory operand")) - return nullptr; - - // This is a terrible hack to handle "out[s]?[bwl]? %al, (%dx)" -> - // "outb %al, %dx". Out doesn't take a memory form, but this is a widely - // documented form in various unofficial manuals, so a lot of code uses it. 
- if (BaseReg == X86::DX && IndexReg == 0 && Scale == 1 && - SegReg == 0 && isa<MCConstantExpr>(Disp) && - cast<MCConstantExpr>(Disp)->getValue() == 0) - return X86Operand::CreateDXReg(BaseLoc, BaseLoc); - - StringRef ErrMsg; - if (CheckBaseRegAndIndexRegAndScale(BaseReg, IndexReg, Scale, is64BitMode(), - ErrMsg)) { - Error(BaseLoc, ErrMsg); - return nullptr; - } - - if (SegReg || BaseReg || IndexReg) - return X86Operand::CreateMem(getPointerWidth(), SegReg, Disp, BaseReg, - IndexReg, Scale, MemStart, MemEnd); - return X86Operand::CreateMem(getPointerWidth(), Disp, MemStart, MemEnd); -} - // Parse either a standard primary expression or a register. bool X86AsmParser::parsePrimaryExpr(const MCExpr *&Res, SMLoc &EndLoc) { MCAsmParser &Parser = getParser(); Index: llvm/test/MC/X86/x86_operands.s =================================================================== --- llvm/test/MC/X86/x86_operands.s +++ llvm/test/MC/X86/x86_operands.s @@ -61,3 +61,406 @@ # CHECK: movl %gs:8, %eax movl %gs:8, %eax +# Make sure we handle parsing uses of variables assigned +# to registers in operands. 
+ +v_ecx = %ecx +v_eax = %eax +v_gs = %gs +v_imm = 4 + +#CHECK: movl %eax, %ecx + movl %eax, v_ecx + +#CHECK: movl $1, %gs:0 + movl $1, v_gs:(,) +#CHECK: movl $1, %gs:(,%eax) + movl $1, v_gs:(,%eax) +#CHECK: movl $1, %gs:(,%eax,2) + movl $1, v_gs:(,%eax,2) +#CHECK: movl $1, %gs:(,%eax,4) + movl $1, v_gs:(,%eax,v_imm) +#CHECK: movl $1, %gs:(,%eax) + movl $1, v_gs:(,v_eax) +#CHECK: movl $1, %gs:(,%eax,2) + movl $1, v_gs:(,v_eax,2) +#CHECK: movl $1, %gs:(,%eax,4) + movl $1, v_gs:(,v_eax,v_imm) +#CHECK: movl $1, %gs:(%ecx) + movl $1, v_gs:(%ecx) +#CHECK: movl $1, %gs:(%ecx) + movl $1, v_gs:(%ecx,) +#CHECK: movl $1, %gs:(%ecx,%eax) + movl $1, v_gs:(%ecx,%eax) +#CHECK: movl $1, %gs:(%ecx,%eax,2) + movl $1, v_gs:(%ecx,%eax,2) +#CHECK: movl $1, %gs:(%ecx,%eax,4) + movl $1, v_gs:(%ecx,%eax,v_imm) +#CHECK: movl $1, %gs:(%ecx,%eax) + movl $1, v_gs:(%ecx,v_eax) +#CHECK: movl $1, %gs:(%ecx,%eax,2) + movl $1, v_gs:(%ecx,v_eax,2) +#CHECK: movl $1, %gs:(%ecx,%eax,4) + movl $1, v_gs:(%ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:(%ecx) + movl $1, v_gs:(v_ecx) +#CHECK: movl $1, %gs:(%ecx) + movl $1, v_gs:(v_ecx,) +#CHECK: movl $1, %gs:(%ecx,%eax) + movl $1, v_gs:(v_ecx,%eax) +#CHECK: movl $1, %gs:(%ecx,%eax,2) + movl $1, v_gs:(v_ecx,%eax,2) +#CHECK: movl $1, %gs:(%ecx,%eax,4) + movl $1, v_gs:(v_ecx,%eax,v_imm) +#CHECK: movl $1, %gs:(%ecx,%eax) + movl $1, v_gs:(v_ecx,v_eax) +#CHECK: movl $1, %gs:(%ecx,%eax,2) + movl $1, v_gs:(v_ecx,v_eax,2) +#CHECK: movl $1, %gs:(%ecx,%eax,4) + movl $1, v_gs:(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:4 + movl $1, v_gs:4 +#CHECK: movl $1, %gs:4 + movl $1, v_gs:4() +#CHECK: movl $1, %gs:4 + movl $1, v_gs:4(,) +#CHECK: movl $1, %gs:4(,%eax) + movl $1, v_gs:4(,%eax) +#CHECK: movl $1, %gs:4(,%eax,2) + movl $1, v_gs:4(,%eax,2) +#CHECK: movl $1, %gs:4(,%eax,4) + movl $1, v_gs:4(,%eax,v_imm) +#CHECK: movl $1, %gs:4(,%eax) + movl $1, v_gs:4(,v_eax) +#CHECK: movl $1, %gs:4(,%eax,2) + movl $1, v_gs:4(,v_eax,2) +#CHECK: movl $1, %gs:4(,%eax,4) + movl $1, 
v_gs:4(,v_eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:4(%ecx) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:4(%ecx,) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:4(%ecx,%eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:4(%ecx,%eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:4(%ecx,%eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:4(%ecx,v_eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:4(%ecx,v_eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:4(%ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:4(v_ecx) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:4(v_ecx,) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:4(v_ecx,%eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:4(v_ecx,%eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:4(v_ecx,%eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:4(v_ecx,v_eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:4(v_ecx,v_eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:4(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:4 + movl $1, v_gs:v_imm +#CHECK: movl $1, %gs:4 + movl $1, v_gs:v_imm() +#CHECK: movl $1, %gs:4 + movl $1, v_gs:v_imm(,) +#CHECK: movl $1, %gs:4(,%eax) + movl $1, v_gs:v_imm(,%eax) +#CHECK: movl $1, %gs:4(,%eax,2) + movl $1, v_gs:v_imm(,%eax,2) +#CHECK: movl $1, %gs:4(,%eax,4) + movl $1, v_gs:v_imm(,%eax,v_imm) +#CHECK: movl $1, %gs:4(,%eax) + movl $1, v_gs:v_imm(,v_eax) +#CHECK: movl $1, %gs:4(,%eax,2) + movl $1, v_gs:v_imm(,v_eax,2) +#CHECK: movl $1, %gs:4(,%eax,4) + movl $1, v_gs:v_imm(,v_eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:v_imm(%ecx) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:v_imm(%ecx,) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:v_imm(%ecx,%eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:v_imm(%ecx,%eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:v_imm(%ecx,%eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, 
v_gs:v_imm(%ecx,v_eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:v_imm(%ecx,v_eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:v_imm(%ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:v_imm(v_ecx) +#CHECK: movl $1, %gs:4(%ecx) + movl $1, v_gs:v_imm(v_ecx,) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:v_imm(v_ecx,%eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:v_imm(v_ecx,%eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:v_imm(v_ecx,%eax,v_imm) +#CHECK: movl $1, %gs:4(%ecx,%eax) + movl $1, v_gs:v_imm(v_ecx,v_eax) +#CHECK: movl $1, %gs:4(%ecx,%eax,2) + movl $1, v_gs:v_imm(v_ecx,v_eax,2) +#CHECK: movl $1, %gs:4(%ecx,%eax,4) + movl $1, v_gs:v_imm(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:8 + movl $1, v_gs:(v_imm+4) +#CHECK: movl $1, %gs:8 + movl $1, v_gs:(v_imm+4)() +#CHECK: movl $1, %gs:8 + movl $1, v_gs:(v_imm+4)(,) +#CHECK: movl $1, %gs:8(,%eax) + movl $1, v_gs:(v_imm+4)(,%eax) +#CHECK: movl $1, %gs:8(,%eax,2) + movl $1, v_gs:(v_imm+4)(,%eax,2) +#CHECK: movl $1, %gs:8(,%eax,4) + movl $1, v_gs:(v_imm+4)(,%eax,v_imm) +#CHECK: movl $1, %gs:8(,%eax) + movl $1, v_gs:(v_imm+4)(,v_eax) +#CHECK: movl $1, %gs:8(,%eax,2) + movl $1, v_gs:(v_imm+4)(,v_eax,2) +#CHECK: movl $1, %gs:8(,%eax,4) + movl $1, v_gs:(v_imm+4)(,v_eax,v_imm) +#CHECK: movl $1, %gs:8(%ecx) + movl $1, v_gs:(v_imm+4)(%ecx) +#CHECK: movl $1, %gs:8(%ecx) + movl $1, v_gs:(v_imm+4)(%ecx,) +#CHECK: movl $1, %gs:8(%ecx,%eax) + movl $1, v_gs:(v_imm+4)(%ecx,%eax) +#CHECK: movl $1, %gs:8(%ecx,%eax,2) + movl $1, v_gs:(v_imm+4)(%ecx,%eax,2) +#CHECK: movl $1, %gs:8(%ecx,%eax,4) + movl $1, v_gs:(v_imm+4)(%ecx,%eax,v_imm) +#CHECK: movl $1, %gs:8(%ecx,%eax) + movl $1, v_gs:(v_imm+4)(%ecx,v_eax) +#CHECK: movl $1, %gs:8(%ecx,%eax,2) + movl $1, v_gs:(v_imm+4)(%ecx,v_eax,2) +#CHECK: movl $1, %gs:8(%ecx,%eax,4) + movl $1, v_gs:(v_imm+4)(%ecx,v_eax,v_imm) +#CHECK: movl $1, %gs:8(%ecx) + movl $1, v_gs:(v_imm+4)(v_ecx) +#CHECK: movl $1, %gs:8(%ecx) + movl $1, 
v_gs:(v_imm+4)(v_ecx,) +#CHECK: movl $1, %gs:8(%ecx,%eax) + movl $1, v_gs:(v_imm+4)(v_ecx,%eax) +#CHECK: movl $1, %gs:8(%ecx,%eax,2) + movl $1, v_gs:(v_imm+4)(v_ecx,%eax,2) +#CHECK: movl $1, %gs:8(%ecx,%eax,4) + movl $1, v_gs:(v_imm+4)(v_ecx,%eax,v_imm) +#CHECK: movl $1, %gs:8(%ecx,%eax) + movl $1, v_gs:(v_imm+4)(v_ecx,v_eax) +#CHECK: movl $1, %gs:8(%ecx,%eax,2) + movl $1, v_gs:(v_imm+4)(v_ecx,v_eax,2) +#CHECK: movl $1, %gs:8(%ecx,%eax,4) + movl $1, v_gs:(v_imm+4)(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:0 + movl $1, %fs:(,) +#CHECK: movl $1, %fs:(,%eax) + movl $1, %fs:(,%eax) +#CHECK: movl $1, %fs:(,%eax,2) + movl $1, %fs:(,%eax,2) +#CHECK: movl $1, %fs:(,%eax,4) + movl $1, %fs:(,%eax,v_imm) +#CHECK: movl $1, %fs:(,%eax) + movl $1, %fs:(,v_eax) +#CHECK: movl $1, %fs:(,%eax,2) + movl $1, %fs:(,v_eax,2) +#CHECK: movl $1, %fs:(,%eax,4) + movl $1, %fs:(,v_eax,v_imm) +#CHECK: movl $1, %fs:(%ecx) + movl $1, %fs:(%ecx) +#CHECK: movl $1, %fs:(%ecx) + movl $1, %fs:(%ecx,) +#CHECK: movl $1, %fs:(%ecx,%eax) + movl $1, %fs:(%ecx,%eax) +#CHECK: movl $1, %fs:(%ecx,%eax,2) + movl $1, %fs:(%ecx,%eax,2) +#CHECK: movl $1, %fs:(%ecx,%eax,4) + movl $1, %fs:(%ecx,%eax,v_imm) +#CHECK: movl $1, %fs:(%ecx,%eax) + movl $1, %fs:(%ecx,v_eax) +#CHECK: movl $1, %fs:(%ecx,%eax,2) + movl $1, %fs:(%ecx,v_eax,2) +#CHECK: movl $1, %fs:(%ecx,%eax,4) + movl $1, %fs:(%ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:(%ecx) + movl $1, %fs:(v_ecx) +#CHECK: movl $1, %fs:(%ecx) + movl $1, %fs:(v_ecx,) +#CHECK: movl $1, %fs:(%ecx,%eax) + movl $1, %fs:(v_ecx,%eax) +#CHECK: movl $1, %fs:(%ecx,%eax,2) + movl $1, %fs:(v_ecx,%eax,2) +#CHECK: movl $1, %fs:(%ecx,%eax,4) + movl $1, %fs:(v_ecx,%eax,v_imm) +#CHECK: movl $1, %fs:(%ecx,%eax) + movl $1, %fs:(v_ecx,v_eax) +#CHECK: movl $1, %fs:(%ecx,%eax,2) + movl $1, %fs:(v_ecx,v_eax,2) +#CHECK: movl $1, %fs:(%ecx,%eax,4) + movl $1, %fs:(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:4 + movl $1, %fs:4 +#CHECK: movl $1, %fs:4 + movl $1, %fs:4() +#CHECK: movl $1, %fs:4 + movl $1, 
%fs:4(,) +#CHECK: movl $1, %fs:4(,%eax) + movl $1, %fs:4(,%eax) +#CHECK: movl $1, %fs:4(,%eax,2) + movl $1, %fs:4(,%eax,2) +#CHECK: movl $1, %fs:4(,%eax,4) + movl $1, %fs:4(,%eax,v_imm) +#CHECK: movl $1, %fs:4(,%eax) + movl $1, %fs:4(,v_eax) +#CHECK: movl $1, %fs:4(,%eax,2) + movl $1, %fs:4(,v_eax,2) +#CHECK: movl $1, %fs:4(,%eax,4) + movl $1, %fs:4(,v_eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:4(%ecx) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:4(%ecx,) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:4(%ecx,%eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:4(%ecx,%eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:4(%ecx,%eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:4(%ecx,v_eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:4(%ecx,v_eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:4(%ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:4(v_ecx) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:4(v_ecx,) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:4(v_ecx,%eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:4(v_ecx,%eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:4(v_ecx,%eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:4(v_ecx,v_eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:4(v_ecx,v_eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:4(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:4 + movl $1, %fs:v_imm +#CHECK: movl $1, %fs:4 + movl $1, %fs:v_imm() +#CHECK: movl $1, %fs:4 + movl $1, %fs:v_imm(,) +#CHECK: movl $1, %fs:4(,%eax) + movl $1, %fs:v_imm(,%eax) +#CHECK: movl $1, %fs:4(,%eax,2) + movl $1, %fs:v_imm(,%eax,2) +#CHECK: movl $1, %fs:4(,%eax,4) + movl $1, %fs:v_imm(,%eax,v_imm) +#CHECK: movl $1, %fs:4(,%eax) + movl $1, %fs:v_imm(,v_eax) +#CHECK: movl $1, %fs:4(,%eax,2) + movl $1, %fs:v_imm(,v_eax,2) +#CHECK: movl $1, %fs:4(,%eax,4) + movl $1, %fs:v_imm(,v_eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:v_imm(%ecx) +#CHECK: movl $1, 
%fs:4(%ecx) + movl $1, %fs:v_imm(%ecx,) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:v_imm(%ecx,%eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:v_imm(%ecx,%eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:v_imm(%ecx,%eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:v_imm(%ecx,v_eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:v_imm(%ecx,v_eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:v_imm(%ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:v_imm(v_ecx) +#CHECK: movl $1, %fs:4(%ecx) + movl $1, %fs:v_imm(v_ecx,) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:v_imm(v_ecx,%eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:v_imm(v_ecx,%eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:v_imm(v_ecx,%eax,v_imm) +#CHECK: movl $1, %fs:4(%ecx,%eax) + movl $1, %fs:v_imm(v_ecx,v_eax) +#CHECK: movl $1, %fs:4(%ecx,%eax,2) + movl $1, %fs:v_imm(v_ecx,v_eax,2) +#CHECK: movl $1, %fs:4(%ecx,%eax,4) + movl $1, %fs:v_imm(v_ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:8 + movl $1, %fs:(v_imm+4) +#CHECK: movl $1, %fs:8 + movl $1, %fs:(v_imm+4)() +#CHECK: movl $1, %fs:8 + movl $1, %fs:(v_imm+4)(,) +#CHECK: movl $1, %fs:8(,%eax) + movl $1, %fs:(v_imm+4)(,%eax) +#CHECK: movl $1, %fs:8(,%eax,2) + movl $1, %fs:(v_imm+4)(,%eax,2) +#CHECK: movl $1, %fs:8(,%eax,4) + movl $1, %fs:(v_imm+4)(,%eax,v_imm) +#CHECK: movl $1, %fs:8(,%eax) + movl $1, %fs:(v_imm+4)(,v_eax) +#CHECK: movl $1, %fs:8(,%eax,2) + movl $1, %fs:(v_imm+4)(,v_eax,2) +#CHECK: movl $1, %fs:8(,%eax,4) + movl $1, %fs:(v_imm+4)(,v_eax,v_imm) +#CHECK: movl $1, %fs:8(%ecx) + movl $1, %fs:(v_imm+4)(%ecx) +#CHECK: movl $1, %fs:8(%ecx) + movl $1, %fs:(v_imm+4)(%ecx,) +#CHECK: movl $1, %fs:8(%ecx,%eax) + movl $1, %fs:(v_imm+4)(%ecx,%eax) +#CHECK: movl $1, %fs:8(%ecx,%eax,2) + movl $1, %fs:(v_imm+4)(%ecx,%eax,2) +#CHECK: movl $1, %fs:8(%ecx,%eax,4) + movl $1, %fs:(v_imm+4)(%ecx,%eax,v_imm) +#CHECK: movl $1, %fs:8(%ecx,%eax) + movl $1, %fs:(v_imm+4)(%ecx,v_eax) 
+#CHECK: movl $1, %fs:8(%ecx,%eax,2) + movl $1, %fs:(v_imm+4)(%ecx,v_eax,2) +#CHECK: movl $1, %fs:8(%ecx,%eax,4) + movl $1, %fs:(v_imm+4)(%ecx,v_eax,v_imm) +#CHECK: movl $1, %fs:8(%ecx) + movl $1, %fs:(v_imm+4)(v_ecx) +#CHECK: movl $1, %fs:8(%ecx) + movl $1, %fs:(v_imm+4)(v_ecx,) +#CHECK: movl $1, %fs:8(%ecx,%eax) + movl $1, %fs:(v_imm+4)(v_ecx,%eax) +#CHECK: movl $1, %fs:8(%ecx,%eax,2) + movl $1, %fs:(v_imm+4)(v_ecx,%eax,2) +#CHECK: movl $1, %fs:8(%ecx,%eax,4) + movl $1, %fs:(v_imm+4)(v_ecx,%eax,v_imm) +#CHECK: movl $1, %fs:8(%ecx,%eax) + movl $1, %fs:(v_imm+4)(v_ecx,v_eax) +#CHECK: movl $1, %fs:8(%ecx,%eax,2) + movl $1, %fs:(v_imm+4)(v_ecx,v_eax,2) +#CHECK: movl $1, %fs:8(%ecx,%eax,4) + movl $1, %fs:(v_imm+4)(v_ecx,v_eax,v_imm)