Index: lib/CodeGen/MachineVerifier.cpp =================================================================== --- lib/CodeGen/MachineVerifier.cpp +++ lib/CodeGen/MachineVerifier.cpp @@ -979,6 +979,29 @@ LLT DstTy = MRI->getType(MI->getOperand(0).getReg()); if (DstTy.isVector()) report("Instruction cannot use a vector result type", MI); + + if (MI->getOpcode() == TargetOpcode::G_CONSTANT) { + if (!MI->getOperand(1).isCImm()) { + report("G_CONSTANT operand must be cimm", MI); + break; + } + + const ConstantInt *CI = MI->getOperand(1).getCImm(); + if (CI->getBitWidth() != DstTy.getSizeInBits()) + report("inconsistent constant size", MI); + } else { + if (!MI->getOperand(1).isFPImm()) { + report("G_FCONSTANT operand must be fpimm", MI); + break; + } + const ConstantFP *CF = MI->getOperand(1).getFPImm(); + + if (APFloat::getSizeInBits(CF->getValueAPF().getSemantics()) != + DstTy.getSizeInBits()) { + report("inconsistent constant size", MI); + } + } + break; } case TargetOpcode::G_LOAD: Index: test/CodeGen/AArch64/GlobalISel/localizer.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/localizer.mir +++ test/CodeGen/AArch64/GlobalISel/localizer.mir @@ -24,9 +24,9 @@ body: | bb.0: ; CHECK-LABEL: name: local_use - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]] - %0:gpr(s32) = G_CONSTANT 1 + %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 ... 
@@ -38,10 +38,10 @@ ; CHECK-LABEL: name: non_local_1use ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]] ; CHECK: bb.1: - ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[ADD]] ; Existing registers should be left untouched @@ -50,7 +50,7 @@ bb.0: successors: %bb.1 - %0:gpr(s32) = G_CONSTANT 1 + %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 bb.1: @@ -65,10 +65,10 @@ ; CHECK-LABEL: name: non_local_2uses ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]] ; CHECK: bb.1: - ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[C1]], [[C1]] ; Existing registers should be left untouched @@ -77,7 +77,7 @@ bb.0: successors: %bb.1 - %0:gpr(s32) = G_CONSTANT 1 + %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 bb.1: @@ -93,11 +93,11 @@ ; CHECK-LABEL: name: non_local_phi_use ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]] ; CHECK: bb.1: ; CHECK: successors: %bb.2(0x80000000) - ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: bb.2: ; CHECK: [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1 ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[PHI]] @@ -108,7 +108,7 @@ bb.0: successors: %bb.1 - %0:gpr(s32) = G_CONSTANT 1 + %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 bb.1: @@ -128,14 +128,14 @@ ; CHECK-LABEL: name: 
non_local_phi_use_followed_by_use ; CHECK: bb.0: ; CHECK: successors: %bb.1(0x80000000) - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD:%[0-9]+]]:gpr(s32) = G_ADD [[C]], [[C]] ; CHECK: bb.1: ; CHECK: successors: %bb.2(0x80000000) - ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C1:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: bb.2: ; CHECK: [[PHI:%[0-9]+]]:gpr(s32) = PHI [[C1]](s32), %bb.1 - ; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT 1 + ; CHECK: [[C2:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 1 ; CHECK: [[ADD1:%[0-9]+]]:gpr(s32) = G_ADD [[PHI]], [[C2]] ; Existing registers should be left untouched @@ -144,7 +144,7 @@ bb.0: successors: %bb.1 - %0:gpr(s32) = G_CONSTANT 1 + %0:gpr(s32) = G_CONSTANT i32 1 %1:gpr(s32) = G_ADD %0, %0 bb.1: Index: test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads.mir +++ test/CodeGen/AArch64/GlobalISel/prelegalizercombiner-extending-loads.mir @@ -193,7 +193,7 @@ ; CHECK: $w1 = COPY [[T1]](s32) %0:_(p0) = COPY $x0 %1:_(s8) = G_LOAD %0 :: (load 1 from %ir.addr) - %2:_(s8) = G_CONSTANT i32 -1 + %2:_(s8) = G_CONSTANT i8 -1 %3:_(s8) = G_XOR %1, %2 %5:_(s32) = G_ANYEXT %3 %6:_(s32) = G_SEXT %1 @@ -218,7 +218,7 @@ $w0 = COPY %2 $w1 = COPY %3 ... - + --- name: test_1signext_1zeroext body: | Index: test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir +++ test/CodeGen/AArch64/GlobalISel/regbankselect-default.mir @@ -437,8 +437,8 @@ body: | bb.0: ; CHECK-LABEL: name: test_constant_s32 - ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT 123 - %0(s32) = G_CONSTANT 123 + ; CHECK: [[C:%[0-9]+]]:gpr(s32) = G_CONSTANT i32 123 + %0(s32) = G_CONSTANT i32 123 ... 
--- @@ -449,8 +449,8 @@ body: | bb.0: ; CHECK-LABEL: name: test_constant_p0 - ; CHECK: [[C:%[0-9]+]]:gpr(p0) = G_CONSTANT 0 - %0(p0) = G_CONSTANT 0 + ; CHECK: [[C:%[0-9]+]]:gpr(p0) = G_CONSTANT i64 0 + %0(p0) = G_CONSTANT i64 0 ... --- Index: test/CodeGen/AArch64/GlobalISel/select-binop.mir =================================================================== --- test/CodeGen/AArch64/GlobalISel/select-binop.mir +++ test/CodeGen/AArch64/GlobalISel/select-binop.mir @@ -152,7 +152,7 @@ ; CHECK: [[ADDXri:%[0-9]+]]:gpr64sp = ADDXri [[COPY]], 1, 0 ; CHECK: $x0 = COPY [[ADDXri]] %0(s64) = COPY $x0 - %1(s64) = G_CONSTANT i32 1 + %1(s64) = G_CONSTANT i64 1 %2(s64) = G_ADD %0, %1 $x0 = COPY %2(s64) ... Index: test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir +++ test/CodeGen/AMDGPU/GlobalISel/inst-select-implicit-def.mir @@ -45,7 +45,7 @@ body: | bb.0: %0:vgpr(p0) = G_IMPLICIT_DEF - %1:vgpr(s32) = G_CONSTANT 4 + %1:vgpr(s32) = G_CONSTANT i32 4 G_STORE %1, %0 :: (store 4) ... @@ -62,7 +62,7 @@ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4, implicit $exec ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p1) = G_IMPLICIT_DEF - %1:vgpr(s32) = G_CONSTANT 4 + %1:vgpr(s32) = G_CONSTANT i32 4 G_STORE %1, %0 :: (store 4, addrspace 1) ... --- @@ -78,7 +78,7 @@ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4, implicit $exec ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p3) = G_IMPLICIT_DEF - %1:vgpr(s32) = G_CONSTANT 4 + %1:vgpr(s32) = G_CONSTANT i32 4 G_STORE %1, %0 :: (store 4, addrspace 1) ... 
--- @@ -94,6 +94,6 @@ ; GCN: [[V_MOV_B32_e32_:%[0-9]+]]:vgpr_32 = V_MOV_B32_e32 4, implicit $exec ; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr %0:vgpr(p4) = G_IMPLICIT_DEF - %1:vgpr(s32) = G_CONSTANT 4 + %1:vgpr(s32) = G_CONSTANT i32 4 G_STORE %1, %0 :: (store 4, addrspace 1) ... Index: test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-and.mir @@ -31,7 +31,7 @@ ; CHECK: S_NOP 0, implicit [[AND]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 - %2:_(s32) = G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %3:_(s1) = G_ICMP intpred(ne), %0, %2 %4:_(s1) = G_ICMP intpred(ne), %1, %2 %5:_(s32) = G_AND %0, %1 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-extract-vector-elt.mir @@ -253,7 +253,7 @@ ; CHECK-LABEL: name: extract_vector_elt_0_v2i1_i1 ; CHECK: [[DEF:%[0-9]+]]:_(<2 x s1>) = G_IMPLICIT_DEF - ; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i32 0 + ; CHECK: [[C:%[0-9]+]]:_(s1) = G_CONSTANT i1 false ; CHECK: [[UV:%[0-9]+]]:_(s1), [[UV1:%[0-9]+]]:_(s1) = G_UNMERGE_VALUES [[DEF]](<2 x s1>) ; CHECK: [[SEXT:%[0-9]+]]:_(s32) = G_SEXT [[UV]](s1) ; CHECK: [[SEXT1:%[0-9]+]]:_(s32) = G_SEXT [[UV1]](s1) @@ -263,7 +263,7 @@ ; CHECK: [[COPY:%[0-9]+]]:_(s32) = COPY [[EVEC]](s32) ; CHECK: $vgpr0 = COPY [[COPY]](s32) %0:_(<2 x s1>) = G_IMPLICIT_DEF - %1:_(s1) = G_CONSTANT i32 0 + %1:_(s1) = G_CONSTANT i1 false %2:_(s1) = G_EXTRACT_VECTOR_ELT %0, %1 %3:_(s32) = G_ANYEXT %2 $vgpr0 = COPY %3 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir +++ 
test/CodeGen/AMDGPU/GlobalISel/legalize-icmp.mir @@ -88,7 +88,7 @@ ; CHECK: [[SELECT:%[0-9]+]]:_(s32) = G_SELECT [[ICMP]](s1), [[COPY3]], [[COPY4]] ; CHECK: [[COPY5:%[0-9]+]]:_(s32) = COPY [[SELECT]](s32) ; CHECK: $vgpr0 = COPY [[COPY5]](s32) - %0:_(s8) = G_CONSTANT i16 0 + %0:_(s8) = G_CONSTANT i8 0 %1:_(s32) = COPY $vgpr0 %2:_(s8) = G_TRUNC %1 %3:_(s1) = G_ICMP intpred(ne), %0, %2 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-or.mir @@ -31,7 +31,7 @@ ; CHECK: S_NOP 0, implicit [[OR]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 - %2:_(s32) = G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %3:_(s1) = G_ICMP intpred(ne), %0, %2 %4:_(s1) = G_ICMP intpred(ne), %1, %2 %5:_(s32) = G_OR %0, %1 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-select.mir @@ -97,8 +97,8 @@ %1:_(s32) = COPY $vgpr0 %2:_(s1) = G_ICMP intpred(ne), %0, %1 - %3:_(s8) = G_CONSTANT i16 1 - %4:_(s8) = G_CONSTANT i16 2 + %3:_(s8) = G_CONSTANT i8 1 + %4:_(s8) = G_CONSTANT i8 2 %5:_(s8) = G_SELECT %2, %3, %4 %6:_(s32) = G_ANYEXT %5 $vgpr0 = COPY %6 @@ -125,8 +125,8 @@ %1:_(s32) = COPY $vgpr0 %2:_(s1) = G_ICMP intpred(ne), %0, %1 - %3:_(s7) = G_CONSTANT i16 1 - %4:_(s7) = G_CONSTANT i16 2 + %3:_(s7) = G_CONSTANT i7 1 + %4:_(s7) = G_CONSTANT i7 2 %5:_(s7) = G_SELECT %2, %3, %4 %6:_(s32) = G_ANYEXT %5 $vgpr0 = COPY %6 Index: test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir +++ test/CodeGen/AMDGPU/GlobalISel/legalize-xor.mir @@ -31,7 +31,7 @@ ; CHECK: S_NOP 0, implicit [[XOR]](s32) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 - %2:_(s32) 
= G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %3:_(s1) = G_ICMP intpred(ne), %0, %2 %4:_(s1) = G_ICMP intpred(ne), %1, %2 %5:_(s32) = G_XOR %0, %1 Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-and.mir @@ -77,7 +77,7 @@ ; CHECK-LABEL: name: and_i1_scc_scc ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; CHECK: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]] ; CHECK: [[ICMP1:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]] ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1) @@ -86,7 +86,7 @@ ; CHECK: S_NOP 0, implicit [[AND]](s1) %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 - %2:_(s32) = G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(ne), %0, %2 %5:_(s1) = G_ICMP intpred(ne), %1, %2 %6:_(s1) = G_AND %4, %5 @@ -103,7 +103,7 @@ ; CHECK-LABEL: name: and_i1_vcc_vcc ; CHECK: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; CHECK: [[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr1 - ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[ICMP:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]] ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]] ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1) @@ -112,7 +112,7 @@ ; CHECK: S_NOP 0, implicit [[AND]](s1) %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $vgpr1 - %2:_(s32) = G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(ne), %0, %2 %5:_(s1) = G_ICMP intpred(ne), %1, %2 %6:_(s1) = G_AND %4, %5 @@ -129,7 +129,7 @@ ; CHECK-LABEL: name: and_i1_scc_vcc ; CHECK: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; CHECK: 
[[COPY1:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 - ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; CHECK: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(ne), [[COPY]](s32), [[C]] ; CHECK: [[ICMP1:%[0-9]+]]:vcc(s1) = G_ICMP intpred(ne), [[COPY1]](s32), [[C]] ; CHECK: [[COPY2:%[0-9]+]]:sgpr(s1) = COPY [[ICMP]](s1) @@ -138,7 +138,7 @@ ; CHECK: S_NOP 0, implicit [[AND]](s1) %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $vgpr0 - %2:_(s32) = G_CONSTANT i64 0 + %2:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(ne), %0, %2 %5:_(s1) = G_ICMP intpred(ne), %1, %2 %6:_(s1) = G_AND %4, %5 Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-default.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-default.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-default.mir @@ -27,8 +27,9 @@ body: | bb.0: ; CHECK-LABEL: name: test_fconstant_f16_1 - ; CHECK: [[C:%[0-9]+]]:sgpr(s32) = G_FCONSTANT half 0xH3C00 - %0:_(s32) = G_FCONSTANT half 1.0 + ; CHECK: [[C:%[0-9]+]]:sgpr(s16) = G_FCONSTANT half 0xH3C00 + %0:_(s16) = G_FCONSTANT half 1.0 + %1:_(s32) = G_ANYEXT %0 ... 
--- Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-sadde.mir @@ -13,20 +13,20 @@ ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[SADDE:%[0-9]+]]:sgpr(s32), [[SADDE1:%[0-9]+]]:scc(s1) = G_SADDE [[COPY]], [[COPY1]], [[ICMP]] ; GREEDY-LABEL: name: sadde_s32_sss ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[SADDE:%[0-9]+]]:sgpr(s32), [[SADDE1:%[0-9]+]]:scc(s1) = G_SADDE [[COPY]], [[COPY1]], [[ICMP]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s32) = COPY $sgpr2 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_SADDE %0, %1, %4 ... 
@@ -42,7 +42,7 @@ ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -51,7 +51,7 @@ ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -59,7 +59,7 @@ %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s32) = COPY $sgpr1 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_SADDE %0, %1, %4 ... 
Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-ssube.mir @@ -13,20 +13,20 @@ ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[SSUBE:%[0-9]+]]:sgpr(s32), [[SSUBE1:%[0-9]+]]:scc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[ICMP]] ; GREEDY-LABEL: name: ssube_s32_sss ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[SSUBE:%[0-9]+]]:sgpr(s32), [[SSUBE1:%[0-9]+]]:scc(s1) = G_SSUBE [[COPY]], [[COPY1]], [[ICMP]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s32) = COPY $sgpr2 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_SSUBE %0, %1, %4 ... 
@@ -42,7 +42,7 @@ ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -51,7 +51,7 @@ ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -59,7 +59,7 @@ %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s32) = COPY $sgpr1 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_SSUBE %0, %1, %4 ... 
Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-uadde.mir @@ -12,20 +12,20 @@ ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:scc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]] ; GREEDY-LABEL: name: uadde_s32_sss ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[UADDE:%[0-9]+]]:sgpr(s32), [[UADDE1:%[0-9]+]]:scc(s1) = G_UADDE [[COPY]], [[COPY1]], [[ICMP]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s32) = COPY $sgpr2 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_UADDE %0, %1, %4 ... 
@@ -41,7 +41,7 @@ ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -50,7 +50,7 @@ ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -58,7 +58,7 @@ %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s32) = COPY $sgpr1 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_UADDE %0, %1, %4 ... 
Index: test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir =================================================================== --- test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir +++ test/CodeGen/AMDGPU/GlobalISel/regbankselect-usube.mir @@ -13,20 +13,20 @@ ; FAST: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:scc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]] ; GREEDY-LABEL: name: usube_s32_sss ; GREEDY: [[COPY:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr2 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[USUBE:%[0-9]+]]:sgpr(s32), [[USUBE1:%[0-9]+]]:scc(s1) = G_USUBE [[COPY]], [[COPY1]], [[ICMP]] %0:_(s32) = COPY $sgpr0 %1:_(s32) = COPY $sgpr1 %2:_(s32) = COPY $sgpr2 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_USUBE %0, %1, %4 ... 
@@ -42,7 +42,7 @@ ; FAST: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; FAST: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; FAST: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; FAST: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; FAST: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; FAST: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; FAST: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -51,7 +51,7 @@ ; GREEDY: [[COPY:%[0-9]+]]:vgpr(s32) = COPY $vgpr0 ; GREEDY: [[COPY1:%[0-9]+]]:sgpr(s32) = COPY $sgpr0 ; GREEDY: [[COPY2:%[0-9]+]]:sgpr(s32) = COPY $sgpr1 - ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i64 0 + ; GREEDY: [[C:%[0-9]+]]:sgpr(s32) = G_CONSTANT i32 0 ; GREEDY: [[ICMP:%[0-9]+]]:scc(s1) = G_ICMP intpred(eq), [[COPY2]](s32), [[C]] ; GREEDY: [[COPY3:%[0-9]+]]:vgpr(s32) = COPY [[COPY1]](s32) ; GREEDY: [[COPY4:%[0-9]+]]:vcc(s1) = COPY [[ICMP]](s1) @@ -59,7 +59,7 @@ %0:_(s32) = COPY $vgpr0 %1:_(s32) = COPY $sgpr0 %2:_(s32) = COPY $sgpr1 - %3:_(s32) = G_CONSTANT i64 0 + %3:_(s32) = G_CONSTANT i32 0 %4:_(s1) = G_ICMP intpred(eq), %2, %3 %5:_(s32), %6:_(s1) = G_USUBE %0, %1, %4 ... 
Index: test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir +++ test/CodeGen/ARM/GlobalISel/arm-instruction-select.mir @@ -1495,7 +1495,7 @@ - { id: 0, class: gprb } body: | bb.0: - %0(s32) = G_CONSTANT 65537 + %0(s32) = G_CONSTANT i32 65537 ; CHECK: %[[C:[0-9]+]]:gpr = MOVi32imm 65537 $r0 = COPY %0(s32) @@ -1512,7 +1512,7 @@ - { id: 0, class: gprb } body: | bb.0: - %0(s32) = G_CONSTANT 42 + %0(s32) = G_CONSTANT i32 42 ; CHECK: %[[C:[0-9]+]]:gpr = MOVi 42, 14, $noreg, $noreg $r0 = COPY %0(s32) Index: test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir +++ test/CodeGen/ARM/GlobalISel/arm-legalize-consts.mir @@ -24,8 +24,8 @@ %4(p0) = COPY $r0 - %0(s32) = G_CONSTANT 42 - ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT 42 + %0(s32) = G_CONSTANT i32 42 + ; CHECK: {{%[0-9]+}}:_(s32) = G_CONSTANT i32 42 %1(s16) = G_CONSTANT i16 21 G_STORE %1(s16), %4(p0) :: (store 2) @@ -48,9 +48,9 @@ ; CHECK: {{%[0-9]+}}:_(s1) = G_TRUNC [[EXT]](s32) ; CHECK-NOT: G_CONSTANT i1 - %5(p0) = G_CONSTANT 0 + %5(p0) = G_CONSTANT i32 0 G_STORE %5(p0), %4(p0) :: (store 4) - ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT 0 + ; CHECK: {{%[0-9]+}}:_(p0) = G_CONSTANT i32 0 $r0 = COPY %0(s32) BX_RET 14, $noreg, implicit $r0 Index: test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir +++ test/CodeGen/ARM/GlobalISel/arm-regbankselect.mir @@ -521,7 +521,7 @@ - { id: 0, class: _ } body: | bb.0: - %0(s32) = G_CONSTANT 42 + %0(s32) = G_CONSTANT i32 42 $r0 = COPY %0(s32) BX_RET 14, $noreg, implicit $r0 ... 
Index: test/CodeGen/ARM/GlobalISel/select-revsh.mir =================================================================== --- test/CodeGen/ARM/GlobalISel/select-revsh.mir +++ test/CodeGen/ARM/GlobalISel/select-revsh.mir @@ -40,7 +40,7 @@ %5(s32) = G_CONSTANT i32 8 %6(s32) = G_LSHR %0(s32), %5(s32) - %7(s32) = G_CONSTANT 255 + %7(s32) = G_CONSTANT i32 255 %8(s32) = G_AND %6(s32), %7(s32) %9(s32) = G_OR %4(s32), %8(s32) @@ -88,7 +88,7 @@ %5(s32) = G_CONSTANT i32 8 %6(s32) = G_LSHR %0(s32), %5(s32) - %7(s32) = G_CONSTANT 255 + %7(s32) = G_CONSTANT i32 255 %8(s32) = G_AND %6(s32), %7(s32) %9(s32) = G_OR %8(s32), %4(s32) @@ -134,7 +134,7 @@ %5(s32) = G_CONSTANT i32 8 %6(s32) = G_LSHR %0(s32), %5(s32) - %7(s32) = G_CONSTANT 255 + %7(s32) = G_CONSTANT i32 255 %8(s32) = G_AND %6(s32), %7(s32) %9(s32) = G_OR %4(s32), %8(s32) Index: test/CodeGen/X86/GlobalISel/select-ashr-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-ashr-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-ashr-scalar.mir @@ -131,7 +131,7 @@ ; ALL: $rax = COPY [[SAR64ri]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 5 + %1(s8) = G_CONSTANT i8 5 %2(s64) = G_ASHR %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -162,7 +162,7 @@ ; ALL: $rax = COPY [[SAR64r1_]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 1 + %1(s8) = G_CONSTANT i8 1 %2(s64) = G_ASHR %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -229,7 +229,7 @@ ; ALL: $eax = COPY [[SAR32ri]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 5 + %1(s8) = G_CONSTANT i8 5 %2(s32) = G_ASHR %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -260,7 +260,7 @@ ; ALL: $eax = COPY [[SAR32r1_]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 1 + %1(s8) = G_CONSTANT i8 1 %2(s32) = G_ASHR %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -332,7 +332,7 @@ ; ALL: $ax = COPY [[SAR16ri]] ; ALL: 
RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 5 + %2(s8) = G_CONSTANT i8 5 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_ASHR %1, %2 $ax = COPY %3(s16) @@ -366,7 +366,7 @@ ; ALL: $ax = COPY [[SAR16r1_]] ; ALL: RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 1 + %2(s8) = G_CONSTANT i8 1 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_ASHR %1, %2 $ax = COPY %3(s16) Index: test/CodeGen/X86/GlobalISel/select-lshr-scalar.mir =================================================================== --- test/CodeGen/X86/GlobalISel/select-lshr-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-lshr-scalar.mir @@ -131,7 +131,7 @@ ; ALL: $rax = COPY [[SHR64ri]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 5 + %1(s8) = G_CONSTANT i8 5 %2(s64) = G_LSHR %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -162,7 +162,7 @@ ; ALL: $rax = COPY [[SHR64r1_]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 1 + %1(s8) = G_CONSTANT i8 1 %2(s64) = G_LSHR %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -229,7 +229,7 @@ ; ALL: $eax = COPY [[SHR32ri]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 5 + %1(s8) = G_CONSTANT i8 5 %2(s32) = G_LSHR %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -260,7 +260,7 @@ ; ALL: $eax = COPY [[SHR32r1_]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 1 + %1(s8) = G_CONSTANT i8 1 %2(s32) = G_LSHR %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -332,7 +332,7 @@ ; ALL: $ax = COPY [[SHR16ri]] ; ALL: RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 5 + %2(s8) = G_CONSTANT i8 5 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_LSHR %1, %2 $ax = COPY %3(s16) @@ -366,7 +366,7 @@ ; ALL: $ax = COPY [[SHR16r1_]] ; ALL: RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 1 + %2(s8) = G_CONSTANT i8 1 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_LSHR %1, %2 $ax = COPY %3(s16) Index: test/CodeGen/X86/GlobalISel/select-shl-scalar.mir 
=================================================================== --- test/CodeGen/X86/GlobalISel/select-shl-scalar.mir +++ test/CodeGen/X86/GlobalISel/select-shl-scalar.mir @@ -132,7 +132,7 @@ ; ALL: $rax = COPY [[SHL64ri]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 5 + %1(s8) = G_CONSTANT i8 5 %2(s64) = G_SHL %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -163,7 +163,7 @@ ; ALL: $rax = COPY [[ADD64rr]] ; ALL: RET 0, implicit $rax %0(s64) = COPY $rdi - %1(s8) = G_CONSTANT i64 1 + %1(s8) = G_CONSTANT i8 1 %2(s64) = G_SHL %0, %1 $rax = COPY %2(s64) RET 0, implicit $rax @@ -230,7 +230,7 @@ ; ALL: $eax = COPY [[SHL32ri]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 5 + %1(s8) = G_CONSTANT i8 5 %2(s32) = G_SHL %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -261,7 +261,7 @@ ; ALL: $eax = COPY [[ADD32rr]] ; ALL: RET 0, implicit $eax %0(s32) = COPY $edi - %1(s8) = G_CONSTANT i32 1 + %1(s8) = G_CONSTANT i8 1 %2(s32) = G_SHL %0, %1 $eax = COPY %2(s32) RET 0, implicit $eax @@ -333,7 +333,7 @@ ; ALL: $ax = COPY [[SHL16ri]] ; ALL: RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 5 + %2(s8) = G_CONSTANT i8 5 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_SHL %1, %2 $ax = COPY %3(s16) @@ -367,7 +367,7 @@ ; ALL: $ax = COPY [[ADD16rr]] ; ALL: RET 0, implicit $ax %0(s32) = COPY $edi - %2(s8) = G_CONSTANT i16 1 + %2(s8) = G_CONSTANT i8 1 %1(s16) = G_TRUNC %0(s32) %3(s16) = G_SHL %1, %2 $ax = COPY %3(s16) Index: test/Verifier/test_g_constant.mir =================================================================== --- test/Verifier/test_g_constant.mir +++ test/Verifier/test_g_constant.mir @@ -1,4 +1,4 @@ -#RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +#RUN: not llc -march=aarch64 -o - -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s # REQUIRES: global-isel, aarch64-registered-target --- @@ -15,4 +15,27 @@ ; CHECK: Bad machine code: Too few operands 
%1:_(s32) = G_CONSTANT + + ; Not a cimm + ; CHECK: Bad machine code: G_CONSTANT operand must be cimm + %2:_(s32) = G_CONSTANT 0 + + ; Not a cimm + ; CHECK: Bad machine code: G_CONSTANT operand must be cimm + %3:_(s32) = G_CONSTANT float 1.0 + + ; Size is bigger than result + ; CHECK: Bad machine code: inconsistent constant size + %4:_(s32) = G_CONSTANT i64 0 + + ; Size is smaller than result + ; CHECK: Bad machine code: inconsistent constant size + %5:_(s32) = G_CONSTANT i16 0 + + ; CHECK: Bad machine code: inconsistent constant size + %6:_(p0) = G_CONSTANT i32 0 + + ; CHECK: Bad machine code: inconsistent constant size + %7:_(p0) = G_CONSTANT i128 0 + ... Index: test/Verifier/test_g_fconstant.mir =================================================================== --- test/Verifier/test_g_fconstant.mir +++ test/Verifier/test_g_fconstant.mir @@ -1,8 +1,8 @@ -#RUN: not llc -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s +#RUN: not llc -march=aarch64 -o /dev/null -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s # REQUIRES: global-isel, aarch64-registered-target --- -name: test_fconstant_vector +name: test_fconstant legalized: true regBankSelected: false selected: false @@ -12,4 +12,24 @@ bb.0: ; CHECK: Bad machine code: Instruction cannot use a vector result type %0:_(<2 x s32>) = G_FCONSTANT float 0.0 + + ; CHECK: Bad machine code: Too few operands + %1:_(s32) = G_FCONSTANT + + ; Not an fpimm + ; CHECK: Bad machine code: G_FCONSTANT operand must be fpimm + %2:_(s32) = G_FCONSTANT 0 + + ; Not an fpimm + ; CHECK: Bad machine code: G_FCONSTANT operand must be fpimm + %3:_(s32) = G_FCONSTANT i32 0 + + ; Size is bigger than result + ; CHECK: Bad machine code: inconsistent constant size + %4:_(s32) = G_FCONSTANT double 1.0 + + ; Size is smaller than result + ; CHECK: Bad machine code: inconsistent constant size + %5:_(s32) = G_FCONSTANT half 1.0 + ...