Index: lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
===================================================================
--- lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
+++ lib/Target/NVPTX/InstPrinter/NVPTXInstPrinter.cpp
@@ -57,13 +57,13 @@
       OS << "%r";
       break;
     case 4:
-      OS << "%rl";
+      OS << "%rd";
       break;
     case 5:
       OS << "%f";
       break;
     case 6:
-      OS << "%fl";
+      OS << "%fd";
       break;
     }
Index: lib/Target/NVPTX/NVPTXAsmPrinter.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXAsmPrinter.cpp
+++ lib/Target/NVPTX/NVPTXAsmPrinter.cpp
@@ -2010,9 +2010,9 @@
   //  O << "\t.reg .s16 %rc<" << NVPTXNumRegisters << ">;\n";
   //  O << "\t.reg .s16 %rs<" << NVPTXNumRegisters << ">;\n";
   //  O << "\t.reg .s32 %r<" << NVPTXNumRegisters << ">;\n";
-  //  O << "\t.reg .s64 %rl<" << NVPTXNumRegisters << ">;\n";
+  //  O << "\t.reg .s64 %rd<" << NVPTXNumRegisters << ">;\n";
   //  O << "\t.reg .f32 %f<" << NVPTXNumRegisters << ">;\n";
-  //  O << "\t.reg .f64 %fl<" << NVPTXNumRegisters << ">;\n";
+  //  O << "\t.reg .f64 %fd<" << NVPTXNumRegisters << ">;\n";

   // Emit declaration of the virtual registers or 'physical' registers for
   // each register class
Index: lib/Target/NVPTX/NVPTXRegisterInfo.cpp
===================================================================
--- lib/Target/NVPTX/NVPTXRegisterInfo.cpp
+++ lib/Target/NVPTX/NVPTXRegisterInfo.cpp
@@ -53,9 +53,9 @@
     return "%f";
   }
   if (RC == &NVPTX::Float64RegsRegClass) {
-    return "%fl";
+    return "%fd";
   } else if (RC == &NVPTX::Int64RegsRegClass) {
-    return "%rl";
+    return "%rd";
   } else if (RC == &NVPTX::Int32RegsRegClass) {
     return "%r";
   } else if (RC == &NVPTX::Int16RegsRegClass) {
Index: lib/Target/NVPTX/NVPTXRegisterInfo.td
===================================================================
--- lib/Target/NVPTX/NVPTXRegisterInfo.td
+++ lib/Target/NVPTX/NVPTXRegisterInfo.td
@@ -35,9 +35,9 @@
   def P#i  : NVPTXReg<"%p"#i>;  // Predicate
   def RS#i : NVPTXReg<"%rs"#i>; // 16-bit
   def R#i  : NVPTXReg<"%r"#i>;  // 32-bit
-  def RL#i : NVPTXReg<"%rl"#i>; // 64-bit
+  def RL#i : NVPTXReg<"%rd"#i>; // 64-bit
   def F#i  : NVPTXReg<"%f"#i>;  // 32-bit float
-  def FL#i : NVPTXReg<"%fl"#i>; // 64-bit float
+  def FL#i : NVPTXReg<"%fd"#i>; // 64-bit float

   // Arguments
   def ia#i : NVPTXReg<"%ia"#i>;
Index: test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
===================================================================
--- test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
+++ test/CodeGen/NVPTX/arithmetic-fp-sm20.ll
@@ -9,28 +9,28 @@
 ;;; f64

 define double @fadd_f64(double %a, double %b) {
-; CHECK: add.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: add.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
 ; CHECK: ret
   %ret = fadd double %a, %b
   ret double %ret
 }

 define double @fsub_f64(double %a, double %b) {
-; CHECK: sub.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: sub.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
 ; CHECK: ret
   %ret = fsub double %a, %b
   ret double %ret
 }

 define double @fmul_f64(double %a, double %b) {
-; CHECK: mul.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: mul.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
 ; CHECK: ret
   %ret = fmul double %a, %b
   ret double %ret
 }

 define double @fdiv_f64(double %a, double %b) {
-; CHECK: div.rn.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}
+; CHECK: div.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}
 ; CHECK: ret
   %ret = fdiv double %a, %b
   ret double %ret
Index: test/CodeGen/NVPTX/arithmetic-int.ll
===================================================================
--- test/CodeGen/NVPTX/arithmetic-int.ll
+++ test/CodeGen/NVPTX/arithmetic-int.ll
@@ -9,70 +9,70 @@
 ;;; i64

 define i64 @add_i64(i64 %a, i64 %b) {
-; CHECK: add.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: add.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = add i64 %a, %b
   ret i64 %ret
 }

 define i64 @sub_i64(i64 %a, i64 %b) {
-; CHECK: sub.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: sub.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = sub i64 %a, %b
   ret i64 %ret
 }

 define i64 @mul_i64(i64 %a, i64 %b) {
-; CHECK: mul.lo.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: mul.lo.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = mul i64 %a, %b
   ret i64 %ret
 }

 define i64 @sdiv_i64(i64 %a, i64 %b) {
-; CHECK: div.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: div.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = sdiv i64 %a, %b
   ret i64 %ret
 }

 define i64 @udiv_i64(i64 %a, i64 %b) {
-; CHECK: div.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: div.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = udiv i64 %a, %b
   ret i64 %ret
 }

 define i64 @srem_i64(i64 %a, i64 %b) {
-; CHECK: rem.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: rem.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = srem i64 %a, %b
   ret i64 %ret
 }

 define i64 @urem_i64(i64 %a, i64 %b) {
-; CHECK: rem.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: rem.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = urem i64 %a, %b
   ret i64 %ret
 }

 define i64 @and_i64(i64 %a, i64 %b) {
-; CHECK: and.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: and.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = and i64 %a, %b
   ret i64 %ret
 }

 define i64 @or_i64(i64 %a, i64 %b) {
-; CHECK: or.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: or.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = or i64 %a, %b
   ret i64 %ret
 }

 define i64 @xor_i64(i64 %a, i64 %b) {
-; CHECK: xor.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: xor.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %ret = xor i64 %a, %b
   ret i64 %ret
@@ -80,7 +80,7 @@

 define i64 @shl_i64(i64 %a, i64 %b) {
 ; PTX requires 32-bit shift amount
-; CHECK: shl.b64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shl.b64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
 ; CHECK: ret
   %ret = shl i64 %a, %b
   ret i64 %ret
@@ -88,7 +88,7 @@

 define i64 @ashr_i64(i64 %a, i64 %b) {
 ; PTX requires 32-bit shift amount
-; CHECK: shr.s64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shr.s64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
 ; CHECK: ret
   %ret = ashr i64 %a, %b
   ret i64 %ret
@@ -96,7 +96,7 @@

 define i64 @lshr_i64(i64 %a, i64 %b) {
 ; PTX requires 32-bit shift amount
-; CHECK: shr.u64 %rl{{[0-9]+}}, %rl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: shr.u64 %rd{{[0-9]+}}, %rd{{[0-9]+}}, %r{{[0-9]+}}
 ; CHECK: ret
   %ret = lshr i64 %a, %b
   ret i64 %ret
Index: test/CodeGen/NVPTX/call-with-alloca-buffer.ll
===================================================================
--- test/CodeGen/NVPTX/call-with-alloca-buffer.ll
+++ test/CodeGen/NVPTX/call-with-alloca-buffer.ll
@@ -20,11 +20,11 @@
   %buf = alloca [16 x i8], align 4

 ; CHECK: .local .align 4 .b8 __local_depot0[16]
-; CHECK: mov.u64 %rl[[BUF_REG:[0-9]+]]
-; CHECK: cvta.local.u64 %SP, %rl[[BUF_REG]]
+; CHECK: mov.u64 %rd[[BUF_REG:[0-9]+]]
+; CHECK: cvta.local.u64 %SP, %rd[[BUF_REG]]

-; CHECK: ld.param.u64 %rl[[A_REG:[0-9]+]], [kernel_func_param_0]
-; CHECK: ld.f32 %f[[A0_REG:[0-9]+]], [%rl[[A_REG]]]
+; CHECK: ld.param.u64 %rd[[A_REG:[0-9]+]], [kernel_func_param_0]
+; CHECK: ld.f32 %f[[A0_REG:[0-9]+]], [%rd[[A_REG]]]
 ; CHECK: st.f32 [%SP+0], %f[[A0_REG]]

   %0 = load float* %a, align 4
@@ -46,11 +46,11 @@
   %7 = bitcast i8* %arrayidx7 to float*
   store float %6, float* %7, align 4

-; CHECK: add.u64 %rl[[SP_REG:[0-9]+]], %SP, 0
+; CHECK: add.u64 %rd[[SP_REG:[0-9]+]], %SP, 0
 ; CHECK: .param .b64 param0;
-; CHECK-NEXT: st.param.b64 [param0+0], %rl[[A_REG]]
+; CHECK-NEXT: st.param.b64 [param0+0], %rd[[A_REG]]
 ; CHECK-NEXT: .param .b64 param1;
-; CHECK-NEXT: st.param.b64 [param1+0], %rl[[SP_REG]]
+; CHECK-NEXT: st.param.b64 [param1+0], %rd[[SP_REG]]
 ; CHECK-NEXT: call.uni
 ; CHECK-NEXT: callee,
Index: test/CodeGen/NVPTX/compare-int.ll
===================================================================
--- test/CodeGen/NVPTX/compare-int.ll
+++ test/CodeGen/NVPTX/compare-int.ll
@@ -9,8 +9,8 @@
 ;;; i64

 define i64 @icmp_eq_i64(i64 %a, i64 %b) {
-; CHECK: setp.eq.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.eq.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp eq i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -18,8 +18,8 @@
 }

 define i64 @icmp_ne_i64(i64 %a, i64 %b) {
-; CHECK: setp.ne.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ne.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp ne i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -27,8 +27,8 @@
 }

 define i64 @icmp_ugt_i64(i64 %a, i64 %b) {
-; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.gt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp ugt i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -36,8 +36,8 @@
 }

 define i64 @icmp_uge_i64(i64 %a, i64 %b) {
-; CHECK: setp.ge.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ge.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp uge i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -45,8 +45,8 @@
 }

 define i64 @icmp_ult_i64(i64 %a, i64 %b) {
-; CHECK: setp.lt.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.lt.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp ult i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -54,8 +54,8 @@
 }

 define i64 @icmp_ule_i64(i64 %a, i64 %b) {
-; CHECK: setp.le.u64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.le.u64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp ule i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -63,8 +63,8 @@
 }

 define i64 @icmp_sgt_i64(i64 %a, i64 %b) {
-; CHECK: setp.gt.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.gt.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp sgt i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -72,8 +72,8 @@
 }

 define i64 @icmp_sge_i64(i64 %a, i64 %b) {
-; CHECK: setp.ge.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.ge.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp sge i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -81,8 +81,8 @@
 }

 define i64 @icmp_slt_i64(i64 %a, i64 %b) {
-; CHECK: setp.lt.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.lt.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp slt i64 %a, %b
   %ret = zext i1 %cmp to i64
@@ -90,8 +90,8 @@
 }

 define i64 @icmp_sle_i64(i64 %a, i64 %b) {
-; CHECK: setp.le.s64 %p[[P0:[0-9]+]], %rl{{[0-9]+}}, %rl{{[0-9]+}}
-; CHECK: selp.u64 %rl{{[0-9]+}}, 1, 0, %p[[P0]]
+; CHECK: setp.le.s64 %p[[P0:[0-9]+]], %rd{{[0-9]+}}, %rd{{[0-9]+}}
+; CHECK: selp.u64 %rd{{[0-9]+}}, 1, 0, %p[[P0]]
 ; CHECK: ret
   %cmp = icmp sle i64 %a, %b
   %ret = zext i1 %cmp to i64
Index: test/CodeGen/NVPTX/convert-fp.ll
===================================================================
--- test/CodeGen/NVPTX/convert-fp.ll
+++ test/CodeGen/NVPTX/convert-fp.ll
@@ -10,7 +10,7 @@
 }

 define i16 @cvt_i16_f64(double %x) {
-; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u16.f64 %rs{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %a = fptoui double %x to i16
   ret i16 %a
@@ -24,7 +24,7 @@
 }

 define i32 @cvt_i32_f64(double %x) {
-; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u32.f64 %r{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %a = fptoui double %x to i32
   ret i32 %a
@@ -32,14 +32,14 @@

 define i64 @cvt_i64_f32(float %x) {
-; CHECK: cvt.rzi.u64.f32 %rl{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: cvt.rzi.u64.f32 %rd{{[0-9]+}}, %f{{[0-9]+}};
 ; CHECK: ret;
   %a = fptoui float %x to i64
   ret i64 %a
 }

 define i64 @cvt_i64_f64(double %x) {
-; CHECK: cvt.rzi.u64.f64 %rl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rzi.u64.f64 %rd{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %a = fptoui double %x to i64
   ret i64 %a
@@ -60,14 +60,14 @@
 }

 define float @cvt_f32_i64(i64 %x) {
-; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rl{{[0-9]+}};
+; CHECK: cvt.rn.f32.u64 %f{{[0-9]+}}, %rd{{[0-9]+}};
 ; CHECK: ret;
   %a = uitofp i64 %x to float
   ret float %a
 }

 define float @cvt_f32_f64(double %x) {
-; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: cvt.rn.f32.f64 %f{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %a = fptrunc double %x to float
   ret float %a
@@ -88,56 +88,56 @@
 }

 define float @cvt_f32_s64(i64 %x) {
-; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: cvt.rn.f32.s64 %f{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %a = sitofp i64 %x to float
   ret float %a
 }

 define double @cvt_f64_i16(i16 %x) {
-; CHECK: cvt.rn.f64.u16 %fl{{[0-9]+}}, %rs{{[0-9]+}};
+; CHECK: cvt.rn.f64.u16 %fd{{[0-9]+}}, %rs{{[0-9]+}};
 ; CHECK: ret;
   %a = uitofp i16 %x to double
   ret double %a
 }

 define double @cvt_f64_i32(i32 %x) {
-; CHECK: cvt.rn.f64.u32 %fl{{[0-9]+}}, %r{{[0-9]+}};
+; CHECK: cvt.rn.f64.u32 %fd{{[0-9]+}}, %r{{[0-9]+}};
 ; CHECK: ret;
   %a = uitofp i32 %x to double
   ret double %a
 }

 define double @cvt_f64_i64(i64 %x) {
-; CHECK: cvt.rn.f64.u64 %fl{{[0-9]+}}, %rl{{[0-9]+}};
+; CHECK: cvt.rn.f64.u64 %fd{{[0-9]+}}, %rd{{[0-9]+}};
 ; CHECK: ret;
   %a = uitofp i64 %x to double
   ret double %a
 }

 define double @cvt_f64_f32(float %x) {
-; CHECK: cvt.f64.f32 %fl{{[0-9]+}}, %f{{[0-9]+}};
+; CHECK: cvt.f64.f32 %fd{{[0-9]+}}, %f{{[0-9]+}};
 ; CHECK: ret;
   %a = fpext float %x to double
   ret double %a
 }

 define double @cvt_f64_s16(i16 %x) {
-; CHECK: cvt.rn.f64.s16 %fl{{[0-9]+}}, %rs{{[0-9]+}}
+; CHECK: cvt.rn.f64.s16 %fd{{[0-9]+}}, %rs{{[0-9]+}}
 ; CHECK: ret
   %a = sitofp i16 %x to double
   ret double %a
 }

 define double @cvt_f64_s32(i32 %x) {
-; CHECK: cvt.rn.f64.s32 %fl{{[0-9]+}}, %r{{[0-9]+}}
+; CHECK: cvt.rn.f64.s32 %fd{{[0-9]+}}, %r{{[0-9]+}}
 ; CHECK: ret
   %a = sitofp i32 %x to double
   ret double %a
 }

 define double @cvt_f64_s64(i64 %x) {
-; CHECK: cvt.rn.f64.s64 %fl{{[0-9]+}}, %rl{{[0-9]+}}
+; CHECK: cvt.rn.f64.s64 %fd{{[0-9]+}}, %rd{{[0-9]+}}
 ; CHECK: ret
   %a = sitofp i64 %x to double
   ret double %a
Index: test/CodeGen/NVPTX/convert-int-sm20.ll
===================================================================
--- test/CodeGen/NVPTX/convert-int-sm20.ll
+++ test/CodeGen/NVPTX/convert-int-sm20.ll
@@ -48,16 +48,16 @@
 ; i64

 define i64 @cvt_i64_i16(i16 %x) {
-; CHECK: ld.param.u16 %rl[[R0:[0-9]+]], [cvt_i64_i16_param_{{[0-9]+}}]
-; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rl[[R0]]
+; CHECK: ld.param.u16 %rd[[R0:[0-9]+]], [cvt_i64_i16_param_{{[0-9]+}}]
+; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]]
 ; CHECK: ret
   %a = zext i16 %x to i64
   ret i64 %a
 }

 define i64 @cvt_i64_i32(i32 %x) {
-; CHECK: ld.param.u32 %rl[[R0:[0-9]+]], [cvt_i64_i32_param_{{[0-9]+}}]
-; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rl[[R0]]
+; CHECK: ld.param.u32 %rd[[R0:[0-9]+]], [cvt_i64_i32_param_{{[0-9]+}}]
+; CHECK: st.param.b64 [func_retval{{[0-9]+}}+0], %rd[[R0]]
 ; CHECK: ret
   %a = zext i32 %x to i64
   ret i64 %a
Index: test/CodeGen/NVPTX/fma.ll
===================================================================
--- test/CodeGen/NVPTX/fma.ll
+++ test/CodeGen/NVPTX/fma.ll
@@ -9,7 +9,7 @@
 }

 define ptx_device double @t1_f64(double %x, double %y, double %z) {
-; CHECK: fma.rn.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: fma.rn.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %a = fmul double %x, %y
   %b = fadd double %a, %z
Index: test/CodeGen/NVPTX/fp-literals.ll
===================================================================
--- test/CodeGen/NVPTX/fp-literals.ll
+++ test/CodeGen/NVPTX/fp-literals.ll
@@ -11,7 +11,7 @@
 }

 ; CHECK: myaddd
-; CHECK: add.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}}, 0d3FF0000000000000
+; CHECK: add.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}}, 0d3FF0000000000000
 define double @myaddd(double %a) {
   %ret = fadd double %a, 1.0
   ret double %ret
Index: test/CodeGen/NVPTX/intrinsic-old.ll
===================================================================
--- test/CodeGen/NVPTX/intrinsic-old.ll
+++ test/CodeGen/NVPTX/intrinsic-old.ll
@@ -198,7 +198,7 @@
 }

 define ptx_device i64 @test_clock64() {
-; CHECK: mov.u64 %rl{{[0-9]+}}, %clock64;
+; CHECK: mov.u64 %rd{{[0-9]+}}, %clock64;
 ; CHECK: ret;
   %x = call i64 @llvm.ptx.read.clock64()
   ret i64 %x
Index: test/CodeGen/NVPTX/intrinsics.ll
===================================================================
--- test/CodeGen/NVPTX/intrinsics.ll
+++ test/CodeGen/NVPTX/intrinsics.ll
@@ -9,7 +9,7 @@
 }

 define ptx_device double @test_fabs(double %d) {
-; CHECK: abs.f64 %fl{{[0-9]+}}, %fl{{[0-9]+}};
+; CHECK: abs.f64 %fd{{[0-9]+}}, %fd{{[0-9]+}};
 ; CHECK: ret;
   %x = call double @llvm.fabs.f64(double %d)
   ret double %x
Index: test/CodeGen/NVPTX/ld-addrspace.ll
===================================================================
--- test/CodeGen/NVPTX/ld-addrspace.ll
+++ test/CodeGen/NVPTX/ld-addrspace.ll
@@ -6,7 +6,7 @@
 define i8 @ld_global_i8(i8 addrspace(1)* %ptr) {
 ; PTX32: ld.global.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i8 addrspace(1)* %ptr
   ret i8 %a
@@ -15,7 +15,7 @@
 define i8 @ld_shared_i8(i8 addrspace(3)* %ptr) {
 ; PTX32: ld.shared.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i8 addrspace(3)* %ptr
   ret i8 %a
@@ -24,7 +24,7 @@
 define i8 @ld_local_i8(i8 addrspace(5)* %ptr) {
 ; PTX32: ld.local.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i8 addrspace(5)* %ptr
   ret i8 %a
@@ -34,7 +34,7 @@
 define i16 @ld_global_i16(i16 addrspace(1)* %ptr) {
 ; PTX32: ld.global.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i16 addrspace(1)* %ptr
   ret i16 %a
@@ -43,7 +43,7 @@
 define i16 @ld_shared_i16(i16 addrspace(3)* %ptr) {
 ; PTX32: ld.shared.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i16 addrspace(3)* %ptr
   ret i16 %a
@@ -52,7 +52,7 @@
 define i16 @ld_local_i16(i16 addrspace(5)* %ptr) {
 ; PTX32: ld.local.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i16 addrspace(5)* %ptr
   ret i16 %a
@@ -62,7 +62,7 @@
 define i32 @ld_global_i32(i32 addrspace(1)* %ptr) {
 ; PTX32: ld.global.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i32 addrspace(1)* %ptr
   ret i32 %a
@@ -71,7 +71,7 @@
 define i32 @ld_shared_i32(i32 addrspace(3)* %ptr) {
 ; PTX32: ld.shared.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i32 addrspace(3)* %ptr
   ret i32 %a
@@ -80,7 +80,7 @@
 define i32 @ld_local_i32(i32 addrspace(5)* %ptr) {
 ; PTX32: ld.local.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i32 addrspace(5)* %ptr
   ret i32 %a
@@ -88,27 +88,27 @@
 ;; i64

 define i64 @ld_global_i64(i64 addrspace(1)* %ptr) {
-; PTX32: ld.global.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.global.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i64 addrspace(1)* %ptr
   ret i64 %a
 }

 define i64 @ld_shared_i64(i64 addrspace(3)* %ptr) {
-; PTX32: ld.shared.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.shared.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i64 addrspace(3)* %ptr
   ret i64 %a
 }

 define i64 @ld_local_i64(i64 addrspace(5)* %ptr) {
-; PTX32: ld.local.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.local.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i64 addrspace(5)* %ptr
   ret i64 %a
@@ -118,7 +118,7 @@
 define float @ld_global_f32(float addrspace(1)* %ptr) {
 ; PTX32: ld.global.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load float addrspace(1)* %ptr
   ret float %a
@@ -127,7 +127,7 @@
 define float @ld_shared_f32(float addrspace(3)* %ptr) {
 ; PTX32: ld.shared.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load float addrspace(3)* %ptr
   ret float %a
@@ -136,7 +136,7 @@
 define float @ld_local_f32(float addrspace(5)* %ptr) {
 ; PTX32: ld.local.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load float addrspace(5)* %ptr
   ret float %a
@@ -144,27 +144,27 @@
 ;; f64

 define double @ld_global_f64(double addrspace(1)* %ptr) {
-; PTX32: ld.global.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.global.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.global.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.global.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load double addrspace(1)* %ptr
   ret double %a
 }

 define double @ld_shared_f64(double addrspace(3)* %ptr) {
-; PTX32: ld.shared.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.shared.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.shared.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.shared.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load double addrspace(3)* %ptr
   ret double %a
 }

 define double @ld_local_f64(double addrspace(5)* %ptr) {
-; PTX32: ld.local.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.local.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.local.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.local.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load double addrspace(5)* %ptr
   ret double %a
Index: test/CodeGen/NVPTX/ld-generic.ll
===================================================================
--- test/CodeGen/NVPTX/ld-generic.ll
+++ test/CodeGen/NVPTX/ld-generic.ll
@@ -6,7 +6,7 @@
 define i8 @ld_global_i8(i8 addrspace(0)* %ptr) {
 ; PTX32: ld.u8 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.u8 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u8 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i8 addrspace(0)* %ptr
   ret i8 %a
@@ -16,7 +16,7 @@
 define i16 @ld_global_i16(i16 addrspace(0)* %ptr) {
 ; PTX32: ld.u16 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.u16 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u16 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i16 addrspace(0)* %ptr
   ret i16 %a
@@ -26,7 +26,7 @@
 define i32 @ld_global_i32(i32 addrspace(0)* %ptr) {
 ; PTX32: ld.u32 %r{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.u32 %r{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u32 %r{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i32 addrspace(0)* %ptr
   ret i32 %a
@@ -34,9 +34,9 @@
 ;; i64

 define i64 @ld_global_i64(i64 addrspace(0)* %ptr) {
-; PTX32: ld.u64 %rl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.u64 %rd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.u64 %rl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u64 %rd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load i64 addrspace(0)* %ptr
   ret i64 %a
@@ -46,7 +46,7 @@
 define float @ld_global_f32(float addrspace(0)* %ptr) {
 ; PTX32: ld.f32 %f{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.f32 %f{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.f32 %f{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load float addrspace(0)* %ptr
   ret float %a
@@ -54,9 +54,9 @@
 ;; f64

 define double @ld_global_f64(double addrspace(0)* %ptr) {
-; PTX32: ld.f64 %fl{{[0-9]+}}, [%r{{[0-9]+}}]
+; PTX32: ld.f64 %fd{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: ret
-; PTX64: ld.f64 %fl{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.f64 %fd{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: ret
   %a = load double addrspace(0)* %ptr
   ret double %a
Index: test/CodeGen/NVPTX/local-stack-frame.ll
===================================================================
--- test/CodeGen/NVPTX/local-stack-frame.ll
+++ test/CodeGen/NVPTX/local-stack-frame.ll
@@ -7,8 +7,8 @@
 ; PTX32: cvta.local.u32 %SP, %r{{[0-9]+}};
 ; PTX32: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
 ; PTX32: st.volatile.u32 [%SP+0], %r{{[0-9]+}};
-; PTX64: mov.u64 %rl{{[0-9]+}}, __local_depot{{[0-9]+}};
-; PTX64: cvta.local.u64 %SP, %rl{{[0-9]+}};
+; PTX64: mov.u64 %rd{{[0-9]+}}, __local_depot{{[0-9]+}};
+; PTX64: cvta.local.u64 %SP, %rd{{[0-9]+}};
 ; PTX64: ld.param.u32 %r{{[0-9]+}}, [foo_param_0];
 ; PTX64: st.volatile.u32 [%SP+0], %r{{[0-9]+}};
 define void @foo(i32 %a) {
Index: test/CodeGen/NVPTX/pr13291-i1-store.ll
===================================================================
--- test/CodeGen/NVPTX/pr13291-i1-store.ll
+++ test/CodeGen/NVPTX/pr13291-i1-store.ll
@@ -5,7 +5,7 @@
 ; PTX32: mov.u16 %rs{{[0-9]+}}, 0;
 ; PTX32-NEXT: st.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}};
 ; PTX64: mov.u16 %rs{{[0-9]+}}, 0;
-; PTX64-NEXT: st.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}};
+; PTX64-NEXT: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}};
   store i1 false, i1* %a
   ret void
 }
@@ -15,7 +15,7 @@
 ; PTX32: ld.u8 %rs{{[0-9]+}}, [%r{{[0-9]+}}]
 ; PTX32: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
 ; PTX32: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
-; PTX64: ld.u8 %rs{{[0-9]+}}, [%rl{{[0-9]+}}]
+; PTX64: ld.u8 %rs{{[0-9]+}}, [%rd{{[0-9]+}}]
 ; PTX64: and.b16 %rs{{[0-9]+}}, %rs{{[0-9]+}}, 1;
 ; PTX64: setp.eq.b16 %p{{[0-9]+}}, %rs{{[0-9]+}}, 1;
Index: test/CodeGen/NVPTX/st-addrspace.ll
===================================================================
--- test/CodeGen/NVPTX/st-addrspace.ll
+++ test/CodeGen/NVPTX/st-addrspace.ll
@@ -7,7 +7,7 @@
 define void @st_global_i8(i8 addrspace(1)* %ptr, i8 %a) {
 ; PTX32: st.global.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.global.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i8 %a, i8 addrspace(1)* %ptr
   ret void
@@ -16,7 +16,7 @@
 define void @st_shared_i8(i8 addrspace(3)* %ptr, i8 %a) {
 ; PTX32: st.shared.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.shared.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i8 %a, i8 addrspace(3)* %ptr
   ret void
@@ -25,7 +25,7 @@
 define void @st_local_i8(i8 addrspace(5)* %ptr, i8 %a) {
 ; PTX32: st.local.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.local.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i8 %a, i8 addrspace(5)* %ptr
   ret void
@@ -36,7 +36,7 @@
 define void @st_global_i16(i16 addrspace(1)* %ptr, i16 %a) {
 ; PTX32: st.global.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.global.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i16 %a, i16 addrspace(1)* %ptr
   ret void
@@ -45,7 +45,7 @@
 define void @st_shared_i16(i16 addrspace(3)* %ptr, i16 %a) {
 ; PTX32: st.shared.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.shared.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i16 %a, i16 addrspace(3)* %ptr
   ret void
@@ -54,7 +54,7 @@
 define void @st_local_i16(i16 addrspace(5)* %ptr, i16 %a) {
 ; PTX32: st.local.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.local.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i16 %a, i16 addrspace(5)* %ptr
   ret void
@@ -65,7 +65,7 @@
 define void @st_global_i32(i32 addrspace(1)* %ptr, i32 %a) {
 ; PTX32: st.global.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.global.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX64: ret
   store i32 %a, i32 addrspace(1)* %ptr
   ret void
@@ -74,7 +74,7 @@
 define void @st_shared_i32(i32 addrspace(3)* %ptr, i32 %a) {
 ; PTX32: st.shared.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.shared.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX64: ret
   store i32 %a, i32 addrspace(3)* %ptr
   ret void
@@ -83,7 +83,7 @@
 define void @st_local_i32(i32 addrspace(5)* %ptr, i32 %a) {
 ; PTX32: st.local.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.local.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX64: ret
   store i32 %a, i32 addrspace(5)* %ptr
   ret void
@@ -92,27 +92,27 @@
 ;; i64

 define void @st_global_i64(i64 addrspace(1)* %ptr, i64 %a) {
-; PTX32: st.global.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.global.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.global.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX64: ret
   store i64 %a, i64 addrspace(1)* %ptr
   ret void
 }

 define void @st_shared_i64(i64 addrspace(3)* %ptr, i64 %a) {
-; PTX32: st.shared.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.shared.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.shared.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX64: ret
   store i64 %a, i64 addrspace(3)* %ptr
   ret void
 }

 define void @st_local_i64(i64 addrspace(5)* %ptr, i64 %a) {
-; PTX32: st.local.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.local.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.local.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX64: ret
   store i64 %a, i64 addrspace(5)* %ptr
   ret void
@@ -123,7 +123,7 @@
 define void @st_global_f32(float addrspace(1)* %ptr, float %a) {
 ; PTX32: st.global.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.global.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX64: ret
   store float %a, float addrspace(1)* %ptr
   ret void
@@ -132,7 +132,7 @@
 define void @st_shared_f32(float addrspace(3)* %ptr, float %a) {
 ; PTX32: st.shared.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.shared.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX64: ret
   store float %a, float addrspace(3)* %ptr
   ret void
@@ -141,7 +141,7 @@
 define void @st_local_f32(float addrspace(5)* %ptr, float %a) {
 ; PTX32: st.local.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.local.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX64: ret
   store float %a, float addrspace(5)* %ptr
   ret void
@@ -150,27 +150,27 @@
 ;; f64

 define void @st_global_f64(double addrspace(1)* %ptr, double %a) {
-; PTX32: st.global.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.global.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.global.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.global.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX64: ret
   store double %a, double addrspace(1)* %ptr
   ret void
 }

 define void @st_shared_f64(double addrspace(3)* %ptr, double %a) {
-; PTX32: st.shared.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.shared.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.shared.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.shared.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX64: ret
   store double %a, double addrspace(3)* %ptr
   ret void
 }

 define void @st_local_f64(double addrspace(5)* %ptr, double %a) {
-; PTX32: st.local.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.local.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.local.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.local.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX64: ret
   store double %a, double addrspace(5)* %ptr
   ret void
Index: test/CodeGen/NVPTX/st-generic.ll
===================================================================
--- test/CodeGen/NVPTX/st-generic.ll
+++ test/CodeGen/NVPTX/st-generic.ll
@@ -7,7 +7,7 @@
 define void @st_global_i8(i8 addrspace(0)* %ptr, i8 %a) {
 ; PTX32: st.u8 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.u8 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.u8 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i8 %a, i8 addrspace(0)* %ptr
   ret void
@@ -18,7 +18,7 @@
 define void @st_global_i16(i16 addrspace(0)* %ptr, i16 %a) {
 ; PTX32: st.u16 [%r{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.u16 [%rl{{[0-9]+}}], %rs{{[0-9]+}}
+; PTX64: st.u16 [%rd{{[0-9]+}}], %rs{{[0-9]+}}
 ; PTX64: ret
   store i16 %a, i16 addrspace(0)* %ptr
   ret void
@@ -29,7 +29,7 @@
 define void @st_global_i32(i32 addrspace(0)* %ptr, i32 %a) {
 ; PTX32: st.u32 [%r{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.u32 [%rl{{[0-9]+}}], %r{{[0-9]+}}
+; PTX64: st.u32 [%rd{{[0-9]+}}], %r{{[0-9]+}}
 ; PTX64: ret
   store i32 %a, i32 addrspace(0)* %ptr
   ret void
@@ -38,9 +38,9 @@
 ;; i64

 define void @st_global_i64(i64 addrspace(0)* %ptr, i64 %a) {
-; PTX32: st.u64 [%r{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX32: st.u64 [%r{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.u64 [%rl{{[0-9]+}}], %rl{{[0-9]+}}
+; PTX64: st.u64 [%rd{{[0-9]+}}], %rd{{[0-9]+}}
 ; PTX64: ret
   store i64 %a, i64 addrspace(0)* %ptr
   ret void
@@ -51,7 +51,7 @@
 define void @st_global_f32(float addrspace(0)* %ptr, float %a) {
 ; PTX32: st.f32 [%r{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.f32 [%rl{{[0-9]+}}], %f{{[0-9]+}}
+; PTX64: st.f32 [%rd{{[0-9]+}}], %f{{[0-9]+}}
 ; PTX64: ret
   store float %a, float addrspace(0)* %ptr
   ret void
@@ -60,9 +60,9 @@
 ;; f64

 define void @st_global_f64(double addrspace(0)* %ptr, double %a) {
-; PTX32: st.f64 [%r{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX32: st.f64 [%r{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX32: ret
-; PTX64: st.f64 [%rl{{[0-9]+}}], %fl{{[0-9]+}}
+; PTX64: st.f64 [%rd{{[0-9]+}}], %fd{{[0-9]+}}
 ; PTX64: ret
   store double %a, double addrspace(0)* %ptr
   ret void
Index: test/TableGen/intrinsic-long-name.td
===================================================================
--- /dev/null
+++ test/TableGen/intrinsic-long-name.td
@@ -0,0 +1,32 @@
+// RUN: llvm-tblgen -gen-intrinsic %s | FileCheck %s
+// XFAIL: vg_leak
+
+class IntrinsicProperty;
+
+class ValueType<int size, int value> {
+  string Namespace = "MVT";
+  int Size = size;
+  int Value = value;
+}
+
+class LLVMType<ValueType vt> {
+  ValueType VT = vt;
+}
+
+class Intrinsic<string name, list<LLVMType> param_types = []> {
+  string LLVMName = name;
+  bit isTarget = 0;
+  string TargetPrefix = "";
+  list<LLVMType> RetTypes = [];
+  list<LLVMType> ParamTypes = param_types;
+  list<IntrinsicProperty> Properties = [];
+}
+
+def iAny : ValueType<0, 254>;
+def llvm_anyint_ty : LLVMType<iAny>;
+
+// Make sure we generate the long name without crashing
+// CHECK: this_is_a_really_long_intrinsic_name_but_we_should_still_not_crash // llvm.this.is.a.really.long.intrinsic.name.but.we.should.still.not.crash
+def int_foo : Intrinsic<"llvm.foo", [llvm_anyint_ty]>;
+def int_this_is_a_really_long_intrinsic_name_but_we_should_still_not_crash : Intrinsic<"llvm.this.is.a.really.long.intrinsic.name.but.we.should.still.not.crash", [llvm_anyint_ty]>;
+
Index: test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
===================================================================
--- test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
+++ test/Transforms/SeparateConstOffsetFromGEP/NVPTX/split-gep-and-gvn.ll
@@ -45,7 +45,7 @@
   ret void
 }
 ; PTX-LABEL: sum_of_array(
-; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
@@ -88,7 +88,7 @@
   ret void
 }
 ; PTX-LABEL: sum_of_array2(
-; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
@@ -129,7 +129,7 @@
   ret void
 }
 ; PTX-LABEL: sum_of_array3(
-; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rl|r)[0-9]+]]{{\]}}
+; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG:%(rd|r)[0-9]+]]{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+4{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+128{{\]}}
 ; PTX: ld.shared.f32 {{%f[0-9]+}}, {{\[}}[[BASE_REG]]+132{{\]}}
Index: utils/TableGen/IntrinsicEmitter.cpp
===================================================================
--- utils/TableGen/IntrinsicEmitter.cpp
+++ utils/TableGen/IntrinsicEmitter.cpp
@@ -129,8 +129,9 @@
   for (unsigned i = 0, e = Ints.size(); i != e; ++i) {
     OS << "    " << Ints[i].EnumName;
     OS << ((i != e-1) ? ", " : "  ");
-    OS << std::string(40-Ints[i].EnumName.size(), ' ')
-       << "// " << Ints[i].Name << "\n";
+    if (Ints[i].EnumName.size() < 40)
+      OS << std::string(40-Ints[i].EnumName.size(), ' ');
+    OS << " // " << Ints[i].Name << "\n";
   }
   OS << "#endif\n\n";
 }