Index: llvm/lib/Target/SystemZ/SystemZISelLowering.cpp =================================================================== --- llvm/lib/Target/SystemZ/SystemZISelLowering.cpp +++ llvm/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -1088,7 +1088,7 @@ TargetLowering::ConstraintWeight SystemZTargetLowering:: getSingleConstraintMatchWeight(AsmOperandInfo &info, const char *constraint) const { - ConstraintWeight weight = CW_Invalid; + ConstraintWeight weight = CW_Default; Value *CallOperandVal = info.CallOperandVal; // If we don't have a value, we can't do a match, // but allow it at the lowest weight. @@ -1115,8 +1115,7 @@ break; case 'v': // Vector register - if ((type->isVectorTy() || type->isFloatingPointTy()) && - Subtarget.hasVector()) + if (type->isVectorTy() || type->isFloatingPointTy()) weight = CW_Register; break; @@ -1179,9 +1178,9 @@ default: break; case 'd': // Data register (equivalent to 'r') case 'r': // General-purpose register - if (VT == MVT::i64) + if (VT.getSizeInBits() == 64) return std::make_pair(0U, &SystemZ::GR64BitRegClass); - else if (VT == MVT::i128) + else if (VT.getSizeInBits() == 128) return std::make_pair(0U, &SystemZ::GR128BitRegClass); return std::make_pair(0U, &SystemZ::GR32BitRegClass); @@ -1196,23 +1195,22 @@ return std::make_pair(0U, &SystemZ::GRH32BitRegClass); case 'f': // Floating-point register - if (!useSoftFloat()) { - if (VT == MVT::f64) - return std::make_pair(0U, &SystemZ::FP64BitRegClass); - else if (VT == MVT::f128) - return std::make_pair(0U, &SystemZ::FP128BitRegClass); - return std::make_pair(0U, &SystemZ::FP32BitRegClass); - } - break; + if (useSoftFloat()) + report_fatal_error("can't use 'f' constraint with soft-float."); + if (VT.getSizeInBits() == 64) + return std::make_pair(0U, &SystemZ::FP64BitRegClass); + else if (VT.getSizeInBits() == 128) + return std::make_pair(0U, &SystemZ::FP128BitRegClass); + return std::make_pair(0U, &SystemZ::FP32BitRegClass); + case 'v': // Vector register - if (Subtarget.hasVector()) { 
- if (VT == MVT::f32) - return std::make_pair(0U, &SystemZ::VR32BitRegClass); - if (VT == MVT::f64) - return std::make_pair(0U, &SystemZ::VR64BitRegClass); - return std::make_pair(0U, &SystemZ::VR128BitRegClass); - } - break; + if (!Subtarget.hasVector()) + report_fatal_error("can't use 'v' constraint without vector facility."); + if (VT.getSizeInBits() == 32) + return std::make_pair(0U, &SystemZ::VR32BitRegClass); + if (VT.getSizeInBits() == 64) + return std::make_pair(0U, &SystemZ::VR64BitRegClass); + return std::make_pair(0U, &SystemZ::VR128BitRegClass); } } if (Constraint.size() > 0 && Constraint[0] == '{') { @@ -1462,6 +1460,12 @@ Parts[0] = lowerI128ToGR128(DAG, Val); return true; } + if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) { + // Inline assembly operand: f128 -> i128 + SDValue Val_i128 = DAG.getNode(ISD::BITCAST, SDLoc(Val), MVT::i128, Val); + Parts[0] = lowerI128ToGR128(DAG, Val_i128); + return true; + } return false; } @@ -1475,6 +1479,11 @@ if (ValueVT == MVT::i128 && NumParts == 1) // Inline assembly operand. return lowerGR128ToI128(DAG, Parts[0]); + if (ValueVT.getSizeInBits() == 128 && NumParts == 1 && PartVT == MVT::Untyped) { + // Inline assembly operand: i128 -> f128 + SDValue Val_i128 = lowerGR128ToI128(DAG, Parts[0]); + return DAG.getNode(ISD::BITCAST, SDLoc(Val_i128), MVT::f128, Val_i128); + } return SDValue(); } Index: llvm/test/CodeGen/SystemZ/inline-asm-f-constraint-softfloat.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/inline-asm-f-constraint-softfloat.ll @@ -0,0 +1,9 @@ +; RUN: not --crash llc -mtriple=s390x-linux-gnu -mcpu=z15 -mattr=soft-float < %s 2>&1 | FileCheck %s + +; CHECK: LLVM ERROR: can't use 'f' constraint with soft-float. 
+ +define signext i32 @int_and_f(i32 signext %cc_dep1) { +entry: + %0 = tail call i32 asm sideeffect "", "=f,0"(i32 %cc_dep1) + ret i32 %0 +} Index: llvm/test/CodeGen/SystemZ/inline-asm-fp-int-casting-zEC12.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/inline-asm-fp-int-casting-zEC12.ll @@ -0,0 +1,201 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=s390x-linux-gnu -mcpu=zEC12 < %s | FileCheck %s +; +; Test inline assembly where the operand is bitcasted. + +define signext i32 @int_and_f(i32 signext %cc_dep1) { +; CHECK-LABEL: int_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: risbhg %r0, %r2, 0, 159, 32 +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r0, %f0 +; CHECK-NEXT: risblg %r0, %r0, 0, 159, 32 +; CHECK-NEXT: lgfr %r2, %r0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call i32 asm sideeffect "", "=f,0"(i32 %cc_dep1) + ret i32 %0 +} + +define i64 @long_and_f(i64 %cc_dep1) { +; CHECK-LABEL: long_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ldgr %f0, %r2 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r2, %f0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call i64 asm sideeffect "", "=f,0"(i64 %cc_dep1) + ret i64 %0 +} + +define void @__int128_and_f(ptr noalias nocapture writeonly sret(i128) align 8 %agg.result, ptr %0) { +; CHECK-LABEL: __int128_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ld %f0, 0(%r3) +; CHECK-NEXT: ld %f2, 8(%r3) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: std %f0, 0(%r2) +; CHECK-NEXT: std %f2, 8(%r2) +; CHECK-NEXT: br %r14 +entry: + %cc_dep1 = load i128, ptr %0, align 8 + %1 = tail call i128 asm sideeffect "", "=f,0"(i128 %cc_dep1) + store i128 %1, ptr %agg.result, align 8 + ret void +} + +define float @float_and_r(float %cc_dep1) { +; CHECK-LABEL: float_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def 
$f0s killed $f0s def $f0d +; CHECK-NEXT: lgdr %r0, %f0 +; CHECK-NEXT: risblg %r0, %r0, 0, 159, 32 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: risbhg %r0, %r0, 0, 159, 32 +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: # kill: def $f0s killed $f0s killed $f0d +; CHECK-NEXT: br %r14 +entry: + %0 = tail call float asm sideeffect "", "=r,0"(float %cc_dep1) + ret float %0 +} + +define double @double_and_r(double %cc_dep1) { +; CHECK-LABEL: double_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lgdr %r0, %f0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call double asm sideeffect "", "=r,0"(double %cc_dep1) + ret double %0 +} + +define void @longdouble_and_r(ptr noalias nocapture writeonly sret(fp128) align 8 %agg.result, ptr %0) { +; CHECK-LABEL: longdouble_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lg %r1, 8(%r3) +; CHECK-NEXT: lg %r0, 0(%r3) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: stg %r1, 8(%r2) +; CHECK-NEXT: stg %r0, 0(%r2) +; CHECK-NEXT: br %r14 +entry: + %cc_dep1 = load fp128, ptr %0, align 8 + %1 = tail call fp128 asm sideeffect "", "=r,0"(fp128 %cc_dep1) + store fp128 %1, ptr %agg.result, align 8 + ret void +} + +define <2 x i16> @vec32_and_r(<2 x i16> %cc_dep1) { +; CHECK-LABEL: vec32_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $r3l killed $r3l def $r3d +; CHECK-NEXT: # kill: def $r2l killed $r2l def $r2d +; CHECK-NEXT: risbgn %r3, %r2, 32, 47, 16 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: srlk %r2, %r3, 16 +; CHECK-NEXT: # kill: def $r3l killed $r3l killed $r3d +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i16> asm sideeffect "", "=r,0"(<2 x i16> %cc_dep1) + ret <2 x i16> %0 +} + +define <2 x i32> @vec64_and_r(<2 x i32> %cc_dep1) { +; CHECK-LABEL: vec64_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $r2l killed $r2l def $r2d +; CHECK-NEXT: sllg %r0, %r2, 32 +; CHECK-NEXT: lr %r0, %r3 +; CHECK-NEXT: 
#APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lr %r3, %r0 +; CHECK-NEXT: srlg %r2, %r0, 32 +; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i32> asm sideeffect "", "=r,0"(<2 x i32> %cc_dep1) + ret <2 x i32> %0 +} + +define <2 x i16> @vec32_and_f(<2 x i16> %cc_dep1) { +; CHECK-LABEL: vec32_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $r3l killed $r3l def $r3d +; CHECK-NEXT: # kill: def $r2l killed $r2l def $r2d +; CHECK-NEXT: risbgn %r3, %r2, 32, 47, 16 +; CHECK-NEXT: risbhg %r0, %r3, 0, 159, 32 +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r0, %f0 +; CHECK-NEXT: risblg %r3, %r0, 0, 159, 32 +; CHECK-NEXT: srlk %r2, %r3, 16 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i16> asm sideeffect "", "=f,0"(<2 x i16> %cc_dep1) + ret <2 x i16> %0 +} + +define <2 x i32> @vec64_and_f(<2 x i32> %cc_dep1) { +; CHECK-LABEL: vec64_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: # kill: def $r2l killed $r2l def $r2d +; CHECK-NEXT: sllg %r0, %r2, 32 +; CHECK-NEXT: lr %r0, %r3 +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r3, %f0 +; CHECK-NEXT: srlg %r2, %r3, 32 +; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d +; CHECK-NEXT: # kill: def $r3l killed $r3l killed $r3d +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i32> asm sideeffect "", "=f,0"(<2 x i32> %cc_dep1) + ret <2 x i32> %0 +} + +define <4 x i32> @vec128_and_f(<4 x i32> %cc_dep1) { +; CHECK-LABEL: vec128_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: aghi %r15, -176 +; CHECK-NEXT: .cfi_def_cfa_offset 336 +; CHECK-NEXT: # kill: def $r4l killed $r4l def $r4d +; CHECK-NEXT: sllg %r0, %r4, 32 +; CHECK-NEXT: lr %r0, %r5 +; CHECK-NEXT: # kill: def $r2l killed $r2l def $r2d +; CHECK-NEXT: stg %r0, 168(%r15) +; CHECK-NEXT: sllg %r0, %r2, 32 +; CHECK-NEXT: lr %r0, %r3 +; CHECK-NEXT: stg %r0, 160(%r15) +; CHECK-NEXT: ld %f0, 160(%r15) +; CHECK-NEXT: 
ld %f2, 168(%r15) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r3, %f0 +; CHECK-NEXT: lgdr %r5, %f2 +; CHECK-NEXT: srlg %r2, %r3, 32 +; CHECK-NEXT: srlg %r4, %r5, 32 +; CHECK-NEXT: # kill: def $r2l killed $r2l killed $r2d +; CHECK-NEXT: # kill: def $r3l killed $r3l killed $r3d +; CHECK-NEXT: # kill: def $r4l killed $r4l killed $r4d +; CHECK-NEXT: # kill: def $r5l killed $r5l killed $r5d +; CHECK-NEXT: aghi %r15, 176 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <4 x i32> asm sideeffect "", "=f,0"(<4 x i32> %cc_dep1) + ret <4 x i32> %0 +} + Index: llvm/test/CodeGen/SystemZ/inline-asm-fp-int-casting.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/inline-asm-fp-int-casting.ll @@ -0,0 +1,262 @@ +; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py UTC_ARGS: --version 2 +; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z15 < %s | FileCheck %s --check-prefixes=CHECK,Z15 +; RUN: llc -mtriple=s390x-linux-gnu -mcpu=z13 < %s | FileCheck %s --check-prefixes=CHECK,Z13 +; +; Test inline assembly where the operand is bitcasted. 
+ +define signext i32 @int_and_f(i32 signext %cc_dep1) { +; CHECK-LABEL: int_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlvgf %v0, %r2, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlgvf %r0, %v0, 0 +; CHECK-NEXT: lgfr %r2, %r0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call i32 asm sideeffect "", "=f,0"(i32 %cc_dep1) + ret i32 %0 +} + +define i64 @long_and_f(i64 %cc_dep1) { +; CHECK-LABEL: long_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ldgr %f0, %r2 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r2, %f0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call i64 asm sideeffect "", "=f,0"(i64 %cc_dep1) + ret i64 %0 +} + +define void @__int128_and_f(ptr noalias nocapture writeonly sret(i128) align 8 %agg.result, ptr %0) { +; Z15-LABEL: __int128_and_f: +; Z15: # %bb.0: # %entry +; Z15-NEXT: vl %v0, 0(%r3), 3 +; Z15-NEXT: vrepg %v2, %v0, 1 +; Z15-NEXT: #APP +; Z15-NEXT: #NO_APP +; Z15-NEXT: vmrhg %v0, %v0, %v2 +; Z15-NEXT: vst %v0, 0(%r2), 3 +; Z15-NEXT: br %r14 +; +; Z13-LABEL: __int128_and_f: +; Z13: # %bb.0: # %entry +; Z13-NEXT: ld %f0, 0(%r3) +; Z13-NEXT: ld %f2, 8(%r3) +; Z13-NEXT: #APP +; Z13-NEXT: #NO_APP +; Z13-NEXT: std %f0, 0(%r2) +; Z13-NEXT: std %f2, 8(%r2) +; Z13-NEXT: br %r14 +entry: + %cc_dep1 = load i128, ptr %0, align 8 + %1 = tail call i128 asm sideeffect "", "=f,0"(i128 %cc_dep1) + store i128 %1, ptr %agg.result, align 8 + ret void +} + +define signext i32 @int_and_v(i32 signext %cc_dep1) { +; CHECK-LABEL: int_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlvgf %v0, %r2, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlgvf %r0, %v0, 0 +; CHECK-NEXT: lgfr %r2, %r0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call i32 asm sideeffect "", "=v,0"(i32 %cc_dep1) + ret i32 %0 +} + +define i64 @long_and_v(i64 %cc_dep1) { +; CHECK-LABEL: long_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: ldgr %f0, %r2 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: lgdr %r2, %f0 +; CHECK-NEXT: br %r14 
+entry: + %0 = tail call i64 asm sideeffect "", "=v,0"(i64 %cc_dep1) + ret i64 %0 +} + +define void @__int128_and_v(ptr noalias nocapture writeonly sret(i128) align 8 %agg.result, ptr %0) { +; CHECK-LABEL: __int128_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl %v0, 0(%r3), 3 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vst %v0, 0(%r2), 3 +; CHECK-NEXT: br %r14 +entry: + %cc_dep1 = load i128, ptr %0, align 8 + %1 = tail call i128 asm sideeffect "", "=v,0"(i128 %cc_dep1) + store i128 %1, ptr %agg.result, align 8 + ret void +} + +define float @float_and_r(float %cc_dep1) { +; CHECK-LABEL: float_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlgvf %r0, %v0, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlvgf %v0, %r0, 0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call float asm sideeffect "", "=r,0"(float %cc_dep1) + ret float %0 +} + +define double @double_and_r(double %cc_dep1) { +; CHECK-LABEL: double_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lgdr %r0, %f0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: ldgr %f0, %r0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call double asm sideeffect "", "=r,0"(double %cc_dep1) + ret double %0 +} + +define void @longdouble_and_r(ptr noalias nocapture writeonly sret(fp128) align 8 %agg.result, ptr %0) { +; CHECK-LABEL: longdouble_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: lg %r1, 8(%r3) +; CHECK-NEXT: lg %r0, 0(%r3) +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: stg %r1, 8(%r2) +; CHECK-NEXT: stg %r0, 0(%r2) +; CHECK-NEXT: br %r14 +entry: + %cc_dep1 = load fp128, ptr %0, align 8 + %1 = tail call fp128 asm sideeffect "", "=r,0"(fp128 %cc_dep1) + store fp128 %1, ptr %agg.result, align 8 + ret void +} + +define float @float_and_v(float %cc_dep1) { +; CHECK-LABEL: float_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: br %r14 +entry: + %0 = tail call float asm sideeffect "", "=v,0"(float %cc_dep1) + ret float %0 +} + 
+define double @double_and_v(double %cc_dep1) { +; CHECK-LABEL: double_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: br %r14 +entry: + %0 = tail call double asm sideeffect "", "=v,0"(double %cc_dep1) + ret double %0 +} + +define void @longdouble_and_v(ptr noalias nocapture writeonly sret(fp128) align 8 %agg.result, ptr %0) { +; CHECK-LABEL: longdouble_and_v: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vl %v0, 0(%r3), 3 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vst %v0, 0(%r2), 3 +; CHECK-NEXT: br %r14 +entry: + %cc_dep1 = load fp128, ptr %0, align 8 + %1 = tail call fp128 asm sideeffect "", "=v,0"(fp128 %cc_dep1) + store fp128 %1, ptr %agg.result, align 8 + ret void +} + +define <2 x i16> @vec32_and_r(<2 x i16> %cc_dep1) { +; CHECK-LABEL: vec32_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlgvf %r0, %v24, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlvgf %v24, %r0, 0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i16> asm sideeffect "", "=r,0"(<2 x i16> %cc_dep1) + ret <2 x i16> %0 +} + +define <2 x i32> @vec64_and_r(<2 x i32> %cc_dep1) { +; CHECK-LABEL: vec64_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlgvg %r0, %v24, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlvgg %v24, %r0, 0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i32> asm sideeffect "", "=r,0"(<2 x i32> %cc_dep1) + ret <2 x i32> %0 +} + +define <4 x i32> @vec128_and_r(<4 x i32> %cc_dep1) { +; CHECK-LABEL: vec128_and_r: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlgvg %r1, %v24, 1 +; CHECK-NEXT: vlgvg %r0, %v24, 0 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlvgp %v24, %r0, %r1 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <4 x i32> asm sideeffect "", "=r,0"(<4 x i32> %cc_dep1) + ret <4 x i32> %0 +} + +define <2 x i16> @vec32_and_f(<2 x i16> %cc_dep1) { +; CHECK-LABEL: vec32_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlr %v0, %v24 +; CHECK-NEXT: #APP +; 
CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlr %v24, %v0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i16> asm sideeffect "", "=f,0"(<2 x i16> %cc_dep1) + ret <2 x i16> %0 +} + +define <2 x i32> @vec64_and_f(<2 x i32> %cc_dep1) { +; CHECK-LABEL: vec64_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vlr %v0, %v24 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vlr %v24, %v0 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <2 x i32> asm sideeffect "", "=f,0"(<2 x i32> %cc_dep1) + ret <2 x i32> %0 +} + +define <4 x i32> @vec128_and_f(<4 x i32> %cc_dep1) { +; CHECK-LABEL: vec128_and_f: +; CHECK: # %bb.0: # %entry +; CHECK-NEXT: vrepg %v2, %v24, 1 +; CHECK-NEXT: vlr %v0, %v24 +; CHECK-NEXT: #APP +; CHECK-NEXT: #NO_APP +; CHECK-NEXT: vmrhg %v24, %v0, %v2 +; CHECK-NEXT: br %r14 +entry: + %0 = tail call <4 x i32> asm sideeffect "", "=f,0"(<4 x i32> %cc_dep1) + ret <4 x i32> %0 +} + Index: llvm/test/CodeGen/SystemZ/inline-asm-v-constraint-novecfacility.ll =================================================================== --- /dev/null +++ llvm/test/CodeGen/SystemZ/inline-asm-v-constraint-novecfacility.ll @@ -0,0 +1,9 @@ +; RUN: not --crash llc < %s -mtriple=s390x-linux-gnu -mcpu=zEC12 2>&1 | FileCheck %s + +; CHECK: LLVM ERROR: can't use 'v' constraint without vector facility. + +define signext i32 @int_and_v(i32 signext %cc_dep1) { +entry: + %0 = tail call i32 asm sideeffect "", "=v,0"(i32 %cc_dep1) + ret i32 %0 +} Index: llvm/test/CodeGen/SystemZ/soft-float-inline-asm-01.ll =================================================================== --- llvm/test/CodeGen/SystemZ/soft-float-inline-asm-01.ll +++ llvm/test/CodeGen/SystemZ/soft-float-inline-asm-01.ll @@ -1,4 +1,4 @@ -; RUN: not llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -mattr=soft-float -O3 2>&1 | FileCheck %s +; RUN: not --crash llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -mattr=soft-float -O3 2>&1 | FileCheck %s ; ; Verify that inline asms cannot use fp/vector registers with soft-float. 
@@ -7,4 +7,4 @@ ret float %ret } -; CHECK: error: couldn't allocate output register for constraint 'f' +; CHECK: LLVM ERROR: can't use 'f' constraint with soft-float. Index: llvm/test/CodeGen/SystemZ/soft-float-inline-asm-03.ll =================================================================== --- llvm/test/CodeGen/SystemZ/soft-float-inline-asm-03.ll +++ llvm/test/CodeGen/SystemZ/soft-float-inline-asm-03.ll @@ -1,4 +1,5 @@ -; RUN: not llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -mattr=soft-float -O3 2>&1 | FileCheck %s +; RUN: not --crash llc < %s -mtriple=s390x-linux-gnu -mcpu=z13 -mattr=soft-float \ +; RUN: -O3 2>&1 | FileCheck %s ; ; Verify that inline asms cannot use fp/vector registers with soft-float. @@ -7,4 +8,4 @@ ret <2 x i64> %ret } -; CHECK: error: couldn't allocate output register for constraint 'v' +; CHECK: LLVM ERROR: can't use 'v' constraint without vector facility.