Index: lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp =================================================================== --- lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp +++ lib/Target/AMDGPU/AsmParser/AMDGPUAsmParser.cpp @@ -1032,9 +1032,16 @@ Negate = true; } - if (getLexer().getKind() == AsmToken::Pipe) { - Parser.Lex(); - Abs = true; + if (getLexer().getKind() == AsmToken::Identifier) { + const AsmToken Tok = Parser.getTok(); + if (Tok.getString() == "abs") { + Parser.Lex(); + if (getLexer().getKind() != AsmToken::LParen) + return MatchOperand_ParseFail; + + Parser.Lex(); + Abs = true; + } } switch(getLexer().getKind()) { @@ -1080,7 +1087,7 @@ Modifiers |= 0x1; if (Abs) { - if (getLexer().getKind() != AsmToken::Pipe) + if (getLexer().getKind() != AsmToken::RParen) return MatchOperand_ParseFail; Parser.Lex(); Modifiers |= 0x2; Index: lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp =================================================================== --- lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp +++ lib/Target/AMDGPU/InstPrinter/AMDGPUInstPrinter.cpp @@ -354,10 +354,10 @@ if (InputModifiers & SISrcMods::NEG) O << '-'; if (InputModifiers & SISrcMods::ABS) - O << '|'; + O << "abs("; printOperand(MI, OpNo + 1, O); if (InputModifiers & SISrcMods::ABS) - O << '|'; + O << ')'; } void AMDGPUInstPrinter::printInterpSlot(const MCInst *MI, unsigned OpNum, Index: test/CodeGen/AMDGPU/commute_modifiers.ll =================================================================== --- test/CodeGen/AMDGPU/commute_modifiers.ll +++ test/CodeGen/AMDGPU/commute_modifiers.ll @@ -6,7 +6,7 @@ ; FUNC-LABEL: @commute_add_imm_fabs_f32 ; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} -; SI: v_add_f32_e64 [[REG:v[0-9]+]], 2.0, |[[X]]| +; SI: v_add_f32_e64 [[REG:v[0-9]+]], 2.0, abs([[X]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_add_imm_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call 
i32 @llvm.r600.read.tidig.x() #1 @@ -20,7 +20,7 @@ ; FUNC-LABEL: @commute_mul_imm_fneg_fabs_f32 ; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} -; SI: v_mul_f32_e64 [[REG:v[0-9]+]], -4.0, |[[X]]| +; SI: v_mul_f32_e64 [[REG:v[0-9]+]], -4.0, abs([[X]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_mul_imm_fneg_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -51,7 +51,7 @@ ; FUNC-LABEL: @commute_add_lit_fabs_f32 ; SI: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI: v_mov_b32_e32 [[K:v[0-9]+]], 0x44800000 -; SI: v_add_f32_e64 [[REG:v[0-9]+]], |[[X]]|, [[K]] +; SI: v_add_f32_e64 [[REG:v[0-9]+]], abs([[X]]), [[K]] ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_add_lit_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -66,7 +66,7 @@ ; FUNC-LABEL: @commute_add_fabs_f32 ; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_add_f32_e64 [[REG:v[0-9]+]], [[X]], |[[Y]]| +; SI: v_add_f32_e64 [[REG:v[0-9]+]], [[X]], abs([[Y]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_add_fabs_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -100,7 +100,7 @@ ; FUNC-LABEL: @commute_mul_fabs_fneg_f32 ; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -|[[Y]]| +; SI: v_mul_f32_e64 [[REG:v[0-9]+]], [[X]], -abs([[Y]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_mul_fabs_fneg_f32(float 
addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -119,7 +119,7 @@ ; FUNC-LABEL: @commute_mul_fabs_x_fabs_y_f32 ; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, |[[Y]]| +; SI: v_mul_f32_e64 [[REG:v[0-9]+]], abs([[X]]), abs([[Y]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_mul_fabs_x_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -137,7 +137,7 @@ ; FUNC-LABEL: @commute_mul_fabs_x_fneg_fabs_y_f32 ; SI-DAG: buffer_load_dword [[X:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI-DAG: buffer_load_dword [[Y:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_mul_f32_e64 [[REG:v[0-9]+]], |[[X]]|, -|[[Y]]| +; SI: v_mul_f32_e64 [[REG:v[0-9]+]], abs([[X]]), -abs([[Y]]) ; SI-NEXT: buffer_store_dword [[REG]] define void @commute_mul_fabs_x_fneg_fabs_y_f32(float addrspace(1)* %out, float addrspace(1)* %in) #0 { %tid = call i32 @llvm.r600.read.tidig.x() #1 @@ -159,7 +159,7 @@ ; SI-LABEL: {{^}}fma_a_2.0_neg_b_f32 ; SI-DAG: buffer_load_dword [[R1:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; SI-DAG: buffer_load_dword [[R2:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_fma_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], |[[R2]]| +; SI: v_fma_f32 [[RESULT:v[0-9]+]], 2.0, [[R1]], abs([[R2]]) ; SI: buffer_store_dword [[RESULT]] define void @fma_a_2.0_neg_b_f32(float addrspace(1)* %out, float addrspace(1)* %in) { %tid = call i32 @llvm.r600.read.tidig.x() nounwind readnone Index: test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll =================================================================== --- test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll +++ 
test/CodeGen/AMDGPU/cvt_flr_i32_f32.ll @@ -33,7 +33,7 @@ ; FUNC-LABEL: {{^}}cvt_flr_i32_f32_fabs: ; SI-NOT: add ; SI-SAFE-NOT: v_cvt_flr_i32_f32 -; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}| +; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, abs(s{{[0-9]+}}) ; SI: s_endpgm define void @cvt_flr_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 { %x.fabs = call float @llvm.fabs.f32(float %x) #1 @@ -59,7 +59,7 @@ ; FUNC-LABEL: {{^}}cvt_flr_i32_f32_fabs_fneg: ; SI-NOT: add ; SI-SAFE-NOT: v_cvt_flr_i32_f32 -; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}| +; SI-NONAN: v_cvt_flr_i32_f32_e64 v{{[0-9]+}}, -abs(s{{[0-9]+}}) ; SI: s_endpgm define void @cvt_flr_i32_f32_fabs_fneg(i32 addrspace(1)* %out, float %x) #0 { %x.fabs = call float @llvm.fabs.f32(float %x) #1 Index: test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll =================================================================== --- test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll +++ test/CodeGen/AMDGPU/cvt_rpi_i32_f32.ll @@ -19,7 +19,7 @@ ; FUNC-LABEL: {{^}}cvt_rpi_i32_f32_fabs: ; SI-SAFE-NOT: v_cvt_rpi_i32_f32 -; SI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}} +; SI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, abs(s{{[0-9]+}}){{$}} ; SI: s_endpgm define void @cvt_rpi_i32_f32_fabs(i32 addrspace(1)* %out, float %x) #0 { %x.fabs = call float @llvm.fabs.f32(float %x) #1 @@ -49,9 +49,9 @@ ; FIXME: This doesn't work for same reason as above ; FUNC-LABEL: {{^}}cvt_rpi_i32_f32_fabs_fneg: ; SI-SAFE-NOT: v_cvt_rpi_i32_f32 -; XSI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, -|s{{[0-9]+}}| +; XSI-NONAN: v_cvt_rpi_i32_f32_e64 v{{[0-9]+}}, -abs(s{{[0-9]+}}) -; SI: v_sub_f32_e64 [[TMP:v[0-9]+]], 0.5, |s{{[0-9]+}}| +; SI: v_sub_f32_e64 [[TMP:v[0-9]+]], 0.5, abs(s{{[0-9]+}}) ; SI-SAFE-NOT: v_cvt_flr_i32_f32 ; SI-NONAN: v_cvt_flr_i32_f32_e32 {{v[0-9]+}}, [[TMP]] ; SI: s_endpgm Index: test/CodeGen/AMDGPU/fabs.f64.ll =================================================================== --- test/CodeGen/AMDGPU/fabs.f64.ll 
+++ test/CodeGen/AMDGPU/fabs.f64.ll @@ -55,7 +55,7 @@ ; SI-LABEL: {{^}}fabs_fold_f64: ; SI: s_load_dwordx2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb ; SI-NOT: and -; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}} +; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, abs([[ABS_VALUE]]), {{v\[[0-9]+:[0-9]+\]}} ; SI: s_endpgm define void @fabs_fold_f64(double addrspace(1)* %out, double %in0, double %in1) { %fabs = call double @llvm.fabs.f64(double %in0) @@ -67,7 +67,7 @@ ; SI-LABEL: {{^}}fabs_fn_fold_f64: ; SI: s_load_dwordx2 [[ABS_VALUE:s\[[0-9]+:[0-9]+\]]], {{s\[[0-9]+:[0-9]+\]}}, 0xb ; SI-NOT: and -; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, |[[ABS_VALUE]]|, {{v\[[0-9]+:[0-9]+\]}} +; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, abs([[ABS_VALUE]]), {{v\[[0-9]+:[0-9]+\]}} ; SI: s_endpgm define void @fabs_fn_fold_f64(double addrspace(1)* %out, double %in0, double %in1) { %fabs = call double @fabs(double %in0) Index: test/CodeGen/AMDGPU/fabs.ll =================================================================== --- test/CodeGen/AMDGPU/fabs.ll +++ test/CodeGen/AMDGPU/fabs.ll @@ -75,7 +75,7 @@ ; SI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb ; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c ; GCN-NOT: and -; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}} +; GCN: v_mul_f32_e64 v{{[0-9]+}}, abs([[ABS_VALUE]]), v{{[0-9]+}} define void @fabs_fn_fold(float addrspace(1)* %out, float %in0, float %in1) { %fabs = call float @fabs(float %in0) %fmul = fmul float %fabs, %in1 @@ -87,7 +87,7 @@ ; SI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0xb ; VI: s_load_dword [[ABS_VALUE:s[0-9]+]], s[{{[0-9]+:[0-9]+}}], 0x2c ; GCN-NOT: and -; GCN: v_mul_f32_e64 v{{[0-9]+}}, |[[ABS_VALUE]]|, v{{[0-9]+}} +; GCN: v_mul_f32_e64 v{{[0-9]+}}, abs([[ABS_VALUE]]), v{{[0-9]+}} define void @fabs_fold(float addrspace(1)* %out, float %in0, float %in1) { %fabs = call float @llvm.fabs.f32(float %in0) %fmul = fmul 
float %fabs, %in1 Index: test/CodeGen/AMDGPU/ffloor.f64.ll =================================================================== --- test/CodeGen/AMDGPU/ffloor.f64.ll +++ test/CodeGen/AMDGPU/ffloor.f64.ll @@ -43,12 +43,12 @@ ; FUNC-LABEL: {{^}}ffloor_f64_neg_abs: ; CI: v_floor_f64_e64 -; SI: v_fract_f64_e64 {{v[[0-9]+:[0-9]+]}}, -|[[INPUT:s[[0-9]+:[0-9]+]]]| +; SI: v_fract_f64_e64 {{v[[0-9]+:[0-9]+]}}, -abs([[INPUT:s[[0-9]+:[0-9]+]]]) ; SI: v_min_f64 ; SI: v_cmp_class_f64_e64 ; SI: v_cndmask_b32_e64 ; SI: v_cndmask_b32_e64 -; SI: v_add_f64 {{v[[0-9]+:[0-9]+]}}, -|[[INPUT]]| +; SI: v_add_f64 {{v[[0-9]+:[0-9]+]}}, -abs([[INPUT]]) ; SI: s_endpgm define void @ffloor_f64_neg_abs(double addrspace(1)* %out, double %x) { %abs = call double @llvm.fabs.f64(double %x) Index: test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll =================================================================== --- test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll +++ test/CodeGen/AMDGPU/fmul-2-combine-multi-use.ll @@ -42,8 +42,8 @@ } ; GCN-LABEL: {{^}}multiple_use_fadd_fmad: -; GCN-DAG: v_add_f32_e64 [[MUL2:v[0-9]+]], |[[X:s[0-9]+]]|, |s{{[0-9]+}}| -; GCN-DAG: v_mad_f32 [[MAD:v[0-9]+]], 2.0, |[[X]]|, v{{[0-9]+}} +; GCN-DAG: v_add_f32_e64 [[MUL2:v[0-9]+]], abs([[X:s[0-9]+]]), abs(s{{[0-9]+}}) +; GCN-DAG: v_mad_f32 [[MAD:v[0-9]+]], 2.0, abs([[X]]), v{{[0-9]+}} ; GCN-DAG: buffer_store_dword [[MUL2]] ; GCN-DAG: buffer_store_dword [[MAD]] ; GCN: s_endpgm @@ -58,8 +58,8 @@ } ; GCN-LABEL: {{^}}multiple_use_fadd_multi_fmad: -; GCN: v_mad_f32 {{v[0-9]+}}, 2.0, |[[X:s[0-9]+]]|, v{{[0-9]+}} -; GCN: v_mad_f32 {{v[0-9]+}}, 2.0, |[[X]]|, v{{[0-9]+}} +; GCN: v_mad_f32 {{v[0-9]+}}, 2.0, abs([[X:s[0-9]+]]), v{{[0-9]+}} +; GCN: v_mad_f32 {{v[0-9]+}}, 2.0, abs([[X]]), v{{[0-9]+}} define void @multiple_use_fadd_multi_fmad(float addrspace(1)* %out, float %x, float %y, float %z) #0 { %out.gep.1 = getelementptr float, float addrspace(1)* %out, i32 1 %x.abs = call float @llvm.fabs.f32(float %x) Index: 
test/CodeGen/AMDGPU/fneg-fabs.f64.ll =================================================================== --- test/CodeGen/AMDGPU/fneg-fabs.f64.ll +++ test/CodeGen/AMDGPU/fneg-fabs.f64.ll @@ -5,7 +5,7 @@ ; into 2 modifiers, although theoretically that should work. ; FUNC-LABEL: {{^}}fneg_fabs_fadd_f64: -; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|v{{\[[0-9]+:[0-9]+\]}}| +; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -abs(v{{\[[0-9]+:[0-9]+\]}}) define void @fneg_fabs_fadd_f64(double addrspace(1)* %out, double %x, double %y) { %fabs = call double @llvm.fabs.f64(double %x) %fsub = fsub double -0.000000e+00, %fabs @@ -25,7 +25,7 @@ } ; FUNC-LABEL: {{^}}fneg_fabs_fmul_f64: -; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -|{{v\[[0-9]+:[0-9]+\]}}| +; SI: v_mul_f64 {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, -abs({{v\[[0-9]+:[0-9]+\]}}) define void @fneg_fabs_fmul_f64(double addrspace(1)* %out, double %x, double %y) { %fabs = call double @llvm.fabs.f64(double %x) %fsub = fsub double -0.000000e+00, %fabs Index: test/CodeGen/AMDGPU/fneg-fabs.ll =================================================================== --- test/CodeGen/AMDGPU/fneg-fabs.ll +++ test/CodeGen/AMDGPU/fneg-fabs.ll @@ -4,7 +4,7 @@ ; FUNC-LABEL: {{^}}fneg_fabs_fadd_f32: ; SI-NOT: and -; SI: v_sub_f32_e64 {{v[0-9]+}}, {{s[0-9]+}}, |{{v[0-9]+}}| +; SI: v_sub_f32_e64 {{v[0-9]+}}, {{s[0-9]+}}, abs({{v[0-9]+}}) define void @fneg_fabs_fadd_f32(float addrspace(1)* %out, float %x, float %y) { %fabs = call float @llvm.fabs.f32(float %x) %fsub = fsub float -0.000000e+00, %fabs @@ -15,7 +15,7 @@ ; FUNC-LABEL: {{^}}fneg_fabs_fmul_f32: ; SI-NOT: and -; SI: v_mul_f32_e64 {{v[0-9]+}}, {{s[0-9]+}}, -|{{v[0-9]+}}| +; SI: v_mul_f32_e64 {{v[0-9]+}}, {{s[0-9]+}}, -abs({{v[0-9]+}}) ; SI-NOT: and define void @fneg_fabs_fmul_f32(float addrspace(1)* %out, float %x, float %y) { %fabs = call float @llvm.fabs.f32(float %x) Index: test/CodeGen/AMDGPU/fp_to_sint.ll
=================================================================== --- test/CodeGen/AMDGPU/fp_to_sint.ll +++ test/CodeGen/AMDGPU/fp_to_sint.ll @@ -15,7 +15,7 @@ } ; FUNC-LABEL: {{^}}fp_to_sint_i32_fabs: -; SI: v_cvt_i32_f32_e64 v{{[0-9]+}}, |s{{[0-9]+}}|{{$}} +; SI: v_cvt_i32_f32_e64 v{{[0-9]+}}, abs(s{{[0-9]+}}){{$}} define void @fp_to_sint_i32_fabs(i32 addrspace(1)* %out, float %in) { %in.fabs = call float @llvm.fabs.f32(float %in) #0 %conv = fptosi float %in.fabs to i32 Index: test/CodeGen/AMDGPU/fsub64.ll =================================================================== --- test/CodeGen/AMDGPU/fsub64.ll +++ test/CodeGen/AMDGPU/fsub64.ll @@ -15,7 +15,7 @@ } ; SI-LABEL: {{^}}fsub_fabs_f64: -; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -\|v\[[0-9]+:[0-9]+\]\|}} +; SI: v_add_f64 {{v\[[0-9]+:[0-9]+\], v\[[0-9]+:[0-9]+\], -abs\(v\[[0-9]+:[0-9]+\]\)}} define void @fsub_fabs_f64(double addrspace(1)* %out, double addrspace(1)* %in1, double addrspace(1)* %in2) { %r0 = load double, double addrspace(1)* %in1 Index: test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.clamp.ll @@ -21,7 +21,7 @@ ; FUNC-LABEL: {{^}}clamp_fabs_0_1_f32: ; SI: s_load_dword [[ARG:s[0-9]+]], -; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], 0, |[[ARG]]| clamp{{$}} +; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], 0, abs([[ARG]]) clamp{{$}} ; SI: buffer_store_dword [[RESULT]] ; SI: s_endpgm define void @clamp_fabs_0_1_f32(float addrspace(1)* %out, float %src) nounwind { @@ -45,7 +45,7 @@ ; FUNC-LABEL: {{^}}clamp_fneg_fabs_0_1_f32: ; SI: s_load_dword [[ARG:s[0-9]+]], -; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], 0, -|[[ARG]]| clamp{{$}} +; SI: v_add_f32_e64 [[RESULT:v[0-9]+]], 0, -abs([[ARG]]) clamp{{$}} ; SI: buffer_store_dword [[RESULT]] ; SI: s_endpgm define void @clamp_fneg_fabs_0_1_f32(float addrspace(1)* %out, float %src) nounwind { Index: 
test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.class.ll @@ -25,7 +25,7 @@ ; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb ; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc ; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]] -; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]] +; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], abs([[SA]]), [[VB]] ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]] ; SI-NEXT: buffer_store_dword [[RESULT]] ; SI: s_endpgm @@ -57,7 +57,7 @@ ; SI-DAG: s_load_dword [[SA:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xb ; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xc ; SI: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]] -; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]] +; SI: v_cmp_class_f32_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -abs([[SA]]), [[VB]] ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]] ; SI-NEXT: buffer_store_dword [[RESULT]] ; SI: s_endpgm @@ -201,7 +201,7 @@ ; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb ; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd ; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]] -; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], |[[SA]]|, [[VB]] +; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], abs([[SA]]), [[VB]] ; SI-NEXT: v_cndmask_b32_e64 [[RESULT:v[0-9]+]], 0, -1, [[CMP]] ; SI-NEXT: buffer_store_dword [[RESULT]] ; SI: s_endpgm @@ -233,7 +233,7 @@ ; SI-DAG: s_load_dwordx2 [[SA:s\[[0-9]+:[0-9]+\]]], s{{\[[0-9]+:[0-9]+\]}}, 0xb ; SI-DAG: s_load_dword [[SB:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, 0xd ; SI-DAG: v_mov_b32_e32 [[VB:v[0-9]+]], [[SB]] -; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -|[[SA]]|, [[VB]] +; SI: v_cmp_class_f64_e64 [[CMP:s\[[0-9]+:[0-9]+\]]], -abs([[SA]]), [[VB]] ; SI-NEXT: v_cndmask_b32_e64 
[[RESULT:v[0-9]+]], 0, -1, [[CMP]] ; SI-NEXT: buffer_store_dword [[RESULT]] ; SI: s_endpgm Index: test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.div_scale.ll @@ -322,7 +322,7 @@ ; SI-LABEL @test_div_scale_f32_fabs_num: ; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 ; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], |[[A]]| +; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], [[B]], [[B]], abs([[A]]) ; SI: buffer_store_dword [[RESULT0]] ; SI: s_endpgm define void @test_div_scale_f32_fabs_num(float addrspace(1)* %out, float addrspace(1)* %in) nounwind { @@ -344,7 +344,7 @@ ; SI-LABEL @test_div_scale_f32_fabs_den: ; SI-DAG: buffer_load_dword [[A:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 ; SI-DAG: buffer_load_dword [[B:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], |[[B]]|, |[[B]]|, [[A]] +; SI: v_div_scale_f32 [[RESULT0:v[0-9]+]], [[RESULT1:s\[[0-9]+:[0-9]+\]]], abs([[B]]), abs([[B]]), [[A]] ; SI: buffer_store_dword [[RESULT0]] ; SI: s_endpgm define void @test_div_scale_f32_fabs_den(float addrspace(1)* %out, float addrspace(1)* %in) nounwind { Index: test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.fract.f64.ll @@ -41,7 +41,7 @@ } ; FUNC-LABEL: {{^}}fract_f64_neg_abs: -; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], -|v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]| +; GCN: v_fract_f64_e64 [[FRC:v\[[0-9]+:[0-9]+\]]], 
-abs(v{{\[}}[[LO:[0-9]+]]:[[HI:[0-9]+]]]) ; SI: v_mov_b32_e32 v[[UPLO:[0-9]+]], -1 ; SI: v_mov_b32_e32 v[[UPHI:[0-9]+]], 0x3fefffff ; SI: v_min_f64 v{{\[}}[[MINLO:[0-9]+]]:[[MINHI:[0-9]+]]], v{{\[}}[[UPLO]]:[[UPHI]]], [[FRC]] Index: test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll +++ test/CodeGen/AMDGPU/llvm.AMDGPU.fract.ll @@ -50,9 +50,9 @@ } ; FUNC-LABEL: {{^}}fract_f32_neg_abs: -; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT:v[0-9]+]]| -; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -|[[INPUT:v[0-9]+]]| -; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -|[[INPUT]]|, [[FLR]] +; CI: v_fract_f32_e64 [[RESULT:v[0-9]+]], -abs([[INPUT:v[0-9]+]]) +; SI: v_floor_f32_e64 [[FLR:v[0-9]+]], -abs([[INPUT:v[0-9]+]]) +; SI: v_sub_f32_e64 [[RESULT:v[0-9]+]], -abs([[INPUT]]), [[FLR]] ; GCN: buffer_store_dword [[RESULT]] ; EG: FRACT define void @fract_f32_neg_abs(float addrspace(1)* %out, float addrspace(1)* %src) nounwind { Index: test/CodeGen/AMDGPU/llvm.round.ll =================================================================== --- test/CodeGen/AMDGPU/llvm.round.ll +++ test/CodeGen/AMDGPU/llvm.round.ll @@ -9,7 +9,7 @@ ; SI: v_sub_f32_e32 [[SUB:v[0-9]+]], [[SX]], [[TRUNC]] ; SI: v_mov_b32_e32 [[VX:v[0-9]+]], [[SX]] ; SI: v_bfi_b32 [[COPYSIGN:v[0-9]+]], [[K]], 1.0, [[VX]] -; SI: v_cmp_le_f32_e64 vcc, 0.5, |[[SUB]]| +; SI: v_cmp_le_f32_e64 vcc, 0.5, abs([[SUB]]) ; SI: v_cndmask_b32_e32 [[SEL:v[0-9]+]], 0, [[VX]] ; SI: v_add_f32_e32 [[RESULT:v[0-9]+]], [[SEL]], [[TRUNC]] ; SI: buffer_store_dword [[RESULT]] Index: test/CodeGen/AMDGPU/mad-sub.ll =================================================================== --- test/CodeGen/AMDGPU/mad-sub.ll +++ test/CodeGen/AMDGPU/mad-sub.ll @@ -76,7 +76,7 @@ ; SI: buffer_load_dword [[REGA:v[0-9]+]] ; SI: buffer_load_dword [[REGB:v[0-9]+]] ; SI: buffer_load_dword [[REGC:v[0-9]+]] -; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -|[[REGC]]| +; 
SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], [[REGB]], -abs([[REGC]]) ; SI: buffer_store_dword [[RESULT]] define void @mad_sub_fabs_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { %tid = tail call i32 @llvm.r600.read.tidig.x() #0 @@ -101,7 +101,7 @@ ; SI: buffer_load_dword [[REGA:v[0-9]+]] ; SI: buffer_load_dword [[REGB:v[0-9]+]] ; SI: buffer_load_dword [[REGC:v[0-9]+]] -; SI: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], |[[REGC]]| +; SI: v_mad_f32 [[RESULT:v[0-9]+]], -[[REGA]], [[REGB]], abs([[REGC]]) ; SI: buffer_store_dword [[RESULT]] define void @mad_sub_fabs_inv_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { %tid = tail call i32 @llvm.r600.read.tidig.x() #0 @@ -148,7 +148,7 @@ ; SI: buffer_load_dword [[REGA:v[0-9]+]] ; SI: buffer_load_dword [[REGB:v[0-9]+]] ; SI: buffer_load_dword [[REGC:v[0-9]+]] -; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], |[[REGB]]|, -[[REGC]] +; SI: v_mad_f32 [[RESULT:v[0-9]+]], [[REGA]], abs([[REGB]]), -[[REGC]] ; SI: buffer_store_dword [[RESULT]] define void @mad_fabs_sub_f32(float addrspace(1)* noalias nocapture %out, float addrspace(1)* noalias nocapture readonly %ptr) #1 { %tid = tail call i32 @llvm.r600.read.tidig.x() #0 Index: test/CodeGen/AMDGPU/madak.ll =================================================================== --- test/CodeGen/AMDGPU/madak.ll +++ test/CodeGen/AMDGPU/madak.ll @@ -151,7 +151,7 @@ ; GCN-LABEL: {{^}}no_madak_src0_modifier_f32: ; GCN: buffer_load_dword [[VA:v[0-9]+]] ; GCN: buffer_load_dword [[VB:v[0-9]+]] -; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}} +; GCN: v_mad_f32 {{v[0-9]+}}, abs({{v[0-9]+}}), {{v[0-9]+}}, {{[sv][0-9]+}} ; GCN: s_endpgm define void @no_madak_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind { %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone 
@@ -173,7 +173,7 @@ ; GCN-LABEL: {{^}}no_madak_src1_modifier_f32: ; GCN: buffer_load_dword [[VA:v[0-9]+]] ; GCN: buffer_load_dword [[VB:v[0-9]+]] -; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, |{{v[0-9]+}}|, {{[sv][0-9]+}} +; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, abs({{v[0-9]+}}), {{[sv][0-9]+}} ; GCN: s_endpgm define void @no_madak_src1_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in.a, float addrspace(1)* noalias %in.b) nounwind { %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone Index: test/CodeGen/AMDGPU/madmk.ll =================================================================== --- test/CodeGen/AMDGPU/madmk.ll +++ test/CodeGen/AMDGPU/madmk.ll @@ -124,7 +124,7 @@ ; GCN-LABEL: {{^}}no_madmk_src0_modifier_f32: ; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; GCN: v_mad_f32 {{v[0-9]+}}, |{{v[0-9]+}}|, {{v[0-9]+}}, {{[sv][0-9]+}} +; GCN: v_mad_f32 {{v[0-9]+}}, abs({{v[0-9]+}}), {{v[0-9]+}}, {{[sv][0-9]+}} define void @no_madmk_src0_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind readnone %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid @@ -145,7 +145,7 @@ ; GCN-LABEL: {{^}}no_madmk_src2_modifier_f32: ; GCN-DAG: buffer_load_dword [[VA:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64{{$}} ; GCN-DAG: buffer_load_dword [[VB:v[0-9]+]], {{v\[[0-9]+:[0-9]+\]}}, {{s\[[0-9]+:[0-9]+\]}}, 0 addr64 offset:4 -; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, |{{[sv][0-9]+}}| +; GCN: v_mad_f32 {{v[0-9]+}}, {{v[0-9]+}}, {{v[0-9]+}}, abs({{[sv][0-9]+}}) define void @no_madmk_src2_modifier_f32(float addrspace(1)* noalias %out, float addrspace(1)* noalias %in) nounwind { %tid = tail call i32 @llvm.r600.read.tidig.x() nounwind 
readnone %gep.0 = getelementptr float, float addrspace(1)* %in, i32 %tid Index: test/MC/AMDGPU/vop3-errs.s =================================================================== --- test/MC/AMDGPU/vop3-errs.s +++ test/MC/AMDGPU/vop3-errs.s @@ -3,3 +3,13 @@ v_add_f32_e64 v0, v1 // CHECK: error: too few operands for instruction + +v_add_f32_e64 v0, v1, abs(v2 +// CHECK: error: failed parsing operand. + +v_add_f32_e64 v0, v1, |v2| +// CHECK: error: not a valid operand. +// CHECK: error: unexpected token at start of statement + +v_add_f32_e64 v0, v1, absv2 +// CHECK: error: invalid operand for instruction Index: test/MC/AMDGPU/vop3.s =================================================================== --- test/MC/AMDGPU/vop3.s +++ test/MC/AMDGPU/vop3.s @@ -36,29 +36,29 @@ // SICI: v_cmp_lt_f32_e64 s[2:3], -v4, -v6 ; encoding: [0x02,0x00,0x02,0xd0,0x04,0x0d,0x02,0x60] // VI: v_cmp_lt_f32_e64 s[2:3], -v4, -v6 ; encoding: [0x02,0x00,0x41,0xd0,0x04,0x0d,0x02,0x60] -v_cmp_lt_f32 s[2:3] |v4|, v6 -// SICI: v_cmp_lt_f32_e64 s[2:3], |v4|, v6 ; encoding: [0x02,0x01,0x02,0xd0,0x04,0x0d,0x02,0x00] -// VI: v_cmp_lt_f32_e64 s[2:3], |v4|, v6 ; encoding: [0x02,0x01,0x41,0xd0,0x04,0x0d,0x02,0x00] +v_cmp_lt_f32 s[2:3] abs(v4), v6 +// SICI: v_cmp_lt_f32_e64 s[2:3], abs(v4), v6 ; encoding: [0x02,0x01,0x02,0xd0,0x04,0x0d,0x02,0x00] +// VI: v_cmp_lt_f32_e64 s[2:3], abs(v4), v6 ; encoding: [0x02,0x01,0x41,0xd0,0x04,0x0d,0x02,0x00] -v_cmp_lt_f32 s[2:3] v4, |v6| -// SICI: v_cmp_lt_f32_e64 s[2:3], v4, |v6| ; encoding: [0x02,0x02,0x02,0xd0,0x04,0x0d,0x02,0x00] -// VI: v_cmp_lt_f32_e64 s[2:3], v4, |v6| ; encoding: [0x02,0x02,0x41,0xd0,0x04,0x0d,0x02,0x00] +v_cmp_lt_f32 s[2:3] v4, abs(v6) +// SICI: v_cmp_lt_f32_e64 s[2:3], v4, abs(v6) ; encoding: [0x02,0x02,0x02,0xd0,0x04,0x0d,0x02,0x00] +// VI: v_cmp_lt_f32_e64 s[2:3], v4, abs(v6) ; encoding: [0x02,0x02,0x41,0xd0,0x04,0x0d,0x02,0x00] -v_cmp_lt_f32 s[2:3] |v4|, |v6| -// SICI: v_cmp_lt_f32_e64 s[2:3], |v4|, |v6| ; encoding: 
[0x02,0x03,0x02,0xd0,0x04,0x0d,0x02,0x00] -// VI: v_cmp_lt_f32_e64 s[2:3], |v4|, |v6| ; encoding: [0x02,0x03,0x41,0xd0,0x04,0x0d,0x02,0x00] +v_cmp_lt_f32 s[2:3] abs(v4), abs(v6) +// SICI: v_cmp_lt_f32_e64 s[2:3], abs(v4), abs(v6) ; encoding: [0x02,0x03,0x02,0xd0,0x04,0x0d,0x02,0x00] +// VI: v_cmp_lt_f32_e64 s[2:3], abs(v4), abs(v6) ; encoding: [0x02,0x03,0x41,0xd0,0x04,0x0d,0x02,0x00] -v_cmp_lt_f32 s[2:3] -|v4|, v6 -// SICI: v_cmp_lt_f32_e64 s[2:3], -|v4|, v6 ; encoding: [0x02,0x01,0x02,0xd0,0x04,0x0d,0x02,0x20] -// VI: v_cmp_lt_f32_e64 s[2:3], -|v4|, v6 ; encoding: [0x02,0x01,0x41,0xd0,0x04,0x0d,0x02,0x20] +v_cmp_lt_f32 s[2:3] -abs(v4), v6 +// SICI: v_cmp_lt_f32_e64 s[2:3], -abs(v4), v6 ; encoding: [0x02,0x01,0x02,0xd0,0x04,0x0d,0x02,0x20] +// VI: v_cmp_lt_f32_e64 s[2:3], -abs(v4), v6 ; encoding: [0x02,0x01,0x41,0xd0,0x04,0x0d,0x02,0x20] -v_cmp_lt_f32 s[2:3] v4, -|v6| -// SICI: v_cmp_lt_f32_e64 s[2:3], v4, -|v6| ; encoding: [0x02,0x02,0x02,0xd0,0x04,0x0d,0x02,0x40] -// VI: v_cmp_lt_f32_e64 s[2:3], v4, -|v6| ; encoding: [0x02,0x02,0x41,0xd0,0x04,0x0d,0x02,0x40] +v_cmp_lt_f32 s[2:3] v4, -abs(v6) +// SICI: v_cmp_lt_f32_e64 s[2:3], v4, -abs(v6) ; encoding: [0x02,0x02,0x02,0xd0,0x04,0x0d,0x02,0x40] +// VI: v_cmp_lt_f32_e64 s[2:3], v4, -abs(v6) ; encoding: [0x02,0x02,0x41,0xd0,0x04,0x0d,0x02,0x40] -v_cmp_lt_f32 s[2:3] -|v4|, -|v6| -// SICI: v_cmp_lt_f32_e64 s[2:3], -|v4|, -|v6| ; encoding: [0x02,0x03,0x02,0xd0,0x04,0x0d,0x02,0x60] -// VI: v_cmp_lt_f32_e64 s[2:3], -|v4|, -|v6| ; encoding: [0x02,0x03,0x41,0xd0,0x04,0x0d,0x02,0x60] +v_cmp_lt_f32 s[2:3] -abs(v4), -abs(v6) +// SICI: v_cmp_lt_f32_e64 s[2:3], -abs(v4), -abs(v6) ; encoding: [0x02,0x03,0x02,0xd0,0x04,0x0d,0x02,0x60] +// VI: v_cmp_lt_f32_e64 s[2:3], -abs(v4), -abs(v6) ; encoding: [0x02,0x03,0x41,0xd0,0x04,0x0d,0x02,0x60] // // Instruction tests: @@ -137,19 +137,19 @@ // // Modifier tests: -// +// v_fract_f32 v1, -v2 // SICI: v_fract_f32_e64 v1, -v2 ; encoding: [0x01,0x00,0x40,0xd3,0x02,0x01,0x00,0x20] // VI: 
v_fract_f32_e64 v1, -v2 ; encoding: [0x01,0x00,0x5b,0xd1,0x02,0x01,0x00,0x20] -v_fract_f32 v1, |v2| -// SICI: v_fract_f32_e64 v1, |v2| ; encoding: [0x01,0x01,0x40,0xd3,0x02,0x01,0x00,0x00] -// VI: v_fract_f32_e64 v1, |v2| ; encoding: [0x01,0x01,0x5b,0xd1,0x02,0x01,0x00,0x00] +v_fract_f32 v1, abs(v2) +// SICI: v_fract_f32_e64 v1, abs(v2) ; encoding: [0x01,0x01,0x40,0xd3,0x02,0x01,0x00,0x00] +// VI: v_fract_f32_e64 v1, abs(v2) ; encoding: [0x01,0x01,0x5b,0xd1,0x02,0x01,0x00,0x00] -v_fract_f32 v1, -|v2| -// SICI: v_fract_f32_e64 v1, -|v2| ; encoding: [0x01,0x01,0x40,0xd3,0x02,0x01,0x00,0x20] -// VI: v_fract_f32_e64 v1, -|v2| ; encoding: [0x01,0x01,0x5b,0xd1,0x02,0x01,0x00,0x20] +v_fract_f32 v1, -abs(v2) +// SICI: v_fract_f32_e64 v1, -abs(v2) ; encoding: [0x01,0x01,0x40,0xd3,0x02,0x01,0x00,0x20] +// VI: v_fract_f32_e64 v1, -abs(v2) ; encoding: [0x01,0x01,0x5b,0xd1,0x02,0x01,0x00,0x20] v_fract_f32 v1, v2 clamp // SICI: v_fract_f32_e64 v1, v2 clamp ; encoding: [0x01,0x08,0x40,0xd3,0x02,0x01,0x00,0x00]