diff --git a/llvm/docs/LangRef.rst b/llvm/docs/LangRef.rst --- a/llvm/docs/LangRef.rst +++ b/llvm/docs/LangRef.rst @@ -157,15 +157,12 @@ @.str = private unnamed_addr constant [13 x i8] c"hello world\0A\00" ; External declaration of the puts function - declare i32 @puts(i8* nocapture) nounwind + declare i32 @puts(ptr nocapture) nounwind ; Definition of main function - define i32 @main() { ; i32()* - ; Convert [13 x i8]* to i8*... - %cast210 = getelementptr [13 x i8], [13 x i8]* @.str, i64 0, i64 0 - + define i32 @main() { ; Call puts function to write out the string to stdout. - call i32 @puts(i8* %cast210) + call i32 @puts(ptr @.str) ret i32 0 } @@ -1075,7 +1072,7 @@ .. code-block:: llvm - declare i32 @printf(i8* noalias nocapture, ...) + declare i32 @printf(ptr noalias nocapture, ...) declare i32 @atoi(i8 zeroext) declare signext i8 @returns_signed_char() @@ -1283,11 +1280,11 @@ .. code-block:: llvm - define void @f(i8* nocapture %a, i8* %b) { + define void @f(ptr nocapture %a, ptr %b) { ; (capture %b) } - call void @f(i8* @glb, i8* @glb) ; well-defined + call void @f(ptr @glb, ptr @glb) ; well-defined ``nofree`` This indicates that callee does not free the pointer argument. This is not @@ -1459,9 +1456,8 @@ .. code-block:: llvm - %0 = bitcast void* () @f to i32* - %a = getelementptr inbounds i32, i32* %0, i32 -1 - %b = load i32, i32* %a + %a = getelementptr inbounds i32, ptr @f, i32 -1 + %b = load i32, ptr %a Prefix data is laid out as if it were an initializer for a global variable of the prefix data's type. The function will be placed such that the @@ -1506,9 +1502,9 @@ .. code-block:: text - %0 = type <{ i8, i8, i8* }> + %0 = type <{ i8, i8, ptr }> - define void @f() prologue %0 <{ i8 235, i8 8, i8* @md}> { ... } + define void @f() prologue %0 <{ i8 235, i8 8, ptr @md}> { ... } A function may have prologue data but no body. 
This has similar semantics to the ``available_externally`` linkage in that the data may be used by the @@ -2439,7 +2435,7 @@ define void @f() { call void @x() ;; no deopt state call void @y() [ "deopt"(i32 10) ] - call void @y() [ "deopt"(i32 10), "unknown"(i8* null) ] + call void @y() [ "deopt"(i32 10), "unknown"(ptr null) ] ret void } @@ -2455,7 +2451,7 @@ define void @g() { call void @x() ;; still no deopt state call void @y() [ "deopt"(i32 20, i32 10) ] - call void @y() [ "deopt"(i32 20, i32 10), "unknown"(i8* null) ] + call void @y() [ "deopt"(i32 20, i32 10), "unknown"(ptr null) ] ret void } @@ -2535,14 +2531,14 @@ .. code-block:: llvm - call void @llvm.assume(i1 true) ["align"(i32* %val, i32 8)] + call void @llvm.assume(i1 true) ["align"(ptr %val, i32 8)] allows the optimizer to assume that at location of call to :ref:`llvm.assume ` ``%val`` has an alignment of at least 8. .. code-block:: llvm - call void @llvm.assume(i1 %cond) ["cold"(), "nonnull"(i64* %val)] + call void @llvm.assume(i1 %cond) ["cold"(), "nonnull"(ptr %val)] allows the optimizer to assume that the :ref:`llvm.assume ` call location is cold and that ``%val`` may not be null. @@ -2555,7 +2551,7 @@ .. code-block:: llvm - call void @llvm.assume(i1 true) ["align"(i32* %val, i32 %align)] + call void @llvm.assume(i1 true) ["align"(ptr %val, i32 %align)] If the operand bundle value violates any requirements on the attribute value, the behavior is undefined, unless one of the following exceptions applies: @@ -2571,7 +2567,7 @@ * Attributes that can be expressed via operand bundles are directly the property that the optimizer uses and cares about. Encoding attributes as operand bundles removes the need for an instruction sequence that represents - the property (e.g., `icmp ne i32* %p, null` for `nonnull`) and for the + the property (e.g., `icmp ne ptr %p, null` for `nonnull`) and for the optimizer to deduce the property from that instruction sequence. 
* Expressing the property using operand bundles makes it easy to identify the use of the value as a use in an :ref:`llvm.assume `. This then @@ -2600,10 +2596,9 @@ ... %t = call token @llvm.call.preallocated.setup(i32 1) - %a = call i8* @llvm.call.preallocated.arg(token %t, i32 0) preallocated(%foo) - %b = bitcast i8* %a to %foo* + %a = call ptr @llvm.call.preallocated.arg(token %t, i32 0) preallocated(%foo) ; initialize %b - call void @bar(i32 42, %foo* preallocated(%foo) %b) ["preallocated"(token %t)] + call void @bar(i32 42, ptr preallocated(%foo) %a) ["preallocated"(token %t)] .. _ob_gc_live: @@ -2634,8 +2629,8 @@ ; The marker instruction and a runtime function call are inserted after the call ; to @foo. - call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_retainAutoreleasedReturnValue) ] - call i8* @foo() [ "clang.arc.attachedcall"(i8* (i8*)* @objc_unsafeClaimAutoreleasedReturnValue) ] + call ptr @foo() [ "clang.arc.attachedcall"(ptr @objc_retainAutoreleasedReturnValue) ] + call ptr @foo() [ "clang.arc.attachedcall"(ptr @objc_unsafeClaimAutoreleasedReturnValue) ] The operand bundle is needed to ensure the call is immediately followed by the marker instruction and the ObjC runtime call in the final output. @@ -2967,26 +2962,26 @@ .. 
code-block:: llvm - @glb = global i8* null - @glb2 = global i8* null - @glb3 = global i8* null + @glb = global ptr null + @glb2 = global ptr null + @glb3 = global ptr null @glbi = global i32 0 - define i8* @f(i8* %a, i8* %b, i8* %c, i8* %d, i8* %e) { - store i8* %a, i8** @glb ; %a is captured by this call + define ptr @f(ptr %a, ptr %b, ptr %c, ptr %d, ptr %e) { + store ptr %a, ptr @glb ; %a is captured by this call - store i8* %b, i8** @glb2 ; %b isn't captured because the stored value is overwritten by the store below - store i8* null, i8** @glb2 + store ptr %b, ptr @glb2 ; %b isn't captured because the stored value is overwritten by the store below + store ptr null, ptr @glb2 - store i8* %c, i8** @glb3 + store ptr %c, ptr @glb3 call void @g() ; If @g makes a copy of %c that outlives this call (@f), %c is captured - store i8* null, i8** @glb3 + store ptr null, ptr @glb3 - %i = ptrtoint i8* %d to i64 + %i = ptrtoint ptr %d to i64 %j = trunc i64 %i to i32 - store i32 %j, i32* @glbi ; %d is captured + store i32 %j, ptr @glbi ; %d is captured - ret i8* %e ; %e is captured + ret ptr %e ; %e is captured } 2. The call stores any bit of the pointer carrying information into a place, @@ -2997,10 +2992,10 @@ @lock = global i1 true - define void @f(i8* %a) { - store i8* %a, i8** @glb - store atomic i1 false, i1* @lock release ; %a is captured because another thread can safely read @glb - store i8* null, i8** @glb + define void @f(ptr %a) { + store ptr %a, ptr @glb + store atomic i1 false, ptr @lock release ; %a is captured because another thread can safely read @glb + store ptr null, ptr @glb ret void } @@ -3010,8 +3005,8 @@ @glb = global i8 0 - define void @f(i8* %a) { - %c = icmp eq i8* %a, @glb + define void @f(ptr %a) { + %c = icmp eq ptr %a, @glb br i1 %c, label %BB_EXIT, label %BB_CONTINUE ; escapes %a BB_EXIT: call void @exit() @@ -3341,7 +3336,7 @@ } ; At global scope. 
- uselistorder i32* @global, { 1, 2, 0 } + uselistorder ptr @global, { 1, 2, 0 } uselistorder i32 7, { 1, 0 } uselistorder i32 (i32) @bar, { 1, 0 } uselistorder_bb @foo, %bb, { 5, 1, 3, 2, 0, 4 } @@ -3428,9 +3423,7 @@ +---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | ``i32 (i32)`` | function taking an ``i32``, returning an ``i32`` | +---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``float (i16, i32 *) *`` | :ref:`Pointer ` to a function that takes an ``i16`` and a :ref:`pointer ` to ``i32``, returning ``float``. | -+---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``i32 (i8*, ...)`` | A vararg function that takes at least one :ref:`pointer ` to ``i8`` (char in C), which returns an integer. This is the signature for ``printf`` in LLVM. | +| ``i32 (ptr, ...)`` | A vararg function that takes at least one :ref:`pointer ` argument and returns an integer. This is the signature for ``printf`` in LLVM. | +---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | ``{i32, i32} (i32)`` | A function taking an ``i32``, returning a :ref:`structure ` containing two ``i32`` values | +---------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -3622,7 +3615,7 @@ ; concatenating the values: ; %val now has the hexadecimal value 0x1235. 
- store i16 %val, i16* %ptr + store i16 %val, ptr %ptr ; In memory the content will be (8-bit addressing): ; @@ -3639,7 +3632,7 @@ ; concatenating the values: ; %val now has the hexadecimal value 0x5321. - store i16 %val, i16* %ptr + store i16 %val, ptr %ptr ; In memory the content will be (8-bit addressing): ; @@ -3676,7 +3669,7 @@ +------------------------+----------------------------------------------------+ | ``<2 x i64>`` | Vector of 2 64-bit integer values. | +------------------------+----------------------------------------------------+ -| ``<4 x i64*>`` | Vector of 4 pointers to 64-bit integer values. | +| ``<4 x ptr>`` | Vector of 4 pointers | +------------------------+----------------------------------------------------+ | ```` | Vector with a multiple of 4 32-bit integer values. | +------------------------+----------------------------------------------------+ @@ -3813,7 +3806,7 @@ required to match what the underlying code generator expects. Structures can either be "literal" or "identified". A literal structure -is defined inline with other types (e.g. ``{i32, i32}*``) whereas +is defined inline with other types (e.g. ``[2 x {i32, i32}]``) whereas identified types are always defined at the top level with a name. Literal types are uniqued by their contents and can never be recursive or opaque since there is no way to write one. 
Identified types can be @@ -3831,7 +3824,7 @@ +------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | ``{ i32, i32, i32 }`` | A triple of three ``i32`` values | +------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ -| ``{ float, i32 (i32) * }`` | A pair, where the first element is a ``float`` and the second element is a :ref:`pointer ` to a :ref:`function ` that takes an ``i32``, returning an ``i32``. | +| ``{ float, ptr }`` | A pair, where the first element is a ``float`` and the second element is a :ref:`pointer `. | +------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ | ``<{ i8, i32 }>`` | A packed struct known to be 5 bytes in size. | +------------------------------+---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+ @@ -3934,7 +3927,7 @@ Structure constants are represented with notation similar to structure type definitions (a comma separated list of elements, surrounded by braces (``{}``)). For example: - "``{ i32 4, float 17.0, i32* @G }``", where "``@G``" is declared as + "``{ i32 4, float 17.0, ptr @G }``", where "``@G``" is declared as "``@G = external global i32``". Structure constants must have :ref:`structure type `, and the number and types of elements must match those specified by the type. @@ -3963,7 +3956,7 @@ **Metadata node** A metadata node is a constant tuple without types. For example: "``!{!0, !{!2, !0}, !"test"}``". 
Metadata can reference constant values, - for example: "``!{!0, i32 0, i8* @global, i64 (i64)* @function, !"str"}``". + for example: "``!{!0, i32 0, ptr @global, ptr @function, !"str"}``". Unlike other typed constants that are meant to be interpreted as part of the instruction stream, metadata is a place to attach additional information such as debug info. @@ -3982,7 +3975,7 @@ @X = global i32 17 @Y = global i32 42 - @Z = global [2 x i32*] [ i32* @X, i32* @Y ] + @Z = global [2 x ptr] [ ptr @X, ptr @Y ] .. _undefvalues: @@ -4150,8 +4143,8 @@ %X = and i32 undef, 255 switch %X, label %ret [ .. ] ; UB - store undef, i8* %ptr - %X = load i8* %ptr ; %X is undef + store undef, ptr %ptr + %X = load ptr %ptr ; %X is undef switch i8 %X, label %ret [ .. ] ; UB Safe: @@ -4210,17 +4203,15 @@ %poison = sub nuw i32 0, 1 ; Results in a poison value. %poison2 = sub i32 poison, 1 ; Also results in a poison value. %still_poison = and i32 %poison, 0 ; 0, but also poison. - %poison_yet_again = getelementptr i32, i32* @h, i32 %still_poison - store i32 0, i32* %poison_yet_again ; Undefined behavior due to + %poison_yet_again = getelementptr i32, ptr @h, i32 %still_poison + store i32 0, ptr %poison_yet_again ; Undefined behavior due to ; store to poison. - store i32 %poison, i32* @g ; Poison value stored to memory. - %poison3 = load i32, i32* @g ; Poison value loaded back from memory. + store i32 %poison, ptr @g ; Poison value stored to memory. + %poison3 = load i32, ptr @g ; Poison value loaded back from memory. - %narrowaddr = bitcast i32* @g to i16* - %wideaddr = bitcast i32* @g to i64* - %poison4 = load i16, i16* %narrowaddr ; Returns a poison value. - %poison5 = load i64, i64* %wideaddr ; Returns a poison value. + %poison4 = load i16, ptr @g ; Returns a poison value. + %poison5 = load i64, ptr @g ; Returns a poison value. %cmp = icmp slt i32 %poison, 0 ; Returns a poison value. 
br i1 %cmp, label %end, label %end ; undefined behavior @@ -4253,7 -4244,7 @@ The '``blockaddress``' constant computes the address of the specified basic block in the specified function. -It always has an ``i8 addrspace(P)*`` type, where ``P`` is the address space +It always has a ``ptr addrspace(P)`` type, where ``P`` is the address space of the function containing ``%block`` (usually ``addrspace(0)``). Taking the address of the entry block is illegal. @@ -6212,19 +6203,19 @@ !7 = !{!3} ; These two instructions don't alias: - %0 = load float, float* %c, align 4, !alias.scope !5 - store float %0, float* %arrayidx.i, align 4, !noalias !5 + %0 = load float, ptr %c, align 4, !alias.scope !5 + store float %0, ptr %arrayidx.i, align 4, !noalias !5 ; These two instructions also don't alias (for domain !1, the set of scopes ; in the !alias.scope equals that in the !noalias list): - %2 = load float, float* %c, align 4, !alias.scope !5 - store float %2, float* %arrayidx.i2, align 4, !noalias !6 + %2 = load float, ptr %c, align 4, !alias.scope !5 + store float %2, ptr %arrayidx.i2, align 4, !noalias !6 ; These two instructions may alias (for domain !0, the set of scopes in ; the !noalias list is not a superset of, or equal to, the scopes in the ; !alias.scope list): - %2 = load float, float* %c, align 4, !alias.scope !6 - store float %0, float* %arrayidx.i, align 4, !noalias !7 + %2 = load float, ptr %c, align 4, !alias.scope !6 + store float %0, ptr %arrayidx.i, align 4, !noalias !7 '``fpmath``' Metadata ^^^^^^^^^^^^^^^^^^^^^ @@ -6275,8 +6266,8 @@ .. 
code-block:: llvm - %a = load i8, i8* %x, align 1, !range !0 ; Can only be 0 or 1 - %b = load i8, i8* %y, align 1, !range !1 ; Can only be 255 (-1), 0 or 1 + %a = load i8, ptr %x, align 1, !range !0 ; Can only be 0 or 1 + %b = load i8, ptr %y, align 1, !range !1 ; Can only be 255 (-1), 0 or 1 %c = call i8 @foo(), !range !2 ; Can only be 0, 1, 3, 4 or 5 %d = invoke i8 @bar() to label %cont unwind label %lpad, !range !3 ; Can only be -2, -1, 3, 4 or 5 @@ -6323,7 +6314,7 @@ %result = call i64 %binop(i64 %x, i64 %y), !callees !0 ... - !0 = !{i64 (i64, i64)* @add, i64 (i64, i64)* @sub} + !0 = !{ptr @add, ptr @sub} '``callback``' Metadata ^^^^^^^^^^^^^^^^^^^^^^^ @@ -6374,7 +6365,7 @@ .. code-block:: text - declare !callback !1 dso_local i32 @pthread_create(i64*, %union.pthread_attr_t*, i8* (i8*)*, i8*) + declare !callback !1 dso_local i32 @pthread_create(ptr, ptr, ptr, ptr) ... !2 = !{i64 2, i64 3, i1 false} @@ -6392,7 +6383,7 @@ .. code-block:: text - declare !callback !0 dso_local void @__kmpc_fork_call(%struct.ident_t*, i32, void (i32*, i32*, ...)*, ...) + declare !callback !0 dso_local void @__kmpc_fork_call(ptr, i32, ptr, ...) ... !1 = !{i64 2, i64 -1, i64 -1, i1 true} @@ -6874,7 +6865,7 @@ .. code-block:: llvm - %val = load i32, i32* %arrayidx, !llvm.access.group !0 + %val = load i32, ptr %arrayidx, !llvm.access.group !0 ... !0 = !{!1, !2} !1 = distinct !{} @@ -6935,9 +6926,9 @@ for.body: ... - %val0 = load i32, i32* %arrayidx, !llvm.access.group !1 + %val0 = load i32, ptr %arrayidx, !llvm.access.group !1 ... - store i32 %val0, i32* %arrayidx1, !llvm.access.group !1 + store i32 %val0, ptr %arrayidx1, !llvm.access.group !1 ... br i1 %exitcond, label %for.end, label %for.body, !llvm.loop !0 @@ -6952,21 +6943,21 @@ outer.for.body: ... - %val1 = load i32, i32* %arrayidx3, !llvm.access.group !4 + %val1 = load i32, ptr %arrayidx3, !llvm.access.group !4 ... br label %inner.for.body inner.for.body: ... 
- %val0 = load i32, i32* %arrayidx1, !llvm.access.group !3 + %val0 = load i32, ptr %arrayidx1, !llvm.access.group !3 ... - store i32 %val0, i32* %arrayidx2, !llvm.access.group !3 + store i32 %val0, ptr %arrayidx2, !llvm.access.group !3 ... br i1 %exitcond, label %inner.for.end, label %inner.for.body, !llvm.loop !1 inner.for.end: ... - store i32 %val1, i32* %arrayidx4, !llvm.access.group !4 + store i32 %val1, ptr %arrayidx4, !llvm.access.group !4 ... br i1 %exitcond, label %outer.for.end, label %outer.for.body, !llvm.loop !2 @@ -7032,26 +7023,26 @@ @unknownPtr = external global i8 ... %ptr = alloca i8 - store i8 42, i8* %ptr, !invariant.group !0 - call void @foo(i8* %ptr) + store i8 42, ptr %ptr, !invariant.group !0 + call void @foo(ptr %ptr) - %a = load i8, i8* %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change - call void @foo(i8* %ptr) + %a = load i8, ptr %ptr, !invariant.group !0 ; Can assume that value under %ptr didn't change + call void @foo(ptr %ptr) - %newPtr = call i8* @getPointer(i8* %ptr) - %c = load i8, i8* %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr + %newPtr = call ptr @getPointer(ptr %ptr) + %c = load i8, ptr %newPtr, !invariant.group !0 ; Can't assume anything, because we only have information about %ptr - %unknownValue = load i8, i8* @unknownPtr - store i8 %unknownValue, i8* %ptr, !invariant.group !0 ; Can assume that %unknownValue == 42 + %unknownValue = load i8, ptr @unknownPtr + store i8 %unknownValue, ptr %ptr, !invariant.group !0 ; Can assume that %unknownValue == 42 - call void @foo(i8* %ptr) - %newPtr2 = call i8* @llvm.launder.invariant.group(i8* %ptr) - %d = load i8, i8* %newPtr2, !invariant.group !0 ; Can't step through launder.invariant.group to get value of %ptr + call void @foo(ptr %ptr) + %newPtr2 = call ptr @llvm.launder.invariant.group.p0(ptr %ptr) + %d = load i8, ptr %newPtr2, !invariant.group !0 ; Can't step through launder.invariant.group to get value of 
%ptr ... - declare void @foo(i8*) - declare i8* @getPointer(i8*) - declare i8* @llvm.launder.invariant.group(i8*) + declare void @foo(ptr) + declare ptr @getPointer(ptr) + declare ptr @llvm.launder.invariant.group.p0(ptr) !0 = !{} @@ -7061,9 +7052,9 @@ .. code-block:: llvm - %v = load i8, i8* %x, !invariant.group !0 + %v = load i8, ptr %x, !invariant.group !0 ; if %x mustalias %y then we can replace the above instruction with - %v = load i8, i8* %y + %v = load i8, ptr %y Note that this is an experimental feature, which means that its semantics might change in the future. @@ -7109,7 +7100,7 @@ $a = comdat any @a = global i32 1, comdat $a @b = internal global i32 2, comdat $a, section "abc", !associated !0 - !0 = !{i32* @a} + !0 = !{ptr @a} '``prof``' Metadata @@ -7192,7 +7183,7 @@ .. code-block:: text - %a.addr = alloca float*, align 8, !annotation !0 + %a.addr = alloca ptr, align 8, !annotation !0 !0 = !{!"auto-init"} '``func_sanitize``' Metadata @@ -7209,11 +7200,11 @@ .. code-block:: text - @__llvm_rtti_proxy = private unnamed_addr constant i8* bitcast ({ i8*, i8* }* @_ZTIFvvE to i8*) + @__llvm_rtti_proxy = private unnamed_addr constant ptr @_ZTIFvvE define void @_Z3funv() !func_sanitize !0 { return void } - !0 = !{i32 846595819, i8** @__llvm_rtti_proxy} + !0 = !{i32 846595819, ptr @__llvm_rtti_proxy} Module Flags Metadata ===================== @@ -7726,13 +7717,13 @@ .. 
code-block:: text - define i64 @foo(i64* %0, i32* %1, i8* %2, i8 %3) { - store i32* %1, i32** @x - %5 = getelementptr inbounds i8, i8* %2, i64 5 - %6 = load i8, i8* %5 - %7 = getelementptr inbounds i8, i8* %2, i8 %3 - tail call void @bar(i8 %3, i8* %7) - %8 = load i64, i64* %0 + define i64 @foo(ptr %0, ptr %1, ptr %2, i8 %3) { + store ptr %1, ptr @x + %5 = getelementptr inbounds i8, ptr %2, i64 5 + %6 = load i8, ptr %5 + %7 = getelementptr inbounds i8, ptr %2, i8 %3 + tail call void @bar(i8 %3, ptr %7) + %8 = load i64, ptr %0 ret i64 %8 } @@ -7924,9 +7915,9 @@ @X = global i8 4 @Y = global i32 123 - @llvm.used = appending global [2 x i8*] [ - i8* @X, - i8* bitcast (i32* @Y to i8*) + @llvm.used = appending global [2 x ptr] [ + ptr @X, + ptr @Y ], section "llvm.metadata" If a symbol appears in the ``@llvm.used`` list, then the compiler, assembler, @@ -7962,8 +7953,8 @@ .. code-block:: llvm - %0 = type { i32, void ()*, i8* } - @llvm.global_ctors = appending global [1 x %0] [%0 { i32 65535, void ()* @ctor, i8* @data }] + %0 = type { i32, ptr, ptr } + @llvm.global_ctors = appending global [1 x %0] [%0 { i32 65535, ptr @ctor, ptr @data }] The ``@llvm.global_ctors`` array contains a list of constructor functions, priorities, and an associated global or function. @@ -7983,8 +7974,8 @@ .. code-block:: llvm - %0 = type { i32, void ()*, i8* } - @llvm.global_dtors = appending global [1 x %0] [%0 { i32 65535, void ()* @dtor, i8* @data }] + %0 = type { i32, ptr, ptr } + @llvm.global_dtors = appending global [1 x %0] [%0 { i32 65535, ptr @dtor, ptr @data }] The ``@llvm.global_dtors`` array contains a list of destructor functions, priorities, and an associated global or function. @@ -8209,7 +8200,7 @@ :: - indirectbr *
, [ label , label , ... ] + indirectbr ptr
, [ label , label , ... ] Overview: """"""""" @@ -8250,7 +8241,7 @@ .. code-block:: llvm - indirectbr i8* %Addr, [ label %bb1, label %bb2, label %bb3 ] + indirectbr ptr %Addr, [ label %bb1, label %bb2, label %bb3 ] .. _i_invoke: @@ -8477,7 +8468,7 @@ .. code-block:: llvm - resume { i8*, i32 } %exn + resume { ptr, i32 } %exn .. _i_catchswitch: @@ -9953,10 +9944,10 @@ .. code-block:: llvm - %ptr = alloca i32 ; yields i32*:ptr - %ptr = alloca i32, i32 4 ; yields i32*:ptr - %ptr = alloca i32, i32 4, align 1024 ; yields i32*:ptr - %ptr = alloca i32, align 1024 ; yields i32*:ptr + %ptr = alloca i32 ; yields ptr + %ptr = alloca i32, i32 4 ; yields ptr + %ptr = alloca i32, i32 4, align 1024 ; yields ptr + %ptr = alloca i32, align 1024 ; yields ptr .. _i_load: @@ -9968,8 +9959,8 @@ :: - = load [volatile] , * [, align ][, !nontemporal !][, !invariant.load !][, !invariant.group !][, !nonnull !][, !dereferenceable !][, !dereferenceable_or_null !][, !align !][, !noundef !] - = load atomic [volatile] , * [syncscope("")] , align [, !invariant.group !] + = load [volatile] , ptr [, align ][, !nontemporal !][, !invariant.load !][, !invariant.group !][, !nonnull !][, !dereferenceable !][, !dereferenceable_or_null !][, !align !][, !noundef !] + = load atomic [volatile] , ptr [syncscope("")] , align [, !invariant.group !] ! = !{ i32 1 } ! = !{} ! = !{ i64 } @@ -10089,9 +10080,9 @@ .. code-block:: llvm - %ptr = alloca i32 ; yields i32*:ptr - store i32 3, i32* %ptr ; yields void - %val = load i32, i32* %ptr ; yields i32:val = i32 3 + %ptr = alloca i32 ; yields ptr + store i32 3, ptr %ptr ; yields void + %val = load i32, ptr %ptr ; yields i32:val = i32 3 .. _i_store: @@ -10103,8 +10094,8 @@ :: - store [volatile] , * [, align ][, !nontemporal !][, !invariant.group !] ; yields void - store atomic [volatile] , * [syncscope("")] , align [, !invariant.group !] ; yields void + store [volatile] , ptr [, align ][, !nontemporal !][, !invariant.group !] 
; yields void + store atomic [volatile] , ptr [syncscope("")] , align [, !invariant.group !] ; yields void ! = !{ i32 1 } ! = !{} @@ -10183,9 +10174,9 @@ .. code-block:: llvm - %ptr = alloca i32 ; yields i32*:ptr - store i32 3, i32* %ptr ; yields void - %val = load i32, i32* %ptr ; yields i32:val = i32 3 + %ptr = alloca i32 ; yields ptr + store i32 3, ptr %ptr ; yields void + %val = load i32, ptr %ptr ; yields i32:val = i32 3 .. _i_fence: @@ -10253,7 +10244,7 @@ :: - cmpxchg [weak] [volatile] * , , [syncscope("")] [, align ] ; yields { ty, i1 } + cmpxchg [weak] [volatile] ptr , , [syncscope("")] [, align ] ; yields { ty, i1 } Overview: """"""""" @@ -10319,13 +10310,13 @@ .. code-block:: llvm entry: - %orig = load atomic i32, i32* %ptr unordered, align 4 ; yields i32 + %orig = load atomic i32, ptr %ptr unordered, align 4 ; yields i32 br label %loop loop: %cmp = phi i32 [ %orig, %entry ], [%value_loaded, %loop] %squared = mul i32 %cmp, %cmp - %val_success = cmpxchg i32* %ptr, i32 %cmp, i32 %squared acq_rel monotonic ; yields { i32, i1 } + %val_success = cmpxchg ptr %ptr, i32 %cmp, i32 %squared acq_rel monotonic ; yields { i32, i1 } %value_loaded = extractvalue { i32, i1 } %val_success, 0 %success = extractvalue { i32, i1 } %val_success, 1 br i1 %success, label %done, label %loop @@ -10343,7 +10334,7 @@ :: - atomicrmw [volatile] * , [syncscope("")] [, align ] ; yields ty + atomicrmw [volatile] ptr , [syncscope("")] [, align ] ; yields ty Overview: """"""""" @@ -10422,7 +10413,7 @@ .. code-block:: llvm - %old = atomicrmw add i32* %ptr, i32 1 acquire ; yields i32 + %old = atomicrmw add ptr %ptr, i32 1 acquire ; yields i32 .. 
_i_getelementptr: @@ -10434,9 +10425,9 @@ :: - = getelementptr , * {, [inrange] }* - = getelementptr inbounds , * {, [inrange] }* - = getelementptr , , [inrange] + = getelementptr , ptr {, [inrange] }* + = getelementptr inbounds ptr {, [inrange] }* + = getelementptr , , [inrange] Overview: """"""""" @@ -10497,10 +10488,10 @@ %struct.RT = type { i8, [10 x [20 x i32]], i8 } %struct.ST = type { i32, double, %struct.RT } - define i32* @foo(%struct.ST* %s) nounwind uwtable readnone optsize ssp { + define ptr @foo(ptr %s) nounwind uwtable readnone optsize ssp { entry: - %arrayidx = getelementptr inbounds %struct.ST, %struct.ST* %s, i64 1, i32 2, i32 1, i64 5, i64 13 - ret i32* %arrayidx + %arrayidx = getelementptr inbounds %struct.ST, ptr %s, i64 1, i32 2, i32 1, i64 5, i64 13 + ret ptr %arrayidx } Semantics: @@ -10515,7 +10506,7 @@ structure, yielding a '``[10 x [20 x i32]]``' type, an array. The two dimensions of the array are subscripted into, yielding an '``i32``' type. The '``getelementptr``' instruction returns a pointer to this -element, thus computing a value of '``i32*``' type. +element. Note that it is perfectly legal to index partially through a structure, returning a pointer to an inner element. Because of this, the LLVM code @@ -10523,13 +10514,13 @@ .. 
code-block:: llvm - define i32* @foo(%struct.ST* %s) { - %t1 = getelementptr %struct.ST, %struct.ST* %s, i32 1 ; yields %struct.ST*:%t1 - %t2 = getelementptr %struct.ST, %struct.ST* %t1, i32 0, i32 2 ; yields %struct.RT*:%t2 - %t3 = getelementptr %struct.RT, %struct.RT* %t2, i32 0, i32 1 ; yields [10 x [20 x i32]]*:%t3 - %t4 = getelementptr [10 x [20 x i32]], [10 x [20 x i32]]* %t3, i32 0, i32 5 ; yields [20 x i32]*:%t4 - %t5 = getelementptr [20 x i32], [20 x i32]* %t4, i32 0, i32 13 ; yields i32*:%t5 - ret i32* %t5 + define ptr @foo(ptr %s) { + %t1 = getelementptr %struct.ST, ptr %s, i32 1 + %t2 = getelementptr %struct.ST, ptr %t1, i32 0, i32 2 + %t3 = getelementptr %struct.RT, ptr %t2, i32 0, i32 1 + %t4 = getelementptr [10 x [20 x i32]], ptr %t3, i32 0, i32 5 + %t5 = getelementptr [20 x i32], ptr %t4, i32 0, i32 13 + ret ptr %t5 } If the ``inbounds`` keyword is present, the result value of the @@ -10588,14 +10579,10 @@ .. code-block:: llvm - ; yields [12 x i8]*:aptr - %aptr = getelementptr {i32, [12 x i8]}, {i32, [12 x i8]}* %saptr, i64 0, i32 1 - ; yields i8*:vptr - %vptr = getelementptr {i32, <2 x i8>}, {i32, <2 x i8>}* %svptr, i64 0, i32 1, i32 1 - ; yields i8*:eptr - %eptr = getelementptr [12 x i8], [12 x i8]* %aptr, i64 0, i32 1 - ; yields i32*:iptr - %iptr = getelementptr [10 x i32], [10 x i32]* @arr, i16 0, i16 0 + %aptr = getelementptr {i32, [12 x i8]}, ptr %saptr, i64 0, i32 1 + %vptr = getelementptr {i32, <2 x i8>}, ptr %svptr, i64 0, i32 1, i32 1 + %eptr = getelementptr [12 x i8], ptr %aptr, i64 0, i32 1 + %iptr = getelementptr [10 x i32], ptr @arr, i16 0, i16 0 Vector of pointers: """"""""""""""""""" @@ -10613,25 +10600,25 @@ ; Add the same scalar offset to each pointer of a vector: ; A[i] = ptrs[i] + offset*sizeof(i8) - %A = getelementptr i8, <4 x i8*> %ptrs, i64 %offset + %A = getelementptr i8, <4 x ptr> %ptrs, i64 %offset ; Add distinct offsets to the same pointer: ; A[i] = ptr + offsets[i]*sizeof(i8) - %A = getelementptr i8, i8* %ptr, <4 x i64> 
%offsets + %A = getelementptr i8, ptr %ptr, <4 x i64> %offsets - ; In all cases described above the type of the result is <4 x i8*> + ; In all cases described above the type of the result is <4 x ptr> The two following instructions are equivalent: .. code-block:: llvm - getelementptr %struct.ST, <4 x %struct.ST*> %s, <4 x i64> %ind1, + getelementptr %struct.ST, <4 x ptr> %s, <4 x i64> %ind1, <4 x i32> , <4 x i32> , <4 x i32> %ind4, <4 x i64> - getelementptr %struct.ST, <4 x %struct.ST*> %s, <4 x i64> %ind1, + getelementptr %struct.ST, <4 x ptr> %s, <4 x i64> %ind1, i32 2, i32 1, <4 x i32> %ind4, i64 13 Let's look at the C code, where the vector version of ``getelementptr`` @@ -10648,9 +10635,9 @@ .. code-block:: llvm ; get pointers for 8 elements from array B - %ptrs = getelementptr double, double* %B, <8 x i32> %C + %ptrs = getelementptr double, ptr %B, <8 x i32> %C ; load 8 elements from array B into A - %A = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x double*> %ptrs, + %A = call <8 x double> @llvm.masked.gather.v8f64.v8p0f64(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x double> %passthru) Conversion Operations @@ -11078,9 +11065,9 @@ .. code-block:: llvm - %X = ptrtoint i32* %P to i8 ; yields truncation on 32-bit architecture - %Y = ptrtoint i32* %P to i64 ; yields zero extension on 32-bit architecture - %Z = ptrtoint <4 x i32*> %P to <4 x i64>; yields vector zero extension for a vector of addresses on 32-bit architecture + %X = ptrtoint ptr %P to i8 ; yields truncation on 32-bit architecture + %Y = ptrtoint ptr %P to i64 ; yields zero extension on 32-bit architecture + %Z = ptrtoint <4 x ptr> %P to <4 x i64>; yields vector zero extension for a vector of addresses on 32-bit architecture .. _i_inttoptr: @@ -11132,10 +11119,10 @@ .. 
code-block:: llvm - %X = inttoptr i32 255 to i32* ; yields zero extension on 64-bit architecture - %Y = inttoptr i32 255 to i32* ; yields no-op on 32-bit architecture - %Z = inttoptr i64 0 to i32* ; yields truncation on 32-bit architecture - %Z = inttoptr <4 x i32> %G to <4 x i8*>; yields truncation of vector G to four pointers + %X = inttoptr i32 255 to ptr ; yields zero extension on 64-bit architecture + %Y = inttoptr i32 255 to ptr ; yields no-op on 32-bit architecture + %Z = inttoptr i64 0 to ptr ; yields truncation on 32-bit architecture + %Z = inttoptr <4 x i32> %G to <4 x ptr>; yields truncation of vector G to four pointers .. _i_bitcast: @@ -11190,7 +11177,7 @@ .. code-block:: text %X = bitcast i8 255 to i8 ; yields i8 :-1 - %Y = bitcast i32* %x to i16* ; yields i16*:%x + %Y = bitcast i32* %x to i16* ; yields i16*:%x %Z = bitcast <2 x i32> %V to i64; ; yields i64: %V (depends on endianess) %Z = bitcast <2 x i32*> %V to <2 x i64*> ; yields <2 x i64*> @@ -11235,9 +11222,9 @@ .. code-block:: llvm - %X = addrspacecast i32* %x to i32 addrspace(1)* ; yields i32 addrspace(1)*:%x - %Y = addrspacecast i32 addrspace(1)* %y to i64 addrspace(2)* ; yields i64 addrspace(2)*:%y - %Z = addrspacecast <4 x i32*> %z to <4 x float addrspace(3)*> ; yields <4 x float addrspace(3)*>:%z + %X = addrspacecast ptr %x to ptr addrspace(1) + %Y = addrspacecast ptr addrspace(1) %y to ptr addrspace(2) + %Z = addrspacecast <4 x ptr> %z to <4 x ptr addrspace(3)> .. _otherops: @@ -11333,7 +11320,7 @@ .. code-block:: text = icmp eq i32 4, 5 ; yields: result=false - = icmp ne float* %X, %X ; yields: result=false + = icmp ne ptr %X, %X ; yields: result=false = icmp ult i16 4, 5 ; yields: result=true = icmp sgt i16 4, 5 ; yields: result=false = icmp ule i16 -4, 5 ; yields: result=false @@ -11769,7 +11756,7 @@ .. code-block:: llvm %retval = call i32 @test(i32 %argc) - call i32 (i8*, ...)* @printf(i8* %msg, i32 12, i8 42) ; yields i32 + call i32 (ptr, ...) 
@printf(ptr %msg, i32 12, i8 42) ; yields i32 %X = tail call i32 @foo() ; yields i32 %Y = tail call fastcc i32 @foo() ; yields i32 call void %foo(i8 signext 97) @@ -11874,7 +11861,7 @@ contains the global variable representing the "type" that may be caught or filtered respectively. Unlike the ``catch`` clause, the ``filter`` clause takes an array constant as its argument. Use -"``[0 x i8**] undef``" for a filter which cannot throw. The +"``[0 x ptr] undef``" for a filter which cannot throw. The '``landingpad``' instruction must contain *at least* one ``clause`` or the ``cleanup`` flag. @@ -11912,15 +11899,15 @@ .. code-block:: llvm ;; A landing pad which can catch an integer. - %res = landingpad { i8*, i32 } - catch i8** @_ZTIi + %res = landingpad { ptr, i32 } + catch ptr @_ZTIi ;; A landing pad that is a cleanup. - %res = landingpad { i8*, i32 } + %res = landingpad { ptr, i32 } cleanup ;; A landing pad which can catch an integer and can only throw a double. - %res = landingpad { i8*, i32 } - catch i8** @_ZTIi - filter [1 x i8**] [i8** @_ZTId] + %res = landingpad { ptr, i32 } + catch ptr @_ZTIi + filter [1 x ptr] [ptr @_ZTId] .. _i_catchpad: @@ -11988,7 +11975,7 @@ %cs = catchswitch within none [label %handler0] unwind to caller ;; A catch block which can catch an integer. handler0: - %tok = catchpad within %cs [i8** @_ZTIi] + %tok = catchpad within %cs [ptr @_ZTIi] .. _i_cleanuppad: @@ -12144,35 +12131,33 @@ .. code-block:: llvm ; This struct is different for every platform. For most platforms, - ; it is merely an i8*. - %struct.va_list = type { i8* } + ; it is merely a ptr. + %struct.va_list = type { ptr } ; For Unix x86_64 platforms, va_list is the following struct: - ; %struct.va_list = type { i32, i32, i8*, i8* } + ; %struct.va_list = type { i32, i32, ptr, ptr } define i32 @test(i32 %X, ...) 
{ ; Initialize variable argument processing %ap = alloca %struct.va_list - %ap2 = bitcast %struct.va_list* %ap to i8* - call void @llvm.va_start(i8* %ap2) + call void @llvm.va_start(ptr %ap) ; Read a single integer argument - %tmp = va_arg i8* %ap2, i32 + %tmp = va_arg ptr %ap, i32 ; Demonstrate usage of llvm.va_copy and llvm.va_end - %aq = alloca i8* - %aq2 = bitcast i8** %aq to i8* - call void @llvm.va_copy(i8* %aq2, i8* %ap2) - call void @llvm.va_end(i8* %aq2) + %aq = alloca ptr + call void @llvm.va_copy(ptr %aq, ptr %ap) + call void @llvm.va_end(ptr %aq) ; Stop processing of arguments. - call void @llvm.va_end(i8* %ap2) + call void @llvm.va_end(ptr %ap) ret i32 %tmp } - declare void @llvm.va_start(i8*) - declare void @llvm.va_copy(i8*, i8*) - declare void @llvm.va_end(i8*) + declare void @llvm.va_start(ptr) + declare void @llvm.va_copy(ptr, ptr) + declare void @llvm.va_end(ptr) .. _int_va_start: @@ -12184,12 +12169,12 @@ :: - declare void @llvm.va_start(i8* ) + declare void @llvm.va_start(ptr ) Overview: """"""""" -The '``llvm.va_start``' intrinsic initializes ``*`` for +The '``llvm.va_start``' intrinsic initializes ```` for subsequent use by ``va_arg``. Arguments: @@ -12216,12 +12201,12 @@ :: - declare void @llvm.va_end(i8* ) + declare void @llvm.va_end(ptr ) Overview: """"""""" -The '``llvm.va_end``' intrinsic destroys ``*``, which has been +The '``llvm.va_end``' intrinsic destroys ````, which has been initialized previously with ``llvm.va_start`` or ``llvm.va_copy``. 
Arguments: @@ -12249,7 +12234,7 @@ :: - declare void @llvm.va_copy(i8* , i8* ) + declare void @llvm.va_copy(ptr , ptr ) Overview: """"""""" @@ -12305,7 +12290,7 @@ :: - declare void @llvm.gcroot(i8** %ptrloc, i8* %metadata) + declare void @llvm.gcroot(ptr %ptrloc, ptr %metadata) Overview: """"""""" @@ -12340,7 +12325,7 @@ :: - declare i8* @llvm.gcread(i8* %ObjPtr, i8** %Ptr) + declare ptr @llvm.gcread(ptr %ObjPtr, ptr %Ptr) Overview: """"""""" @@ -12376,7 +12361,7 @@ :: - declare void @llvm.gcwrite(i8* %P1, i8* %Obj, i8** %P2) + declare void @llvm.gcwrite(ptr %P1, ptr %Obj, ptr %P2) Overview: """"""""" @@ -12415,7 +12400,7 @@ declare token @llvm.experimental.gc.statepoint(i64 , i32 , - func_type* elementtype(func_type) , + ptr elementtype(func_type) , i64 <#call args>, i64 , ... (call parameters), i64 0, i64 0) @@ -12509,7 +12494,7 @@ :: - declare type* + declare type @llvm.experimental.gc.result(token %statepoint_token) Overview: @@ -12695,7 +12680,7 @@ :: - declare i8* @llvm.returnaddress(i32 ) + declare ptr @llvm.returnaddress(i32 ) Overview: """"""""" @@ -12733,7 +12718,7 @@ :: - declare i8* @llvm.addressofreturnaddress() + declare ptr @llvm.addressofreturnaddress() Overview: """"""""" @@ -12759,7 +12744,7 @@ :: - declare i8* @llvm.sponentry() + declare ptr @llvm.sponentry() Overview: """"""""" @@ -12780,7 +12765,7 @@ :: - declare i8* @llvm.frameaddress(i32 ) + declare ptr @llvm.frameaddress(i32 ) Overview: """"""""" @@ -12817,7 +12802,7 @@ :: - declare i8** @llvm.swift.async.context.addr() + declare ptr @llvm.swift.async.context.addr() Overview: """"""""" @@ -12841,7 +12826,7 @@ :: declare void @llvm.localescape(...) 
- declare i8* @llvm.localrecover(i8* %func, i8* %fp, i32 %idx) + declare ptr @llvm.localrecover(ptr %func, ptr %fp, i32 %idx) Overview: """"""""" @@ -13006,7 +12991,7 @@ :: - declare i8* @llvm.stacksave() + declare ptr @llvm.stacksave() Overview: """"""""" @@ -13038,7 +13023,7 @@ :: - declare void @llvm.stackrestore(i8* %ptr) + declare void @llvm.stackrestore(ptr %ptr) Overview: """"""""" @@ -13104,7 +13089,7 @@ :: - declare void @llvm.prefetch(i8*
, i32 , i32 , i32 ) + declare void @llvm.prefetch(ptr
, i32 , i32 , i32 ) Overview: """"""""" @@ -13205,7 +13190,7 @@ :: - declare void @llvm.clear_cache(i8*, i8*) + declare void @llvm.clear_cache(ptr, ptr) Overview: """"""""" @@ -13238,7 +13223,7 @@ :: - declare void @llvm.instrprof.increment(i8* , i64 , + declare void @llvm.instrprof.increment(ptr , i64 , i32 , i32 ) Overview: @@ -13282,7 +13267,7 @@ :: - declare void @llvm.instrprof.increment.step(i8* , i64 , + declare void @llvm.instrprof.increment.step(ptr , i64 , i32 , i32 , i64 ) @@ -13312,7 +13297,7 @@ :: - declare void @llvm.instrprof.cover(i8* , i64 , + declare void @llvm.instrprof.cover(ptr , i64 , i32 , i32 ) Overview: @@ -13340,7 +13325,7 @@ :: - declare void @llvm.instrprof.value.profile(i8* , i64 , + declare void @llvm.instrprof.value.profile(ptr , i64 , i64 , i32 , i32 ) @@ -13389,7 +13374,7 @@ :: - declare i8* @llvm.thread.pointer() + declare ptr @llvm.thread.pointer() Overview: """"""""" @@ -13463,7 +13448,7 @@ :: - declare i8* @llvm.call.preallocated.arg(token %setup_token, i32 %arg_index) + declare ptr @llvm.call.preallocated.arg(token %setup_token, i32 %arg_index) Overview: """"""""" @@ -13502,7 +13487,7 @@ :: - declare i8* @llvm.call.preallocated.teardown(token %setup_token) + declare ptr @llvm.call.preallocated.teardown(token %setup_token) Overview: """"""""" @@ -13538,11 +13523,10 @@ .. 
code-block:: llvm %cs = call token @llvm.call.preallocated.setup(i32 1) - %x = call i8* @llvm.call.preallocated.arg(token %cs, i32 0) preallocated(i32) - %y = bitcast i8* %x to i32* - invoke void @constructor(i32* %y) to label %conta unwind label %contb + %x = call ptr @llvm.call.preallocated.arg(token %cs, i32 0) preallocated(i32) + invoke void @constructor(ptr %x) to label %conta unwind label %contb conta: - call void @foo1(i32* preallocated(i32) %y) ["preallocated"(token %cs)] + call void @foo1(ptr preallocated(i32) %x) ["preallocated"(token %cs)] ret void contb: %s = catchswitch within none [label %catch] unwind to caller @@ -13731,10 +13715,10 @@ :: - declare void @llvm.memcpy.p0i8.p0i8.i32(i8* , i8* , - i32 , i1 ) - declare void @llvm.memcpy.p0i8.p0i8.i64(i8* , i8* , - i64 , i1 ) + declare void @llvm.memcpy.p0.p0.i32(ptr , ptr , + i32 , i1 ) + declare void @llvm.memcpy.p0.p0.i64(ptr , ptr , + i64 , i1 ) Overview: """"""""" @@ -13790,10 +13774,10 @@ :: - declare void @llvm.memcpy.inline.p0i8.p0i8.i32(i8* , i8* , - i32 , i1 ) - declare void @llvm.memcpy.inline.p0i8.p0i8.i64(i8* , i8* , - i64 , i1 ) + declare void @llvm.memcpy.inline.p0.p0.i32(ptr , ptr , + i32 , i1 ) + declare void @llvm.memcpy.inline.p0.p0.i64(ptr , ptr , + i64 , i1 ) Overview: """"""""" @@ -13847,10 +13831,10 @@ :: - declare void @llvm.memmove.p0i8.p0i8.i32(i8* , i8* , - i32 , i1 ) - declare void @llvm.memmove.p0i8.p0i8.i64(i8* , i8* , - i64 , i1 ) + declare void @llvm.memmove.p0.p0.i32(ptr , ptr , + i32 , i1 ) + declare void @llvm.memmove.p0.p0.i64(ptr , ptr , + i64 , i1 ) Overview: """"""""" @@ -13908,10 +13892,10 @@ :: - declare void @llvm.memset.p0i8.i32(i8* , i8 , - i32 , i1 ) - declare void @llvm.memset.p0i8.i64(i8* , i8 , - i64 , i1 ) + declare void @llvm.memset.p0.i32(ptr , i8 , + i32 , i1 ) + declare void @llvm.memset.p0.i64(ptr , i8 , + i64 , i1 ) Overview: """"""""" @@ -13966,12 +13950,10 @@ :: - declare void @llvm.memset.inline.p0i8.p0i8.i32(i8* , i8 , - i32 , - i1 ) - declare 
void @llvm.memset.inline.p0i8.p0i8.i64(i8* , i8 , - i64 , - i1 ) + declare void @llvm.memset.inline.p0.p0i8.i32(ptr , i8 , + i32 , i1 ) + declare void @llvm.memset.inline.p0.p0.i64(ptr , i8 , + i64 , i1 ) Overview: """"""""" @@ -17771,7 +17753,7 @@ .. code-block:: llvm - %a = load i16, i16* @x, align 2 + %a = load i16, ptr @x, align 2 %res = call float @llvm.convert.from.fp16(i16 %a) Saturating floating-point to integer conversions @@ -17926,20 +17908,18 @@ argument list. This is used to implement the GCC nested function address extension. -For example, if the function is ``i32 f(i8* nest %c, i32 %x, i32 %y)`` -then the resulting function pointer has signature ``i32 (i32, i32)*``. +For example, if the function is ``i32 f(ptr nest %c, i32 %x, i32 %y)`` +then the resulting function pointer has signature ``i32 (i32, i32)``. It can be created as follows: .. code-block:: llvm %tramp = alloca [10 x i8], align 4 ; size and alignment only correct for X86 - %tramp1 = getelementptr [10 x i8], [10 x i8]* %tramp, i32 0, i32 0 - call i8* @llvm.init.trampoline(i8* %tramp1, i8* bitcast (i32 (i8*, i32, i32)* @f to i8*), i8* %nval) - %p = call i8* @llvm.adjust.trampoline(i8* %tramp1) - %fp = bitcast i8* %p to i32 (i32, i32)* + call ptr @llvm.init.trampoline(ptr %tramp, ptr @f, ptr %nval) + %fp = call ptr @llvm.adjust.trampoline(ptr %tramp) The call ``%val = call i32 %fp(i32 %x, i32 %y)`` is then equivalent to -``%val = call i32 %f(i8* %nval, i32 %x, i32 %y)``. +``%val = call i32 %f(ptr %nval, i32 %x, i32 %y)``. .. _int_it: @@ -17951,7 +17931,7 @@ :: - declare void @llvm.init.trampoline(i8* , i8* , i8* ) + declare void @llvm.init.trampoline(ptr , ptr , ptr ) Overview: """"""""" @@ -17968,8 +17948,7 @@ intrinsic. Note that the size and the alignment are target-specific - LLVM currently provides no portable way of determining them, so a front-end that generates this intrinsic needs to have some -target-specific knowledge. 
The ``func`` argument must hold a function -bitcast to an ``i8*``. +target-specific knowledge. The ``func`` argument must hold a function. Semantics: """""""""" @@ -17997,7 +17976,7 @@ :: - declare i8* @llvm.adjust.trampoline(i8* ) + declare ptr @llvm.adjust.trampoline(ptr ) Overview: """"""""" @@ -20068,10 +20047,10 @@ :: - declare <4 x float> @llvm.vp.load.v4f32.p0v4f32(<4 x float>* %ptr, <4 x i1> %mask, i32 %evl) - declare @llvm.vp.load.nxv2i16.p0nxv2i16(* %ptr, %mask, i32 %evl) - declare <8 x float> @llvm.vp.load.v8f32.p1v8f32(<8 x float> addrspace(1)* %ptr, <8 x i1> %mask, i32 %evl) - declare @llvm.vp.load.nxv1i64.p6nxv1i64( addrspace(6)* %ptr, %mask, i32 %evl) + declare <4 x float> @llvm.vp.load.v4f32.p0(ptr %ptr, <4 x i1> %mask, i32 %evl) + declare @llvm.vp.load.nxv2i16.p0(ptr %ptr, %mask, i32 %evl) + declare <8 x float> @llvm.vp.load.v8f32.p1(ptr addrspace(1) %ptr, <8 x i1> %mask, i32 %evl) + declare @llvm.vp.load.nxv1i64.p6(ptr addrspace(6) %ptr, %mask, i32 %evl) Overview: """"""""" @@ -20107,10 +20086,10 @@ .. code-block:: text - %r = call <8 x i8> @llvm.vp.load.v8i8.p0v8i8(<8 x i8>* align 2 %ptr, <8 x i1> %mask, i32 %evl) + %r = call <8 x i8> @llvm.vp.load.v8i8.p0(ptr align 2 %ptr, <8 x i1> %mask, i32 %evl) ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r - %also.r = call <8 x i8> @llvm.masked.load.v8i8.p0v8i8(<8 x i8>* %ptr, i32 2, <8 x i1> %mask, <8 x i8> undef) + %also.r = call <8 x i8> @llvm.masked.load.v8i8.p0(ptr %ptr, i32 2, <8 x i1> %mask, <8 x i8> undef) .. 
_int_vp_store: @@ -20124,10 +20103,10 @@ :: - declare void @llvm.vp.store.v4f32.p0v4f32(<4 x float> %val, <4 x float>* %ptr, <4 x i1> %mask, i32 %evl) - declare void @llvm.vp.store.nxv2i16.p0nxv2i16( %val, * %ptr, %mask, i32 %evl) - declare void @llvm.vp.store.v8f32.p1v8f32(<8 x float> %val, <8 x float> addrspace(1)* %ptr, <8 x i1> %mask, i32 %evl) - declare void @llvm.vp.store.nxv1i64.p6nxv1i64( %val, addrspace(6)* %ptr, %mask, i32 %evl) + declare void @llvm.vp.store.v4f32.p0(<4 x float> %val, ptr %ptr, <4 x i1> %mask, i32 %evl) + declare void @llvm.vp.store.nxv2i16.p0( %val, ptr %ptr, %mask, i32 %evl) + declare void @llvm.vp.store.v8f32.p1(<8 x float> %val, ptr addrspace(1) %ptr, <8 x i1> %mask, i32 %evl) + declare void @llvm.vp.store.nxv1i64.p6( %val, ptr addrspace(6) %ptr, %mask, i32 %evl) Overview: """"""""" @@ -20164,10 +20143,10 @@ .. code-block:: text - call void @llvm.vp.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* align 4 %ptr, <8 x i1> %mask, i32 %evl) + call void @llvm.vp.store.v8i8.p0(<8 x i8> %val, ptr align 4 %ptr, <8 x i1> %mask, i32 %evl) ;; For all lanes below %evl, the call above is lane-wise equivalent to the call below. - call void @llvm.masked.store.v8i8.p0v8i8(<8 x i8> %val, <8 x i8>* %ptr, i32 4, <8 x i1> %mask) + call void @llvm.masked.store.v8i8.p0(<8 x i8> %val, ptr %ptr, i32 4, <8 x i1> %mask) .. 
_int_experimental_vp_strided_load: @@ -20181,8 +20160,8 @@ :: - declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.i64(float* %ptr, i64 %stride, <4 x i1> %mask, i32 %evl) - declare @llvm.experimental.vp.strided.load.nxv2i16.i64(i16* %ptr, i64 %stride, %mask, i32 %evl) + declare <4 x float> @llvm.experimental.vp.strided.load.v4f32.i64(ptr %ptr, i64 %stride, <4 x i1> %mask, i32 %evl) + declare @llvm.experimental.vp.strided.load.nxv2i16.i64(ptr %ptr, i64 %stride, %mask, i32 %evl) Overview: """"""""" @@ -20240,8 +20219,8 @@ :: - declare void @llvm.experimental.vp.strided.store.v4f32.i64(<4 x float> %val, float* %ptr, i64 %stride, <4 x i1> %mask, i32 %evl) - declare void @llvm.experimental.vp.strided.store.nxv2i16.i64( %val, i16* %ptr, i64 %stride, %mask, i32 %evl) + declare void @llvm.experimental.vp.strided.store.v4f32.i64(<4 x float> %val, ptr %ptr, i64 %stride, <4 x i1> %mask, i32 %evl) + declare void @llvm.experimental.vp.strided.store.nxv2i16.i64( %val, ptr %ptr, i64 %stride, %mask, i32 %evl) Overview: """"""""" @@ -20301,10 +20280,10 @@ :: - declare <4 x double> @llvm.vp.gather.v4f64.v4p0f64(<4 x double*> %ptrs, <4 x i1> %mask, i32 %evl) - declare @llvm.vp.gather.nxv2i8.nxv2p0i8( %ptrs, %mask, i32 %evl) - declare <2 x float> @llvm.vp.gather.v2f32.v2p2f32(<2 x float addrspace(2)*> %ptrs, <2 x i1> %mask, i32 %evl) - declare @llvm.vp.gather.nxv4i32.nxv4p4i32( %ptrs, %mask, i32 %evl) + declare <4 x double> @llvm.vp.gather.v4f64.v4p0(<4 x ptr> %ptrs, <4 x i1> %mask, i32 %evl) + declare @llvm.vp.gather.nxv2i8.nxv2p0( %ptrs, %mask, i32 %evl) + declare <2 x float> @llvm.vp.gather.v2f32.v2p2(<2 x ptr addrspace(2)> %ptrs, <2 x i1> %mask, i32 %evl) + declare @llvm.vp.gather.nxv4i32.nxv4p4( %ptrs, %mask, i32 %evl) Overview: """"""""" @@ -20341,10 +20320,10 @@ .. 
code-block:: text - %r = call <8 x i8> @llvm.vp.gather.v8i8.v8p0i8(<8 x i8*> align 8 %ptrs, <8 x i1> %mask, i32 %evl) + %r = call <8 x i8> @llvm.vp.gather.v8i8.v8p0(<8 x ptr> align 8 %ptrs, <8 x i1> %mask, i32 %evl) ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r - %also.r = call <8 x i8> @llvm.masked.gather.v8i8.v8p0i8(<8 x i8*> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef) + %also.r = call <8 x i8> @llvm.masked.gather.v8i8.v8p0(<8 x ptr> %ptrs, i32 8, <8 x i1> %mask, <8 x i8> undef) .. _int_vp_scatter: @@ -20358,10 +20337,10 @@ :: - declare void @llvm.vp.scatter.v4f64.v4p0f64(<4 x double> %val, <4 x double*> %ptrs, <4 x i1> %mask, i32 %evl) - declare void @llvm.vp.scatter.nxv2i8.nxv2p0i8( %val, %ptrs, %mask, i32 %evl) - declare void @llvm.vp.scatter.v2f32.v2p2f32(<2 x float> %val, <2 x float addrspace(2)*> %ptrs, <2 x i1> %mask, i32 %evl) - declare void @llvm.vp.scatter.nxv4i32.nxv4p4i32( %val, %ptrs, %mask, i32 %evl) + declare void @llvm.vp.scatter.v4f64.v4p0(<4 x double> %val, <4 x ptr> %ptrs, <4 x i1> %mask, i32 %evl) + declare void @llvm.vp.scatter.nxv2i8.nxv2p0( %val, %ptrs, %mask, i32 %evl) + declare void @llvm.vp.scatter.v2f32.v2p2(<2 x float> %val, <2 x ptr addrspace(2)> %ptrs, <2 x i1> %mask, i32 %evl) + declare void @llvm.vp.scatter.nxv4i32.nxv4p4( %val, %ptrs, %mask, i32 %evl) Overview: """"""""" @@ -20399,10 +20378,10 @@ .. code-block:: text - call void @llvm.vp.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> align 1 %ptrs, <8 x i1> %mask, i32 %evl) + call void @llvm.vp.scatter.v8i8.v8p0(<8 x i8> %val, <8 x ptr> align 1 %ptrs, <8 x i1> %mask, i32 %evl) ;; For all lanes below %evl, the call above is lane-wise equivalent to the call below. - call void @llvm.masked.scatter.v8i8.v8p0i8(<8 x i8> %val, <8 x i8*> %ptrs, i32 1, <8 x i1> %mask) + call void @llvm.masked.scatter.v8i8.v8p0(<8 x i8> %val, <8 x ptr> %ptrs, i32 1, <8 x i1> %mask) .. 
_int_vp_trunc: @@ -20909,9 +20888,9 @@ :: - declare <16 x i8> @llvm.vp.ptrtoint.v16i8.v16p0i32 (<16 x i32*> , <16 x i1> , i32 ) - declare @llvm.vp.ptrtoint.nxv4i8.nxv4p0i32 ( , , i32 ) - declare <256 x i64> @llvm.vp.ptrtoint.v16i64.v16p0i32 (<256 x i32*> , <256 x i1> , i32 ) + declare <16 x i8> @llvm.vp.ptrtoint.v16i8.v16p0(<16 x ptr> , <16 x i1> , i32 ) + declare @llvm.vp.ptrtoint.nxv4i8.nxv4p0( , , i32 ) + declare <256 x i64> @llvm.vp.ptrtoint.v16i64.v16p0(<256 x ptr> , <256 x i1> , i32 ) Overview: """"""""" @@ -20948,10 +20927,10 @@ .. code-block:: llvm - %r = call <4 x i8> @llvm.vp.ptrtoint.v4i8.v4p0i32(<4 x i32*> %a, <4 x i1> %mask, i32 %evl) + %r = call <4 x i8> @llvm.vp.ptrtoint.v4i8.v4p0i32(<4 x ptr> %a, <4 x i1> %mask, i32 %evl) ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r - %t = ptrtoint <4 x i32*> %a to <4 x i8> + %t = ptrtoint <4 x ptr> %a to <4 x i8> %also.r = select <4 x i1> %mask, <4 x i8> %t, <4 x i8> undef @@ -20966,9 +20945,9 @@ :: - declare <16 x i32*> @llvm.vp.inttoptr.v16p0i32.v16i32 (<16 x i32> , <16 x i1> , i32 ) - declare @llvm.vp.inttoptr.nxv4p0i32.nxv4i32 ( , , i32 ) - declare <256 x i32*> @llvm.vp.inttoptr.v256p0i32.v256i32 (<256 x i32> , <256 x i1> , i32 ) + declare <16 x ptr> @llvm.vp.inttoptr.v16p0.v16i32 (<16 x i32> , <16 x i1> , i32 ) + declare @llvm.vp.inttoptr.nxv4p0.nxv4i32 ( , , i32 ) + declare <256 x ptr> @llvm.vp.inttoptr.v256p0.v256i32 (<256 x i32> , <256 x i1> , i32 ) Overview: """"""""" @@ -21003,11 +20982,11 @@ .. code-block:: llvm - %r = call <4 x i32*> @llvm.vp.inttoptr.v4p0i32.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl) + %r = call <4 x ptr> @llvm.vp.inttoptr.v4p0i32.v4i32(<4 x i32> %a, <4 x i1> %mask, i32 %evl) ;; For all lanes below %evl, %r is lane-wise equivalent to %also.r - %t = inttoptr <4 x i32> %a to <4 x i32*> - %also.r = select <4 x i1> %mask, <4 x i32*> %t, <4 x i32*> undef + %t = inttoptr <4 x i32> %a to <4 x ptr> + %also.r = select <4 x i1> %mask, <4 x ptr> %t, <4 x ptr> undef .. 
_int_vp_fcmp: @@ -21144,12 +21123,10 @@ :: - declare <16 x float> @llvm.masked.load.v16f32.p0v16f32 (<16 x float>* , i32 , <16 x i1> , <16 x float> ) - declare <2 x double> @llvm.masked.load.v2f64.p0v2f64 (<2 x double>* , i32 , <2 x i1> , <2 x double> ) - ;; The data is a vector of pointers to double - declare <8 x double*> @llvm.masked.load.v8p0f64.p0v8p0f64 (<8 x double*>* , i32 , <8 x i1> , <8 x double*> ) - ;; The data is a vector of function pointers - declare <8 x i32 ()*> @llvm.masked.load.v8p0f_i32f.p0v8p0f_i32f (<8 x i32 ()*>* , i32 , <8 x i1> , <8 x i32 ()*> ) + declare <16 x float> @llvm.masked.load.v16f32.p0(ptr , i32 , <16 x i1> , <16 x float> ) + declare <2 x double> @llvm.masked.load.v2f64.p0(ptr , i32 , <2 x i1> , <2 x double> ) + ;; The data is a vector of pointers + declare <8 x ptr> @llvm.masked.load.v8p0.p0(ptr , i32 , <8 x i1> , <8 x ptr> ) Overview: """"""""" @@ -21171,10 +21148,10 @@ :: - %res = call <16 x float> @llvm.masked.load.v16f32.p0v16f32 (<16 x float>* %ptr, i32 4, <16 x i1>%mask, <16 x float> %passthru) + %res = call <16 x float> @llvm.masked.load.v16f32.p0(ptr %ptr, i32 4, <16 x i1>%mask, <16 x float> %passthru) ;; The result of the two following instructions is identical aside from potential memory access exception - %loadlal = load <16 x float>, <16 x float>* %ptr, align 4 + %loadlal = load <16 x float>, ptr %ptr, align 4 %res = select <16 x i1> %mask, <16 x float> %loadlal, <16 x float> %passthru .. 
_int_mstore: @@ -21188,12 +21165,10 @@ :: - declare void @llvm.masked.store.v8i32.p0v8i32 (<8 x i32> , <8 x i32>* , i32 , <8 x i1> ) - declare void @llvm.masked.store.v16f32.p0v16f32 (<16 x float> , <16 x float>* , i32 , <16 x i1> ) - ;; The data is a vector of pointers to double - declare void @llvm.masked.store.v8p0f64.p0v8p0f64 (<8 x double*> , <8 x double*>* , i32 , <8 x i1> ) - ;; The data is a vector of function pointers - declare void @llvm.masked.store.v4p0f_i32f.p0v4p0f_i32f (<4 x i32 ()*> , <4 x i32 ()*>* , i32 , <4 x i1> ) + declare void @llvm.masked.store.v8i32.p0 (<8 x i32> , ptr , i32 , <8 x i1> ) + declare void @llvm.masked.store.v16f32.p0(<16 x float> , ptr , i32 , <16 x i1> ) + ;; The data is a vector of pointers + declare void @llvm.masked.store.v8p0.p0 (<8 x ptr> , ptr , i32 , <8 x i1> ) Overview: """"""""" @@ -21214,12 +21189,12 @@ :: - call void @llvm.masked.store.v16f32.p0v16f32(<16 x float> %value, <16 x float>* %ptr, i32 4, <16 x i1> %mask) + call void @llvm.masked.store.v16f32.p0(<16 x float> %value, ptr %ptr, i32 4, <16 x i1> %mask) ;; The result of the following instructions is identical aside from potential data races and memory access exceptions - %oldval = load <16 x float>, <16 x float>* %ptr, align 4 + %oldval = load <16 x float>, ptr %ptr, align 4 %res = select <16 x i1> %mask, <16 x float> %value, <16 x float> %oldval - store <16 x float> %res, <16 x float>* %ptr, align 4 + store <16 x float> %res, ptr %ptr, align 4 Masked Vector Gather and Scatter Intrinsics @@ -21238,9 +21213,9 @@ :: - declare <16 x float> @llvm.masked.gather.v16f32.v16p0f32 (<16 x float*> , i32 , <16 x i1> , <16 x float> ) - declare <2 x double> @llvm.masked.gather.v2f64.v2p1f64 (<2 x double addrspace(1)*> , i32 , <2 x i1> , <2 x double> ) - declare <8 x float*> @llvm.masked.gather.v8p0f32.v8p0p0f32 (<8 x float**> , i32 , <8 x i1> , <8 x float*> ) + declare <16 x float> @llvm.masked.gather.v16f32.v16p0(<16 x ptr> , i32 , <16 x i1> , <16 x float> ) + declare <2 x 
double> @llvm.masked.gather.v2f64.v2p1(<2 x ptr addrspace(1)> , i32 , <2 x i1> , <2 x double> ) + declare <8 x ptr> @llvm.masked.gather.v8p0.v8p0(<8 x ptr> , i32 , <8 x i1> , <8 x ptr> ) Overview: """"""""" @@ -21262,18 +21237,18 @@ :: - %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0f64 (<4 x double*> %ptrs, i32 8, <4 x i1> , <4 x double> undef) + %res = call <4 x double> @llvm.masked.gather.v4f64.v4p0(<4 x ptr> %ptrs, i32 8, <4 x i1> , <4 x double> undef) ;; The gather with all-true mask is equivalent to the following instruction sequence - %ptr0 = extractelement <4 x double*> %ptrs, i32 0 - %ptr1 = extractelement <4 x double*> %ptrs, i32 1 - %ptr2 = extractelement <4 x double*> %ptrs, i32 2 - %ptr3 = extractelement <4 x double*> %ptrs, i32 3 + %ptr0 = extractelement <4 x ptr> %ptrs, i32 0 + %ptr1 = extractelement <4 x ptr> %ptrs, i32 1 + %ptr2 = extractelement <4 x ptr> %ptrs, i32 2 + %ptr3 = extractelement <4 x ptr> %ptrs, i32 3 - %val0 = load double, double* %ptr0, align 8 - %val1 = load double, double* %ptr1, align 8 - %val2 = load double, double* %ptr2, align 8 - %val3 = load double, double* %ptr3, align 8 + %val0 = load double, ptr %ptr0, align 8 + %val1 = load double, ptr %ptr1, align 8 + %val2 = load double, ptr %ptr2, align 8 + %val3 = load double, ptr %ptr3, align 8 %vec0 = insertelement <4 x double>undef, %val0, 0 %vec01 = insertelement <4 x double>%vec0, %val1, 1 @@ -21291,9 +21266,9 @@ :: - declare void @llvm.masked.scatter.v8i32.v8p0i32 (<8 x i32> , <8 x i32*> , i32 , <8 x i1> ) - declare void @llvm.masked.scatter.v16f32.v16p1f32 (<16 x float> , <16 x float addrspace(1)*> , i32 , <16 x i1> ) - declare void @llvm.masked.scatter.v4p0f64.v4p0p0f64 (<4 x double*> , <4 x double**> , i32 , <4 x i1> ) + declare void @llvm.masked.scatter.v8i32.v8p0 (<8 x i32> , <8 x ptr> , i32 , <8 x i1> ) + declare void @llvm.masked.scatter.v16f32.v16p1(<16 x float> , <16 x ptr addrspace(1)> , i32 , <16 x i1> ) + declare void @llvm.masked.scatter.v4p0.v4p0 (<4 x 
ptr> , <4 x ptr> , i32 , <4 x i1> ) Overview: """"""""" @@ -21313,22 +21288,22 @@ :: ;; This instruction unconditionally stores data vector in multiple addresses - call @llvm.masked.scatter.v8i32.v8p0i32 (<8 x i32> %value, <8 x i32*> %ptrs, i32 4, <8 x i1> ) + call @llvm.masked.scatter.v8i32.v8p0(<8 x i32> %value, <8 x ptr> %ptrs, i32 4, <8 x i1> ) ;; It is equivalent to a list of scalar stores %val0 = extractelement <8 x i32> %value, i32 0 %val1 = extractelement <8 x i32> %value, i32 1 .. %val7 = extractelement <8 x i32> %value, i32 7 - %ptr0 = extractelement <8 x i32*> %ptrs, i32 0 - %ptr1 = extractelement <8 x i32*> %ptrs, i32 1 + %ptr0 = extractelement <8 x ptr> %ptrs, i32 0 + %ptr1 = extractelement <8 x ptr> %ptrs, i32 1 .. - %ptr7 = extractelement <8 x i32*> %ptrs, i32 7 + %ptr7 = extractelement <8 x ptr> %ptrs, i32 7 ;; Note: the order of the following stores is important when they overlap: - store i32 %val0, i32* %ptr0, align 4 - store i32 %val1, i32* %ptr1, align 4 + store i32 %val0, ptr %ptr0, align 4 + store i32 %val1, ptr %ptr1, align 4 .. - store i32 %val7, i32* %ptr7, align 4 + store i32 %val7, ptr %ptr7, align 4 Masked Vector Expanding Load and Compressing Store Intrinsics @@ -21347,8 +21322,8 @@ :: - declare <16 x float> @llvm.masked.expandload.v16f32 (float* , <16 x i1> , <16 x float> ) - declare <2 x i64> @llvm.masked.expandload.v2i64 (i64* , <2 x i1> , <2 x i64> ) + declare <16 x float> @llvm.masked.expandload.v16f32 (ptr , <16 x i1> , <16 x float> ) + declare <2 x i64> @llvm.masked.expandload.v2i64 (ptr , <2 x i1> , <2 x i64> ) Overview: """"""""" @@ -21380,9 +21355,9 @@ ; Load several elements from array B and expand them in a vector. ; The number of loaded elements is equal to the number of '1' elements in the Mask. 
- %Tmp = call <8 x double> @llvm.masked.expandload.v8f64(double* %Bptr, <8 x i1> %Mask, <8 x double> undef) + %Tmp = call <8 x double> @llvm.masked.expandload.v8f64(ptr %Bptr, <8 x i1> %Mask, <8 x double> undef) ; Store the result in A - call void @llvm.masked.store.v8f64.p0v8f64(<8 x double> %Tmp, <8 x double>* %Aptr, i32 8, <8 x i1> %Mask) + call void @llvm.masked.store.v8f64.p0(<8 x double> %Tmp, ptr %Aptr, i32 8, <8 x i1> %Mask) ; %Bptr should be increased on each iteration according to the number of '1' elements in the Mask. %MaskI = bitcast <8 x i1> %Mask to i8 @@ -21405,8 +21380,8 @@ :: - declare void @llvm.masked.compressstore.v8i32 (<8 x i32> , i32* , <8 x i1> ) - declare void @llvm.masked.compressstore.v16f32 (<16 x float> , float* , <16 x i1> ) + declare void @llvm.masked.compressstore.v8i32 (<8 x i32> , ptr , <8 x i1> ) + declare void @llvm.masked.compressstore.v16f32 (<16 x float> , ptr , <16 x i1> ) Overview: """"""""" @@ -21437,9 +21412,9 @@ .. code-block:: llvm ; Load elements from A. - %Tmp = call <8 x double> @llvm.masked.load.v8f64.p0v8f64(<8 x double>* %Aptr, i32 8, <8 x i1> %Mask, <8 x double> undef) + %Tmp = call <8 x double> @llvm.masked.load.v8f64.p0(ptr %Aptr, i32 8, <8 x i1> %Mask, <8 x double> undef) ; Store all selected elements consecutively in array B - call @llvm.masked.compressstore.v8f64(<8 x double> %Tmp, double* %Bptr, <8 x i1> %Mask) + call @llvm.masked.compressstore.v8f64(<8 x double> %Tmp, ptr %Bptr, <8 x i1> %Mask) ; %Bptr should be increased on each iteration according to the number of '1' elements in the Mask. 
%MaskI = bitcast <8 x i1> %Mask to i8 @@ -21468,7 +21443,7 @@ :: - declare void @llvm.lifetime.start(i64 , i8* nocapture ) + declare void @llvm.lifetime.start(i64 , ptr nocapture ) Overview: """"""""" @@ -21518,7 +21493,7 @@ :: - declare void @llvm.lifetime.end(i64 , i8* nocapture ) + declare void @llvm.lifetime.end(i64 , ptr nocapture ) Overview: """"""""" @@ -21558,7 +21533,7 @@ :: - declare {}* @llvm.invariant.start.p0i8(i64 , i8* nocapture ) + declare ptr @llvm.invariant.start.p0(i64 , ptr nocapture ) Overview: """"""""" @@ -21589,7 +21564,7 @@ :: - declare void @llvm.invariant.end.p0i8({}* , i64 , i8* nocapture ) + declare void @llvm.invariant.end.p0(ptr , i64 , ptr nocapture ) Overview: """"""""" @@ -21621,7 +21596,7 @@ :: - declare i8* @llvm.launder.invariant.group.p0i8(i8* ) + declare ptr @llvm.launder.invariant.group.p0(ptr ) Overview: """"""""" @@ -21657,7 +21632,7 @@ :: - declare i8* @llvm.strip.invariant.group.p0i8(i8* ) + declare ptr @llvm.strip.invariant.group.p0(ptr ) Overview: """"""""" @@ -23368,19 +23343,19 @@ ; This examples shows two possible positions for noalias.decl and how they impact the semantics: ; If it is outside the loop (Version 1), then %a and %b are noalias across *all* iterations. ; If it is inside the loop (Version 2), then %a and %b are noalias only within *one* iteration. 
- declare void @decl_in_loop(i8* %a.base, i8* %b.base) { + declare void @decl_in_loop(ptr %a.base, ptr %b.base) { entry: ; call void @llvm.experimental.noalias.scope.decl(metadata !2) ; Version 1: noalias decl outside loop br label %loop loop: - %a = phi i8* [ %a.base, %entry ], [ %a.inc, %loop ] - %b = phi i8* [ %b.base, %entry ], [ %b.inc, %loop ] + %a = phi ptr [ %a.base, %entry ], [ %a.inc, %loop ] + %b = phi ptr [ %b.base, %entry ], [ %b.inc, %loop ] ; call void @llvm.experimental.noalias.scope.decl(metadata !2) ; Version 2: noalias decl inside loop - %val = load i8, i8* %a, !alias.scope !2 - store i8 %val, i8* %b, !noalias !2 - %a.inc = getelementptr inbounds i8, i8* %a, i64 1 - %b.inc = getelementptr inbounds i8, i8* %b, i64 1 + %val = load i8, ptr %a, !alias.scope !2 + store i8 %val, ptr %b, !noalias !2 + %a.inc = getelementptr inbounds i8, ptr %a, i64 1 + %b.inc = getelementptr inbounds i8, ptr %b, i64 1 %cond = call i1 @cond() br i1 %cond, label %loop, label %exit @@ -23560,7 +23535,7 @@ :: - declare void @llvm.var.annotation(i8* , i8* , i8* , i32 ) + declare void @llvm.var.annotation(ptr , ptr , ptr , i32 ) Overview: """"""""" @@ -23595,11 +23570,8 @@ :: - declare i8* @llvm.ptr.annotation.p
i8(i8* , i8* , i8* , i32 ) - declare i16* @llvm.ptr.annotation.p
i16(i16* , i8* , i8* , i32 ) - declare i32* @llvm.ptr.annotation.p
i32(i32* , i8* , i8* , i32 ) - declare i64* @llvm.ptr.annotation.p
i64(i64* , i8* , i8* , i32 ) - declare i256* @llvm.ptr.annotation.p
i256(i256* , i8* , i8* , i32 ) + declare ptr @llvm.ptr.annotation.p0(ptr , ptr , ptr , i32 ) + declare ptr @llvm.ptr.annotation.p1(ptr addrspace(1) , ptr , ptr , i32 ) Overview: """"""""" @@ -23635,11 +23607,11 @@ :: - declare i8 @llvm.annotation.i8(i8 , i8* , i8* , i32 ) - declare i16 @llvm.annotation.i16(i16 , i8* , i8* , i32 ) - declare i32 @llvm.annotation.i32(i32 , i8* , i8* , i32 ) - declare i64 @llvm.annotation.i64(i64 , i8* , i8* , i32 ) - declare i256 @llvm.annotation.i256(i256 , i8* , i8* , i32 ) + declare i8 @llvm.annotation.i8(i8 , ptr , ptr , i32 ) + declare i16 @llvm.annotation.i16(i16 , ptr , ptr , i32 ) + declare i32 @llvm.annotation.i32(i32 , ptr , ptr , i32 ) + declare i64 @llvm.annotation.i64(i64 , ptr , ptr , i32 ) + declare i256 @llvm.annotation.i256(i256 , ptr , ptr , i32 ) Overview: """"""""" @@ -23776,7 +23748,7 @@ :: - declare void @llvm.stackprotector(i8* , i8** ) + declare void @llvm.stackprotector(ptr , ptr ) Overview: """"""""" @@ -23812,7 +23784,7 @@ :: - declare i8* @llvm.stackguard() + declare ptr @llvm.stackguard() Overview: """"""""" @@ -23847,8 +23819,8 @@ :: - declare i32 @llvm.objectsize.i32(i8* , i1 , i1 , i1 ) - declare i64 @llvm.objectsize.i64(i8* , i1 , i1 , i1 ) + declare i32 @llvm.objectsize.i32(ptr , i1 , i1 , i1 ) + declare i64 @llvm.objectsize.i64(ptr , i1 , i1 , i1 ) Overview: """"""""" @@ -24031,7 +24003,7 @@ :: - declare i1 @llvm.type.test(i8* %ptr, metadata %type) nounwind readnone + declare i1 @llvm.type.test(ptr %ptr, metadata %type) nounwind readnone Arguments: @@ -24056,7 +24028,7 @@ :: - declare {i8*, i1} @llvm.type.checked.load(i8* %ptr, i32 %offset, metadata %type) argmemonly nounwind readonly + declare {ptr, i1} @llvm.type.checked.load(ptr %ptr, i32 %offset, metadata %type) argmemonly nounwind readonly Arguments: @@ -24442,7 +24414,7 @@ :: - declare i8* @llvm.load.relative.iN(i8* %ptr, iN %offset) argmemonly nounwind readonly + declare ptr @llvm.load.relative.iN(ptr %ptr, iN %offset) argmemonly nounwind 
readonly Overview: """"""""" @@ -24627,14 +24599,14 @@ :: - declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i32(i8* , - i8* , - i32 , - i32 ) - declare void @llvm.memcpy.element.unordered.atomic.p0i8.p0i8.i64(i8* , - i8* , - i64 , - i32 ) + declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i32(ptr , + ptr , + i32 , + i32 ) + declare void @llvm.memcpy.element.unordered.atomic.p0.p0.i64(ptr , + ptr , + i64 , + i32 ) Overview: """"""""" @@ -24701,14 +24673,14 @@ :: - declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i32(i8* , - i8* , - i32 , - i32 ) - declare void @llvm.memmove.element.unordered.atomic.p0i8.p0i8.i64(i8* , - i8* , - i64 , - i32 ) + declare void @llvm.memmove.element.unordered.atomic.p0.p0.i32(ptr , + ptr , + i32 , + i32 ) + declare void @llvm.memmove.element.unordered.atomic.p0.p0.i64(ptr , + ptr , + i64 , + i32 ) Overview: """"""""" @@ -24782,14 +24754,14 @@ :: - declare void @llvm.memset.element.unordered.atomic.p0i8.i32(i8* , - i8 , - i32 , - i32 ) - declare void @llvm.memset.element.unordered.atomic.p0i8.i64(i8* , - i8 , - i64 , - i32 ) + declare void @llvm.memset.element.unordered.atomic.p0.i32(ptr , + i8 , + i32 , + i32 ) + declare void @llvm.memset.element.unordered.atomic.p0.i64(ptr , + i8 , + i64 , + i32 ) Overview: """"""""" @@ -24855,7 +24827,7 @@ """"""" :: - declare i8* @llvm.objc.autorelease(i8*) + declare ptr @llvm.objc.autorelease(ptr) Lowering: """"""""" @@ -24869,7 +24841,7 @@ """"""" :: - declare void @llvm.objc.autoreleasePoolPop(i8*) + declare void @llvm.objc.autoreleasePoolPop(ptr) Lowering: """"""""" @@ -24883,7 +24855,7 @@ """"""" :: - declare i8* @llvm.objc.autoreleasePoolPush() + declare ptr @llvm.objc.autoreleasePoolPush() Lowering: """"""""" @@ -24897,7 +24869,7 @@ """"""" :: - declare i8* @llvm.objc.autoreleaseReturnValue(i8*) + declare ptr @llvm.objc.autoreleaseReturnValue(ptr) Lowering: """"""""" @@ -24911,7 +24883,7 @@ """"""" :: - declare void @llvm.objc.copyWeak(i8**, i8**) + declare 
void @llvm.objc.copyWeak(ptr, ptr) Lowering: """"""""" @@ -24925,7 +24897,7 @@ """"""" :: - declare void @llvm.objc.destroyWeak(i8**) + declare void @llvm.objc.destroyWeak(ptr) Lowering: """"""""" @@ -24939,7 +24911,7 @@ """"""" :: - declare i8* @llvm.objc.initWeak(i8**, i8*) + declare ptr @llvm.objc.initWeak(ptr, ptr) Lowering: """"""""" @@ -24953,7 +24925,7 @@ """"""" :: - declare i8* @llvm.objc.loadWeak(i8**) + declare ptr @llvm.objc.loadWeak(ptr) Lowering: """"""""" @@ -24967,7 +24939,7 @@ """"""" :: - declare i8* @llvm.objc.loadWeakRetained(i8**) + declare ptr @llvm.objc.loadWeakRetained(ptr) Lowering: """"""""" @@ -24981,7 +24953,7 @@ """"""" :: - declare void @llvm.objc.moveWeak(i8**, i8**) + declare void @llvm.objc.moveWeak(ptr, ptr) Lowering: """"""""" @@ -24995,7 +24967,7 @@ """"""" :: - declare void @llvm.objc.release(i8*) + declare void @llvm.objc.release(ptr) Lowering: """"""""" @@ -25009,7 +24981,7 @@ """"""" :: - declare i8* @llvm.objc.retain(i8*) + declare ptr @llvm.objc.retain(ptr) Lowering: """"""""" @@ -25023,7 +24995,7 @@ """"""" :: - declare i8* @llvm.objc.retainAutorelease(i8*) + declare ptr @llvm.objc.retainAutorelease(ptr) Lowering: """"""""" @@ -25037,7 +25009,7 @@ """"""" :: - declare i8* @llvm.objc.retainAutoreleaseReturnValue(i8*) + declare ptr @llvm.objc.retainAutoreleaseReturnValue(ptr) Lowering: """"""""" @@ -25051,7 +25023,7 @@ """"""" :: - declare i8* @llvm.objc.retainAutoreleasedReturnValue(i8*) + declare ptr @llvm.objc.retainAutoreleasedReturnValue(ptr) Lowering: """"""""" @@ -25065,7 +25037,7 @@ """"""" :: - declare i8* @llvm.objc.retainBlock(i8*) + declare ptr @llvm.objc.retainBlock(ptr) Lowering: """"""""" @@ -25079,7 +25051,7 @@ """"""" :: - declare void @llvm.objc.storeStrong(i8**, i8*) + declare void @llvm.objc.storeStrong(ptr, ptr) Lowering: """"""""" @@ -25093,7 +25065,7 @@ """"""" :: - declare i8* @llvm.objc.storeWeak(i8**, i8*) + declare ptr @llvm.objc.storeWeak(ptr, ptr) Lowering: """""""""