Initial Commit

This commit is contained in:
Sajid
2024-09-07 18:00:09 +06:00
commit 0f9a53f75a
3352 changed files with 1563708 additions and 0 deletions

View File

@@ -0,0 +1,247 @@
//===- AbstractCallSite.h - Abstract call sites -----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the AbstractCallSite class, which is a wrapper that
// allows treating direct, indirect, and callback calls the same.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_ABSTRACTCALLSITE_H
#define LLVM_IR_ABSTRACTCALLSITE_H
#include "llvm/IR/Argument.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
namespace llvm {
/// AbstractCallSite
///
/// An abstract call site is a wrapper that allows treating direct,
/// indirect, and callback calls the same. If an abstract call site
/// represents a direct or indirect call site, it behaves like a stripped
/// down version of a normal call site object. The abstract call site can
/// also represent a callback call, that is, the fact that the initially
/// called function (=broker) may invoke a third one (=callback callee).
/// In this case, the abstract call site hides the middle man, i.e., the
/// broker function. The result is a representation of the callback call,
/// inside the broker, but in the context of the original call to the broker.
///
/// There are up to three functions involved when we talk about callback call
/// sites: the caller (1), which invokes the broker function; the broker
/// function (2), which will invoke the callee zero or more times; and finally
/// the callee (3), which is the target of the callback call.
///
/// The abstract call site will handle the mapping from parameters to arguments
/// depending on the semantics of the broker function. However, it is important
/// to note that the mapping is often partial. Thus, some arguments of the
/// call/invoke instruction are mapped to parameters of the callee while others
/// are not.
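///
/// A minimal usage sketch (hypothetical; assumes \p CB is a call to a broker
/// function that carries callback metadata):
/// \code
///   forEachCallbackCallSite(CB, [](AbstractCallSite ACS) {
///     if (Function *Callee = ACS.getCalledFunction())
///       errs() << "callback callee: " << Callee->getName() << "\n";
///   });
/// \endcode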
class AbstractCallSite {
public:
/// The encoding of a callback with regards to the underlying instruction.
struct CallbackInfo {
/// For direct/indirect calls the parameter encoding is empty. If it is not,
/// the abstract call site represents a callback. In that case, the first
/// element of the encoding vector represents which argument of the call
/// site CB is the callback callee. The remaining elements map parameters
/// (identified by their position) to the arguments that will be passed
/// through (also identified by position but in the call site instruction).
///
/// NOTE that we use LLVM argument numbers (starting at 0) and not
/// clang/source argument numbers (starting at 1). The -1 entries represent
/// unknown values that are passed to the callee.
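///
/// A sketch for a broker like pthread_create (values are illustrative):
/// \code
///   // pthread_create(ptr, attr, callback, payload)
///   //   ParameterEncoding[0] == 2  // "callback" is call argument 2.
///   //   ParameterEncoding[1] == 3  // callback parameter 0 receives argument 3.
/// \endcode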
using ParameterEncodingTy = SmallVector<int, 0>;
ParameterEncodingTy ParameterEncoding;
};
private:
/// The underlying call site:
/// caller -> callee, if this is a direct or indirect call site
/// caller -> broker function, if this is a callback call site
CallBase *CB;
/// The encoding of a callback with regards to the underlying instruction.
CallbackInfo CI;
public:
/// Sole constructor for abstract call sites (ACS).
///
/// An abstract call site can only be constructed through an llvm::Use because
/// each operand (=use) of an instruction could potentially be a different
/// abstract call site. Furthermore, even if the value of the llvm::Use is the
/// same, and the user is as well, the abstract call sites might not be.
///
/// If a use is not associated with an abstract call site, the constructed ACS
/// will evaluate to false if converted to a boolean.
///
/// If the use is the callee use of a call or invoke instruction, the
/// constructed abstract call site will behave as a llvm::CallSite would.
///
/// If the use is not a callee use of a call or invoke instruction, the
/// callback metadata is used to determine the argument <-> parameter mapping
/// as well as the callee of the abstract call site.
AbstractCallSite(const Use *U);
/// Add operand uses of \p CB that represent callback uses into
/// \p CallbackUses.
///
/// All uses added to \p CallbackUses can be used to create abstract call
/// sites for which AbstractCallSite::isCallbackCall() will return true.
static void getCallbackUses(const CallBase &CB,
SmallVectorImpl<const Use *> &CallbackUses);
/// Conversion operator to conveniently check for a valid/initialized ACS.
explicit operator bool() const { return CB != nullptr; }
/// Return the underlying instruction.
CallBase *getInstruction() const { return CB; }
/// Return true if this ACS represents a direct call.
bool isDirectCall() const {
return !isCallbackCall() && !CB->isIndirectCall();
}
/// Return true if this ACS represents an indirect call.
bool isIndirectCall() const {
return !isCallbackCall() && CB->isIndirectCall();
}
/// Return true if this ACS represents a callback call.
bool isCallbackCall() const {
// For a callback call site the callee is ALWAYS stored first in the
// transitive values vector. Thus, a non-empty vector indicates a callback.
return !CI.ParameterEncoding.empty();
}
/// Return true if @p UI is the use that defines the callee of this ACS.
bool isCallee(Value::const_user_iterator UI) const {
return isCallee(&UI.getUse());
}
/// Return true if @p U is the use that defines the callee of this ACS.
bool isCallee(const Use *U) const {
if (isDirectCall())
return CB->isCallee(U);
assert(!CI.ParameterEncoding.empty() &&
"Callback without parameter encoding!");
// If the use is actually in a constant cast expression which itself
// has only one use, we look through the constant cast expression.
if (auto *CE = dyn_cast<ConstantExpr>(U->getUser()))
if (CE->hasOneUse() && CE->isCast())
U = &*CE->use_begin();
return (int)CB->getArgOperandNo(U) == CI.ParameterEncoding[0];
}
/// Return the number of parameters of the callee.
unsigned getNumArgOperands() const {
if (isDirectCall())
return CB->arg_size();
// Subtract 1 for the callee encoding.
return CI.ParameterEncoding.size() - 1;
}
/// Return the operand index of the underlying instruction associated with @p
/// Arg.
int getCallArgOperandNo(Argument &Arg) const {
return getCallArgOperandNo(Arg.getArgNo());
}
/// Return the operand index of the underlying instruction associated with
/// the function parameter number @p ArgNo or -1 if there is none.
int getCallArgOperandNo(unsigned ArgNo) const {
if (isDirectCall())
return ArgNo;
// Add 1 for the callee encoding.
return CI.ParameterEncoding[ArgNo + 1];
}
/// Return the operand of the underlying instruction associated with @p Arg.
Value *getCallArgOperand(Argument &Arg) const {
return getCallArgOperand(Arg.getArgNo());
}
/// Return the operand of the underlying instruction associated with the
/// function parameter number @p ArgNo or nullptr if there is none.
Value *getCallArgOperand(unsigned ArgNo) const {
if (isDirectCall())
return CB->getArgOperand(ArgNo);
// Add 1 for the callee encoding.
return CI.ParameterEncoding[ArgNo + 1] >= 0
? CB->getArgOperand(CI.ParameterEncoding[ArgNo + 1])
: nullptr;
}
/// Return the operand index of the underlying instruction associated with the
/// callee of this ACS. Only valid for callback calls!
int getCallArgOperandNoForCallee() const {
assert(isCallbackCall());
assert(CI.ParameterEncoding.size() && CI.ParameterEncoding[0] >= 0);
return CI.ParameterEncoding[0];
}
/// Return the use of the callee value in the underlying instruction. Only
/// valid for callback calls!
const Use &getCalleeUseForCallback() const {
int CalleeArgIdx = getCallArgOperandNoForCallee();
assert(CalleeArgIdx >= 0 &&
unsigned(CalleeArgIdx) < getInstruction()->getNumOperands());
return getInstruction()->getOperandUse(CalleeArgIdx);
}
/// Return the pointer to function that is being called.
Value *getCalledOperand() const {
if (isDirectCall())
return CB->getCalledOperand();
return CB->getArgOperand(getCallArgOperandNoForCallee());
}
/// Return the function being called if this is a direct call, otherwise
/// return null (if it's an indirect call).
Function *getCalledFunction() const {
Value *V = getCalledOperand();
return V ? dyn_cast<Function>(V->stripPointerCasts()) : nullptr;
}
};
/// Apply function \p Func to each of \p CB's callback call sites.
template <typename UnaryFunction>
void forEachCallbackCallSite(const CallBase &CB, UnaryFunction Func) {
SmallVector<const Use *, 4u> CallbackUses;
AbstractCallSite::getCallbackUses(CB, CallbackUses);
for (const Use *U : CallbackUses) {
AbstractCallSite ACS(U);
assert(ACS && ACS.isCallbackCall() && "must be a callback call");
Func(ACS);
}
}
/// Apply function \p Func to each of \p CB's callback functions.
template <typename UnaryFunction>
void forEachCallbackFunction(const CallBase &CB, UnaryFunction Func) {
forEachCallbackCallSite(CB, [&Func](AbstractCallSite &ACS) {
if (Function *Callback = ACS.getCalledFunction())
Func(Callback);
});
}
} // end namespace llvm
#endif // LLVM_IR_ABSTRACTCALLSITE_H

View File

@@ -0,0 +1,180 @@
//===-- llvm/Argument.h - Definition of the Argument class ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the Argument class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_ARGUMENT_H
#define LLVM_IR_ARGUMENT_H
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Value.h"
namespace llvm {
/// This class represents an incoming formal argument to a Function. A formal
/// argument, since it is ``formal'', does not contain an actual value but
/// instead represents the type, argument number, and attributes of an argument
/// for a specific function. When used in the body of said function, the
/// argument of course represents the value of the actual argument that the
/// function was called with.
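///
/// A short query sketch (assumes an existing Function \p F):
/// \code
///   for (Argument &A : F->args())
///     if (A.hasByValAttr())
///       errs() << A.getName() << " is passed by value\n";
/// \endcode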
class Argument final : public Value {
Function *Parent;
unsigned ArgNo;
friend class Function;
void setParent(Function *parent);
public:
/// Argument constructor.
explicit Argument(Type *Ty, const Twine &Name = "", Function *F = nullptr,
unsigned ArgNo = 0);
inline const Function *getParent() const { return Parent; }
inline Function *getParent() { return Parent; }
/// Return the index of this formal argument in its containing function.
///
/// For example in "void foo(int a, float b)" a is 0 and b is 1.
unsigned getArgNo() const {
assert(Parent && "can't get number of unparented arg");
return ArgNo;
}
/// Return true if this argument has the nonnull attribute. Also returns true
/// if at least one byte is known to be dereferenceable and the pointer is in
/// addrspace(0).
/// If AllowUndefOrPoison is true, respect the semantics of the nonnull
/// attribute and return true even if the argument can be undef or poison.
bool hasNonNullAttr(bool AllowUndefOrPoison = true) const;
/// If this argument has the dereferenceable attribute, return the number of
/// bytes known to be dereferenceable. Otherwise, zero is returned.
uint64_t getDereferenceableBytes() const;
/// If this argument has the dereferenceable_or_null attribute, return the
/// number of bytes known to be dereferenceable. Otherwise, zero is returned.
uint64_t getDereferenceableOrNullBytes() const;
/// Return true if this argument has the byval attribute.
bool hasByValAttr() const;
/// Return true if this argument has the byref attribute.
bool hasByRefAttr() const;
/// Return true if this argument has the swiftself attribute.
bool hasSwiftSelfAttr() const;
/// Return true if this argument has the swifterror attribute.
bool hasSwiftErrorAttr() const;
/// Return true if this argument has the byval, inalloca, or preallocated
/// attribute. These attributes represent arguments being passed by value,
/// with an associated copy between the caller and callee
bool hasPassPointeeByValueCopyAttr() const;
/// If this argument satisfies hasPassPointeeByValueCopyAttr, return the
/// in-memory ABI size copied to the stack for the call. Otherwise, return 0.
uint64_t getPassPointeeByValueCopySize(const DataLayout &DL) const;
/// Return true if this argument has the byval, sret, inalloca, preallocated,
/// or byref attribute. These attributes represent arguments being passed by
/// value (which may or may not involve a stack copy)
bool hasPointeeInMemoryValueAttr() const;
/// If hasPointeeInMemoryValueAttr returns true, the in-memory ABI type is
/// returned. Otherwise, nullptr.
Type *getPointeeInMemoryValueType() const;
/// If this is a byval or inalloca argument, return its alignment.
/// FIXME: Remove this function once transition to Align is over.
/// Use getParamAlign() instead.
uint64_t getParamAlignment() const;
/// If this is a byval or inalloca argument, return its alignment.
MaybeAlign getParamAlign() const;
MaybeAlign getParamStackAlign() const;
/// If this is a byval argument, return its type.
Type *getParamByValType() const;
/// If this is an sret argument, return its type.
Type *getParamStructRetType() const;
/// If this is a byref argument, return its type.
Type *getParamByRefType() const;
/// If this is an inalloca argument, return its type.
Type *getParamInAllocaType() const;
/// Return true if this argument has the nest attribute.
bool hasNestAttr() const;
/// Return true if this argument has the noalias attribute.
bool hasNoAliasAttr() const;
/// Return true if this argument has the nocapture attribute.
bool hasNoCaptureAttr() const;
/// Return true if this argument has the nofree attribute.
bool hasNoFreeAttr() const;
/// Return true if this argument has the sret attribute.
bool hasStructRetAttr() const;
/// Return true if this argument has the inreg attribute.
bool hasInRegAttr() const;
/// Return true if this argument has the returned attribute.
bool hasReturnedAttr() const;
/// Return true if this argument has the readonly or readnone attribute.
bool onlyReadsMemory() const;
/// Return true if this argument has the inalloca attribute.
bool hasInAllocaAttr() const;
/// Return true if this argument has the preallocated attribute.
bool hasPreallocatedAttr() const;
/// Return true if this argument has the zext attribute.
bool hasZExtAttr() const;
/// Return true if this argument has the sext attribute.
bool hasSExtAttr() const;
/// Add attributes to an argument.
void addAttrs(AttrBuilder &B);
void addAttr(Attribute::AttrKind Kind);
void addAttr(Attribute Attr);
/// Remove attributes from an argument.
void removeAttr(Attribute::AttrKind Kind);
void removeAttrs(const AttributeMask &AM);
/// Check if an argument has a given attribute.
bool hasAttribute(Attribute::AttrKind Kind) const;
Attribute getAttribute(Attribute::AttrKind Kind) const;
/// Method for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return V->getValueID() == ArgumentVal;
}
};
} // End llvm namespace
#endif

View File

@@ -0,0 +1,61 @@
//===-- AssemblyAnnotationWriter.h - Annotation .ll files -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Clients of the assembly writer can use this interface to add their own
// special-purpose annotations to LLVM assembly language printouts. Note that
// the assembly parser won't be able to parse these, in general, so
// implementations are advised to print stuff as LLVM comments.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
#define LLVM_IR_ASSEMBLYANNOTATIONWRITER_H
namespace llvm {
class Function;
class BasicBlock;
class Instruction;
class Value;
class formatted_raw_ostream;
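/// Clients subclass AssemblyAnnotationWriter and override the hooks they
/// need. A minimal sketch (hypothetical subclass) that prints an opcode
/// comment before every instruction:
/// \code
///   struct OpcodeAnnotator : AssemblyAnnotationWriter {
///     void emitInstructionAnnot(const Instruction *I,
///                               formatted_raw_ostream &OS) override {
///       OS << "; opcode: " << I->getOpcodeName() << "\n";
///     }
///   };
/// \endcode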
class AssemblyAnnotationWriter {
public:
virtual ~AssemblyAnnotationWriter();
/// emitFunctionAnnot - This may be implemented to emit a string right before
/// the start of a function.
virtual void emitFunctionAnnot(const Function *,
formatted_raw_ostream &) {}
/// emitBasicBlockStartAnnot - This may be implemented to emit a string right
/// after the basic block label, but before the first instruction in the
/// block.
virtual void emitBasicBlockStartAnnot(const BasicBlock *,
formatted_raw_ostream &) {
}
/// emitBasicBlockEndAnnot - This may be implemented to emit a string right
/// after the basic block.
virtual void emitBasicBlockEndAnnot(const BasicBlock *,
formatted_raw_ostream &) {
}
/// emitInstructionAnnot - This may be implemented to emit a string right
/// before an instruction is emitted.
virtual void emitInstructionAnnot(const Instruction *,
formatted_raw_ostream &) {}
/// printInfoComment - This may be implemented to emit a comment to the
/// right of an instruction or global value.
virtual void printInfoComment(const Value &, formatted_raw_ostream &) {}
};
} // End llvm namespace
#endif

View File

@@ -0,0 +1,70 @@
//===--- Assumptions.h - Assumption handling and organization ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// String assumptions that are known to optimization passes should be placed in
// the KnownAssumptionStrings set. This can be done in various ways, e.g.,
// via a static KnownAssumptionString object.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_ASSUMPTIONS_H
#define LLVM_IR_ASSUMPTIONS_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/StringSet.h"
namespace llvm {
class Function;
class CallBase;
/// The key we use for assumption attributes.
constexpr StringRef AssumptionAttrKey = "llvm.assume";
/// A set of known assumption strings that are accepted without warning and
/// which can be recommended as typo correction.
extern StringSet<> KnownAssumptionStrings;
/// Helper that allows inserting a new assumption string into the known
/// assumption set by creating a (static) object.
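///
/// A registration sketch (the assumption string is illustrative):
/// \code
///   static KnownAssumptionString MyPassAssumption("my-pass-assumption");
/// \endcode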
struct KnownAssumptionString {
KnownAssumptionString(StringRef AssumptionStr)
: AssumptionStr(AssumptionStr) {
KnownAssumptionStrings.insert(AssumptionStr);
}
operator StringRef() const { return AssumptionStr; }
private:
StringRef AssumptionStr;
};
/// Return true if \p F has the assumption \p AssumptionStr attached.
bool hasAssumption(const Function &F,
const KnownAssumptionString &AssumptionStr);
/// Return true if \p CB or the callee has the assumption \p AssumptionStr
/// attached.
bool hasAssumption(const CallBase &CB,
const KnownAssumptionString &AssumptionStr);
/// Return the set of all assumptions for the function \p F.
DenseSet<StringRef> getAssumptions(const Function &F);
/// Return the set of all assumptions for the call \p CB.
DenseSet<StringRef> getAssumptions(const CallBase &CB);
/// Appends the set of assumptions \p Assumptions to \p F.
bool addAssumptions(Function &F, const DenseSet<StringRef> &Assumptions);
/// Appends the set of assumptions \p Assumptions to \p CB.
bool addAssumptions(CallBase &CB, const DenseSet<StringRef> &Assumptions);
} // namespace llvm
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,350 @@
//===- Attributes.td - Defines all LLVM attributes ---------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all the LLVM attributes.
//
//===----------------------------------------------------------------------===//
/// Attribute property base class.
class AttrProperty;
/// Can be used as function attribute.
def FnAttr : AttrProperty;
/// Can be used as parameter attribute.
def ParamAttr : AttrProperty;
/// Can be used as return attribute.
def RetAttr : AttrProperty;
/// Attribute base class.
class Attr<string S, list<AttrProperty> P> {
// String representation of this attribute in the IR.
string AttrString = S;
list<AttrProperty> Properties = P;
}
/// Enum attribute.
class EnumAttr<string S, list<AttrProperty> P> : Attr<S, P>;
/// Int attribute.
class IntAttr<string S, list<AttrProperty> P> : Attr<S, P>;
/// Type attribute.
class TypeAttr<string S, list<AttrProperty> P> : Attr<S, P>;
/// StringBool attribute.
class StrBoolAttr<string S> : Attr<S, []>;
/// Target-independent enum attributes.
/// Alignment of parameter (5 bits) stored as log2 of alignment with +1 bias.
/// 0 means unaligned (different from align(1)).
def Alignment : IntAttr<"align", [ParamAttr, RetAttr]>;
/// The result of the function is guaranteed to point to a number of bytes that
/// we can determine if we know the value of the function's arguments.
def AllocSize : IntAttr<"allocsize", [FnAttr]>;
/// inline=always.
def AlwaysInline : EnumAttr<"alwaysinline", [FnAttr]>;
/// Function can access memory only using pointers based on its arguments.
def ArgMemOnly : EnumAttr<"argmemonly", [FnAttr]>;
/// Callee is recognized as a builtin, despite nobuiltin attribute on its
/// declaration.
def Builtin : EnumAttr<"builtin", [FnAttr]>;
/// Pass structure by value.
def ByVal : TypeAttr<"byval", [ParamAttr]>;
/// Mark in-memory ABI type.
def ByRef : TypeAttr<"byref", [ParamAttr]>;
/// Parameter or return value may not contain uninitialized or poison bits.
def NoUndef : EnumAttr<"noundef", [ParamAttr, RetAttr]>;
/// Marks function as being in a cold path.
def Cold : EnumAttr<"cold", [FnAttr]>;
/// Can only be moved to control-equivalent blocks.
def Convergent : EnumAttr<"convergent", [FnAttr]>;
/// Marks function as being in a hot path and frequently called.
def Hot : EnumAttr<"hot", [FnAttr]>;
/// Pointer is known to be dereferenceable.
def Dereferenceable : IntAttr<"dereferenceable", [ParamAttr, RetAttr]>;
/// Pointer is either null or dereferenceable.
def DereferenceableOrNull : IntAttr<"dereferenceable_or_null",
[ParamAttr, RetAttr]>;
/// Do not instrument function with sanitizers.
def DisableSanitizerInstrumentation : EnumAttr<"disable_sanitizer_instrumentation", [FnAttr]>;
/// Provide pointer element type to intrinsic.
def ElementType : TypeAttr<"elementtype", [ParamAttr]>;
/// Function may only access memory that is inaccessible from IR.
def InaccessibleMemOnly : EnumAttr<"inaccessiblememonly", [FnAttr]>;
/// Function may only access memory that is either inaccessible from the IR,
/// or pointed to by its pointer arguments.
def InaccessibleMemOrArgMemOnly : EnumAttr<"inaccessiblemem_or_argmemonly",
[FnAttr]>;
/// Pass structure in an alloca.
def InAlloca : TypeAttr<"inalloca", [ParamAttr]>;
/// Source said inlining was desirable.
def InlineHint : EnumAttr<"inlinehint", [FnAttr]>;
/// Force argument to be passed in register.
def InReg : EnumAttr<"inreg", [ParamAttr, RetAttr]>;
/// Build jump-instruction tables and replace refs.
def JumpTable : EnumAttr<"jumptable", [FnAttr]>;
/// Function must be optimized for size first.
def MinSize : EnumAttr<"minsize", [FnAttr]>;
/// Naked function.
def Naked : EnumAttr<"naked", [FnAttr]>;
/// Nested function static chain.
def Nest : EnumAttr<"nest", [ParamAttr]>;
/// Considered to not alias after call.
def NoAlias : EnumAttr<"noalias", [ParamAttr, RetAttr]>;
/// Callee isn't recognized as a builtin.
def NoBuiltin : EnumAttr<"nobuiltin", [FnAttr]>;
/// Function cannot enter into caller's translation unit.
def NoCallback : EnumAttr<"nocallback", [FnAttr]>;
/// Function creates no aliases of pointer.
def NoCapture : EnumAttr<"nocapture", [ParamAttr]>;
/// Call cannot be duplicated.
def NoDuplicate : EnumAttr<"noduplicate", [FnAttr]>;
/// Function does not deallocate memory.
def NoFree : EnumAttr<"nofree", [FnAttr, ParamAttr]>;
/// Disable implicit floating point insts.
def NoImplicitFloat : EnumAttr<"noimplicitfloat", [FnAttr]>;
/// inline=never.
def NoInline : EnumAttr<"noinline", [FnAttr]>;
/// Function is called early and/or often, so lazy binding isn't worthwhile.
def NonLazyBind : EnumAttr<"nonlazybind", [FnAttr]>;
/// Disable merging for specified functions or call sites.
def NoMerge : EnumAttr<"nomerge", [FnAttr]>;
/// Pointer is known to be not null.
def NonNull : EnumAttr<"nonnull", [ParamAttr, RetAttr]>;
/// The function does not recurse.
def NoRecurse : EnumAttr<"norecurse", [FnAttr]>;
/// Disable redzone.
def NoRedZone : EnumAttr<"noredzone", [FnAttr]>;
/// Mark the function as not returning.
def NoReturn : EnumAttr<"noreturn", [FnAttr]>;
/// Function does not synchronize.
def NoSync : EnumAttr<"nosync", [FnAttr]>;
/// Disable Indirect Branch Tracking.
def NoCfCheck : EnumAttr<"nocf_check", [FnAttr]>;
/// Function should not be instrumented.
def NoProfile : EnumAttr<"noprofile", [FnAttr]>;
/// Function doesn't unwind stack.
def NoUnwind : EnumAttr<"nounwind", [FnAttr]>;
/// No SanitizeCoverage instrumentation.
def NoSanitizeCoverage : EnumAttr<"nosanitize_coverage", [FnAttr]>;
/// Null pointer in address space zero is valid.
def NullPointerIsValid : EnumAttr<"null_pointer_is_valid", [FnAttr]>;
/// Select optimizations for best fuzzing signal.
def OptForFuzzing : EnumAttr<"optforfuzzing", [FnAttr]>;
/// opt_size.
def OptimizeForSize : EnumAttr<"optsize", [FnAttr]>;
/// Function must not be optimized.
def OptimizeNone : EnumAttr<"optnone", [FnAttr]>;
/// Similar to byval but without a copy.
def Preallocated : TypeAttr<"preallocated", [FnAttr, ParamAttr]>;
/// Function does not access memory.
def ReadNone : EnumAttr<"readnone", [FnAttr, ParamAttr]>;
/// Function only reads from memory.
def ReadOnly : EnumAttr<"readonly", [FnAttr, ParamAttr]>;
/// Return value is always equal to this argument.
def Returned : EnumAttr<"returned", [ParamAttr]>;
/// Parameter is required to be a trivial constant.
def ImmArg : EnumAttr<"immarg", [ParamAttr]>;
/// Function can return twice.
def ReturnsTwice : EnumAttr<"returns_twice", [FnAttr]>;
/// Safe Stack protection.
def SafeStack : EnumAttr<"safestack", [FnAttr]>;
/// Shadow Call Stack protection.
def ShadowCallStack : EnumAttr<"shadowcallstack", [FnAttr]>;
/// Sign extended before/after call.
def SExt : EnumAttr<"signext", [ParamAttr, RetAttr]>;
/// Alignment of stack for function (3 bits) stored as log2 of alignment with
/// +1 bias. 0 means unaligned (different from alignstack=(1)).
def StackAlignment : IntAttr<"alignstack", [FnAttr, ParamAttr]>;
/// Function can be speculated.
def Speculatable : EnumAttr<"speculatable", [FnAttr]>;
/// Stack protection.
def StackProtect : EnumAttr<"ssp", [FnAttr]>;
/// Stack protection required.
def StackProtectReq : EnumAttr<"sspreq", [FnAttr]>;
/// Strong Stack protection.
def StackProtectStrong : EnumAttr<"sspstrong", [FnAttr]>;
/// Function was called in a scope requiring strict floating point semantics.
def StrictFP : EnumAttr<"strictfp", [FnAttr]>;
/// Hidden pointer to structure to return.
def StructRet : TypeAttr<"sret", [ParamAttr]>;
/// AddressSanitizer is on.
def SanitizeAddress : EnumAttr<"sanitize_address", [FnAttr]>;
/// ThreadSanitizer is on.
def SanitizeThread : EnumAttr<"sanitize_thread", [FnAttr]>;
/// MemorySanitizer is on.
def SanitizeMemory : EnumAttr<"sanitize_memory", [FnAttr]>;
/// HWAddressSanitizer is on.
def SanitizeHWAddress : EnumAttr<"sanitize_hwaddress", [FnAttr]>;
/// MemTagSanitizer is on.
def SanitizeMemTag : EnumAttr<"sanitize_memtag", [FnAttr]>;
/// Speculative Load Hardening is enabled.
///
/// Note that this uses the default compatibility (always compatible during
/// inlining) and a conservative merge strategy where inlining an attributed
/// body will add the attribute to the caller. This ensures that code carrying
/// this attribute will always be lowered with hardening enabled.
def SpeculativeLoadHardening : EnumAttr<"speculative_load_hardening",
[FnAttr]>;
/// Argument is swift error.
def SwiftError : EnumAttr<"swifterror", [ParamAttr]>;
/// Argument is swift self/context.
def SwiftSelf : EnumAttr<"swiftself", [ParamAttr]>;
/// Argument is swift async context.
def SwiftAsync : EnumAttr<"swiftasync", [ParamAttr]>;
/// Function must be in an unwind table.
def UWTable : EnumAttr<"uwtable", [FnAttr]>;
/// Minimum/Maximum vscale value for function.
def VScaleRange : IntAttr<"vscale_range", [FnAttr]>;
/// Function always comes back to callsite.
def WillReturn : EnumAttr<"willreturn", [FnAttr]>;
/// Function only writes to memory.
def WriteOnly : EnumAttr<"writeonly", [FnAttr, ParamAttr]>;
/// Zero extended before/after call.
def ZExt : EnumAttr<"zeroext", [ParamAttr, RetAttr]>;
/// Function is required to make Forward Progress.
def MustProgress : EnumAttr<"mustprogress", [FnAttr]>;
/// Target-independent string attributes.
def LessPreciseFPMAD : StrBoolAttr<"less-precise-fpmad">;
def NoInfsFPMath : StrBoolAttr<"no-infs-fp-math">;
def NoNansFPMath : StrBoolAttr<"no-nans-fp-math">;
def NoSignedZerosFPMath : StrBoolAttr<"no-signed-zeros-fp-math">;
def UnsafeFPMath : StrBoolAttr<"unsafe-fp-math">;
def NoJumpTables : StrBoolAttr<"no-jump-tables">;
def NoInlineLineTables : StrBoolAttr<"no-inline-line-tables">;
def ProfileSampleAccurate : StrBoolAttr<"profile-sample-accurate">;
def UseSampleProfile : StrBoolAttr<"use-sample-profile">;
class CompatRule<string F> {
// The name of the function called to check the attribute of the caller and
// callee and decide whether inlining should be allowed. The function's
// signature must match "bool(const Function&, const Function &)", where the
// first parameter is the reference to the caller and the second parameter is
// the reference to the callee. It must return false if the attributes of the
// caller and callee are incompatible, and true otherwise.
string CompatFunc = F;
}
def : CompatRule<"isEqual<SanitizeAddressAttr>">;
def : CompatRule<"isEqual<SanitizeThreadAttr>">;
def : CompatRule<"isEqual<SanitizeMemoryAttr>">;
def : CompatRule<"isEqual<SanitizeHWAddressAttr>">;
def : CompatRule<"isEqual<SanitizeMemTagAttr>">;
def : CompatRule<"isEqual<SafeStackAttr>">;
def : CompatRule<"isEqual<ShadowCallStackAttr>">;
def : CompatRule<"isEqual<UseSampleProfileAttr>">;
def : CompatRule<"isEqual<NoProfileAttr>">;
class MergeRule<string F> {
// The name of the function called to merge the attributes of the caller and
// callee. The function's signature must match
// "void(Function&, const Function &)", where the first parameter is the
// reference to the caller and the second parameter is the reference to the
// callee.
string MergeFunc = F;
}
def : MergeRule<"setAND<LessPreciseFPMADAttr>">;
def : MergeRule<"setAND<NoInfsFPMathAttr>">;
def : MergeRule<"setAND<NoNansFPMathAttr>">;
def : MergeRule<"setAND<NoSignedZerosFPMathAttr>">;
def : MergeRule<"setAND<UnsafeFPMathAttr>">;
def : MergeRule<"setOR<NoImplicitFloatAttr>">;
def : MergeRule<"setOR<NoJumpTablesAttr>">;
def : MergeRule<"setOR<ProfileSampleAccurateAttr>">;
def : MergeRule<"setOR<SpeculativeLoadHardeningAttr>">;
def : MergeRule<"adjustCallerSSPLevel">;
def : MergeRule<"adjustCallerStackProbes">;
def : MergeRule<"adjustCallerStackProbeSize">;
def : MergeRule<"adjustMinLegalVectorWidth">;
def : MergeRule<"adjustNullPointerValidAttr">;
def : MergeRule<"setAND<MustProgressAttr>">;
// Target dependent attributes
include "llvm/IR/AttributesAMDGPU.td"

View File

@@ -0,0 +1,14 @@
//===- AttributesAMDGPU.td - Defines AMDGPU attributes -----*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines AMDGPU specific attributes.
//
//===----------------------------------------------------------------------===//
def AMDGPUUnsafeFPAtomics : StrBoolAttr<"amdgpu-unsafe-fp-atomics">;
def : MergeRule<"setAND<AMDGPUUnsafeFPAtomicsAttr>">;

View File

@@ -0,0 +1,103 @@
//===- AutoUpgrade.h - AutoUpgrade Helpers ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These functions are implemented by lib/IR/AutoUpgrade.cpp.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_AUTOUPGRADE_H
#define LLVM_IR_AUTOUPGRADE_H
#include "llvm/ADT/StringRef.h"
namespace llvm {
class AttrBuilder;
class CallInst;
class Constant;
class Function;
class Instruction;
class MDNode;
class Module;
class GlobalVariable;
class Type;
class Value;
/// This is a more granular function that simply checks an intrinsic function
/// for upgrading, and returns true if it requires upgrading. It may return
/// null in NewFn if all calls to the original intrinsic function
/// should be transformed to non-function-call instructions.
bool UpgradeIntrinsicFunction(Function *F, Function *&NewFn);
/// This is the complement to the above, replacing a specific call to an
/// intrinsic function with a call to the specified new function.
void UpgradeIntrinsicCall(CallInst *CI, Function *NewFn);
// This upgrades the comment for objc retain release markers in inline asm
// calls.
void UpgradeInlineAsmString(std::string *AsmStr);
/// This is an auto-upgrade hook for any old intrinsic function syntaxes
/// which need to have both the function updated as well as all calls updated
/// to the new function. This should only be run in a post-processing fashion
/// so that it can update all calls to the old function.
void UpgradeCallsToIntrinsic(Function* F);
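/// A typical post-processing sketch (assumes a just-parsed Module \p M; uses
/// an early-increment range because upgraded functions may be erased):
/// \code
///   for (Function &F : llvm::make_early_inc_range(M))
///     UpgradeCallsToIntrinsic(&F);
/// \endcode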
/// This checks for global variables which should be upgraded. If it requires
/// upgrading, it returns a pointer to the upgraded variable.
GlobalVariable *UpgradeGlobalVariable(GlobalVariable *GV);
/// This checks for module flags which should be upgraded. It returns true if
/// the module is modified.
bool UpgradeModuleFlags(Module &M);
/// Convert calls to ARC runtime functions to intrinsic calls and upgrade the
/// old retain release marker to new module flag format.
void UpgradeARCRuntime(Module &M);
void UpgradeSectionAttributes(Module &M);
/// Correct any IR that is relying on old function attribute behavior.
void UpgradeFunctionAttributes(Function &F);
/// If the given TBAA tag uses the scalar TBAA format, create a new node
/// corresponding to the upgrade to the struct-path aware TBAA format.
/// Otherwise return the \p TBAANode itself.
MDNode *UpgradeTBAANode(MDNode &TBAANode);
/// This is an auto-upgrade for bitcast between pointers with different
/// address spaces: the instruction is replaced by a pair ptrtoint+inttoptr.
Instruction *UpgradeBitCastInst(unsigned Opc, Value *V, Type *DestTy,
Instruction *&Temp);
/// This is an auto-upgrade for bitcast constant expression between pointers
/// with different address spaces: the instruction is replaced by a pair
/// ptrtoint+inttoptr.
Value *UpgradeBitCastExpr(unsigned Opc, Constant *C, Type *DestTy);
/// Check the debug info version number; if it is outdated, drop the debug
/// info. Return true if the module is modified.
bool UpgradeDebugInfo(Module &M);
/// Check whether a string looks like an old loop attachment tag.
inline bool mayBeOldLoopAttachmentTag(StringRef Name) {
return Name.startswith("llvm.vectorizer.");
}
/// Upgrade the loop attachment metadata node.
MDNode *upgradeInstructionLoopAttachment(MDNode &N);
/// Upgrade the datalayout string by adding a section for address space
/// pointers.
std::string UpgradeDataLayoutString(StringRef DL, StringRef Triple);
/// Upgrade attributes that changed format or kind.
void UpgradeAttributes(AttrBuilder &B);
} // End llvm namespace
#endif

View File

@@ -0,0 +1,588 @@
//===- llvm/BasicBlock.h - Represent a basic block in the VM ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the BasicBlock class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_BASICBLOCK_H
#define LLVM_IR_BASICBLOCK_H
#include "llvm-c/Types.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
class AssemblyAnnotationWriter;
class CallInst;
class Function;
class LandingPadInst;
class LLVMContext;
class Module;
class PHINode;
class ValueSymbolTable;
/// LLVM Basic Block Representation
///
/// This represents a single basic block in LLVM. A basic block is simply a
/// container of instructions that execute sequentially. Basic blocks are Values
/// because they are referenced by instructions such as branches and switch
/// tables. The type of a BasicBlock is "Type::LabelTy" because the basic block
/// represents a label to which a branch can jump.
///
/// A well formed basic block is formed of a list of non-terminating
/// instructions followed by a single terminator instruction. Terminator
/// instructions may not occur in the middle of basic blocks, and must terminate
/// the blocks. The BasicBlock class allows malformed basic blocks to occur
/// because it may be useful in the intermediate stage of constructing or
/// modifying a program. However, the verifier will ensure that basic blocks are
/// "well formed".
class BasicBlock final : public Value, // Basic blocks are data objects also
public ilist_node_with_parent<BasicBlock, Function> {
public:
using InstListType = SymbolTableList<Instruction>;
private:
friend class BlockAddress;
friend class SymbolTableListTraits<BasicBlock>;
InstListType InstList;
Function *Parent;
void setParent(Function *parent);
/// Constructor.
///
/// If the function parameter is specified, the basic block is automatically
/// inserted at either the end of the function (if InsertBefore is null), or
/// before the specified basic block.
explicit BasicBlock(LLVMContext &C, const Twine &Name = "",
Function *Parent = nullptr,
BasicBlock *InsertBefore = nullptr);
public:
BasicBlock(const BasicBlock &) = delete;
BasicBlock &operator=(const BasicBlock &) = delete;
~BasicBlock();
/// Get the context in which this basic block lives.
LLVMContext &getContext() const;
/// Instruction iterators...
using iterator = InstListType::iterator;
using const_iterator = InstListType::const_iterator;
using reverse_iterator = InstListType::reverse_iterator;
using const_reverse_iterator = InstListType::const_reverse_iterator;
/// Creates a new BasicBlock.
///
/// If the Parent parameter is specified, the basic block is automatically
/// inserted at either the end of the function (if InsertBefore is 0), or
/// before the specified basic block.
static BasicBlock *Create(LLVMContext &Context, const Twine &Name = "",
Function *Parent = nullptr,
BasicBlock *InsertBefore = nullptr) {
return new BasicBlock(Context, Name, Parent, InsertBefore);
}
/// Return the enclosing method, or null if none.
const Function *getParent() const { return Parent; }
Function *getParent() { return Parent; }
/// Return the module owning the function this basic block belongs to, or
/// nullptr if the function does not have a module.
///
/// Note: this is undefined behavior if the block does not have a parent.
const Module *getModule() const;
Module *getModule() {
return const_cast<Module *>(
static_cast<const BasicBlock *>(this)->getModule());
}
/// Returns the terminator instruction if the block is well formed or null
/// if the block is not well formed.
const Instruction *getTerminator() const LLVM_READONLY;
Instruction *getTerminator() {
return const_cast<Instruction *>(
static_cast<const BasicBlock *>(this)->getTerminator());
}
/// Returns the call instruction calling \@llvm.experimental.deoptimize
/// prior to the terminating return instruction of this basic block, if such
/// a call is present. Otherwise, returns null.
const CallInst *getTerminatingDeoptimizeCall() const;
CallInst *getTerminatingDeoptimizeCall() {
return const_cast<CallInst *>(
static_cast<const BasicBlock *>(this)->getTerminatingDeoptimizeCall());
}
/// Returns the call instruction calling \@llvm.experimental.deoptimize
/// that is present either in current basic block or in block that is a unique
/// successor to current block, if such call is present. Otherwise, returns null.
const CallInst *getPostdominatingDeoptimizeCall() const;
CallInst *getPostdominatingDeoptimizeCall() {
return const_cast<CallInst *>(
static_cast<const BasicBlock *>(this)->getPostdominatingDeoptimizeCall());
}
/// Returns the call instruction marked 'musttail' prior to the terminating
/// return instruction of this basic block, if such a call is present.
/// Otherwise, returns null.
const CallInst *getTerminatingMustTailCall() const;
CallInst *getTerminatingMustTailCall() {
return const_cast<CallInst *>(
static_cast<const BasicBlock *>(this)->getTerminatingMustTailCall());
}
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode instruction.
///
/// When adding instructions to the beginning of the basic block, they should
/// be added before the returned value, not before the first instruction,
/// which might be a PHI. Returns null if there's no non-PHI instruction.
const Instruction* getFirstNonPHI() const;
Instruction* getFirstNonPHI() {
return const_cast<Instruction *>(
static_cast<const BasicBlock *>(this)->getFirstNonPHI());
}
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode or a debug intrinsic, or any pseudo operation if \c SkipPseudoOp
/// is true.
const Instruction *getFirstNonPHIOrDbg(bool SkipPseudoOp = true) const;
Instruction *getFirstNonPHIOrDbg(bool SkipPseudoOp = true) {
return const_cast<Instruction *>(
static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbg(
SkipPseudoOp));
}
/// Returns a pointer to the first instruction in this block that is not a
/// PHINode, a debug intrinsic, or a lifetime intrinsic, or any pseudo
/// operation if \c SkipPseudoOp is true.
const Instruction *
getFirstNonPHIOrDbgOrLifetime(bool SkipPseudoOp = true) const;
Instruction *getFirstNonPHIOrDbgOrLifetime(bool SkipPseudoOp = true) {
return const_cast<Instruction *>(
static_cast<const BasicBlock *>(this)->getFirstNonPHIOrDbgOrLifetime(
SkipPseudoOp));
}
/// Returns an iterator to the first instruction in this block that is
/// suitable for inserting a non-PHI instruction.
///
/// In particular, it skips all PHIs and LandingPad instructions.
const_iterator getFirstInsertionPt() const;
iterator getFirstInsertionPt() {
return static_cast<const BasicBlock *>(this)
->getFirstInsertionPt().getNonConst();
}
/// Return a const iterator range over the instructions in the block, skipping
/// any debug instructions. Skip any pseudo operations as well if \c
/// SkipPseudoOp is true.
iterator_range<filter_iterator<BasicBlock::const_iterator,
std::function<bool(const Instruction &)>>>
instructionsWithoutDebug(bool SkipPseudoOp = true) const;
/// Return an iterator range over the instructions in the block, skipping any
/// debug instructions. Skip any pseudo operations as well if \c
/// SkipPseudoOp is true.
iterator_range<
filter_iterator<BasicBlock::iterator, std::function<bool(Instruction &)>>>
instructionsWithoutDebug(bool SkipPseudoOp = true);
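/// For example (a sketch, assuming an existing BasicBlock \p BB):
/// \code
///   for (Instruction &I : BB.instructionsWithoutDebug())
///     errs() << I << "\n";
/// \endcode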
/// Return the size of the basic block ignoring debug instructions.
filter_iterator<BasicBlock::const_iterator,
std::function<bool(const Instruction &)>>::difference_type
sizeWithoutDebug() const;
/// Unlink 'this' from the containing function, but do not delete it.
void removeFromParent();
/// Unlink 'this' from the containing function and delete it.
///
/// \returns an iterator pointing to the element after the erased one.
SymbolTableList<BasicBlock>::iterator eraseFromParent();
/// Unlink this basic block from its current function and insert it into
/// the function that \p MovePos lives in, right before \p MovePos.
void moveBefore(BasicBlock *MovePos);
/// Unlink this basic block from its current function and insert it
/// right after \p MovePos in the function \p MovePos lives in.
void moveAfter(BasicBlock *MovePos);
/// Insert unlinked basic block into a function.
///
/// Inserts an unlinked basic block into \c Parent. If \c InsertBefore is
/// provided, inserts before that basic block, otherwise inserts at the end.
///
/// \pre \a getParent() is \c nullptr.
void insertInto(Function *Parent, BasicBlock *InsertBefore = nullptr);
/// Return the predecessor of this block if it has a single predecessor
/// block. Otherwise return a null pointer.
const BasicBlock *getSinglePredecessor() const;
BasicBlock *getSinglePredecessor() {
return const_cast<BasicBlock *>(
static_cast<const BasicBlock *>(this)->getSinglePredecessor());
}
/// Return the predecessor of this block if it has a unique predecessor
/// block. Otherwise return a null pointer.
///
/// Note that a unique predecessor doesn't mean a single edge; there can be
/// multiple edges from the unique predecessor to this block (for example a
/// switch statement with multiple cases having the same destination).
const BasicBlock *getUniquePredecessor() const;
BasicBlock *getUniquePredecessor() {
return const_cast<BasicBlock *>(
static_cast<const BasicBlock *>(this)->getUniquePredecessor());
}
/// Return true if this block has exactly N predecessors.
bool hasNPredecessors(unsigned N) const;
/// Return true if this block has N predecessors or more.
bool hasNPredecessorsOrMore(unsigned N) const;
/// Return the successor of this block if it has a single successor.
/// Otherwise return a null pointer.
///
/// This method is analogous to getSinglePredecessor above.
const BasicBlock *getSingleSuccessor() const;
BasicBlock *getSingleSuccessor() {
return const_cast<BasicBlock *>(
static_cast<const BasicBlock *>(this)->getSingleSuccessor());
}
/// Return the successor of this block if it has a unique successor.
/// Otherwise return a null pointer.
///
/// This method is analogous to getUniquePredecessor above.
const BasicBlock *getUniqueSuccessor() const;
BasicBlock *getUniqueSuccessor() {
return const_cast<BasicBlock *>(
static_cast<const BasicBlock *>(this)->getUniqueSuccessor());
}
/// Print the basic block to an output stream with an optional
/// AssemblyAnnotationWriter.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
bool ShouldPreserveUseListOrder = false,
bool IsForDebug = false) const;
//===--------------------------------------------------------------------===//
/// Instruction iterator methods
///
inline iterator begin() { return InstList.begin(); }
inline const_iterator begin() const { return InstList.begin(); }
inline iterator end () { return InstList.end(); }
inline const_iterator end () const { return InstList.end(); }
inline reverse_iterator rbegin() { return InstList.rbegin(); }
inline const_reverse_iterator rbegin() const { return InstList.rbegin(); }
inline reverse_iterator rend () { return InstList.rend(); }
inline const_reverse_iterator rend () const { return InstList.rend(); }
inline size_t size() const { return InstList.size(); }
inline bool empty() const { return InstList.empty(); }
inline const Instruction &front() const { return InstList.front(); }
inline Instruction &front() { return InstList.front(); }
inline const Instruction &back() const { return InstList.back(); }
inline Instruction &back() { return InstList.back(); }
/// Iterator to walk just the phi nodes in the basic block.
template <typename PHINodeT = PHINode, typename BBIteratorT = iterator>
class phi_iterator_impl
: public iterator_facade_base<phi_iterator_impl<PHINodeT, BBIteratorT>,
std::forward_iterator_tag, PHINodeT> {
friend BasicBlock;
PHINodeT *PN;
phi_iterator_impl(PHINodeT *PN) : PN(PN) {}
public:
// Allow default construction to build variables, but this doesn't build
// a useful iterator.
phi_iterator_impl() = default;
// Allow conversion between instantiations where valid.
template <typename PHINodeU, typename BBIteratorU,
typename = std::enable_if_t<
std::is_convertible<PHINodeU *, PHINodeT *>::value>>
phi_iterator_impl(const phi_iterator_impl<PHINodeU, BBIteratorU> &Arg)
: PN(Arg.PN) {}
bool operator==(const phi_iterator_impl &Arg) const { return PN == Arg.PN; }
PHINodeT &operator*() const { return *PN; }
using phi_iterator_impl::iterator_facade_base::operator++;
phi_iterator_impl &operator++() {
assert(PN && "Cannot increment the end iterator!");
PN = dyn_cast<PHINodeT>(std::next(BBIteratorT(PN)));
return *this;
}
};
using phi_iterator = phi_iterator_impl<>;
using const_phi_iterator =
phi_iterator_impl<const PHINode, BasicBlock::const_iterator>;
/// Returns a range that iterates over the phis in the basic block.
///
/// Note that this cannot be used with basic blocks that have no terminator.
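///
/// A sketch (assumes an existing BasicBlock \p BB):
/// \code
///   for (PHINode &PN : BB.phis())
///     errs() << PN << "\n";
/// \endcode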
iterator_range<const_phi_iterator> phis() const {
return const_cast<BasicBlock *>(this)->phis();
}
iterator_range<phi_iterator> phis();
/// Return the underlying instruction list container.
///
/// Currently you need to access the underlying instruction list container
/// directly if you want to modify it.
const InstListType &getInstList() const { return InstList; }
InstListType &getInstList() { return InstList; }
/// Returns a pointer to a member of the instruction list.
static InstListType BasicBlock::*getSublistAccess(Instruction*) {
return &BasicBlock::InstList;
}
/// Returns a pointer to the symbol table if one exists.
ValueSymbolTable *getValueSymbolTable();
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Value *V) {
return V->getValueID() == Value::BasicBlockVal;
}
/// Cause all subinstructions to "let go" of all the references that said
/// subinstructions are maintaining.
///
/// This allows one to 'delete' a whole class at a time, even though there may
/// be circular references... first all references are dropped, and all use
/// counts go to zero. Then everything is delete'd for real. Note that no
/// operations are valid on an object that has "dropped all references",
/// except operator delete.
void dropAllReferences();
/// Update PHI nodes in this BasicBlock before removal of predecessor \p Pred.
/// Note that this function does not actually remove the predecessor.
///
/// If \p KeepOneInputPHIs is true then don't remove PHIs that are left with
/// zero or one incoming values, and don't simplify PHIs with all incoming
/// values the same.
void removePredecessor(BasicBlock *Pred, bool KeepOneInputPHIs = false);
bool canSplitPredecessors() const;
/// Split the basic block into two basic blocks at the specified instruction.
///
/// If \p Before is true, splitBasicBlockBefore handles the
/// block splitting. Otherwise, execution proceeds as described below.
///
/// Note that all instructions BEFORE the specified iterator
/// stay as part of the original basic block, an unconditional branch is added
/// to the original BB, and the rest of the instructions in the BB are moved
/// to the new BB, including the old terminator. The newly formed basic block
/// is returned. This function invalidates the specified iterator.
///
/// Note that this only works on well formed basic blocks (must have a
/// terminator), and \p 'I' must not be the end of the instruction list (which
/// would cause a degenerate basic block to be formed, having a terminator
/// inside of the basic block).
///
/// Also note that this doesn't preserve any passes. To split blocks while
/// keeping loop information consistent, use the SplitBlock utility function.
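///
/// A sketch (splits \p BB before instruction \p I; the name is illustrative):
/// \code
///   BasicBlock *Tail = BB->splitBasicBlock(I, "split.tail");
/// \endcode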
BasicBlock *splitBasicBlock(iterator I, const Twine &BBName = "",
bool Before = false);
BasicBlock *splitBasicBlock(Instruction *I, const Twine &BBName = "",
bool Before = false) {
return splitBasicBlock(I->getIterator(), BBName, Before);
}
/// Split the basic block into two basic blocks at the specified instruction
/// and insert the new basic blocks as the predecessor of the current block.
///
/// This function ensures all instructions AFTER and including the specified
/// iterator \p I are part of the original basic block. All Instructions
/// BEFORE the iterator \p I are moved to the new BB and an unconditional
/// branch is added to the new BB. The new basic block is returned.
///
/// Note that this only works on well formed basic blocks (must have a
/// terminator), and \p 'I' must not be the end of the instruction list (which
/// would cause a degenerate basic block to be formed, having a terminator
/// inside of the basic block). \p 'I' cannot be an iterator for a PHINode
/// with multiple incoming blocks.
///
/// Also note that this doesn't preserve any passes. To split blocks while
/// keeping loop information consistent, use the SplitBlockBefore utility
/// function.
BasicBlock *splitBasicBlockBefore(iterator I, const Twine &BBName = "");
BasicBlock *splitBasicBlockBefore(Instruction *I, const Twine &BBName = "") {
return splitBasicBlockBefore(I->getIterator(), BBName);
}
/// Returns true if there are any uses of this basic block other than
/// direct branches, switches, etc. to it.
bool hasAddressTaken() const {
return getBasicBlockBits().BlockAddressRefCount != 0;
}
/// Update all phi nodes in this basic block to refer to basic block \p New
/// instead of basic block \p Old.
void replacePhiUsesWith(BasicBlock *Old, BasicBlock *New);
/// Update all phi nodes in this basic block's successors to refer to basic
/// block \p New instead of basic block \p Old.
void replaceSuccessorsPhiUsesWith(BasicBlock *Old, BasicBlock *New);
/// Update all phi nodes in this basic block's successors to refer to basic
/// block \p New instead of to it.
void replaceSuccessorsPhiUsesWith(BasicBlock *New);
/// Return true if this basic block is an exception handling block.
bool isEHPad() const { return getFirstNonPHI()->isEHPad(); }
/// Return true if this basic block is a landing pad.
///
/// Being a ``landing pad'' means that the basic block is the destination of
/// the 'unwind' edge of an invoke instruction.
bool isLandingPad() const;
/// Return the landingpad instruction associated with the landing pad.
const LandingPadInst *getLandingPadInst() const;
LandingPadInst *getLandingPadInst() {
return const_cast<LandingPadInst *>(
static_cast<const BasicBlock *>(this)->getLandingPadInst());
}
/// Return true if it is legal to hoist instructions into this block.
bool isLegalToHoistInto() const;
/// Return true if this is the entry block of the containing function.
/// This method can only be used on blocks that have a parent function.
bool isEntryBlock() const;
Optional<uint64_t> getIrrLoopHeaderWeight() const;
/// Returns true if the Order field of child Instructions is valid.
bool isInstrOrderValid() const {
return getBasicBlockBits().InstrOrderValid;
}
/// Mark instruction ordering invalid. Done on every instruction insert.
void invalidateOrders() {
validateInstrOrdering();
BasicBlockBits Bits = getBasicBlockBits();
Bits.InstrOrderValid = false;
setBasicBlockBits(Bits);
}
/// Renumber instructions and mark the ordering as valid.
void renumberInstructions();
/// Asserts that instruction order numbers are marked invalid, or that they
/// are in ascending order. This is constant time if the ordering is invalid,
/// and linear in the number of instructions if the ordering is valid. Callers
/// should be careful not to call this in ways that make common operations
/// O(n^2). For example, it takes O(n) time to assign order numbers to
/// instructions, so the order should be validated no more than once after
/// each ordering to ensure that transforms have the same algorithmic
/// complexity when asserts are enabled as when they are disabled.
void validateInstrOrdering() const;
private:
#if defined(_AIX) && (!defined(__GNUC__) || defined(__clang__))
// Except for GCC; by default, AIX compilers store bit-fields in 4-byte words
// and give the `pack` pragma push semantics.
#define BEGIN_TWO_BYTE_PACK() _Pragma("pack(2)")
#define END_TWO_BYTE_PACK() _Pragma("pack(pop)")
#else
#define BEGIN_TWO_BYTE_PACK()
#define END_TWO_BYTE_PACK()
#endif
BEGIN_TWO_BYTE_PACK()
/// Bitfield to help interpret the bits in Value::SubclassData.
struct BasicBlockBits {
unsigned short BlockAddressRefCount : 15;
unsigned short InstrOrderValid : 1;
};
END_TWO_BYTE_PACK()
#undef BEGIN_TWO_BYTE_PACK
#undef END_TWO_BYTE_PACK
/// Safely reinterpret the subclass data bits to a more useful form.
BasicBlockBits getBasicBlockBits() const {
static_assert(sizeof(BasicBlockBits) == sizeof(unsigned short),
"too many bits for Value::SubclassData");
unsigned short ValueData = getSubclassDataFromValue();
BasicBlockBits AsBits;
memcpy(&AsBits, &ValueData, sizeof(AsBits));
return AsBits;
}
/// Reinterpret our subclass bits and store them back into Value.
void setBasicBlockBits(BasicBlockBits AsBits) {
unsigned short D;
memcpy(&D, &AsBits, sizeof(D));
Value::setValueSubclassData(D);
}
/// Increment the internal refcount of the number of BlockAddresses
/// referencing this BasicBlock by \p Amt.
///
/// This is almost always zero, occasionally one, rarely two, and almost
/// never three or more.
void AdjustBlockAddressRefCount(int Amt) {
BasicBlockBits Bits = getBasicBlockBits();
Bits.BlockAddressRefCount += Amt;
setBasicBlockBits(Bits);
assert(Bits.BlockAddressRefCount < 255 && "Refcount wrap-around");
}
/// Shadow Value::setValueSubclassData with a private forwarding method so
/// that any future subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
Value::setValueSubclassData(D);
}
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(BasicBlock, LLVMBasicBlockRef)
/// Advance \p It while it points to a debug instruction and return the result.
/// This assumes that \p It is not at the end of a block.
BasicBlock::iterator skipDebugIntrinsics(BasicBlock::iterator It);
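// Illustrative sketch (BB is a hypothetical non-empty BasicBlock*): skip any
// leading debug intrinsics to reach the first "real" instruction.
//
//   BasicBlock::iterator It = skipDebugIntrinsics(BB->begin());
//   Instruction &First = *It;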
#ifdef NDEBUG
/// In release builds, this is a no-op. For !NDEBUG builds, the checks are
/// implemented in the .cpp file to avoid circular header deps.
inline void BasicBlock::validateInstrOrdering() const {}
#endif
} // end namespace llvm
#endif // LLVM_IR_BASICBLOCK_H


@@ -0,0 +1,33 @@
//===-- BuiltinGCs.h - Garbage collector linkage hacks --------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains hack functions to force linking in the builtin GC
// components.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_BUILTINGCS_H
#define LLVM_IR_BUILTINGCS_H
namespace llvm {
/// FIXME: Collector instances are not useful on their own. These no longer
/// serve any purpose except to link in the plugins.
/// Ensure the definition of the builtin GCs gets linked in
void linkAllBuiltinGCs();
/// Creates an ocaml-compatible metadata printer.
void linkOcamlGCPrinter();
/// Creates an erlang-compatible metadata printer.
void linkErlangGCPrinter();
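// Illustrative sketch (hypothetical tool code, not part of this header): a
// host tool calls this once at startup so the builtin GC components survive
// linking.
//
//   int main(int argc, char **argv) {
//     llvm::linkAllBuiltinGCs();
//     // ... parse IR, run passes, etc.
//   }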
} // namespace llvm
#endif // LLVM_IR_BUILTINGCS_H


@@ -0,0 +1,405 @@
//===- CFG.h ----------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file provides various utilities for inspecting and working with the
/// control flow graph in LLVM IR. This includes generic facilities for
/// iterating successors and predecessors of basic blocks, the successors of
/// specific terminator instructions, etc. It also defines specializations of
/// GraphTraits that allow Function and BasicBlock graphs to be treated as
/// proper graphs for generic algorithms.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CFG_H
#define LLVM_IR_CFG_H
#include "llvm/ADT/GraphTraits.h"
#include "llvm/ADT/iterator.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstddef>
#include <iterator>
namespace llvm {
class Instruction;
class Use;
//===----------------------------------------------------------------------===//
// BasicBlock pred_iterator definition
//===----------------------------------------------------------------------===//
template <class Ptr, class USE_iterator> // Predecessor Iterator
class PredIterator {
public:
using iterator_category = std::forward_iterator_tag;
using value_type = Ptr;
using difference_type = std::ptrdiff_t;
using pointer = Ptr *;
using reference = Ptr *;
private:
using Self = PredIterator<Ptr, USE_iterator>;
USE_iterator It;
inline void advancePastNonTerminators() {
// Loop to ignore non-terminator uses (for example BlockAddresses).
while (!It.atEnd()) {
if (auto *Inst = dyn_cast<Instruction>(*It))
if (Inst->isTerminator())
break;
++It;
}
}
public:
PredIterator() = default;
explicit inline PredIterator(Ptr *bb) : It(bb->user_begin()) {
advancePastNonTerminators();
}
inline PredIterator(Ptr *bb, bool) : It(bb->user_end()) {}
inline bool operator==(const Self& x) const { return It == x.It; }
inline bool operator!=(const Self& x) const { return !operator==(x); }
inline reference operator*() const {
assert(!It.atEnd() && "pred_iterator out of range!");
return cast<Instruction>(*It)->getParent();
}
inline pointer *operator->() const { return &operator*(); }
inline Self& operator++() { // Preincrement
assert(!It.atEnd() && "pred_iterator out of range!");
++It; advancePastNonTerminators();
return *this;
}
inline Self operator++(int) { // Postincrement
Self tmp = *this; ++*this; return tmp;
}
/// getOperandNo - Return the operand number in the predecessor's
/// terminator of the successor.
unsigned getOperandNo() const {
return It.getOperandNo();
}
/// getUse - Return the operand Use in the predecessor's terminator
/// of the successor.
Use &getUse() const {
return It.getUse();
}
};
using pred_iterator = PredIterator<BasicBlock, Value::user_iterator>;
using const_pred_iterator =
PredIterator<const BasicBlock, Value::const_user_iterator>;
using pred_range = iterator_range<pred_iterator>;
using const_pred_range = iterator_range<const_pred_iterator>;
inline pred_iterator pred_begin(BasicBlock *BB) { return pred_iterator(BB); }
inline const_pred_iterator pred_begin(const BasicBlock *BB) {
return const_pred_iterator(BB);
}
inline pred_iterator pred_end(BasicBlock *BB) { return pred_iterator(BB, true);}
inline const_pred_iterator pred_end(const BasicBlock *BB) {
return const_pred_iterator(BB, true);
}
inline bool pred_empty(const BasicBlock *BB) {
return pred_begin(BB) == pred_end(BB);
}
/// Get the number of predecessors of \p BB. This is a linear time operation.
/// Use \ref BasicBlock::hasNPredecessors() or hasNPredecessorsOrMore if able.
inline unsigned pred_size(const BasicBlock *BB) {
return std::distance(pred_begin(BB), pred_end(BB));
}
inline pred_range predecessors(BasicBlock *BB) {
return pred_range(pred_begin(BB), pred_end(BB));
}
inline const_pred_range predecessors(const BasicBlock *BB) {
return const_pred_range(pred_begin(BB), pred_end(BB));
}
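// Illustrative sketch (BB is a hypothetical BasicBlock*): the helpers above
// make predecessor walks read naturally in range-based for loops.
//
//   for (BasicBlock *Pred : predecessors(BB))
//     visit(Pred); // visit() is a hypothetical callback.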
//===----------------------------------------------------------------------===//
// Instruction and BasicBlock succ_iterator helpers
//===----------------------------------------------------------------------===//
template <class InstructionT, class BlockT>
class SuccIterator
: public iterator_facade_base<SuccIterator<InstructionT, BlockT>,
std::random_access_iterator_tag, BlockT, int,
BlockT *, BlockT *> {
public:
using difference_type = int;
using pointer = BlockT *;
using reference = BlockT *;
private:
InstructionT *Inst;
int Idx;
using Self = SuccIterator<InstructionT, BlockT>;
inline bool index_is_valid(int Idx) {
// Note that we specially support the index of zero being valid even in the
// face of a null instruction.
return Idx >= 0 && (Idx == 0 || Idx <= (int)Inst->getNumSuccessors());
}
/// Proxy object to allow write access in operator[]
class SuccessorProxy {
Self It;
public:
explicit SuccessorProxy(const Self &It) : It(It) {}
SuccessorProxy(const SuccessorProxy &) = default;
SuccessorProxy &operator=(SuccessorProxy RHS) {
*this = reference(RHS);
return *this;
}
SuccessorProxy &operator=(reference RHS) {
It.Inst->setSuccessor(It.Idx, RHS);
return *this;
}
operator reference() const { return *It; }
};
public:
// begin iterator
explicit inline SuccIterator(InstructionT *Inst) : Inst(Inst), Idx(0) {}
// end iterator
inline SuccIterator(InstructionT *Inst, bool) : Inst(Inst) {
if (Inst)
Idx = Inst->getNumSuccessors();
else
// Inst == NULL happens if a basic block is not fully constructed and
// consequently getTerminator() returns NULL. In this case we construct
// a SuccIterator which describes a basic block that has zero
// successors.
// Defining SuccIterator for incomplete and malformed CFGs is especially
// useful for debugging.
Idx = 0;
}
/// This is used to interface with code that wants to operate on terminator
/// instructions directly.
int getSuccessorIndex() const { return Idx; }
inline bool operator==(const Self &x) const { return Idx == x.Idx; }
inline BlockT *operator*() const { return Inst->getSuccessor(Idx); }
// We use the basic block pointer directly for operator->.
inline BlockT *operator->() const { return operator*(); }
inline bool operator<(const Self &RHS) const {
assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
return Idx < RHS.Idx;
}
int operator-(const Self &RHS) const {
assert(Inst == RHS.Inst && "Cannot compare iterators of different blocks!");
return Idx - RHS.Idx;
}
inline Self &operator+=(int RHS) {
int NewIdx = Idx + RHS;
assert(index_is_valid(NewIdx) && "Iterator index out of bound");
Idx = NewIdx;
return *this;
}
inline Self &operator-=(int RHS) { return operator+=(-RHS); }
// Specially implement the [] operation using a proxy object to support
// assignment.
inline SuccessorProxy operator[](int Offset) {
Self TmpIt = *this;
TmpIt += Offset;
return SuccessorProxy(TmpIt);
}
/// Get the source BlockT of this iterator.
inline BlockT *getSource() {
assert(Inst && "Source not available, if basic block was malformed");
return Inst->getParent();
}
};
using succ_iterator = SuccIterator<Instruction, BasicBlock>;
using const_succ_iterator = SuccIterator<const Instruction, const BasicBlock>;
using succ_range = iterator_range<succ_iterator>;
using const_succ_range = iterator_range<const_succ_iterator>;
inline succ_iterator succ_begin(Instruction *I) { return succ_iterator(I); }
inline const_succ_iterator succ_begin(const Instruction *I) {
return const_succ_iterator(I);
}
inline succ_iterator succ_end(Instruction *I) { return succ_iterator(I, true); }
inline const_succ_iterator succ_end(const Instruction *I) {
return const_succ_iterator(I, true);
}
inline bool succ_empty(const Instruction *I) {
return succ_begin(I) == succ_end(I);
}
inline unsigned succ_size(const Instruction *I) {
return std::distance(succ_begin(I), succ_end(I));
}
inline succ_range successors(Instruction *I) {
return succ_range(succ_begin(I), succ_end(I));
}
inline const_succ_range successors(const Instruction *I) {
return const_succ_range(succ_begin(I), succ_end(I));
}
inline succ_iterator succ_begin(BasicBlock *BB) {
return succ_iterator(BB->getTerminator());
}
inline const_succ_iterator succ_begin(const BasicBlock *BB) {
return const_succ_iterator(BB->getTerminator());
}
inline succ_iterator succ_end(BasicBlock *BB) {
return succ_iterator(BB->getTerminator(), true);
}
inline const_succ_iterator succ_end(const BasicBlock *BB) {
return const_succ_iterator(BB->getTerminator(), true);
}
inline bool succ_empty(const BasicBlock *BB) {
return succ_begin(BB) == succ_end(BB);
}
inline unsigned succ_size(const BasicBlock *BB) {
return std::distance(succ_begin(BB), succ_end(BB));
}
inline succ_range successors(BasicBlock *BB) {
return succ_range(succ_begin(BB), succ_end(BB));
}
inline const_succ_range successors(const BasicBlock *BB) {
return const_succ_range(succ_begin(BB), succ_end(BB));
}
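// Illustrative sketch (BB and NewSucc are hypothetical BasicBlock*s; BB has a
// terminator): successor walks mirror the predecessor helpers above, and
// operator[] writes through SuccessorProxy to update the terminator.
//
//   for (BasicBlock *Succ : successors(BB))
//     visit(Succ);               // visit() is a hypothetical callback.
//   succ_begin(BB)[0] = NewSucc; // Rewrites the terminator's successor #0.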
//===--------------------------------------------------------------------===//
// GraphTraits specializations for basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks...
template <> struct GraphTraits<BasicBlock*> {
using NodeRef = BasicBlock *;
using ChildIteratorType = succ_iterator;
static NodeRef getEntryNode(BasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};
template <> struct GraphTraits<const BasicBlock*> {
using NodeRef = const BasicBlock *;
using ChildIteratorType = const_succ_iterator;
static NodeRef getEntryNode(const BasicBlock *BB) { return BB; }
static ChildIteratorType child_begin(NodeRef N) { return succ_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return succ_end(N); }
};
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order. Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<BasicBlock*>> {
using NodeRef = BasicBlock *;
using ChildIteratorType = pred_iterator;
static NodeRef getEntryNode(Inverse<BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
template <> struct GraphTraits<Inverse<const BasicBlock*>> {
using NodeRef = const BasicBlock *;
using ChildIteratorType = const_pred_iterator;
static NodeRef getEntryNode(Inverse<const BasicBlock *> G) { return G.Graph; }
static ChildIteratorType child_begin(NodeRef N) { return pred_begin(N); }
static ChildIteratorType child_end(NodeRef N) { return pred_end(N); }
};
//===--------------------------------------------------------------------===//
// GraphTraits specializations for function basic block graphs (CFGs)
//===--------------------------------------------------------------------===//
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... these are the same as the basic block iterators,
// except that the root node is implicitly the first node of the function.
//
template <> struct GraphTraits<Function*> : public GraphTraits<BasicBlock*> {
static NodeRef getEntryNode(Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
using nodes_iterator = pointer_iterator<Function::iterator>;
static nodes_iterator nodes_begin(Function *F) {
return nodes_iterator(F->begin());
}
static nodes_iterator nodes_end(Function *F) {
return nodes_iterator(F->end());
}
static size_t size(Function *F) { return F->size(); }
};
template <> struct GraphTraits<const Function*> :
public GraphTraits<const BasicBlock*> {
static NodeRef getEntryNode(const Function *F) { return &F->getEntryBlock(); }
// nodes_iterator/begin/end - Allow iteration over all nodes in the graph
using nodes_iterator = pointer_iterator<Function::const_iterator>;
static nodes_iterator nodes_begin(const Function *F) {
return nodes_iterator(F->begin());
}
static nodes_iterator nodes_end(const Function *F) {
return nodes_iterator(F->end());
}
static size_t size(const Function *F) { return F->size(); }
};
// Provide specializations of GraphTraits to be able to treat a function as a
// graph of basic blocks... and to walk it in inverse order. Inverse order for
// a function is considered to be when traversing the predecessor edges of a BB
// instead of the successor edges.
//
template <> struct GraphTraits<Inverse<Function*>> :
public GraphTraits<Inverse<BasicBlock*>> {
static NodeRef getEntryNode(Inverse<Function *> G) {
return &G.Graph->getEntryBlock();
}
};
template <> struct GraphTraits<Inverse<const Function*>> :
public GraphTraits<Inverse<const BasicBlock*>> {
static NodeRef getEntryNode(Inverse<const Function *> G) {
return &G.Graph->getEntryBlock();
}
};
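// Illustrative sketch (F is a hypothetical Function*; assumes
// llvm/ADT/DepthFirstIterator.h is also included): the specializations above
// let generic graph algorithms walk the CFG directly.
//
//   for (BasicBlock *BB : depth_first(F))
//     visit(BB); // visit() is a hypothetical callback.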
} // end namespace llvm
#endif // LLVM_IR_CFG_H


@@ -0,0 +1,263 @@
//===- llvm/CallingConv.h - LLVM Calling Conventions ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines LLVM's set of calling conventions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CALLINGCONV_H
#define LLVM_IR_CALLINGCONV_H
namespace llvm {
/// CallingConv Namespace - This namespace contains an enum with a value for
/// the well-known calling conventions.
///
namespace CallingConv {
/// LLVM IR allows arbitrary numbers to be used as calling convention
/// identifiers.
using ID = unsigned;
/// A set of enums that specify the assigned numeric values for known LLVM
/// calling conventions.
/// LLVM Calling Convention Representation
enum {
/// C - The default llvm calling convention, compatible with C. This
/// convention is the only calling convention that supports varargs calls.
/// As with typical C calling conventions, the callee/caller have to
/// tolerate certain amounts of prototype mismatch.
C = 0,
// Generic LLVM calling conventions. None of these calling conventions
// support varargs calls, and all assume that the caller and callee
// prototype exactly match.
/// Fast - This calling convention attempts to make calls as fast as
/// possible (e.g. by passing things in registers).
Fast = 8,
// Cold - This calling convention attempts to make code in the caller as
// efficient as possible under the assumption that the call is not commonly
// executed. As such, these calls often preserve all registers so that the
// call does not break any live ranges in the caller side.
Cold = 9,
// GHC - Calling convention used by the Glasgow Haskell Compiler (GHC).
GHC = 10,
// HiPE - Calling convention used by the High-Performance Erlang Compiler
// (HiPE).
HiPE = 11,
// WebKit JS - Calling convention for stack based JavaScript calls
WebKit_JS = 12,
// AnyReg - Calling convention for dynamic register based calls (e.g.
// stackmap and patchpoint intrinsics).
AnyReg = 13,
// PreserveMost - Calling convention for runtime calls that preserves most
// registers.
PreserveMost = 14,
// PreserveAll - Calling convention for runtime calls that preserves
// (almost) all registers.
PreserveAll = 15,
// Swift - Calling convention for Swift.
Swift = 16,
// CXX_FAST_TLS - Calling convention for access functions.
CXX_FAST_TLS = 17,
/// Tail - This calling convention attempts to make calls as fast as
/// possible while guaranteeing that tail call optimization can always
/// be performed.
Tail = 18,
/// Special calling convention on Windows for calling the Control
/// Guard Check ICall function. The function takes exactly one argument
/// (address of the target function) passed in the first argument register,
/// and has no return value. All register values are preserved.
CFGuard_Check = 19,
/// SwiftTail - This follows the Swift calling convention in how arguments
/// are passed but guarantees tail calls will be made by making the callee
/// clean up their stack.
SwiftTail = 20,
// Target - This is the start of the target-specific calling conventions,
// e.g. fastcall and thiscall on X86.
FirstTargetCC = 64,
/// X86_StdCall - stdcall is the calling convention mostly used by the
/// Win32 API. It is basically the same as the C convention, with the
/// difference that the callee is responsible for popping the arguments
/// from the stack.
X86_StdCall = 64,
/// X86_FastCall - 'fast' analog of X86_StdCall. Passes the first two
/// arguments in the ECX and EDX registers, the rest via the stack. The
/// callee is responsible for stack cleanup.
X86_FastCall = 65,
/// ARM_APCS - ARM Procedure Calling Standard calling convention (obsolete,
/// but still used on some targets).
ARM_APCS = 66,
/// ARM_AAPCS - ARM Architecture Procedure Calling Standard calling
/// convention (aka EABI). Soft float variant.
ARM_AAPCS = 67,
/// ARM_AAPCS_VFP - Same as ARM_AAPCS, but uses hard floating point ABI.
ARM_AAPCS_VFP = 68,
/// MSP430_INTR - Calling convention used for MSP430 interrupt routines.
MSP430_INTR = 69,
/// X86_ThisCall - Similar to X86_StdCall. Passes the first argument in ECX,
/// the rest via the stack. The callee is responsible for stack cleanup. MSVC
/// uses this by default for methods in its ABI.
X86_ThisCall = 70,
/// PTX_Kernel - Call to a PTX kernel.
/// Passes all arguments in parameter space.
PTX_Kernel = 71,
/// PTX_Device - Call to a PTX device function.
/// Passes all arguments in register or parameter space.
PTX_Device = 72,
/// SPIR_FUNC - Calling convention for SPIR non-kernel device functions.
/// No lowering or expansion of arguments.
/// Structures are passed as a pointer to a struct with the byval attribute.
/// Functions can only call SPIR_FUNC and SPIR_KERNEL functions.
/// Functions can only have zero or one return values.
/// Variable arguments are not allowed, except for printf.
/// How arguments/return values are lowered is not specified.
/// Functions are only visible to the devices.
SPIR_FUNC = 75,
/// SPIR_KERNEL - Calling convention for SPIR kernel functions.
/// Inherits the restrictions of SPIR_FUNC, except
/// Cannot have non-void return values.
/// Cannot have variable arguments.
/// Can also be called by the host.
/// Is externally visible.
SPIR_KERNEL = 76,
/// Intel_OCL_BI - Calling conventions for Intel OpenCL built-ins
Intel_OCL_BI = 77,
/// The C convention as specified in the x86-64 supplement to the
/// System V ABI, used on most non-Windows systems.
X86_64_SysV = 78,
/// The C convention as implemented on Windows/x86-64 and
/// AArch64. This convention differs from the more common
/// \c X86_64_SysV convention in a number of ways, most notably in
/// that XMM registers used to pass arguments are shadowed by GPRs,
/// and vice versa.
/// On AArch64, this is identical to the normal C (AAPCS) calling
/// convention for normal functions, but floats are passed in integer
/// registers to variadic functions.
Win64 = 79,
/// MSVC calling convention that passes vectors and vector aggregates
/// in SSE registers.
X86_VectorCall = 80,
/// Calling convention used by HipHop Virtual Machine (HHVM) to
/// perform calls to and from translation cache, and for calling PHP
/// functions.
/// HHVM calling convention supports tail/sibling call elimination.
HHVM = 81,
/// HHVM calling convention for invoking C/C++ helpers.
HHVM_C = 82,
/// X86_INTR - x86 hardware interrupt context. Callee may take one or two
/// parameters, where the 1st represents a pointer to the hardware context
/// frame and the 2nd represents the hardware error code; the presence of the
/// latter depends on the interrupt vector taken. Valid for both 32- and
/// 64-bit subtargets.
X86_INTR = 83,
/// Used for AVR interrupt routines.
AVR_INTR = 84,
/// Calling convention used for AVR signal routines.
AVR_SIGNAL = 85,
/// Calling convention used for special AVR rtlib functions
/// which have an "optimized" convention to preserve registers.
AVR_BUILTIN = 86,
/// Calling convention used for Mesa vertex shaders, or AMDPAL last shader
/// stage before rasterization (vertex shader if tessellation and geometry
/// are not in use, or otherwise copy shader if one is needed).
AMDGPU_VS = 87,
/// Calling convention used for Mesa/AMDPAL geometry shaders.
AMDGPU_GS = 88,
/// Calling convention used for Mesa/AMDPAL pixel shaders.
AMDGPU_PS = 89,
/// Calling convention used for Mesa/AMDPAL compute shaders.
AMDGPU_CS = 90,
/// Calling convention for AMDGPU code object kernels.
AMDGPU_KERNEL = 91,
/// Register calling convention used for parameter transfer optimization
X86_RegCall = 92,
/// Calling convention used for Mesa/AMDPAL hull shaders (= tessellation
/// control shaders).
AMDGPU_HS = 93,
/// Calling convention used for special MSP430 rtlib functions
/// which have an "optimized" convention using additional registers.
MSP430_BUILTIN = 94,
/// Calling convention used for AMDPAL vertex shader if tessellation is in
/// use.
AMDGPU_LS = 95,
/// Calling convention used for AMDPAL shader stage before geometry shader
/// if geometry is in use. So either the domain (= tessellation evaluation)
/// shader if tessellation is in use, or otherwise the vertex shader.
AMDGPU_ES = 96,
// Calling convention between AArch64 Advanced SIMD functions
AArch64_VectorCall = 97,
/// Calling convention between AArch64 SVE functions
AArch64_SVE_VectorCall = 98,
/// Calling convention for emscripten __invoke_* functions. The first
/// argument is required to be the function ptr being indirectly called.
/// The remainder matches the regular calling convention.
WASM_EmscriptenInvoke = 99,
/// Calling convention used for AMD graphics targets.
AMDGPU_Gfx = 100,
/// M68k_INTR - Calling convention used for M68k interrupt routines.
M68k_INTR = 101,
/// The highest possible calling convention ID. Must be of the form 2^k - 1.
MaxID = 1023
};
} // end namespace CallingConv
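// Illustrative sketch (F and CB are hypothetical Function* / CallBase*
// values): a convention ID is attached both to the callee and to each call
// site, and the two must match for the call to be well defined.
//
//   F->setCallingConv(CallingConv::Fast);
//   CB->setCallingConv(CallingConv::Fast);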
} // end namespace llvm
#endif // LLVM_IR_CALLINGCONV_H


@@ -0,0 +1,78 @@
//===- llvm/IR/Comdat.h - Comdat definitions --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declaration of the Comdat class, which represents a
/// single COMDAT in LLVM.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_COMDAT_H
#define LLVM_IR_COMDAT_H
#include "llvm-c/Types.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/Support/CBindingWrapping.h"
namespace llvm {
class GlobalObject;
class raw_ostream;
class StringRef;
template <typename ValueTy> class StringMapEntry;
// This is a Name X SelectionKind pair. The reason for having this be an
// independent object instead of just adding the name and the SelectionKind
// to a GlobalObject is that it is invalid to have two Comdats with the same
// name but different SelectionKind. This structure makes that unrepresentable.
class Comdat {
public:
enum SelectionKind {
Any, ///< The linker may choose any COMDAT.
ExactMatch, ///< The data referenced by the COMDAT must be the same.
Largest, ///< The linker will choose the largest COMDAT.
NoDeduplicate, ///< No deduplication is performed.
SameSize, ///< The data referenced by the COMDAT must be the same size.
};
Comdat(const Comdat &) = delete;
Comdat(Comdat &&C);
SelectionKind getSelectionKind() const { return SK; }
void setSelectionKind(SelectionKind Val) { SK = Val; }
StringRef getName() const;
void print(raw_ostream &OS, bool IsForDebug = false) const;
void dump() const;
const SmallPtrSetImpl<GlobalObject *> &getUsers() const { return Users; }
private:
friend class Module;
friend class GlobalObject;
Comdat();
void addUser(GlobalObject *GO);
void removeUser(GlobalObject *GO);
// Points to the map in Module.
StringMapEntry<Comdat> *Name = nullptr;
SelectionKind SK = Any;
// Globals using this comdat.
SmallPtrSet<GlobalObject *, 2> Users;
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Comdat, LLVMComdatRef)
inline raw_ostream &operator<<(raw_ostream &OS, const Comdat &C) {
C.print(OS);
return OS;
}
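// Illustrative sketch (M and GV are hypothetical Module / GlobalVariable*
// values): comdats are created through the owning Module and then attached
// to global objects.
//
//   Comdat *C = M.getOrInsertComdat("foo");
//   C->setSelectionKind(Comdat::Any);
//   GV->setComdat(C);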
} // end namespace llvm
#endif // LLVM_IR_COMDAT_H


@@ -0,0 +1,259 @@
//===-- llvm/Constant.h - Constant class definition -------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Constant class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CONSTANT_H
#define LLVM_IR_CONSTANT_H
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
namespace llvm {
class APInt;
/// This is an important base class in LLVM. It provides the common facilities
/// of all constant values in an LLVM program. A constant is a value that is
/// immutable at runtime. Functions are constants because their address is
/// immutable. Same with global variables.
///
/// All constants share the capabilities provided in this class. All constants
/// can have a null value. They can have an operand list. Constants can be
/// simple (integer and floating point values), complex (arrays and structures),
/// or expression based (computations yielding a constant value composed of
/// only certain operators and other constant values).
///
/// Note that Constants are immutable (once created they never change)
/// and are fully shared by structural equivalence. This means that two
/// structurally equivalent constants will always have the same address.
/// Constants are created on demand as needed and never deleted: thus clients
/// don't have to worry about the lifetime of the objects.
/// LLVM Constant Representation
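///
/// A minimal sketch of the uniquing behavior (Ctx is a hypothetical
/// LLVMContext):
/// \code
///   Constant *A = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
///   Constant *B = ConstantInt::get(Type::getInt32Ty(Ctx), 42);
///   assert(A == B && "structurally equivalent constants are shared");
/// \endcode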
class Constant : public User {
protected:
Constant(Type *ty, ValueTy vty, Use *Ops, unsigned NumOps)
: User(ty, vty, Ops, NumOps) {}
~Constant() = default;
public:
void operator=(const Constant &) = delete;
Constant(const Constant &) = delete;
/// Return true if this is the value that would be returned by getNullValue.
bool isNullValue() const;
/// Returns true if the value is one.
bool isOneValue() const;
/// Return true if the value is not the one value, or,
/// for vectors, does not contain one value elements.
bool isNotOneValue() const;
/// Return true if this is the value that would be returned by
/// getAllOnesValue.
bool isAllOnesValue() const;
/// Return true if the value is what would be returned by
/// getZeroValueForNegation.
bool isNegativeZeroValue() const;
/// Return true if the value is negative zero or null value.
bool isZeroValue() const;
/// Return true if the value is not the smallest signed value, or,
/// for vectors, does not contain smallest signed value elements.
bool isNotMinSignedValue() const;
/// Return true if the value is the smallest signed value.
bool isMinSignedValue() const;
/// Return true if this is a finite and non-zero floating-point scalar
/// constant or a fixed width vector constant with all finite and non-zero
/// elements.
bool isFiniteNonZeroFP() const;
/// Return true if this is a normal (as opposed to denormal, infinity, nan,
/// or zero) floating-point scalar constant or a vector constant with all
/// normal elements. See APFloat::isNormal.
bool isNormalFP() const;
/// Return true if this scalar has an exact multiplicative inverse or this
/// vector has an exact multiplicative inverse for each element in the vector.
bool hasExactInverseFP() const;
/// Return true if this is a floating-point NaN constant or a vector
/// floating-point constant with all NaN elements.
bool isNaN() const;
/// Return true if this constant and a constant 'Y' are element-wise equal.
/// This is identical to just comparing the pointers, with the exception that
/// for vectors, if only one of the constants has an `undef` element in some
/// lane, the constants still match.
bool isElementWiseEqual(Value *Y) const;
/// Return true if this is a vector constant that includes any undef or
/// poison elements. Since it is impossible to inspect a scalable vector
/// element-wise at compile time, this function returns true only if the
/// entire vector is undef or poison.
bool containsUndefOrPoisonElement() const;
/// Return true if this is a vector constant that includes any poison
/// elements.
bool containsPoisonElement() const;
/// Return true if this is a fixed width vector constant that includes
/// any constant expressions.
bool containsConstantExpression() const;
/// Return true if evaluation of this constant could trap. This is true for
/// things like constant expressions that could divide by zero.
bool canTrap() const;
/// Return true if the value can vary between threads.
bool isThreadDependent() const;
/// Return true if the value is dependent on a dllimport variable.
bool isDLLImportDependent() const;
/// Return true if the constant has users other than constant expressions and
/// other dangling things.
bool isConstantUsed() const;
/// This method classifies the entry according to whether or not it may
/// generate a relocation entry (either static or dynamic). This must be
/// conservative, so if it might codegen to a relocatable entry, it should say
/// so.
///
/// FIXME: This really should not be in IR.
bool needsRelocation() const;
bool needsDynamicRelocation() const;
/// For aggregates (struct/array/vector) return the constant that corresponds
/// to the specified element if possible, or null if not. This can return null
/// if the element index is a ConstantExpr, if 'this' is a constant expr or
/// if the constant does not fit into an uint64_t.
Constant *getAggregateElement(unsigned Elt) const;
Constant *getAggregateElement(Constant *Elt) const;
/// If all elements of the vector constant have the same value, return that
/// value. Otherwise, return nullptr. Ignore undefined elements by setting
/// AllowUndefs to true.
Constant *getSplatValue(bool AllowUndefs = false) const;
/// If C is a constant integer then return its value, otherwise C must be a
/// vector of constant integers, all equal, and the common value is returned.
const APInt &getUniqueInteger() const;
/// Called if some element of this constant is no longer valid.
/// At this point only other constants may be on the use_list for this
/// constant. Any constants on our Use list must also be destroyed. The
/// implementation must be sure to remove the constant from the list of
/// available cached constants. Implementations should implement
/// destroyConstantImpl to remove constants from any pools/maps they are
/// contained in.
void destroyConstant();
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
static_assert(ConstantFirstVal == 0, "V->getValueID() >= ConstantFirstVal always succeeds");
return V->getValueID() <= ConstantLastVal;
}
/// This method is a special form of User::replaceUsesOfWith
/// (which does not work on constants) that does work
/// on constants. Basically this method goes through the trouble of building
/// a new constant that is equivalent to the current one, with all uses of
/// From replaced with uses of To. After this construction is completed, all
/// of the users of 'this' are replaced to use the new constant, and then
/// 'this' is deleted. In general, you should not call this method, instead,
/// use Value::replaceAllUsesWith, which automatically dispatches to this
/// method as needed.
///
void handleOperandChange(Value *, Value *);
static Constant *getNullValue(Type* Ty);
/// @returns the value for an integer or vector of integer constant of the
/// given type that has all its bits set to true.
/// Get the all ones value
static Constant *getAllOnesValue(Type* Ty);
/// Return the value for an integer or pointer constant, or a vector thereof,
/// with the given scalar value.
static Constant *getIntegerValue(Type *Ty, const APInt &V);
/// If there are any dead constant users dangling off of this constant, remove
/// them. This method is useful for clients that want to check to see if a
/// global is unused, but don't want to deal with potentially dead constants
/// hanging off of the globals.
void removeDeadConstantUsers() const;
/// Return true if the constant has exactly one live use.
///
/// This returns the same result as calling Value::hasOneUse after
/// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
bool hasOneLiveUse() const;
/// Return true if the constant has no live uses.
///
/// This returns the same result as calling Value::use_empty after
/// Constant::removeDeadConstantUsers, but doesn't remove dead constants.
bool hasZeroLiveUses() const;
const Constant *stripPointerCasts() const {
return cast<Constant>(Value::stripPointerCasts());
}
Constant *stripPointerCasts() {
return const_cast<Constant*>(
static_cast<const Constant *>(this)->stripPointerCasts());
}
/// Try to replace undefined constant C or undefined elements in C with
/// Replacement. If no changes are made, the constant C is returned.
static Constant *replaceUndefsWith(Constant *C, Constant *Replacement);
/// Merges undefs of a Constant with another Constant, along with the
/// undefs already present. Other doesn't have to be the same type as C, but
/// both must either be scalars or vectors with the same element count. If no
/// changes are made, the constant C is returned.
static Constant *mergeUndefsWith(Constant *C, Constant *Other);
/// Return true if a constant is ConstantData or a ConstantAggregate or
/// ConstantExpr that contain only ConstantData.
bool isManifestConstant() const;
private:
enum PossibleRelocationsTy {
/// This constant requires no relocations. That is, it holds simple
/// constants (like integrals).
NoRelocation = 0,
/// This constant holds static relocations that can be resolved by the
/// static linker.
LocalRelocation = 1,
/// This constant holds dynamic relocations that the dynamic linker will
/// need to resolve.
GlobalRelocation = 2,
};
/// Determine what potential relocations may be needed by this constant.
PossibleRelocationsTy getRelocationInfo() const;
bool hasNLiveUses(unsigned N) const;
};
} // end namespace llvm
#endif // LLVM_IR_CONSTANT_H


@@ -0,0 +1,290 @@
//===- ConstantFolder.h - Constant folding helper ---------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the ConstantFolder class, a helper for IRBuilder.
// It provides IRBuilder with a set of methods for creating constants
// with minimal folding. For general constant creation and folding,
// use ConstantExpr and the routines in llvm/Analysis/ConstantFolding.h.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CONSTANTFOLDER_H
#define LLVM_IR_CONSTANTFOLDER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilderFolder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
namespace llvm {
/// ConstantFolder - Create constants with minimal, target-independent folding.
class ConstantFolder final : public IRBuilderFolder {
virtual void anchor();
public:
explicit ConstantFolder() = default;
//===--------------------------------------------------------------------===//
// Value-based folders.
//
// Return an existing value or a constant if the operation can be simplified.
// Otherwise return nullptr.
//===--------------------------------------------------------------------===//
Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
bool HasNSW = false) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
return ConstantExpr::getAdd(LC, RC, HasNUW, HasNSW);
return nullptr;
}
Value *FoldAnd(Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
return ConstantExpr::getAnd(LC, RC);
return nullptr;
}
Value *FoldOr(Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
return ConstantExpr::getOr(LC, RC);
return nullptr;
}
Value *FoldICmp(CmpInst::Predicate P, Value *LHS, Value *RHS) const override {
auto *LC = dyn_cast<Constant>(LHS);
auto *RC = dyn_cast<Constant>(RHS);
if (LC && RC)
return ConstantExpr::getCompare(P, LC, RC);
return nullptr;
}
Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
bool IsInBounds = false) const override {
if (auto *PC = dyn_cast<Constant>(Ptr)) {
// Every index must be constant.
if (any_of(IdxList, [](Value *V) { return !isa<Constant>(V); }))
return nullptr;
if (IsInBounds)
return ConstantExpr::getInBoundsGetElementPtr(Ty, PC, IdxList);
else
return ConstantExpr::getGetElementPtr(Ty, PC, IdxList);
}
return nullptr;
}
Value *FoldSelect(Value *C, Value *True, Value *False) const override {
auto *CC = dyn_cast<Constant>(C);
auto *TC = dyn_cast<Constant>(True);
auto *FC = dyn_cast<Constant>(False);
if (CC && TC && FC)
return ConstantExpr::getSelect(CC, TC, FC);
return nullptr;
}
//===--------------------------------------------------------------------===//
// Binary Operators
//===--------------------------------------------------------------------===//
Constant *CreateFAdd(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFAdd(LHS, RHS);
}
Constant *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getSub(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateFSub(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFSub(LHS, RHS);
}
Constant *CreateMul(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getMul(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateFMul(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFMul(LHS, RHS);
}
Constant *CreateUDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const override {
return ConstantExpr::getUDiv(LHS, RHS, isExact);
}
Constant *CreateSDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const override {
return ConstantExpr::getSDiv(LHS, RHS, isExact);
}
Constant *CreateFDiv(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFDiv(LHS, RHS);
}
Constant *CreateURem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getURem(LHS, RHS);
}
Constant *CreateSRem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getSRem(LHS, RHS);
}
Constant *CreateFRem(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getFRem(LHS, RHS);
}
Constant *CreateShl(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getShl(LHS, RHS, HasNUW, HasNSW);
}
Constant *CreateLShr(Constant *LHS, Constant *RHS,
bool isExact = false) const override {
return ConstantExpr::getLShr(LHS, RHS, isExact);
}
Constant *CreateAShr(Constant *LHS, Constant *RHS,
bool isExact = false) const override {
return ConstantExpr::getAShr(LHS, RHS, isExact);
}
Constant *CreateOr(Constant *LHS, Constant *RHS) const {
return ConstantExpr::getOr(LHS, RHS);
}
Constant *CreateXor(Constant *LHS, Constant *RHS) const override {
return ConstantExpr::getXor(LHS, RHS);
}
Constant *CreateBinOp(Instruction::BinaryOps Opc,
Constant *LHS, Constant *RHS) const override {
return ConstantExpr::get(Opc, LHS, RHS);
}
//===--------------------------------------------------------------------===//
// Unary Operators
//===--------------------------------------------------------------------===//
Constant *CreateNeg(Constant *C,
bool HasNUW = false, bool HasNSW = false) const override {
return ConstantExpr::getNeg(C, HasNUW, HasNSW);
}
Constant *CreateFNeg(Constant *C) const override {
return ConstantExpr::getFNeg(C);
}
Constant *CreateNot(Constant *C) const override {
return ConstantExpr::getNot(C);
}
Constant *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const override {
return ConstantExpr::get(Opc, C);
}
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
Constant *CreateCast(Instruction::CastOps Op, Constant *C,
Type *DestTy) const override {
return ConstantExpr::getCast(Op, C, DestTy);
}
Constant *CreatePointerCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getPointerCast(C, DestTy);
}
Constant *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
Type *DestTy) const override {
return ConstantExpr::getPointerBitCastOrAddrSpaceCast(C, DestTy);
}
Constant *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const override {
return ConstantExpr::getIntegerCast(C, DestTy, isSigned);
}
Constant *CreateFPCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getFPCast(C, DestTy);
}
Constant *CreateBitCast(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::BitCast, C, DestTy);
}
Constant *CreateIntToPtr(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::IntToPtr, C, DestTy);
}
Constant *CreatePtrToInt(Constant *C, Type *DestTy) const override {
return CreateCast(Instruction::PtrToInt, C, DestTy);
}
Constant *CreateZExtOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getZExtOrBitCast(C, DestTy);
}
Constant *CreateSExtOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getSExtOrBitCast(C, DestTy);
}
Constant *CreateTruncOrBitCast(Constant *C, Type *DestTy) const override {
return ConstantExpr::getTruncOrBitCast(C, DestTy);
}
//===--------------------------------------------------------------------===//
// Compare Instructions
//===--------------------------------------------------------------------===//
Constant *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const override {
return ConstantExpr::getCompare(P, LHS, RHS);
}
//===--------------------------------------------------------------------===//
// Other Instructions
//===--------------------------------------------------------------------===//
Constant *CreateExtractElement(Constant *Vec, Constant *Idx) const override {
return ConstantExpr::getExtractElement(Vec, Idx);
}
Constant *CreateInsertElement(Constant *Vec, Constant *NewElt,
Constant *Idx) const override {
return ConstantExpr::getInsertElement(Vec, NewElt, Idx);
}
Constant *CreateShuffleVector(Constant *V1, Constant *V2,
ArrayRef<int> Mask) const override {
return ConstantExpr::getShuffleVector(V1, V2, Mask);
}
Constant *CreateExtractValue(Constant *Agg,
ArrayRef<unsigned> IdxList) const override {
return ConstantExpr::getExtractValue(Agg, IdxList);
}
Constant *CreateInsertValue(Constant *Agg, Constant *Val,
ArrayRef<unsigned> IdxList) const override {
return ConstantExpr::getInsertValue(Agg, Val, IdxList);
}
};
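// Illustrative sketch (Ctx is a hypothetical LLVMContext; IRBuilder.h must
// also be included): IRBuilder uses ConstantFolder by default, so operations
// on constant operands fold to constants instead of emitting instructions.
//
//   IRBuilder<> B(Ctx);
//   Value *V = B.CreateAdd(B.getInt32(2), B.getInt32(3));
//   // V is a ConstantInt holding 5; no add instruction is created.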
} // end namespace llvm
#endif // LLVM_IR_CONSTANTFOLDER_H


@@ -0,0 +1,575 @@
//===- ConstantRange.h - Represent a range ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Represent a range of possible values that may occur when the program is run
// for an integral value. This keeps track of a lower and upper bound for the
// constant, which MAY wrap around the end of the numeric range. To do this, it
// keeps track of a [lower, upper) bound, which specifies an interval just like
// STL iterators. When used with boolean values, the following are important
// ranges:
//
// [F, F) = {} = Empty set
// [T, F) = {T}
// [F, T) = {F}
// [T, T) = {F, T} = Full set
//
// The other integral ranges use min/max values for special range values. For
// example, for 8-bit types, it uses:
// [0, 0) = {} = Empty set
// [255, 255) = {0..255} = Full Set
//
// Note that ConstantRange can be used to represent either signed or
// unsigned ranges.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_CONSTANTRANGE_H
#define LLVM_IR_CONSTANTRANGE_H
#include "llvm/ADT/APInt.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/Support/Compiler.h"
#include <cstdint>
namespace llvm {
class MDNode;
class raw_ostream;
struct KnownBits;
/// This class represents a range of values.
class LLVM_NODISCARD ConstantRange {
APInt Lower, Upper;
/// Create empty constant range with same bitwidth.
ConstantRange getEmpty() const {
return ConstantRange(getBitWidth(), false);
}
/// Create full constant range with same bitwidth.
ConstantRange getFull() const {
return ConstantRange(getBitWidth(), true);
}
public:
/// Initialize a full or empty set for the specified bit width.
explicit ConstantRange(uint32_t BitWidth, bool isFullSet);
/// Initialize a range to hold the single specified value.
ConstantRange(APInt Value);
/// Initialize a range of values explicitly. This will assert out if
/// Lower==Upper and Lower != Min or Max value for its type. It will also
/// assert out if the two APInt's are not the same bit width.
ConstantRange(APInt Lower, APInt Upper);
/// Create empty constant range with the given bit width.
static ConstantRange getEmpty(uint32_t BitWidth) {
return ConstantRange(BitWidth, false);
}
/// Create full constant range with the given bit width.
static ConstantRange getFull(uint32_t BitWidth) {
return ConstantRange(BitWidth, true);
}
/// Create non-empty constant range with the given bounds. If Lower and
/// Upper are the same, a full range is returned.
static ConstantRange getNonEmpty(APInt Lower, APInt Upper) {
if (Lower == Upper)
return getFull(Lower.getBitWidth());
return ConstantRange(std::move(Lower), std::move(Upper));
}
/// Initialize a range based on a known bits constraint. The IsSigned flag
/// indicates whether the constant range should not wrap in the signed or
/// unsigned domain.
static ConstantRange fromKnownBits(const KnownBits &Known, bool IsSigned);
/// Produce the smallest range such that all values that may satisfy the given
/// predicate with any value contained within Other are contained in the
/// returned range. Formally, this returns a superset of
/// 'union over all y in Other . { x : icmp op x y is true }'. If the exact
/// answer is not representable as a ConstantRange, the return value will be a
/// proper superset of the above.
///
/// Example: Pred = ult and Other = i8 [2, 5) returns Result = [0, 4)
static ConstantRange makeAllowedICmpRegion(CmpInst::Predicate Pred,
const ConstantRange &Other);
/// Produce the largest range such that all values in the returned range
/// satisfy the given predicate with all values contained within Other.
/// Formally, this returns a subset of
/// 'intersection over all y in Other . { x : icmp op x y is true }'. If the
/// exact answer is not representable as a ConstantRange, the return value
/// will be a proper subset of the above.
///
/// Example: Pred = ult and Other = i8 [2, 5) returns [0, 2)
static ConstantRange makeSatisfyingICmpRegion(CmpInst::Predicate Pred,
const ConstantRange &Other);
/// Produce the exact range such that all values in the returned range satisfy
/// the given predicate with any value contained within Other. Formally, this
/// returns the exact answer when the superset
/// 'union over all y in Other . { x : icmp op x y is true }' is exactly the
/// same as the subset
/// 'intersection over all y in Other . { x : icmp op x y is true }'.
///
/// Example: Pred = ult and Other = i8 3 returns [0, 3)
static ConstantRange makeExactICmpRegion(CmpInst::Predicate Pred,
const APInt &Other);
/// Does the predicate \p Pred hold between ranges this and \p Other?
/// NOTE: false does not mean that inverse predicate holds!
bool icmp(CmpInst::Predicate Pred, const ConstantRange &Other) const;
/// Return true iff CR1 ult CR2 is equivalent to CR1 slt CR2.
/// Does not depend on strictness/direction of the predicate.
static bool
areInsensitiveToSignednessOfICmpPredicate(const ConstantRange &CR1,
const ConstantRange &CR2);
/// Return true iff CR1 ult CR2 is equivalent to CR1 sge CR2.
/// Does not depend on strictness/direction of the predicate.
static bool
areInsensitiveToSignednessOfInvertedICmpPredicate(const ConstantRange &CR1,
const ConstantRange &CR2);
/// If the comparison between constant ranges this and Other
/// is insensitive to the signedness of the comparison predicate,
/// return a predicate equivalent to \p Pred, with flipped signedness
/// (i.e. unsigned instead of signed or vice versa), and maybe inverted,
/// otherwise returns CmpInst::Predicate::BAD_ICMP_PREDICATE.
static CmpInst::Predicate
getEquivalentPredWithFlippedSignedness(CmpInst::Predicate Pred,
const ConstantRange &CR1,
const ConstantRange &CR2);
/// Produce the largest range containing all X such that "X BinOp Y" is
/// guaranteed not to wrap (overflow) for *all* Y in Other. However, there may
/// be *some* Y in Other for which additional X not contained in the result
/// also do not overflow.
///
/// NoWrapKind must be one of OBO::NoUnsignedWrap or OBO::NoSignedWrap.
///
/// Examples:
/// typedef OverflowingBinaryOperator OBO;
/// #define MGNR makeGuaranteedNoWrapRegion
/// MGNR(Add, [i8 1, 2), OBO::NoSignedWrap) == [-128, 127)
/// MGNR(Add, [i8 1, 2), OBO::NoUnsignedWrap) == [0, -1)
/// MGNR(Add, [i8 0, 1), OBO::NoUnsignedWrap) == Full Set
/// MGNR(Add, [i8 -1, 6), OBO::NoSignedWrap) == [INT_MIN+1, INT_MAX-4)
/// MGNR(Sub, [i8 1, 2), OBO::NoSignedWrap) == [-127, 128)
/// MGNR(Sub, [i8 1, 2), OBO::NoUnsignedWrap) == [1, 0)
static ConstantRange makeGuaranteedNoWrapRegion(Instruction::BinaryOps BinOp,
const ConstantRange &Other,
unsigned NoWrapKind);
/// Produce the range that contains X if and only if "X BinOp Other" does
/// not wrap.
static ConstantRange makeExactNoWrapRegion(Instruction::BinaryOps BinOp,
const APInt &Other,
unsigned NoWrapKind);
/// Returns true if ConstantRange calculations are supported for intrinsic
/// with \p IntrinsicID.
static bool isIntrinsicSupported(Intrinsic::ID IntrinsicID);
/// Compute range of intrinsic result for the given operand ranges.
static ConstantRange intrinsic(Intrinsic::ID IntrinsicID,
ArrayRef<ConstantRange> Ops);
/// Set up \p Pred and \p RHS such that
/// ConstantRange::makeExactICmpRegion(Pred, RHS) == *this. Return true if
/// successful.
bool getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS) const;
/// Set up \p Pred, \p RHS and \p Offset such that (V + Offset) Pred RHS
/// is true iff V is in the range. Prefers using Offset == 0 if possible.
void
getEquivalentICmp(CmpInst::Predicate &Pred, APInt &RHS, APInt &Offset) const;
/// Return the lower value for this range.
const APInt &getLower() const { return Lower; }
/// Return the upper value for this range.
const APInt &getUpper() const { return Upper; }
/// Get the bit width of this ConstantRange.
uint32_t getBitWidth() const { return Lower.getBitWidth(); }
/// Return true if this set contains all of the elements possible
/// for this data-type.
bool isFullSet() const;
/// Return true if this set contains no members.
bool isEmptySet() const;
/// Return true if this set wraps around the unsigned domain. Special cases:
/// * Empty set: Not wrapped.
/// * Full set: Not wrapped.
/// * [X, 0) == [X, Max]: Not wrapped.
bool isWrappedSet() const;
/// Return true if the exclusive upper bound wraps around the unsigned
/// domain. Special cases:
/// * Empty set: Not wrapped.
/// * Full set: Not wrapped.
/// * [X, 0): Wrapped.
bool isUpperWrapped() const;
/// Return true if this set wraps around the signed domain. Special cases:
/// * Empty set: Not wrapped.
/// * Full set: Not wrapped.
/// * [X, SignedMin) == [X, SignedMax]: Not wrapped.
bool isSignWrappedSet() const;
/// Return true if the (exclusive) upper bound wraps around the signed
/// domain. Special cases:
/// * Empty set: Not wrapped.
/// * Full set: Not wrapped.
/// * [X, SignedMin): Wrapped.
bool isUpperSignWrapped() const;
/// Return true if the specified value is in the set.
bool contains(const APInt &Val) const;
/// Return true if the other range is a subset of this one.
bool contains(const ConstantRange &CR) const;
/// If this set contains a single element, return it, otherwise return null.
const APInt *getSingleElement() const {
if (Upper == Lower + 1)
return &Lower;
return nullptr;
}
/// If this set contains all but a single element, return it, otherwise return
/// null.
const APInt *getSingleMissingElement() const {
if (Lower == Upper + 1)
return &Upper;
return nullptr;
}
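// Example (illustrative): for i8, [5, 6) has getSingleElement() == 5,
// while the wrapped set [6, 5) contains every value except 5, so
// getSingleMissingElement() == 5 and getSingleElement() == nullptr.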
/// Return true if this set contains exactly one member.
bool isSingleElement() const { return getSingleElement() != nullptr; }
/// Compare set size of this range with the range CR.
bool isSizeStrictlySmallerThan(const ConstantRange &CR) const;
/// Compare set size of this range with \p MaxSize.
bool isSizeLargerThan(uint64_t MaxSize) const;
/// Return true if all values in this range are negative.
bool isAllNegative() const;
/// Return true if all values in this range are non-negative.
bool isAllNonNegative() const;
/// Return the largest unsigned value contained in the ConstantRange.
APInt getUnsignedMax() const;
/// Return the smallest unsigned value contained in the ConstantRange.
APInt getUnsignedMin() const;
/// Return the largest signed value contained in the ConstantRange.
APInt getSignedMax() const;
/// Return the smallest signed value contained in the ConstantRange.
APInt getSignedMin() const;
/// Return true if this range is equal to another range.
bool operator==(const ConstantRange &CR) const {
return Lower == CR.Lower && Upper == CR.Upper;
}
bool operator!=(const ConstantRange &CR) const {
return !operator==(CR);
}
/// Compute the maximal number of active bits needed to represent every value
/// in this range.
unsigned getActiveBits() const;
/// Compute the maximal number of bits needed to represent every value
/// in this signed range.
unsigned getMinSignedBits() const;
/// Subtract the specified constant from the endpoints of this constant range.
ConstantRange subtract(const APInt &CI) const;
/// Subtract the specified range from this range (aka relative complement of
/// the sets).
ConstantRange difference(const ConstantRange &CR) const;
/// If represented precisely, the result of some range operations may consist
/// of multiple disjoint ranges. As only a single range may be returned, any
/// range covering these disjoint ranges constitutes a valid result, but some
/// may be more useful than others depending on context. The preferred range
/// type specifies whether a range that is non-wrapping in the unsigned or
/// signed domain, or has the smallest size, is preferred. If a signedness is
/// preferred but all ranges are non-wrapping or all wrapping, then the
/// smallest set size is preferred. If there are multiple smallest sets, any
/// one of them may be returned.
enum PreferredRangeType { Smallest, Unsigned, Signed };
/// Return the range that results from the intersection of this range with
/// another range. If the intersection is disjoint, such that two results
/// are possible, the preferred range is determined by the PreferredRangeType.
ConstantRange intersectWith(const ConstantRange &CR,
PreferredRangeType Type = Smallest) const;
/// Return the range that results from the union of this range
/// with another range. The resultant range is guaranteed to include the
/// elements of both sets, but may contain more. For example, [3, 9) union
/// [12,15) is [3, 15), which includes 9, 10, and 11, which were not included
/// in either set before.
ConstantRange unionWith(const ConstantRange &CR,
PreferredRangeType Type = Smallest) const;
/// Intersect the two ranges and return the result if it can be represented
/// exactly, otherwise return None.
Optional<ConstantRange> exactIntersectWith(const ConstantRange &CR) const;
/// Union the two ranges and return the result if it can be represented
/// exactly, otherwise return None.
Optional<ConstantRange> exactUnionWith(const ConstantRange &CR) const;
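//
// Example (illustrative sketch): for i8, the set union of [0, 4) and
// [8, 12) is not a single contiguous range, so:
//
//   ConstantRange A(APInt(8, 0), APInt(8, 4));
//   ConstantRange B(APInt(8, 8), APInt(8, 12));
//   A.unionWith(B);       // [0, 12), a covering superset
//   A.exactUnionWith(B);  // None, the union is not exactly representable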
/// Return a new range representing the possible values resulting
/// from an application of the specified cast operator to this range. \p
/// BitWidth is the target bitwidth of the cast. For casts which don't
/// change bitwidth, it must be the same as the source bitwidth. For casts
/// which do change bitwidth, the bitwidth must be consistent with the
/// requested cast and source bitwidth.
ConstantRange castOp(Instruction::CastOps CastOp,
uint32_t BitWidth) const;
/// Return a new range in the specified integer type, which must
/// be strictly larger than the current type. The returned range will
/// correspond to the possible range of values if the source range had been
/// zero extended to BitWidth.
ConstantRange zeroExtend(uint32_t BitWidth) const;
/// Return a new range in the specified integer type, which must
/// be strictly larger than the current type. The returned range will
/// correspond to the possible range of values if the source range had been
/// sign extended to BitWidth.
ConstantRange signExtend(uint32_t BitWidth) const;
/// Return a new range in the specified integer type, which must be
/// strictly smaller than the current type. The returned range will
/// correspond to the possible range of values if the source range had been
/// truncated to the specified type.
ConstantRange truncate(uint32_t BitWidth) const;
/// Make this range have the bit width given by \p BitWidth. The
/// value is zero extended, truncated, or left alone to make it that width.
ConstantRange zextOrTrunc(uint32_t BitWidth) const;
/// Make this range have the bit width given by \p BitWidth. The
/// value is sign extended, truncated, or left alone to make it that width.
ConstantRange sextOrTrunc(uint32_t BitWidth) const;
/// Return a new range representing the possible values resulting
/// from an application of the specified binary operator to a left hand side
/// of this range and a right hand side of \p Other.
ConstantRange binaryOp(Instruction::BinaryOps BinOp,
const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an application of the specified overflowing binary operator to a
/// left hand side of this range and a right hand side of \p Other given
/// the provided knowledge about lack of wrapping \p NoWrapKind.
ConstantRange overflowingBinaryOp(Instruction::BinaryOps BinOp,
const ConstantRange &Other,
unsigned NoWrapKind) const;
/// Return a new range representing the possible values resulting
/// from an addition of a value in this range and a value in \p Other.
ConstantRange add(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an addition with wrap type \p NoWrapKind of a value in this
/// range and a value in \p Other.
/// If the result range is disjoint, the preferred range is determined by the
/// \p PreferredRangeType.
ConstantRange addWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
PreferredRangeType RangeType = Smallest) const;
/// Return a new range representing the possible values resulting
/// from a subtraction of a value in this range and a value in \p Other.
ConstantRange sub(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a subtraction with wrap type \p NoWrapKind of a value in this
/// range and a value in \p Other.
/// If the result range is disjoint, the preferred range is determined by the
/// \p PreferredRangeType.
ConstantRange subWithNoWrap(const ConstantRange &Other, unsigned NoWrapKind,
PreferredRangeType RangeType = Smallest) const;
/// Return a new range representing the possible values resulting
/// from a multiplication of a value in this range and a value in \p Other,
/// treating both this and \p Other as unsigned ranges.
ConstantRange multiply(const ConstantRange &Other) const;
/// Return range of possible values for a signed multiplication of this and
/// \p Other. However, if overflow is possible, always return a full range
/// rather than trying to determine a more precise result.
ConstantRange smul_fast(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a signed maximum of a value in this range and a value in \p Other.
ConstantRange smax(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an unsigned maximum of a value in this range and a value in \p Other.
ConstantRange umax(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a signed minimum of a value in this range and a value in \p Other.
ConstantRange smin(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an unsigned minimum of a value in this range and a value in \p Other.
ConstantRange umin(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an unsigned division of a value in this range and a value in
/// \p Other.
ConstantRange udiv(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a signed division of a value in this range and a value in
/// \p Other. Division by zero and division of SignedMin by -1 are considered
/// undefined behavior, in line with IR, and do not contribute towards the
/// result.
ConstantRange sdiv(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from an unsigned remainder operation of a value in this range and a
/// value in \p Other.
ConstantRange urem(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a signed remainder operation of a value in this range and a
/// value in \p Other.
ConstantRange srem(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting from
/// a binary-xor of a value in this range by an all-one value,
/// aka bitwise complement operation.
ConstantRange binaryNot() const;
/// Return a new range representing the possible values resulting
/// from a binary-and of a value in this range by a value in \p Other.
ConstantRange binaryAnd(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a binary-or of a value in this range by a value in \p Other.
ConstantRange binaryOr(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a binary-xor of a value in this range by a value in \p Other.
ConstantRange binaryXor(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting
/// from a left shift of a value in this range by a value in \p Other.
/// TODO: This isn't fully implemented yet.
ConstantRange shl(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting from a
/// logical right shift of a value in this range and a value in \p Other.
ConstantRange lshr(const ConstantRange &Other) const;
/// Return a new range representing the possible values resulting from an
/// arithmetic right shift of a value in this range and a value in \p Other.
ConstantRange ashr(const ConstantRange &Other) const;
/// Perform an unsigned saturating addition of two constant ranges.
ConstantRange uadd_sat(const ConstantRange &Other) const;
/// Perform a signed saturating addition of two constant ranges.
ConstantRange sadd_sat(const ConstantRange &Other) const;
/// Perform an unsigned saturating subtraction of two constant ranges.
ConstantRange usub_sat(const ConstantRange &Other) const;
/// Perform a signed saturating subtraction of two constant ranges.
ConstantRange ssub_sat(const ConstantRange &Other) const;
/// Perform an unsigned saturating multiplication of two constant ranges.
ConstantRange umul_sat(const ConstantRange &Other) const;
/// Perform a signed saturating multiplication of two constant ranges.
ConstantRange smul_sat(const ConstantRange &Other) const;
/// Perform an unsigned saturating left shift of this constant range by a
/// value in \p Other.
ConstantRange ushl_sat(const ConstantRange &Other) const;
/// Perform a signed saturating left shift of this constant range by a
/// value in \p Other.
ConstantRange sshl_sat(const ConstantRange &Other) const;
/// Return a new range that is the logical not of the current set.
ConstantRange inverse() const;
/// Calculate absolute value range. If the original range contains signed
/// min, then the resulting range will contain signed min if and only if
/// \p IntMinIsPoison is false.
ConstantRange abs(bool IntMinIsPoison = false) const;
/// Represents whether an operation on the given constant range is known to
/// always or never overflow.
enum class OverflowResult {
/// Always overflows in the direction of signed/unsigned min value.
AlwaysOverflowsLow,
/// Always overflows in the direction of signed/unsigned max value.
AlwaysOverflowsHigh,
/// May or may not overflow.
MayOverflow,
/// Never overflows.
NeverOverflows,
};
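// Example (illustrative): for i8 treated as unsigned, [200, 250) + [100, 110)
// sums to at least 300 > 255, so unsignedAddMayOverflow() returns
// AlwaysOverflowsHigh, while [0, 10) + [0, 10) can reach at most 18 and
// returns NeverOverflows.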
/// Return whether unsigned add of the two ranges always/never overflows.
OverflowResult unsignedAddMayOverflow(const ConstantRange &Other) const;
/// Return whether signed add of the two ranges always/never overflows.
OverflowResult signedAddMayOverflow(const ConstantRange &Other) const;
/// Return whether unsigned sub of the two ranges always/never overflows.
OverflowResult unsignedSubMayOverflow(const ConstantRange &Other) const;
/// Return whether signed sub of the two ranges always/never overflows.
OverflowResult signedSubMayOverflow(const ConstantRange &Other) const;
/// Return whether unsigned mul of the two ranges always/never overflows.
OverflowResult unsignedMulMayOverflow(const ConstantRange &Other) const;
/// Print out the bounds to a stream.
void print(raw_ostream &OS) const;
/// Allow printing from a debugger easily.
void dump() const;
};
inline raw_ostream &operator<<(raw_ostream &OS, const ConstantRange &CR) {
CR.print(OS);
return OS;
}
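// Example (illustrative sketch): printing a range to the error stream; the
// exact textual form is whatever ConstantRange::print produces, e.g.
// [10,20) for the range below:
//
//   ConstantRange CR(APInt(8, 10), APInt(8, 20));
//   errs() << CR << "\n";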
/// Parse out a conservative ConstantRange from !range metadata.
///
/// E.g. if RangeMD is !{i32 0, i32 10, i32 15, i32 20} then return [0, 20).
ConstantRange getConstantRangeFromMetadata(const MDNode &RangeMD);
} // end namespace llvm
#endif // LLVM_IR_CONSTANTRANGE_H

File diff suppressed because it is too large

View File

@@ -0,0 +1,107 @@
//===- llvm/IR/ConstrainedOps.def - Constrained intrinsics ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Defines properties of constrained intrinsics, in particular corresponding
// floating point operations and DAG nodes.
//
//===----------------------------------------------------------------------===//
// DAG_FUNCTION defers to DAG_INSTRUCTION if it is defined, otherwise FUNCTION.
#ifndef DAG_FUNCTION
#ifdef DAG_INSTRUCTION
#define DAG_FUNCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
#else
#define DAG_FUNCTION(N,A,R,I,D) FUNCTION(N,A,R,I)
#endif
#endif
#ifndef INSTRUCTION
#define INSTRUCTION(N,A,R,I)
#endif
// DAG_INSTRUCTION is treated like an INSTRUCTION if the DAG node isn't used.
#ifndef DAG_INSTRUCTION
#define DAG_INSTRUCTION(N,A,R,I,D) INSTRUCTION(N,A,R,I)
#endif
// In most cases intrinsic function is handled similar to instruction.
#ifndef FUNCTION
#define FUNCTION(N,A,R,I) INSTRUCTION(N,A,R,I)
#endif
// Compare instructions have a DAG node, so they are treated like DAG_INSTRUCTION.
#ifndef CMP_INSTRUCTION
#define CMP_INSTRUCTION(N,A,R,I,D) DAG_INSTRUCTION(N,A,R,I,D)
#endif
// Arguments of the entries are:
// - instruction or intrinsic function name.
// - Number of original instruction/intrinsic arguments.
// - 1 if the corresponding constrained intrinsic has rounding mode argument.
// - name of the constrained intrinsic to represent this instruction/function.
// - DAG node corresponding to the constrained intrinsic without prefix STRICT_.
// These are definitions for instructions that are converted into constrained
// intrinsics.
//
DAG_INSTRUCTION(FAdd, 2, 1, experimental_constrained_fadd, FADD)
DAG_INSTRUCTION(FSub, 2, 1, experimental_constrained_fsub, FSUB)
DAG_INSTRUCTION(FMul, 2, 1, experimental_constrained_fmul, FMUL)
DAG_INSTRUCTION(FDiv, 2, 1, experimental_constrained_fdiv, FDIV)
DAG_INSTRUCTION(FRem, 2, 1, experimental_constrained_frem, FREM)
DAG_INSTRUCTION(FPExt, 1, 0, experimental_constrained_fpext, FP_EXTEND)
DAG_INSTRUCTION(SIToFP, 1, 1, experimental_constrained_sitofp, SINT_TO_FP)
DAG_INSTRUCTION(UIToFP, 1, 1, experimental_constrained_uitofp, UINT_TO_FP)
DAG_INSTRUCTION(FPToSI, 1, 0, experimental_constrained_fptosi, FP_TO_SINT)
DAG_INSTRUCTION(FPToUI, 1, 0, experimental_constrained_fptoui, FP_TO_UINT)
DAG_INSTRUCTION(FPTrunc, 1, 1, experimental_constrained_fptrunc, FP_ROUND)
// These are definitions for compare instructions (signaling and quiet version).
// Both of these match to FCmp / SETCC.
CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmp, FSETCC)
CMP_INSTRUCTION(FCmp, 2, 0, experimental_constrained_fcmps, FSETCCS)
// These are definitions for intrinsic functions that are converted into
// constrained intrinsics.
//
DAG_FUNCTION(ceil, 1, 0, experimental_constrained_ceil, FCEIL)
DAG_FUNCTION(cos, 1, 1, experimental_constrained_cos, FCOS)
DAG_FUNCTION(exp, 1, 1, experimental_constrained_exp, FEXP)
DAG_FUNCTION(exp2, 1, 1, experimental_constrained_exp2, FEXP2)
DAG_FUNCTION(floor, 1, 0, experimental_constrained_floor, FFLOOR)
DAG_FUNCTION(fma, 3, 1, experimental_constrained_fma, FMA)
DAG_FUNCTION(log, 1, 1, experimental_constrained_log, FLOG)
DAG_FUNCTION(log10, 1, 1, experimental_constrained_log10, FLOG10)
DAG_FUNCTION(log2, 1, 1, experimental_constrained_log2, FLOG2)
DAG_FUNCTION(lrint, 1, 1, experimental_constrained_lrint, LRINT)
DAG_FUNCTION(llrint, 1, 1, experimental_constrained_llrint, LLRINT)
DAG_FUNCTION(lround, 1, 0, experimental_constrained_lround, LROUND)
DAG_FUNCTION(llround, 1, 0, experimental_constrained_llround, LLROUND)
DAG_FUNCTION(maxnum, 2, 0, experimental_constrained_maxnum, FMAXNUM)
DAG_FUNCTION(minnum, 2, 0, experimental_constrained_minnum, FMINNUM)
DAG_FUNCTION(maximum, 2, 0, experimental_constrained_maximum, FMAXIMUM)
DAG_FUNCTION(minimum, 2, 0, experimental_constrained_minimum, FMINIMUM)
DAG_FUNCTION(nearbyint, 1, 1, experimental_constrained_nearbyint, FNEARBYINT)
DAG_FUNCTION(pow, 2, 1, experimental_constrained_pow, FPOW)
DAG_FUNCTION(powi, 2, 1, experimental_constrained_powi, FPOWI)
DAG_FUNCTION(rint, 1, 1, experimental_constrained_rint, FRINT)
DAG_FUNCTION(round, 1, 0, experimental_constrained_round, FROUND)
DAG_FUNCTION(roundeven, 1, 0, experimental_constrained_roundeven, FROUNDEVEN)
DAG_FUNCTION(sin, 1, 1, experimental_constrained_sin, FSIN)
DAG_FUNCTION(sqrt, 1, 1, experimental_constrained_sqrt, FSQRT)
DAG_FUNCTION(trunc, 1, 0, experimental_constrained_trunc, FTRUNC)
// This is the definition for the fmuladd intrinsic function, which is converted into
// constrained FMA or FMUL + FADD intrinsics.
FUNCTION(fmuladd, 3, 1, experimental_constrained_fmuladd)
#undef INSTRUCTION
#undef FUNCTION
#undef CMP_INSTRUCTION
#undef DAG_INSTRUCTION
#undef DAG_FUNCTION
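// Example (illustrative sketch of the X-macro idiom; NumArgs and HasRounding
// are hypothetical client variables): a client that only cares about the
// intrinsic functions can define FUNCTION before including this file.
// DAG_FUNCTION then falls back to FUNCTION, and the pure instruction entries
// expand to nothing by default:
//
//   #define FUNCTION(N, A, R, I) \
//     case Intrinsic::I: NumArgs = A; HasRounding = R; break;
//   #include "llvm/IR/ConstrainedOps.def"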

File diff suppressed because it is too large

View File

@@ -0,0 +1,723 @@
//===- llvm/DataLayout.h - Data size & alignment info -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines layout properties related to datatype size/offset/alignment
// information. It uses lazy annotations to cache information about how
// structure types are laid out and used.
//
// This structure should be created once, filled in if the defaults are not
// correct and then passed around by const&. None of the member functions
// require modification to the object.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DATALAYOUT_H
#define LLVM_IR_DATALAYOUT_H
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/Alignment.h"
#include "llvm/Support/TrailingObjects.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
#include <string>
// This needs to be outside of the namespace, to avoid conflict with llvm-c
// decl.
using LLVMTargetDataRef = struct LLVMOpaqueTargetData *;
namespace llvm {
class GlobalVariable;
class LLVMContext;
class Module;
class StructLayout;
class Triple;
class Value;
/// Enum used to categorize the alignment types stored by LayoutAlignElem
enum AlignTypeEnum {
INVALID_ALIGN = 0,
INTEGER_ALIGN = 'i',
VECTOR_ALIGN = 'v',
FLOAT_ALIGN = 'f',
AGGREGATE_ALIGN = 'a'
};
// FIXME: Currently the DataLayout string carries a "preferred alignment"
// for types. As the DataLayout is module/global, this should likely be
// sunk down to an FTTI element that is queried rather than a global
// preference.
/// Layout alignment element.
///
/// Stores the alignment data associated with a given alignment type (integer,
/// vector, float) and type bit width.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct LayoutAlignElem {
/// Alignment type from \c AlignTypeEnum
unsigned AlignType : 8;
unsigned TypeBitWidth : 24;
Align ABIAlign;
Align PrefAlign;
static LayoutAlignElem get(AlignTypeEnum align_type, Align abi_align,
Align pref_align, uint32_t bit_width);
bool operator==(const LayoutAlignElem &rhs) const;
};
/// Layout pointer alignment element.
///
/// Stores the alignment data associated with a given pointer and address space.
///
/// \note The unusual order of elements in the structure attempts to reduce
/// padding and make the structure slightly more cache friendly.
struct PointerAlignElem {
Align ABIAlign;
Align PrefAlign;
uint32_t TypeBitWidth;
uint32_t AddressSpace;
uint32_t IndexBitWidth;
/// Initializer
static PointerAlignElem getInBits(uint32_t AddressSpace, Align ABIAlign,
Align PrefAlign, uint32_t TypeBitWidth,
uint32_t IndexBitWidth);
bool operator==(const PointerAlignElem &rhs) const;
};
/// A parsed version of the target data layout string and methods for
/// querying it.
///
/// The target data layout string is specified *by the target* - a frontend
/// generating LLVM IR is required to generate the right target data for the
/// target being codegen'd to.
class DataLayout {
public:
enum class FunctionPtrAlignType {
/// The function pointer alignment is independent of the function alignment.
Independent,
/// The function pointer alignment is a multiple of the function alignment.
MultipleOfFunctionAlign,
};
private:
/// Defaults to false.
bool BigEndian;
unsigned AllocaAddrSpace;
MaybeAlign StackNaturalAlign;
unsigned ProgramAddrSpace;
unsigned DefaultGlobalsAddrSpace;
MaybeAlign FunctionPtrAlign;
FunctionPtrAlignType TheFunctionPtrAlignType;
enum ManglingModeT {
MM_None,
MM_ELF,
MM_MachO,
MM_WinCOFF,
MM_WinCOFFX86,
MM_GOFF,
MM_Mips,
MM_XCOFF
};
ManglingModeT ManglingMode;
SmallVector<unsigned char, 8> LegalIntWidths;
/// Primitive type alignment data. This is sorted by type and bit
/// width during construction.
using AlignmentsTy = SmallVector<LayoutAlignElem, 16>;
AlignmentsTy Alignments;
AlignmentsTy::const_iterator
findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth) const {
return const_cast<DataLayout *>(this)->findAlignmentLowerBound(AlignType,
BitWidth);
}
AlignmentsTy::iterator
findAlignmentLowerBound(AlignTypeEnum AlignType, uint32_t BitWidth);
/// The string representation used to create this DataLayout
std::string StringRepresentation;
using PointersTy = SmallVector<PointerAlignElem, 8>;
PointersTy Pointers;
const PointerAlignElem &getPointerAlignElem(uint32_t AddressSpace) const;
// The StructType -> StructLayout map.
mutable void *LayoutMap = nullptr;
/// Pointers in these address spaces are non-integral, and don't have a
/// well-defined bitwise representation.
SmallVector<unsigned, 8> NonIntegralAddressSpaces;
/// Attempts to set the alignment of the given type. Returns an error
/// description on failure.
Error setAlignment(AlignTypeEnum align_type, Align abi_align,
Align pref_align, uint32_t bit_width);
/// Attempts to set the alignment of a pointer in the given address space.
/// Returns an error description on failure.
Error setPointerAlignmentInBits(uint32_t AddrSpace, Align ABIAlign,
Align PrefAlign, uint32_t TypeBitWidth,
uint32_t IndexBitWidth);
/// Internal helper to get alignment for integer of given bitwidth.
Align getIntegerAlignment(uint32_t BitWidth, bool abi_or_pref) const;
/// Internal helper method that returns requested alignment for type.
Align getAlignment(Type *Ty, bool abi_or_pref) const;
/// Attempts to parse a target data specification string and reports an error
/// if the string is malformed.
Error parseSpecifier(StringRef Desc);
// Free all internal data structures.
void clear();
public:
/// Constructs a DataLayout from a specification string. See reset().
explicit DataLayout(StringRef LayoutDescription) {
reset(LayoutDescription);
}
/// Initialize target data from properties stored in the module.
explicit DataLayout(const Module *M);
DataLayout(const DataLayout &DL) { *this = DL; }
~DataLayout(); // Not virtual, do not subclass this class
DataLayout &operator=(const DataLayout &DL) {
clear();
StringRepresentation = DL.StringRepresentation;
BigEndian = DL.isBigEndian();
AllocaAddrSpace = DL.AllocaAddrSpace;
StackNaturalAlign = DL.StackNaturalAlign;
FunctionPtrAlign = DL.FunctionPtrAlign;
TheFunctionPtrAlignType = DL.TheFunctionPtrAlignType;
ProgramAddrSpace = DL.ProgramAddrSpace;
DefaultGlobalsAddrSpace = DL.DefaultGlobalsAddrSpace;
ManglingMode = DL.ManglingMode;
LegalIntWidths = DL.LegalIntWidths;
Alignments = DL.Alignments;
Pointers = DL.Pointers;
NonIntegralAddressSpaces = DL.NonIntegralAddressSpaces;
return *this;
}
bool operator==(const DataLayout &Other) const;
bool operator!=(const DataLayout &Other) const { return !(*this == Other); }
void init(const Module *M);
/// Parse a data layout string (with fallback to default values).
void reset(StringRef LayoutDescription);
/// Parse a data layout string and return the layout. Return an error
/// description on failure.
static Expected<DataLayout> parse(StringRef LayoutDescription);
/// Layout endianness...
bool isLittleEndian() const { return !BigEndian; }
bool isBigEndian() const { return BigEndian; }
/// Returns the string representation of the DataLayout.
///
/// This representation is in the same format accepted by the string
/// constructor above. This should not be used to compare two DataLayouts, as
/// different strings can represent the same layout.
const std::string &getStringRepresentation() const {
return StringRepresentation;
}
/// Test if the DataLayout was constructed from an empty string.
bool isDefault() const { return StringRepresentation.empty(); }
/// Returns true if the specified type is known to be a native integer
/// type supported by the CPU.
///
/// For example, i64 is not native on most 32-bit CPUs and i37 is not native
/// on any known one. This returns false if the integer width is not legal.
///
/// The width is specified in bits.
bool isLegalInteger(uint64_t Width) const {
return llvm::is_contained(LegalIntWidths, Width);
}
bool isIllegalInteger(uint64_t Width) const { return !isLegalInteger(Width); }
/// Returns true if the given alignment exceeds the natural stack alignment.
bool exceedsNaturalStackAlignment(Align Alignment) const {
return StackNaturalAlign && (Alignment > *StackNaturalAlign);
}
Align getStackAlignment() const {
assert(StackNaturalAlign && "StackNaturalAlign must be defined");
return *StackNaturalAlign;
}
unsigned getAllocaAddrSpace() const { return AllocaAddrSpace; }
/// Returns the alignment of function pointers, which may or may not be
/// related to the alignment of functions.
/// \see getFunctionPtrAlignType
MaybeAlign getFunctionPtrAlign() const { return FunctionPtrAlign; }
/// Return the type of function pointer alignment.
/// \see getFunctionPtrAlign
FunctionPtrAlignType getFunctionPtrAlignType() const {
return TheFunctionPtrAlignType;
}
unsigned getProgramAddressSpace() const { return ProgramAddrSpace; }
unsigned getDefaultGlobalsAddressSpace() const {
return DefaultGlobalsAddrSpace;
}
bool hasMicrosoftFastStdCallMangling() const {
return ManglingMode == MM_WinCOFFX86;
}
/// Returns true if symbols with leading question marks should not receive IR
/// mangling. True for Windows mangling modes.
bool doNotMangleLeadingQuestionMark() const {
return ManglingMode == MM_WinCOFF || ManglingMode == MM_WinCOFFX86;
}
bool hasLinkerPrivateGlobalPrefix() const { return ManglingMode == MM_MachO; }
StringRef getLinkerPrivateGlobalPrefix() const {
if (ManglingMode == MM_MachO)
return "l";
return "";
}
char getGlobalPrefix() const {
switch (ManglingMode) {
case MM_None:
case MM_ELF:
case MM_GOFF:
case MM_Mips:
case MM_WinCOFF:
case MM_XCOFF:
return '\0';
case MM_MachO:
case MM_WinCOFFX86:
return '_';
}
llvm_unreachable("invalid mangling mode");
}
StringRef getPrivateGlobalPrefix() const {
switch (ManglingMode) {
case MM_None:
return "";
case MM_ELF:
case MM_WinCOFF:
return ".L";
case MM_GOFF:
return "@";
case MM_Mips:
return "$";
case MM_MachO:
case MM_WinCOFFX86:
return "L";
case MM_XCOFF:
return "L..";
}
llvm_unreachable("invalid mangling mode");
}
static const char *getManglingComponent(const Triple &T);
/// Returns true if the specified type fits in a native integer type
/// supported by the CPU.
///
/// For example, if the CPU only supports i32 as a native integer type, then
/// i27 fits in a legal integer type but i45 does not.
bool fitsInLegalInteger(unsigned Width) const {
for (unsigned LegalIntWidth : LegalIntWidths)
if (Width <= LegalIntWidth)
return true;
return false;
}
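// Example (illustrative): with "n8:16:32:64" in the layout string,
// isLegalInteger(64) is true, isLegalInteger(37) is false, and
// fitsInLegalInteger(37) is true because i37 fits in the native i64.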
/// Layout pointer alignment
Align getPointerABIAlignment(unsigned AS) const;
/// Return target's alignment for stack-based pointers
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
Align getPointerPrefAlignment(unsigned AS = 0) const;
/// Layout pointer size in bytes, rounded up to a whole
/// number of bytes.
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
unsigned getPointerSize(unsigned AS = 0) const;
/// Returns the maximum index size over all address spaces.
unsigned getMaxIndexSize() const;
/// Index size in bytes used for address calculation,
/// rounded up to a whole number of bytes.
unsigned getIndexSize(unsigned AS) const;
/// Return the address spaces containing non-integral pointers. Pointers in
/// this address space don't have a well-defined bitwise representation.
ArrayRef<unsigned> getNonIntegralAddressSpaces() const {
return NonIntegralAddressSpaces;
}
bool isNonIntegralAddressSpace(unsigned AddrSpace) const {
ArrayRef<unsigned> NonIntegralSpaces = getNonIntegralAddressSpaces();
return is_contained(NonIntegralSpaces, AddrSpace);
}
bool isNonIntegralPointerType(PointerType *PT) const {
return isNonIntegralAddressSpace(PT->getAddressSpace());
}
bool isNonIntegralPointerType(Type *Ty) const {
auto *PTy = dyn_cast<PointerType>(Ty);
return PTy && isNonIntegralPointerType(PTy);
}
/// Layout pointer size, in bits
/// FIXME: The defaults need to be removed once all of
/// the backends/clients are updated.
unsigned getPointerSizeInBits(unsigned AS = 0) const {
return getPointerAlignElem(AS).TypeBitWidth;
}
/// Returns the maximum index size over all address spaces.
unsigned getMaxIndexSizeInBits() const {
return getMaxIndexSize() * 8;
}
/// Size in bits of index used for address calculation in getelementptr.
unsigned getIndexSizeInBits(unsigned AS) const {
return getPointerAlignElem(AS).IndexBitWidth;
}
/// Layout pointer size, in bits, based on the type. If this function is
/// called with a pointer type, then the type size of the pointer is returned.
/// If this function is called with a vector of pointers, then the type size
/// of the pointer is returned. This should only be called with a pointer or
/// vector of pointers.
unsigned getPointerTypeSizeInBits(Type *) const;
/// Layout size of the index used in GEP calculation.
/// The function should be called with pointer or vector of pointers type.
unsigned getIndexTypeSizeInBits(Type *Ty) const;
unsigned getPointerTypeSize(Type *Ty) const {
return getPointerTypeSizeInBits(Ty) / 8;
}
/// Size examples:
///
/// Type SizeInBits StoreSizeInBits AllocSizeInBits[*]
/// ---- ---------- --------------- ---------------
/// i1 1 8 8
/// i8 8 8 8
/// i19 19 24 32
/// i32 32 32 32
/// i100 100 104 128
/// i128 128 128 128
/// Float 32 32 32
/// Double 64 64 64
/// X86_FP80 80 80 96
///
/// [*] The alloc size depends on the alignment, and thus on the target.
/// These values are for x86-32 linux.
/// Returns the number of bits necessary to hold the specified type.
///
/// If Ty is a scalable vector type, the scalable property will be set and
/// the runtime size will be a positive integer multiple of the base size.
///
/// For example, returns 36 for i36 and 80 for x86_fp80. The type passed must
/// have a size (Type::isSized() must return true).
TypeSize getTypeSizeInBits(Type *Ty) const;
/// Returns the maximum number of bytes that may be overwritten by
/// storing the specified type.
///
/// If Ty is a scalable vector type, the scalable property will be set and
/// the runtime size will be a positive integer multiple of the base size.
///
/// For example, returns 5 for i36 and 10 for x86_fp80.
TypeSize getTypeStoreSize(Type *Ty) const {
TypeSize BaseSize = getTypeSizeInBits(Ty);
return {divideCeil(BaseSize.getKnownMinSize(), 8), BaseSize.isScalable()};
}
/// Returns the maximum number of bits that may be overwritten by
/// storing the specified type; always a multiple of 8.
///
/// If Ty is a scalable vector type, the scalable property will be set and
/// the runtime size will be a positive integer multiple of the base size.
///
/// For example, returns 40 for i36 and 80 for x86_fp80.
TypeSize getTypeStoreSizeInBits(Type *Ty) const {
return 8 * getTypeStoreSize(Ty);
}
/// Returns true if no extra padding bits are needed when storing the
/// specified type.
///
/// For example, returns false for i19 that has a 24-bit store size.
bool typeSizeEqualsStoreSize(Type *Ty) const {
return getTypeSizeInBits(Ty) == getTypeStoreSizeInBits(Ty);
}
/// Returns the offset in bytes between successive objects of the
/// specified type, including alignment padding.
///
/// If Ty is a scalable vector type, the scalable property will be set and
/// the runtime size will be a positive integer multiple of the base size.
///
/// This is the amount that alloca reserves for this type. For example,
/// returns 12 or 16 for x86_fp80, depending on alignment.
TypeSize getTypeAllocSize(Type *Ty) const {
// Round up to the next alignment boundary.
return alignTo(getTypeStoreSize(Ty), getABITypeAlignment(Ty));
}
/// Returns the offset in bits between successive objects of the
/// specified type, including alignment padding; always a multiple of 8.
///
/// If Ty is a scalable vector type, the scalable property will be set and
/// the runtime size will be a positive integer multiple of the base size.
///
/// This is the amount that alloca reserves for this type. For example,
/// returns 96 or 128 for x86_fp80, depending on alignment.
TypeSize getTypeAllocSizeInBits(Type *Ty) const {
return 8 * getTypeAllocSize(Ty);
}
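// Example (illustrative sketch; Ctx is an LLVMContext and DL a DataLayout in
// scope, and i19 is assumed to get i32's 32-bit ABI alignment, matching the
// table above):
//
//   Type *I19 = Type::getIntNTy(Ctx, 19);
//   DL.getTypeSizeInBits(I19);  // 19 bits
//   DL.getTypeStoreSize(I19);   // 3 bytes (24 bits)
//   DL.getTypeAllocSize(I19);   // 4 bytes, rounded up to the alignment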
/// Returns the minimum ABI-required alignment for the specified type.
/// FIXME: Deprecate this function once migration to Align is over.
uint64_t getABITypeAlignment(Type *Ty) const;
/// Returns the minimum ABI-required alignment for the specified type.
Align getABITypeAlign(Type *Ty) const;
/// Helper function to return `Alignment` if it's set, or the result of
/// `getABITypeAlignment(Ty)` otherwise; either way the result is a valid
/// alignment.
inline Align getValueOrABITypeAlignment(MaybeAlign Alignment,
Type *Ty) const {
return Alignment ? *Alignment : getABITypeAlign(Ty);
}
/// Returns the minimum ABI-required alignment for an integer type of
/// the specified bitwidth.
Align getABIIntegerTypeAlignment(unsigned BitWidth) const {
return getIntegerAlignment(BitWidth, /* abi_or_pref */ true);
}
/// Returns the preferred stack/global alignment for the specified
/// type.
///
/// This is always at least as good as the ABI alignment.
/// FIXME: Deprecate this function once migration to Align is over.
uint64_t getPrefTypeAlignment(Type *Ty) const;
/// Returns the preferred stack/global alignment for the specified
/// type.
///
/// This is always at least as good as the ABI alignment.
Align getPrefTypeAlign(Type *Ty) const;
/// Returns an integer type with size at least as big as that of a
/// pointer in the given address space.
IntegerType *getIntPtrType(LLVMContext &C, unsigned AddressSpace = 0) const;
/// Returns an integer (vector of integer) type with size at least as
/// big as that of a pointer of the given pointer (vector of pointer) type.
Type *getIntPtrType(Type *) const;
/// Returns the smallest integer type with size at least as big as
/// Width bits.
Type *getSmallestLegalIntType(LLVMContext &C, unsigned Width = 0) const;
/// Returns the largest legal integer type, or null if none are set.
Type *getLargestLegalIntType(LLVMContext &C) const {
unsigned LargestSize = getLargestLegalIntTypeSizeInBits();
return (LargestSize == 0) ? nullptr : Type::getIntNTy(C, LargestSize);
}
/// Returns the size of largest legal integer type size, or 0 if none
/// are set.
unsigned getLargestLegalIntTypeSizeInBits() const;
/// Returns the type of a GEP index.
/// If it was not specified explicitly, it will be the integer type of the
/// pointer width - IntPtrType.
Type *getIndexType(Type *PtrTy) const;
/// Returns the offset from the beginning of the type for the specified
/// indices.
///
/// Note that this takes the element type, not the pointer type.
/// This is used to implement getelementptr.
int64_t getIndexedOffsetInType(Type *ElemTy, ArrayRef<Value *> Indices) const;
/// Get GEP indices to access Offset inside ElemTy. ElemTy is updated to be
/// the result element type and Offset to be the residual offset.
SmallVector<APInt> getGEPIndicesForOffset(Type *&ElemTy, APInt &Offset) const;
/// Get single GEP index to access Offset inside ElemTy. Returns None if
/// index cannot be computed, e.g. because the type is not an aggregate.
/// ElemTy is updated to be the result element type and Offset to be the
/// residual offset.
Optional<APInt> getGEPIndexForOffset(Type *&ElemTy, APInt &Offset) const;
/// Returns a StructLayout object, indicating the alignment of the
/// struct, its size, and the offsets of its fields.
///
/// Note that this information is lazily cached.
const StructLayout *getStructLayout(StructType *Ty) const;
/// Returns the preferred alignment of the specified global.
///
/// This includes an explicitly requested alignment (if the global has one).
Align getPreferredAlign(const GlobalVariable *GV) const;
};
inline DataLayout *unwrap(LLVMTargetDataRef P) {
return reinterpret_cast<DataLayout *>(P);
}
inline LLVMTargetDataRef wrap(const DataLayout *P) {
return reinterpret_cast<LLVMTargetDataRef>(const_cast<DataLayout *>(P));
}
/// Used to lazily calculate structure layout information for a target machine,
/// based on the DataLayout structure.
class StructLayout final : public TrailingObjects<StructLayout, uint64_t> {
uint64_t StructSize;
Align StructAlignment;
unsigned IsPadded : 1;
unsigned NumElements : 31;
public:
uint64_t getSizeInBytes() const { return StructSize; }
uint64_t getSizeInBits() const { return 8 * StructSize; }
Align getAlignment() const { return StructAlignment; }
/// Returns whether the struct has padding or not between its fields.
/// NB: Padding in nested elements is not taken into account.
bool hasPadding() const { return IsPadded; }
/// Given a valid byte offset into the structure, returns the structure
/// index that contains it.
unsigned getElementContainingOffset(uint64_t Offset) const;
MutableArrayRef<uint64_t> getMemberOffsets() {
return llvm::makeMutableArrayRef(getTrailingObjects<uint64_t>(),
NumElements);
}
ArrayRef<uint64_t> getMemberOffsets() const {
return llvm::makeArrayRef(getTrailingObjects<uint64_t>(), NumElements);
}
uint64_t getElementOffset(unsigned Idx) const {
assert(Idx < NumElements && "Invalid element idx!");
return getMemberOffsets()[Idx];
}
uint64_t getElementOffsetInBits(unsigned Idx) const {
return getElementOffset(Idx) * 8;
}
private:
friend class DataLayout; // Only DataLayout can create this class
StructLayout(StructType *ST, const DataLayout &DL);
size_t numTrailingObjects(OverloadToken<uint64_t>) const {
return NumElements;
}
};
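// Example (illustrative sketch; offsets assume i32 has 32-bit ABI alignment):
// for %T = type { i8, i32 }, DL.getStructLayout(T) would report
// getElementOffset(0) == 0, getElementOffset(1) == 4, getSizeInBytes() == 8,
// and hasPadding() == true because of the three padding bytes after the i8.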
// The implementation of this method is provided inline as it is particularly
// well suited to constant folding when called on a specific Type subclass.
inline TypeSize DataLayout::getTypeSizeInBits(Type *Ty) const {
assert(Ty->isSized() && "Cannot getTypeInfo() on a type that is unsized!");
switch (Ty->getTypeID()) {
case Type::LabelTyID:
return TypeSize::Fixed(getPointerSizeInBits(0));
case Type::PointerTyID:
return TypeSize::Fixed(getPointerSizeInBits(Ty->getPointerAddressSpace()));
case Type::ArrayTyID: {
ArrayType *ATy = cast<ArrayType>(Ty);
return ATy->getNumElements() *
getTypeAllocSizeInBits(ATy->getElementType());
}
case Type::StructTyID:
// Get the layout annotation... which is lazily created on demand.
return TypeSize::Fixed(
getStructLayout(cast<StructType>(Ty))->getSizeInBits());
case Type::IntegerTyID:
return TypeSize::Fixed(Ty->getIntegerBitWidth());
case Type::HalfTyID:
case Type::BFloatTyID:
return TypeSize::Fixed(16);
case Type::FloatTyID:
return TypeSize::Fixed(32);
case Type::DoubleTyID:
case Type::X86_MMXTyID:
return TypeSize::Fixed(64);
case Type::PPC_FP128TyID:
case Type::FP128TyID:
return TypeSize::Fixed(128);
case Type::X86_AMXTyID:
return TypeSize::Fixed(8192);
// In memory objects this is always aligned to a higher boundary, but
// only 80 bits contain information.
case Type::X86_FP80TyID:
return TypeSize::Fixed(80);
case Type::FixedVectorTyID:
case Type::ScalableVectorTyID: {
VectorType *VTy = cast<VectorType>(Ty);
auto EltCnt = VTy->getElementCount();
uint64_t MinBits = EltCnt.getKnownMinValue() *
getTypeSizeInBits(VTy->getElementType()).getFixedSize();
return TypeSize(MinBits, EltCnt.isScalable());
}
default:
llvm_unreachable("DataLayout::getTypeSizeInBits(): Unsupported type");
}
}
} // end namespace llvm
#endif // LLVM_IR_DATALAYOUT_H

View File

@@ -0,0 +1,164 @@
//===- DebugInfo.h - Debug Information Helpers ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a bunch of datatypes that are useful for creating and
// walking debug info in LLVM IR form. They essentially provide wrappers around
// the information in the global variables that's needed when constructing the
// DWARF information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DEBUGINFO_H
#define LLVM_IR_DEBUGINFO_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/TinyPtrVector.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/DebugInfoMetadata.h"
namespace llvm {
class DbgDeclareInst;
class DbgValueInst;
class DbgVariableIntrinsic;
class Instruction;
class Module;
/// Finds all intrinsics declaring local variables as living in the memory that
/// 'V' points to. This may include a mix of dbg.declare and
/// dbg.addr intrinsics.
TinyPtrVector<DbgVariableIntrinsic *> FindDbgAddrUses(Value *V);
/// Like \c FindDbgAddrUses, but only returns dbg.declare intrinsics, not
/// dbg.addr.
TinyPtrVector<DbgDeclareInst *> FindDbgDeclareUses(Value *V);
/// Finds the llvm.dbg.value intrinsics describing a value.
void findDbgValues(SmallVectorImpl<DbgValueInst *> &DbgValues, Value *V);
/// Finds the debug info intrinsics describing a value.
void findDbgUsers(SmallVectorImpl<DbgVariableIntrinsic *> &DbgInsts, Value *V);
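// Example (illustrative sketch): collecting the llvm.dbg.value intrinsics
// that describe a value V before rewriting it:
//
//   SmallVector<DbgValueInst *, 4> DbgValues;
//   findDbgValues(DbgValues, V);
//   for (DbgValueInst *DVI : DbgValues) {
//     // update or salvage DVI here
//   }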
/// Find subprogram that is enclosing this scope.
DISubprogram *getDISubprogram(const MDNode *Scope);
/// Strip debug info in the module if it exists.
///
/// To do this, we remove all calls to the debugger intrinsics and any named
/// metadata for debugging. We also remove debug locations for instructions.
/// Return true if module is modified.
bool StripDebugInfo(Module &M);
bool stripDebugInfo(Function &F);
/// Downgrade the debug info in a module to contain only line table information.
///
/// In order to convert debug info to what -gline-tables-only would have
/// created, this does the following:
/// 1) Delete all debug intrinsics.
/// 2) Delete all non-CU named metadata debug info nodes.
/// 3) Create new DebugLocs for each instruction.
/// 4) Create a new CU debug info, and similarly for every metadata node
/// that's reachable from the CU debug info.
/// All debug type metadata nodes are unreachable and garbage collected.
bool stripNonLineTableDebugInfo(Module &M);
/// Update the debug locations contained within the MD_loop metadata attached
/// to the instruction \p I, if one exists. \p Updater is applied to each
/// Metadata operand in the MD_loop metadata: the returned value is included in the
/// updated loop metadata node if it is non-null.
void updateLoopMetadataDebugLocations(
Instruction &I, function_ref<Metadata *(Metadata *)> Updater);
/// Return Debug Info Metadata Version by checking module flags.
unsigned getDebugMetadataVersionFromModule(const Module &M);
/// Utility to find all debug info in a module.
///
/// DebugInfoFinder tries to list all debug info MDNodes used in a module. To
/// list debug info MDNodes used by an instruction, DebugInfoFinder uses
/// processDeclare, processValue and processLocation to handle DbgDeclareInst,
/// DbgValueInst and DbgLoc attached to instructions. processModule will go
/// through all DICompileUnits in llvm.dbg.cu and list debug info MDNodes
/// used by the CUs.
class DebugInfoFinder {
public:
/// Process entire module and collect debug info anchors.
void processModule(const Module &M);
/// Process a single instruction and collect debug info anchors.
void processInstruction(const Module &M, const Instruction &I);
/// Process DbgVariableIntrinsic.
void processVariable(const Module &M, const DbgVariableIntrinsic &DVI);
/// Process debug info location.
void processLocation(const Module &M, const DILocation *Loc);
/// Process subprogram.
void processSubprogram(DISubprogram *SP);
/// Clear all lists.
void reset();
private:
void processCompileUnit(DICompileUnit *CU);
void processScope(DIScope *Scope);
void processType(DIType *DT);
bool addCompileUnit(DICompileUnit *CU);
bool addGlobalVariable(DIGlobalVariableExpression *DIG);
bool addScope(DIScope *Scope);
bool addSubprogram(DISubprogram *SP);
bool addType(DIType *DT);
public:
using compile_unit_iterator =
SmallVectorImpl<DICompileUnit *>::const_iterator;
using subprogram_iterator = SmallVectorImpl<DISubprogram *>::const_iterator;
using global_variable_expression_iterator =
SmallVectorImpl<DIGlobalVariableExpression *>::const_iterator;
using type_iterator = SmallVectorImpl<DIType *>::const_iterator;
using scope_iterator = SmallVectorImpl<DIScope *>::const_iterator;
iterator_range<compile_unit_iterator> compile_units() const {
return make_range(CUs.begin(), CUs.end());
}
iterator_range<subprogram_iterator> subprograms() const {
return make_range(SPs.begin(), SPs.end());
}
iterator_range<global_variable_expression_iterator> global_variables() const {
return make_range(GVs.begin(), GVs.end());
}
iterator_range<type_iterator> types() const {
return make_range(TYs.begin(), TYs.end());
}
iterator_range<scope_iterator> scopes() const {
return make_range(Scopes.begin(), Scopes.end());
}
unsigned compile_unit_count() const { return CUs.size(); }
unsigned global_variable_count() const { return GVs.size(); }
unsigned subprogram_count() const { return SPs.size(); }
unsigned type_count() const { return TYs.size(); }
unsigned scope_count() const { return Scopes.size(); }
private:
SmallVector<DICompileUnit *, 8> CUs;
SmallVector<DISubprogram *, 8> SPs;
SmallVector<DIGlobalVariableExpression *, 8> GVs;
SmallVector<DIType *, 8> TYs;
SmallVector<DIScope *, 8> Scopes;
SmallPtrSet<const MDNode *, 32> NodesSeen;
};
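// Example (illustrative sketch): enumerating the debug info anchors of a
// module M:
//
//   DebugInfoFinder Finder;
//   Finder.processModule(M);
//   for (DICompileUnit *CU : Finder.compile_units()) {
//     // visit each compile unit
//   }
//   for (DISubprogram *SP : Finder.subprograms()) {
//     // visit each subprogram
//   }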
} // end namespace llvm
#endif // LLVM_IR_DEBUGINFO_H

View File

@@ -0,0 +1,103 @@
//===- llvm/IR/DebugInfoFlags.def - Debug info flag definitions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through debug info flags.
//
//===----------------------------------------------------------------------===//
#if !(defined HANDLE_DI_FLAG || defined HANDLE_DISP_FLAG)
#error "Missing macro definition of HANDLE_DI*"
#endif
#ifndef HANDLE_DI_FLAG
#define HANDLE_DI_FLAG(ID, NAME)
#endif
#ifndef HANDLE_DISP_FLAG
#define HANDLE_DISP_FLAG(ID, NAME)
#endif
// General flags kept in DINode.
HANDLE_DI_FLAG(0, Zero) // Use it as zero value.
// For example: void foo(DIFlags Flags = FlagZero).
HANDLE_DI_FLAG(1, Private)
HANDLE_DI_FLAG(2, Protected)
HANDLE_DI_FLAG(3, Public)
HANDLE_DI_FLAG((1 << 2), FwdDecl)
HANDLE_DI_FLAG((1 << 3), AppleBlock)
// Used to be BlockByRef, can be reused for anything except DICompositeType.
HANDLE_DI_FLAG((1 << 4), ReservedBit4)
HANDLE_DI_FLAG((1 << 5), Virtual)
HANDLE_DI_FLAG((1 << 6), Artificial)
HANDLE_DI_FLAG((1 << 7), Explicit)
HANDLE_DI_FLAG((1 << 8), Prototyped)
HANDLE_DI_FLAG((1 << 9), ObjcClassComplete)
HANDLE_DI_FLAG((1 << 10), ObjectPointer)
HANDLE_DI_FLAG((1 << 11), Vector)
HANDLE_DI_FLAG((1 << 12), StaticMember)
HANDLE_DI_FLAG((1 << 13), LValueReference)
HANDLE_DI_FLAG((1 << 14), RValueReference)
HANDLE_DI_FLAG((1 << 15), ExportSymbols)
HANDLE_DI_FLAG((1 << 16), SingleInheritance)
HANDLE_DI_FLAG((2 << 16), MultipleInheritance)
HANDLE_DI_FLAG((3 << 16), VirtualInheritance)
HANDLE_DI_FLAG((1 << 18), IntroducedVirtual)
HANDLE_DI_FLAG((1 << 19), BitField)
HANDLE_DI_FLAG((1 << 20), NoReturn)
HANDLE_DI_FLAG((1 << 22), TypePassByValue)
HANDLE_DI_FLAG((1 << 23), TypePassByReference)
HANDLE_DI_FLAG((1 << 24), EnumClass)
HANDLE_DI_FLAG((1 << 25), Thunk)
HANDLE_DI_FLAG((1 << 26), NonTrivial)
HANDLE_DI_FLAG((1 << 27), BigEndian)
HANDLE_DI_FLAG((1 << 28), LittleEndian)
HANDLE_DI_FLAG((1 << 29), AllCallsDescribed)
// To avoid needing a dedicated value for IndirectVirtualBase, we use
// the bitwise or of Virtual and FwdDecl, which does not otherwise
// make sense for inheritance.
HANDLE_DI_FLAG((1 << 2) | (1 << 5), IndirectVirtualBase)
#ifdef DI_FLAG_LARGEST_NEEDED
// Intended to be used with ADT/BitmaskEnum.h.
// NOTE: Always must be equal to the largest flag; check this when adding new flags.
HANDLE_DI_FLAG((1 << 29), Largest)
#undef DI_FLAG_LARGEST_NEEDED
#endif
// Subprogram-specific flags kept in DISubprogram.
// Use this as a zero/initialization value.
// For example: void foo(DISPFlags Flags = SPFlagZero).
HANDLE_DISP_FLAG(0, Zero)
// Virtuality is a two-bit enum field in the LSB of the word.
// Values should match DW_VIRTUALITY_*.
HANDLE_DISP_FLAG(1u, Virtual)
HANDLE_DISP_FLAG(2u, PureVirtual)
HANDLE_DISP_FLAG((1u << 2), LocalToUnit)
HANDLE_DISP_FLAG((1u << 3), Definition)
HANDLE_DISP_FLAG((1u << 4), Optimized)
HANDLE_DISP_FLAG((1u << 5), Pure)
HANDLE_DISP_FLAG((1u << 6), Elemental)
HANDLE_DISP_FLAG((1u << 7), Recursive)
HANDLE_DISP_FLAG((1u << 8), MainSubprogram)
// This flag may also be used in the future, when adding support
// for defaulted functions.
HANDLE_DISP_FLAG((1u << 9), Deleted)
HANDLE_DISP_FLAG((1u << 11), ObjCDirect)
#ifdef DISP_FLAG_LARGEST_NEEDED
// Intended to be used with ADT/BitmaskEnum.h.
// NOTE: Always must be equal to largest flag, check this when adding new flags.
HANDLE_DISP_FLAG((1 << 11), Largest)
#undef DISP_FLAG_LARGEST_NEEDED
#endif
#undef HANDLE_DI_FLAG
#undef HANDLE_DISP_FLAG
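// Example (illustrative sketch; the enum name is hypothetical): a client can
// materialize the DINode flags as an enum by defining HANDLE_DI_FLAG before
// including this file (HANDLE_DISP_FLAG then defaults to a no-op):
//
//   enum DIFlags : uint32_t {
//   #define HANDLE_DI_FLAG(ID, NAME) Flag##NAME = ID,
//   #include "llvm/IR/DebugInfoFlags.def"
//   };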

File diff suppressed because it is too large

View File

@@ -0,0 +1,116 @@
//===- DebugLoc.h - Debug Location Information ------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a number of light weight data structures used
// to describe and track debug location information.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DEBUGLOC_H
#define LLVM_IR_DEBUGLOC_H
#include "llvm/IR/TrackingMDRef.h"
#include "llvm/Support/DataTypes.h"
namespace llvm {
class LLVMContext;
class raw_ostream;
class DILocation;
/// A debug info location.
///
/// This class is a wrapper around a tracking reference to an \a DILocation
/// pointer.
///
/// To avoid extra includes, \a DebugLoc doubles the \a DILocation API with
/// one based on relatively opaque \a MDNode pointers.
class DebugLoc {
TrackingMDNodeRef Loc;
public:
DebugLoc() = default;
/// Construct from a \a DILocation.
DebugLoc(const DILocation *L);
/// Construct from an \a MDNode.
///
/// Note: if \c N is not a \a DILocation, a verifier check will fail, and
/// accessors will crash. However, construction from other nodes is
/// supported in order to handle forward references when reading textual
/// IR.
explicit DebugLoc(const MDNode *N);
/// Get the underlying \a DILocation.
///
/// \pre !*this or \c isa<DILocation>(getAsMDNode()).
/// @{
DILocation *get() const;
operator DILocation *() const { return get(); }
DILocation *operator->() const { return get(); }
DILocation &operator*() const { return *get(); }
/// @}
/// Check for null.
///
/// Check for null in a way that is safe with broken debug info. Unlike
/// the conversion to \c DILocation, this doesn't require that \c Loc is of
/// the right type. Important for cases like \a llvm::StripDebugInfo() and
/// \a Instruction::hasMetadata().
explicit operator bool() const { return Loc; }
/// Check whether this has a trivial destructor.
bool hasTrivialDestructor() const { return Loc.hasTrivialDestructor(); }
enum { ReplaceLastInlinedAt = true };
/// Rebuild the entire inlined-at chain for this instruction so that the top of
/// the chain is now inlined-at the new call site.
/// \param InlinedAt The new outermost inlined-at in the chain.
static DebugLoc appendInlinedAt(const DebugLoc &DL, DILocation *InlinedAt,
LLVMContext &Ctx,
DenseMap<const MDNode *, MDNode *> &Cache);
unsigned getLine() const;
unsigned getCol() const;
MDNode *getScope() const;
DILocation *getInlinedAt() const;
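// Example (illustrative sketch): walking the full inlined-at chain of a
// DebugLoc DL, from the innermost location outwards:
//
//   for (DILocation *L = DL.get(); L; L = L->getInlinedAt()) {
//     // visit L
//   }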
/// Get the fully inlined-at scope for a DebugLoc.
///
/// Walks the inlined-at chain and returns the scope of its last (outermost)
/// location.
MDNode *getInlinedAtScope() const;
/// Find the debug info location for the start of the function.
///
/// Walk up the scope chain of given debug loc and find line number info
/// for the function.
///
/// FIXME: Remove this. Users should use DILocation/DILocalScope API to
/// find the subprogram, and then DILocation::get().
DebugLoc getFnDebugLoc() const;
/// Return \c this as a bare \a MDNode.
MDNode *getAsMDNode() const { return Loc; }
/// Check if the DebugLoc corresponds to implicit code.
bool isImplicitCode() const;
void setImplicitCode(bool ImplicitCode);
bool operator==(const DebugLoc &DL) const { return Loc == DL.Loc; }
bool operator!=(const DebugLoc &DL) const { return Loc != DL.Loc; }
void dump() const;
/// prints source location /path/to/file.exe:line:col @[inlined at]
void print(raw_ostream &OS) const;
};
} // end namespace llvm
#endif // LLVM_IR_DEBUGLOC_H

View File

@@ -0,0 +1,742 @@
//===- llvm/DerivedTypes.h - Classes for handling data types ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declarations of classes that represent "derived
// types". These are things like "arrays of x" or "structure of x, y, z" or
// "function returning x taking (y,z) as parameters", etc...
//
// The implementations of these classes live in the Type.cpp file.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DERIVEDTYPES_H
#define LLVM_IR_DERIVEDTYPES_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Type.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/TypeSize.h"
#include <cassert>
#include <cstdint>
namespace llvm {
class Value;
class APInt;
class LLVMContext;
/// Class to represent integer types. Note that this class is also used to
/// represent the built-in integer types: Int1Ty, Int8Ty, Int16Ty, Int32Ty and
/// Int64Ty.
class IntegerType : public Type {
friend class LLVMContextImpl;
protected:
explicit IntegerType(LLVMContext &C, unsigned NumBits) : Type(C, IntegerTyID){
setSubclassData(NumBits);
}
public:
/// This enum is just used to hold constants we need for IntegerType.
enum {
MIN_INT_BITS = 1, ///< Minimum number of bits that can be specified
MAX_INT_BITS = (1<<23) ///< Maximum number of bits that can be specified
///< Note that bit width is stored in the Type class's SubclassData field
///< which has 24 bits. SelectionDAG type legalization can require a
///< power of 2 IntegerType, so limit to the largest representable power
///< of 2, 8388608.
};
/// This static method is the primary way of constructing an IntegerType.
/// If an IntegerType with the same NumBits value was previously instantiated,
/// that instance will be returned. Otherwise a new one will be created. Only
/// one instance with a given NumBits value is ever created.
/// Get or create an IntegerType instance.
static IntegerType *get(LLVMContext &C, unsigned NumBits);
/// Returns a type twice as wide as the input type.
IntegerType *getExtendedType() const {
return Type::getIntNTy(getContext(), 2 * getScalarSizeInBits());
}
/// Get the number of bits in this IntegerType
unsigned getBitWidth() const { return getSubclassData(); }
/// Return a bitmask with ones set for all of the bits that can be set by an
/// unsigned version of this type. This is 0xFF for i8, 0xFFFF for i16, etc.
uint64_t getBitMask() const {
return ~uint64_t(0UL) >> (64-getBitWidth());
}
/// Return a uint64_t with just the most significant bit set (the sign bit, if
/// the value is treated as a signed number).
uint64_t getSignBit() const {
return 1ULL << (getBitWidth()-1);
}
/// Get a bit mask for this type.
/// @returns a bit mask with ones set for all the bits of this type.
/// For example, this is 0xFF for an 8 bit integer, 0xFFFF for i16, etc.
APInt getMask() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == IntegerTyID;
}
};
unsigned Type::getIntegerBitWidth() const {
return cast<IntegerType>(this)->getBitWidth();
}
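/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// combines the two masks above; for i8, getBitMask() is 0xFF and
/// getSignBit() is 0x80, so the result is the maximum signed value 0x7F.
inline uint64_t getMaxSignedValue(IntegerType *Ty) {
  return Ty->getBitMask() ^ Ty->getSignBit();
}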
/// Class to represent function types
///
class FunctionType : public Type {
FunctionType(Type *Result, ArrayRef<Type*> Params, bool IsVarArgs);
public:
FunctionType(const FunctionType &) = delete;
FunctionType &operator=(const FunctionType &) = delete;
/// This static method is the primary way of constructing a FunctionType.
static FunctionType *get(Type *Result,
ArrayRef<Type*> Params, bool isVarArg);
/// Create a FunctionType taking no parameters.
static FunctionType *get(Type *Result, bool isVarArg);
/// Return true if the specified type is valid as a return type.
static bool isValidReturnType(Type *RetTy);
/// Return true if the specified type is valid as an argument type.
static bool isValidArgumentType(Type *ArgTy);
bool isVarArg() const { return getSubclassData()!=0; }
Type *getReturnType() const { return ContainedTys[0]; }
using param_iterator = Type::subtype_iterator;
param_iterator param_begin() const { return ContainedTys + 1; }
param_iterator param_end() const { return &ContainedTys[NumContainedTys]; }
ArrayRef<Type *> params() const {
return makeArrayRef(param_begin(), param_end());
}
/// Parameter type accessors.
Type *getParamType(unsigned i) const { return ContainedTys[i+1]; }
/// Return the number of fixed parameters this function type requires.
/// This does not consider varargs.
unsigned getNumParams() const { return NumContainedTys - 1; }
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == FunctionTyID;
}
};
static_assert(alignof(FunctionType) >= alignof(Type *),
"Alignment sufficient for objects appended to FunctionType");
bool Type::isFunctionVarArg() const {
return cast<FunctionType>(this)->isVarArg();
}
Type *Type::getFunctionParamType(unsigned i) const {
return cast<FunctionType>(this)->getParamType(i);
}
unsigned Type::getFunctionNumParams() const {
return cast<FunctionType>(this)->getNumParams();
}
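/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// builds the printf-like type  i32 (i8*, ...)  with a single fixed parameter
/// followed by varargs.
inline FunctionType *getPrintfLikeFnTy(LLVMContext &Ctx) {
  Type *RetTy = Type::getInt32Ty(Ctx);
  Type *FmtTy = Type::getInt8PtrTy(Ctx); // i8* in address space 0
  return FunctionType::get(RetTy, {FmtTy}, /*isVarArg=*/true);
}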
/// A handy container for a FunctionType+Callee-pointer pair, which can be
/// passed around as a single entity. This assists in replacing the use of
/// PointerType::getElementType() to access the function's type, since that's
/// slated for removal as part of the [opaque pointer types] project.
class FunctionCallee {
public:
// Allow implicit conversion from types which have a getFunctionType member
// (e.g. Function and InlineAsm).
template <typename T, typename U = decltype(&T::getFunctionType)>
FunctionCallee(T *Fn)
: FnTy(Fn ? Fn->getFunctionType() : nullptr), Callee(Fn) {}
FunctionCallee(FunctionType *FnTy, Value *Callee)
: FnTy(FnTy), Callee(Callee) {
assert((FnTy == nullptr) == (Callee == nullptr));
}
FunctionCallee(std::nullptr_t) {}
FunctionCallee() = default;
FunctionType *getFunctionType() { return FnTy; }
Value *getCallee() { return Callee; }
explicit operator bool() { return Callee; }
private:
FunctionType *FnTy = nullptr;
Value *Callee = nullptr;
};
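/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// the type travels with the callee pointer, so call emission can consult the
/// function type without going through the pointer's pointee type.
inline FunctionType *getCalleeFnTy(FunctionCallee FC) {
  return FC ? FC.getFunctionType() : nullptr;
}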
/// Class to represent struct types. There are two different kinds of struct
/// types: Literal structs and Identified structs.
///
/// Literal struct types (e.g. { i32, i32 }) are uniqued structurally, and must
/// always have a body when created. You can get one of these by using one of
/// the StructType::get() forms.
///
/// Identified structs (e.g. %foo or %42) may optionally have a name and are not
/// uniqued. The names for identified structs are managed at the LLVMContext
/// level, so there can only be a single identified struct with a given name in
/// a particular LLVMContext. Identified structs may also optionally be opaque
/// (have no body specified). You get one of these by using one of the
/// StructType::create() forms.
///
/// Independent of what kind of struct you have, the body of a struct type is
/// laid out in memory consecutively with the elements directly one after the
/// other (if the struct is packed) or (if not packed) with padding between the
/// elements as defined by DataLayout (which is required to match what the code
/// generator for a target expects).
///
class StructType : public Type {
StructType(LLVMContext &C) : Type(C, StructTyID) {}
enum {
/// This is the contents of the SubClassData field.
SCDB_HasBody = 1,
SCDB_Packed = 2,
SCDB_IsLiteral = 4,
SCDB_IsSized = 8
};
/// For an identified struct that actually has a name, this is a pointer to
/// the symbol table entry (maintained by LLVMContext) for the struct.
/// This is null if the type is a literal struct or if it is an identified
/// type that has an empty name.
void *SymbolTableEntry = nullptr;
public:
StructType(const StructType &) = delete;
StructType &operator=(const StructType &) = delete;
/// This creates an identified struct.
static StructType *create(LLVMContext &Context, StringRef Name);
static StructType *create(LLVMContext &Context);
static StructType *create(ArrayRef<Type *> Elements, StringRef Name,
bool isPacked = false);
static StructType *create(ArrayRef<Type *> Elements);
static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements,
StringRef Name, bool isPacked = false);
static StructType *create(LLVMContext &Context, ArrayRef<Type *> Elements);
template <class... Tys>
static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
create(StringRef Name, Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
return create(ArrayRef<Type *>({elt1, elts...}), Name);
}
/// This static method is the primary way to create a literal StructType.
static StructType *get(LLVMContext &Context, ArrayRef<Type*> Elements,
bool isPacked = false);
/// Create an empty structure type.
static StructType *get(LLVMContext &Context, bool isPacked = false);
/// This static method is a convenience method for creating structure types by
/// specifying the elements as arguments. Note that this method always returns
/// a non-packed struct, and requires at least one element type.
template <class... Tys>
static std::enable_if_t<are_base_of<Type, Tys...>::value, StructType *>
get(Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
LLVMContext &Ctx = elt1->getContext();
return StructType::get(Ctx, ArrayRef<Type *>({elt1, elts...}));
}
/// Return the type with the specified name, or null if there is none by that
/// name.
static StructType *getTypeByName(LLVMContext &C, StringRef Name);
bool isPacked() const { return (getSubclassData() & SCDB_Packed) != 0; }
/// Return true if this type is uniqued by structural equivalence, false if it
/// is a struct definition.
bool isLiteral() const { return (getSubclassData() & SCDB_IsLiteral) != 0; }
/// Return true if this is a type with an identity that has no body specified
/// yet. These print as 'opaque' in .ll files.
bool isOpaque() const { return (getSubclassData() & SCDB_HasBody) == 0; }
/// isSized - Return true if this is a sized type.
bool isSized(SmallPtrSetImpl<Type *> *Visited = nullptr) const;
/// Returns true if this struct contains a scalable vector.
bool containsScalableVectorType() const;
/// Return true if this is a named struct that has a non-empty name.
bool hasName() const { return SymbolTableEntry != nullptr; }
/// Return the name for this struct type if it has an identity.
/// This may return an empty string for an unnamed struct type. Do not call
/// this on a literal type.
StringRef getName() const;
/// Change the name of this type to the specified name, or to a name with a
/// suffix if there is a collision. Do not call this on a literal type.
void setName(StringRef Name);
/// Specify a body for an opaque identified type.
void setBody(ArrayRef<Type*> Elements, bool isPacked = false);
template <typename... Tys>
std::enable_if_t<are_base_of<Type, Tys...>::value, void>
setBody(Type *elt1, Tys *... elts) {
assert(elt1 && "Cannot create a struct type with no elements with this");
setBody(ArrayRef<Type *>({elt1, elts...}));
}
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
// Iterator access to the elements.
using element_iterator = Type::subtype_iterator;
element_iterator element_begin() const { return ContainedTys; }
element_iterator element_end() const { return &ContainedTys[NumContainedTys];}
ArrayRef<Type *> elements() const {
return makeArrayRef(element_begin(), element_end());
}
/// Return true if this is layout identical to the specified struct.
bool isLayoutIdentical(StructType *Other) const;
/// Random access to the elements
unsigned getNumElements() const { return NumContainedTys; }
Type *getElementType(unsigned N) const {
assert(N < NumContainedTys && "Element number out of range!");
return ContainedTys[N];
}
/// Given an index value into the type, return the type of the element.
Type *getTypeAtIndex(const Value *V) const;
Type *getTypeAtIndex(unsigned N) const { return getElementType(N); }
bool indexValid(const Value *V) const;
bool indexValid(unsigned Idx) const { return Idx < getNumElements(); }
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == StructTyID;
}
};
StringRef Type::getStructName() const {
return cast<StructType>(this)->getName();
}
unsigned Type::getStructNumElements() const {
return cast<StructType>(this)->getNumElements();
}
Type *Type::getStructElementType(unsigned N) const {
return cast<StructType>(this)->getElementType(N);
}
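/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// creates the identified struct %pair = type { i32, i32 } by giving a body
/// to an initially opaque type.
inline StructType *createPairStruct(LLVMContext &Ctx) {
  StructType *Pair = StructType::create(Ctx, "pair"); // opaque identified type
  Type *Int32Ty = Type::getInt32Ty(Ctx);
  Pair->setBody({Int32Ty, Int32Ty}); // non-packed; Pair is no longer opaque
  return Pair;
}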
/// Class to represent array types.
class ArrayType : public Type {
/// The element type of the array.
Type *ContainedType;
/// Number of elements in the array.
uint64_t NumElements;
ArrayType(Type *ElType, uint64_t NumEl);
public:
ArrayType(const ArrayType &) = delete;
ArrayType &operator=(const ArrayType &) = delete;
uint64_t getNumElements() const { return NumElements; }
Type *getElementType() const { return ContainedType; }
/// This static method is the primary way to construct an ArrayType
static ArrayType *get(Type *ElementType, uint64_t NumElements);
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == ArrayTyID;
}
};
uint64_t Type::getArrayNumElements() const {
return cast<ArrayType>(this)->getNumElements();
}
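/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// builds [40 x i8], as one might use for a fixed-size byte buffer; note the
/// element count is a plain uint64_t rather than a Value.
inline ArrayType *getByteBufferTy(LLVMContext &Ctx, uint64_t NumBytes = 40) {
  return ArrayType::get(Type::getInt8Ty(Ctx), NumBytes);
}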
/// Base class of all SIMD vector types
class VectorType : public Type {
/// A fully specified VectorType is of the form <vscale x n x Ty>. 'n' is the
/// minimum number of elements of type Ty contained within the vector, and
/// 'vscale x' indicates that the total element count is an integer multiple
/// of 'n', where the multiple is either guaranteed to be one, or is
/// statically unknown at compile time.
///
/// If the multiple is known to be 1, then the extra term is discarded in
/// textual IR:
///
/// <4 x i32> - a vector containing 4 i32s
/// <vscale x 4 x i32> - a vector containing an unknown integer multiple
/// of 4 i32s
/// The element type of the vector.
Type *ContainedType;
protected:
/// The element quantity of this vector. The meaning of this value depends
/// on the type of vector:
/// - For FixedVectorType = <ElementQuantity x ty>, there are
/// exactly ElementQuantity elements in this vector.
/// - For ScalableVectorType = <vscale x ElementQuantity x ty>,
/// there are vscale * ElementQuantity elements in this vector, where
/// vscale is a runtime-constant integer greater than 0.
const unsigned ElementQuantity;
VectorType(Type *ElType, unsigned EQ, Type::TypeID TID);
public:
VectorType(const VectorType &) = delete;
VectorType &operator=(const VectorType &) = delete;
Type *getElementType() const { return ContainedType; }
/// This static method is the primary way to construct a VectorType.
static VectorType *get(Type *ElementType, ElementCount EC);
static VectorType *get(Type *ElementType, unsigned NumElements,
bool Scalable) {
return VectorType::get(ElementType,
ElementCount::get(NumElements, Scalable));
}
static VectorType *get(Type *ElementType, const VectorType *Other) {
return VectorType::get(ElementType, Other->getElementCount());
}
/// This static method gets a VectorType with the same number of elements as
/// the input type, and the element type is an integer type of the same width
/// as the input element type.
static VectorType *getInteger(VectorType *VTy) {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
assert(EltBits && "Element size must be of a non-zero size");
Type *EltTy = IntegerType::get(VTy->getContext(), EltBits);
return VectorType::get(EltTy, VTy->getElementCount());
}
/// This static method is like getInteger except that the element types are
/// twice as wide as the elements in the input type.
static VectorType *getExtendedElementVectorType(VectorType *VTy) {
assert(VTy->isIntOrIntVectorTy() && "VTy expected to be a vector of ints.");
auto *EltTy = cast<IntegerType>(VTy->getElementType());
return VectorType::get(EltTy->getExtendedType(), VTy->getElementCount());
}
/// This static method gets a VectorType with the same number of elements as
/// the input type, and the element type is an integer or float type which
/// is half as wide as the elements in the input type.
static VectorType *getTruncatedElementVectorType(VectorType *VTy) {
Type *EltTy;
if (VTy->getElementType()->isFloatingPointTy()) {
switch(VTy->getElementType()->getTypeID()) {
case DoubleTyID:
EltTy = Type::getFloatTy(VTy->getContext());
break;
case FloatTyID:
EltTy = Type::getHalfTy(VTy->getContext());
break;
default:
llvm_unreachable("Cannot create narrower fp vector element type");
}
} else {
unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
assert((EltBits & 1) == 0 &&
"Cannot truncate vector element with odd bit-width");
EltTy = IntegerType::get(VTy->getContext(), EltBits / 2);
}
return VectorType::get(EltTy, VTy->getElementCount());
}
/// This static method returns a VectorType with a larger number of elements
/// of a narrower type than the input: each subdivision doubles the element
/// count and halves the element width, preserving the total bit width. For
/// example, a <4 x i32> subdivided twice yields <16 x i8>.
static VectorType *getSubdividedVectorType(VectorType *VTy, int NumSubdivs) {
for (int i = 0; i < NumSubdivs; ++i) {
VTy = VectorType::getDoubleElementsVectorType(VTy);
VTy = VectorType::getTruncatedElementVectorType(VTy);
}
return VTy;
}
/// This static method returns a VectorType with half as many elements as the
/// input type and the same element type.
static VectorType *getHalfElementsVectorType(VectorType *VTy) {
auto EltCnt = VTy->getElementCount();
assert(EltCnt.isKnownEven() &&
"Cannot halve vector with odd number of elements.");
return VectorType::get(VTy->getElementType(),
EltCnt.divideCoefficientBy(2));
}
/// This static method returns a VectorType with twice as many elements as the
/// input type and the same element type.
static VectorType *getDoubleElementsVectorType(VectorType *VTy) {
auto EltCnt = VTy->getElementCount();
assert((EltCnt.getKnownMinValue() * 2ull) <= UINT_MAX &&
"Too many elements in vector");
return VectorType::get(VTy->getElementType(), EltCnt * 2);
}
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
/// Return an ElementCount instance to represent the (possibly scalable)
/// number of elements in the vector.
inline ElementCount getElementCount() const;
/// Methods for support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == FixedVectorTyID ||
T->getTypeID() == ScalableVectorTyID;
}
};
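/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// each subdivision step doubles the element count and halves the element
/// width, so <4 x i32> becomes <8 x i16> and then <16 x i8>.
inline VectorType *getSubdividedI8Vector(LLVMContext &Ctx) {
  auto *V4i32 = VectorType::get(Type::getInt32Ty(Ctx), 4, /*Scalable=*/false);
  return VectorType::getSubdividedVectorType(V4i32, 2); // <16 x i8>
}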
/// Class to represent fixed width SIMD vectors
class FixedVectorType : public VectorType {
protected:
FixedVectorType(Type *ElTy, unsigned NumElts)
: VectorType(ElTy, NumElts, FixedVectorTyID) {}
public:
static FixedVectorType *get(Type *ElementType, unsigned NumElts);
static FixedVectorType *get(Type *ElementType, const FixedVectorType *FVTy) {
return get(ElementType, FVTy->getNumElements());
}
static FixedVectorType *getInteger(FixedVectorType *VTy) {
return cast<FixedVectorType>(VectorType::getInteger(VTy));
}
static FixedVectorType *getExtendedElementVectorType(FixedVectorType *VTy) {
return cast<FixedVectorType>(VectorType::getExtendedElementVectorType(VTy));
}
static FixedVectorType *getTruncatedElementVectorType(FixedVectorType *VTy) {
return cast<FixedVectorType>(
VectorType::getTruncatedElementVectorType(VTy));
}
static FixedVectorType *getSubdividedVectorType(FixedVectorType *VTy,
int NumSubdivs) {
return cast<FixedVectorType>(
VectorType::getSubdividedVectorType(VTy, NumSubdivs));
}
static FixedVectorType *getHalfElementsVectorType(FixedVectorType *VTy) {
return cast<FixedVectorType>(VectorType::getHalfElementsVectorType(VTy));
}
static FixedVectorType *getDoubleElementsVectorType(FixedVectorType *VTy) {
return cast<FixedVectorType>(VectorType::getDoubleElementsVectorType(VTy));
}
static bool classof(const Type *T) {
return T->getTypeID() == FixedVectorTyID;
}
unsigned getNumElements() const { return ElementQuantity; }
};
/// Class to represent scalable SIMD vectors
class ScalableVectorType : public VectorType {
protected:
ScalableVectorType(Type *ElTy, unsigned MinNumElts)
: VectorType(ElTy, MinNumElts, ScalableVectorTyID) {}
public:
static ScalableVectorType *get(Type *ElementType, unsigned MinNumElts);
static ScalableVectorType *get(Type *ElementType,
const ScalableVectorType *SVTy) {
return get(ElementType, SVTy->getMinNumElements());
}
static ScalableVectorType *getInteger(ScalableVectorType *VTy) {
return cast<ScalableVectorType>(VectorType::getInteger(VTy));
}
static ScalableVectorType *
getExtendedElementVectorType(ScalableVectorType *VTy) {
return cast<ScalableVectorType>(
VectorType::getExtendedElementVectorType(VTy));
}
static ScalableVectorType *
getTruncatedElementVectorType(ScalableVectorType *VTy) {
return cast<ScalableVectorType>(
VectorType::getTruncatedElementVectorType(VTy));
}
static ScalableVectorType *getSubdividedVectorType(ScalableVectorType *VTy,
int NumSubdivs) {
return cast<ScalableVectorType>(
VectorType::getSubdividedVectorType(VTy, NumSubdivs));
}
static ScalableVectorType *
getHalfElementsVectorType(ScalableVectorType *VTy) {
return cast<ScalableVectorType>(VectorType::getHalfElementsVectorType(VTy));
}
static ScalableVectorType *
getDoubleElementsVectorType(ScalableVectorType *VTy) {
return cast<ScalableVectorType>(
VectorType::getDoubleElementsVectorType(VTy));
}
/// Get the minimum number of elements in this vector. The actual number of
/// elements in the vector is an integer multiple of this value.
uint64_t getMinNumElements() const { return ElementQuantity; }
static bool classof(const Type *T) {
return T->getTypeID() == ScalableVectorTyID;
}
};
inline ElementCount VectorType::getElementCount() const {
return ElementCount::get(ElementQuantity, isa<ScalableVectorType>(this));
}
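/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// the same element quantity of 4 is an exact count for a fixed vector but
/// only a per-vscale minimum for a scalable one, which getElementCount()
/// encodes in its scalable flag.
inline bool demoElementCountKinds(LLVMContext &Ctx) {
  auto *Fixed = FixedVectorType::get(Type::getFloatTy(Ctx), 4);
  auto *Scalable = ScalableVectorType::get(Type::getFloatTy(Ctx), 4);
  return !Fixed->getElementCount().isScalable() &&
         Scalable->getElementCount().isScalable();
}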
/// Class to represent pointers.
class PointerType : public Type {
explicit PointerType(Type *ElType, unsigned AddrSpace);
explicit PointerType(LLVMContext &C, unsigned AddrSpace);
Type *PointeeTy;
public:
PointerType(const PointerType &) = delete;
PointerType &operator=(const PointerType &) = delete;
/// This constructs a pointer to an object of the specified type in a numbered
/// address space.
static PointerType *get(Type *ElementType, unsigned AddressSpace);
/// This constructs an opaque pointer to an object in a numbered address
/// space.
static PointerType *get(LLVMContext &C, unsigned AddressSpace);
/// This constructs a pointer to an object of the specified type in the
/// default address space (address space zero).
static PointerType *getUnqual(Type *ElementType) {
return PointerType::get(ElementType, 0);
}
/// This constructs an opaque pointer to an object in the
/// default address space (address space zero).
static PointerType *getUnqual(LLVMContext &C) {
return PointerType::get(C, 0);
}
/// This constructs a pointer type with the same pointee type as the input
/// PointerType (or an opaque pointer if the input PointerType is opaque) and
/// the given address space. This is only useful during the opaque pointer
/// transition.
/// TODO: remove after opaque pointer transition is complete.
static PointerType *getWithSamePointeeType(PointerType *PT,
unsigned AddressSpace) {
if (PT->isOpaque())
return get(PT->getContext(), AddressSpace);
return get(PT->PointeeTy, AddressSpace);
}
[[deprecated("Pointer element types are deprecated. You can *temporarily* "
"use Type::getPointerElementType() instead")]]
Type *getElementType() const {
assert(!isOpaque() && "Attempting to get element type of opaque pointer");
return PointeeTy;
}
bool isOpaque() const { return !PointeeTy; }
/// Return true if the specified type is valid as an element type.
static bool isValidElementType(Type *ElemTy);
/// Return true if we can load or store from a pointer to this type.
static bool isLoadableOrStorableType(Type *ElemTy);
/// Return the address space of the Pointer type.
inline unsigned getAddressSpace() const { return getSubclassData(); }
/// Return true if either this is an opaque pointer type or if this pointee
/// type matches Ty. Primarily used for checking if an instruction's pointer
/// operands are valid types. Will be useless after non-opaque pointers are
/// removed.
bool isOpaqueOrPointeeTypeMatches(Type *Ty) {
return isOpaque() || PointeeTy == Ty;
}
/// Return true if both pointer types have the same element type. Two opaque
/// pointers are considered to have the same element type, while an opaque
/// and a non-opaque pointer have different element types.
/// TODO: Remove after opaque pointer transition is complete.
bool hasSameElementTypeAs(PointerType *Other) {
return PointeeTy == Other->PointeeTy;
}
/// Implement support type inquiry through isa, cast, and dyn_cast.
static bool classof(const Type *T) {
return T->getTypeID() == PointerTyID;
}
};
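/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// a typed pointer records its pointee while an opaque pointer does not, and
/// isOpaqueOrPointeeTypeMatches() accepts either form.
inline bool demoPointeeMatch(LLVMContext &Ctx) {
  Type *Int8Ty = Type::getInt8Ty(Ctx);
  PointerType *Typed = PointerType::getUnqual(Int8Ty); // i8*
  PointerType *Opaque = PointerType::getUnqual(Ctx);   // ptr
  return Typed->isOpaqueOrPointeeTypeMatches(Int8Ty) &&
         Opaque->isOpaqueOrPointeeTypeMatches(Int8Ty);
}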
Type *Type::getExtendedType() const {
assert(
isIntOrIntVectorTy() &&
"Original type expected to be a vector of integers or a scalar integer.");
if (auto *VTy = dyn_cast<VectorType>(this))
return VectorType::getExtendedElementVectorType(
const_cast<VectorType *>(VTy));
return cast<IntegerType>(this)->getExtendedType();
}
Type *Type::getWithNewType(Type *EltTy) const {
if (auto *VTy = dyn_cast<VectorType>(this))
return VectorType::get(EltTy, VTy->getElementCount());
return EltTy;
}
Type *Type::getWithNewBitWidth(unsigned NewBitWidth) const {
assert(
isIntOrIntVectorTy() &&
"Original type expected to be a vector of integers or a scalar integer.");
return getWithNewType(getIntNTy(getContext(), NewBitWidth));
}
unsigned Type::getPointerAddressSpace() const {
return cast<PointerType>(getScalarType())->getAddressSpace();
}
} // end namespace llvm
#endif // LLVM_IR_DERIVEDTYPES_H

View File

@@ -0,0 +1,44 @@
//===- DerivedUser.h - Base for non-IR Users --------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DERIVEDUSER_H
#define LLVM_IR_DERIVEDUSER_H
#include "llvm/IR/User.h"
namespace llvm {
class Type;
class Use;
/// Extension point for the Value hierarchy. All classes outside of lib/IR
/// that wish to inherit from User should inherit from DerivedUser instead.
/// Inheriting from this class is discouraged.
///
/// Generally speaking, Value is the base of a closed class hierarchy
/// that can't be extended by code outside of lib/IR. This class creates a
/// loophole that allows classes outside of lib/IR to extend User to leverage
/// its use/def list machinery.
class DerivedUser : public User {
protected:
using DeleteValueTy = void (*)(DerivedUser *);
private:
friend class Value;
DeleteValueTy DeleteValue;
public:
DerivedUser(Type *Ty, unsigned VK, Use *U, unsigned NumOps,
DeleteValueTy DeleteValue)
: User(Ty, VK, U, NumOps), DeleteValue(DeleteValue) {}
};
} // end namespace llvm
#endif // LLVM_IR_DERIVEDUSER_H

View File

@@ -0,0 +1,74 @@
//===- DiagnosticHandler.h - DiagnosticHandler class for LLVM ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
// Base DiagnosticHandler class declaration. Derive from this class to provide
// custom diagnostic reporting.
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DIAGNOSTICHANDLER_H
#define LLVM_IR_DIAGNOSTICHANDLER_H
#include "llvm/ADT/StringRef.h"
namespace llvm {
class DiagnosticInfo;
/// This is the base class for diagnostic handling in LLVM.
/// The handleDiagnostics method must be overridden by subclasses to handle
/// diagnostics. The *RemarkEnabled methods can be overridden to control
/// which remarks are enabled.
struct DiagnosticHandler {
void *DiagnosticContext = nullptr;
DiagnosticHandler(void *DiagContext = nullptr)
: DiagnosticContext(DiagContext) {}
virtual ~DiagnosticHandler() = default;
using DiagnosticHandlerTy = void (*)(const DiagnosticInfo &DI, void *Context);
/// DiagHandlerCallback is settable from the C API, and the base
/// implementation of DiagnosticHandler will call it from handleDiagnostics().
/// Derived classes of DiagnosticHandler should not use the callback but
/// should implement handleDiagnostics() instead.
DiagnosticHandlerTy DiagHandlerCallback = nullptr;
/// Override handleDiagnostics to provide a custom implementation.
/// Return true if the diagnostic was handled; otherwise return false, and
/// LLVMContext::diagnose() will print the message with a prefix based on
/// the severity.
virtual bool handleDiagnostics(const DiagnosticInfo &DI) {
if (DiagHandlerCallback) {
DiagHandlerCallback(DI, DiagnosticContext);
return true;
}
return false;
}
/// Return true if analysis remarks are enabled; override to provide a
/// different implementation.
virtual bool isAnalysisRemarkEnabled(StringRef PassName) const;
/// Return true if missed optimization remarks are enabled; override to
/// provide a different implementation.
virtual bool isMissedOptRemarkEnabled(StringRef PassName) const;
/// Return true if passed optimization remarks are enabled; override to
/// provide a different implementation.
virtual bool isPassedOptRemarkEnabled(StringRef PassName) const;
/// Return true if any type of remarks are enabled for this pass.
bool isAnyRemarkEnabled(StringRef PassName) const {
return (isMissedOptRemarkEnabled(PassName) ||
isPassedOptRemarkEnabled(PassName) ||
isAnalysisRemarkEnabled(PassName));
}
/// Return true if any type of remarks are enabled for any pass.
virtual bool isAnyRemarkEnabled() const;
};
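/// A minimal sketch (hypothetical subclass, not part of the LLVM API): counts
/// diagnostics instead of printing them and keeps all remarks enabled.
struct CountingDiagnosticHandler : public DiagnosticHandler {
  unsigned Seen = 0;
  bool handleDiagnostics(const DiagnosticInfo &) override {
    ++Seen;      // swallow the diagnostic; LLVMContext will not print it
    return true; // report it as handled
  }
  bool isAnyRemarkEnabled() const override { return true; }
};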
} // namespace llvm
#endif // LLVM_IR_DIAGNOSTICHANDLER_H

File diff suppressed because it is too large

View File

@@ -0,0 +1,95 @@
//===- llvm/IR/DiagnosticPrinter.h - Diagnostic Printer ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the main interface for printing backend diagnostics.
//
// Clients of the backend diagnostics should overload this interface based
// on their needs.
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DIAGNOSTICPRINTER_H
#define LLVM_IR_DIAGNOSTICPRINTER_H
#include <string>
namespace llvm {
// Forward declarations.
class Module;
class raw_ostream;
class SMDiagnostic;
class StringRef;
class Twine;
class Value;
/// Interface for custom diagnostic printing.
class DiagnosticPrinter {
public:
virtual ~DiagnosticPrinter() = default;
// Simple types.
virtual DiagnosticPrinter &operator<<(char C) = 0;
virtual DiagnosticPrinter &operator<<(unsigned char C) = 0;
virtual DiagnosticPrinter &operator<<(signed char C) = 0;
virtual DiagnosticPrinter &operator<<(StringRef Str) = 0;
virtual DiagnosticPrinter &operator<<(const char *Str) = 0;
virtual DiagnosticPrinter &operator<<(const std::string &Str) = 0;
virtual DiagnosticPrinter &operator<<(unsigned long N) = 0;
virtual DiagnosticPrinter &operator<<(long N) = 0;
virtual DiagnosticPrinter &operator<<(unsigned long long N) = 0;
virtual DiagnosticPrinter &operator<<(long long N) = 0;
virtual DiagnosticPrinter &operator<<(const void *P) = 0;
virtual DiagnosticPrinter &operator<<(unsigned int N) = 0;
virtual DiagnosticPrinter &operator<<(int N) = 0;
virtual DiagnosticPrinter &operator<<(double N) = 0;
virtual DiagnosticPrinter &operator<<(const Twine &Str) = 0;
// IR related types.
virtual DiagnosticPrinter &operator<<(const Value &V) = 0;
virtual DiagnosticPrinter &operator<<(const Module &M) = 0;
// Other types.
virtual DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) = 0;
};
/// Basic diagnostic printer that uses an underlying raw_ostream.
class DiagnosticPrinterRawOStream : public DiagnosticPrinter {
protected:
raw_ostream &Stream;
public:
DiagnosticPrinterRawOStream(raw_ostream &Stream) : Stream(Stream) {}
// Simple types.
DiagnosticPrinter &operator<<(char C) override;
DiagnosticPrinter &operator<<(unsigned char C) override;
DiagnosticPrinter &operator<<(signed char C) override;
DiagnosticPrinter &operator<<(StringRef Str) override;
DiagnosticPrinter &operator<<(const char *Str) override;
DiagnosticPrinter &operator<<(const std::string &Str) override;
DiagnosticPrinter &operator<<(unsigned long N) override;
DiagnosticPrinter &operator<<(long N) override;
DiagnosticPrinter &operator<<(unsigned long long N) override;
DiagnosticPrinter &operator<<(long long N) override;
DiagnosticPrinter &operator<<(const void *P) override;
DiagnosticPrinter &operator<<(unsigned int N) override;
DiagnosticPrinter &operator<<(int N) override;
DiagnosticPrinter &operator<<(double N) override;
DiagnosticPrinter &operator<<(const Twine &Str) override;
// IR related types.
DiagnosticPrinter &operator<<(const Value &V) override;
DiagnosticPrinter &operator<<(const Module &M) override;
// Other types.
DiagnosticPrinter &operator<<(const SMDiagnostic &Diag) override;
};
} // end namespace llvm
#endif // LLVM_IR_DIAGNOSTICPRINTER_H

View File

@@ -0,0 +1,323 @@
//===- Dominators.h - Dominator Info Calculation ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the DominatorTree class, which provides fast and efficient
// dominance queries.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_DOMINATORS_H
#define LLVM_IR_DOMINATORS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/Hashing.h"
#include "llvm/ADT/PointerIntPair.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/Use.h"
#include "llvm/Pass.h"
#include "llvm/Support/CFGDiff.h"
#include "llvm/Support/CFGUpdate.h"
#include "llvm/Support/GenericDomTree.h"
#include "llvm/Support/GenericDomTreeConstruction.h"
#include <utility>
#include <vector>
namespace llvm {
class Function;
class Instruction;
class Module;
class Value;
class raw_ostream;
template <class GraphType> struct GraphTraits;
extern template class DomTreeNodeBase<BasicBlock>;
extern template class DominatorTreeBase<BasicBlock, false>; // DomTree
extern template class DominatorTreeBase<BasicBlock, true>; // PostDomTree
extern template class cfg::Update<BasicBlock *>;
namespace DomTreeBuilder {
using BBDomTree = DomTreeBase<BasicBlock>;
using BBPostDomTree = PostDomTreeBase<BasicBlock>;
using BBUpdates = ArrayRef<llvm::cfg::Update<BasicBlock *>>;
using BBDomTreeGraphDiff = GraphDiff<BasicBlock *, false>;
using BBPostDomTreeGraphDiff = GraphDiff<BasicBlock *, true>;
extern template void Calculate<BBDomTree>(BBDomTree &DT);
extern template void CalculateWithUpdates<BBDomTree>(BBDomTree &DT,
BBUpdates U);
extern template void Calculate<BBPostDomTree>(BBPostDomTree &DT);
extern template void InsertEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
BasicBlock *To);
extern template void InsertEdge<BBPostDomTree>(BBPostDomTree &DT,
BasicBlock *From,
BasicBlock *To);
extern template void DeleteEdge<BBDomTree>(BBDomTree &DT, BasicBlock *From,
BasicBlock *To);
extern template void DeleteEdge<BBPostDomTree>(BBPostDomTree &DT,
BasicBlock *From,
BasicBlock *To);
extern template void ApplyUpdates<BBDomTree>(BBDomTree &DT,
BBDomTreeGraphDiff &,
BBDomTreeGraphDiff *);
extern template void ApplyUpdates<BBPostDomTree>(BBPostDomTree &DT,
BBPostDomTreeGraphDiff &,
BBPostDomTreeGraphDiff *);
extern template bool Verify<BBDomTree>(const BBDomTree &DT,
BBDomTree::VerificationLevel VL);
extern template bool Verify<BBPostDomTree>(const BBPostDomTree &DT,
BBPostDomTree::VerificationLevel VL);
} // namespace DomTreeBuilder
using DomTreeNode = DomTreeNodeBase<BasicBlock>;
class BasicBlockEdge {
const BasicBlock *Start;
const BasicBlock *End;
public:
BasicBlockEdge(const BasicBlock *Start_, const BasicBlock *End_) :
Start(Start_), End(End_) {}
BasicBlockEdge(const std::pair<BasicBlock *, BasicBlock *> &Pair)
: Start(Pair.first), End(Pair.second) {}
BasicBlockEdge(const std::pair<const BasicBlock *, const BasicBlock *> &Pair)
: Start(Pair.first), End(Pair.second) {}
const BasicBlock *getStart() const {
return Start;
}
const BasicBlock *getEnd() const {
return End;
}
/// Check if this is the only edge between Start and End.
bool isSingleEdge() const;
};
template <> struct DenseMapInfo<BasicBlockEdge> {
using BBInfo = DenseMapInfo<const BasicBlock *>;
static unsigned getHashValue(const BasicBlockEdge *V);
static inline BasicBlockEdge getEmptyKey() {
return BasicBlockEdge(BBInfo::getEmptyKey(), BBInfo::getEmptyKey());
}
static inline BasicBlockEdge getTombstoneKey() {
return BasicBlockEdge(BBInfo::getTombstoneKey(), BBInfo::getTombstoneKey());
}
static unsigned getHashValue(const BasicBlockEdge &Edge) {
return hash_combine(BBInfo::getHashValue(Edge.getStart()),
BBInfo::getHashValue(Edge.getEnd()));
}
static bool isEqual(const BasicBlockEdge &LHS, const BasicBlockEdge &RHS) {
return BBInfo::isEqual(LHS.getStart(), RHS.getStart()) &&
BBInfo::isEqual(LHS.getEnd(), RHS.getEnd());
}
};
/// Concrete subclass of DominatorTreeBase that is used to compute a
/// normal dominator tree.
///
/// Definition: A block is said to be forward statically reachable if there is
/// a path from the entry of the function to the block. A statically reachable
/// block may become statically unreachable during optimization.
///
/// A forward unreachable block may appear in the dominator tree, or it may
/// not. If it does, dominance queries will return results as if all reachable
/// blocks dominate it. When asking for a Node corresponding to a potentially
/// unreachable block, calling code must handle the case where the block was
/// unreachable and the result of getNode() is nullptr.
///
/// Generally, a block known to be unreachable when the dominator tree is
/// constructed will not be in the tree. One which becomes unreachable after
/// the dominator tree is initially constructed may still exist in the tree,
/// even if the tree is properly updated. Calling code should not rely on the
/// preceding statements; this is stated only to assist human understanding.
class DominatorTree : public DominatorTreeBase<BasicBlock, false> {
public:
using Base = DominatorTreeBase<BasicBlock, false>;
DominatorTree() = default;
explicit DominatorTree(Function &F) { recalculate(F); }
explicit DominatorTree(DominatorTree &DT, DomTreeBuilder::BBUpdates U) {
recalculate(*DT.Parent, U);
}
/// Handle invalidation explicitly.
bool invalidate(Function &F, const PreservedAnalyses &PA,
FunctionAnalysisManager::Invalidator &);
// Ensure base-class overloads are visible.
using Base::dominates;
/// Return true if the (end of the) basic block BB dominates the use U.
bool dominates(const BasicBlock *BB, const Use &U) const;
/// Return true if value Def dominates use U, in the sense that Def is
/// available at U, and could be substituted as the used value without
/// violating the SSA dominance requirement.
///
/// In particular, it is worth noting that:
/// * Non-instruction Defs dominate everything.
/// * Def does not dominate a use in Def itself (outside of degenerate cases
/// like unreachable code or trivial phi cycles).
/// * Invoke/callbr Defs only dominate uses in their default destination.
bool dominates(const Value *Def, const Use &U) const;
/// Return true if value Def dominates all possible uses inside instruction
/// User. Same comments as for the Use-based API apply.
bool dominates(const Value *Def, const Instruction *User) const;
// Does not accept Value to avoid ambiguity with dominance checks between
// two basic blocks.
bool dominates(const Instruction *Def, const BasicBlock *BB) const;
/// Return true if an edge dominates a use.
///
/// If BBE is not a unique edge between start and end of the edge, it can
/// never dominate the use.
bool dominates(const BasicBlockEdge &BBE, const Use &U) const;
bool dominates(const BasicBlockEdge &BBE, const BasicBlock *BB) const;
/// Returns true if edge \p BBE1 dominates edge \p BBE2.
bool dominates(const BasicBlockEdge &BBE1, const BasicBlockEdge &BBE2) const;
// Ensure base class overloads are visible.
using Base::isReachableFromEntry;
/// Provide an overload for a Use.
bool isReachableFromEntry(const Use &U) const;
// Pop up a GraphViz/gv window with the Dominator Tree rendered using `dot`.
void viewGraph(const Twine &Name, const Twine &Title);
void viewGraph();
};
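/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// the Value/Use overload is the query to make before substituting Def at a
/// single use without violating SSA dominance.
inline bool canSubstituteAtUse(const DominatorTree &DT, const Value *Def,
                               const Use &U) {
  return DT.isReachableFromEntry(U) && DT.dominates(Def, U);
}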
//===-------------------------------------
// DominatorTree GraphTraits specializations so the DominatorTree can be
// iterable by generic graph iterators.
template <class Node, class ChildIterator> struct DomTreeGraphTraitsBase {
using NodeRef = Node *;
using ChildIteratorType = ChildIterator;
using nodes_iterator = df_iterator<Node *, df_iterator_default_set<Node*>>;
static NodeRef getEntryNode(NodeRef N) { return N; }
static ChildIteratorType child_begin(NodeRef N) { return N->begin(); }
static ChildIteratorType child_end(NodeRef N) { return N->end(); }
static nodes_iterator nodes_begin(NodeRef N) {
return df_begin(getEntryNode(N));
}
static nodes_iterator nodes_end(NodeRef N) { return df_end(getEntryNode(N)); }
};
template <>
struct GraphTraits<DomTreeNode *>
: public DomTreeGraphTraitsBase<DomTreeNode, DomTreeNode::const_iterator> {
};
template <>
struct GraphTraits<const DomTreeNode *>
: public DomTreeGraphTraitsBase<const DomTreeNode,
DomTreeNode::const_iterator> {};
template <> struct GraphTraits<DominatorTree*>
: public GraphTraits<DomTreeNode*> {
static NodeRef getEntryNode(DominatorTree *DT) { return DT->getRootNode(); }
static nodes_iterator nodes_begin(DominatorTree *N) {
return df_begin(getEntryNode(N));
}
static nodes_iterator nodes_end(DominatorTree *N) {
return df_end(getEntryNode(N));
}
};
/// Analysis pass which computes a \c DominatorTree.
class DominatorTreeAnalysis : public AnalysisInfoMixin<DominatorTreeAnalysis> {
friend AnalysisInfoMixin<DominatorTreeAnalysis>;
static AnalysisKey Key;
public:
/// Provide the result typedef for this analysis pass.
using Result = DominatorTree;
/// Run the analysis pass over a function and produce a dominator tree.
DominatorTree run(Function &F, FunctionAnalysisManager &);
};
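/// A minimal sketch (hypothetical pass, not part of the LLVM API): consumes
/// the analysis result through the new pass manager.
struct DemoDomTreeUserPass : PassInfoMixin<DemoDomTreeUserPass> {
  PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM) {
    DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
    (void)DT.getRootNode(); // DomTreeNode for the entry block of F
    return PreservedAnalyses::all(); // nothing changed; analyses stay valid
  }
};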
/// Printer pass for the \c DominatorTree.
class DominatorTreePrinterPass
: public PassInfoMixin<DominatorTreePrinterPass> {
raw_ostream &OS;
public:
explicit DominatorTreePrinterPass(raw_ostream &OS);
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// Verifier pass for the \c DominatorTree.
struct DominatorTreeVerifierPass : PassInfoMixin<DominatorTreeVerifierPass> {
PreservedAnalyses run(Function &F, FunctionAnalysisManager &AM);
};
/// Enables verification of dominator trees.
///
/// This check is expensive and is disabled by default. `-verify-dom-info`
/// allows selectively enabling the check without needing to recompile.
extern bool VerifyDomInfo;
/// Legacy analysis pass which computes a \c DominatorTree.
class DominatorTreeWrapperPass : public FunctionPass {
DominatorTree DT;
public:
static char ID;
DominatorTreeWrapperPass();
DominatorTree &getDomTree() { return DT; }
const DominatorTree &getDomTree() const { return DT; }
bool runOnFunction(Function &F) override;
void verifyAnalysis() const override;
void getAnalysisUsage(AnalysisUsage &AU) const override {
AU.setPreservesAll();
}
void releaseMemory() override { DT.reset(); }
void print(raw_ostream &OS, const Module *M = nullptr) const override;
};
} // end namespace llvm
#endif // LLVM_IR_DOMINATORS_H

View File

@@ -0,0 +1,68 @@
//===- FPEnv.h ---- FP Environment ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations of entities that describe floating
/// point environment and related functions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_FPENV_H
#define LLVM_IR_FPENV_H
#include "llvm/ADT/FloatingPointMode.h"
#include "llvm/ADT/Optional.h"
namespace llvm {
class StringRef;
namespace fp {
/// Exception behavior used for floating point operations.
///
/// Each of these values corresponds to some metadata argument value of a
/// constrained floating point intrinsic. See the LLVM Language Reference Manual
/// for details.
enum ExceptionBehavior : uint8_t {
ebIgnore, ///< This corresponds to "fpexcept.ignore".
ebMayTrap, ///< This corresponds to "fpexcept.maytrap".
ebStrict ///< This corresponds to "fpexcept.strict".
};
}
/// Returns a valid RoundingMode enumerator when given a string
/// that is valid as input in constrained intrinsic rounding mode
/// metadata.
Optional<RoundingMode> convertStrToRoundingMode(StringRef);
/// For any RoundingMode enumerator, returns a string valid as input in
/// constrained intrinsic rounding mode metadata.
Optional<StringRef> convertRoundingModeToStr(RoundingMode);
/// Returns a valid ExceptionBehavior enumerator when given a string
/// valid as input in constrained intrinsic exception behavior metadata.
Optional<fp::ExceptionBehavior> convertStrToExceptionBehavior(StringRef);
/// For any ExceptionBehavior enumerator, returns a string valid as
/// input in constrained intrinsic exception behavior metadata.
Optional<StringRef> convertExceptionBehaviorToStr(fp::ExceptionBehavior);
/// Returns true if the exception handling behavior and rounding mode
/// match what is used in the default floating point environment.
inline bool isDefaultFPEnvironment(fp::ExceptionBehavior EB, RoundingMode RM) {
return EB == fp::ebIgnore && RM == RoundingMode::NearestTiesToEven;
}
/// Returns true if the rounding mode RM may be equal to QRM at compile time
/// or at run time.
inline bool canRoundingModeBe(RoundingMode RM, RoundingMode QRM) {
return RM == QRM || RM == RoundingMode::Dynamic;
}
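/// A minimal usage sketch (hypothetical helper, not part of the LLVM API):
/// combines the two predicates above; the exception behavior must be "ignore"
/// and the rounding mode must be round-to-nearest, possibly reached through a
/// dynamic mode.
inline bool mayBeDefaultFPEnvironment(fp::ExceptionBehavior EB,
                                      RoundingMode RM) {
  return EB == fp::ebIgnore &&
         canRoundingModeBe(RM, RoundingMode::NearestTiesToEven);
}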
}
#endif

View File

@@ -0,0 +1,44 @@
/*===-- FixedMetadataKinds.def - Fixed metadata kind IDs -------*- C++ -*-=== *\
|*
|* Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|* See https://llvm.org/LICENSE.txt for license information.
|* SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|*
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_FIXED_MD_KIND
#error "LLVM_FIXED_MD_KIND(EnumID, Name, Value) is not defined."
#endif
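/* A minimal consumer sketch (hypothetical, for illustration): this file is
 * meant to be textually included with LLVM_FIXED_MD_KIND defined, for
 * example to build an enum of the fixed metadata kind IDs:
 *
 *   enum FixedMDKind : unsigned {
 *   #define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
 *   #include "llvm/IR/FixedMetadataKinds.def"
 *   };
 *   #undef LLVM_FIXED_MD_KIND
 */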
LLVM_FIXED_MD_KIND(MD_dbg, "dbg", 0)
LLVM_FIXED_MD_KIND(MD_tbaa, "tbaa", 1)
LLVM_FIXED_MD_KIND(MD_prof, "prof", 2)
LLVM_FIXED_MD_KIND(MD_fpmath, "fpmath", 3)
LLVM_FIXED_MD_KIND(MD_range, "range", 4)
LLVM_FIXED_MD_KIND(MD_tbaa_struct, "tbaa.struct", 5)
LLVM_FIXED_MD_KIND(MD_invariant_load, "invariant.load", 6)
LLVM_FIXED_MD_KIND(MD_alias_scope, "alias.scope", 7)
LLVM_FIXED_MD_KIND(MD_noalias, "noalias", 8)
LLVM_FIXED_MD_KIND(MD_nontemporal, "nontemporal", 9)
LLVM_FIXED_MD_KIND(MD_mem_parallel_loop_access,
"llvm.mem.parallel_loop_access", 10)
LLVM_FIXED_MD_KIND(MD_nonnull, "nonnull", 11)
LLVM_FIXED_MD_KIND(MD_dereferenceable, "dereferenceable", 12)
LLVM_FIXED_MD_KIND(MD_dereferenceable_or_null, "dereferenceable_or_null", 13)
LLVM_FIXED_MD_KIND(MD_make_implicit, "make.implicit", 14)
LLVM_FIXED_MD_KIND(MD_unpredictable, "unpredictable", 15)
LLVM_FIXED_MD_KIND(MD_invariant_group, "invariant.group", 16)
LLVM_FIXED_MD_KIND(MD_align, "align", 17)
LLVM_FIXED_MD_KIND(MD_loop, "llvm.loop", 18)
LLVM_FIXED_MD_KIND(MD_type, "type", 19)
LLVM_FIXED_MD_KIND(MD_section_prefix, "section_prefix", 20)
LLVM_FIXED_MD_KIND(MD_absolute_symbol, "absolute_symbol", 21)
LLVM_FIXED_MD_KIND(MD_associated, "associated", 22)
LLVM_FIXED_MD_KIND(MD_callees, "callees", 23)
LLVM_FIXED_MD_KIND(MD_irr_loop, "irr_loop", 24)
LLVM_FIXED_MD_KIND(MD_access_group, "llvm.access.group", 25)
LLVM_FIXED_MD_KIND(MD_callback, "callback", 26)
LLVM_FIXED_MD_KIND(MD_preserve_access_index, "llvm.preserve.access.index", 27)
LLVM_FIXED_MD_KIND(MD_vcall_visibility, "vcall_visibility", 28)
LLVM_FIXED_MD_KIND(MD_noundef, "noundef", 29)
LLVM_FIXED_MD_KIND(MD_annotation, "annotation", 30)

View File

@@ -0,0 +1,465 @@
//===- llvm/FixedPointBuilder.h - Builder for fixed-point ops ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the FixedPointBuilder class, which is used as a convenient
// way to lower fixed-point arithmetic operations to LLVM IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_FIXEDPOINTBUILDER_H
#define LLVM_IR_FIXEDPOINTBUILDER_H
#include "llvm/ADT/APFixedPoint.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
namespace llvm {
template <class IRBuilderTy> class FixedPointBuilder {
IRBuilderTy &B;
Value *Convert(Value *Src, const FixedPointSemantics &SrcSema,
const FixedPointSemantics &DstSema, bool DstIsInteger) {
unsigned SrcWidth = SrcSema.getWidth();
unsigned DstWidth = DstSema.getWidth();
unsigned SrcScale = SrcSema.getScale();
unsigned DstScale = DstSema.getScale();
bool SrcIsSigned = SrcSema.isSigned();
bool DstIsSigned = DstSema.isSigned();
Type *DstIntTy = B.getIntNTy(DstWidth);
Value *Result = Src;
unsigned ResultWidth = SrcWidth;
// Downscale.
if (DstScale < SrcScale) {
// When converting to integers, we round towards zero. For negative
// numbers, right shifting rounds towards negative infinity. In this case,
// we can just round up before shifting.
if (DstIsInteger && SrcIsSigned) {
Value *Zero = Constant::getNullValue(Result->getType());
Value *IsNegative = B.CreateICmpSLT(Result, Zero);
Value *LowBits = ConstantInt::get(
B.getContext(), APInt::getLowBitsSet(ResultWidth, SrcScale));
Value *Rounded = B.CreateAdd(Result, LowBits);
Result = B.CreateSelect(IsNegative, Rounded, Result);
}
Result = SrcIsSigned
? B.CreateAShr(Result, SrcScale - DstScale, "downscale")
: B.CreateLShr(Result, SrcScale - DstScale, "downscale");
}
if (!DstSema.isSaturated()) {
// Resize.
Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
// Upscale.
if (DstScale > SrcScale)
Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
} else {
// Adjust the number of fractional bits.
if (DstScale > SrcScale) {
// Compare to DstWidth to prevent resizing twice.
ResultWidth = std::max(SrcWidth + DstScale - SrcScale, DstWidth);
Type *UpscaledTy = B.getIntNTy(ResultWidth);
Result = B.CreateIntCast(Result, UpscaledTy, SrcIsSigned, "resize");
Result = B.CreateShl(Result, DstScale - SrcScale, "upscale");
}
// Handle saturation.
bool LessIntBits = DstSema.getIntegralBits() < SrcSema.getIntegralBits();
if (LessIntBits) {
Value *Max = ConstantInt::get(
B.getContext(),
APFixedPoint::getMax(DstSema).getValue().extOrTrunc(ResultWidth));
Value *TooHigh = SrcIsSigned ? B.CreateICmpSGT(Result, Max)
: B.CreateICmpUGT(Result, Max);
Result = B.CreateSelect(TooHigh, Max, Result, "satmax");
}
// Cannot overflow min to dest type if src is unsigned since all fixed
// point types can cover the unsigned min of 0.
if (SrcIsSigned && (LessIntBits || !DstIsSigned)) {
Value *Min = ConstantInt::get(
B.getContext(),
APFixedPoint::getMin(DstSema).getValue().extOrTrunc(ResultWidth));
Value *TooLow = B.CreateICmpSLT(Result, Min);
Result = B.CreateSelect(TooLow, Min, Result, "satmin");
}
// Resize the integer part to get the final destination size.
if (ResultWidth != DstWidth)
Result = B.CreateIntCast(Result, DstIntTy, SrcIsSigned, "resize");
}
return Result;
}
/// Get the common semantic for two semantics, with the added imposition that
/// saturated padded types retain the padding bit.
FixedPointSemantics
getCommonBinopSemantic(const FixedPointSemantics &LHSSema,
const FixedPointSemantics &RHSSema) {
auto C = LHSSema.getCommonSemantics(RHSSema);
bool BothPadded =
LHSSema.hasUnsignedPadding() && RHSSema.hasUnsignedPadding();
return FixedPointSemantics(
C.getWidth() + (unsigned)(BothPadded && C.isSaturated()), C.getScale(),
C.isSigned(), C.isSaturated(), BothPadded);
}
/// Given a floating point type and a fixed-point semantic, return a floating
/// point type which can accommodate the fixed-point semantic. This is either
/// \p Ty, or a floating point type with a larger exponent than Ty.
Type *getAccommodatingFloatType(Type *Ty, const FixedPointSemantics &Sema) {
const fltSemantics *FloatSema = &Ty->getFltSemantics();
while (!Sema.fitsInFloatSemantics(*FloatSema))
FloatSema = APFixedPoint::promoteFloatSemantics(FloatSema);
return Type::getFloatingPointTy(Ty->getContext(), *FloatSema);
}
public:
FixedPointBuilder(IRBuilderTy &Builder) : B(Builder) {}
/// Convert an integer value representing a fixed-point number from one
/// fixed-point semantic to another fixed-point semantic.
/// \p Src - The source value
/// \p SrcSema - The fixed-point semantic of the source value
/// \p DstSema - The resulting fixed-point semantic
Value *CreateFixedToFixed(Value *Src, const FixedPointSemantics &SrcSema,
const FixedPointSemantics &DstSema) {
return Convert(Src, SrcSema, DstSema, false);
}
/// Convert an integer value representing a fixed-point number to an integer
/// with the given bit width and signedness.
/// \p Src - The source value
/// \p SrcSema - The fixed-point semantic of the source value
/// \p DstWidth - The bit width of the result value
/// \p DstIsSigned - The signedness of the result value
Value *CreateFixedToInteger(Value *Src, const FixedPointSemantics &SrcSema,
unsigned DstWidth, bool DstIsSigned) {
return Convert(
Src, SrcSema,
FixedPointSemantics::GetIntegerSemantics(DstWidth, DstIsSigned), true);
}
/// Convert an integer value with the given signedness to an integer value
/// representing the given fixed-point semantic.
/// \p Src - The source value
/// \p SrcIsSigned - The signedness of the source value
/// \p DstSema - The resulting fixed-point semantic
Value *CreateIntegerToFixed(Value *Src, unsigned SrcIsSigned,
const FixedPointSemantics &DstSema) {
return Convert(Src,
FixedPointSemantics::GetIntegerSemantics(
Src->getType()->getScalarSizeInBits(), SrcIsSigned),
DstSema, false);
}
Value *CreateFixedToFloating(Value *Src, const FixedPointSemantics &SrcSema,
Type *DstTy) {
Value *Result;
Type *OpTy = getAccommodatingFloatType(DstTy, SrcSema);
// Convert the raw fixed-point value directly to floating point. If the
// value is too large to fit, it will be rounded, not truncated.
Result = SrcSema.isSigned() ? B.CreateSIToFP(Src, OpTy)
: B.CreateUIToFP(Src, OpTy);
// Rescale the integral-in-floating point by the scaling factor. This is
// lossless, except for overflow to infinity which is unlikely.
Result = B.CreateFMul(Result,
ConstantFP::get(OpTy, std::pow(2, -(int)SrcSema.getScale())));
if (OpTy != DstTy)
Result = B.CreateFPTrunc(Result, DstTy);
return Result;
}
Value *CreateFloatingToFixed(Value *Src, const FixedPointSemantics &DstSema) {
bool UseSigned = DstSema.isSigned() || DstSema.hasUnsignedPadding();
Value *Result = Src;
Type *OpTy = getAccommodatingFloatType(Src->getType(), DstSema);
if (OpTy != Src->getType())
Result = B.CreateFPExt(Result, OpTy);
// Rescale the floating point value so that its significant bits (for the
// purposes of the conversion) are in the integral range.
Result = B.CreateFMul(Result,
ConstantFP::get(OpTy, std::pow(2, DstSema.getScale())));
Type *ResultTy = B.getIntNTy(DstSema.getWidth());
if (DstSema.isSaturated()) {
Intrinsic::ID IID =
UseSigned ? Intrinsic::fptosi_sat : Intrinsic::fptoui_sat;
Result = B.CreateIntrinsic(IID, {ResultTy, OpTy}, {Result});
} else {
Result = UseSigned ? B.CreateFPToSI(Result, ResultTy)
: B.CreateFPToUI(Result, ResultTy);
}
// When saturating unsigned-with-padding using signed operations, we may
// get negative values. Emit an extra clamp to zero.
if (DstSema.isSaturated() && DstSema.hasUnsignedPadding()) {
Constant *Zero = Constant::getNullValue(Result->getType());
Result =
B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
}
return Result;
}
/// Add two fixed-point values and return the result in their common semantic.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateAdd(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
Value *Result;
if (CommonSema.isSaturated()) {
Intrinsic::ID IID = UseSigned ? Intrinsic::sadd_sat : Intrinsic::uadd_sat;
Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
} else {
Result = B.CreateAdd(WideLHS, WideRHS);
}
return CreateFixedToFixed(Result, CommonSema,
LHSSema.getCommonSemantics(RHSSema));
}
/// Subtract two fixed-point values and return the result in their common
/// semantic.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateSub(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
Value *Result;
if (CommonSema.isSaturated()) {
Intrinsic::ID IID = UseSigned ? Intrinsic::ssub_sat : Intrinsic::usub_sat;
Result = B.CreateBinaryIntrinsic(IID, WideLHS, WideRHS);
} else {
Result = B.CreateSub(WideLHS, WideRHS);
}
// Subtraction can end up below 0 for padded unsigned operations, so emit
// an extra clamp in that case.
if (CommonSema.isSaturated() && CommonSema.hasUnsignedPadding()) {
Constant *Zero = Constant::getNullValue(Result->getType());
Result =
B.CreateSelect(B.CreateICmpSLT(Result, Zero), Zero, Result, "satmin");
}
return CreateFixedToFixed(Result, CommonSema,
LHSSema.getCommonSemantics(RHSSema));
}
/// Multiply two fixed-point values and return the result in their common
/// semantic.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateMul(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
Intrinsic::ID IID;
if (CommonSema.isSaturated()) {
IID = UseSigned ? Intrinsic::smul_fix_sat : Intrinsic::umul_fix_sat;
} else {
IID = UseSigned ? Intrinsic::smul_fix : Intrinsic::umul_fix;
}
Value *Result = B.CreateIntrinsic(
IID, {WideLHS->getType()},
{WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});
return CreateFixedToFixed(Result, CommonSema,
LHSSema.getCommonSemantics(RHSSema));
}
/// Divide two fixed-point values and return the result in their common
/// semantic.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateDiv(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
bool UseSigned = CommonSema.isSigned() || CommonSema.hasUnsignedPadding();
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
Intrinsic::ID IID;
if (CommonSema.isSaturated()) {
IID = UseSigned ? Intrinsic::sdiv_fix_sat : Intrinsic::udiv_fix_sat;
} else {
IID = UseSigned ? Intrinsic::sdiv_fix : Intrinsic::udiv_fix;
}
Value *Result = B.CreateIntrinsic(
IID, {WideLHS->getType()},
{WideLHS, WideRHS, B.getInt32(CommonSema.getScale())});
return CreateFixedToFixed(Result, CommonSema,
LHSSema.getCommonSemantics(RHSSema));
}
/// Left shift a fixed-point value by an unsigned integer value. The integer
/// value can be any bit width.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
Value *CreateShl(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
bool UseSigned = LHSSema.isSigned() || LHSSema.hasUnsignedPadding();
RHS = B.CreateIntCast(RHS, LHS->getType(), /*IsSigned=*/false);
Value *Result;
if (LHSSema.isSaturated()) {
Intrinsic::ID IID = UseSigned ? Intrinsic::sshl_sat : Intrinsic::ushl_sat;
Result = B.CreateBinaryIntrinsic(IID, LHS, RHS);
} else {
Result = B.CreateShl(LHS, RHS);
}
return Result;
}
/// Right shift a fixed-point value by an unsigned integer value. The integer
/// value can be any bit width.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
Value *CreateShr(Value *LHS, const FixedPointSemantics &LHSSema, Value *RHS) {
RHS = B.CreateIntCast(RHS, LHS->getType(), false);
return LHSSema.isSigned() ? B.CreateAShr(LHS, RHS) : B.CreateLShr(LHS, RHS);
}
/// Compare two fixed-point values for equality.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateEQ(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return B.CreateICmpEQ(WideLHS, WideRHS);
}
/// Compare two fixed-point values for inequality.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateNE(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return B.CreateICmpNE(WideLHS, WideRHS);
}
/// Compare two fixed-point values as LHS < RHS.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateLT(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return CommonSema.isSigned() ? B.CreateICmpSLT(WideLHS, WideRHS)
: B.CreateICmpULT(WideLHS, WideRHS);
}
/// Compare two fixed-point values as LHS <= RHS.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateLE(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return CommonSema.isSigned() ? B.CreateICmpSLE(WideLHS, WideRHS)
: B.CreateICmpULE(WideLHS, WideRHS);
}
/// Compare two fixed-point values as LHS > RHS.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateGT(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return CommonSema.isSigned() ? B.CreateICmpSGT(WideLHS, WideRHS)
: B.CreateICmpUGT(WideLHS, WideRHS);
}
/// Compare two fixed-point values as LHS >= RHS.
/// \p LHS - The left hand side
/// \p LHSSema - The semantic of the left hand side
/// \p RHS - The right hand side
/// \p RHSSema - The semantic of the right hand side
Value *CreateGE(Value *LHS, const FixedPointSemantics &LHSSema,
Value *RHS, const FixedPointSemantics &RHSSema) {
auto CommonSema = getCommonBinopSemantic(LHSSema, RHSSema);
Value *WideLHS = CreateFixedToFixed(LHS, LHSSema, CommonSema);
Value *WideRHS = CreateFixedToFixed(RHS, RHSSema, CommonSema);
return CommonSema.isSigned() ? B.CreateICmpSGE(WideLHS, WideRHS)
: B.CreateICmpUGE(WideLHS, WideRHS);
}
};
} // end namespace llvm
#endif // LLVM_IR_FIXEDPOINTBUILDER_H
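A minimal usage sketch (not part of the header above): lowering a fixed-point add with this builder. The semantics values and the function name are illustrative assumptions, not anything defined by the header.

#include "llvm/IR/FixedPointBuilder.h"
#include "llvm/IR/IRBuilder.h"

using namespace llvm;

// Emit LHS + RHS where both operands are 32-bit signed Q15.16 fixed point.
Value *emitFixedAdd(IRBuilder<> &B, Value *LHS, Value *RHS) {
  FixedPointSemantics Sema(/*Width=*/32, /*Scale=*/16, /*IsSigned=*/true,
                           /*IsSaturated=*/false,
                           /*HasUnsignedPadding=*/false);
  FixedPointBuilder<IRBuilder<>> FPB(B);
  // Identical semantics on both sides, so this lowers to a plain add.
  return FPB.CreateAdd(LHS, Sema, RHS, Sema);
}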

View File

@@ -0,0 +1,917 @@
//===- llvm/Function.h - Class to represent a single function ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Function class, which represents a
// single function/procedure in LLVM.
//
// A function basically consists of a list of basic blocks, a list of arguments,
// and a symbol table.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_FUNCTION_H
#define LLVM_IR_FUNCTION_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Compiler.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
namespace llvm {
namespace Intrinsic {
typedef unsigned ID;
}
class AssemblyAnnotationWriter;
class Constant;
struct DenormalMode;
class DISubprogram;
class LLVMContext;
class Module;
template <typename T> class Optional;
class raw_ostream;
class Type;
class User;
class BranchProbabilityInfo;
class BlockFrequencyInfo;
class LLVM_EXTERNAL_VISIBILITY Function : public GlobalObject,
public ilist_node<Function> {
public:
using BasicBlockListType = SymbolTableList<BasicBlock>;
// BasicBlock iterators...
using iterator = BasicBlockListType::iterator;
using const_iterator = BasicBlockListType::const_iterator;
using arg_iterator = Argument *;
using const_arg_iterator = const Argument *;
private:
// Important things that make up a function!
BasicBlockListType BasicBlocks; ///< The basic blocks
mutable Argument *Arguments = nullptr; ///< The formal arguments
size_t NumArgs;
std::unique_ptr<ValueSymbolTable>
SymTab; ///< Symbol table of args/instructions
AttributeList AttributeSets; ///< Parameter attributes
/*
* Value::SubclassData
*
* bit 0 : HasLazyArguments
* bit 1 : HasPrefixData
* bit 2 : HasPrologueData
* bit 3 : HasPersonalityFn
* bits 4-13 : CallingConvention
* bit 14 : HasGC
* bit 15 : [reserved]
*/
/// Bits from GlobalObject::GlobalObjectSubclassData.
enum {
/// Whether this function is materializable.
IsMaterializableBit = 0,
};
friend class SymbolTableListTraits<Function>;
/// hasLazyArguments/CheckLazyArguments - The argument list of a function is
/// built on demand, so that the list isn't allocated until the first client
/// needs it. The hasLazyArguments predicate returns true if the arg list
/// hasn't been set up yet.
public:
bool hasLazyArguments() const {
return getSubclassDataFromValue() & (1<<0);
}
private:
void CheckLazyArguments() const {
if (hasLazyArguments())
BuildLazyArguments();
}
void BuildLazyArguments() const;
void clearArguments();
/// Function ctor - If the (optional) Module argument is specified, the
/// function is automatically inserted into the end of the function list for
/// the module.
///
Function(FunctionType *Ty, LinkageTypes Linkage, unsigned AddrSpace,
const Twine &N = "", Module *M = nullptr);
public:
Function(const Function&) = delete;
void operator=(const Function&) = delete;
~Function();
// This is here to help easily convert from FunctionT * (Function * or
// MachineFunction *) in BlockFrequencyInfoImpl to Function * by calling
// FunctionT->getFunction().
const Function &getFunction() const { return *this; }
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
unsigned AddrSpace, const Twine &N = "",
Module *M = nullptr) {
return new Function(Ty, Linkage, AddrSpace, N, M);
}
// TODO: remove this once all users have been updated to pass an AddrSpace
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N = "", Module *M = nullptr) {
return new Function(Ty, Linkage, static_cast<unsigned>(-1), N, M);
}
/// Creates a new function and attaches it to a module.
///
/// Places the function in the program address space as specified
/// by the module's data layout.
static Function *Create(FunctionType *Ty, LinkageTypes Linkage,
const Twine &N, Module &M);
/// Creates a function with some attributes recorded in llvm.module.flags
/// applied.
///
/// Use this when synthesizing new functions that need attributes that would
/// have been set by command line options.
static Function *createWithDefaultAttr(FunctionType *Ty, LinkageTypes Linkage,
unsigned AddrSpace,
const Twine &N = "",
Module *M = nullptr);
// Provide fast operand accessors.
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Returns the number of non-debug IR instructions in this function.
/// This is equivalent to the sum of the sizes of each basic block contained
/// within this function.
unsigned getInstructionCount() const;
/// Returns the FunctionType for me.
FunctionType *getFunctionType() const {
return cast<FunctionType>(getValueType());
}
/// Returns the type of the ret val.
Type *getReturnType() const { return getFunctionType()->getReturnType(); }
/// getContext - Return a reference to the LLVMContext associated with this
/// function.
LLVMContext &getContext() const;
/// isVarArg - Return true if this function takes a variable number of
/// arguments.
bool isVarArg() const { return getFunctionType()->isVarArg(); }
bool isMaterializable() const {
return getGlobalObjectSubClassData() & (1 << IsMaterializableBit);
}
void setIsMaterializable(bool V) {
unsigned Mask = 1 << IsMaterializableBit;
setGlobalObjectSubClassData((~Mask & getGlobalObjectSubClassData()) |
(V ? Mask : 0u));
}
/// getIntrinsicID - This method returns the ID number of the specified
/// function, or Intrinsic::not_intrinsic if the function is not an
/// intrinsic, or if the pointer is null. This value is always defined to be
/// zero to allow easy checking for whether a function is intrinsic or not.
/// The particular intrinsic functions which correspond to this value are
/// defined in llvm/Intrinsics.h.
Intrinsic::ID getIntrinsicID() const LLVM_READONLY { return IntID; }
/// isIntrinsic - Returns true if the function's name starts with "llvm.".
/// It's possible for this function to return true while getIntrinsicID()
/// returns Intrinsic::not_intrinsic!
bool isIntrinsic() const { return HasLLVMReservedName; }
/// isTargetIntrinsic - Returns true if IID is an intrinsic specific to a
/// certain target. If it is a generic intrinsic false is returned.
static bool isTargetIntrinsic(Intrinsic::ID IID);
/// isTargetIntrinsic - Returns true if this function is an intrinsic and the
/// intrinsic is specific to a certain target. If this is not an intrinsic
/// or a generic intrinsic, false is returned.
bool isTargetIntrinsic() const;
/// Returns true if the function is one of the "Constrained Floating-Point
/// Intrinsics". Returns false if not, and returns false when
/// getIntrinsicID() returns Intrinsic::not_intrinsic.
bool isConstrainedFPIntrinsic() const;
static Intrinsic::ID lookupIntrinsicID(StringRef Name);
/// Recalculate the ID for this function if it is an Intrinsic defined
/// in llvm/Intrinsics.h. Sets the intrinsic ID to Intrinsic::not_intrinsic
/// if the name of this function does not match an intrinsic in that header.
/// Note, this method does not need to be called directly, as it is called
/// from Value::setName() whenever the name of this function changes.
void recalculateIntrinsicID();
/// getCallingConv()/setCallingConv(CC) - These method get and set the
/// calling convention of this function. The enum values for the known
/// calling conventions are defined in CallingConv.h.
CallingConv::ID getCallingConv() const {
return static_cast<CallingConv::ID>((getSubclassDataFromValue() >> 4) &
CallingConv::MaxID);
}
void setCallingConv(CallingConv::ID CC) {
auto ID = static_cast<unsigned>(CC);
assert(!(ID & ~CallingConv::MaxID) && "Unsupported calling convention");
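// Mask 0xc00f preserves bits 0-3 and 14-15; bits 4-13 receive the new CC.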
setValueSubclassData((getSubclassDataFromValue() & 0xc00f) | (ID << 4));
}
enum ProfileCountType { PCT_Real, PCT_Synthetic };
/// Class to represent profile counts.
///
/// This class represents both real and synthetic profile counts.
class ProfileCount {
private:
uint64_t Count = 0;
ProfileCountType PCT = PCT_Real;
public:
ProfileCount(uint64_t Count, ProfileCountType PCT)
: Count(Count), PCT(PCT) {}
uint64_t getCount() const { return Count; }
ProfileCountType getType() const { return PCT; }
bool isSynthetic() const { return PCT == PCT_Synthetic; }
};
/// Set the entry count for this function.
///
/// Entry count is the number of times this function was executed based on
/// pgo data. \p Imports points to a set of GUIDs that needs to
/// be imported by the function for sample PGO, to enable the same inlines as
/// the profiled optimized binary.
void setEntryCount(ProfileCount Count,
const DenseSet<GlobalValue::GUID> *Imports = nullptr);
/// A convenience wrapper for setting entry count
void setEntryCount(uint64_t Count, ProfileCountType Type = PCT_Real,
const DenseSet<GlobalValue::GUID> *Imports = nullptr);
/// Get the entry count for this function.
///
/// Entry count is the number of times the function was executed.
/// When AllowSynthetic is false, only real counts (from PGO data) are
/// returned.
Optional<ProfileCount> getEntryCount(bool AllowSynthetic = false) const;
/// Return true if the function is annotated with profile data.
///
/// Presence of entry counts from a profile run implies the function has
/// profile annotations. If IncludeSynthetic is false, only return true
/// when the profile data is real.
bool hasProfileData(bool IncludeSynthetic = false) const {
return getEntryCount(IncludeSynthetic).hasValue();
}
/// Returns the set of GUIDs that needs to be imported to the function for
/// sample PGO, to enable the same inlines as the profiled optimized binary.
DenseSet<GlobalValue::GUID> getImportGUIDs() const;
/// Set the section prefix for this function.
void setSectionPrefix(StringRef Prefix);
/// Get the section prefix for this function.
Optional<StringRef> getSectionPrefix() const;
/// hasGC/getGC/setGC/clearGC - The name of the garbage collection algorithm
/// to use during code generation.
bool hasGC() const {
return getSubclassDataFromValue() & (1<<14);
}
const std::string &getGC() const;
void setGC(std::string Str);
void clearGC();
/// Return the attribute list for this Function.
AttributeList getAttributes() const { return AttributeSets; }
/// Set the attribute list for this Function.
void setAttributes(AttributeList Attrs) { AttributeSets = Attrs; }
// TODO: remove non-AtIndex versions of these methods.
/// adds the attribute to the list of attributes.
void addAttributeAtIndex(unsigned i, Attribute Attr);
/// Add function attributes to this function.
void addFnAttr(Attribute::AttrKind Kind);
/// Add function attributes to this function.
void addFnAttr(StringRef Kind, StringRef Val = StringRef());
/// Add function attributes to this function.
void addFnAttr(Attribute Attr);
/// Add function attributes to this function.
void addFnAttrs(const AttrBuilder &Attrs);
/// Add return value attributes to this function.
void addRetAttr(Attribute::AttrKind Kind);
/// Add return value attributes to this function.
void addRetAttr(Attribute Attr);
/// Add return value attributes to this function.
void addRetAttrs(const AttrBuilder &Attrs);
/// adds the attribute to the list of attributes for the given arg.
void addParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
/// adds the attribute to the list of attributes for the given arg.
void addParamAttr(unsigned ArgNo, Attribute Attr);
/// adds the attributes to the list of attributes for the given arg.
void addParamAttrs(unsigned ArgNo, const AttrBuilder &Attrs);
/// removes the attribute from the list of attributes.
void removeAttributeAtIndex(unsigned i, Attribute::AttrKind Kind);
/// removes the attribute from the list of attributes.
void removeAttributeAtIndex(unsigned i, StringRef Kind);
/// Remove function attributes from this function.
void removeFnAttr(Attribute::AttrKind Kind);
/// Remove function attribute from this function.
void removeFnAttr(StringRef Kind);
void removeFnAttrs(const AttributeMask &Attrs);
/// removes the attribute from the return value list of attributes.
void removeRetAttr(Attribute::AttrKind Kind);
/// removes the attribute from the return value list of attributes.
void removeRetAttr(StringRef Kind);
/// removes the attributes from the return value list of attributes.
void removeRetAttrs(const AttributeMask &Attrs);
/// removes the attribute from the list of attributes.
void removeParamAttr(unsigned ArgNo, Attribute::AttrKind Kind);
/// removes the attribute from the list of attributes.
void removeParamAttr(unsigned ArgNo, StringRef Kind);
/// removes the attribute from the list of attributes.
void removeParamAttrs(unsigned ArgNo, const AttributeMask &Attrs);
/// Return true if the function has the attribute.
bool hasFnAttribute(Attribute::AttrKind Kind) const;
/// Return true if the function has the attribute.
bool hasFnAttribute(StringRef Kind) const;
/// check if an attribute is in the list of attributes for the return value.
bool hasRetAttribute(Attribute::AttrKind Kind) const;
/// check if an attributes is in the list of attributes.
bool hasParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const;
/// gets the attribute from the list of attributes.
Attribute getAttributeAtIndex(unsigned i, Attribute::AttrKind Kind) const;
/// gets the attribute from the list of attributes.
Attribute getAttributeAtIndex(unsigned i, StringRef Kind) const;
/// Return the attribute for the given attribute kind.
Attribute getFnAttribute(Attribute::AttrKind Kind) const;
/// Return the attribute for the given attribute kind.
Attribute getFnAttribute(StringRef Kind) const;
/// gets the specified attribute from the list of attributes.
Attribute getParamAttribute(unsigned ArgNo, Attribute::AttrKind Kind) const;
/// removes noundef and other attributes that imply undefined behavior if a
/// `undef` or `poison` value is passed from the list of attributes.
void removeParamUndefImplyingAttrs(unsigned ArgNo);
/// Return the stack alignment for the function.
MaybeAlign getFnStackAlign() const {
return AttributeSets.getFnStackAlignment();
}
/// Returns true if the function has ssp, sspstrong, or sspreq fn attrs.
bool hasStackProtectorFnAttr() const;
/// adds the dereferenceable attribute to the list of attributes for
/// the given arg.
void addDereferenceableParamAttr(unsigned ArgNo, uint64_t Bytes);
/// adds the dereferenceable_or_null attribute to the list of
/// attributes for the given arg.
void addDereferenceableOrNullParamAttr(unsigned ArgNo, uint64_t Bytes);
/// Extract the alignment for a call or parameter (0=unknown).
/// FIXME: Remove this function once transition to Align is over.
/// Use getParamAlign() instead.
uint64_t getParamAlignment(unsigned ArgNo) const {
if (const auto MA = getParamAlign(ArgNo))
return MA->value();
return 0;
}
MaybeAlign getParamAlign(unsigned ArgNo) const {
return AttributeSets.getParamAlignment(ArgNo);
}
MaybeAlign getParamStackAlign(unsigned ArgNo) const {
return AttributeSets.getParamStackAlignment(ArgNo);
}
/// Extract the byval type for a parameter.
Type *getParamByValType(unsigned ArgNo) const {
return AttributeSets.getParamByValType(ArgNo);
}
/// Extract the sret type for a parameter.
Type *getParamStructRetType(unsigned ArgNo) const {
return AttributeSets.getParamStructRetType(ArgNo);
}
/// Extract the inalloca type for a parameter.
Type *getParamInAllocaType(unsigned ArgNo) const {
return AttributeSets.getParamInAllocaType(ArgNo);
}
/// Extract the byref type for a parameter.
Type *getParamByRefType(unsigned ArgNo) const {
return AttributeSets.getParamByRefType(ArgNo);
}
/// Extract the preallocated type for a parameter.
Type *getParamPreallocatedType(unsigned ArgNo) const {
return AttributeSets.getParamPreallocatedType(ArgNo);
}
/// Extract the number of dereferenceable bytes for a parameter.
/// @param ArgNo Index of an argument, with 0 being the first function arg.
uint64_t getParamDereferenceableBytes(unsigned ArgNo) const {
return AttributeSets.getParamDereferenceableBytes(ArgNo);
}
/// Extract the number of dereferenceable_or_null bytes for a
/// parameter.
/// @param ArgNo AttributeList ArgNo, referring to an argument.
uint64_t getParamDereferenceableOrNullBytes(unsigned ArgNo) const {
return AttributeSets.getParamDereferenceableOrNullBytes(ArgNo);
}
/// A function will have the "coroutine.presplit" attribute if it's
/// a coroutine and has not yet gone through the full CoroSplit pass.
bool isPresplitCoroutine() const {
return hasFnAttribute("coroutine.presplit");
}
/// Determine if the function does not access memory.
bool doesNotAccessMemory() const {
return hasFnAttribute(Attribute::ReadNone);
}
void setDoesNotAccessMemory() {
addFnAttr(Attribute::ReadNone);
}
/// Determine if the function does not access or only reads memory.
bool onlyReadsMemory() const {
return doesNotAccessMemory() || hasFnAttribute(Attribute::ReadOnly);
}
void setOnlyReadsMemory() {
addFnAttr(Attribute::ReadOnly);
}
/// Determine if the function does not access or only writes memory.
bool onlyWritesMemory() const {
return doesNotAccessMemory() || hasFnAttribute(Attribute::WriteOnly);
}
void setOnlyWritesMemory() {
addFnAttr(Attribute::WriteOnly);
}
/// Determine if the call can access memory only using pointers based
/// on its arguments.
bool onlyAccessesArgMemory() const {
return hasFnAttribute(Attribute::ArgMemOnly);
}
void setOnlyAccessesArgMemory() { addFnAttr(Attribute::ArgMemOnly); }
/// Determine if the function may only access memory that is
/// inaccessible from the IR.
bool onlyAccessesInaccessibleMemory() const {
return hasFnAttribute(Attribute::InaccessibleMemOnly);
}
void setOnlyAccessesInaccessibleMemory() {
addFnAttr(Attribute::InaccessibleMemOnly);
}
/// Determine if the function may only access memory that is
/// either inaccessible from the IR or pointed to by its arguments.
bool onlyAccessesInaccessibleMemOrArgMem() const {
return hasFnAttribute(Attribute::InaccessibleMemOrArgMemOnly);
}
void setOnlyAccessesInaccessibleMemOrArgMem() {
addFnAttr(Attribute::InaccessibleMemOrArgMemOnly);
}
/// Determine if the function cannot return.
bool doesNotReturn() const {
return hasFnAttribute(Attribute::NoReturn);
}
void setDoesNotReturn() {
addFnAttr(Attribute::NoReturn);
}
/// Determine if the function should not perform indirect branch tracking.
bool doesNoCfCheck() const { return hasFnAttribute(Attribute::NoCfCheck); }
/// Determine if the function cannot unwind.
bool doesNotThrow() const {
return hasFnAttribute(Attribute::NoUnwind);
}
void setDoesNotThrow() {
addFnAttr(Attribute::NoUnwind);
}
/// Determine if the call cannot be duplicated.
bool cannotDuplicate() const {
return hasFnAttribute(Attribute::NoDuplicate);
}
void setCannotDuplicate() {
addFnAttr(Attribute::NoDuplicate);
}
/// Determine if the call is convergent.
bool isConvergent() const {
return hasFnAttribute(Attribute::Convergent);
}
void setConvergent() {
addFnAttr(Attribute::Convergent);
}
void setNotConvergent() {
removeFnAttr(Attribute::Convergent);
}
/// Determine if the call is speculatable, i.e. has no side effects and may
/// be executed speculatively.
bool isSpeculatable() const {
return hasFnAttribute(Attribute::Speculatable);
}
void setSpeculatable() {
addFnAttr(Attribute::Speculatable);
}
/// Determine if the call is known not to deallocate (free) memory.
bool doesNotFreeMemory() const {
return onlyReadsMemory() || hasFnAttribute(Attribute::NoFree);
}
void setDoesNotFreeMemory() {
addFnAttr(Attribute::NoFree);
}
/// Determine if the call cannot synchronize with other threads (nosync)
bool hasNoSync() const {
return hasFnAttribute(Attribute::NoSync);
}
void setNoSync() {
addFnAttr(Attribute::NoSync);
}
/// Determine if the function is known not to recurse, directly or
/// indirectly.
bool doesNotRecurse() const {
return hasFnAttribute(Attribute::NoRecurse);
}
void setDoesNotRecurse() {
addFnAttr(Attribute::NoRecurse);
}
/// Determine if the function is required to make forward progress.
bool mustProgress() const {
return hasFnAttribute(Attribute::MustProgress) ||
hasFnAttribute(Attribute::WillReturn);
}
void setMustProgress() { addFnAttr(Attribute::MustProgress); }
/// Determine if the function will return.
bool willReturn() const { return hasFnAttribute(Attribute::WillReturn); }
void setWillReturn() { addFnAttr(Attribute::WillReturn); }
/// True if the ABI mandates (or the user requested) that this
/// function be in an unwind table.
bool hasUWTable() const {
return hasFnAttribute(Attribute::UWTable);
}
void setHasUWTable() {
addFnAttr(Attribute::UWTable);
}
/// True if this function needs an unwind table.
bool needsUnwindTableEntry() const {
return hasUWTable() || !doesNotThrow() || hasPersonalityFn();
}
/// Determine if the function returns a structure through its first
/// or second pointer argument.
bool hasStructRetAttr() const {
return AttributeSets.hasParamAttr(0, Attribute::StructRet) ||
AttributeSets.hasParamAttr(1, Attribute::StructRet);
}
/// Determine if the return value is marked with the NoAlias
/// attribute.
bool returnDoesNotAlias() const {
return AttributeSets.hasRetAttr(Attribute::NoAlias);
}
void setReturnDoesNotAlias() { addRetAttr(Attribute::NoAlias); }
/// Do not optimize this function (-O0).
bool hasOptNone() const { return hasFnAttribute(Attribute::OptimizeNone); }
/// Optimize this function for minimum size (-Oz).
bool hasMinSize() const { return hasFnAttribute(Attribute::MinSize); }
/// Optimize this function for size (-Os) or minimum size (-Oz).
bool hasOptSize() const {
return hasFnAttribute(Attribute::OptimizeForSize) || hasMinSize();
}
/// Returns the denormal handling type for the default rounding mode of the
/// function.
DenormalMode getDenormalMode(const fltSemantics &FPType) const;
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a Function) from the Function Src to this one.
void copyAttributesFrom(const Function *Src);
/// deleteBody - This method deletes the body of the function, and converts
/// the linkage to external.
///
void deleteBody() {
dropAllReferences();
setLinkage(ExternalLinkage);
}
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
void removeFromParent();
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
void eraseFromParent();
/// Steal arguments from another function.
///
/// Drop this function's arguments and splice in the ones from \c Src.
/// Requires that this has no function body.
void stealArgumentListFrom(Function &Src);
/// Get the underlying elements of the Function... the basic block list is
/// empty for external functions.
///
const BasicBlockListType &getBasicBlockList() const { return BasicBlocks; }
BasicBlockListType &getBasicBlockList() { return BasicBlocks; }
static BasicBlockListType Function::*getSublistAccess(BasicBlock*) {
return &Function::BasicBlocks;
}
const BasicBlock &getEntryBlock() const { return front(); }
BasicBlock &getEntryBlock() { return front(); }
//===--------------------------------------------------------------------===//
// Symbol Table Accessing functions...
/// getSymbolTable() - Return the symbol table if any, otherwise nullptr.
///
inline ValueSymbolTable *getValueSymbolTable() { return SymTab.get(); }
inline const ValueSymbolTable *getValueSymbolTable() const {
return SymTab.get();
}
//===--------------------------------------------------------------------===//
// BasicBlock iterator forwarding functions
//
iterator begin() { return BasicBlocks.begin(); }
const_iterator begin() const { return BasicBlocks.begin(); }
iterator end () { return BasicBlocks.end(); }
const_iterator end () const { return BasicBlocks.end(); }
size_t size() const { return BasicBlocks.size(); }
bool empty() const { return BasicBlocks.empty(); }
const BasicBlock &front() const { return BasicBlocks.front(); }
BasicBlock &front() { return BasicBlocks.front(); }
const BasicBlock &back() const { return BasicBlocks.back(); }
BasicBlock &back() { return BasicBlocks.back(); }
/// @name Function Argument Iteration
/// @{
arg_iterator arg_begin() {
CheckLazyArguments();
return Arguments;
}
const_arg_iterator arg_begin() const {
CheckLazyArguments();
return Arguments;
}
arg_iterator arg_end() {
CheckLazyArguments();
return Arguments + NumArgs;
}
const_arg_iterator arg_end() const {
CheckLazyArguments();
return Arguments + NumArgs;
}
Argument* getArg(unsigned i) const {
assert(i < NumArgs && "getArg() out of range!");
CheckLazyArguments();
return Arguments + i;
}
iterator_range<arg_iterator> args() {
return make_range(arg_begin(), arg_end());
}
iterator_range<const_arg_iterator> args() const {
return make_range(arg_begin(), arg_end());
}
/// @}
size_t arg_size() const { return NumArgs; }
bool arg_empty() const { return arg_size() == 0; }
/// Check whether this function has a personality function.
bool hasPersonalityFn() const {
return getSubclassDataFromValue() & (1<<3);
}
/// Get the personality function associated with this function.
Constant *getPersonalityFn() const;
void setPersonalityFn(Constant *Fn);
/// Check whether this function has prefix data.
bool hasPrefixData() const {
return getSubclassDataFromValue() & (1<<1);
}
/// Get the prefix data associated with this function.
Constant *getPrefixData() const;
void setPrefixData(Constant *PrefixData);
/// Check whether this function has prologue data.
bool hasPrologueData() const {
return getSubclassDataFromValue() & (1<<2);
}
/// Get the prologue data associated with this function.
Constant *getPrologueData() const;
void setPrologueData(Constant *PrologueData);
/// Print the function to an output stream with an optional
/// AssemblyAnnotationWriter.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW = nullptr,
bool ShouldPreserveUseListOrder = false,
bool IsForDebug = false) const;
/// viewCFG - This function is meant for use from the debugger. You can just
/// say 'call F->viewCFG()' and a ghostview window should pop up from the
/// program, displaying the CFG of the current function with the code for each
/// basic block inside. This depends on there being a 'dot' and 'gv' program
/// in your path.
///
void viewCFG() const;
/// Extended form to print edge weights.
void viewCFG(bool ViewCFGOnly, const BlockFrequencyInfo *BFI,
const BranchProbabilityInfo *BPI) const;
/// viewCFGOnly - This function is meant for use from the debugger. It works
/// just like viewCFG, but it does not include the contents of basic blocks
/// into the nodes, just the label. If you are only interested in the CFG
/// this can make the graph smaller.
///
void viewCFGOnly() const;
/// Extended form to print edge weights.
void viewCFGOnly(const BlockFrequencyInfo *BFI,
const BranchProbabilityInfo *BPI) const;
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal;
}
/// dropAllReferences() - This method causes all the subinstructions to "let
/// go" of all references that they are maintaining. This allows one to
/// 'delete' a whole module at a time, even though there may be circular
/// references... first all references are dropped, and all use counts go to
/// zero. Then everything is deleted for real. Note that no operations are
/// valid on an object that has "dropped all references", except operator
/// delete.
///
/// Since no other object in the module can have references into the body of a
/// function, dropping all references deletes the entire body of the function,
/// including any contained basic blocks.
///
void dropAllReferences();
/// hasAddressTaken - returns true if there are any uses of this function
/// other than direct calls or invokes to it, or blockaddress expressions.
/// Optionally passes back an offending user for diagnostic purposes, and
/// optionally ignores callback uses, assume-like pointer annotation calls,
/// references in llvm.used and llvm.compiler.used variables, and the
/// "clang.arc.attachedcall" operand bundle.
bool hasAddressTaken(const User ** = nullptr,
bool IgnoreCallbackUses = false,
bool IgnoreAssumeLikeCalls = true,
bool IgnoreLLVMUsed = false,
bool IgnoreARCAttachedCall = false) const;
/// isDefTriviallyDead - Return true if it is trivially safe to remove
/// this function definition from the module (because it isn't externally
/// visible, does not have its address taken, and has no callers). To make
/// this more accurate, call removeDeadConstantUsers first.
bool isDefTriviallyDead() const;
/// callsFunctionThatReturnsTwice - Return true if the function has a call to
/// setjmp or other function that gcc recognizes as "returning twice".
bool callsFunctionThatReturnsTwice() const;
/// Set the attached subprogram.
///
/// Calls \a setMetadata() with \a LLVMContext::MD_dbg.
void setSubprogram(DISubprogram *SP);
/// Get the attached subprogram.
///
/// Calls \a getMetadata() with \a LLVMContext::MD_dbg and casts the result
/// to \a DISubprogram.
DISubprogram *getSubprogram() const;
/// Returns true if we should emit debug info for profiling.
bool isDebugInfoForProfiling() const;
/// Check if null pointer dereferencing is considered undefined behavior for
/// the function.
/// Return value: false => null pointer dereference is undefined.
/// Return value: true => null pointer dereference is not undefined.
bool nullPointerIsDefined() const;
private:
void allocHungoffUselist();
template<int Idx> void setHungoffOperand(Constant *C);
/// Shadow Value::setValueSubclassData with a private forwarding method so
/// that subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
Value::setValueSubclassData(D);
}
void setValueSubclassDataBit(unsigned Bit, bool On);
};
/// Check whether null pointer dereferencing is considered undefined behavior
/// for a given function or an address space.
/// Null pointer access in non-zero address space is not considered undefined.
/// Return value: false => null pointer dereference is undefined.
/// Return value: true => null pointer dereference is not undefined.
bool NullPointerIsDefined(const Function *F, unsigned AS = 0);
template <>
struct OperandTraits<Function> : public HungoffOperandTraits<3> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(Function, Value)
} // end namespace llvm
#endif // LLVM_IR_FUNCTION_H
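A minimal sketch (not part of the header above) of creating a Function, setting an attribute, and filling in a body; the names are illustrative.

#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Module.h"

using namespace llvm;

Function *makeAddFn(Module &M) {
  LLVMContext &Ctx = M.getContext();
  Type *I32 = Type::getInt32Ty(Ctx);
  FunctionType *FT = FunctionType::get(I32, {I32, I32}, /*isVarArg=*/false);
  // This overload places the function in the module's program address space.
  Function *F = Function::Create(FT, Function::ExternalLinkage, "add", M);
  F->setDoesNotThrow(); // adds Attribute::NoUnwind
  BasicBlock *BB = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> Builder(BB);
  Builder.CreateRet(Builder.CreateAdd(F->getArg(0), F->getArg(1)));
  return F;
}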

View File

@@ -0,0 +1,139 @@
//===- llvm/CodeGen/GCStrategy.h - Garbage collection -----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// GCStrategy coordinates code generation algorithms and implements some itself
// in order to generate code compatible with a target code generator as
// specified in a function's 'gc' attribute. Algorithms are enabled by setting
// flags in a subclass's constructor, and some virtual methods can be
// overridden.
//
// GCStrategy is relevant for implementations using either gc.root or
// gc.statepoint based lowering strategies, but is currently focused mostly on
// options for gc.root. This will change over time.
//
// When requested by a subclass of GCStrategy, the gc.root implementation will
// populate GCModuleInfo and GCFunctionInfo with information about each
// Function in
// the Module that opts in to garbage collection. Specifically:
//
// - Safe points
// Garbage collection is generally only possible at certain points in code.
// GCStrategy can request that the collector insert such points:
//
// - At and after any call to a subroutine
// - Before returning from the current function
// - Before backwards branches (loops)
//
// - Roots
// When a reference to a GC-allocated object exists on the stack, it must be
// stored in an alloca registered with llvm.gcroot.
//
// This information can be used to emit the metadata tables which are required
// by
// the target garbage collector runtime.
//
// When used with gc.statepoint, information about safepoint and roots can be
// found in the binary StackMap section after code generation. Safepoint
// placement is currently the responsibility of the frontend, though late
// insertion support is planned. gc.statepoint does not currently support
// custom stack map formats; such formats can be generated by parsing the
// standard stack map section if desired.
//
// The read and write barrier support can be used with either implementation.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GCSTRATEGY_H
#define LLVM_IR_GCSTRATEGY_H
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/Registry.h"
#include <string>
namespace llvm {
class Type;
/// GCStrategy describes a garbage collector algorithm's code generation
/// requirements, and provides overridable hooks for those needs which cannot
/// be abstractly described. GCStrategy objects must be looked up through
/// the Function. The objects themselves are owned by the Context and must
/// be immutable.
class GCStrategy {
private:
friend class GCModuleInfo;
std::string Name;
protected:
bool UseStatepoints = false; ///< Uses gc.statepoints as opposed to gc.roots.
///< If set, none of the other options can be
///< anything but their default values.
bool NeededSafePoints = false; ///< If set, calls are inferred to be safepoints.
bool UsesMetadata = false; ///< If set, backend must emit metadata tables.
public:
GCStrategy();
virtual ~GCStrategy() = default;
/// Return the name of the GC strategy. This is the value of the collector
/// name string specified on functions which use this strategy.
const std::string &getName() const { return Name; }
/// Returns true if this strategy is expecting the use of gc.statepoints,
/// and false otherwise.
bool useStatepoints() const { return UseStatepoints; }
/** @name Statepoint Specific Properties */
///@{
/// If the type specified can be reliably distinguished, returns true for
/// pointers to GC managed locations and false for pointers to non-GC
/// managed locations. Note a GCStrategy can always return 'None' (i.e. an
/// empty optional) indicating it can't reliably distinguish.
virtual Optional<bool> isGCManagedPointer(const Type *Ty) const {
return None;
}
///@}
/** @name GCRoot Specific Properties
* These properties and overrides only apply to collector strategies using
* GCRoot.
*/
///@{
/// True if safe points need to be inferred on call sites
bool needsSafePoints() const { return NeededSafePoints; }
/// If set, appropriate metadata tables must be emitted by the back-end
/// (assembler, JIT, or otherwise). For statepoint, this method is
/// currently unsupported. The stackmap information can be found in the
/// StackMap section as described in the documentation.
bool usesMetadata() const { return UsesMetadata; }
///@}
};
/// Subclasses of GCStrategy are made available for use during compilation by
/// adding them to the global GCRegistry. This can be done either within the
/// LLVM source tree or via a loadable plugin. An example registration
/// would be:
/// static GCRegistry::Add<CustomGC> X("custom-name",
/// "my custom supper fancy gc strategy");
///
/// Note that to use a custom GCMetadataPrinter w/gc.roots, you must also
/// register your GCMetadataPrinter subclass with the
/// GCMetadataPrinterRegistry as well.
using GCRegistry = Registry<GCStrategy>;
/// Lookup the GCStrategy object associated with the given gc name.
std::unique_ptr<GCStrategy> getGCStrategy(const StringRef Name);
} // end namespace llvm
#endif // LLVM_IR_GCSTRATEGY_H
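A minimal registration sketch following the example in the comment above; the class name and strings are illustrative, not an in-tree strategy.

#include "llvm/IR/GCStrategy.h"

using namespace llvm;

namespace {
// A gc.root-style strategy: infer safepoints and emit metadata tables.
class MyRootGC : public GCStrategy {
public:
  MyRootGC() {
    NeededSafePoints = true; // treat call sites as safepoints
    UsesMetadata = true;     // ask the backend for GC metadata tables
  }
};
} // end anonymous namespace

static GCRegistry::Add<MyRootGC> X("my-root-gc",
                                   "illustrative gc.root strategy");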

View File

@@ -0,0 +1,51 @@
//===- GVMaterializer.h - Interface for GV materializers --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file provides an abstract interface for loading a module from some
// place. This interface allows incremental or random access loading of
// functions from the file. This is useful for applications like JIT compilers
// or interprocedural optimizers that do not need the entire program in memory
// at the same time.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GVMATERIALIZER_H
#define LLVM_IR_GVMATERIALIZER_H
#include <vector>
namespace llvm {
class Error;
class GlobalValue;
class StructType;
class GVMaterializer {
protected:
GVMaterializer() = default;
public:
virtual ~GVMaterializer();
/// Make sure the given GlobalValue is fully read.
///
virtual Error materialize(GlobalValue *GV) = 0;
/// Make sure the entire Module has been completely read.
///
virtual Error materializeModule() = 0;
virtual Error materializeMetadata() = 0;
virtual void setStripDebugInfo() = 0;
virtual std::vector<StructType *> getIdentifiedStructTypes() const = 0;
};
} // end namespace llvm
#endif // LLVM_IR_GVMATERIALIZER_H
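A minimal sketch of the subclass contract above: a no-op materializer for a module that is already fully in memory. Purely illustrative.

#include "llvm/IR/GVMaterializer.h"
#include "llvm/Support/Error.h"

using namespace llvm;

namespace {
struct InMemoryMaterializer final : GVMaterializer {
  // Nothing is loaded lazily, so every request trivially succeeds.
  Error materialize(GlobalValue *) override { return Error::success(); }
  Error materializeModule() override { return Error::success(); }
  Error materializeMetadata() override { return Error::success(); }
  void setStripDebugInfo() override {}
  std::vector<StructType *> getIdentifiedStructTypes() const override {
    return {};
  }
};
} // end anonymous namespace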

View File

@@ -0,0 +1,159 @@
//===- GetElementPtrTypeIterator.h ------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements an iterator for walking through the types indexed by
// getelementptr instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#define LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/User.h"
#include "llvm/Support/Casting.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
namespace llvm {
template <typename ItTy = User::const_op_iterator>
class generic_gep_type_iterator {
ItTy OpIt;
PointerUnion<StructType *, Type *> CurTy;
generic_gep_type_iterator() = default;
public:
using iterator_category = std::forward_iterator_tag;
using value_type = Type *;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
static generic_gep_type_iterator begin(Type *Ty, ItTy It) {
generic_gep_type_iterator I;
I.CurTy = Ty;
I.OpIt = It;
return I;
}
static generic_gep_type_iterator end(ItTy It) {
generic_gep_type_iterator I;
I.OpIt = It;
return I;
}
bool operator==(const generic_gep_type_iterator &x) const {
return OpIt == x.OpIt;
}
bool operator!=(const generic_gep_type_iterator &x) const {
return !operator==(x);
}
// FIXME: Make this the iterator's operator*() after the 4.0 release.
// operator*() had a different meaning in earlier releases, so we're
// temporarily not giving this iterator an operator*() to avoid a subtle
// semantics break.
Type *getIndexedType() const {
if (auto *T = CurTy.dyn_cast<Type *>())
return T;
return CurTy.get<StructType *>()->getTypeAtIndex(getOperand());
}
Value *getOperand() const { return const_cast<Value *>(&**OpIt); }
generic_gep_type_iterator &operator++() { // Preincrement
Type *Ty = getIndexedType();
if (auto *ATy = dyn_cast<ArrayType>(Ty))
CurTy = ATy->getElementType();
else if (auto *VTy = dyn_cast<VectorType>(Ty))
CurTy = VTy->getElementType();
else
CurTy = dyn_cast<StructType>(Ty);
++OpIt;
return *this;
}
generic_gep_type_iterator operator++(int) { // Postincrement
generic_gep_type_iterator tmp = *this;
++*this;
return tmp;
}
// All of the below API is for querying properties of the "outer type", i.e.
// the type that contains the indexed type. Most of the time this is just
// the type that was visited immediately prior to the indexed type, but for
// the first element this is an unbounded array of the GEP's source element
// type, for which there is no clearly corresponding IR type (we've
// historically used a pointer type as the outer type in this case, but
// pointers will soon lose their element type).
//
// FIXME: Most current users of this class are just interested in byte
// offsets (a few need to know whether the outer type is a struct because
// they are trying to replace a constant with a variable, which is only
// legal for arrays, e.g. canReplaceOperandWithVariable in SimplifyCFG.cpp);
// we should provide a more minimal API here that exposes not much more than
// that.
bool isStruct() const { return CurTy.is<StructType *>(); }
bool isSequential() const { return CurTy.is<Type *>(); }
StructType *getStructType() const { return CurTy.get<StructType *>(); }
StructType *getStructTypeOrNull() const {
return CurTy.dyn_cast<StructType *>();
}
};
using gep_type_iterator = generic_gep_type_iterator<>;
inline gep_type_iterator gep_type_begin(const User *GEP) {
auto *GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp->getSourceElementType(),
GEP->op_begin() + 1);
}
inline gep_type_iterator gep_type_end(const User *GEP) {
return gep_type_iterator::end(GEP->op_end());
}
inline gep_type_iterator gep_type_begin(const User &GEP) {
auto &GEPOp = cast<GEPOperator>(GEP);
return gep_type_iterator::begin(
GEPOp.getSourceElementType(),
GEP.op_begin() + 1);
}
inline gep_type_iterator gep_type_end(const User &GEP) {
return gep_type_iterator::end(GEP.op_end());
}
template<typename T>
inline generic_gep_type_iterator<const T *>
gep_type_begin(Type *Op0, ArrayRef<T> A) {
return generic_gep_type_iterator<const T *>::begin(Op0, A.begin());
}
template<typename T>
inline generic_gep_type_iterator<const T *>
gep_type_end(Type * /*Op0*/, ArrayRef<T> A) {
return generic_gep_type_iterator<const T *>::end(A.end());
}
} // end namespace llvm
#endif // LLVM_IR_GETELEMENTPTRTYPEITERATOR_H
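A minimal sketch of walking a GEP with this iterator, distinguishing struct field steps from sequential (array/vector) steps; the function name is illustrative.

#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/Instructions.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

void describeGEP(const GetElementPtrInst *GEP) {
  for (gep_type_iterator GTI = gep_type_begin(GEP), E = gep_type_end(GEP);
       GTI != E; ++GTI) {
    if (GTI.isStruct()) // the index selects a field of the outer struct
      errs() << "struct step through " << *GTI.getStructType() << "\n";
    else // array, vector, or the initial unbounded-array step
      errs() << "sequential step into " << *GTI.getIndexedType() << "\n";
  }
}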

View File

@@ -0,0 +1,114 @@
//===-------- llvm/GlobalAlias.h - GlobalAlias class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the GlobalAlias class, which
// represents a single function or variable alias in the IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GLOBALALIAS_H
#define LLVM_IR_GLOBALALIAS_H
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"
namespace llvm {
class Twine;
class Module;
template <typename ValueSubClass> class SymbolTableListTraits;
class GlobalAlias : public GlobalValue, public ilist_node<GlobalAlias> {
friend class SymbolTableListTraits<GlobalAlias>;
GlobalAlias(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
const Twine &Name, Constant *Aliasee, Module *Parent);
public:
GlobalAlias(const GlobalAlias &) = delete;
GlobalAlias &operator=(const GlobalAlias &) = delete;
/// If a parent module is specified, the alias is automatically inserted into
/// the end of the specified module's alias list.
static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
LinkageTypes Linkage, const Twine &Name,
Constant *Aliasee, Module *Parent);
// Without the Aliasee.
static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
LinkageTypes Linkage, const Twine &Name,
Module *Parent);
// The module is taken from the Aliasee.
static GlobalAlias *create(Type *Ty, unsigned AddressSpace,
LinkageTypes Linkage, const Twine &Name,
GlobalValue *Aliasee);
// Type, Parent and AddressSpace taken from the Aliasee.
static GlobalAlias *create(LinkageTypes Linkage, const Twine &Name,
GlobalValue *Aliasee);
// Linkage, Type, Parent and AddressSpace taken from the Aliasee.
static GlobalAlias *create(const Twine &Name, GlobalValue *Aliasee);
// allocate space for exactly one operand
void *operator new(size_t S) { return User::operator new(S, 1); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
void copyAttributesFrom(const GlobalAlias *Src) {
GlobalValue::copyAttributesFrom(Src);
}
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
void removeFromParent();
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
void eraseFromParent();
/// These methods retrieve and set alias target.
void setAliasee(Constant *Aliasee);
const Constant *getAliasee() const {
return static_cast<Constant *>(Op<0>().get());
}
Constant *getAliasee() { return static_cast<Constant *>(Op<0>().get()); }
const GlobalObject *getAliaseeObject() const;
GlobalObject *getAliaseeObject() {
return const_cast<GlobalObject *>(
static_cast<const GlobalAlias *>(this)->getAliaseeObject());
}
static bool isValidLinkage(LinkageTypes L) {
return isExternalLinkage(L) || isLocalLinkage(L) ||
isWeakLinkage(L) || isLinkOnceLinkage(L);
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::GlobalAliasVal;
}
};
template <>
struct OperandTraits<GlobalAlias>
: public FixedNumOperandTraits<GlobalAlias, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalAlias, Constant)
} // end namespace llvm
#endif // LLVM_IR_GLOBALALIAS_H
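A minimal sketch (illustrative names): exporting an existing function under a second symbol via an alias.

#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"

using namespace llvm;

GlobalAlias *addSecondName(Function *Target, const Twine &AliasName) {
  // Linkage, type, parent, and address space are all taken from the aliasee.
  return GlobalAlias::create(AliasName, Target);
}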

View File

@@ -0,0 +1,101 @@
//===-------- llvm/GlobalIFunc.h - GlobalIFunc class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file contains the declaration of the GlobalIFunc class, which
/// represents a single indirect function in the IR. An indirect function
/// uses the ELF symbol type extension to mark that the address of a
/// declaration should be resolved at runtime by calling a resolver function.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GLOBALIFUNC_H
#define LLVM_IR_GLOBALIFUNC_H
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"
namespace llvm {
class Twine;
class Module;
// Traits class for using GlobalIFunc in symbol table in Module.
template <typename ValueSubClass> class SymbolTableListTraits;
class GlobalIFunc final : public GlobalObject, public ilist_node<GlobalIFunc> {
friend class SymbolTableListTraits<GlobalIFunc>;
GlobalIFunc(Type *Ty, unsigned AddressSpace, LinkageTypes Linkage,
const Twine &Name, Constant *Resolver, Module *Parent);
public:
GlobalIFunc(const GlobalIFunc &) = delete;
GlobalIFunc &operator=(const GlobalIFunc &) = delete;
/// If a parent module is specified, the ifunc is automatically inserted into
/// the end of the specified module's ifunc list.
static GlobalIFunc *create(Type *Ty, unsigned AddressSpace,
LinkageTypes Linkage, const Twine &Name,
Constant *Resolver, Module *Parent);
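// For example (illustrative; FnTy, Resolver, and M are assumed to be in
// scope), an ifunc whose target is chosen at load time by Resolver:
//   GlobalIFunc *GI = GlobalIFunc::create(FnTy, /*AddressSpace=*/0,
//       GlobalValue::ExternalLinkage, "fast_memcpy", Resolver, &M);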
// allocate space for exactly one operand
void *operator new(size_t S) { return User::operator new(S, 1); }
void operator delete(void *Ptr) { User::operator delete(Ptr); }
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Constant);
void copyAttributesFrom(const GlobalIFunc *Src) {
GlobalObject::copyAttributesFrom(Src);
}
/// This method unlinks 'this' from the containing module, but does not
/// delete it.
void removeFromParent();
/// This method unlinks 'this' from the containing module and deletes it.
void eraseFromParent();
/// These methods retrieve and set the ifunc resolver function.
void setResolver(Constant *Resolver) { Op<0>().set(Resolver); }
const Constant *getResolver() const {
return static_cast<Constant *>(Op<0>().get());
}
Constant *getResolver() { return static_cast<Constant *>(Op<0>().get()); }
// Return the resolver function after peeling off potential ConstantExpr
// indirection.
const Function *getResolverFunction() const;
Function *getResolverFunction() {
return const_cast<Function *>(
static_cast<const GlobalIFunc *>(this)->getResolverFunction());
}
static FunctionType *getResolverFunctionType(Type *IFuncValTy) {
return FunctionType::get(IFuncValTy->getPointerTo(), false);
}
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::GlobalIFuncVal;
}
};
template <>
struct OperandTraits<GlobalIFunc>
: public FixedNumOperandTraits<GlobalIFunc, 1> {};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalIFunc, Constant)
} // end namespace llvm
#endif // LLVM_IR_GLOBALIFUNC_H

View File

@@ -0,0 +1,172 @@
//===-- llvm/GlobalObject.h - Class to represent global objects -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This represents an independent object. That is, a function, a global
// variable, or an ifunc, but not an alias.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GLOBALOBJECT_H
#define LLVM_IR_GLOBALOBJECT_H
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
namespace llvm {
class Comdat;
class Metadata;
class GlobalObject : public GlobalValue {
public:
// VCallVisibility - values for visibility metadata attached to vtables. This
// describes the scope in which a virtual call could end up being dispatched
// through this vtable.
enum VCallVisibility {
// Type is potentially visible to external code.
VCallVisibilityPublic = 0,
// Type is only visible to code which will be in the current Module after
// LTO internalization.
VCallVisibilityLinkageUnit = 1,
// Type is only visible to code in the current Module.
VCallVisibilityTranslationUnit = 2,
};
protected:
GlobalObject(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
LinkageTypes Linkage, const Twine &Name,
unsigned AddressSpace = 0)
: GlobalValue(Ty, VTy, Ops, NumOps, Linkage, Name, AddressSpace),
ObjComdat(nullptr) {
setGlobalValueSubClassData(0);
}
~GlobalObject();
Comdat *ObjComdat;
enum {
LastAlignmentBit = 5,
HasSectionHashEntryBit,
GlobalObjectBits,
};
static const unsigned GlobalObjectSubClassDataBits =
GlobalValueSubClassDataBits - GlobalObjectBits;
private:
static const unsigned AlignmentBits = LastAlignmentBit + 1;
static const unsigned AlignmentMask = (1 << AlignmentBits) - 1;
static const unsigned GlobalObjectMask = (1 << GlobalObjectBits) - 1;
public:
GlobalObject(const GlobalObject &) = delete;
/// FIXME: Remove this function once transition to Align is over.
uint64_t getAlignment() const {
MaybeAlign Align = getAlign();
return Align ? Align->value() : 0;
}
/// Returns the alignment of the given variable or function.
///
/// Note that for functions this is the alignment of the code, not the
/// alignment of a function pointer.
MaybeAlign getAlign() const {
unsigned Data = getGlobalValueSubClassData();
unsigned AlignmentData = Data & AlignmentMask;
return decodeMaybeAlign(AlignmentData);
}
void setAlignment(MaybeAlign Align);
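// For example (illustrative; GO is a GlobalObject * and useAlignment a
// stand-in consumer), prefer the MaybeAlign accessor over the deprecated
// getAlignment():
//   if (MaybeAlign A = GO->getAlign())
//     useAlignment(A->value());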
unsigned getGlobalObjectSubClassData() const {
unsigned ValueData = getGlobalValueSubClassData();
return ValueData >> GlobalObjectBits;
}
void setGlobalObjectSubClassData(unsigned Val) {
unsigned OldData = getGlobalValueSubClassData();
setGlobalValueSubClassData((OldData & GlobalObjectMask) |
(Val << GlobalObjectBits));
assert(getGlobalObjectSubClassData() == Val && "representation error");
}
/// Check if this global has a custom object file section.
///
/// This is more efficient than calling getSection() and checking for an empty
/// string.
bool hasSection() const {
return getGlobalValueSubClassData() & (1 << HasSectionHashEntryBit);
}
/// Get the custom section of this global if it has one.
///
/// If this global does not have a custom section, this will be empty and the
/// default object file section (.text, .data, etc) will be used.
StringRef getSection() const {
return hasSection() ? getSectionImpl() : StringRef();
}
/// Change the section for this global.
///
/// Setting the section to the empty string tells LLVM to choose an
/// appropriate default object file section.
void setSection(StringRef S);
bool hasComdat() const { return getComdat() != nullptr; }
const Comdat *getComdat() const { return ObjComdat; }
Comdat *getComdat() { return ObjComdat; }
void setComdat(Comdat *C);
using Value::addMetadata;
using Value::clearMetadata;
using Value::eraseMetadata;
using Value::getAllMetadata;
using Value::getMetadata;
using Value::hasMetadata;
using Value::setMetadata;
/// Copy metadata from Src, adjusting offsets by Offset.
void copyMetadata(const GlobalObject *Src, unsigned Offset);
void addTypeMetadata(unsigned Offset, Metadata *TypeID);
void setVCallVisibilityMetadata(VCallVisibility Visibility);
VCallVisibility getVCallVisibility() const;
/// Returns true if the alignment of the value can be unilaterally
/// increased.
///
/// Note that for functions this is the alignment of the code, not the
/// alignment of a function pointer.
bool canIncreaseAlignment() const;
protected:
void copyAttributesFrom(const GlobalObject *Src);
public:
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal ||
V->getValueID() == Value::GlobalVariableVal ||
V->getValueID() == Value::GlobalIFuncVal;
}
private:
void setGlobalObjectFlag(unsigned Bit, bool Val) {
unsigned Mask = 1 << Bit;
setGlobalValueSubClassData((~Mask & getGlobalValueSubClassData()) |
(Val ? Mask : 0u));
}
StringRef getSectionImpl() const;
};
} // end namespace llvm
#endif // LLVM_IR_GLOBALOBJECT_H

View File

@@ -0,0 +1,599 @@
//===-- llvm/GlobalValue.h - Class to represent a global value --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines GlobalValue, a common base class of all globally
// definable objects. As such, it is subclassed by GlobalVariable, GlobalAlias,
// and Function. This is
// used because you can do certain things with these global objects that you
// can't do to anything else. For example, use the address of one as a
// constant.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GLOBALVALUE_H
#define LLVM_IR_GLOBALVALUE_H
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MD5.h"
#include <cassert>
#include <cstdint>
#include <string>
namespace llvm {
class Comdat;
class ConstantRange;
class Error;
class GlobalObject;
class Module;
namespace Intrinsic {
typedef unsigned ID;
} // end namespace Intrinsic
class GlobalValue : public Constant {
public:
/// An enumeration for the kinds of linkage for global values.
enum LinkageTypes {
ExternalLinkage = 0,///< Externally visible function
AvailableExternallyLinkage, ///< Available for inspection, not emission.
LinkOnceAnyLinkage, ///< Keep one copy of function when linking (inline)
LinkOnceODRLinkage, ///< Same, but only replaced by something equivalent.
WeakAnyLinkage, ///< Keep one copy of named function when linking (weak)
WeakODRLinkage, ///< Same, but only replaced by something equivalent.
AppendingLinkage, ///< Special purpose, only applies to global arrays
InternalLinkage, ///< Rename collisions when linking (static functions).
PrivateLinkage, ///< Like Internal, but omit from symbol table.
ExternalWeakLinkage,///< ExternalWeak linkage description.
CommonLinkage ///< Tentative definitions.
};
/// An enumeration for the kinds of visibility of global values.
enum VisibilityTypes {
DefaultVisibility = 0, ///< The GV is visible
HiddenVisibility, ///< The GV is hidden
ProtectedVisibility ///< The GV is protected
};
/// Storage classes of global values for PE targets.
enum DLLStorageClassTypes {
DefaultStorageClass = 0,
DLLImportStorageClass = 1, ///< Function to be imported from DLL
DLLExportStorageClass = 2 ///< Function to be accessible from DLL.
};
protected:
GlobalValue(Type *Ty, ValueTy VTy, Use *Ops, unsigned NumOps,
LinkageTypes Linkage, const Twine &Name, unsigned AddressSpace)
: Constant(PointerType::get(Ty, AddressSpace), VTy, Ops, NumOps),
ValueType(Ty), Visibility(DefaultVisibility),
UnnamedAddrVal(unsigned(UnnamedAddr::None)),
DllStorageClass(DefaultStorageClass), ThreadLocal(NotThreadLocal),
HasLLVMReservedName(false), IsDSOLocal(false), HasPartition(false),
IntID((Intrinsic::ID)0U), Parent(nullptr) {
setLinkage(Linkage);
setName(Name);
}
Type *ValueType;
static const unsigned GlobalValueSubClassDataBits = 16;
// All bitfields use unsigned as the underlying type so that MSVC will pack
// them.
unsigned Linkage : 4; // The linkage of this global
unsigned Visibility : 2; // The visibility style of this global
unsigned UnnamedAddrVal : 2; // This value's address is not significant
unsigned DllStorageClass : 2; // DLL storage class
unsigned ThreadLocal : 3; // Is this symbol "Thread Local", if so, what is
// the desired model?
/// True if the function's name starts with "llvm.". This corresponds to the
/// value of Function::isIntrinsic(), which may be true even if
/// Function::intrinsicID() returns Intrinsic::not_intrinsic.
unsigned HasLLVMReservedName : 1;
/// If true then there is a definition within the same linkage unit and that
/// definition cannot be runtime preempted.
unsigned IsDSOLocal : 1;
/// True if this symbol has a partition name assigned (see
/// https://lld.llvm.org/Partitions.html).
unsigned HasPartition : 1;
private:
// Give subclasses access to what otherwise would be wasted padding.
// (16 + 4 + 2 + 2 + 2 + 3 + 1 + 1 + 1) == 32.
unsigned SubClassData : GlobalValueSubClassDataBits;
friend class Constant;
void destroyConstantImpl();
Value *handleOperandChangeImpl(Value *From, Value *To);
/// Returns true if the definition of this global may be replaced by a
/// differently optimized variant of the same source level function at link
/// time.
bool mayBeDerefined() const {
switch (getLinkage()) {
case WeakODRLinkage:
case LinkOnceODRLinkage:
case AvailableExternallyLinkage:
return true;
case WeakAnyLinkage:
case LinkOnceAnyLinkage:
case CommonLinkage:
case ExternalWeakLinkage:
case ExternalLinkage:
case AppendingLinkage:
case InternalLinkage:
case PrivateLinkage:
return isInterposable();
}
llvm_unreachable("Fully covered switch above!");
}
protected:
/// The intrinsic ID for this subclass (which must be a Function).
///
/// This member is defined by this class, but not used for anything.
/// Subclasses can use it to store their intrinsic ID, if they have one.
///
/// This is stored here to save space in Function on 64-bit hosts.
Intrinsic::ID IntID;
unsigned getGlobalValueSubClassData() const {
return SubClassData;
}
void setGlobalValueSubClassData(unsigned V) {
assert(V < (1 << GlobalValueSubClassDataBits) && "It will not fit");
SubClassData = V;
}
Module *Parent; // The containing module.
// Used by SymbolTableListTraits.
void setParent(Module *parent) {
Parent = parent;
}
~GlobalValue() {
removeDeadConstantUsers(); // remove any dead constants using this.
}
public:
enum ThreadLocalMode {
NotThreadLocal = 0,
GeneralDynamicTLSModel,
LocalDynamicTLSModel,
InitialExecTLSModel,
LocalExecTLSModel
};
GlobalValue(const GlobalValue &) = delete;
unsigned getAddressSpace() const;
enum class UnnamedAddr {
None,
Local,
Global,
};
bool hasGlobalUnnamedAddr() const {
return getUnnamedAddr() == UnnamedAddr::Global;
}
/// Returns true if this value's address is not significant in this module.
/// This attribute is intended to be used only by the code generator and LTO
/// to allow the linker to decide whether the global needs to be in the symbol
/// table. It should probably not be used in optimizations, as the value may
/// have uses outside the module; use hasGlobalUnnamedAddr() instead.
bool hasAtLeastLocalUnnamedAddr() const {
return getUnnamedAddr() != UnnamedAddr::None;
}
UnnamedAddr getUnnamedAddr() const {
return UnnamedAddr(UnnamedAddrVal);
}
void setUnnamedAddr(UnnamedAddr Val) { UnnamedAddrVal = unsigned(Val); }
static UnnamedAddr getMinUnnamedAddr(UnnamedAddr A, UnnamedAddr B) {
if (A == UnnamedAddr::None || B == UnnamedAddr::None)
return UnnamedAddr::None;
if (A == UnnamedAddr::Local || B == UnnamedAddr::Local)
return UnnamedAddr::Local;
return UnnamedAddr::Global;
}
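// For example, getMinUnnamedAddr(UnnamedAddr::Global, UnnamedAddr::Local) is
// UnnamedAddr::Local, and combining anything with UnnamedAddr::None yields
// UnnamedAddr::None: the result is the weaker of the two guarantees.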
bool hasComdat() const { return getComdat() != nullptr; }
const Comdat *getComdat() const;
Comdat *getComdat() {
return const_cast<Comdat *>(
static_cast<const GlobalValue *>(this)->getComdat());
}
VisibilityTypes getVisibility() const { return VisibilityTypes(Visibility); }
bool hasDefaultVisibility() const { return Visibility == DefaultVisibility; }
bool hasHiddenVisibility() const { return Visibility == HiddenVisibility; }
bool hasProtectedVisibility() const {
return Visibility == ProtectedVisibility;
}
void setVisibility(VisibilityTypes V) {
assert((!hasLocalLinkage() || V == DefaultVisibility) &&
"local linkage requires default visibility");
Visibility = V;
if (isImplicitDSOLocal())
setDSOLocal(true);
}
/// If the value is "Thread Local", its value isn't shared by the threads.
bool isThreadLocal() const { return getThreadLocalMode() != NotThreadLocal; }
void setThreadLocal(bool Val) {
setThreadLocalMode(Val ? GeneralDynamicTLSModel : NotThreadLocal);
}
void setThreadLocalMode(ThreadLocalMode Val) {
assert(Val == NotThreadLocal || getValueID() != Value::FunctionVal);
ThreadLocal = Val;
}
ThreadLocalMode getThreadLocalMode() const {
return static_cast<ThreadLocalMode>(ThreadLocal);
}
DLLStorageClassTypes getDLLStorageClass() const {
return DLLStorageClassTypes(DllStorageClass);
}
bool hasDLLImportStorageClass() const {
return DllStorageClass == DLLImportStorageClass;
}
bool hasDLLExportStorageClass() const {
return DllStorageClass == DLLExportStorageClass;
}
void setDLLStorageClass(DLLStorageClassTypes C) { DllStorageClass = C; }
bool hasSection() const { return !getSection().empty(); }
StringRef getSection() const;
/// Global values are always pointers.
PointerType *getType() const { return cast<PointerType>(User::getType()); }
Type *getValueType() const { return ValueType; }
bool isImplicitDSOLocal() const {
return hasLocalLinkage() ||
(!hasDefaultVisibility() && !hasExternalWeakLinkage());
}
void setDSOLocal(bool Local) { IsDSOLocal = Local; }
bool isDSOLocal() const {
return IsDSOLocal;
}
bool hasPartition() const {
return HasPartition;
}
StringRef getPartition() const;
void setPartition(StringRef Part);
static LinkageTypes getLinkOnceLinkage(bool ODR) {
return ODR ? LinkOnceODRLinkage : LinkOnceAnyLinkage;
}
static LinkageTypes getWeakLinkage(bool ODR) {
return ODR ? WeakODRLinkage : WeakAnyLinkage;
}
static bool isExternalLinkage(LinkageTypes Linkage) {
return Linkage == ExternalLinkage;
}
static bool isAvailableExternallyLinkage(LinkageTypes Linkage) {
return Linkage == AvailableExternallyLinkage;
}
static bool isLinkOnceAnyLinkage(LinkageTypes Linkage) {
return Linkage == LinkOnceAnyLinkage;
}
static bool isLinkOnceODRLinkage(LinkageTypes Linkage) {
return Linkage == LinkOnceODRLinkage;
}
static bool isLinkOnceLinkage(LinkageTypes Linkage) {
return isLinkOnceAnyLinkage(Linkage) || isLinkOnceODRLinkage(Linkage);
}
static bool isWeakAnyLinkage(LinkageTypes Linkage) {
return Linkage == WeakAnyLinkage;
}
static bool isWeakODRLinkage(LinkageTypes Linkage) {
return Linkage == WeakODRLinkage;
}
static bool isWeakLinkage(LinkageTypes Linkage) {
return isWeakAnyLinkage(Linkage) || isWeakODRLinkage(Linkage);
}
static bool isAppendingLinkage(LinkageTypes Linkage) {
return Linkage == AppendingLinkage;
}
static bool isInternalLinkage(LinkageTypes Linkage) {
return Linkage == InternalLinkage;
}
static bool isPrivateLinkage(LinkageTypes Linkage) {
return Linkage == PrivateLinkage;
}
static bool isLocalLinkage(LinkageTypes Linkage) {
return isInternalLinkage(Linkage) || isPrivateLinkage(Linkage);
}
static bool isExternalWeakLinkage(LinkageTypes Linkage) {
return Linkage == ExternalWeakLinkage;
}
static bool isCommonLinkage(LinkageTypes Linkage) {
return Linkage == CommonLinkage;
}
static bool isValidDeclarationLinkage(LinkageTypes Linkage) {
return isExternalWeakLinkage(Linkage) || isExternalLinkage(Linkage);
}
/// Whether the definition of this global may be replaced by something
/// non-equivalent at link time. For example, if a function has weak linkage
/// then the code defining it may be replaced by different code.
static bool isInterposableLinkage(LinkageTypes Linkage) {
switch (Linkage) {
case WeakAnyLinkage:
case LinkOnceAnyLinkage:
case CommonLinkage:
case ExternalWeakLinkage:
return true;
case AvailableExternallyLinkage:
case LinkOnceODRLinkage:
case WeakODRLinkage:
// The above three cannot be overridden but can be de-refined.
case ExternalLinkage:
case AppendingLinkage:
case InternalLinkage:
case PrivateLinkage:
return false;
}
llvm_unreachable("Fully covered switch above!");
}
/// Whether the definition of this global may be discarded if it is not used
/// in its compilation unit.
static bool isDiscardableIfUnused(LinkageTypes Linkage) {
return isLinkOnceLinkage(Linkage) || isLocalLinkage(Linkage) ||
isAvailableExternallyLinkage(Linkage);
}
/// Whether the definition of this global may be replaced at link time. NB:
/// Using this method outside of the code generators is almost always a
/// mistake: when working at the IR level use isInterposable instead as it
/// knows about ODR semantics.
static bool isWeakForLinker(LinkageTypes Linkage) {
return Linkage == WeakAnyLinkage || Linkage == WeakODRLinkage ||
Linkage == LinkOnceAnyLinkage || Linkage == LinkOnceODRLinkage ||
Linkage == CommonLinkage || Linkage == ExternalWeakLinkage;
}
/// Return true if the currently visible definition of this global (if any) is
/// exactly the definition we will see at runtime.
///
/// Non-exact linkage types inhibit most non-inlining IPO, since a
/// differently optimized variant of the same function can have different
/// observable or undefined behavior than in the variant currently visible.
/// For instance, we could have started with
///
/// void foo(int *v) {
/// int t = 5 / v[0];
/// (void) t;
/// }
///
/// and "refined" it to
///
/// void foo(int *v) { }
///
/// However, we cannot infer readnone for `foo`, since that would justify
/// DSE'ing a store to `v[0]` across a call to `foo`, which can cause
/// undefined behavior if the linker replaces the actual call destination with
/// the unoptimized `foo`.
///
/// Inlining is okay across non-exact linkage types as long as they're not
/// interposable (see \c isInterposable), since in such cases the currently
/// visible variant is *a* correct implementation of the original source
/// function; it just isn't the *only* correct implementation.
bool isDefinitionExact() const {
return !mayBeDerefined();
}
/// Return true if this global has an exact definition.
bool hasExactDefinition() const {
// While this computes exactly the same thing as
// isStrongDefinitionForLinker, the intended uses are different. This
// function is intended to help decide if specific inter-procedural
// transforms are correct, while isStrongDefinitionForLinker's intended use
// is in low level code generation.
return !isDeclaration() && isDefinitionExact();
}
/// Return true if this global's definition can be substituted with an
/// *arbitrary* definition at link time or load time. We cannot do any IPO or
/// inlining across interposable call edges, since the callee can be
/// replaced with something arbitrary.
bool isInterposable() const;
bool canBenefitFromLocalAlias() const;
bool hasExternalLinkage() const { return isExternalLinkage(getLinkage()); }
bool hasAvailableExternallyLinkage() const {
return isAvailableExternallyLinkage(getLinkage());
}
bool hasLinkOnceLinkage() const { return isLinkOnceLinkage(getLinkage()); }
bool hasLinkOnceAnyLinkage() const {
return isLinkOnceAnyLinkage(getLinkage());
}
bool hasLinkOnceODRLinkage() const {
return isLinkOnceODRLinkage(getLinkage());
}
bool hasWeakLinkage() const { return isWeakLinkage(getLinkage()); }
bool hasWeakAnyLinkage() const { return isWeakAnyLinkage(getLinkage()); }
bool hasWeakODRLinkage() const { return isWeakODRLinkage(getLinkage()); }
bool hasAppendingLinkage() const { return isAppendingLinkage(getLinkage()); }
bool hasInternalLinkage() const { return isInternalLinkage(getLinkage()); }
bool hasPrivateLinkage() const { return isPrivateLinkage(getLinkage()); }
bool hasLocalLinkage() const { return isLocalLinkage(getLinkage()); }
bool hasExternalWeakLinkage() const {
return isExternalWeakLinkage(getLinkage());
}
bool hasCommonLinkage() const { return isCommonLinkage(getLinkage()); }
bool hasValidDeclarationLinkage() const {
return isValidDeclarationLinkage(getLinkage());
}
void setLinkage(LinkageTypes LT) {
if (isLocalLinkage(LT))
Visibility = DefaultVisibility;
Linkage = LT;
if (isImplicitDSOLocal())
setDSOLocal(true);
}
LinkageTypes getLinkage() const { return LinkageTypes(Linkage); }
bool isDiscardableIfUnused() const {
return isDiscardableIfUnused(getLinkage());
}
bool isWeakForLinker() const { return isWeakForLinker(getLinkage()); }
protected:
/// Copy all additional attributes (those not needed to create a GlobalValue)
/// from the GlobalValue Src to this one.
void copyAttributesFrom(const GlobalValue *Src);
public:
/// If the given string begins with the GlobalValue name mangling escape
/// character '\1', drop it.
///
/// This function applies a specific mangling that is used in PGO profiles,
/// among other things. If you're trying to get a symbol name for an
/// arbitrary GlobalValue, this is not the function you're looking for; see
/// Mangler.h.
static StringRef dropLLVMManglingEscape(StringRef Name) {
if (!Name.empty() && Name[0] == '\1')
return Name.substr(1);
return Name;
}
/// Return the modified name for a global value suitable to be
/// used as the key for a global lookup (e.g. profile or ThinLTO).
/// The value's original name is \c Name and has linkage of type
/// \c Linkage. The value is defined in module \c FileName.
static std::string getGlobalIdentifier(StringRef Name,
GlobalValue::LinkageTypes Linkage,
StringRef FileName);
/// Return the modified name for this global value suitable to be
/// used as the key for a global lookup (e.g. profile or ThinLTO).
std::string getGlobalIdentifier() const;
/// Declare a type to represent a global unique identifier for a global value.
/// This is a 64-bit hash that is used by PGO and ThinLTO to have a compact
/// unique way to identify a symbol.
using GUID = uint64_t;
/// Return a 64-bit global unique ID constructed from global value name
/// (i.e. returned by getGlobalIdentifier()).
static GUID getGUID(StringRef GlobalName) { return MD5Hash(GlobalName); }
/// Return a 64-bit global unique ID constructed from global value name
/// (i.e. returned by getGlobalIdentifier()).
GUID getGUID() const { return getGUID(getGlobalIdentifier()); }
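// For example (illustrative), a local-linkage symbol "foo" defined in "bar.c"
// is identified as "bar.c:foo", while an external "foo" stays "foo"; the GUID
// is then the 64-bit MD5-based hash of that identifier string.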
/// @name Materialization
/// Materialization is used to construct functions only as they're needed. This
/// is useful to reduce memory usage in LLVM or parsing work done by the
/// BitcodeReader to load the Module.
/// @{
/// If this function's Module is being lazily streamed in functions from disk
/// or some other source, this method can be used to check to see if the
/// function has been read in yet or not.
bool isMaterializable() const;
/// Make sure this GlobalValue is fully read.
Error materialize();
/// @}
/// Return true if the primary definition of this global value is outside of
/// the current translation unit.
bool isDeclaration() const;
bool isDeclarationForLinker() const {
if (hasAvailableExternallyLinkage())
return true;
return isDeclaration();
}
/// Returns true if this global's definition will be the one chosen by the
/// linker.
///
/// NB! Ideally this should not be used at the IR level at all. If you're
/// interested in optimization constraints implied by the linker's ability to
/// choose an implementation, prefer using \c hasExactDefinition.
bool isStrongDefinitionForLinker() const {
return !(isDeclarationForLinker() || isWeakForLinker());
}
const GlobalObject *getAliaseeObject() const;
GlobalObject *getAliaseeObject() {
return const_cast<GlobalObject *>(
static_cast<const GlobalValue *>(this)->getAliaseeObject());
}
/// Returns whether this is a reference to an absolute symbol.
bool isAbsoluteSymbolRef() const;
/// If this is an absolute symbol reference, returns the range of the symbol,
/// otherwise returns None.
Optional<ConstantRange> getAbsoluteSymbolRange() const;
/// This method unlinks 'this' from the containing module, but does not delete
/// it.
void removeFromParent();
/// This method unlinks 'this' from the containing module and deletes it.
void eraseFromParent();
/// Get the module that this global value is contained inside of...
Module *getParent() { return Parent; }
const Module *getParent() const { return Parent; }
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::FunctionVal ||
V->getValueID() == Value::GlobalVariableVal ||
V->getValueID() == Value::GlobalAliasVal ||
V->getValueID() == Value::GlobalIFuncVal;
}
/// True if GV can be left out of the object symbol table. This is the case
/// for linkonce_odr values whose address is not significant. While legal, it
/// is not normally profitable to omit them from the .o symbol table. Using
/// this analysis makes sense when the information can be passed down to the
/// linker or we are in LTO.
bool canBeOmittedFromSymbolTable() const;
};
} // end namespace llvm
#endif // LLVM_IR_GLOBALVALUE_H

View File

@@ -0,0 +1,264 @@
//===-- llvm/GlobalVariable.h - GlobalVariable class ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the GlobalVariable class, which
// represents a single global variable (or constant) in the VM.
//
// Global variables are constant pointers that refer to hunks of space that are
// allocated by either the VM, or by the linker in a static compiler. A global
// variable may have an initial value, which is copied into the executable's
// .data area. Global constants are required to have initializers.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_GLOBALVARIABLE_H
#define LLVM_IR_GLOBALVARIABLE_H
#include "llvm/ADT/Twine.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/GlobalObject.h"
#include "llvm/IR/OperandTraits.h"
#include "llvm/IR/Value.h"
#include <cassert>
#include <cstddef>
namespace llvm {
class Constant;
class Module;
template <typename ValueSubClass> class SymbolTableListTraits;
class DIGlobalVariableExpression;
class GlobalVariable : public GlobalObject, public ilist_node<GlobalVariable> {
friend class SymbolTableListTraits<GlobalVariable>;
AttributeSet Attrs;
bool isConstantGlobal : 1; // Is this a global constant?
bool isExternallyInitializedConstant : 1; // Is this a global whose value
// can change from its initial
// value before global
// initializers are run?
public:
/// GlobalVariable ctor - If a parent module is specified, the global is
/// automatically inserted into the end of the specified module's global list.
GlobalVariable(Type *Ty, bool isConstant, LinkageTypes Linkage,
Constant *Initializer = nullptr, const Twine &Name = "",
ThreadLocalMode = NotThreadLocal, unsigned AddressSpace = 0,
bool isExternallyInitialized = false);
/// GlobalVariable ctor - This creates a global and inserts it before the
/// specified other global.
GlobalVariable(Module &M, Type *Ty, bool isConstant, LinkageTypes Linkage,
Constant *Initializer, const Twine &Name = "",
GlobalVariable *InsertBefore = nullptr,
ThreadLocalMode = NotThreadLocal,
Optional<unsigned> AddressSpace = None,
bool isExternallyInitialized = false);
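// For example (illustrative; Ctx and M are assumed to be in scope), a private
// constant i32 global initialized to zero and appended to M:
//   Type *I32 = Type::getInt32Ty(Ctx);
//   auto *GV = new GlobalVariable(M, I32, /*isConstant=*/true,
//       GlobalValue::PrivateLinkage, ConstantInt::get(I32, 0), "zero");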
GlobalVariable(const GlobalVariable &) = delete;
GlobalVariable &operator=(const GlobalVariable &) = delete;
~GlobalVariable() {
dropAllReferences();
}
// allocate space for exactly one operand
void *operator new(size_t s) {
return User::operator new(s, 1);
}
// delete space for exactly one operand as created in the corresponding new operator
void operator delete(void *ptr) {
assert(ptr != nullptr && "must not be nullptr");
User *Obj = static_cast<User *>(ptr);
// Number of operands can be set to 0 after construction and initialization. Make sure
// that number of operands is reset to 1, as this is needed in User::operator delete
Obj->setGlobalVariableNumOperands(1);
User::operator delete(Obj);
}
/// Provide fast operand accessors
DECLARE_TRANSPARENT_OPERAND_ACCESSORS(Value);
/// Definitions have initializers, declarations don't.
///
inline bool hasInitializer() const { return !isDeclaration(); }
/// hasDefinitiveInitializer - Whether the global variable has an initializer,
/// and any other instances of the global (this can happen due to weak
/// linkage) are guaranteed to have the same initializer.
///
/// Note that if you want to transform a global, you must use
/// hasUniqueInitializer() instead, because of the *_odr linkage type.
///
/// Example:
///
/// @a = global SomeType* null - Initializer is both definitive and unique.
///
/// @b = global weak SomeType* null - Initializer is neither definitive nor
/// unique.
///
/// @c = global weak_odr SomeType* null - Initializer is definitive, but not
/// unique.
inline bool hasDefinitiveInitializer() const {
return hasInitializer() &&
// The initializer of a global variable may change to something arbitrary
// at link time.
!isInterposable() &&
// The initializer of a global variable with the externally_initialized
// marker may change at runtime before C++ initializers are evaluated.
!isExternallyInitialized();
}
/// hasUniqueInitializer - Whether the global variable has an initializer, and
/// any changes made to the initializer will turn up in the final executable.
inline bool hasUniqueInitializer() const {
return
// We need to be sure this is the definition that will actually be used
isStrongDefinitionForLinker() &&
// It is not safe to modify initializers of global variables with the
// external_initializer marker since the value may be changed at runtime
// before C++ initializers are evaluated.
!isExternallyInitialized();
}
/// getInitializer - Return the initializer for this global variable. It is
/// illegal to call this method if the global is external, because we cannot
/// tell what the value is initialized to!
///
inline const Constant *getInitializer() const {
assert(hasInitializer() && "GV doesn't have initializer!");
return static_cast<Constant*>(Op<0>().get());
}
inline Constant *getInitializer() {
assert(hasInitializer() && "GV doesn't have initializer!");
return static_cast<Constant*>(Op<0>().get());
}
/// setInitializer - Sets the initializer for this global variable, removing
/// any existing initializer if InitVal == nullptr. If this GV has type T*, the
/// initializer must have type T.
void setInitializer(Constant *InitVal);
/// If the value is a global constant, its value is immutable throughout the
/// runtime execution of the program. Assigning a value into the constant
/// leads to undefined behavior.
///
bool isConstant() const { return isConstantGlobal; }
void setConstant(bool Val) { isConstantGlobal = Val; }
bool isExternallyInitialized() const {
return isExternallyInitializedConstant;
}
void setExternallyInitialized(bool Val) {
isExternallyInitializedConstant = Val;
}
/// copyAttributesFrom - copy all additional attributes (those not needed to
/// create a GlobalVariable) from the GlobalVariable Src to this one.
void copyAttributesFrom(const GlobalVariable *Src);
/// removeFromParent - This method unlinks 'this' from the containing module,
/// but does not delete it.
///
void removeFromParent();
/// eraseFromParent - This method unlinks 'this' from the containing module
/// and deletes it.
///
void eraseFromParent();
/// Drop all references in preparation to destroy the GlobalVariable. This
/// drops not only the reference to the initializer but also to any metadata.
void dropAllReferences();
/// Attach a DIGlobalVariableExpression.
void addDebugInfo(DIGlobalVariableExpression *GV);
/// Fill the vector with all debug info attachments.
void getDebugInfo(SmallVectorImpl<DIGlobalVariableExpression *> &GVs) const;
/// Add attribute to this global.
void addAttribute(Attribute::AttrKind Kind) {
Attrs = Attrs.addAttribute(getContext(), Kind);
}
/// Add attribute to this global.
void addAttribute(StringRef Kind, StringRef Val = StringRef()) {
Attrs = Attrs.addAttribute(getContext(), Kind, Val);
}
/// Return true if the attribute exists.
bool hasAttribute(Attribute::AttrKind Kind) const {
return Attrs.hasAttribute(Kind);
}
/// Return true if the attribute exists.
bool hasAttribute(StringRef Kind) const {
return Attrs.hasAttribute(Kind);
}
/// Return true if any attributes exist.
bool hasAttributes() const {
return Attrs.hasAttributes();
}
/// Return the attribute object.
Attribute getAttribute(Attribute::AttrKind Kind) const {
return Attrs.getAttribute(Kind);
}
/// Return the attribute object.
Attribute getAttribute(StringRef Kind) const {
return Attrs.getAttribute(Kind);
}
/// Return the attribute set for this global
AttributeSet getAttributes() const {
return Attrs;
}
/// Return the attribute set as a list with the given index.
/// FIXME: This may not be required once ValueEnumerators
/// in bitcode-writer can enumerate attribute-set.
AttributeList getAttributesAsList(unsigned index) const {
if (!hasAttributes())
return AttributeList();
std::pair<unsigned, AttributeSet> AS[1] = {{index, Attrs}};
return AttributeList::get(getContext(), AS);
}
/// Set attribute list for this global
void setAttributes(AttributeSet A) {
Attrs = A;
}
/// Check if section name is present
bool hasImplicitSection() const {
return getAttributes().hasAttribute("bss-section") ||
getAttributes().hasAttribute("data-section") ||
getAttributes().hasAttribute("relro-section") ||
getAttributes().hasAttribute("rodata-section");
}
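// For example (illustrative; GV is a GlobalVariable * in scope), requesting a
// codegen-directed section via a string attribute makes hasImplicitSection()
// return true:
//   GV->addAttribute("rodata-section", ".my_rodata");
//   assert(GV->hasImplicitSection());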
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::GlobalVariableVal;
}
};
template <>
struct OperandTraits<GlobalVariable> :
public OptionalOperandTraits<GlobalVariable> {
};
DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GlobalVariable, Value)
} // end namespace llvm
#endif // LLVM_IR_GLOBALVARIABLE_H

File diff suppressed because it is too large

View File

@@ -0,0 +1,131 @@
//===- IRBuilderFolder.h - Const folder interface for IRBuilder -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the constant folding interface used by IRBuilder.
// It is implemented by ConstantFolder (the default), TargetFolder, and
// NoFolder.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_IRBUILDERFOLDER_H
#define LLVM_IR_IRBUILDERFOLDER_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
namespace llvm {
/// IRBuilderFolder - Interface for constant folding in IRBuilder.
class IRBuilderFolder {
public:
virtual ~IRBuilderFolder();
//===--------------------------------------------------------------------===//
// Value-based folders.
//
// Return an existing value or a constant if the operation can be simplified.
// Otherwise return nullptr.
//===--------------------------------------------------------------------===//
virtual Value *FoldAdd(Value *LHS, Value *RHS, bool HasNUW = false,
bool HasNSW = false) const = 0;
virtual Value *FoldAnd(Value *LHS, Value *RHS) const = 0;
virtual Value *FoldOr(Value *LHS, Value *RHS) const = 0;
virtual Value *FoldICmp(CmpInst::Predicate P, Value *LHS,
Value *RHS) const = 0;
virtual Value *FoldGEP(Type *Ty, Value *Ptr, ArrayRef<Value *> IdxList,
bool IsInBounds = false) const = 0;
virtual Value *FoldSelect(Value *C, Value *True, Value *False) const = 0;
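// For example (illustrative; Folder is a ConstantFolder and I32 an
// IntegerType * in scope), two constants fold without emitting an add:
//   Value *V = Folder.FoldAdd(ConstantInt::get(I32, 2),
//                             ConstantInt::get(I32, 3)); // yields i32 5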
//===--------------------------------------------------------------------===//
// Binary Operators
//===--------------------------------------------------------------------===//
virtual Value *CreateFAdd(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateSub(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const = 0;
virtual Value *CreateFSub(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateMul(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const = 0;
virtual Value *CreateFMul(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateUDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const = 0;
virtual Value *CreateSDiv(Constant *LHS, Constant *RHS,
bool isExact = false) const = 0;
virtual Value *CreateFDiv(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateURem(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateSRem(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateFRem(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateShl(Constant *LHS, Constant *RHS,
bool HasNUW = false, bool HasNSW = false) const = 0;
virtual Value *CreateLShr(Constant *LHS, Constant *RHS,
bool isExact = false) const = 0;
virtual Value *CreateAShr(Constant *LHS, Constant *RHS,
bool isExact = false) const = 0;
virtual Value *CreateXor(Constant *LHS, Constant *RHS) const = 0;
virtual Value *CreateBinOp(Instruction::BinaryOps Opc,
Constant *LHS, Constant *RHS) const = 0;
//===--------------------------------------------------------------------===//
// Unary Operators
//===--------------------------------------------------------------------===//
virtual Value *CreateNeg(Constant *C,
bool HasNUW = false, bool HasNSW = false) const = 0;
virtual Value *CreateFNeg(Constant *C) const = 0;
virtual Value *CreateNot(Constant *C) const = 0;
virtual Value *CreateUnOp(Instruction::UnaryOps Opc, Constant *C) const = 0;
//===--------------------------------------------------------------------===//
// Cast/Conversion Operators
//===--------------------------------------------------------------------===//
virtual Value *CreateCast(Instruction::CastOps Op, Constant *C,
Type *DestTy) const = 0;
virtual Value *CreatePointerCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreatePointerBitCastOrAddrSpaceCast(Constant *C,
Type *DestTy) const = 0;
virtual Value *CreateIntCast(Constant *C, Type *DestTy,
bool isSigned) const = 0;
virtual Value *CreateFPCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreateBitCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreateIntToPtr(Constant *C, Type *DestTy) const = 0;
virtual Value *CreatePtrToInt(Constant *C, Type *DestTy) const = 0;
virtual Value *CreateZExtOrBitCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreateSExtOrBitCast(Constant *C, Type *DestTy) const = 0;
virtual Value *CreateTruncOrBitCast(Constant *C, Type *DestTy) const = 0;
//===--------------------------------------------------------------------===//
// Compare Instructions
//===--------------------------------------------------------------------===//
virtual Value *CreateFCmp(CmpInst::Predicate P, Constant *LHS,
Constant *RHS) const = 0;
//===--------------------------------------------------------------------===//
// Other Instructions
//===--------------------------------------------------------------------===//
virtual Value *CreateExtractElement(Constant *Vec, Constant *Idx) const = 0;
virtual Value *CreateInsertElement(Constant *Vec, Constant *NewElt,
Constant *Idx) const = 0;
virtual Value *CreateShuffleVector(Constant *V1, Constant *V2,
ArrayRef<int> Mask) const = 0;
virtual Value *CreateExtractValue(Constant *Agg,
ArrayRef<unsigned> IdxList) const = 0;
virtual Value *CreateInsertValue(Constant *Agg, Constant *Val,
ArrayRef<unsigned> IdxList) const = 0;
};
} // end namespace llvm
#endif // LLVM_IR_IRBUILDERFOLDER_H

View File

@@ -0,0 +1,89 @@
//===- IRPrintingPasses.h - Passes to print out IR constructs ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
///
/// This file defines passes to print out IR in various granularities. The
/// PrintModulePass pass simply prints out the entire module when it is
/// executed. The PrintFunctionPass class is designed to be pipelined with
/// other FunctionPasses, and prints out the functions of the module as they
/// are processed.
///
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_IRPRINTINGPASSES_H
#define LLVM_IR_IRPRINTINGPASSES_H
#include "llvm/IR/PassManager.h"
#include <string>
namespace llvm {
class raw_ostream;
class StringRef;
class Function;
class FunctionPass;
class Module;
class ModulePass;
class Pass;
/// Create and return a pass that writes the module to the specified
/// \c raw_ostream.
ModulePass *createPrintModulePass(raw_ostream &OS,
const std::string &Banner = "",
bool ShouldPreserveUseListOrder = false);
/// Create and return a pass that prints functions to the specified
/// \c raw_ostream as they are processed.
FunctionPass *createPrintFunctionPass(raw_ostream &OS,
const std::string &Banner = "");
/// Print out the name of an LLVM value without any prefixes.
///
/// The name is surrounded with double quotes and escaped if it has any
/// special or non-printable characters in it.
void printLLVMNameWithoutPrefix(raw_ostream &OS, StringRef Name);
/// Return true if a pass is for IR printing.
bool isIRPrintingPass(Pass *P);
/// Pass for printing a Module as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
class PrintModulePass : public PassInfoMixin<PrintModulePass> {
raw_ostream &OS;
std::string Banner;
bool ShouldPreserveUseListOrder;
public:
PrintModulePass();
PrintModulePass(raw_ostream &OS, const std::string &Banner = "",
bool ShouldPreserveUseListOrder = false);
PreservedAnalyses run(Module &M, AnalysisManager<Module> &);
static bool isRequired() { return true; }
};
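// For example (illustrative), printing the module between passes in a
// new-pass-manager pipeline:
//   ModulePassManager MPM;
//   MPM.addPass(PrintModulePass(errs(), "; after inlining"));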
/// Pass for printing a Function as LLVM's text IR assembly.
///
/// Note: This pass is for use with the new pass manager. Use the create...Pass
/// functions above to create passes for use with the legacy pass manager.
class PrintFunctionPass : public PassInfoMixin<PrintFunctionPass> {
raw_ostream &OS;
std::string Banner;
public:
PrintFunctionPass();
PrintFunctionPass(raw_ostream &OS, const std::string &Banner = "");
PreservedAnalyses run(Function &F, AnalysisManager<Function> &);
static bool isRequired() { return true; }
};
} // namespace llvm
#endif

View File

@@ -0,0 +1,464 @@
//===- llvm/InlineAsm.h - Class to represent inline asm strings -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This class represents the inline asm strings, which are Value*'s that are
// used as the callee operand of call instructions. InlineAsm's are uniqued
// like constants, and created via InlineAsm::get(...).
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INLINEASM_H
#define LLVM_IR_INLINEASM_H
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <string>
#include <vector>
namespace llvm {
class FunctionType;
class PointerType;
template <class ConstantClass> class ConstantUniqueMap;
class InlineAsm final : public Value {
public:
enum AsmDialect {
AD_ATT,
AD_Intel
};
private:
friend struct InlineAsmKeyType;
friend class ConstantUniqueMap<InlineAsm>;
std::string AsmString, Constraints;
FunctionType *FTy;
bool HasSideEffects;
bool IsAlignStack;
AsmDialect Dialect;
bool CanThrow;
InlineAsm(FunctionType *Ty, const std::string &AsmString,
const std::string &Constraints, bool hasSideEffects,
bool isAlignStack, AsmDialect asmDialect, bool canThrow);
/// When the ConstantUniqueMap merges two types and makes two InlineAsms
/// identical, it destroys one of them with this method.
void destroyConstant();
public:
InlineAsm(const InlineAsm &) = delete;
InlineAsm &operator=(const InlineAsm &) = delete;
/// InlineAsm::get - Return the specified uniqued inline asm string.
///
static InlineAsm *get(FunctionType *Ty, StringRef AsmString,
StringRef Constraints, bool hasSideEffects,
bool isAlignStack = false,
AsmDialect asmDialect = AD_ATT, bool canThrow = false);
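// For example (illustrative; Ctx and an IRBuilder B are assumed to be in
// scope), a side-effecting nop with no operands and no constraints:
//   FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx), false);
//   InlineAsm *IA = InlineAsm::get(FTy, "nop", "", /*hasSideEffects=*/true);
//   B.CreateCall(FTy, IA);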
bool hasSideEffects() const { return HasSideEffects; }
bool isAlignStack() const { return IsAlignStack; }
AsmDialect getDialect() const { return Dialect; }
bool canThrow() const { return CanThrow; }
/// getType - InlineAsm's are always pointers.
///
PointerType *getType() const {
return reinterpret_cast<PointerType*>(Value::getType());
}
/// getFunctionType - InlineAsm's are always pointers to functions.
///
FunctionType *getFunctionType() const;
const std::string &getAsmString() const { return AsmString; }
const std::string &getConstraintString() const { return Constraints; }
/// Verify - This static method can be used by the parser to check to see if
/// the specified constraint string is legal for the type. This returns true
/// if legal, false if not.
///
static bool Verify(FunctionType *Ty, StringRef Constraints);
// Constraint String Parsing
enum ConstraintPrefix {
isInput, // 'x'
isOutput, // '=x'
isClobber // '~x'
};
using ConstraintCodeVector = std::vector<std::string>;
struct SubConstraintInfo {
/// MatchingInput - If this is not -1, this is an output constraint where an
/// input constraint is required to match it (e.g. "0"). The value is the
/// constraint number that matches this one (for example, if this is
/// constraint #0 and constraint #4 has the value "0", this will be 4).
int MatchingInput = -1;
/// Code - The constraint code, either the register name (in braces) or the
/// constraint letter/number.
ConstraintCodeVector Codes;
/// Default constructor.
SubConstraintInfo() = default;
};
using SubConstraintInfoVector = std::vector<SubConstraintInfo>;
struct ConstraintInfo;
using ConstraintInfoVector = std::vector<ConstraintInfo>;
struct ConstraintInfo {
/// Type - The basic type of the constraint: input/output/clobber
///
ConstraintPrefix Type = isInput;
/// isEarlyClobber - "&": output operand writes result before inputs are all
/// read. This is only ever set for an output operand.
bool isEarlyClobber = false;
/// MatchingInput - If this is not -1, this is an output constraint where an
/// input constraint is required to match it (e.g. "0"). The value is the
/// constraint number that matches this one (for example, if this is
/// constraint #0 and constraint #4 has the value "0", this will be 4).
int MatchingInput = -1;
/// hasMatchingInput - Return true if this is an output constraint that has
/// a matching input constraint.
bool hasMatchingInput() const { return MatchingInput != -1; }
/// isCommutative - This is set to true for a constraint that is commutative
/// with the next operand.
bool isCommutative = false;
/// isIndirect - True if this operand is an indirect operand. This means
/// that the address of the source or destination is present in the call
/// instruction, instead of it being returned or passed in explicitly. This
/// is represented with a '*' in the asm string.
bool isIndirect = false;
/// Code - The constraint code, either the register name (in braces) or the
/// constraint letter/number.
ConstraintCodeVector Codes;
/// isMultipleAlternative - '|': has multiple-alternative constraints.
bool isMultipleAlternative = false;
/// multipleAlternatives - If there are multiple alternative constraints,
/// this array will contain them. Otherwise it will be empty.
SubConstraintInfoVector multipleAlternatives;
/// The currently selected alternative constraint index.
unsigned currentAlternativeIndex = 0;
/// Default constructor.
ConstraintInfo() = default;
/// Parse - Analyze the specified string (e.g. "=*&{eax}") and fill in the
/// fields in this structure. If the constraint string is not understood,
/// return true, otherwise return false.
bool Parse(StringRef Str, ConstraintInfoVector &ConstraintsSoFar);
/// selectAlternative - Point this constraint to the alternative constraint
/// indicated by the index.
void selectAlternative(unsigned index);
/// Whether this constraint corresponds to an argument.
bool hasArg() const {
return Type == isInput || (Type == isOutput && isIndirect);
}
};
/// ParseConstraints - Split up the constraint string into the specific
/// constraints and their prefixes. If this returns an empty vector, and if
/// the constraint string itself isn't empty, there was an error parsing.
static ConstraintInfoVector ParseConstraints(StringRef ConstraintString);
/// ParseConstraints - Parse the constraints of this inlineasm object,
/// returning them the same way that ParseConstraints(str) does.
ConstraintInfoVector ParseConstraints() const {
return ParseConstraints(Constraints);
}
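// For example, ParseConstraints("=r,r,~{memory}") yields three
// ConstraintInfos: an output register ("=r", Type == isOutput), an input
// register ("r", Type == isInput), and a clobber ("~{memory}",
// Type == isClobber).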
// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() == Value::InlineAsmVal;
}
// These are helper methods for dealing with flags in the INLINEASM SDNode
// in the backend.
//
// The encoding of the flag word is currently:
// Bits 2-0 - A Kind_* value indicating the kind of the operand.
// Bits 15-3 - The number of SDNode operands associated with this inline
// assembly operand.
// If bit 31 is set:
// Bit 30-16 - The operand number that this operand must match.
// When bits 2-0 are Kind_Mem, the Constraint_* value must be
// obtained from the flags for this operand number.
// Else if bits 2-0 are Kind_Mem:
// Bit 30-16 - A Constraint_* value indicating the original constraint
// code.
// Else:
// Bit 30-16 - The register class ID to use for the operand.
enum : uint32_t {
// Fixed operands on an INLINEASM SDNode.
Op_InputChain = 0,
Op_AsmString = 1,
Op_MDNode = 2,
Op_ExtraInfo = 3, // HasSideEffects, IsAlignStack, AsmDialect.
Op_FirstOperand = 4,
// Fixed operands on an INLINEASM MachineInstr.
MIOp_AsmString = 0,
MIOp_ExtraInfo = 1, // HasSideEffects, IsAlignStack, AsmDialect.
MIOp_FirstOperand = 2,
// Interpretation of the MIOp_ExtraInfo bit field.
Extra_HasSideEffects = 1,
Extra_IsAlignStack = 2,
Extra_AsmDialect = 4,
Extra_MayLoad = 8,
Extra_MayStore = 16,
Extra_IsConvergent = 32,
// Inline asm operands map to multiple SDNode / MachineInstr operands.
// The first operand is an immediate describing the asm operand; the low
// bits give the kind:
Kind_RegUse = 1, // Input register, "r".
Kind_RegDef = 2, // Output register, "=r".
Kind_RegDefEarlyClobber = 3, // Early-clobber output register, "=&r".
Kind_Clobber = 4, // Clobbered register, "~r".
Kind_Imm = 5, // Immediate.
Kind_Mem = 6, // Memory operand, "m".
// Memory constraint codes.
// These could be tablegenerated but there's little need to do that since
// there's plenty of space in the encoding to support the union of all
// constraint codes for all targets.
Constraint_Unknown = 0,
Constraint_es,
Constraint_i,
Constraint_m,
Constraint_o,
Constraint_v,
Constraint_A,
Constraint_Q,
Constraint_R,
Constraint_S,
Constraint_T,
Constraint_Um,
Constraint_Un,
Constraint_Uq,
Constraint_Us,
Constraint_Ut,
Constraint_Uv,
Constraint_Uy,
Constraint_X,
Constraint_Z,
Constraint_ZC,
Constraint_Zy,
Constraints_Max = Constraint_Zy,
Constraints_ShiftAmount = 16,
Flag_MatchingOperand = 0x80000000
};
static unsigned getFlagWord(unsigned Kind, unsigned NumOps) {
assert(((NumOps << 3) & ~0xffff) == 0 && "Too many inline asm operands!");
assert(Kind >= Kind_RegUse && Kind <= Kind_Mem && "Invalid Kind");
return Kind | (NumOps << 3);
}
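// Worked example: getFlagWord(Kind_RegUse, 2) == (2 << 3) | 1 == 0x11, i.e.
// an input-register operand backed by two operand registers.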
static bool isRegDefKind(unsigned Flag){ return getKind(Flag) == Kind_RegDef;}
static bool isImmKind(unsigned Flag) { return getKind(Flag) == Kind_Imm; }
static bool isMemKind(unsigned Flag) { return getKind(Flag) == Kind_Mem; }
static bool isRegDefEarlyClobberKind(unsigned Flag) {
return getKind(Flag) == Kind_RegDefEarlyClobber;
}
static bool isClobberKind(unsigned Flag) {
return getKind(Flag) == Kind_Clobber;
}
/// getFlagWordForMatchingOp - Augment an existing flag word returned by
/// getFlagWord with information indicating that this input operand is tied
/// to a previous output operand.
static unsigned getFlagWordForMatchingOp(unsigned InputFlag,
unsigned MatchedOperandNo) {
assert(MatchedOperandNo <= 0x7fff && "Too big matched operand");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
return InputFlag | Flag_MatchingOperand | (MatchedOperandNo << 16);
}
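// Worked example: starting from getFlagWord(Kind_RegUse, 1) == 0x9, tying it
// to output operand 3 gives 0x9 | 0x80000000 | (3 << 16) == 0x80030009.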
/// getFlagWordForRegClass - Augment an existing flag word returned by
/// getFlagWord with the required register class for the following register
/// operands.
/// A tied use operand cannot have a register class; use the register class
/// from the def operand instead.
static unsigned getFlagWordForRegClass(unsigned InputFlag, unsigned RC) {
// Store RC + 1, reserve the value 0 to mean 'no register class'.
++RC;
assert(!isImmKind(InputFlag) && "Immediates cannot have a register class");
assert(!isMemKind(InputFlag) && "Memory operand cannot have a register class");
assert(RC <= 0x7fff && "Too large register class ID");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
return InputFlag | (RC << 16);
}
/// Augment an existing flag word returned by getFlagWord with the constraint
/// code for a memory constraint.
static unsigned getFlagWordForMem(unsigned InputFlag, unsigned Constraint) {
assert(isMemKind(InputFlag) && "InputFlag is not a memory constraint!");
assert(Constraint <= 0x7fff && "Too large a memory constraint ID");
assert(Constraint <= Constraints_Max && "Unknown constraint ID");
assert((InputFlag & ~0xffff) == 0 && "High bits already contain data");
return InputFlag | (Constraint << Constraints_ShiftAmount);
}
static unsigned convertMemFlagWordToMatchingFlagWord(unsigned InputFlag) {
assert(isMemKind(InputFlag));
return InputFlag & ~(0x7fff << Constraints_ShiftAmount);
}
static unsigned getKind(unsigned Flags) {
return Flags & 7;
}
static unsigned getMemoryConstraintID(unsigned Flag) {
assert(isMemKind(Flag));
return (Flag >> Constraints_ShiftAmount) & 0x7fff;
}
/// getNumOperandRegisters - Extract the number of registers field from the
/// inline asm operand flag.
static unsigned getNumOperandRegisters(unsigned Flag) {
return (Flag & 0xffff) >> 3;
}
/// isUseOperandTiedToDef - Return true if the flag of the inline asm
/// operand indicates it is a use operand that's matched to a def operand.
static bool isUseOperandTiedToDef(unsigned Flag, unsigned &Idx) {
if ((Flag & Flag_MatchingOperand) == 0)
return false;
Idx = (Flag & ~Flag_MatchingOperand) >> 16;
return true;
}
/// hasRegClassConstraint - Returns true if the flag contains a register
/// class constraint. Sets RC to the register class ID.
static bool hasRegClassConstraint(unsigned Flag, unsigned &RC) {
if (Flag & Flag_MatchingOperand)
return false;
unsigned High = Flag >> 16;
// getFlagWordForRegClass() uses 0 to mean no register class, and otherwise
// stores RC + 1.
if (!High)
return false;
RC = High - 1;
return true;
}
static std::vector<StringRef> getExtraInfoNames(unsigned ExtraInfo) {
std::vector<StringRef> Result;
if (ExtraInfo & InlineAsm::Extra_HasSideEffects)
Result.push_back("sideeffect");
if (ExtraInfo & InlineAsm::Extra_MayLoad)
Result.push_back("mayload");
if (ExtraInfo & InlineAsm::Extra_MayStore)
Result.push_back("maystore");
if (ExtraInfo & InlineAsm::Extra_IsConvergent)
Result.push_back("isconvergent");
if (ExtraInfo & InlineAsm::Extra_IsAlignStack)
Result.push_back("alignstack");
AsmDialect Dialect =
InlineAsm::AsmDialect((ExtraInfo & InlineAsm::Extra_AsmDialect));
if (Dialect == InlineAsm::AD_ATT)
Result.push_back("attdialect");
if (Dialect == InlineAsm::AD_Intel)
Result.push_back("inteldialect");
return Result;
}
static StringRef getKindName(unsigned Kind) {
switch (Kind) {
case InlineAsm::Kind_RegUse:
return "reguse";
case InlineAsm::Kind_RegDef:
return "regdef";
case InlineAsm::Kind_RegDefEarlyClobber:
return "regdef-ec";
case InlineAsm::Kind_Clobber:
return "clobber";
case InlineAsm::Kind_Imm:
return "imm";
case InlineAsm::Kind_Mem:
return "mem";
default:
llvm_unreachable("Unknown operand kind");
}
}
static StringRef getMemConstraintName(unsigned Constraint) {
switch (Constraint) {
case InlineAsm::Constraint_es:
return "es";
case InlineAsm::Constraint_i:
return "i";
case InlineAsm::Constraint_m:
return "m";
case InlineAsm::Constraint_o:
return "o";
case InlineAsm::Constraint_v:
return "v";
case InlineAsm::Constraint_Q:
return "Q";
case InlineAsm::Constraint_R:
return "R";
case InlineAsm::Constraint_S:
return "S";
case InlineAsm::Constraint_T:
return "T";
case InlineAsm::Constraint_Um:
return "Um";
case InlineAsm::Constraint_Un:
return "Un";
case InlineAsm::Constraint_Uq:
return "Uq";
case InlineAsm::Constraint_Us:
return "Us";
case InlineAsm::Constraint_Ut:
return "Ut";
case InlineAsm::Constraint_Uv:
return "Uv";
case InlineAsm::Constraint_Uy:
return "Uy";
case InlineAsm::Constraint_X:
return "X";
case InlineAsm::Constraint_Z:
return "Z";
case InlineAsm::Constraint_ZC:
return "ZC";
case InlineAsm::Constraint_Zy:
return "Zy";
default:
llvm_unreachable("Unknown memory constraint");
}
}
};
} // end namespace llvm
#endif // LLVM_IR_INLINEASM_H
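
A quick usage sketch for the flag-word helpers above (illustrative only; the
register class ID 5 and operand count 2 are arbitrary stand-ins, and
Kind_RegDef is the plain output-register kind):

// Encode a two-register output operand constrained to register class 5,
// then decode the pieces back out of the 32-bit flag word.
unsigned Flag = InlineAsm::getFlagWord(InlineAsm::Kind_RegDef, /*NumOps=*/2);
Flag = InlineAsm::getFlagWordForRegClass(Flag, /*RC=*/5);
assert(InlineAsm::isRegDefKind(Flag));
assert(InlineAsm::getNumOperandRegisters(Flag) == 2);
unsigned RC = 0;
assert(InlineAsm::hasRegClassConstraint(Flag, RC) && RC == 5);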

View File

@@ -0,0 +1,162 @@
//===- InstIterator.h - Classes for inst iteration --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains definitions of two iterators for iterating over the
// instructions in a function. This is effectively a wrapper around a two level
// iterator that can probably be genericized later.
//
// Note that this iterator gets invalidated any time that basic blocks or
// instructions are moved around.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INSTITERATOR_H
#define LLVM_IR_INSTITERATOR_H
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include <iterator>
namespace llvm {
// This class implements inst_begin() & inst_end() for
// inst_iterator and const_inst_iterator.
//
template <class BB_t, class BB_i_t, class BI_t, class II_t> class InstIterator {
using BBty = BB_t;
using BBIty = BB_i_t;
using BIty = BI_t;
using IIty = II_t;
BB_t *BBs; // BasicBlocksType
BB_i_t BB; // BasicBlocksType::iterator
BI_t BI; // BasicBlock::iterator
public:
using iterator_category = std::bidirectional_iterator_tag;
using value_type = IIty;
using difference_type = signed;
using pointer = IIty *;
using reference = IIty &;
// Default constructor
InstIterator() = default;
// Copy constructor...
template<typename A, typename B, typename C, typename D>
InstIterator(const InstIterator<A,B,C,D> &II)
: BBs(II.BBs), BB(II.BB), BI(II.BI) {}
template<typename A, typename B, typename C, typename D>
InstIterator(InstIterator<A,B,C,D> &II)
: BBs(II.BBs), BB(II.BB), BI(II.BI) {}
template<class M> InstIterator(M &m)
: BBs(&m.getBasicBlockList()), BB(BBs->begin()) { // begin ctor
if (BB != BBs->end()) {
BI = BB->begin();
advanceToNextBB();
}
}
template<class M> InstIterator(M &m, bool)
: BBs(&m.getBasicBlockList()), BB(BBs->end()) { // end ctor
}
// Accessors to get at the underlying iterators...
inline BBIty &getBasicBlockIterator() { return BB; }
inline BIty &getInstructionIterator() { return BI; }
inline reference operator*() const { return *BI; }
inline pointer operator->() const { return &operator*(); }
inline bool operator==(const InstIterator &y) const {
return BB == y.BB && (BB == BBs->end() || BI == y.BI);
}
inline bool operator!=(const InstIterator& y) const {
return !operator==(y);
}
InstIterator& operator++() {
++BI;
advanceToNextBB();
return *this;
}
inline InstIterator operator++(int) {
InstIterator tmp = *this; ++*this; return tmp;
}
InstIterator& operator--() {
while (BB == BBs->end() || BI == BB->begin()) {
--BB;
BI = BB->end();
}
--BI;
return *this;
}
inline InstIterator operator--(int) {
InstIterator tmp = *this; --*this; return tmp;
}
inline bool atEnd() const { return BB == BBs->end(); }
private:
inline void advanceToNextBB() {
// The only way that the II could be broken is if it is now pointing to
// the end() of the current BasicBlock and there are successor BBs.
while (BI == BB->end()) {
++BB;
if (BB == BBs->end()) break;
BI = BB->begin();
}
}
};
using inst_iterator =
InstIterator<SymbolTableList<BasicBlock>, Function::iterator,
BasicBlock::iterator, Instruction>;
using const_inst_iterator =
InstIterator<const SymbolTableList<BasicBlock>,
Function::const_iterator, BasicBlock::const_iterator,
const Instruction>;
using inst_range = iterator_range<inst_iterator>;
using const_inst_range = iterator_range<const_inst_iterator>;
inline inst_iterator inst_begin(Function *F) { return inst_iterator(*F); }
inline inst_iterator inst_end(Function *F) { return inst_iterator(*F, true); }
inline inst_range instructions(Function *F) {
return inst_range(inst_begin(F), inst_end(F));
}
inline const_inst_iterator inst_begin(const Function *F) {
return const_inst_iterator(*F);
}
inline const_inst_iterator inst_end(const Function *F) {
return const_inst_iterator(*F, true);
}
inline const_inst_range instructions(const Function *F) {
return const_inst_range(inst_begin(F), inst_end(F));
}
inline inst_iterator inst_begin(Function &F) { return inst_iterator(F); }
inline inst_iterator inst_end(Function &F) { return inst_iterator(F, true); }
inline inst_range instructions(Function &F) {
return inst_range(inst_begin(F), inst_end(F));
}
inline const_inst_iterator inst_begin(const Function &F) {
return const_inst_iterator(F);
}
inline const_inst_iterator inst_end(const Function &F) {
return const_inst_iterator(F, true);
}
inline const_inst_range instructions(const Function &F) {
return const_inst_range(inst_begin(F), inst_end(F));
}
} // end namespace llvm
#endif // LLVM_IR_INSTITERATOR_H
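
A minimal usage sketch for the flattened ranges above (assuming an
llvm::Function &F obtained elsewhere): the two-level block/instruction
structure disappears entirely behind the range.

#include "llvm/IR/InstIterator.h"
#include "llvm/IR/Instructions.h"

// Count the loads in F by walking every instruction in every basic block.
static unsigned countLoads(llvm::Function &F) {
  unsigned NumLoads = 0;
  for (llvm::Instruction &I : llvm::instructions(F))
    if (llvm::isa<llvm::LoadInst>(I))
      ++NumLoads;
  return NumLoads;
}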

View File

@@ -0,0 +1,316 @@
//===- InstVisitor.h - Instruction visitor templates ------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INSTVISITOR_H
#define LLVM_IR_INSTVISITOR_H
#include "llvm/IR/Function.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/ErrorHandling.h"
namespace llvm {
// We operate on opaque instruction classes, so forward declare all instruction
// types now...
//
#define HANDLE_INST(NUM, OPCODE, CLASS) class CLASS;
#include "llvm/IR/Instruction.def"
#define DELEGATE(CLASS_TO_VISIT) \
return static_cast<SubClass*>(this)-> \
visit##CLASS_TO_VISIT(static_cast<CLASS_TO_VISIT&>(I))
/// Base class for instruction visitors
///
/// Instruction visitors are used when you want to perform different actions
/// for different kinds of instructions without having to use lots of casts
/// and a big switch statement (in your code, that is).
///
/// To define your own visitor, inherit from this class, specifying your
/// new type for the 'SubClass' template parameter, and "override" visitXXX
/// functions in your class. I say "override" because this class is defined
/// in terms of statically resolved overloading, not virtual functions.
///
/// For example, here is a visitor that counts the number of alloca
/// instructions processed:
///
/// /// Declare the class. Note that we derive from InstVisitor instantiated
/// /// with _our new subclasses_ type.
/// ///
/// struct CountAllocaVisitor : public InstVisitor<CountAllocaVisitor> {
/// unsigned Count;
/// CountAllocaVisitor() : Count(0) {}
///
/// void visitAllocaInst(AllocaInst &AI) { ++Count; }
/// };
///
/// And this class would be used like this:
/// CountAllocaVisitor CAV;
/// CAV.visit(function);
/// NumAllocas = CAV.Count;
///
/// The visitor defines 'visit' methods for Instruction, and also for
/// BasicBlock, Function, and Module, which recursively process all contained
/// instructions.
///
/// Note that if you don't implement visitXXX for some instruction type,
/// the visitXXX method for the instruction's superclass will be invoked. So
/// if instructions are added in the future, they will be automatically
/// supported, if you handle one of their superclasses.
///
/// The optional second template argument specifies the type that instruction
/// visitation functions should return. If you specify this, you *MUST* provide
/// an implementation of visitInstruction, though!
///
/// Note that this class is specifically designed as a template to avoid
/// virtual function call overhead. Defining and using an InstVisitor is just
/// as efficient as having your own switch statement over the instruction
/// opcode.
template<typename SubClass, typename RetTy=void>
class InstVisitor {
//===--------------------------------------------------------------------===//
// Interface code - This is the public interface of the InstVisitor that you
// use to visit instructions...
//
public:
// Generic visit method - Allow visitation to all instructions in a range
template<class Iterator>
void visit(Iterator Start, Iterator End) {
while (Start != End)
static_cast<SubClass*>(this)->visit(*Start++);
}
// Define visitors for functions and basic blocks...
//
void visit(Module &M) {
static_cast<SubClass*>(this)->visitModule(M);
visit(M.begin(), M.end());
}
void visit(Function &F) {
static_cast<SubClass*>(this)->visitFunction(F);
visit(F.begin(), F.end());
}
void visit(BasicBlock &BB) {
static_cast<SubClass*>(this)->visitBasicBlock(BB);
visit(BB.begin(), BB.end());
}
// Forwarding functions so that the user can visit with pointers AND refs.
void visit(Module *M) { visit(*M); }
void visit(Function *F) { visit(*F); }
void visit(BasicBlock *BB) { visit(*BB); }
RetTy visit(Instruction *I) { return visit(*I); }
// visit - Finally, code to visit an instruction...
//
RetTy visit(Instruction &I) {
static_assert(std::is_base_of<InstVisitor, SubClass>::value,
"Must pass the derived type to this template!");
switch (I.getOpcode()) {
default: llvm_unreachable("Unknown instruction type encountered!");
// Build the switch statement using the Instruction.def file...
#define HANDLE_INST(NUM, OPCODE, CLASS) \
case Instruction::OPCODE: return \
static_cast<SubClass*>(this)-> \
visit##OPCODE(static_cast<CLASS&>(I));
#include "llvm/IR/Instruction.def"
}
}
//===--------------------------------------------------------------------===//
// Visitation functions... these functions provide default fallbacks in case
// the user does not specify what to do for a particular instruction type.
// The default behavior is to generalize the instruction type to its
// superclass and try visiting that. All of this should be inlined perfectly,
// because there are no virtual functions to get in the way.
//
// When visiting a module, function or basic block directly, these methods get
// called to indicate when transitioning into a new unit.
//
void visitModule (Module &M) {}
void visitFunction (Function &F) {}
void visitBasicBlock(BasicBlock &BB) {}
// Define instruction specific visitor functions that can be overridden to
// handle SPECIFIC instructions. These functions automatically define
// visitMul to proxy to visitBinaryOperator, for instance, in case the user
// does not need this generality.
//
// These functions can also implement fan-out, when a single opcode and
// instruction have multiple more specific Instruction subclasses. The Call
// instruction currently supports this. We implement that by redirecting that
// instruction to a special delegation helper.
#define HANDLE_INST(NUM, OPCODE, CLASS) \
RetTy visit##OPCODE(CLASS &I) { \
if (NUM == Instruction::Call) \
return delegateCallInst(I); \
else \
DELEGATE(CLASS); \
}
#include "llvm/IR/Instruction.def"
// Specific Instruction type classes... note that all of the casts are
// necessary because we use the instruction classes as opaque types...
//
RetTy visitICmpInst(ICmpInst &I) { DELEGATE(CmpInst);}
RetTy visitFCmpInst(FCmpInst &I) { DELEGATE(CmpInst);}
RetTy visitAllocaInst(AllocaInst &I) { DELEGATE(UnaryInstruction);}
RetTy visitLoadInst(LoadInst &I) { DELEGATE(UnaryInstruction);}
RetTy visitStoreInst(StoreInst &I) { DELEGATE(Instruction);}
RetTy visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) { DELEGATE(Instruction);}
RetTy visitAtomicRMWInst(AtomicRMWInst &I) { DELEGATE(Instruction);}
RetTy visitFenceInst(FenceInst &I) { DELEGATE(Instruction);}
RetTy visitGetElementPtrInst(GetElementPtrInst &I){ DELEGATE(Instruction);}
RetTy visitPHINode(PHINode &I) { DELEGATE(Instruction);}
RetTy visitTruncInst(TruncInst &I) { DELEGATE(CastInst);}
RetTy visitZExtInst(ZExtInst &I) { DELEGATE(CastInst);}
RetTy visitSExtInst(SExtInst &I) { DELEGATE(CastInst);}
RetTy visitFPTruncInst(FPTruncInst &I) { DELEGATE(CastInst);}
RetTy visitFPExtInst(FPExtInst &I) { DELEGATE(CastInst);}
RetTy visitFPToUIInst(FPToUIInst &I) { DELEGATE(CastInst);}
RetTy visitFPToSIInst(FPToSIInst &I) { DELEGATE(CastInst);}
RetTy visitUIToFPInst(UIToFPInst &I) { DELEGATE(CastInst);}
RetTy visitSIToFPInst(SIToFPInst &I) { DELEGATE(CastInst);}
RetTy visitPtrToIntInst(PtrToIntInst &I) { DELEGATE(CastInst);}
RetTy visitIntToPtrInst(IntToPtrInst &I) { DELEGATE(CastInst);}
RetTy visitBitCastInst(BitCastInst &I) { DELEGATE(CastInst);}
RetTy visitAddrSpaceCastInst(AddrSpaceCastInst &I) { DELEGATE(CastInst);}
RetTy visitSelectInst(SelectInst &I) { DELEGATE(Instruction);}
RetTy visitVAArgInst(VAArgInst &I) { DELEGATE(UnaryInstruction);}
RetTy visitExtractElementInst(ExtractElementInst &I) { DELEGATE(Instruction);}
RetTy visitInsertElementInst(InsertElementInst &I) { DELEGATE(Instruction);}
RetTy visitShuffleVectorInst(ShuffleVectorInst &I) { DELEGATE(Instruction);}
RetTy visitExtractValueInst(ExtractValueInst &I){ DELEGATE(UnaryInstruction);}
RetTy visitInsertValueInst(InsertValueInst &I) { DELEGATE(Instruction); }
RetTy visitLandingPadInst(LandingPadInst &I) { DELEGATE(Instruction); }
RetTy visitFuncletPadInst(FuncletPadInst &I) { DELEGATE(Instruction); }
RetTy visitCleanupPadInst(CleanupPadInst &I) { DELEGATE(FuncletPadInst); }
RetTy visitCatchPadInst(CatchPadInst &I) { DELEGATE(FuncletPadInst); }
RetTy visitFreezeInst(FreezeInst &I) { DELEGATE(Instruction); }
// Handle the special intrinsic instruction classes.
RetTy visitDbgDeclareInst(DbgDeclareInst &I) { DELEGATE(DbgVariableIntrinsic);}
RetTy visitDbgValueInst(DbgValueInst &I) { DELEGATE(DbgVariableIntrinsic);}
RetTy visitDbgVariableIntrinsic(DbgVariableIntrinsic &I)
{ DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgLabelInst(DbgLabelInst &I) { DELEGATE(DbgInfoIntrinsic);}
RetTy visitDbgInfoIntrinsic(DbgInfoIntrinsic &I){ DELEGATE(IntrinsicInst); }
RetTy visitMemSetInst(MemSetInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemCpyInst(MemCpyInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemCpyInlineInst(MemCpyInlineInst &I) {
DELEGATE(MemTransferInst);
}
RetTy visitMemMoveInst(MemMoveInst &I) { DELEGATE(MemTransferInst); }
RetTy visitMemTransferInst(MemTransferInst &I) { DELEGATE(MemIntrinsic); }
RetTy visitMemIntrinsic(MemIntrinsic &I) { DELEGATE(IntrinsicInst); }
RetTy visitVAStartInst(VAStartInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitVAEndInst(VAEndInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitVACopyInst(VACopyInst &I) { DELEGATE(IntrinsicInst); }
RetTy visitIntrinsicInst(IntrinsicInst &I) { DELEGATE(CallInst); }
RetTy visitCallInst(CallInst &I) { DELEGATE(CallBase); }
RetTy visitInvokeInst(InvokeInst &I) { DELEGATE(CallBase); }
RetTy visitCallBrInst(CallBrInst &I) { DELEGATE(CallBase); }
// While terminators don't have a distinct type modeling them, we support
// intercepting them with a dedicated visitor callback.
RetTy visitReturnInst(ReturnInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitBranchInst(BranchInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitSwitchInst(SwitchInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitIndirectBrInst(IndirectBrInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitResumeInst(ResumeInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitUnreachableInst(UnreachableInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitCleanupReturnInst(CleanupReturnInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitCatchReturnInst(CatchReturnInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitCatchSwitchInst(CatchSwitchInst &I) {
return static_cast<SubClass *>(this)->visitTerminator(I);
}
RetTy visitTerminator(Instruction &I) { DELEGATE(Instruction);}
// Next level propagators: If the user does not overload a specific
// instruction type, they can overload one of these to get the whole class
// of instructions...
//
RetTy visitCastInst(CastInst &I) { DELEGATE(UnaryInstruction);}
RetTy visitUnaryOperator(UnaryOperator &I) { DELEGATE(UnaryInstruction);}
RetTy visitBinaryOperator(BinaryOperator &I) { DELEGATE(Instruction);}
RetTy visitCmpInst(CmpInst &I) { DELEGATE(Instruction);}
RetTy visitUnaryInstruction(UnaryInstruction &I){ DELEGATE(Instruction);}
// The next level delegation for `CallBase` is slightly more complex in order
// to support visiting cases where the call is also a terminator.
RetTy visitCallBase(CallBase &I) {
if (isa<InvokeInst>(I) || isa<CallBrInst>(I))
return static_cast<SubClass *>(this)->visitTerminator(I);
DELEGATE(Instruction);
}
// If the user wants a 'default' case, they can choose to override this
// function. If this function is not overloaded in the user's subclass, then
// this instruction just gets ignored.
//
// Note that you MUST override this function if your return type is not void.
//
void visitInstruction(Instruction &I) {} // Ignore unhandled instructions
private:
// Special helper function to delegate to CallInst subclass visitors.
RetTy delegateCallInst(CallInst &I) {
if (const Function *F = I.getCalledFunction()) {
switch (F->getIntrinsicID()) {
default: DELEGATE(IntrinsicInst);
case Intrinsic::dbg_declare: DELEGATE(DbgDeclareInst);
case Intrinsic::dbg_value: DELEGATE(DbgValueInst);
case Intrinsic::dbg_label: DELEGATE(DbgLabelInst);
case Intrinsic::memcpy: DELEGATE(MemCpyInst);
case Intrinsic::memmove: DELEGATE(MemMoveInst);
case Intrinsic::memset: DELEGATE(MemSetInst);
case Intrinsic::vastart: DELEGATE(VAStartInst);
case Intrinsic::vaend: DELEGATE(VAEndInst);
case Intrinsic::vacopy: DELEGATE(VACopyInst);
case Intrinsic::not_intrinsic: break;
}
}
DELEGATE(CallInst);
}
// An overload that will never actually be called; it is used only from dead
// code in the dispatching from opcodes to instruction subclasses.
RetTy delegateCallInst(Instruction &I) {
llvm_unreachable("delegateCallInst called for non-CallInst");
}
};
#undef DELEGATE
} // End llvm namespace
#endif
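
A minimal sketch of the non-void return case called out in the class comment:
because RetTy here is llvm::StringRef rather than void, visitInstruction must
be provided as the fallback.

#include "llvm/ADT/StringRef.h"
#include "llvm/IR/InstVisitor.h"

struct OpcodeNamer : llvm::InstVisitor<OpcodeNamer, llvm::StringRef> {
  llvm::StringRef visitLoadInst(llvm::LoadInst &) { return "load"; }
  llvm::StringRef visitStoreInst(llvm::StoreInst &) { return "store"; }
  // Mandatory fallback whenever the return type is not void.
  llvm::StringRef visitInstruction(llvm::Instruction &) { return "other"; }
};

// Usage, given an llvm::Instruction &I:
//   OpcodeNamer N;
//   llvm::StringRef What = N.visit(I);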

File diff suppressed because it is too large.

View File

@@ -0,0 +1,255 @@
//===-- llvm/Instruction.def - File that describes Instructions -*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains descriptions of the various LLVM instructions. This is
// used as a central place for enumerating the different instructions and
// should eventually be the place to put comments about the instructions.
//
//===----------------------------------------------------------------------===//
// NOTE: NO INCLUDE GUARD DESIRED!
// Provide definitions of macros so that users of this file do not have to
// define everything to use it...
//
#ifndef FIRST_TERM_INST
#define FIRST_TERM_INST(num)
#endif
#ifndef HANDLE_TERM_INST
#ifndef HANDLE_INST
#define HANDLE_TERM_INST(num, opcode, Class)
#else
#define HANDLE_TERM_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_TERM_INST
#define LAST_TERM_INST(num)
#endif
#ifndef FIRST_UNARY_INST
#define FIRST_UNARY_INST(num)
#endif
#ifndef HANDLE_UNARY_INST
#ifndef HANDLE_INST
#define HANDLE_UNARY_INST(num, opcode, instclass)
#else
#define HANDLE_UNARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_UNARY_INST
#define LAST_UNARY_INST(num)
#endif
#ifndef FIRST_BINARY_INST
#define FIRST_BINARY_INST(num)
#endif
#ifndef HANDLE_BINARY_INST
#ifndef HANDLE_INST
#define HANDLE_BINARY_INST(num, opcode, instclass)
#else
#define HANDLE_BINARY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_BINARY_INST
#define LAST_BINARY_INST(num)
#endif
#ifndef FIRST_MEMORY_INST
#define FIRST_MEMORY_INST(num)
#endif
#ifndef HANDLE_MEMORY_INST
#ifndef HANDLE_INST
#define HANDLE_MEMORY_INST(num, opcode, Class)
#else
#define HANDLE_MEMORY_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_MEMORY_INST
#define LAST_MEMORY_INST(num)
#endif
#ifndef FIRST_CAST_INST
#define FIRST_CAST_INST(num)
#endif
#ifndef HANDLE_CAST_INST
#ifndef HANDLE_INST
#define HANDLE_CAST_INST(num, opcode, Class)
#else
#define HANDLE_CAST_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_CAST_INST
#define LAST_CAST_INST(num)
#endif
#ifndef FIRST_FUNCLETPAD_INST
#define FIRST_FUNCLETPAD_INST(num)
#endif
#ifndef HANDLE_FUNCLETPAD_INST
#ifndef HANDLE_INST
#define HANDLE_FUNCLETPAD_INST(num, opcode, Class)
#else
#define HANDLE_FUNCLETPAD_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_FUNCLETPAD_INST
#define LAST_FUNCLETPAD_INST(num)
#endif
#ifndef FIRST_OTHER_INST
#define FIRST_OTHER_INST(num)
#endif
#ifndef HANDLE_OTHER_INST
#ifndef HANDLE_INST
#define HANDLE_OTHER_INST(num, opcode, Class)
#else
#define HANDLE_OTHER_INST(num, opcode, Class) HANDLE_INST(num, opcode, Class)
#endif
#endif
#ifndef LAST_OTHER_INST
#define LAST_OTHER_INST(num)
#endif
#ifndef HANDLE_USER_INST
#define HANDLE_USER_INST(num, opc, Class) HANDLE_OTHER_INST(num, opc, Class)
#endif
// Terminator Instructions - These instructions are used to terminate a basic
// block of the program. Every basic block must end with one of these
// instructions for it to be a well formed basic block.
//
FIRST_TERM_INST ( 1)
HANDLE_TERM_INST ( 1, Ret , ReturnInst)
HANDLE_TERM_INST ( 2, Br , BranchInst)
HANDLE_TERM_INST ( 3, Switch , SwitchInst)
HANDLE_TERM_INST ( 4, IndirectBr , IndirectBrInst)
HANDLE_TERM_INST ( 5, Invoke , InvokeInst)
HANDLE_TERM_INST ( 6, Resume , ResumeInst)
HANDLE_TERM_INST ( 7, Unreachable , UnreachableInst)
HANDLE_TERM_INST ( 8, CleanupRet , CleanupReturnInst)
HANDLE_TERM_INST ( 9, CatchRet , CatchReturnInst)
HANDLE_TERM_INST (10, CatchSwitch , CatchSwitchInst)
HANDLE_TERM_INST (11, CallBr , CallBrInst) // A call-site terminator
LAST_TERM_INST (11)
// Standard unary operators...
FIRST_UNARY_INST(12)
HANDLE_UNARY_INST(12, FNeg , UnaryOperator)
LAST_UNARY_INST(12)
// Standard binary operators...
FIRST_BINARY_INST(13)
HANDLE_BINARY_INST(13, Add , BinaryOperator)
HANDLE_BINARY_INST(14, FAdd , BinaryOperator)
HANDLE_BINARY_INST(15, Sub , BinaryOperator)
HANDLE_BINARY_INST(16, FSub , BinaryOperator)
HANDLE_BINARY_INST(17, Mul , BinaryOperator)
HANDLE_BINARY_INST(18, FMul , BinaryOperator)
HANDLE_BINARY_INST(19, UDiv , BinaryOperator)
HANDLE_BINARY_INST(20, SDiv , BinaryOperator)
HANDLE_BINARY_INST(21, FDiv , BinaryOperator)
HANDLE_BINARY_INST(22, URem , BinaryOperator)
HANDLE_BINARY_INST(23, SRem , BinaryOperator)
HANDLE_BINARY_INST(24, FRem , BinaryOperator)
// Logical operators (integer operands)
HANDLE_BINARY_INST(25, Shl , BinaryOperator) // Shift left (logical)
HANDLE_BINARY_INST(26, LShr , BinaryOperator) // Shift right (logical)
HANDLE_BINARY_INST(27, AShr , BinaryOperator) // Shift right (arithmetic)
HANDLE_BINARY_INST(28, And , BinaryOperator)
HANDLE_BINARY_INST(29, Or , BinaryOperator)
HANDLE_BINARY_INST(30, Xor , BinaryOperator)
LAST_BINARY_INST(30)
// Memory operators...
FIRST_MEMORY_INST(31)
HANDLE_MEMORY_INST(31, Alloca, AllocaInst) // Stack management
HANDLE_MEMORY_INST(32, Load , LoadInst ) // Memory manipulation instrs
HANDLE_MEMORY_INST(33, Store , StoreInst )
HANDLE_MEMORY_INST(34, GetElementPtr, GetElementPtrInst)
HANDLE_MEMORY_INST(35, Fence , FenceInst )
HANDLE_MEMORY_INST(36, AtomicCmpXchg , AtomicCmpXchgInst )
HANDLE_MEMORY_INST(37, AtomicRMW , AtomicRMWInst )
LAST_MEMORY_INST(37)
// Cast operators ...
// NOTE: The order matters here because CastInst::isEliminableCastPair
// NOTE: (see Instructions.cpp) encodes a table based on this ordering.
FIRST_CAST_INST(38)
HANDLE_CAST_INST(38, Trunc , TruncInst ) // Truncate integers
HANDLE_CAST_INST(39, ZExt , ZExtInst ) // Zero extend integers
HANDLE_CAST_INST(40, SExt , SExtInst ) // Sign extend integers
HANDLE_CAST_INST(41, FPToUI , FPToUIInst ) // floating point -> UInt
HANDLE_CAST_INST(42, FPToSI , FPToSIInst ) // floating point -> SInt
HANDLE_CAST_INST(43, UIToFP , UIToFPInst ) // UInt -> floating point
HANDLE_CAST_INST(44, SIToFP , SIToFPInst ) // SInt -> floating point
HANDLE_CAST_INST(45, FPTrunc , FPTruncInst ) // Truncate floating point
HANDLE_CAST_INST(46, FPExt , FPExtInst ) // Extend floating point
HANDLE_CAST_INST(47, PtrToInt, PtrToIntInst) // Pointer -> Integer
HANDLE_CAST_INST(48, IntToPtr, IntToPtrInst) // Integer -> Pointer
HANDLE_CAST_INST(49, BitCast , BitCastInst ) // Type cast
HANDLE_CAST_INST(50, AddrSpaceCast, AddrSpaceCastInst) // addrspace cast
LAST_CAST_INST(50)
FIRST_FUNCLETPAD_INST(51)
HANDLE_FUNCLETPAD_INST(51, CleanupPad, CleanupPadInst)
HANDLE_FUNCLETPAD_INST(52, CatchPad , CatchPadInst)
LAST_FUNCLETPAD_INST(52)
// Other operators...
FIRST_OTHER_INST(53)
HANDLE_OTHER_INST(53, ICmp , ICmpInst ) // Integer comparison instruction
HANDLE_OTHER_INST(54, FCmp , FCmpInst ) // Floating point comparison instr.
HANDLE_OTHER_INST(55, PHI , PHINode ) // PHI node instruction
HANDLE_OTHER_INST(56, Call , CallInst ) // Call a function
HANDLE_OTHER_INST(57, Select , SelectInst ) // select instruction
HANDLE_USER_INST (58, UserOp1, Instruction) // May be used internally in a pass
HANDLE_USER_INST (59, UserOp2, Instruction) // Internal to passes only
HANDLE_OTHER_INST(60, VAArg , VAArgInst ) // vaarg instruction
HANDLE_OTHER_INST(61, ExtractElement, ExtractElementInst)// extract from vector
HANDLE_OTHER_INST(62, InsertElement, InsertElementInst) // insert into vector
HANDLE_OTHER_INST(63, ShuffleVector, ShuffleVectorInst) // shuffle two vectors.
HANDLE_OTHER_INST(64, ExtractValue, ExtractValueInst)// extract from aggregate
HANDLE_OTHER_INST(65, InsertValue, InsertValueInst) // insert into aggregate
HANDLE_OTHER_INST(66, LandingPad, LandingPadInst) // Landing pad instruction.
HANDLE_OTHER_INST(67, Freeze, FreezeInst) // Freeze instruction.
LAST_OTHER_INST(67)
#undef FIRST_TERM_INST
#undef HANDLE_TERM_INST
#undef LAST_TERM_INST
#undef FIRST_UNARY_INST
#undef HANDLE_UNARY_INST
#undef LAST_UNARY_INST
#undef FIRST_BINARY_INST
#undef HANDLE_BINARY_INST
#undef LAST_BINARY_INST
#undef FIRST_MEMORY_INST
#undef HANDLE_MEMORY_INST
#undef LAST_MEMORY_INST
#undef FIRST_CAST_INST
#undef HANDLE_CAST_INST
#undef LAST_CAST_INST
#undef FIRST_FUNCLETPAD_INST
#undef HANDLE_FUNCLETPAD_INST
#undef LAST_FUNCLETPAD_INST
#undef FIRST_OTHER_INST
#undef HANDLE_OTHER_INST
#undef LAST_OTHER_INST
#undef HANDLE_USER_INST
#ifdef HANDLE_INST
#undef HANDLE_INST
#endif
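
An illustrative sketch of the intended expansion pattern: define the macro you
care about, include the file, and it stamps out one entry per opcode (the file
#undefs its macros itself). Opcode numbering starts at 1, hence the
placeholder in slot 0.

// Build a static opcode -> name table from the definitions above.
static const char *const OpcodeNames[] = {
    "<invalid>", // opcodes are numbered starting at 1
#define HANDLE_INST(NUM, OPCODE, CLASS) #OPCODE,
#include "llvm/IR/Instruction.def"
};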

View File

@@ -0,0 +1,878 @@
//===-- llvm/Instruction.h - Instruction class definition -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the declaration of the Instruction class, which is the
// base class for all of the LLVM instructions.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INSTRUCTION_H
#define LLVM_IR_INSTRUCTION_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/Bitfields.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include <cstdint>
#include <utility>
namespace llvm {
class BasicBlock;
class FastMathFlags;
class MDNode;
class Module;
struct AAMDNodes;
template <> struct ilist_alloc_traits<Instruction> {
static inline void deleteNode(Instruction *V);
};
class Instruction : public User,
public ilist_node_with_parent<Instruction, BasicBlock> {
BasicBlock *Parent;
DebugLoc DbgLoc; // 'dbg' Metadata cache.
/// Relative order of this instruction in its parent basic block. Used for
/// O(1) local dominance checks between instructions.
mutable unsigned Order = 0;
protected:
// The first 15 bits of `Value::SubclassData` are available for subclasses of
// `Instruction` to use.
using OpaqueField = Bitfield::Element<uint16_t, 0, 15>;
// Template alias so that all Instruction subclasses storing an alignment use
// the same definition.
// Valid alignments are powers of two from 2^0 to 2^MaxAlignmentExponent =
// 2^32. We store them as Log2(Alignment), so we need 6 bits to encode the 33
// possible values.
template <unsigned Offset>
using AlignmentBitfieldElementT =
typename Bitfield::Element<unsigned, Offset, 6,
Value::MaxAlignmentExponent>;
template <unsigned Offset>
using BoolBitfieldElementT = typename Bitfield::Element<bool, Offset, 1>;
template <unsigned Offset>
using AtomicOrderingBitfieldElementT =
typename Bitfield::Element<AtomicOrdering, Offset, 3,
AtomicOrdering::LAST>;
private:
// The last bit is used to store whether the instruction has metadata attached
// or not.
using HasMetadataField = Bitfield::Element<bool, 15, 1>;
protected:
~Instruction(); // Use deleteValue() to delete a generic Instruction.
public:
Instruction(const Instruction &) = delete;
Instruction &operator=(const Instruction &) = delete;
/// Specialize the methods defined in Value, as we know that an instruction
/// can only be used by other instructions.
Instruction *user_back() { return cast<Instruction>(*user_begin());}
const Instruction *user_back() const { return cast<Instruction>(*user_begin());}
inline const BasicBlock *getParent() const { return Parent; }
inline BasicBlock *getParent() { return Parent; }
/// Return the module owning the function this instruction belongs to
/// or nullptr if the function does not have a module.
///
/// Note: this is undefined behavior if the instruction does not have a
/// parent, or the parent basic block does not have a parent function.
const Module *getModule() const;
Module *getModule() {
return const_cast<Module *>(
static_cast<const Instruction *>(this)->getModule());
}
/// Return the function this instruction belongs to.
///
/// Note: it is undefined behavior to call this on an instruction not
/// currently inserted into a function.
const Function *getFunction() const;
Function *getFunction() {
return const_cast<Function *>(
static_cast<const Instruction *>(this)->getFunction());
}
/// This method unlinks 'this' from the containing basic block, but does not
/// delete it.
void removeFromParent();
/// This method unlinks 'this' from the containing basic block and deletes it.
///
/// \returns an iterator pointing to the element after the erased one
SymbolTableList<Instruction>::iterator eraseFromParent();
/// Insert an unlinked instruction into a basic block immediately before
/// the specified instruction.
void insertBefore(Instruction *InsertPos);
/// Insert an unlinked instruction into a basic block immediately after the
/// specified instruction.
void insertAfter(Instruction *InsertPos);
/// Unlink this instruction from its current basic block and insert it into
/// the basic block that MovePos lives in, right before MovePos.
void moveBefore(Instruction *MovePos);
/// Unlink this instruction and insert into BB before I.
///
/// \pre I is a valid iterator into BB.
void moveBefore(BasicBlock &BB, SymbolTableList<Instruction>::iterator I);
/// Unlink this instruction from its current basic block and insert it into
/// the basic block that MovePos lives in, right after MovePos.
void moveAfter(Instruction *MovePos);
/// Given an instruction Other in the same basic block as this instruction,
/// return true if this instruction comes before Other. In the worst case,
/// this takes linear time in the number of instructions in the block. The
/// results are cached, so in common cases when the block remains unmodified,
/// it takes constant time.
bool comesBefore(const Instruction *Other) const;
//===--------------------------------------------------------------------===//
// Subclass classification.
//===--------------------------------------------------------------------===//
/// Returns a member of one of the enums like Instruction::Add.
unsigned getOpcode() const { return getValueID() - InstructionVal; }
const char *getOpcodeName() const { return getOpcodeName(getOpcode()); }
bool isTerminator() const { return isTerminator(getOpcode()); }
bool isUnaryOp() const { return isUnaryOp(getOpcode()); }
bool isBinaryOp() const { return isBinaryOp(getOpcode()); }
bool isIntDivRem() const { return isIntDivRem(getOpcode()); }
bool isShift() const { return isShift(getOpcode()); }
bool isCast() const { return isCast(getOpcode()); }
bool isFuncletPad() const { return isFuncletPad(getOpcode()); }
bool isExceptionalTerminator() const {
return isExceptionalTerminator(getOpcode());
}
/// Check if this instruction is the only user of at least one of
/// its operands.
bool isOnlyUserOfAnyOperand();
bool isIndirectTerminator() const {
return isIndirectTerminator(getOpcode());
}
static const char* getOpcodeName(unsigned OpCode);
static inline bool isTerminator(unsigned OpCode) {
return OpCode >= TermOpsBegin && OpCode < TermOpsEnd;
}
static inline bool isUnaryOp(unsigned Opcode) {
return Opcode >= UnaryOpsBegin && Opcode < UnaryOpsEnd;
}
static inline bool isBinaryOp(unsigned Opcode) {
return Opcode >= BinaryOpsBegin && Opcode < BinaryOpsEnd;
}
static inline bool isIntDivRem(unsigned Opcode) {
return Opcode == UDiv || Opcode == SDiv || Opcode == URem || Opcode == SRem;
}
/// Determine if the Opcode is one of the shift instructions.
static inline bool isShift(unsigned Opcode) {
return Opcode >= Shl && Opcode <= AShr;
}
/// Return true if this is a logical shift left or a logical shift right.
inline bool isLogicalShift() const {
return getOpcode() == Shl || getOpcode() == LShr;
}
/// Return true if this is an arithmetic shift right.
inline bool isArithmeticShift() const {
return getOpcode() == AShr;
}
/// Determine if the Opcode is and/or/xor.
static inline bool isBitwiseLogicOp(unsigned Opcode) {
return Opcode == And || Opcode == Or || Opcode == Xor;
}
/// Return true if this is and/or/xor.
inline bool isBitwiseLogicOp() const {
return isBitwiseLogicOp(getOpcode());
}
/// Determine if the OpCode is one of the CastInst instructions.
static inline bool isCast(unsigned OpCode) {
return OpCode >= CastOpsBegin && OpCode < CastOpsEnd;
}
/// Determine if the OpCode is one of the FuncletPadInst instructions.
static inline bool isFuncletPad(unsigned OpCode) {
return OpCode >= FuncletPadOpsBegin && OpCode < FuncletPadOpsEnd;
}
/// Returns true if the OpCode is a terminator related to exception handling.
static inline bool isExceptionalTerminator(unsigned OpCode) {
switch (OpCode) {
case Instruction::CatchSwitch:
case Instruction::CatchRet:
case Instruction::CleanupRet:
case Instruction::Invoke:
case Instruction::Resume:
return true;
default:
return false;
}
}
/// Returns true if the OpCode is a terminator with indirect targets.
static inline bool isIndirectTerminator(unsigned OpCode) {
switch (OpCode) {
case Instruction::IndirectBr:
case Instruction::CallBr:
return true;
default:
return false;
}
}
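// Illustrative sketch: the static predicates above allow classifying a raw
// opcode without an Instruction object in hand, e.g.
//
//   if (Instruction::isTerminator(OpCode) &&
//       !Instruction::isExceptionalTerminator(OpCode))
//     ; // an ordinary control-flow terminator such as ret, br, or switch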
//===--------------------------------------------------------------------===//
// Metadata manipulation.
//===--------------------------------------------------------------------===//
/// Return true if this instruction has any metadata attached to it.
bool hasMetadata() const { return DbgLoc || Value::hasMetadata(); }
/// Return true if this instruction has metadata attached to it other than a
/// debug location.
bool hasMetadataOtherThanDebugLoc() const { return Value::hasMetadata(); }
/// Return true if this instruction has the given type of metadata attached.
bool hasMetadata(unsigned KindID) const {
return getMetadata(KindID) != nullptr;
}
/// Return true if this instruction has the given type of metadata attached.
bool hasMetadata(StringRef Kind) const {
return getMetadata(Kind) != nullptr;
}
/// Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
MDNode *getMetadata(unsigned KindID) const {
if (!hasMetadata()) return nullptr;
return getMetadataImpl(KindID);
}
/// Get the metadata of given kind attached to this Instruction.
/// If the metadata is not found then return null.
MDNode *getMetadata(StringRef Kind) const {
if (!hasMetadata()) return nullptr;
return getMetadataImpl(Kind);
}
/// Get all metadata attached to this Instruction. The first element of each
/// pair returned is the KindID, the second element is the metadata value.
/// This list is returned sorted by the KindID.
void
getAllMetadata(SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
if (hasMetadata())
getAllMetadataImpl(MDs);
}
/// This does the same thing as getAllMetadata, except that it filters out the
/// debug location.
void getAllMetadataOtherThanDebugLoc(
SmallVectorImpl<std::pair<unsigned, MDNode *>> &MDs) const {
Value::getAllMetadata(MDs);
}
/// Set the metadata of the specified kind to the specified node. This updates
/// or replaces metadata if already present, or removes it if Node is null.
void setMetadata(unsigned KindID, MDNode *Node);
void setMetadata(StringRef Kind, MDNode *Node);
/// Copy metadata from \p SrcInst to this instruction. \p WL, if not empty,
/// specifies the list of meta data that needs to be copied. If \p WL is
/// empty, all meta data will be copied.
void copyMetadata(const Instruction &SrcInst,
ArrayRef<unsigned> WL = ArrayRef<unsigned>());
/// If the instruction has "branch_weights" MD_prof metadata and the MDNode
/// has three operands (including name string), swap the order of the
/// metadata.
void swapProfMetadata();
/// Drop all unknown metadata except for debug locations.
/// @{
/// Passes are required to drop metadata they don't understand. This is a
/// convenience method for passes to do so.
/// dropUndefImplyingAttrsAndUnknownMetadata should be used instead of
/// this API if the Instruction being modified is a call.
void dropUnknownNonDebugMetadata(ArrayRef<unsigned> KnownIDs);
void dropUnknownNonDebugMetadata() {
return dropUnknownNonDebugMetadata(None);
}
void dropUnknownNonDebugMetadata(unsigned ID1) {
return dropUnknownNonDebugMetadata(makeArrayRef(ID1));
}
void dropUnknownNonDebugMetadata(unsigned ID1, unsigned ID2) {
unsigned IDs[] = {ID1, ID2};
return dropUnknownNonDebugMetadata(IDs);
}
/// @}
/// Adds an !annotation metadata node with \p Annotation to this instruction.
/// If this instruction already has !annotation metadata, append \p Annotation
/// to the existing node.
void addAnnotationMetadata(StringRef Annotation);
/// Returns the AA metadata for this instruction.
AAMDNodes getAAMetadata() const;
/// Sets the AA metadata on this instruction from the AAMDNodes structure.
void setAAMetadata(const AAMDNodes &N);
/// Retrieve the raw weight values of a conditional branch or select.
/// Returns true on success with profile weights filled in.
/// Returns false if no metadata or invalid metadata was found.
bool extractProfMetadata(uint64_t &TrueVal, uint64_t &FalseVal) const;
/// Retrieve total raw weight values of a branch.
/// Returns true on success with profile total weights filled in.
/// Returns false if no metadata was found.
bool extractProfTotalWeight(uint64_t &TotalVal) const;
/// Set the debug location information for this instruction.
void setDebugLoc(DebugLoc Loc) { DbgLoc = std::move(Loc); }
/// Return the debug location for this node as a DebugLoc.
const DebugLoc &getDebugLoc() const { return DbgLoc; }
/// Set or clear the nuw flag on this instruction, which must be an operator
/// which supports this flag. See LangRef.html for the meaning of this flag.
void setHasNoUnsignedWrap(bool b = true);
/// Set or clear the nsw flag on this instruction, which must be an operator
/// which supports this flag. See LangRef.html for the meaning of this flag.
void setHasNoSignedWrap(bool b = true);
/// Set or clear the exact flag on this instruction, which must be an operator
/// which supports this flag. See LangRef.html for the meaning of this flag.
void setIsExact(bool b = true);
/// Determine whether the no unsigned wrap flag is set.
bool hasNoUnsignedWrap() const;
/// Determine whether the no signed wrap flag is set.
bool hasNoSignedWrap() const;
/// Return true if this operator has flags which may cause this instruction
/// to evaluate to poison despite having non-poison inputs.
bool hasPoisonGeneratingFlags() const;
/// Drops flags that may cause this instruction to evaluate to poison despite
/// having non-poison inputs.
void dropPoisonGeneratingFlags();
/// This function drops non-debug unknown metadata (through
/// dropUnknownNonDebugMetadata). For calls, it also drops parameter and
/// return attributes that can cause undefined behaviour. Both of these should
/// be done by passes which move instructions in IR.
void
dropUndefImplyingAttrsAndUnknownMetadata(ArrayRef<unsigned> KnownIDs = {});
/// Determine whether the exact flag is set.
bool isExact() const;
/// Set or clear all fast-math-flags on this instruction, which must be an
/// operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setFast(bool B);
/// Set or clear the reassociation flag on this instruction, which must be
/// an operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasAllowReassoc(bool B);
/// Set or clear the no-nans flag on this instruction, which must be an
/// operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasNoNaNs(bool B);
/// Set or clear the no-infs flag on this instruction, which must be an
/// operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasNoInfs(bool B);
/// Set or clear the no-signed-zeros flag on this instruction, which must be
/// an operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasNoSignedZeros(bool B);
/// Set or clear the allow-reciprocal flag on this instruction, which must be
/// an operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasAllowReciprocal(bool B);
/// Set or clear the allow-contract flag on this instruction, which must be
/// an operator which supports this flag. See LangRef.html for the meaning of
/// this flag.
void setHasAllowContract(bool B);
/// Set or clear the approximate-math-functions flag on this instruction,
/// which must be an operator which supports this flag. See LangRef.html for
/// the meaning of this flag.
void setHasApproxFunc(bool B);
/// Convenience function for setting multiple fast-math flags on this
/// instruction, which must be an operator which supports these flags. See
/// LangRef.html for the meaning of these flags.
void setFastMathFlags(FastMathFlags FMF);
/// Convenience function for transferring all fast-math flag values to this
/// instruction, which must be an operator which supports these flags. See
/// LangRef.html for the meaning of these flags.
void copyFastMathFlags(FastMathFlags FMF);
/// Determine whether all fast-math-flags are set.
bool isFast() const;
/// Determine whether the allow-reassociation flag is set.
bool hasAllowReassoc() const;
/// Determine whether the no-NaNs flag is set.
bool hasNoNaNs() const;
/// Determine whether the no-infs flag is set.
bool hasNoInfs() const;
/// Determine whether the no-signed-zeros flag is set.
bool hasNoSignedZeros() const;
/// Determine whether the allow-reciprocal flag is set.
bool hasAllowReciprocal() const;
/// Determine whether the allow-contract flag is set.
bool hasAllowContract() const;
/// Determine whether the approximate-math-functions flag is set.
bool hasApproxFunc() const;
/// Convenience function for getting all the fast-math flags, which must be an
/// operator which supports these flags. See LangRef.html for the meaning of
/// these flags.
FastMathFlags getFastMathFlags() const;
/// Copy I's fast-math flags
void copyFastMathFlags(const Instruction *I);
/// Convenience method to copy supported exact, fast-math, and (optionally)
/// wrapping flags from V to this instruction.
void copyIRFlags(const Value *V, bool IncludeWrapFlags = true);
/// Logical 'and' of any supported wrapping, exact, and fast-math flags of
/// V and this instruction.
void andIRFlags(const Value *V);
/// Merge 2 debug locations and apply it to the Instruction. If the
/// instruction is a CallInst, we need to traverse the inline chain to find
/// the common scope. This is not efficient for N-way merging as each time
/// you merge 2 locations, you need to rebuild the hashmap to find the
/// common scope. However, we still choose this API because:
/// 1) Simplicity: it takes 2 locations instead of a list of locations.
/// 2) In worst case, it increases the complexity from O(N*I) to
/// O(2*N*I), where N is # of Instructions to merge, and I is the
/// maximum level of inline stack. So it is still linear.
/// 3) Merging of call instructions should be extremely rare in real
/// applications, thus the N-way merging should be on a cold code path.
/// The DebugLoc attached to this instruction will be overwritten by the
/// merged DebugLoc.
void applyMergedLocation(const DILocation *LocA, const DILocation *LocB);
/// Updates the debug location given that the instruction has been hoisted
/// from a block to a predecessor of that block.
/// Note: it is undefined behavior to call this on an instruction not
/// currently inserted into a function.
void updateLocationAfterHoist();
/// Drop the instruction's debug location. This does not guarantee removal
/// of the !dbg source location attachment, as it must set a line 0 location
/// with scope information attached on call instructions. To guarantee
/// removal of the !dbg attachment, use the \ref setDebugLoc() API.
/// Note: it is undefined behavior to call this on an instruction not
/// currently inserted into a function.
void dropLocation();
private:
// These are all implemented in Metadata.cpp.
MDNode *getMetadataImpl(unsigned KindID) const;
MDNode *getMetadataImpl(StringRef Kind) const;
void
getAllMetadataImpl(SmallVectorImpl<std::pair<unsigned, MDNode *>> &) const;
public:
//===--------------------------------------------------------------------===//
// Predicates and helper methods.
//===--------------------------------------------------------------------===//
/// Return true if the instruction is associative:
///
/// Associative operators satisfy: x op (y op z) === (x op y) op z
///
/// In LLVM, the Add, Mul, And, Or, and Xor operators are associative.
///
bool isAssociative() const LLVM_READONLY;
static bool isAssociative(unsigned Opcode) {
return Opcode == And || Opcode == Or || Opcode == Xor ||
Opcode == Add || Opcode == Mul;
}
/// Return true if the instruction is commutative:
///
/// Commutative operators satisfy: (x op y) === (y op x)
///
/// In LLVM, these are the commutative operators, plus SetEQ and SetNE, when
/// applied to any type.
///
bool isCommutative() const LLVM_READONLY;
static bool isCommutative(unsigned Opcode) {
switch (Opcode) {
case Add: case FAdd:
case Mul: case FMul:
case And: case Or: case Xor:
return true;
default:
return false;
}
}
/// Return true if the instruction is idempotent:
///
/// Idempotent operators satisfy: x op x === x
///
/// In LLVM, the And and Or operators are idempotent.
///
bool isIdempotent() const { return isIdempotent(getOpcode()); }
static bool isIdempotent(unsigned Opcode) {
return Opcode == And || Opcode == Or;
}
/// Return true if the instruction is nilpotent:
///
/// Nilpotent operators satisfy: x op x === Id,
///
/// where Id is the identity for the operator, i.e. a constant such that
/// x op Id === x and Id op x === x for all x.
///
/// In LLVM, the Xor operator is nilpotent.
///
bool isNilpotent() const { return isNilpotent(getOpcode()); }
static bool isNilpotent(unsigned Opcode) {
return Opcode == Xor;
}
/// Return true if this instruction may modify memory.
bool mayWriteToMemory() const;
/// Return true if this instruction may read memory.
bool mayReadFromMemory() const;
/// Return true if this instruction may read or write memory.
bool mayReadOrWriteMemory() const {
return mayReadFromMemory() || mayWriteToMemory();
}
/// Return true if this instruction has an AtomicOrdering of unordered or
/// higher.
bool isAtomic() const;
/// Return true if this atomic instruction loads from memory.
bool hasAtomicLoad() const;
/// Return true if this atomic instruction stores to memory.
bool hasAtomicStore() const;
/// Return true if this instruction has a volatile memory access.
bool isVolatile() const;
/// Return true if this instruction may throw an exception.
bool mayThrow() const;
/// Return true if this instruction behaves like a memory fence: it can load
/// or store to memory without being given a specific memory location.
bool isFenceLike() const {
switch (getOpcode()) {
default:
return false;
// This list should be kept in sync with the list in mayWriteToMemory for
// all opcodes which don't have a memory location.
case Instruction::Fence:
case Instruction::CatchPad:
case Instruction::CatchRet:
case Instruction::Call:
case Instruction::Invoke:
return true;
}
}
/// Return true if the instruction may have side effects.
///
/// Side effects are:
/// * Writing to memory.
/// * Unwinding.
/// * Not returning (e.g. an infinite loop).
///
/// Note that this does not consider malloc and alloca to have side
/// effects because the newly allocated memory is completely invisible to
/// instructions which don't use the returned value. For cases where this
/// matters, isSafeToSpeculativelyExecute may be more appropriate.
bool mayHaveSideEffects() const;
/// Return true if the instruction can be removed if the result is unused.
///
/// When constant folding, some instructions cannot be removed even if their
/// results are unused. Specifically terminator instructions and calls that
/// may have side effects cannot be removed without semantically changing the
/// generated program.
bool isSafeToRemove() const;
/// Return true if the instruction will return (unwinding is considered as
/// a form of returning control flow here).
bool willReturn() const;
/// Return true if the instruction is a variety of EH-block.
bool isEHPad() const {
switch (getOpcode()) {
case Instruction::CatchSwitch:
case Instruction::CatchPad:
case Instruction::CleanupPad:
case Instruction::LandingPad:
return true;
default:
return false;
}
}
/// Return true if the instruction is a llvm.lifetime.start or
/// llvm.lifetime.end marker.
bool isLifetimeStartOrEnd() const;
/// Return true if the instruction is a llvm.launder.invariant.group or
/// llvm.strip.invariant.group.
bool isLaunderOrStripInvariantGroup() const;
/// Return true if the instruction is a DbgInfoIntrinsic or PseudoProbeInst.
bool isDebugOrPseudoInst() const;
/// Return a pointer to the next non-debug instruction in the same basic
/// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
/// operations if \c SkipPseudoOp is true.
const Instruction *
getNextNonDebugInstruction(bool SkipPseudoOp = false) const;
Instruction *getNextNonDebugInstruction(bool SkipPseudoOp = false) {
return const_cast<Instruction *>(
static_cast<const Instruction *>(this)->getNextNonDebugInstruction(
SkipPseudoOp));
}
/// Return a pointer to the previous non-debug instruction in the same basic
/// block as 'this', or nullptr if no such instruction exists. Skip any pseudo
/// operations if \c SkipPseudoOp is true.
const Instruction *
getPrevNonDebugInstruction(bool SkipPseudoOp = false) const;
Instruction *getPrevNonDebugInstruction(bool SkipPseudoOp = false) {
return const_cast<Instruction *>(
static_cast<const Instruction *>(this)->getPrevNonDebugInstruction(
SkipPseudoOp));
}
/// Create a copy of 'this' instruction that is identical in all ways except
/// the following:
/// * The instruction has no parent
/// * The instruction has no name
///
Instruction *clone() const;
/// Return true if the specified instruction is exactly identical to the
/// current one. This means that all operands match and any extra information
/// (e.g. load is volatile) agree.
bool isIdenticalTo(const Instruction *I) const;
/// This is like isIdenticalTo, except that it ignores the
/// SubclassOptionalData flags, which may specify conditions under which the
/// instruction's result is undefined.
bool isIdenticalToWhenDefined(const Instruction *I) const;
/// When checking for operation equivalence (using isSameOperationAs) it is
/// sometimes useful to ignore certain attributes.
enum OperationEquivalenceFlags {
/// Check for equivalence ignoring load/store alignment.
CompareIgnoringAlignment = 1<<0,
/// Check for equivalence treating a type and a vector of that type
/// as equivalent.
CompareUsingScalarTypes = 1<<1
};
/// This function determines if the specified instruction executes the same
/// operation as the current one. This means that the opcodes, type, operand
/// types and any other factors affecting the operation must be the same. This
/// is similar to isIdenticalTo except the operands themselves don't have to
/// be identical.
/// @returns true if the specified instruction is the same operation as
/// the current one.
bool isSameOperationAs(const Instruction *I, unsigned flags = 0) const;
/// Return true if there are any uses of this instruction in blocks other than
/// the specified block. Note that PHI nodes are considered to evaluate their
/// operands in the corresponding predecessor block.
bool isUsedOutsideOfBlock(const BasicBlock *BB) const;
/// Return the number of successors that this instruction has. The instruction
/// must be a terminator.
unsigned getNumSuccessors() const;
/// Return the specified successor. This instruction must be a terminator.
BasicBlock *getSuccessor(unsigned Idx) const;
/// Update the specified successor to point at the provided block. This
/// instruction must be a terminator.
void setSuccessor(unsigned Idx, BasicBlock *BB);
/// Replace the specified successor OldBB with the provided block NewBB.
/// This instruction must be a terminator.
void replaceSuccessorWith(BasicBlock *OldBB, BasicBlock *NewBB);
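// Illustrative sketch (assumes Term is a terminator instruction): visiting
// each successor through the indexed accessors above.
//   for (unsigned Idx = 0, E = Term->getNumSuccessors(); Idx != E; ++Idx) {
//     BasicBlock *Succ = Term->getSuccessor(Idx);
//     // ... inspect or rewrite Succ ...
//   }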
/// Methods for support type inquiry through isa, cast, and dyn_cast:
static bool classof(const Value *V) {
return V->getValueID() >= Value::InstructionVal;
}
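// Illustrative use via the casting machinery (assumes V is a Value*):
//   if (auto *I = dyn_cast<Instruction>(V))
//     errs() << "opcode: " << I->getOpcodeName() << "\n";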
//----------------------------------------------------------------------
// Exported enumerations.
//
enum TermOps { // These terminate basic blocks
#define FIRST_TERM_INST(N) TermOpsBegin = N,
#define HANDLE_TERM_INST(N, OPC, CLASS) OPC = N,
#define LAST_TERM_INST(N) TermOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
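// Illustrative note: predicates such as isTerminator() are implemented as
// range checks against these generated bounds, e.g.
//   getOpcode() >= TermOpsBegin && getOpcode() < TermOpsEnd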
enum UnaryOps {
#define FIRST_UNARY_INST(N) UnaryOpsBegin = N,
#define HANDLE_UNARY_INST(N, OPC, CLASS) OPC = N,
#define LAST_UNARY_INST(N) UnaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
enum BinaryOps {
#define FIRST_BINARY_INST(N) BinaryOpsBegin = N,
#define HANDLE_BINARY_INST(N, OPC, CLASS) OPC = N,
#define LAST_BINARY_INST(N) BinaryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
enum MemoryOps {
#define FIRST_MEMORY_INST(N) MemoryOpsBegin = N,
#define HANDLE_MEMORY_INST(N, OPC, CLASS) OPC = N,
#define LAST_MEMORY_INST(N) MemoryOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
enum CastOps {
#define FIRST_CAST_INST(N) CastOpsBegin = N,
#define HANDLE_CAST_INST(N, OPC, CLASS) OPC = N,
#define LAST_CAST_INST(N) CastOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
enum FuncletPadOps {
#define FIRST_FUNCLETPAD_INST(N) FuncletPadOpsBegin = N,
#define HANDLE_FUNCLETPAD_INST(N, OPC, CLASS) OPC = N,
#define LAST_FUNCLETPAD_INST(N) FuncletPadOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
enum OtherOps {
#define FIRST_OTHER_INST(N) OtherOpsBegin = N,
#define HANDLE_OTHER_INST(N, OPC, CLASS) OPC = N,
#define LAST_OTHER_INST(N) OtherOpsEnd = N+1
#include "llvm/IR/Instruction.def"
};
private:
friend class SymbolTableListTraits<Instruction>;
friend class BasicBlock; // For renumbering.
// Shadow Value::setValueSubclassData with a private forwarding method so that
// subclasses cannot accidentally use it.
void setValueSubclassData(unsigned short D) {
Value::setValueSubclassData(D);
}
unsigned short getSubclassDataFromValue() const {
return Value::getSubclassDataFromValue();
}
void setParent(BasicBlock *P);
protected:
// Instruction subclasses can store up to 15 bits of data in the
// SubclassData field of the instruction with these members.
template <typename BitfieldElement>
typename BitfieldElement::Type getSubclassData() const {
static_assert(
std::is_same<BitfieldElement, HasMetadataField>::value ||
!Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
"Must not overlap with the metadata bit");
return Bitfield::get<BitfieldElement>(getSubclassDataFromValue());
}
template <typename BitfieldElement>
void setSubclassData(typename BitfieldElement::Type Value) {
static_assert(
std::is_same<BitfieldElement, HasMetadataField>::value ||
!Bitfield::isOverlapping<BitfieldElement, HasMetadataField>(),
"Must not overlap with the metadata bit");
auto Storage = getSubclassDataFromValue();
Bitfield::set<BitfieldElement>(Storage, Value);
setValueSubclassData(Storage);
}
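// Hypothetical sketch of how a subclass would use these helpers (the field
// name and layout below are illustrative, not from the upstream header):
//   using MyFlagField = Bitfield::Element<bool, 0, 1>; // bit 0, width 1
//   bool getMyFlag() const { return getSubclassData<MyFlagField>(); }
//   void setMyFlag(bool B) { setSubclassData<MyFlagField>(B); }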
Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
Instruction *InsertBefore = nullptr);
Instruction(Type *Ty, unsigned iType, Use *Ops, unsigned NumOps,
BasicBlock *InsertAtEnd);
private:
/// Create a copy of this instruction.
Instruction *cloneImpl() const;
};
inline void ilist_alloc_traits<Instruction>::deleteNode(Instruction *V) {
V->deleteValue();
}
} // end namespace llvm
#endif // LLVM_IR_INSTRUCTION_H

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,256 @@
//===- Intrinsics.h - LLVM Intrinsic Function Handling ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines a set of enums which allow processing of intrinsic
// functions. Values of these enum types are returned by
// Function::getIntrinsicID.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_INTRINSICS_H
#define LLVM_IR_INTRINSICS_H
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/ADT/Optional.h"
#include "llvm/Support/TypeSize.h"
#include <string>
namespace llvm {
class Type;
class FunctionType;
class Function;
class LLVMContext;
class Module;
class AttributeList;
/// This namespace contains an enum with a value for every intrinsic/builtin
/// function known by LLVM. The enum values are returned by
/// Function::getIntrinsicID().
namespace Intrinsic {
// Abstraction for the arguments of the noalias intrinsics
static const int NoAliasScopeDeclScopeArg = 0;
// Intrinsic ID type. This is an opaque typedef to facilitate splitting up
// the enum into target-specific enums.
typedef unsigned ID;
enum IndependentIntrinsics : unsigned {
not_intrinsic = 0, // Must be zero
// Get the intrinsic enums generated from Intrinsics.td
#define GET_INTRINSIC_ENUM_VALUES
#include "llvm/IR/IntrinsicEnums.inc"
#undef GET_INTRINSIC_ENUM_VALUES
};
/// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx".
/// Note, this version is for intrinsics with no overloads. Use the other
/// version of getName if overloads are required.
StringRef getName(ID id);
/// Return the LLVM name for an intrinsic, without encoded types for
/// overloading, such as "llvm.ssa.copy".
StringRef getBaseName(ID id);
/// Return the LLVM name for an intrinsic, such as "llvm.ppc.altivec.lvx" or
/// "llvm.ssa.copy.p0s_s.1". Note, this version of getName supports overloads.
/// This is less efficient than the StringRef version of this function. If no
/// overloads are required, it is safe to use this version, but better to use
/// the StringRef version. If one of the types is based on an unnamed type, a
/// function type will be computed. Providing FT will avoid this computation.
std::string getName(ID Id, ArrayRef<Type *> Tys, Module *M,
FunctionType *FT = nullptr);
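// Illustrative example (assumes a Module *M and an i32 Type *Int32Ty): the
// overloaded name encodes the concrete types.
//   StringRef Base = Intrinsic::getBaseName(Intrinsic::umax); // "llvm.umax"
//   std::string Full =
//       Intrinsic::getName(Intrinsic::umax, {Int32Ty}, M);    // "llvm.umax.i32"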
/// Return the LLVM name for an intrinsic. This is a special version only to
/// be used by LLVMIntrinsicCopyOverloadedName. It only supports overloads
/// based on named types.
std::string getNameNoUnnamedTypes(ID Id, ArrayRef<Type *> Tys);
/// Return the function type for an intrinsic.
FunctionType *getType(LLVMContext &Context, ID id,
ArrayRef<Type*> Tys = None);
/// Returns true if the intrinsic can be overloaded.
bool isOverloaded(ID id);
/// Returns true if the intrinsic is a leaf, i.e. it does not make any calls
/// itself. Most intrinsics are leaves, the exceptions being the patchpoint
/// and statepoint intrinsics. These call (or invoke) their "target" argument.
bool isLeaf(ID id);
/// Return the attributes for an intrinsic.
AttributeList getAttributes(LLVMContext &C, ID id);
/// Create or insert an LLVM Function declaration for an intrinsic, and return
/// it.
///
/// The Tys parameter is for intrinsics with overloaded types (e.g., those
/// using iAny, fAny, vAny, or iPTRAny). For a declaration of an overloaded
/// intrinsic, Tys must provide exactly one type for each overloaded type in
/// the intrinsic.
Function *getDeclaration(Module *M, ID id, ArrayRef<Type*> Tys = None);
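// A hedged sketch (assumes a Module *M and an i32 Type *Int32Ty): declare
// the overloaded llvm.umax intrinsic for i32 operands.
//   Function *UMax = Intrinsic::getDeclaration(M, Intrinsic::umax, {Int32Ty});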
/// Looks up Name in NameTable via binary search. NameTable must be sorted
/// and all entries must start with "llvm.". If NameTable contains an exact
/// match for Name or a prefix of Name followed by a dot, its index in
/// NameTable is returned. Otherwise, -1 is returned.
int lookupLLVMIntrinsicByName(ArrayRef<const char *> NameTable,
StringRef Name);
/// Map a GCC builtin name to an intrinsic ID.
ID getIntrinsicForGCCBuiltin(const char *Prefix, StringRef BuiltinName);
/// Map a MS builtin name to an intrinsic ID.
ID getIntrinsicForMSBuiltin(const char *Prefix, StringRef BuiltinName);
/// This is a type descriptor which explains the type requirements of an
/// intrinsic. This is returned by getIntrinsicInfoTableEntries.
struct IITDescriptor {
enum IITDescriptorKind {
Void,
VarArg,
MMX,
Token,
Metadata,
Half,
BFloat,
Float,
Double,
Quad,
Integer,
Vector,
Pointer,
Struct,
Argument,
ExtendArgument,
TruncArgument,
HalfVecArgument,
SameVecWidthArgument,
PtrToArgument,
PtrToElt,
VecOfAnyPtrsToElt,
VecElementArgument,
Subdivide2Argument,
Subdivide4Argument,
VecOfBitcastsToInt,
AMX,
PPCQuad,
} Kind;
union {
unsigned Integer_Width;
unsigned Float_Width;
unsigned Pointer_AddressSpace;
unsigned Struct_NumElements;
unsigned Argument_Info;
ElementCount Vector_Width;
};
enum ArgKind {
AK_Any,
AK_AnyInteger,
AK_AnyFloat,
AK_AnyVector,
AK_AnyPointer,
AK_MatchType = 7
};
unsigned getArgumentNumber() const {
assert(Kind == Argument || Kind == ExtendArgument ||
Kind == TruncArgument || Kind == HalfVecArgument ||
Kind == SameVecWidthArgument || Kind == PtrToArgument ||
Kind == PtrToElt || Kind == VecElementArgument ||
Kind == Subdivide2Argument || Kind == Subdivide4Argument ||
Kind == VecOfBitcastsToInt);
return Argument_Info >> 3;
}
ArgKind getArgumentKind() const {
assert(Kind == Argument || Kind == ExtendArgument ||
Kind == TruncArgument || Kind == HalfVecArgument ||
Kind == SameVecWidthArgument || Kind == PtrToArgument ||
Kind == VecElementArgument || Kind == Subdivide2Argument ||
Kind == Subdivide4Argument || Kind == VecOfBitcastsToInt);
return (ArgKind)(Argument_Info & 7);
}
// VecOfAnyPtrsToElt uses both an overloaded argument (for address space)
// and a reference argument (for matching vector width and element types)
unsigned getOverloadArgNumber() const {
assert(Kind == VecOfAnyPtrsToElt);
return Argument_Info >> 16;
}
unsigned getRefArgNumber() const {
assert(Kind == VecOfAnyPtrsToElt);
return Argument_Info & 0xFFFF;
}
static IITDescriptor get(IITDescriptorKind K, unsigned Field) {
IITDescriptor Result = { K, { Field } };
return Result;
}
static IITDescriptor get(IITDescriptorKind K, unsigned short Hi,
unsigned short Lo) {
unsigned Field = Hi << 16 | Lo;
IITDescriptor Result = {K, {Field}};
return Result;
}
static IITDescriptor getVector(unsigned Width, bool IsScalable) {
IITDescriptor Result = {Vector, {0}};
Result.Vector_Width = ElementCount::get(Width, IsScalable);
return Result;
}
};
/// Copy the IIT table descriptors for the specified intrinsic into the given
/// array of IITDescriptors.
void getIntrinsicInfoTableEntries(ID id, SmallVectorImpl<IITDescriptor> &T);
enum MatchIntrinsicTypesResult {
MatchIntrinsicTypes_Match = 0,
MatchIntrinsicTypes_NoMatchRet = 1,
MatchIntrinsicTypes_NoMatchArg = 2,
};
/// Match the specified function type with the type constraints specified by
/// the .td file. If the given type is an overloaded type it is pushed to the
/// ArgTys vector.
///
/// Returns MatchIntrinsicTypes_Match (zero) if the given type matches the
/// constraints, and a nonzero MatchIntrinsicTypesResult identifying the
/// mismatch otherwise.
MatchIntrinsicTypesResult
matchIntrinsicSignature(FunctionType *FTy, ArrayRef<IITDescriptor> &Infos,
SmallVectorImpl<Type *> &ArgTys);
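// Illustrative sketch (assumes F is a declared intrinsic Function*): fetch
// the descriptor table, then check F's type against it.
//   SmallVector<Intrinsic::IITDescriptor, 8> Table;
//   Intrinsic::getIntrinsicInfoTableEntries(F->getIntrinsicID(), Table);
//   ArrayRef<Intrinsic::IITDescriptor> TableRef = Table;
//   SmallVector<Type *, 4> ArgTys;
//   if (Intrinsic::matchIntrinsicSignature(F->getFunctionType(), TableRef,
//                                          ArgTys) ==
//       Intrinsic::MatchIntrinsicTypes_Match) {
//     // F's signature is consistent with the .td constraints.
//   }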
/// Verify whether the intrinsic has variable arguments. This method is
/// intended to be called after all the fixed arguments have been matched.
///
/// This method returns true on error.
bool matchIntrinsicVarArg(bool isVarArg, ArrayRef<IITDescriptor> &Infos);
/// Gets the type arguments of an intrinsic call by matching type constraints
/// specified by the .td file. The overloaded types are pushed into the
/// ArgTys vector.
///
/// Returns false if the given function is not a valid intrinsic call.
bool getIntrinsicSignature(Function *F, SmallVectorImpl<Type *> &ArgTys);
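// Simpler illustrative form of the check above (assumes F is a Function*):
//   SmallVector<Type *, 4> ArgTys;
//   if (Intrinsic::getIntrinsicSignature(F, ArgTys)) {
//     // ArgTys now holds F's concrete overload types.
//   }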
// Checks if the intrinsic name matches its signature; if not, returns the
// declaration with the same signature and a remangled name.
// An existing GlobalValue with the wanted name but the wrong prototype or of
// the wrong kind will be renamed by adding ".renamed" to the name.
llvm::Optional<Function*> remangleIntrinsicFunction(Function *F);
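// Illustrative use (assumes F may carry a stale mangled name):
//   if (llvm::Optional<Function *> Remangled =
//           Intrinsic::remangleIntrinsicFunction(F))
//     F = *Remangled;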
} // End Intrinsic namespace
} // End llvm namespace
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,899 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_AARCH64_ENUMS_H
#define LLVM_IR_INTRINSIC_AARCH64_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum AARCH64Intrinsics : unsigned {
// Enum values for intrinsics
aarch64_addg = 364, // llvm.aarch64.addg
aarch64_clrex, // llvm.aarch64.clrex
aarch64_cls, // llvm.aarch64.cls
aarch64_cls64, // llvm.aarch64.cls64
aarch64_crc32b, // llvm.aarch64.crc32b
aarch64_crc32cb, // llvm.aarch64.crc32cb
aarch64_crc32ch, // llvm.aarch64.crc32ch
aarch64_crc32cw, // llvm.aarch64.crc32cw
aarch64_crc32cx, // llvm.aarch64.crc32cx
aarch64_crc32h, // llvm.aarch64.crc32h
aarch64_crc32w, // llvm.aarch64.crc32w
aarch64_crc32x, // llvm.aarch64.crc32x
aarch64_crypto_aesd, // llvm.aarch64.crypto.aesd
aarch64_crypto_aese, // llvm.aarch64.crypto.aese
aarch64_crypto_aesimc, // llvm.aarch64.crypto.aesimc
aarch64_crypto_aesmc, // llvm.aarch64.crypto.aesmc
aarch64_crypto_bcaxs, // llvm.aarch64.crypto.bcaxs
aarch64_crypto_bcaxu, // llvm.aarch64.crypto.bcaxu
aarch64_crypto_eor3s, // llvm.aarch64.crypto.eor3s
aarch64_crypto_eor3u, // llvm.aarch64.crypto.eor3u
aarch64_crypto_rax1, // llvm.aarch64.crypto.rax1
aarch64_crypto_sha1c, // llvm.aarch64.crypto.sha1c
aarch64_crypto_sha1h, // llvm.aarch64.crypto.sha1h
aarch64_crypto_sha1m, // llvm.aarch64.crypto.sha1m
aarch64_crypto_sha1p, // llvm.aarch64.crypto.sha1p
aarch64_crypto_sha1su0, // llvm.aarch64.crypto.sha1su0
aarch64_crypto_sha1su1, // llvm.aarch64.crypto.sha1su1
aarch64_crypto_sha256h, // llvm.aarch64.crypto.sha256h
aarch64_crypto_sha256h2, // llvm.aarch64.crypto.sha256h2
aarch64_crypto_sha256su0, // llvm.aarch64.crypto.sha256su0
aarch64_crypto_sha256su1, // llvm.aarch64.crypto.sha256su1
aarch64_crypto_sha512h, // llvm.aarch64.crypto.sha512h
aarch64_crypto_sha512h2, // llvm.aarch64.crypto.sha512h2
aarch64_crypto_sha512su0, // llvm.aarch64.crypto.sha512su0
aarch64_crypto_sha512su1, // llvm.aarch64.crypto.sha512su1
aarch64_crypto_sm3partw1, // llvm.aarch64.crypto.sm3partw1
aarch64_crypto_sm3partw2, // llvm.aarch64.crypto.sm3partw2
aarch64_crypto_sm3ss1, // llvm.aarch64.crypto.sm3ss1
aarch64_crypto_sm3tt1a, // llvm.aarch64.crypto.sm3tt1a
aarch64_crypto_sm3tt1b, // llvm.aarch64.crypto.sm3tt1b
aarch64_crypto_sm3tt2a, // llvm.aarch64.crypto.sm3tt2a
aarch64_crypto_sm3tt2b, // llvm.aarch64.crypto.sm3tt2b
aarch64_crypto_sm4e, // llvm.aarch64.crypto.sm4e
aarch64_crypto_sm4ekey, // llvm.aarch64.crypto.sm4ekey
aarch64_crypto_xar, // llvm.aarch64.crypto.xar
aarch64_dmb, // llvm.aarch64.dmb
aarch64_dsb, // llvm.aarch64.dsb
aarch64_fjcvtzs, // llvm.aarch64.fjcvtzs
aarch64_frint32x, // llvm.aarch64.frint32x
aarch64_frint32z, // llvm.aarch64.frint32z
aarch64_frint64x, // llvm.aarch64.frint64x
aarch64_frint64z, // llvm.aarch64.frint64z
aarch64_get_fpcr, // llvm.aarch64.get.fpcr
aarch64_gmi, // llvm.aarch64.gmi
aarch64_hint, // llvm.aarch64.hint
aarch64_irg, // llvm.aarch64.irg
aarch64_irg_sp, // llvm.aarch64.irg.sp
aarch64_isb, // llvm.aarch64.isb
aarch64_ld64b, // llvm.aarch64.ld64b
aarch64_ldaxp, // llvm.aarch64.ldaxp
aarch64_ldaxr, // llvm.aarch64.ldaxr
aarch64_ldg, // llvm.aarch64.ldg
aarch64_ldxp, // llvm.aarch64.ldxp
aarch64_ldxr, // llvm.aarch64.ldxr
aarch64_mops_memset_tag, // llvm.aarch64.mops.memset.tag
aarch64_neon_abs, // llvm.aarch64.neon.abs
aarch64_neon_addhn, // llvm.aarch64.neon.addhn
aarch64_neon_addp, // llvm.aarch64.neon.addp
aarch64_neon_bfcvt, // llvm.aarch64.neon.bfcvt
aarch64_neon_bfcvtn, // llvm.aarch64.neon.bfcvtn
aarch64_neon_bfcvtn2, // llvm.aarch64.neon.bfcvtn2
aarch64_neon_bfdot, // llvm.aarch64.neon.bfdot
aarch64_neon_bfmlalb, // llvm.aarch64.neon.bfmlalb
aarch64_neon_bfmlalt, // llvm.aarch64.neon.bfmlalt
aarch64_neon_bfmmla, // llvm.aarch64.neon.bfmmla
aarch64_neon_cls, // llvm.aarch64.neon.cls
aarch64_neon_fabd, // llvm.aarch64.neon.fabd
aarch64_neon_facge, // llvm.aarch64.neon.facge
aarch64_neon_facgt, // llvm.aarch64.neon.facgt
aarch64_neon_faddp, // llvm.aarch64.neon.faddp
aarch64_neon_faddv, // llvm.aarch64.neon.faddv
aarch64_neon_fcvtas, // llvm.aarch64.neon.fcvtas
aarch64_neon_fcvtau, // llvm.aarch64.neon.fcvtau
aarch64_neon_fcvtms, // llvm.aarch64.neon.fcvtms
aarch64_neon_fcvtmu, // llvm.aarch64.neon.fcvtmu
aarch64_neon_fcvtns, // llvm.aarch64.neon.fcvtns
aarch64_neon_fcvtnu, // llvm.aarch64.neon.fcvtnu
aarch64_neon_fcvtps, // llvm.aarch64.neon.fcvtps
aarch64_neon_fcvtpu, // llvm.aarch64.neon.fcvtpu
aarch64_neon_fcvtxn, // llvm.aarch64.neon.fcvtxn
aarch64_neon_fcvtzs, // llvm.aarch64.neon.fcvtzs
aarch64_neon_fcvtzu, // llvm.aarch64.neon.fcvtzu
aarch64_neon_fmax, // llvm.aarch64.neon.fmax
aarch64_neon_fmaxnm, // llvm.aarch64.neon.fmaxnm
aarch64_neon_fmaxnmp, // llvm.aarch64.neon.fmaxnmp
aarch64_neon_fmaxnmv, // llvm.aarch64.neon.fmaxnmv
aarch64_neon_fmaxp, // llvm.aarch64.neon.fmaxp
aarch64_neon_fmaxv, // llvm.aarch64.neon.fmaxv
aarch64_neon_fmin, // llvm.aarch64.neon.fmin
aarch64_neon_fminnm, // llvm.aarch64.neon.fminnm
aarch64_neon_fminnmp, // llvm.aarch64.neon.fminnmp
aarch64_neon_fminnmv, // llvm.aarch64.neon.fminnmv
aarch64_neon_fminp, // llvm.aarch64.neon.fminp
aarch64_neon_fminv, // llvm.aarch64.neon.fminv
aarch64_neon_fmlal, // llvm.aarch64.neon.fmlal
aarch64_neon_fmlal2, // llvm.aarch64.neon.fmlal2
aarch64_neon_fmlsl, // llvm.aarch64.neon.fmlsl
aarch64_neon_fmlsl2, // llvm.aarch64.neon.fmlsl2
aarch64_neon_fmulx, // llvm.aarch64.neon.fmulx
aarch64_neon_frecpe, // llvm.aarch64.neon.frecpe
aarch64_neon_frecps, // llvm.aarch64.neon.frecps
aarch64_neon_frecpx, // llvm.aarch64.neon.frecpx
aarch64_neon_frint32x, // llvm.aarch64.neon.frint32x
aarch64_neon_frint32z, // llvm.aarch64.neon.frint32z
aarch64_neon_frint64x, // llvm.aarch64.neon.frint64x
aarch64_neon_frint64z, // llvm.aarch64.neon.frint64z
aarch64_neon_frsqrte, // llvm.aarch64.neon.frsqrte
aarch64_neon_frsqrts, // llvm.aarch64.neon.frsqrts
aarch64_neon_ld1x2, // llvm.aarch64.neon.ld1x2
aarch64_neon_ld1x3, // llvm.aarch64.neon.ld1x3
aarch64_neon_ld1x4, // llvm.aarch64.neon.ld1x4
aarch64_neon_ld2, // llvm.aarch64.neon.ld2
aarch64_neon_ld2lane, // llvm.aarch64.neon.ld2lane
aarch64_neon_ld2r, // llvm.aarch64.neon.ld2r
aarch64_neon_ld3, // llvm.aarch64.neon.ld3
aarch64_neon_ld3lane, // llvm.aarch64.neon.ld3lane
aarch64_neon_ld3r, // llvm.aarch64.neon.ld3r
aarch64_neon_ld4, // llvm.aarch64.neon.ld4
aarch64_neon_ld4lane, // llvm.aarch64.neon.ld4lane
aarch64_neon_ld4r, // llvm.aarch64.neon.ld4r
aarch64_neon_pmul, // llvm.aarch64.neon.pmul
aarch64_neon_pmull, // llvm.aarch64.neon.pmull
aarch64_neon_pmull64, // llvm.aarch64.neon.pmull64
aarch64_neon_raddhn, // llvm.aarch64.neon.raddhn
aarch64_neon_rshrn, // llvm.aarch64.neon.rshrn
aarch64_neon_rsubhn, // llvm.aarch64.neon.rsubhn
aarch64_neon_sabd, // llvm.aarch64.neon.sabd
aarch64_neon_saddlp, // llvm.aarch64.neon.saddlp
aarch64_neon_saddlv, // llvm.aarch64.neon.saddlv
aarch64_neon_saddv, // llvm.aarch64.neon.saddv
aarch64_neon_scalar_sqxtn, // llvm.aarch64.neon.scalar.sqxtn
aarch64_neon_scalar_sqxtun, // llvm.aarch64.neon.scalar.sqxtun
aarch64_neon_scalar_uqxtn, // llvm.aarch64.neon.scalar.uqxtn
aarch64_neon_sdot, // llvm.aarch64.neon.sdot
aarch64_neon_shadd, // llvm.aarch64.neon.shadd
aarch64_neon_shll, // llvm.aarch64.neon.shll
aarch64_neon_shsub, // llvm.aarch64.neon.shsub
aarch64_neon_smax, // llvm.aarch64.neon.smax
aarch64_neon_smaxp, // llvm.aarch64.neon.smaxp
aarch64_neon_smaxv, // llvm.aarch64.neon.smaxv
aarch64_neon_smin, // llvm.aarch64.neon.smin
aarch64_neon_sminp, // llvm.aarch64.neon.sminp
aarch64_neon_sminv, // llvm.aarch64.neon.sminv
aarch64_neon_smmla, // llvm.aarch64.neon.smmla
aarch64_neon_smull, // llvm.aarch64.neon.smull
aarch64_neon_sqabs, // llvm.aarch64.neon.sqabs
aarch64_neon_sqadd, // llvm.aarch64.neon.sqadd
aarch64_neon_sqdmulh, // llvm.aarch64.neon.sqdmulh
aarch64_neon_sqdmulh_lane, // llvm.aarch64.neon.sqdmulh.lane
aarch64_neon_sqdmulh_laneq, // llvm.aarch64.neon.sqdmulh.laneq
aarch64_neon_sqdmull, // llvm.aarch64.neon.sqdmull
aarch64_neon_sqdmulls_scalar, // llvm.aarch64.neon.sqdmulls.scalar
aarch64_neon_sqneg, // llvm.aarch64.neon.sqneg
aarch64_neon_sqrdmlah, // llvm.aarch64.neon.sqrdmlah
aarch64_neon_sqrdmlsh, // llvm.aarch64.neon.sqrdmlsh
aarch64_neon_sqrdmulh, // llvm.aarch64.neon.sqrdmulh
aarch64_neon_sqrdmulh_lane, // llvm.aarch64.neon.sqrdmulh.lane
aarch64_neon_sqrdmulh_laneq, // llvm.aarch64.neon.sqrdmulh.laneq
aarch64_neon_sqrshl, // llvm.aarch64.neon.sqrshl
aarch64_neon_sqrshrn, // llvm.aarch64.neon.sqrshrn
aarch64_neon_sqrshrun, // llvm.aarch64.neon.sqrshrun
aarch64_neon_sqshl, // llvm.aarch64.neon.sqshl
aarch64_neon_sqshlu, // llvm.aarch64.neon.sqshlu
aarch64_neon_sqshrn, // llvm.aarch64.neon.sqshrn
aarch64_neon_sqshrun, // llvm.aarch64.neon.sqshrun
aarch64_neon_sqsub, // llvm.aarch64.neon.sqsub
aarch64_neon_sqxtn, // llvm.aarch64.neon.sqxtn
aarch64_neon_sqxtun, // llvm.aarch64.neon.sqxtun
aarch64_neon_srhadd, // llvm.aarch64.neon.srhadd
aarch64_neon_srshl, // llvm.aarch64.neon.srshl
aarch64_neon_sshl, // llvm.aarch64.neon.sshl
aarch64_neon_sshll, // llvm.aarch64.neon.sshll
aarch64_neon_st1x2, // llvm.aarch64.neon.st1x2
aarch64_neon_st1x3, // llvm.aarch64.neon.st1x3
aarch64_neon_st1x4, // llvm.aarch64.neon.st1x4
aarch64_neon_st2, // llvm.aarch64.neon.st2
aarch64_neon_st2lane, // llvm.aarch64.neon.st2lane
aarch64_neon_st3, // llvm.aarch64.neon.st3
aarch64_neon_st3lane, // llvm.aarch64.neon.st3lane
aarch64_neon_st4, // llvm.aarch64.neon.st4
aarch64_neon_st4lane, // llvm.aarch64.neon.st4lane
aarch64_neon_subhn, // llvm.aarch64.neon.subhn
aarch64_neon_suqadd, // llvm.aarch64.neon.suqadd
aarch64_neon_tbl1, // llvm.aarch64.neon.tbl1
aarch64_neon_tbl2, // llvm.aarch64.neon.tbl2
aarch64_neon_tbl3, // llvm.aarch64.neon.tbl3
aarch64_neon_tbl4, // llvm.aarch64.neon.tbl4
aarch64_neon_tbx1, // llvm.aarch64.neon.tbx1
aarch64_neon_tbx2, // llvm.aarch64.neon.tbx2
aarch64_neon_tbx3, // llvm.aarch64.neon.tbx3
aarch64_neon_tbx4, // llvm.aarch64.neon.tbx4
aarch64_neon_uabd, // llvm.aarch64.neon.uabd
aarch64_neon_uaddlp, // llvm.aarch64.neon.uaddlp
aarch64_neon_uaddlv, // llvm.aarch64.neon.uaddlv
aarch64_neon_uaddv, // llvm.aarch64.neon.uaddv
aarch64_neon_udot, // llvm.aarch64.neon.udot
aarch64_neon_uhadd, // llvm.aarch64.neon.uhadd
aarch64_neon_uhsub, // llvm.aarch64.neon.uhsub
aarch64_neon_umax, // llvm.aarch64.neon.umax
aarch64_neon_umaxp, // llvm.aarch64.neon.umaxp
aarch64_neon_umaxv, // llvm.aarch64.neon.umaxv
aarch64_neon_umin, // llvm.aarch64.neon.umin
aarch64_neon_uminp, // llvm.aarch64.neon.uminp
aarch64_neon_uminv, // llvm.aarch64.neon.uminv
aarch64_neon_ummla, // llvm.aarch64.neon.ummla
aarch64_neon_umull, // llvm.aarch64.neon.umull
aarch64_neon_uqadd, // llvm.aarch64.neon.uqadd
aarch64_neon_uqrshl, // llvm.aarch64.neon.uqrshl
aarch64_neon_uqrshrn, // llvm.aarch64.neon.uqrshrn
aarch64_neon_uqshl, // llvm.aarch64.neon.uqshl
aarch64_neon_uqshrn, // llvm.aarch64.neon.uqshrn
aarch64_neon_uqsub, // llvm.aarch64.neon.uqsub
aarch64_neon_uqxtn, // llvm.aarch64.neon.uqxtn
aarch64_neon_urecpe, // llvm.aarch64.neon.urecpe
aarch64_neon_urhadd, // llvm.aarch64.neon.urhadd
aarch64_neon_urshl, // llvm.aarch64.neon.urshl
aarch64_neon_ursqrte, // llvm.aarch64.neon.ursqrte
aarch64_neon_usdot, // llvm.aarch64.neon.usdot
aarch64_neon_ushl, // llvm.aarch64.neon.ushl
aarch64_neon_ushll, // llvm.aarch64.neon.ushll
aarch64_neon_usmmla, // llvm.aarch64.neon.usmmla
aarch64_neon_usqadd, // llvm.aarch64.neon.usqadd
aarch64_neon_vcadd_rot270, // llvm.aarch64.neon.vcadd.rot270
aarch64_neon_vcadd_rot90, // llvm.aarch64.neon.vcadd.rot90
aarch64_neon_vcmla_rot0, // llvm.aarch64.neon.vcmla.rot0
aarch64_neon_vcmla_rot180, // llvm.aarch64.neon.vcmla.rot180
aarch64_neon_vcmla_rot270, // llvm.aarch64.neon.vcmla.rot270
aarch64_neon_vcmla_rot90, // llvm.aarch64.neon.vcmla.rot90
aarch64_neon_vcopy_lane, // llvm.aarch64.neon.vcopy.lane
aarch64_neon_vcvtfp2fxs, // llvm.aarch64.neon.vcvtfp2fxs
aarch64_neon_vcvtfp2fxu, // llvm.aarch64.neon.vcvtfp2fxu
aarch64_neon_vcvtfp2hf, // llvm.aarch64.neon.vcvtfp2hf
aarch64_neon_vcvtfxs2fp, // llvm.aarch64.neon.vcvtfxs2fp
aarch64_neon_vcvtfxu2fp, // llvm.aarch64.neon.vcvtfxu2fp
aarch64_neon_vcvthf2fp, // llvm.aarch64.neon.vcvthf2fp
aarch64_neon_vsli, // llvm.aarch64.neon.vsli
aarch64_neon_vsri, // llvm.aarch64.neon.vsri
aarch64_rndr, // llvm.aarch64.rndr
aarch64_rndrrs, // llvm.aarch64.rndrrs
aarch64_sdiv, // llvm.aarch64.sdiv
aarch64_set_fpcr, // llvm.aarch64.set.fpcr
aarch64_settag, // llvm.aarch64.settag
aarch64_settag_zero, // llvm.aarch64.settag.zero
aarch64_sisd_fabd, // llvm.aarch64.sisd.fabd
aarch64_sisd_fcvtxn, // llvm.aarch64.sisd.fcvtxn
aarch64_space, // llvm.aarch64.space
aarch64_st64b, // llvm.aarch64.st64b
aarch64_st64bv, // llvm.aarch64.st64bv
aarch64_st64bv0, // llvm.aarch64.st64bv0
aarch64_stg, // llvm.aarch64.stg
aarch64_stgp, // llvm.aarch64.stgp
aarch64_stlxp, // llvm.aarch64.stlxp
aarch64_stlxr, // llvm.aarch64.stlxr
aarch64_stxp, // llvm.aarch64.stxp
aarch64_stxr, // llvm.aarch64.stxr
aarch64_subp, // llvm.aarch64.subp
aarch64_sve_abs, // llvm.aarch64.sve.abs
aarch64_sve_adclb, // llvm.aarch64.sve.adclb
aarch64_sve_adclt, // llvm.aarch64.sve.adclt
aarch64_sve_add, // llvm.aarch64.sve.add
aarch64_sve_addhnb, // llvm.aarch64.sve.addhnb
aarch64_sve_addhnt, // llvm.aarch64.sve.addhnt
aarch64_sve_addp, // llvm.aarch64.sve.addp
aarch64_sve_adrb, // llvm.aarch64.sve.adrb
aarch64_sve_adrd, // llvm.aarch64.sve.adrd
aarch64_sve_adrh, // llvm.aarch64.sve.adrh
aarch64_sve_adrw, // llvm.aarch64.sve.adrw
aarch64_sve_aesd, // llvm.aarch64.sve.aesd
aarch64_sve_aese, // llvm.aarch64.sve.aese
aarch64_sve_aesimc, // llvm.aarch64.sve.aesimc
aarch64_sve_aesmc, // llvm.aarch64.sve.aesmc
aarch64_sve_and, // llvm.aarch64.sve.and
aarch64_sve_and_z, // llvm.aarch64.sve.and.z
aarch64_sve_andv, // llvm.aarch64.sve.andv
aarch64_sve_asr, // llvm.aarch64.sve.asr
aarch64_sve_asr_wide, // llvm.aarch64.sve.asr.wide
aarch64_sve_asrd, // llvm.aarch64.sve.asrd
aarch64_sve_bcax, // llvm.aarch64.sve.bcax
aarch64_sve_bdep_x, // llvm.aarch64.sve.bdep.x
aarch64_sve_bext_x, // llvm.aarch64.sve.bext.x
aarch64_sve_bfdot, // llvm.aarch64.sve.bfdot
aarch64_sve_bfdot_lane, // llvm.aarch64.sve.bfdot.lane
aarch64_sve_bfmlalb, // llvm.aarch64.sve.bfmlalb
aarch64_sve_bfmlalb_lane, // llvm.aarch64.sve.bfmlalb.lane
aarch64_sve_bfmlalt, // llvm.aarch64.sve.bfmlalt
aarch64_sve_bfmlalt_lane, // llvm.aarch64.sve.bfmlalt.lane
aarch64_sve_bfmmla, // llvm.aarch64.sve.bfmmla
aarch64_sve_bgrp_x, // llvm.aarch64.sve.bgrp.x
aarch64_sve_bic, // llvm.aarch64.sve.bic
aarch64_sve_bic_z, // llvm.aarch64.sve.bic.z
aarch64_sve_brka, // llvm.aarch64.sve.brka
aarch64_sve_brka_z, // llvm.aarch64.sve.brka.z
aarch64_sve_brkb, // llvm.aarch64.sve.brkb
aarch64_sve_brkb_z, // llvm.aarch64.sve.brkb.z
aarch64_sve_brkn_z, // llvm.aarch64.sve.brkn.z
aarch64_sve_brkpa_z, // llvm.aarch64.sve.brkpa.z
aarch64_sve_brkpb_z, // llvm.aarch64.sve.brkpb.z
aarch64_sve_bsl, // llvm.aarch64.sve.bsl
aarch64_sve_bsl1n, // llvm.aarch64.sve.bsl1n
aarch64_sve_bsl2n, // llvm.aarch64.sve.bsl2n
aarch64_sve_cadd_x, // llvm.aarch64.sve.cadd.x
aarch64_sve_cdot, // llvm.aarch64.sve.cdot
aarch64_sve_cdot_lane, // llvm.aarch64.sve.cdot.lane
aarch64_sve_clasta, // llvm.aarch64.sve.clasta
aarch64_sve_clasta_n, // llvm.aarch64.sve.clasta.n
aarch64_sve_clastb, // llvm.aarch64.sve.clastb
aarch64_sve_clastb_n, // llvm.aarch64.sve.clastb.n
aarch64_sve_cls, // llvm.aarch64.sve.cls
aarch64_sve_clz, // llvm.aarch64.sve.clz
aarch64_sve_cmla_lane_x, // llvm.aarch64.sve.cmla.lane.x
aarch64_sve_cmla_x, // llvm.aarch64.sve.cmla.x
aarch64_sve_cmpeq, // llvm.aarch64.sve.cmpeq
aarch64_sve_cmpeq_wide, // llvm.aarch64.sve.cmpeq.wide
aarch64_sve_cmpge, // llvm.aarch64.sve.cmpge
aarch64_sve_cmpge_wide, // llvm.aarch64.sve.cmpge.wide
aarch64_sve_cmpgt, // llvm.aarch64.sve.cmpgt
aarch64_sve_cmpgt_wide, // llvm.aarch64.sve.cmpgt.wide
aarch64_sve_cmphi, // llvm.aarch64.sve.cmphi
aarch64_sve_cmphi_wide, // llvm.aarch64.sve.cmphi.wide
aarch64_sve_cmphs, // llvm.aarch64.sve.cmphs
aarch64_sve_cmphs_wide, // llvm.aarch64.sve.cmphs.wide
aarch64_sve_cmple_wide, // llvm.aarch64.sve.cmple.wide
aarch64_sve_cmplo_wide, // llvm.aarch64.sve.cmplo.wide
aarch64_sve_cmpls_wide, // llvm.aarch64.sve.cmpls.wide
aarch64_sve_cmplt_wide, // llvm.aarch64.sve.cmplt.wide
aarch64_sve_cmpne, // llvm.aarch64.sve.cmpne
aarch64_sve_cmpne_wide, // llvm.aarch64.sve.cmpne.wide
aarch64_sve_cnot, // llvm.aarch64.sve.cnot
aarch64_sve_cnt, // llvm.aarch64.sve.cnt
aarch64_sve_cntb, // llvm.aarch64.sve.cntb
aarch64_sve_cntd, // llvm.aarch64.sve.cntd
aarch64_sve_cnth, // llvm.aarch64.sve.cnth
aarch64_sve_cntp, // llvm.aarch64.sve.cntp
aarch64_sve_cntw, // llvm.aarch64.sve.cntw
aarch64_sve_compact, // llvm.aarch64.sve.compact
aarch64_sve_convert_from_svbool, // llvm.aarch64.sve.convert.from.svbool
aarch64_sve_convert_to_svbool, // llvm.aarch64.sve.convert.to.svbool
aarch64_sve_dup, // llvm.aarch64.sve.dup
aarch64_sve_dup_x, // llvm.aarch64.sve.dup.x
aarch64_sve_dupq_lane, // llvm.aarch64.sve.dupq.lane
aarch64_sve_eor, // llvm.aarch64.sve.eor
aarch64_sve_eor_z, // llvm.aarch64.sve.eor.z
aarch64_sve_eor3, // llvm.aarch64.sve.eor3
aarch64_sve_eorbt, // llvm.aarch64.sve.eorbt
aarch64_sve_eortb, // llvm.aarch64.sve.eortb
aarch64_sve_eorv, // llvm.aarch64.sve.eorv
aarch64_sve_ext, // llvm.aarch64.sve.ext
aarch64_sve_fabd, // llvm.aarch64.sve.fabd
aarch64_sve_fabs, // llvm.aarch64.sve.fabs
aarch64_sve_facge, // llvm.aarch64.sve.facge
aarch64_sve_facgt, // llvm.aarch64.sve.facgt
aarch64_sve_fadd, // llvm.aarch64.sve.fadd
aarch64_sve_fadda, // llvm.aarch64.sve.fadda
aarch64_sve_faddp, // llvm.aarch64.sve.faddp
aarch64_sve_faddv, // llvm.aarch64.sve.faddv
aarch64_sve_fcadd, // llvm.aarch64.sve.fcadd
aarch64_sve_fcmla, // llvm.aarch64.sve.fcmla
aarch64_sve_fcmla_lane, // llvm.aarch64.sve.fcmla.lane
aarch64_sve_fcmpeq, // llvm.aarch64.sve.fcmpeq
aarch64_sve_fcmpge, // llvm.aarch64.sve.fcmpge
aarch64_sve_fcmpgt, // llvm.aarch64.sve.fcmpgt
aarch64_sve_fcmpne, // llvm.aarch64.sve.fcmpne
aarch64_sve_fcmpuo, // llvm.aarch64.sve.fcmpuo
aarch64_sve_fcvt, // llvm.aarch64.sve.fcvt
aarch64_sve_fcvt_bf16f32, // llvm.aarch64.sve.fcvt.bf16f32
aarch64_sve_fcvt_f16f32, // llvm.aarch64.sve.fcvt.f16f32
aarch64_sve_fcvt_f16f64, // llvm.aarch64.sve.fcvt.f16f64
aarch64_sve_fcvt_f32f16, // llvm.aarch64.sve.fcvt.f32f16
aarch64_sve_fcvt_f32f64, // llvm.aarch64.sve.fcvt.f32f64
aarch64_sve_fcvt_f64f16, // llvm.aarch64.sve.fcvt.f64f16
aarch64_sve_fcvt_f64f32, // llvm.aarch64.sve.fcvt.f64f32
aarch64_sve_fcvtlt_f32f16, // llvm.aarch64.sve.fcvtlt.f32f16
aarch64_sve_fcvtlt_f64f32, // llvm.aarch64.sve.fcvtlt.f64f32
aarch64_sve_fcvtnt_bf16f32, // llvm.aarch64.sve.fcvtnt.bf16f32
aarch64_sve_fcvtnt_f16f32, // llvm.aarch64.sve.fcvtnt.f16f32
aarch64_sve_fcvtnt_f32f64, // llvm.aarch64.sve.fcvtnt.f32f64
aarch64_sve_fcvtx_f32f64, // llvm.aarch64.sve.fcvtx.f32f64
aarch64_sve_fcvtxnt_f32f64, // llvm.aarch64.sve.fcvtxnt.f32f64
aarch64_sve_fcvtzs, // llvm.aarch64.sve.fcvtzs
aarch64_sve_fcvtzs_i32f16, // llvm.aarch64.sve.fcvtzs.i32f16
aarch64_sve_fcvtzs_i32f64, // llvm.aarch64.sve.fcvtzs.i32f64
aarch64_sve_fcvtzs_i64f16, // llvm.aarch64.sve.fcvtzs.i64f16
aarch64_sve_fcvtzs_i64f32, // llvm.aarch64.sve.fcvtzs.i64f32
aarch64_sve_fcvtzu, // llvm.aarch64.sve.fcvtzu
aarch64_sve_fcvtzu_i32f16, // llvm.aarch64.sve.fcvtzu.i32f16
aarch64_sve_fcvtzu_i32f64, // llvm.aarch64.sve.fcvtzu.i32f64
aarch64_sve_fcvtzu_i64f16, // llvm.aarch64.sve.fcvtzu.i64f16
aarch64_sve_fcvtzu_i64f32, // llvm.aarch64.sve.fcvtzu.i64f32
aarch64_sve_fdiv, // llvm.aarch64.sve.fdiv
aarch64_sve_fdivr, // llvm.aarch64.sve.fdivr
aarch64_sve_fexpa_x, // llvm.aarch64.sve.fexpa.x
aarch64_sve_flogb, // llvm.aarch64.sve.flogb
aarch64_sve_fmad, // llvm.aarch64.sve.fmad
aarch64_sve_fmax, // llvm.aarch64.sve.fmax
aarch64_sve_fmaxnm, // llvm.aarch64.sve.fmaxnm
aarch64_sve_fmaxnmp, // llvm.aarch64.sve.fmaxnmp
aarch64_sve_fmaxnmv, // llvm.aarch64.sve.fmaxnmv
aarch64_sve_fmaxp, // llvm.aarch64.sve.fmaxp
aarch64_sve_fmaxv, // llvm.aarch64.sve.fmaxv
aarch64_sve_fmin, // llvm.aarch64.sve.fmin
aarch64_sve_fminnm, // llvm.aarch64.sve.fminnm
aarch64_sve_fminnmp, // llvm.aarch64.sve.fminnmp
aarch64_sve_fminnmv, // llvm.aarch64.sve.fminnmv
aarch64_sve_fminp, // llvm.aarch64.sve.fminp
aarch64_sve_fminv, // llvm.aarch64.sve.fminv
aarch64_sve_fmla, // llvm.aarch64.sve.fmla
aarch64_sve_fmla_lane, // llvm.aarch64.sve.fmla.lane
aarch64_sve_fmlalb, // llvm.aarch64.sve.fmlalb
aarch64_sve_fmlalb_lane, // llvm.aarch64.sve.fmlalb.lane
aarch64_sve_fmlalt, // llvm.aarch64.sve.fmlalt
aarch64_sve_fmlalt_lane, // llvm.aarch64.sve.fmlalt.lane
aarch64_sve_fmls, // llvm.aarch64.sve.fmls
aarch64_sve_fmls_lane, // llvm.aarch64.sve.fmls.lane
aarch64_sve_fmlslb, // llvm.aarch64.sve.fmlslb
aarch64_sve_fmlslb_lane, // llvm.aarch64.sve.fmlslb.lane
aarch64_sve_fmlslt, // llvm.aarch64.sve.fmlslt
aarch64_sve_fmlslt_lane, // llvm.aarch64.sve.fmlslt.lane
aarch64_sve_fmmla, // llvm.aarch64.sve.fmmla
aarch64_sve_fmsb, // llvm.aarch64.sve.fmsb
aarch64_sve_fmul, // llvm.aarch64.sve.fmul
aarch64_sve_fmul_lane, // llvm.aarch64.sve.fmul.lane
aarch64_sve_fmulx, // llvm.aarch64.sve.fmulx
aarch64_sve_fneg, // llvm.aarch64.sve.fneg
aarch64_sve_fnmad, // llvm.aarch64.sve.fnmad
aarch64_sve_fnmla, // llvm.aarch64.sve.fnmla
aarch64_sve_fnmls, // llvm.aarch64.sve.fnmls
aarch64_sve_fnmsb, // llvm.aarch64.sve.fnmsb
aarch64_sve_frecpe_x, // llvm.aarch64.sve.frecpe.x
aarch64_sve_frecps_x, // llvm.aarch64.sve.frecps.x
aarch64_sve_frecpx, // llvm.aarch64.sve.frecpx
aarch64_sve_frinta, // llvm.aarch64.sve.frinta
aarch64_sve_frinti, // llvm.aarch64.sve.frinti
aarch64_sve_frintm, // llvm.aarch64.sve.frintm
aarch64_sve_frintn, // llvm.aarch64.sve.frintn
aarch64_sve_frintp, // llvm.aarch64.sve.frintp
aarch64_sve_frintx, // llvm.aarch64.sve.frintx
aarch64_sve_frintz, // llvm.aarch64.sve.frintz
aarch64_sve_frsqrte_x, // llvm.aarch64.sve.frsqrte.x
aarch64_sve_frsqrts_x, // llvm.aarch64.sve.frsqrts.x
aarch64_sve_fscale, // llvm.aarch64.sve.fscale
aarch64_sve_fsqrt, // llvm.aarch64.sve.fsqrt
aarch64_sve_fsub, // llvm.aarch64.sve.fsub
aarch64_sve_fsubr, // llvm.aarch64.sve.fsubr
aarch64_sve_ftmad_x, // llvm.aarch64.sve.ftmad.x
aarch64_sve_ftsmul_x, // llvm.aarch64.sve.ftsmul.x
aarch64_sve_ftssel_x, // llvm.aarch64.sve.ftssel.x
aarch64_sve_histcnt, // llvm.aarch64.sve.histcnt
aarch64_sve_histseg, // llvm.aarch64.sve.histseg
aarch64_sve_index, // llvm.aarch64.sve.index
aarch64_sve_insr, // llvm.aarch64.sve.insr
aarch64_sve_lasta, // llvm.aarch64.sve.lasta
aarch64_sve_lastb, // llvm.aarch64.sve.lastb
aarch64_sve_ld1, // llvm.aarch64.sve.ld1
aarch64_sve_ld1_gather, // llvm.aarch64.sve.ld1.gather
aarch64_sve_ld1_gather_index, // llvm.aarch64.sve.ld1.gather.index
aarch64_sve_ld1_gather_scalar_offset, // llvm.aarch64.sve.ld1.gather.scalar.offset
aarch64_sve_ld1_gather_sxtw, // llvm.aarch64.sve.ld1.gather.sxtw
aarch64_sve_ld1_gather_sxtw_index, // llvm.aarch64.sve.ld1.gather.sxtw.index
aarch64_sve_ld1_gather_uxtw, // llvm.aarch64.sve.ld1.gather.uxtw
aarch64_sve_ld1_gather_uxtw_index, // llvm.aarch64.sve.ld1.gather.uxtw.index
aarch64_sve_ld1ro, // llvm.aarch64.sve.ld1ro
aarch64_sve_ld1rq, // llvm.aarch64.sve.ld1rq
aarch64_sve_ld2, // llvm.aarch64.sve.ld2
aarch64_sve_ld2_sret, // llvm.aarch64.sve.ld2.sret
aarch64_sve_ld3, // llvm.aarch64.sve.ld3
aarch64_sve_ld3_sret, // llvm.aarch64.sve.ld3.sret
aarch64_sve_ld4, // llvm.aarch64.sve.ld4
aarch64_sve_ld4_sret, // llvm.aarch64.sve.ld4.sret
aarch64_sve_ldff1, // llvm.aarch64.sve.ldff1
aarch64_sve_ldff1_gather, // llvm.aarch64.sve.ldff1.gather
aarch64_sve_ldff1_gather_index, // llvm.aarch64.sve.ldff1.gather.index
aarch64_sve_ldff1_gather_scalar_offset, // llvm.aarch64.sve.ldff1.gather.scalar.offset
aarch64_sve_ldff1_gather_sxtw, // llvm.aarch64.sve.ldff1.gather.sxtw
aarch64_sve_ldff1_gather_sxtw_index, // llvm.aarch64.sve.ldff1.gather.sxtw.index
aarch64_sve_ldff1_gather_uxtw, // llvm.aarch64.sve.ldff1.gather.uxtw
aarch64_sve_ldff1_gather_uxtw_index, // llvm.aarch64.sve.ldff1.gather.uxtw.index
aarch64_sve_ldnf1, // llvm.aarch64.sve.ldnf1
aarch64_sve_ldnt1, // llvm.aarch64.sve.ldnt1
aarch64_sve_ldnt1_gather, // llvm.aarch64.sve.ldnt1.gather
aarch64_sve_ldnt1_gather_index, // llvm.aarch64.sve.ldnt1.gather.index
aarch64_sve_ldnt1_gather_scalar_offset, // llvm.aarch64.sve.ldnt1.gather.scalar.offset
aarch64_sve_ldnt1_gather_uxtw, // llvm.aarch64.sve.ldnt1.gather.uxtw
aarch64_sve_lsl, // llvm.aarch64.sve.lsl
aarch64_sve_lsl_wide, // llvm.aarch64.sve.lsl.wide
aarch64_sve_lsr, // llvm.aarch64.sve.lsr
aarch64_sve_lsr_wide, // llvm.aarch64.sve.lsr.wide
aarch64_sve_mad, // llvm.aarch64.sve.mad
aarch64_sve_match, // llvm.aarch64.sve.match
aarch64_sve_mla, // llvm.aarch64.sve.mla
aarch64_sve_mla_lane, // llvm.aarch64.sve.mla.lane
aarch64_sve_mls, // llvm.aarch64.sve.mls
aarch64_sve_mls_lane, // llvm.aarch64.sve.mls.lane
aarch64_sve_msb, // llvm.aarch64.sve.msb
aarch64_sve_mul, // llvm.aarch64.sve.mul
aarch64_sve_mul_lane, // llvm.aarch64.sve.mul.lane
aarch64_sve_nand_z, // llvm.aarch64.sve.nand.z
aarch64_sve_nbsl, // llvm.aarch64.sve.nbsl
aarch64_sve_neg, // llvm.aarch64.sve.neg
aarch64_sve_nmatch, // llvm.aarch64.sve.nmatch
aarch64_sve_nor_z, // llvm.aarch64.sve.nor.z
aarch64_sve_not, // llvm.aarch64.sve.not
aarch64_sve_orn_z, // llvm.aarch64.sve.orn.z
aarch64_sve_orr, // llvm.aarch64.sve.orr
aarch64_sve_orr_z, // llvm.aarch64.sve.orr.z
aarch64_sve_orv, // llvm.aarch64.sve.orv
aarch64_sve_pfirst, // llvm.aarch64.sve.pfirst
aarch64_sve_pmul, // llvm.aarch64.sve.pmul
aarch64_sve_pmullb_pair, // llvm.aarch64.sve.pmullb.pair
aarch64_sve_pmullt_pair, // llvm.aarch64.sve.pmullt.pair
aarch64_sve_pnext, // llvm.aarch64.sve.pnext
aarch64_sve_prf, // llvm.aarch64.sve.prf
aarch64_sve_prfb_gather_index, // llvm.aarch64.sve.prfb.gather.index
aarch64_sve_prfb_gather_scalar_offset, // llvm.aarch64.sve.prfb.gather.scalar.offset
aarch64_sve_prfb_gather_sxtw_index, // llvm.aarch64.sve.prfb.gather.sxtw.index
aarch64_sve_prfb_gather_uxtw_index, // llvm.aarch64.sve.prfb.gather.uxtw.index
aarch64_sve_prfd_gather_index, // llvm.aarch64.sve.prfd.gather.index
aarch64_sve_prfd_gather_scalar_offset, // llvm.aarch64.sve.prfd.gather.scalar.offset
aarch64_sve_prfd_gather_sxtw_index, // llvm.aarch64.sve.prfd.gather.sxtw.index
aarch64_sve_prfd_gather_uxtw_index, // llvm.aarch64.sve.prfd.gather.uxtw.index
aarch64_sve_prfh_gather_index, // llvm.aarch64.sve.prfh.gather.index
aarch64_sve_prfh_gather_scalar_offset, // llvm.aarch64.sve.prfh.gather.scalar.offset
aarch64_sve_prfh_gather_sxtw_index, // llvm.aarch64.sve.prfh.gather.sxtw.index
aarch64_sve_prfh_gather_uxtw_index, // llvm.aarch64.sve.prfh.gather.uxtw.index
aarch64_sve_prfw_gather_index, // llvm.aarch64.sve.prfw.gather.index
aarch64_sve_prfw_gather_scalar_offset, // llvm.aarch64.sve.prfw.gather.scalar.offset
aarch64_sve_prfw_gather_sxtw_index, // llvm.aarch64.sve.prfw.gather.sxtw.index
aarch64_sve_prfw_gather_uxtw_index, // llvm.aarch64.sve.prfw.gather.uxtw.index
aarch64_sve_ptest_any, // llvm.aarch64.sve.ptest.any
aarch64_sve_ptest_first, // llvm.aarch64.sve.ptest.first
aarch64_sve_ptest_last, // llvm.aarch64.sve.ptest.last
aarch64_sve_ptrue, // llvm.aarch64.sve.ptrue
aarch64_sve_punpkhi, // llvm.aarch64.sve.punpkhi
aarch64_sve_punpklo, // llvm.aarch64.sve.punpklo
aarch64_sve_raddhnb, // llvm.aarch64.sve.raddhnb
aarch64_sve_raddhnt, // llvm.aarch64.sve.raddhnt
aarch64_sve_rax1, // llvm.aarch64.sve.rax1
aarch64_sve_rbit, // llvm.aarch64.sve.rbit
aarch64_sve_rdffr, // llvm.aarch64.sve.rdffr
aarch64_sve_rdffr_z, // llvm.aarch64.sve.rdffr.z
aarch64_sve_rev, // llvm.aarch64.sve.rev
aarch64_sve_revb, // llvm.aarch64.sve.revb
aarch64_sve_revh, // llvm.aarch64.sve.revh
aarch64_sve_revw, // llvm.aarch64.sve.revw
aarch64_sve_rshrnb, // llvm.aarch64.sve.rshrnb
aarch64_sve_rshrnt, // llvm.aarch64.sve.rshrnt
aarch64_sve_rsubhnb, // llvm.aarch64.sve.rsubhnb
aarch64_sve_rsubhnt, // llvm.aarch64.sve.rsubhnt
aarch64_sve_saba, // llvm.aarch64.sve.saba
aarch64_sve_sabalb, // llvm.aarch64.sve.sabalb
aarch64_sve_sabalt, // llvm.aarch64.sve.sabalt
aarch64_sve_sabd, // llvm.aarch64.sve.sabd
aarch64_sve_sabdlb, // llvm.aarch64.sve.sabdlb
aarch64_sve_sabdlt, // llvm.aarch64.sve.sabdlt
aarch64_sve_sadalp, // llvm.aarch64.sve.sadalp
aarch64_sve_saddlb, // llvm.aarch64.sve.saddlb
aarch64_sve_saddlbt, // llvm.aarch64.sve.saddlbt
aarch64_sve_saddlt, // llvm.aarch64.sve.saddlt
aarch64_sve_saddv, // llvm.aarch64.sve.saddv
aarch64_sve_saddwb, // llvm.aarch64.sve.saddwb
aarch64_sve_saddwt, // llvm.aarch64.sve.saddwt
aarch64_sve_sbclb, // llvm.aarch64.sve.sbclb
aarch64_sve_sbclt, // llvm.aarch64.sve.sbclt
aarch64_sve_scvtf, // llvm.aarch64.sve.scvtf
aarch64_sve_scvtf_f16i32, // llvm.aarch64.sve.scvtf.f16i32
aarch64_sve_scvtf_f16i64, // llvm.aarch64.sve.scvtf.f16i64
aarch64_sve_scvtf_f32i64, // llvm.aarch64.sve.scvtf.f32i64
aarch64_sve_scvtf_f64i32, // llvm.aarch64.sve.scvtf.f64i32
aarch64_sve_sdiv, // llvm.aarch64.sve.sdiv
aarch64_sve_sdivr, // llvm.aarch64.sve.sdivr
aarch64_sve_sdot, // llvm.aarch64.sve.sdot
aarch64_sve_sdot_lane, // llvm.aarch64.sve.sdot.lane
aarch64_sve_sel, // llvm.aarch64.sve.sel
aarch64_sve_setffr, // llvm.aarch64.sve.setffr
aarch64_sve_shadd, // llvm.aarch64.sve.shadd
aarch64_sve_shrnb, // llvm.aarch64.sve.shrnb
aarch64_sve_shrnt, // llvm.aarch64.sve.shrnt
aarch64_sve_shsub, // llvm.aarch64.sve.shsub
aarch64_sve_shsubr, // llvm.aarch64.sve.shsubr
aarch64_sve_sli, // llvm.aarch64.sve.sli
aarch64_sve_sm4e, // llvm.aarch64.sve.sm4e
aarch64_sve_sm4ekey, // llvm.aarch64.sve.sm4ekey
aarch64_sve_smax, // llvm.aarch64.sve.smax
aarch64_sve_smaxp, // llvm.aarch64.sve.smaxp
aarch64_sve_smaxv, // llvm.aarch64.sve.smaxv
aarch64_sve_smin, // llvm.aarch64.sve.smin
aarch64_sve_sminp, // llvm.aarch64.sve.sminp
aarch64_sve_sminv, // llvm.aarch64.sve.sminv
aarch64_sve_smlalb, // llvm.aarch64.sve.smlalb
aarch64_sve_smlalb_lane, // llvm.aarch64.sve.smlalb.lane
aarch64_sve_smlalt, // llvm.aarch64.sve.smlalt
aarch64_sve_smlalt_lane, // llvm.aarch64.sve.smlalt.lane
aarch64_sve_smlslb, // llvm.aarch64.sve.smlslb
aarch64_sve_smlslb_lane, // llvm.aarch64.sve.smlslb.lane
aarch64_sve_smlslt, // llvm.aarch64.sve.smlslt
aarch64_sve_smlslt_lane, // llvm.aarch64.sve.smlslt.lane
aarch64_sve_smmla, // llvm.aarch64.sve.smmla
aarch64_sve_smulh, // llvm.aarch64.sve.smulh
aarch64_sve_smullb, // llvm.aarch64.sve.smullb
aarch64_sve_smullb_lane, // llvm.aarch64.sve.smullb.lane
aarch64_sve_smullt, // llvm.aarch64.sve.smullt
aarch64_sve_smullt_lane, // llvm.aarch64.sve.smullt.lane
aarch64_sve_splice, // llvm.aarch64.sve.splice
aarch64_sve_sqabs, // llvm.aarch64.sve.sqabs
aarch64_sve_sqadd, // llvm.aarch64.sve.sqadd
aarch64_sve_sqadd_x, // llvm.aarch64.sve.sqadd.x
aarch64_sve_sqcadd_x, // llvm.aarch64.sve.sqcadd.x
aarch64_sve_sqdecb_n32, // llvm.aarch64.sve.sqdecb.n32
aarch64_sve_sqdecb_n64, // llvm.aarch64.sve.sqdecb.n64
aarch64_sve_sqdecd, // llvm.aarch64.sve.sqdecd
aarch64_sve_sqdecd_n32, // llvm.aarch64.sve.sqdecd.n32
aarch64_sve_sqdecd_n64, // llvm.aarch64.sve.sqdecd.n64
aarch64_sve_sqdech, // llvm.aarch64.sve.sqdech
aarch64_sve_sqdech_n32, // llvm.aarch64.sve.sqdech.n32
aarch64_sve_sqdech_n64, // llvm.aarch64.sve.sqdech.n64
aarch64_sve_sqdecp, // llvm.aarch64.sve.sqdecp
aarch64_sve_sqdecp_n32, // llvm.aarch64.sve.sqdecp.n32
aarch64_sve_sqdecp_n64, // llvm.aarch64.sve.sqdecp.n64
aarch64_sve_sqdecw, // llvm.aarch64.sve.sqdecw
aarch64_sve_sqdecw_n32, // llvm.aarch64.sve.sqdecw.n32
aarch64_sve_sqdecw_n64, // llvm.aarch64.sve.sqdecw.n64
aarch64_sve_sqdmlalb, // llvm.aarch64.sve.sqdmlalb
aarch64_sve_sqdmlalb_lane, // llvm.aarch64.sve.sqdmlalb.lane
aarch64_sve_sqdmlalbt, // llvm.aarch64.sve.sqdmlalbt
aarch64_sve_sqdmlalt, // llvm.aarch64.sve.sqdmlalt
aarch64_sve_sqdmlalt_lane, // llvm.aarch64.sve.sqdmlalt.lane
aarch64_sve_sqdmlslb, // llvm.aarch64.sve.sqdmlslb
aarch64_sve_sqdmlslb_lane, // llvm.aarch64.sve.sqdmlslb.lane
aarch64_sve_sqdmlslbt, // llvm.aarch64.sve.sqdmlslbt
aarch64_sve_sqdmlslt, // llvm.aarch64.sve.sqdmlslt
aarch64_sve_sqdmlslt_lane, // llvm.aarch64.sve.sqdmlslt.lane
aarch64_sve_sqdmulh, // llvm.aarch64.sve.sqdmulh
aarch64_sve_sqdmulh_lane, // llvm.aarch64.sve.sqdmulh.lane
aarch64_sve_sqdmullb, // llvm.aarch64.sve.sqdmullb
aarch64_sve_sqdmullb_lane, // llvm.aarch64.sve.sqdmullb.lane
aarch64_sve_sqdmullt, // llvm.aarch64.sve.sqdmullt
aarch64_sve_sqdmullt_lane, // llvm.aarch64.sve.sqdmullt.lane
aarch64_sve_sqincb_n32, // llvm.aarch64.sve.sqincb.n32
aarch64_sve_sqincb_n64, // llvm.aarch64.sve.sqincb.n64
aarch64_sve_sqincd, // llvm.aarch64.sve.sqincd
aarch64_sve_sqincd_n32, // llvm.aarch64.sve.sqincd.n32
aarch64_sve_sqincd_n64, // llvm.aarch64.sve.sqincd.n64
aarch64_sve_sqinch, // llvm.aarch64.sve.sqinch
aarch64_sve_sqinch_n32, // llvm.aarch64.sve.sqinch.n32
aarch64_sve_sqinch_n64, // llvm.aarch64.sve.sqinch.n64
aarch64_sve_sqincp, // llvm.aarch64.sve.sqincp
aarch64_sve_sqincp_n32, // llvm.aarch64.sve.sqincp.n32
aarch64_sve_sqincp_n64, // llvm.aarch64.sve.sqincp.n64
aarch64_sve_sqincw, // llvm.aarch64.sve.sqincw
aarch64_sve_sqincw_n32, // llvm.aarch64.sve.sqincw.n32
aarch64_sve_sqincw_n64, // llvm.aarch64.sve.sqincw.n64
aarch64_sve_sqneg, // llvm.aarch64.sve.sqneg
aarch64_sve_sqrdcmlah_lane_x, // llvm.aarch64.sve.sqrdcmlah.lane.x
aarch64_sve_sqrdcmlah_x, // llvm.aarch64.sve.sqrdcmlah.x
aarch64_sve_sqrdmlah, // llvm.aarch64.sve.sqrdmlah
aarch64_sve_sqrdmlah_lane, // llvm.aarch64.sve.sqrdmlah.lane
aarch64_sve_sqrdmlsh, // llvm.aarch64.sve.sqrdmlsh
aarch64_sve_sqrdmlsh_lane, // llvm.aarch64.sve.sqrdmlsh.lane
aarch64_sve_sqrdmulh, // llvm.aarch64.sve.sqrdmulh
aarch64_sve_sqrdmulh_lane, // llvm.aarch64.sve.sqrdmulh.lane
aarch64_sve_sqrshl, // llvm.aarch64.sve.sqrshl
aarch64_sve_sqrshrnb, // llvm.aarch64.sve.sqrshrnb
aarch64_sve_sqrshrnt, // llvm.aarch64.sve.sqrshrnt
aarch64_sve_sqrshrunb, // llvm.aarch64.sve.sqrshrunb
aarch64_sve_sqrshrunt, // llvm.aarch64.sve.sqrshrunt
aarch64_sve_sqshl, // llvm.aarch64.sve.sqshl
aarch64_sve_sqshlu, // llvm.aarch64.sve.sqshlu
aarch64_sve_sqshrnb, // llvm.aarch64.sve.sqshrnb
aarch64_sve_sqshrnt, // llvm.aarch64.sve.sqshrnt
aarch64_sve_sqshrunb, // llvm.aarch64.sve.sqshrunb
aarch64_sve_sqshrunt, // llvm.aarch64.sve.sqshrunt
aarch64_sve_sqsub, // llvm.aarch64.sve.sqsub
aarch64_sve_sqsub_x, // llvm.aarch64.sve.sqsub.x
aarch64_sve_sqsubr, // llvm.aarch64.sve.sqsubr
aarch64_sve_sqxtnb, // llvm.aarch64.sve.sqxtnb
aarch64_sve_sqxtnt, // llvm.aarch64.sve.sqxtnt
aarch64_sve_sqxtunb, // llvm.aarch64.sve.sqxtunb
aarch64_sve_sqxtunt, // llvm.aarch64.sve.sqxtunt
aarch64_sve_srhadd, // llvm.aarch64.sve.srhadd
aarch64_sve_sri, // llvm.aarch64.sve.sri
aarch64_sve_srshl, // llvm.aarch64.sve.srshl
aarch64_sve_srshr, // llvm.aarch64.sve.srshr
aarch64_sve_srsra, // llvm.aarch64.sve.srsra
aarch64_sve_sshllb, // llvm.aarch64.sve.sshllb
aarch64_sve_sshllt, // llvm.aarch64.sve.sshllt
aarch64_sve_ssra, // llvm.aarch64.sve.ssra
aarch64_sve_ssublb, // llvm.aarch64.sve.ssublb
aarch64_sve_ssublbt, // llvm.aarch64.sve.ssublbt
aarch64_sve_ssublt, // llvm.aarch64.sve.ssublt
aarch64_sve_ssubltb, // llvm.aarch64.sve.ssubltb
aarch64_sve_ssubwb, // llvm.aarch64.sve.ssubwb
aarch64_sve_ssubwt, // llvm.aarch64.sve.ssubwt
aarch64_sve_st1, // llvm.aarch64.sve.st1
aarch64_sve_st1_scatter, // llvm.aarch64.sve.st1.scatter
aarch64_sve_st1_scatter_index, // llvm.aarch64.sve.st1.scatter.index
aarch64_sve_st1_scatter_scalar_offset, // llvm.aarch64.sve.st1.scatter.scalar.offset
aarch64_sve_st1_scatter_sxtw, // llvm.aarch64.sve.st1.scatter.sxtw
aarch64_sve_st1_scatter_sxtw_index, // llvm.aarch64.sve.st1.scatter.sxtw.index
aarch64_sve_st1_scatter_uxtw, // llvm.aarch64.sve.st1.scatter.uxtw
aarch64_sve_st1_scatter_uxtw_index, // llvm.aarch64.sve.st1.scatter.uxtw.index
aarch64_sve_st2, // llvm.aarch64.sve.st2
aarch64_sve_st3, // llvm.aarch64.sve.st3
aarch64_sve_st4, // llvm.aarch64.sve.st4
aarch64_sve_stnt1, // llvm.aarch64.sve.stnt1
aarch64_sve_stnt1_scatter, // llvm.aarch64.sve.stnt1.scatter
aarch64_sve_stnt1_scatter_index, // llvm.aarch64.sve.stnt1.scatter.index
aarch64_sve_stnt1_scatter_scalar_offset, // llvm.aarch64.sve.stnt1.scatter.scalar.offset
aarch64_sve_stnt1_scatter_uxtw, // llvm.aarch64.sve.stnt1.scatter.uxtw
aarch64_sve_sub, // llvm.aarch64.sve.sub
aarch64_sve_subhnb, // llvm.aarch64.sve.subhnb
aarch64_sve_subhnt, // llvm.aarch64.sve.subhnt
aarch64_sve_subr, // llvm.aarch64.sve.subr
aarch64_sve_sudot_lane, // llvm.aarch64.sve.sudot.lane
aarch64_sve_sunpkhi, // llvm.aarch64.sve.sunpkhi
aarch64_sve_sunpklo, // llvm.aarch64.sve.sunpklo
aarch64_sve_suqadd, // llvm.aarch64.sve.suqadd
aarch64_sve_sxtb, // llvm.aarch64.sve.sxtb
aarch64_sve_sxth, // llvm.aarch64.sve.sxth
aarch64_sve_sxtw, // llvm.aarch64.sve.sxtw
aarch64_sve_tbl, // llvm.aarch64.sve.tbl
aarch64_sve_tbl2, // llvm.aarch64.sve.tbl2
aarch64_sve_tbx, // llvm.aarch64.sve.tbx
aarch64_sve_trn1, // llvm.aarch64.sve.trn1
aarch64_sve_trn1q, // llvm.aarch64.sve.trn1q
aarch64_sve_trn2, // llvm.aarch64.sve.trn2
aarch64_sve_trn2q, // llvm.aarch64.sve.trn2q
aarch64_sve_tuple_create2, // llvm.aarch64.sve.tuple.create2
aarch64_sve_tuple_create3, // llvm.aarch64.sve.tuple.create3
aarch64_sve_tuple_create4, // llvm.aarch64.sve.tuple.create4
aarch64_sve_tuple_get, // llvm.aarch64.sve.tuple.get
aarch64_sve_tuple_set, // llvm.aarch64.sve.tuple.set
aarch64_sve_uaba, // llvm.aarch64.sve.uaba
aarch64_sve_uabalb, // llvm.aarch64.sve.uabalb
aarch64_sve_uabalt, // llvm.aarch64.sve.uabalt
aarch64_sve_uabd, // llvm.aarch64.sve.uabd
aarch64_sve_uabdlb, // llvm.aarch64.sve.uabdlb
aarch64_sve_uabdlt, // llvm.aarch64.sve.uabdlt
aarch64_sve_uadalp, // llvm.aarch64.sve.uadalp
aarch64_sve_uaddlb, // llvm.aarch64.sve.uaddlb
aarch64_sve_uaddlt, // llvm.aarch64.sve.uaddlt
aarch64_sve_uaddv, // llvm.aarch64.sve.uaddv
aarch64_sve_uaddwb, // llvm.aarch64.sve.uaddwb
aarch64_sve_uaddwt, // llvm.aarch64.sve.uaddwt
aarch64_sve_ucvtf, // llvm.aarch64.sve.ucvtf
aarch64_sve_ucvtf_f16i32, // llvm.aarch64.sve.ucvtf.f16i32
aarch64_sve_ucvtf_f16i64, // llvm.aarch64.sve.ucvtf.f16i64
aarch64_sve_ucvtf_f32i64, // llvm.aarch64.sve.ucvtf.f32i64
aarch64_sve_ucvtf_f64i32, // llvm.aarch64.sve.ucvtf.f64i32
aarch64_sve_udiv, // llvm.aarch64.sve.udiv
aarch64_sve_udivr, // llvm.aarch64.sve.udivr
aarch64_sve_udot, // llvm.aarch64.sve.udot
aarch64_sve_udot_lane, // llvm.aarch64.sve.udot.lane
aarch64_sve_uhadd, // llvm.aarch64.sve.uhadd
aarch64_sve_uhsub, // llvm.aarch64.sve.uhsub
aarch64_sve_uhsubr, // llvm.aarch64.sve.uhsubr
aarch64_sve_umax, // llvm.aarch64.sve.umax
aarch64_sve_umaxp, // llvm.aarch64.sve.umaxp
aarch64_sve_umaxv, // llvm.aarch64.sve.umaxv
aarch64_sve_umin, // llvm.aarch64.sve.umin
aarch64_sve_uminp, // llvm.aarch64.sve.uminp
aarch64_sve_uminv, // llvm.aarch64.sve.uminv
aarch64_sve_umlalb, // llvm.aarch64.sve.umlalb
aarch64_sve_umlalb_lane, // llvm.aarch64.sve.umlalb.lane
aarch64_sve_umlalt, // llvm.aarch64.sve.umlalt
aarch64_sve_umlalt_lane, // llvm.aarch64.sve.umlalt.lane
aarch64_sve_umlslb, // llvm.aarch64.sve.umlslb
aarch64_sve_umlslb_lane, // llvm.aarch64.sve.umlslb.lane
aarch64_sve_umlslt, // llvm.aarch64.sve.umlslt
aarch64_sve_umlslt_lane, // llvm.aarch64.sve.umlslt.lane
aarch64_sve_ummla, // llvm.aarch64.sve.ummla
aarch64_sve_umulh, // llvm.aarch64.sve.umulh
aarch64_sve_umullb, // llvm.aarch64.sve.umullb
aarch64_sve_umullb_lane, // llvm.aarch64.sve.umullb.lane
aarch64_sve_umullt, // llvm.aarch64.sve.umullt
aarch64_sve_umullt_lane, // llvm.aarch64.sve.umullt.lane
aarch64_sve_uqadd, // llvm.aarch64.sve.uqadd
aarch64_sve_uqadd_x, // llvm.aarch64.sve.uqadd.x
aarch64_sve_uqdecb_n32, // llvm.aarch64.sve.uqdecb.n32
aarch64_sve_uqdecb_n64, // llvm.aarch64.sve.uqdecb.n64
aarch64_sve_uqdecd, // llvm.aarch64.sve.uqdecd
aarch64_sve_uqdecd_n32, // llvm.aarch64.sve.uqdecd.n32
aarch64_sve_uqdecd_n64, // llvm.aarch64.sve.uqdecd.n64
aarch64_sve_uqdech, // llvm.aarch64.sve.uqdech
aarch64_sve_uqdech_n32, // llvm.aarch64.sve.uqdech.n32
aarch64_sve_uqdech_n64, // llvm.aarch64.sve.uqdech.n64
aarch64_sve_uqdecp, // llvm.aarch64.sve.uqdecp
aarch64_sve_uqdecp_n32, // llvm.aarch64.sve.uqdecp.n32
aarch64_sve_uqdecp_n64, // llvm.aarch64.sve.uqdecp.n64
aarch64_sve_uqdecw, // llvm.aarch64.sve.uqdecw
aarch64_sve_uqdecw_n32, // llvm.aarch64.sve.uqdecw.n32
aarch64_sve_uqdecw_n64, // llvm.aarch64.sve.uqdecw.n64
aarch64_sve_uqincb_n32, // llvm.aarch64.sve.uqincb.n32
aarch64_sve_uqincb_n64, // llvm.aarch64.sve.uqincb.n64
aarch64_sve_uqincd, // llvm.aarch64.sve.uqincd
aarch64_sve_uqincd_n32, // llvm.aarch64.sve.uqincd.n32
aarch64_sve_uqincd_n64, // llvm.aarch64.sve.uqincd.n64
aarch64_sve_uqinch, // llvm.aarch64.sve.uqinch
aarch64_sve_uqinch_n32, // llvm.aarch64.sve.uqinch.n32
aarch64_sve_uqinch_n64, // llvm.aarch64.sve.uqinch.n64
aarch64_sve_uqincp, // llvm.aarch64.sve.uqincp
aarch64_sve_uqincp_n32, // llvm.aarch64.sve.uqincp.n32
aarch64_sve_uqincp_n64, // llvm.aarch64.sve.uqincp.n64
aarch64_sve_uqincw, // llvm.aarch64.sve.uqincw
aarch64_sve_uqincw_n32, // llvm.aarch64.sve.uqincw.n32
aarch64_sve_uqincw_n64, // llvm.aarch64.sve.uqincw.n64
aarch64_sve_uqrshl, // llvm.aarch64.sve.uqrshl
aarch64_sve_uqrshrnb, // llvm.aarch64.sve.uqrshrnb
aarch64_sve_uqrshrnt, // llvm.aarch64.sve.uqrshrnt
aarch64_sve_uqshl, // llvm.aarch64.sve.uqshl
aarch64_sve_uqshrnb, // llvm.aarch64.sve.uqshrnb
aarch64_sve_uqshrnt, // llvm.aarch64.sve.uqshrnt
aarch64_sve_uqsub, // llvm.aarch64.sve.uqsub
aarch64_sve_uqsub_x, // llvm.aarch64.sve.uqsub.x
aarch64_sve_uqsubr, // llvm.aarch64.sve.uqsubr
aarch64_sve_uqxtnb, // llvm.aarch64.sve.uqxtnb
aarch64_sve_uqxtnt, // llvm.aarch64.sve.uqxtnt
aarch64_sve_urecpe, // llvm.aarch64.sve.urecpe
aarch64_sve_urhadd, // llvm.aarch64.sve.urhadd
aarch64_sve_urshl, // llvm.aarch64.sve.urshl
aarch64_sve_urshr, // llvm.aarch64.sve.urshr
aarch64_sve_ursqrte, // llvm.aarch64.sve.ursqrte
aarch64_sve_ursra, // llvm.aarch64.sve.ursra
aarch64_sve_usdot, // llvm.aarch64.sve.usdot
aarch64_sve_usdot_lane, // llvm.aarch64.sve.usdot.lane
aarch64_sve_ushllb, // llvm.aarch64.sve.ushllb
aarch64_sve_ushllt, // llvm.aarch64.sve.ushllt
aarch64_sve_usmmla, // llvm.aarch64.sve.usmmla
aarch64_sve_usqadd, // llvm.aarch64.sve.usqadd
aarch64_sve_usra, // llvm.aarch64.sve.usra
aarch64_sve_usublb, // llvm.aarch64.sve.usublb
aarch64_sve_usublt, // llvm.aarch64.sve.usublt
aarch64_sve_usubwb, // llvm.aarch64.sve.usubwb
aarch64_sve_usubwt, // llvm.aarch64.sve.usubwt
aarch64_sve_uunpkhi, // llvm.aarch64.sve.uunpkhi
aarch64_sve_uunpklo, // llvm.aarch64.sve.uunpklo
aarch64_sve_uxtb, // llvm.aarch64.sve.uxtb
aarch64_sve_uxth, // llvm.aarch64.sve.uxth
aarch64_sve_uxtw, // llvm.aarch64.sve.uxtw
aarch64_sve_uzp1, // llvm.aarch64.sve.uzp1
aarch64_sve_uzp1q, // llvm.aarch64.sve.uzp1q
aarch64_sve_uzp2, // llvm.aarch64.sve.uzp2
aarch64_sve_uzp2q, // llvm.aarch64.sve.uzp2q
aarch64_sve_whilege, // llvm.aarch64.sve.whilege
aarch64_sve_whilegt, // llvm.aarch64.sve.whilegt
aarch64_sve_whilehi, // llvm.aarch64.sve.whilehi
aarch64_sve_whilehs, // llvm.aarch64.sve.whilehs
aarch64_sve_whilele, // llvm.aarch64.sve.whilele
aarch64_sve_whilelo, // llvm.aarch64.sve.whilelo
aarch64_sve_whilels, // llvm.aarch64.sve.whilels
aarch64_sve_whilelt, // llvm.aarch64.sve.whilelt
aarch64_sve_whilerw_b, // llvm.aarch64.sve.whilerw.b
aarch64_sve_whilerw_d, // llvm.aarch64.sve.whilerw.d
aarch64_sve_whilerw_h, // llvm.aarch64.sve.whilerw.h
aarch64_sve_whilerw_s, // llvm.aarch64.sve.whilerw.s
aarch64_sve_whilewr_b, // llvm.aarch64.sve.whilewr.b
aarch64_sve_whilewr_d, // llvm.aarch64.sve.whilewr.d
aarch64_sve_whilewr_h, // llvm.aarch64.sve.whilewr.h
aarch64_sve_whilewr_s, // llvm.aarch64.sve.whilewr.s
aarch64_sve_wrffr, // llvm.aarch64.sve.wrffr
aarch64_sve_xar, // llvm.aarch64.sve.xar
aarch64_sve_zip1, // llvm.aarch64.sve.zip1
aarch64_sve_zip1q, // llvm.aarch64.sve.zip1q
aarch64_sve_zip2, // llvm.aarch64.sve.zip2
aarch64_sve_zip2q, // llvm.aarch64.sve.zip2q
aarch64_tagp, // llvm.aarch64.tagp
aarch64_tcancel, // llvm.aarch64.tcancel
aarch64_tcommit, // llvm.aarch64.tcommit
aarch64_tstart, // llvm.aarch64.tstart
aarch64_ttest, // llvm.aarch64.ttest
aarch64_udiv, // llvm.aarch64.udiv
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif

File diff suppressed because it is too large

View File

@@ -0,0 +1,749 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_AMDGCN_ENUMS_H
#define LLVM_IR_INTRINSIC_AMDGCN_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum AMDGCNIntrinsics : unsigned {
// Enum values for intrinsics
amdgcn_alignbyte = 1243, // llvm.amdgcn.alignbyte
amdgcn_atomic_dec, // llvm.amdgcn.atomic.dec
amdgcn_atomic_inc, // llvm.amdgcn.atomic.inc
amdgcn_ballot, // llvm.amdgcn.ballot
amdgcn_buffer_atomic_add, // llvm.amdgcn.buffer.atomic.add
amdgcn_buffer_atomic_and, // llvm.amdgcn.buffer.atomic.and
amdgcn_buffer_atomic_cmpswap, // llvm.amdgcn.buffer.atomic.cmpswap
amdgcn_buffer_atomic_csub, // llvm.amdgcn.buffer.atomic.csub
amdgcn_buffer_atomic_fadd, // llvm.amdgcn.buffer.atomic.fadd
amdgcn_buffer_atomic_or, // llvm.amdgcn.buffer.atomic.or
amdgcn_buffer_atomic_smax, // llvm.amdgcn.buffer.atomic.smax
amdgcn_buffer_atomic_smin, // llvm.amdgcn.buffer.atomic.smin
amdgcn_buffer_atomic_sub, // llvm.amdgcn.buffer.atomic.sub
amdgcn_buffer_atomic_swap, // llvm.amdgcn.buffer.atomic.swap
amdgcn_buffer_atomic_umax, // llvm.amdgcn.buffer.atomic.umax
amdgcn_buffer_atomic_umin, // llvm.amdgcn.buffer.atomic.umin
amdgcn_buffer_atomic_xor, // llvm.amdgcn.buffer.atomic.xor
amdgcn_buffer_load, // llvm.amdgcn.buffer.load
amdgcn_buffer_load_format, // llvm.amdgcn.buffer.load.format
amdgcn_buffer_store, // llvm.amdgcn.buffer.store
amdgcn_buffer_store_format, // llvm.amdgcn.buffer.store.format
amdgcn_buffer_wbinvl1, // llvm.amdgcn.buffer.wbinvl1
amdgcn_buffer_wbinvl1_sc, // llvm.amdgcn.buffer.wbinvl1.sc
amdgcn_buffer_wbinvl1_vol, // llvm.amdgcn.buffer.wbinvl1.vol
amdgcn_class, // llvm.amdgcn.class
amdgcn_cos, // llvm.amdgcn.cos
amdgcn_cubeid, // llvm.amdgcn.cubeid
amdgcn_cubema, // llvm.amdgcn.cubema
amdgcn_cubesc, // llvm.amdgcn.cubesc
amdgcn_cubetc, // llvm.amdgcn.cubetc
amdgcn_cvt_pk_i16, // llvm.amdgcn.cvt.pk.i16
amdgcn_cvt_pk_u16, // llvm.amdgcn.cvt.pk.u16
amdgcn_cvt_pk_u8_f32, // llvm.amdgcn.cvt.pk.u8.f32
amdgcn_cvt_pknorm_i16, // llvm.amdgcn.cvt.pknorm.i16
amdgcn_cvt_pknorm_u16, // llvm.amdgcn.cvt.pknorm.u16
amdgcn_cvt_pkrtz, // llvm.amdgcn.cvt.pkrtz
amdgcn_dispatch_id, // llvm.amdgcn.dispatch.id
amdgcn_dispatch_ptr, // llvm.amdgcn.dispatch.ptr
amdgcn_div_fixup, // llvm.amdgcn.div.fixup
amdgcn_div_fmas, // llvm.amdgcn.div.fmas
amdgcn_div_scale, // llvm.amdgcn.div.scale
amdgcn_ds_append, // llvm.amdgcn.ds.append
amdgcn_ds_bpermute, // llvm.amdgcn.ds.bpermute
amdgcn_ds_consume, // llvm.amdgcn.ds.consume
amdgcn_ds_fadd, // llvm.amdgcn.ds.fadd
amdgcn_ds_fmax, // llvm.amdgcn.ds.fmax
amdgcn_ds_fmin, // llvm.amdgcn.ds.fmin
amdgcn_ds_gws_barrier, // llvm.amdgcn.ds.gws.barrier
amdgcn_ds_gws_init, // llvm.amdgcn.ds.gws.init
amdgcn_ds_gws_sema_br, // llvm.amdgcn.ds.gws.sema.br
amdgcn_ds_gws_sema_p, // llvm.amdgcn.ds.gws.sema.p
amdgcn_ds_gws_sema_release_all, // llvm.amdgcn.ds.gws.sema.release.all
amdgcn_ds_gws_sema_v, // llvm.amdgcn.ds.gws.sema.v
amdgcn_ds_ordered_add, // llvm.amdgcn.ds.ordered.add
amdgcn_ds_ordered_swap, // llvm.amdgcn.ds.ordered.swap
amdgcn_ds_permute, // llvm.amdgcn.ds.permute
amdgcn_ds_swizzle, // llvm.amdgcn.ds.swizzle
amdgcn_else, // llvm.amdgcn.else
amdgcn_end_cf, // llvm.amdgcn.end.cf
amdgcn_endpgm, // llvm.amdgcn.endpgm
amdgcn_exp, // llvm.amdgcn.exp
amdgcn_exp_compr, // llvm.amdgcn.exp.compr
amdgcn_fcmp, // llvm.amdgcn.fcmp
amdgcn_fdiv_fast, // llvm.amdgcn.fdiv.fast
amdgcn_fdot2, // llvm.amdgcn.fdot2
amdgcn_flat_atomic_fadd, // llvm.amdgcn.flat.atomic.fadd
amdgcn_flat_atomic_fmax, // llvm.amdgcn.flat.atomic.fmax
amdgcn_flat_atomic_fmin, // llvm.amdgcn.flat.atomic.fmin
amdgcn_fma_legacy, // llvm.amdgcn.fma.legacy
amdgcn_fmad_ftz, // llvm.amdgcn.fmad.ftz
amdgcn_fmed3, // llvm.amdgcn.fmed3
amdgcn_fmul_legacy, // llvm.amdgcn.fmul.legacy
amdgcn_fract, // llvm.amdgcn.fract
amdgcn_frexp_exp, // llvm.amdgcn.frexp.exp
amdgcn_frexp_mant, // llvm.amdgcn.frexp.mant
amdgcn_global_atomic_csub, // llvm.amdgcn.global.atomic.csub
amdgcn_global_atomic_fadd, // llvm.amdgcn.global.atomic.fadd
amdgcn_global_atomic_fmax, // llvm.amdgcn.global.atomic.fmax
amdgcn_global_atomic_fmin, // llvm.amdgcn.global.atomic.fmin
amdgcn_groupstaticsize, // llvm.amdgcn.groupstaticsize
amdgcn_icmp, // llvm.amdgcn.icmp
amdgcn_if, // llvm.amdgcn.if
amdgcn_if_break, // llvm.amdgcn.if.break
amdgcn_image_atomic_add_1d, // llvm.amdgcn.image.atomic.add.1d
amdgcn_image_atomic_add_1darray, // llvm.amdgcn.image.atomic.add.1darray
amdgcn_image_atomic_add_2d, // llvm.amdgcn.image.atomic.add.2d
amdgcn_image_atomic_add_2darray, // llvm.amdgcn.image.atomic.add.2darray
amdgcn_image_atomic_add_2darraymsaa, // llvm.amdgcn.image.atomic.add.2darraymsaa
amdgcn_image_atomic_add_2dmsaa, // llvm.amdgcn.image.atomic.add.2dmsaa
amdgcn_image_atomic_add_3d, // llvm.amdgcn.image.atomic.add.3d
amdgcn_image_atomic_add_cube, // llvm.amdgcn.image.atomic.add.cube
amdgcn_image_atomic_and_1d, // llvm.amdgcn.image.atomic.and.1d
amdgcn_image_atomic_and_1darray, // llvm.amdgcn.image.atomic.and.1darray
amdgcn_image_atomic_and_2d, // llvm.amdgcn.image.atomic.and.2d
amdgcn_image_atomic_and_2darray, // llvm.amdgcn.image.atomic.and.2darray
amdgcn_image_atomic_and_2darraymsaa, // llvm.amdgcn.image.atomic.and.2darraymsaa
amdgcn_image_atomic_and_2dmsaa, // llvm.amdgcn.image.atomic.and.2dmsaa
amdgcn_image_atomic_and_3d, // llvm.amdgcn.image.atomic.and.3d
amdgcn_image_atomic_and_cube, // llvm.amdgcn.image.atomic.and.cube
amdgcn_image_atomic_cmpswap_1d, // llvm.amdgcn.image.atomic.cmpswap.1d
amdgcn_image_atomic_cmpswap_1darray, // llvm.amdgcn.image.atomic.cmpswap.1darray
amdgcn_image_atomic_cmpswap_2d, // llvm.amdgcn.image.atomic.cmpswap.2d
amdgcn_image_atomic_cmpswap_2darray, // llvm.amdgcn.image.atomic.cmpswap.2darray
amdgcn_image_atomic_cmpswap_2darraymsaa, // llvm.amdgcn.image.atomic.cmpswap.2darraymsaa
amdgcn_image_atomic_cmpswap_2dmsaa, // llvm.amdgcn.image.atomic.cmpswap.2dmsaa
amdgcn_image_atomic_cmpswap_3d, // llvm.amdgcn.image.atomic.cmpswap.3d
amdgcn_image_atomic_cmpswap_cube, // llvm.amdgcn.image.atomic.cmpswap.cube
amdgcn_image_atomic_dec_1d, // llvm.amdgcn.image.atomic.dec.1d
amdgcn_image_atomic_dec_1darray, // llvm.amdgcn.image.atomic.dec.1darray
amdgcn_image_atomic_dec_2d, // llvm.amdgcn.image.atomic.dec.2d
amdgcn_image_atomic_dec_2darray, // llvm.amdgcn.image.atomic.dec.2darray
amdgcn_image_atomic_dec_2darraymsaa, // llvm.amdgcn.image.atomic.dec.2darraymsaa
amdgcn_image_atomic_dec_2dmsaa, // llvm.amdgcn.image.atomic.dec.2dmsaa
amdgcn_image_atomic_dec_3d, // llvm.amdgcn.image.atomic.dec.3d
amdgcn_image_atomic_dec_cube, // llvm.amdgcn.image.atomic.dec.cube
amdgcn_image_atomic_fmax_1d, // llvm.amdgcn.image.atomic.fmax.1d
amdgcn_image_atomic_fmax_1darray, // llvm.amdgcn.image.atomic.fmax.1darray
amdgcn_image_atomic_fmax_2d, // llvm.amdgcn.image.atomic.fmax.2d
amdgcn_image_atomic_fmax_2darray, // llvm.amdgcn.image.atomic.fmax.2darray
amdgcn_image_atomic_fmax_2darraymsaa, // llvm.amdgcn.image.atomic.fmax.2darraymsaa
amdgcn_image_atomic_fmax_2dmsaa, // llvm.amdgcn.image.atomic.fmax.2dmsaa
amdgcn_image_atomic_fmax_3d, // llvm.amdgcn.image.atomic.fmax.3d
amdgcn_image_atomic_fmax_cube, // llvm.amdgcn.image.atomic.fmax.cube
amdgcn_image_atomic_fmin_1d, // llvm.amdgcn.image.atomic.fmin.1d
amdgcn_image_atomic_fmin_1darray, // llvm.amdgcn.image.atomic.fmin.1darray
amdgcn_image_atomic_fmin_2d, // llvm.amdgcn.image.atomic.fmin.2d
amdgcn_image_atomic_fmin_2darray, // llvm.amdgcn.image.atomic.fmin.2darray
amdgcn_image_atomic_fmin_2darraymsaa, // llvm.amdgcn.image.atomic.fmin.2darraymsaa
amdgcn_image_atomic_fmin_2dmsaa, // llvm.amdgcn.image.atomic.fmin.2dmsaa
amdgcn_image_atomic_fmin_3d, // llvm.amdgcn.image.atomic.fmin.3d
amdgcn_image_atomic_fmin_cube, // llvm.amdgcn.image.atomic.fmin.cube
amdgcn_image_atomic_inc_1d, // llvm.amdgcn.image.atomic.inc.1d
amdgcn_image_atomic_inc_1darray, // llvm.amdgcn.image.atomic.inc.1darray
amdgcn_image_atomic_inc_2d, // llvm.amdgcn.image.atomic.inc.2d
amdgcn_image_atomic_inc_2darray, // llvm.amdgcn.image.atomic.inc.2darray
amdgcn_image_atomic_inc_2darraymsaa, // llvm.amdgcn.image.atomic.inc.2darraymsaa
amdgcn_image_atomic_inc_2dmsaa, // llvm.amdgcn.image.atomic.inc.2dmsaa
amdgcn_image_atomic_inc_3d, // llvm.amdgcn.image.atomic.inc.3d
amdgcn_image_atomic_inc_cube, // llvm.amdgcn.image.atomic.inc.cube
amdgcn_image_atomic_or_1d, // llvm.amdgcn.image.atomic.or.1d
amdgcn_image_atomic_or_1darray, // llvm.amdgcn.image.atomic.or.1darray
amdgcn_image_atomic_or_2d, // llvm.amdgcn.image.atomic.or.2d
amdgcn_image_atomic_or_2darray, // llvm.amdgcn.image.atomic.or.2darray
amdgcn_image_atomic_or_2darraymsaa, // llvm.amdgcn.image.atomic.or.2darraymsaa
amdgcn_image_atomic_or_2dmsaa, // llvm.amdgcn.image.atomic.or.2dmsaa
amdgcn_image_atomic_or_3d, // llvm.amdgcn.image.atomic.or.3d
amdgcn_image_atomic_or_cube, // llvm.amdgcn.image.atomic.or.cube
amdgcn_image_atomic_smax_1d, // llvm.amdgcn.image.atomic.smax.1d
amdgcn_image_atomic_smax_1darray, // llvm.amdgcn.image.atomic.smax.1darray
amdgcn_image_atomic_smax_2d, // llvm.amdgcn.image.atomic.smax.2d
amdgcn_image_atomic_smax_2darray, // llvm.amdgcn.image.atomic.smax.2darray
amdgcn_image_atomic_smax_2darraymsaa, // llvm.amdgcn.image.atomic.smax.2darraymsaa
amdgcn_image_atomic_smax_2dmsaa, // llvm.amdgcn.image.atomic.smax.2dmsaa
amdgcn_image_atomic_smax_3d, // llvm.amdgcn.image.atomic.smax.3d
amdgcn_image_atomic_smax_cube, // llvm.amdgcn.image.atomic.smax.cube
amdgcn_image_atomic_smin_1d, // llvm.amdgcn.image.atomic.smin.1d
amdgcn_image_atomic_smin_1darray, // llvm.amdgcn.image.atomic.smin.1darray
amdgcn_image_atomic_smin_2d, // llvm.amdgcn.image.atomic.smin.2d
amdgcn_image_atomic_smin_2darray, // llvm.amdgcn.image.atomic.smin.2darray
amdgcn_image_atomic_smin_2darraymsaa, // llvm.amdgcn.image.atomic.smin.2darraymsaa
amdgcn_image_atomic_smin_2dmsaa, // llvm.amdgcn.image.atomic.smin.2dmsaa
amdgcn_image_atomic_smin_3d, // llvm.amdgcn.image.atomic.smin.3d
amdgcn_image_atomic_smin_cube, // llvm.amdgcn.image.atomic.smin.cube
amdgcn_image_atomic_sub_1d, // llvm.amdgcn.image.atomic.sub.1d
amdgcn_image_atomic_sub_1darray, // llvm.amdgcn.image.atomic.sub.1darray
amdgcn_image_atomic_sub_2d, // llvm.amdgcn.image.atomic.sub.2d
amdgcn_image_atomic_sub_2darray, // llvm.amdgcn.image.atomic.sub.2darray
amdgcn_image_atomic_sub_2darraymsaa, // llvm.amdgcn.image.atomic.sub.2darraymsaa
amdgcn_image_atomic_sub_2dmsaa, // llvm.amdgcn.image.atomic.sub.2dmsaa
amdgcn_image_atomic_sub_3d, // llvm.amdgcn.image.atomic.sub.3d
amdgcn_image_atomic_sub_cube, // llvm.amdgcn.image.atomic.sub.cube
amdgcn_image_atomic_swap_1d, // llvm.amdgcn.image.atomic.swap.1d
amdgcn_image_atomic_swap_1darray, // llvm.amdgcn.image.atomic.swap.1darray
amdgcn_image_atomic_swap_2d, // llvm.amdgcn.image.atomic.swap.2d
amdgcn_image_atomic_swap_2darray, // llvm.amdgcn.image.atomic.swap.2darray
amdgcn_image_atomic_swap_2darraymsaa, // llvm.amdgcn.image.atomic.swap.2darraymsaa
amdgcn_image_atomic_swap_2dmsaa, // llvm.amdgcn.image.atomic.swap.2dmsaa
amdgcn_image_atomic_swap_3d, // llvm.amdgcn.image.atomic.swap.3d
amdgcn_image_atomic_swap_cube, // llvm.amdgcn.image.atomic.swap.cube
amdgcn_image_atomic_umax_1d, // llvm.amdgcn.image.atomic.umax.1d
amdgcn_image_atomic_umax_1darray, // llvm.amdgcn.image.atomic.umax.1darray
amdgcn_image_atomic_umax_2d, // llvm.amdgcn.image.atomic.umax.2d
amdgcn_image_atomic_umax_2darray, // llvm.amdgcn.image.atomic.umax.2darray
amdgcn_image_atomic_umax_2darraymsaa, // llvm.amdgcn.image.atomic.umax.2darraymsaa
amdgcn_image_atomic_umax_2dmsaa, // llvm.amdgcn.image.atomic.umax.2dmsaa
amdgcn_image_atomic_umax_3d, // llvm.amdgcn.image.atomic.umax.3d
amdgcn_image_atomic_umax_cube, // llvm.amdgcn.image.atomic.umax.cube
amdgcn_image_atomic_umin_1d, // llvm.amdgcn.image.atomic.umin.1d
amdgcn_image_atomic_umin_1darray, // llvm.amdgcn.image.atomic.umin.1darray
amdgcn_image_atomic_umin_2d, // llvm.amdgcn.image.atomic.umin.2d
amdgcn_image_atomic_umin_2darray, // llvm.amdgcn.image.atomic.umin.2darray
amdgcn_image_atomic_umin_2darraymsaa, // llvm.amdgcn.image.atomic.umin.2darraymsaa
amdgcn_image_atomic_umin_2dmsaa, // llvm.amdgcn.image.atomic.umin.2dmsaa
amdgcn_image_atomic_umin_3d, // llvm.amdgcn.image.atomic.umin.3d
amdgcn_image_atomic_umin_cube, // llvm.amdgcn.image.atomic.umin.cube
amdgcn_image_atomic_xor_1d, // llvm.amdgcn.image.atomic.xor.1d
amdgcn_image_atomic_xor_1darray, // llvm.amdgcn.image.atomic.xor.1darray
amdgcn_image_atomic_xor_2d, // llvm.amdgcn.image.atomic.xor.2d
amdgcn_image_atomic_xor_2darray, // llvm.amdgcn.image.atomic.xor.2darray
amdgcn_image_atomic_xor_2darraymsaa, // llvm.amdgcn.image.atomic.xor.2darraymsaa
amdgcn_image_atomic_xor_2dmsaa, // llvm.amdgcn.image.atomic.xor.2dmsaa
amdgcn_image_atomic_xor_3d, // llvm.amdgcn.image.atomic.xor.3d
amdgcn_image_atomic_xor_cube, // llvm.amdgcn.image.atomic.xor.cube
amdgcn_image_bvh_intersect_ray, // llvm.amdgcn.image.bvh.intersect.ray
amdgcn_image_gather4_2d, // llvm.amdgcn.image.gather4.2d
amdgcn_image_gather4_2darray, // llvm.amdgcn.image.gather4.2darray
amdgcn_image_gather4_b_2d, // llvm.amdgcn.image.gather4.b.2d
amdgcn_image_gather4_b_2darray, // llvm.amdgcn.image.gather4.b.2darray
amdgcn_image_gather4_b_cl_2d, // llvm.amdgcn.image.gather4.b.cl.2d
amdgcn_image_gather4_b_cl_2darray, // llvm.amdgcn.image.gather4.b.cl.2darray
amdgcn_image_gather4_b_cl_cube, // llvm.amdgcn.image.gather4.b.cl.cube
amdgcn_image_gather4_b_cl_o_2d, // llvm.amdgcn.image.gather4.b.cl.o.2d
amdgcn_image_gather4_b_cl_o_2darray, // llvm.amdgcn.image.gather4.b.cl.o.2darray
amdgcn_image_gather4_b_cl_o_cube, // llvm.amdgcn.image.gather4.b.cl.o.cube
amdgcn_image_gather4_b_cube, // llvm.amdgcn.image.gather4.b.cube
amdgcn_image_gather4_b_o_2d, // llvm.amdgcn.image.gather4.b.o.2d
amdgcn_image_gather4_b_o_2darray, // llvm.amdgcn.image.gather4.b.o.2darray
amdgcn_image_gather4_b_o_cube, // llvm.amdgcn.image.gather4.b.o.cube
amdgcn_image_gather4_c_2d, // llvm.amdgcn.image.gather4.c.2d
amdgcn_image_gather4_c_2darray, // llvm.amdgcn.image.gather4.c.2darray
amdgcn_image_gather4_c_b_2d, // llvm.amdgcn.image.gather4.c.b.2d
amdgcn_image_gather4_c_b_2darray, // llvm.amdgcn.image.gather4.c.b.2darray
amdgcn_image_gather4_c_b_cl_2d, // llvm.amdgcn.image.gather4.c.b.cl.2d
amdgcn_image_gather4_c_b_cl_2darray, // llvm.amdgcn.image.gather4.c.b.cl.2darray
amdgcn_image_gather4_c_b_cl_cube, // llvm.amdgcn.image.gather4.c.b.cl.cube
amdgcn_image_gather4_c_b_cl_o_2d, // llvm.amdgcn.image.gather4.c.b.cl.o.2d
amdgcn_image_gather4_c_b_cl_o_2darray, // llvm.amdgcn.image.gather4.c.b.cl.o.2darray
amdgcn_image_gather4_c_b_cl_o_cube, // llvm.amdgcn.image.gather4.c.b.cl.o.cube
amdgcn_image_gather4_c_b_cube, // llvm.amdgcn.image.gather4.c.b.cube
amdgcn_image_gather4_c_b_o_2d, // llvm.amdgcn.image.gather4.c.b.o.2d
amdgcn_image_gather4_c_b_o_2darray, // llvm.amdgcn.image.gather4.c.b.o.2darray
amdgcn_image_gather4_c_b_o_cube, // llvm.amdgcn.image.gather4.c.b.o.cube
amdgcn_image_gather4_c_cl_2d, // llvm.amdgcn.image.gather4.c.cl.2d
amdgcn_image_gather4_c_cl_2darray, // llvm.amdgcn.image.gather4.c.cl.2darray
amdgcn_image_gather4_c_cl_cube, // llvm.amdgcn.image.gather4.c.cl.cube
amdgcn_image_gather4_c_cl_o_2d, // llvm.amdgcn.image.gather4.c.cl.o.2d
amdgcn_image_gather4_c_cl_o_2darray, // llvm.amdgcn.image.gather4.c.cl.o.2darray
amdgcn_image_gather4_c_cl_o_cube, // llvm.amdgcn.image.gather4.c.cl.o.cube
amdgcn_image_gather4_c_cube, // llvm.amdgcn.image.gather4.c.cube
amdgcn_image_gather4_c_l_2d, // llvm.amdgcn.image.gather4.c.l.2d
amdgcn_image_gather4_c_l_2darray, // llvm.amdgcn.image.gather4.c.l.2darray
amdgcn_image_gather4_c_l_cube, // llvm.amdgcn.image.gather4.c.l.cube
amdgcn_image_gather4_c_l_o_2d, // llvm.amdgcn.image.gather4.c.l.o.2d
amdgcn_image_gather4_c_l_o_2darray, // llvm.amdgcn.image.gather4.c.l.o.2darray
amdgcn_image_gather4_c_l_o_cube, // llvm.amdgcn.image.gather4.c.l.o.cube
amdgcn_image_gather4_c_lz_2d, // llvm.amdgcn.image.gather4.c.lz.2d
amdgcn_image_gather4_c_lz_2darray, // llvm.amdgcn.image.gather4.c.lz.2darray
amdgcn_image_gather4_c_lz_cube, // llvm.amdgcn.image.gather4.c.lz.cube
amdgcn_image_gather4_c_lz_o_2d, // llvm.amdgcn.image.gather4.c.lz.o.2d
amdgcn_image_gather4_c_lz_o_2darray, // llvm.amdgcn.image.gather4.c.lz.o.2darray
amdgcn_image_gather4_c_lz_o_cube, // llvm.amdgcn.image.gather4.c.lz.o.cube
amdgcn_image_gather4_c_o_2d, // llvm.amdgcn.image.gather4.c.o.2d
amdgcn_image_gather4_c_o_2darray, // llvm.amdgcn.image.gather4.c.o.2darray
amdgcn_image_gather4_c_o_cube, // llvm.amdgcn.image.gather4.c.o.cube
amdgcn_image_gather4_cl_2d, // llvm.amdgcn.image.gather4.cl.2d
amdgcn_image_gather4_cl_2darray, // llvm.amdgcn.image.gather4.cl.2darray
amdgcn_image_gather4_cl_cube, // llvm.amdgcn.image.gather4.cl.cube
amdgcn_image_gather4_cl_o_2d, // llvm.amdgcn.image.gather4.cl.o.2d
amdgcn_image_gather4_cl_o_2darray, // llvm.amdgcn.image.gather4.cl.o.2darray
amdgcn_image_gather4_cl_o_cube, // llvm.amdgcn.image.gather4.cl.o.cube
amdgcn_image_gather4_cube, // llvm.amdgcn.image.gather4.cube
amdgcn_image_gather4_l_2d, // llvm.amdgcn.image.gather4.l.2d
amdgcn_image_gather4_l_2darray, // llvm.amdgcn.image.gather4.l.2darray
amdgcn_image_gather4_l_cube, // llvm.amdgcn.image.gather4.l.cube
amdgcn_image_gather4_l_o_2d, // llvm.amdgcn.image.gather4.l.o.2d
amdgcn_image_gather4_l_o_2darray, // llvm.amdgcn.image.gather4.l.o.2darray
amdgcn_image_gather4_l_o_cube, // llvm.amdgcn.image.gather4.l.o.cube
amdgcn_image_gather4_lz_2d, // llvm.amdgcn.image.gather4.lz.2d
amdgcn_image_gather4_lz_2darray, // llvm.amdgcn.image.gather4.lz.2darray
amdgcn_image_gather4_lz_cube, // llvm.amdgcn.image.gather4.lz.cube
amdgcn_image_gather4_lz_o_2d, // llvm.amdgcn.image.gather4.lz.o.2d
amdgcn_image_gather4_lz_o_2darray, // llvm.amdgcn.image.gather4.lz.o.2darray
amdgcn_image_gather4_lz_o_cube, // llvm.amdgcn.image.gather4.lz.o.cube
amdgcn_image_gather4_o_2d, // llvm.amdgcn.image.gather4.o.2d
amdgcn_image_gather4_o_2darray, // llvm.amdgcn.image.gather4.o.2darray
amdgcn_image_gather4_o_cube, // llvm.amdgcn.image.gather4.o.cube
amdgcn_image_getlod_1d, // llvm.amdgcn.image.getlod.1d
amdgcn_image_getlod_1darray, // llvm.amdgcn.image.getlod.1darray
amdgcn_image_getlod_2d, // llvm.amdgcn.image.getlod.2d
amdgcn_image_getlod_2darray, // llvm.amdgcn.image.getlod.2darray
amdgcn_image_getlod_3d, // llvm.amdgcn.image.getlod.3d
amdgcn_image_getlod_cube, // llvm.amdgcn.image.getlod.cube
amdgcn_image_getresinfo_1d, // llvm.amdgcn.image.getresinfo.1d
amdgcn_image_getresinfo_1darray, // llvm.amdgcn.image.getresinfo.1darray
amdgcn_image_getresinfo_2d, // llvm.amdgcn.image.getresinfo.2d
amdgcn_image_getresinfo_2darray, // llvm.amdgcn.image.getresinfo.2darray
amdgcn_image_getresinfo_2darraymsaa, // llvm.amdgcn.image.getresinfo.2darraymsaa
amdgcn_image_getresinfo_2dmsaa, // llvm.amdgcn.image.getresinfo.2dmsaa
amdgcn_image_getresinfo_3d, // llvm.amdgcn.image.getresinfo.3d
amdgcn_image_getresinfo_cube, // llvm.amdgcn.image.getresinfo.cube
amdgcn_image_load_1d, // llvm.amdgcn.image.load.1d
amdgcn_image_load_1darray, // llvm.amdgcn.image.load.1darray
amdgcn_image_load_2d, // llvm.amdgcn.image.load.2d
amdgcn_image_load_2darray, // llvm.amdgcn.image.load.2darray
amdgcn_image_load_2darraymsaa, // llvm.amdgcn.image.load.2darraymsaa
amdgcn_image_load_2dmsaa, // llvm.amdgcn.image.load.2dmsaa
amdgcn_image_load_3d, // llvm.amdgcn.image.load.3d
amdgcn_image_load_cube, // llvm.amdgcn.image.load.cube
amdgcn_image_load_mip_1d, // llvm.amdgcn.image.load.mip.1d
amdgcn_image_load_mip_1darray, // llvm.amdgcn.image.load.mip.1darray
amdgcn_image_load_mip_2d, // llvm.amdgcn.image.load.mip.2d
amdgcn_image_load_mip_2darray, // llvm.amdgcn.image.load.mip.2darray
amdgcn_image_load_mip_3d, // llvm.amdgcn.image.load.mip.3d
amdgcn_image_load_mip_cube, // llvm.amdgcn.image.load.mip.cube
amdgcn_image_msaa_load_x_2darraymsaa, // llvm.amdgcn.image.msaa.load.x.2darraymsaa
amdgcn_image_msaa_load_x_2dmsaa, // llvm.amdgcn.image.msaa.load.x.2dmsaa
amdgcn_image_sample_1d, // llvm.amdgcn.image.sample.1d
amdgcn_image_sample_1darray, // llvm.amdgcn.image.sample.1darray
amdgcn_image_sample_2d, // llvm.amdgcn.image.sample.2d
amdgcn_image_sample_2darray, // llvm.amdgcn.image.sample.2darray
amdgcn_image_sample_3d, // llvm.amdgcn.image.sample.3d
amdgcn_image_sample_b_1d, // llvm.amdgcn.image.sample.b.1d
amdgcn_image_sample_b_1darray, // llvm.amdgcn.image.sample.b.1darray
amdgcn_image_sample_b_2d, // llvm.amdgcn.image.sample.b.2d
amdgcn_image_sample_b_2darray, // llvm.amdgcn.image.sample.b.2darray
amdgcn_image_sample_b_3d, // llvm.amdgcn.image.sample.b.3d
amdgcn_image_sample_b_cl_1d, // llvm.amdgcn.image.sample.b.cl.1d
amdgcn_image_sample_b_cl_1darray, // llvm.amdgcn.image.sample.b.cl.1darray
amdgcn_image_sample_b_cl_2d, // llvm.amdgcn.image.sample.b.cl.2d
amdgcn_image_sample_b_cl_2darray, // llvm.amdgcn.image.sample.b.cl.2darray
amdgcn_image_sample_b_cl_3d, // llvm.amdgcn.image.sample.b.cl.3d
amdgcn_image_sample_b_cl_cube, // llvm.amdgcn.image.sample.b.cl.cube
amdgcn_image_sample_b_cl_o_1d, // llvm.amdgcn.image.sample.b.cl.o.1d
amdgcn_image_sample_b_cl_o_1darray, // llvm.amdgcn.image.sample.b.cl.o.1darray
amdgcn_image_sample_b_cl_o_2d, // llvm.amdgcn.image.sample.b.cl.o.2d
amdgcn_image_sample_b_cl_o_2darray, // llvm.amdgcn.image.sample.b.cl.o.2darray
amdgcn_image_sample_b_cl_o_3d, // llvm.amdgcn.image.sample.b.cl.o.3d
amdgcn_image_sample_b_cl_o_cube, // llvm.amdgcn.image.sample.b.cl.o.cube
amdgcn_image_sample_b_cube, // llvm.amdgcn.image.sample.b.cube
amdgcn_image_sample_b_o_1d, // llvm.amdgcn.image.sample.b.o.1d
amdgcn_image_sample_b_o_1darray, // llvm.amdgcn.image.sample.b.o.1darray
amdgcn_image_sample_b_o_2d, // llvm.amdgcn.image.sample.b.o.2d
amdgcn_image_sample_b_o_2darray, // llvm.amdgcn.image.sample.b.o.2darray
amdgcn_image_sample_b_o_3d, // llvm.amdgcn.image.sample.b.o.3d
amdgcn_image_sample_b_o_cube, // llvm.amdgcn.image.sample.b.o.cube
amdgcn_image_sample_c_1d, // llvm.amdgcn.image.sample.c.1d
amdgcn_image_sample_c_1darray, // llvm.amdgcn.image.sample.c.1darray
amdgcn_image_sample_c_2d, // llvm.amdgcn.image.sample.c.2d
amdgcn_image_sample_c_2darray, // llvm.amdgcn.image.sample.c.2darray
amdgcn_image_sample_c_3d, // llvm.amdgcn.image.sample.c.3d
amdgcn_image_sample_c_b_1d, // llvm.amdgcn.image.sample.c.b.1d
amdgcn_image_sample_c_b_1darray, // llvm.amdgcn.image.sample.c.b.1darray
amdgcn_image_sample_c_b_2d, // llvm.amdgcn.image.sample.c.b.2d
amdgcn_image_sample_c_b_2darray, // llvm.amdgcn.image.sample.c.b.2darray
amdgcn_image_sample_c_b_3d, // llvm.amdgcn.image.sample.c.b.3d
amdgcn_image_sample_c_b_cl_1d, // llvm.amdgcn.image.sample.c.b.cl.1d
amdgcn_image_sample_c_b_cl_1darray, // llvm.amdgcn.image.sample.c.b.cl.1darray
amdgcn_image_sample_c_b_cl_2d, // llvm.amdgcn.image.sample.c.b.cl.2d
amdgcn_image_sample_c_b_cl_2darray, // llvm.amdgcn.image.sample.c.b.cl.2darray
amdgcn_image_sample_c_b_cl_3d, // llvm.amdgcn.image.sample.c.b.cl.3d
amdgcn_image_sample_c_b_cl_cube, // llvm.amdgcn.image.sample.c.b.cl.cube
amdgcn_image_sample_c_b_cl_o_1d, // llvm.amdgcn.image.sample.c.b.cl.o.1d
amdgcn_image_sample_c_b_cl_o_1darray, // llvm.amdgcn.image.sample.c.b.cl.o.1darray
amdgcn_image_sample_c_b_cl_o_2d, // llvm.amdgcn.image.sample.c.b.cl.o.2d
amdgcn_image_sample_c_b_cl_o_2darray, // llvm.amdgcn.image.sample.c.b.cl.o.2darray
amdgcn_image_sample_c_b_cl_o_3d, // llvm.amdgcn.image.sample.c.b.cl.o.3d
amdgcn_image_sample_c_b_cl_o_cube, // llvm.amdgcn.image.sample.c.b.cl.o.cube
amdgcn_image_sample_c_b_cube, // llvm.amdgcn.image.sample.c.b.cube
amdgcn_image_sample_c_b_o_1d, // llvm.amdgcn.image.sample.c.b.o.1d
amdgcn_image_sample_c_b_o_1darray, // llvm.amdgcn.image.sample.c.b.o.1darray
amdgcn_image_sample_c_b_o_2d, // llvm.amdgcn.image.sample.c.b.o.2d
amdgcn_image_sample_c_b_o_2darray, // llvm.amdgcn.image.sample.c.b.o.2darray
amdgcn_image_sample_c_b_o_3d, // llvm.amdgcn.image.sample.c.b.o.3d
amdgcn_image_sample_c_b_o_cube, // llvm.amdgcn.image.sample.c.b.o.cube
amdgcn_image_sample_c_cd_1d, // llvm.amdgcn.image.sample.c.cd.1d
amdgcn_image_sample_c_cd_1darray, // llvm.amdgcn.image.sample.c.cd.1darray
amdgcn_image_sample_c_cd_2d, // llvm.amdgcn.image.sample.c.cd.2d
amdgcn_image_sample_c_cd_2darray, // llvm.amdgcn.image.sample.c.cd.2darray
amdgcn_image_sample_c_cd_3d, // llvm.amdgcn.image.sample.c.cd.3d
amdgcn_image_sample_c_cd_cl_1d, // llvm.amdgcn.image.sample.c.cd.cl.1d
amdgcn_image_sample_c_cd_cl_1darray, // llvm.amdgcn.image.sample.c.cd.cl.1darray
amdgcn_image_sample_c_cd_cl_2d, // llvm.amdgcn.image.sample.c.cd.cl.2d
amdgcn_image_sample_c_cd_cl_2darray, // llvm.amdgcn.image.sample.c.cd.cl.2darray
amdgcn_image_sample_c_cd_cl_3d, // llvm.amdgcn.image.sample.c.cd.cl.3d
amdgcn_image_sample_c_cd_cl_cube, // llvm.amdgcn.image.sample.c.cd.cl.cube
amdgcn_image_sample_c_cd_cl_o_1d, // llvm.amdgcn.image.sample.c.cd.cl.o.1d
amdgcn_image_sample_c_cd_cl_o_1darray, // llvm.amdgcn.image.sample.c.cd.cl.o.1darray
amdgcn_image_sample_c_cd_cl_o_2d, // llvm.amdgcn.image.sample.c.cd.cl.o.2d
amdgcn_image_sample_c_cd_cl_o_2darray, // llvm.amdgcn.image.sample.c.cd.cl.o.2darray
amdgcn_image_sample_c_cd_cl_o_3d, // llvm.amdgcn.image.sample.c.cd.cl.o.3d
amdgcn_image_sample_c_cd_cl_o_cube, // llvm.amdgcn.image.sample.c.cd.cl.o.cube
amdgcn_image_sample_c_cd_cube, // llvm.amdgcn.image.sample.c.cd.cube
amdgcn_image_sample_c_cd_o_1d, // llvm.amdgcn.image.sample.c.cd.o.1d
amdgcn_image_sample_c_cd_o_1darray, // llvm.amdgcn.image.sample.c.cd.o.1darray
amdgcn_image_sample_c_cd_o_2d, // llvm.amdgcn.image.sample.c.cd.o.2d
amdgcn_image_sample_c_cd_o_2darray, // llvm.amdgcn.image.sample.c.cd.o.2darray
amdgcn_image_sample_c_cd_o_3d, // llvm.amdgcn.image.sample.c.cd.o.3d
amdgcn_image_sample_c_cd_o_cube, // llvm.amdgcn.image.sample.c.cd.o.cube
amdgcn_image_sample_c_cl_1d, // llvm.amdgcn.image.sample.c.cl.1d
amdgcn_image_sample_c_cl_1darray, // llvm.amdgcn.image.sample.c.cl.1darray
amdgcn_image_sample_c_cl_2d, // llvm.amdgcn.image.sample.c.cl.2d
amdgcn_image_sample_c_cl_2darray, // llvm.amdgcn.image.sample.c.cl.2darray
amdgcn_image_sample_c_cl_3d, // llvm.amdgcn.image.sample.c.cl.3d
amdgcn_image_sample_c_cl_cube, // llvm.amdgcn.image.sample.c.cl.cube
amdgcn_image_sample_c_cl_o_1d, // llvm.amdgcn.image.sample.c.cl.o.1d
amdgcn_image_sample_c_cl_o_1darray, // llvm.amdgcn.image.sample.c.cl.o.1darray
amdgcn_image_sample_c_cl_o_2d, // llvm.amdgcn.image.sample.c.cl.o.2d
amdgcn_image_sample_c_cl_o_2darray, // llvm.amdgcn.image.sample.c.cl.o.2darray
amdgcn_image_sample_c_cl_o_3d, // llvm.amdgcn.image.sample.c.cl.o.3d
amdgcn_image_sample_c_cl_o_cube, // llvm.amdgcn.image.sample.c.cl.o.cube
amdgcn_image_sample_c_cube, // llvm.amdgcn.image.sample.c.cube
amdgcn_image_sample_c_d_1d, // llvm.amdgcn.image.sample.c.d.1d
amdgcn_image_sample_c_d_1darray, // llvm.amdgcn.image.sample.c.d.1darray
amdgcn_image_sample_c_d_2d, // llvm.amdgcn.image.sample.c.d.2d
amdgcn_image_sample_c_d_2darray, // llvm.amdgcn.image.sample.c.d.2darray
amdgcn_image_sample_c_d_3d, // llvm.amdgcn.image.sample.c.d.3d
amdgcn_image_sample_c_d_cl_1d, // llvm.amdgcn.image.sample.c.d.cl.1d
amdgcn_image_sample_c_d_cl_1darray, // llvm.amdgcn.image.sample.c.d.cl.1darray
amdgcn_image_sample_c_d_cl_2d, // llvm.amdgcn.image.sample.c.d.cl.2d
amdgcn_image_sample_c_d_cl_2darray, // llvm.amdgcn.image.sample.c.d.cl.2darray
amdgcn_image_sample_c_d_cl_3d, // llvm.amdgcn.image.sample.c.d.cl.3d
amdgcn_image_sample_c_d_cl_cube, // llvm.amdgcn.image.sample.c.d.cl.cube
amdgcn_image_sample_c_d_cl_o_1d, // llvm.amdgcn.image.sample.c.d.cl.o.1d
amdgcn_image_sample_c_d_cl_o_1darray, // llvm.amdgcn.image.sample.c.d.cl.o.1darray
amdgcn_image_sample_c_d_cl_o_2d, // llvm.amdgcn.image.sample.c.d.cl.o.2d
amdgcn_image_sample_c_d_cl_o_2darray, // llvm.amdgcn.image.sample.c.d.cl.o.2darray
amdgcn_image_sample_c_d_cl_o_3d, // llvm.amdgcn.image.sample.c.d.cl.o.3d
amdgcn_image_sample_c_d_cl_o_cube, // llvm.amdgcn.image.sample.c.d.cl.o.cube
amdgcn_image_sample_c_d_cube, // llvm.amdgcn.image.sample.c.d.cube
amdgcn_image_sample_c_d_o_1d, // llvm.amdgcn.image.sample.c.d.o.1d
amdgcn_image_sample_c_d_o_1darray, // llvm.amdgcn.image.sample.c.d.o.1darray
amdgcn_image_sample_c_d_o_2d, // llvm.amdgcn.image.sample.c.d.o.2d
amdgcn_image_sample_c_d_o_2darray, // llvm.amdgcn.image.sample.c.d.o.2darray
amdgcn_image_sample_c_d_o_3d, // llvm.amdgcn.image.sample.c.d.o.3d
amdgcn_image_sample_c_d_o_cube, // llvm.amdgcn.image.sample.c.d.o.cube
amdgcn_image_sample_c_l_1d, // llvm.amdgcn.image.sample.c.l.1d
amdgcn_image_sample_c_l_1darray, // llvm.amdgcn.image.sample.c.l.1darray
amdgcn_image_sample_c_l_2d, // llvm.amdgcn.image.sample.c.l.2d
amdgcn_image_sample_c_l_2darray, // llvm.amdgcn.image.sample.c.l.2darray
amdgcn_image_sample_c_l_3d, // llvm.amdgcn.image.sample.c.l.3d
amdgcn_image_sample_c_l_cube, // llvm.amdgcn.image.sample.c.l.cube
amdgcn_image_sample_c_l_o_1d, // llvm.amdgcn.image.sample.c.l.o.1d
amdgcn_image_sample_c_l_o_1darray, // llvm.amdgcn.image.sample.c.l.o.1darray
amdgcn_image_sample_c_l_o_2d, // llvm.amdgcn.image.sample.c.l.o.2d
amdgcn_image_sample_c_l_o_2darray, // llvm.amdgcn.image.sample.c.l.o.2darray
amdgcn_image_sample_c_l_o_3d, // llvm.amdgcn.image.sample.c.l.o.3d
amdgcn_image_sample_c_l_o_cube, // llvm.amdgcn.image.sample.c.l.o.cube
amdgcn_image_sample_c_lz_1d, // llvm.amdgcn.image.sample.c.lz.1d
amdgcn_image_sample_c_lz_1darray, // llvm.amdgcn.image.sample.c.lz.1darray
amdgcn_image_sample_c_lz_2d, // llvm.amdgcn.image.sample.c.lz.2d
amdgcn_image_sample_c_lz_2darray, // llvm.amdgcn.image.sample.c.lz.2darray
amdgcn_image_sample_c_lz_3d, // llvm.amdgcn.image.sample.c.lz.3d
amdgcn_image_sample_c_lz_cube, // llvm.amdgcn.image.sample.c.lz.cube
amdgcn_image_sample_c_lz_o_1d, // llvm.amdgcn.image.sample.c.lz.o.1d
amdgcn_image_sample_c_lz_o_1darray, // llvm.amdgcn.image.sample.c.lz.o.1darray
amdgcn_image_sample_c_lz_o_2d, // llvm.amdgcn.image.sample.c.lz.o.2d
amdgcn_image_sample_c_lz_o_2darray, // llvm.amdgcn.image.sample.c.lz.o.2darray
amdgcn_image_sample_c_lz_o_3d, // llvm.amdgcn.image.sample.c.lz.o.3d
amdgcn_image_sample_c_lz_o_cube, // llvm.amdgcn.image.sample.c.lz.o.cube
amdgcn_image_sample_c_o_1d, // llvm.amdgcn.image.sample.c.o.1d
amdgcn_image_sample_c_o_1darray, // llvm.amdgcn.image.sample.c.o.1darray
amdgcn_image_sample_c_o_2d, // llvm.amdgcn.image.sample.c.o.2d
amdgcn_image_sample_c_o_2darray, // llvm.amdgcn.image.sample.c.o.2darray
amdgcn_image_sample_c_o_3d, // llvm.amdgcn.image.sample.c.o.3d
amdgcn_image_sample_c_o_cube, // llvm.amdgcn.image.sample.c.o.cube
amdgcn_image_sample_cd_1d, // llvm.amdgcn.image.sample.cd.1d
amdgcn_image_sample_cd_1darray, // llvm.amdgcn.image.sample.cd.1darray
amdgcn_image_sample_cd_2d, // llvm.amdgcn.image.sample.cd.2d
amdgcn_image_sample_cd_2darray, // llvm.amdgcn.image.sample.cd.2darray
amdgcn_image_sample_cd_3d, // llvm.amdgcn.image.sample.cd.3d
amdgcn_image_sample_cd_cl_1d, // llvm.amdgcn.image.sample.cd.cl.1d
amdgcn_image_sample_cd_cl_1darray, // llvm.amdgcn.image.sample.cd.cl.1darray
amdgcn_image_sample_cd_cl_2d, // llvm.amdgcn.image.sample.cd.cl.2d
amdgcn_image_sample_cd_cl_2darray, // llvm.amdgcn.image.sample.cd.cl.2darray
amdgcn_image_sample_cd_cl_3d, // llvm.amdgcn.image.sample.cd.cl.3d
amdgcn_image_sample_cd_cl_cube, // llvm.amdgcn.image.sample.cd.cl.cube
amdgcn_image_sample_cd_cl_o_1d, // llvm.amdgcn.image.sample.cd.cl.o.1d
amdgcn_image_sample_cd_cl_o_1darray, // llvm.amdgcn.image.sample.cd.cl.o.1darray
amdgcn_image_sample_cd_cl_o_2d, // llvm.amdgcn.image.sample.cd.cl.o.2d
amdgcn_image_sample_cd_cl_o_2darray, // llvm.amdgcn.image.sample.cd.cl.o.2darray
amdgcn_image_sample_cd_cl_o_3d, // llvm.amdgcn.image.sample.cd.cl.o.3d
amdgcn_image_sample_cd_cl_o_cube, // llvm.amdgcn.image.sample.cd.cl.o.cube
amdgcn_image_sample_cd_cube, // llvm.amdgcn.image.sample.cd.cube
amdgcn_image_sample_cd_o_1d, // llvm.amdgcn.image.sample.cd.o.1d
amdgcn_image_sample_cd_o_1darray, // llvm.amdgcn.image.sample.cd.o.1darray
amdgcn_image_sample_cd_o_2d, // llvm.amdgcn.image.sample.cd.o.2d
amdgcn_image_sample_cd_o_2darray, // llvm.amdgcn.image.sample.cd.o.2darray
amdgcn_image_sample_cd_o_3d, // llvm.amdgcn.image.sample.cd.o.3d
amdgcn_image_sample_cd_o_cube, // llvm.amdgcn.image.sample.cd.o.cube
amdgcn_image_sample_cl_1d, // llvm.amdgcn.image.sample.cl.1d
amdgcn_image_sample_cl_1darray, // llvm.amdgcn.image.sample.cl.1darray
amdgcn_image_sample_cl_2d, // llvm.amdgcn.image.sample.cl.2d
amdgcn_image_sample_cl_2darray, // llvm.amdgcn.image.sample.cl.2darray
amdgcn_image_sample_cl_3d, // llvm.amdgcn.image.sample.cl.3d
amdgcn_image_sample_cl_cube, // llvm.amdgcn.image.sample.cl.cube
amdgcn_image_sample_cl_o_1d, // llvm.amdgcn.image.sample.cl.o.1d
amdgcn_image_sample_cl_o_1darray, // llvm.amdgcn.image.sample.cl.o.1darray
amdgcn_image_sample_cl_o_2d, // llvm.amdgcn.image.sample.cl.o.2d
amdgcn_image_sample_cl_o_2darray, // llvm.amdgcn.image.sample.cl.o.2darray
amdgcn_image_sample_cl_o_3d, // llvm.amdgcn.image.sample.cl.o.3d
amdgcn_image_sample_cl_o_cube, // llvm.amdgcn.image.sample.cl.o.cube
amdgcn_image_sample_cube, // llvm.amdgcn.image.sample.cube
amdgcn_image_sample_d_1d, // llvm.amdgcn.image.sample.d.1d
amdgcn_image_sample_d_1darray, // llvm.amdgcn.image.sample.d.1darray
amdgcn_image_sample_d_2d, // llvm.amdgcn.image.sample.d.2d
amdgcn_image_sample_d_2darray, // llvm.amdgcn.image.sample.d.2darray
amdgcn_image_sample_d_3d, // llvm.amdgcn.image.sample.d.3d
amdgcn_image_sample_d_cl_1d, // llvm.amdgcn.image.sample.d.cl.1d
amdgcn_image_sample_d_cl_1darray, // llvm.amdgcn.image.sample.d.cl.1darray
amdgcn_image_sample_d_cl_2d, // llvm.amdgcn.image.sample.d.cl.2d
amdgcn_image_sample_d_cl_2darray, // llvm.amdgcn.image.sample.d.cl.2darray
amdgcn_image_sample_d_cl_3d, // llvm.amdgcn.image.sample.d.cl.3d
amdgcn_image_sample_d_cl_cube, // llvm.amdgcn.image.sample.d.cl.cube
amdgcn_image_sample_d_cl_o_1d, // llvm.amdgcn.image.sample.d.cl.o.1d
amdgcn_image_sample_d_cl_o_1darray, // llvm.amdgcn.image.sample.d.cl.o.1darray
amdgcn_image_sample_d_cl_o_2d, // llvm.amdgcn.image.sample.d.cl.o.2d
amdgcn_image_sample_d_cl_o_2darray, // llvm.amdgcn.image.sample.d.cl.o.2darray
amdgcn_image_sample_d_cl_o_3d, // llvm.amdgcn.image.sample.d.cl.o.3d
amdgcn_image_sample_d_cl_o_cube, // llvm.amdgcn.image.sample.d.cl.o.cube
amdgcn_image_sample_d_cube, // llvm.amdgcn.image.sample.d.cube
amdgcn_image_sample_d_o_1d, // llvm.amdgcn.image.sample.d.o.1d
amdgcn_image_sample_d_o_1darray, // llvm.amdgcn.image.sample.d.o.1darray
amdgcn_image_sample_d_o_2d, // llvm.amdgcn.image.sample.d.o.2d
amdgcn_image_sample_d_o_2darray, // llvm.amdgcn.image.sample.d.o.2darray
amdgcn_image_sample_d_o_3d, // llvm.amdgcn.image.sample.d.o.3d
amdgcn_image_sample_d_o_cube, // llvm.amdgcn.image.sample.d.o.cube
amdgcn_image_sample_l_1d, // llvm.amdgcn.image.sample.l.1d
amdgcn_image_sample_l_1darray, // llvm.amdgcn.image.sample.l.1darray
amdgcn_image_sample_l_2d, // llvm.amdgcn.image.sample.l.2d
amdgcn_image_sample_l_2darray, // llvm.amdgcn.image.sample.l.2darray
amdgcn_image_sample_l_3d, // llvm.amdgcn.image.sample.l.3d
amdgcn_image_sample_l_cube, // llvm.amdgcn.image.sample.l.cube
amdgcn_image_sample_l_o_1d, // llvm.amdgcn.image.sample.l.o.1d
amdgcn_image_sample_l_o_1darray, // llvm.amdgcn.image.sample.l.o.1darray
amdgcn_image_sample_l_o_2d, // llvm.amdgcn.image.sample.l.o.2d
amdgcn_image_sample_l_o_2darray, // llvm.amdgcn.image.sample.l.o.2darray
amdgcn_image_sample_l_o_3d, // llvm.amdgcn.image.sample.l.o.3d
amdgcn_image_sample_l_o_cube, // llvm.amdgcn.image.sample.l.o.cube
amdgcn_image_sample_lz_1d, // llvm.amdgcn.image.sample.lz.1d
amdgcn_image_sample_lz_1darray, // llvm.amdgcn.image.sample.lz.1darray
amdgcn_image_sample_lz_2d, // llvm.amdgcn.image.sample.lz.2d
amdgcn_image_sample_lz_2darray, // llvm.amdgcn.image.sample.lz.2darray
amdgcn_image_sample_lz_3d, // llvm.amdgcn.image.sample.lz.3d
amdgcn_image_sample_lz_cube, // llvm.amdgcn.image.sample.lz.cube
amdgcn_image_sample_lz_o_1d, // llvm.amdgcn.image.sample.lz.o.1d
amdgcn_image_sample_lz_o_1darray, // llvm.amdgcn.image.sample.lz.o.1darray
amdgcn_image_sample_lz_o_2d, // llvm.amdgcn.image.sample.lz.o.2d
amdgcn_image_sample_lz_o_2darray, // llvm.amdgcn.image.sample.lz.o.2darray
amdgcn_image_sample_lz_o_3d, // llvm.amdgcn.image.sample.lz.o.3d
amdgcn_image_sample_lz_o_cube, // llvm.amdgcn.image.sample.lz.o.cube
amdgcn_image_sample_o_1d, // llvm.amdgcn.image.sample.o.1d
amdgcn_image_sample_o_1darray, // llvm.amdgcn.image.sample.o.1darray
amdgcn_image_sample_o_2d, // llvm.amdgcn.image.sample.o.2d
amdgcn_image_sample_o_2darray, // llvm.amdgcn.image.sample.o.2darray
amdgcn_image_sample_o_3d, // llvm.amdgcn.image.sample.o.3d
amdgcn_image_sample_o_cube, // llvm.amdgcn.image.sample.o.cube
amdgcn_image_store_1d, // llvm.amdgcn.image.store.1d
amdgcn_image_store_1darray, // llvm.amdgcn.image.store.1darray
amdgcn_image_store_2d, // llvm.amdgcn.image.store.2d
amdgcn_image_store_2darray, // llvm.amdgcn.image.store.2darray
amdgcn_image_store_2darraymsaa, // llvm.amdgcn.image.store.2darraymsaa
amdgcn_image_store_2dmsaa, // llvm.amdgcn.image.store.2dmsaa
amdgcn_image_store_3d, // llvm.amdgcn.image.store.3d
amdgcn_image_store_cube, // llvm.amdgcn.image.store.cube
amdgcn_image_store_mip_1d, // llvm.amdgcn.image.store.mip.1d
amdgcn_image_store_mip_1darray, // llvm.amdgcn.image.store.mip.1darray
amdgcn_image_store_mip_2d, // llvm.amdgcn.image.store.mip.2d
amdgcn_image_store_mip_2darray, // llvm.amdgcn.image.store.mip.2darray
amdgcn_image_store_mip_3d, // llvm.amdgcn.image.store.mip.3d
amdgcn_image_store_mip_cube, // llvm.amdgcn.image.store.mip.cube
amdgcn_implicit_buffer_ptr, // llvm.amdgcn.implicit.buffer.ptr
amdgcn_implicitarg_ptr, // llvm.amdgcn.implicitarg.ptr
amdgcn_init_exec, // llvm.amdgcn.init.exec
amdgcn_init_exec_from_input, // llvm.amdgcn.init.exec.from.input
amdgcn_interp_mov, // llvm.amdgcn.interp.mov
amdgcn_interp_p1, // llvm.amdgcn.interp.p1
amdgcn_interp_p1_f16, // llvm.amdgcn.interp.p1.f16
amdgcn_interp_p2, // llvm.amdgcn.interp.p2
amdgcn_interp_p2_f16, // llvm.amdgcn.interp.p2.f16
amdgcn_is_private, // llvm.amdgcn.is.private
amdgcn_is_shared, // llvm.amdgcn.is.shared
amdgcn_kernarg_segment_ptr, // llvm.amdgcn.kernarg.segment.ptr
amdgcn_kill, // llvm.amdgcn.kill
amdgcn_ldexp, // llvm.amdgcn.ldexp
amdgcn_lerp, // llvm.amdgcn.lerp
amdgcn_live_mask, // llvm.amdgcn.live.mask
amdgcn_log_clamp, // llvm.amdgcn.log.clamp
amdgcn_loop, // llvm.amdgcn.loop
amdgcn_mbcnt_hi, // llvm.amdgcn.mbcnt.hi
amdgcn_mbcnt_lo, // llvm.amdgcn.mbcnt.lo
amdgcn_mfma_f32_16x16x16bf16_1k, // llvm.amdgcn.mfma.f32.16x16x16bf16.1k
amdgcn_mfma_f32_16x16x16f16, // llvm.amdgcn.mfma.f32.16x16x16f16
amdgcn_mfma_f32_16x16x1f32, // llvm.amdgcn.mfma.f32.16x16x1f32
amdgcn_mfma_f32_16x16x2bf16, // llvm.amdgcn.mfma.f32.16x16x2bf16
amdgcn_mfma_f32_16x16x4bf16_1k, // llvm.amdgcn.mfma.f32.16x16x4bf16.1k
amdgcn_mfma_f32_16x16x4f16, // llvm.amdgcn.mfma.f32.16x16x4f16
amdgcn_mfma_f32_16x16x4f32, // llvm.amdgcn.mfma.f32.16x16x4f32
amdgcn_mfma_f32_16x16x8bf16, // llvm.amdgcn.mfma.f32.16x16x8bf16
amdgcn_mfma_f32_32x32x1f32, // llvm.amdgcn.mfma.f32.32x32x1f32
amdgcn_mfma_f32_32x32x2bf16, // llvm.amdgcn.mfma.f32.32x32x2bf16
amdgcn_mfma_f32_32x32x2f32, // llvm.amdgcn.mfma.f32.32x32x2f32
amdgcn_mfma_f32_32x32x4bf16, // llvm.amdgcn.mfma.f32.32x32x4bf16
amdgcn_mfma_f32_32x32x4bf16_1k, // llvm.amdgcn.mfma.f32.32x32x4bf16.1k
amdgcn_mfma_f32_32x32x4f16, // llvm.amdgcn.mfma.f32.32x32x4f16
amdgcn_mfma_f32_32x32x8bf16_1k, // llvm.amdgcn.mfma.f32.32x32x8bf16.1k
amdgcn_mfma_f32_32x32x8f16, // llvm.amdgcn.mfma.f32.32x32x8f16
amdgcn_mfma_f32_4x4x1f32, // llvm.amdgcn.mfma.f32.4x4x1f32
amdgcn_mfma_f32_4x4x2bf16, // llvm.amdgcn.mfma.f32.4x4x2bf16
amdgcn_mfma_f32_4x4x4bf16_1k, // llvm.amdgcn.mfma.f32.4x4x4bf16.1k
amdgcn_mfma_f32_4x4x4f16, // llvm.amdgcn.mfma.f32.4x4x4f16
amdgcn_mfma_f64_16x16x4f64, // llvm.amdgcn.mfma.f64.16x16x4f64
amdgcn_mfma_f64_4x4x4f64, // llvm.amdgcn.mfma.f64.4x4x4f64
amdgcn_mfma_i32_16x16x16i8, // llvm.amdgcn.mfma.i32.16x16x16i8
amdgcn_mfma_i32_16x16x4i8, // llvm.amdgcn.mfma.i32.16x16x4i8
amdgcn_mfma_i32_32x32x4i8, // llvm.amdgcn.mfma.i32.32x32x4i8
amdgcn_mfma_i32_32x32x8i8, // llvm.amdgcn.mfma.i32.32x32x8i8
amdgcn_mfma_i32_4x4x4i8, // llvm.amdgcn.mfma.i32.4x4x4i8
amdgcn_mov_dpp, // llvm.amdgcn.mov.dpp
amdgcn_mov_dpp8, // llvm.amdgcn.mov.dpp8
amdgcn_mqsad_pk_u16_u8, // llvm.amdgcn.mqsad.pk.u16.u8
amdgcn_mqsad_u32_u8, // llvm.amdgcn.mqsad.u32.u8
amdgcn_msad_u8, // llvm.amdgcn.msad.u8
amdgcn_mul_i24, // llvm.amdgcn.mul.i24
amdgcn_mul_u24, // llvm.amdgcn.mul.u24
amdgcn_mulhi_i24, // llvm.amdgcn.mulhi.i24
amdgcn_mulhi_u24, // llvm.amdgcn.mulhi.u24
amdgcn_perm, // llvm.amdgcn.perm
amdgcn_permlane16, // llvm.amdgcn.permlane16
amdgcn_permlanex16, // llvm.amdgcn.permlanex16
amdgcn_ps_live, // llvm.amdgcn.ps.live
amdgcn_qsad_pk_u16_u8, // llvm.amdgcn.qsad.pk.u16.u8
amdgcn_queue_ptr, // llvm.amdgcn.queue.ptr
amdgcn_raw_buffer_atomic_add, // llvm.amdgcn.raw.buffer.atomic.add
amdgcn_raw_buffer_atomic_and, // llvm.amdgcn.raw.buffer.atomic.and
amdgcn_raw_buffer_atomic_cmpswap, // llvm.amdgcn.raw.buffer.atomic.cmpswap
amdgcn_raw_buffer_atomic_dec, // llvm.amdgcn.raw.buffer.atomic.dec
amdgcn_raw_buffer_atomic_fadd, // llvm.amdgcn.raw.buffer.atomic.fadd
amdgcn_raw_buffer_atomic_fmax, // llvm.amdgcn.raw.buffer.atomic.fmax
amdgcn_raw_buffer_atomic_fmin, // llvm.amdgcn.raw.buffer.atomic.fmin
amdgcn_raw_buffer_atomic_inc, // llvm.amdgcn.raw.buffer.atomic.inc
amdgcn_raw_buffer_atomic_or, // llvm.amdgcn.raw.buffer.atomic.or
amdgcn_raw_buffer_atomic_smax, // llvm.amdgcn.raw.buffer.atomic.smax
amdgcn_raw_buffer_atomic_smin, // llvm.amdgcn.raw.buffer.atomic.smin
amdgcn_raw_buffer_atomic_sub, // llvm.amdgcn.raw.buffer.atomic.sub
amdgcn_raw_buffer_atomic_swap, // llvm.amdgcn.raw.buffer.atomic.swap
amdgcn_raw_buffer_atomic_umax, // llvm.amdgcn.raw.buffer.atomic.umax
amdgcn_raw_buffer_atomic_umin, // llvm.amdgcn.raw.buffer.atomic.umin
amdgcn_raw_buffer_atomic_xor, // llvm.amdgcn.raw.buffer.atomic.xor
amdgcn_raw_buffer_load, // llvm.amdgcn.raw.buffer.load
amdgcn_raw_buffer_load_format, // llvm.amdgcn.raw.buffer.load.format
amdgcn_raw_buffer_store, // llvm.amdgcn.raw.buffer.store
amdgcn_raw_buffer_store_format, // llvm.amdgcn.raw.buffer.store.format
amdgcn_raw_tbuffer_load, // llvm.amdgcn.raw.tbuffer.load
amdgcn_raw_tbuffer_store, // llvm.amdgcn.raw.tbuffer.store
amdgcn_rcp, // llvm.amdgcn.rcp
amdgcn_rcp_legacy, // llvm.amdgcn.rcp.legacy
amdgcn_readfirstlane, // llvm.amdgcn.readfirstlane
amdgcn_readlane, // llvm.amdgcn.readlane
amdgcn_reloc_constant, // llvm.amdgcn.reloc.constant
amdgcn_rsq, // llvm.amdgcn.rsq
amdgcn_rsq_clamp, // llvm.amdgcn.rsq.clamp
amdgcn_rsq_legacy, // llvm.amdgcn.rsq.legacy
amdgcn_s_barrier, // llvm.amdgcn.s.barrier
amdgcn_s_buffer_load, // llvm.amdgcn.s.buffer.load
amdgcn_s_dcache_inv, // llvm.amdgcn.s.dcache.inv
amdgcn_s_dcache_inv_vol, // llvm.amdgcn.s.dcache.inv.vol
amdgcn_s_dcache_wb, // llvm.amdgcn.s.dcache.wb
amdgcn_s_dcache_wb_vol, // llvm.amdgcn.s.dcache.wb.vol
amdgcn_s_decperflevel, // llvm.amdgcn.s.decperflevel
amdgcn_s_get_waveid_in_workgroup, // llvm.amdgcn.s.get.waveid.in.workgroup
amdgcn_s_getpc, // llvm.amdgcn.s.getpc
amdgcn_s_getreg, // llvm.amdgcn.s.getreg
amdgcn_s_incperflevel, // llvm.amdgcn.s.incperflevel
amdgcn_s_memrealtime, // llvm.amdgcn.s.memrealtime
amdgcn_s_memtime, // llvm.amdgcn.s.memtime
amdgcn_s_sendmsg, // llvm.amdgcn.s.sendmsg
amdgcn_s_sendmsghalt, // llvm.amdgcn.s.sendmsghalt
amdgcn_s_sethalt, // llvm.amdgcn.s.sethalt
amdgcn_s_setreg, // llvm.amdgcn.s.setreg
amdgcn_s_sleep, // llvm.amdgcn.s.sleep
amdgcn_s_waitcnt, // llvm.amdgcn.s.waitcnt
amdgcn_sad_hi_u8, // llvm.amdgcn.sad.hi.u8
amdgcn_sad_u16, // llvm.amdgcn.sad.u16
amdgcn_sad_u8, // llvm.amdgcn.sad.u8
amdgcn_sbfe, // llvm.amdgcn.sbfe
amdgcn_sdot2, // llvm.amdgcn.sdot2
amdgcn_sdot4, // llvm.amdgcn.sdot4
amdgcn_sdot8, // llvm.amdgcn.sdot8
amdgcn_set_inactive, // llvm.amdgcn.set.inactive
amdgcn_sffbh, // llvm.amdgcn.sffbh
amdgcn_sin, // llvm.amdgcn.sin
amdgcn_softwqm, // llvm.amdgcn.softwqm
amdgcn_sqrt, // llvm.amdgcn.sqrt
amdgcn_strict_wqm, // llvm.amdgcn.strict.wqm
amdgcn_strict_wwm, // llvm.amdgcn.strict.wwm
amdgcn_struct_buffer_atomic_add, // llvm.amdgcn.struct.buffer.atomic.add
amdgcn_struct_buffer_atomic_and, // llvm.amdgcn.struct.buffer.atomic.and
amdgcn_struct_buffer_atomic_cmpswap, // llvm.amdgcn.struct.buffer.atomic.cmpswap
amdgcn_struct_buffer_atomic_dec, // llvm.amdgcn.struct.buffer.atomic.dec
amdgcn_struct_buffer_atomic_fadd, // llvm.amdgcn.struct.buffer.atomic.fadd
amdgcn_struct_buffer_atomic_fmax, // llvm.amdgcn.struct.buffer.atomic.fmax
amdgcn_struct_buffer_atomic_fmin, // llvm.amdgcn.struct.buffer.atomic.fmin
amdgcn_struct_buffer_atomic_inc, // llvm.amdgcn.struct.buffer.atomic.inc
amdgcn_struct_buffer_atomic_or, // llvm.amdgcn.struct.buffer.atomic.or
amdgcn_struct_buffer_atomic_smax, // llvm.amdgcn.struct.buffer.atomic.smax
amdgcn_struct_buffer_atomic_smin, // llvm.amdgcn.struct.buffer.atomic.smin
amdgcn_struct_buffer_atomic_sub, // llvm.amdgcn.struct.buffer.atomic.sub
amdgcn_struct_buffer_atomic_swap, // llvm.amdgcn.struct.buffer.atomic.swap
amdgcn_struct_buffer_atomic_umax, // llvm.amdgcn.struct.buffer.atomic.umax
amdgcn_struct_buffer_atomic_umin, // llvm.amdgcn.struct.buffer.atomic.umin
amdgcn_struct_buffer_atomic_xor, // llvm.amdgcn.struct.buffer.atomic.xor
amdgcn_struct_buffer_load, // llvm.amdgcn.struct.buffer.load
amdgcn_struct_buffer_load_format, // llvm.amdgcn.struct.buffer.load.format
amdgcn_struct_buffer_store, // llvm.amdgcn.struct.buffer.store
amdgcn_struct_buffer_store_format, // llvm.amdgcn.struct.buffer.store.format
amdgcn_struct_tbuffer_load, // llvm.amdgcn.struct.tbuffer.load
amdgcn_struct_tbuffer_store, // llvm.amdgcn.struct.tbuffer.store
amdgcn_tbuffer_load, // llvm.amdgcn.tbuffer.load
amdgcn_tbuffer_store, // llvm.amdgcn.tbuffer.store
amdgcn_trig_preop, // llvm.amdgcn.trig.preop
amdgcn_ubfe, // llvm.amdgcn.ubfe
amdgcn_udot2, // llvm.amdgcn.udot2
amdgcn_udot4, // llvm.amdgcn.udot4
amdgcn_udot8, // llvm.amdgcn.udot8
amdgcn_unreachable, // llvm.amdgcn.unreachable
amdgcn_update_dpp, // llvm.amdgcn.update.dpp
amdgcn_wave_barrier, // llvm.amdgcn.wave.barrier
amdgcn_wavefrontsize, // llvm.amdgcn.wavefrontsize
amdgcn_workgroup_id_x, // llvm.amdgcn.workgroup.id.x
amdgcn_workgroup_id_y, // llvm.amdgcn.workgroup.id.y
amdgcn_workgroup_id_z, // llvm.amdgcn.workgroup.id.z
amdgcn_workitem_id_x, // llvm.amdgcn.workitem.id.x
amdgcn_workitem_id_y, // llvm.amdgcn.workitem.id.y
amdgcn_workitem_id_z, // llvm.amdgcn.workitem.id.z
amdgcn_wqm, // llvm.amdgcn.wqm
amdgcn_wqm_demote, // llvm.amdgcn.wqm.demote
amdgcn_wqm_vote, // llvm.amdgcn.wqm.vote
amdgcn_writelane, // llvm.amdgcn.writelane
amdgcn_wwm, // llvm.amdgcn.wwm
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
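
These AMDGCN IDs are most often consumed on the matching side. A hedged sketch (hypothetical helper, not part of the generated file) that recognizes workitem-id queries by switching on the enum values above:

// Hedged sketch, not part of the generated file: classify a call site by
// switching on its intrinsic ID. Assumes the per-target enum header above
// is included.
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/Support/Casting.h"

using namespace llvm;

// Hypothetical helper: true if I reads a workitem id via
// llvm.amdgcn.workitem.id.{x,y,z}.
static bool isWorkitemIdQuery(const Instruction &I) {
  if (const auto *II = dyn_cast<IntrinsicInst>(&I)) {
    switch (II->getIntrinsicID()) {
    case Intrinsic::amdgcn_workitem_id_x:
    case Intrinsic::amdgcn_workitem_id_y:
    case Intrinsic::amdgcn_workitem_id_z:
      return true;
    default:
      break;
    }
  }
  return false;
}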

File diff suppressed because it is too large

@@ -0,0 +1,511 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_ARM_ENUMS_H
#define LLVM_IR_INTRINSIC_ARM_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum ARMIntrinsics : unsigned {
// Enum values for intrinsics
arm_cde_cx1 = 1972, // llvm.arm.cde.cx1
arm_cde_cx1a, // llvm.arm.cde.cx1a
arm_cde_cx1d, // llvm.arm.cde.cx1d
arm_cde_cx1da, // llvm.arm.cde.cx1da
arm_cde_cx2, // llvm.arm.cde.cx2
arm_cde_cx2a, // llvm.arm.cde.cx2a
arm_cde_cx2d, // llvm.arm.cde.cx2d
arm_cde_cx2da, // llvm.arm.cde.cx2da
arm_cde_cx3, // llvm.arm.cde.cx3
arm_cde_cx3a, // llvm.arm.cde.cx3a
arm_cde_cx3d, // llvm.arm.cde.cx3d
arm_cde_cx3da, // llvm.arm.cde.cx3da
arm_cde_vcx1, // llvm.arm.cde.vcx1
arm_cde_vcx1a, // llvm.arm.cde.vcx1a
arm_cde_vcx1q, // llvm.arm.cde.vcx1q
arm_cde_vcx1q_predicated, // llvm.arm.cde.vcx1q.predicated
arm_cde_vcx1qa, // llvm.arm.cde.vcx1qa
arm_cde_vcx1qa_predicated, // llvm.arm.cde.vcx1qa.predicated
arm_cde_vcx2, // llvm.arm.cde.vcx2
arm_cde_vcx2a, // llvm.arm.cde.vcx2a
arm_cde_vcx2q, // llvm.arm.cde.vcx2q
arm_cde_vcx2q_predicated, // llvm.arm.cde.vcx2q.predicated
arm_cde_vcx2qa, // llvm.arm.cde.vcx2qa
arm_cde_vcx2qa_predicated, // llvm.arm.cde.vcx2qa.predicated
arm_cde_vcx3, // llvm.arm.cde.vcx3
arm_cde_vcx3a, // llvm.arm.cde.vcx3a
arm_cde_vcx3q, // llvm.arm.cde.vcx3q
arm_cde_vcx3q_predicated, // llvm.arm.cde.vcx3q.predicated
arm_cde_vcx3qa, // llvm.arm.cde.vcx3qa
arm_cde_vcx3qa_predicated, // llvm.arm.cde.vcx3qa.predicated
arm_cdp, // llvm.arm.cdp
arm_cdp2, // llvm.arm.cdp2
arm_clrex, // llvm.arm.clrex
arm_cls, // llvm.arm.cls
arm_cls64, // llvm.arm.cls64
arm_cmse_tt, // llvm.arm.cmse.tt
arm_cmse_tta, // llvm.arm.cmse.tta
arm_cmse_ttat, // llvm.arm.cmse.ttat
arm_cmse_ttt, // llvm.arm.cmse.ttt
arm_crc32b, // llvm.arm.crc32b
arm_crc32cb, // llvm.arm.crc32cb
arm_crc32ch, // llvm.arm.crc32ch
arm_crc32cw, // llvm.arm.crc32cw
arm_crc32h, // llvm.arm.crc32h
arm_crc32w, // llvm.arm.crc32w
arm_dbg, // llvm.arm.dbg
arm_dmb, // llvm.arm.dmb
arm_dsb, // llvm.arm.dsb
arm_get_fpscr, // llvm.arm.get.fpscr
arm_gnu_eabi_mcount, // llvm.arm.gnu.eabi.mcount
arm_hint, // llvm.arm.hint
arm_isb, // llvm.arm.isb
arm_ldaex, // llvm.arm.ldaex
arm_ldaexd, // llvm.arm.ldaexd
arm_ldc, // llvm.arm.ldc
arm_ldc2, // llvm.arm.ldc2
arm_ldc2l, // llvm.arm.ldc2l
arm_ldcl, // llvm.arm.ldcl
arm_ldrex, // llvm.arm.ldrex
arm_ldrexd, // llvm.arm.ldrexd
arm_mcr, // llvm.arm.mcr
arm_mcr2, // llvm.arm.mcr2
arm_mcrr, // llvm.arm.mcrr
arm_mcrr2, // llvm.arm.mcrr2
arm_mrc, // llvm.arm.mrc
arm_mrc2, // llvm.arm.mrc2
arm_mrrc, // llvm.arm.mrrc
arm_mrrc2, // llvm.arm.mrrc2
arm_mve_abd_predicated, // llvm.arm.mve.abd.predicated
arm_mve_abs_predicated, // llvm.arm.mve.abs.predicated
arm_mve_add_predicated, // llvm.arm.mve.add.predicated
arm_mve_addlv, // llvm.arm.mve.addlv
arm_mve_addlv_predicated, // llvm.arm.mve.addlv.predicated
arm_mve_addv, // llvm.arm.mve.addv
arm_mve_addv_predicated, // llvm.arm.mve.addv.predicated
arm_mve_and_predicated, // llvm.arm.mve.and.predicated
arm_mve_asrl, // llvm.arm.mve.asrl
arm_mve_bic_predicated, // llvm.arm.mve.bic.predicated
arm_mve_cls_predicated, // llvm.arm.mve.cls.predicated
arm_mve_clz_predicated, // llvm.arm.mve.clz.predicated
arm_mve_eor_predicated, // llvm.arm.mve.eor.predicated
arm_mve_fma_predicated, // llvm.arm.mve.fma.predicated
arm_mve_hadd_predicated, // llvm.arm.mve.hadd.predicated
arm_mve_hsub_predicated, // llvm.arm.mve.hsub.predicated
arm_mve_lsll, // llvm.arm.mve.lsll
arm_mve_max_predicated, // llvm.arm.mve.max.predicated
arm_mve_maxav, // llvm.arm.mve.maxav
arm_mve_maxav_predicated, // llvm.arm.mve.maxav.predicated
arm_mve_maxnmav, // llvm.arm.mve.maxnmav
arm_mve_maxnmav_predicated, // llvm.arm.mve.maxnmav.predicated
arm_mve_maxnmv, // llvm.arm.mve.maxnmv
arm_mve_maxnmv_predicated, // llvm.arm.mve.maxnmv.predicated
arm_mve_maxv, // llvm.arm.mve.maxv
arm_mve_maxv_predicated, // llvm.arm.mve.maxv.predicated
arm_mve_min_predicated, // llvm.arm.mve.min.predicated
arm_mve_minav, // llvm.arm.mve.minav
arm_mve_minav_predicated, // llvm.arm.mve.minav.predicated
arm_mve_minnmav, // llvm.arm.mve.minnmav
arm_mve_minnmav_predicated, // llvm.arm.mve.minnmav.predicated
arm_mve_minnmv, // llvm.arm.mve.minnmv
arm_mve_minnmv_predicated, // llvm.arm.mve.minnmv.predicated
arm_mve_minv, // llvm.arm.mve.minv
arm_mve_minv_predicated, // llvm.arm.mve.minv.predicated
arm_mve_mul_predicated, // llvm.arm.mve.mul.predicated
arm_mve_mulh_predicated, // llvm.arm.mve.mulh.predicated
arm_mve_mull_int_predicated, // llvm.arm.mve.mull.int.predicated
arm_mve_mull_poly_predicated, // llvm.arm.mve.mull.poly.predicated
arm_mve_mvn_predicated, // llvm.arm.mve.mvn.predicated
arm_mve_neg_predicated, // llvm.arm.mve.neg.predicated
arm_mve_orn_predicated, // llvm.arm.mve.orn.predicated
arm_mve_orr_predicated, // llvm.arm.mve.orr.predicated
arm_mve_pred_i2v, // llvm.arm.mve.pred.i2v
arm_mve_pred_v2i, // llvm.arm.mve.pred.v2i
arm_mve_qabs_predicated, // llvm.arm.mve.qabs.predicated
arm_mve_qadd_predicated, // llvm.arm.mve.qadd.predicated
arm_mve_qdmulh_predicated, // llvm.arm.mve.qdmulh.predicated
arm_mve_qneg_predicated, // llvm.arm.mve.qneg.predicated
arm_mve_qrdmulh_predicated, // llvm.arm.mve.qrdmulh.predicated
arm_mve_qsub_predicated, // llvm.arm.mve.qsub.predicated
arm_mve_rhadd_predicated, // llvm.arm.mve.rhadd.predicated
arm_mve_rmulh_predicated, // llvm.arm.mve.rmulh.predicated
arm_mve_shl_imm_predicated, // llvm.arm.mve.shl.imm.predicated
arm_mve_shr_imm_predicated, // llvm.arm.mve.shr.imm.predicated
arm_mve_sqrshr, // llvm.arm.mve.sqrshr
arm_mve_sqrshrl, // llvm.arm.mve.sqrshrl
arm_mve_sqshl, // llvm.arm.mve.sqshl
arm_mve_sqshll, // llvm.arm.mve.sqshll
arm_mve_srshr, // llvm.arm.mve.srshr
arm_mve_srshrl, // llvm.arm.mve.srshrl
arm_mve_sub_predicated, // llvm.arm.mve.sub.predicated
arm_mve_uqrshl, // llvm.arm.mve.uqrshl
arm_mve_uqrshll, // llvm.arm.mve.uqrshll
arm_mve_uqshl, // llvm.arm.mve.uqshl
arm_mve_uqshll, // llvm.arm.mve.uqshll
arm_mve_urshr, // llvm.arm.mve.urshr
arm_mve_urshrl, // llvm.arm.mve.urshrl
arm_mve_vabav, // llvm.arm.mve.vabav
arm_mve_vabav_predicated, // llvm.arm.mve.vabav.predicated
arm_mve_vabd, // llvm.arm.mve.vabd
arm_mve_vadc, // llvm.arm.mve.vadc
arm_mve_vadc_predicated, // llvm.arm.mve.vadc.predicated
arm_mve_vbrsr, // llvm.arm.mve.vbrsr
arm_mve_vbrsr_predicated, // llvm.arm.mve.vbrsr.predicated
arm_mve_vcaddq, // llvm.arm.mve.vcaddq
arm_mve_vcaddq_predicated, // llvm.arm.mve.vcaddq.predicated
arm_mve_vcls, // llvm.arm.mve.vcls
arm_mve_vcmlaq, // llvm.arm.mve.vcmlaq
arm_mve_vcmlaq_predicated, // llvm.arm.mve.vcmlaq.predicated
arm_mve_vcmulq, // llvm.arm.mve.vcmulq
arm_mve_vcmulq_predicated, // llvm.arm.mve.vcmulq.predicated
arm_mve_vctp16, // llvm.arm.mve.vctp16
arm_mve_vctp32, // llvm.arm.mve.vctp32
arm_mve_vctp64, // llvm.arm.mve.vctp64
arm_mve_vctp8, // llvm.arm.mve.vctp8
arm_mve_vcvt_fix, // llvm.arm.mve.vcvt.fix
arm_mve_vcvt_fix_predicated, // llvm.arm.mve.vcvt.fix.predicated
arm_mve_vcvt_fp_int_predicated, // llvm.arm.mve.vcvt.fp.int.predicated
arm_mve_vcvt_narrow, // llvm.arm.mve.vcvt.narrow
arm_mve_vcvt_narrow_predicated, // llvm.arm.mve.vcvt.narrow.predicated
arm_mve_vcvt_widen, // llvm.arm.mve.vcvt.widen
arm_mve_vcvt_widen_predicated, // llvm.arm.mve.vcvt.widen.predicated
arm_mve_vcvta, // llvm.arm.mve.vcvta
arm_mve_vcvta_predicated, // llvm.arm.mve.vcvta.predicated
arm_mve_vcvtm, // llvm.arm.mve.vcvtm
arm_mve_vcvtm_predicated, // llvm.arm.mve.vcvtm.predicated
arm_mve_vcvtn, // llvm.arm.mve.vcvtn
arm_mve_vcvtn_predicated, // llvm.arm.mve.vcvtn.predicated
arm_mve_vcvtp, // llvm.arm.mve.vcvtp
arm_mve_vcvtp_predicated, // llvm.arm.mve.vcvtp.predicated
arm_mve_vddup, // llvm.arm.mve.vddup
arm_mve_vddup_predicated, // llvm.arm.mve.vddup.predicated
arm_mve_vdwdup, // llvm.arm.mve.vdwdup
arm_mve_vdwdup_predicated, // llvm.arm.mve.vdwdup.predicated
arm_mve_vhadd, // llvm.arm.mve.vhadd
arm_mve_vhsub, // llvm.arm.mve.vhsub
arm_mve_vidup, // llvm.arm.mve.vidup
arm_mve_vidup_predicated, // llvm.arm.mve.vidup.predicated
arm_mve_viwdup, // llvm.arm.mve.viwdup
arm_mve_viwdup_predicated, // llvm.arm.mve.viwdup.predicated
arm_mve_vld2q, // llvm.arm.mve.vld2q
arm_mve_vld4q, // llvm.arm.mve.vld4q
arm_mve_vldr_gather_base, // llvm.arm.mve.vldr.gather.base
arm_mve_vldr_gather_base_predicated, // llvm.arm.mve.vldr.gather.base.predicated
arm_mve_vldr_gather_base_wb, // llvm.arm.mve.vldr.gather.base.wb
arm_mve_vldr_gather_base_wb_predicated, // llvm.arm.mve.vldr.gather.base.wb.predicated
arm_mve_vldr_gather_offset, // llvm.arm.mve.vldr.gather.offset
arm_mve_vldr_gather_offset_predicated, // llvm.arm.mve.vldr.gather.offset.predicated
arm_mve_vmaxa_predicated, // llvm.arm.mve.vmaxa.predicated
arm_mve_vmaxnma_predicated, // llvm.arm.mve.vmaxnma.predicated
arm_mve_vmina_predicated, // llvm.arm.mve.vmina.predicated
arm_mve_vminnma_predicated, // llvm.arm.mve.vminnma.predicated
arm_mve_vmla_n_predicated, // llvm.arm.mve.vmla.n.predicated
arm_mve_vmlas_n_predicated, // llvm.arm.mve.vmlas.n.predicated
arm_mve_vmldava, // llvm.arm.mve.vmldava
arm_mve_vmldava_predicated, // llvm.arm.mve.vmldava.predicated
arm_mve_vmlldava, // llvm.arm.mve.vmlldava
arm_mve_vmlldava_predicated, // llvm.arm.mve.vmlldava.predicated
arm_mve_vmovl_predicated, // llvm.arm.mve.vmovl.predicated
arm_mve_vmovn_predicated, // llvm.arm.mve.vmovn.predicated
arm_mve_vmulh, // llvm.arm.mve.vmulh
arm_mve_vmull, // llvm.arm.mve.vmull
arm_mve_vmull_poly, // llvm.arm.mve.vmull.poly
arm_mve_vqdmlad, // llvm.arm.mve.vqdmlad
arm_mve_vqdmlad_predicated, // llvm.arm.mve.vqdmlad.predicated
arm_mve_vqdmlah, // llvm.arm.mve.vqdmlah
arm_mve_vqdmlah_predicated, // llvm.arm.mve.vqdmlah.predicated
arm_mve_vqdmlash, // llvm.arm.mve.vqdmlash
arm_mve_vqdmlash_predicated, // llvm.arm.mve.vqdmlash.predicated
arm_mve_vqdmulh, // llvm.arm.mve.vqdmulh
arm_mve_vqdmull, // llvm.arm.mve.vqdmull
arm_mve_vqdmull_predicated, // llvm.arm.mve.vqdmull.predicated
arm_mve_vqmovn, // llvm.arm.mve.vqmovn
arm_mve_vqmovn_predicated, // llvm.arm.mve.vqmovn.predicated
arm_mve_vqrdmlah, // llvm.arm.mve.vqrdmlah
arm_mve_vqrdmlah_predicated, // llvm.arm.mve.vqrdmlah.predicated
arm_mve_vqrdmlash, // llvm.arm.mve.vqrdmlash
arm_mve_vqrdmlash_predicated, // llvm.arm.mve.vqrdmlash.predicated
arm_mve_vqrdmulh, // llvm.arm.mve.vqrdmulh
arm_mve_vqshl_imm, // llvm.arm.mve.vqshl.imm
arm_mve_vqshl_imm_predicated, // llvm.arm.mve.vqshl.imm.predicated
arm_mve_vqshlu_imm, // llvm.arm.mve.vqshlu.imm
arm_mve_vqshlu_imm_predicated, // llvm.arm.mve.vqshlu.imm.predicated
arm_mve_vreinterpretq, // llvm.arm.mve.vreinterpretq
arm_mve_vrev_predicated, // llvm.arm.mve.vrev.predicated
arm_mve_vrhadd, // llvm.arm.mve.vrhadd
arm_mve_vrinta_predicated, // llvm.arm.mve.vrinta.predicated
arm_mve_vrintm_predicated, // llvm.arm.mve.vrintm.predicated
arm_mve_vrintn, // llvm.arm.mve.vrintn
arm_mve_vrintn_predicated, // llvm.arm.mve.vrintn.predicated
arm_mve_vrintp_predicated, // llvm.arm.mve.vrintp.predicated
arm_mve_vrintx_predicated, // llvm.arm.mve.vrintx.predicated
arm_mve_vrintz_predicated, // llvm.arm.mve.vrintz.predicated
arm_mve_vrmlldavha, // llvm.arm.mve.vrmlldavha
arm_mve_vrmlldavha_predicated, // llvm.arm.mve.vrmlldavha.predicated
arm_mve_vrmulh, // llvm.arm.mve.vrmulh
arm_mve_vrshr_imm, // llvm.arm.mve.vrshr.imm
arm_mve_vrshr_imm_predicated, // llvm.arm.mve.vrshr.imm.predicated
arm_mve_vsbc, // llvm.arm.mve.vsbc
arm_mve_vsbc_predicated, // llvm.arm.mve.vsbc.predicated
arm_mve_vshl_scalar, // llvm.arm.mve.vshl.scalar
arm_mve_vshl_scalar_predicated, // llvm.arm.mve.vshl.scalar.predicated
arm_mve_vshl_vector, // llvm.arm.mve.vshl.vector
arm_mve_vshl_vector_predicated, // llvm.arm.mve.vshl.vector.predicated
arm_mve_vshlc, // llvm.arm.mve.vshlc
arm_mve_vshlc_predicated, // llvm.arm.mve.vshlc.predicated
arm_mve_vshll_imm, // llvm.arm.mve.vshll.imm
arm_mve_vshll_imm_predicated, // llvm.arm.mve.vshll.imm.predicated
arm_mve_vshrn, // llvm.arm.mve.vshrn
arm_mve_vshrn_predicated, // llvm.arm.mve.vshrn.predicated
arm_mve_vsli, // llvm.arm.mve.vsli
arm_mve_vsli_predicated, // llvm.arm.mve.vsli.predicated
arm_mve_vsri, // llvm.arm.mve.vsri
arm_mve_vsri_predicated, // llvm.arm.mve.vsri.predicated
arm_mve_vst2q, // llvm.arm.mve.vst2q
arm_mve_vst4q, // llvm.arm.mve.vst4q
arm_mve_vstr_scatter_base, // llvm.arm.mve.vstr.scatter.base
arm_mve_vstr_scatter_base_predicated, // llvm.arm.mve.vstr.scatter.base.predicated
arm_mve_vstr_scatter_base_wb, // llvm.arm.mve.vstr.scatter.base.wb
arm_mve_vstr_scatter_base_wb_predicated, // llvm.arm.mve.vstr.scatter.base.wb.predicated
arm_mve_vstr_scatter_offset, // llvm.arm.mve.vstr.scatter.offset
arm_mve_vstr_scatter_offset_predicated, // llvm.arm.mve.vstr.scatter.offset.predicated
arm_neon_aesd, // llvm.arm.neon.aesd
arm_neon_aese, // llvm.arm.neon.aese
arm_neon_aesimc, // llvm.arm.neon.aesimc
arm_neon_aesmc, // llvm.arm.neon.aesmc
arm_neon_bfdot, // llvm.arm.neon.bfdot
arm_neon_bfmlalb, // llvm.arm.neon.bfmlalb
arm_neon_bfmlalt, // llvm.arm.neon.bfmlalt
arm_neon_bfmmla, // llvm.arm.neon.bfmmla
arm_neon_sdot, // llvm.arm.neon.sdot
arm_neon_sha1c, // llvm.arm.neon.sha1c
arm_neon_sha1h, // llvm.arm.neon.sha1h
arm_neon_sha1m, // llvm.arm.neon.sha1m
arm_neon_sha1p, // llvm.arm.neon.sha1p
arm_neon_sha1su0, // llvm.arm.neon.sha1su0
arm_neon_sha1su1, // llvm.arm.neon.sha1su1
arm_neon_sha256h, // llvm.arm.neon.sha256h
arm_neon_sha256h2, // llvm.arm.neon.sha256h2
arm_neon_sha256su0, // llvm.arm.neon.sha256su0
arm_neon_sha256su1, // llvm.arm.neon.sha256su1
arm_neon_smmla, // llvm.arm.neon.smmla
arm_neon_udot, // llvm.arm.neon.udot
arm_neon_ummla, // llvm.arm.neon.ummla
arm_neon_usdot, // llvm.arm.neon.usdot
arm_neon_usmmla, // llvm.arm.neon.usmmla
arm_neon_vabds, // llvm.arm.neon.vabds
arm_neon_vabdu, // llvm.arm.neon.vabdu
arm_neon_vabs, // llvm.arm.neon.vabs
arm_neon_vacge, // llvm.arm.neon.vacge
arm_neon_vacgt, // llvm.arm.neon.vacgt
arm_neon_vbsl, // llvm.arm.neon.vbsl
arm_neon_vcadd_rot270, // llvm.arm.neon.vcadd.rot270
arm_neon_vcadd_rot90, // llvm.arm.neon.vcadd.rot90
arm_neon_vcls, // llvm.arm.neon.vcls
arm_neon_vcvtas, // llvm.arm.neon.vcvtas
arm_neon_vcvtau, // llvm.arm.neon.vcvtau
arm_neon_vcvtbfp2bf, // llvm.arm.neon.vcvtbfp2bf
arm_neon_vcvtfp2bf, // llvm.arm.neon.vcvtfp2bf
arm_neon_vcvtfp2fxs, // llvm.arm.neon.vcvtfp2fxs
arm_neon_vcvtfp2fxu, // llvm.arm.neon.vcvtfp2fxu
arm_neon_vcvtfp2hf, // llvm.arm.neon.vcvtfp2hf
arm_neon_vcvtfxs2fp, // llvm.arm.neon.vcvtfxs2fp
arm_neon_vcvtfxu2fp, // llvm.arm.neon.vcvtfxu2fp
arm_neon_vcvthf2fp, // llvm.arm.neon.vcvthf2fp
arm_neon_vcvtms, // llvm.arm.neon.vcvtms
arm_neon_vcvtmu, // llvm.arm.neon.vcvtmu
arm_neon_vcvtns, // llvm.arm.neon.vcvtns
arm_neon_vcvtnu, // llvm.arm.neon.vcvtnu
arm_neon_vcvtps, // llvm.arm.neon.vcvtps
arm_neon_vcvtpu, // llvm.arm.neon.vcvtpu
arm_neon_vhadds, // llvm.arm.neon.vhadds
arm_neon_vhaddu, // llvm.arm.neon.vhaddu
arm_neon_vhsubs, // llvm.arm.neon.vhsubs
arm_neon_vhsubu, // llvm.arm.neon.vhsubu
arm_neon_vld1, // llvm.arm.neon.vld1
arm_neon_vld1x2, // llvm.arm.neon.vld1x2
arm_neon_vld1x3, // llvm.arm.neon.vld1x3
arm_neon_vld1x4, // llvm.arm.neon.vld1x4
arm_neon_vld2, // llvm.arm.neon.vld2
arm_neon_vld2dup, // llvm.arm.neon.vld2dup
arm_neon_vld2lane, // llvm.arm.neon.vld2lane
arm_neon_vld3, // llvm.arm.neon.vld3
arm_neon_vld3dup, // llvm.arm.neon.vld3dup
arm_neon_vld3lane, // llvm.arm.neon.vld3lane
arm_neon_vld4, // llvm.arm.neon.vld4
arm_neon_vld4dup, // llvm.arm.neon.vld4dup
arm_neon_vld4lane, // llvm.arm.neon.vld4lane
arm_neon_vmaxnm, // llvm.arm.neon.vmaxnm
arm_neon_vmaxs, // llvm.arm.neon.vmaxs
arm_neon_vmaxu, // llvm.arm.neon.vmaxu
arm_neon_vminnm, // llvm.arm.neon.vminnm
arm_neon_vmins, // llvm.arm.neon.vmins
arm_neon_vminu, // llvm.arm.neon.vminu
arm_neon_vmullp, // llvm.arm.neon.vmullp
arm_neon_vmulls, // llvm.arm.neon.vmulls
arm_neon_vmullu, // llvm.arm.neon.vmullu
arm_neon_vmulp, // llvm.arm.neon.vmulp
arm_neon_vpadals, // llvm.arm.neon.vpadals
arm_neon_vpadalu, // llvm.arm.neon.vpadalu
arm_neon_vpadd, // llvm.arm.neon.vpadd
arm_neon_vpaddls, // llvm.arm.neon.vpaddls
arm_neon_vpaddlu, // llvm.arm.neon.vpaddlu
arm_neon_vpmaxs, // llvm.arm.neon.vpmaxs
arm_neon_vpmaxu, // llvm.arm.neon.vpmaxu
arm_neon_vpmins, // llvm.arm.neon.vpmins
arm_neon_vpminu, // llvm.arm.neon.vpminu
arm_neon_vqabs, // llvm.arm.neon.vqabs
arm_neon_vqdmulh, // llvm.arm.neon.vqdmulh
arm_neon_vqdmull, // llvm.arm.neon.vqdmull
arm_neon_vqmovns, // llvm.arm.neon.vqmovns
arm_neon_vqmovnsu, // llvm.arm.neon.vqmovnsu
arm_neon_vqmovnu, // llvm.arm.neon.vqmovnu
arm_neon_vqneg, // llvm.arm.neon.vqneg
arm_neon_vqrdmlah, // llvm.arm.neon.vqrdmlah
arm_neon_vqrdmlsh, // llvm.arm.neon.vqrdmlsh
arm_neon_vqrdmulh, // llvm.arm.neon.vqrdmulh
arm_neon_vqrshiftns, // llvm.arm.neon.vqrshiftns
arm_neon_vqrshiftnsu, // llvm.arm.neon.vqrshiftnsu
arm_neon_vqrshiftnu, // llvm.arm.neon.vqrshiftnu
arm_neon_vqrshifts, // llvm.arm.neon.vqrshifts
arm_neon_vqrshiftu, // llvm.arm.neon.vqrshiftu
arm_neon_vqshiftns, // llvm.arm.neon.vqshiftns
arm_neon_vqshiftnsu, // llvm.arm.neon.vqshiftnsu
arm_neon_vqshiftnu, // llvm.arm.neon.vqshiftnu
arm_neon_vqshifts, // llvm.arm.neon.vqshifts
arm_neon_vqshiftsu, // llvm.arm.neon.vqshiftsu
arm_neon_vqshiftu, // llvm.arm.neon.vqshiftu
arm_neon_vraddhn, // llvm.arm.neon.vraddhn
arm_neon_vrecpe, // llvm.arm.neon.vrecpe
arm_neon_vrecps, // llvm.arm.neon.vrecps
arm_neon_vrhadds, // llvm.arm.neon.vrhadds
arm_neon_vrhaddu, // llvm.arm.neon.vrhaddu
arm_neon_vrinta, // llvm.arm.neon.vrinta
arm_neon_vrintm, // llvm.arm.neon.vrintm
arm_neon_vrintn, // llvm.arm.neon.vrintn
arm_neon_vrintp, // llvm.arm.neon.vrintp
arm_neon_vrintx, // llvm.arm.neon.vrintx
arm_neon_vrintz, // llvm.arm.neon.vrintz
arm_neon_vrshiftn, // llvm.arm.neon.vrshiftn
arm_neon_vrshifts, // llvm.arm.neon.vrshifts
arm_neon_vrshiftu, // llvm.arm.neon.vrshiftu
arm_neon_vrsqrte, // llvm.arm.neon.vrsqrte
arm_neon_vrsqrts, // llvm.arm.neon.vrsqrts
arm_neon_vrsubhn, // llvm.arm.neon.vrsubhn
arm_neon_vshiftins, // llvm.arm.neon.vshiftins
arm_neon_vshifts, // llvm.arm.neon.vshifts
arm_neon_vshiftu, // llvm.arm.neon.vshiftu
arm_neon_vst1, // llvm.arm.neon.vst1
arm_neon_vst1x2, // llvm.arm.neon.vst1x2
arm_neon_vst1x3, // llvm.arm.neon.vst1x3
arm_neon_vst1x4, // llvm.arm.neon.vst1x4
arm_neon_vst2, // llvm.arm.neon.vst2
arm_neon_vst2lane, // llvm.arm.neon.vst2lane
arm_neon_vst3, // llvm.arm.neon.vst3
arm_neon_vst3lane, // llvm.arm.neon.vst3lane
arm_neon_vst4, // llvm.arm.neon.vst4
arm_neon_vst4lane, // llvm.arm.neon.vst4lane
arm_neon_vtbl1, // llvm.arm.neon.vtbl1
arm_neon_vtbl2, // llvm.arm.neon.vtbl2
arm_neon_vtbl3, // llvm.arm.neon.vtbl3
arm_neon_vtbl4, // llvm.arm.neon.vtbl4
arm_neon_vtbx1, // llvm.arm.neon.vtbx1
arm_neon_vtbx2, // llvm.arm.neon.vtbx2
arm_neon_vtbx3, // llvm.arm.neon.vtbx3
arm_neon_vtbx4, // llvm.arm.neon.vtbx4
arm_qadd, // llvm.arm.qadd
arm_qadd16, // llvm.arm.qadd16
arm_qadd8, // llvm.arm.qadd8
arm_qasx, // llvm.arm.qasx
arm_qsax, // llvm.arm.qsax
arm_qsub, // llvm.arm.qsub
arm_qsub16, // llvm.arm.qsub16
arm_qsub8, // llvm.arm.qsub8
arm_sadd16, // llvm.arm.sadd16
arm_sadd8, // llvm.arm.sadd8
arm_sasx, // llvm.arm.sasx
arm_sel, // llvm.arm.sel
arm_set_fpscr, // llvm.arm.set.fpscr
arm_shadd16, // llvm.arm.shadd16
arm_shadd8, // llvm.arm.shadd8
arm_shasx, // llvm.arm.shasx
arm_shsax, // llvm.arm.shsax
arm_shsub16, // llvm.arm.shsub16
arm_shsub8, // llvm.arm.shsub8
arm_smlabb, // llvm.arm.smlabb
arm_smlabt, // llvm.arm.smlabt
arm_smlad, // llvm.arm.smlad
arm_smladx, // llvm.arm.smladx
arm_smlald, // llvm.arm.smlald
arm_smlaldx, // llvm.arm.smlaldx
arm_smlatb, // llvm.arm.smlatb
arm_smlatt, // llvm.arm.smlatt
arm_smlawb, // llvm.arm.smlawb
arm_smlawt, // llvm.arm.smlawt
arm_smlsd, // llvm.arm.smlsd
arm_smlsdx, // llvm.arm.smlsdx
arm_smlsld, // llvm.arm.smlsld
arm_smlsldx, // llvm.arm.smlsldx
arm_smuad, // llvm.arm.smuad
arm_smuadx, // llvm.arm.smuadx
arm_smulbb, // llvm.arm.smulbb
arm_smulbt, // llvm.arm.smulbt
arm_smultb, // llvm.arm.smultb
arm_smultt, // llvm.arm.smultt
arm_smulwb, // llvm.arm.smulwb
arm_smulwt, // llvm.arm.smulwt
arm_smusd, // llvm.arm.smusd
arm_smusdx, // llvm.arm.smusdx
arm_space, // llvm.arm.space
arm_ssat, // llvm.arm.ssat
arm_ssat16, // llvm.arm.ssat16
arm_ssax, // llvm.arm.ssax
arm_ssub16, // llvm.arm.ssub16
arm_ssub8, // llvm.arm.ssub8
arm_stc, // llvm.arm.stc
arm_stc2, // llvm.arm.stc2
arm_stc2l, // llvm.arm.stc2l
arm_stcl, // llvm.arm.stcl
arm_stlex, // llvm.arm.stlex
arm_stlexd, // llvm.arm.stlexd
arm_strex, // llvm.arm.strex
arm_strexd, // llvm.arm.strexd
arm_sxtab16, // llvm.arm.sxtab16
arm_sxtb16, // llvm.arm.sxtb16
arm_uadd16, // llvm.arm.uadd16
arm_uadd8, // llvm.arm.uadd8
arm_uasx, // llvm.arm.uasx
arm_uhadd16, // llvm.arm.uhadd16
arm_uhadd8, // llvm.arm.uhadd8
arm_uhasx, // llvm.arm.uhasx
arm_uhsax, // llvm.arm.uhsax
arm_uhsub16, // llvm.arm.uhsub16
arm_uhsub8, // llvm.arm.uhsub8
arm_undefined, // llvm.arm.undefined
arm_uqadd16, // llvm.arm.uqadd16
arm_uqadd8, // llvm.arm.uqadd8
arm_uqasx, // llvm.arm.uqasx
arm_uqsax, // llvm.arm.uqsax
arm_uqsub16, // llvm.arm.uqsub16
arm_uqsub8, // llvm.arm.uqsub8
arm_usad8, // llvm.arm.usad8
arm_usada8, // llvm.arm.usada8
arm_usat, // llvm.arm.usat
arm_usat16, // llvm.arm.usat16
arm_usax, // llvm.arm.usax
arm_usub16, // llvm.arm.usub16
arm_usub8, // llvm.arm.usub8
arm_uxtab16, // llvm.arm.uxtab16
arm_uxtb16, // llvm.arm.uxtb16
arm_vcvtr, // llvm.arm.vcvtr
arm_vcvtru, // llvm.arm.vcvtru
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
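
These generated IDs are consumed through the Intrinsic helper APIs. A minimal sketch, assuming an existing Module M and the split per-target header llvm/IR/IntrinsicsARM.h (illustrative only, not part of the generated file):

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Resolve an ARM intrinsic ID to its dotted name and to a declaration in M.
Function *getQaddDecl(Module &M) {
  StringRef Name = Intrinsic::getName(Intrinsic::arm_qadd); // "llvm.arm.qadd"
  (void)Name;
  // arm_qadd is not overloaded, so no type arguments are needed.
  return Intrinsic::getDeclaration(&M, Intrinsic::arm_qadd);
}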

File diff suppressed because it is too large

View File

@@ -0,0 +1,30 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_BPF_ENUMS_H
#define LLVM_IR_INTRINSIC_BPF_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum BPFIntrinsics : unsigned {
// Enum values for intrinsics
bpf_btf_type_id = 2463, // llvm.bpf.btf.type.id
bpf_compare, // llvm.bpf.compare
bpf_load_byte, // llvm.bpf.load.byte
bpf_load_half, // llvm.bpf.load.half
bpf_load_word, // llvm.bpf.load.word
bpf_passthrough, // llvm.bpf.passthrough
bpf_preserve_enum_value, // llvm.bpf.preserve.enum.value
bpf_preserve_field_info, // llvm.bpf.preserve.field.info
bpf_preserve_type_info, // llvm.bpf.preserve.type.info
bpf_pseudo, // llvm.bpf.pseudo
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif

View File

@@ -0,0 +1,40 @@
//===- IntrinsicsBPF.td - Defines BPF intrinsics -----------*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the BPF-specific intrinsics.
//
//===----------------------------------------------------------------------===//
// Specialized loads from packet
let TargetPrefix = "bpf" in { // All intrinsics start with "llvm.bpf."
def int_bpf_load_byte : GCCBuiltin<"__builtin_bpf_load_byte">,
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
def int_bpf_load_half : GCCBuiltin<"__builtin_bpf_load_half">,
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
def int_bpf_load_word : GCCBuiltin<"__builtin_bpf_load_word">,
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_i64_ty], [IntrReadMem]>;
def int_bpf_pseudo : GCCBuiltin<"__builtin_bpf_pseudo">,
Intrinsic<[llvm_i64_ty], [llvm_i64_ty, llvm_i64_ty]>;
def int_bpf_preserve_field_info : GCCBuiltin<"__builtin_bpf_preserve_field_info">,
Intrinsic<[llvm_i32_ty], [llvm_anyptr_ty, llvm_i64_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_bpf_btf_type_id : GCCBuiltin<"__builtin_bpf_btf_type_id">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_bpf_preserve_type_info : GCCBuiltin<"__builtin_bpf_preserve_type_info">,
Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_bpf_preserve_enum_value : GCCBuiltin<"__builtin_bpf_preserve_enum_value">,
Intrinsic<[llvm_i64_ty], [llvm_i32_ty, llvm_ptr_ty, llvm_i64_ty],
[IntrNoMem]>;
def int_bpf_passthrough : GCCBuiltin<"__builtin_bpf_passthrough">,
Intrinsic<[llvm_any_ty], [llvm_i32_ty, llvm_any_ty], [IntrNoMem]>;
def int_bpf_compare : GCCBuiltin<"__builtin_bpf_compare">,
Intrinsic<[llvm_i1_ty], [llvm_i32_ty, llvm_anyint_ty, llvm_anyint_ty],
[IntrNoMem]>;
}
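
For reference, a hedged sketch of the IR a front end might build for one of these definitions; Builder is assumed to be positioned inside a function of M, and Pkt/Off are assumed pointer/i64 values:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsBPF.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Emits a call to llvm.bpf.load.byte(Pkt, Off). IntrReadMem above means the
// call reads memory but has no other side effects.
Value *emitBpfLoadByte(IRBuilder<> &B, Module &M, Value *Pkt, Value *Off) {
  Function *F = Intrinsic::getDeclaration(&M, Intrinsic::bpf_load_byte);
  return B.CreateCall(F, {Pkt, Off});
}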

File diff suppressed because it is too large

View File

@@ -0,0 +1,407 @@
//===- IntrinsicsHexagon.td - Defines Hexagon intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the Hexagon-specific intrinsics.
//
//===----------------------------------------------------------------------===//
//===----------------------------------------------------------------------===//
// Definitions for all Hexagon intrinsics.
//
// All Hexagon intrinsics start with "llvm.hexagon.".
let TargetPrefix = "hexagon" in {
/// Hexagon_Intrinsic - Base class for the majority of Hexagon intrinsics.
class Hexagon_Intrinsic<string GCCIntSuffix, list<LLVMType> ret_types,
list<LLVMType> param_types,
list<IntrinsicProperty> properties>
: GCCBuiltin<!strconcat("__builtin_", GCCIntSuffix)>,
Intrinsic<ret_types, param_types, properties>;
/// Hexagon_NonGCC_Intrinsic - Base class for bitcode convertible Hexagon
/// intrinsics.
class Hexagon_NonGCC_Intrinsic<list<LLVMType> ret_types,
list<LLVMType> param_types,
list<IntrinsicProperty> properties>
: Intrinsic<ret_types, param_types, properties>;
}
class Hexagon_mem_memmemsi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
llvm_i32_ty],
[IntrArgMemOnly]>;
class Hexagon_mem_memsisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
llvm_i32_ty],
[IntrWriteMem]>;
class Hexagon_mem_memdisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
llvm_i32_ty],
[IntrWriteMem]>;
class Hexagon_mem_memmemsisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_ptr_ty,
llvm_i32_ty, llvm_i32_ty],
[IntrArgMemOnly, ImmArg<ArgIndex<3>>]>;
class Hexagon_mem_memsisisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty],
[IntrWriteMem, ImmArg<ArgIndex<3>>]>;
class Hexagon_mem_memdisisi_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i64_ty,
llvm_i32_ty, llvm_i32_ty],
[IntrWriteMem, ImmArg<ArgIndex<3>>]>;
//
// BUILTIN_INFO_NONCONST(circ_ldd,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldd :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldd">;
//
// BUILTIN_INFO_NONCONST(circ_ldw,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldw :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldw">;
//
// BUILTIN_INFO_NONCONST(circ_ldh,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldh :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldh">;
//
// BUILTIN_INFO_NONCONST(circ_lduh,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_lduh :
Hexagon_mem_memmemsisi_Intrinsic<"circ_lduh">;
//
// BUILTIN_INFO_NONCONST(circ_ldb,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldb :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldb">;
//
// BUILTIN_INFO_NONCONST(circ_ldub,PTR_ftype_PTRPTRSISI,4)
//
def int_hexagon_circ_ldub :
Hexagon_mem_memmemsisi_Intrinsic<"circ_ldub">;
//
// BUILTIN_INFO_NONCONST(circ_std,PTR_ftype_PTRDISISI,4)
//
def int_hexagon_circ_std :
Hexagon_mem_memdisisi_Intrinsic<"circ_std">;
//
// BUILTIN_INFO_NONCONST(circ_stw,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_stw :
Hexagon_mem_memsisisi_Intrinsic<"circ_stw">;
//
// BUILTIN_INFO_NONCONST(circ_sth,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_sth :
Hexagon_mem_memsisisi_Intrinsic<"circ_sth">;
//
// BUILTIN_INFO_NONCONST(circ_sthhi,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_sthhi :
Hexagon_mem_memsisisi_Intrinsic<"circ_sthhi">;
//
// BUILTIN_INFO_NONCONST(circ_stb,PTR_ftype_PTRSISISI,4)
//
def int_hexagon_circ_stb :
Hexagon_mem_memsisisi_Intrinsic<"circ_stb">;
def int_hexagon_prefetch :
Hexagon_Intrinsic<"HEXAGON_prefetch", [], [llvm_ptr_ty], []>;
def llvm_ptr32_ty : LLVMPointerType<llvm_i32_ty>;
def llvm_ptr64_ty : LLVMPointerType<llvm_i64_ty>;
// Mark locked loads as read/write to prevent any accidental reordering.
def int_hexagon_L2_loadw_locked :
Hexagon_Intrinsic<"HEXAGON_L2_loadw_locked", [llvm_i32_ty], [llvm_ptr32_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_L4_loadd_locked :
Hexagon_Intrinsic<"HEXAGON_L4_loadd_locked", [llvm_i64_ty], [llvm_ptr64_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S2_storew_locked :
Hexagon_Intrinsic<"HEXAGON_S2_storew_locked", [llvm_i32_ty],
[llvm_ptr32_ty, llvm_i32_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
def int_hexagon_S4_stored_locked :
Hexagon_Intrinsic<"HEXAGON_S4_stored_locked", [llvm_i32_ty],
[llvm_ptr64_ty, llvm_i64_ty], [IntrArgMemOnly, NoCapture<ArgIndex<0>>]>;
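A minimal sketch of the LL/SC retry loop these locked intrinsics exist for, assuming Addr already has the pointer type the intrinsics expect and that a nonzero result from storew_locked signals success (illustrative only, not the in-tree lowering):

#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// do { v = loadw_locked(p); ok = storew_locked(p, v + 1); } while (!ok);
Value *emitAtomicIncrement(IRBuilder<> &B, Module &M, Value *Addr) {
  Function *Fn = B.GetInsertBlock()->getParent();
  LLVMContext &Ctx = M.getContext();
  BasicBlock *Loop = BasicBlock::Create(Ctx, "llsc.loop", Fn);
  BasicBlock *Done = BasicBlock::Create(Ctx, "llsc.done", Fn);
  B.CreateBr(Loop);

  B.SetInsertPoint(Loop);
  Function *LL = Intrinsic::getDeclaration(&M, Intrinsic::hexagon_L2_loadw_locked);
  Function *SC = Intrinsic::getDeclaration(&M, Intrinsic::hexagon_S2_storew_locked);
  Value *Old = B.CreateCall(LL, {Addr});
  Value *New = B.CreateAdd(Old, B.getInt32(1));
  Value *Ok = B.CreateCall(SC, {Addr, New}); // assumed nonzero on success
  B.CreateCondBr(B.CreateICmpEQ(Ok, B.getInt32(0)), Loop, Done);

  B.SetInsertPoint(Done);
  return Old;
}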
def int_hexagon_vmemcpy : Hexagon_Intrinsic<"hexagon_vmemcpy",
[], [llvm_ptr_ty, llvm_ptr_ty, llvm_i32_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>, WriteOnly<ArgIndex<0>>, ReadOnly<ArgIndex<1>>]>;
def int_hexagon_vmemset : Hexagon_Intrinsic<"hexagon_vmemset",
[], [llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<0>>, WriteOnly<ArgIndex<0>>]>;
multiclass Hexagon_custom_circ_ld_Intrinsic<LLVMType ElTy> {
def NAME#_pci : Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<2>>]>;
}
defm int_hexagon_L2_loadrub : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrb : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadruh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrh : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadri : Hexagon_custom_circ_ld_Intrinsic<llvm_i32_ty>;
defm int_hexagon_L2_loadrd : Hexagon_custom_circ_ld_Intrinsic<llvm_i64_ty>;
multiclass Hexagon_custom_circ_st_Intrinsic<LLVMType ElTy> {
def NAME#_pci : Hexagon_NonGCC_Intrinsic<
[llvm_ptr_ty],
[llvm_ptr_ty, llvm_i32_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<4>>]>;
def NAME#_pcr : Hexagon_NonGCC_Intrinsic<
[llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty, ElTy, llvm_ptr_ty],
[IntrArgMemOnly, NoCapture<ArgIndex<3>>]>;
}
defm int_hexagon_S2_storerb : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerh : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerf : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storeri : Hexagon_custom_circ_st_Intrinsic<llvm_i32_ty>;
defm int_hexagon_S2_storerd : Hexagon_custom_circ_st_Intrinsic<llvm_i64_ty>;
// The front end emits the intrinsic call with only two arguments. The third
// argument of the builtin is already used by the front end to write the
// updated pointer back to memory by generating a store (a sketch of this
// lowering follows the load defs below).
class Hexagon_custom_brev_ld_Intrinsic<LLVMType ElTy>
: Hexagon_NonGCC_Intrinsic<
[ElTy, llvm_ptr_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrReadMem]>;
def int_hexagon_L2_loadrub_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrb_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadruh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrh_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadri_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i32_ty>;
def int_hexagon_L2_loadrd_pbr : Hexagon_custom_brev_ld_Intrinsic<llvm_i64_ty>;
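A sketch of the lowering described in the comment above, with PtrSlot standing in for the user-visible pointer variable (the helper names here are illustrative):

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// The intrinsic returns {loaded value, updated pointer}; the builtin's third
// argument becomes an ordinary store of the second result.
Value *emitBrevLoad(IRBuilder<> &B, Module &M, Value *Base, Value *Mod,
                    Value *PtrSlot) {
  Function *F = Intrinsic::getDeclaration(&M, Intrinsic::hexagon_L2_loadri_pbr);
  Value *Pair = B.CreateCall(F, {Base, Mod});
  Value *Loaded = B.CreateExtractValue(Pair, 0);
  B.CreateStore(B.CreateExtractValue(Pair, 1), PtrSlot);
  return Loaded;
}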
def int_hexagon_S2_storerb_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stb">;
def int_hexagon_S2_storerh_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sth">;
def int_hexagon_S2_storerf_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_sthhi">;
def int_hexagon_S2_storeri_pbr : Hexagon_mem_memsisi_Intrinsic<"brev_stw">;
def int_hexagon_S2_storerd_pbr : Hexagon_mem_memdisi_Intrinsic<"brev_std">;
// tag : V6_vrmpybub_rtt
class Hexagon_v32i32_v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v16i32_ty,llvm_i64_ty],
[IntrNoMem]>;
// tag : V6_vrmpybub_rtt_128B
class Hexagon_v64i32_v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v32i32_ty,llvm_i64_ty],
[IntrNoMem]>;
// tag : V6_vrmpybub_rtt_acc
class Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v32i32_ty], [llvm_v32i32_ty,llvm_v16i32_ty,llvm_i64_ty],
[IntrNoMem]>;
// tag : V6_vrmpybub_rtt_acc_128B
class Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<string GCCIntSuffix>
: Hexagon_Intrinsic<GCCIntSuffix,
[llvm_v64i32_ty], [llvm_v64i32_ty,llvm_v32i32_ty,llvm_i64_ty],
[IntrNoMem]>;
def int_hexagon_V6_vrmpybub_rtt :
Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt">;
def int_hexagon_V6_vrmpybub_rtt_128B :
Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_128B">;
def int_hexagon_V6_vrmpybub_rtt_acc :
Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc">;
def int_hexagon_V6_vrmpybub_rtt_acc_128B :
Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpybub_rtt_acc_128B">;
def int_hexagon_V6_vrmpyub_rtt :
Hexagon_v32i32_v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt">;
def int_hexagon_V6_vrmpyub_rtt_128B :
Hexagon_v64i32_v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_128B">;
def int_hexagon_V6_vrmpyub_rtt_acc :
Hexagon_v32i32_v32i32v16i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc">;
def int_hexagon_V6_vrmpyub_rtt_acc_128B :
Hexagon_v64i32_v64i32v32i32i64_rtt_Intrinsic<"HEXAGON_V6_vrmpyub_rtt_acc_128B">;
// HVX conditional loads/stores
class Hexagon_pred_vload_imm<LLVMType ValTy>
: Hexagon_NonGCC_Intrinsic<
[ValTy],
[llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>]>;
class Hexagon_pred_vload_imm_64B: Hexagon_pred_vload_imm<llvm_v16i32_ty>;
class Hexagon_pred_vload_imm_128B: Hexagon_pred_vload_imm<llvm_v32i32_ty>;
def int_hexagon_V6_vL32b_pred_ai: Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_npred_ai: Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_nt_pred_ai: Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_nt_npred_ai: Hexagon_pred_vload_imm_64B;
def int_hexagon_V6_vL32b_pred_ai_128B: Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_npred_ai_128B: Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_nt_pred_ai_128B: Hexagon_pred_vload_imm_128B;
def int_hexagon_V6_vL32b_nt_npred_ai_128B: Hexagon_pred_vload_imm_128B;
class Hexagon_pred_vload_upd<LLVMType ValTy, bit TakesImm>
: Hexagon_NonGCC_Intrinsic<
[ValTy, LLVMPointerType<ValTy>],
[llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty],
!if(TakesImm,
[IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>],
[IntrReadMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>])>;
class Hexagon_pred_vload_upd_64B<bit TakesImm>
: Hexagon_pred_vload_upd<llvm_v16i32_ty, TakesImm>;
class Hexagon_pred_vload_upd_128B<bit TakesImm>
: Hexagon_pred_vload_upd<llvm_v32i32_ty, TakesImm>;
def int_hexagon_V6_vL32b_pred_pi: Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_npred_pi: Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_nt_pred_pi: Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_nt_npred_pi: Hexagon_pred_vload_upd_64B<1>;
def int_hexagon_V6_vL32b_pred_pi_128B: Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_npred_pi_128B: Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_nt_pred_pi_128B: Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_nt_npred_pi_128B: Hexagon_pred_vload_upd_128B<1>;
def int_hexagon_V6_vL32b_pred_ppu: Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_npred_ppu: Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_nt_pred_ppu: Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_nt_npred_ppu: Hexagon_pred_vload_upd_64B<0>;
def int_hexagon_V6_vL32b_pred_ppu_128B: Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_npred_ppu_128B: Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_nt_pred_ppu_128B: Hexagon_pred_vload_upd_128B<0>;
def int_hexagon_V6_vL32b_nt_npred_ppu_128B: Hexagon_pred_vload_upd_128B<0>;
class Hexagon_pred_vstore_imm<LLVMType ValTy>
: Hexagon_NonGCC_Intrinsic<
[],
[llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
[IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>]>;
class Hexagon_pred_vstore_imm_64B: Hexagon_pred_vstore_imm<llvm_v16i32_ty>;
class Hexagon_pred_vstore_imm_128B: Hexagon_pred_vstore_imm<llvm_v32i32_ty>;
def int_hexagon_V6_vS32b_pred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_npred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32Ub_pred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32Ub_npred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_nt_pred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_nt_npred_ai: Hexagon_pred_vstore_imm_64B;
def int_hexagon_V6_vS32b_pred_ai_128B: Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_npred_ai_128B: Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32Ub_pred_ai_128B: Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32Ub_npred_ai_128B: Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_nt_pred_ai_128B: Hexagon_pred_vstore_imm_128B;
def int_hexagon_V6_vS32b_nt_npred_ai_128B: Hexagon_pred_vstore_imm_128B;
class Hexagon_pred_vstore_upd<LLVMType ValTy, bit TakesImm>
: Hexagon_NonGCC_Intrinsic<
[LLVMPointerType<ValTy>],
[llvm_i1_ty, LLVMPointerType<ValTy>, llvm_i32_ty, ValTy],
!if(TakesImm,
[IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>,
ImmArg<ArgIndex<2>>],
[IntrWriteMem, IntrArgMemOnly, NoCapture<ArgIndex<1>>])>;
class Hexagon_pred_vstore_upd_64B<bit TakesImm>
: Hexagon_pred_vstore_upd<llvm_v16i32_ty, TakesImm>;
class Hexagon_pred_vstore_upd_128B<bit TakesImm>
: Hexagon_pred_vstore_upd<llvm_v32i32_ty, TakesImm>;
def int_hexagon_V6_vS32b_pred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_npred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32Ub_pred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32Ub_npred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_nt_pred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_nt_npred_pi: Hexagon_pred_vstore_upd_64B<1>;
def int_hexagon_V6_vS32b_pred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_npred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32Ub_pred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32Ub_npred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_nt_pred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_nt_npred_pi_128B: Hexagon_pred_vstore_upd_128B<1>;
def int_hexagon_V6_vS32b_pred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_npred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32Ub_pred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32Ub_npred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_nt_pred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_nt_npred_ppu: Hexagon_pred_vstore_upd_64B<0>;
def int_hexagon_V6_vS32b_pred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_npred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32Ub_pred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32Ub_npred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_nt_pred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
def int_hexagon_V6_vS32b_nt_npred_ppu_128B: Hexagon_pred_vstore_upd_128B<0>;
// HVX Vector predicate casts.
// These intrinsics do not emit (nor do they correspond to) any instructions;
// they are no-ops.
def int_hexagon_V6_pred_typecast :
Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
def int_hexagon_V6_pred_typecast_128B :
Hexagon_NonGCC_Intrinsic<[llvm_anyvector_ty], [llvm_anyvector_ty], [IntrNoMem]>;
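Both the result and operand types of these casts are overloaded, so both must be supplied when materializing a declaration. A minimal sketch, assuming V already carries a 64-byte HVX vector type:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"

using namespace llvm;

Value *castPred(IRBuilder<> &B, Module &M, Value *V, Type *DstTy) {
  Function *F = Intrinsic::getDeclaration(
      &M, Intrinsic::hexagon_V6_pred_typecast, {DstTy, V->getType()});
  // Per the comment above, this call lowers to no instructions at all.
  return B.CreateCall(F, {V});
}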
// Masked vector stores
//
// These are all deprecated, the intrinsics matching instruction names
// should be used instead, e.g. int_hexagon_V6_vS32b_qpred_ai, etc.
class Hexagon_custom_vms_Intrinsic
: Hexagon_NonGCC_Intrinsic<
[], [llvm_v64i1_ty,llvm_ptr_ty,llvm_v16i32_ty], [IntrWriteMem]>;
class Hexagon_custom_vms_Intrinsic_128B
: Hexagon_NonGCC_Intrinsic<
[], [llvm_v128i1_ty,llvm_ptr_ty,llvm_v32i32_ty], [IntrWriteMem]>;
def int_hexagon_V6_vmaskedstoreq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorenq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorentq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstorentnq: Hexagon_custom_vms_Intrinsic;
def int_hexagon_V6_vmaskedstoreq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorenq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorentq_128B: Hexagon_custom_vms_Intrinsic_128B;
def int_hexagon_V6_vmaskedstorentnq_128B: Hexagon_custom_vms_Intrinsic_128B;
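A sketch of the suggested replacement, assuming the instruction-named intrinsic from IntrinsicsHexagonDep.td (included just below) keeps the same (mask, address, value) operand order as the deprecated forms:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsHexagon.h"
#include "llvm/IR/Module.h"

using namespace llvm;

// Q: <64 x i1> mask, Ptr: destination, V: <16 x i32> payload (64-byte mode).
void emitMaskedStore(IRBuilder<> &B, Module &M, Value *Q, Value *Ptr, Value *V) {
  Function *F =
      Intrinsic::getDeclaration(&M, Intrinsic::hexagon_V6_vS32b_qpred_ai);
  B.CreateCall(F, {Q, Ptr, V});
}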
include "llvm/IR/IntrinsicsHexagonDep.td"

File diff suppressed because it is too large

View File

@@ -0,0 +1,691 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_MIPS_ENUMS_H
#define LLVM_IR_INTRINSIC_MIPS_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum MIPSIntrinsics : unsigned {
// Enum values for intrinsics
mips_absq_s_ph = 4441, // llvm.mips.absq.s.ph
mips_absq_s_qb, // llvm.mips.absq.s.qb
mips_absq_s_w, // llvm.mips.absq.s.w
mips_add_a_b, // llvm.mips.add.a.b
mips_add_a_d, // llvm.mips.add.a.d
mips_add_a_h, // llvm.mips.add.a.h
mips_add_a_w, // llvm.mips.add.a.w
mips_addq_ph, // llvm.mips.addq.ph
mips_addq_s_ph, // llvm.mips.addq.s.ph
mips_addq_s_w, // llvm.mips.addq.s.w
mips_addqh_ph, // llvm.mips.addqh.ph
mips_addqh_r_ph, // llvm.mips.addqh.r.ph
mips_addqh_r_w, // llvm.mips.addqh.r.w
mips_addqh_w, // llvm.mips.addqh.w
mips_adds_a_b, // llvm.mips.adds.a.b
mips_adds_a_d, // llvm.mips.adds.a.d
mips_adds_a_h, // llvm.mips.adds.a.h
mips_adds_a_w, // llvm.mips.adds.a.w
mips_adds_s_b, // llvm.mips.adds.s.b
mips_adds_s_d, // llvm.mips.adds.s.d
mips_adds_s_h, // llvm.mips.adds.s.h
mips_adds_s_w, // llvm.mips.adds.s.w
mips_adds_u_b, // llvm.mips.adds.u.b
mips_adds_u_d, // llvm.mips.adds.u.d
mips_adds_u_h, // llvm.mips.adds.u.h
mips_adds_u_w, // llvm.mips.adds.u.w
mips_addsc, // llvm.mips.addsc
mips_addu_ph, // llvm.mips.addu.ph
mips_addu_qb, // llvm.mips.addu.qb
mips_addu_s_ph, // llvm.mips.addu.s.ph
mips_addu_s_qb, // llvm.mips.addu.s.qb
mips_adduh_qb, // llvm.mips.adduh.qb
mips_adduh_r_qb, // llvm.mips.adduh.r.qb
mips_addv_b, // llvm.mips.addv.b
mips_addv_d, // llvm.mips.addv.d
mips_addv_h, // llvm.mips.addv.h
mips_addv_w, // llvm.mips.addv.w
mips_addvi_b, // llvm.mips.addvi.b
mips_addvi_d, // llvm.mips.addvi.d
mips_addvi_h, // llvm.mips.addvi.h
mips_addvi_w, // llvm.mips.addvi.w
mips_addwc, // llvm.mips.addwc
mips_and_v, // llvm.mips.and.v
mips_andi_b, // llvm.mips.andi.b
mips_append, // llvm.mips.append
mips_asub_s_b, // llvm.mips.asub.s.b
mips_asub_s_d, // llvm.mips.asub.s.d
mips_asub_s_h, // llvm.mips.asub.s.h
mips_asub_s_w, // llvm.mips.asub.s.w
mips_asub_u_b, // llvm.mips.asub.u.b
mips_asub_u_d, // llvm.mips.asub.u.d
mips_asub_u_h, // llvm.mips.asub.u.h
mips_asub_u_w, // llvm.mips.asub.u.w
mips_ave_s_b, // llvm.mips.ave.s.b
mips_ave_s_d, // llvm.mips.ave.s.d
mips_ave_s_h, // llvm.mips.ave.s.h
mips_ave_s_w, // llvm.mips.ave.s.w
mips_ave_u_b, // llvm.mips.ave.u.b
mips_ave_u_d, // llvm.mips.ave.u.d
mips_ave_u_h, // llvm.mips.ave.u.h
mips_ave_u_w, // llvm.mips.ave.u.w
mips_aver_s_b, // llvm.mips.aver.s.b
mips_aver_s_d, // llvm.mips.aver.s.d
mips_aver_s_h, // llvm.mips.aver.s.h
mips_aver_s_w, // llvm.mips.aver.s.w
mips_aver_u_b, // llvm.mips.aver.u.b
mips_aver_u_d, // llvm.mips.aver.u.d
mips_aver_u_h, // llvm.mips.aver.u.h
mips_aver_u_w, // llvm.mips.aver.u.w
mips_balign, // llvm.mips.balign
mips_bclr_b, // llvm.mips.bclr.b
mips_bclr_d, // llvm.mips.bclr.d
mips_bclr_h, // llvm.mips.bclr.h
mips_bclr_w, // llvm.mips.bclr.w
mips_bclri_b, // llvm.mips.bclri.b
mips_bclri_d, // llvm.mips.bclri.d
mips_bclri_h, // llvm.mips.bclri.h
mips_bclri_w, // llvm.mips.bclri.w
mips_binsl_b, // llvm.mips.binsl.b
mips_binsl_d, // llvm.mips.binsl.d
mips_binsl_h, // llvm.mips.binsl.h
mips_binsl_w, // llvm.mips.binsl.w
mips_binsli_b, // llvm.mips.binsli.b
mips_binsli_d, // llvm.mips.binsli.d
mips_binsli_h, // llvm.mips.binsli.h
mips_binsli_w, // llvm.mips.binsli.w
mips_binsr_b, // llvm.mips.binsr.b
mips_binsr_d, // llvm.mips.binsr.d
mips_binsr_h, // llvm.mips.binsr.h
mips_binsr_w, // llvm.mips.binsr.w
mips_binsri_b, // llvm.mips.binsri.b
mips_binsri_d, // llvm.mips.binsri.d
mips_binsri_h, // llvm.mips.binsri.h
mips_binsri_w, // llvm.mips.binsri.w
mips_bitrev, // llvm.mips.bitrev
mips_bmnz_v, // llvm.mips.bmnz.v
mips_bmnzi_b, // llvm.mips.bmnzi.b
mips_bmz_v, // llvm.mips.bmz.v
mips_bmzi_b, // llvm.mips.bmzi.b
mips_bneg_b, // llvm.mips.bneg.b
mips_bneg_d, // llvm.mips.bneg.d
mips_bneg_h, // llvm.mips.bneg.h
mips_bneg_w, // llvm.mips.bneg.w
mips_bnegi_b, // llvm.mips.bnegi.b
mips_bnegi_d, // llvm.mips.bnegi.d
mips_bnegi_h, // llvm.mips.bnegi.h
mips_bnegi_w, // llvm.mips.bnegi.w
mips_bnz_b, // llvm.mips.bnz.b
mips_bnz_d, // llvm.mips.bnz.d
mips_bnz_h, // llvm.mips.bnz.h
mips_bnz_v, // llvm.mips.bnz.v
mips_bnz_w, // llvm.mips.bnz.w
mips_bposge32, // llvm.mips.bposge32
mips_bsel_v, // llvm.mips.bsel.v
mips_bseli_b, // llvm.mips.bseli.b
mips_bset_b, // llvm.mips.bset.b
mips_bset_d, // llvm.mips.bset.d
mips_bset_h, // llvm.mips.bset.h
mips_bset_w, // llvm.mips.bset.w
mips_bseti_b, // llvm.mips.bseti.b
mips_bseti_d, // llvm.mips.bseti.d
mips_bseti_h, // llvm.mips.bseti.h
mips_bseti_w, // llvm.mips.bseti.w
mips_bz_b, // llvm.mips.bz.b
mips_bz_d, // llvm.mips.bz.d
mips_bz_h, // llvm.mips.bz.h
mips_bz_v, // llvm.mips.bz.v
mips_bz_w, // llvm.mips.bz.w
mips_ceq_b, // llvm.mips.ceq.b
mips_ceq_d, // llvm.mips.ceq.d
mips_ceq_h, // llvm.mips.ceq.h
mips_ceq_w, // llvm.mips.ceq.w
mips_ceqi_b, // llvm.mips.ceqi.b
mips_ceqi_d, // llvm.mips.ceqi.d
mips_ceqi_h, // llvm.mips.ceqi.h
mips_ceqi_w, // llvm.mips.ceqi.w
mips_cfcmsa, // llvm.mips.cfcmsa
mips_cle_s_b, // llvm.mips.cle.s.b
mips_cle_s_d, // llvm.mips.cle.s.d
mips_cle_s_h, // llvm.mips.cle.s.h
mips_cle_s_w, // llvm.mips.cle.s.w
mips_cle_u_b, // llvm.mips.cle.u.b
mips_cle_u_d, // llvm.mips.cle.u.d
mips_cle_u_h, // llvm.mips.cle.u.h
mips_cle_u_w, // llvm.mips.cle.u.w
mips_clei_s_b, // llvm.mips.clei.s.b
mips_clei_s_d, // llvm.mips.clei.s.d
mips_clei_s_h, // llvm.mips.clei.s.h
mips_clei_s_w, // llvm.mips.clei.s.w
mips_clei_u_b, // llvm.mips.clei.u.b
mips_clei_u_d, // llvm.mips.clei.u.d
mips_clei_u_h, // llvm.mips.clei.u.h
mips_clei_u_w, // llvm.mips.clei.u.w
mips_clt_s_b, // llvm.mips.clt.s.b
mips_clt_s_d, // llvm.mips.clt.s.d
mips_clt_s_h, // llvm.mips.clt.s.h
mips_clt_s_w, // llvm.mips.clt.s.w
mips_clt_u_b, // llvm.mips.clt.u.b
mips_clt_u_d, // llvm.mips.clt.u.d
mips_clt_u_h, // llvm.mips.clt.u.h
mips_clt_u_w, // llvm.mips.clt.u.w
mips_clti_s_b, // llvm.mips.clti.s.b
mips_clti_s_d, // llvm.mips.clti.s.d
mips_clti_s_h, // llvm.mips.clti.s.h
mips_clti_s_w, // llvm.mips.clti.s.w
mips_clti_u_b, // llvm.mips.clti.u.b
mips_clti_u_d, // llvm.mips.clti.u.d
mips_clti_u_h, // llvm.mips.clti.u.h
mips_clti_u_w, // llvm.mips.clti.u.w
mips_cmp_eq_ph, // llvm.mips.cmp.eq.ph
mips_cmp_le_ph, // llvm.mips.cmp.le.ph
mips_cmp_lt_ph, // llvm.mips.cmp.lt.ph
mips_cmpgdu_eq_qb, // llvm.mips.cmpgdu.eq.qb
mips_cmpgdu_le_qb, // llvm.mips.cmpgdu.le.qb
mips_cmpgdu_lt_qb, // llvm.mips.cmpgdu.lt.qb
mips_cmpgu_eq_qb, // llvm.mips.cmpgu.eq.qb
mips_cmpgu_le_qb, // llvm.mips.cmpgu.le.qb
mips_cmpgu_lt_qb, // llvm.mips.cmpgu.lt.qb
mips_cmpu_eq_qb, // llvm.mips.cmpu.eq.qb
mips_cmpu_le_qb, // llvm.mips.cmpu.le.qb
mips_cmpu_lt_qb, // llvm.mips.cmpu.lt.qb
mips_copy_s_b, // llvm.mips.copy.s.b
mips_copy_s_d, // llvm.mips.copy.s.d
mips_copy_s_h, // llvm.mips.copy.s.h
mips_copy_s_w, // llvm.mips.copy.s.w
mips_copy_u_b, // llvm.mips.copy.u.b
mips_copy_u_d, // llvm.mips.copy.u.d
mips_copy_u_h, // llvm.mips.copy.u.h
mips_copy_u_w, // llvm.mips.copy.u.w
mips_ctcmsa, // llvm.mips.ctcmsa
mips_div_s_b, // llvm.mips.div.s.b
mips_div_s_d, // llvm.mips.div.s.d
mips_div_s_h, // llvm.mips.div.s.h
mips_div_s_w, // llvm.mips.div.s.w
mips_div_u_b, // llvm.mips.div.u.b
mips_div_u_d, // llvm.mips.div.u.d
mips_div_u_h, // llvm.mips.div.u.h
mips_div_u_w, // llvm.mips.div.u.w
mips_dlsa, // llvm.mips.dlsa
mips_dotp_s_d, // llvm.mips.dotp.s.d
mips_dotp_s_h, // llvm.mips.dotp.s.h
mips_dotp_s_w, // llvm.mips.dotp.s.w
mips_dotp_u_d, // llvm.mips.dotp.u.d
mips_dotp_u_h, // llvm.mips.dotp.u.h
mips_dotp_u_w, // llvm.mips.dotp.u.w
mips_dpa_w_ph, // llvm.mips.dpa.w.ph
mips_dpadd_s_d, // llvm.mips.dpadd.s.d
mips_dpadd_s_h, // llvm.mips.dpadd.s.h
mips_dpadd_s_w, // llvm.mips.dpadd.s.w
mips_dpadd_u_d, // llvm.mips.dpadd.u.d
mips_dpadd_u_h, // llvm.mips.dpadd.u.h
mips_dpadd_u_w, // llvm.mips.dpadd.u.w
mips_dpaq_s_w_ph, // llvm.mips.dpaq.s.w.ph
mips_dpaq_sa_l_w, // llvm.mips.dpaq.sa.l.w
mips_dpaqx_s_w_ph, // llvm.mips.dpaqx.s.w.ph
mips_dpaqx_sa_w_ph, // llvm.mips.dpaqx.sa.w.ph
mips_dpau_h_qbl, // llvm.mips.dpau.h.qbl
mips_dpau_h_qbr, // llvm.mips.dpau.h.qbr
mips_dpax_w_ph, // llvm.mips.dpax.w.ph
mips_dps_w_ph, // llvm.mips.dps.w.ph
mips_dpsq_s_w_ph, // llvm.mips.dpsq.s.w.ph
mips_dpsq_sa_l_w, // llvm.mips.dpsq.sa.l.w
mips_dpsqx_s_w_ph, // llvm.mips.dpsqx.s.w.ph
mips_dpsqx_sa_w_ph, // llvm.mips.dpsqx.sa.w.ph
mips_dpsu_h_qbl, // llvm.mips.dpsu.h.qbl
mips_dpsu_h_qbr, // llvm.mips.dpsu.h.qbr
mips_dpsub_s_d, // llvm.mips.dpsub.s.d
mips_dpsub_s_h, // llvm.mips.dpsub.s.h
mips_dpsub_s_w, // llvm.mips.dpsub.s.w
mips_dpsub_u_d, // llvm.mips.dpsub.u.d
mips_dpsub_u_h, // llvm.mips.dpsub.u.h
mips_dpsub_u_w, // llvm.mips.dpsub.u.w
mips_dpsx_w_ph, // llvm.mips.dpsx.w.ph
mips_extp, // llvm.mips.extp
mips_extpdp, // llvm.mips.extpdp
mips_extr_r_w, // llvm.mips.extr.r.w
mips_extr_rs_w, // llvm.mips.extr.rs.w
mips_extr_s_h, // llvm.mips.extr.s.h
mips_extr_w, // llvm.mips.extr.w
mips_fadd_d, // llvm.mips.fadd.d
mips_fadd_w, // llvm.mips.fadd.w
mips_fcaf_d, // llvm.mips.fcaf.d
mips_fcaf_w, // llvm.mips.fcaf.w
mips_fceq_d, // llvm.mips.fceq.d
mips_fceq_w, // llvm.mips.fceq.w
mips_fclass_d, // llvm.mips.fclass.d
mips_fclass_w, // llvm.mips.fclass.w
mips_fcle_d, // llvm.mips.fcle.d
mips_fcle_w, // llvm.mips.fcle.w
mips_fclt_d, // llvm.mips.fclt.d
mips_fclt_w, // llvm.mips.fclt.w
mips_fcne_d, // llvm.mips.fcne.d
mips_fcne_w, // llvm.mips.fcne.w
mips_fcor_d, // llvm.mips.fcor.d
mips_fcor_w, // llvm.mips.fcor.w
mips_fcueq_d, // llvm.mips.fcueq.d
mips_fcueq_w, // llvm.mips.fcueq.w
mips_fcule_d, // llvm.mips.fcule.d
mips_fcule_w, // llvm.mips.fcule.w
mips_fcult_d, // llvm.mips.fcult.d
mips_fcult_w, // llvm.mips.fcult.w
mips_fcun_d, // llvm.mips.fcun.d
mips_fcun_w, // llvm.mips.fcun.w
mips_fcune_d, // llvm.mips.fcune.d
mips_fcune_w, // llvm.mips.fcune.w
mips_fdiv_d, // llvm.mips.fdiv.d
mips_fdiv_w, // llvm.mips.fdiv.w
mips_fexdo_h, // llvm.mips.fexdo.h
mips_fexdo_w, // llvm.mips.fexdo.w
mips_fexp2_d, // llvm.mips.fexp2.d
mips_fexp2_w, // llvm.mips.fexp2.w
mips_fexupl_d, // llvm.mips.fexupl.d
mips_fexupl_w, // llvm.mips.fexupl.w
mips_fexupr_d, // llvm.mips.fexupr.d
mips_fexupr_w, // llvm.mips.fexupr.w
mips_ffint_s_d, // llvm.mips.ffint.s.d
mips_ffint_s_w, // llvm.mips.ffint.s.w
mips_ffint_u_d, // llvm.mips.ffint.u.d
mips_ffint_u_w, // llvm.mips.ffint.u.w
mips_ffql_d, // llvm.mips.ffql.d
mips_ffql_w, // llvm.mips.ffql.w
mips_ffqr_d, // llvm.mips.ffqr.d
mips_ffqr_w, // llvm.mips.ffqr.w
mips_fill_b, // llvm.mips.fill.b
mips_fill_d, // llvm.mips.fill.d
mips_fill_h, // llvm.mips.fill.h
mips_fill_w, // llvm.mips.fill.w
mips_flog2_d, // llvm.mips.flog2.d
mips_flog2_w, // llvm.mips.flog2.w
mips_fmadd_d, // llvm.mips.fmadd.d
mips_fmadd_w, // llvm.mips.fmadd.w
mips_fmax_a_d, // llvm.mips.fmax.a.d
mips_fmax_a_w, // llvm.mips.fmax.a.w
mips_fmax_d, // llvm.mips.fmax.d
mips_fmax_w, // llvm.mips.fmax.w
mips_fmin_a_d, // llvm.mips.fmin.a.d
mips_fmin_a_w, // llvm.mips.fmin.a.w
mips_fmin_d, // llvm.mips.fmin.d
mips_fmin_w, // llvm.mips.fmin.w
mips_fmsub_d, // llvm.mips.fmsub.d
mips_fmsub_w, // llvm.mips.fmsub.w
mips_fmul_d, // llvm.mips.fmul.d
mips_fmul_w, // llvm.mips.fmul.w
mips_frcp_d, // llvm.mips.frcp.d
mips_frcp_w, // llvm.mips.frcp.w
mips_frint_d, // llvm.mips.frint.d
mips_frint_w, // llvm.mips.frint.w
mips_frsqrt_d, // llvm.mips.frsqrt.d
mips_frsqrt_w, // llvm.mips.frsqrt.w
mips_fsaf_d, // llvm.mips.fsaf.d
mips_fsaf_w, // llvm.mips.fsaf.w
mips_fseq_d, // llvm.mips.fseq.d
mips_fseq_w, // llvm.mips.fseq.w
mips_fsle_d, // llvm.mips.fsle.d
mips_fsle_w, // llvm.mips.fsle.w
mips_fslt_d, // llvm.mips.fslt.d
mips_fslt_w, // llvm.mips.fslt.w
mips_fsne_d, // llvm.mips.fsne.d
mips_fsne_w, // llvm.mips.fsne.w
mips_fsor_d, // llvm.mips.fsor.d
mips_fsor_w, // llvm.mips.fsor.w
mips_fsqrt_d, // llvm.mips.fsqrt.d
mips_fsqrt_w, // llvm.mips.fsqrt.w
mips_fsub_d, // llvm.mips.fsub.d
mips_fsub_w, // llvm.mips.fsub.w
mips_fsueq_d, // llvm.mips.fsueq.d
mips_fsueq_w, // llvm.mips.fsueq.w
mips_fsule_d, // llvm.mips.fsule.d
mips_fsule_w, // llvm.mips.fsule.w
mips_fsult_d, // llvm.mips.fsult.d
mips_fsult_w, // llvm.mips.fsult.w
mips_fsun_d, // llvm.mips.fsun.d
mips_fsun_w, // llvm.mips.fsun.w
mips_fsune_d, // llvm.mips.fsune.d
mips_fsune_w, // llvm.mips.fsune.w
mips_ftint_s_d, // llvm.mips.ftint.s.d
mips_ftint_s_w, // llvm.mips.ftint.s.w
mips_ftint_u_d, // llvm.mips.ftint.u.d
mips_ftint_u_w, // llvm.mips.ftint.u.w
mips_ftq_h, // llvm.mips.ftq.h
mips_ftq_w, // llvm.mips.ftq.w
mips_ftrunc_s_d, // llvm.mips.ftrunc.s.d
mips_ftrunc_s_w, // llvm.mips.ftrunc.s.w
mips_ftrunc_u_d, // llvm.mips.ftrunc.u.d
mips_ftrunc_u_w, // llvm.mips.ftrunc.u.w
mips_hadd_s_d, // llvm.mips.hadd.s.d
mips_hadd_s_h, // llvm.mips.hadd.s.h
mips_hadd_s_w, // llvm.mips.hadd.s.w
mips_hadd_u_d, // llvm.mips.hadd.u.d
mips_hadd_u_h, // llvm.mips.hadd.u.h
mips_hadd_u_w, // llvm.mips.hadd.u.w
mips_hsub_s_d, // llvm.mips.hsub.s.d
mips_hsub_s_h, // llvm.mips.hsub.s.h
mips_hsub_s_w, // llvm.mips.hsub.s.w
mips_hsub_u_d, // llvm.mips.hsub.u.d
mips_hsub_u_h, // llvm.mips.hsub.u.h
mips_hsub_u_w, // llvm.mips.hsub.u.w
mips_ilvev_b, // llvm.mips.ilvev.b
mips_ilvev_d, // llvm.mips.ilvev.d
mips_ilvev_h, // llvm.mips.ilvev.h
mips_ilvev_w, // llvm.mips.ilvev.w
mips_ilvl_b, // llvm.mips.ilvl.b
mips_ilvl_d, // llvm.mips.ilvl.d
mips_ilvl_h, // llvm.mips.ilvl.h
mips_ilvl_w, // llvm.mips.ilvl.w
mips_ilvod_b, // llvm.mips.ilvod.b
mips_ilvod_d, // llvm.mips.ilvod.d
mips_ilvod_h, // llvm.mips.ilvod.h
mips_ilvod_w, // llvm.mips.ilvod.w
mips_ilvr_b, // llvm.mips.ilvr.b
mips_ilvr_d, // llvm.mips.ilvr.d
mips_ilvr_h, // llvm.mips.ilvr.h
mips_ilvr_w, // llvm.mips.ilvr.w
mips_insert_b, // llvm.mips.insert.b
mips_insert_d, // llvm.mips.insert.d
mips_insert_h, // llvm.mips.insert.h
mips_insert_w, // llvm.mips.insert.w
mips_insv, // llvm.mips.insv
mips_insve_b, // llvm.mips.insve.b
mips_insve_d, // llvm.mips.insve.d
mips_insve_h, // llvm.mips.insve.h
mips_insve_w, // llvm.mips.insve.w
mips_lbux, // llvm.mips.lbux
mips_ld_b, // llvm.mips.ld.b
mips_ld_d, // llvm.mips.ld.d
mips_ld_h, // llvm.mips.ld.h
mips_ld_w, // llvm.mips.ld.w
mips_ldi_b, // llvm.mips.ldi.b
mips_ldi_d, // llvm.mips.ldi.d
mips_ldi_h, // llvm.mips.ldi.h
mips_ldi_w, // llvm.mips.ldi.w
mips_ldr_d, // llvm.mips.ldr.d
mips_ldr_w, // llvm.mips.ldr.w
mips_lhx, // llvm.mips.lhx
mips_lsa, // llvm.mips.lsa
mips_lwx, // llvm.mips.lwx
mips_madd, // llvm.mips.madd
mips_madd_q_h, // llvm.mips.madd.q.h
mips_madd_q_w, // llvm.mips.madd.q.w
mips_maddr_q_h, // llvm.mips.maddr.q.h
mips_maddr_q_w, // llvm.mips.maddr.q.w
mips_maddu, // llvm.mips.maddu
mips_maddv_b, // llvm.mips.maddv.b
mips_maddv_d, // llvm.mips.maddv.d
mips_maddv_h, // llvm.mips.maddv.h
mips_maddv_w, // llvm.mips.maddv.w
mips_maq_s_w_phl, // llvm.mips.maq.s.w.phl
mips_maq_s_w_phr, // llvm.mips.maq.s.w.phr
mips_maq_sa_w_phl, // llvm.mips.maq.sa.w.phl
mips_maq_sa_w_phr, // llvm.mips.maq.sa.w.phr
mips_max_a_b, // llvm.mips.max.a.b
mips_max_a_d, // llvm.mips.max.a.d
mips_max_a_h, // llvm.mips.max.a.h
mips_max_a_w, // llvm.mips.max.a.w
mips_max_s_b, // llvm.mips.max.s.b
mips_max_s_d, // llvm.mips.max.s.d
mips_max_s_h, // llvm.mips.max.s.h
mips_max_s_w, // llvm.mips.max.s.w
mips_max_u_b, // llvm.mips.max.u.b
mips_max_u_d, // llvm.mips.max.u.d
mips_max_u_h, // llvm.mips.max.u.h
mips_max_u_w, // llvm.mips.max.u.w
mips_maxi_s_b, // llvm.mips.maxi.s.b
mips_maxi_s_d, // llvm.mips.maxi.s.d
mips_maxi_s_h, // llvm.mips.maxi.s.h
mips_maxi_s_w, // llvm.mips.maxi.s.w
mips_maxi_u_b, // llvm.mips.maxi.u.b
mips_maxi_u_d, // llvm.mips.maxi.u.d
mips_maxi_u_h, // llvm.mips.maxi.u.h
mips_maxi_u_w, // llvm.mips.maxi.u.w
mips_min_a_b, // llvm.mips.min.a.b
mips_min_a_d, // llvm.mips.min.a.d
mips_min_a_h, // llvm.mips.min.a.h
mips_min_a_w, // llvm.mips.min.a.w
mips_min_s_b, // llvm.mips.min.s.b
mips_min_s_d, // llvm.mips.min.s.d
mips_min_s_h, // llvm.mips.min.s.h
mips_min_s_w, // llvm.mips.min.s.w
mips_min_u_b, // llvm.mips.min.u.b
mips_min_u_d, // llvm.mips.min.u.d
mips_min_u_h, // llvm.mips.min.u.h
mips_min_u_w, // llvm.mips.min.u.w
mips_mini_s_b, // llvm.mips.mini.s.b
mips_mini_s_d, // llvm.mips.mini.s.d
mips_mini_s_h, // llvm.mips.mini.s.h
mips_mini_s_w, // llvm.mips.mini.s.w
mips_mini_u_b, // llvm.mips.mini.u.b
mips_mini_u_d, // llvm.mips.mini.u.d
mips_mini_u_h, // llvm.mips.mini.u.h
mips_mini_u_w, // llvm.mips.mini.u.w
mips_mod_s_b, // llvm.mips.mod.s.b
mips_mod_s_d, // llvm.mips.mod.s.d
mips_mod_s_h, // llvm.mips.mod.s.h
mips_mod_s_w, // llvm.mips.mod.s.w
mips_mod_u_b, // llvm.mips.mod.u.b
mips_mod_u_d, // llvm.mips.mod.u.d
mips_mod_u_h, // llvm.mips.mod.u.h
mips_mod_u_w, // llvm.mips.mod.u.w
mips_modsub, // llvm.mips.modsub
mips_move_v, // llvm.mips.move.v
mips_msub, // llvm.mips.msub
mips_msub_q_h, // llvm.mips.msub.q.h
mips_msub_q_w, // llvm.mips.msub.q.w
mips_msubr_q_h, // llvm.mips.msubr.q.h
mips_msubr_q_w, // llvm.mips.msubr.q.w
mips_msubu, // llvm.mips.msubu
mips_msubv_b, // llvm.mips.msubv.b
mips_msubv_d, // llvm.mips.msubv.d
mips_msubv_h, // llvm.mips.msubv.h
mips_msubv_w, // llvm.mips.msubv.w
mips_mthlip, // llvm.mips.mthlip
mips_mul_ph, // llvm.mips.mul.ph
mips_mul_q_h, // llvm.mips.mul.q.h
mips_mul_q_w, // llvm.mips.mul.q.w
mips_mul_s_ph, // llvm.mips.mul.s.ph
mips_muleq_s_w_phl, // llvm.mips.muleq.s.w.phl
mips_muleq_s_w_phr, // llvm.mips.muleq.s.w.phr
mips_muleu_s_ph_qbl, // llvm.mips.muleu.s.ph.qbl
mips_muleu_s_ph_qbr, // llvm.mips.muleu.s.ph.qbr
mips_mulq_rs_ph, // llvm.mips.mulq.rs.ph
mips_mulq_rs_w, // llvm.mips.mulq.rs.w
mips_mulq_s_ph, // llvm.mips.mulq.s.ph
mips_mulq_s_w, // llvm.mips.mulq.s.w
mips_mulr_q_h, // llvm.mips.mulr.q.h
mips_mulr_q_w, // llvm.mips.mulr.q.w
mips_mulsa_w_ph, // llvm.mips.mulsa.w.ph
mips_mulsaq_s_w_ph, // llvm.mips.mulsaq.s.w.ph
mips_mult, // llvm.mips.mult
mips_multu, // llvm.mips.multu
mips_mulv_b, // llvm.mips.mulv.b
mips_mulv_d, // llvm.mips.mulv.d
mips_mulv_h, // llvm.mips.mulv.h
mips_mulv_w, // llvm.mips.mulv.w
mips_nloc_b, // llvm.mips.nloc.b
mips_nloc_d, // llvm.mips.nloc.d
mips_nloc_h, // llvm.mips.nloc.h
mips_nloc_w, // llvm.mips.nloc.w
mips_nlzc_b, // llvm.mips.nlzc.b
mips_nlzc_d, // llvm.mips.nlzc.d
mips_nlzc_h, // llvm.mips.nlzc.h
mips_nlzc_w, // llvm.mips.nlzc.w
mips_nor_v, // llvm.mips.nor.v
mips_nori_b, // llvm.mips.nori.b
mips_or_v, // llvm.mips.or.v
mips_ori_b, // llvm.mips.ori.b
mips_packrl_ph, // llvm.mips.packrl.ph
mips_pckev_b, // llvm.mips.pckev.b
mips_pckev_d, // llvm.mips.pckev.d
mips_pckev_h, // llvm.mips.pckev.h
mips_pckev_w, // llvm.mips.pckev.w
mips_pckod_b, // llvm.mips.pckod.b
mips_pckod_d, // llvm.mips.pckod.d
mips_pckod_h, // llvm.mips.pckod.h
mips_pckod_w, // llvm.mips.pckod.w
mips_pcnt_b, // llvm.mips.pcnt.b
mips_pcnt_d, // llvm.mips.pcnt.d
mips_pcnt_h, // llvm.mips.pcnt.h
mips_pcnt_w, // llvm.mips.pcnt.w
mips_pick_ph, // llvm.mips.pick.ph
mips_pick_qb, // llvm.mips.pick.qb
mips_preceq_w_phl, // llvm.mips.preceq.w.phl
mips_preceq_w_phr, // llvm.mips.preceq.w.phr
mips_precequ_ph_qbl, // llvm.mips.precequ.ph.qbl
mips_precequ_ph_qbla, // llvm.mips.precequ.ph.qbla
mips_precequ_ph_qbr, // llvm.mips.precequ.ph.qbr
mips_precequ_ph_qbra, // llvm.mips.precequ.ph.qbra
mips_preceu_ph_qbl, // llvm.mips.preceu.ph.qbl
mips_preceu_ph_qbla, // llvm.mips.preceu.ph.qbla
mips_preceu_ph_qbr, // llvm.mips.preceu.ph.qbr
mips_preceu_ph_qbra, // llvm.mips.preceu.ph.qbra
mips_precr_qb_ph, // llvm.mips.precr.qb.ph
mips_precr_sra_ph_w, // llvm.mips.precr.sra.ph.w
mips_precr_sra_r_ph_w, // llvm.mips.precr.sra.r.ph.w
mips_precrq_ph_w, // llvm.mips.precrq.ph.w
mips_precrq_qb_ph, // llvm.mips.precrq.qb.ph
mips_precrq_rs_ph_w, // llvm.mips.precrq.rs.ph.w
mips_precrqu_s_qb_ph, // llvm.mips.precrqu.s.qb.ph
mips_prepend, // llvm.mips.prepend
mips_raddu_w_qb, // llvm.mips.raddu.w.qb
mips_rddsp, // llvm.mips.rddsp
mips_repl_ph, // llvm.mips.repl.ph
mips_repl_qb, // llvm.mips.repl.qb
mips_sat_s_b, // llvm.mips.sat.s.b
mips_sat_s_d, // llvm.mips.sat.s.d
mips_sat_s_h, // llvm.mips.sat.s.h
mips_sat_s_w, // llvm.mips.sat.s.w
mips_sat_u_b, // llvm.mips.sat.u.b
mips_sat_u_d, // llvm.mips.sat.u.d
mips_sat_u_h, // llvm.mips.sat.u.h
mips_sat_u_w, // llvm.mips.sat.u.w
mips_shf_b, // llvm.mips.shf.b
mips_shf_h, // llvm.mips.shf.h
mips_shf_w, // llvm.mips.shf.w
mips_shilo, // llvm.mips.shilo
mips_shll_ph, // llvm.mips.shll.ph
mips_shll_qb, // llvm.mips.shll.qb
mips_shll_s_ph, // llvm.mips.shll.s.ph
mips_shll_s_w, // llvm.mips.shll.s.w
mips_shra_ph, // llvm.mips.shra.ph
mips_shra_qb, // llvm.mips.shra.qb
mips_shra_r_ph, // llvm.mips.shra.r.ph
mips_shra_r_qb, // llvm.mips.shra.r.qb
mips_shra_r_w, // llvm.mips.shra.r.w
mips_shrl_ph, // llvm.mips.shrl.ph
mips_shrl_qb, // llvm.mips.shrl.qb
mips_sld_b, // llvm.mips.sld.b
mips_sld_d, // llvm.mips.sld.d
mips_sld_h, // llvm.mips.sld.h
mips_sld_w, // llvm.mips.sld.w
mips_sldi_b, // llvm.mips.sldi.b
mips_sldi_d, // llvm.mips.sldi.d
mips_sldi_h, // llvm.mips.sldi.h
mips_sldi_w, // llvm.mips.sldi.w
mips_sll_b, // llvm.mips.sll.b
mips_sll_d, // llvm.mips.sll.d
mips_sll_h, // llvm.mips.sll.h
mips_sll_w, // llvm.mips.sll.w
mips_slli_b, // llvm.mips.slli.b
mips_slli_d, // llvm.mips.slli.d
mips_slli_h, // llvm.mips.slli.h
mips_slli_w, // llvm.mips.slli.w
mips_splat_b, // llvm.mips.splat.b
mips_splat_d, // llvm.mips.splat.d
mips_splat_h, // llvm.mips.splat.h
mips_splat_w, // llvm.mips.splat.w
mips_splati_b, // llvm.mips.splati.b
mips_splati_d, // llvm.mips.splati.d
mips_splati_h, // llvm.mips.splati.h
mips_splati_w, // llvm.mips.splati.w
mips_sra_b, // llvm.mips.sra.b
mips_sra_d, // llvm.mips.sra.d
mips_sra_h, // llvm.mips.sra.h
mips_sra_w, // llvm.mips.sra.w
mips_srai_b, // llvm.mips.srai.b
mips_srai_d, // llvm.mips.srai.d
mips_srai_h, // llvm.mips.srai.h
mips_srai_w, // llvm.mips.srai.w
mips_srar_b, // llvm.mips.srar.b
mips_srar_d, // llvm.mips.srar.d
mips_srar_h, // llvm.mips.srar.h
mips_srar_w, // llvm.mips.srar.w
mips_srari_b, // llvm.mips.srari.b
mips_srari_d, // llvm.mips.srari.d
mips_srari_h, // llvm.mips.srari.h
mips_srari_w, // llvm.mips.srari.w
mips_srl_b, // llvm.mips.srl.b
mips_srl_d, // llvm.mips.srl.d
mips_srl_h, // llvm.mips.srl.h
mips_srl_w, // llvm.mips.srl.w
mips_srli_b, // llvm.mips.srli.b
mips_srli_d, // llvm.mips.srli.d
mips_srli_h, // llvm.mips.srli.h
mips_srli_w, // llvm.mips.srli.w
mips_srlr_b, // llvm.mips.srlr.b
mips_srlr_d, // llvm.mips.srlr.d
mips_srlr_h, // llvm.mips.srlr.h
mips_srlr_w, // llvm.mips.srlr.w
mips_srlri_b, // llvm.mips.srlri.b
mips_srlri_d, // llvm.mips.srlri.d
mips_srlri_h, // llvm.mips.srlri.h
mips_srlri_w, // llvm.mips.srlri.w
mips_st_b, // llvm.mips.st.b
mips_st_d, // llvm.mips.st.d
mips_st_h, // llvm.mips.st.h
mips_st_w, // llvm.mips.st.w
mips_str_d, // llvm.mips.str.d
mips_str_w, // llvm.mips.str.w
mips_subq_ph, // llvm.mips.subq.ph
mips_subq_s_ph, // llvm.mips.subq.s.ph
mips_subq_s_w, // llvm.mips.subq.s.w
mips_subqh_ph, // llvm.mips.subqh.ph
mips_subqh_r_ph, // llvm.mips.subqh.r.ph
mips_subqh_r_w, // llvm.mips.subqh.r.w
mips_subqh_w, // llvm.mips.subqh.w
mips_subs_s_b, // llvm.mips.subs.s.b
mips_subs_s_d, // llvm.mips.subs.s.d
mips_subs_s_h, // llvm.mips.subs.s.h
mips_subs_s_w, // llvm.mips.subs.s.w
mips_subs_u_b, // llvm.mips.subs.u.b
mips_subs_u_d, // llvm.mips.subs.u.d
mips_subs_u_h, // llvm.mips.subs.u.h
mips_subs_u_w, // llvm.mips.subs.u.w
mips_subsus_u_b, // llvm.mips.subsus.u.b
mips_subsus_u_d, // llvm.mips.subsus.u.d
mips_subsus_u_h, // llvm.mips.subsus.u.h
mips_subsus_u_w, // llvm.mips.subsus.u.w
mips_subsuu_s_b, // llvm.mips.subsuu.s.b
mips_subsuu_s_d, // llvm.mips.subsuu.s.d
mips_subsuu_s_h, // llvm.mips.subsuu.s.h
mips_subsuu_s_w, // llvm.mips.subsuu.s.w
mips_subu_ph, // llvm.mips.subu.ph
mips_subu_qb, // llvm.mips.subu.qb
mips_subu_s_ph, // llvm.mips.subu.s.ph
mips_subu_s_qb, // llvm.mips.subu.s.qb
mips_subuh_qb, // llvm.mips.subuh.qb
mips_subuh_r_qb, // llvm.mips.subuh.r.qb
mips_subv_b, // llvm.mips.subv.b
mips_subv_d, // llvm.mips.subv.d
mips_subv_h, // llvm.mips.subv.h
mips_subv_w, // llvm.mips.subv.w
mips_subvi_b, // llvm.mips.subvi.b
mips_subvi_d, // llvm.mips.subvi.d
mips_subvi_h, // llvm.mips.subvi.h
mips_subvi_w, // llvm.mips.subvi.w
mips_vshf_b, // llvm.mips.vshf.b
mips_vshf_d, // llvm.mips.vshf.d
mips_vshf_h, // llvm.mips.vshf.h
mips_vshf_w, // llvm.mips.vshf.w
mips_wrdsp, // llvm.mips.wrdsp
mips_xor_v, // llvm.mips.xor.v
mips_xori_b, // llvm.mips.xori.b
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif

File diff suppressed because it is too large

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,628 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_PPC_ENUMS_H
#define LLVM_IR_INTRINSIC_PPC_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum PPCIntrinsics : unsigned {
// Enum values for intrinsics
ppc_addex = 6619, // llvm.ppc.addex
ppc_addf128_round_to_odd, // llvm.ppc.addf128.round.to.odd
ppc_altivec_crypto_vcipher, // llvm.ppc.altivec.crypto.vcipher
ppc_altivec_crypto_vcipherlast, // llvm.ppc.altivec.crypto.vcipherlast
ppc_altivec_crypto_vncipher, // llvm.ppc.altivec.crypto.vncipher
ppc_altivec_crypto_vncipherlast, // llvm.ppc.altivec.crypto.vncipherlast
ppc_altivec_crypto_vpermxor, // llvm.ppc.altivec.crypto.vpermxor
ppc_altivec_crypto_vpermxor_be, // llvm.ppc.altivec.crypto.vpermxor.be
ppc_altivec_crypto_vpmsumb, // llvm.ppc.altivec.crypto.vpmsumb
ppc_altivec_crypto_vpmsumd, // llvm.ppc.altivec.crypto.vpmsumd
ppc_altivec_crypto_vpmsumh, // llvm.ppc.altivec.crypto.vpmsumh
ppc_altivec_crypto_vpmsumw, // llvm.ppc.altivec.crypto.vpmsumw
ppc_altivec_crypto_vsbox, // llvm.ppc.altivec.crypto.vsbox
ppc_altivec_crypto_vshasigmad, // llvm.ppc.altivec.crypto.vshasigmad
ppc_altivec_crypto_vshasigmaw, // llvm.ppc.altivec.crypto.vshasigmaw
ppc_altivec_dss, // llvm.ppc.altivec.dss
ppc_altivec_dssall, // llvm.ppc.altivec.dssall
ppc_altivec_dst, // llvm.ppc.altivec.dst
ppc_altivec_dstst, // llvm.ppc.altivec.dstst
ppc_altivec_dststt, // llvm.ppc.altivec.dststt
ppc_altivec_dstt, // llvm.ppc.altivec.dstt
ppc_altivec_lvebx, // llvm.ppc.altivec.lvebx
ppc_altivec_lvehx, // llvm.ppc.altivec.lvehx
ppc_altivec_lvewx, // llvm.ppc.altivec.lvewx
ppc_altivec_lvsl, // llvm.ppc.altivec.lvsl
ppc_altivec_lvsr, // llvm.ppc.altivec.lvsr
ppc_altivec_lvx, // llvm.ppc.altivec.lvx
ppc_altivec_lvxl, // llvm.ppc.altivec.lvxl
ppc_altivec_mfvscr, // llvm.ppc.altivec.mfvscr
ppc_altivec_mtvscr, // llvm.ppc.altivec.mtvscr
ppc_altivec_mtvsrbm, // llvm.ppc.altivec.mtvsrbm
ppc_altivec_mtvsrdm, // llvm.ppc.altivec.mtvsrdm
ppc_altivec_mtvsrhm, // llvm.ppc.altivec.mtvsrhm
ppc_altivec_mtvsrqm, // llvm.ppc.altivec.mtvsrqm
ppc_altivec_mtvsrwm, // llvm.ppc.altivec.mtvsrwm
ppc_altivec_stvebx, // llvm.ppc.altivec.stvebx
ppc_altivec_stvehx, // llvm.ppc.altivec.stvehx
ppc_altivec_stvewx, // llvm.ppc.altivec.stvewx
ppc_altivec_stvx, // llvm.ppc.altivec.stvx
ppc_altivec_stvxl, // llvm.ppc.altivec.stvxl
ppc_altivec_vabsdub, // llvm.ppc.altivec.vabsdub
ppc_altivec_vabsduh, // llvm.ppc.altivec.vabsduh
ppc_altivec_vabsduw, // llvm.ppc.altivec.vabsduw
ppc_altivec_vaddcuq, // llvm.ppc.altivec.vaddcuq
ppc_altivec_vaddcuw, // llvm.ppc.altivec.vaddcuw
ppc_altivec_vaddecuq, // llvm.ppc.altivec.vaddecuq
ppc_altivec_vaddeuqm, // llvm.ppc.altivec.vaddeuqm
ppc_altivec_vaddsbs, // llvm.ppc.altivec.vaddsbs
ppc_altivec_vaddshs, // llvm.ppc.altivec.vaddshs
ppc_altivec_vaddsws, // llvm.ppc.altivec.vaddsws
ppc_altivec_vaddubs, // llvm.ppc.altivec.vaddubs
ppc_altivec_vadduhs, // llvm.ppc.altivec.vadduhs
ppc_altivec_vadduws, // llvm.ppc.altivec.vadduws
ppc_altivec_vavgsb, // llvm.ppc.altivec.vavgsb
ppc_altivec_vavgsh, // llvm.ppc.altivec.vavgsh
ppc_altivec_vavgsw, // llvm.ppc.altivec.vavgsw
ppc_altivec_vavgub, // llvm.ppc.altivec.vavgub
ppc_altivec_vavguh, // llvm.ppc.altivec.vavguh
ppc_altivec_vavguw, // llvm.ppc.altivec.vavguw
ppc_altivec_vbpermd, // llvm.ppc.altivec.vbpermd
ppc_altivec_vbpermq, // llvm.ppc.altivec.vbpermq
ppc_altivec_vcfsx, // llvm.ppc.altivec.vcfsx
ppc_altivec_vcfuged, // llvm.ppc.altivec.vcfuged
ppc_altivec_vcfux, // llvm.ppc.altivec.vcfux
ppc_altivec_vclrlb, // llvm.ppc.altivec.vclrlb
ppc_altivec_vclrrb, // llvm.ppc.altivec.vclrrb
ppc_altivec_vclzdm, // llvm.ppc.altivec.vclzdm
ppc_altivec_vclzlsbb, // llvm.ppc.altivec.vclzlsbb
ppc_altivec_vcmpbfp, // llvm.ppc.altivec.vcmpbfp
ppc_altivec_vcmpbfp_p, // llvm.ppc.altivec.vcmpbfp.p
ppc_altivec_vcmpeqfp, // llvm.ppc.altivec.vcmpeqfp
ppc_altivec_vcmpeqfp_p, // llvm.ppc.altivec.vcmpeqfp.p
ppc_altivec_vcmpequb, // llvm.ppc.altivec.vcmpequb
ppc_altivec_vcmpequb_p, // llvm.ppc.altivec.vcmpequb.p
ppc_altivec_vcmpequd, // llvm.ppc.altivec.vcmpequd
ppc_altivec_vcmpequd_p, // llvm.ppc.altivec.vcmpequd.p
ppc_altivec_vcmpequh, // llvm.ppc.altivec.vcmpequh
ppc_altivec_vcmpequh_p, // llvm.ppc.altivec.vcmpequh.p
ppc_altivec_vcmpequq, // llvm.ppc.altivec.vcmpequq
ppc_altivec_vcmpequq_p, // llvm.ppc.altivec.vcmpequq.p
ppc_altivec_vcmpequw, // llvm.ppc.altivec.vcmpequw
ppc_altivec_vcmpequw_p, // llvm.ppc.altivec.vcmpequw.p
ppc_altivec_vcmpgefp, // llvm.ppc.altivec.vcmpgefp
ppc_altivec_vcmpgefp_p, // llvm.ppc.altivec.vcmpgefp.p
ppc_altivec_vcmpgtfp, // llvm.ppc.altivec.vcmpgtfp
ppc_altivec_vcmpgtfp_p, // llvm.ppc.altivec.vcmpgtfp.p
ppc_altivec_vcmpgtsb, // llvm.ppc.altivec.vcmpgtsb
ppc_altivec_vcmpgtsb_p, // llvm.ppc.altivec.vcmpgtsb.p
ppc_altivec_vcmpgtsd, // llvm.ppc.altivec.vcmpgtsd
ppc_altivec_vcmpgtsd_p, // llvm.ppc.altivec.vcmpgtsd.p
ppc_altivec_vcmpgtsh, // llvm.ppc.altivec.vcmpgtsh
ppc_altivec_vcmpgtsh_p, // llvm.ppc.altivec.vcmpgtsh.p
ppc_altivec_vcmpgtsq, // llvm.ppc.altivec.vcmpgtsq
ppc_altivec_vcmpgtsq_p, // llvm.ppc.altivec.vcmpgtsq.p
ppc_altivec_vcmpgtsw, // llvm.ppc.altivec.vcmpgtsw
ppc_altivec_vcmpgtsw_p, // llvm.ppc.altivec.vcmpgtsw.p
ppc_altivec_vcmpgtub, // llvm.ppc.altivec.vcmpgtub
ppc_altivec_vcmpgtub_p, // llvm.ppc.altivec.vcmpgtub.p
ppc_altivec_vcmpgtud, // llvm.ppc.altivec.vcmpgtud
ppc_altivec_vcmpgtud_p, // llvm.ppc.altivec.vcmpgtud.p
ppc_altivec_vcmpgtuh, // llvm.ppc.altivec.vcmpgtuh
ppc_altivec_vcmpgtuh_p, // llvm.ppc.altivec.vcmpgtuh.p
ppc_altivec_vcmpgtuq, // llvm.ppc.altivec.vcmpgtuq
ppc_altivec_vcmpgtuq_p, // llvm.ppc.altivec.vcmpgtuq.p
ppc_altivec_vcmpgtuw, // llvm.ppc.altivec.vcmpgtuw
ppc_altivec_vcmpgtuw_p, // llvm.ppc.altivec.vcmpgtuw.p
ppc_altivec_vcmpneb, // llvm.ppc.altivec.vcmpneb
ppc_altivec_vcmpneb_p, // llvm.ppc.altivec.vcmpneb.p
ppc_altivec_vcmpneh, // llvm.ppc.altivec.vcmpneh
ppc_altivec_vcmpneh_p, // llvm.ppc.altivec.vcmpneh.p
ppc_altivec_vcmpnew, // llvm.ppc.altivec.vcmpnew
ppc_altivec_vcmpnew_p, // llvm.ppc.altivec.vcmpnew.p
ppc_altivec_vcmpnezb, // llvm.ppc.altivec.vcmpnezb
ppc_altivec_vcmpnezb_p, // llvm.ppc.altivec.vcmpnezb.p
ppc_altivec_vcmpnezh, // llvm.ppc.altivec.vcmpnezh
ppc_altivec_vcmpnezh_p, // llvm.ppc.altivec.vcmpnezh.p
ppc_altivec_vcmpnezw, // llvm.ppc.altivec.vcmpnezw
ppc_altivec_vcmpnezw_p, // llvm.ppc.altivec.vcmpnezw.p
ppc_altivec_vcntmbb, // llvm.ppc.altivec.vcntmbb
ppc_altivec_vcntmbd, // llvm.ppc.altivec.vcntmbd
ppc_altivec_vcntmbh, // llvm.ppc.altivec.vcntmbh
ppc_altivec_vcntmbw, // llvm.ppc.altivec.vcntmbw
ppc_altivec_vctsxs, // llvm.ppc.altivec.vctsxs
ppc_altivec_vctuxs, // llvm.ppc.altivec.vctuxs
ppc_altivec_vctzdm, // llvm.ppc.altivec.vctzdm
ppc_altivec_vctzlsbb, // llvm.ppc.altivec.vctzlsbb
ppc_altivec_vdivesd, // llvm.ppc.altivec.vdivesd
ppc_altivec_vdivesq, // llvm.ppc.altivec.vdivesq
ppc_altivec_vdivesw, // llvm.ppc.altivec.vdivesw
ppc_altivec_vdiveud, // llvm.ppc.altivec.vdiveud
ppc_altivec_vdiveuq, // llvm.ppc.altivec.vdiveuq
ppc_altivec_vdiveuw, // llvm.ppc.altivec.vdiveuw
ppc_altivec_vexpandbm, // llvm.ppc.altivec.vexpandbm
ppc_altivec_vexpanddm, // llvm.ppc.altivec.vexpanddm
ppc_altivec_vexpandhm, // llvm.ppc.altivec.vexpandhm
ppc_altivec_vexpandqm, // llvm.ppc.altivec.vexpandqm
ppc_altivec_vexpandwm, // llvm.ppc.altivec.vexpandwm
ppc_altivec_vexptefp, // llvm.ppc.altivec.vexptefp
ppc_altivec_vextddvlx, // llvm.ppc.altivec.vextddvlx
ppc_altivec_vextddvrx, // llvm.ppc.altivec.vextddvrx
ppc_altivec_vextdubvlx, // llvm.ppc.altivec.vextdubvlx
ppc_altivec_vextdubvrx, // llvm.ppc.altivec.vextdubvrx
ppc_altivec_vextduhvlx, // llvm.ppc.altivec.vextduhvlx
ppc_altivec_vextduhvrx, // llvm.ppc.altivec.vextduhvrx
ppc_altivec_vextduwvlx, // llvm.ppc.altivec.vextduwvlx
ppc_altivec_vextduwvrx, // llvm.ppc.altivec.vextduwvrx
ppc_altivec_vextractbm, // llvm.ppc.altivec.vextractbm
ppc_altivec_vextractdm, // llvm.ppc.altivec.vextractdm
ppc_altivec_vextracthm, // llvm.ppc.altivec.vextracthm
ppc_altivec_vextractqm, // llvm.ppc.altivec.vextractqm
ppc_altivec_vextractwm, // llvm.ppc.altivec.vextractwm
ppc_altivec_vextsb2d, // llvm.ppc.altivec.vextsb2d
ppc_altivec_vextsb2w, // llvm.ppc.altivec.vextsb2w
ppc_altivec_vextsd2q, // llvm.ppc.altivec.vextsd2q
ppc_altivec_vextsh2d, // llvm.ppc.altivec.vextsh2d
ppc_altivec_vextsh2w, // llvm.ppc.altivec.vextsh2w
ppc_altivec_vextsw2d, // llvm.ppc.altivec.vextsw2d
ppc_altivec_vgbbd, // llvm.ppc.altivec.vgbbd
ppc_altivec_vgnb, // llvm.ppc.altivec.vgnb
ppc_altivec_vinsblx, // llvm.ppc.altivec.vinsblx
ppc_altivec_vinsbrx, // llvm.ppc.altivec.vinsbrx
ppc_altivec_vinsbvlx, // llvm.ppc.altivec.vinsbvlx
ppc_altivec_vinsbvrx, // llvm.ppc.altivec.vinsbvrx
ppc_altivec_vinsd, // llvm.ppc.altivec.vinsd
ppc_altivec_vinsdlx, // llvm.ppc.altivec.vinsdlx
ppc_altivec_vinsdrx, // llvm.ppc.altivec.vinsdrx
ppc_altivec_vinshlx, // llvm.ppc.altivec.vinshlx
ppc_altivec_vinshrx, // llvm.ppc.altivec.vinshrx
ppc_altivec_vinshvlx, // llvm.ppc.altivec.vinshvlx
ppc_altivec_vinshvrx, // llvm.ppc.altivec.vinshvrx
ppc_altivec_vinsw, // llvm.ppc.altivec.vinsw
ppc_altivec_vinswlx, // llvm.ppc.altivec.vinswlx
ppc_altivec_vinswrx, // llvm.ppc.altivec.vinswrx
ppc_altivec_vinswvlx, // llvm.ppc.altivec.vinswvlx
ppc_altivec_vinswvrx, // llvm.ppc.altivec.vinswvrx
ppc_altivec_vlogefp, // llvm.ppc.altivec.vlogefp
ppc_altivec_vmaddfp, // llvm.ppc.altivec.vmaddfp
ppc_altivec_vmaxfp, // llvm.ppc.altivec.vmaxfp
ppc_altivec_vmaxsb, // llvm.ppc.altivec.vmaxsb
ppc_altivec_vmaxsd, // llvm.ppc.altivec.vmaxsd
ppc_altivec_vmaxsh, // llvm.ppc.altivec.vmaxsh
ppc_altivec_vmaxsw, // llvm.ppc.altivec.vmaxsw
ppc_altivec_vmaxub, // llvm.ppc.altivec.vmaxub
ppc_altivec_vmaxud, // llvm.ppc.altivec.vmaxud
ppc_altivec_vmaxuh, // llvm.ppc.altivec.vmaxuh
ppc_altivec_vmaxuw, // llvm.ppc.altivec.vmaxuw
ppc_altivec_vmhaddshs, // llvm.ppc.altivec.vmhaddshs
ppc_altivec_vmhraddshs, // llvm.ppc.altivec.vmhraddshs
ppc_altivec_vminfp, // llvm.ppc.altivec.vminfp
ppc_altivec_vminsb, // llvm.ppc.altivec.vminsb
ppc_altivec_vminsd, // llvm.ppc.altivec.vminsd
ppc_altivec_vminsh, // llvm.ppc.altivec.vminsh
ppc_altivec_vminsw, // llvm.ppc.altivec.vminsw
ppc_altivec_vminub, // llvm.ppc.altivec.vminub
ppc_altivec_vminud, // llvm.ppc.altivec.vminud
ppc_altivec_vminuh, // llvm.ppc.altivec.vminuh
ppc_altivec_vminuw, // llvm.ppc.altivec.vminuw
ppc_altivec_vmladduhm, // llvm.ppc.altivec.vmladduhm
ppc_altivec_vmsumcud, // llvm.ppc.altivec.vmsumcud
ppc_altivec_vmsummbm, // llvm.ppc.altivec.vmsummbm
ppc_altivec_vmsumshm, // llvm.ppc.altivec.vmsumshm
ppc_altivec_vmsumshs, // llvm.ppc.altivec.vmsumshs
ppc_altivec_vmsumubm, // llvm.ppc.altivec.vmsumubm
ppc_altivec_vmsumudm, // llvm.ppc.altivec.vmsumudm
ppc_altivec_vmsumuhm, // llvm.ppc.altivec.vmsumuhm
ppc_altivec_vmsumuhs, // llvm.ppc.altivec.vmsumuhs
ppc_altivec_vmulesb, // llvm.ppc.altivec.vmulesb
ppc_altivec_vmulesd, // llvm.ppc.altivec.vmulesd
ppc_altivec_vmulesh, // llvm.ppc.altivec.vmulesh
ppc_altivec_vmulesw, // llvm.ppc.altivec.vmulesw
ppc_altivec_vmuleub, // llvm.ppc.altivec.vmuleub
ppc_altivec_vmuleud, // llvm.ppc.altivec.vmuleud
ppc_altivec_vmuleuh, // llvm.ppc.altivec.vmuleuh
ppc_altivec_vmuleuw, // llvm.ppc.altivec.vmuleuw
ppc_altivec_vmulhsd, // llvm.ppc.altivec.vmulhsd
ppc_altivec_vmulhsw, // llvm.ppc.altivec.vmulhsw
ppc_altivec_vmulhud, // llvm.ppc.altivec.vmulhud
ppc_altivec_vmulhuw, // llvm.ppc.altivec.vmulhuw
ppc_altivec_vmulosb, // llvm.ppc.altivec.vmulosb
ppc_altivec_vmulosd, // llvm.ppc.altivec.vmulosd
ppc_altivec_vmulosh, // llvm.ppc.altivec.vmulosh
ppc_altivec_vmulosw, // llvm.ppc.altivec.vmulosw
ppc_altivec_vmuloub, // llvm.ppc.altivec.vmuloub
ppc_altivec_vmuloud, // llvm.ppc.altivec.vmuloud
ppc_altivec_vmulouh, // llvm.ppc.altivec.vmulouh
ppc_altivec_vmulouw, // llvm.ppc.altivec.vmulouw
ppc_altivec_vnmsubfp, // llvm.ppc.altivec.vnmsubfp
ppc_altivec_vpdepd, // llvm.ppc.altivec.vpdepd
ppc_altivec_vperm, // llvm.ppc.altivec.vperm
ppc_altivec_vpextd, // llvm.ppc.altivec.vpextd
ppc_altivec_vpkpx, // llvm.ppc.altivec.vpkpx
ppc_altivec_vpksdss, // llvm.ppc.altivec.vpksdss
ppc_altivec_vpksdus, // llvm.ppc.altivec.vpksdus
ppc_altivec_vpkshss, // llvm.ppc.altivec.vpkshss
ppc_altivec_vpkshus, // llvm.ppc.altivec.vpkshus
ppc_altivec_vpkswss, // llvm.ppc.altivec.vpkswss
ppc_altivec_vpkswus, // llvm.ppc.altivec.vpkswus
ppc_altivec_vpkudus, // llvm.ppc.altivec.vpkudus
ppc_altivec_vpkuhus, // llvm.ppc.altivec.vpkuhus
ppc_altivec_vpkuwus, // llvm.ppc.altivec.vpkuwus
ppc_altivec_vprtybd, // llvm.ppc.altivec.vprtybd
ppc_altivec_vprtybq, // llvm.ppc.altivec.vprtybq
ppc_altivec_vprtybw, // llvm.ppc.altivec.vprtybw
ppc_altivec_vrefp, // llvm.ppc.altivec.vrefp
ppc_altivec_vrfim, // llvm.ppc.altivec.vrfim
ppc_altivec_vrfin, // llvm.ppc.altivec.vrfin
ppc_altivec_vrfip, // llvm.ppc.altivec.vrfip
ppc_altivec_vrfiz, // llvm.ppc.altivec.vrfiz
ppc_altivec_vrlb, // llvm.ppc.altivec.vrlb
ppc_altivec_vrld, // llvm.ppc.altivec.vrld
ppc_altivec_vrldmi, // llvm.ppc.altivec.vrldmi
ppc_altivec_vrldnm, // llvm.ppc.altivec.vrldnm
ppc_altivec_vrlh, // llvm.ppc.altivec.vrlh
ppc_altivec_vrlqmi, // llvm.ppc.altivec.vrlqmi
ppc_altivec_vrlqnm, // llvm.ppc.altivec.vrlqnm
ppc_altivec_vrlw, // llvm.ppc.altivec.vrlw
ppc_altivec_vrlwmi, // llvm.ppc.altivec.vrlwmi
ppc_altivec_vrlwnm, // llvm.ppc.altivec.vrlwnm
ppc_altivec_vrsqrtefp, // llvm.ppc.altivec.vrsqrtefp
ppc_altivec_vsel, // llvm.ppc.altivec.vsel
ppc_altivec_vsl, // llvm.ppc.altivec.vsl
ppc_altivec_vslb, // llvm.ppc.altivec.vslb
ppc_altivec_vsldbi, // llvm.ppc.altivec.vsldbi
ppc_altivec_vslh, // llvm.ppc.altivec.vslh
ppc_altivec_vslo, // llvm.ppc.altivec.vslo
ppc_altivec_vslv, // llvm.ppc.altivec.vslv
ppc_altivec_vslw, // llvm.ppc.altivec.vslw
ppc_altivec_vsr, // llvm.ppc.altivec.vsr
ppc_altivec_vsrab, // llvm.ppc.altivec.vsrab
ppc_altivec_vsrah, // llvm.ppc.altivec.vsrah
ppc_altivec_vsraw, // llvm.ppc.altivec.vsraw
ppc_altivec_vsrb, // llvm.ppc.altivec.vsrb
ppc_altivec_vsrdbi, // llvm.ppc.altivec.vsrdbi
ppc_altivec_vsrh, // llvm.ppc.altivec.vsrh
ppc_altivec_vsro, // llvm.ppc.altivec.vsro
ppc_altivec_vsrv, // llvm.ppc.altivec.vsrv
ppc_altivec_vsrw, // llvm.ppc.altivec.vsrw
ppc_altivec_vstribl, // llvm.ppc.altivec.vstribl
ppc_altivec_vstribl_p, // llvm.ppc.altivec.vstribl.p
ppc_altivec_vstribr, // llvm.ppc.altivec.vstribr
ppc_altivec_vstribr_p, // llvm.ppc.altivec.vstribr.p
ppc_altivec_vstrihl, // llvm.ppc.altivec.vstrihl
ppc_altivec_vstrihl_p, // llvm.ppc.altivec.vstrihl.p
ppc_altivec_vstrihr, // llvm.ppc.altivec.vstrihr
ppc_altivec_vstrihr_p, // llvm.ppc.altivec.vstrihr.p
ppc_altivec_vsubcuq, // llvm.ppc.altivec.vsubcuq
ppc_altivec_vsubcuw, // llvm.ppc.altivec.vsubcuw
ppc_altivec_vsubecuq, // llvm.ppc.altivec.vsubecuq
ppc_altivec_vsubeuqm, // llvm.ppc.altivec.vsubeuqm
ppc_altivec_vsubsbs, // llvm.ppc.altivec.vsubsbs
ppc_altivec_vsubshs, // llvm.ppc.altivec.vsubshs
ppc_altivec_vsubsws, // llvm.ppc.altivec.vsubsws
ppc_altivec_vsububs, // llvm.ppc.altivec.vsububs
ppc_altivec_vsubuhs, // llvm.ppc.altivec.vsubuhs
ppc_altivec_vsubuws, // llvm.ppc.altivec.vsubuws
ppc_altivec_vsum2sws, // llvm.ppc.altivec.vsum2sws
ppc_altivec_vsum4sbs, // llvm.ppc.altivec.vsum4sbs
ppc_altivec_vsum4shs, // llvm.ppc.altivec.vsum4shs
ppc_altivec_vsum4ubs, // llvm.ppc.altivec.vsum4ubs
ppc_altivec_vsumsws, // llvm.ppc.altivec.vsumsws
ppc_altivec_vupkhpx, // llvm.ppc.altivec.vupkhpx
ppc_altivec_vupkhsb, // llvm.ppc.altivec.vupkhsb
ppc_altivec_vupkhsh, // llvm.ppc.altivec.vupkhsh
ppc_altivec_vupkhsw, // llvm.ppc.altivec.vupkhsw
ppc_altivec_vupklpx, // llvm.ppc.altivec.vupklpx
ppc_altivec_vupklsb, // llvm.ppc.altivec.vupklsb
ppc_altivec_vupklsh, // llvm.ppc.altivec.vupklsh
ppc_altivec_vupklsw, // llvm.ppc.altivec.vupklsw
ppc_atomic_load_i128, // llvm.ppc.atomic.load.i128
ppc_atomic_store_i128, // llvm.ppc.atomic.store.i128
ppc_atomicrmw_add_i128, // llvm.ppc.atomicrmw.add.i128
ppc_atomicrmw_and_i128, // llvm.ppc.atomicrmw.and.i128
ppc_atomicrmw_nand_i128, // llvm.ppc.atomicrmw.nand.i128
ppc_atomicrmw_or_i128, // llvm.ppc.atomicrmw.or.i128
ppc_atomicrmw_sub_i128, // llvm.ppc.atomicrmw.sub.i128
ppc_atomicrmw_xchg_i128, // llvm.ppc.atomicrmw.xchg.i128
ppc_atomicrmw_xor_i128, // llvm.ppc.atomicrmw.xor.i128
ppc_bcdadd, // llvm.ppc.bcdadd
ppc_bcdadd_p, // llvm.ppc.bcdadd.p
ppc_bcdsub, // llvm.ppc.bcdsub
ppc_bcdsub_p, // llvm.ppc.bcdsub.p
ppc_bpermd, // llvm.ppc.bpermd
ppc_cfence, // llvm.ppc.cfence
ppc_cfuged, // llvm.ppc.cfuged
ppc_cmpb, // llvm.ppc.cmpb
ppc_cmpeqb, // llvm.ppc.cmpeqb
ppc_cmprb, // llvm.ppc.cmprb
ppc_cmpxchg_i128, // llvm.ppc.cmpxchg.i128
ppc_cntlzdm, // llvm.ppc.cntlzdm
ppc_cnttzdm, // llvm.ppc.cnttzdm
ppc_compare_exp_eq, // llvm.ppc.compare.exp.eq
ppc_compare_exp_gt, // llvm.ppc.compare.exp.gt
ppc_compare_exp_lt, // llvm.ppc.compare.exp.lt
ppc_compare_exp_uo, // llvm.ppc.compare.exp.uo
ppc_convert_f128_to_ppcf128, // llvm.ppc.convert.f128.to.ppcf128
ppc_convert_ppcf128_to_f128, // llvm.ppc.convert.ppcf128.to.f128
ppc_darn, // llvm.ppc.darn
ppc_darn32, // llvm.ppc.darn32
ppc_darnraw, // llvm.ppc.darnraw
ppc_dcba, // llvm.ppc.dcba
ppc_dcbf, // llvm.ppc.dcbf
ppc_dcbfl, // llvm.ppc.dcbfl
ppc_dcbflp, // llvm.ppc.dcbflp
ppc_dcbfps, // llvm.ppc.dcbfps
ppc_dcbi, // llvm.ppc.dcbi
ppc_dcbst, // llvm.ppc.dcbst
ppc_dcbstps, // llvm.ppc.dcbstps
ppc_dcbt, // llvm.ppc.dcbt
ppc_dcbt_with_hint, // llvm.ppc.dcbt.with.hint
ppc_dcbtst, // llvm.ppc.dcbtst
ppc_dcbtst_with_hint, // llvm.ppc.dcbtst.with.hint
ppc_dcbtstt, // llvm.ppc.dcbtstt
ppc_dcbtt, // llvm.ppc.dcbtt
ppc_dcbz, // llvm.ppc.dcbz
ppc_dcbzl, // llvm.ppc.dcbzl
ppc_divde, // llvm.ppc.divde
ppc_divdeu, // llvm.ppc.divdeu
ppc_divf128_round_to_odd, // llvm.ppc.divf128.round.to.odd
ppc_divwe, // llvm.ppc.divwe
ppc_divweu, // llvm.ppc.divweu
ppc_eieio, // llvm.ppc.eieio
ppc_extract_exp, // llvm.ppc.extract.exp
ppc_extract_sig, // llvm.ppc.extract.sig
ppc_fcfid, // llvm.ppc.fcfid
ppc_fcfud, // llvm.ppc.fcfud
ppc_fctid, // llvm.ppc.fctid
ppc_fctidz, // llvm.ppc.fctidz
ppc_fctiw, // llvm.ppc.fctiw
ppc_fctiwz, // llvm.ppc.fctiwz
ppc_fctudz, // llvm.ppc.fctudz
ppc_fctuwz, // llvm.ppc.fctuwz
ppc_fmaf128_round_to_odd, // llvm.ppc.fmaf128.round.to.odd
ppc_fmsub, // llvm.ppc.fmsub
ppc_fmsubs, // llvm.ppc.fmsubs
ppc_fnmadd, // llvm.ppc.fnmadd
ppc_fnmadds, // llvm.ppc.fnmadds
ppc_fnmsub, // llvm.ppc.fnmsub
ppc_fnmsubs, // llvm.ppc.fnmsubs
ppc_fre, // llvm.ppc.fre
ppc_fres, // llvm.ppc.fres
ppc_frsqrte, // llvm.ppc.frsqrte
ppc_frsqrtes, // llvm.ppc.frsqrtes
ppc_fsel, // llvm.ppc.fsel
ppc_fsels, // llvm.ppc.fsels
ppc_get_texasr, // llvm.ppc.get.texasr
ppc_get_texasru, // llvm.ppc.get.texasru
ppc_get_tfhar, // llvm.ppc.get.tfhar
ppc_get_tfiar, // llvm.ppc.get.tfiar
ppc_icbt, // llvm.ppc.icbt
ppc_insert_exp, // llvm.ppc.insert.exp
ppc_iospace_eieio, // llvm.ppc.iospace.eieio
ppc_iospace_lwsync, // llvm.ppc.iospace.lwsync
ppc_iospace_sync, // llvm.ppc.iospace.sync
ppc_isync, // llvm.ppc.isync
ppc_load2r, // llvm.ppc.load2r
ppc_load4r, // llvm.ppc.load4r
ppc_load8r, // llvm.ppc.load8r
ppc_lwsync, // llvm.ppc.lwsync
ppc_maddhd, // llvm.ppc.maddhd
ppc_maddhdu, // llvm.ppc.maddhdu
ppc_maddld, // llvm.ppc.maddld
ppc_mfmsr, // llvm.ppc.mfmsr
ppc_mfspr, // llvm.ppc.mfspr
ppc_mftbu, // llvm.ppc.mftbu
ppc_mma_assemble_acc, // llvm.ppc.mma.assemble.acc
ppc_mma_disassemble_acc, // llvm.ppc.mma.disassemble.acc
ppc_mma_pmxvbf16ger2, // llvm.ppc.mma.pmxvbf16ger2
ppc_mma_pmxvbf16ger2nn, // llvm.ppc.mma.pmxvbf16ger2nn
ppc_mma_pmxvbf16ger2np, // llvm.ppc.mma.pmxvbf16ger2np
ppc_mma_pmxvbf16ger2pn, // llvm.ppc.mma.pmxvbf16ger2pn
ppc_mma_pmxvbf16ger2pp, // llvm.ppc.mma.pmxvbf16ger2pp
ppc_mma_pmxvf16ger2, // llvm.ppc.mma.pmxvf16ger2
ppc_mma_pmxvf16ger2nn, // llvm.ppc.mma.pmxvf16ger2nn
ppc_mma_pmxvf16ger2np, // llvm.ppc.mma.pmxvf16ger2np
ppc_mma_pmxvf16ger2pn, // llvm.ppc.mma.pmxvf16ger2pn
ppc_mma_pmxvf16ger2pp, // llvm.ppc.mma.pmxvf16ger2pp
ppc_mma_pmxvf32ger, // llvm.ppc.mma.pmxvf32ger
ppc_mma_pmxvf32gernn, // llvm.ppc.mma.pmxvf32gernn
ppc_mma_pmxvf32gernp, // llvm.ppc.mma.pmxvf32gernp
ppc_mma_pmxvf32gerpn, // llvm.ppc.mma.pmxvf32gerpn
ppc_mma_pmxvf32gerpp, // llvm.ppc.mma.pmxvf32gerpp
ppc_mma_pmxvf64ger, // llvm.ppc.mma.pmxvf64ger
ppc_mma_pmxvf64gernn, // llvm.ppc.mma.pmxvf64gernn
ppc_mma_pmxvf64gernp, // llvm.ppc.mma.pmxvf64gernp
ppc_mma_pmxvf64gerpn, // llvm.ppc.mma.pmxvf64gerpn
ppc_mma_pmxvf64gerpp, // llvm.ppc.mma.pmxvf64gerpp
ppc_mma_pmxvi16ger2, // llvm.ppc.mma.pmxvi16ger2
ppc_mma_pmxvi16ger2pp, // llvm.ppc.mma.pmxvi16ger2pp
ppc_mma_pmxvi16ger2s, // llvm.ppc.mma.pmxvi16ger2s
ppc_mma_pmxvi16ger2spp, // llvm.ppc.mma.pmxvi16ger2spp
ppc_mma_pmxvi4ger8, // llvm.ppc.mma.pmxvi4ger8
ppc_mma_pmxvi4ger8pp, // llvm.ppc.mma.pmxvi4ger8pp
ppc_mma_pmxvi8ger4, // llvm.ppc.mma.pmxvi8ger4
ppc_mma_pmxvi8ger4pp, // llvm.ppc.mma.pmxvi8ger4pp
ppc_mma_pmxvi8ger4spp, // llvm.ppc.mma.pmxvi8ger4spp
ppc_mma_xvbf16ger2, // llvm.ppc.mma.xvbf16ger2
ppc_mma_xvbf16ger2nn, // llvm.ppc.mma.xvbf16ger2nn
ppc_mma_xvbf16ger2np, // llvm.ppc.mma.xvbf16ger2np
ppc_mma_xvbf16ger2pn, // llvm.ppc.mma.xvbf16ger2pn
ppc_mma_xvbf16ger2pp, // llvm.ppc.mma.xvbf16ger2pp
ppc_mma_xvf16ger2, // llvm.ppc.mma.xvf16ger2
ppc_mma_xvf16ger2nn, // llvm.ppc.mma.xvf16ger2nn
ppc_mma_xvf16ger2np, // llvm.ppc.mma.xvf16ger2np
ppc_mma_xvf16ger2pn, // llvm.ppc.mma.xvf16ger2pn
ppc_mma_xvf16ger2pp, // llvm.ppc.mma.xvf16ger2pp
ppc_mma_xvf32ger, // llvm.ppc.mma.xvf32ger
ppc_mma_xvf32gernn, // llvm.ppc.mma.xvf32gernn
ppc_mma_xvf32gernp, // llvm.ppc.mma.xvf32gernp
ppc_mma_xvf32gerpn, // llvm.ppc.mma.xvf32gerpn
ppc_mma_xvf32gerpp, // llvm.ppc.mma.xvf32gerpp
ppc_mma_xvf64ger, // llvm.ppc.mma.xvf64ger
ppc_mma_xvf64gernn, // llvm.ppc.mma.xvf64gernn
ppc_mma_xvf64gernp, // llvm.ppc.mma.xvf64gernp
ppc_mma_xvf64gerpn, // llvm.ppc.mma.xvf64gerpn
ppc_mma_xvf64gerpp, // llvm.ppc.mma.xvf64gerpp
ppc_mma_xvi16ger2, // llvm.ppc.mma.xvi16ger2
ppc_mma_xvi16ger2pp, // llvm.ppc.mma.xvi16ger2pp
ppc_mma_xvi16ger2s, // llvm.ppc.mma.xvi16ger2s
ppc_mma_xvi16ger2spp, // llvm.ppc.mma.xvi16ger2spp
ppc_mma_xvi4ger8, // llvm.ppc.mma.xvi4ger8
ppc_mma_xvi4ger8pp, // llvm.ppc.mma.xvi4ger8pp
ppc_mma_xvi8ger4, // llvm.ppc.mma.xvi8ger4
ppc_mma_xvi8ger4pp, // llvm.ppc.mma.xvi8ger4pp
ppc_mma_xvi8ger4spp, // llvm.ppc.mma.xvi8ger4spp
ppc_mma_xxmfacc, // llvm.ppc.mma.xxmfacc
ppc_mma_xxmtacc, // llvm.ppc.mma.xxmtacc
ppc_mma_xxsetaccz, // llvm.ppc.mma.xxsetaccz
ppc_mtfsb0, // llvm.ppc.mtfsb0
ppc_mtfsb1, // llvm.ppc.mtfsb1
ppc_mtfsf, // llvm.ppc.mtfsf
ppc_mtfsfi, // llvm.ppc.mtfsfi
ppc_mtmsr, // llvm.ppc.mtmsr
ppc_mtspr, // llvm.ppc.mtspr
ppc_mulf128_round_to_odd, // llvm.ppc.mulf128.round.to.odd
ppc_mulhd, // llvm.ppc.mulhd
ppc_mulhdu, // llvm.ppc.mulhdu
ppc_mulhw, // llvm.ppc.mulhw
ppc_mulhwu, // llvm.ppc.mulhwu
ppc_pack_longdouble, // llvm.ppc.pack.longdouble
ppc_pdepd, // llvm.ppc.pdepd
ppc_pextd, // llvm.ppc.pextd
ppc_popcntb, // llvm.ppc.popcntb
ppc_readflm, // llvm.ppc.readflm
ppc_scalar_extract_expq, // llvm.ppc.scalar.extract.expq
ppc_scalar_insert_exp_qp, // llvm.ppc.scalar.insert.exp.qp
ppc_set_texasr, // llvm.ppc.set.texasr
ppc_set_texasru, // llvm.ppc.set.texasru
ppc_set_tfhar, // llvm.ppc.set.tfhar
ppc_set_tfiar, // llvm.ppc.set.tfiar
ppc_setb, // llvm.ppc.setb
ppc_setflm, // llvm.ppc.setflm
ppc_setrnd, // llvm.ppc.setrnd
ppc_sqrtf128_round_to_odd, // llvm.ppc.sqrtf128.round.to.odd
ppc_stbcx, // llvm.ppc.stbcx
ppc_stdcx, // llvm.ppc.stdcx
ppc_stfiw, // llvm.ppc.stfiw
ppc_sthcx, // llvm.ppc.sthcx
ppc_store2r, // llvm.ppc.store2r
ppc_store4r, // llvm.ppc.store4r
ppc_store8r, // llvm.ppc.store8r
ppc_stwcx, // llvm.ppc.stwcx
ppc_subf128_round_to_odd, // llvm.ppc.subf128.round.to.odd
ppc_sync, // llvm.ppc.sync
ppc_tabort, // llvm.ppc.tabort
ppc_tabortdc, // llvm.ppc.tabortdc
ppc_tabortdci, // llvm.ppc.tabortdci
ppc_tabortwc, // llvm.ppc.tabortwc
ppc_tabortwci, // llvm.ppc.tabortwci
ppc_tbegin, // llvm.ppc.tbegin
ppc_tcheck, // llvm.ppc.tcheck
ppc_tdw, // llvm.ppc.tdw
ppc_tend, // llvm.ppc.tend
ppc_tendall, // llvm.ppc.tendall
ppc_test_data_class_d, // llvm.ppc.test.data.class.d
ppc_test_data_class_f, // llvm.ppc.test.data.class.f
ppc_trap, // llvm.ppc.trap
ppc_trapd, // llvm.ppc.trapd
ppc_trechkpt, // llvm.ppc.trechkpt
ppc_treclaim, // llvm.ppc.treclaim
ppc_tresume, // llvm.ppc.tresume
ppc_truncf128_round_to_odd, // llvm.ppc.truncf128.round.to.odd
ppc_tsr, // llvm.ppc.tsr
ppc_tsuspend, // llvm.ppc.tsuspend
ppc_ttest, // llvm.ppc.ttest
ppc_tw, // llvm.ppc.tw
ppc_unpack_longdouble, // llvm.ppc.unpack.longdouble
ppc_vsx_assemble_pair, // llvm.ppc.vsx.assemble.pair
ppc_vsx_disassemble_pair, // llvm.ppc.vsx.disassemble.pair
ppc_vsx_lxvd2x, // llvm.ppc.vsx.lxvd2x
ppc_vsx_lxvd2x_be, // llvm.ppc.vsx.lxvd2x.be
ppc_vsx_lxvl, // llvm.ppc.vsx.lxvl
ppc_vsx_lxvll, // llvm.ppc.vsx.lxvll
ppc_vsx_lxvp, // llvm.ppc.vsx.lxvp
ppc_vsx_lxvw4x, // llvm.ppc.vsx.lxvw4x
ppc_vsx_lxvw4x_be, // llvm.ppc.vsx.lxvw4x.be
ppc_vsx_stxvd2x, // llvm.ppc.vsx.stxvd2x
ppc_vsx_stxvd2x_be, // llvm.ppc.vsx.stxvd2x.be
ppc_vsx_stxvl, // llvm.ppc.vsx.stxvl
ppc_vsx_stxvll, // llvm.ppc.vsx.stxvll
ppc_vsx_stxvp, // llvm.ppc.vsx.stxvp
ppc_vsx_stxvw4x, // llvm.ppc.vsx.stxvw4x
ppc_vsx_stxvw4x_be, // llvm.ppc.vsx.stxvw4x.be
ppc_vsx_xsmaxdp, // llvm.ppc.vsx.xsmaxdp
ppc_vsx_xsmindp, // llvm.ppc.vsx.xsmindp
ppc_vsx_xvcmpeqdp, // llvm.ppc.vsx.xvcmpeqdp
ppc_vsx_xvcmpeqdp_p, // llvm.ppc.vsx.xvcmpeqdp.p
ppc_vsx_xvcmpeqsp, // llvm.ppc.vsx.xvcmpeqsp
ppc_vsx_xvcmpeqsp_p, // llvm.ppc.vsx.xvcmpeqsp.p
ppc_vsx_xvcmpgedp, // llvm.ppc.vsx.xvcmpgedp
ppc_vsx_xvcmpgedp_p, // llvm.ppc.vsx.xvcmpgedp.p
ppc_vsx_xvcmpgesp, // llvm.ppc.vsx.xvcmpgesp
ppc_vsx_xvcmpgesp_p, // llvm.ppc.vsx.xvcmpgesp.p
ppc_vsx_xvcmpgtdp, // llvm.ppc.vsx.xvcmpgtdp
ppc_vsx_xvcmpgtdp_p, // llvm.ppc.vsx.xvcmpgtdp.p
ppc_vsx_xvcmpgtsp, // llvm.ppc.vsx.xvcmpgtsp
ppc_vsx_xvcmpgtsp_p, // llvm.ppc.vsx.xvcmpgtsp.p
ppc_vsx_xvcvbf16spn, // llvm.ppc.vsx.xvcvbf16spn
ppc_vsx_xvcvdpsp, // llvm.ppc.vsx.xvcvdpsp
ppc_vsx_xvcvdpsxws, // llvm.ppc.vsx.xvcvdpsxws
ppc_vsx_xvcvdpuxws, // llvm.ppc.vsx.xvcvdpuxws
ppc_vsx_xvcvhpsp, // llvm.ppc.vsx.xvcvhpsp
ppc_vsx_xvcvspbf16, // llvm.ppc.vsx.xvcvspbf16
ppc_vsx_xvcvspdp, // llvm.ppc.vsx.xvcvspdp
ppc_vsx_xvcvsphp, // llvm.ppc.vsx.xvcvsphp
ppc_vsx_xvcvspsxds, // llvm.ppc.vsx.xvcvspsxds
ppc_vsx_xvcvspuxds, // llvm.ppc.vsx.xvcvspuxds
ppc_vsx_xvcvsxdsp, // llvm.ppc.vsx.xvcvsxdsp
ppc_vsx_xvcvsxwdp, // llvm.ppc.vsx.xvcvsxwdp
ppc_vsx_xvcvuxdsp, // llvm.ppc.vsx.xvcvuxdsp
ppc_vsx_xvcvuxwdp, // llvm.ppc.vsx.xvcvuxwdp
ppc_vsx_xvdivdp, // llvm.ppc.vsx.xvdivdp
ppc_vsx_xvdivsp, // llvm.ppc.vsx.xvdivsp
ppc_vsx_xviexpdp, // llvm.ppc.vsx.xviexpdp
ppc_vsx_xviexpsp, // llvm.ppc.vsx.xviexpsp
ppc_vsx_xvmaxdp, // llvm.ppc.vsx.xvmaxdp
ppc_vsx_xvmaxsp, // llvm.ppc.vsx.xvmaxsp
ppc_vsx_xvmindp, // llvm.ppc.vsx.xvmindp
ppc_vsx_xvminsp, // llvm.ppc.vsx.xvminsp
ppc_vsx_xvrdpip, // llvm.ppc.vsx.xvrdpip
ppc_vsx_xvredp, // llvm.ppc.vsx.xvredp
ppc_vsx_xvresp, // llvm.ppc.vsx.xvresp
ppc_vsx_xvrspip, // llvm.ppc.vsx.xvrspip
ppc_vsx_xvrsqrtedp, // llvm.ppc.vsx.xvrsqrtedp
ppc_vsx_xvrsqrtesp, // llvm.ppc.vsx.xvrsqrtesp
ppc_vsx_xvtdivdp, // llvm.ppc.vsx.xvtdivdp
ppc_vsx_xvtdivsp, // llvm.ppc.vsx.xvtdivsp
ppc_vsx_xvtlsbb, // llvm.ppc.vsx.xvtlsbb
ppc_vsx_xvtsqrtdp, // llvm.ppc.vsx.xvtsqrtdp
ppc_vsx_xvtsqrtsp, // llvm.ppc.vsx.xvtsqrtsp
ppc_vsx_xvtstdcdp, // llvm.ppc.vsx.xvtstdcdp
ppc_vsx_xvtstdcsp, // llvm.ppc.vsx.xvtstdcsp
ppc_vsx_xvxexpdp, // llvm.ppc.vsx.xvxexpdp
ppc_vsx_xvxexpsp, // llvm.ppc.vsx.xvxexpsp
ppc_vsx_xvxsigdp, // llvm.ppc.vsx.xvxsigdp
ppc_vsx_xvxsigsp, // llvm.ppc.vsx.xvxsigsp
ppc_vsx_xxblendvb, // llvm.ppc.vsx.xxblendvb
ppc_vsx_xxblendvd, // llvm.ppc.vsx.xxblendvd
ppc_vsx_xxblendvh, // llvm.ppc.vsx.xxblendvh
ppc_vsx_xxblendvw, // llvm.ppc.vsx.xxblendvw
ppc_vsx_xxeval, // llvm.ppc.vsx.xxeval
ppc_vsx_xxextractuw, // llvm.ppc.vsx.xxextractuw
ppc_vsx_xxgenpcvbm, // llvm.ppc.vsx.xxgenpcvbm
ppc_vsx_xxgenpcvdm, // llvm.ppc.vsx.xxgenpcvdm
ppc_vsx_xxgenpcvhm, // llvm.ppc.vsx.xxgenpcvhm
ppc_vsx_xxgenpcvwm, // llvm.ppc.vsx.xxgenpcvwm
ppc_vsx_xxinsertw, // llvm.ppc.vsx.xxinsertw
ppc_vsx_xxleqv, // llvm.ppc.vsx.xxleqv
ppc_vsx_xxpermx, // llvm.ppc.vsx.xxpermx
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
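
As with the other targets, an ID from this list can be used to emit a call directly. A minimal sketch, assuming an IRBuilder already positioned inside a function of module M; the function name is hypothetical:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsPowerPC.h"
#include "llvm/IR/Module.h"

// Emit a call to llvm.ppc.darn (deliver-a-random-number) at the builder's
// current insertion point and return the resulting value.
llvm::Value *emitDarn(llvm::Module &M, llvm::IRBuilder<> &B) {
  llvm::Function *Darn =
      llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::ppc_darn);
  return B.CreateCall(Darn);
}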

File diff suppressed because it is too large

View File

@@ -0,0 +1,55 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_R600_ENUMS_H
#define LLVM_IR_INTRINSIC_R600_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum R600Intrinsics : unsigned {
// Enum values for intrinsics
r600_cube = 7227, // llvm.r600.cube
r600_ddx, // llvm.r600.ddx
r600_ddy, // llvm.r600.ddy
r600_dot4, // llvm.r600.dot4
r600_group_barrier, // llvm.r600.group.barrier
r600_implicitarg_ptr, // llvm.r600.implicitarg.ptr
r600_kill, // llvm.r600.kill
r600_rat_store_typed, // llvm.r600.rat.store.typed
r600_read_global_size_x, // llvm.r600.read.global.size.x
r600_read_global_size_y, // llvm.r600.read.global.size.y
r600_read_global_size_z, // llvm.r600.read.global.size.z
r600_read_local_size_x, // llvm.r600.read.local.size.x
r600_read_local_size_y, // llvm.r600.read.local.size.y
r600_read_local_size_z, // llvm.r600.read.local.size.z
r600_read_ngroups_x, // llvm.r600.read.ngroups.x
r600_read_ngroups_y, // llvm.r600.read.ngroups.y
r600_read_ngroups_z, // llvm.r600.read.ngroups.z
r600_read_tgid_x, // llvm.r600.read.tgid.x
r600_read_tgid_y, // llvm.r600.read.tgid.y
r600_read_tgid_z, // llvm.r600.read.tgid.z
r600_read_tidig_x, // llvm.r600.read.tidig.x
r600_read_tidig_y, // llvm.r600.read.tidig.y
r600_read_tidig_z, // llvm.r600.read.tidig.z
r600_recipsqrt_clamped, // llvm.r600.recipsqrt.clamped
r600_recipsqrt_ieee, // llvm.r600.recipsqrt.ieee
r600_store_stream_output, // llvm.r600.store.stream.output
r600_store_swizzle, // llvm.r600.store.swizzle
r600_tex, // llvm.r600.tex
r600_texc, // llvm.r600.texc
r600_txb, // llvm.r600.txb
r600_txbc, // llvm.r600.txbc
r600_txf, // llvm.r600.txf
r600_txl, // llvm.r600.txl
r600_txlc, // llvm.r600.txlc
r600_txq, // llvm.r600.txq
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
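
Going the other way, a pass can recognize these intrinsics in existing IR by comparing against the enum. A minimal sketch; the predicate name is hypothetical:

#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsR600.h"

// True iff CB is a call to llvm.r600.group.barrier. getIntrinsicID()
// returns Intrinsic::not_intrinsic for calls to ordinary functions.
bool isGroupBarrier(const llvm::CallBase &CB) {
  return CB.getIntrinsicID() == llvm::Intrinsic::r600_group_barrier;
}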

View File

@@ -0,0 +1,591 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_RISCV_ENUMS_H
#define LLVM_IR_INTRINSIC_RISCV_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum RISCVIntrinsics : unsigned {
// Enum values for intrinsics
riscv_aes32dsi = 7262, // llvm.riscv.aes32dsi
riscv_aes32dsmi, // llvm.riscv.aes32dsmi
riscv_aes32esi, // llvm.riscv.aes32esi
riscv_aes32esmi, // llvm.riscv.aes32esmi
riscv_aes64ds, // llvm.riscv.aes64ds
riscv_aes64dsm, // llvm.riscv.aes64dsm
riscv_aes64es, // llvm.riscv.aes64es
riscv_aes64esm, // llvm.riscv.aes64esm
riscv_aes64im, // llvm.riscv.aes64im
riscv_aes64ks1i, // llvm.riscv.aes64ks1i
riscv_aes64ks2, // llvm.riscv.aes64ks2
riscv_bcompress, // llvm.riscv.bcompress
riscv_bdecompress, // llvm.riscv.bdecompress
riscv_bfp, // llvm.riscv.bfp
riscv_brev8, // llvm.riscv.brev8
riscv_clmul, // llvm.riscv.clmul
riscv_clmulh, // llvm.riscv.clmulh
riscv_clmulr, // llvm.riscv.clmulr
riscv_crc32_b, // llvm.riscv.crc32.b
riscv_crc32_d, // llvm.riscv.crc32.d
riscv_crc32_h, // llvm.riscv.crc32.h
riscv_crc32_w, // llvm.riscv.crc32.w
riscv_crc32c_b, // llvm.riscv.crc32c.b
riscv_crc32c_d, // llvm.riscv.crc32c.d
riscv_crc32c_h, // llvm.riscv.crc32c.h
riscv_crc32c_w, // llvm.riscv.crc32c.w
riscv_fsl, // llvm.riscv.fsl
riscv_fsr, // llvm.riscv.fsr
riscv_gorc, // llvm.riscv.gorc
riscv_grev, // llvm.riscv.grev
riscv_masked_atomicrmw_add_i32, // llvm.riscv.masked.atomicrmw.add.i32
riscv_masked_atomicrmw_add_i64, // llvm.riscv.masked.atomicrmw.add.i64
riscv_masked_atomicrmw_max_i32, // llvm.riscv.masked.atomicrmw.max.i32
riscv_masked_atomicrmw_max_i64, // llvm.riscv.masked.atomicrmw.max.i64
riscv_masked_atomicrmw_min_i32, // llvm.riscv.masked.atomicrmw.min.i32
riscv_masked_atomicrmw_min_i64, // llvm.riscv.masked.atomicrmw.min.i64
riscv_masked_atomicrmw_nand_i32, // llvm.riscv.masked.atomicrmw.nand.i32
riscv_masked_atomicrmw_nand_i64, // llvm.riscv.masked.atomicrmw.nand.i64
riscv_masked_atomicrmw_sub_i32, // llvm.riscv.masked.atomicrmw.sub.i32
riscv_masked_atomicrmw_sub_i64, // llvm.riscv.masked.atomicrmw.sub.i64
riscv_masked_atomicrmw_umax_i32, // llvm.riscv.masked.atomicrmw.umax.i32
riscv_masked_atomicrmw_umax_i64, // llvm.riscv.masked.atomicrmw.umax.i64
riscv_masked_atomicrmw_umin_i32, // llvm.riscv.masked.atomicrmw.umin.i32
riscv_masked_atomicrmw_umin_i64, // llvm.riscv.masked.atomicrmw.umin.i64
riscv_masked_atomicrmw_xchg_i32, // llvm.riscv.masked.atomicrmw.xchg.i32
riscv_masked_atomicrmw_xchg_i64, // llvm.riscv.masked.atomicrmw.xchg.i64
riscv_masked_cmpxchg_i32, // llvm.riscv.masked.cmpxchg.i32
riscv_masked_cmpxchg_i64, // llvm.riscv.masked.cmpxchg.i64
riscv_masked_strided_load, // llvm.riscv.masked.strided.load
riscv_masked_strided_store, // llvm.riscv.masked.strided.store
riscv_orc_b, // llvm.riscv.orc.b
riscv_sha256sig0, // llvm.riscv.sha256sig0
riscv_sha256sig1, // llvm.riscv.sha256sig1
riscv_sha256sum0, // llvm.riscv.sha256sum0
riscv_sha256sum1, // llvm.riscv.sha256sum1
riscv_sha512sig0, // llvm.riscv.sha512sig0
riscv_sha512sig0h, // llvm.riscv.sha512sig0h
riscv_sha512sig0l, // llvm.riscv.sha512sig0l
riscv_sha512sig1, // llvm.riscv.sha512sig1
riscv_sha512sig1h, // llvm.riscv.sha512sig1h
riscv_sha512sig1l, // llvm.riscv.sha512sig1l
riscv_sha512sum0, // llvm.riscv.sha512sum0
riscv_sha512sum0r, // llvm.riscv.sha512sum0r
riscv_sha512sum1, // llvm.riscv.sha512sum1
riscv_sha512sum1r, // llvm.riscv.sha512sum1r
riscv_shfl, // llvm.riscv.shfl
riscv_sm3p0, // llvm.riscv.sm3p0
riscv_sm3p1, // llvm.riscv.sm3p1
riscv_sm4ed, // llvm.riscv.sm4ed
riscv_sm4ks, // llvm.riscv.sm4ks
riscv_unshfl, // llvm.riscv.unshfl
riscv_unzip, // llvm.riscv.unzip
riscv_vaadd, // llvm.riscv.vaadd
riscv_vaadd_mask, // llvm.riscv.vaadd.mask
riscv_vaaddu, // llvm.riscv.vaaddu
riscv_vaaddu_mask, // llvm.riscv.vaaddu.mask
riscv_vadc, // llvm.riscv.vadc
riscv_vadd, // llvm.riscv.vadd
riscv_vadd_mask, // llvm.riscv.vadd.mask
riscv_vand, // llvm.riscv.vand
riscv_vand_mask, // llvm.riscv.vand.mask
riscv_vasub, // llvm.riscv.vasub
riscv_vasub_mask, // llvm.riscv.vasub.mask
riscv_vasubu, // llvm.riscv.vasubu
riscv_vasubu_mask, // llvm.riscv.vasubu.mask
riscv_vcompress, // llvm.riscv.vcompress
riscv_vcpop, // llvm.riscv.vcpop
riscv_vcpop_mask, // llvm.riscv.vcpop.mask
riscv_vdiv, // llvm.riscv.vdiv
riscv_vdiv_mask, // llvm.riscv.vdiv.mask
riscv_vdivu, // llvm.riscv.vdivu
riscv_vdivu_mask, // llvm.riscv.vdivu.mask
riscv_vfadd, // llvm.riscv.vfadd
riscv_vfadd_mask, // llvm.riscv.vfadd.mask
riscv_vfclass, // llvm.riscv.vfclass
riscv_vfclass_mask, // llvm.riscv.vfclass.mask
riscv_vfcvt_f_x_v, // llvm.riscv.vfcvt.f.x.v
riscv_vfcvt_f_x_v_mask, // llvm.riscv.vfcvt.f.x.v.mask
riscv_vfcvt_f_xu_v, // llvm.riscv.vfcvt.f.xu.v
riscv_vfcvt_f_xu_v_mask, // llvm.riscv.vfcvt.f.xu.v.mask
riscv_vfcvt_rtz_x_f_v, // llvm.riscv.vfcvt.rtz.x.f.v
riscv_vfcvt_rtz_x_f_v_mask, // llvm.riscv.vfcvt.rtz.x.f.v.mask
riscv_vfcvt_rtz_xu_f_v, // llvm.riscv.vfcvt.rtz.xu.f.v
riscv_vfcvt_rtz_xu_f_v_mask, // llvm.riscv.vfcvt.rtz.xu.f.v.mask
riscv_vfcvt_x_f_v, // llvm.riscv.vfcvt.x.f.v
riscv_vfcvt_x_f_v_mask, // llvm.riscv.vfcvt.x.f.v.mask
riscv_vfcvt_xu_f_v, // llvm.riscv.vfcvt.xu.f.v
riscv_vfcvt_xu_f_v_mask, // llvm.riscv.vfcvt.xu.f.v.mask
riscv_vfdiv, // llvm.riscv.vfdiv
riscv_vfdiv_mask, // llvm.riscv.vfdiv.mask
riscv_vfirst, // llvm.riscv.vfirst
riscv_vfirst_mask, // llvm.riscv.vfirst.mask
riscv_vfmacc, // llvm.riscv.vfmacc
riscv_vfmacc_mask, // llvm.riscv.vfmacc.mask
riscv_vfmadd, // llvm.riscv.vfmadd
riscv_vfmadd_mask, // llvm.riscv.vfmadd.mask
riscv_vfmax, // llvm.riscv.vfmax
riscv_vfmax_mask, // llvm.riscv.vfmax.mask
riscv_vfmerge, // llvm.riscv.vfmerge
riscv_vfmin, // llvm.riscv.vfmin
riscv_vfmin_mask, // llvm.riscv.vfmin.mask
riscv_vfmsac, // llvm.riscv.vfmsac
riscv_vfmsac_mask, // llvm.riscv.vfmsac.mask
riscv_vfmsub, // llvm.riscv.vfmsub
riscv_vfmsub_mask, // llvm.riscv.vfmsub.mask
riscv_vfmul, // llvm.riscv.vfmul
riscv_vfmul_mask, // llvm.riscv.vfmul.mask
riscv_vfmv_f_s, // llvm.riscv.vfmv.f.s
riscv_vfmv_s_f, // llvm.riscv.vfmv.s.f
riscv_vfmv_v_f, // llvm.riscv.vfmv.v.f
riscv_vfncvt_f_f_w, // llvm.riscv.vfncvt.f.f.w
riscv_vfncvt_f_f_w_mask, // llvm.riscv.vfncvt.f.f.w.mask
riscv_vfncvt_f_x_w, // llvm.riscv.vfncvt.f.x.w
riscv_vfncvt_f_x_w_mask, // llvm.riscv.vfncvt.f.x.w.mask
riscv_vfncvt_f_xu_w, // llvm.riscv.vfncvt.f.xu.w
riscv_vfncvt_f_xu_w_mask, // llvm.riscv.vfncvt.f.xu.w.mask
riscv_vfncvt_rod_f_f_w, // llvm.riscv.vfncvt.rod.f.f.w
riscv_vfncvt_rod_f_f_w_mask, // llvm.riscv.vfncvt.rod.f.f.w.mask
riscv_vfncvt_rtz_x_f_w, // llvm.riscv.vfncvt.rtz.x.f.w
riscv_vfncvt_rtz_x_f_w_mask, // llvm.riscv.vfncvt.rtz.x.f.w.mask
riscv_vfncvt_rtz_xu_f_w, // llvm.riscv.vfncvt.rtz.xu.f.w
riscv_vfncvt_rtz_xu_f_w_mask, // llvm.riscv.vfncvt.rtz.xu.f.w.mask
riscv_vfncvt_x_f_w, // llvm.riscv.vfncvt.x.f.w
riscv_vfncvt_x_f_w_mask, // llvm.riscv.vfncvt.x.f.w.mask
riscv_vfncvt_xu_f_w, // llvm.riscv.vfncvt.xu.f.w
riscv_vfncvt_xu_f_w_mask, // llvm.riscv.vfncvt.xu.f.w.mask
riscv_vfnmacc, // llvm.riscv.vfnmacc
riscv_vfnmacc_mask, // llvm.riscv.vfnmacc.mask
riscv_vfnmadd, // llvm.riscv.vfnmadd
riscv_vfnmadd_mask, // llvm.riscv.vfnmadd.mask
riscv_vfnmsac, // llvm.riscv.vfnmsac
riscv_vfnmsac_mask, // llvm.riscv.vfnmsac.mask
riscv_vfnmsub, // llvm.riscv.vfnmsub
riscv_vfnmsub_mask, // llvm.riscv.vfnmsub.mask
riscv_vfrdiv, // llvm.riscv.vfrdiv
riscv_vfrdiv_mask, // llvm.riscv.vfrdiv.mask
riscv_vfrec7, // llvm.riscv.vfrec7
riscv_vfrec7_mask, // llvm.riscv.vfrec7.mask
riscv_vfredmax, // llvm.riscv.vfredmax
riscv_vfredmax_mask, // llvm.riscv.vfredmax.mask
riscv_vfredmin, // llvm.riscv.vfredmin
riscv_vfredmin_mask, // llvm.riscv.vfredmin.mask
riscv_vfredosum, // llvm.riscv.vfredosum
riscv_vfredosum_mask, // llvm.riscv.vfredosum.mask
riscv_vfredusum, // llvm.riscv.vfredusum
riscv_vfredusum_mask, // llvm.riscv.vfredusum.mask
riscv_vfrsqrt7, // llvm.riscv.vfrsqrt7
riscv_vfrsqrt7_mask, // llvm.riscv.vfrsqrt7.mask
riscv_vfrsub, // llvm.riscv.vfrsub
riscv_vfrsub_mask, // llvm.riscv.vfrsub.mask
riscv_vfsgnj, // llvm.riscv.vfsgnj
riscv_vfsgnj_mask, // llvm.riscv.vfsgnj.mask
riscv_vfsgnjn, // llvm.riscv.vfsgnjn
riscv_vfsgnjn_mask, // llvm.riscv.vfsgnjn.mask
riscv_vfsgnjx, // llvm.riscv.vfsgnjx
riscv_vfsgnjx_mask, // llvm.riscv.vfsgnjx.mask
riscv_vfslide1down, // llvm.riscv.vfslide1down
riscv_vfslide1down_mask, // llvm.riscv.vfslide1down.mask
riscv_vfslide1up, // llvm.riscv.vfslide1up
riscv_vfslide1up_mask, // llvm.riscv.vfslide1up.mask
riscv_vfsqrt, // llvm.riscv.vfsqrt
riscv_vfsqrt_mask, // llvm.riscv.vfsqrt.mask
riscv_vfsub, // llvm.riscv.vfsub
riscv_vfsub_mask, // llvm.riscv.vfsub.mask
riscv_vfwadd, // llvm.riscv.vfwadd
riscv_vfwadd_mask, // llvm.riscv.vfwadd.mask
riscv_vfwadd_w, // llvm.riscv.vfwadd.w
riscv_vfwadd_w_mask, // llvm.riscv.vfwadd.w.mask
riscv_vfwcvt_f_f_v, // llvm.riscv.vfwcvt.f.f.v
riscv_vfwcvt_f_f_v_mask, // llvm.riscv.vfwcvt.f.f.v.mask
riscv_vfwcvt_f_x_v, // llvm.riscv.vfwcvt.f.x.v
riscv_vfwcvt_f_x_v_mask, // llvm.riscv.vfwcvt.f.x.v.mask
riscv_vfwcvt_f_xu_v, // llvm.riscv.vfwcvt.f.xu.v
riscv_vfwcvt_f_xu_v_mask, // llvm.riscv.vfwcvt.f.xu.v.mask
riscv_vfwcvt_rtz_x_f_v, // llvm.riscv.vfwcvt.rtz.x.f.v
riscv_vfwcvt_rtz_x_f_v_mask, // llvm.riscv.vfwcvt.rtz.x.f.v.mask
riscv_vfwcvt_rtz_xu_f_v, // llvm.riscv.vfwcvt.rtz.xu.f.v
riscv_vfwcvt_rtz_xu_f_v_mask, // llvm.riscv.vfwcvt.rtz.xu.f.v.mask
riscv_vfwcvt_x_f_v, // llvm.riscv.vfwcvt.x.f.v
riscv_vfwcvt_x_f_v_mask, // llvm.riscv.vfwcvt.x.f.v.mask
riscv_vfwcvt_xu_f_v, // llvm.riscv.vfwcvt.xu.f.v
riscv_vfwcvt_xu_f_v_mask, // llvm.riscv.vfwcvt.xu.f.v.mask
riscv_vfwmacc, // llvm.riscv.vfwmacc
riscv_vfwmacc_mask, // llvm.riscv.vfwmacc.mask
riscv_vfwmsac, // llvm.riscv.vfwmsac
riscv_vfwmsac_mask, // llvm.riscv.vfwmsac.mask
riscv_vfwmul, // llvm.riscv.vfwmul
riscv_vfwmul_mask, // llvm.riscv.vfwmul.mask
riscv_vfwnmacc, // llvm.riscv.vfwnmacc
riscv_vfwnmacc_mask, // llvm.riscv.vfwnmacc.mask
riscv_vfwnmsac, // llvm.riscv.vfwnmsac
riscv_vfwnmsac_mask, // llvm.riscv.vfwnmsac.mask
riscv_vfwredosum, // llvm.riscv.vfwredosum
riscv_vfwredosum_mask, // llvm.riscv.vfwredosum.mask
riscv_vfwredusum, // llvm.riscv.vfwredusum
riscv_vfwredusum_mask, // llvm.riscv.vfwredusum.mask
riscv_vfwsub, // llvm.riscv.vfwsub
riscv_vfwsub_mask, // llvm.riscv.vfwsub.mask
riscv_vfwsub_w, // llvm.riscv.vfwsub.w
riscv_vfwsub_w_mask, // llvm.riscv.vfwsub.w.mask
riscv_vid, // llvm.riscv.vid
riscv_vid_mask, // llvm.riscv.vid.mask
riscv_viota, // llvm.riscv.viota
riscv_viota_mask, // llvm.riscv.viota.mask
riscv_vle, // llvm.riscv.vle
riscv_vle_mask, // llvm.riscv.vle.mask
riscv_vleff, // llvm.riscv.vleff
riscv_vleff_mask, // llvm.riscv.vleff.mask
riscv_vlm, // llvm.riscv.vlm
riscv_vloxei, // llvm.riscv.vloxei
riscv_vloxei_mask, // llvm.riscv.vloxei.mask
riscv_vloxseg2, // llvm.riscv.vloxseg2
riscv_vloxseg2_mask, // llvm.riscv.vloxseg2.mask
riscv_vloxseg3, // llvm.riscv.vloxseg3
riscv_vloxseg3_mask, // llvm.riscv.vloxseg3.mask
riscv_vloxseg4, // llvm.riscv.vloxseg4
riscv_vloxseg4_mask, // llvm.riscv.vloxseg4.mask
riscv_vloxseg5, // llvm.riscv.vloxseg5
riscv_vloxseg5_mask, // llvm.riscv.vloxseg5.mask
riscv_vloxseg6, // llvm.riscv.vloxseg6
riscv_vloxseg6_mask, // llvm.riscv.vloxseg6.mask
riscv_vloxseg7, // llvm.riscv.vloxseg7
riscv_vloxseg7_mask, // llvm.riscv.vloxseg7.mask
riscv_vloxseg8, // llvm.riscv.vloxseg8
riscv_vloxseg8_mask, // llvm.riscv.vloxseg8.mask
riscv_vlse, // llvm.riscv.vlse
riscv_vlse_mask, // llvm.riscv.vlse.mask
riscv_vlseg2, // llvm.riscv.vlseg2
riscv_vlseg2_mask, // llvm.riscv.vlseg2.mask
riscv_vlseg2ff, // llvm.riscv.vlseg2ff
riscv_vlseg2ff_mask, // llvm.riscv.vlseg2ff.mask
riscv_vlseg3, // llvm.riscv.vlseg3
riscv_vlseg3_mask, // llvm.riscv.vlseg3.mask
riscv_vlseg3ff, // llvm.riscv.vlseg3ff
riscv_vlseg3ff_mask, // llvm.riscv.vlseg3ff.mask
riscv_vlseg4, // llvm.riscv.vlseg4
riscv_vlseg4_mask, // llvm.riscv.vlseg4.mask
riscv_vlseg4ff, // llvm.riscv.vlseg4ff
riscv_vlseg4ff_mask, // llvm.riscv.vlseg4ff.mask
riscv_vlseg5, // llvm.riscv.vlseg5
riscv_vlseg5_mask, // llvm.riscv.vlseg5.mask
riscv_vlseg5ff, // llvm.riscv.vlseg5ff
riscv_vlseg5ff_mask, // llvm.riscv.vlseg5ff.mask
riscv_vlseg6, // llvm.riscv.vlseg6
riscv_vlseg6_mask, // llvm.riscv.vlseg6.mask
riscv_vlseg6ff, // llvm.riscv.vlseg6ff
riscv_vlseg6ff_mask, // llvm.riscv.vlseg6ff.mask
riscv_vlseg7, // llvm.riscv.vlseg7
riscv_vlseg7_mask, // llvm.riscv.vlseg7.mask
riscv_vlseg7ff, // llvm.riscv.vlseg7ff
riscv_vlseg7ff_mask, // llvm.riscv.vlseg7ff.mask
riscv_vlseg8, // llvm.riscv.vlseg8
riscv_vlseg8_mask, // llvm.riscv.vlseg8.mask
riscv_vlseg8ff, // llvm.riscv.vlseg8ff
riscv_vlseg8ff_mask, // llvm.riscv.vlseg8ff.mask
riscv_vlsseg2, // llvm.riscv.vlsseg2
riscv_vlsseg2_mask, // llvm.riscv.vlsseg2.mask
riscv_vlsseg3, // llvm.riscv.vlsseg3
riscv_vlsseg3_mask, // llvm.riscv.vlsseg3.mask
riscv_vlsseg4, // llvm.riscv.vlsseg4
riscv_vlsseg4_mask, // llvm.riscv.vlsseg4.mask
riscv_vlsseg5, // llvm.riscv.vlsseg5
riscv_vlsseg5_mask, // llvm.riscv.vlsseg5.mask
riscv_vlsseg6, // llvm.riscv.vlsseg6
riscv_vlsseg6_mask, // llvm.riscv.vlsseg6.mask
riscv_vlsseg7, // llvm.riscv.vlsseg7
riscv_vlsseg7_mask, // llvm.riscv.vlsseg7.mask
riscv_vlsseg8, // llvm.riscv.vlsseg8
riscv_vlsseg8_mask, // llvm.riscv.vlsseg8.mask
riscv_vluxei, // llvm.riscv.vluxei
riscv_vluxei_mask, // llvm.riscv.vluxei.mask
riscv_vluxseg2, // llvm.riscv.vluxseg2
riscv_vluxseg2_mask, // llvm.riscv.vluxseg2.mask
riscv_vluxseg3, // llvm.riscv.vluxseg3
riscv_vluxseg3_mask, // llvm.riscv.vluxseg3.mask
riscv_vluxseg4, // llvm.riscv.vluxseg4
riscv_vluxseg4_mask, // llvm.riscv.vluxseg4.mask
riscv_vluxseg5, // llvm.riscv.vluxseg5
riscv_vluxseg5_mask, // llvm.riscv.vluxseg5.mask
riscv_vluxseg6, // llvm.riscv.vluxseg6
riscv_vluxseg6_mask, // llvm.riscv.vluxseg6.mask
riscv_vluxseg7, // llvm.riscv.vluxseg7
riscv_vluxseg7_mask, // llvm.riscv.vluxseg7.mask
riscv_vluxseg8, // llvm.riscv.vluxseg8
riscv_vluxseg8_mask, // llvm.riscv.vluxseg8.mask
riscv_vmacc, // llvm.riscv.vmacc
riscv_vmacc_mask, // llvm.riscv.vmacc.mask
riscv_vmadc, // llvm.riscv.vmadc
riscv_vmadc_carry_in, // llvm.riscv.vmadc.carry.in
riscv_vmadd, // llvm.riscv.vmadd
riscv_vmadd_mask, // llvm.riscv.vmadd.mask
riscv_vmand, // llvm.riscv.vmand
riscv_vmandn, // llvm.riscv.vmandn
riscv_vmax, // llvm.riscv.vmax
riscv_vmax_mask, // llvm.riscv.vmax.mask
riscv_vmaxu, // llvm.riscv.vmaxu
riscv_vmaxu_mask, // llvm.riscv.vmaxu.mask
riscv_vmclr, // llvm.riscv.vmclr
riscv_vmerge, // llvm.riscv.vmerge
riscv_vmfeq, // llvm.riscv.vmfeq
riscv_vmfeq_mask, // llvm.riscv.vmfeq.mask
riscv_vmfge, // llvm.riscv.vmfge
riscv_vmfge_mask, // llvm.riscv.vmfge.mask
riscv_vmfgt, // llvm.riscv.vmfgt
riscv_vmfgt_mask, // llvm.riscv.vmfgt.mask
riscv_vmfle, // llvm.riscv.vmfle
riscv_vmfle_mask, // llvm.riscv.vmfle.mask
riscv_vmflt, // llvm.riscv.vmflt
riscv_vmflt_mask, // llvm.riscv.vmflt.mask
riscv_vmfne, // llvm.riscv.vmfne
riscv_vmfne_mask, // llvm.riscv.vmfne.mask
riscv_vmin, // llvm.riscv.vmin
riscv_vmin_mask, // llvm.riscv.vmin.mask
riscv_vminu, // llvm.riscv.vminu
riscv_vminu_mask, // llvm.riscv.vminu.mask
riscv_vmnand, // llvm.riscv.vmnand
riscv_vmnor, // llvm.riscv.vmnor
riscv_vmor, // llvm.riscv.vmor
riscv_vmorn, // llvm.riscv.vmorn
riscv_vmsbc, // llvm.riscv.vmsbc
riscv_vmsbc_borrow_in, // llvm.riscv.vmsbc.borrow.in
riscv_vmsbf, // llvm.riscv.vmsbf
riscv_vmsbf_mask, // llvm.riscv.vmsbf.mask
riscv_vmseq, // llvm.riscv.vmseq
riscv_vmseq_mask, // llvm.riscv.vmseq.mask
riscv_vmset, // llvm.riscv.vmset
riscv_vmsge, // llvm.riscv.vmsge
riscv_vmsge_mask, // llvm.riscv.vmsge.mask
riscv_vmsgeu, // llvm.riscv.vmsgeu
riscv_vmsgeu_mask, // llvm.riscv.vmsgeu.mask
riscv_vmsgt, // llvm.riscv.vmsgt
riscv_vmsgt_mask, // llvm.riscv.vmsgt.mask
riscv_vmsgtu, // llvm.riscv.vmsgtu
riscv_vmsgtu_mask, // llvm.riscv.vmsgtu.mask
riscv_vmsif, // llvm.riscv.vmsif
riscv_vmsif_mask, // llvm.riscv.vmsif.mask
riscv_vmsle, // llvm.riscv.vmsle
riscv_vmsle_mask, // llvm.riscv.vmsle.mask
riscv_vmsleu, // llvm.riscv.vmsleu
riscv_vmsleu_mask, // llvm.riscv.vmsleu.mask
riscv_vmslt, // llvm.riscv.vmslt
riscv_vmslt_mask, // llvm.riscv.vmslt.mask
riscv_vmsltu, // llvm.riscv.vmsltu
riscv_vmsltu_mask, // llvm.riscv.vmsltu.mask
riscv_vmsne, // llvm.riscv.vmsne
riscv_vmsne_mask, // llvm.riscv.vmsne.mask
riscv_vmsof, // llvm.riscv.vmsof
riscv_vmsof_mask, // llvm.riscv.vmsof.mask
riscv_vmul, // llvm.riscv.vmul
riscv_vmul_mask, // llvm.riscv.vmul.mask
riscv_vmulh, // llvm.riscv.vmulh
riscv_vmulh_mask, // llvm.riscv.vmulh.mask
riscv_vmulhsu, // llvm.riscv.vmulhsu
riscv_vmulhsu_mask, // llvm.riscv.vmulhsu.mask
riscv_vmulhu, // llvm.riscv.vmulhu
riscv_vmulhu_mask, // llvm.riscv.vmulhu.mask
riscv_vmv_s_x, // llvm.riscv.vmv.s.x
riscv_vmv_v_v, // llvm.riscv.vmv.v.v
riscv_vmv_v_x, // llvm.riscv.vmv.v.x
riscv_vmv_x_s, // llvm.riscv.vmv.x.s
riscv_vmxnor, // llvm.riscv.vmxnor
riscv_vmxor, // llvm.riscv.vmxor
riscv_vnclip, // llvm.riscv.vnclip
riscv_vnclip_mask, // llvm.riscv.vnclip.mask
riscv_vnclipu, // llvm.riscv.vnclipu
riscv_vnclipu_mask, // llvm.riscv.vnclipu.mask
riscv_vnmsac, // llvm.riscv.vnmsac
riscv_vnmsac_mask, // llvm.riscv.vnmsac.mask
riscv_vnmsub, // llvm.riscv.vnmsub
riscv_vnmsub_mask, // llvm.riscv.vnmsub.mask
riscv_vnsra, // llvm.riscv.vnsra
riscv_vnsra_mask, // llvm.riscv.vnsra.mask
riscv_vnsrl, // llvm.riscv.vnsrl
riscv_vnsrl_mask, // llvm.riscv.vnsrl.mask
riscv_vor, // llvm.riscv.vor
riscv_vor_mask, // llvm.riscv.vor.mask
riscv_vredand, // llvm.riscv.vredand
riscv_vredand_mask, // llvm.riscv.vredand.mask
riscv_vredmax, // llvm.riscv.vredmax
riscv_vredmax_mask, // llvm.riscv.vredmax.mask
riscv_vredmaxu, // llvm.riscv.vredmaxu
riscv_vredmaxu_mask, // llvm.riscv.vredmaxu.mask
riscv_vredmin, // llvm.riscv.vredmin
riscv_vredmin_mask, // llvm.riscv.vredmin.mask
riscv_vredminu, // llvm.riscv.vredminu
riscv_vredminu_mask, // llvm.riscv.vredminu.mask
riscv_vredor, // llvm.riscv.vredor
riscv_vredor_mask, // llvm.riscv.vredor.mask
riscv_vredsum, // llvm.riscv.vredsum
riscv_vredsum_mask, // llvm.riscv.vredsum.mask
riscv_vredxor, // llvm.riscv.vredxor
riscv_vredxor_mask, // llvm.riscv.vredxor.mask
riscv_vrem, // llvm.riscv.vrem
riscv_vrem_mask, // llvm.riscv.vrem.mask
riscv_vremu, // llvm.riscv.vremu
riscv_vremu_mask, // llvm.riscv.vremu.mask
riscv_vrgather_vv, // llvm.riscv.vrgather.vv
riscv_vrgather_vv_mask, // llvm.riscv.vrgather.vv.mask
riscv_vrgather_vx, // llvm.riscv.vrgather.vx
riscv_vrgather_vx_mask, // llvm.riscv.vrgather.vx.mask
riscv_vrgatherei16_vv, // llvm.riscv.vrgatherei16.vv
riscv_vrgatherei16_vv_mask, // llvm.riscv.vrgatherei16.vv.mask
riscv_vrsub, // llvm.riscv.vrsub
riscv_vrsub_mask, // llvm.riscv.vrsub.mask
riscv_vsadd, // llvm.riscv.vsadd
riscv_vsadd_mask, // llvm.riscv.vsadd.mask
riscv_vsaddu, // llvm.riscv.vsaddu
riscv_vsaddu_mask, // llvm.riscv.vsaddu.mask
riscv_vsbc, // llvm.riscv.vsbc
riscv_vse, // llvm.riscv.vse
riscv_vse_mask, // llvm.riscv.vse.mask
riscv_vsetvli, // llvm.riscv.vsetvli
riscv_vsetvli_opt, // llvm.riscv.vsetvli.opt
riscv_vsetvlimax, // llvm.riscv.vsetvlimax
riscv_vsetvlimax_opt, // llvm.riscv.vsetvlimax.opt
riscv_vsext, // llvm.riscv.vsext
riscv_vsext_mask, // llvm.riscv.vsext.mask
riscv_vslide1down, // llvm.riscv.vslide1down
riscv_vslide1down_mask, // llvm.riscv.vslide1down.mask
riscv_vslide1up, // llvm.riscv.vslide1up
riscv_vslide1up_mask, // llvm.riscv.vslide1up.mask
riscv_vslidedown, // llvm.riscv.vslidedown
riscv_vslidedown_mask, // llvm.riscv.vslidedown.mask
riscv_vslideup, // llvm.riscv.vslideup
riscv_vslideup_mask, // llvm.riscv.vslideup.mask
riscv_vsll, // llvm.riscv.vsll
riscv_vsll_mask, // llvm.riscv.vsll.mask
riscv_vsm, // llvm.riscv.vsm
riscv_vsmul, // llvm.riscv.vsmul
riscv_vsmul_mask, // llvm.riscv.vsmul.mask
riscv_vsoxei, // llvm.riscv.vsoxei
riscv_vsoxei_mask, // llvm.riscv.vsoxei.mask
riscv_vsoxseg2, // llvm.riscv.vsoxseg2
riscv_vsoxseg2_mask, // llvm.riscv.vsoxseg2.mask
riscv_vsoxseg3, // llvm.riscv.vsoxseg3
riscv_vsoxseg3_mask, // llvm.riscv.vsoxseg3.mask
riscv_vsoxseg4, // llvm.riscv.vsoxseg4
riscv_vsoxseg4_mask, // llvm.riscv.vsoxseg4.mask
riscv_vsoxseg5, // llvm.riscv.vsoxseg5
riscv_vsoxseg5_mask, // llvm.riscv.vsoxseg5.mask
riscv_vsoxseg6, // llvm.riscv.vsoxseg6
riscv_vsoxseg6_mask, // llvm.riscv.vsoxseg6.mask
riscv_vsoxseg7, // llvm.riscv.vsoxseg7
riscv_vsoxseg7_mask, // llvm.riscv.vsoxseg7.mask
riscv_vsoxseg8, // llvm.riscv.vsoxseg8
riscv_vsoxseg8_mask, // llvm.riscv.vsoxseg8.mask
riscv_vsra, // llvm.riscv.vsra
riscv_vsra_mask, // llvm.riscv.vsra.mask
riscv_vsrl, // llvm.riscv.vsrl
riscv_vsrl_mask, // llvm.riscv.vsrl.mask
riscv_vsse, // llvm.riscv.vsse
riscv_vsse_mask, // llvm.riscv.vsse.mask
riscv_vsseg2, // llvm.riscv.vsseg2
riscv_vsseg2_mask, // llvm.riscv.vsseg2.mask
riscv_vsseg3, // llvm.riscv.vsseg3
riscv_vsseg3_mask, // llvm.riscv.vsseg3.mask
riscv_vsseg4, // llvm.riscv.vsseg4
riscv_vsseg4_mask, // llvm.riscv.vsseg4.mask
riscv_vsseg5, // llvm.riscv.vsseg5
riscv_vsseg5_mask, // llvm.riscv.vsseg5.mask
riscv_vsseg6, // llvm.riscv.vsseg6
riscv_vsseg6_mask, // llvm.riscv.vsseg6.mask
riscv_vsseg7, // llvm.riscv.vsseg7
riscv_vsseg7_mask, // llvm.riscv.vsseg7.mask
riscv_vsseg8, // llvm.riscv.vsseg8
riscv_vsseg8_mask, // llvm.riscv.vsseg8.mask
riscv_vssra, // llvm.riscv.vssra
riscv_vssra_mask, // llvm.riscv.vssra.mask
riscv_vssrl, // llvm.riscv.vssrl
riscv_vssrl_mask, // llvm.riscv.vssrl.mask
riscv_vssseg2, // llvm.riscv.vssseg2
riscv_vssseg2_mask, // llvm.riscv.vssseg2.mask
riscv_vssseg3, // llvm.riscv.vssseg3
riscv_vssseg3_mask, // llvm.riscv.vssseg3.mask
riscv_vssseg4, // llvm.riscv.vssseg4
riscv_vssseg4_mask, // llvm.riscv.vssseg4.mask
riscv_vssseg5, // llvm.riscv.vssseg5
riscv_vssseg5_mask, // llvm.riscv.vssseg5.mask
riscv_vssseg6, // llvm.riscv.vssseg6
riscv_vssseg6_mask, // llvm.riscv.vssseg6.mask
riscv_vssseg7, // llvm.riscv.vssseg7
riscv_vssseg7_mask, // llvm.riscv.vssseg7.mask
riscv_vssseg8, // llvm.riscv.vssseg8
riscv_vssseg8_mask, // llvm.riscv.vssseg8.mask
riscv_vssub, // llvm.riscv.vssub
riscv_vssub_mask, // llvm.riscv.vssub.mask
riscv_vssubu, // llvm.riscv.vssubu
riscv_vssubu_mask, // llvm.riscv.vssubu.mask
riscv_vsub, // llvm.riscv.vsub
riscv_vsub_mask, // llvm.riscv.vsub.mask
riscv_vsuxei, // llvm.riscv.vsuxei
riscv_vsuxei_mask, // llvm.riscv.vsuxei.mask
riscv_vsuxseg2, // llvm.riscv.vsuxseg2
riscv_vsuxseg2_mask, // llvm.riscv.vsuxseg2.mask
riscv_vsuxseg3, // llvm.riscv.vsuxseg3
riscv_vsuxseg3_mask, // llvm.riscv.vsuxseg3.mask
riscv_vsuxseg4, // llvm.riscv.vsuxseg4
riscv_vsuxseg4_mask, // llvm.riscv.vsuxseg4.mask
riscv_vsuxseg5, // llvm.riscv.vsuxseg5
riscv_vsuxseg5_mask, // llvm.riscv.vsuxseg5.mask
riscv_vsuxseg6, // llvm.riscv.vsuxseg6
riscv_vsuxseg6_mask, // llvm.riscv.vsuxseg6.mask
riscv_vsuxseg7, // llvm.riscv.vsuxseg7
riscv_vsuxseg7_mask, // llvm.riscv.vsuxseg7.mask
riscv_vsuxseg8, // llvm.riscv.vsuxseg8
riscv_vsuxseg8_mask, // llvm.riscv.vsuxseg8.mask
riscv_vwadd, // llvm.riscv.vwadd
riscv_vwadd_mask, // llvm.riscv.vwadd.mask
riscv_vwadd_w, // llvm.riscv.vwadd.w
riscv_vwadd_w_mask, // llvm.riscv.vwadd.w.mask
riscv_vwaddu, // llvm.riscv.vwaddu
riscv_vwaddu_mask, // llvm.riscv.vwaddu.mask
riscv_vwaddu_w, // llvm.riscv.vwaddu.w
riscv_vwaddu_w_mask, // llvm.riscv.vwaddu.w.mask
riscv_vwmacc, // llvm.riscv.vwmacc
riscv_vwmacc_mask, // llvm.riscv.vwmacc.mask
riscv_vwmaccsu, // llvm.riscv.vwmaccsu
riscv_vwmaccsu_mask, // llvm.riscv.vwmaccsu.mask
riscv_vwmaccu, // llvm.riscv.vwmaccu
riscv_vwmaccu_mask, // llvm.riscv.vwmaccu.mask
riscv_vwmaccus, // llvm.riscv.vwmaccus
riscv_vwmaccus_mask, // llvm.riscv.vwmaccus.mask
riscv_vwmul, // llvm.riscv.vwmul
riscv_vwmul_mask, // llvm.riscv.vwmul.mask
riscv_vwmulsu, // llvm.riscv.vwmulsu
riscv_vwmulsu_mask, // llvm.riscv.vwmulsu.mask
riscv_vwmulu, // llvm.riscv.vwmulu
riscv_vwmulu_mask, // llvm.riscv.vwmulu.mask
riscv_vwredsum, // llvm.riscv.vwredsum
riscv_vwredsum_mask, // llvm.riscv.vwredsum.mask
riscv_vwredsumu, // llvm.riscv.vwredsumu
riscv_vwredsumu_mask, // llvm.riscv.vwredsumu.mask
riscv_vwsub, // llvm.riscv.vwsub
riscv_vwsub_mask, // llvm.riscv.vwsub.mask
riscv_vwsub_w, // llvm.riscv.vwsub.w
riscv_vwsub_w_mask, // llvm.riscv.vwsub.w.mask
riscv_vwsubu, // llvm.riscv.vwsubu
riscv_vwsubu_mask, // llvm.riscv.vwsubu.mask
riscv_vwsubu_w, // llvm.riscv.vwsubu.w
riscv_vwsubu_w_mask, // llvm.riscv.vwsubu.w.mask
riscv_vxor, // llvm.riscv.vxor
riscv_vxor_mask, // llvm.riscv.vxor.mask
riscv_vzext, // llvm.riscv.vzext
riscv_vzext_mask, // llvm.riscv.vzext.mask
riscv_xperm_b, // llvm.riscv.xperm.b
riscv_xperm_h, // llvm.riscv.xperm.h
riscv_xperm_n, // llvm.riscv.xperm.n
riscv_xperm_w, // llvm.riscv.xperm.w
riscv_xperm4, // llvm.riscv.xperm4
riscv_xperm8, // llvm.riscv.xperm8
riscv_zip, // llvm.riscv.zip
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
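
Because the numeric values here shift whenever the intrinsic list is regenerated, client code should dispatch on the named enumerators rather than on raw numbers or on any assumed adjacency of the .mask variants. A minimal sketch; the predicate and the particular cases are illustrative:

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsRISCV.h"

// True iff ID names the masked form of one of a few RVV arithmetic
// intrinsics; extend the switch as needed.
bool isMaskedRVVArith(llvm::Intrinsic::ID ID) {
  switch (ID) {
  case llvm::Intrinsic::riscv_vadd_mask:
  case llvm::Intrinsic::riscv_vsub_mask:
  case llvm::Intrinsic::riscv_vand_mask:
  case llvm::Intrinsic::riscv_vor_mask:
    return true;
  default:
    return false;
  }
}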

File diff suppressed because it is too large

View File

@@ -0,0 +1,253 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|*                                                                            *|
|* Intrinsic Function Source Fragment                                         *|
|*                                                                            *|
|* Automatically generated file, do not edit!                                 *|
|*                                                                            *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_S390_ENUMS_H
#define LLVM_IR_INTRINSIC_S390_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum S390Intrinsics : unsigned {
// Enum values for intrinsics
s390_efpc = 7833, // llvm.s390.efpc
s390_etnd, // llvm.s390.etnd
s390_lcbb, // llvm.s390.lcbb
s390_ntstg, // llvm.s390.ntstg
s390_ppa_txassist, // llvm.s390.ppa.txassist
s390_sfpc, // llvm.s390.sfpc
s390_tabort, // llvm.s390.tabort
s390_tbegin, // llvm.s390.tbegin
s390_tbegin_nofloat, // llvm.s390.tbegin.nofloat
s390_tbeginc, // llvm.s390.tbeginc
s390_tdc, // llvm.s390.tdc
s390_tend, // llvm.s390.tend
s390_vaccb, // llvm.s390.vaccb
s390_vacccq, // llvm.s390.vacccq
s390_vaccf, // llvm.s390.vaccf
s390_vaccg, // llvm.s390.vaccg
s390_vacch, // llvm.s390.vacch
s390_vaccq, // llvm.s390.vaccq
s390_vacq, // llvm.s390.vacq
s390_vaq, // llvm.s390.vaq
s390_vavgb, // llvm.s390.vavgb
s390_vavgf, // llvm.s390.vavgf
s390_vavgg, // llvm.s390.vavgg
s390_vavgh, // llvm.s390.vavgh
s390_vavglb, // llvm.s390.vavglb
s390_vavglf, // llvm.s390.vavglf
s390_vavglg, // llvm.s390.vavglg
s390_vavglh, // llvm.s390.vavglh
s390_vbperm, // llvm.s390.vbperm
s390_vceqbs, // llvm.s390.vceqbs
s390_vceqfs, // llvm.s390.vceqfs
s390_vceqgs, // llvm.s390.vceqgs
s390_vceqhs, // llvm.s390.vceqhs
s390_vcfn, // llvm.s390.vcfn
s390_vchbs, // llvm.s390.vchbs
s390_vchfs, // llvm.s390.vchfs
s390_vchgs, // llvm.s390.vchgs
s390_vchhs, // llvm.s390.vchhs
s390_vchlbs, // llvm.s390.vchlbs
s390_vchlfs, // llvm.s390.vchlfs
s390_vchlgs, // llvm.s390.vchlgs
s390_vchlhs, // llvm.s390.vchlhs
s390_vcksm, // llvm.s390.vcksm
s390_vclfnhs, // llvm.s390.vclfnhs
s390_vclfnls, // llvm.s390.vclfnls
s390_vcnf, // llvm.s390.vcnf
s390_vcrnfs, // llvm.s390.vcrnfs
s390_verimb, // llvm.s390.verimb
s390_verimf, // llvm.s390.verimf
s390_verimg, // llvm.s390.verimg
s390_verimh, // llvm.s390.verimh
s390_verllb, // llvm.s390.verllb
s390_verllf, // llvm.s390.verllf
s390_verllg, // llvm.s390.verllg
s390_verllh, // llvm.s390.verllh
s390_verllvb, // llvm.s390.verllvb
s390_verllvf, // llvm.s390.verllvf
s390_verllvg, // llvm.s390.verllvg
s390_verllvh, // llvm.s390.verllvh
s390_vfaeb, // llvm.s390.vfaeb
s390_vfaebs, // llvm.s390.vfaebs
s390_vfaef, // llvm.s390.vfaef
s390_vfaefs, // llvm.s390.vfaefs
s390_vfaeh, // llvm.s390.vfaeh
s390_vfaehs, // llvm.s390.vfaehs
s390_vfaezb, // llvm.s390.vfaezb
s390_vfaezbs, // llvm.s390.vfaezbs
s390_vfaezf, // llvm.s390.vfaezf
s390_vfaezfs, // llvm.s390.vfaezfs
s390_vfaezh, // llvm.s390.vfaezh
s390_vfaezhs, // llvm.s390.vfaezhs
s390_vfcedbs, // llvm.s390.vfcedbs
s390_vfcesbs, // llvm.s390.vfcesbs
s390_vfchdbs, // llvm.s390.vfchdbs
s390_vfchedbs, // llvm.s390.vfchedbs
s390_vfchesbs, // llvm.s390.vfchesbs
s390_vfchsbs, // llvm.s390.vfchsbs
s390_vfeeb, // llvm.s390.vfeeb
s390_vfeebs, // llvm.s390.vfeebs
s390_vfeef, // llvm.s390.vfeef
s390_vfeefs, // llvm.s390.vfeefs
s390_vfeeh, // llvm.s390.vfeeh
s390_vfeehs, // llvm.s390.vfeehs
s390_vfeezb, // llvm.s390.vfeezb
s390_vfeezbs, // llvm.s390.vfeezbs
s390_vfeezf, // llvm.s390.vfeezf
s390_vfeezfs, // llvm.s390.vfeezfs
s390_vfeezh, // llvm.s390.vfeezh
s390_vfeezhs, // llvm.s390.vfeezhs
s390_vfeneb, // llvm.s390.vfeneb
s390_vfenebs, // llvm.s390.vfenebs
s390_vfenef, // llvm.s390.vfenef
s390_vfenefs, // llvm.s390.vfenefs
s390_vfeneh, // llvm.s390.vfeneh
s390_vfenehs, // llvm.s390.vfenehs
s390_vfenezb, // llvm.s390.vfenezb
s390_vfenezbs, // llvm.s390.vfenezbs
s390_vfenezf, // llvm.s390.vfenezf
s390_vfenezfs, // llvm.s390.vfenezfs
s390_vfenezh, // llvm.s390.vfenezh
s390_vfenezhs, // llvm.s390.vfenezhs
s390_vfidb, // llvm.s390.vfidb
s390_vfisb, // llvm.s390.vfisb
s390_vfmaxdb, // llvm.s390.vfmaxdb
s390_vfmaxsb, // llvm.s390.vfmaxsb
s390_vfmindb, // llvm.s390.vfmindb
s390_vfminsb, // llvm.s390.vfminsb
s390_vftcidb, // llvm.s390.vftcidb
s390_vftcisb, // llvm.s390.vftcisb
s390_vgfmab, // llvm.s390.vgfmab
s390_vgfmaf, // llvm.s390.vgfmaf
s390_vgfmag, // llvm.s390.vgfmag
s390_vgfmah, // llvm.s390.vgfmah
s390_vgfmb, // llvm.s390.vgfmb
s390_vgfmf, // llvm.s390.vgfmf
s390_vgfmg, // llvm.s390.vgfmg
s390_vgfmh, // llvm.s390.vgfmh
s390_vistrb, // llvm.s390.vistrb
s390_vistrbs, // llvm.s390.vistrbs
s390_vistrf, // llvm.s390.vistrf
s390_vistrfs, // llvm.s390.vistrfs
s390_vistrh, // llvm.s390.vistrh
s390_vistrhs, // llvm.s390.vistrhs
s390_vlbb, // llvm.s390.vlbb
s390_vll, // llvm.s390.vll
s390_vlrl, // llvm.s390.vlrl
s390_vmaeb, // llvm.s390.vmaeb
s390_vmaef, // llvm.s390.vmaef
s390_vmaeh, // llvm.s390.vmaeh
s390_vmahb, // llvm.s390.vmahb
s390_vmahf, // llvm.s390.vmahf
s390_vmahh, // llvm.s390.vmahh
s390_vmaleb, // llvm.s390.vmaleb
s390_vmalef, // llvm.s390.vmalef
s390_vmaleh, // llvm.s390.vmaleh
s390_vmalhb, // llvm.s390.vmalhb
s390_vmalhf, // llvm.s390.vmalhf
s390_vmalhh, // llvm.s390.vmalhh
s390_vmalob, // llvm.s390.vmalob
s390_vmalof, // llvm.s390.vmalof
s390_vmaloh, // llvm.s390.vmaloh
s390_vmaob, // llvm.s390.vmaob
s390_vmaof, // llvm.s390.vmaof
s390_vmaoh, // llvm.s390.vmaoh
s390_vmeb, // llvm.s390.vmeb
s390_vmef, // llvm.s390.vmef
s390_vmeh, // llvm.s390.vmeh
s390_vmhb, // llvm.s390.vmhb
s390_vmhf, // llvm.s390.vmhf
s390_vmhh, // llvm.s390.vmhh
s390_vmleb, // llvm.s390.vmleb
s390_vmlef, // llvm.s390.vmlef
s390_vmleh, // llvm.s390.vmleh
s390_vmlhb, // llvm.s390.vmlhb
s390_vmlhf, // llvm.s390.vmlhf
s390_vmlhh, // llvm.s390.vmlhh
s390_vmlob, // llvm.s390.vmlob
s390_vmlof, // llvm.s390.vmlof
s390_vmloh, // llvm.s390.vmloh
s390_vmob, // llvm.s390.vmob
s390_vmof, // llvm.s390.vmof
s390_vmoh, // llvm.s390.vmoh
s390_vmslg, // llvm.s390.vmslg
s390_vpdi, // llvm.s390.vpdi
s390_vperm, // llvm.s390.vperm
s390_vpklsf, // llvm.s390.vpklsf
s390_vpklsfs, // llvm.s390.vpklsfs
s390_vpklsg, // llvm.s390.vpklsg
s390_vpklsgs, // llvm.s390.vpklsgs
s390_vpklsh, // llvm.s390.vpklsh
s390_vpklshs, // llvm.s390.vpklshs
s390_vpksf, // llvm.s390.vpksf
s390_vpksfs, // llvm.s390.vpksfs
s390_vpksg, // llvm.s390.vpksg
s390_vpksgs, // llvm.s390.vpksgs
s390_vpksh, // llvm.s390.vpksh
s390_vpkshs, // llvm.s390.vpkshs
s390_vsbcbiq, // llvm.s390.vsbcbiq
s390_vsbiq, // llvm.s390.vsbiq
s390_vscbib, // llvm.s390.vscbib
s390_vscbif, // llvm.s390.vscbif
s390_vscbig, // llvm.s390.vscbig
s390_vscbih, // llvm.s390.vscbih
s390_vscbiq, // llvm.s390.vscbiq
s390_vsl, // llvm.s390.vsl
s390_vslb, // llvm.s390.vslb
s390_vsld, // llvm.s390.vsld
s390_vsldb, // llvm.s390.vsldb
s390_vsq, // llvm.s390.vsq
s390_vsra, // llvm.s390.vsra
s390_vsrab, // llvm.s390.vsrab
s390_vsrd, // llvm.s390.vsrd
s390_vsrl, // llvm.s390.vsrl
s390_vsrlb, // llvm.s390.vsrlb
s390_vstl, // llvm.s390.vstl
s390_vstrcb, // llvm.s390.vstrcb
s390_vstrcbs, // llvm.s390.vstrcbs
s390_vstrcf, // llvm.s390.vstrcf
s390_vstrcfs, // llvm.s390.vstrcfs
s390_vstrch, // llvm.s390.vstrch
s390_vstrchs, // llvm.s390.vstrchs
s390_vstrczb, // llvm.s390.vstrczb
s390_vstrczbs, // llvm.s390.vstrczbs
s390_vstrczf, // llvm.s390.vstrczf
s390_vstrczfs, // llvm.s390.vstrczfs
s390_vstrczh, // llvm.s390.vstrczh
s390_vstrczhs, // llvm.s390.vstrczhs
s390_vstrl, // llvm.s390.vstrl
s390_vstrsb, // llvm.s390.vstrsb
s390_vstrsf, // llvm.s390.vstrsf
s390_vstrsh, // llvm.s390.vstrsh
s390_vstrszb, // llvm.s390.vstrszb
s390_vstrszf, // llvm.s390.vstrszf
s390_vstrszh, // llvm.s390.vstrszh
s390_vsumb, // llvm.s390.vsumb
s390_vsumgf, // llvm.s390.vsumgf
s390_vsumgh, // llvm.s390.vsumgh
s390_vsumh, // llvm.s390.vsumh
s390_vsumqf, // llvm.s390.vsumqf
s390_vsumqg, // llvm.s390.vsumqg
s390_vtm, // llvm.s390.vtm
s390_vuphb, // llvm.s390.vuphb
s390_vuphf, // llvm.s390.vuphf
s390_vuphh, // llvm.s390.vuphh
s390_vuplb, // llvm.s390.vuplb
s390_vuplf, // llvm.s390.vuplf
s390_vuplhb, // llvm.s390.vuplhb
s390_vuplhf, // llvm.s390.vuplhf
s390_vuplhh, // llvm.s390.vuplhh
s390_vuplhw, // llvm.s390.vuplhw
s390_vupllb, // llvm.s390.vupllb
s390_vupllf, // llvm.s390.vupllf
s390_vupllh, // llvm.s390.vupllh
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif
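
The enum values above are dense and generated in lockstep with the intrinsic tables, so only the first entry carries an explicit value. A minimal sketch (assuming an existing Module; function name is illustrative) of turning one of these IDs into a callable declaration:

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Sketch: materialize a declaration for llvm.s390.etnd inside M.
// s390_etnd has no overloaded types, so no type arguments are passed.
llvm::Function *getEtndDecl(llvm::Module &M) {
  return llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::s390_etnd);
}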

View File

@@ -0,0 +1,477 @@
//===- IntrinsicsSystemZ.td - Defines SystemZ intrinsics ---*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the SystemZ-specific intrinsics.
//
//===----------------------------------------------------------------------===//
class SystemZUnaryConv<string name, LLVMType result, LLVMType arg>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg], [IntrNoMem]>;
class SystemZUnary<string name, LLVMType type>
: SystemZUnaryConv<name, type, type>;
class SystemZUnaryConvCC<LLVMType result, LLVMType arg>
: Intrinsic<[result, llvm_i32_ty], [arg], [IntrNoMem]>;
class SystemZUnaryCC<LLVMType type>
: SystemZUnaryConvCC<type, type>;
class SystemZBinaryConv<string name, LLVMType result, LLVMType arg>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg, arg], [IntrNoMem]>;
class SystemZBinary<string name, LLVMType type>
: SystemZBinaryConv<name, type, type>;
class SystemZBinaryInt<string name, LLVMType type>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[type], [type, llvm_i32_ty], [IntrNoMem]>;
class SystemZBinaryConvCC<LLVMType result, LLVMType arg>
: Intrinsic<[result, llvm_i32_ty], [arg, arg], [IntrNoMem]>;
class SystemZBinaryConvIntCC<LLVMType result, LLVMType arg>
: Intrinsic<[result, llvm_i32_ty], [arg, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
class SystemZBinaryCC<LLVMType type>
: SystemZBinaryConvCC<type, type>;
class SystemZTernaryConv<string name, LLVMType result, LLVMType arg>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[result], [arg, arg, result], [IntrNoMem]>;
class SystemZTernaryConvCC<LLVMType result, LLVMType arg>
: Intrinsic<[result, llvm_i32_ty], [arg, arg, result], [IntrNoMem]>;
class SystemZTernary<string name, LLVMType type>
: SystemZTernaryConv<name, type, type>;
class SystemZTernaryInt<string name, LLVMType type>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[type], [type, type, llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<2>>]>;
class SystemZTernaryIntCC<LLVMType type>
: Intrinsic<[type, llvm_i32_ty], [type, type, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
class SystemZQuaternaryInt<string name, LLVMType type>
: GCCBuiltin<"__builtin_s390_" # name>,
Intrinsic<[type], [type, type, type, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
class SystemZQuaternaryIntCC<LLVMType type>
: Intrinsic<[type, llvm_i32_ty], [type, type, type, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<3>>]>;
multiclass SystemZUnaryExtBHF<string name> {
def b : SystemZUnaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
def h : SystemZUnaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
def f : SystemZUnaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZUnaryExtBHWF<string name> {
def b : SystemZUnaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
def hw : SystemZUnaryConv<name#"hw", llvm_v4i32_ty, llvm_v8i16_ty>;
def f : SystemZUnaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZUnaryBHF<string name> {
def b : SystemZUnary<name#"b", llvm_v16i8_ty>;
def h : SystemZUnary<name#"h", llvm_v8i16_ty>;
def f : SystemZUnary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZUnaryBHFG<string name> : SystemZUnaryBHF<name> {
def g : SystemZUnary<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZUnaryCCBHF {
def bs : SystemZUnaryCC<llvm_v16i8_ty>;
def hs : SystemZUnaryCC<llvm_v8i16_ty>;
def fs : SystemZUnaryCC<llvm_v4i32_ty>;
}
multiclass SystemZBinaryTruncHFG<string name> {
def h : SystemZBinaryConv<name#"h", llvm_v16i8_ty, llvm_v8i16_ty>;
def f : SystemZBinaryConv<name#"f", llvm_v8i16_ty, llvm_v4i32_ty>;
def g : SystemZBinaryConv<name#"g", llvm_v4i32_ty, llvm_v2i64_ty>;
}
multiclass SystemZBinaryTruncCCHFG {
def hs : SystemZBinaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
def fs : SystemZBinaryConvCC<llvm_v8i16_ty, llvm_v4i32_ty>;
def gs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v2i64_ty>;
}
multiclass SystemZBinaryExtBHF<string name> {
def b : SystemZBinaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
def h : SystemZBinaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
def f : SystemZBinaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZBinaryExtBHFG<string name> : SystemZBinaryExtBHF<name> {
def g : SystemZBinaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}
multiclass SystemZBinaryBHF<string name> {
def b : SystemZBinary<name#"b", llvm_v16i8_ty>;
def h : SystemZBinary<name#"h", llvm_v8i16_ty>;
def f : SystemZBinary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZBinaryBHFG<string name> : SystemZBinaryBHF<name> {
def g : SystemZBinary<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZBinaryIntBHFG<string name> {
def b : SystemZBinaryInt<name#"b", llvm_v16i8_ty>;
def h : SystemZBinaryInt<name#"h", llvm_v8i16_ty>;
def f : SystemZBinaryInt<name#"f", llvm_v4i32_ty>;
def g : SystemZBinaryInt<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZBinaryCCBHF {
def bs : SystemZBinaryCC<llvm_v16i8_ty>;
def hs : SystemZBinaryCC<llvm_v8i16_ty>;
def fs : SystemZBinaryCC<llvm_v4i32_ty>;
}
multiclass SystemZCompareBHFG {
def bs : SystemZBinaryCC<llvm_v16i8_ty>;
def hs : SystemZBinaryCC<llvm_v8i16_ty>;
def fs : SystemZBinaryCC<llvm_v4i32_ty>;
def gs : SystemZBinaryCC<llvm_v2i64_ty>;
}
multiclass SystemZTernaryExtBHF<string name> {
def b : SystemZTernaryConv<name#"b", llvm_v8i16_ty, llvm_v16i8_ty>;
def h : SystemZTernaryConv<name#"h", llvm_v4i32_ty, llvm_v8i16_ty>;
def f : SystemZTernaryConv<name#"f", llvm_v2i64_ty, llvm_v4i32_ty>;
}
multiclass SystemZTernaryExtBHFG<string name> : SystemZTernaryExtBHF<name> {
def g : SystemZTernaryConv<name#"g", llvm_v16i8_ty, llvm_v2i64_ty>;
}
multiclass SystemZTernaryBHF<string name> {
def b : SystemZTernary<name#"b", llvm_v16i8_ty>;
def h : SystemZTernary<name#"h", llvm_v8i16_ty>;
def f : SystemZTernary<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZTernaryIntBHF<string name> {
def b : SystemZTernaryInt<name#"b", llvm_v16i8_ty>;
def h : SystemZTernaryInt<name#"h", llvm_v8i16_ty>;
def f : SystemZTernaryInt<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZTernaryIntCCBHF {
def bs : SystemZTernaryIntCC<llvm_v16i8_ty>;
def hs : SystemZTernaryIntCC<llvm_v8i16_ty>;
def fs : SystemZTernaryIntCC<llvm_v4i32_ty>;
}
multiclass SystemZQuaternaryIntBHF<string name> {
def b : SystemZQuaternaryInt<name#"b", llvm_v16i8_ty>;
def h : SystemZQuaternaryInt<name#"h", llvm_v8i16_ty>;
def f : SystemZQuaternaryInt<name#"f", llvm_v4i32_ty>;
}
multiclass SystemZQuaternaryIntBHFG<string name> :
SystemZQuaternaryIntBHF<name> {
def g : SystemZQuaternaryInt<name#"g", llvm_v2i64_ty>;
}
multiclass SystemZQuaternaryIntCCBHF {
def bs : SystemZQuaternaryIntCC<llvm_v16i8_ty>;
def hs : SystemZQuaternaryIntCC<llvm_v8i16_ty>;
def fs : SystemZQuaternaryIntCC<llvm_v4i32_ty>;
}
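// Illustrative expansion (a sketch, not generated output): with the
// multiclasses above, a single line such as
//   defm int_s390_vavg : SystemZBinaryBHFG<"vavg">;
// below defines int_s390_vavgb/h/f/g, where e.g. the byte variant is
// equivalent to
//   def int_s390_vavgb : GCCBuiltin<"__builtin_s390_vavgb">,
//       Intrinsic<[llvm_v16i8_ty], [llvm_v16i8_ty, llvm_v16i8_ty],
//                 [IntrNoMem]>;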
//===----------------------------------------------------------------------===//
//
// Transactional-execution intrinsics
//
//===----------------------------------------------------------------------===//
let TargetPrefix = "s390" in {
def int_s390_tbegin : Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrNoDuplicate, IntrWriteMem]>;
def int_s390_tbegin_nofloat : Intrinsic<[llvm_i32_ty],
[llvm_ptr_ty, llvm_i32_ty],
[IntrNoDuplicate, IntrWriteMem]>;
def int_s390_tbeginc : Intrinsic<[], [llvm_ptr_ty, llvm_i32_ty],
[IntrNoDuplicate, IntrWriteMem]>;
def int_s390_tabort : Intrinsic<[], [llvm_i64_ty],
[IntrNoReturn, Throws, IntrWriteMem]>;
def int_s390_tend : GCCBuiltin<"__builtin_tend">,
Intrinsic<[llvm_i32_ty], []>;
def int_s390_etnd : GCCBuiltin<"__builtin_tx_nesting_depth">,
Intrinsic<[llvm_i32_ty], [], [IntrNoMem]>;
def int_s390_ntstg : Intrinsic<[], [llvm_i64_ty, llvm_ptr64_ty],
[IntrArgMemOnly, IntrWriteMem]>;
def int_s390_ppa_txassist : GCCBuiltin<"__builtin_tx_assist">,
Intrinsic<[], [llvm_i32_ty]>;
}
//===----------------------------------------------------------------------===//
//
// Vector intrinsics
//
//===----------------------------------------------------------------------===//
let TargetPrefix = "s390" in {
def int_s390_lcbb : GCCBuiltin<"__builtin_s390_lcbb">,
Intrinsic<[llvm_i32_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_s390_vlbb : GCCBuiltin<"__builtin_s390_vlbb">,
Intrinsic<[llvm_v16i8_ty], [llvm_ptr_ty, llvm_i32_ty],
[IntrReadMem, IntrArgMemOnly, ImmArg<ArgIndex<1>>]>;
def int_s390_vll : GCCBuiltin<"__builtin_s390_vll">,
Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_s390_vpdi : GCCBuiltin<"__builtin_s390_vpdi">,
Intrinsic<[llvm_v2i64_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vperm : GCCBuiltin<"__builtin_s390_vperm">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem]>;
defm int_s390_vpks : SystemZBinaryTruncHFG<"vpks">;
defm int_s390_vpks : SystemZBinaryTruncCCHFG;
defm int_s390_vpkls : SystemZBinaryTruncHFG<"vpkls">;
defm int_s390_vpkls : SystemZBinaryTruncCCHFG;
def int_s390_vstl : GCCBuiltin<"__builtin_s390_vstl">,
Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
[IntrArgMemOnly, IntrWriteMem]>;
defm int_s390_vupl : SystemZUnaryExtBHWF<"vupl">;
defm int_s390_vupll : SystemZUnaryExtBHF<"vupll">;
defm int_s390_vuph : SystemZUnaryExtBHF<"vuph">;
defm int_s390_vuplh : SystemZUnaryExtBHF<"vuplh">;
defm int_s390_vacc : SystemZBinaryBHFG<"vacc">;
def int_s390_vaq : SystemZBinary<"vaq", llvm_v16i8_ty>;
def int_s390_vacq : SystemZTernary<"vacq", llvm_v16i8_ty>;
def int_s390_vaccq : SystemZBinary<"vaccq", llvm_v16i8_ty>;
def int_s390_vacccq : SystemZTernary<"vacccq", llvm_v16i8_ty>;
defm int_s390_vavg : SystemZBinaryBHFG<"vavg">;
defm int_s390_vavgl : SystemZBinaryBHFG<"vavgl">;
def int_s390_vcksm : SystemZBinary<"vcksm", llvm_v4i32_ty>;
defm int_s390_vgfm : SystemZBinaryExtBHFG<"vgfm">;
defm int_s390_vgfma : SystemZTernaryExtBHFG<"vgfma">;
defm int_s390_vmah : SystemZTernaryBHF<"vmah">;
defm int_s390_vmalh : SystemZTernaryBHF<"vmalh">;
defm int_s390_vmae : SystemZTernaryExtBHF<"vmae">;
defm int_s390_vmale : SystemZTernaryExtBHF<"vmale">;
defm int_s390_vmao : SystemZTernaryExtBHF<"vmao">;
defm int_s390_vmalo : SystemZTernaryExtBHF<"vmalo">;
defm int_s390_vmh : SystemZBinaryBHF<"vmh">;
defm int_s390_vmlh : SystemZBinaryBHF<"vmlh">;
defm int_s390_vme : SystemZBinaryExtBHF<"vme">;
defm int_s390_vmle : SystemZBinaryExtBHF<"vmle">;
defm int_s390_vmo : SystemZBinaryExtBHF<"vmo">;
defm int_s390_vmlo : SystemZBinaryExtBHF<"vmlo">;
defm int_s390_verllv : SystemZBinaryBHFG<"verllv">;
defm int_s390_verll : SystemZBinaryIntBHFG<"verll">;
defm int_s390_verim : SystemZQuaternaryIntBHFG<"verim">;
def int_s390_vsl : SystemZBinary<"vsl", llvm_v16i8_ty>;
def int_s390_vslb : SystemZBinary<"vslb", llvm_v16i8_ty>;
def int_s390_vsra : SystemZBinary<"vsra", llvm_v16i8_ty>;
def int_s390_vsrab : SystemZBinary<"vsrab", llvm_v16i8_ty>;
def int_s390_vsrl : SystemZBinary<"vsrl", llvm_v16i8_ty>;
def int_s390_vsrlb : SystemZBinary<"vsrlb", llvm_v16i8_ty>;
def int_s390_vsldb : GCCBuiltin<"__builtin_s390_vsldb">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
defm int_s390_vscbi : SystemZBinaryBHFG<"vscbi">;
def int_s390_vsq : SystemZBinary<"vsq", llvm_v16i8_ty>;
def int_s390_vsbiq : SystemZTernary<"vsbiq", llvm_v16i8_ty>;
def int_s390_vscbiq : SystemZBinary<"vscbiq", llvm_v16i8_ty>;
def int_s390_vsbcbiq : SystemZTernary<"vsbcbiq", llvm_v16i8_ty>;
def int_s390_vsumb : SystemZBinaryConv<"vsumb", llvm_v4i32_ty, llvm_v16i8_ty>;
def int_s390_vsumh : SystemZBinaryConv<"vsumh", llvm_v4i32_ty, llvm_v8i16_ty>;
def int_s390_vsumgh : SystemZBinaryConv<"vsumgh", llvm_v2i64_ty,
llvm_v8i16_ty>;
def int_s390_vsumgf : SystemZBinaryConv<"vsumgf", llvm_v2i64_ty,
llvm_v4i32_ty>;
def int_s390_vsumqf : SystemZBinaryConv<"vsumqf", llvm_v16i8_ty,
llvm_v4i32_ty>;
def int_s390_vsumqg : SystemZBinaryConv<"vsumqg", llvm_v16i8_ty,
llvm_v2i64_ty>;
def int_s390_vtm : SystemZBinaryConv<"vtm", llvm_i32_ty, llvm_v16i8_ty>;
defm int_s390_vceq : SystemZCompareBHFG;
defm int_s390_vch : SystemZCompareBHFG;
defm int_s390_vchl : SystemZCompareBHFG;
defm int_s390_vfae : SystemZTernaryIntBHF<"vfae">;
defm int_s390_vfae : SystemZTernaryIntCCBHF;
defm int_s390_vfaez : SystemZTernaryIntBHF<"vfaez">;
defm int_s390_vfaez : SystemZTernaryIntCCBHF;
defm int_s390_vfee : SystemZBinaryBHF<"vfee">;
defm int_s390_vfee : SystemZBinaryCCBHF;
defm int_s390_vfeez : SystemZBinaryBHF<"vfeez">;
defm int_s390_vfeez : SystemZBinaryCCBHF;
defm int_s390_vfene : SystemZBinaryBHF<"vfene">;
defm int_s390_vfene : SystemZBinaryCCBHF;
defm int_s390_vfenez : SystemZBinaryBHF<"vfenez">;
defm int_s390_vfenez : SystemZBinaryCCBHF;
defm int_s390_vistr : SystemZUnaryBHF<"vistr">;
defm int_s390_vistr : SystemZUnaryCCBHF;
defm int_s390_vstrc : SystemZQuaternaryIntBHF<"vstrc">;
defm int_s390_vstrc : SystemZQuaternaryIntCCBHF;
defm int_s390_vstrcz : SystemZQuaternaryIntBHF<"vstrcz">;
defm int_s390_vstrcz : SystemZQuaternaryIntCCBHF;
def int_s390_vfcedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
def int_s390_vfchdbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
def int_s390_vfchedbs : SystemZBinaryConvCC<llvm_v2i64_ty, llvm_v2f64_ty>;
def int_s390_vftcidb : SystemZBinaryConvIntCC<llvm_v2i64_ty, llvm_v2f64_ty>;
def int_s390_vfidb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// Instructions from the Vector Enhancements Facility 1
def int_s390_vbperm : SystemZBinaryConv<"vbperm", llvm_v2i64_ty,
llvm_v16i8_ty>;
def int_s390_vmslg : GCCBuiltin<"__builtin_s390_vmslg">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v2i64_ty, llvm_v2i64_ty, llvm_v16i8_ty,
llvm_i32_ty], [IntrNoMem, ImmArg<ArgIndex<3>>]>;
def int_s390_vfmaxdb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfmindb : Intrinsic<[llvm_v2f64_ty],
[llvm_v2f64_ty, llvm_v2f64_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfmaxsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfminsb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vfcesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfchsbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfchesbs : SystemZBinaryConvCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vftcisb : SystemZBinaryConvIntCC<llvm_v4i32_ty, llvm_v4f32_ty>;
def int_s390_vfisb : Intrinsic<[llvm_v4f32_ty],
[llvm_v4f32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>, ImmArg<ArgIndex<2>>]>;
// Instructions from the Vector Packed Decimal Facility
def int_s390_vlrl : GCCBuiltin<"__builtin_s390_vlrl">,
Intrinsic<[llvm_v16i8_ty], [llvm_i32_ty, llvm_ptr_ty],
[IntrReadMem, IntrArgMemOnly]>;
def int_s390_vstrl : GCCBuiltin<"__builtin_s390_vstrl">,
Intrinsic<[], [llvm_v16i8_ty, llvm_i32_ty, llvm_ptr_ty],
[IntrArgMemOnly, IntrWriteMem]>;
// Instructions from the Vector Enhancements Facility 2
def int_s390_vsld : GCCBuiltin<"__builtin_s390_vsld">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vsrd : GCCBuiltin<"__builtin_s390_vsrd">,
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vstrsb : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v16i8_ty>;
def int_s390_vstrsh : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
def int_s390_vstrsf : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v4i32_ty>;
def int_s390_vstrszb : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v16i8_ty>;
def int_s390_vstrszh : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v8i16_ty>;
def int_s390_vstrszf : SystemZTernaryConvCC<llvm_v16i8_ty, llvm_v4i32_ty>;
// Instructions from the NNP-assist Facility
def int_s390_vclfnhs : GCCBuiltin<"__builtin_s390_vclfnhs">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v8i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_s390_vclfnls : GCCBuiltin<"__builtin_s390_vclfnls">,
Intrinsic<[llvm_v4f32_ty],
[llvm_v8i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_s390_vcrnfs : GCCBuiltin<"__builtin_s390_vcrnfs">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v4f32_ty, llvm_v4f32_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<2>>]>;
def int_s390_vcfn : GCCBuiltin<"__builtin_s390_vcfn">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
def int_s390_vcnf : GCCBuiltin<"__builtin_s390_vcnf">,
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
}
//===----------------------------------------------------------------------===//
//
// Misc intrinsics
//
//===----------------------------------------------------------------------===//
let TargetPrefix = "s390" in {
def int_s390_sfpc : GCCBuiltin<"__builtin_s390_sfpc">,
Intrinsic<[], [llvm_i32_ty], []>;
def int_s390_efpc : GCCBuiltin<"__builtin_s390_efpc">,
Intrinsic<[llvm_i32_ty], [], []>;
def int_s390_tdc : Intrinsic<[llvm_i32_ty], [llvm_anyfloat_ty, llvm_i64_ty],
[IntrNoMem]>;
}
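
Several definitions above mark an operand with ImmArg, which requires that argument to be an immediate constant at the call site. A hedged IRBuilder sketch (function and variable names are illustrative) emitting llvm.s390.vsldb:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

// Sketch: emit llvm.s390.vsldb. The third operand is ImmArg<ArgIndex<2>>,
// so it must be an IR constant, such as the literal 4 used here.
llvm::Value *emitVsldb(llvm::IRBuilder<> &B, llvm::Value *LHS,
                       llvm::Value *RHS) {
  return B.CreateIntrinsic(llvm::Intrinsic::s390_vsldb, {},
                           {LHS, RHS, B.getInt32(4)});
}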

File diff suppressed because it is too large

View File

@@ -0,0 +1,35 @@
// Define intrinsics written by hand
// VEL Intrinsic instructions.
let TargetPrefix = "ve" in {
def int_ve_vl_svob : GCCBuiltin<"__builtin_ve_vl_svob">,
Intrinsic<[], [], [IntrHasSideEffects]>;
def int_ve_vl_pack_f32p : GCCBuiltin<"__builtin_ve_vl_pack_f32p">,
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty, llvm_ptr_ty],
[IntrReadMem]>;
def int_ve_vl_pack_f32a : GCCBuiltin<"__builtin_ve_vl_pack_f32a">,
Intrinsic<[llvm_i64_ty], [llvm_ptr_ty],
[IntrReadMem]>;
def int_ve_vl_extract_vm512u :
GCCBuiltin<"__builtin_ve_vl_extract_vm512u">,
Intrinsic<[LLVMType<v256i1>], [LLVMType<v512i1>], [IntrNoMem]>;
def int_ve_vl_extract_vm512l :
GCCBuiltin<"__builtin_ve_vl_extract_vm512l">,
Intrinsic<[LLVMType<v256i1>], [LLVMType<v512i1>], [IntrNoMem]>;
def int_ve_vl_insert_vm512u :
GCCBuiltin<"__builtin_ve_vl_insert_vm512u">,
Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v256i1>],
[IntrNoMem]>;
def int_ve_vl_insert_vm512l :
GCCBuiltin<"__builtin_ve_vl_insert_vm512l">,
Intrinsic<[LLVMType<v512i1>], [LLVMType<v512i1>, LLVMType<v256i1>],
[IntrNoMem]>;
}
// Define intrinsics automatically generated
include "llvm/IR/IntrinsicsVEVL.gen.td"

File diff suppressed because it is too large

View File

@@ -0,0 +1,74 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_WASM_ENUMS_H
#define LLVM_IR_INTRINSIC_WASM_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum WASMIntrinsics : unsigned {
// Enum values for intrinsics
wasm_alltrue = 9286, // llvm.wasm.alltrue
wasm_anytrue, // llvm.wasm.anytrue
wasm_avgr_unsigned, // llvm.wasm.avgr.unsigned
wasm_bitmask, // llvm.wasm.bitmask
wasm_bitselect, // llvm.wasm.bitselect
wasm_catch, // llvm.wasm.catch
wasm_dot, // llvm.wasm.dot
wasm_extadd_pairwise_signed, // llvm.wasm.extadd.pairwise.signed
wasm_extadd_pairwise_unsigned, // llvm.wasm.extadd.pairwise.unsigned
wasm_fma, // llvm.wasm.fma
wasm_fms, // llvm.wasm.fms
wasm_get_ehselector, // llvm.wasm.get.ehselector
wasm_get_exception, // llvm.wasm.get.exception
wasm_landingpad_index, // llvm.wasm.landingpad.index
wasm_laneselect, // llvm.wasm.laneselect
wasm_lsda, // llvm.wasm.lsda
wasm_memory_atomic_notify, // llvm.wasm.memory.atomic.notify
wasm_memory_atomic_wait32, // llvm.wasm.memory.atomic.wait32
wasm_memory_atomic_wait64, // llvm.wasm.memory.atomic.wait64
wasm_memory_grow, // llvm.wasm.memory.grow
wasm_memory_size, // llvm.wasm.memory.size
wasm_narrow_signed, // llvm.wasm.narrow.signed
wasm_narrow_unsigned, // llvm.wasm.narrow.unsigned
wasm_pmax, // llvm.wasm.pmax
wasm_pmin, // llvm.wasm.pmin
wasm_q15mulr_sat_signed, // llvm.wasm.q15mulr.sat.signed
wasm_ref_null_extern, // llvm.wasm.ref.null.extern
wasm_ref_null_func, // llvm.wasm.ref.null.func
wasm_relaxed_max, // llvm.wasm.relaxed.max
wasm_relaxed_min, // llvm.wasm.relaxed.min
wasm_relaxed_swizzle, // llvm.wasm.relaxed.swizzle
wasm_relaxed_trunc_signed, // llvm.wasm.relaxed.trunc.signed
wasm_relaxed_trunc_unsigned, // llvm.wasm.relaxed.trunc.unsigned
wasm_relaxed_trunc_zero_signed, // llvm.wasm.relaxed.trunc.zero.signed
wasm_relaxed_trunc_zero_unsigned, // llvm.wasm.relaxed.trunc.zero.unsigned
wasm_rethrow, // llvm.wasm.rethrow
wasm_shuffle, // llvm.wasm.shuffle
wasm_sub_sat_signed, // llvm.wasm.sub.sat.signed
wasm_sub_sat_unsigned, // llvm.wasm.sub.sat.unsigned
wasm_swizzle, // llvm.wasm.swizzle
wasm_table_copy, // llvm.wasm.table.copy
wasm_table_fill_externref, // llvm.wasm.table.fill.externref
wasm_table_fill_funcref, // llvm.wasm.table.fill.funcref
wasm_table_grow_externref, // llvm.wasm.table.grow.externref
wasm_table_grow_funcref, // llvm.wasm.table.grow.funcref
wasm_table_size, // llvm.wasm.table.size
wasm_throw, // llvm.wasm.throw
wasm_tls_align, // llvm.wasm.tls.align
wasm_tls_base, // llvm.wasm.tls.base
wasm_tls_size, // llvm.wasm.tls.size
wasm_trunc_saturate_signed, // llvm.wasm.trunc.saturate.signed
wasm_trunc_saturate_unsigned, // llvm.wasm.trunc.saturate.unsigned
wasm_trunc_signed, // llvm.wasm.trunc.signed
wasm_trunc_unsigned, // llvm.wasm.trunc.unsigned
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif

View File

@@ -0,0 +1,289 @@
//===- IntrinsicsWebAssembly.td - Defines wasm intrinsics --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
///
/// \file
/// This file defines all of the WebAssembly-specific intrinsics.
///
//===----------------------------------------------------------------------===//
// Type definition for a table in an intrinsic
def llvm_table_ty : LLVMQualPointerType<llvm_i8_ty, 1>;
let TargetPrefix = "wasm" in { // All intrinsics start with "llvm.wasm.".
// Query the current memory size, and increase the current memory size.
// Note that memory.size is not IntrNoMem because it must be sequenced with
// respect to memory.grow calls.
def int_wasm_memory_size : Intrinsic<[llvm_anyint_ty],
[llvm_i32_ty],
[IntrReadMem]>;
def int_wasm_memory_grow : Intrinsic<[llvm_anyint_ty],
[llvm_i32_ty, LLVMMatchType<0>],
[]>;
//===----------------------------------------------------------------------===//
// ref.null intrinsics
//===----------------------------------------------------------------------===//
def int_wasm_ref_null_extern : Intrinsic<[llvm_externref_ty], [], [IntrNoMem]>;
def int_wasm_ref_null_func : Intrinsic<[llvm_funcref_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Table intrinsics
//===----------------------------------------------------------------------===//
// Query the current table size, and increase the current table size.
def int_wasm_table_size : Intrinsic<[llvm_i32_ty],
[llvm_table_ty],
[IntrReadMem]>;
def int_wasm_table_copy : Intrinsic<[],
[llvm_table_ty, llvm_table_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[]>;
def int_wasm_table_grow_externref : Intrinsic<[llvm_i32_ty],
[llvm_table_ty, llvm_externref_ty, llvm_i32_ty],
[]>;
def int_wasm_table_grow_funcref : Intrinsic<[llvm_i32_ty],
[llvm_table_ty, llvm_funcref_ty, llvm_i32_ty],
[]>;
def int_wasm_table_fill_externref : Intrinsic<[],
[llvm_table_ty, llvm_i32_ty, llvm_externref_ty, llvm_i32_ty],
[]>;
def int_wasm_table_fill_funcref : Intrinsic<[],
[llvm_table_ty, llvm_i32_ty, llvm_funcref_ty, llvm_i32_ty],
[]>;
//===----------------------------------------------------------------------===//
// Trapping float-to-int conversions
//===----------------------------------------------------------------------===//
def int_wasm_trunc_signed : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty],
[IntrNoMem]>;
def int_wasm_trunc_unsigned : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty],
[IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Saturating float-to-int conversions
//===----------------------------------------------------------------------===//
def int_wasm_trunc_saturate_signed : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_trunc_saturate_unsigned : Intrinsic<[llvm_anyint_ty],
[llvm_anyfloat_ty],
[IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Exception handling intrinsics
//===----------------------------------------------------------------------===//
// throw / rethrow
// The first immediate argument is an index to a tag, which is 0 for C++
// exceptions. The second argument is the thrown exception pointer.
def int_wasm_throw : Intrinsic<[], [llvm_i32_ty, llvm_ptr_ty],
[Throws, IntrNoReturn, ImmArg<ArgIndex<0>>]>;
def int_wasm_rethrow : Intrinsic<[], [], [Throws, IntrNoReturn]>;
// Since wasm does not use landingpad instructions, these instructions return
// the exception pointer and selector values until we lower them in WasmEHPrepare.
def int_wasm_get_exception : Intrinsic<[llvm_ptr_ty], [llvm_token_ty],
[IntrHasSideEffects]>;
def int_wasm_get_ehselector : Intrinsic<[llvm_i32_ty], [llvm_token_ty],
[IntrHasSideEffects]>;
// wasm.catch returns the pointer to the exception object caught by wasm 'catch'
// instruction. This returns a single pointer, which is the case for C++
// exceptions. The immediate argument is an index to a tag, which is 0 for
// C++ exceptions.
def int_wasm_catch : Intrinsic<[llvm_ptr_ty], [llvm_i32_ty],
[IntrHasSideEffects, ImmArg<ArgIndex<0>>]>;
// WebAssembly EH must maintain the landingpads in the order assigned to them
// by WasmEHPrepare pass to generate landingpad table in EHStreamer. This is
// used in order to give them the indices in WasmEHPrepare.
def int_wasm_landingpad_index: Intrinsic<[], [llvm_token_ty, llvm_i32_ty],
[IntrNoMem, ImmArg<ArgIndex<1>>]>;
// Returns LSDA address of the current function.
def int_wasm_lsda : Intrinsic<[llvm_ptr_ty], [], [IntrNoMem]>;
//===----------------------------------------------------------------------===//
// Atomic intrinsics
//===----------------------------------------------------------------------===//
// wait / notify
def int_wasm_memory_atomic_wait32 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i32_ty>, llvm_i32_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>, IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_memory_atomic_wait64 :
Intrinsic<[llvm_i32_ty],
[LLVMPointerType<llvm_i64_ty>, llvm_i64_ty, llvm_i64_ty],
[IntrInaccessibleMemOrArgMemOnly, ReadOnly<ArgIndex<0>>,
NoCapture<ArgIndex<0>>, IntrHasSideEffects],
"", [SDNPMemOperand]>;
def int_wasm_memory_atomic_notify:
Intrinsic<[llvm_i32_ty], [LLVMPointerType<llvm_i32_ty>, llvm_i32_ty],
[IntrInaccessibleMemOnly, NoCapture<ArgIndex<0>>,
IntrHasSideEffects],
"", [SDNPMemOperand]>;
//===----------------------------------------------------------------------===//
// SIMD intrinsics
//===----------------------------------------------------------------------===//
def int_wasm_swizzle :
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_shuffle :
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty,
llvm_i32_ty, llvm_i32_ty, llvm_i32_ty, llvm_i32_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_sat_signed :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_sub_sat_unsigned :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_avgr_unsigned :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_bitselect :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_anytrue :
Intrinsic<[llvm_i32_ty],
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_alltrue :
Intrinsic<[llvm_i32_ty],
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_bitmask :
Intrinsic<[llvm_i32_ty],
[llvm_anyvector_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_dot :
Intrinsic<[llvm_v4i32_ty],
[llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_narrow_signed :
Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_narrow_unsigned :
Intrinsic<[llvm_anyvector_ty],
[llvm_anyvector_ty, LLVMMatchType<1>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_q15mulr_sat_signed :
Intrinsic<[llvm_v8i16_ty],
[llvm_v8i16_ty, llvm_v8i16_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_pmin :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_pmax :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_extadd_pairwise_signed :
Intrinsic<[llvm_anyvector_ty],
[LLVMSubdivide2VectorType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_extadd_pairwise_unsigned :
Intrinsic<[llvm_anyvector_ty],
[LLVMSubdivide2VectorType<0>],
[IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Relaxed SIMD intrinsics (experimental)
//===----------------------------------------------------------------------===//
def int_wasm_fma :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_fms :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_laneselect :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_swizzle :
Intrinsic<[llvm_v16i8_ty],
[llvm_v16i8_ty, llvm_v16i8_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_min :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_max :
Intrinsic<[llvm_anyvector_ty],
[LLVMMatchType<0>, LLVMMatchType<0>],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_trunc_signed:
Intrinsic<[llvm_v4i32_ty],
[llvm_v4f32_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_trunc_unsigned:
Intrinsic<[llvm_v4i32_ty],
[llvm_v4f32_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_trunc_zero_signed:
Intrinsic<[llvm_v4i32_ty],
[llvm_v2f64_ty],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_relaxed_trunc_zero_unsigned:
Intrinsic<[llvm_v4i32_ty],
[llvm_v2f64_ty],
[IntrNoMem, IntrSpeculatable]>;
//===----------------------------------------------------------------------===//
// Thread-local storage intrinsics
//===----------------------------------------------------------------------===//
def int_wasm_tls_size :
Intrinsic<[llvm_anyint_ty],
[],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_tls_align :
Intrinsic<[llvm_anyint_ty],
[],
[IntrNoMem, IntrSpeculatable]>;
def int_wasm_tls_base :
Intrinsic<[llvm_ptr_ty],
[],
[IntrReadMem]>;
} // TargetPrefix = "wasm"
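
Intrinsics declared with llvm_anyint_ty are overloaded, so the concrete type is chosen when the call is created. A minimal sketch (names are illustrative) selecting i32 for llvm.wasm.memory.size:

#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Intrinsics.h"

// Sketch: query the size of memory 0 as an i32. The overloaded return
// type is passed explicitly because the intrinsic is declared anyint.
llvm::Value *emitMemorySize(llvm::IRBuilder<> &B) {
  return B.CreateIntrinsic(llvm::Intrinsic::wasm_memory_size,
                           {B.getInt32Ty()}, {B.getInt32(0)});
}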

File diff suppressed because it is too large

File diff suppressed because it is too large

View File

@@ -0,0 +1,73 @@
/*===- TableGen'erated file -------------------------------------*- C++ -*-===*\
|* *|
|* Intrinsic Function Source Fragment *|
|* *|
|* Automatically generated file, do not edit! *|
|* *|
\*===----------------------------------------------------------------------===*/
#ifndef LLVM_IR_INTRINSIC_XCORE_ENUMS_H
#define LLVM_IR_INTRINSIC_XCORE_ENUMS_H
namespace llvm {
namespace Intrinsic {
enum XCOREIntrinsics : unsigned {
// Enum values for intrinsics
xcore_bitrev = 10708, // llvm.xcore.bitrev
xcore_checkevent, // llvm.xcore.checkevent
xcore_chkct, // llvm.xcore.chkct
xcore_clre, // llvm.xcore.clre
xcore_clrpt, // llvm.xcore.clrpt
xcore_clrsr, // llvm.xcore.clrsr
xcore_crc32, // llvm.xcore.crc32
xcore_crc8, // llvm.xcore.crc8
xcore_edu, // llvm.xcore.edu
xcore_eeu, // llvm.xcore.eeu
xcore_endin, // llvm.xcore.endin
xcore_freer, // llvm.xcore.freer
xcore_geted, // llvm.xcore.geted
xcore_getet, // llvm.xcore.getet
xcore_getid, // llvm.xcore.getid
xcore_getps, // llvm.xcore.getps
xcore_getr, // llvm.xcore.getr
xcore_getst, // llvm.xcore.getst
xcore_getts, // llvm.xcore.getts
xcore_in, // llvm.xcore.in
xcore_inct, // llvm.xcore.inct
xcore_initcp, // llvm.xcore.initcp
xcore_initdp, // llvm.xcore.initdp
xcore_initlr, // llvm.xcore.initlr
xcore_initpc, // llvm.xcore.initpc
xcore_initsp, // llvm.xcore.initsp
xcore_inshr, // llvm.xcore.inshr
xcore_int, // llvm.xcore.int
xcore_mjoin, // llvm.xcore.mjoin
xcore_msync, // llvm.xcore.msync
xcore_out, // llvm.xcore.out
xcore_outct, // llvm.xcore.outct
xcore_outshr, // llvm.xcore.outshr
xcore_outt, // llvm.xcore.outt
xcore_peek, // llvm.xcore.peek
xcore_setc, // llvm.xcore.setc
xcore_setclk, // llvm.xcore.setclk
xcore_setd, // llvm.xcore.setd
xcore_setev, // llvm.xcore.setev
xcore_setps, // llvm.xcore.setps
xcore_setpsc, // llvm.xcore.setpsc
xcore_setpt, // llvm.xcore.setpt
xcore_setrdy, // llvm.xcore.setrdy
xcore_setsr, // llvm.xcore.setsr
xcore_settw, // llvm.xcore.settw
xcore_setv, // llvm.xcore.setv
xcore_sext, // llvm.xcore.sext
xcore_ssync, // llvm.xcore.ssync
xcore_syncr, // llvm.xcore.syncr
xcore_testct, // llvm.xcore.testct
xcore_testwct, // llvm.xcore.testwct
xcore_waitevent, // llvm.xcore.waitevent
xcore_zext, // llvm.xcore.zext
}; // enum
} // namespace Intrinsic
} // namespace llvm
#endif

View File

@@ -0,0 +1,120 @@
//==- IntrinsicsXCore.td - XCore intrinsics -*- tablegen -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines all of the XCore-specific intrinsics.
//
//===----------------------------------------------------------------------===//
let TargetPrefix = "xcore" in { // All intrinsics start with "llvm.xcore.".
// Miscellaneous instructions.
def int_xcore_bitrev : Intrinsic<[llvm_i32_ty],[llvm_i32_ty],[IntrNoMem]>,
GCCBuiltin<"__builtin_bitrev">;
def int_xcore_crc8 : Intrinsic<[llvm_i32_ty, llvm_i32_ty],
[llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_crc32 : Intrinsic<[llvm_i32_ty],
[llvm_i32_ty,llvm_i32_ty,llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_sext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_zext : Intrinsic<[llvm_i32_ty], [llvm_i32_ty, llvm_i32_ty],
[IntrNoMem]>;
def int_xcore_getid : Intrinsic<[llvm_i32_ty],[],[IntrNoMem]>,
GCCBuiltin<"__builtin_getid">;
def int_xcore_getps : Intrinsic<[llvm_i32_ty],[llvm_i32_ty]>,
GCCBuiltin<"__builtin_getps">;
def int_xcore_setps : Intrinsic<[],[llvm_i32_ty, llvm_i32_ty]>,
GCCBuiltin<"__builtin_setps">;
def int_xcore_geted : Intrinsic<[llvm_i32_ty],[]>;
def int_xcore_getet : Intrinsic<[llvm_i32_ty],[]>;
def int_xcore_setsr : Intrinsic<[],[llvm_i32_ty]>;
def int_xcore_clrsr : Intrinsic<[],[llvm_i32_ty]>;
// Resource instructions.
def int_xcore_getr : Intrinsic<[llvm_anyptr_ty],[llvm_i32_ty]>;
def int_xcore_freer : Intrinsic<[],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_in : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],[NoCapture<ArgIndex<0>>]>;
def int_xcore_int : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_inct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_out : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_outt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_outct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_chkct : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_testct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_testwct : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_setd : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_setc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_inshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_outshr : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_setpt : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_clrpt : Intrinsic<[],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_getts : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_syncr : Intrinsic<[],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_settw : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_setv : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_setev : Intrinsic<[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_eeu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_edu : Intrinsic<[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_setclk : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
def int_xcore_setrdy : Intrinsic<[],[llvm_anyptr_ty, llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>, NoCapture<ArgIndex<1>>]>;
def int_xcore_setpsc : Intrinsic<[],[llvm_anyptr_ty, llvm_i32_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_peek : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_endin : Intrinsic<[llvm_i32_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
// Intrinsics for events.
def int_xcore_waitevent : Intrinsic<[llvm_ptr_ty],[], [IntrReadMem]>;
// If any of the resources owned by the thread are ready, this returns the
// vector of one of the ready resources. If no resources owned by the thread
// are ready then the operand passed to the intrinsic is returned.
def int_xcore_checkevent : Intrinsic<[llvm_ptr_ty],[llvm_ptr_ty]>;
def int_xcore_clre : Intrinsic<[],[],[]>;
// Intrinsics for threads.
def int_xcore_getst : Intrinsic <[llvm_anyptr_ty],[llvm_anyptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_msync : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_ssync : Intrinsic <[],[]>;
def int_xcore_mjoin : Intrinsic <[],[llvm_anyptr_ty], [NoCapture<ArgIndex<0>>]>;
def int_xcore_initsp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_initpc : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_initlr : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_initcp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
def int_xcore_initdp : Intrinsic <[],[llvm_anyptr_ty, llvm_ptr_ty],
[NoCapture<ArgIndex<0>>]>;
}
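
Most resource intrinsics above take llvm_anyptr_ty, so a concrete pointer type is supplied when the declaration is requested. A sketch under this tree's typed-pointer assumptions (names are illustrative):

#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Module.h"

// Sketch: declare llvm.xcore.getr with an i8* result; the overloaded
// anyptr return type is the single explicit type argument.
llvm::Function *getGetRDecl(llvm::Module &M) {
  llvm::Type *I8Ptr = llvm::Type::getInt8PtrTy(M.getContext());
  return llvm::Intrinsic::getDeclaration(&M, llvm::Intrinsic::xcore_getr,
                                         {I8Ptr});
}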

View File

@@ -0,0 +1,341 @@
//===- llvm/LLVMContext.h - Class for managing "global" state ---*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares LLVMContext, a container of "global" state in LLVM, such
// as the global type and constant uniquing tables.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_LLVMCONTEXT_H
#define LLVM_IR_LLVMCONTEXT_H
#include "llvm-c/Types.h"
#include "llvm/ADT/Optional.h"
#include "llvm/IR/DiagnosticHandler.h"
#include "llvm/Support/CBindingWrapping.h"
#include <cstdint>
#include <memory>
#include <string>
namespace llvm {
class DiagnosticInfo;
enum DiagnosticSeverity : char;
class Function;
class Instruction;
class LLVMContextImpl;
class Module;
class OptPassGate;
template <typename T> class SmallVectorImpl;
template <typename T> class StringMapEntry;
class StringRef;
class Twine;
class LLVMRemarkStreamer;
namespace remarks {
class RemarkStreamer;
}
namespace SyncScope {
typedef uint8_t ID;
/// Known synchronization scope IDs, which always have the same value. All
/// synchronization scope IDs that LLVM has special knowledge of are listed
/// here. Additionally, this scheme allows LLVM to efficiently check for
/// specific synchronization scope ID without comparing strings.
enum {
/// Synchronized with respect to signal handlers executing in the same thread.
SingleThread = 0,
/// Synchronized with respect to all concurrently executing threads.
System = 1
};
} // end namespace SyncScope
/// This is an important class for using LLVM in a threaded context. It
/// (opaquely) owns and manages the core "global" data of LLVM's core
/// infrastructure, including the type and constant uniquing tables.
/// LLVMContext itself provides no locking guarantees, so you should be careful
/// to have one context per thread.
class LLVMContext {
public:
LLVMContextImpl *const pImpl;
LLVMContext();
LLVMContext(LLVMContext &) = delete;
LLVMContext &operator=(const LLVMContext &) = delete;
~LLVMContext();
// Pinned metadata names, which always have the same value. This is a
// compile-time performance optimization, not a correctness optimization.
enum : unsigned {
#define LLVM_FIXED_MD_KIND(EnumID, Name, Value) EnumID = Value,
#include "llvm/IR/FixedMetadataKinds.def"
#undef LLVM_FIXED_MD_KIND
};
/// Known operand bundle tag IDs, which always have the same value. All
/// operand bundle tags that LLVM has special knowledge of are listed here.
/// Additionally, this scheme allows LLVM to efficiently check for specific
/// operand bundle tags without comparing strings. Keep this in sync with
/// LLVMContext::LLVMContext().
enum : unsigned {
OB_deopt = 0, // "deopt"
OB_funclet = 1, // "funclet"
OB_gc_transition = 2, // "gc-transition"
OB_cfguardtarget = 3, // "cfguardtarget"
OB_preallocated = 4, // "preallocated"
OB_gc_live = 5, // "gc-live"
OB_clang_arc_attachedcall = 6, // "clang.arc.attachedcall"
};
/// getMDKindID - Return a unique non-zero ID for the specified metadata kind.
/// This ID is uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
/// getMDKindNames - Populate client supplied SmallVector with the name for
/// custom metadata IDs registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
/// getOperandBundleTags - Populate client supplied SmallVector with the
/// bundle tags registered in this LLVMContext. The bundle tags are ordered
/// by increasing bundle IDs.
/// \see LLVMContext::getOperandBundleTagID
void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
/// getOrInsertBundleTag - Returns the Tag to use for an operand bundle of
/// name TagName.
StringMapEntry<uint32_t> *getOrInsertBundleTag(StringRef TagName) const;
/// getOperandBundleTagID - Maps a bundle tag to an integer ID. Every bundle
/// tag registered with an LLVMContext has an unique ID.
uint32_t getOperandBundleTagID(StringRef Tag) const;
/// getOrInsertSyncScopeID - Maps synchronization scope name to
/// synchronization scope ID. Every synchronization scope registered with
/// LLVMContext has a unique ID, except for the pre-defined ones.
SyncScope::ID getOrInsertSyncScopeID(StringRef SSN);
/// getSyncScopeNames - Populates client supplied SmallVector with
/// synchronization scope names registered with LLVMContext. Synchronization
/// scope names are ordered by increasing synchronization scope IDs.
void getSyncScopeNames(SmallVectorImpl<StringRef> &SSNs) const;
/// Define the GC for a function
void setGC(const Function &Fn, std::string GCName);
/// Return the GC for a function
const std::string &getGC(const Function &Fn);
/// Remove the GC for a function
void deleteGC(const Function &Fn);
/// Return true if the Context runtime configuration is set to discard all
/// value names. When true, only GlobalValue names will be available in the
/// IR.
bool shouldDiscardValueNames() const;
/// Set the Context runtime configuration to discard all value names (except
/// GlobalValue names). Clients can use this flag to save memory and runtime,
/// especially in release mode.
void setDiscardValueNames(bool Discard);
/// Whether there is a string map for uniquing debug info
/// identifiers across the context. Off by default.
bool isODRUniquingDebugTypes() const;
void enableDebugTypeODRUniquing();
void disableDebugTypeODRUniquing();
/// Defines the type of a yield callback.
/// \see LLVMContext::setYieldCallback.
using YieldCallbackTy = void (*)(LLVMContext *Context, void *OpaqueHandle);
/// setDiagnosticHandlerCallBack - This method sets a handler call back
/// that is invoked when the backend needs to report anything to the user.
/// The first argument is a function pointer and the second is a context pointer
/// that gets passed into the DiagHandler. The third argument should be set to
/// true if the handler only expects enabled diagnostics.
///
/// LLVMContext doesn't take ownership or interpret either of these
/// pointers.
void setDiagnosticHandlerCallBack(
DiagnosticHandler::DiagnosticHandlerTy DiagHandler,
void *DiagContext = nullptr, bool RespectFilters = false);
/// setDiagnosticHandler - This method installs a unique_ptr to a
/// DiagnosticHandler object to provide custom diagnostic handling. The first
/// argument is a unique_ptr to a DiagnosticHandler or a class derived from
/// it. The second argument should be set to true if the handler only
/// expects enabled diagnostics.
///
/// Ownership of this pointer is moved to LLVMContextImpl.
void setDiagnosticHandler(std::unique_ptr<DiagnosticHandler> &&DH,
bool RespectFilters = false);
/// getDiagnosticHandlerCallBack - Return the diagnostic handler call back set by
/// setDiagnosticHandlerCallBack.
DiagnosticHandler::DiagnosticHandlerTy getDiagnosticHandlerCallBack() const;
/// getDiagnosticContext - Return the diagnostic context set by
/// setDiagnosticContext.
void *getDiagnosticContext() const;
/// getDiagHandlerPtr - Returns const raw pointer of DiagnosticHandler set by
/// setDiagnosticHandler.
const DiagnosticHandler *getDiagHandlerPtr() const;
/// getDiagnosticHandler - transfers ownership of DiagnosticHandler unique_ptr
/// to caller.
std::unique_ptr<DiagnosticHandler> getDiagnosticHandler();
/// Return if a code hotness metric should be included in optimization
/// diagnostics.
bool getDiagnosticsHotnessRequested() const;
/// Set if a code hotness metric should be included in optimization
/// diagnostics.
void setDiagnosticsHotnessRequested(bool Requested);
/// Return the minimum hotness value a diagnostic would need in order
/// to be included in optimization diagnostics.
///
/// Three possible return values:
/// 0 - threshold is disabled. Everything will be printed out.
/// positive int - threshold is set.
/// UINT64_MAX - threshold is not yet set, and needs to be synced from
/// profile summary. Note that in case of a missing profile
/// summary, the threshold will be kept at "MAX", effectively
/// suppressing all remarks output.
uint64_t getDiagnosticsHotnessThreshold() const;
/// Set the minimum hotness value a diagnostic needs in order to be
/// included in optimization diagnostics.
void setDiagnosticsHotnessThreshold(Optional<uint64_t> Threshold);
/// Return if hotness threshold is requested from PSI.
bool isDiagnosticsHotnessThresholdSetFromPSI() const;
/// The "main remark streamer" used by all the specialized remark streamers.
/// This streamer keeps generic remark metadata in memory throughout the life
/// of the LLVMContext. This metadata may be emitted in a section in object
/// files depending on the format requirements.
///
/// All specialized remark streamers should convert remarks to
/// llvm::remarks::Remark and emit them through this streamer.
remarks::RemarkStreamer *getMainRemarkStreamer();
const remarks::RemarkStreamer *getMainRemarkStreamer() const;
void setMainRemarkStreamer(
std::unique_ptr<remarks::RemarkStreamer> MainRemarkStreamer);
/// The "LLVM remark streamer" used by LLVM to serialize remark diagnostics
/// coming from IR and MIR passes.
///
/// If it does not exist, diagnostics are not saved in a file but only emitted
/// via the diagnostic handler.
LLVMRemarkStreamer *getLLVMRemarkStreamer();
const LLVMRemarkStreamer *getLLVMRemarkStreamer() const;
void
setLLVMRemarkStreamer(std::unique_ptr<LLVMRemarkStreamer> RemarkStreamer);
/// Get the prefix that should be printed in front of a diagnostic of
/// the given \p Severity
static const char *getDiagnosticMessagePrefix(DiagnosticSeverity Severity);
/// Report a message to the currently installed diagnostic handler.
///
/// This function returns, in particular in the case of error reporting
/// (DI.Severity == \a DS_Error), so the caller should leave the compilation
/// process in a self-consistent state, even though the generated code
/// need not be correct.
///
/// The diagnostic message will be implicitly prefixed with a severity keyword
/// according to \p DI.getSeverity(), i.e., "error: " for \a DS_Error,
/// "warning: " for \a DS_Warning, and "note: " for \a DS_Note.
void diagnose(const DiagnosticInfo &DI);
/// Registers a yield callback with the given context.
///
/// The yield callback function may be called by LLVM to transfer control back
/// to the client that invoked the LLVM compilation. This can be used to yield
/// control of the thread, or perform periodic work needed by the client.
/// There is no guaranteed frequency at which callbacks must occur; in fact,
/// the client is not guaranteed to ever receive this callback. It is at the
/// sole discretion of LLVM to do so and only if it can guarantee that
/// suspending the thread won't block any forward progress in other LLVM
/// contexts in the same process.
///
/// At a suspend point, the state of the current LLVM context is intentionally
/// undefined. No assumptions about it can or should be made. Only LLVM
/// context API calls that explicitly state that they can be used during a
/// yield callback are allowed to be used. Any other API calls into the
/// context are not supported until the yield callback function returns
/// control to LLVM. Other LLVM contexts are unaffected by this restriction.
void setYieldCallback(YieldCallbackTy Callback, void *OpaqueHandle);
/// Calls the yield callback (if applicable).
///
/// This transfers control of the current thread back to the client, which may
/// suspend the current thread. Only call this method when LLVM doesn't hold
/// any global mutex or cannot block the execution in another LLVM context.
void yield();
/// emitError - Emit an error message to the currently installed error handler
/// with optional location information. This function returns, so code should
/// be prepared to drop the erroneous construct on the floor and "not crash".
/// The generated code need not be correct. The error message will be
/// implicitly prefixed with "error: " and should not end with a ".".
void emitError(uint64_t LocCookie, const Twine &ErrorStr);
void emitError(const Instruction *I, const Twine &ErrorStr);
void emitError(const Twine &ErrorStr);
/// Access the object which can disable optional passes and individual
/// optimizations at compile time.
OptPassGate &getOptPassGate() const;
/// Set the object which can disable optional passes and individual
/// optimizations at compile time.
///
/// The lifetime of the object must be guaranteed to extend as long as the
/// LLVMContext is used by compilation.
void setOptPassGate(OptPassGate&);
/// Enable opaque pointers. Can only be called before creating the first
/// pointer type.
void enableOpaquePointers() const;
/// Whether typed pointers are supported. If false, all pointers are opaque.
bool supportsTypedPointers() const;
private:
// Module needs access to the add/removeModule methods.
friend class Module;
/// addModule - Register a module as being instantiated in this context. If
/// the context is deleted, the module will be deleted as well.
void addModule(Module*);
/// removeModule - Unregister a module from this context.
void removeModule(Module*);
};
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(LLVMContext, LLVMContextRef)
/* Specialized opaque context conversions. */
inline LLVMContext **unwrap(LLVMContextRef* Tys) {
return reinterpret_cast<LLVMContext**>(Tys);
}
inline LLVMContextRef *wrap(const LLVMContext **Tys) {
return reinterpret_cast<LLVMContextRef*>(const_cast<LLVMContext**>(Tys));
}
} // end namespace llvm
#endif // LLVM_IR_LLVMCONTEXT_H

View File

@@ -0,0 +1,100 @@
//===- llvm/IR/LLVMRemarkStreamer.h - Streamer for LLVM remarks--*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the conversion between IR Diagnostics and
// serializable remarks::Remark objects.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_LLVMREMARKSTREAMER_H
#define LLVM_IR_LLVMREMARKSTREAMER_H
#include "llvm/Remarks/Remark.h"
#include "llvm/Support/Error.h"
#include <memory>
#include <string>
namespace llvm {
class DiagnosticInfoOptimizationBase;
class LLVMContext;
class ToolOutputFile;
namespace remarks {
class RemarkStreamer;
}
/// Streamer for LLVM remarks which has logic for dealing with DiagnosticInfo
/// objects.
class LLVMRemarkStreamer {
remarks::RemarkStreamer &RS;
/// Convert diagnostics into remark objects.
/// The lifetime of the members of the result is bound to the lifetime of
/// the LLVM diagnostics.
remarks::Remark toRemark(const DiagnosticInfoOptimizationBase &Diag) const;
public:
LLVMRemarkStreamer(remarks::RemarkStreamer &RS) : RS(RS) {}
/// Emit a diagnostic through the streamer.
void emit(const DiagnosticInfoOptimizationBase &Diag);
};
template <typename ThisError>
struct LLVMRemarkSetupErrorInfo : public ErrorInfo<ThisError> {
std::string Msg;
std::error_code EC;
LLVMRemarkSetupErrorInfo(Error E) {
handleAllErrors(std::move(E), [&](const ErrorInfoBase &EIB) {
Msg = EIB.message();
EC = EIB.convertToErrorCode();
});
}
void log(raw_ostream &OS) const override { OS << Msg; }
std::error_code convertToErrorCode() const override { return EC; }
};
struct LLVMRemarkSetupFileError
: LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFileError> {
static char ID;
using LLVMRemarkSetupErrorInfo<
LLVMRemarkSetupFileError>::LLVMRemarkSetupErrorInfo;
};
struct LLVMRemarkSetupPatternError
: LLVMRemarkSetupErrorInfo<LLVMRemarkSetupPatternError> {
static char ID;
using LLVMRemarkSetupErrorInfo<
LLVMRemarkSetupPatternError>::LLVMRemarkSetupErrorInfo;
};
struct LLVMRemarkSetupFormatError
: LLVMRemarkSetupErrorInfo<LLVMRemarkSetupFormatError> {
static char ID;
using LLVMRemarkSetupErrorInfo<
LLVMRemarkSetupFormatError>::LLVMRemarkSetupErrorInfo;
};
/// Setup optimization remarks that output to a file.
Expected<std::unique_ptr<ToolOutputFile>>
setupLLVMOptimizationRemarks(LLVMContext &Context, StringRef RemarksFilename,
StringRef RemarksPasses, StringRef RemarksFormat,
bool RemarksWithHotness,
Optional<uint64_t> RemarksHotnessThreshold = 0);
/// Setup optimization remarks that output directly to a raw_ostream.
/// \p OS is managed by the caller and should be open for writing as long as \p
/// Context is streaming remarks to it.
Error setupLLVMOptimizationRemarks(
LLVMContext &Context, raw_ostream &OS, StringRef RemarksPasses,
StringRef RemarksFormat, bool RemarksWithHotness,
Optional<uint64_t> RemarksHotnessThreshold = 0);
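// Illustrative usage sketch (the file name and format are assumptions; Ctx is
// an LLVMContext):
//
//   Expected<std::unique_ptr<ToolOutputFile>> FileOrErr =
//       setupLLVMOptimizationRemarks(Ctx, "remarks.yaml", /*RemarksPasses=*/"",
//                                    /*RemarksFormat=*/"yaml",
//                                    /*RemarksWithHotness=*/false);
//   if (!FileOrErr)
//     return FileOrErr.takeError(); // Setup failed; propagate the Error.
//   if (*FileOrErr)
//     (*FileOrErr)->keep(); // Keep the remark file once compilation succeeds.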
} // end namespace llvm
#endif // LLVM_IR_LLVMREMARKSTREAMER_H

View File

@@ -0,0 +1,106 @@
//===- LegacyPassManager.h - Legacy Container for Passes --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the legacy PassManager class. This class is used to hold,
// maintain, and optimize execution of Passes. The PassManager class ensures
// that analysis results are available before a pass runs, and that Passes are
// destroyed when the PassManager is destroyed.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_LEGACYPASSMANAGER_H
#define LLVM_IR_LEGACYPASSMANAGER_H
#include "llvm/Support/CBindingWrapping.h"
namespace llvm {
class Function;
class Pass;
class Module;
namespace legacy {
// Whether or not -debug-pass has been specified. Used to check whether it is
// specified alongside the new PM.
bool debugPassSpecified();
class PassManagerImpl;
class FunctionPassManagerImpl;
/// PassManagerBase - An abstract interface to allow code to add passes to
/// a pass manager without having to hard-code what kind of pass manager
/// it is.
class PassManagerBase {
public:
virtual ~PassManagerBase();
/// Add a pass to the queue of passes to run. This passes ownership of
/// the Pass to the PassManager. When the PassManager is destroyed, the pass
/// will be destroyed as well, so there is no need to delete the pass. This
/// may even destroy the pass right away if it is found to be redundant. This
/// implies that all passes MUST be allocated with 'new'.
virtual void add(Pass *P) = 0;
};
/// PassManager manages ModulePassManagers
class PassManager : public PassManagerBase {
public:
PassManager();
~PassManager() override;
void add(Pass *P) override;
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool run(Module &M);
private:
  /// PassManagerImpl is the actual implementation class. PassManager is just
  /// a wrapper that publishes the simple pass manager interface.
PassManagerImpl *PM;
};
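// Illustrative usage sketch ("MyPass" is a hypothetical pass; M is a Module):
//
//   legacy::PassManager PM;
//   PM.add(new MyPass());      // PM takes ownership; do not delete the pass.
//   bool Changed = PM.run(M);  // True if any pass modified the module.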
/// FunctionPassManager manages FunctionPasses.
class FunctionPassManager : public PassManagerBase {
public:
/// FunctionPassManager ctor - This initializes the pass manager. It needs,
/// but does not take ownership of, the specified Module.
explicit FunctionPassManager(Module *M);
~FunctionPassManager() override;
void add(Pass *P) override;
/// run - Execute all of the passes scheduled for execution. Keep
/// track of whether any of the passes modifies the function, and if
/// so, return true.
///
bool run(Function &F);
/// doInitialization - Run all of the initializers for the function passes.
///
bool doInitialization();
/// doFinalization - Run all of the finalizers for the function passes.
///
bool doFinalization();
private:
FunctionPassManagerImpl *FPM;
Module *M;
};
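// Illustrative call-sequence sketch (M is a Module; "SomeFunctionPass" is a
// hypothetical pass):
//
//   legacy::FunctionPassManager FPM(&M);
//   FPM.add(new SomeFunctionPass());
//   FPM.doInitialization();
//   for (Function &F : M)
//     FPM.run(F);
//   FPM.doFinalization();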
} // End legacy namespace
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_STDCXX_CONVERSION_FUNCTIONS(legacy::PassManagerBase, LLVMPassManagerRef)
} // End llvm namespace
#endif

View File

@@ -0,0 +1,516 @@
//===- LegacyPassManagers.h - Legacy Pass Infrastructure --------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file declares the LLVM Pass Manager infrastructure.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_LEGACYPASSMANAGERS_H
#define LLVM_IR_LEGACYPASSMANAGERS_H
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/FoldingSet.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Pass.h"
#include <vector>
//===----------------------------------------------------------------------===//
// Overview:
// The Pass Manager Infrastructure manages passes. Its responsibilities are:
//
// o Manage optimization pass execution order
// o Make required Analysis information available before pass P is run
// o Release memory occupied by dead passes
// o If Analysis information is dirtied by a pass then regenerate Analysis
// information before it is consumed by another pass.
//
// Pass Manager Infrastructure uses multiple pass managers. They are
// PassManager, FunctionPassManager, MPPassManager, FPPassManager, BBPassManager.
// This class hierarchy uses multiple inheritance but pass managers do not
// derive from another pass manager.
//
// PassManager and FunctionPassManager are the two top-level pass managers that
// represent the external interface of this entire pass manager infrastructure.
//
// Important classes :
//
// [o] class PMTopLevelManager;
//
// Two top level managers, PassManager and FunctionPassManager, derive from
// PMTopLevelManager. PMTopLevelManager manages information used by top level
// managers such as last user info.
//
// [o] class PMDataManager;
//
// PMDataManager manages information, e.g. list of available analysis info,
// used by a pass manager to manage execution order of passes. It also provides
// a place to implement common pass manager APIs. All pass managers derive from
// PMDataManager.
//
// [o] class FunctionPassManager;
//
// This is an external interface used to manage FunctionPasses. This
// interface relies on FunctionPassManagerImpl to do all the tasks.
//
// [o] class FunctionPassManagerImpl : public ModulePass, PMDataManager,
// public PMTopLevelManager;
//
// FunctionPassManagerImpl is a top level manager. It manages FPPassManagers
//
// [o] class FPPassManager : public ModulePass, public PMDataManager;
//
// FPPassManager manages FunctionPasses and BBPassManagers
//
// [o] class MPPassManager : public Pass, public PMDataManager;
//
// MPPassManager manages ModulePasses and FPPassManagers
//
// [o] class PassManager;
//
// This is an external interface used by various tools to manage passes. It
// relies on PassManagerImpl to do all the tasks.
//
// [o] class PassManagerImpl : public Pass, public PMDataManager,
// public PMTopLevelManager
//
// PassManagerImpl is a top level pass manager responsible for managing
// MPPassManagers.
//===----------------------------------------------------------------------===//
#include "llvm/Support/PrettyStackTrace.h"
namespace llvm {
template <typename T> class ArrayRef;
class Module;
class StringRef;
class Value;
class PMDataManager;
// enums for debugging strings
enum PassDebuggingString {
EXECUTION_MSG, // "Executing Pass '" + PassName
MODIFICATION_MSG, // "Made Modification '" + PassName
FREEING_MSG, // " Freeing Pass '" + PassName
ON_FUNCTION_MSG, // "' on Function '" + FunctionName + "'...\n"
ON_MODULE_MSG, // "' on Module '" + ModuleName + "'...\n"
ON_REGION_MSG, // "' on Region '" + Msg + "'...\n'"
ON_LOOP_MSG, // "' on Loop '" + Msg + "'...\n'"
ON_CG_MSG // "' on Call Graph Nodes '" + Msg + "'...\n'"
};
/// PassManagerPrettyStackEntry - This is used to print informative information
/// about what pass is running when/if a stack trace is generated.
class PassManagerPrettyStackEntry : public PrettyStackTraceEntry {
Pass *P;
Value *V;
Module *M;
public:
explicit PassManagerPrettyStackEntry(Pass *p)
: P(p), V(nullptr), M(nullptr) {} // When P is releaseMemory'd.
PassManagerPrettyStackEntry(Pass *p, Value &v)
: P(p), V(&v), M(nullptr) {} // When P is run on V
PassManagerPrettyStackEntry(Pass *p, Module &m)
: P(p), V(nullptr), M(&m) {} // When P is run on M
/// print - Emit information about this stack frame to OS.
void print(raw_ostream &OS) const override;
};
//===----------------------------------------------------------------------===//
// PMStack
//
/// PMStack - This class implements a stack data structure of PMDataManager
/// pointers.
///
/// Top level pass managers (see PassManager.cpp) maintain active Pass Managers
/// using PMStack. Each Pass implements assignPassManager() to connect itself
/// with appropriate manager. assignPassManager() walks PMStack to find
/// suitable manager.
class PMStack {
public:
typedef std::vector<PMDataManager *>::const_reverse_iterator iterator;
iterator begin() const { return S.rbegin(); }
iterator end() const { return S.rend(); }
void pop();
PMDataManager *top() const { return S.back(); }
void push(PMDataManager *PM);
bool empty() const { return S.empty(); }
void dump() const;
private:
std::vector<PMDataManager *> S;
};
//===----------------------------------------------------------------------===//
// PMTopLevelManager
//
/// PMTopLevelManager manages LastUser info and collects common APIs used by
/// top level pass managers.
class PMTopLevelManager {
protected:
explicit PMTopLevelManager(PMDataManager *PMDM);
unsigned getNumContainedManagers() const {
return (unsigned)PassManagers.size();
}
void initializeAllAnalysisInfo();
private:
virtual PMDataManager *getAsPMDataManager() = 0;
virtual PassManagerType getTopLevelPassManagerType() = 0;
public:
/// Schedule pass P for execution. Make sure that passes required by
/// P are run before P is run. Update analysis info maintained by
/// the manager. Remove dead passes. This is a recursive function.
void schedulePass(Pass *P);
/// Set pass P as the last user of the given analysis passes.
void setLastUser(ArrayRef<Pass*> AnalysisPasses, Pass *P);
/// Collect passes whose last user is P
void collectLastUses(SmallVectorImpl<Pass *> &LastUses, Pass *P);
/// Find the pass that implements Analysis AID. Search immutable
  /// passes and all pass managers. If the desired pass is not found,
  /// return NULL.
Pass *findAnalysisPass(AnalysisID AID);
/// Retrieve the PassInfo for an analysis.
const PassInfo *findAnalysisPassInfo(AnalysisID AID) const;
/// Find analysis usage information for the pass P.
AnalysisUsage *findAnalysisUsage(Pass *P);
virtual ~PMTopLevelManager();
/// Add immutable pass and initialize it.
void addImmutablePass(ImmutablePass *P);
inline SmallVectorImpl<ImmutablePass *>& getImmutablePasses() {
return ImmutablePasses;
}
void addPassManager(PMDataManager *Manager) {
PassManagers.push_back(Manager);
}
// Add Manager into the list of managers that are not directly
// maintained by this top level pass manager
inline void addIndirectPassManager(PMDataManager *Manager) {
IndirectPassManagers.push_back(Manager);
}
// Print passes managed by this top level manager.
void dumpPasses() const;
void dumpArguments() const;
// Active Pass Managers
PMStack activeStack;
protected:
/// Collection of pass managers
SmallVector<PMDataManager *, 8> PassManagers;
private:
/// Collection of pass managers that are not directly maintained
/// by this pass manager
SmallVector<PMDataManager *, 8> IndirectPassManagers;
// Map to keep track of last user of the analysis pass.
  // LastUser->second is the last user of LastUser->first.
// This is kept in sync with InversedLastUser.
DenseMap<Pass *, Pass *> LastUser;
// Map to keep track of passes that are last used by a pass.
// This is kept in sync with LastUser.
DenseMap<Pass *, SmallPtrSet<Pass *, 8> > InversedLastUser;
/// Immutable passes are managed by top level manager.
SmallVector<ImmutablePass *, 16> ImmutablePasses;
/// Map from ID to immutable passes.
SmallDenseMap<AnalysisID, ImmutablePass *, 8> ImmutablePassMap;
  /// A wrapper around AnalysisUsage for the purpose of uniquing. The wrapper
/// is used to avoid needing to make AnalysisUsage itself a folding set node.
struct AUFoldingSetNode : public FoldingSetNode {
AnalysisUsage AU;
AUFoldingSetNode(const AnalysisUsage &AU) : AU(AU) {}
void Profile(FoldingSetNodeID &ID) const {
Profile(ID, AU);
}
static void Profile(FoldingSetNodeID &ID, const AnalysisUsage &AU) {
// TODO: We could consider sorting the dependency arrays within the
// AnalysisUsage (since they are conceptually unordered).
ID.AddBoolean(AU.getPreservesAll());
auto ProfileVec = [&](const SmallVectorImpl<AnalysisID>& Vec) {
ID.AddInteger(Vec.size());
for(AnalysisID AID : Vec)
ID.AddPointer(AID);
};
ProfileVec(AU.getRequiredSet());
ProfileVec(AU.getRequiredTransitiveSet());
ProfileVec(AU.getPreservedSet());
ProfileVec(AU.getUsedSet());
}
};
// Contains all of the unique combinations of AnalysisUsage. This is helpful
// when we have multiple instances of the same pass since they'll usually
// have the same analysis usage and can share storage.
FoldingSet<AUFoldingSetNode> UniqueAnalysisUsages;
  // Allocator used for allocating AUFoldingSetNodes. This handles deletion of
// all allocated nodes in one fell swoop.
SpecificBumpPtrAllocator<AUFoldingSetNode> AUFoldingSetNodeAllocator;
// Maps from a pass to its associated entry in UniqueAnalysisUsages. Does
  // not own the storage associated with either key or value.
DenseMap<Pass *, AnalysisUsage*> AnUsageMap;
/// Collection of PassInfo objects found via analysis IDs and in this top
/// level manager. This is used to memoize queries to the pass registry.
/// FIXME: This is an egregious hack because querying the pass registry is
/// either slow or racy.
mutable DenseMap<AnalysisID, const PassInfo *> AnalysisPassInfos;
};
//===----------------------------------------------------------------------===//
// PMDataManager
/// PMDataManager provides the common place to manage the analysis data
/// used by pass managers.
class PMDataManager {
public:
explicit PMDataManager() : TPM(nullptr), Depth(0) {
initializeAnalysisInfo();
}
virtual ~PMDataManager();
virtual Pass *getAsPass() = 0;
/// Augment AvailableAnalysis by adding analysis made available by pass P.
void recordAvailableAnalysis(Pass *P);
/// verifyPreservedAnalysis -- Verify analysis preserved by pass P.
void verifyPreservedAnalysis(Pass *P);
/// Remove Analysis that is not preserved by the pass
void removeNotPreservedAnalysis(Pass *P);
/// Remove dead passes used by P.
void removeDeadPasses(Pass *P, StringRef Msg,
enum PassDebuggingString);
/// Remove P.
void freePass(Pass *P, StringRef Msg,
enum PassDebuggingString);
/// Add pass P into the PassVector. Update
/// AvailableAnalysis appropriately if ProcessAnalysis is true.
void add(Pass *P, bool ProcessAnalysis = true);
/// Add RequiredPass into list of lower level passes required by pass P.
/// RequiredPass is run on the fly by Pass Manager when P requests it
/// through getAnalysis interface.
virtual void addLowerLevelRequiredPass(Pass *P, Pass *RequiredPass);
virtual std::tuple<Pass *, bool> getOnTheFlyPass(Pass *P, AnalysisID PI,
Function &F);
/// Initialize available analysis information.
void initializeAnalysisInfo() {
AvailableAnalysis.clear();
for (auto &IA : InheritedAnalysis)
IA = nullptr;
}
// Return true if P preserves high level analysis used by other
// passes that are managed by this manager.
bool preserveHigherLevelAnalysis(Pass *P);
  /// Populate UsedPasses with analysis passes that are used or required by pass
  /// P and are available. Populate ReqPassNotAvailable with analysis passes that
/// are required by pass P but are not available.
void collectRequiredAndUsedAnalyses(
SmallVectorImpl<Pass *> &UsedPasses,
SmallVectorImpl<AnalysisID> &ReqPassNotAvailable, Pass *P);
/// All Required analyses should be available to the pass as it runs! Here
/// we fill in the AnalysisImpls member of the pass so that it can
/// successfully use the getAnalysis() method to retrieve the
/// implementations it needs.
void initializeAnalysisImpl(Pass *P);
  /// Find the pass that implements Analysis AID. If the desired pass is not
  /// found, return NULL.
Pass *findAnalysisPass(AnalysisID AID, bool Direction);
// Access toplevel manager
PMTopLevelManager *getTopLevelManager() { return TPM; }
void setTopLevelManager(PMTopLevelManager *T) { TPM = T; }
unsigned getDepth() const { return Depth; }
void setDepth(unsigned newDepth) { Depth = newDepth; }
// Print routines used by debug-pass
void dumpLastUses(Pass *P, unsigned Offset) const;
void dumpPassArguments() const;
void dumpPassInfo(Pass *P, enum PassDebuggingString S1,
enum PassDebuggingString S2, StringRef Msg);
void dumpRequiredSet(const Pass *P) const;
void dumpPreservedSet(const Pass *P) const;
void dumpUsedSet(const Pass *P) const;
unsigned getNumContainedPasses() const {
return (unsigned)PassVector.size();
}
virtual PassManagerType getPassManagerType() const {
assert ( 0 && "Invalid use of getPassManagerType");
return PMT_Unknown;
}
DenseMap<AnalysisID, Pass*> *getAvailableAnalysis() {
return &AvailableAnalysis;
}
// Collect AvailableAnalysis from all the active Pass Managers.
void populateInheritedAnalysis(PMStack &PMS) {
unsigned Index = 0;
for (PMDataManager *PMDM : PMS)
InheritedAnalysis[Index++] = PMDM->getAvailableAnalysis();
}
/// Set the initial size of the module if the user has specified that they
/// want remarks for size.
/// Returns 0 if the remark was not requested.
unsigned initSizeRemarkInfo(
Module &M,
StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount);
/// Emit a remark signifying that the number of IR instructions in the module
/// changed.
/// \p F is optionally passed by passes which run on Functions, and thus
/// always know whether or not a non-empty function is available.
///
/// \p FunctionToInstrCount maps the name of a \p Function to a pair. The
/// first member of the pair is the IR count of the \p Function before running
/// \p P, and the second member is the IR count of the \p Function after
/// running \p P.
void emitInstrCountChangedRemark(
Pass *P, Module &M, int64_t Delta, unsigned CountBefore,
StringMap<std::pair<unsigned, unsigned>> &FunctionToInstrCount,
Function *F = nullptr);
protected:
// Top level manager.
PMTopLevelManager *TPM;
  // Collection of passes that are managed by this manager
SmallVector<Pass *, 16> PassVector;
  // Collection of Analysis provided by the parent pass manager and
  // used by the current pass manager. At any time there cannot be more
  // than PMT_Last active pass managers.
DenseMap<AnalysisID, Pass *> *InheritedAnalysis[PMT_Last];
/// isPassDebuggingExecutionsOrMore - Return true if -debug-pass=Executions
/// or higher is specified.
bool isPassDebuggingExecutionsOrMore() const;
private:
void dumpAnalysisUsage(StringRef Msg, const Pass *P,
const AnalysisUsage::VectorType &Set) const;
// Set of available Analysis. This information is used while scheduling
  // passes. If a pass requires an analysis which is not available then
// the required analysis pass is scheduled to run before the pass itself is
// scheduled to run.
DenseMap<AnalysisID, Pass*> AvailableAnalysis;
// Collection of higher level analysis used by the pass managed by
// this manager.
SmallVector<Pass *, 16> HigherLevelAnalysis;
unsigned Depth;
};
//===----------------------------------------------------------------------===//
// FPPassManager
//
/// FPPassManager manages BBPassManagers and FunctionPasses.
/// It batches all function passes and basic block pass managers together and
/// sequences them to process one function at a time before processing the
/// next function.
class FPPassManager : public ModulePass, public PMDataManager {
public:
static char ID;
explicit FPPassManager() : ModulePass(ID) {}
/// run - Execute all of the passes scheduled for execution. Keep track of
/// whether any of the passes modifies the module, and if so, return true.
bool runOnFunction(Function &F);
bool runOnModule(Module &M) override;
/// cleanup - After running all passes, clean up pass manager cache.
void cleanup();
/// doInitialization - Overrides ModulePass doInitialization for global
/// initialization tasks
///
using ModulePass::doInitialization;
/// doInitialization - Run all of the initializers for the function passes.
///
bool doInitialization(Module &M) override;
/// doFinalization - Overrides ModulePass doFinalization for global
/// finalization tasks
///
using ModulePass::doFinalization;
/// doFinalization - Run all of the finalizers for the function passes.
///
bool doFinalization(Module &M) override;
PMDataManager *getAsPMDataManager() override { return this; }
Pass *getAsPass() override { return this; }
/// Pass Manager itself does not invalidate any analysis info.
void getAnalysisUsage(AnalysisUsage &Info) const override {
Info.setPreservesAll();
}
// Print passes managed by this manager
void dumpPassStructure(unsigned Offset) override;
StringRef getPassName() const override { return "Function Pass Manager"; }
FunctionPass *getContainedPass(unsigned N) {
assert ( N < PassVector.size() && "Pass number out of range!");
FunctionPass *FP = static_cast<FunctionPass *>(PassVector[N]);
return FP;
}
PassManagerType getPassManagerType() const override {
return PMT_FunctionPassManager;
}
};
}
#endif

View File

@@ -0,0 +1,97 @@
//===- LegacyPassNameParser.h -----------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the PassNameParser and FilteredPassNameParser<> classes,
// which are used to add command line arguments to a utility for all of the
// passes that have been registered into the system.
//
// The PassNameParser class adds ALL passes linked into the system (that are
// creatable) as command line arguments to the tool (when instantiated with the
// appropriate command line option template). The FilteredPassNameParser<>
// template is used for the same purposes as PassNameParser, except that it only
// includes passes whose PassType is compatible with the filter
// (which is the template argument).
//
// Note that this is part of the legacy pass manager infrastructure and will be
// (eventually) going away.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_LEGACYPASSNAMEPARSER_H
#define LLVM_IR_LEGACYPASSNAMEPARSER_H
#include "llvm/ADT/STLExtras.h"
#include "llvm/Pass.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include <cstring>
namespace llvm {
//===----------------------------------------------------------------------===//
// PassNameParser class - Make use of the pass registration mechanism to
// automatically add a command line argument to opt for each pass.
//
class PassNameParser : public PassRegistrationListener,
public cl::parser<const PassInfo*> {
public:
PassNameParser(cl::Option &O);
~PassNameParser() override;
void initialize() {
cl::parser<const PassInfo*>::initialize();
// Add all of the passes to the map that got initialized before 'this' did.
enumeratePasses();
}
// ignorablePassImpl - Can be overridden in subclasses to refine the list of
// which passes we want to include.
//
virtual bool ignorablePassImpl(const PassInfo *P) const { return false; }
inline bool ignorablePass(const PassInfo *P) const {
// Ignore non-selectable and non-constructible passes! Ignore
// non-optimizations.
return P->getPassArgument().empty() || P->getNormalCtor() == nullptr ||
ignorablePassImpl(P);
}
// Implement the PassRegistrationListener callbacks used to populate our map
//
void passRegistered(const PassInfo *P) override {
if (ignorablePass(P)) return;
if (findOption(P->getPassArgument().data()) != getNumOptions()) {
errs() << "Two passes with the same argument (-"
<< P->getPassArgument() << ") attempted to be registered!\n";
llvm_unreachable(nullptr);
}
addLiteralOption(P->getPassArgument().data(), P, P->getPassName().data());
}
void passEnumerate(const PassInfo *P) override { passRegistered(P); }
// printOptionInfo - Print out information about this option. Override the
// default implementation to sort the table before we print...
void printOptionInfo(const cl::Option &O, size_t GlobalWidth) const override {
PassNameParser *PNP = const_cast<PassNameParser*>(this);
array_pod_sort(PNP->Values.begin(), PNP->Values.end(), ValCompare);
cl::parser<const PassInfo*>::printOptionInfo(O, GlobalWidth);
}
private:
// ValCompare - Provide a sorting comparator for Values elements...
static int ValCompare(const PassNameParser::OptionInfo *VT1,
const PassNameParser::OptionInfo *VT2) {
return VT1->Name.compare(VT2->Name);
}
};
} // End llvm namespace
#endif

View File

@@ -0,0 +1,217 @@
//===---- llvm/MDBuilder.h - Builder for LLVM metadata ----------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MDBuilder class, which is used as a convenient way to
// create LLVM metadata with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MDBUILDER_H
#define LLVM_IR_MDBUILDER_H
#include "llvm/ADT/DenseSet.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/Support/DataTypes.h"
#include <utility>
namespace llvm {
class APInt;
template <typename T> class ArrayRef;
class LLVMContext;
class Constant;
class ConstantAsMetadata;
class Function;
class MDNode;
class MDString;
class Metadata;
class MDBuilder {
LLVMContext &Context;
public:
MDBuilder(LLVMContext &context) : Context(context) {}
/// Return the given string as metadata.
MDString *createString(StringRef Str);
/// Return the given constant as metadata.
ConstantAsMetadata *createConstant(Constant *C);
//===------------------------------------------------------------------===//
// FPMath metadata.
//===------------------------------------------------------------------===//
/// Return metadata with the given settings. The special value 0.0
/// for the Accuracy parameter indicates the default (maximal precision)
/// setting.
MDNode *createFPMath(float Accuracy);
//===------------------------------------------------------------------===//
// Prof metadata.
//===------------------------------------------------------------------===//
/// Return metadata containing two branch weights.
MDNode *createBranchWeights(uint32_t TrueWeight, uint32_t FalseWeight);
/// Return metadata containing a number of branch weights.
MDNode *createBranchWeights(ArrayRef<uint32_t> Weights);
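  // Illustrative sketch (BI is an assumed BranchInst*): tag a branch as ~90%
  // taken by attaching profile metadata:
  //   MDBuilder MDB(Ctx);
  //   BI->setMetadata(LLVMContext::MD_prof,
  //                   MDB.createBranchWeights(/*TrueWeight=*/90,
  //                                           /*FalseWeight=*/10));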
/// Return metadata specifying that a branch or switch is unpredictable.
MDNode *createUnpredictable();
/// Return metadata containing the entry \p Count for a function, a boolean
  /// \p Synthetic indicating whether the counts were synthesized, and the
  /// GUIDs stored in \p Imports that need to be imported for sample PGO, to
  /// enable the same inlines as the profiled optimized binary.
MDNode *createFunctionEntryCount(uint64_t Count, bool Synthetic,
const DenseSet<GlobalValue::GUID> *Imports);
/// Return metadata containing the section prefix for a function.
MDNode *createFunctionSectionPrefix(StringRef Prefix);
/// Return metadata containing the pseudo probe descriptor for a function.
MDNode *createPseudoProbeDesc(uint64_t GUID, uint64_t Hash, Function *F);
//===------------------------------------------------------------------===//
// Range metadata.
//===------------------------------------------------------------------===//
/// Return metadata describing the range [Lo, Hi).
MDNode *createRange(const APInt &Lo, const APInt &Hi);
/// Return metadata describing the range [Lo, Hi).
MDNode *createRange(Constant *Lo, Constant *Hi);
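  // Illustrative sketch (LI is an assumed LoadInst*): a load of an i8 known
  // to hold only 0 or 1 can be tagged with the half-open range [0, 2):
  //   LI->setMetadata(LLVMContext::MD_range,
  //                   MDB.createRange(APInt(8, 0), APInt(8, 2)));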
//===------------------------------------------------------------------===//
// Callees metadata.
//===------------------------------------------------------------------===//
/// Return metadata indicating the possible callees of indirect
/// calls.
MDNode *createCallees(ArrayRef<Function *> Callees);
//===------------------------------------------------------------------===//
// Callback metadata.
//===------------------------------------------------------------------===//
/// Return metadata describing a callback (see llvm::AbstractCallSite).
MDNode *createCallbackEncoding(unsigned CalleeArgNo, ArrayRef<int> Arguments,
bool VarArgsArePassed);
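  // Illustrative sketch: for a pthread_create-like broker whose callback
  // callee is call-site operand 2 and which forwards operand 3 to it:
  //   MDNode *Encoding = MDB.createCallbackEncoding(
  //       /*CalleeArgNo=*/2, /*Arguments=*/{3}, /*VarArgsArePassed=*/false);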
/// Merge the new callback encoding \p NewCB into \p ExistingCallbacks.
MDNode *mergeCallbackEncodings(MDNode *ExistingCallbacks, MDNode *NewCB);
//===------------------------------------------------------------------===//
// AA metadata.
//===------------------------------------------------------------------===//
protected:
  /// Return metadata appropriate for an AA root node (scope or TBAA).
/// Each returned node is distinct from all other metadata and will never
/// be identified (uniqued) with anything else.
MDNode *createAnonymousAARoot(StringRef Name = StringRef(),
MDNode *Extra = nullptr);
public:
/// Return metadata appropriate for a TBAA root node. Each returned
/// node is distinct from all other metadata and will never be identified
/// (uniqued) with anything else.
MDNode *createAnonymousTBAARoot() {
return createAnonymousAARoot();
}
/// Return metadata appropriate for an alias scope domain node.
/// Each returned node is distinct from all other metadata and will never
/// be identified (uniqued) with anything else.
MDNode *createAnonymousAliasScopeDomain(StringRef Name = StringRef()) {
return createAnonymousAARoot(Name);
}
/// Return metadata appropriate for an alias scope root node.
/// Each returned node is distinct from all other metadata and will never
/// be identified (uniqued) with anything else.
MDNode *createAnonymousAliasScope(MDNode *Domain,
StringRef Name = StringRef()) {
return createAnonymousAARoot(Name, Domain);
}
/// Return metadata appropriate for a TBAA root node with the given
/// name. This may be identified (uniqued) with other roots with the same
/// name.
MDNode *createTBAARoot(StringRef Name);
/// Return metadata appropriate for an alias scope domain node with
/// the given name. This may be identified (uniqued) with other roots with
/// the same name.
MDNode *createAliasScopeDomain(StringRef Name);
/// Return metadata appropriate for an alias scope node with
/// the given name. This may be identified (uniqued) with other scopes with
/// the same name and domain.
MDNode *createAliasScope(StringRef Name, MDNode *Domain);
/// Return metadata for a non-root TBAA node with the given name,
/// parent in the TBAA tree, and value for 'pointsToConstantMemory'.
MDNode *createTBAANode(StringRef Name, MDNode *Parent,
bool isConstant = false);
struct TBAAStructField {
uint64_t Offset;
uint64_t Size;
MDNode *Type;
TBAAStructField(uint64_t Offset, uint64_t Size, MDNode *Type) :
Offset(Offset), Size(Size), Type(Type) {}
};
/// Return metadata for a tbaa.struct node with the given
/// struct field descriptions.
MDNode *createTBAAStructNode(ArrayRef<TBAAStructField> Fields);
/// Return metadata for a TBAA struct node in the type DAG
/// with the given name, a list of pairs (offset, field type in the type DAG).
MDNode *
createTBAAStructTypeNode(StringRef Name,
ArrayRef<std::pair<MDNode *, uint64_t>> Fields);
/// Return metadata for a TBAA scalar type node with the
/// given name, an offset and a parent in the TBAA type DAG.
MDNode *createTBAAScalarTypeNode(StringRef Name, MDNode *Parent,
uint64_t Offset = 0);
/// Return metadata for a TBAA tag node with the given
/// base type, access type and offset relative to the base type.
MDNode *createTBAAStructTagNode(MDNode *BaseType, MDNode *AccessType,
uint64_t Offset, bool IsConstant = false);
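  // Illustrative sketch of building a simple scalar access tag (the node
  // names follow common C/C++ TBAA conventions and are assumptions):
  //   MDNode *Root = MDB.createTBAARoot("Simple C/C++ TBAA");
  //   MDNode *Char = MDB.createTBAAScalarTypeNode("omnipotent char", Root);
  //   MDNode *Int  = MDB.createTBAAScalarTypeNode("int", Char);
  //   MDNode *Tag  = MDB.createTBAAStructTagNode(Int, Int, /*Offset=*/0);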
/// Return metadata for a TBAA type node in the TBAA type DAG with the
/// given parent type, size in bytes, type identifier and a list of fields.
MDNode *createTBAATypeNode(MDNode *Parent, uint64_t Size, Metadata *Id,
ArrayRef<TBAAStructField> Fields =
ArrayRef<TBAAStructField>());
/// Return metadata for a TBAA access tag with the given base type,
/// final access type, offset of the access relative to the base type, size of
/// the access and flag indicating whether the accessed object can be
/// considered immutable for the purposes of the TBAA analysis.
MDNode *createTBAAAccessTag(MDNode *BaseType, MDNode *AccessType,
uint64_t Offset, uint64_t Size,
bool IsImmutable = false);
/// Return mutable version of the given mutable or immutable TBAA
/// access tag.
MDNode *createMutableTBAAAccessTag(MDNode *Tag);
/// Return metadata containing an irreducible loop header weight.
MDNode *createIrrLoopHeaderWeight(uint64_t Weight);
};
} // end namespace llvm
#endif

View File

@@ -0,0 +1,57 @@
//===-- llvm/IR/Mangler.h - Self-contained name mangler ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Unified name mangler for various backends.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MANGLER_H
#define LLVM_IR_MANGLER_H
#include "llvm/ADT/DenseMap.h"
namespace llvm {
class DataLayout;
class GlobalValue;
template <typename T> class SmallVectorImpl;
class Triple;
class Twine;
class raw_ostream;
class Mangler {
/// We need to give global values the same name every time they are mangled.
/// This keeps track of the number we give to anonymous ones.
mutable DenseMap<const GlobalValue*, unsigned> AnonGlobalIDs;
public:
/// Print the appropriate prefix and the specified global variable's name.
/// If the global variable doesn't have a name, this fills in a unique name
/// for the global.
void getNameWithPrefix(raw_ostream &OS, const GlobalValue *GV,
bool CannotUsePrivateLabel) const;
void getNameWithPrefix(SmallVectorImpl<char> &OutName, const GlobalValue *GV,
bool CannotUsePrivateLabel) const;
/// Print the appropriate prefix and the specified name as the global variable
/// name. GVName must not be empty.
static void getNameWithPrefix(raw_ostream &OS, const Twine &GVName,
const DataLayout &DL);
static void getNameWithPrefix(SmallVectorImpl<char> &OutName,
const Twine &GVName, const DataLayout &DL);
};
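// Illustrative usage sketch (GV is an assumed GlobalValue*):
//   Mangler Mang;
//   SmallString<64> MangledName;
//   Mang.getNameWithPrefix(MangledName, GV, /*CannotUsePrivateLabel=*/false);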
void emitLinkerFlagsForGlobalCOFF(raw_ostream &OS, const GlobalValue *GV,
const Triple &TT, Mangler &Mangler);
void emitLinkerFlagsForUsedCOFF(raw_ostream &OS, const GlobalValue *GV,
const Triple &T, Mangler &M);
} // End llvm namespace
#endif

View File

@@ -0,0 +1,264 @@
//===- llvm/MatrixBuilder.h - Builder to lower matrix ops -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the MatrixBuilder class, which is used as a convenient way
// to lower matrix operations to LLVM IR.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MATRIXBUILDER_H
#define LLVM_IR_MATRIXBUILDER_H
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Alignment.h"
namespace llvm {
class Function;
class Twine;
class Module;
template <class IRBuilderTy> class MatrixBuilder {
IRBuilderTy &B;
Module *getModule() { return B.GetInsertBlock()->getParent()->getParent(); }
std::pair<Value *, Value *> splatScalarOperandIfNeeded(Value *LHS,
Value *RHS) {
assert((LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy()) &&
"One of the operands must be a matrix (embedded in a vector)");
if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(LHS->getType()) &&
"LHS Assumed to be fixed width");
RHS = B.CreateVectorSplat(
cast<VectorType>(LHS->getType())->getElementCount(), RHS,
"scalar.splat");
} else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(RHS->getType()) &&
"RHS Assumed to be fixed width");
LHS = B.CreateVectorSplat(
cast<VectorType>(RHS->getType())->getElementCount(), LHS,
"scalar.splat");
}
return {LHS, RHS};
}
public:
MatrixBuilder(IRBuilderTy &Builder) : B(Builder) {}
/// Create a column major, strided matrix load.
/// \p DataPtr - Start address of the matrix read
/// \p Rows - Number of rows in matrix (must be a constant)
/// \p Columns - Number of columns in matrix (must be a constant)
/// \p Stride - Space between columns
CallInst *CreateColumnMajorLoad(Value *DataPtr, Align Alignment,
Value *Stride, bool IsVolatile, unsigned Rows,
unsigned Columns, const Twine &Name = "") {
// Deal with the pointer
PointerType *PtrTy = cast<PointerType>(DataPtr->getType());
Type *EltTy = PtrTy->getPointerElementType();
auto *RetType = FixedVectorType::get(EltTy, Rows * Columns);
Value *Ops[] = {DataPtr, Stride, B.getInt1(IsVolatile), B.getInt32(Rows),
B.getInt32(Columns)};
Type *OverloadedTypes[] = {RetType, Stride->getType()};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_column_major_load, OverloadedTypes);
CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
Attribute AlignAttr =
Attribute::getWithAlignment(Call->getContext(), Alignment);
Call->addParamAttr(0, AlignAttr);
return Call;
}
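  // Illustrative sketch (Ptr and Builder are assumed): load a 4x4 matrix
  // whose columns are 16 elements apart in memory:
  //   MatrixBuilder<IRBuilder<>> MB(Builder);
  //   Value *M = MB.CreateColumnMajorLoad(Ptr, Align(4), Builder.getInt64(16),
  //                                       /*IsVolatile=*/false, /*Rows=*/4,
  //                                       /*Columns=*/4);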
/// Create a column major, strided matrix store.
/// \p Matrix - Matrix to store
/// \p Ptr - Pointer to write back to
/// \p Stride - Space between columns
CallInst *CreateColumnMajorStore(Value *Matrix, Value *Ptr, Align Alignment,
Value *Stride, bool IsVolatile,
unsigned Rows, unsigned Columns,
const Twine &Name = "") {
Value *Ops[] = {Matrix, Ptr,
Stride, B.getInt1(IsVolatile),
B.getInt32(Rows), B.getInt32(Columns)};
Type *OverloadedTypes[] = {Matrix->getType(), Stride->getType()};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_column_major_store, OverloadedTypes);
CallInst *Call = B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
Attribute AlignAttr =
Attribute::getWithAlignment(Call->getContext(), Alignment);
Call->addParamAttr(1, AlignAttr);
return Call;
}
/// Create a llvm.matrix.transpose call, transposing \p Matrix with \p Rows
/// rows and \p Columns columns.
CallInst *CreateMatrixTranspose(Value *Matrix, unsigned Rows,
unsigned Columns, const Twine &Name = "") {
auto *OpType = cast<VectorType>(Matrix->getType());
auto *ReturnType =
FixedVectorType::get(OpType->getElementType(), Rows * Columns);
Type *OverloadedTypes[] = {ReturnType};
Value *Ops[] = {Matrix, B.getInt32(Rows), B.getInt32(Columns)};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_transpose, OverloadedTypes);
return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
}
  /// Create a llvm.matrix.multiply call, multiplying matrices \p LHS and \p
/// RHS.
CallInst *CreateMatrixMultiply(Value *LHS, Value *RHS, unsigned LHSRows,
unsigned LHSColumns, unsigned RHSColumns,
const Twine &Name = "") {
auto *LHSType = cast<VectorType>(LHS->getType());
auto *RHSType = cast<VectorType>(RHS->getType());
auto *ReturnType =
FixedVectorType::get(LHSType->getElementType(), LHSRows * RHSColumns);
Value *Ops[] = {LHS, RHS, B.getInt32(LHSRows), B.getInt32(LHSColumns),
B.getInt32(RHSColumns)};
Type *OverloadedTypes[] = {ReturnType, LHSType, RHSType};
Function *TheFn = Intrinsic::getDeclaration(
getModule(), Intrinsic::matrix_multiply, OverloadedTypes);
return B.CreateCall(TheFn->getFunctionType(), TheFn, Ops, Name);
}
/// Insert a single element \p NewVal into \p Matrix at indices (\p RowIdx, \p
/// ColumnIdx).
Value *CreateMatrixInsert(Value *Matrix, Value *NewVal, Value *RowIdx,
Value *ColumnIdx, unsigned NumRows) {
return B.CreateInsertElement(
Matrix, NewVal,
B.CreateAdd(B.CreateMul(ColumnIdx, ConstantInt::get(
ColumnIdx->getType(), NumRows)),
RowIdx));
}
  /// Add matrices \p LHS and \p RHS. Supports both integer and floating point
  /// matrices.
Value *CreateAdd(Value *LHS, Value *RHS) {
assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(LHS->getType()) &&
"LHS Assumed to be fixed width");
RHS = B.CreateVectorSplat(
cast<VectorType>(LHS->getType())->getElementCount(), RHS,
"scalar.splat");
} else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(RHS->getType()) &&
"RHS Assumed to be fixed width");
LHS = B.CreateVectorSplat(
cast<VectorType>(RHS->getType())->getElementCount(), LHS,
"scalar.splat");
}
return cast<VectorType>(LHS->getType())
->getElementType()
->isFloatingPointTy()
? B.CreateFAdd(LHS, RHS)
: B.CreateAdd(LHS, RHS);
}
  /// Subtract matrices \p LHS and \p RHS. Supports both integer and floating
  /// point matrices.
Value *CreateSub(Value *LHS, Value *RHS) {
assert(LHS->getType()->isVectorTy() || RHS->getType()->isVectorTy());
if (LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(LHS->getType()) &&
"LHS Assumed to be fixed width");
RHS = B.CreateVectorSplat(
cast<VectorType>(LHS->getType())->getElementCount(), RHS,
"scalar.splat");
} else if (!LHS->getType()->isVectorTy() && RHS->getType()->isVectorTy()) {
assert(!isa<ScalableVectorType>(RHS->getType()) &&
"RHS Assumed to be fixed width");
LHS = B.CreateVectorSplat(
cast<VectorType>(RHS->getType())->getElementCount(), LHS,
"scalar.splat");
}
return cast<VectorType>(LHS->getType())
->getElementType()
->isFloatingPointTy()
? B.CreateFSub(LHS, RHS)
: B.CreateSub(LHS, RHS);
}
/// Multiply matrix \p LHS with scalar \p RHS or scalar \p LHS with matrix \p
/// RHS.
Value *CreateScalarMultiply(Value *LHS, Value *RHS) {
std::tie(LHS, RHS) = splatScalarOperandIfNeeded(LHS, RHS);
if (LHS->getType()->getScalarType()->isFloatingPointTy())
return B.CreateFMul(LHS, RHS);
return B.CreateMul(LHS, RHS);
}
/// Divide matrix \p LHS by scalar \p RHS. If the operands are integers, \p
/// IsUnsigned indicates whether UDiv or SDiv should be used.
Value *CreateScalarDiv(Value *LHS, Value *RHS, bool IsUnsigned) {
assert(LHS->getType()->isVectorTy() && !RHS->getType()->isVectorTy());
assert(!isa<ScalableVectorType>(LHS->getType()) &&
"LHS Assumed to be fixed width");
RHS =
B.CreateVectorSplat(cast<VectorType>(LHS->getType())->getElementCount(),
RHS, "scalar.splat");
return cast<VectorType>(LHS->getType())
->getElementType()
->isFloatingPointTy()
? B.CreateFDiv(LHS, RHS)
: (IsUnsigned ? B.CreateUDiv(LHS, RHS) : B.CreateSDiv(LHS, RHS));
}
/// Create an assumption that \p Idx is less than \p NumElements.
void CreateIndexAssumption(Value *Idx, unsigned NumElements,
Twine const &Name = "") {
Value *NumElts =
B.getIntN(Idx->getType()->getScalarSizeInBits(), NumElements);
auto *Cmp = B.CreateICmpULT(Idx, NumElts);
if (auto *ConstCond = dyn_cast<ConstantInt>(Cmp))
assert(ConstCond->isOne() && "Index must be valid!");
else
B.CreateAssumption(Cmp);
}
/// Compute the index to access the element at (\p RowIdx, \p ColumnIdx) from
/// a matrix with \p NumRows embedded in a vector.
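  /// For example, with a column-major 3-row matrix, element (Row=1, Col=2)
  /// maps to flat index 2 * 3 + 1 = 7.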
Value *CreateIndex(Value *RowIdx, Value *ColumnIdx, unsigned NumRows,
Twine const &Name = "") {
unsigned MaxWidth = std::max(RowIdx->getType()->getScalarSizeInBits(),
ColumnIdx->getType()->getScalarSizeInBits());
Type *IntTy = IntegerType::get(RowIdx->getType()->getContext(), MaxWidth);
RowIdx = B.CreateZExt(RowIdx, IntTy);
ColumnIdx = B.CreateZExt(ColumnIdx, IntTy);
Value *NumRowsV = B.getIntN(MaxWidth, NumRows);
return B.CreateAdd(B.CreateMul(ColumnIdx, NumRowsV), RowIdx);
}
};
} // end namespace llvm
#endif // LLVM_IR_MATRIXBUILDER_H

View File

@@ -0,0 +1,129 @@
//===- llvm/IR/Metadata.def - Metadata definitions --------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Macros for running through all types of metadata.
//
//===----------------------------------------------------------------------===//
#if !(defined HANDLE_METADATA || defined HANDLE_METADATA_LEAF || \
defined HANDLE_METADATA_BRANCH || defined HANDLE_MDNODE_LEAF || \
defined HANDLE_MDNODE_LEAF_UNIQUABLE || defined HANDLE_MDNODE_BRANCH || \
defined HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE || \
defined HANDLE_SPECIALIZED_MDNODE_LEAF || \
defined HANDLE_SPECIALIZED_MDNODE_BRANCH)
#error "Missing macro definition of HANDLE_METADATA*"
#endif
// Handler for all types of metadata.
#ifndef HANDLE_METADATA
#define HANDLE_METADATA(CLASS)
#endif
// Handler for leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_LEAF
#define HANDLE_METADATA_LEAF(CLASS) HANDLE_METADATA(CLASS)
#endif
// Handler for non-leaf nodes in the class hierarchy.
#ifndef HANDLE_METADATA_BRANCH
#define HANDLE_METADATA_BRANCH(CLASS) HANDLE_METADATA(CLASS)
#endif
// Handler for specialized and uniquable leaf nodes under MDNode. Defers to
// HANDLE_MDNODE_LEAF_UNIQUABLE if it's defined, otherwise to
// HANDLE_SPECIALIZED_MDNODE_LEAF.
#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
#ifdef HANDLE_MDNODE_LEAF_UNIQUABLE
#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS) \
HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS)
#else
#define HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(CLASS) \
HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS)
#endif
#endif
// Handler for uniquable leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_LEAF_UNIQUABLE
#define HANDLE_MDNODE_LEAF_UNIQUABLE(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#endif
// Handler for leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_LEAF
#define HANDLE_MDNODE_LEAF(CLASS) HANDLE_METADATA_LEAF(CLASS)
#endif
// Handler for non-leaf nodes under MDNode.
#ifndef HANDLE_MDNODE_BRANCH
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_METADATA_BRANCH(CLASS)
#endif
// Handler for specialized leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_LEAF
#define HANDLE_SPECIALIZED_MDNODE_LEAF(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#endif
// Handler for specialized non-leaf nodes under MDNode.
#ifndef HANDLE_SPECIALIZED_MDNODE_BRANCH
#define HANDLE_SPECIALIZED_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_BRANCH(CLASS)
#endif
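// Illustrative consumption sketch (the surrounding switch over
// Metadata::getMetadataID() is assumed context): clients define the handlers
// they care about and then include this file, e.g.:
//
//   #define HANDLE_MDNODE_LEAF(CLASS) case Metadata::CLASS##Kind:
//   #include "llvm/IR/Metadata.def"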
HANDLE_METADATA_LEAF(MDString)
HANDLE_METADATA_BRANCH(ValueAsMetadata)
HANDLE_METADATA_LEAF(ConstantAsMetadata)
HANDLE_METADATA_LEAF(LocalAsMetadata)
HANDLE_METADATA_LEAF(DistinctMDOperandPlaceholder)
HANDLE_MDNODE_BRANCH(MDNode)
HANDLE_MDNODE_LEAF_UNIQUABLE(MDTuple)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocation)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIExpression)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariableExpression)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(GenericDINode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubrange)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIEnumerator)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIScope)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIBasicType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIDerivedType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICompositeType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubroutineType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIFile)
HANDLE_SPECIALIZED_MDNODE_LEAF(DICompileUnit)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILocalScope)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DISubprogram)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DILexicalBlockBase)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlock)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILexicalBlockFile)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DINamespace)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIModule)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DITemplateParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateTypeParameter)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DITemplateValueParameter)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGlobalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILocalVariable)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DILabel)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIObjCProperty)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIImportedEntity)
HANDLE_SPECIALIZED_MDNODE_BRANCH(DIMacroNode)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacro)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIMacroFile)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DICommonBlock)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIArgList)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIStringType)
HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE(DIGenericSubrange)
#undef HANDLE_METADATA
#undef HANDLE_METADATA_LEAF
#undef HANDLE_METADATA_BRANCH
#undef HANDLE_MDNODE_LEAF
#undef HANDLE_MDNODE_LEAF_UNIQUABLE
#undef HANDLE_MDNODE_BRANCH
#undef HANDLE_SPECIALIZED_MDNODE_LEAF
#undef HANDLE_SPECIALIZED_MDNODE_LEAF_UNIQUABLE
#undef HANDLE_SPECIALIZED_MDNODE_BRANCH

File diff suppressed because it is too large

View File

@@ -0,0 +1,973 @@
//===- llvm/Module.h - C++ class to represent a VM module -------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// Module.h This file contains the declarations for the Module class.
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MODULE_H
#define LLVM_IR_MODULE_H
#include "llvm-c/Types.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/StringMap.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/Comdat.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalIFunc.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/ProfileSummary.h"
#include "llvm/IR/SymbolTableListTraits.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/CodeGen.h"
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <vector>
namespace llvm {
class Error;
class FunctionType;
class GVMaterializer;
class LLVMContext;
class MemoryBuffer;
class ModuleSummaryIndex;
class RandomNumberGenerator;
class StructType;
class VersionTuple;
/// A Module instance is used to store all the information related to an
/// LLVM module. Modules are the top level container of all other LLVM
/// Intermediate Representation (IR) objects. Each module directly contains a
/// list of global variables, a list of functions, a list of libraries (or
/// other modules) this module depends on, a symbol table, and various data
/// about the target's characteristics.
///
/// A module maintains a GlobalValueRefMap object that is used to hold all
/// constant references to global variables in the module. When a global
/// variable is destroyed, it should have no entries in the GlobalValueRefMap.
/// The main container class for the LLVM Intermediate Representation.
class LLVM_EXTERNAL_VISIBILITY Module {
/// @name Types And Enumerations
/// @{
public:
/// The type for the list of global variables.
using GlobalListType = SymbolTableList<GlobalVariable>;
/// The type for the list of functions.
using FunctionListType = SymbolTableList<Function>;
/// The type for the list of aliases.
using AliasListType = SymbolTableList<GlobalAlias>;
/// The type for the list of ifuncs.
using IFuncListType = SymbolTableList<GlobalIFunc>;
/// The type for the list of named metadata.
using NamedMDListType = ilist<NamedMDNode>;
/// The type of the comdat "symbol" table.
using ComdatSymTabType = StringMap<Comdat>;
/// The type for mapping names to named metadata.
using NamedMDSymTabType = StringMap<NamedMDNode *>;
/// The Global Variable iterator.
using global_iterator = GlobalListType::iterator;
/// The Global Variable constant iterator.
using const_global_iterator = GlobalListType::const_iterator;
/// The Function iterators.
using iterator = FunctionListType::iterator;
/// The Function constant iterator
using const_iterator = FunctionListType::const_iterator;
/// The Function reverse iterator.
using reverse_iterator = FunctionListType::reverse_iterator;
/// The Function constant reverse iterator.
using const_reverse_iterator = FunctionListType::const_reverse_iterator;
/// The Global Alias iterators.
using alias_iterator = AliasListType::iterator;
/// The Global Alias constant iterator
using const_alias_iterator = AliasListType::const_iterator;
/// The Global IFunc iterators.
using ifunc_iterator = IFuncListType::iterator;
/// The Global IFunc constant iterator
using const_ifunc_iterator = IFuncListType::const_iterator;
/// The named metadata iterators.
using named_metadata_iterator = NamedMDListType::iterator;
/// The named metadata constant iterators.
using const_named_metadata_iterator = NamedMDListType::const_iterator;
/// This enumeration defines the supported behaviors of module flags.
enum ModFlagBehavior {
/// Emits an error if two values disagree, otherwise the resulting value is
/// that of the operands.
Error = 1,
/// Emits a warning if two values disagree. The result value will be the
/// operand for the flag from the first module being linked.
Warning = 2,
/// Adds a requirement that another module flag be present and have a
/// specified value after linking is performed. The value must be a metadata
/// pair, where the first element of the pair is the ID of the module flag
/// to be restricted, and the second element of the pair is the value the
/// module flag should be restricted to. This behavior can be used to
/// restrict the allowable results (via triggering of an error) of linking
/// IDs with the **Override** behavior.
Require = 3,
/// Uses the specified value, regardless of the behavior or value of the
/// other module. If both modules specify **Override**, but the values
/// differ, an error will be emitted.
Override = 4,
/// Appends the two values, which are required to be metadata nodes.
Append = 5,
/// Appends the two values, which are required to be metadata
/// nodes. However, duplicate entries in the second list are dropped
/// during the append operation.
AppendUnique = 6,
/// Takes the max of the two values, which are required to be integers.
Max = 7,
// Markers:
ModFlagBehaviorFirstVal = Error,
ModFlagBehaviorLastVal = Max
};
/// Checks if Metadata represents a valid ModFlagBehavior, and stores the
/// converted result in MFB.
static bool isValidModFlagBehavior(Metadata *MD, ModFlagBehavior &MFB);
/// Check if the given module flag metadata represents a valid module flag,
/// and store the flag behavior, the key string and the value metadata.
static bool isValidModuleFlag(const MDNode &ModFlag, ModFlagBehavior &MFB,
MDString *&Key, Metadata *&Val);
struct ModuleFlagEntry {
ModFlagBehavior Behavior;
MDString *Key;
Metadata *Val;
ModuleFlagEntry(ModFlagBehavior B, MDString *K, Metadata *V)
: Behavior(B), Key(K), Val(V) {}
};
/// @}
/// @name Member Variables
/// @{
private:
LLVMContext &Context; ///< The LLVMContext from which types and
///< constants are allocated.
GlobalListType GlobalList; ///< The Global Variables in the module
FunctionListType FunctionList; ///< The Functions in the module
AliasListType AliasList; ///< The Aliases in the module
IFuncListType IFuncList; ///< The IFuncs in the module
NamedMDListType NamedMDList; ///< The named metadata in the module
std::string GlobalScopeAsm; ///< Inline Asm at global scope.
std::unique_ptr<ValueSymbolTable> ValSymTab; ///< Symbol table for values
ComdatSymTabType ComdatSymTab; ///< Symbol table for COMDATs
std::unique_ptr<MemoryBuffer>
OwnedMemoryBuffer; ///< Memory buffer directly owned by this
///< module, for legacy clients only.
std::unique_ptr<GVMaterializer>
Materializer; ///< Used to materialize GlobalValues
std::string ModuleID; ///< Human readable identifier for the module
std::string SourceFileName; ///< Original source file name for module,
///< recorded in bitcode.
std::string TargetTriple; ///< Platform target triple the module was compiled for.
///< Format: (arch)(sub)-(vendor)-(sys)-(abi)
NamedMDSymTabType NamedMDSymTab; ///< NamedMDNode names.
DataLayout DL; ///< DataLayout associated with the module
StringMap<unsigned>
CurrentIntrinsicIds; ///< Keep track of the current unique id count for
///< the specified intrinsic basename.
DenseMap<std::pair<Intrinsic::ID, const FunctionType *>, unsigned>
UniquedIntrinsicNames; ///< Keep track of uniqued names of intrinsics
///< based on unnamed types. The combination of
///< ID and FunctionType maps to the extension that
///< is used to make the intrinsic name unique.
friend class Constant;
/// @}
/// @name Constructors
/// @{
public:
/// The Module constructor. Note that there is no default constructor. You
/// must provide a name for the module upon construction.
explicit Module(StringRef ModuleID, LLVMContext& C);
/// The module destructor. This will dropAllReferences.
~Module();
/// @}
/// @name Module Level Accessors
/// @{
/// Get the module identifier which is, essentially, the name of the module.
/// @returns the module identifier as a string
const std::string &getModuleIdentifier() const { return ModuleID; }
/// Returns the number of non-debug IR instructions in the module.
/// This is equivalent to the sum of the IR instruction counts of each
/// function contained in the module.
unsigned getInstructionCount() const;
/// Get the module's original source file name. When compiling from
/// bitcode, this is taken from a bitcode record where it was recorded.
/// For other compiles it is the same as the ModuleID, which would
/// contain the source file name.
const std::string &getSourceFileName() const { return SourceFileName; }
/// Get a short "name" for the module.
///
/// This is useful for debugging or logging. It is essentially a convenience
/// wrapper around getModuleIdentifier().
StringRef getName() const { return ModuleID; }
/// Get the data layout string for the module's target platform. This is
/// equivalent to getDataLayout().getStringRepresentation().
const std::string &getDataLayoutStr() const {
return DL.getStringRepresentation();
}
/// Get the data layout for the module's target platform.
const DataLayout &getDataLayout() const;
/// Get the target triple which is a string describing the target host.
/// @returns a string containing the target triple.
const std::string &getTargetTriple() const { return TargetTriple; }
/// Get the global data context.
/// @returns LLVMContext - a container for LLVM's global information
LLVMContext &getContext() const { return Context; }
/// Get any module-scope inline assembly blocks.
/// @returns a string containing the module-scope inline assembly blocks.
const std::string &getModuleInlineAsm() const { return GlobalScopeAsm; }
/// Get a RandomNumberGenerator salted for use with this module. The
/// RNG can be seeded via -rng-seed=<uint64> and is salted with the
/// ModuleID and the provided pass salt. The returned RNG should not
/// be shared across threads or passes.
///
/// A unique RNG per pass ensures a reproducible random stream even
/// when other randomness consuming passes are added or removed. In
/// addition, the random stream will be reproducible across LLVM
/// versions when the pass does not change.
std::unique_ptr<RandomNumberGenerator> createRNG(const StringRef Name) const;
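// Illustrative sketch (added for exposition; not part of the original header).
// Draws one value from a pass-salted stream; "my-pass" is a hypothetical salt,
// and RandomNumberGenerator comes from llvm/Support/RandomNumberGenerator.h.
//   std::unique_ptr<RandomNumberGenerator> RNG = M.createRNG("my-pass");
//   uint64_t Roll = (*RNG)(); // reproducible for a fixed -rng-seed and salt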
/// Return true if size-info optimization remark is enabled, false
/// otherwise.
bool shouldEmitInstrCountChangedRemark() {
return getContext().getDiagHandlerPtr()->isAnalysisRemarkEnabled(
"size-info");
}
/// @}
/// @name Module Level Mutators
/// @{
/// Set the module identifier.
void setModuleIdentifier(StringRef ID) { ModuleID = std::string(ID); }
/// Set the module's original source file name.
void setSourceFileName(StringRef Name) { SourceFileName = std::string(Name); }
/// Set the data layout
void setDataLayout(StringRef Desc);
void setDataLayout(const DataLayout &Other);
/// Set the target triple.
void setTargetTriple(StringRef T) { TargetTriple = std::string(T); }
/// Set the module-scope inline assembly blocks.
/// A trailing newline is added if the input doesn't have one.
void setModuleInlineAsm(StringRef Asm) {
GlobalScopeAsm = std::string(Asm);
if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
GlobalScopeAsm += '\n';
}
/// Append to the module-scope inline assembly blocks.
/// A trailing newline is added if the input doesn't have one.
void appendModuleInlineAsm(StringRef Asm) {
GlobalScopeAsm += Asm;
if (!GlobalScopeAsm.empty() && GlobalScopeAsm.back() != '\n')
GlobalScopeAsm += '\n';
}
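// Illustrative sketch (added for exposition; not part of the original header).
// Shows the trailing-newline guarantee of the two mutators above; "my_sym" is
// a hypothetical symbol.
//   M.setModuleInlineAsm(".globl my_sym");
//   M.appendModuleInlineAsm("my_sym:");
//   // getModuleInlineAsm() now returns ".globl my_sym\nmy_sym:\n"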
/// @}
/// @name Generic Value Accessors
/// @{
/// Return the global value in the module with the specified name, of
/// arbitrary type. This method returns null if a global with the specified
/// name is not found.
GlobalValue *getNamedValue(StringRef Name) const;
/// Return the number of global values in the module.
unsigned getNumNamedValues() const;
/// Return a unique non-zero ID for the specified metadata kind. This ID is
/// uniqued across modules in the current LLVMContext.
unsigned getMDKindID(StringRef Name) const;
/// Populate the client-supplied SmallVector with the names of custom metadata IDs
/// registered in this LLVMContext.
void getMDKindNames(SmallVectorImpl<StringRef> &Result) const;
/// Populate the client-supplied SmallVector with the bundle tags registered in
/// this LLVMContext. The bundle tags are ordered by increasing bundle IDs.
/// \see LLVMContext::getOperandBundleTagID
void getOperandBundleTags(SmallVectorImpl<StringRef> &Result) const;
std::vector<StructType *> getIdentifiedStructTypes() const;
/// Return a unique name for an intrinsic whose mangling is based on an
/// unnamed type. The Proto represents the function prototype.
std::string getUniqueIntrinsicName(StringRef BaseName, Intrinsic::ID Id,
const FunctionType *Proto);
/// @}
/// @name Function Accessors
/// @{
/// Look up the specified function in the module symbol table. Three
/// possibilities:
/// 1. If it does not exist, add a prototype for the function and return it.
/// 2. Otherwise, if the existing function has the correct prototype, return
/// the existing function.
/// 3. Finally, the function exists but has the wrong prototype: return the
/// function with a constantexpr cast to the right prototype.
///
/// In all cases, the returned value is a FunctionCallee wrapper around the
/// 'FunctionType *T' passed in, as well as a 'Value*' that is either the
/// Function itself or a bitcast of it to the requested function type.
FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T,
AttributeList AttributeList);
FunctionCallee getOrInsertFunction(StringRef Name, FunctionType *T);
/// Look up the specified function in the module symbol table. If it does not
/// exist, add a prototype for the function and return it. This function is
/// guaranteed to return a constant pointer to the specified function type, or
/// a ConstantExpr BitCast of it if the named function has a different type.
/// This version of the method takes a list of function arguments, which makes
/// it easier for clients to use.
template <typename... ArgsTy>
FunctionCallee getOrInsertFunction(StringRef Name,
AttributeList AttributeList, Type *RetTy,
ArgsTy... Args) {
SmallVector<Type*, sizeof...(ArgsTy)> ArgTys{Args...};
return getOrInsertFunction(Name,
FunctionType::get(RetTy, ArgTys, false),
AttributeList);
}
/// Same as above, but without the attributes.
template <typename... ArgsTy>
FunctionCallee getOrInsertFunction(StringRef Name, Type *RetTy,
ArgsTy... Args) {
return getOrInsertFunction(Name, AttributeList{}, RetTy, Args...);
}
// This overload is deleted to catch a misordered call that passes a
// FunctionType where the return type belongs, which would otherwise compile.
template <typename... ArgsTy>
FunctionCallee
getOrInsertFunction(StringRef Name, AttributeList AttributeList,
FunctionType *Invalid, ArgsTy... Args) = delete;
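// Illustrative sketch (added for exposition; not part of the original header).
// Declares an i32(i32) function by name; assumes an LLVMContext Ctx and a
// Module M, and "my_callee" is a hypothetical symbol.
//   FunctionCallee Callee = M.getOrInsertFunction(
//       "my_callee", Type::getInt32Ty(Ctx), Type::getInt32Ty(Ctx));
//   // Callee.getCallee() is the Function, or a bitcast of it if a symbol
//   // with a different type already existed under that name.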
/// Look up the specified function in the module symbol table. If it does not
/// exist, return null.
Function *getFunction(StringRef Name) const;
/// @}
/// @name Global Variable Accessors
/// @{
/// Look up the specified global variable in the module symbol table. If it
/// does not exist, return null. If AllowInternal is set to true, this
/// function will also return globals that have InternalLinkage. By default,
/// such globals are not returned.
GlobalVariable *getGlobalVariable(StringRef Name) const {
return getGlobalVariable(Name, false);
}
GlobalVariable *getGlobalVariable(StringRef Name, bool AllowInternal) const;
GlobalVariable *getGlobalVariable(StringRef Name,
bool AllowInternal = false) {
return static_cast<const Module *>(this)->getGlobalVariable(Name,
AllowInternal);
}
/// Return the global variable in the module with the specified name, of
/// arbitrary type. This method returns null if a global with the specified
/// name is not found.
const GlobalVariable *getNamedGlobal(StringRef Name) const {
return getGlobalVariable(Name, true);
}
GlobalVariable *getNamedGlobal(StringRef Name) {
return const_cast<GlobalVariable *>(
static_cast<const Module *>(this)->getNamedGlobal(Name));
}
/// Look up the specified global in the module symbol table.
/// If it does not exist, invoke a callback to create a declaration of the
/// global and return it. The global is constantexpr casted to the expected
/// type if necessary.
Constant *
getOrInsertGlobal(StringRef Name, Type *Ty,
function_ref<GlobalVariable *()> CreateGlobalCallback);
/// Look up the specified global in the module symbol table. If required, this
/// overload constructs the global variable using its constructor's defaults.
Constant *getOrInsertGlobal(StringRef Name, Type *Ty);
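// Illustrative sketch (added for exposition; not part of the original header).
// Creates or finds an i32 global; assumes LLVMContext Ctx and Module M, and
// "my_counter" is a hypothetical name.
//   Constant *C = M.getOrInsertGlobal("my_counter", Type::getInt32Ty(Ctx));
//   auto *GV = dyn_cast<GlobalVariable>(C); // null if a cast was required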
/// @}
/// @name Global Alias Accessors
/// @{
/// Return the global alias in the module with the specified name, of
/// arbitrary type. This method returns null if a global with the specified
/// name is not found.
GlobalAlias *getNamedAlias(StringRef Name) const;
/// @}
/// @name Global IFunc Accessors
/// @{
/// Return the global ifunc in the module with the specified name, of
/// arbitrary type. This method returns null if a global with the specified
/// name is not found.
GlobalIFunc *getNamedIFunc(StringRef Name) const;
/// @}
/// @name Named Metadata Accessors
/// @{
/// Return the first NamedMDNode in the module with the specified name. This
/// method returns null if a NamedMDNode with the specified name is not found.
NamedMDNode *getNamedMetadata(const Twine &Name) const;
/// Return the named MDNode in the module with the specified name. This method
/// returns a new NamedMDNode if a NamedMDNode with the specified name is not
/// found.
NamedMDNode *getOrInsertNamedMetadata(StringRef Name);
/// Remove the given NamedMDNode from this module and delete it.
void eraseNamedMetadata(NamedMDNode *NMD);
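// Illustrative sketch (added for exposition; not part of the original header).
// Appends one operand to a custom named metadata node; "my.md" is a
// hypothetical name and Ctx an existing LLVMContext.
//   NamedMDNode *NMD = M.getOrInsertNamedMetadata("my.md");
//   NMD->addOperand(MDNode::get(Ctx, {MDString::get(Ctx, "hello")}));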
/// @}
/// @name Comdat Accessors
/// @{
/// Return the Comdat in the module with the specified name. It is created
/// if it didn't already exist.
Comdat *getOrInsertComdat(StringRef Name);
/// @}
/// @name Module Flags Accessors
/// @{
/// Returns the module flags in the provided vector.
void getModuleFlagsMetadata(SmallVectorImpl<ModuleFlagEntry> &Flags) const;
/// Return the corresponding value if Key appears in module flags, otherwise
/// return null.
Metadata *getModuleFlag(StringRef Key) const;
/// Returns the NamedMDNode in the module that represents module-level flags.
/// This method returns null if there are no module-level flags.
NamedMDNode *getModuleFlagsMetadata() const;
/// Returns the NamedMDNode in the module that represents module-level flags.
/// If module-level flags aren't found, it creates the named metadata that
/// contains them.
NamedMDNode *getOrInsertModuleFlagsMetadata();
/// Add a module-level flag to the module-level flags metadata. It will create
/// the module-level flags named metadata if it doesn't already exist.
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, Constant *Val);
void addModuleFlag(ModFlagBehavior Behavior, StringRef Key, uint32_t Val);
void addModuleFlag(MDNode *Node);
/// Like addModuleFlag but replaces the old module flag if it already exists.
void setModuleFlag(ModFlagBehavior Behavior, StringRef Key, Metadata *Val);
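// Illustrative sketch (added for exposition; not part of the original header).
// Records a custom integer module flag and reads it back; "my-flag" is a
// hypothetical key.
//   M.addModuleFlag(Module::Warning, "my-flag", 1);
//   Metadata *MD = M.getModuleFlag("my-flag"); // ConstantAsMetadata of i32 1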
/// @}
/// @name Materialization
/// @{
/// Sets the GVMaterializer to GVM. This module must not yet have a
/// Materializer. To reset the materializer for a module that already has one,
/// call materializeAll first. Destroying this module will destroy
/// its materializer without materializing any more GlobalValues. Without
/// destroying the Module, there is no way to detach or destroy a materializer
/// without materializing all the GVs it controls, to avoid leaving orphan
/// unmaterialized GVs.
void setMaterializer(GVMaterializer *GVM);
/// Retrieves the GVMaterializer, if any, for this Module.
GVMaterializer *getMaterializer() const { return Materializer.get(); }
bool isMaterialized() const { return !getMaterializer(); }
/// Make sure the GlobalValue is fully read.
llvm::Error materialize(GlobalValue *GV);
/// Make sure all GlobalValues in this Module are fully read and clear the
/// Materializer.
llvm::Error materializeAll();
llvm::Error materializeMetadata();
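// Illustrative sketch (added for exposition; not part of the original header).
// Forces all lazily loaded GlobalValues to be read; error handling uses
// llvm/Support/Error.h.
//   if (Error Err = M.materializeAll())
//     logAllUnhandledErrors(std::move(Err), errs(), "materialize: ");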
/// @}
/// @name Direct access to the globals list, functions list, and symbol table
/// @{
/// Get the Module's list of global variables (constant).
const GlobalListType &getGlobalList() const { return GlobalList; }
/// Get the Module's list of global variables.
GlobalListType &getGlobalList() { return GlobalList; }
static GlobalListType Module::*getSublistAccess(GlobalVariable*) {
return &Module::GlobalList;
}
/// Get the Module's list of functions (constant).
const FunctionListType &getFunctionList() const { return FunctionList; }
/// Get the Module's list of functions.
FunctionListType &getFunctionList() { return FunctionList; }
static FunctionListType Module::*getSublistAccess(Function*) {
return &Module::FunctionList;
}
/// Get the Module's list of aliases (constant).
const AliasListType &getAliasList() const { return AliasList; }
/// Get the Module's list of aliases.
AliasListType &getAliasList() { return AliasList; }
static AliasListType Module::*getSublistAccess(GlobalAlias*) {
return &Module::AliasList;
}
/// Get the Module's list of ifuncs (constant).
const IFuncListType &getIFuncList() const { return IFuncList; }
/// Get the Module's list of ifuncs.
IFuncListType &getIFuncList() { return IFuncList; }
static IFuncListType Module::*getSublistAccess(GlobalIFunc*) {
return &Module::IFuncList;
}
/// Get the Module's list of named metadata (constant).
const NamedMDListType &getNamedMDList() const { return NamedMDList; }
/// Get the Module's list of named metadata.
NamedMDListType &getNamedMDList() { return NamedMDList; }
static NamedMDListType Module::*getSublistAccess(NamedMDNode*) {
return &Module::NamedMDList;
}
/// Get the symbol table of global variable and function identifiers
const ValueSymbolTable &getValueSymbolTable() const { return *ValSymTab; }
/// Get the Module's symbol table of global variable and function identifiers.
ValueSymbolTable &getValueSymbolTable() { return *ValSymTab; }
/// Get the Module's symbol table for COMDATs (constant).
const ComdatSymTabType &getComdatSymbolTable() const { return ComdatSymTab; }
/// Get the Module's symbol table for COMDATs.
ComdatSymTabType &getComdatSymbolTable() { return ComdatSymTab; }
/// @}
/// @name Global Variable Iteration
/// @{
global_iterator global_begin() { return GlobalList.begin(); }
const_global_iterator global_begin() const { return GlobalList.begin(); }
global_iterator global_end () { return GlobalList.end(); }
const_global_iterator global_end () const { return GlobalList.end(); }
size_t global_size () const { return GlobalList.size(); }
bool global_empty() const { return GlobalList.empty(); }
iterator_range<global_iterator> globals() {
return make_range(global_begin(), global_end());
}
iterator_range<const_global_iterator> globals() const {
return make_range(global_begin(), global_end());
}
/// @}
/// @name Function Iteration
/// @{
iterator begin() { return FunctionList.begin(); }
const_iterator begin() const { return FunctionList.begin(); }
iterator end () { return FunctionList.end(); }
const_iterator end () const { return FunctionList.end(); }
reverse_iterator rbegin() { return FunctionList.rbegin(); }
const_reverse_iterator rbegin() const{ return FunctionList.rbegin(); }
reverse_iterator rend() { return FunctionList.rend(); }
const_reverse_iterator rend() const { return FunctionList.rend(); }
size_t size() const { return FunctionList.size(); }
bool empty() const { return FunctionList.empty(); }
iterator_range<iterator> functions() {
return make_range(begin(), end());
}
iterator_range<const_iterator> functions() const {
return make_range(begin(), end());
}
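// Illustrative sketch (added for exposition; not part of the original header).
// Counts function definitions via the range-based accessors above.
//   unsigned NumDefs = 0;
//   for (const Function &F : M.functions())
//     if (!F.isDeclaration())
//       ++NumDefs;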
/// @}
/// @name Alias Iteration
/// @{
alias_iterator alias_begin() { return AliasList.begin(); }
const_alias_iterator alias_begin() const { return AliasList.begin(); }
alias_iterator alias_end () { return AliasList.end(); }
const_alias_iterator alias_end () const { return AliasList.end(); }
size_t alias_size () const { return AliasList.size(); }
bool alias_empty() const { return AliasList.empty(); }
iterator_range<alias_iterator> aliases() {
return make_range(alias_begin(), alias_end());
}
iterator_range<const_alias_iterator> aliases() const {
return make_range(alias_begin(), alias_end());
}
/// @}
/// @name IFunc Iteration
/// @{
ifunc_iterator ifunc_begin() { return IFuncList.begin(); }
const_ifunc_iterator ifunc_begin() const { return IFuncList.begin(); }
ifunc_iterator ifunc_end () { return IFuncList.end(); }
const_ifunc_iterator ifunc_end () const { return IFuncList.end(); }
size_t ifunc_size () const { return IFuncList.size(); }
bool ifunc_empty() const { return IFuncList.empty(); }
iterator_range<ifunc_iterator> ifuncs() {
return make_range(ifunc_begin(), ifunc_end());
}
iterator_range<const_ifunc_iterator> ifuncs() const {
return make_range(ifunc_begin(), ifunc_end());
}
/// @}
/// @name Convenience iterators
/// @{
using global_object_iterator =
concat_iterator<GlobalObject, iterator, global_iterator>;
using const_global_object_iterator =
concat_iterator<const GlobalObject, const_iterator,
const_global_iterator>;
iterator_range<global_object_iterator> global_objects();
iterator_range<const_global_object_iterator> global_objects() const;
using global_value_iterator =
concat_iterator<GlobalValue, iterator, global_iterator, alias_iterator,
ifunc_iterator>;
using const_global_value_iterator =
concat_iterator<const GlobalValue, const_iterator, const_global_iterator,
const_alias_iterator, const_ifunc_iterator>;
iterator_range<global_value_iterator> global_values();
iterator_range<const_global_value_iterator> global_values() const;
/// @}
/// @name Named Metadata Iteration
/// @{
named_metadata_iterator named_metadata_begin() { return NamedMDList.begin(); }
const_named_metadata_iterator named_metadata_begin() const {
return NamedMDList.begin();
}
named_metadata_iterator named_metadata_end() { return NamedMDList.end(); }
const_named_metadata_iterator named_metadata_end() const {
return NamedMDList.end();
}
size_t named_metadata_size() const { return NamedMDList.size(); }
bool named_metadata_empty() const { return NamedMDList.empty(); }
iterator_range<named_metadata_iterator> named_metadata() {
return make_range(named_metadata_begin(), named_metadata_end());
}
iterator_range<const_named_metadata_iterator> named_metadata() const {
return make_range(named_metadata_begin(), named_metadata_end());
}
/// An iterator for DICompileUnits that skips those marked NoDebug.
class debug_compile_units_iterator {
NamedMDNode *CUs;
unsigned Idx;
void SkipNoDebugCUs();
public:
using iterator_category = std::input_iterator_tag;
using value_type = DICompileUnit *;
using difference_type = std::ptrdiff_t;
using pointer = value_type *;
using reference = value_type &;
explicit debug_compile_units_iterator(NamedMDNode *CUs, unsigned Idx)
: CUs(CUs), Idx(Idx) {
SkipNoDebugCUs();
}
debug_compile_units_iterator &operator++() {
++Idx;
SkipNoDebugCUs();
return *this;
}
debug_compile_units_iterator operator++(int) {
debug_compile_units_iterator T(*this);
++Idx;
return T;
}
bool operator==(const debug_compile_units_iterator &I) const {
return Idx == I.Idx;
}
bool operator!=(const debug_compile_units_iterator &I) const {
return Idx != I.Idx;
}
DICompileUnit *operator*() const;
DICompileUnit *operator->() const;
};
debug_compile_units_iterator debug_compile_units_begin() const {
auto *CUs = getNamedMetadata("llvm.dbg.cu");
return debug_compile_units_iterator(CUs, 0);
}
debug_compile_units_iterator debug_compile_units_end() const {
auto *CUs = getNamedMetadata("llvm.dbg.cu");
return debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0);
}
/// Return an iterator range over all DICompileUnits that are listed in this
/// Module's llvm.dbg.cu named metadata node and that aren't explicitly marked
/// as NoDebug.
iterator_range<debug_compile_units_iterator> debug_compile_units() const {
auto *CUs = getNamedMetadata("llvm.dbg.cu");
return make_range(
debug_compile_units_iterator(CUs, 0),
debug_compile_units_iterator(CUs, CUs ? CUs->getNumOperands() : 0));
}
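// Illustrative sketch (added for exposition; not part of the original header).
// Visits each compile unit that is not marked NoDebug.
//   for (DICompileUnit *CU : M.debug_compile_units())
//     errs() << CU->getFilename() << "\n";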
/// @}
/// Destroy ConstantArrays in LLVMContext if they are not used.
/// ConstantArrays constructed during linking can cause quadratic memory
/// explosion. Releasing all unused constants can cause a 20% LTO compile-time
/// slowdown for a large application.
///
/// NOTE: Constants are currently owned by LLVMContext, so this may only be
/// called where all uses of the LLVMContext are understood.
void dropTriviallyDeadConstantArrays();
/// @name Utility functions for printing and dumping Module objects
/// @{
/// Print the module to an output stream with an optional
/// AssemblyAnnotationWriter. If \c ShouldPreserveUseListOrder, then include
/// uselistorder directives so that use-lists can be recreated when reading
/// the assembly.
void print(raw_ostream &OS, AssemblyAnnotationWriter *AAW,
bool ShouldPreserveUseListOrder = false,
bool IsForDebug = false) const;
/// Dump the module to stderr (for debugging).
void dump() const;
/// This function causes all the subinstructions to "let go" of all references
/// that they are maintaining. This allows one to 'delete' a whole module at
/// a time, even though there may be circular references... first all
/// references are dropped, and all use counts go to zero. Then everything
/// is delete'd for real. Note that no operations are valid on an object
/// that has "dropped all references", except operator delete.
void dropAllReferences();
/// @}
/// @name Utility functions for querying Debug information.
/// @{
/// Returns the number of register parameters by checking module flags.
unsigned getNumberRegisterParameters() const;
/// Returns the Dwarf Version by checking module flags.
unsigned getDwarfVersion() const;
/// Returns the DWARF format by checking module flags.
bool isDwarf64() const;
/// Returns the CodeView Version by checking module flags.
/// Returns zero if not present in module.
unsigned getCodeViewFlag() const;
/// @}
/// @name Utility functions for querying and setting PIC level
/// @{
/// Returns the PIC level (small or large model)
PICLevel::Level getPICLevel() const;
/// Set the PIC level (small or large model)
void setPICLevel(PICLevel::Level PL);
/// @}
/// @name Utility functions for querying and setting PIE level
/// @{
/// Returns the PIE level (small or large model)
PIELevel::Level getPIELevel() const;
/// Set the PIE level (small or large model)
void setPIELevel(PIELevel::Level PL);
/// @}
/// @name Utility functions for querying and setting the code model
/// @{
/// Returns the code model (tiny, small, kernel, medium or large model)
Optional<CodeModel::Model> getCodeModel() const;
/// Set the code model (tiny, small, kernel, medium or large)
void setCodeModel(CodeModel::Model CL);
/// @}
/// @name Utility functions for querying and setting PGO summary
/// @{
/// Attach profile summary metadata to this module.
void setProfileSummary(Metadata *M, ProfileSummary::Kind Kind);
/// Returns profile summary metadata. When IsCS is true, use the context
/// sensitive profile summary.
Metadata *getProfileSummary(bool IsCS) const;
/// @}
/// Returns whether semantic interposition is to be respected.
bool getSemanticInterposition() const;
/// Set whether semantic interposition is to be respected.
void setSemanticInterposition(bool);
/// Returns true if PLT should be avoided for RTLib calls.
bool getRtLibUseGOT() const;
/// Set that PLT should be avoided for RTLib calls.
void setRtLibUseGOT();
/// Get/set whether synthesized functions should get the uwtable attribute.
bool getUwtable() const;
void setUwtable();
/// Get/set whether synthesized functions should get the "frame-pointer"
/// attribute.
FramePointerKind getFramePointer() const;
void setFramePointer(FramePointerKind Kind);
/// Get/set what kind of stack protector guard to use.
StringRef getStackProtectorGuard() const;
void setStackProtectorGuard(StringRef Kind);
/// Get/set which register to use as the stack protector guard register. The
/// empty string is equivalent to "global". Other values may be "tls" or
/// "sysreg".
StringRef getStackProtectorGuardReg() const;
void setStackProtectorGuardReg(StringRef Reg);
/// Get/set what offset from the stack protector to use.
int getStackProtectorGuardOffset() const;
void setStackProtectorGuardOffset(int Offset);
/// Get/set the stack alignment overridden from the default.
unsigned getOverrideStackAlignment() const;
void setOverrideStackAlignment(unsigned Align);
/// @name Utility functions for querying and setting the build SDK version
/// @{
/// Attach a build SDK version metadata to this module.
void setSDKVersion(const VersionTuple &V);
/// Get the build SDK version metadata.
///
/// An empty version is returned if no such metadata is attached.
VersionTuple getSDKVersion() const;
/// @}
/// Take ownership of the given memory buffer.
void setOwnedMemoryBuffer(std::unique_ptr<MemoryBuffer> MB);
/// Set the partial sample profile ratio in the profile summary module flag,
/// if applicable.
void setPartialSampleProfileRatio(const ModuleSummaryIndex &Index);
/// Get the target variant triple which is a string describing a variant of
/// the target host platform. For example, Mac Catalyst can be a variant
/// target triple for a macOS target.
/// @returns a string containing the target variant triple.
StringRef getDarwinTargetVariantTriple() const;
/// Get the target variant version build SDK version metadata.
///
/// An empty version is returned if no such metadata is attached.
VersionTuple getDarwinTargetVariantSDKVersion() const;
};
/// Given "llvm.used" or "llvm.compiler.used" as a global name, collect the
/// initializer elements of that global in a SmallVector and return the global
/// itself.
GlobalVariable *collectUsedGlobalVariables(const Module &M,
SmallVectorImpl<GlobalValue *> &Vec,
bool CompilerUsed);
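// Illustrative sketch (added for exposition; not part of the original header).
// Collects the entries of @llvm.used, if the module has one.
//   SmallVector<GlobalValue *, 8> UsedValues;
//   GlobalVariable *UsedGV =
//       collectUsedGlobalVariables(M, UsedValues, /*CompilerUsed=*/false);
//   // UsedGV is null when no @llvm.used global exists.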
/// A raw_ostream inserter for modules.
inline raw_ostream &operator<<(raw_ostream &O, const Module &M) {
M.print(O, nullptr);
return O;
}
// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_SIMPLE_CONVERSION_FUNCTIONS(Module, LLVMModuleRef)
/* LLVMModuleProviderRef exists for historical reasons, but now just holds a
* Module.
*/
inline Module *unwrap(LLVMModuleProviderRef MP) {
return reinterpret_cast<Module*>(MP);
}
} // end namespace llvm
#endif // LLVM_IR_MODULE_H


@@ -0,0 +1,109 @@
//===-- llvm/IR/ModuleSlotTracker.h -----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MODULESLOTTRACKER_H
#define LLVM_IR_MODULESLOTTRACKER_H
#include <functional>
#include <memory>
#include <utility>
#include <vector>
namespace llvm {
class Module;
class Function;
class SlotTracker;
class Value;
class MDNode;
/// Abstract interface of slot tracker storage.
class AbstractSlotTrackerStorage {
public:
virtual ~AbstractSlotTrackerStorage();
virtual unsigned getNextMetadataSlot() = 0;
virtual void createMetadataSlot(const MDNode *) = 0;
virtual int getMetadataSlot(const MDNode *) = 0;
};
/// Manage lifetime of a slot tracker for printing IR.
///
/// Wrapper around the \a SlotTracker used internally by \a AsmWriter. This
/// class allows callers to share the cost of incorporating the metadata in a
/// module or a function.
///
/// If the IR changes from underneath \a ModuleSlotTracker, strings like
/// "<badref>" will be printed, or, worse, the wrong slots entirely.
class ModuleSlotTracker {
/// Storage for a slot tracker.
std::unique_ptr<SlotTracker> MachineStorage;
bool ShouldCreateStorage = false;
bool ShouldInitializeAllMetadata = false;
const Module *M = nullptr;
const Function *F = nullptr;
SlotTracker *Machine = nullptr;
std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>
ProcessModuleHookFn;
std::function<void(AbstractSlotTrackerStorage *, const Function *, bool)>
ProcessFunctionHookFn;
public:
/// Wrap a preinitialized SlotTracker.
ModuleSlotTracker(SlotTracker &Machine, const Module *M,
const Function *F = nullptr);
/// Construct a slot tracker from a module.
///
/// If \a M is \c nullptr, uses a null slot tracker. Otherwise, initializes
/// a slot tracker, and initializes all metadata slots. \c
/// ShouldInitializeAllMetadata defaults to true because this is expected to
/// be shared between multiple callers, and otherwise MDNode references will
/// not match up.
explicit ModuleSlotTracker(const Module *M,
bool ShouldInitializeAllMetadata = true);
/// Destructor to clean up storage.
virtual ~ModuleSlotTracker();
/// Lazily creates a slot tracker.
SlotTracker *getMachine();
const Module *getModule() const { return M; }
const Function *getCurrentFunction() const { return F; }
/// Incorporate the given function.
///
/// Purge the currently incorporated function and incorporate \c F. If \c F
/// is currently incorporated, this is a no-op.
void incorporateFunction(const Function &F);
/// Return the slot number of the specified local value.
///
/// A function that defines this value should be incorporated prior to calling
/// this method.
/// Return -1 if the value is not in the function's SlotTracker.
int getLocalSlot(const Value *V);
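// Illustrative sketch (added for exposition; not part of the original header).
// Resolves the printed slot number ("%N") of an unnamed value V defined in
// function F of module M.
//   ModuleSlotTracker MST(&M);
//   MST.incorporateFunction(F);
//   int Slot = MST.getLocalSlot(&V); // -1 if V has no slot in F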
void setProcessHook(
std::function<void(AbstractSlotTrackerStorage *, const Module *, bool)>);
void setProcessHook(std::function<void(AbstractSlotTrackerStorage *,
const Function *, bool)>);
using MachineMDNodeListType =
std::vector<std::pair<unsigned, const MDNode *>>;
void collectMDNodes(MachineMDNodeListType &L, unsigned LB, unsigned UB) const;
};
} // end namespace llvm
#endif

File diff suppressed because it is too large


@@ -0,0 +1,307 @@
//===-- llvm/ModuleSummaryIndexYAML.h - YAML I/O for summary ----*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef LLVM_IR_MODULESUMMARYINDEXYAML_H
#define LLVM_IR_MODULESUMMARYINDEXYAML_H
#include "llvm/IR/ModuleSummaryIndex.h"
#include "llvm/Support/YAMLTraits.h"
namespace llvm {
namespace yaml {
template <> struct ScalarEnumerationTraits<TypeTestResolution::Kind> {
static void enumeration(IO &io, TypeTestResolution::Kind &value) {
io.enumCase(value, "Unknown", TypeTestResolution::Unknown);
io.enumCase(value, "Unsat", TypeTestResolution::Unsat);
io.enumCase(value, "ByteArray", TypeTestResolution::ByteArray);
io.enumCase(value, "Inline", TypeTestResolution::Inline);
io.enumCase(value, "Single", TypeTestResolution::Single);
io.enumCase(value, "AllOnes", TypeTestResolution::AllOnes);
}
};
template <> struct MappingTraits<TypeTestResolution> {
static void mapping(IO &io, TypeTestResolution &res) {
io.mapOptional("Kind", res.TheKind);
io.mapOptional("SizeM1BitWidth", res.SizeM1BitWidth);
io.mapOptional("AlignLog2", res.AlignLog2);
io.mapOptional("SizeM1", res.SizeM1);
io.mapOptional("BitMask", res.BitMask);
io.mapOptional("InlineBits", res.InlineBits);
}
};
template <>
struct ScalarEnumerationTraits<WholeProgramDevirtResolution::ByArg::Kind> {
static void enumeration(IO &io,
WholeProgramDevirtResolution::ByArg::Kind &value) {
io.enumCase(value, "Indir", WholeProgramDevirtResolution::ByArg::Indir);
io.enumCase(value, "UniformRetVal",
WholeProgramDevirtResolution::ByArg::UniformRetVal);
io.enumCase(value, "UniqueRetVal",
WholeProgramDevirtResolution::ByArg::UniqueRetVal);
io.enumCase(value, "VirtualConstProp",
WholeProgramDevirtResolution::ByArg::VirtualConstProp);
}
};
template <> struct MappingTraits<WholeProgramDevirtResolution::ByArg> {
static void mapping(IO &io, WholeProgramDevirtResolution::ByArg &res) {
io.mapOptional("Kind", res.TheKind);
io.mapOptional("Info", res.Info);
io.mapOptional("Byte", res.Byte);
io.mapOptional("Bit", res.Bit);
}
};
template <>
struct CustomMappingTraits<
std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg>> {
static void inputOne(
IO &io, StringRef Key,
std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
std::vector<uint64_t> Args;
std::pair<StringRef, StringRef> P = {"", Key};
while (!P.second.empty()) {
P = P.second.split(',');
uint64_t Arg;
if (P.first.getAsInteger(0, Arg)) {
io.setError("key not an integer");
return;
}
Args.push_back(Arg);
}
io.mapRequired(Key.str().c_str(), V[Args]);
}
static void output(
IO &io,
std::map<std::vector<uint64_t>, WholeProgramDevirtResolution::ByArg> &V) {
for (auto &P : V) {
std::string Key;
for (uint64_t Arg : P.first) {
if (!Key.empty())
Key += ',';
Key += llvm::utostr(Arg);
}
io.mapRequired(Key.c_str(), P.second);
}
}
};
template <> struct ScalarEnumerationTraits<WholeProgramDevirtResolution::Kind> {
static void enumeration(IO &io, WholeProgramDevirtResolution::Kind &value) {
io.enumCase(value, "Indir", WholeProgramDevirtResolution::Indir);
io.enumCase(value, "SingleImpl", WholeProgramDevirtResolution::SingleImpl);
io.enumCase(value, "BranchFunnel",
WholeProgramDevirtResolution::BranchFunnel);
}
};
template <> struct MappingTraits<WholeProgramDevirtResolution> {
static void mapping(IO &io, WholeProgramDevirtResolution &res) {
io.mapOptional("Kind", res.TheKind);
io.mapOptional("SingleImplName", res.SingleImplName);
io.mapOptional("ResByArg", res.ResByArg);
}
};
template <>
struct CustomMappingTraits<std::map<uint64_t, WholeProgramDevirtResolution>> {
static void inputOne(IO &io, StringRef Key,
std::map<uint64_t, WholeProgramDevirtResolution> &V) {
uint64_t KeyInt;
if (Key.getAsInteger(0, KeyInt)) {
io.setError("key not an integer");
return;
}
io.mapRequired(Key.str().c_str(), V[KeyInt]);
}
static void output(IO &io, std::map<uint64_t, WholeProgramDevirtResolution> &V) {
for (auto &P : V)
io.mapRequired(llvm::utostr(P.first).c_str(), P.second);
}
};
template <> struct MappingTraits<TypeIdSummary> {
static void mapping(IO &io, TypeIdSummary& summary) {
io.mapOptional("TTRes", summary.TTRes);
io.mapOptional("WPDRes", summary.WPDRes);
}
};
struct FunctionSummaryYaml {
unsigned Linkage, Visibility;
bool NotEligibleToImport, Live, IsLocal, CanAutoHide;
std::vector<uint64_t> Refs;
std::vector<uint64_t> TypeTests;
std::vector<FunctionSummary::VFuncId> TypeTestAssumeVCalls,
TypeCheckedLoadVCalls;
std::vector<FunctionSummary::ConstVCall> TypeTestAssumeConstVCalls,
TypeCheckedLoadConstVCalls;
};
} // End yaml namespace
} // End llvm namespace
namespace llvm {
namespace yaml {
template <> struct MappingTraits<FunctionSummary::VFuncId> {
static void mapping(IO &io, FunctionSummary::VFuncId& id) {
io.mapOptional("GUID", id.GUID);
io.mapOptional("Offset", id.Offset);
}
};
template <> struct MappingTraits<FunctionSummary::ConstVCall> {
static void mapping(IO &io, FunctionSummary::ConstVCall& id) {
io.mapOptional("VFunc", id.VFunc);
io.mapOptional("Args", id.Args);
}
};
} // End yaml namespace
} // End llvm namespace
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::VFuncId)
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummary::ConstVCall)
namespace llvm {
namespace yaml {
template <> struct MappingTraits<FunctionSummaryYaml> {
static void mapping(IO &io, FunctionSummaryYaml& summary) {
io.mapOptional("Linkage", summary.Linkage);
io.mapOptional("Visibility", summary.Visibility);
io.mapOptional("NotEligibleToImport", summary.NotEligibleToImport);
io.mapOptional("Live", summary.Live);
io.mapOptional("Local", summary.IsLocal);
io.mapOptional("CanAutoHide", summary.CanAutoHide);
io.mapOptional("Refs", summary.Refs);
io.mapOptional("TypeTests", summary.TypeTests);
io.mapOptional("TypeTestAssumeVCalls", summary.TypeTestAssumeVCalls);
io.mapOptional("TypeCheckedLoadVCalls", summary.TypeCheckedLoadVCalls);
io.mapOptional("TypeTestAssumeConstVCalls",
summary.TypeTestAssumeConstVCalls);
io.mapOptional("TypeCheckedLoadConstVCalls",
summary.TypeCheckedLoadConstVCalls);
}
};
} // End yaml namespace
} // End llvm namespace
LLVM_YAML_IS_SEQUENCE_VECTOR(FunctionSummaryYaml)
namespace llvm {
namespace yaml {
// FIXME: Add YAML mappings for the rest of the module summary.
template <> struct CustomMappingTraits<GlobalValueSummaryMapTy> {
static void inputOne(IO &io, StringRef Key, GlobalValueSummaryMapTy &V) {
std::vector<FunctionSummaryYaml> FSums;
io.mapRequired(Key.str().c_str(), FSums);
uint64_t KeyInt;
if (Key.getAsInteger(0, KeyInt)) {
io.setError("key not an integer");
return;
}
if (!V.count(KeyInt))
V.emplace(KeyInt, /*IsAnalysis=*/false);
auto &Elem = V.find(KeyInt)->second;
for (auto &FSum : FSums) {
std::vector<ValueInfo> Refs;
for (auto &RefGUID : FSum.Refs) {
if (!V.count(RefGUID))
V.emplace(RefGUID, /*IsAnalysis=*/false);
Refs.push_back(ValueInfo(/*IsAnalysis=*/false, &*V.find(RefGUID)));
}
Elem.SummaryList.push_back(std::make_unique<FunctionSummary>(
GlobalValueSummary::GVFlags(
static_cast<GlobalValue::LinkageTypes>(FSum.Linkage),
static_cast<GlobalValue::VisibilityTypes>(FSum.Visibility),
FSum.NotEligibleToImport, FSum.Live, FSum.IsLocal,
FSum.CanAutoHide),
/*NumInsts=*/0, FunctionSummary::FFlags{}, /*EntryCount=*/0, Refs,
ArrayRef<FunctionSummary::EdgeTy>{}, std::move(FSum.TypeTests),
std::move(FSum.TypeTestAssumeVCalls),
std::move(FSum.TypeCheckedLoadVCalls),
std::move(FSum.TypeTestAssumeConstVCalls),
std::move(FSum.TypeCheckedLoadConstVCalls),
ArrayRef<FunctionSummary::ParamAccess>{}));
}
}
static void output(IO &io, GlobalValueSummaryMapTy &V) {
for (auto &P : V) {
std::vector<FunctionSummaryYaml> FSums;
for (auto &Sum : P.second.SummaryList) {
if (auto *FSum = dyn_cast<FunctionSummary>(Sum.get())) {
std::vector<uint64_t> Refs;
for (auto &VI : FSum->refs())
Refs.push_back(VI.getGUID());
FSums.push_back(FunctionSummaryYaml{
FSum->flags().Linkage, FSum->flags().Visibility,
static_cast<bool>(FSum->flags().NotEligibleToImport),
static_cast<bool>(FSum->flags().Live),
static_cast<bool>(FSum->flags().DSOLocal),
static_cast<bool>(FSum->flags().CanAutoHide), Refs,
FSum->type_tests(), FSum->type_test_assume_vcalls(),
FSum->type_checked_load_vcalls(),
FSum->type_test_assume_const_vcalls(),
FSum->type_checked_load_const_vcalls()});
}
}
if (!FSums.empty())
io.mapRequired(llvm::utostr(P.first).c_str(), FSums);
}
}
};
template <> struct CustomMappingTraits<TypeIdSummaryMapTy> {
static void inputOne(IO &io, StringRef Key, TypeIdSummaryMapTy &V) {
TypeIdSummary TId;
io.mapRequired(Key.str().c_str(), TId);
V.insert({GlobalValue::getGUID(Key), {std::string(Key), TId}});
}
static void output(IO &io, TypeIdSummaryMapTy &V) {
for (auto TidIter = V.begin(); TidIter != V.end(); TidIter++)
io.mapRequired(TidIter->second.first.c_str(), TidIter->second.second);
}
};
template <> struct MappingTraits<ModuleSummaryIndex> {
static void mapping(IO &io, ModuleSummaryIndex& index) {
io.mapOptional("GlobalValueMap", index.GlobalValueMap);
io.mapOptional("TypeIdMap", index.TypeIdMap);
io.mapOptional("WithGlobalValueDeadStripping",
index.WithGlobalValueDeadStripping);
if (io.outputting()) {
std::vector<std::string> CfiFunctionDefs(index.CfiFunctionDefs.begin(),
index.CfiFunctionDefs.end());
io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
std::vector<std::string> CfiFunctionDecls(index.CfiFunctionDecls.begin(),
index.CfiFunctionDecls.end());
io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
} else {
std::vector<std::string> CfiFunctionDefs;
io.mapOptional("CfiFunctionDefs", CfiFunctionDefs);
index.CfiFunctionDefs = {CfiFunctionDefs.begin(), CfiFunctionDefs.end()};
std::vector<std::string> CfiFunctionDecls;
io.mapOptional("CfiFunctionDecls", CfiFunctionDecls);
index.CfiFunctionDecls = {CfiFunctionDecls.begin(),
CfiFunctionDecls.end()};
}
}
};
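// Illustrative sketch (added for exposition; not part of the original header).
// Serializes a summary index to YAML via the traits above; assumes
// llvm/Support/raw_ostream.h is available and that the ModuleSummaryIndex
// constructor takes HaveGVs as of this header's vintage.
//   ModuleSummaryIndex Index(/*HaveGVs=*/false);
//   std::string Buf;
//   raw_string_ostream OS(Buf);
//   yaml::Output Yout(OS);
//   Yout << Index;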
} // End yaml namespace
} // End llvm namespace
#endif

Some files were not shown because too many files have changed in this diff