aboutsummaryrefslogtreecommitdiff
path: root/lib/CodeGen/TargetInfo.h
blob: a122e882e2fb277cff972ed1c487cb8937463231 (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
//===---- TargetInfo.h - Encapsulate target details -------------*- C++ -*-===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#ifndef CLANG_CODEGEN_TARGETINFO_H
#define CLANG_CODEGEN_TARGETINFO_H

#include "clang/AST/Type.h"
#include "clang/Basic/LLVM.h"
#include "llvm/ADT/StringRef.h"

namespace llvm {
  class GlobalValue;
  class Type;
  class Value;
}

namespace clang {
  class ABIInfo;
  class Decl;

  namespace CodeGen {
    class CallArgList;
    class CodeGenModule;
    class CodeGenFunction;
    class CGFunctionInfo;
  }

  /// TargetCodeGenInfo - This class organizes various target-specific
  /// code generation issues, like target-specific attributes, builtins and
  /// so on.  Each target provides a subclass overriding the hooks it needs;
  /// the defaults here implement reasonable target-independent behavior.
  class TargetCodeGenInfo {
    ABIInfo *Info;  // Owned by this object (see constructor WARNING below).
  public:
    // WARNING: Acquires the ownership of ABIInfo.
    TargetCodeGenInfo(ABIInfo *info = 0):Info(info) { }
    virtual ~TargetCodeGenInfo();

    /// getABIInfo() - Returns ABI info helper for the target.
    /// Note: dereferences Info unconditionally, so this must not be called
    /// on an instance constructed with the default (null) argument.
    const ABIInfo& getABIInfo() const { return *Info; }

    /// SetTargetAttributes - Provides a convenient hook to handle extra
    /// target-specific attributes for the given global.  Default: no-op.
    virtual void SetTargetAttributes(const Decl *D, llvm::GlobalValue *GV,
                                     CodeGen::CodeGenModule &M) const { }

    /// Determines the size of struct _Unwind_Exception on this platform,
    /// in 8-bit units.  The Itanium ABI defines this as:
    ///   struct _Unwind_Exception {
    ///     uint64 exception_class;
    ///     _Unwind_Exception_Cleanup_Fn exception_cleanup;
    ///     uint64 private_1;
    ///     uint64 private_2;
    ///   };
    virtual unsigned getSizeOfUnwindException() const;

    /// Controls whether __builtin_extend_pointer should sign-extend
    /// pointers to uint64_t or zero-extend them (the default).  Has
    /// no effect for targets:
    ///   - that have 64-bit pointers, or
    ///   - that cannot address through registers larger than pointers, or
    ///   - that implicitly ignore/truncate the top bits when addressing
    ///     through such registers.
    virtual bool extendPointerWithSExt() const { return false; }

    /// Controls whether BIpow* emit an intrinsic call instead of a library
    /// function call.  Default: true (use the intrinsic).
    virtual bool emitIntrinsicForPow() const { return true; }

    /// Determines the DWARF register number for the stack pointer, for
    /// exception-handling purposes.  Implements __builtin_dwarf_sp_column.
    ///
    /// Returns -1 if the operation is unsupported by this target.
    virtual int getDwarfEHStackPointer(CodeGen::CodeGenModule &M) const {
      return -1;
    }

    /// Initializes the given DWARF EH register-size table, a char*.
    /// Implements __builtin_init_dwarf_reg_size_table.
    ///
    /// Returns true if the operation is unsupported by this target
    /// (the default implementation does nothing and reports unsupported).
    virtual bool initDwarfEHRegSizeTable(CodeGen::CodeGenFunction &CGF,
                                         llvm::Value *Address) const {
      return true;
    }

    /// Performs the code-generation required to convert a return
    /// address as stored by the system into the actual address of the
    /// next instruction that will be executed.
    ///
    /// Used by __builtin_extract_return_addr().
    /// Default: identity (the stored value already is the address).
    virtual llvm::Value *decodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                             llvm::Value *Address) const {
      return Address;
    }

    /// Performs the code-generation required to convert the address
    /// of an instruction into a return address suitable for storage
    /// by the system in a return slot.
    ///
    /// Used by __builtin_frob_return_addr().
    /// Default: identity (inverse of the default decodeReturnAddress).
    virtual llvm::Value *encodeReturnAddress(CodeGen::CodeGenFunction &CGF,
                                             llvm::Value *Address) const {
      return Address;
    }

    /// Performs a target-specific adjustment of the LLVM IR type used for
    /// an inline-asm operand to satisfy the given constraint string (e.g.
    /// a register-class constraint that requires a particular width).
    /// Default: returns Ty unchanged.
    virtual llvm::Type* adjustInlineAsmType(CodeGen::CodeGenFunction &CGF,
                                            StringRef Constraint, 
                                            llvm::Type* Ty) const {
      return Ty;
    }

    /// Retrieve the address of a function to call immediately before
    /// calling objc_retainAutoreleasedReturnValue.  The
    /// implementation of objc_autoreleaseReturnValue sniffs the
    /// instruction stream following its return address to decide
    /// whether it's a call to objc_retainAutoreleasedReturnValue.
    /// This can be prohibitively expensive, depending on the
    /// relocation model, and so on some targets it instead sniffs for
    /// a particular instruction sequence.  This functions returns
    /// that instruction sequence in inline assembly, which will be
    /// empty if none is required.
    virtual StringRef getARCRetainAutoreleasedReturnValueMarker() const {
      return "";
    }

    /// Determine whether a call to an unprototyped functions under
    /// the given calling convention should use the variadic
    /// convention or the non-variadic convention.
    ///
    /// There's a good reason to make a platform's variadic calling
    /// convention be different from its non-variadic calling
    /// convention: the non-variadic arguments can be passed in
    /// registers (better for performance), and the variadic arguments
    /// can be passed on the stack (also better for performance).  If
    /// this is done, however, unprototyped functions *must* use the
    /// non-variadic convention, because C99 states that a call
    /// through an unprototyped function type must succeed if the
    /// function was defined with a non-variadic prototype with
    /// compatible parameters.  Therefore, splitting the conventions
    /// makes it impossible to call a variadic function through an
    /// unprototyped type.  Since function prototypes came out in the
    /// late 1970s, this is probably an acceptable trade-off.
    /// Nonetheless, not all platforms are willing to make it, and in
    /// particularly x86-64 bends over backwards to make the
    /// conventions compatible.
    ///
    /// The default is false.  This is correct whenever:
    ///   - the conventions are exactly the same, because it does not
    ///     matter and the resulting IR will be somewhat prettier in
    ///     certain cases; or
    ///   - the conventions are substantively different in how they pass
    ///     arguments, because in this case using the variadic convention
    ///     will lead to C99 violations.
    ///
    /// However, some platforms make the conventions identical except
    /// for passing additional out-of-band information to a variadic
    /// function: for example, x86-64 passes the number of SSE
    /// arguments in %al.  On these platforms, it is desireable to
    /// call unprototyped functions using the variadic convention so
    /// that unprototyped calls to varargs functions still succeed.
    virtual bool isNoProtoCallVariadic(const CodeGen::CallArgList &args,
                                       const FunctionNoProtoType *fnType) const;

    // @LOCALMOD-START
    /// Determine whether the sequentially consistent fence generated for
    /// the legacy GCC-style ``__sync_synchronize()`` builtin should be
    /// surrounded by empty assembly directives which touch all of
    /// memory. This allows platforms which aim for portability to
    /// isolate themselves from changes in sequentially consistent
    /// fence's semantics, since its intent is to represent the
    /// C11/C++11 memory model which only orders atomic memory accesses.
    /// This won't guarantee that all accesses (e.g. those to
    /// non-escaping objects) will not be reordered.
    virtual bool addAsmMemoryAroundSyncSynchronize() const {
      return false;
    }

    /// Determine whether a full sequentially consistent fence should be
    /// emitted when ``asm("":::"memory")`` is encountered, treating it
    /// like ``__sync_synchronize()``.
    virtual bool asmMemoryIsFence() const {
      return false;
    }
    // @LOCALMOD-END
  };
}

#endif // CLANG_CODEGEN_TARGETINFO_H