//===-- X86JITInfo.cpp - Implement the JIT interfaces for the X86 target --===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This file implements the JIT interfaces for the X86 target.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "jit"
#include "X86JITInfo.h"
#include "X86Relocations.h"
#include "X86Subtarget.h"
#include "X86TargetMachine.h"
#include "llvm/Function.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/Valgrind.h"
#include <cstdlib>
#include <cstring>
using namespace llvm;

// Determine the platform we're running on
#if defined (__x86_64__) || defined (_M_AMD64) || defined (_M_X64)
# define X86_64_JIT
#elif defined(__i386__) || defined(i386) || defined(_M_IX86)
# define X86_32_JIT
#endif

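/// replaceMachineCodeForFunction - Overwrite the first five bytes of the old
/// function with an unconditional jump (0xE9 + rel32) to the new code.  The
/// displacement is relative to the end of the 5-byte jmp instruction.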
void X86JITInfo::replaceMachineCodeForFunction(void *Old, void *New) {
  unsigned char *OldByte = (unsigned char *)Old;
  *OldByte++ = 0xE9;                // Emit JMP opcode.
  unsigned *OldWord = (unsigned *)OldByte;
  unsigned NewAddr = (intptr_t)New;
  unsigned OldAddr = (intptr_t)OldWord;
  *OldWord = NewAddr - OldAddr - 4; // Emit PC-relative addr of New code.

  // X86 doesn't need to invalidate the processor cache, so just invalidate
  // Valgrind's cache directly.
  sys::ValgrindDiscardTranslations(Old, 5);
}


/// JITCompilerFunction - This contains the address of the JIT function used to
/// compile a function lazily.
static TargetJITInfo::JITCompilerFn JITCompilerFunction;

// Get the ASMPREFIX for the current host.  This is often '_'.
#ifndef __USER_LABEL_PREFIX__
#define __USER_LABEL_PREFIX__
#endif
#define GETASMPREFIX2(X) #X
#define GETASMPREFIX(X) GETASMPREFIX2(X)
#define ASMPREFIX GETASMPREFIX(__USER_LABEL_PREFIX__)
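// For example, __USER_LABEL_PREFIX__ is '_' on Darwin, so the callback
// symbol below is emitted as "_X86CompilationCallback".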

// For ELF targets, use a .size and .type directive, to let tools
// know the extent of functions defined in assembler.
#if defined(__ELF__)
# define SIZE(sym) ".size " #sym ", . - " #sym "\n"
# define TYPE_FUNCTION(sym) ".type " #sym ", @function\n"
#else
# define SIZE(sym)
# define TYPE_FUNCTION(sym)
#endif

// Provide a convenient way to disable the use of CFI directives.
// This is needed for old/broken assemblers (for example, gas on
// Darwin is pretty old and doesn't support these directives).
#if defined(__APPLE__)
# define CFI(x)
#else
// FIXME: Disable this until we really want to use it. Also, we will
//        need to add some workarounds for compilers, which support
//        only subset of these directives.
# define CFI(x)
#endif

// Provide a wrapper for X86CompilationCallback2 that saves the
// non-traditional callee-saved registers used by the fastcc calling
// convention.
extern "C" {
#if defined(X86_64_JIT)
# ifndef _MSC_VER
  // No need to save EAX/EDX for X86-64.
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    // Save RBP
    "pushq   %rbp\n"
    CFI(".cfi_def_cfa_offset 16\n")
    CFI(".cfi_offset %rbp, -16\n")
    // Save RSP
    "movq    %rsp, %rbp\n"
    CFI(".cfi_def_cfa_register %rbp\n")
    // Save all int arg registers
    "pushq   %rdi\n"
    CFI(".cfi_rel_offset %rdi, 0\n")
    "pushq   %rsi\n"
    CFI(".cfi_rel_offset %rsi, 8\n")
    "pushq   %rdx\n"
    CFI(".cfi_rel_offset %rdx, 16\n")
    "pushq   %rcx\n"
    CFI(".cfi_rel_offset %rcx, 24\n")
    "pushq   %r8\n"
    CFI(".cfi_rel_offset %r8, 32\n")
    "pushq   %r9\n"
    CFI(".cfi_rel_offset %r9, 40\n")
    // Align the stack on a 16-byte boundary. RSP might only be 8-byte
    // aligned if this is called from an indirect stub.
    "andq    $-16, %rsp\n"
    // Save all XMM arg registers
    "subq    $128, %rsp\n"
    "movaps  %xmm0, (%rsp)\n"
    "movaps  %xmm1, 16(%rsp)\n"
    "movaps  %xmm2, 32(%rsp)\n"
    "movaps  %xmm3, 48(%rsp)\n"
    "movaps  %xmm4, 64(%rsp)\n"
    "movaps  %xmm5, 80(%rsp)\n"
    "movaps  %xmm6, 96(%rsp)\n"
    "movaps  %xmm7, 112(%rsp)\n"
    // JIT callee
#ifdef _WIN64
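    // Allocate the 32-byte shadow space required by the Win64 calling
    // convention before making the call.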
    "subq    $32, %rsp\n"
    "movq    %rbp, %rcx\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rdx\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "addq    $32, %rsp\n"
#else
    "movq    %rbp, %rdi\n"    // Pass prev frame and return address
    "movq    8(%rbp), %rsi\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
#endif
    // Restore all XMM arg registers
    "movaps  112(%rsp), %xmm7\n"
    "movaps  96(%rsp), %xmm6\n"
    "movaps  80(%rsp), %xmm5\n"
    "movaps  64(%rsp), %xmm4\n"
    "movaps  48(%rsp), %xmm3\n"
    "movaps  32(%rsp), %xmm2\n"
    "movaps  16(%rsp), %xmm1\n"
    "movaps  (%rsp), %xmm0\n"
    // Restore RSP
    "movq    %rbp, %rsp\n"
    CFI(".cfi_def_cfa_register %rsp\n")
    // Restore all int arg registers
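    // The six GPR argument registers were pushed immediately below the saved
    // RBP, so step RSP back down to them before popping.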
    "subq    $48, %rsp\n"
    CFI(".cfi_adjust_cfa_offset 48\n")
    "popq    %r9\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r9\n")
    "popq    %r8\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %r8\n")
    "popq    %rcx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rcx\n")
    "popq    %rdx\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdx\n")
    "popq    %rsi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rsi\n")
    "popq    %rdi\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rdi\n")
    // Restore RBP
    "popq    %rbp\n"
    CFI(".cfi_adjust_cfa_offset -8\n")
    CFI(".cfi_restore %rbp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );
# else
  // No inline assembler support on this platform. The routine is in an
  // external file.
  void X86CompilationCallback();

# endif
#elif defined (X86_32_JIT)
# ifndef _MSC_VER
  void X86CompilationCallback(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback\n"
    TYPE_FUNCTION(X86CompilationCallback)
  ASMPREFIX "X86CompilationCallback:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
#  if defined(__APPLE__)
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
#  endif
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
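    // EAX/EDX/ECX were pushed just below the saved EBP; step back to them.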
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback)
  );

  // Same as X86CompilationCallback but also saves XMM argument registers.
  void X86CompilationCallback_SSE(void);
  asm(
    ".text\n"
    ".align 8\n"
    ".globl " ASMPREFIX "X86CompilationCallback_SSE\n"
    TYPE_FUNCTION(X86CompilationCallback_SSE)
  ASMPREFIX "X86CompilationCallback_SSE:\n"
    CFI(".cfi_startproc\n")
    "pushl   %ebp\n"
    CFI(".cfi_def_cfa_offset 8\n")
    CFI(".cfi_offset %ebp, -8\n")
    "movl    %esp, %ebp\n"    // Standard prologue
    CFI(".cfi_def_cfa_register %ebp\n")
    "pushl   %eax\n"
    CFI(".cfi_rel_offset %eax, 0\n")
    "pushl   %edx\n"          // Save EAX/EDX/ECX
    CFI(".cfi_rel_offset %edx, 4\n")
    "pushl   %ecx\n"
    CFI(".cfi_rel_offset %ecx, 8\n")
    "andl    $-16, %esp\n"    // Align ESP on 16-byte boundary
    // Save all XMM arg registers
    "subl    $64, %esp\n"
    // FIXME: provide frame move information for xmm registers.
    // This can be tricky, because CFA register is ebp (unaligned)
    // and we need to produce offsets relative to it.
    "movaps  %xmm0, (%esp)\n"
    "movaps  %xmm1, 16(%esp)\n"
    "movaps  %xmm2, 32(%esp)\n"
    "movaps  %xmm3, 48(%esp)\n"
    "subl    $16, %esp\n"
    "movl    4(%ebp), %eax\n" // Pass prev frame and return address
    "movl    %eax, 4(%esp)\n"
    "movl    %ebp, (%esp)\n"
    "call    " ASMPREFIX "X86CompilationCallback2\n"
    "addl    $16, %esp\n"
    "movaps  48(%esp), %xmm3\n"
    CFI(".cfi_restore %xmm3\n")
    "movaps  32(%esp), %xmm2\n"
    CFI(".cfi_restore %xmm2\n")
    "movaps  16(%esp), %xmm1\n"
    CFI(".cfi_restore %xmm1\n")
    "movaps  (%esp), %xmm0\n"
    CFI(".cfi_restore %xmm0\n")
    "movl    %ebp, %esp\n"    // Restore ESP
    CFI(".cfi_def_cfa_register %esp\n")
    "subl    $12, %esp\n"
    CFI(".cfi_adjust_cfa_offset 12\n")
    "popl    %ecx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ecx\n")
    "popl    %edx\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %edx\n")
    "popl    %eax\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %eax\n")
    "popl    %ebp\n"
    CFI(".cfi_adjust_cfa_offset -4\n")
    CFI(".cfi_restore %ebp\n")
    "ret\n"
    CFI(".cfi_endproc\n")
    SIZE(X86CompilationCallback_SSE)
  );
# else
  void X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr);

  __declspec(naked) void X86CompilationCallback(void) {
    __asm {
      push  ebp
      mov   ebp, esp
      push  eax
      push  edx
      push  ecx
      and   esp, -16
      sub   esp, 16
      mov   eax, dword ptr [ebp+4]
      mov   dword ptr [esp+4], eax
      mov   dword ptr [esp], ebp
      call  X86CompilationCallback2
      mov   esp, ebp
      sub   esp, 12
      pop   ecx
      pop   edx
      pop   eax
      pop   ebp
      ret
    }
  }

# endif // _MSC_VER

#else // Not an x86 host
  void X86CompilationCallback() {
    llvm_unreachable("Cannot call X86CompilationCallback() on a non-x86 arch!");
  }
#endif
}

/// X86CompilationCallback2 - This is the target-specific function invoked by
/// the function stub when the real target of a call is not yet known.  This
/// function must locate the start of the stub or call site and pass it into
/// the JIT compiler function.
extern "C" {
#if !(defined (X86_64_JIT) && defined(_MSC_VER))
 // The following function is called only from this translation unit,
 // unless we are on 64-bit Windows with MSVC, where there is no inline
 // assembly support and it must be visible to the externally-assembled
 // callback.
static
#endif
void LLVM_ATTRIBUTE_USED
X86CompilationCallback2(intptr_t *StackPtr, intptr_t RetAddr) {
  intptr_t *RetAddrLoc = &StackPtr[1];
  assert(*RetAddrLoc == RetAddr &&
         "Could not find return address on the stack!");

  // It's a stub if there is an interrupt marker after the call.
  bool isStub = ((unsigned char*)RetAddr)[0] == 0xCE;
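  // (emitFunctionStub places the 0xCE marker immediately after the call
  // instruction in lazy-compilation stubs.)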

  // The call instruction should have pushed the return address onto the stack...
#if defined (X86_64_JIT)
  RetAddr--;     // Backtrack to the reference itself...
#else
  RetAddr -= 4;  // Backtrack to the reference itself...
#endif

#if 0
  DEBUG(dbgs() << "In callback! Addr=" << (void*)RetAddr
               << " ESP=" << (void*)StackPtr
               << ": Resolving call to function: "
               << TheVM->getFunctionReferencedName((void*)RetAddr) << "\n");
#endif

  // Sanity check to make sure this really is a call instruction.
#if defined (X86_64_JIT)
  assert(((unsigned char*)RetAddr)[-2] == 0x41 && "Not a call instr!");
  assert(((unsigned char*)RetAddr)[-1] == 0xFF && "Not a call instr!");
#else
  assert(((unsigned char*)RetAddr)[-1] == 0xE8 && "Not a call instr!");
#endif

  intptr_t NewVal = (intptr_t)JITCompilerFunction((void*)RetAddr);

  // Rewrite the call target... so that we don't end up here every time we
  // execute the call.
#if defined (X86_64_JIT)
  assert(isStub &&
         "X86-64 doesn't support rewriting non-stub lazy compilation calls:"
         " the call instruction varies too much.");
#else
  *(intptr_t *)RetAddr = (intptr_t)(NewVal - RetAddr - 4);
#endif

  if (isStub) {
    // If this is a stub, rewrite the call into an unconditional branch
    // instruction so that two return addresses are not pushed onto the stack
    // when the requested function finally gets called.  This also makes the
    // 0xCE byte (interrupt) dead, so the marker doesn't affect anything.
#if defined (X86_64_JIT)
    // If the target address is within 32-bit range of the stub, use a
    // PC-relative branch instead of loading the actual address.  (This is
    // considerably shorter than the 64-bit immediate load already there.)
    // We assume here intptr_t is 64 bits.
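    // The jmp is written at RetAddr-0xc; its rel32 operand is relative to the
    // end of that 5-byte instruction (RetAddr-7), hence the +7.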
    intptr_t diff = NewVal - RetAddr + 7;
    if (diff >= -2147483648LL && diff <= 2147483647LL) {
      *(unsigned char*)(RetAddr-0xc) = 0xE9;
      *(intptr_t *)(RetAddr-0xb) = diff & 0xffffffff;
    } else {
      *(intptr_t *)(RetAddr - 0xa) = NewVal;
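      // Rewrite the ModRM byte: mod=11, reg=4 (/4 => JMP), r/m=2 (r10 with
      // the existing REX.B prefix), turning "callq *%r10" into "jmpq *%r10".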
      ((unsigned char*)RetAddr)[0] = (2 | (4 << 3) | (3 << 6));
    }
    sys::ValgrindDiscardTranslations((void*)(RetAddr-0xc), 0xd);
#else
    ((unsigned char*)RetAddr)[-1] = 0xE9;
    sys::ValgrindDiscardTranslations((void*)(RetAddr-1), 5);
#endif
  }

  // Change the return address to reexecute the call instruction...
#if defined (X86_64_JIT)
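  // 0xd = 13 bytes: the 10-byte movabs plus the 3-byte indirect call.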
  *RetAddrLoc -= 0xd;
#else
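  // 5 bytes: the size of the call rel32 instruction.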
  *RetAddrLoc -= 5;
#endif
}
}

TargetJITInfo::LazyResolverFn
X86JITInfo::getLazyResolverFunction(JITCompilerFn F) {
  JITCompilerFunction = F;

#if defined (X86_32_JIT) && !defined (_MSC_VER)
  if (Subtarget->hasSSE1())
    return X86CompilationCallback_SSE;
#endif

  return X86CompilationCallback;
}

X86JITInfo::X86JITInfo(X86TargetMachine &tm) : TM(tm) {
  Subtarget = &TM.getSubtarget<X86Subtarget>();
  useGOT = 0;
  TLSOffset = 0;
}

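/// emitGlobalValueIndirectSym - Emit a pointer-sized, little-endian slot
/// holding 'ptr' to serve as the indirection cell for the global value GV.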
void *X86JITInfo::emitGlobalValueIndirectSym(const GlobalValue* GV, void *ptr,
                                             JITCodeEmitter &JCE) {
#if defined (X86_64_JIT)
  const unsigned Alignment = 8;
  uint8_t Buffer[8];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(intptr_t)ptr);
  MachineCodeEmitter::emitWordLEInto(Cur, (unsigned)(((intptr_t)ptr) >> 32));
#else
  const unsigned Alignment = 4;
  uint8_t Buffer[4];
  uint8_t *Cur = Buffer;
  MachineCodeEmitter::emitWordLEInto(Cur, (intptr_t)ptr);
#endif
  return JCE.allocIndirectGV(GV, Buffer, sizeof(Buffer), Alignment);
}

TargetJITInfo::StubLayout X86JITInfo::getStubLayout() {
  // The 64-bit stub contains:
  //   movabs r10 <- 8-byte-target-address  # 10 bytes
  //   call|jmp *r10  # 3 bytes
  // The 32-bit stub contains a 5-byte call|jmp.
  // If the stub is a call to the compilation callback, an extra byte is added
  // to mark it as a stub.
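  // 14 is the worst case: the 13-byte 64-bit stub plus the marker byte.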
  StubLayout Result = {14, 4};
  return Result;
}

void *X86JITInfo::emitFunctionStub(const Function* F, void *Target,
                                   JITCodeEmitter &JCE) {
  // Note, we cast to intptr_t here to silence a -pedantic warning that
  // complains about casting a function pointer to a normal pointer.
#if defined (X86_32_JIT) && !defined (_MSC_VER)
  bool NotCC = (Target != (void*)(intptr_t)X86CompilationCallback &&
                Target != (void*)(intptr_t)X86CompilationCallback_SSE);
#else
  bool NotCC = Target != (void*)(intptr_t)X86CompilationCallback;
#endif
  JCE.emitAlignment(4);
  void *Result = (void*)JCE.getCurrentPCValue();
  if (NotCC) {
#if defined (X86_64_JIT)
    JCE.emitByte(0x49);          // REX prefix
    JCE.emitByte(0xB8+2);        // movabsq r10
    JCE.emitWordLE((unsigned)(intptr_t)Target);
    JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
    JCE.emitByte(0x41);          // REX prefix
    JCE.emitByte(0xFF);          // jmpq *r10
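    // ModRM: mod=11, reg=/4 (JMP), r/m=r10.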
    JCE.emitByte(2 | (4 << 3) | (3 << 6));
#else
    JCE.emitByte(0xE9);
    JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif
    return Result;
  }

#if defined (X86_64_JIT)
  JCE.emitByte(0x49);          // REX prefix
  JCE.emitByte(0xB8+2);        // movabsq r10
  JCE.emitWordLE((unsigned)(intptr_t)Target);
  JCE.emitWordLE((unsigned)(((intptr_t)Target) >> 32));
  JCE.emitByte(0x41);          // REX prefix
  JCE.emitByte(0xFF);          // callq *r10
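  // ModRM: mod=11, reg=/2 (CALL), r/m=r10.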
  JCE.emitByte(2 | (2 << 3) | (3 << 6));
#else
  JCE.emitByte(0xE8);   // Call with 32 bit pc-rel destination...

  JCE.emitWordLE((intptr_t)Target-JCE.getCurrentPCValue()-4);
#endif

  // This used to use 0xCD, but that value is used by JITMemoryManager to
  // initialize the buffer with garbage, which means it may follow a
  // noreturn function call, confusing X86CompilationCallback2.  PR 4929.
  JCE.emitByte(0xCE);   // Interrupt - Just a marker identifying the stub!
  return Result;
}

/// getPICJumpTableEntry - Returns the value of the jumptable entry for the
/// specified basic block.
uintptr_t X86JITInfo::getPICJumpTableEntry(uintptr_t BB, uintptr_t Entry) {
#if defined(X86_64_JIT)
  return BB - Entry;
#else
  return BB - PICBase;
#endif
}

/// relocate - Before the JIT can run a block of code that has been emitted,
/// it must rewrite the code to contain the actual addresses of any
/// referenced global symbols.
void X86JITInfo::relocate(void *Function, MachineRelocation *MR,
                          unsigned NumRelocs, unsigned char* GOTBase) {
  for (unsigned i = 0; i != NumRelocs; ++i, ++MR) {
    void *RelocPos = (char*)Function + MR->getMachineCodeOffset();
    intptr_t ResultPtr = (intptr_t)MR->getResultPointer();
    switch ((X86::RelocationType)MR->getRelocationType()) {
    case X86::reloc_pcrel_word: {
      // PC relative relocation, add the relocated value to the value already in
      // memory, after we adjust it for where the PC is.
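      // The rel32 displacement is relative to the end of the 4-byte field,
      // hence the extra -4.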
      ResultPtr = ResultPtr - (intptr_t)RelocPos - 4 - MR->getConstantVal();
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_picrel_word: {
      // PIC base relative relocation, add the relocated value to the value
      // already in memory, after we adjust it for where the PIC base is.
      ResultPtr = ResultPtr - ((intptr_t)Function + MR->getConstantVal());
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    }
    case X86::reloc_absolute_word:
    case X86::reloc_absolute_word_sext:
      // Absolute relocation, just add the relocated value to the value already
      // in memory.
      *((unsigned*)RelocPos) += (unsigned)ResultPtr;
      break;
    case X86::reloc_absolute_dword:
      *((intptr_t*)RelocPos) += ResultPtr;
      break;
    }
  }
}

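/// allocateThreadLocalMemory - Reserve 'size' bytes of thread-local storage
/// by bumping the running TLS offset downward.  The returned value is a
/// negative offset from the thread base rather than a real pointer; this is
/// only supported on x86-32, non-Darwin, non-MSVC hosts.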
char* X86JITInfo::allocateThreadLocalMemory(size_t size) {
#if defined(X86_32_JIT) && !defined(__APPLE__) && !defined(_MSC_VER)
  TLSOffset -= size;
  return TLSOffset;
#else
  llvm_unreachable("Cannot allocate thread local storage on this arch!");
  return 0;
#endif
}