path: root/lib/CodeGen/DwarfEHPrepare.cpp
//===-- DwarfEHPrepare - Prepare exception handling for code generation ---===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass mulches exception handling code into a form adapted to code
// generation.  It is required when using DWARF exception handling.
//
//===----------------------------------------------------------------------===//

#define DEBUG_TYPE "dwarfehprepare"
#include "llvm/Function.h"
#include "llvm/Instructions.h"
#include "llvm/IntrinsicInst.h"
#include "llvm/Module.h"
#include "llvm/Pass.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/Dominators.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/Support/CallSite.h"
#include "llvm/Target/TargetLowering.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace llvm;

STATISTIC(NumLandingPadsSplit,     "Number of landing pads split");
STATISTIC(NumUnwindsLowered,       "Number of unwind instructions lowered");
STATISTIC(NumExceptionValuesMoved, "Number of eh.exception calls moved");
STATISTIC(NumStackTempsIntroduced, "Number of stack temporaries introduced");

namespace {
  class DwarfEHPrepare : public FunctionPass {
    const TargetMachine *TM;
    const TargetLowering *TLI;
    bool CompileFast;

    // The eh.exception intrinsic.
    Function *ExceptionValueIntrinsic;

    // The eh.selector intrinsic.
    Function *SelectorIntrinsic;

    // _Unwind_Resume_or_Rethrow call.
    Constant *URoR;

    // The EH language-specific catch-all type.
    GlobalVariable *EHCatchAllValue;

    // _Unwind_Resume or the target equivalent.
    Constant *RewindFunction;

    // Dominator info is used when turning stack temporaries into registers.
    DominatorTree *DT;
    DominanceFrontier *DF;

    // The function we are running on.
    Function *F;

    // The landing pads for this function.
    typedef SmallPtrSet<BasicBlock*, 8> BBSet;
    BBSet LandingPads;

    // Stack temporary used to hold eh.exception values.
    AllocaInst *ExceptionValueVar;

    bool NormalizeLandingPads();
    bool LowerUnwinds();
    bool MoveExceptionValueCalls();
    bool FinishStackTemporaries();
    bool PromoteStackTemporaries();

    Instruction *CreateExceptionValueCall(BasicBlock *BB);
    Instruction *CreateValueLoad(BasicBlock *BB);

    /// CreateReadOfExceptionValue - Return the result of the eh.exception
    /// intrinsic by calling the intrinsic if in a landing pad, or loading it
    /// from the exception value variable otherwise.
    Instruction *CreateReadOfExceptionValue(BasicBlock *BB) {
      return LandingPads.count(BB) ?
        CreateExceptionValueCall(BB) : CreateValueLoad(BB);
    }

    /// CleanupSelectors - Any remaining eh.selector intrinsic calls which still
    /// use the "llvm.eh.catch.all.value" global need to be converted to use its
    /// initializer instead.
    bool CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels);

    bool HasCatchAllInSelector(IntrinsicInst *);

    /// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
    void FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
                                 SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels);

    /// FindAllURoRInvokes - Find all URoR invokes in the function.
    void FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes);

    /// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow"
    /// calls. The "unwind" part of these invokes jumps to a landing pad within
    /// the current function. Such an invoke is a candidate for merging the
    /// selector associated with the URoR invoke with the one from the URoR's
    /// landing pad.
    bool HandleURoRInvokes();

    /// FindSelectorAndURoR - Find the eh.selector call and URoR call associated
    /// with the eh.exception call. This recursively looks past instructions
    /// which don't change the EH pointer value, like casts or PHI nodes.
    bool FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
                             SmallPtrSet<IntrinsicInst*, 8> &SelCalls);
      
    /// PromoteStoreInst - Perform Mem2Reg on a StoreInst.
    bool PromoteStoreInst(StoreInst *SI) {
      if (!SI || !DT || !DF) return false;
      
      AllocaInst *AI = dyn_cast<AllocaInst>(SI->getOperand(1));
      if (!AI || !isAllocaPromotable(AI)) return false;
      
      // Turn the alloca into a register.
      std::vector<AllocaInst*> Allocas(1, AI);
      PromoteMemToReg(Allocas, *DT, *DF);
      return true;
    }

    /// PromoteEHPtrStore - Promote the storing of an EH pointer into a
    /// register. This should get rid of the store and subsequent loads.
    bool PromoteEHPtrStore(IntrinsicInst *II) {
      if (!DT || !DF) return false;

      bool Changed = false;
      StoreInst *SI;

      while (1) {
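        // Repeatedly pick a store that uses the EH pointer and promote the
        // alloca it stores to; stop once no such store can be promoted.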
        SI = 0;
        for (Value::use_iterator
               I = II->use_begin(), E = II->use_end(); I != E; ++I) {
          SI = dyn_cast<StoreInst>(*I);
          if (SI) break;
        }

        if (!PromoteStoreInst(SI))
          break;

        Changed = true;
      }

      return Changed;
    }

  public:
    static char ID; // Pass identification, replacement for typeid.
    DwarfEHPrepare(const TargetMachine *tm, bool fast) :
      FunctionPass(ID), TM(tm), TLI(TM->getTargetLowering()),
      CompileFast(fast),
      ExceptionValueIntrinsic(0), SelectorIntrinsic(0),
      URoR(0), EHCatchAllValue(0), RewindFunction(0) {}

    virtual bool runOnFunction(Function &Fn);

    // getAnalysisUsage - We need the dominator tree and dominance frontiers
    // for memory promotion. They are only required when not compiling fast,
    // but are preserved in either case.
    virtual void getAnalysisUsage(AnalysisUsage &AU) const {
      if (!CompileFast)
        AU.addRequired<DominatorTree>();
      AU.addPreserved<DominatorTree>();
      if (!CompileFast)
        AU.addRequired<DominanceFrontier>();
      AU.addPreserved<DominanceFrontier>();
    }

    const char *getPassName() const {
      return "Exception handling preparation";
    }

  };
} // end anonymous namespace

char DwarfEHPrepare::ID = 0;

FunctionPass *llvm::createDwarfEHPass(const TargetMachine *tm, bool fast) {
  return new DwarfEHPrepare(tm, fast);
}

/// HasCatchAllInSelector - Return true if the intrinsic instruction has a
/// catch-all.
bool DwarfEHPrepare::HasCatchAllInSelector(IntrinsicInst *II) {
  if (!EHCatchAllValue) return false;

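  // The catch-all value, if present, appears as the last argument of the
  // eh.selector call.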
  unsigned ArgIdx = II->getNumArgOperands() - 1;
  GlobalVariable *GV = dyn_cast<GlobalVariable>(II->getArgOperand(ArgIdx));
  return GV == EHCatchAllValue;
}

/// FindAllCleanupSelectors - Find all eh.selector calls that are clean-ups.
void DwarfEHPrepare::
FindAllCleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels,
                        SmallPtrSet<IntrinsicInst*, 32> &CatchAllSels) {
  for (Value::use_iterator
         I = SelectorIntrinsic->use_begin(),
         E = SelectorIntrinsic->use_end(); I != E; ++I) {
    IntrinsicInst *II = cast<IntrinsicInst>(*I);

    if (II->getParent()->getParent() != F)
      continue;

    if (!HasCatchAllInSelector(II))
      Sels.insert(II);
    else
      CatchAllSels.insert(II);
  }
}

/// FindAllURoRInvokes - Find all URoR invokes in the function.
void DwarfEHPrepare::
FindAllURoRInvokes(SmallPtrSet<InvokeInst*, 32> &URoRInvokes) {
  for (Value::use_iterator
         I = URoR->use_begin(),
         E = URoR->use_end(); I != E; ++I) {
    if (InvokeInst *II = dyn_cast<InvokeInst>(*I))
      URoRInvokes.insert(II);
  }
}

/// CleanupSelectors - Any remaining eh.selector intrinsic calls which still use
/// the "llvm.eh.catch.all.value" global need to be converted to use its
/// initializer instead.
bool DwarfEHPrepare::CleanupSelectors(SmallPtrSet<IntrinsicInst*, 32> &Sels) {
  if (!EHCatchAllValue) return false;

  if (!SelectorIntrinsic) {
    SelectorIntrinsic =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
    if (!SelectorIntrinsic) return false;
  }

  bool Changed = false;
  for (SmallPtrSet<IntrinsicInst*, 32>::iterator
         I = Sels.begin(), E = Sels.end(); I != E; ++I) {
    IntrinsicInst *Sel = *I;

    // Index of the "llvm.eh.catch.all.value" variable.
    unsigned OpIdx = Sel->getNumArgOperands() - 1;
    GlobalVariable *GV = dyn_cast<GlobalVariable>(Sel->getArgOperand(OpIdx));
    if (GV != EHCatchAllValue) continue;
    Sel->setArgOperand(OpIdx, EHCatchAllValue->getInitializer());
    Changed = true;
  }

  return Changed;
}

/// FindSelectorAndURoR - Find the eh.selector call associated with the
/// eh.exception call, and indicate whether there is a URoR "invoke" associated
/// with that call. This recursively looks past instructions which don't change
/// the EH pointer value, such as casts and PHI nodes.
bool
DwarfEHPrepare::FindSelectorAndURoR(Instruction *Inst, bool &URoRInvoke,
                                    SmallPtrSet<IntrinsicInst*, 8> &SelCalls) {
  SmallPtrSet<PHINode*, 32> SeenPHIs;
  bool Changed = false;

 restart:
  for (Value::use_iterator
         I = Inst->use_begin(), E = Inst->use_end(); I != E; ++I) {
    Instruction *II = dyn_cast<Instruction>(*I);
    if (!II || II->getParent()->getParent() != F) continue;
    
    if (IntrinsicInst *Sel = dyn_cast<IntrinsicInst>(II)) {
      if (Sel->getIntrinsicID() == Intrinsic::eh_selector)
        SelCalls.insert(Sel);
    } else if (InvokeInst *Invoke = dyn_cast<InvokeInst>(II)) {
      if (Invoke->getCalledFunction() == URoR)
        URoRInvoke = true;
    } else if (CastInst *CI = dyn_cast<CastInst>(II)) {
      Changed |= FindSelectorAndURoR(CI, URoRInvoke, SelCalls);
    } else if (StoreInst *SI = dyn_cast<StoreInst>(II)) {
      if (!PromoteStoreInst(SI)) continue;
      Changed = true;
      SeenPHIs.clear();
      goto restart;             // Uses may have changed, restart loop.
    } else if (PHINode *PN = dyn_cast<PHINode>(II)) {
      if (SeenPHIs.insert(PN))
        // Don't process a PHI node more than once.
        Changed |= FindSelectorAndURoR(PN, URoRInvoke, SelCalls);
    }
  }

  return Changed;
}

/// HandleURoRInvokes - Handle invokes of "_Unwind_Resume_or_Rethrow" calls. The
/// "unwind" part of these invokes jumps to a landing pad within the current
/// function. Such an invoke is a candidate for merging the selector associated
/// with the URoR invoke with the one from the URoR's landing pad.
bool DwarfEHPrepare::HandleURoRInvokes() {
  if (!EHCatchAllValue) {
    EHCatchAllValue =
      F->getParent()->getNamedGlobal("llvm.eh.catch.all.value");
    if (!EHCatchAllValue) return false;
  }

  if (!SelectorIntrinsic) {
    SelectorIntrinsic =
      Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_selector);
    if (!SelectorIntrinsic) return false;
  }

  SmallPtrSet<IntrinsicInst*, 32> Sels;
  SmallPtrSet<IntrinsicInst*, 32> CatchAllSels;
  FindAllCleanupSelectors(Sels, CatchAllSels);

  if (!DT)
    // We require DominatorTree information.
    return CleanupSelectors(CatchAllSels);

  if (!URoR) {
    URoR = F->getParent()->getFunction("_Unwind_Resume_or_Rethrow");
    if (!URoR) return CleanupSelectors(CatchAllSels);
  }

  SmallPtrSet<InvokeInst*, 32> URoRInvokes;
  FindAllURoRInvokes(URoRInvokes);

  SmallPtrSet<IntrinsicInst*, 32> SelsToConvert;

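  // A clean-up selector whose block dominates the block containing a URoR
  // invoke is taken to cover that rethrow, so mark it for conversion to a
  // catch-all selector.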
  for (SmallPtrSet<IntrinsicInst*, 32>::iterator
         SI = Sels.begin(), SE = Sels.end(); SI != SE; ++SI) {
    const BasicBlock *SelBB = (*SI)->getParent();
    for (SmallPtrSet<InvokeInst*, 32>::iterator
           UI = URoRInvokes.begin(), UE = URoRInvokes.end(); UI != UE; ++UI) {
      const BasicBlock *URoRBB = (*UI)->getParent();
      if (DT->dominates(SelBB, URoRBB)) {
        SelsToConvert.insert(*SI);
        break;
      }
    }
  }

  bool Changed = false;

  if (Sels.size() != SelsToConvert.size()) {
    // If we haven't been able to convert all of the clean-up selectors, then
    // loop through the slow way to see if they still need to be converted.
    if (!ExceptionValueIntrinsic) {
      ExceptionValueIntrinsic =
        Intrinsic::getDeclaration(F->getParent(), Intrinsic::eh_exception);
      if (!ExceptionValueIntrinsic)
        return CleanupSelectors(CatchAllSels);
    }

    for (Value::use_iterator
           I = ExceptionValueIntrinsic->use_begin(),
           E = ExceptionValueIntrinsic->use_end(); I != E; ++I) {
      IntrinsicInst *EHPtr = dyn_cast<IntrinsicInst>(*I);
      if (!EHPtr || EHPtr->getParent()->getParent() != F) continue;

      Changed |= PromoteEHPtrStore(EHPtr);

      bool URoRInvoke = false;
      SmallPtrSet<IntrinsicInst*, 8> SelCalls;
      Changed |= FindSelectorAndURoR(EHPtr, URoRInvoke, SelCalls);

      if (URoRInvoke) {
        // This EH pointer is being used by an invoke of a URoR instruction and
        // an eh.selector intrinsic call. If the eh.selector is a 'clean-up', we
        // need to convert it to a 'catch-all'.
        for (SmallPtrSet<IntrinsicInst*, 8>::iterator
               SI = SelCalls.begin(), SE = SelCalls.end(); SI != SE; ++SI)
          if (!HasCatchAllInSelector(*SI))
            SelsToConvert.insert(*SI);
      }
    }
  }

  if (!SelsToConvert.empty()) {
    // Convert all clean-up eh.selectors that are associated with "invokes" of
    // URoR calls into catch-all eh.selectors.
    Changed = true;

    for (SmallPtrSet<IntrinsicInst*, 8>::iterator
           SI = SelsToConvert.begin(), SE = SelsToConvert.end();
         SI != SE; ++SI) {
      IntrinsicInst *II = *SI;

      // Use the exception object pointer and the personality function
      // from the original selector.
      CallSite CS(II);
      IntrinsicInst::op_iterator I = CS.arg_begin();
      IntrinsicInst::op_iterator E = CS.arg_end();
      IntrinsicInst::op_iterator B = prior(E);

      // Exclude last argument if it is an integer.
      if (isa<ConstantInt>(B)) E = B;
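      // This drops the trailing clean-up marker (if any) so that the catch-all
      // indicator added below becomes the final argument.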

      // Add exception object pointer (front).
      // Add personality function (next).
      // Add in any filter IDs (rest).
      SmallVector<Value*, 8> Args(I, E);

      Args.push_back(EHCatchAllValue->getInitializer()); // Catch-all indicator.

      CallInst *NewSelector =
        CallInst::Create(SelectorIntrinsic, Args.begin(), Args.end(),
                         "eh.sel.catch.all", II);

      NewSelector->setTailCall(II->isTailCall());
      NewSelector->setAttributes(II->getAttributes());
      NewSelector->setCallingConv(II->getCallingConv());

      II->replaceAllUsesWith(NewSelector);
      II->eraseFromParent();
    }
  }

  Changed |= CleanupSelectors(CatchAllSels);
  return Changed;
}

/// NormalizeLandingPads - Normalize and discover landing pads, noting them
/// in the LandingPads set.  A landing pad is normal if the only CFG edges
/// that end at it are unwind edges from invoke instructions. If we inlined
/// through an invoke we could have a normal branch from the previous
/// unwind block through to the landing pad for the original invoke.
/// Abnormal landing pads are fixed up by redirecting all unwind edges to
/// a new basic block which falls through to the original.
bool DwarfEHPrepare::NormalizeLandingPads() {
  bool Changed = false;

  const MCAsmInfo *MAI = TM->getMCAsmInfo();
  bool usingSjLjEH = MAI->getExceptionHandlingType() == ExceptionHandling::SjLj;

  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
    TerminatorInst *TI = I->getTerminator();
    if (!isa<InvokeInst>(TI))
      continue;
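    // The second successor of an invoke is its unwind destination, i.e. the
    // landing pad.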
    BasicBlock *LPad = TI->getSuccessor(1);
    // Skip landing pads that have already been normalized.
    if (LandingPads.count(LPad))
      continue;

    // Check that only invoke unwind edges end at the landing pad.
    bool OnlyUnwoundTo = true;
    bool SwitchOK = usingSjLjEH;
    for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad);
         PI != PE; ++PI) {
      TerminatorInst *PT = (*PI)->getTerminator();
      // The SjLj dispatch block uses a switch instruction. This is effectively
      // an unwind edge, so we can disregard it here. There will only ever
      // be one dispatch, however, so if there are multiple switches, one
      // of them truly is a normal edge, not an unwind edge.
      if (SwitchOK && isa<SwitchInst>(PT)) {
        SwitchOK = false;
        continue;
      }
      if (!isa<InvokeInst>(PT) || LPad == PT->getSuccessor(0)) {
        OnlyUnwoundTo = false;
        break;
      }
    }

    if (OnlyUnwoundTo) {
      // Only unwind edges lead to the landing pad.  Remember the landing pad.
      LandingPads.insert(LPad);
      continue;
    }

    // At least one normal edge ends at the landing pad.  Redirect the unwind
    // edges to a new basic block which falls through into this one.

    // Create the new basic block.
    BasicBlock *NewBB = BasicBlock::Create(F->getContext(),
                                           LPad->getName() + "_unwind_edge");

    // Insert it into the function right before the original landing pad.
    LPad->getParent()->getBasicBlockList().insert(LPad, NewBB);

    // Redirect unwind edges from the original landing pad to NewBB.
    for (pred_iterator PI = pred_begin(LPad), PE = pred_end(LPad); PI != PE; ) {
      TerminatorInst *PT = (*PI++)->getTerminator();
      if (isa<InvokeInst>(PT) && PT->getSuccessor(1) == LPad)
        // Unwind to the new block.
        PT->setSuccessor(1, NewBB);
    }

    // If there are any PHI nodes in LPad, we need to update them so that they
    // merge incoming values from NewBB instead.
    for (BasicBlock::iterator II = LPad->begin(); isa<PHINode>(II); ++II) {
      PHINode *PN = cast<PHINode>(II);
      pred_iterator PB = pred_begin(NewBB), PE = pred_end(NewBB);

      // Check to see if all of the values coming in via unwind edges are the
      // same.  If so, we don't need to create a new PHI node.
      Value *InVal = PN->getIncomingValueForBlock(*PB);
      for (pred_iterator PI = PB; PI != PE; ++PI) {
        if (PI != PB && InVal != PN->getIncomingValueForBlock(*PI)) {
          InVal = 0;
          break;
        }
      }

      if (InVal == 0) {
        // Different unwind edges have different values.  Create a new PHI node
        // in NewBB.
        PHINode *NewPN = PHINode::Create(PN->getType(), PN->getName()+".unwind",
                                         NewBB);
        // Add an entry for each unwind edge, using the value from the old PHI.
        for (pred_iterator PI = PB; PI != PE; ++PI)
          NewPN->addIncoming(PN->getIncomingValueForBlock(*PI), *PI);

        // Now use this new PHI as the common incoming value for NewBB in PN.
        InVal = NewPN;
      }

      // Revector exactly one entry in the PHI node to come from NewBB
      // and delete all other entries that come from unwind edges.  If
      // there are both normal and unwind edges from the same predecessor,
      // this leaves an entry for the normal edge.
      for (pred_iterator PI = PB; PI != PE; ++PI)
        PN->removeIncomingValue(*PI);
      PN->addIncoming(InVal, NewBB);
    }

    // Add a fallthrough from NewBB to the original landing pad.
    BranchInst::Create(LPad, NewBB);

    // Now update DominatorTree and DominanceFrontier analysis information.
    if (DT)
      DT->splitBlock(NewBB);
    if (DF)
      DF->splitBlock(NewBB);

    // Remember the newly constructed landing pad.  The original landing pad
    // LPad is no longer a landing pad now that all unwind edges have been
    // revectored to NewBB.
    LandingPads.insert(NewBB);
    ++NumLandingPadsSplit;
    Changed = true;
  }

  return Changed;
}

/// LowerUnwinds - Turn unwind instructions into calls to _Unwind_Resume,
/// rethrowing any previously caught exception.  This will crash horribly
/// at runtime if there is no such exception: using unwind to throw a new
/// exception is currently not supported.
bool DwarfEHPrepare::LowerUnwinds() {
  SmallVector<TerminatorInst*, 16> UnwindInsts;

  for (Function::iterator I = F->begin(), E = F->end(); I != E; ++I) {
    TerminatorInst *TI = I->getTerminator();
    if (isa<UnwindInst>(TI))
      UnwindInsts.push_back(TI);
  }

  if (UnwindInsts.empty()) return false;

  // Find the rewind function if we didn't already.
  if (!RewindFunction) {
    LLVMContext &Ctx = UnwindInsts[0]->getContext();
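    // The rewind function takes a single i8* argument (the exception object)
    // and returns void.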
    std::vector<const Type*>
      Params(1, Type::getInt8PtrTy(Ctx));
    FunctionType *FTy = FunctionType::get(Type::getVoidTy(Ctx),
                                          Params, false);
    const char *RewindName = TLI->getLibcallName(RTLIB::UNWIND_RESUME);
    RewindFunction = F->getParent()->getOrInsertFunction(RewindName, FTy);
  }

  bool Changed = false;

  for (SmallVectorImpl<TerminatorInst*>::iterator
         I = UnwindInsts.begin(), E = UnwindInsts.end(); I != E; ++I) {
    TerminatorInst *TI = *I;

    // Replace the unwind instruction with a call to _Unwind_Resume (or the
    // appropriate target equivalent) followed by an UnreachableInst.

    // Create the call...
    CallInst *CI = CallInst::Create(RewindFunction,
                                    CreateReadOfExceptionValue(TI->getParent()),
                                    "", TI);
    CI->setCallingConv(TLI->getLibcallCallingConv(RTLIB::UNWIND_RESUME));
    // ...followed by an UnreachableInst.
    new UnreachableInst(TI->getContext(), TI);

    // Nuke the unwind instruction.
    TI->eraseFromParent();
    ++NumUnwindsLowered;
    Changed = true;
  }

  return Changed;
}

/// MoveExceptionValueCalls - Ensure that eh.exception is only ever called from
/// landing pads by replacing calls outside of landing pads with loads from a
/// stack temporary.  Move eh.exception calls inside landing pads to the start
/// of the landing pad (optional, but may make things simpler for later passes).
bool DwarfEHPrepare::MoveExceptionValueCalls() {
  // If the eh.exception intrinsic is not declared in the module then there is
  // nothing to do.  Speed up compilation by checking for this common case.
  if (!ExceptionValueIntrinsic &&
      !F->getParent()->getFunction(Intrinsic::getName(Intrinsic::eh_exception)))
    return false;

  bool Changed = false;

  for (Function::iterator BB = F->begin(), E = F->end(); BB != E; ++BB) {
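    // Note that the inner iterator is advanced before CI may be erased below,
    // so erasing it does not invalidate the iteration.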
    for (BasicBlock::iterator II = BB->begin(), E = BB->end(); II != E;)
      if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(II++))
        if (CI->getIntrinsicID() == Intrinsic::eh_exception) {
          if (!CI->use_empty()) {
            Value *ExceptionValue = CreateReadOfExceptionValue(BB);
            if (CI == ExceptionValue) {
              // The call was at the start of a landing pad - leave it alone.
              assert(LandingPads.count(BB) &&
                     "Created eh.exception call outside landing pad!");
              continue;
            }
            CI->replaceAllUsesWith(ExceptionValue);
          }
          CI->eraseFromParent();
          ++NumExceptionValuesMoved;
          Changed = true;
        }
  }

  return Changed;
}

/// FinishStackTemporaries - If we introduced a stack variable to hold the
/// exception value then initialize it in each landing pad.
bool DwarfEHPrepare::FinishStackTemporaries() {
  if (!ExceptionValueVar)
    // Nothing to do.
    return false;

  bool Changed = false;

  // Make sure that there is a store of the exception value at the start of
  // each landing pad.
  for (BBSet::iterator LI = LandingPads.begin(), LE = LandingPads.end();
       LI != LE; ++LI) {
    Instruction *ExceptionValue = CreateReadOfExceptionValue(*LI);
    Instruction *Store = new StoreInst(ExceptionValue, ExceptionValueVar);
    Store->insertAfter(ExceptionValue);
    Changed = true;
  }

  return Changed;
}

/// PromoteStackTemporaries - Turn any stack temporaries we introduced into
/// registers if possible.
bool DwarfEHPrepare::PromoteStackTemporaries() {
  if (ExceptionValueVar && DT && DF && isAllocaPromotable(ExceptionValueVar)) {
    // Turn the exception temporary into registers and phi nodes if possible.
    std::vector<AllocaInst*> Allocas(1, ExceptionValueVar);
    PromoteMemToReg(Allocas, *DT, *DF);
    return true;
  }
  return false;
}

/// CreateExceptionValueCall - Insert a call to the eh.exception intrinsic at
/// the start of the basic block (unless there already is one, in which case
/// the existing call is returned).
Instruction *DwarfEHPrepare::CreateExceptionValueCall(BasicBlock *BB) {
  Instruction *Start = BB->getFirstNonPHIOrDbg();
  // Is this a call to eh.exception?
  if (IntrinsicInst *CI = dyn_cast<IntrinsicInst>(Start))
    if (CI->getIntrinsicID() == Intrinsic::eh_exception)
      // Reuse the existing call.
      return Start;

  // Find the eh.exception intrinsic if we didn't already.
  if (!ExceptionValueIntrinsic)
    ExceptionValueIntrinsic = Intrinsic::getDeclaration(F->getParent(),
                                                       Intrinsic::eh_exception);

  // Create the call.
  return CallInst::Create(ExceptionValueIntrinsic, "eh.value.call", Start);
}

/// CreateValueLoad - Insert a load of the exception value stack variable
/// (creating it if necessary) at the start of the basic block (unless
/// there already is a load, in which case the existing load is returned).
Instruction *DwarfEHPrepare::CreateValueLoad(BasicBlock *BB) {
  Instruction *Start = BB->getFirstNonPHIOrDbg();
  // Is this a load of the exception temporary?
  if (ExceptionValueVar)
    if (LoadInst* LI = dyn_cast<LoadInst>(Start))
      if (LI->getPointerOperand() == ExceptionValueVar)
        // Reuse the existing load.
        return Start;

  // Create the temporary if we didn't already.
  if (!ExceptionValueVar) {
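    // Allocate an i8* slot at the start of the entry block so that it is
    // available throughout the function and can later be promoted to a
    // register.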
    ExceptionValueVar = new AllocaInst(PointerType::getUnqual(
           Type::getInt8Ty(BB->getContext())), "eh.value", F->begin()->begin());
    ++NumStackTempsIntroduced;
  }

  // Load the value.
  return new LoadInst(ExceptionValueVar, "eh.value.load", Start);
}

bool DwarfEHPrepare::runOnFunction(Function &Fn) {
  bool Changed = false;

  // Initialize internal state.
  DT = getAnalysisIfAvailable<DominatorTree>();
  DF = getAnalysisIfAvailable<DominanceFrontier>();
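  // These analyses are only computed when not compiling fast (see
  // getAnalysisUsage); when they are absent, the memory-promotion steps are
  // skipped.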
  ExceptionValueVar = 0;
  F = &Fn;

  // Ensure that only unwind edges end at landing pads (a landing pad is a
  // basic block where an invoke unwind edge ends).
  Changed |= NormalizeLandingPads();

  // Turn unwind instructions into libcalls.
  Changed |= LowerUnwinds();

  // TODO: Move eh.selector calls to landing pads and combine them.

  // Move eh.exception calls to landing pads.
  Changed |= MoveExceptionValueCalls();

  // Initialize any stack temporaries we introduced.
  Changed |= FinishStackTemporaries();

  // Turn any stack temporaries into registers if possible.
  if (!CompileFast)
    Changed |= PromoteStackTemporaries();

  Changed |= HandleURoRInvokes();

  LandingPads.clear();

  return Changed;
}