author | Chris Lattner <sabre@nondot.org> | 2011-04-15 05:18:47 +0000
committer | Chris Lattner <sabre@nondot.org> | 2011-04-15 05:18:47 +0000
commit | 7a2bdde0a0eebcd2125055e0eacaca040f0b766c (patch)
tree | 1cd5fa470f290368855c9081cb213ed118812805
parent | bcb8c6d09ee426e0f774e3412912f6ae9e5f78dd (diff)
Fix a ton of comment typos found by codespell. Patch by
Luis Felipe Strano Moraes!
git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@129558 91177308-0d34-0410-b5e6-96231b3b80d8
193 files changed, 266 insertions, 267 deletions
diff --git a/CMakeLists.txt b/CMakeLists.txt index 471b45eb81..48cd55b095 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -154,7 +154,7 @@ else() option(LLVM_ENABLE_ASSERTIONS "Enable assertions" ON) endif() -# All options refered to from HandleLLVMOptions have to be specified +# All options referred to from HandleLLVMOptions have to be specified # BEFORE this include, otherwise options will not be correctly set on # first cmake run include(config-ix) diff --git a/Makefile.rules b/Makefile.rules index 3001262d73..71d4307aba 100644 --- a/Makefile.rules +++ b/Makefile.rules @@ -2024,7 +2024,7 @@ $(DistZip) : $(TopDistDir)/.makedistdir $(Verb) cd $(PROJ_OBJ_ROOT) ; $(ZIP) -rq $(DistZip) $(DistName) dist :: $(DistTarGZip) $(DistTarBZ2) $(DistZip) - $(Echo) ===== DISTRIBUTION PACKAGING SUCESSFUL ===== + $(Echo) ===== DISTRIBUTION PACKAGING SUCCESSFUL ===== DistCheckDir := $(PROJ_OBJ_ROOT)/_distcheckdir diff --git a/autoconf/m4/libtool.m4 b/autoconf/m4/libtool.m4 index a8b5e6a94f..e89738cc91 100644 --- a/autoconf/m4/libtool.m4 +++ b/autoconf/m4/libtool.m4 @@ -1118,7 +1118,7 @@ if test -n "$_LT_AC_TAGVAR(hardcode_libdir_flag_spec, $1)" || \ test -n "$_LT_AC_TAGVAR(runpath_var, $1)" || \ test "X$_LT_AC_TAGVAR(hardcode_automatic, $1)" = "Xyes" ; then - # We can hardcode non-existant directories. + # We can hardcode non-existent directories. if test "$_LT_AC_TAGVAR(hardcode_direct, $1)" != no && # If the only mechanism to avoid hardcoding is shlibpath_var, we # have to relink, otherwise we might link with an installed library diff --git a/autoconf/m4/ltdl.m4 b/autoconf/m4/ltdl.m4 index bc9e2ad241..407a16e2d6 100644 --- a/autoconf/m4/ltdl.m4 +++ b/autoconf/m4/ltdl.m4 @@ -156,7 +156,7 @@ AC_CACHE_CHECK([whether deplibs are loaded by dlopen], osf[[1234]]*) # dlopen did load deplibs (at least at 4.x), but until the 5.x series, # it did *not* use an RPATH in a shared library to find objects the - # library depends on, so we explictly say `no'. + # library depends on, so we explicitly say `no'. libltdl_cv_sys_dlopen_deplibs=no ;; osf5.0|osf5.0a|osf5.1) diff --git a/cmake/modules/LLVM-Config.cmake b/cmake/modules/LLVM-Config.cmake index bd6a7a2c55..a6286fee68 100755 --- a/cmake/modules/LLVM-Config.cmake +++ b/cmake/modules/LLVM-Config.cmake @@ -135,7 +135,7 @@ function(explicit_map_components_to_libraries out_libs) string(TOUPPER "${c}" capitalized) list(FIND capitalized_libs LLVM${capitalized} lib_idx) if( lib_idx LESS 0 ) - # The component is unkown. Maybe is an ommitted target? + # The component is unknown. Maybe is an omitted target? is_llvm_target_library(${c} iltl_result) if( NOT iltl_result ) message(FATAL_ERROR "Library `${c}' not found in list of llvm libraries.") @@ -11723,7 +11723,7 @@ else osf[1234]*) # dlopen did load deplibs (at least at 4.x), but until the 5.x series, # it did *not* use an RPATH in a shared library to find objects the - # library depends on, so we explictly say `no'. + # library depends on, so we explicitly say `no'. libltdl_cv_sys_dlopen_deplibs=no ;; osf5.0|osf5.0a|osf5.1) diff --git a/docs/CommandGuide/llvm-bcanalyzer.pod b/docs/CommandGuide/llvm-bcanalyzer.pod index 3b16b5de13..9c5021b639 100644 --- a/docs/CommandGuide/llvm-bcanalyzer.pod +++ b/docs/CommandGuide/llvm-bcanalyzer.pod @@ -268,7 +268,7 @@ The number of bytes consumed by instructions in the function. =item B<Average Instruction Size> -The average number of bytes consumed by the instructions in the funtion. This +The average number of bytes consumed by the instructions in the function. 
This value is computed by dividing Instruction Size by Instructions. =item B<Bytes Per Instruction> diff --git a/docs/CompilerDriver.html b/docs/CompilerDriver.html index 9ba0acd8d3..42e69b61b7 100644 --- a/docs/CompilerDriver.html +++ b/docs/CompilerDriver.html @@ -308,13 +308,13 @@ separate option groups syntactically.</p> <tt class="docutils literal"><span class="pre">-std=c99</span></tt>. It is also allowed to use spaces instead of the equality sign: <tt class="docutils literal"><span class="pre">-std</span> <span class="pre">c99</span></tt>. At most one occurrence is allowed.</li> <li><tt class="docutils literal"><span class="pre">parameter_list_option</span></tt> - same as the above, but more than one option -occurence is allowed.</li> +occurrence is allowed.</li> <li><tt class="docutils literal"><span class="pre">prefix_option</span></tt> - same as the parameter_option, but the option name and argument do not have to be separated. Example: <tt class="docutils literal"><span class="pre">-ofile</span></tt>. This can be also specified as <tt class="docutils literal"><span class="pre">-o</span> <span class="pre">file</span></tt>; however, <tt class="docutils literal"><span class="pre">-o=file</span></tt> will be parsed incorrectly (<tt class="docutils literal"><span class="pre">=file</span></tt> will be interpreted as option value). At most one occurrence is allowed.</li> -<li><tt class="docutils literal"><span class="pre">prefix_list_option</span></tt> - same as the above, but more than one occurence of +<li><tt class="docutils literal"><span class="pre">prefix_list_option</span></tt> - same as the above, but more than one occurrence of the option is allowed; example: <tt class="docutils literal"><span class="pre">-lm</span> <span class="pre">-lpthread</span></tt>.</li> <li><tt class="docutils literal"><span class="pre">alias_option</span></tt> - a special option type for creating aliases. Unlike other option types, aliases are not allowed to have any properties besides the diff --git a/docs/ExceptionHandling.html b/docs/ExceptionHandling.html index 9cf1324dd6..1738bdf5cc 100644 --- a/docs/ExceptionHandling.html +++ b/docs/ExceptionHandling.html @@ -507,7 +507,7 @@ style exception handling. The single parameter is a pointer to a buffer populated by <a href="#llvm_eh_sjlj_setjmp"> <tt>llvm.eh.sjlj.setjmp</tt></a>. The frame pointer and stack pointer - are restored from the buffer, then control is transfered to the + are restored from the buffer, then control is transferred to the destination address.</p> </div> diff --git a/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt index 1c725f5aa7..81ca53919d 100644 --- a/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt +++ b/docs/HistoricalNotes/2000-11-18-EarlyDesignIdeasResp.txt @@ -60,11 +60,11 @@ Understood. :) Yup, I think that this makes a lot of sense. I am still intrigued, however, by the prospect of a minimally allocated VM representation... I -think that it could have definate advantages for certain applications +think that it could have definite advantages for certain applications (think very small machines, like PDAs). I don't, however, think that our initial implementations should focus on this. :) -Here are some other auxilliary goals that I think we should consider: +Here are some other auxiliary goals that I think we should consider: 1. Primary goal: Support a high performance dynamic compilation system. 
This means that we have an "ideal" division of labor between diff --git a/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt b/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt index b66e18556f..01b644b351 100644 --- a/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt +++ b/docs/HistoricalNotes/2000-12-06-MeetingSummary.txt @@ -40,7 +40,7 @@ IDEAS TO CONSIDER packaged with the bytecodes themselves. As a conceptual implementation idea, we could include an immediate dominator number for each basic block in the LLVM bytecode program. Basic blocks could be numbered according - to the order of occurance in the bytecode representation. + to the order of occurrence in the bytecode representation. 2. Including loop header and body information. This would facilitate detection of intervals and natural loops. diff --git a/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt index 7b9032742a..839732444f 100644 --- a/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt +++ b/docs/HistoricalNotes/2001-02-06-TypeNotationDebateResp4.txt @@ -39,7 +39,7 @@ declaration and calling syntax. Very true. If you're implementing an object oriented language, however, remember that you have to do all the pointer to member function stuff -yourself.... so everytime you invoke a virtual method one is involved +yourself.... so every time you invoke a virtual method one is involved (instead of having C++ hide it for you behind "syntactic sugar"). > And the old array syntax: diff --git a/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt b/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt index 5c87330fb7..da50263665 100644 --- a/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt +++ b/docs/HistoricalNotes/2001-02-09-AdveCommentsResponse.txt @@ -18,7 +18,7 @@ suggested, as specified below: Very true. We should discuss this more, but my reasoning is more of a consistency argument. There are VERY few instructions that can have all -of the types eliminated, and doing so when available unnecesarily makes +of the types eliminated, and doing so when available unnecessarily makes the language more difficult to handle. Especially when you see 'int %this' and 'bool %that' all over the place, I think it would be disorienting to see: @@ -44,7 +44,7 @@ branches). No. This was something I was debating for a while, and didn't really feel strongly about either way. It is common to switch on other types in HLL's -(for example signed int's are particually common), but in this case, all +(for example signed int's are particularly common), but in this case, all that will be added is an additional 'cast' instruction. I removed that from the spec. @@ -160,7 +160,7 @@ that can be trivally translated into a conditional move... > I agree that we need a static data space. Otherwise, emulating global > data gets unnecessarily complex. -Definately. Also a later item though. :) +Definitely. Also a later item though. :) > We once talked about adding a symbolic thread-id field to each > .. diff --git a/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt b/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt index 6c9e0971a0..e61042fd65 100644 --- a/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt +++ b/docs/HistoricalNotes/2001-06-01-GCCOptimizations2.txt @@ -42,7 +42,7 @@ Does using GCC's backend buy us anything? > optimization (step 16 in your list). Do you have a breakdown of that? Not really. 
The irritating part of GCC is that it mixes it all up and -doesn't have a clean seperation of concerns. A lot of the "back end +doesn't have a clean separation of concerns. A lot of the "back end optimization" happens right along with other data optimizations (ie, CSE of machine specific things). diff --git a/docs/HistoricalNotes/2002-05-12-InstListChange.txt b/docs/HistoricalNotes/2002-05-12-InstListChange.txt index 004edb068d..638682b49f 100644 --- a/docs/HistoricalNotes/2002-05-12-InstListChange.txt +++ b/docs/HistoricalNotes/2002-05-12-InstListChange.txt @@ -17,7 +17,7 @@ iterator to an instruction, which, given just an Instruction*, requires a linear search of the basic block the instruction is contained in... just to insert an instruction before another instruction, or to delete an instruction! This complicates algorithms that should be very simple (like -simple constant propogation), because they aren't actually sparse anymore, +simple constant propagation), because they aren't actually sparse anymore, they have to traverse basic blocks to remove constant propogated instructions. diff --git a/docs/LangRef.html b/docs/LangRef.html index dfb0bd1d7e..8c5ba804c7 100644 --- a/docs/LangRef.html +++ b/docs/LangRef.html @@ -2369,11 +2369,11 @@ b: unreachable <a href="#terminators">terminator instruction</a> if the terminator instruction has multiple successors and the instruction is always executed when control transfers to one of the successors, and - may not be executed when control is transfered to another.</li> + may not be executed when control is transferred to another.</li> <li>Additionally, an instruction also <i>control-depends</i> on a terminator instruction if the set of instructions it otherwise depends on would be - different if the terminator had transfered control to a different + different if the terminator had transferred control to a different successor.</li> <li>Dependence is transitive.</li> diff --git a/examples/ExceptionDemo/ExceptionDemo.cpp b/examples/ExceptionDemo/ExceptionDemo.cpp index 5fa10ed6d3..e5bd377770 100644 --- a/examples/ExceptionDemo/ExceptionDemo.cpp +++ b/examples/ExceptionDemo/ExceptionDemo.cpp @@ -1577,7 +1577,7 @@ void throwCppException (int32_t ignoreIt) { typedef void (*OurExceptionThrowFunctType) (int32_t typeToThrow); /// This is a test harness which runs test by executing generated -/// function with a type info type to throw. Harness wraps the excecution +/// function with a type info type to throw. Harness wraps the execution /// of generated function in a C++ try catch clause. /// @param engine execution engine to use for executing generated function. /// This demo program expects this to be a JIT instance for demo diff --git a/include/llvm-c/Disassembler.h b/include/llvm-c/Disassembler.h index dda7111ca2..9f10973404 100644 --- a/include/llvm-c/Disassembler.h +++ b/include/llvm-c/Disassembler.h @@ -31,7 +31,7 @@ typedef void *LLVMDisasmContextRef; * the call back in the DisInfo parameter. The instruction containing operand * is at the PC parameter. For some instruction sets, there can be more than * one operand with symbolic information. To determine the symbolic operand - * infomation for each operand, the bytes for the specific operand in the + * information for each operand, the bytes for the specific operand in the * instruction are specified by the Offset parameter and its byte widith is the * size parameter. 
For instructions sets with fixed widths and one symbolic * operand per instruction, the Offset parameter will be zero and Size parameter @@ -109,7 +109,7 @@ extern "C" { * Create a disassembler for the TripleName. Symbolic disassembly is supported * by passing a block of information in the DisInfo parameter and specifing the * TagType and call back functions as described above. These can all be passed - * as NULL. If successfull this returns a disassembler context if not it + * as NULL. If successful this returns a disassembler context if not it * returns NULL. */ extern LLVMDisasmContextRef @@ -127,7 +127,7 @@ LLVMDisasmDispose(LLVMDisasmContextRef DC); /** * Disassmble a single instruction using the disassembler context specified in - * the parameter DC. The bytes of the instuction are specified in the parameter + * the parameter DC. The bytes of the instruction are specified in the parameter * Bytes, and contains at least BytesSize number of bytes. The instruction is * at the address specified by the PC parameter. If a valid instruction can be * disassembled its string is returned indirectly in OutString which whos size diff --git a/include/llvm-c/lto.h b/include/llvm-c/lto.h index e4ede9cc6d..7ea7ad01a2 100644 --- a/include/llvm-c/lto.h +++ b/include/llvm-c/lto.h @@ -72,7 +72,7 @@ lto_get_version(void); /** - * Returns the last error string or NULL if last operation was sucessful. + * Returns the last error string or NULL if last operation was successful. */ extern const char* lto_get_error_message(void); @@ -263,7 +263,7 @@ lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path); /** * Generates code for all added modules into one native object file. - * On sucess returns a pointer to a generated mach-o/ELF buffer and + * On success returns a pointer to a generated mach-o/ELF buffer and * length set to the buffer size. The buffer is owned by the * lto_code_gen_t and will be freed when lto_codegen_dispose() * is called, or lto_codegen_compile() is called again. diff --git a/include/llvm/ADT/StringExtras.h b/include/llvm/ADT/StringExtras.h index acbed66ef4..a28545c470 100644 --- a/include/llvm/ADT/StringExtras.h +++ b/include/llvm/ADT/StringExtras.h @@ -153,7 +153,7 @@ void SplitString(StringRef Source, SmallVectorImpl<StringRef> &OutFragments, StringRef Delimiters = " \t\n\v\f\r"); -/// HashString - Hash funtion for strings. +/// HashString - Hash function for strings. /// /// This is the Bernstein hash function. // diff --git a/include/llvm/ADT/ilist.h b/include/llvm/ADT/ilist.h index 865fcb3d8a..bcacfd9df4 100644 --- a/include/llvm/ADT/ilist.h +++ b/include/llvm/ADT/ilist.h @@ -289,7 +289,7 @@ template<typename NodeTy> struct simplify_type<const ilist_iterator<NodeTy> > { //===----------------------------------------------------------------------===// // /// iplist - The subset of list functionality that can safely be used on nodes -/// of polymorphic types, i.e. a heterogenous list with a common base class that +/// of polymorphic types, i.e. a heterogeneous list with a common base class that /// holds the next/prev pointers. The only state of the list itself is a single /// pointer to the head of the list. /// diff --git a/include/llvm/Analysis/InlineCost.h b/include/llvm/Analysis/InlineCost.h index b08bf57ace..a0cce515e9 100644 --- a/include/llvm/Analysis/InlineCost.h +++ b/include/llvm/Analysis/InlineCost.h @@ -43,7 +43,7 @@ namespace llvm { /// InlineCost - Represent the cost of inlining a function. 
This /// supports special values for functions which should "always" or /// "never" be inlined. Otherwise, the cost represents a unitless - /// amount; smaller values increase the likelyhood of the function + /// amount; smaller values increase the likelihood of the function /// being inlined. class InlineCost { enum Kind { diff --git a/include/llvm/Analysis/RegionInfo.h b/include/llvm/Analysis/RegionInfo.h index 81b71f9fcb..9d8954595d 100644 --- a/include/llvm/Analysis/RegionInfo.h +++ b/include/llvm/Analysis/RegionInfo.h @@ -146,7 +146,7 @@ inline Region* RegionNode::getNodeAs<Region>() const { /// two connections to the remaining graph. It can be used to analyze or /// optimize parts of the control flow graph. /// -/// A <em> simple Region </em> is connected to the remaing graph by just two +/// A <em> simple Region </em> is connected to the remaining graph by just two /// edges. One edge entering the Region and another one leaving the Region. /// /// An <em> extended Region </em> (or just Region) is a subgraph that can be @@ -443,7 +443,7 @@ public: /// @brief Move all direct child nodes of this Region to another Region. /// - /// @param To The Region the child nodes will be transfered to. + /// @param To The Region the child nodes will be transferred to. void transferChildrenTo(Region *To); /// @brief Verify if the region is a correct region. diff --git a/include/llvm/Analysis/RegionIterator.h b/include/llvm/Analysis/RegionIterator.h index ced5b528cb..7adc71ca82 100644 --- a/include/llvm/Analysis/RegionIterator.h +++ b/include/llvm/Analysis/RegionIterator.h @@ -20,7 +20,7 @@ namespace llvm { //===----------------------------------------------------------------------===// -/// @brief Hierachical RegionNode successor iterator. +/// @brief Hierarchical RegionNode successor iterator. /// /// This iterator iterates over all successors of a RegionNode. /// diff --git a/include/llvm/Analysis/RegionPass.h b/include/llvm/Analysis/RegionPass.h index aedc06aa6c..5403e09c48 100644 --- a/include/llvm/Analysis/RegionPass.h +++ b/include/llvm/Analysis/RegionPass.h @@ -54,7 +54,7 @@ public: /// @brief Get a pass to print the LLVM IR in the region. /// /// @param O The ouput stream to print the Region. - /// @param Banner The banner to seperate different printed passes. + /// @param Banner The banner to separate different printed passes. /// /// @return The pass to print the LLVM IR in the region. Pass *createPrinterPass(raw_ostream &O, const std::string &Banner) const; diff --git a/include/llvm/Bitcode/Archive.h b/include/llvm/Bitcode/Archive.h index 4abfa6e844..f89a86cb0f 100644 --- a/include/llvm/Bitcode/Archive.h +++ b/include/llvm/Bitcode/Archive.h @@ -435,7 +435,7 @@ class Archive { /// to determine just enough information to create an ArchiveMember object /// which is then inserted into the Archive object's ilist at the location /// given by \p where. - /// @returns true if an error occured, false otherwise + /// @returns true if an error occurred, false otherwise /// @brief Add a file to the archive. bool addFileBefore( const sys::Path& filename, ///< The file to be added diff --git a/include/llvm/CodeGen/ISDOpcodes.h b/include/llvm/CodeGen/ISDOpcodes.h index 3da11c4a0e..f0de9361da 100644 --- a/include/llvm/CodeGen/ISDOpcodes.h +++ b/include/llvm/CodeGen/ISDOpcodes.h @@ -219,7 +219,7 @@ namespace ISD { // RESULT, BOOL = [SU]ADDO(LHS, RHS) - Overflow-aware nodes for addition. // These nodes take two operands: the normal LHS and RHS to the add. 
They // produce two results: the normal result of the add, and a boolean that - // indicates if an overflow occured (*not* a flag, because it may be stored + // indicates if an overflow occurred (*not* a flag, because it may be stored // to memory, etc.). If the type of the boolean is not i1 then the high // bits conform to getBooleanContents. // These nodes are generated from the llvm.[su]add.with.overflow intrinsics. diff --git a/include/llvm/CodeGen/JITCodeEmitter.h b/include/llvm/CodeGen/JITCodeEmitter.h index 54e70ba966..88e22d6a24 100644 --- a/include/llvm/CodeGen/JITCodeEmitter.h +++ b/include/llvm/CodeGen/JITCodeEmitter.h @@ -36,7 +36,7 @@ class GlobalValue; class Function; /// JITCodeEmitter - This class defines two sorts of methods: those for -/// emitting the actual bytes of machine code, and those for emitting auxillary +/// emitting the actual bytes of machine code, and those for emitting auxiliary /// structures, such as jump tables, relocations, etc. /// /// Emission of machine code is complicated by the fact that we don't (in diff --git a/include/llvm/CodeGen/MachineCodeEmitter.h b/include/llvm/CodeGen/MachineCodeEmitter.h index 8fc80adf7f..428aada7ba 100644 --- a/include/llvm/CodeGen/MachineCodeEmitter.h +++ b/include/llvm/CodeGen/MachineCodeEmitter.h @@ -34,7 +34,7 @@ class Function; class MCSymbol; /// MachineCodeEmitter - This class defines two sorts of methods: those for -/// emitting the actual bytes of machine code, and those for emitting auxillary +/// emitting the actual bytes of machine code, and those for emitting auxiliary /// structures, such as jump tables, relocations, etc. /// /// Emission of machine code is complicated by the fact that we don't (in @@ -54,7 +54,7 @@ protected: /// allocated for this code buffer. uint8_t *BufferBegin, *BufferEnd; /// CurBufferPtr - Pointer to the next byte of memory to fill when emitting - /// code. This is guranteed to be in the range [BufferBegin,BufferEnd]. If + /// code. This is guaranteed to be in the range [BufferBegin,BufferEnd]. If /// this pointer is at BufferEnd, it will never move due to code emission, and /// all code emission requests will be ignored (this is the buffer overflow /// condition). diff --git a/include/llvm/CodeGen/ScheduleDAG.h b/include/llvm/CodeGen/ScheduleDAG.h index 281832c607..4e0971fc2e 100644 --- a/include/llvm/CodeGen/ScheduleDAG.h +++ b/include/llvm/CodeGen/ScheduleDAG.h @@ -692,11 +692,11 @@ namespace llvm { /// will create a cycle. bool WillCreateCycle(SUnit *SU, SUnit *TargetSU); - /// AddPred - Updates the topological ordering to accomodate an edge + /// AddPred - Updates the topological ordering to accommodate an edge /// to be added from SUnit X to SUnit Y. void AddPred(SUnit *Y, SUnit *X); - /// RemovePred - Updates the topological ordering to accomodate an + /// RemovePred - Updates the topological ordering to accommodate an /// an edge to be removed from the specified node N from the predecessors /// of the current node M. void RemovePred(SUnit *M, SUnit *N); diff --git a/include/llvm/CodeGen/SelectionDAG.h b/include/llvm/CodeGen/SelectionDAG.h index b537a77a76..92fd0c9e1c 100644 --- a/include/llvm/CodeGen/SelectionDAG.h +++ b/include/llvm/CodeGen/SelectionDAG.h @@ -829,7 +829,7 @@ public: /// These functions only replace all existing uses. It's possible that as /// these replacements are being performed, CSE may cause the From node /// to be given new uses. These new uses of From are left in place, and - /// not automatically transfered to To. 
+ /// not automatically transferred to To. /// void ReplaceAllUsesWith(SDValue From, SDValue Op, DAGUpdateListener *UpdateListener = 0); diff --git a/include/llvm/CodeGen/SelectionDAGISel.h b/include/llvm/CodeGen/SelectionDAGISel.h index 54576794de..e0cfabd095 100644 --- a/include/llvm/CodeGen/SelectionDAGISel.h +++ b/include/llvm/CodeGen/SelectionDAGISel.h @@ -258,7 +258,7 @@ public: } virtual SDValue RunSDNodeXForm(SDValue V, unsigned XFormNo) { - assert(0 && "Tblgen shoudl generate this!"); + assert(0 && "Tblgen should generate this!"); return SDValue(); } diff --git a/include/llvm/CodeGen/SelectionDAGNodes.h b/include/llvm/CodeGen/SelectionDAGNodes.h index 64546394ce..9d265f1451 100644 --- a/include/llvm/CodeGen/SelectionDAGNodes.h +++ b/include/llvm/CodeGen/SelectionDAGNodes.h @@ -838,7 +838,7 @@ public: /// HandleSDNode - This class is used to form a handle around another node that -/// is persistant and is updated across invocations of replaceAllUsesWith on its +/// is persistent and is updated across invocations of replaceAllUsesWith on its /// operand. This node should be directly created by end-users and not added to /// the AllNodes list. class HandleSDNode : public SDNode { diff --git a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h index fba3e48c47..fceea8fe0e 100644 --- a/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h +++ b/include/llvm/CodeGen/TargetLoweringObjectFileImpl.h @@ -94,7 +94,7 @@ class TargetLoweringObjectFileMachO : public TargetLoweringObjectFile { /// const MCSection *TLSBSSSection; // Defaults to ".tbss". - /// TLSTLVSection - Section for thread local structure infomation. + /// TLSTLVSection - Section for thread local structure information. /// Contains the source code name of the variable, visibility and a pointer /// to the initial value (.tdata or .tbss). const MCSection *TLSTLVSection; // Defaults to ".tlv". diff --git a/include/llvm/ExecutionEngine/ExecutionEngine.h b/include/llvm/ExecutionEngine/ExecutionEngine.h index ef5e9ec644..a01ad3ae77 100644 --- a/include/llvm/ExecutionEngine/ExecutionEngine.h +++ b/include/llvm/ExecutionEngine/ExecutionEngine.h @@ -185,7 +185,7 @@ public: /// \param GVsWithCode - Allocating globals with code breaks /// freeMachineCodeForFunction and is probably unsafe and bad for performance. /// However, we have clients who depend on this behavior, so we must support - /// it. Eventually, when we're willing to break some backwards compatability, + /// it. Eventually, when we're willing to break some backwards compatibility, /// this flag should be flipped to false, so that by default /// freeMachineCodeForFunction works. static ExecutionEngine *create(Module *M, diff --git a/include/llvm/GlobalVariable.h b/include/llvm/GlobalVariable.h index 1769c665d0..442e0c0e1b 100644 --- a/include/llvm/GlobalVariable.h +++ b/include/llvm/GlobalVariable.h @@ -12,7 +12,7 @@ // // Global variables are constant pointers that refer to hunks of space that are // allocated by either the VM, or by the linker in a static compiler. A global -// variable may have an intial value, which is copied into the executables .data +// variable may have an initial value, which is copied into the executables .data // area. Global Constants are required to have initializers. 
// //===----------------------------------------------------------------------===// diff --git a/include/llvm/Instructions.h b/include/llvm/Instructions.h index f14893ad28..54dfe3957f 100644 --- a/include/llvm/Instructions.h +++ b/include/llvm/Instructions.h @@ -584,7 +584,7 @@ DEFINE_TRANSPARENT_OPERAND_ACCESSORS(GetElementPtrInst, Value) /// @brief Represent an integer comparison operator. class ICmpInst: public CmpInst { protected: - /// @brief Clone an indentical ICmpInst + /// @brief Clone an identical ICmpInst virtual ICmpInst *clone_impl() const; public: /// @brief Constructor with insert-before-instruction semantics. @@ -735,7 +735,7 @@ public: /// @brief Represents a floating point comparison operator. class FCmpInst: public CmpInst { protected: - /// @brief Clone an indentical FCmpInst + /// @brief Clone an identical FCmpInst virtual FCmpInst *clone_impl() const; public: /// @brief Constructor with insert-before-instruction semantics. diff --git a/include/llvm/MC/MCAsmLayout.h b/include/llvm/MC/MCAsmLayout.h index 01cb0006b3..a4585d1f19 100644 --- a/include/llvm/MC/MCAsmLayout.h +++ b/include/llvm/MC/MCAsmLayout.h @@ -36,8 +36,8 @@ private: /// List of sections in layout order. llvm::SmallVector<MCSectionData*, 16> SectionOrder; - /// The last fragment which was layed out, or 0 if nothing has been layed - /// out. Fragments are always layed out in order, so all fragments with a + /// The last fragment which was laid out, or 0 if nothing has been laid + /// out. Fragments are always laid out in order, so all fragments with a /// lower ordinal will be up to date. mutable DenseMap<const MCSectionData*, MCFragment *> LastValidFragment; @@ -58,7 +58,7 @@ public: void Invalidate(MCFragment *F); /// \brief Perform layout for a single fragment, assuming that the previous - /// fragment has already been layed out correctly, and the parent section has + /// fragment has already been laid out correctly, and the parent section has /// been initialized. void LayoutFragment(MCFragment *Fragment); diff --git a/include/llvm/MC/MCAssembler.h b/include/llvm/MC/MCAssembler.h index 30971c62a9..fc919669e8 100644 --- a/include/llvm/MC/MCAssembler.h +++ b/include/llvm/MC/MCAssembler.h @@ -706,7 +706,7 @@ private: /// \param DF The fragment the fixup is inside. /// \param Target [out] On return, the relocatable expression the fixup /// evaluates to. - /// \param Value [out] On return, the value of the fixup as currently layed + /// \param Value [out] On return, the value of the fixup as currently laid /// out. /// \return Whether the fixup value was fully resolved. This is true if the /// \arg Value result is fixed, otherwise the value may change due to @@ -745,7 +745,7 @@ private: MCFragment &F, const MCFixup &Fixup); public: - /// Compute the effective fragment size assuming it is layed out at the given + /// Compute the effective fragment size assuming it is laid out at the given /// \arg SectionAddress and \arg FragmentOffset. uint64_t ComputeFragmentSize(const MCAsmLayout &Layout, const MCFragment &F) const; diff --git a/include/llvm/Pass.h b/include/llvm/Pass.h index ed0fb39f5d..1de4782021 100644 --- a/include/llvm/Pass.h +++ b/include/llvm/Pass.h @@ -13,7 +13,7 @@ // Passes are designed this way so that it is possible to run passes in a cache // and organizationally optimal order without having to specify it at the front // end. This allows arbitrary passes to be strung together and have them -// executed as effeciently as possible. +// executed as efficiently as possible. 
// // Passes should extend one of the classes below, depending on the guarantees // that it can make about what will be modified as it is run. For example, most diff --git a/include/llvm/Support/GraphWriter.h b/include/llvm/Support/GraphWriter.h index a5165f44d5..eab0c9d18d 100644 --- a/include/llvm/Support/GraphWriter.h +++ b/include/llvm/Support/GraphWriter.h @@ -272,7 +272,7 @@ public: const void *DestNodeID, int DestNodePort, const std::string &Attrs) { if (SrcNodePort > 64) return; // Eminating from truncated part? - if (DestNodePort > 64) DestNodePort = 64; // Targetting the truncated part? + if (DestNodePort > 64) DestNodePort = 64; // Targeting the truncated part? O << "\tNode" << SrcNodeID; if (SrcNodePort >= 0) diff --git a/include/llvm/Support/PrettyStackTrace.h b/include/llvm/Support/PrettyStackTrace.h index 6dbce393b9..9b3ecda50c 100644 --- a/include/llvm/Support/PrettyStackTrace.h +++ b/include/llvm/Support/PrettyStackTrace.h @@ -20,7 +20,7 @@ namespace llvm { class raw_ostream; /// DisablePrettyStackTrace - Set this to true to disable this module. This - /// might be neccessary if the host application installs its own signal + /// might be necessary if the host application installs its own signal /// handlers which conflict with the ones installed by this module. /// Defaults to false. extern bool DisablePrettyStackTrace; diff --git a/include/llvm/Support/Program.h b/include/llvm/Support/Program.h index 78a495ef21..96b35660f9 100644 --- a/include/llvm/Support/Program.h +++ b/include/llvm/Support/Program.h @@ -102,7 +102,7 @@ namespace sys { ); /// This function terminates the program. - /// @returns true if an error occured. + /// @returns true if an error occurred. /// @see Execute /// @brief Terminates the program. bool Kill diff --git a/include/llvm/Support/Regex.h b/include/llvm/Support/Regex.h index b46a66889e..7648e77bfb 100644 --- a/include/llvm/Support/Regex.h +++ b/include/llvm/Support/Regex.h @@ -53,7 +53,7 @@ namespace llvm { /// matches - Match the regex against a given \arg String. /// - /// \param Matches - If given, on a succesful match this will be filled in + /// \param Matches - If given, on a successful match this will be filled in /// with references to the matched group expressions (inside \arg String), /// the first group is always the entire pattern. /// diff --git a/include/llvm/Support/Signals.h b/include/llvm/Support/Signals.h index 9a84df68dd..634f4cf76d 100644 --- a/include/llvm/Support/Signals.h +++ b/include/llvm/Support/Signals.h @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This file defines some helpful functions for dealing with the possibility of -// unix signals occuring while your program is running. +// unix signals occurring while your program is running. // //===----------------------------------------------------------------------===// diff --git a/include/llvm/Support/TimeValue.h b/include/llvm/Support/TimeValue.h index e1227118c2..94f132a05c 100644 --- a/include/llvm/Support/TimeValue.h +++ b/include/llvm/Support/TimeValue.h @@ -35,13 +35,13 @@ namespace sys { public: /// A constant TimeValue representing the smallest time - /// value permissable by the class. MinTime is some point + /// value permissible by the class. MinTime is some point /// in the distant past, about 300 billion years BCE. /// @brief The smallest possible time value. static const TimeValue MinTime; /// A constant TimeValue representing the largest time - /// value permissable by the class. 
MaxTime is some point + /// value permissible by the class. MaxTime is some point /// in the distant future, about 300 billion years AD. /// @brief The largest possible time value. static const TimeValue MaxTime; diff --git a/include/llvm/Target/TargetInstrInfo.h b/include/llvm/Target/TargetInstrInfo.h index c903f3153e..418f3fe062 100644 --- a/include/llvm/Target/TargetInstrInfo.h +++ b/include/llvm/Target/TargetInstrInfo.h @@ -477,7 +477,7 @@ public: } /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to - /// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should + /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should /// be scheduled togther. On some targets if two loads are loading from /// addresses in the same cache line, it's better if they are scheduled /// together. This function takes two integers that represent the load offsets diff --git a/include/llvm/Target/TargetLowering.h b/include/llvm/Target/TargetLowering.h index ac5b1f602f..7f714c98e5 100644 --- a/include/llvm/Target/TargetLowering.h +++ b/include/llvm/Target/TargetLowering.h @@ -1042,7 +1042,7 @@ protected: } /// JumpIsExpensive - Tells the code generator not to expand sequence of - /// operations into a seperate sequences that increases the amount of + /// operations into a separate sequences that increases the amount of /// flow control. void setJumpIsExpensive(bool isExpensive = true) { JumpIsExpensive = isExpensive; diff --git a/include/llvm/Transforms/Utils/Cloning.h b/include/llvm/Transforms/Utils/Cloning.h index 24ebb109a0..853de2dc03 100644 --- a/include/llvm/Transforms/Utils/Cloning.h +++ b/include/llvm/Transforms/Utils/Cloning.h @@ -207,7 +207,7 @@ public: /// /// Note that this only does one level of inlining. For example, if the /// instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now -/// exists in the instruction stream. Similiarly this will inline a recursive +/// exists in the instruction stream. Similarly this will inline a recursive /// function by one level. /// bool InlineFunction(CallInst *C, InlineFunctionInfo &IFI); diff --git a/include/llvm/TypeSymbolTable.h b/include/llvm/TypeSymbolTable.h index 9fdcb98323..89ad534ffb 100644 --- a/include/llvm/TypeSymbolTable.h +++ b/include/llvm/TypeSymbolTable.h @@ -133,7 +133,7 @@ private: /// is refined. virtual void refineAbstractType(const DerivedType *OldTy, const Type *NewTy); - /// This function markes a type as being concrete (defined). + /// This function marks a type as being concrete (defined). virtual void typeBecameConcrete(const DerivedType *AbsTy); /// @} diff --git a/lib/Analysis/BasicAliasAnalysis.cpp b/lib/Analysis/BasicAliasAnalysis.cpp index f7bcd9ec44..be8ef96a87 100644 --- a/lib/Analysis/BasicAliasAnalysis.cpp +++ b/lib/Analysis/BasicAliasAnalysis.cpp @@ -350,7 +350,7 @@ DecomposeGEPExpression(const Value *V, int64_t &BaseOffs, Scale *= IndexScale.getSExtValue(); - // If we already had an occurrance of this index variable, merge this + // If we already had an occurrence of this index variable, merge this // scale into it. For example, we want to handle: // A[x][x] -> x*16 + x*4 -> x*20 // This also ensures that 'x' only appears in the index list once. 
@@ -883,7 +883,7 @@ BasicAliasAnalysis::aliasGEP(const GEPOperator *GEP1, uint64_t V1Size, if (GEP1BaseOffset == 0 && GEP1VariableIndices.empty()) return MustAlias; - // If there is a difference betwen the pointers, but the difference is + // If there is a difference between the pointers, but the difference is // less than the size of the associated memory object, then we know // that the objects are partially overlapping. if (GEP1BaseOffset != 0 && GEP1VariableIndices.empty()) { diff --git a/lib/Analysis/IPA/GlobalsModRef.cpp b/lib/Analysis/IPA/GlobalsModRef.cpp index 116aaf418e..b226d66cd7 100644 --- a/lib/Analysis/IPA/GlobalsModRef.cpp +++ b/lib/Analysis/IPA/GlobalsModRef.cpp @@ -602,7 +602,7 @@ void GlobalsModRef::addEscapingUse(Use &U) { // For the purposes of this analysis, it is conservatively correct to treat // a newly escaping value equivalently to a deleted one. We could perhaps // be more precise by processing the new use and attempting to update our - // saved analysis results to accomodate it. + // saved analysis results to accommodate it. deleteValue(U); AliasAnalysis::addEscapingUse(U); diff --git a/lib/Analysis/InlineCost.cpp b/lib/Analysis/InlineCost.cpp index 47f91cfc3b..a820ecf037 100644 --- a/lib/Analysis/InlineCost.cpp +++ b/lib/Analysis/InlineCost.cpp @@ -501,7 +501,7 @@ InlineCost InlineCostAnalyzer::getInlineCost(CallSite CS, return InlineCost::getAlways(); if (CalleeFI->Metrics.usesDynamicAlloca) { - // Get infomation about the caller. + // Get information about the caller. FunctionInfo &CallerFI = CachedFunctionInfo[Caller]; // If we haven't calculated this information yet, do so now. @@ -549,7 +549,7 @@ InlineCost InlineCostAnalyzer::getSpecializationCost(Function *Callee, int Cost = 0; - // Look at the orginal size of the callee. Each instruction counts as 5. + // Look at the original size of the callee. Each instruction counts as 5. 
Cost += CalleeFI->Metrics.NumInsts * InlineConstants::InstrCost; // Offset that with the amount of code that can be constant-folded diff --git a/lib/Analysis/PathNumbering.cpp b/lib/Analysis/PathNumbering.cpp index 5d3f6bbc7b..f2c40763a8 100644 --- a/lib/Analysis/PathNumbering.cpp +++ b/lib/Analysis/PathNumbering.cpp @@ -286,7 +286,7 @@ void BallLarusDag::calculatePathNumbers() { BallLarusEdge* exitEdge = addEdge(node, getExit(), 0); exitEdge->setType(BallLarusEdge::SPLITEDGE_PHONY); - // Counters to handle the possibilty of a multi-graph + // Counters to handle the possibility of a multi-graph BasicBlock* oldTarget = 0; unsigned duplicateNumber = 0; diff --git a/lib/Analysis/PathProfileVerifier.cpp b/lib/Analysis/PathProfileVerifier.cpp index c549773142..0ae734e259 100644 --- a/lib/Analysis/PathProfileVerifier.cpp +++ b/lib/Analysis/PathProfileVerifier.cpp @@ -124,7 +124,7 @@ bool PathProfileVerifier::runOnModule (Module &M) { ProfilePathEdgeVector* pev = currentPath->getPathEdges(); DEBUG(dbgs () << "path #" << currentPath->getNumber() << ": " << currentPath->getCount() << "\n"); - // setup the entry edge (normally path profiling doens't care about this) + // setup the entry edge (normally path profiling doesn't care about this) if (currentPath->getFirstBlockInPath() == &F->getEntryBlock()) edgeArray[arrayMap[0][currentPath->getFirstBlockInPath()][0]] += currentPath->getCount(); diff --git a/lib/Analysis/ProfileEstimatorPass.cpp b/lib/Analysis/ProfileEstimatorPass.cpp index 667ee1cc34..b594e2ba55 100644 --- a/lib/Analysis/ProfileEstimatorPass.cpp +++ b/lib/Analysis/ProfileEstimatorPass.cpp @@ -140,7 +140,7 @@ void ProfileEstimatorPass::recurseBasicBlock(BasicBlock *BB) { // loop, thus the edge is a backedge, continue and do not check if the // value is valid. if (BBisHeader && BBLoop->contains(*bbi)) { - printEdgeError(edge, "but is backedge, continueing"); + printEdgeError(edge, "but is backedge, continuing"); continue; } // If the edges value is missing (and this is no loop header, and this is diff --git a/lib/Analysis/ProfileInfo.cpp b/lib/Analysis/ProfileInfo.cpp index 36f211e858..173de2c027 100644 --- a/lib/Analysis/ProfileInfo.cpp +++ b/lib/Analysis/ProfileInfo.cpp @@ -309,9 +309,9 @@ void ProfileInfoT<Function,BasicBlock>:: removeEdge(oldedge); } -/// Replaces all occurences of RmBB in the ProfilingInfo with DestBB. +/// Replaces all occurrences of RmBB in the ProfilingInfo with DestBB. /// This checks all edges of the function the blocks reside in and replaces the -/// occurences of RmBB with DestBB. +/// occurrences of RmBB with DestBB. template<> void ProfileInfoT<Function,BasicBlock>:: replaceAllUses(const BasicBlock *RmBB, const BasicBlock *DestBB) { @@ -812,7 +812,7 @@ void ProfileInfoT<Function,BasicBlock>::repair(const Function *F) { } if (iw < 0) continue; - // Check the recieving end of the path if it can handle the flow. + // Check the receiving end of the path if it can handle the flow. double ow = getExecutionCount(Dest); Processed.clear(); for (succ_const_iterator NBB = succ_begin(BB), End = succ_end(BB); diff --git a/lib/Analysis/ScalarEvolution.cpp b/lib/Analysis/ScalarEvolution.cpp index 228974dde5..1810ded68b 100644 --- a/lib/Analysis/ScalarEvolution.cpp +++ b/lib/Analysis/ScalarEvolution.cpp @@ -1882,7 +1882,7 @@ const SCEV *ScalarEvolution::getMulExpr(SmallVectorImpl<const SCEV *> &Ops, // outer mul and the inner addrec are guaranteed to have no overflow. 
// // No self-wrap cannot be guaranteed after changing the step size, but - // will be infered if either NUW or NSW is true. + // will be inferred if either NUW or NSW is true. Flags = AddRec->getNoWrapFlags(clearFlags(Flags, SCEV::FlagNW)); const SCEV *NewRec = getAddRecExpr(NewOps, AddRecLoop, Flags); diff --git a/lib/Analysis/TypeBasedAliasAnalysis.cpp b/lib/Analysis/TypeBasedAliasAnalysis.cpp index 1b4ff3c7e7..0faf1398ec 100644 --- a/lib/Analysis/TypeBasedAliasAnalysis.cpp +++ b/lib/Analysis/TypeBasedAliasAnalysis.cpp @@ -31,7 +31,7 @@ // // The second field identifies the type's parent node in the tree, or // is null or omitted for a root node. A type is considered to alias -// all of its decendents and all of its ancestors in the tree. Also, +// all of its descendants and all of its ancestors in the tree. Also, // a type is considered to alias all types in other trees, so that // bitcode produced from multiple front-ends is handled conservatively. // diff --git a/lib/Analysis/ValueTracking.cpp b/lib/Analysis/ValueTracking.cpp index a8117e6e42..8f18dd278a 100644 --- a/lib/Analysis/ValueTracking.cpp +++ b/lib/Analysis/ValueTracking.cpp @@ -1328,7 +1328,7 @@ static Value *BuildSubAggregate(Value *From, Value* To, const Type *IndexedType, break; } } - // If we succesfully found a value for each of our subaggregates + // If we successfully found a value for each of our subaggregates if (To) return To; } @@ -1757,7 +1757,7 @@ llvm::GetUnderlyingObject(Value *V, const TargetData *TD, unsigned MaxLookup) { } else { // See if InstructionSimplify knows any relevant tricks. if (Instruction *I = dyn_cast<Instruction>(V)) - // TODO: Aquire a DominatorTree and use it. + // TODO: Acquire a DominatorTree and use it. if (Value *Simplified = SimplifyInstruction(I, TD, 0)) { V = Simplified; continue; diff --git a/lib/Bitcode/Writer/ValueEnumerator.cpp b/lib/Bitcode/Writer/ValueEnumerator.cpp index 54404cd03a..21f004a7dc 100644 --- a/lib/Bitcode/Writer/ValueEnumerator.cpp +++ b/lib/Bitcode/Writer/ValueEnumerator.cpp @@ -363,7 +363,7 @@ void ValueEnumerator::EnumerateValue(const Value *V) { // Initializers for globals are handled explicitly elsewhere. } else if (isa<ConstantArray>(C) && cast<ConstantArray>(C)->isString()) { // Do not enumerate the initializers for an array of simple characters. - // The initializers just polute the value table, and we emit the strings + // The initializers just pollute the value table, and we emit the strings // specially. } else if (C->getNumOperands()) { // If a constant has operands, enumerate them. This makes sure that if a diff --git a/lib/CodeGen/AggressiveAntiDepBreaker.cpp b/lib/CodeGen/AggressiveAntiDepBreaker.cpp index b520d8fced..5c809f7fd6 100644 --- a/lib/CodeGen/AggressiveAntiDepBreaker.cpp +++ b/lib/CodeGen/AggressiveAntiDepBreaker.cpp @@ -357,7 +357,7 @@ void AggressiveAntiDepBreaker::PrescanInstruction(MachineInstr *MI, RegRefs = State->GetRegRefs(); // Handle dead defs by simulating a last-use of the register just - // after the def. A dead def can occur because the def is truely + // after the def. A dead def can occur because the def is truly // dead, or because only a subregister is live at the def. If we // don't do this the dead def will be incorrectly merged into the // previous def. 
diff --git a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp index 70053264c3..9363295df5 100644 --- a/lib/CodeGen/AsmPrinter/DwarfDebug.cpp +++ b/lib/CodeGen/AsmPrinter/DwarfDebug.cpp @@ -53,7 +53,7 @@ static cl::opt<bool> DisableDebugInfoPrinting("disable-debug-info-print", cl::desc("Disable debug info printing")); static cl::opt<bool> UnknownLocations("use-unknown-locations", cl::Hidden, - cl::desc("Make an absense of debug location information explicit."), + cl::desc("Make an absence of debug location information explicit."), cl::init(false)); #ifndef NDEBUG @@ -1422,7 +1422,7 @@ DwarfDebug::collectVariableInfo(const MachineFunction *MF, if (HI + 1 == HE) // If Begin is the last instruction in History then its value is valid - // until the end of the funtion. + // until the end of the function. SLabel = FunctionEndSym; else { const MachineInstr *End = HI[1]; diff --git a/lib/CodeGen/CodePlacementOpt.cpp b/lib/CodeGen/CodePlacementOpt.cpp index e37356a60c..270c337ef6 100644 --- a/lib/CodeGen/CodePlacementOpt.cpp +++ b/lib/CodeGen/CodePlacementOpt.cpp @@ -254,7 +254,7 @@ bool CodePlacementOpt::MoveDiscontiguousLoopBlocks(MachineFunction &MF, // Determine a position to move orphaned loop blocks to. If TopMBB is not // entered via fallthrough and BotMBB is exited via fallthrough, prepend them - // to the top of the loop to avoid loosing that fallthrough. Otherwise append + // to the top of the loop to avoid losing that fallthrough. Otherwise append // them to the bottom, even if it previously had a fallthrough, on the theory // that it's worth an extra branch to keep the loop contiguous. MachineFunction::iterator InsertPt = diff --git a/lib/CodeGen/ELF.h b/lib/CodeGen/ELF.h index e08feeb275..5b634682cc 100644 --- a/lib/CodeGen/ELF.h +++ b/lib/CodeGen/ELF.h @@ -173,7 +173,7 @@ namespace llvm { unsigned Offset; // sh_offset - Offset from the file start unsigned Size; // sh_size - The section size. unsigned Link; // sh_link - Section header table index link. - unsigned Info; // sh_info - Auxillary information. + unsigned Info; // sh_info - Auxiliary information. unsigned Align; // sh_addralign - Alignment of section. unsigned EntSize; // sh_entsize - Size of entries in the section e diff --git a/lib/CodeGen/ELFWriter.cpp b/lib/CodeGen/ELFWriter.cpp index b321a15add..fa2319bff7 100644 --- a/lib/CodeGen/ELFWriter.cpp +++ b/lib/CodeGen/ELFWriter.cpp @@ -77,7 +77,7 @@ ELFWriter::ELFWriter(raw_ostream &o, TargetMachine &tm) // Create the object code emitter object for this target. ElfCE = new ELFCodeEmitter(*this); - // Inital number of sections + // Initial number of sections NumSections = 0; } diff --git a/lib/CodeGen/ExpandISelPseudos.cpp b/lib/CodeGen/ExpandISelPseudos.cpp index b5ec303f5d..ebc2fc91ef 100644 --- a/lib/CodeGen/ExpandISelPseudos.cpp +++ b/lib/CodeGen/ExpandISelPseudos.cpp @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// Expand Psuedo-instructions produced by ISel. These are usually to allow +// Expand Pseudo-instructions produced by ISel. These are usually to allow // the expansion to contain control flow, such as a conditional move // implemented with a conditional branch and a phi, or an atomic operation // implemented with a loop. 
diff --git a/lib/CodeGen/LiveDebugVariables.cpp b/lib/CodeGen/LiveDebugVariables.cpp index 333d15fbe2..8b214831d2 100644 --- a/lib/CodeGen/LiveDebugVariables.cpp +++ b/lib/CodeGen/LiveDebugVariables.cpp @@ -290,7 +290,7 @@ public: /// mapVirtReg - Map virtual register to an equivalence class. void mapVirtReg(unsigned VirtReg, UserValue *EC); - /// renameRegister - Replace all references to OldReg wiht NewReg:SubIdx. + /// renameRegister - Replace all references to OldReg with NewReg:SubIdx. void renameRegister(unsigned OldReg, unsigned NewReg, unsigned SubIdx); /// emitDebugVariables - Recreate DBG_VALUE instruction from data structures. diff --git a/lib/CodeGen/LiveIntervalAnalysis.cpp b/lib/CodeGen/LiveIntervalAnalysis.cpp index 7a8ca63ce7..19bb7e34d2 100644 --- a/lib/CodeGen/LiveIntervalAnalysis.cpp +++ b/lib/CodeGen/LiveIntervalAnalysis.cpp @@ -1507,7 +1507,7 @@ rewriteInstructionsForSpills(const LiveInterval &li, bool TrySplit, // ... // def = ... // = use - // It's better to start a new interval to avoid artifically + // It's better to start a new interval to avoid artificially // extend the new interval. if (MI->readsWritesVirtualRegister(li.reg) == std::make_pair(false,true)) { diff --git a/lib/CodeGen/PrologEpilogInserter.cpp b/lib/CodeGen/PrologEpilogInserter.cpp index 92e25e150f..f1f3c9969c 100644 --- a/lib/CodeGen/PrologEpilogInserter.cpp +++ b/lib/CodeGen/PrologEpilogInserter.cpp @@ -337,7 +337,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) { --BeforeI; // Restore all registers immediately before the return and any - // terminators that preceed it. + // terminators that precede it. if (!TFI->restoreCalleeSavedRegisters(*MBB, I, CSI, TRI)) { for (unsigned i = 0, e = CSI.size(); i != e; ++i) { unsigned Reg = CSI[i].getReg(); @@ -437,7 +437,7 @@ void PEI::insertCSRSpillsAndRestores(MachineFunction &Fn) { --BeforeI; // Restore all registers immediately before the return and any - // terminators that preceed it. + // terminators that precede it. for (unsigned i = 0, e = blockCSI.size(); i != e; ++i) { unsigned Reg = blockCSI[i].getReg(); const TargetRegisterClass *RC = TRI->getMinimalPhysRegClass(Reg); diff --git a/lib/CodeGen/README.txt b/lib/CodeGen/README.txt index b655dda411..7f75f65167 100644 --- a/lib/CodeGen/README.txt +++ b/lib/CodeGen/README.txt @@ -26,7 +26,7 @@ and then "merge" mul and mov: sxth r3, r3 mla r4, r3, lr, r4 -It also increase the likelyhood the store may become dead. +It also increase the likelihood the store may become dead. //===---------------------------------------------------------------------===// @@ -162,7 +162,7 @@ synthesize the various copy insertion/inspection methods in TargetInstrInfo. //===---------------------------------------------------------------------===// -Stack coloring improvments: +Stack coloring improvements: 1. Do proper LiveStackAnalysis on all stack objects including those which are not spill slots. diff --git a/lib/CodeGen/RegAllocLinearScan.cpp b/lib/CodeGen/RegAllocLinearScan.cpp index ef78949c54..b8cb5a7c92 100644 --- a/lib/CodeGen/RegAllocLinearScan.cpp +++ b/lib/CodeGen/RegAllocLinearScan.cpp @@ -792,7 +792,7 @@ void RALinScan::updateSpillWeights(std::vector<float> &Weights, // register class we are trying to allocate. Then add the weight to all // sub-registers of the super-register even if they are not aliases. // e.g. allocating for GR32, bh is not used, updating bl spill weight. 
- // bl should get the same spill weight otherwise it will be choosen + // bl should get the same spill weight otherwise it will be chosen // as a spill candidate since spilling bh doesn't make ebx available. for (unsigned i = 0, e = Supers.size(); i != e; ++i) { for (const unsigned *sr = tri_->getSubRegisters(Supers[i]); *sr; ++sr) diff --git a/lib/CodeGen/RenderMachineFunction.cpp b/lib/CodeGen/RenderMachineFunction.cpp index cbfd5a23d6..c8de382355 100644 --- a/lib/CodeGen/RenderMachineFunction.cpp +++ b/lib/CodeGen/RenderMachineFunction.cpp @@ -47,7 +47,7 @@ outputFileSuffix("rmf-file-suffix", static cl::opt<std::string> machineFuncsToRender("rmf-funcs", - cl::desc("Coma seperated list of functions to render" + cl::desc("Comma separated list of functions to render" ", or \"*\"."), cl::init(""), cl::Hidden); diff --git a/lib/CodeGen/ScheduleDAG.cpp b/lib/CodeGen/ScheduleDAG.cpp index 3388889c9e..1302395f42 100644 --- a/lib/CodeGen/ScheduleDAG.cpp +++ b/lib/CodeGen/ScheduleDAG.cpp @@ -472,7 +472,7 @@ void ScheduleDAGTopologicalSort::InitDAGTopologicalSorting() { #endif } -/// AddPred - Updates the topological ordering to accomodate an edge +/// AddPred - Updates the topological ordering to accommodate an edge /// to be added from SUnit X to SUnit Y. void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) { int UpperBound, LowerBound; @@ -490,7 +490,7 @@ void ScheduleDAGTopologicalSort::AddPred(SUnit *Y, SUnit *X) { } } -/// RemovePred - Updates the topological ordering to accomodate an +/// RemovePred - Updates the topological ordering to accommodate an /// an edge to be removed from the specified node N from the predecessors /// of the current node M. void ScheduleDAGTopologicalSort::RemovePred(SUnit *M, SUnit *N) { diff --git a/lib/CodeGen/ScheduleDAGInstrs.cpp b/lib/CodeGen/ScheduleDAGInstrs.cpp index f17023eabb..67c209ea19 100644 --- a/lib/CodeGen/ScheduleDAGInstrs.cpp +++ b/lib/CodeGen/ScheduleDAGInstrs.cpp @@ -371,7 +371,7 @@ void ScheduleDAGInstrs::BuildSchedGraph(AliasAnalysis *AA) { // will be overlapped by work done outside the current // scheduling region. Latency -= std::min(Latency, Count); - // Add the artifical edge. + // Add the artificial edge. ExitSU.addPred(SDep(SU, SDep::Order, Latency, /*Reg=*/0, /*isNormalMemory=*/false, /*isMustAlias=*/false, diff --git a/lib/CodeGen/SelectionDAG/FastISel.cpp b/lib/CodeGen/SelectionDAG/FastISel.cpp index 4952b5b575..aab23ea1c7 100644 --- a/lib/CodeGen/SelectionDAG/FastISel.cpp +++ b/lib/CodeGen/SelectionDAG/FastISel.cpp @@ -1239,7 +1239,7 @@ bool FastISel::HandlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) { // Only handle legal types. Two interesting things to note here. First, // by bailing out early, we may leave behind some dead instructions, // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its - // own moves. Second, this check is necessary becuase FastISel doesn't + // own moves. Second, this check is necessary because FastISel doesn't // use CreateRegs to create registers, so it always creates // exactly one register for each non-void instruction. 
EVT VT = TLI.getValueType(PN->getType(), /*AllowUnknown=*/true); diff --git a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp index b837261744..e42e4cd22e 100644 --- a/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp +++ b/lib/CodeGen/SelectionDAG/LegalizeDAG.cpp @@ -2878,7 +2878,7 @@ void SelectionDAGLegalize::ExpandNode(SDNode *Node, } case ISD::FP_ROUND_INREG: { // The only way we can lower this is to turn it into a TRUNCSTORE, - // EXTLOAD pair, targetting a temporary location (a stack slot). + // EXTLOAD pair, targeting a temporary location (a stack slot). // NOTE: there is a choice here between constantly creating new stack // slots and always reusing the same one. We currently always create diff --git a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp index b258e6eefe..94b8c2f204 100644 --- a/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp +++ b/lib/CodeGen/SelectionDAG/ScheduleDAGSDNodes.cpp @@ -617,7 +617,7 @@ namespace { }; } -/// ProcessSDDbgValues - Process SDDbgValues assoicated with this node. +/// ProcessSDDbgValues - Process SDDbgValues associated with this node. static void ProcessSDDbgValues(SDNode *N, SelectionDAG *DAG, InstrEmitter &Emitter, SmallVector<std::pair<unsigned, MachineInstr*>, 32> &Orders, diff --git a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp index 8fb881b270..4178a4a350 100644 --- a/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp +++ b/lib/CodeGen/SelectionDAG/SelectionDAGBuilder.cpp @@ -6197,7 +6197,7 @@ TargetLowering::LowerCallTo(SDValue Chain, const Type *RetTy, // For a function returning void, there is no return value. We can't create // such a node, so we just return a null return value in that case. In - // that case, nothing will actualy look at the value. + // that case, nothing will actually look at the value. if (ReturnValues.empty()) return std::make_pair(SDValue(), Chain); @@ -6413,7 +6413,7 @@ void SelectionDAGISel::LowerArguments(const BasicBlock *LLVMBB) { SDB->setValue(I, Res); // If this argument is live outside of the entry block, insert a copy from - // whereever we got it to the vreg that other BB's will reference it as. + // wherever we got it to the vreg that other BB's will reference it as. SDB->CopyToExportRegsIfNeeded(I); } } diff --git a/lib/CodeGen/SelectionDAG/TargetLowering.cpp b/lib/CodeGen/SelectionDAG/TargetLowering.cpp index 7cfa152662..843ed96cba 100644 --- a/lib/CodeGen/SelectionDAG/TargetLowering.cpp +++ b/lib/CodeGen/SelectionDAG/TargetLowering.cpp @@ -1859,12 +1859,11 @@ TargetLowering::SimplifySetCC(EVT VT, SDValue N0, SDValue N1, case ISD::SETTRUE2: return DAG.getConstant(1, VT); } - if (isa<ConstantSDNode>(N0.getNode())) { - // Ensure that the constant occurs on the RHS, and fold constant - // comparisons. + // Ensure that the constant occurs on the RHS, and fold constant + // comparisons. + if (isa<ConstantSDNode>(N0.getNode())) return DAG.getSetCC(dl, VT, N1, N0, ISD::getSetCCSwappedOperands(Cond)); - } - + if (ConstantSDNode *N1C = dyn_cast<ConstantSDNode>(N1.getNode())) { const APInt &C1 = N1C->getAPIntValue(); diff --git a/lib/CodeGen/ShrinkWrapping.cpp b/lib/CodeGen/ShrinkWrapping.cpp index 7b5bca4952..160f38f692 100644 --- a/lib/CodeGen/ShrinkWrapping.cpp +++ b/lib/CodeGen/ShrinkWrapping.cpp @@ -277,7 +277,7 @@ void PEI::calculateAnticAvail(MachineFunction &Fn) { // Initialize data flow sets. 
clearAnticAvailSets(); - // Calulate Antic{In,Out} and Avail{In,Out} iteratively on the MCFG. + // Calculate Antic{In,Out} and Avail{In,Out} iteratively on the MCFG. bool changed = true; unsigned iterations = 0; while (changed) { diff --git a/lib/CodeGen/StrongPHIElimination.cpp b/lib/CodeGen/StrongPHIElimination.cpp index ec7829ec39..227eb47e68 100644 --- a/lib/CodeGen/StrongPHIElimination.cpp +++ b/lib/CodeGen/StrongPHIElimination.cpp @@ -587,7 +587,7 @@ StrongPHIElimination::SplitInterferencesForBasicBlock( } // We now walk the PHIs in successor blocks and check for interferences. This - // is necesary because the use of a PHI's operands are logically contained in + // is necessary because the use of a PHI's operands are logically contained in // the predecessor block. The def of a PHI's destination register is processed // along with the other defs in a basic block. diff --git a/lib/CodeGen/VirtRegRewriter.cpp b/lib/CodeGen/VirtRegRewriter.cpp index 67be1b0842..1850658805 100644 --- a/lib/CodeGen/VirtRegRewriter.cpp +++ b/lib/CodeGen/VirtRegRewriter.cpp @@ -32,7 +32,7 @@ STATISTIC(NumCommutes, "Number of instructions commuted"); STATISTIC(NumDRM , "Number of re-materializable defs elided"); STATISTIC(NumStores , "Number of stores added"); STATISTIC(NumPSpills , "Number of physical register spills"); -STATISTIC(NumOmitted , "Number of reloads omited"); +STATISTIC(NumOmitted , "Number of reloads omitted"); STATISTIC(NumAvoided , "Number of reloads deemed unnecessary"); STATISTIC(NumCopified, "Number of available reloads turned into copies"); STATISTIC(NumReMats , "Number of re-materialization"); @@ -669,7 +669,7 @@ static void UpdateKills(MachineInstr &MI, const TargetRegisterInfo* TRI, } } -/// ReMaterialize - Re-materialize definition for Reg targetting DestReg. +/// ReMaterialize - Re-materialize definition for Reg targeting DestReg. /// static void ReMaterialize(MachineBasicBlock &MBB, MachineBasicBlock::iterator &MII, diff --git a/lib/ExecutionEngine/JIT/JIT.cpp b/lib/ExecutionEngine/JIT/JIT.cpp index 56121c1b70..d1f87acd61 100644 --- a/lib/ExecutionEngine/JIT/JIT.cpp +++ b/lib/ExecutionEngine/JIT/JIT.cpp @@ -666,7 +666,7 @@ void JIT::jitTheFunction(Function *F, const MutexGuard &locked) { } /// getPointerToFunction - This method is used to get the address of the -/// specified function, compiling it if neccesary. +/// specified function, compiling it if necessary. /// void *JIT::getPointerToFunction(Function *F) { diff --git a/lib/MC/MCDisassembler/Disassembler.cpp b/lib/MC/MCDisassembler/Disassembler.cpp index 95c7b845b4..ced57e8ca2 100644 --- a/lib/MC/MCDisassembler/Disassembler.cpp +++ b/lib/MC/MCDisassembler/Disassembler.cpp @@ -36,7 +36,7 @@ extern "C" { // disassembly is supported by passing a block of information in the DisInfo // parameter and specifing the TagType and call back functions as described in // the header llvm-c/Disassembler.h . The pointer to the block and the -// functions can all be passed as NULL. If successfull this returns a +// functions can all be passed as NULL. If successful this returns a // disassembler context if not it returns NULL. // LLVMDisasmContextRef LLVMCreateDisasm(const char *TripleName, void *DisInfo, diff --git a/lib/MC/MCExpr.cpp b/lib/MC/MCExpr.cpp index 2debe18bfb..312621509f 100644 --- a/lib/MC/MCExpr.cpp +++ b/lib/MC/MCExpr.cpp @@ -389,7 +389,7 @@ static bool EvaluateSymbolicAdd(const MCAssembler *Asm, // (LHS_A - RHS_B), // (RHS_A - LHS_B), // (RHS_A - RHS_B). 
- // Since we are attempting to be as aggresive as possible about folding, we + // Since we are attempting to be as aggressive as possible about folding, we // attempt to evaluate each possible alternative. AttemptToFoldSymbolOffsetDifference(Asm, Layout, Addrs, InSet, LHS_A, LHS_B, Result_Cst); diff --git a/lib/MC/MachObjectWriter.cpp b/lib/MC/MachObjectWriter.cpp index 105506a113..afa3b5c13e 100644 --- a/lib/MC/MachObjectWriter.cpp +++ b/lib/MC/MachObjectWriter.cpp @@ -440,7 +440,7 @@ public: // Compensate for the relocation offset, Darwin x86_64 relocations only // have the addend and appear to have attempted to define it to be the // actual expression addend without the PCrel bias. However, instructions - // with data following the relocation are not accomodated for (see comment + // with data following the relocation are not accommodated for (see comment // below regarding SIGNED{1,2,4}), so it isn't exactly that either. Value += 1LL << Log2Size; } @@ -541,7 +541,7 @@ public: } // x86_64 almost always uses external relocations, except when there is no - // symbol to use as a base address (a local symbol with no preceeding + // symbol to use as a base address (a local symbol with no preceding // non-local symbol). if (Base) { Index = Base->getIndex(); diff --git a/lib/Support/APFloat.cpp b/lib/Support/APFloat.cpp index 3a63258cd2..c3169acabb 100644 --- a/lib/Support/APFloat.cpp +++ b/lib/Support/APFloat.cpp @@ -3564,7 +3564,7 @@ void APFloat::toString(SmallVectorImpl<char> &Str, } bool APFloat::getExactInverse(APFloat *inv) const { - // We can only guarantee the existance of an exact inverse for IEEE floats. + // We can only guarantee the existence of an exact inverse for IEEE floats. if (semantics != &IEEEhalf && semantics != &IEEEsingle && semantics != &IEEEdouble && semantics != &IEEEquad) return false; diff --git a/lib/Support/APInt.cpp b/lib/Support/APInt.cpp index 5789721c3a..23a22ac68f 100644 --- a/lib/Support/APInt.cpp +++ b/lib/Support/APInt.cpp @@ -1518,7 +1518,7 @@ APInt::ms APInt::magic() const { /// Requires that the divisor not be 0. Taken from "Hacker's Delight", Henry /// S. Warren, Jr., chapter 10. /// LeadingZeros can be used to simplify the calculation if the upper bits -/// of the devided value are known zero. +/// of the divided value are known zero. APInt::mu APInt::magicu(unsigned LeadingZeros) const { const APInt& d = *this; unsigned p; diff --git a/lib/Support/FileUtilities.cpp b/lib/Support/FileUtilities.cpp index 5dbabee7a7..4c8c0c63ff 100644 --- a/lib/Support/FileUtilities.cpp +++ b/lib/Support/FileUtilities.cpp @@ -198,7 +198,7 @@ int llvm::DiffFilesWithTolerance(const sys::PathWithStatus &FileA, return 1; } - // Now its safe to mmap the files into memory becasue both files + // Now its safe to mmap the files into memory because both files // have a non-zero size. error_code ec; OwningPtr<MemoryBuffer> F1; diff --git a/lib/Support/PrettyStackTrace.cpp b/lib/Support/PrettyStackTrace.cpp index a9f4709e4b..082b7012eb 100644 --- a/lib/Support/PrettyStackTrace.cpp +++ b/lib/Support/PrettyStackTrace.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This file defines some helpful functions for dealing with the possibility of -// Unix signals occuring while your program is running. +// Unix signals occurring while your program is running. 
// //===----------------------------------------------------------------------===// diff --git a/lib/Support/Signals.cpp b/lib/Support/Signals.cpp index a3af37d5fe..a11789372d 100644 --- a/lib/Support/Signals.cpp +++ b/lib/Support/Signals.cpp @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This file defines some helpful functions for dealing with the possibility of -// Unix signals occuring while your program is running. +// Unix signals occurring while your program is running. // //===----------------------------------------------------------------------===// diff --git a/lib/Support/StringRef.cpp b/lib/Support/StringRef.cpp index 5398051964..8c3fc094cd 100644 --- a/lib/Support/StringRef.cpp +++ b/lib/Support/StringRef.cpp @@ -131,7 +131,7 @@ unsigned StringRef::edit_distance(llvm::StringRef Other, /// find - Search for the first string \arg Str in the string. /// -/// \return - The index of the first occurence of \arg Str, or npos if not +/// \return - The index of the first occurrence of \arg Str, or npos if not /// found. size_t StringRef::find(StringRef Str, size_t From) const { size_t N = Str.size(); @@ -145,7 +145,7 @@ size_t StringRef::find(StringRef Str, size_t From) const { /// rfind - Search for the last string \arg Str in the string. /// -/// \return - The index of the last occurence of \arg Str, or npos if not +/// \return - The index of the last occurrence of \arg Str, or npos if not /// found. size_t StringRef::rfind(StringRef Str) const { size_t N = Str.size(); diff --git a/lib/Support/Unix/Program.inc b/lib/Support/Unix/Program.inc index 6efc8cd5e4..9f0a9ef052 100644 --- a/lib/Support/Unix/Program.inc +++ b/lib/Support/Unix/Program.inc @@ -236,7 +236,7 @@ Program::Execute(const Path &path, const char **args, const char **envp, // Create a child process. int child = fork(); switch (child) { - // An error occured: Return to the caller. + // An error occurred: Return to the caller. case -1: MakeErrMsg(ErrMsg, "Couldn't fork"); return false; diff --git a/lib/Support/Unix/Signals.inc b/lib/Support/Unix/Signals.inc index 0a61759155..fade9069ba 100644 --- a/lib/Support/Unix/Signals.inc +++ b/lib/Support/Unix/Signals.inc @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// // // This file defines some helpful functions for dealing with the possibility of -// Unix signals occuring while your program is running. +// Unix signals occurring while your program is running. // //===----------------------------------------------------------------------===// diff --git a/lib/Target/ARM/ARMAsmPrinter.cpp b/lib/Target/ARM/ARMAsmPrinter.cpp index 8eb1993a15..4d66f852d0 100644 --- a/lib/Target/ARM/ARMAsmPrinter.cpp +++ b/lib/Target/ARM/ARMAsmPrinter.cpp @@ -1708,7 +1708,7 @@ void ARMAsmPrinter::EmitInstruction(const MachineInstr *MI) { return; } // Tail jump branches are really just branch instructions with additional - // code-gen attributes. Convert them to the cannonical form here. + // code-gen attributes. Convert them to the canonical form here. 
case ARM::TAILJMPd: case ARM::TAILJMPdND: { MCInst TmpInst, TmpInst2; diff --git a/lib/Target/ARM/ARMBaseInstrInfo.cpp b/lib/Target/ARM/ARMBaseInstrInfo.cpp index fdc5b9eea4..af51fe7989 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.cpp +++ b/lib/Target/ARM/ARMBaseInstrInfo.cpp @@ -1201,7 +1201,7 @@ bool ARMBaseInstrInfo::areLoadsFromSameBasePtr(SDNode *Load1, SDNode *Load2, } /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to -/// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should +/// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should /// be scheduled togther. On some targets if two loads are loading from /// addresses in the same cache line, it's better if they are scheduled /// together. This function takes two integers that represent the load offsets diff --git a/lib/Target/ARM/ARMBaseInstrInfo.h b/lib/Target/ARM/ARMBaseInstrInfo.h index 517328b2a9..9a2faf8f9a 100644 --- a/lib/Target/ARM/ARMBaseInstrInfo.h +++ b/lib/Target/ARM/ARMBaseInstrInfo.h @@ -291,7 +291,7 @@ public: int64_t &Offset1, int64_t &Offset2)const; /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to - /// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should + /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should /// be scheduled togther. On some targets if two loads are loading from /// addresses in the same cache line, it's better if they are scheduled /// together. This function takes two integers that represent the load offsets diff --git a/lib/Target/ARM/ARMBaseRegisterInfo.cpp b/lib/Target/ARM/ARMBaseRegisterInfo.cpp index 1918fd9585..3d1eaf0891 100644 --- a/lib/Target/ARM/ARMBaseRegisterInfo.cpp +++ b/lib/Target/ARM/ARMBaseRegisterInfo.cpp @@ -88,7 +88,7 @@ BitVector ARMBaseRegisterInfo:: getReservedRegs(const MachineFunction &MF) const { const TargetFrameLowering *TFI = MF.getTarget().getFrameLowering(); - // FIXME: avoid re-calculating this everytime. + // FIXME: avoid re-calculating this every time. BitVector Reserved(getNumRegs()); Reserved.set(ARM::SP); Reserved.set(ARM::PC); diff --git a/lib/Target/ARM/ARMISelLowering.cpp b/lib/Target/ARM/ARMISelLowering.cpp index ded62eb383..62d5b16a9d 100644 --- a/lib/Target/ARM/ARMISelLowering.cpp +++ b/lib/Target/ARM/ARMISelLowering.cpp @@ -725,7 +725,7 @@ ARMTargetLowering::ARMTargetLowering(TargetMachine &TM) // pressure of the register class's representative and all of it's super // classes' representatives transitively. We have not implemented this because // of the difficulty prior to coalescing of modeling operand register classes -// due to the common occurence of cross class copies and subregister insertions +// due to the common occurrence of cross class copies and subregister insertions // and extractions. std::pair<const TargetRegisterClass*, uint8_t> ARMTargetLowering::findRepresentativeClass(EVT VT) const{ @@ -1323,7 +1323,7 @@ ARMTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // than necessary, because it means that each store effectively depends // on every argument instead of just those arguments it would clobber. - // Do not flag preceeding copytoreg stuff together with the following stuff. + // Do not flag preceding copytoreg stuff together with the following stuff. 
InFlag = SDValue(); for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/ARM/ARMInstrInfo.td b/lib/Target/ARM/ARMInstrInfo.td index eb8f4aad4c..b2961f8254 100644 --- a/lib/Target/ARM/ARMInstrInfo.td +++ b/lib/Target/ARM/ARMInstrInfo.td @@ -3690,7 +3690,7 @@ let isCall = 1, // here, and we're using the stack frame for the containing function to // save/restore registers, we can't keep anything live in regs across // the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon -// when we get here from a longjmp(). We force everthing out of registers +// when we get here from a longjmp(). We force everything out of registers // except for our own input by listing the relevant registers in Defs. By // doing so, we also cause the prologue/epilogue code to actively preserve // all of the callee-saved resgisters, which is exactly what we want. diff --git a/lib/Target/ARM/ARMInstrThumb.td b/lib/Target/ARM/ARMInstrThumb.td index 736b56aad7..9c88c10315 100644 --- a/lib/Target/ARM/ARMInstrThumb.td +++ b/lib/Target/ARM/ARMInstrThumb.td @@ -1439,7 +1439,7 @@ def tTPsoft : TIx2<0b11110, 0b11, 1, (outs), (ins), IIC_Br, // from some other function to get here, and we're using the stack frame for the // containing function to save/restore registers, we can't keep anything live in // regs across the eh_sjlj_setjmp(), else it will almost certainly have been -// tromped upon when we get here from a longjmp(). We force everthing out of +// tromped upon when we get here from a longjmp(). We force everything out of // registers except for our own input by listing the relevant registers in // Defs. By doing so, we also cause the prologue/epilogue code to actively // preserve all of the callee-saved resgisters, which is exactly what we want. diff --git a/lib/Target/ARM/ARMInstrThumb2.td b/lib/Target/ARM/ARMInstrThumb2.td index 685a9c31b2..50065f9f1a 100644 --- a/lib/Target/ARM/ARMInstrThumb2.td +++ b/lib/Target/ARM/ARMInstrThumb2.td @@ -2965,7 +2965,7 @@ let isCall = 1, // here, and we're using the stack frame for the containing function to // save/restore registers, we can't keep anything live in regs across // the eh_sjlj_setjmp(), else it will almost certainly have been tromped upon -// when we get here from a longjmp(). We force everthing out of registers +// when we get here from a longjmp(). We force everything out of registers // except for our own input by listing the relevant registers in Defs. By // doing so, we also cause the prologue/epilogue code to actively preserve // all of the callee-saved resgisters, which is exactly what we want. diff --git a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp index ac5cbfef30..334b50fd8a 100644 --- a/lib/Target/ARM/ARMLoadStoreOptimizer.cpp +++ b/lib/Target/ARM/ARMLoadStoreOptimizer.cpp @@ -1287,14 +1287,14 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { MergeLDR_STR(MBB, 0, CurrBase, CurrOpc, CurrSize, CurrPred, CurrPredReg, Scratch, MemOps, Merges); - // Try folding preceeding/trailing base inc/dec into the generated + // Try folding preceding/trailing base inc/dec into the generated // LDM/STM ops. 
for (unsigned i = 0, e = Merges.size(); i < e; ++i) if (MergeBaseUpdateLSMultiple(MBB, Merges[i], Advance, MBBI)) ++NumMerges; NumMerges += Merges.size(); - // Try folding preceeding/trailing base inc/dec into those load/store + // Try folding preceding/trailing base inc/dec into those load/store // that were not merged to form LDM/STM ops. for (unsigned i = 0; i != NumMemOps; ++i) if (!MemOps[i].Merged) @@ -1304,7 +1304,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { // RS may be pointing to an instruction that's deleted. RS->skipTo(prior(MBBI)); } else if (NumMemOps == 1) { - // Try folding preceeding/trailing base inc/dec into the single + // Try folding preceding/trailing base inc/dec into the single // load/store. if (MergeBaseUpdateLoadStore(MBB, MemOps[0].MBBI, TII, Advance, MBBI)) { ++NumMerges; @@ -1334,7 +1334,7 @@ bool ARMLoadStoreOpt::LoadStoreMultipleOpti(MachineBasicBlock &MBB) { } /// MergeReturnIntoLDM - If this is a exit BB, try merging the return ops -/// ("bx lr" and "mov pc, lr") into the preceeding stack restore so it +/// ("bx lr" and "mov pc, lr") into the preceding stack restore so it /// directly restore the value of LR into pc. /// ldmfd sp!, {..., lr} /// bx lr diff --git a/lib/Target/ARM/ARMSelectionDAGInfo.cpp b/lib/Target/ARM/ARMSelectionDAGInfo.cpp index 2b9202bff0..aa1e398c0e 100644 --- a/lib/Target/ARM/ARMSelectionDAGInfo.cpp +++ b/lib/Target/ARM/ARMSelectionDAGInfo.cpp @@ -35,7 +35,7 @@ ARMSelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, // This requires 4-byte alignment. if ((Align & 3) != 0) return SDValue(); - // This requires the copy size to be a constant, preferrably + // This requires the copy size to be a constant, preferably // within a subtarget-specific limit. ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); if (!ConstantSize) diff --git a/lib/Target/Alpha/Alpha.td b/lib/Target/Alpha/Alpha.td index 4508eda897..ae79c2e4b7 100644 --- a/lib/Target/Alpha/Alpha.td +++ b/lib/Target/Alpha/Alpha.td @@ -21,7 +21,7 @@ include "llvm/Target/Target.td" //===----------------------------------------------------------------------===// def FeatureCIX : SubtargetFeature<"cix", "HasCT", "true", - "Enable CIX extentions">; + "Enable CIX extensions">; //===----------------------------------------------------------------------===// // Register File Description diff --git a/lib/Target/Alpha/AlphaISelLowering.cpp b/lib/Target/Alpha/AlphaISelLowering.cpp index c4f43ab9e4..ee404f06fc 100644 --- a/lib/Target/Alpha/AlphaISelLowering.cpp +++ b/lib/Target/Alpha/AlphaISelLowering.cpp @@ -296,7 +296,7 @@ AlphaTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token chain and // flag operands which copy the outgoing args into registers. The InFlag in - // necessary since all emited instructions must be stuck together. + // necessary since all emitted instructions must be stuck together. 
SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/Alpha/AlphaInstrInfo.td b/lib/Target/Alpha/AlphaInstrInfo.td index 099d7157ca..b20171224e 100644 --- a/lib/Target/Alpha/AlphaInstrInfo.td +++ b/lib/Target/Alpha/AlphaInstrInfo.td @@ -1030,7 +1030,7 @@ def : Pat<(brcond (setune F8RC:$RA, immFPZ), bb:$DISP), //WMB Mfc 18.4400 Write memory barrier //MF_FPCR F-P 17.025 Move from FPCR //MT_FPCR F-P 17.024 Move to FPCR -//There are in the Multimedia extentions, so let's not use them yet +//There are in the Multimedia extensions, so let's not use them yet //def MAXSB8 : OForm<0x1C, 0x3E, "MAXSB8 $RA,$RB,$RC">; //Vector signed byte maximum //def MAXSW4 : OForm< 0x1C, 0x3F, "MAXSW4 $RA,$RB,$RC">; //Vector signed word maximum //def MAXUB8 : OForm<0x1C, 0x3C, "MAXUB8 $RA,$RB,$RC">; //Vector unsigned byte maximum diff --git a/lib/Target/Alpha/README.txt b/lib/Target/Alpha/README.txt index 9ae15174c5..cc170e3130 100644 --- a/lib/Target/Alpha/README.txt +++ b/lib/Target/Alpha/README.txt @@ -33,9 +33,9 @@ add crazy vector instructions (MVI): (MIN|MAX)(U|S)(B8|W4) min and max, signed and unsigned, byte and word PKWB, UNPKBW pack/unpack word to byte PKLB UNPKBL pack/unpack long to byte -PERR pixel error (sum accross bytes of bytewise abs(i8v8 a - i8v8 b)) +PERR pixel error (sum across bytes of bytewise abs(i8v8 a - i8v8 b)) -cmpbytes bytewise cmpeq of i8v8 a and i8v8 b (not part of MVI extentions) +cmpbytes bytewise cmpeq of i8v8 a and i8v8 b (not part of MVI extensions) this has some good examples for other operations that can be synthesised well from these rather meager vector ops (such as saturating add). diff --git a/lib/Target/Blackfin/BlackfinISelLowering.cpp b/lib/Target/Blackfin/BlackfinISelLowering.cpp index 7c80eec3ba..1e1f8c9dc2 100644 --- a/lib/Target/Blackfin/BlackfinISelLowering.cpp +++ b/lib/Target/Blackfin/BlackfinISelLowering.cpp @@ -345,7 +345,7 @@ BlackfinTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/CellSPU/SPU64InstrInfo.td b/lib/Target/CellSPU/SPU64InstrInfo.td index 5ef5716bd8..f340edfb0f 100644 --- a/lib/Target/CellSPU/SPU64InstrInfo.td +++ b/lib/Target/CellSPU/SPU64InstrInfo.td @@ -24,7 +24,7 @@ // 5. The code sequences for r64 and v2i64 are probably overly conservative, // compared to the code that gcc produces. // -// M00$E B!tes Kan be Pretty N@sTi!!!!! (appologies to Monty!) +// M00$E B!tes Kan be Pretty N@sTi!!!!! (apologies to Monty!) //-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~-~ // selb instruction definition for i64. 
Note that the selection mask is diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp index 743a4d7a0f..8668da3ca2 100644 --- a/lib/Target/CellSPU/SPUISelLowering.cpp +++ b/lib/Target/CellSPU/SPUISelLowering.cpp @@ -705,7 +705,7 @@ LowerLOAD(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) { offset )); - // Shift the low similarily + // Shift the low similarly // TODO: add SPUISD::SHL_BYTES low = DAG.getNode(SPUISD::SHL_BYTES, dl, MVT::i128, low, offset ); diff --git a/lib/Target/MBlaze/MBlazeISelLowering.cpp b/lib/Target/MBlaze/MBlazeISelLowering.cpp index f39826b1cf..585469b315 100644 --- a/lib/Target/MBlaze/MBlazeISelLowering.cpp +++ b/lib/Target/MBlaze/MBlazeISelLowering.cpp @@ -274,7 +274,7 @@ MBlazeTargetLowering::EmitCustomShift(MachineInstr *MI, F->insert(It, loop); F->insert(It, finish); - // Update machine-CFG edges by transfering adding all successors and + // Update machine-CFG edges by transferring adding all successors and // remaining instructions from the current block to the new block which // will contain the Phi node for the select. finish->splice(finish->begin(), MBB, @@ -456,7 +456,7 @@ MBlazeTargetLowering::EmitCustomAtomic(MachineInstr *MI, F->insert(It, start); F->insert(It, exit); - // Update machine-CFG edges by transfering adding all successors and + // Update machine-CFG edges by transferring adding all successors and // remaining instructions from the current block to the new block which // will contain the Phi node for the select. exit->splice(exit->begin(), MBB, llvm::next(MachineBasicBlock::iterator(MI)), @@ -778,7 +778,7 @@ LowerCall(SDValue Chain, SDValue Callee, CallingConv::ID CallConv, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/MBlaze/MBlazeSchedule3.td b/lib/Target/MBlaze/MBlazeSchedule3.td index 29848517b0..ccbf99dbd3 100644 --- a/lib/Target/MBlaze/MBlazeSchedule3.td +++ b/lib/Target/MBlaze/MBlazeSchedule3.td @@ -8,7 +8,7 @@ //===----------------------------------------------------------------------===// //===----------------------------------------------------------------------===// -// MBlaze instruction itineraries for the threee stage pipeline. +// MBlaze instruction itineraries for the three stage pipeline. //===----------------------------------------------------------------------===// def MBlazePipe3Itineraries : ProcessorItineraries< [IF,ID,EX], [], [ diff --git a/lib/Target/MSP430/MSP430ISelLowering.cpp b/lib/Target/MSP430/MSP430ISelLowering.cpp index a95d59c057..006785b1f7 100644 --- a/lib/Target/MSP430/MSP430ISelLowering.cpp +++ b/lib/Target/MSP430/MSP430ISelLowering.cpp @@ -515,7 +515,7 @@ MSP430TargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token chain and // flag operands which copy the outgoing args into registers. The InFlag in - // necessary since all emited instructions must be stuck together. + // necessary since all emitted instructions must be stuck together. 
SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/Mips/Mips.td b/lib/Target/Mips/Mips.td index 5102c699f0..b79016d788 100644 --- a/lib/Target/Mips/Mips.td +++ b/lib/Target/Mips/Mips.td @@ -81,7 +81,7 @@ def : Proc<"r6000", [FeatureMips2]>; def : Proc<"4ke", [FeatureMips32r2]>; -// Allegrex is a 32bit subset of r4000, both for interger and fp registers, +// Allegrex is a 32bit subset of r4000, both for integer and fp registers, // but much more similar to Mips2 than Mips3. It also contains some of // Mips32/Mips32r2 instructions and a custom vector fpu processor. def : Proc<"allegrex", [FeatureMips2, FeatureSingleFloat, FeatureEABI, diff --git a/lib/Target/Mips/MipsFrameLowering.cpp b/lib/Target/Mips/MipsFrameLowering.cpp index 5e4a7da1f1..66a3588a97 100644 --- a/lib/Target/Mips/MipsFrameLowering.cpp +++ b/lib/Target/Mips/MipsFrameLowering.cpp @@ -285,7 +285,7 @@ void MipsFrameLowering::emitPrologue(MachineFunction &MF) const { if (ATUsed) BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO)); - // Save the return address only if the function isnt a leaf one. + // Save the return address only if the function isn't a leaf one. // sw $ra, stack_loc($sp) if (MFI->adjustsStack()) { ATUsed = expandRegLargeImmPair(Mips::SP, RAOffset, NewReg, NewImm, MBB, @@ -360,7 +360,7 @@ void MipsFrameLowering::emitEpilogue(MachineFunction &MF, BuildMI(MBB, MBBI, dl, TII.get(Mips::ATMACRO)); } - // Restore the return address only if the function isnt a leaf one. + // Restore the return address only if the function isn't a leaf one. // lw $ra, stack_loc($sp) if (MFI->adjustsStack()) { ATUsed = expandRegLargeImmPair(Mips::SP, RAOffset, NewReg, NewImm, MBB, diff --git a/lib/Target/Mips/MipsISelLowering.cpp b/lib/Target/Mips/MipsISelLowering.cpp index 0e193f2953..7baaa0f4d0 100644 --- a/lib/Target/Mips/MipsISelLowering.cpp +++ b/lib/Target/Mips/MipsISelLowering.cpp @@ -723,7 +723,7 @@ LowerBRCOND(SDValue Op, SelectionDAG &DAG) const SDValue CondRes = CreateFPCmp(DAG, Op.getOperand(1)); - // Return if flag is not set by a floating point comparision. + // Return if flag is not set by a floating point comparison. if (CondRes.getOpcode() != MipsISD::FPCmp) return Op; @@ -741,7 +741,7 @@ LowerSELECT(SDValue Op, SelectionDAG &DAG) const { SDValue Cond = CreateFPCmp(DAG, Op.getOperand(0)); - // Return if flag is not set by a floating point comparision. + // Return if flag is not set by a floating point comparison. if (Cond.getOpcode() != MipsISD::FPCmp) return Op; @@ -867,7 +867,7 @@ LowerConstantPool(SDValue Op, SelectionDAG &DAG) const // gp_rel relocation // FIXME: we should reference the constant pool using small data sections, - // but the asm printer currently doens't support this feature without + // but the asm printer currently doesn't support this feature without // hacking it. This feature should come soon so we can uncomment the // stuff below. //if (IsInSmallSection(C->getType())) { @@ -1189,7 +1189,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. 
SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { @@ -1272,7 +1272,7 @@ MipsTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Create a stack location to hold GP when PIC is used. This stack // location is used on function prologue to save GP and also after all - // emited CALL's to restore GP. + // emitted CALL's to restore GP. if (IsPIC) { // Function can have an arbitrary number of calls, so // hold the LastArgStackLoc with the biggest offset. diff --git a/lib/Target/Mips/MipsInstrFPU.td b/lib/Target/Mips/MipsInstrFPU.td index 251f377769..995b6cd071 100644 --- a/lib/Target/Mips/MipsInstrFPU.td +++ b/lib/Target/Mips/MipsInstrFPU.td @@ -187,7 +187,7 @@ let Predicates = [IsNotSingleFloat, IsNotMipsI] in { "sdc1 $ft, $addr", [(store AFGR64:$ft, addr:$addr)]>; } -// LWC1 and SWC1 can always be emited with odd registers. +// LWC1 and SWC1 can always be emitted with odd registers. def LWC1 : FFI<0b110001, (outs FGR32:$ft), (ins mem:$addr), "lwc1 $ft, $addr", [(set FGR32:$ft, (load addr:$addr))]>; def SWC1 : FFI<0b111001, (outs), (ins FGR32:$ft, mem:$addr), "swc1 $ft, $addr", diff --git a/lib/Target/PowerPC/PPCISelLowering.cpp b/lib/Target/PowerPC/PPCISelLowering.cpp index 46b97e1334..5ed7600a15 100644 --- a/lib/Target/PowerPC/PPCISelLowering.cpp +++ b/lib/Target/PowerPC/PPCISelLowering.cpp @@ -2155,7 +2155,7 @@ CalculateParameterAndLinkageAreaSize(SelectionDAG &DAG, } /// CalculateTailCallSPDiff - Get the amount the stack pointer has to be -/// adjusted to accomodate the arguments for the tailcall. +/// adjusted to accommodate the arguments for the tailcall. static int CalculateTailCallSPDiff(SelectionDAG& DAG, bool isTailCall, unsigned ParamSize) { @@ -2396,7 +2396,7 @@ void PrepareTailCall(SelectionDAG &DAG, SDValue &InFlag, SDValue &Chain, // Emit a sequence of copyto/copyfrom virtual registers for arguments that // might overwrite each other in case of tail call optimization. SmallVector<SDValue, 8> MemOpChains2; - // Do not flag preceeding copytoreg stuff together with the following stuff. + // Do not flag preceding copytoreg stuff together with the following stuff. InFlag = SDValue(); StoreTailCallArgumentsToStackSlot(DAG, Chain, TailCallArguments, MemOpChains2, dl); diff --git a/lib/Target/Sparc/SparcISelLowering.cpp b/lib/Target/Sparc/SparcISelLowering.cpp index 70574c370f..edb62fa0c6 100644 --- a/lib/Target/Sparc/SparcISelLowering.cpp +++ b/lib/Target/Sparc/SparcISelLowering.cpp @@ -544,7 +544,7 @@ SparcTargetLowering::LowerCall(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Target/SystemZ/SystemZISelLowering.cpp b/lib/Target/SystemZ/SystemZISelLowering.cpp index 90939c3120..d331614400 100644 --- a/lib/Target/SystemZ/SystemZISelLowering.cpp +++ b/lib/Target/SystemZ/SystemZISelLowering.cpp @@ -451,7 +451,7 @@ SystemZTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token chain and // flag operands which copy the outgoing args into registers. The InFlag in - // necessary since all emited instructions must be stuck together. + // necessary since all emitted instructions must be stuck together. 
SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, diff --git a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h index d4a88d765d..a9c90f8f9b 100644 --- a/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h +++ b/lib/Target/X86/Disassembler/X86DisassemblerDecoder.h @@ -485,7 +485,7 @@ struct InternalInstruction { consumed___ indicates that the byte was already consumed and does not need to be consumed again */ - /* The VEX.vvvv field, which contains a thrid register operand for some AVX + /* The VEX.vvvv field, which contains a third register operand for some AVX instructions */ Reg vvvv; diff --git a/lib/Target/X86/README-X86-64.txt b/lib/Target/X86/README-X86-64.txt index e21d69a7bc..e7429a3081 100644 --- a/lib/Target/X86/README-X86-64.txt +++ b/lib/Target/X86/README-X86-64.txt @@ -36,7 +36,7 @@ _conv: cmovb %rcx, %rax ret -Seems like the jb branch has high likelyhood of being taken. It would have +Seems like the jb branch has high likelihood of being taken. It would have saved a few instructions. //===---------------------------------------------------------------------===// diff --git a/lib/Target/X86/README.txt b/lib/Target/X86/README.txt index 07722f44ec..94cf25b44f 100644 --- a/lib/Target/X86/README.txt +++ b/lib/Target/X86/README.txt @@ -1572,7 +1572,7 @@ Implement processor-specific optimizations for parity with GCC on these processors. GCC does two optimizations: 1. ix86_pad_returns inserts a noop before ret instructions if immediately - preceeded by a conditional branch or is the target of a jump. + preceded by a conditional branch or is the target of a jump. 2. ix86_avoid_jump_misspredicts inserts noops in cases where a 16-byte block of code contains more than 3 branches. diff --git a/lib/Target/X86/X86.td b/lib/Target/X86/X86.td index 912dff0f1d..25b8d3ea1d 100644 --- a/lib/Target/X86/X86.td +++ b/lib/Target/X86/X86.td @@ -7,7 +7,7 @@ // //===----------------------------------------------------------------------===// // -// This is a target description file for the Intel i386 architecture, refered to +// This is a target description file for the Intel i386 architecture, referred to // here as the "X86" architecture. // //===----------------------------------------------------------------------===// diff --git a/lib/Target/X86/X86FloatingPoint.cpp b/lib/Target/X86/X86FloatingPoint.cpp index 3aaa693279..325d061181 100644 --- a/lib/Target/X86/X86FloatingPoint.cpp +++ b/lib/Target/X86/X86FloatingPoint.cpp @@ -1307,7 +1307,7 @@ void FPS::handleSpecialFP(MachineBasicBlock::iterator &I) { // set up by FpSET_ST0, and our StackTop is off by one because of it. unsigned Op0 = getFPReg(MI->getOperand(0)); // Restore the actual StackTop from before Fp_SET_ST0. - // Note we can't handle Fp_SET_ST1 without a preceeding Fp_SET_ST0, and we + // Note we can't handle Fp_SET_ST1 without a preceding Fp_SET_ST0, and we // are not enforcing the constraint. ++StackTop; unsigned RegOnTop = getStackEntry(0); // This reg must remain in st(0). diff --git a/lib/Target/X86/X86FrameLowering.cpp b/lib/Target/X86/X86FrameLowering.cpp index 071fbe0cef..dee27a0336 100644 --- a/lib/Target/X86/X86FrameLowering.cpp +++ b/lib/Target/X86/X86FrameLowering.cpp @@ -296,7 +296,7 @@ void X86FrameLowering::emitCalleeSavedFrameMoves(MachineFunction &MF, // FIXME: This is dirty hack. The code itself is pretty mess right now. 
// It should be rewritten from scratch and generalized sometimes. - // Determine maximum offset (minumum due to stack growth). + // Determine maximum offset (minimum due to stack growth). int64_t MaxOffset = 0; for (std::vector<CalleeSavedInfo>::const_iterator I = CSI.begin(), E = CSI.end(); I != E; ++I) @@ -785,7 +785,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, assert(Offset >= 0 && "Offset should never be negative"); if (Offset) { - // Check for possible merge with preceeding ADD instruction. + // Check for possible merge with preceding ADD instruction. Offset += mergeSPUpdates(MBB, MBBI, StackPtr, true); emitSPUpdate(MBB, MBBI, StackPtr, Offset, Is64Bit, TII, *RegInfo); } @@ -829,7 +829,7 @@ void X86FrameLowering::emitEpilogue(MachineFunction &MF, int delta = -1*X86FI->getTCReturnAddrDelta(); MBBI = MBB.getLastNonDebugInstr(); - // Check for possible merge with preceeding ADD instruction. + // Check for possible merge with preceding ADD instruction. delta += mergeSPUpdates(MBB, MBBI, StackPtr, true); emitSPUpdate(MBB, MBBI, StackPtr, delta, Is64Bit, TII, *RegInfo); } diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index cd1d2019d2..449b87a55b 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -1952,7 +1952,7 @@ X86TargetLowering::EmitTailCallLoadRetAddr(SelectionDAG &DAG, return SDValue(OutRetAddr.getNode(), 1); } -/// EmitTailCallStoreRetAddr - Emit a store of the return adress if tail call +/// EmitTailCallStoreRetAddr - Emit a store of the return address if tail call /// optimization is performed and it is required (FPDiff!=0). static SDValue EmitTailCallStoreRetAddr(SelectionDAG & DAG, MachineFunction &MF, @@ -2043,7 +2043,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, Chain = DAG.getCALLSEQ_START(Chain, DAG.getIntPtrConstant(NumBytes, true)); SDValue RetAddrFrIdx; - // Load return adress for tail calls. + // Load return address for tail calls. if (isTailCall && FPDiff) Chain = EmitTailCallLoadRetAddr(DAG, RetAddrFrIdx, Chain, isTailCall, Is64Bit, FPDiff, dl); @@ -2200,7 +2200,7 @@ X86TargetLowering::LowerCall(SDValue Chain, SDValue Callee, SmallVector<SDValue, 8> MemOpChains2; SDValue FIN; int FI = 0; - // Do not flag preceeding copytoreg stuff together with the following stuff. + // Do not flag preceding copytoreg stuff together with the following stuff. InFlag = SDValue(); if (GuaranteedTailCallOpt) { for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) { @@ -4018,7 +4018,7 @@ static SDValue getShuffleScalarElt(SDNode *N, int Index, SelectionDAG &DAG, /// getNumOfConsecutiveZeros - Return the number of elements of a vector /// shuffle operation which come from a consecutively from a zero. The -/// search can start in two diferent directions, from left or right. +/// search can start in two different directions, from left or right. static unsigned getNumOfConsecutiveZeros(SDNode *N, int NumElems, bool ZerosFromLeft, SelectionDAG &DAG) { @@ -12216,7 +12216,7 @@ bool X86TargetLowering::ExpandInlineAsm(CallInst *CI) const { AsmPieces.clear(); SplitString(AsmStr, AsmPieces, " \t"); // Split with whitespace. - // FIXME: this should verify that we are targetting a 486 or better. If not, + // FIXME: this should verify that we are targeting a 486 or better. If not, // we will turn this bswap into something that will be lowered to logical ops // instead of emitting the bswap asm. For now, we don't support 486 or lower // so don't worry about this. 
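The X86ISelLowering hunk just above mentions turning a bswap into "logical ops" when a native byte-swap instruction cannot be assumed. For readers unfamiliar with that expansion, here is a minimal standalone C++ sketch of the idea; it is an illustration only, not code from this patch or from LLVM, and the helper name is invented.

#include <cassert>
#include <cstdint>

// Reverse the bytes of a 32-bit value using only shifts, masks, and ORs --
// the "logical ops" form a bswap can be expanded into on targets without
// a native byte-swap instruction. (Hypothetical helper, not LLVM code.)
static uint32_t bswap32_logical(uint32_t x) {
  return (x >> 24) |
         ((x >> 8) & 0x0000FF00u) |
         ((x << 8) & 0x00FF0000u) |
         (x << 24);
}

int main() {
  assert(bswap32_logical(0x11223344u) == 0x44332211u);
  assert(bswap32_logical(0xAABBCCDDu) == 0xDDCCBBAAu);
  return 0;
}
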
diff --git a/lib/Target/X86/X86InstrArithmetic.td b/lib/Target/X86/X86InstrArithmetic.td index f0ea068708..9f7a4b06dc 100644 --- a/lib/Target/X86/X86InstrArithmetic.td +++ b/lib/Target/X86/X86InstrArithmetic.td @@ -163,7 +163,7 @@ def IMUL64rm : RI<0xAF, MRMSrcMem, (outs GR64:$dst), } // Defs = [EFLAGS] -// Suprisingly enough, these are not two address instructions! +// Surprisingly enough, these are not two address instructions! let Defs = [EFLAGS] in { // Register-Integer Signed Integer Multiply def IMUL16rri : Ii16<0x69, MRMSrcReg, // GR16 = GR16*I16 diff --git a/lib/Target/X86/X86InstrInfo.cpp b/lib/Target/X86/X86InstrInfo.cpp index 85ab916d71..c48ea154ad 100644 --- a/lib/Target/X86/X86InstrInfo.cpp +++ b/lib/Target/X86/X86InstrInfo.cpp @@ -232,7 +232,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) assert(!RegOp2MemOpTable2Addr.count(RegOp) && "Duplicated entries?"); RegOp2MemOpTable2Addr[RegOp] = std::make_pair(MemOp, 0U); - // If this is not a reversable operation (because there is a many->one) + // If this is not a reversible operation (because there is a many->one) // mapping, don't insert the reverse of the operation into MemOp2RegOpTable. if (OpTbl2Addr[i][1] & TB_NOT_REVERSABLE) continue; @@ -335,7 +335,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) assert(!RegOp2MemOpTable0.count(RegOp) && "Duplicated entries?"); RegOp2MemOpTable0[RegOp] = std::make_pair(MemOp, Align); - // If this is not a reversable operation (because there is a many->one) + // If this is not a reversible operation (because there is a many->one) // mapping, don't insert the reverse of the operation into MemOp2RegOpTable. if (OpTbl0[i][1] & TB_NOT_REVERSABLE) continue; @@ -460,7 +460,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) assert(!RegOp2MemOpTable1.count(RegOp) && "Duplicate entries"); RegOp2MemOpTable1[RegOp] = std::make_pair(MemOp, Align); - // If this is not a reversable operation (because there is a many->one) + // If this is not a reversible operation (because there is a many->one) // mapping, don't insert the reverse of the operation into MemOp2RegOpTable. if (OpTbl1[i][1] & TB_NOT_REVERSABLE) continue; @@ -682,7 +682,7 @@ X86InstrInfo::X86InstrInfo(X86TargetMachine &tm) assert(!RegOp2MemOpTable2.count(RegOp) && "Duplicate entry!"); RegOp2MemOpTable2[RegOp] = std::make_pair(MemOp, Align); - // If this is not a reversable operation (because there is a many->one) + // If this is not a reversible operation (because there is a many->one) // mapping, don't insert the reverse of the operation into MemOp2RegOpTable. if (OpTbl2[i][1] & TB_NOT_REVERSABLE) continue; diff --git a/lib/Target/X86/X86InstrInfo.h b/lib/Target/X86/X86InstrInfo.h index 4625b4c986..8da68b5701 100644 --- a/lib/Target/X86/X86InstrInfo.h +++ b/lib/Target/X86/X86InstrInfo.h @@ -807,7 +807,7 @@ public: int64_t &Offset1, int64_t &Offset2) const; /// shouldScheduleLoadsNear - This is a used by the pre-regalloc scheduler to - /// determine (in conjuction with areLoadsFromSameBasePtr) if two loads should + /// determine (in conjunction with areLoadsFromSameBasePtr) if two loads should /// be scheduled togther. On some targets if two loads are loading from /// addresses in the same cache line, it's better if they are scheduled /// together. 
This function takes two integers that represent the load offsets diff --git a/lib/Target/X86/X86MCCodeEmitter.cpp b/lib/Target/X86/X86MCCodeEmitter.cpp index a2bd638c29..f195a67a30 100644 --- a/lib/Target/X86/X86MCCodeEmitter.cpp +++ b/lib/Target/X86/X86MCCodeEmitter.cpp @@ -514,7 +514,7 @@ void X86MCCodeEmitter::EmitVEXOpcodePrefix(uint64_t TSFlags, unsigned &CurByte, } // To only check operands before the memory address ones, start - // the search from the begining + // the search from the beginning if (IsDestMem) CurOp = 0; diff --git a/lib/Target/X86/X86RegisterInfo.cpp b/lib/Target/X86/X86RegisterInfo.cpp index 1f464f4be4..40b65e13f0 100644 --- a/lib/Target/X86/X86RegisterInfo.cpp +++ b/lib/Target/X86/X86RegisterInfo.cpp @@ -470,7 +470,7 @@ bool X86RegisterInfo::needsStackRealignment(const MachineFunction &MF) const { // FIXME: It's more complicated than this... if (0 && requiresRealignment && MFI->hasVarSizedObjects()) report_fatal_error( - "Stack realignment in presense of dynamic allocas is not supported"); + "Stack realignment in presence of dynamic allocas is not supported"); // If we've requested that we force align the stack do so now. if (ForceStackAlign) diff --git a/lib/Target/X86/X86SelectionDAGInfo.cpp b/lib/Target/X86/X86SelectionDAGInfo.cpp index 42e819343b..02754f9ae5 100644 --- a/lib/Target/X86/X86SelectionDAGInfo.cpp +++ b/lib/Target/X86/X86SelectionDAGInfo.cpp @@ -178,7 +178,7 @@ X86SelectionDAGInfo::EmitTargetCodeForMemcpy(SelectionDAG &DAG, DebugLoc dl, bool isVolatile, bool AlwaysInline, MachinePointerInfo DstPtrInfo, MachinePointerInfo SrcPtrInfo) const { - // This requires the copy size to be a constant, preferrably + // This requires the copy size to be a constant, preferably // within a subtarget-specific limit. ConstantSDNode *ConstantSize = dyn_cast<ConstantSDNode>(Size); if (!ConstantSize) diff --git a/lib/Target/XCore/XCoreISelLowering.cpp b/lib/Target/XCore/XCoreISelLowering.cpp index 4817787d75..89dbf3dfe5 100644 --- a/lib/Target/XCore/XCoreISelLowering.cpp +++ b/lib/Target/XCore/XCoreISelLowering.cpp @@ -967,7 +967,7 @@ XCoreTargetLowering::LowerCCCCallTo(SDValue Chain, SDValue Callee, // Build a sequence of copy-to-reg nodes chained together with token // chain and flag operands which copy the outgoing args into registers. - // The InFlag in necessary since all emited instructions must be + // The InFlag in necessary since all emitted instructions must be // stuck together. SDValue InFlag; for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) { diff --git a/lib/Transforms/IPO/ArgumentPromotion.cpp b/lib/Transforms/IPO/ArgumentPromotion.cpp index 0c650cfe64..54a7f679e0 100644 --- a/lib/Transforms/IPO/ArgumentPromotion.cpp +++ b/lib/Transforms/IPO/ArgumentPromotion.cpp @@ -771,8 +771,8 @@ CallGraphNode *ArgPromotion::DoPromotion(Function *F, // function empty. NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList()); - // Loop over the argument list, transfering uses of the old arguments over to - // the new arguments, also transfering over the names as well. + // Loop over the argument list, transferring uses of the old arguments over to + // the new arguments, also transferring over the names as well. 
// for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(), I2 = NF->arg_begin(); I != E; ++I) { diff --git a/lib/Transforms/IPO/DeadArgumentElimination.cpp b/lib/Transforms/IPO/DeadArgumentElimination.cpp index 4d1f7abdc3..d4eaf0c4a3 100644 --- a/lib/Transforms/IPO/DeadArgumentElimination.cpp +++ b/lib/Transforms/IPO/DeadArgumentElimination.cpp @@ -49,7 +49,7 @@ namespace { /// Struct that represents (part of) either a return value or a function /// argument. Used so that arguments and return values can be used - /// interchangably. + /// interchangeably. struct RetOrArg { RetOrArg(const Function *F, unsigned Idx, bool IsArg) : F(F), Idx(Idx), IsArg(IsArg) {} @@ -273,8 +273,8 @@ bool DAE::DeleteDeadVarargs(Function &Fn) { // function empty. NF->getBasicBlockList().splice(NF->begin(), Fn.getBasicBlockList()); - // Loop over the argument list, transfering uses of the old arguments over to - // the new arguments, also transfering over the names as well. While we're at + // Loop over the argument list, transferring uses of the old arguments over to + // the new arguments, also transferring over the names as well. While we're at // it, remove the dead arguments from the DeadArguments list. // for (Function::arg_iterator I = Fn.arg_begin(), E = Fn.arg_end(), @@ -379,7 +379,7 @@ DAE::Liveness DAE::SurveyUse(Value::const_use_iterator U, // The value is returned from a function. It's only live when the // function's return value is live. We use RetValNum here, for the case // that U is really a use of an insertvalue instruction that uses the - // orginal Use. + // original Use. RetOrArg Use = CreateRet(RI->getParent()->getParent(), RetValNum); // We might be live, depending on the liveness of Use. return MarkIfNotLive(Use, MaybeLiveUses); @@ -894,8 +894,8 @@ bool DAE::RemoveDeadStuffFromFunction(Function *F) { // function empty. NF->getBasicBlockList().splice(NF->begin(), F->getBasicBlockList()); - // Loop over the argument list, transfering uses of the old arguments over to - // the new arguments, also transfering over the names as well. + // Loop over the argument list, transferring uses of the old arguments over to + // the new arguments, also transferring over the names as well. i = 0; for (Function::arg_iterator I = F->arg_begin(), E = F->arg_end(), I2 = NF->arg_begin(); I != E; ++I, ++i) diff --git a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp index 6cf405328a..980a42b8f4 100644 --- a/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp +++ b/lib/Transforms/InstCombine/InstCombineAndOrXor.cpp @@ -331,7 +331,7 @@ Instruction *InstCombiner::OptAndOp(Instruction *Op, /// InsertRangeTest - Emit a computation of: (V >= Lo && V < Hi) if Inside is -/// true, otherwise (V < Lo || V >= Hi). In pratice, we emit the more efficient +/// true, otherwise (V < Lo || V >= Hi). In practice, we emit the more efficient /// (V-Lo) <u Hi-Lo. This method expects that Lo <= Hi. isSigned indicates /// whether to treat the V, Lo and HI as signed or not. IB is the location to /// insert new instructions. 
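The InsertRangeTest comment directly above describes replacing the two-sided check (V >= Lo && V < Hi) with the single unsigned comparison (V - Lo) <u (Hi - Lo). Below is a minimal standalone C++ sketch of why that fold is valid, assuming Lo <= Hi and unsigned wrap-around arithmetic; it is an illustration only, with invented function names, not LLVM code.

#include <cassert>
#include <cstdint>

// Straightforward form of the range test: Lo <= V < Hi.
static bool inRangeNaive(uint32_t V, uint32_t Lo, uint32_t Hi) {
  return V >= Lo && V < Hi;
}

// Folded form from the comment above: (V - Lo) <u (Hi - Lo).
// When V < Lo the subtraction wraps to a large value, so the single
// unsigned compare still rejects it; when V >= Lo it reduces to V < Hi.
static bool inRangeFolded(uint32_t V, uint32_t Lo, uint32_t Hi) {
  return (V - Lo) < (Hi - Lo);
}

int main() {
  const uint32_t Lo = 10, Hi = 20;
  for (uint32_t V = 0; V < 40; ++V)
    assert(inRangeNaive(V, Lo, Hi) == inRangeFolded(V, Lo, Hi));
  // A value far below Lo wraps to a huge unsigned difference and is rejected.
  assert(inRangeFolded(0xFFFFFFFFu, Lo, Hi) == false);
  return 0;
}
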
diff --git a/lib/Transforms/InstCombine/InstCombineCompares.cpp b/lib/Transforms/InstCombine/InstCombineCompares.cpp index 8afd2f8d00..d3af0aa6fa 100644 --- a/lib/Transforms/InstCombine/InstCombineCompares.cpp +++ b/lib/Transforms/InstCombine/InstCombineCompares.cpp @@ -699,7 +699,7 @@ Instruction *InstCombiner::FoldICmpAddOpCst(ICmpInst &ICI, return ReplaceInstUsesWith(ICI, ConstantInt::getTrue(X->getContext())); // From this point on, we know that (X+C <= X) --> (X+C < X) because C != 0, - // so the values can never be equal. Similiarly for all other "or equals" + // so the values can never be equal. Similarly for all other "or equals" // operators. // (X+1) <u X --> X >u (MAXUINT-1) --> X == 255 diff --git a/lib/Transforms/InstCombine/InstCombinePHI.cpp b/lib/Transforms/InstCombine/InstCombinePHI.cpp index c5f31fb202..c91b79338e 100644 --- a/lib/Transforms/InstCombine/InstCombinePHI.cpp +++ b/lib/Transforms/InstCombine/InstCombinePHI.cpp @@ -237,7 +237,7 @@ Instruction *InstCombiner::FoldPHIArgGEPIntoPHI(PHINode &PN) { /// obvious the value of the load is not changed from the point of the load to /// the end of the block it is in. /// -/// Finally, it is safe, but not profitable, to sink a load targetting a +/// Finally, it is safe, but not profitable, to sink a load targeting a /// non-address-taken alloca. Doing so will cause us to not promote the alloca /// to a register. static bool isSafeAndProfitableToSinkLoad(LoadInst *L) { diff --git a/lib/Transforms/Instrumentation/PathProfiling.cpp b/lib/Transforms/Instrumentation/PathProfiling.cpp index 830251c1b1..3def2af2fe 100644 --- a/lib/Transforms/Instrumentation/PathProfiling.cpp +++ b/lib/Transforms/Instrumentation/PathProfiling.cpp @@ -259,7 +259,7 @@ private: }; // --------------------------------------------------------------------------- -// PathProfiler is a module pass which intruments path profiling instructions +// PathProfiler is a module pass which instruments path profiling instructions // --------------------------------------------------------------------------- class PathProfiler : public ModulePass { private: diff --git a/lib/Transforms/Scalar/DeadStoreElimination.cpp b/lib/Transforms/Scalar/DeadStoreElimination.cpp index 9f36a38691..53e46400dc 100644 --- a/lib/Transforms/Scalar/DeadStoreElimination.cpp +++ b/lib/Transforms/Scalar/DeadStoreElimination.cpp @@ -485,7 +485,7 @@ bool DSE::runOnBasicBlock(BasicBlock &BB) { // away the store and we bail out. However, if we depend on on something // that overwrites the memory location we *can* potentially optimize it. // - // Find out what memory location the dependant instruction stores. + // Find out what memory location the dependent instruction stores. Instruction *DepWrite = InstDep.getInst(); AliasAnalysis::Location DepLoc = getLocForWrite(DepWrite, *AA); // If we didn't get a useful location, or if it isn't a size, bail out. diff --git a/lib/Transforms/Scalar/LoopStrengthReduce.cpp b/lib/Transforms/Scalar/LoopStrengthReduce.cpp index f6e2c88bc6..5abc790423 100644 --- a/lib/Transforms/Scalar/LoopStrengthReduce.cpp +++ b/lib/Transforms/Scalar/LoopStrengthReduce.cpp @@ -786,7 +786,7 @@ void Cost::RateFormula(const Formula &F, } } -/// Loose - Set this cost to a loosing value. +/// Loose - Set this cost to a losing value. 
void Cost::Loose() { NumRegs = ~0u; AddRecCost = ~0u; @@ -1824,7 +1824,7 @@ LSRInstance::OptimizeLoopTermCond() { } } -/// reconcileNewOffset - Determine if the given use can accomodate a fixup +/// reconcileNewOffset - Determine if the given use can accommodate a fixup /// at the given offset and other details. If so, update the use and /// return true. bool diff --git a/lib/Transforms/Scalar/MemCpyOptimizer.cpp b/lib/Transforms/Scalar/MemCpyOptimizer.cpp index bde0e5316c..1db2e6e227 100644 --- a/lib/Transforms/Scalar/MemCpyOptimizer.cpp +++ b/lib/Transforms/Scalar/MemCpyOptimizer.cpp @@ -688,7 +688,7 @@ bool MemCpyOpt::processMemCpyMemCpyDependence(MemCpyInst *M, MemCpyInst *MDep, if (M->getSource() == MDep->getSource()) return false; - // Second, the length of the memcpy's must be the same, or the preceeding one + // Second, the length of the memcpy's must be the same, or the preceding one // must be larger than the following one. ConstantInt *MDepLen = dyn_cast<ConstantInt>(MDep->getLength()); ConstantInt *MLen = dyn_cast<ConstantInt>(M->getLength()); diff --git a/lib/Transforms/Scalar/Reg2Mem.cpp b/lib/Transforms/Scalar/Reg2Mem.cpp index 459bb0621f..47afc770bb 100644 --- a/lib/Transforms/Scalar/Reg2Mem.cpp +++ b/lib/Transforms/Scalar/Reg2Mem.cpp @@ -9,7 +9,7 @@ // // This file demotes all registers to memory references. It is intented to be // the inverse of PromoteMemoryToRegister. By converting to loads, the only -// values live accross basic blocks are allocas and loads before phi nodes. +// values live across basic blocks are allocas and loads before phi nodes. // It is intended that this should make CFG hacking much easier. // To make later hacking easier, the entry block is split into two, such that // all introduced allocas and nothing else are in the entry block. diff --git a/lib/Transforms/Scalar/SCCP.cpp b/lib/Transforms/Scalar/SCCP.cpp index c82e929b36..db8eb85044 100644 --- a/lib/Transforms/Scalar/SCCP.cpp +++ b/lib/Transforms/Scalar/SCCP.cpp @@ -1989,7 +1989,7 @@ bool IPSCCP::runOnModule(Module &M) { ReturnsToZap[i]->setOperand(0, UndefValue::get(F->getReturnType())); } - // If we infered constant or undef values for globals variables, we can delete + // If we inferred constant or undef values for globals variables, we can delete // the global and any stores that remain to it. const DenseMap<GlobalVariable*, LatticeVal> &TG = Solver.getTrackedGlobals(); for (DenseMap<GlobalVariable*, LatticeVal>::const_iterator I = TG.begin(), diff --git a/lib/Transforms/Scalar/TailRecursionElimination.cpp b/lib/Transforms/Scalar/TailRecursionElimination.cpp index 5768ccbcd8..a221759710 100644 --- a/lib/Transforms/Scalar/TailRecursionElimination.cpp +++ b/lib/Transforms/Scalar/TailRecursionElimination.cpp @@ -36,7 +36,7 @@ // evaluated each time through the tail recursion. Safely keeping allocas // in the entry block requires analysis to proves that the tail-called // function does not read or write the stack object. -// 2. Tail recursion is only performed if the call immediately preceeds the +// 2. Tail recursion is only performed if the call immediately precedes the // return instruction. It's possible that there could be a jump between // the call and the return. // 3. 
There can be intervening operations between the call and the return that @@ -433,7 +433,7 @@ bool TailCallElim::EliminateRecursiveTailCall(CallInst *CI, ReturnInst *Ret, if (CanMoveAboveCall(BBI, CI)) continue; // If we can't move the instruction above the call, it might be because it - // is an associative and commutative operation that could be tranformed + // is an associative and commutative operation that could be transformed // using accumulator recursion elimination. Check to see if this is the // case, and if so, remember the initial accumulator value for later. if ((AccumulatorRecursionEliminationInitVal = diff --git a/lib/Transforms/Utils/BreakCriticalEdges.cpp b/lib/Transforms/Utils/BreakCriticalEdges.cpp index 14a3c9579d..caf2aeb4d3 100644 --- a/lib/Transforms/Utils/BreakCriticalEdges.cpp +++ b/lib/Transforms/Utils/BreakCriticalEdges.cpp @@ -56,7 +56,7 @@ char BreakCriticalEdges::ID = 0; INITIALIZE_PASS(BreakCriticalEdges, "break-crit-edges", "Break critical edges in CFG", false, false) -// Publically exposed interface to pass... +// Publicly exposed interface to pass... char &llvm::BreakCriticalEdgesID = BreakCriticalEdges::ID; FunctionPass *llvm::createBreakCriticalEdgesPass() { return new BreakCriticalEdges(); diff --git a/lib/Transforms/Utils/CodeExtractor.cpp b/lib/Transforms/Utils/CodeExtractor.cpp index 46601b4f5e..8c133ea7f5 100644 --- a/lib/Transforms/Utils/CodeExtractor.cpp +++ b/lib/Transforms/Utils/CodeExtractor.cpp @@ -157,7 +157,7 @@ void CodeExtractor::severSplitPHINodes(BasicBlock *&Header) { TI->replaceUsesOfWith(OldPred, NewBB); } - // Okay, everthing within the region is now branching to the right block, we + // Okay, everything within the region is now branching to the right block, we // just have to update the PHI nodes now, inserting PHI nodes into NewBB. for (AfterPHIs = OldPred->begin(); isa<PHINode>(AfterPHIs); ++AfterPHIs) { PHINode *PN = cast<PHINode>(AfterPHIs); diff --git a/lib/Transforms/Utils/InlineFunction.cpp b/lib/Transforms/Utils/InlineFunction.cpp index 2cb1d3b136..7d179092c0 100644 --- a/lib/Transforms/Utils/InlineFunction.cpp +++ b/lib/Transforms/Utils/InlineFunction.cpp @@ -320,7 +320,7 @@ static Value *HandleByValArgument(Value *Arg, Instruction *TheCall, // // Note that this only does one level of inlining. For example, if the // instruction 'call B' is inlined, and 'B' calls 'C', then the call to 'C' now -// exists in the instruction stream. Similiarly this will inline a recursive +// exists in the instruction stream. Similarly this will inline a recursive // function by one level. // bool llvm::InlineFunction(CallSite CS, InlineFunctionInfo &IFI) { diff --git a/lib/Transforms/Utils/LoopSimplify.cpp b/lib/Transforms/Utils/LoopSimplify.cpp index 9fe592962d..f02ffd20bc 100644 --- a/lib/Transforms/Utils/LoopSimplify.cpp +++ b/lib/Transforms/Utils/LoopSimplify.cpp @@ -115,7 +115,7 @@ INITIALIZE_PASS_DEPENDENCY(LoopInfo) INITIALIZE_PASS_END(LoopSimplify, "loop-simplify", "Canonicalize natural loops", true, false) -// Publically exposed interface to pass... +// Publicly exposed interface to pass... 
char &llvm::LoopSimplifyID = LoopSimplify::ID; Pass *llvm::createLoopSimplifyPass() { return new LoopSimplify(); } diff --git a/lib/Transforms/Utils/LowerSwitch.cpp b/lib/Transforms/Utils/LowerSwitch.cpp index 914a439718..ed733d393a 100644 --- a/lib/Transforms/Utils/LowerSwitch.cpp +++ b/lib/Transforms/Utils/LowerSwitch.cpp @@ -84,7 +84,7 @@ char LowerSwitch::ID = 0; INITIALIZE_PASS(LowerSwitch, "lowerswitch", "Lower SwitchInst's to branches", false, false) -// Publically exposed interface to pass... +// Publicly exposed interface to pass... char &llvm::LowerSwitchID = LowerSwitch::ID; // createLowerSwitchPass - Interface to this file... FunctionPass *llvm::createLowerSwitchPass() { diff --git a/lib/VMCore/ConstantFold.cpp b/lib/VMCore/ConstantFold.cpp index a12ff82124..9985adaf57 100644 --- a/lib/VMCore/ConstantFold.cpp +++ b/lib/VMCore/ConstantFold.cpp @@ -1736,7 +1736,7 @@ static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2, // with a single zero index, it must be nonzero. assert(CE1->getNumOperands() == 2 && !CE1->getOperand(1)->isNullValue() && - "Suprising getelementptr!"); + "Surprising getelementptr!"); return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT; } else { // If they are different globals, we don't know what the value is, diff --git a/lib/VMCore/LLVMContextImpl.h b/lib/VMCore/LLVMContextImpl.h index 23971aafa7..6ea4b48e79 100644 --- a/lib/VMCore/LLVMContextImpl.h +++ b/lib/VMCore/LLVMContextImpl.h @@ -184,7 +184,7 @@ public: // Concrete/Abstract TypeDescriptions - We lazily calculate type descriptions // for types as they are needed. Because resolution of types must invalidate - // all of the abstract type descriptions, we keep them in a seperate map to + // all of the abstract type descriptions, we keep them in a separate map to // make this easy. TypePrinting ConcreteTypeDescriptions; TypePrinting AbstractTypeDescriptions; diff --git a/lib/VMCore/PassManager.cpp b/lib/VMCore/PassManager.cpp index ca4455a436..637fa79195 100644 --- a/lib/VMCore/PassManager.cpp +++ b/lib/VMCore/PassManager.cpp @@ -982,7 +982,7 @@ void PMDataManager::add(Pass *P, bool ProcessAnalysis) { // Keep track of higher level analysis used by this manager. HigherLevelAnalysis.push_back(PRequired); } else - llvm_unreachable("Unable to accomodate Required Pass"); + llvm_unreachable("Unable to accommodate Required Pass"); } // Set P as P's last user until someone starts using P. diff --git a/lib/VMCore/PassRegistry.cpp b/lib/VMCore/PassRegistry.cpp index c97a170f50..fa92620b28 100644 --- a/lib/VMCore/PassRegistry.cpp +++ b/lib/VMCore/PassRegistry.cpp @@ -26,7 +26,7 @@ using namespace llvm; // FIXME: We use ManagedStatic to erase the pass registrar on shutdown. // Unfortunately, passes are registered with static ctors, and having -// llvm_shutdown clear this map prevents successful ressurection after +// llvm_shutdown clear this map prevents successful resurrection after // llvm_shutdown is run. Ideally we should find a solution so that we don't // leak the map, AND can still resurrect after shutdown. static ManagedStatic<PassRegistry> PassRegistryObj; diff --git a/lib/VMCore/TypesContext.h b/lib/VMCore/TypesContext.h index 6fb53be959..ad09478bbc 100644 --- a/lib/VMCore/TypesContext.h +++ b/lib/VMCore/TypesContext.h @@ -370,7 +370,7 @@ public: // Remove the old entry form TypesByHash. If the hash values differ // now, remove it from the old place. Otherwise, continue scanning - // withing this hashcode to reduce work. + // within this hashcode to reduce work. 
if (NewTypeHash != OldTypeHash) { RemoveFromTypesByHash(OldTypeHash, Ty); } else { diff --git a/projects/sample/autoconf/configure.ac b/projects/sample/autoconf/configure.ac index 4e61bee5e7..bb75bbdeb5 100644 --- a/projects/sample/autoconf/configure.ac +++ b/projects/sample/autoconf/configure.ac @@ -15,7 +15,7 @@ dnl Tell autoconf that this is an LLVM project being configured dnl This provides the --with-llvmsrc and --with-llvmobj options LLVM_CONFIG_PROJECT($LLVM_ABS_SRC_ROOT,$LLVM_ABS_OBJ_ROOT) -dnl Tell autoconf that the auxilliary files are actually located in +dnl Tell autoconf that the auxiliary files are actually located in dnl the LLVM autoconf directory, not here. AC_CONFIG_AUX_DIR($LLVM_SRC/autoconf) diff --git a/runtime/libprofile/OptimalEdgeProfiling.c b/runtime/libprofile/OptimalEdgeProfiling.c index eb7887b2ae..947da53260 100644 --- a/runtime/libprofile/OptimalEdgeProfiling.c +++ b/runtime/libprofile/OptimalEdgeProfiling.c @@ -26,7 +26,7 @@ static void OptEdgeProfAtExitHandler() { /* Note that, although the array has a counter for each edge, not all * counters are updated, the ones that are not used are initialised with -1. * When loading this information the counters with value -1 have to be - * recalculated, it is guranteed that this is possible. + * recalculated, it is guaranteed that this is possible. */ write_profiling_data(OptEdgeInfo, ArrayStart, NumElements); } diff --git a/runtime/libprofile/PathProfiling.c b/runtime/libprofile/PathProfiling.c index 651e63cbdd..29671f53d6 100644 --- a/runtime/libprofile/PathProfiling.c +++ b/runtime/libprofile/PathProfiling.c @@ -105,7 +105,7 @@ void writeArrayTable(uint32_t fNumber, ftEntry_t* ft, uint32_t* funcCount) { } inline uint32_t hash (uint32_t key) { - /* this may benifit from a proper hash function */ + /* this may benefit from a proper hash function */ return key%ARBITRARY_HASH_BIN_COUNT; } diff --git a/test/DebugInfo/2010-04-13-PubType.ll b/test/DebugInfo/2010-04-13-PubType.ll index 371169fe18..db7bb0ad60 100644 --- a/test/DebugInfo/2010-04-13-PubType.ll +++ b/test/DebugInfo/2010-04-13-PubType.ll @@ -1,7 +1,7 @@ ; RUN: llc -O0 -asm-verbose < %s > %t ; RUN: grep "External Name" %t | grep -v X ; RUN: grep "External Name" %t | grep Y | count 1 -; Test to check type with no defintion is listed in pubtypes section. +; Test to check type with no definition is listed in pubtypes section. %struct.X = type opaque %struct.Y = type { i32 } diff --git a/test/FrontendC/2006-05-01-AppleAlignmentPragma.c b/test/FrontendC/2006-05-01-AppleAlignmentPragma.c index c9050aa9e4..233968b51a 100644 --- a/test/FrontendC/2006-05-01-AppleAlignmentPragma.c +++ b/test/FrontendC/2006-05-01-AppleAlignmentPragma.c @@ -1,7 +1,7 @@ // RUN: %llvmgcc %s -S -o - #ifdef __APPLE__ -/* test that X is layed out correctly when this pragma is used. */ +/* test that X is laid out correctly when this pragma is used. */ #pragma options align=mac68k #endif diff --git a/test/FrontendC/2010-07-27-MinNoFoldConst.c b/test/FrontendC/2010-07-27-MinNoFoldConst.c index 7cd8b4c437..ea711e5ddd 100644 --- a/test/FrontendC/2010-07-27-MinNoFoldConst.c +++ b/test/FrontendC/2010-07-27-MinNoFoldConst.c @@ -10,7 +10,7 @@ static void bad(unsigned int v1, unsigned int v2) { // MIN(1631381461u * v2 - 4047041419, 1631381461u * v1 - 4047041419) // // 1631381461u * 1273463329u = 2077504466193943669, but 32-bit overflow clips -// this to 4047041419. This breaks the comparision implicit in the MIN(). +// this to 4047041419. This breaks the comparison implicit in the MIN(). 
// Two multiply operations suggests the bad optimization is happening; // one multiplication, after the MIN(), is correct. // CHECK: mul diff --git a/test/MC/MachO/darwin-x86_64-diff-relocs.s b/test/MC/MachO/darwin-x86_64-diff-relocs.s index 449d2f593e..f5d93ae6c0 100644 --- a/test/MC/MachO/darwin-x86_64-diff-relocs.s +++ b/test/MC/MachO/darwin-x86_64-diff-relocs.s @@ -157,7 +157,7 @@ L3: // FIXME: Unfortunately, we do not get these relocations in exactly the same // order as Darwin 'as'. It turns out that 'as' *usually* ends up emitting // them in reverse address order, but sometimes it allocates some -// additional relocations late so these end up preceed the other entries. I +// additional relocations late so these end up precede the other entries. I // haven't figured out the exact criteria for this yet. // CHECK: (('word-0', 0x56), diff --git a/test/TableGen/TargetInstrInfo.td b/test/TableGen/TargetInstrInfo.td index 146ef6fd76..6c39d5ce57 100644 --- a/test/TableGen/TargetInstrInfo.td +++ b/test/TableGen/TargetInstrInfo.td @@ -110,7 +110,7 @@ def SHL32rCL : Inst<(ops R32:$dst, R32:$src), [(set R32:$dst, (shl R32:$src, CL))]>; // The RTL list is a list, allowing complex instructions to be defined easily. -// Temporary 'internal' registers can be used to break instructions appart. +// Temporary 'internal' registers can be used to break instructions apart. let isTwoAddress = 1 in def XOR32mi : Inst<(ops addr:$addr, imm32:$imm), "xor $dst, $src2", 0x81, MRM6m, diff --git a/test/Transforms/ConstProp/2002-05-03-NotOperator.ll b/test/Transforms/ConstProp/2002-05-03-NotOperator.ll index d9cd67406b..b957220aa9 100644 --- a/test/Transforms/ConstProp/2002-05-03-NotOperator.ll +++ b/test/Transforms/ConstProp/2002-05-03-NotOperator.ll @@ -1,4 +1,4 @@ -; This bug has to do with the fact that constant propogation was implemented in +; This bug has to do with the fact that constant propagation was implemented in ; terms of _logical_ not (! in C) instead of _bitwise_ not (~ in C). This was ; due to a spec change. diff --git a/test/Transforms/ConstProp/basictest.ll b/test/Transforms/ConstProp/basictest.ll index df57fb6870..d0d0a5bb33 100644 --- a/test/Transforms/ConstProp/basictest.ll +++ b/test/Transforms/ConstProp/basictest.ll @@ -1,6 +1,6 @@ ; RUN: opt < %s -constprop -die -S | FileCheck %s -; This is a basic sanity check for constant propogation. The add instruction +; This is a basic sanity check for constant propagation. The add instruction ; should be eliminated. define i32 @test1(i1 %B) { br i1 %B, label %BB1, label %BB2 diff --git a/test/Transforms/ConstProp/logicaltest.ll b/test/Transforms/ConstProp/logicaltest.ll index c74296aa2c..abd3275a4f 100644 --- a/test/Transforms/ConstProp/logicaltest.ll +++ b/test/Transforms/ConstProp/logicaltest.ll @@ -1,4 +1,4 @@ -; Ensure constant propogation of logical instructions is working correctly. +; Ensure constant propagation of logical instructions is working correctly. ; RUN: opt < %s -constprop -die -S | FileCheck %s ; CHECK-NOT: {{and|or|xor}} diff --git a/test/Transforms/ConstProp/phi.ll b/test/Transforms/ConstProp/phi.ll index 3d9e284457..c65d34cc93 100644 --- a/test/Transforms/ConstProp/phi.ll +++ b/test/Transforms/ConstProp/phi.ll @@ -1,4 +1,4 @@ -; This is a basic sanity check for constant propogation. The add instruction +; This is a basic sanity check for constant propagation. The add instruction ; should be eliminated. 
; RUN: opt < %s -constprop -die -S | not grep phi diff --git a/test/Transforms/InstCombine/and-or-not.ll b/test/Transforms/InstCombine/and-or-not.ll index 37ec3bc1aa..bd878b04a3 100644 --- a/test/Transforms/InstCombine/and-or-not.ll +++ b/test/Transforms/InstCombine/and-or-not.ll @@ -4,7 +4,7 @@ ; PR1510 -; These are all equivelent to A^B +; These are all equivalent to A^B define i32 @test1(i32 %a, i32 %b) { entry: diff --git a/test/Transforms/SCCP/apint-basictest.ll b/test/Transforms/SCCP/apint-basictest.ll index c03bfef743..f6ef1ab3f2 100644 --- a/test/Transforms/SCCP/apint-basictest.ll +++ b/test/Transforms/SCCP/apint-basictest.ll @@ -1,4 +1,4 @@ -; This is a basic sanity check for constant propogation. The add instruction +; This is a basic sanity check for constant propagation. The add instruction ; should be eliminated. ; RUN: opt < %s -sccp -S | not grep add diff --git a/test/Transforms/SCCP/apint-basictest2.ll b/test/Transforms/SCCP/apint-basictest2.ll index 173482786f..ad8b4a460c 100644 --- a/test/Transforms/SCCP/apint-basictest2.ll +++ b/test/Transforms/SCCP/apint-basictest2.ll @@ -1,4 +1,4 @@ -; This is a basic sanity check for constant propogation. The add instruction +; This is a basic sanity check for constant propagation. The add instruction ; and phi instruction should be eliminated. ; RUN: opt < %s -sccp -S | not grep phi diff --git a/test/Transforms/SCCP/apint-basictest3.ll b/test/Transforms/SCCP/apint-basictest3.ll index 47671bf46b..b8fcca6fda 100644 --- a/test/Transforms/SCCP/apint-basictest3.ll +++ b/test/Transforms/SCCP/apint-basictest3.ll @@ -1,4 +1,4 @@ -; This is a basic sanity check for constant propogation. It tests the basic +; This is a basic sanity check for constant propagation. It tests the basic ; arithmatic operations. diff --git a/test/Transforms/SCCP/apint-basictest4.ll b/test/Transforms/SCCP/apint-basictest4.ll index 41036ea002..862426020e 100644 --- a/test/Transforms/SCCP/apint-basictest4.ll +++ b/test/Transforms/SCCP/apint-basictest4.ll @@ -1,4 +1,4 @@ -; This is a basic sanity check for constant propogation. It tests the basic +; This is a basic sanity check for constant propagation. It tests the basic ; logic operations. 
diff --git a/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll b/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll index 87a08b7eaa..ce70a1b13b 100644 --- a/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll +++ b/test/Transforms/ScalarRepl/2008-06-05-loadstore-agg.ll @@ -13,7 +13,7 @@ define i32 @foo() { %res2 = insertvalue { i32, i32 } %res1, i32 2, 1 ; <{ i32, i32 }> [#uses=1] ; And store it store { i32, i32 } %res2, { i32, i32 }* %target - ; Actually use %target, so it doesn't get removed alltogether + ; Actually use %target, so it doesn't get removed altogether %ptr = getelementptr { i32, i32 }* %target, i32 0, i32 0 %val = load i32* %ptr ret i32 %val @@ -26,7 +26,7 @@ define i32 @bar() { %res2 = insertvalue [ 2 x i32 ] %res1, i32 2, 1 ; <{ i32, i32 }> [#uses=1] ; And store it store [ 2 x i32 ] %res2, [ 2 x i32 ]* %target - ; Actually use %target, so it doesn't get removed alltogether + ; Actually use %target, so it doesn't get removed altogether %ptr = getelementptr [ 2 x i32 ]* %target, i32 0, i32 0 %val = load i32* %ptr ret i32 %val diff --git a/tools/llvm-diff/DifferenceEngine.cpp b/tools/llvm-diff/DifferenceEngine.cpp index 2344e398b0..ba2cec2992 100644 --- a/tools/llvm-diff/DifferenceEngine.cpp +++ b/tools/llvm-diff/DifferenceEngine.cpp @@ -267,7 +267,7 @@ class FunctionDifferenceEngine { } else if (isa<PHINode>(L)) { // FIXME: implement. - // This is really wierd; type uniquing is broken? + // This is really weird; type uniquing is broken? if (L->getType() != R->getType()) { if (!L->getType()->isPointerTy() || !R->getType()->isPointerTy()) { if (Complain) Engine.log("different phi types"); diff --git a/tools/llvmc/doc/LLVMC-Reference.rst b/tools/llvmc/doc/LLVMC-Reference.rst index ec9098b90c..44d1e17f04 100644 --- a/tools/llvmc/doc/LLVMC-Reference.rst +++ b/tools/llvmc/doc/LLVMC-Reference.rst @@ -306,7 +306,7 @@ separate option groups syntactically. sign: ``-std c99``. At most one occurrence is allowed. - ``parameter_list_option`` - same as the above, but more than one option - occurence is allowed. + occurrence is allowed. - ``prefix_option`` - same as the parameter_option, but the option name and argument do not have to be separated. Example: ``-ofile``. This can be also @@ -314,7 +314,7 @@ separate option groups syntactically. (``=file`` will be interpreted as option value). At most one occurrence is allowed. - - ``prefix_list_option`` - same as the above, but more than one occurence of + - ``prefix_list_option`` - same as the above, but more than one occurrence of the option is allowed; example: ``-lm -lpthread``. - ``alias_option`` - a special option type for creating aliases. Unlike other diff --git a/tools/llvmc/src/Base.td.in b/tools/llvmc/src/Base.td.in index 50533f11fa..527913cba1 100644 --- a/tools/llvmc/src/Base.td.in +++ b/tools/llvmc/src/Base.td.in @@ -191,7 +191,7 @@ class llvm_gcc_based <string cmd, string in_lang, string E_ext, dag out_lang, // ('-S' && '-emit-llvm') && !('opt') -> output .ll (and (switch_on "emit-llvm", "S"), (not (switch_on "opt"))), [(forward "S"), (output_suffix "ll")], - // Ususally just output .bc + // Usually just output .bc (not (switch_on "fsyntax-only")), [(append_cmd "-c"), (append_cmd "-emit-llvm")], diff --git a/tools/lto/lto.cpp b/tools/lto/lto.cpp index fe19921400..dd658d1751 100644 --- a/tools/lto/lto.cpp +++ b/tools/lto/lto.cpp @@ -281,7 +281,7 @@ bool lto_codegen_write_merged_modules(lto_code_gen_t cg, const char* path) // // Generates code for all added modules into one native object file. 
-// On sucess returns a pointer to a generated mach-o/ELF buffer and +// On success returns a pointer to a generated mach-o/ELF buffer and // length set to the buffer size. The buffer is owned by the // lto_code_gen_t and will be freed when lto_codegen_dispose() // is called, or lto_codegen_compile() is called again. diff --git a/utils/DSAextract.py b/utils/DSAextract.py index 134e9453fb..89dece1f33 100644 --- a/utils/DSAextract.py +++ b/utils/DSAextract.py @@ -58,7 +58,7 @@ node_set = set() #read the file one line at a time buffer = input.readline() while buffer != '': - #filter out the unecessary checks on all the edge lines + #filter out the unnecessary checks on all the edge lines if not arrowexp.search(buffer): #check to see if this is a node we are looking for for regexp in regexp_list: diff --git a/utils/KillTheDoctor/KillTheDoctor.cpp b/utils/KillTheDoctor/KillTheDoctor.cpp index 7a89dd379b..1ddae0bc8b 100644 --- a/utils/KillTheDoctor/KillTheDoctor.cpp +++ b/utils/KillTheDoctor/KillTheDoctor.cpp @@ -169,14 +169,14 @@ namespace { static error_code GetFileNameFromHandle(HANDLE FileHandle, std::string& Name) { char Filename[MAX_PATH+1]; - bool Sucess = false; + bool Success = false; Name.clear(); // Get the file size. LARGE_INTEGER FileSize; - Sucess = ::GetFileSizeEx(FileHandle, &FileSize); + Success = ::GetFileSizeEx(FileHandle, &FileSize); - if (!Sucess) + if (!Success) return windows_error(::GetLastError()); // Create a file mapping object. @@ -198,12 +198,12 @@ static error_code GetFileNameFromHandle(HANDLE FileHandle, if (!MappedFile) return windows_error(::GetLastError()); - Sucess = ::GetMappedFileNameA(::GetCurrentProcess(), + Success = ::GetMappedFileNameA(::GetCurrentProcess(), MappedFile, Filename, array_lengthof(Filename) - 1); - if (!Sucess) + if (!Success) return windows_error(::GetLastError()); else { Name = Filename; diff --git a/utils/NewNightlyTest.pl b/utils/NewNightlyTest.pl index 1b48168262..da806e9c0e 100755 --- a/utils/NewNightlyTest.pl +++ b/utils/NewNightlyTest.pl @@ -794,7 +794,7 @@ my %hash_of_data = ( 'endtime' => $endtime, 'target_triple' => $targetTriple, - # Unused, but left around for backwards compatability. + # Unused, but left around for backwards compatibility. 'warnings' => "", 'cvsusercommitlist' => "", 'cvsuserupdatelist' => "", diff --git a/utils/TableGen/ARMDecoderEmitter.cpp b/utils/TableGen/ARMDecoderEmitter.cpp index 07ff213bed..ed44f821a2 100644 --- a/utils/TableGen/ARMDecoderEmitter.cpp +++ b/utils/TableGen/ARMDecoderEmitter.cpp @@ -607,7 +607,7 @@ void ARMFilter::recurse() { for (bitIndex = 0; bitIndex < NumBits; bitIndex++) BitValueArray[StartBit + bitIndex] = BIT_UNSET; - // Delegates to an inferior filter chooser for futher processing on this + // Delegates to an inferior filter chooser for further processing on this // group of instructions whose segment values are variable. FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>( (unsigned)-1, @@ -639,7 +639,7 @@ void ARMFilter::recurse() { BitValueArray[StartBit + bitIndex] = BIT_FALSE; } - // Delegates to an inferior filter chooser for futher processing on this + // Delegates to an inferior filter chooser for further processing on this // category of instructions. 
FilterChooserMap.insert(std::pair<unsigned, ARMFilterChooser*>( mapIterator->first, diff --git a/utils/TableGen/AsmMatcherEmitter.cpp b/utils/TableGen/AsmMatcherEmitter.cpp index 369ec9001b..1d14037d3a 100644 --- a/utils/TableGen/AsmMatcherEmitter.cpp +++ b/utils/TableGen/AsmMatcherEmitter.cpp @@ -88,7 +88,7 @@ // 2. The operand matcher will try every possible entry with the same // mnemonic and will check if the target feature for this mnemonic also // matches. After that, if the operand to be matched has its index -// present in the mask, a successfull match occurs. Otherwise, fallback +// present in the mask, a successful match occurs. Otherwise, fallback // to the regular operand parsing. // // 3. For a match success, each operand class that has a 'ParserMethod' @@ -258,7 +258,7 @@ public: return ValueName < RHS.ValueName; default: - // This class preceeds the RHS if it is a proper subset of the RHS. + // This class precedes the RHS if it is a proper subset of the RHS. if (isSubsetOf(RHS)) return true; if (RHS.isSubsetOf(*this)) @@ -1265,7 +1265,7 @@ void AsmMatcherInfo::BuildInfo() { II->BuildAliasResultOperands(); } - // Reorder classes so that classes preceed super classes. + // Reorder classes so that classes precede super classes. std::sort(Classes.begin(), Classes.end(), less_ptr<ClassInfo>()); } @@ -1538,7 +1538,7 @@ static void EmitConvertToMCInst(CodeGenTarget &Target, StringRef ClassName, // operand from the earlier one.We can only tie single MCOperand values. //assert(OpInfo.MINumOperands == 1 && "Not a singular MCOperand"); unsigned TiedOp = OpInfo.TiedOperandNum; - assert(i > TiedOp && "Tied operand preceeds its target!"); + assert(i > TiedOp && "Tied operand precedes its target!"); CaseOS << " Inst.addOperand(Inst.getOperand(" << TiedOp << "));\n"; Signature += "__Tie" + utostr(TiedOp); break; diff --git a/utils/TableGen/FixedLenDecoderEmitter.cpp b/utils/TableGen/FixedLenDecoderEmitter.cpp index bbcecabf76..af73bcfa67 100644 --- a/utils/TableGen/FixedLenDecoderEmitter.cpp +++ b/utils/TableGen/FixedLenDecoderEmitter.cpp @@ -438,7 +438,7 @@ void Filter::recurse() { for (bitIndex = 0; bitIndex < NumBits; bitIndex++) BitValueArray[StartBit + bitIndex] = BIT_UNSET; - // Delegates to an inferior filter chooser for futher processing on this + // Delegates to an inferior filter chooser for further processing on this // group of instructions whose segment values are variable. FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>( (unsigned)-1, @@ -471,7 +471,7 @@ void Filter::recurse() { BitValueArray[StartBit + bitIndex] = BIT_FALSE; } - // Delegates to an inferior filter chooser for futher processing on this + // Delegates to an inferior filter chooser for further processing on this // category of instructions. FilterChooserMap.insert(std::pair<unsigned, FilterChooser*>( mapIterator->first, diff --git a/utils/TableGen/OptParserEmitter.cpp b/utils/TableGen/OptParserEmitter.cpp index 6892912eee..431026c669 100644 --- a/utils/TableGen/OptParserEmitter.cpp +++ b/utils/TableGen/OptParserEmitter.cpp @@ -35,7 +35,7 @@ static int CompareOptionRecords(const void *Av, const void *Bv) { const Record *A = *(Record**) Av; const Record *B = *(Record**) Bv; - // Sentinel options preceed all others and are only ordered by precedence. + // Sentinel options precede all others and are only ordered by precedence. 
bool ASent = A->getValueAsDef("Kind")->getValueAsBit("Sentinel"); bool BSent = B->getValueAsDef("Kind")->getValueAsBit("Sentinel"); if (ASent != BSent) diff --git a/utils/TableGen/X86DisassemblerTables.h b/utils/TableGen/X86DisassemblerTables.h index fe4ad6f00d..d16ebfca41 100644 --- a/utils/TableGen/X86DisassemblerTables.h +++ b/utils/TableGen/X86DisassemblerTables.h @@ -79,7 +79,7 @@ private: /// regardless of ModR/M byte, two entries - one for bytes 0x00-0xbf and one /// for bytes 0xc0-0xff -, or 256 entries, one for each possible byte. /// nnnn is the number of a table for looking up these values. The tables - /// are writen separately so that tables consisting entirely of zeros will + /// are written separately so that tables consisting entirely of zeros will /// not be duplicated. (These all have the name modRMEmptyTable.) A table /// is printed as: /// diff --git a/utils/buildit/GNUmakefile b/utils/buildit/GNUmakefile index 5140e1508a..470ee76b60 100644 --- a/utils/buildit/GNUmakefile +++ b/utils/buildit/GNUmakefile @@ -6,7 +6,7 @@ # # You can specify TARGETS=ppc (or i386) on the buildit command line to limit the # build to just one target. The default is for ppc and i386. The compiler -# targetted at this host gets built anyway, but not installed unless it's listed +# targeted at this host gets built anyway, but not installed unless it's listed # in TARGETS. # Include the set of standard Apple makefile definitions. diff --git a/utils/lit/setup.py b/utils/lit/setup.py index 738ee23776..a94e6ea833 100644 --- a/utils/lit/setup.py +++ b/utils/lit/setup.py @@ -38,7 +38,7 @@ Features Documentation ============= -The offical *lit* documentation is in the man page, available online at the LLVM +The official *lit* documentation is in the man page, available online at the LLVM Command Guide: http://llvm.org/cmds/lit.html. diff --git a/utils/unittest/googletest/gtest-filepath.cc b/utils/unittest/googletest/gtest-filepath.cc index c1ef9188ad..8d1d67ec7b 100644 --- a/utils/unittest/googletest/gtest-filepath.cc +++ b/utils/unittest/googletest/gtest-filepath.cc @@ -123,7 +123,7 @@ FilePath FilePath::RemoveExtension(const char* extension) const { return *this; } -// Returns a pointer to the last occurence of a valid path separator in +// Returns a pointer to the last occurrence of a valid path separator in // the FilePath. On Windows, for example, both '/' and '\' are valid path // separators. Returns NULL if no path separator was found. const char* FilePath::FindLastPathSeparator() const { diff --git a/utils/unittest/googletest/gtest.cc b/utils/unittest/googletest/gtest.cc index 51732afd49..9aa5441149 100644 --- a/utils/unittest/googletest/gtest.cc +++ b/utils/unittest/googletest/gtest.cc @@ -1415,7 +1415,7 @@ AssertionResult IsHRESULTFailure(const char* expr, long hr) { // NOLINT // Utility functions for encoding Unicode text (wide strings) in // UTF-8. 
-// A Unicode code-point can have upto 21 bits, and is encoded in UTF-8 +// A Unicode code-point can have up to 21 bits, and is encoded in UTF-8 // like this: // // Code-point length Encoding diff --git a/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h b/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h index 4b76d79506..efbc176029 100644 --- a/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h +++ b/utils/unittest/googletest/include/gtest/internal/gtest-filepath.h @@ -196,7 +196,7 @@ class GTEST_API_ FilePath { void Normalize(); - // Returns a pointer to the last occurence of a valid path separator in + // Returns a pointer to the last occurrence of a valid path separator in // the FilePath. On Windows, for example, both '/' and '\' are valid path // separators. Returns NULL if no path separator was found. const char* FindLastPathSeparator() const;
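
The gtest.cc hunk above touches a comment describing how a Unicode code point of up to 21 bits is laid out in one to four UTF-8 bytes. As a minimal illustrative sketch only, and not the googletest implementation (the name EncodeUtf8 and the use of std::string are assumptions made here for clarity), the length selection that comment refers to can be written as:

  #include <cstdint>
  #include <string>

  // Illustrative only: encode one Unicode code point (at most 21 bits,
  // i.e. up to U+10FFFF) as UTF-8. Input validation (surrogate ranges,
  // out-of-range values) is deliberately omitted to keep the sketch short.
  std::string EncodeUtf8(uint32_t cp) {
    std::string out;
    if (cp <= 0x7F) {                      // 7 bits  -> 1 byte
      out += static_cast<char>(cp);
    } else if (cp <= 0x7FF) {              // 11 bits -> 2 bytes
      out += static_cast<char>(0xC0 | (cp >> 6));
      out += static_cast<char>(0x80 | (cp & 0x3F));
    } else if (cp <= 0xFFFF) {             // 16 bits -> 3 bytes
      out += static_cast<char>(0xE0 | (cp >> 12));
      out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
      out += static_cast<char>(0x80 | (cp & 0x3F));
    } else {                               // 21 bits -> 4 bytes
      out += static_cast<char>(0xF0 | (cp >> 18));
      out += static_cast<char>(0x80 | ((cp >> 12) & 0x3F));
      out += static_cast<char>(0x80 | ((cp >> 6) & 0x3F));
      out += static_cast<char>(0x80 | (cp & 0x3F));
    }
    return out;
  }

For example, EncodeUtf8(0x1F600) produces the four bytes F0 9F 98 80, the four-byte case from the comment's table.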