path: root/lib/Lex/Lexer.cpp
author    Chris Lattner <sabre@nondot.org>  2009-01-17 06:22:33 +0000
committer Chris Lattner <sabre@nondot.org>  2009-01-17 06:22:33 +0000
commit    2b2453a7d8fe732561795431f39ceb2b2a832d84 (patch)
tree      ad3d68197002f997b30e6617e41e290eff963b03 /lib/Lex/Lexer.cpp
parent    05816591ec488a933dfecc9ff9f3cbf3c32767c2 (diff)
This massive patch introduces a simple new abstraction: it makes "FileID" a concept that is now enforced by the compiler's type checker instead of yet-another-random-unsigned floating around. This is an important distinction from the "FileID" currently tracked by SourceLocation. *That* FileID may refer to the start of a file or to a chunk within it. The new FileID *only* refers to the file (and its #include stack and, eventually, #line data); it cannot refer to a chunk. FileID is a completely opaque datatype to all clients; only SourceManager is allowed to poke and prod it.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@62407 91177308-0d34-0410-b5e6-96231b3b80d8
Diffstat (limited to 'lib/Lex/Lexer.cpp')
-rw-r--r--  lib/Lex/Lexer.cpp | 24
1 file changed, 14 insertions(+), 10 deletions(-)
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 5a14c1356a..d63c8cc37b 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -62,14 +62,18 @@ tok::ObjCKeywordKind Token::getObjCKeywordID() const {
/// with the specified preprocessor managing the lexing process. This lexer
/// assumes that the associated file buffer and Preprocessor objects will
/// outlive it, so it doesn't take ownership of either of them.
-Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
+Lexer::Lexer(SourceLocation fileloc, Preprocessor &PP,
const char *BufStart, const char *BufEnd)
- : PreprocessorLexer(&pp, fileloc), FileLoc(fileloc),
- Features(pp.getLangOptions()) {
+// FIXME: This is really horrible and only needed for _Pragma lexers, split this
+// out of the main lexer path!
+ : PreprocessorLexer(&PP,
+ PP.getSourceManager().getCanonicalFileID(
+ PP.getSourceManager().getSpellingLoc(fileloc))),
+ FileLoc(fileloc),
+ Features(PP.getLangOptions()) {
- SourceManager &SourceMgr = PP->getSourceManager();
- unsigned InputFileID = SourceMgr.getSpellingLoc(FileLoc).getFileID();
- const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(InputFileID);
+ SourceManager &SourceMgr = PP.getSourceManager();
+ const llvm::MemoryBuffer *InputFile = SourceMgr.getBuffer(getFileID());
Is_PragmaLexer = false;
InitCharacterInfo();
@@ -103,7 +107,7 @@ Lexer::Lexer(SourceLocation fileloc, Preprocessor &pp,
// Default to keeping comments if the preprocessor wants them.
ExtendedTokenMode = 0;
- SetCommentRetentionState(PP->getCommentRetentionState());
+ SetCommentRetentionState(PP.getCommentRetentionState());
}
/// Lexer constructor - Create a new raw lexer object. This object is only
@@ -187,9 +191,7 @@ unsigned Lexer::MeasureTokenLength(SourceLocation Loc,
// all obviously single-char tokens. This could use
// Lexer::isObviouslySimpleCharacter for example to handle identifiers or
// something.
-
-
- const char *BufEnd = SM.getBufferData(Loc.getFileID()).second;
+ const char *BufEnd = SM.getBufferData(Loc).second;
// Create a langops struct and enable trigraphs. This is sufficient for
// measuring tokens.
@@ -303,6 +305,8 @@ SourceLocation Lexer::getSourceLocation(const char *Loc) const {
if (FileLoc.isFileID())
return SourceLocation::getFileLoc(FileLoc.getFileID(), CharNo);
+ // Otherwise, this is the _Pragma lexer case, which pretends that all of the
+ // tokens are lexed from where the _Pragma was defined.
assert(PP && "This doesn't work on raw lexers");
return GetMappedTokenLoc(*PP, FileLoc, CharNo);
}