author     Chris Lattner <sabre@nondot.org>   2009-01-19 06:46:35 +0000
committer  Chris Lattner <sabre@nondot.org>   2009-01-19 06:46:35 +0000
commit     bcc2a67e5180612417727cbdd8afd0f79fdf726d (patch)
tree       6ec0a17c76a1d8ac80373d73b1e732d125dffc79 /lib/Lex/Lexer.cpp
parent     ec0d7a6f4b0699cc9960e6d9fee0f957c64d1cf9 (diff)
Make SourceLocation::getFileLoc private to reduce the API exposure of
SourceLocation. This requires making some cleanups to token pasting and
_Pragma expansion.

git-svn-id: https://llvm.org/svn/llvm-project/cfe/trunk@62490 91177308-0d34-0410-b5e6-96231b3b80d8
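
For context, a minimal caller-side sketch of the revised interface (the wrapper below is hypothetical, not Clang code; only the Create_PragmaLexer signature comes from this patch): the _Pragma handler now passes the spelling location of the string's characters and the instantiation location of the _Pragma itself separately, instead of a single token-start location from which the spelling location was derived internally.

#include "clang/Lex/Lexer.h"
#include "clang/Lex/Preprocessor.h"
using namespace clang;

// Hypothetical wrapper illustrating the new two-location call shape.
// StrSpellingLoc: where the _Pragma string's characters physically live.
// PragmaLoc:      where the _Pragma invocation was written.
static Lexer *makePragmaLexer(Preprocessor &PP,
                              SourceLocation StrSpellingLoc,
                              SourceLocation PragmaLoc,
                              unsigned TokLen) {
  return Lexer::Create_PragmaLexer(StrSpellingLoc, PragmaLoc, TokLen, PP);
}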
Diffstat (limited to 'lib/Lex/Lexer.cpp')
-rw-r--r--  lib/Lex/Lexer.cpp | 16 +++++++++-------
1 file changed, 9 insertions(+), 7 deletions(-)
diff --git a/lib/Lex/Lexer.cpp b/lib/Lex/Lexer.cpp
index 6e5402e8a3..9280526d22 100644
--- a/lib/Lex/Lexer.cpp
+++ b/lib/Lex/Lexer.cpp
@@ -150,13 +150,14 @@ Lexer::Lexer(FileID FID, const SourceManager &SM, const LangOptions &features)
/// interface that could handle this stuff. This would pull GetMappedTokenLoc
/// out of the critical path of the lexer!
///
-Lexer *Lexer::Create_PragmaLexer(SourceLocation TokStartLoc, unsigned TokLen,
- Preprocessor &PP) {
+Lexer *Lexer::Create_PragmaLexer(SourceLocation SpellingLoc,
+ SourceLocation InstantiationLoc,
+ unsigned TokLen, Preprocessor &PP) {
SourceManager &SM = PP.getSourceManager();
- SourceLocation SpellingLoc = SM.getSpellingLoc(TokStartLoc);
// Create the lexer as if we were going to lex the file normally.
- Lexer *L = new Lexer(SM.getCanonicalFileID(SpellingLoc), PP);
+ FileID SpellingFID = SM.getCanonicalFileID(SpellingLoc);
+ Lexer *L = new Lexer(SpellingFID, PP);
// Now that the lexer is created, change the start/end locations so that we
// just lex the subsection of the file that we want. This is lexing from a
@@ -168,7 +169,8 @@ Lexer *Lexer::Create_PragmaLexer(SourceLocation TokStartLoc, unsigned TokLen,
// Set the SourceLocation with the remapping information. This ensures that
// GetMappedTokenLoc will remap the tokens as they are lexed.
- L->FileLoc = TokStartLoc;
+ L->FileLoc = SM.getInstantiationLoc(SM.getLocForStartOfFile(SpellingFID),
+ InstantiationLoc);
// Ensure that the lexer thinks it is inside a directive, so that end \n will
// return an EOM token.
@@ -321,7 +323,7 @@ static SourceLocation GetMappedTokenLoc(Preprocessor &PP,
// characters come from spelling(FileLoc)+Offset.
SourceLocation InstLoc = SourceMgr.getInstantiationLoc(FileLoc);
SourceLocation SpellingLoc = SourceMgr.getSpellingLoc(FileLoc);
- SpellingLoc = SourceLocation::getFileLoc(SpellingLoc.getChunkID(), CharNo);
+ SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);
return SourceMgr.getInstantiationLoc(SpellingLoc, InstLoc);
}
@@ -335,7 +337,7 @@ SourceLocation Lexer::getSourceLocation(const char *Loc) const {
// the file id from FileLoc with the offset specified.
unsigned CharNo = Loc-BufferStart;
if (FileLoc.isFileID())
- return SourceLocation::getFileLoc(FileLoc.getChunkID(), CharNo);
+ return FileLoc.getFileLocWithOffset(CharNo);
// Otherwise, this is the _Pragma lexer case, which pretends that all of the
// tokens are lexed from where the _Pragma was defined.
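
For reference, a minimal sketch of the remapping that GetMappedTokenLoc performs in this _Pragma case, restated as a free function (remapPragmaOffset is a hypothetical name; the calls themselves are the ones visible in the hunks above):

#include "clang/Basic/SourceManager.h"
using namespace clang;

// Given the lexer's FileLoc (an instantiation location for _Pragma lexers)
// and a raw character offset into the spelling buffer, rebuild a location
// whose characters come from the spelling side but whose instantiation
// point is the original _Pragma site.
static SourceLocation remapPragmaOffset(SourceManager &SM,
                                        SourceLocation FileLoc,
                                        unsigned CharNo) {
  SourceLocation InstLoc = SM.getInstantiationLoc(FileLoc);
  SourceLocation SpellingLoc = SM.getSpellingLoc(FileLoc);
  // Advance within the spelling buffer by the raw character offset...
  SpellingLoc = SpellingLoc.getFileLocWithOffset(CharNo);
  // ...then re-wrap it so the token appears to come from the _Pragma site.
  return SM.getInstantiationLoc(SpellingLoc, InstLoc);
}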