commit f40c7887d3cc9bb0b56576ed9edbe505ff8058c0
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Sep 22 12:10:41 2022 +0200

    scripts: Extract glibcpp.py from check-obsolete-constructs.py

    The C tokenizer is useful separately.

    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
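
The extracted module is consumed the same way the patch below updates
check-obsolete-constructs.py: a script appends its own directory to
sys.path and imports glibcpp.  A minimal sketch of such a hypothetical
caller (not part of this patch; the Reporter class and the IDENT filter
are illustrative assumptions):

    #!/usr/bin/python3
    import os
    import sys

    # Make available glibc Python modules (same idiom as in the patch).
    sys.path.append(os.path.dirname(os.path.realpath(__file__)))

    import glibcpp

    class Reporter:
        # tokenize_c needs only this one method; '{!r}' in MESSAGE is
        # replaced with repr(token.text).
        def error(self, token, message):
            print('{}:{}: {}'.format(token.line, token.column,
                                     message.format(token.text)))

    with open(sys.argv[1]) as fp:
        for tok in glibcpp.tokenize_c(fp.read(), Reporter()):
            if tok.kind == 'IDENT':
                print(tok.line, tok.column, tok.text)
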
diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
index 89d21dea6e788783..7c7a092e440a3258 100755
--- a/scripts/check-obsolete-constructs.py
+++ b/scripts/check-obsolete-constructs.py
@@ -24,193 +24,14 @@
 """
 
 import argparse
-import collections
+import os
 import re
 import sys
 
-# Simplified lexical analyzer for C preprocessing tokens.
-# Does not implement trigraphs.
-# Does not implement backslash-newline in the middle of any lexical
-# item other than a string literal.
-# Does not implement universal-character-names in identifiers.
-# Treats prefixed strings (e.g. L"...") as two tokens (L and "...")
-# Accepts non-ASCII characters only within comments and strings.
-
-# Caution: The order of the outermost alternation matters.
-# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
-# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
-# be last.
-# Caution: There should be no capturing groups other than the named
-# captures in the outermost alternation.
-
-# For reference, these are all of the C punctuators as of C11:
-#   [ ] ( ) { } , ; ? ~
-#   ! != * *= / /= ^ ^= = ==
-#   # ##
-#   % %= %> %: %:%:
-#   & &= &&
-#   | |= ||
-#   + += ++
-#   - -= -- ->
-#   . ...
-#   : :>
-#   < <% <: << <<= <=
-#   > >= >> >>=
-
-# The BAD_* tokens are not part of the official definition of pp-tokens;
-# they match unclosed strings, character constants, and block comments,
-# so that the regex engine doesn't have to backtrack all the way to the
-# beginning of a broken construct and then emit dozens of junk tokens.
-
-PP_TOKEN_RE_ = re.compile(r"""
-    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
-   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
-   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
-   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
-   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
-   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
-   |(?P<LINE_COMMENT>  //[^\r\n]*)
-   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
-   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
-   |(?P<PUNCTUATOR>
-       [,;?~(){}\[\]]
-     | [!*/^=]=?
-     | \#\#?
-     | %(?:[=>]|:(?:%:)?)?
-     | &[=&]?
-     |\|[=|]?
-     |\+[=+]?
-     | -[=->]?
-     |\.(?:\.\.)?
-     | :>?
-     | <(?:[%:]|<(?:=|<=?)?)?
-     | >(?:=|>=?)?)
-   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
-   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
-   |(?P<OTHER>         .)
-""", re.DOTALL | re.VERBOSE)
-
-HEADER_NAME_RE_ = re.compile(r"""
-    < [^>\r\n]+ >
-  | " [^"\r\n]+ "
-""", re.DOTALL | re.VERBOSE)
-
-ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
-
-# based on the sample code in the Python re documentation
-Token_ = collections.namedtuple("Token", (
-    "kind", "text", "line", "column", "context"))
-Token_.__doc__ = """
-    One C preprocessing token, comment, or chunk of whitespace.
-    'kind' identifies the token type, which will be one of:
-        STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
-        PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
-        or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
-        handled within tokenize_c, below.
-
-    'text' is the sequence of source characters making up the token;
-        no decoding whatsoever is performed.
-
-    'line' and 'column' give the position of the first character of the
-        token within the source file.  They are both 1-based.
-
-    'context' indicates whether or not this token occurred within a
-        preprocessing directive; it will be None for running text,
-        '<null>' for the leading '#' of a directive line (because '#'
-        all by itself on a line is a "null directive"), or the name of
-        the directive for tokens within a directive line, starting with
-        the IDENT for the name itself.
-"""
-
-def tokenize_c(file_contents, reporter):
-    """Yield a series of Token objects, one for each preprocessing
-       token, comment, or chunk of whitespace within FILE_CONTENTS.
-       The REPORTER object is expected to have one method,
-       reporter.error(token, message), which will be called to
-       indicate a lexical error at the position of TOKEN.
-       If MESSAGE contains the four-character sequence '{!r}', that
-       is expected to be replaced by repr(token.text).
-    """
+# Make available glibc Python modules.
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
 
-    Token = Token_
-    PP_TOKEN_RE = PP_TOKEN_RE_
-    ENDLINE_RE = ENDLINE_RE_
-    HEADER_NAME_RE = HEADER_NAME_RE_
-
-    line_num = 1
-    line_start = 0
-    pos = 0
-    limit = len(file_contents)
-    directive = None
-    at_bol = True
-    while pos < limit:
-        if directive == "include":
-            mo = HEADER_NAME_RE.match(file_contents, pos)
-            if mo:
-                kind = "HEADER_NAME"
-                directive = "after_include"
-            else:
-                mo = PP_TOKEN_RE.match(file_contents, pos)
-                kind = mo.lastgroup
-                if kind != "WHITESPACE":
-                    directive = "after_include"
-        else:
-            mo = PP_TOKEN_RE.match(file_contents, pos)
-            kind = mo.lastgroup
-
-        text = mo.group()
-        line = line_num
-        column = mo.start() - line_start
-        adj_line_start = 0
-        # only these kinds can contain a newline
-        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
-                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
-            for tmo in ENDLINE_RE.finditer(text):
-                line_num += 1
-                adj_line_start = tmo.end()
-            if adj_line_start:
-                line_start = mo.start() + adj_line_start
-
-        # Track whether or not we are scanning a preprocessing directive.
-        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
-            at_bol = True
-            directive = None
-        else:
-            if kind == "PUNCTUATOR" and text == "#" and at_bol:
-                directive = "<null>"
-            elif kind == "IDENT" and directive == "<null>":
-                directive = text
-            at_bol = False
-
-        # Report ill-formed tokens and rewrite them as their well-formed
-        # equivalents, so downstream processing doesn't have to know about them.
-        # (Rewriting instead of discarding provides better error recovery.)
-        if kind == "BAD_BLOCK_COM":
-            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
-                           "unclosed block comment")
-            text += "*/"
-            kind = "BLOCK_COMMENT"
-        elif kind == "BAD_STRING":
-            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
-                           "unclosed string")
-            text += "\""
-            kind = "STRING"
-        elif kind == "BAD_CHARCONST":
-            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
-                           "unclosed char constant")
-            text += "'"
-            kind = "CHARCONST"
-
-        tok = Token(kind, text, line, column+1,
-                    "include" if directive == "after_include" else directive)
-        # Do not complain about OTHER tokens inside macro definitions.
-        # $ and @ appear in macros defined by headers intended to be
-        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
-        if kind == "OTHER" and directive != "define":
-            self.error(tok, "stray {!r} in program")
-
-        yield tok
-        pos = mo.end()
+import glibcpp
 
 #
 # Base and generic classes for individual checks.
@@ -446,7 +267,7 @@ class HeaderChecker:
 
         typedef_checker = ObsoleteTypedefChecker(self, self.fname)
 
-        for tok in tokenize_c(contents, self):
+        for tok in glibcpp.tokenize_c(contents, self):
             typedef_checker.examine(tok)
 
 def main():
diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
new file mode 100644
index 0000000000000000..b44c6a4392dde8ce
--- /dev/null
+++ b/scripts/glibcpp.py
@@ -0,0 +1,212 @@
+#! /usr/bin/python3
+# Approximation to C preprocessing.
+# Copyright (C) 2019-2022 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <https://www.gnu.org/licenses/>.
+
+"""
+Simplified lexical analyzer for C preprocessing tokens.
+
+Does not implement trigraphs.
+
+Does not implement backslash-newline in the middle of any lexical
+item other than a string literal.
+
+Does not implement universal-character-names in identifiers.
+
+Treats prefixed strings (e.g. L"...") as two tokens (L and "...").
+
+Accepts non-ASCII characters only within comments and strings.
+"""
+
+import collections
+import re
+
+# Caution: The order of the outermost alternation matters.
+# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
+# be last.
+# Caution: There should be no capturing groups other than the named
+# captures in the outermost alternation.
+
+# For reference, these are all of the C punctuators as of C11:
+#   [ ] ( ) { } , ; ? ~
+#   ! != * *= / /= ^ ^= = ==
+#   # ##
+#   % %= %> %: %:%:
+#   & &= &&
+#   | |= ||
+#   + += ++
+#   - -= -- ->
+#   . ...
+#   : :>
+#   < <% <: << <<= <=
+#   > >= >> >>=
+
+# The BAD_* tokens are not part of the official definition of pp-tokens;
+# they match unclosed strings, character constants, and block comments,
+# so that the regex engine doesn't have to backtrack all the way to the
+# beginning of a broken construct and then emit dozens of junk tokens.
+
+PP_TOKEN_RE_ = re.compile(r"""
+    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
+   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
+   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
+   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
+   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
+   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
+   |(?P<LINE_COMMENT>  //[^\r\n]*)
+   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
+   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
+   |(?P<PUNCTUATOR>
+       [,;?~(){}\[\]]
+     | [!*/^=]=?
+     | \#\#?
+     | %(?:[=>]|:(?:%:)?)?
+     | &[=&]?
+     |\|[=|]?
+     |\+[=+]?
+     | -[=->]?
+     |\.(?:\.\.)?
+     | :>?
+     | <(?:[%:]|<(?:=|<=?)?)?
+     | >(?:=|>=?)?)
+   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
+   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
+   |(?P<OTHER>         .)
+""", re.DOTALL | re.VERBOSE)
+
+HEADER_NAME_RE_ = re.compile(r"""
+    < [^>\r\n]+ >
+  | " [^"\r\n]+ "
+""", re.DOTALL | re.VERBOSE)
+
+ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
+
+# based on the sample code in the Python re documentation
+Token_ = collections.namedtuple("Token", (
+    "kind", "text", "line", "column", "context"))
+Token_.__doc__ = """
+    One C preprocessing token, comment, or chunk of whitespace.
+    'kind' identifies the token type, which will be one of:
+        STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
+        PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
+        or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
+        handled within tokenize_c, below.
+
+    'text' is the sequence of source characters making up the token;
+        no decoding whatsoever is performed.
+
+    'line' and 'column' give the position of the first character of the
+        token within the source file.  They are both 1-based.
+
+    'context' indicates whether or not this token occurred within a
+        preprocessing directive; it will be None for running text,
+        '<null>' for the leading '#' of a directive line (because '#'
+        all by itself on a line is a "null directive"), or the name of
+        the directive for tokens within a directive line, starting with
+        the IDENT for the name itself.
+"""
+
+def tokenize_c(file_contents, reporter):
+    """Yield a series of Token objects, one for each preprocessing
+       token, comment, or chunk of whitespace within FILE_CONTENTS.
+       The REPORTER object is expected to have one method,
+       reporter.error(token, message), which will be called to
+       indicate a lexical error at the position of TOKEN.
+       If MESSAGE contains the four-character sequence '{!r}', that
+       is expected to be replaced by repr(token.text).
+    """
+
+    Token = Token_
+    PP_TOKEN_RE = PP_TOKEN_RE_
+    ENDLINE_RE = ENDLINE_RE_
+    HEADER_NAME_RE = HEADER_NAME_RE_
+
+    line_num = 1
+    line_start = 0
+    pos = 0
+    limit = len(file_contents)
+    directive = None
+    at_bol = True
+    while pos < limit:
+        if directive == "include":
+            mo = HEADER_NAME_RE.match(file_contents, pos)
+            if mo:
+                kind = "HEADER_NAME"
+                directive = "after_include"
+            else:
+                mo = PP_TOKEN_RE.match(file_contents, pos)
+                kind = mo.lastgroup
+                if kind != "WHITESPACE":
+                    directive = "after_include"
+        else:
+            mo = PP_TOKEN_RE.match(file_contents, pos)
+            kind = mo.lastgroup
+
+        text = mo.group()
+        line = line_num
+        column = mo.start() - line_start
+        adj_line_start = 0
+        # only these kinds can contain a newline
+        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
+                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
+            for tmo in ENDLINE_RE.finditer(text):
+                line_num += 1
+                adj_line_start = tmo.end()
+            if adj_line_start:
+                line_start = mo.start() + adj_line_start
+
+        # Track whether or not we are scanning a preprocessing directive.
+        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
+            at_bol = True
+            directive = None
+        else:
+            if kind == "PUNCTUATOR" and text == "#" and at_bol:
+                directive = "<null>"
+            elif kind == "IDENT" and directive == "<null>":
+                directive = text
+            at_bol = False
+
+        # Report ill-formed tokens and rewrite them as their well-formed
+        # equivalents, so downstream processing doesn't have to know about them.
+        # (Rewriting instead of discarding provides better error recovery.)
+        if kind == "BAD_BLOCK_COM":
+            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
+                           "unclosed block comment")
+            text += "*/"
+            kind = "BLOCK_COMMENT"
+        elif kind == "BAD_STRING":
+            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
+                           "unclosed string")
+            text += "\""
+            kind = "STRING"
+        elif kind == "BAD_CHARCONST":
+            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
+                           "unclosed char constant")
+            text += "'"
+            kind = "CHARCONST"
+
+        tok = Token(kind, text, line, column+1,
+                    "include" if directive == "after_include" else directive)
+        # Do not complain about OTHER tokens inside macro definitions.
+        # $ and @ appear in macros defined by headers intended to be
+        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
+        if kind == "OTHER" and directive != "define":
+            reporter.error(tok, "stray {!r} in program")
+
+        yield tok
+        pos = mo.end()
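
As a quick check of the directive tracking above, tokenizing a single
include line yields a HEADER_NAME token whose context is "include"
(a hypothetical interactive session with scripts/ on sys.path; the
minimal reporter R is an illustrative assumption):

    >>> import glibcpp
    >>> class R:
    ...     def error(self, token, message):
    ...         print(message.format(token.text))
    ...
    >>> for tok in glibcpp.tokenize_c('#include <stdio.h>\n', R()):
    ...     print(tok.kind, repr(tok.text), tok.context)
    ...
    PUNCTUATOR '#' <null>
    IDENT 'include' include
    WHITESPACE ' ' include
    HEADER_NAME '<stdio.h>' include
    WHITESPACE '\n' None

The error-recovery path should behave the same way: an unclosed string
is reported through the reporter, then handed downstream as a
well-formed STRING token:

    >>> list(glibcpp.tokenize_c('"abc', R()))[-1].kind
    unclosed string
    'STRING'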