commit f40c7887d3cc9bb0b56576ed9edbe505ff8058c0
Author: Florian Weimer <fweimer@redhat.com>
Date:   Thu Sep 22 12:10:41 2022 +0200

    scripts: Extract glibcpp.py from check-obsolete-constructs.py

    The C tokenizer is useful separately.

    Reviewed-by: Siddhesh Poyarekar <siddhesh@sourceware.org>
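As an illustration of that separate use, here is a minimal sketch of a
sibling script under scripts/ calling the extracted tokenizer.  The
Reporter class and the input file name are hypothetical; the sys.path
setup and the tokenize_c/reporter.error interface are taken from the
patch below.

    #! /usr/bin/python3
    import os
    import sys

    # Make glibc Python modules available, as the patched script does.
    sys.path.append(os.path.dirname(os.path.realpath(__file__)))
    import glibcpp

    class Reporter:
        # tokenize_c calls reporter.error(token, message); per its
        # docstring, '{!r}' in MESSAGE stands for repr(token.text).
        def error(self, token, message):
            print("%d:%d: %s" % (token.line, token.column,
                                 message.format(token.text)))

    with open("sample.h") as fp:        # hypothetical input file
        contents = fp.read()
    for tok in glibcpp.tokenize_c(contents, Reporter()):
        if tok.kind == "IDENT":
            print(tok.line, tok.column, tok.text)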
diff --git a/scripts/check-obsolete-constructs.py b/scripts/check-obsolete-constructs.py
index 89d21dea6e788783..7c7a092e440a3258 100755
--- a/scripts/check-obsolete-constructs.py
+++ b/scripts/check-obsolete-constructs.py
@@ -24,193 +24,14 @@
 """
 
 import argparse
-import collections
+import os
 import re
 import sys
 
-# Simplified lexical analyzer for C preprocessing tokens.
-# Does not implement trigraphs.
-# Does not implement backslash-newline in the middle of any lexical
-#   item other than a string literal.
-# Does not implement universal-character-names in identifiers.
-# Treats prefixed strings (e.g. L"...") as two tokens (L and "...")
-# Accepts non-ASCII characters only within comments and strings.
-
-# Caution: The order of the outermost alternation matters.
-# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
-# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
-# be last.
-# Caution: There should be no capturing groups other than the named
-# captures in the outermost alternation.
-
-# For reference, these are all of the C punctuators as of C11:
-#   [ ] ( ) { } , ; ? ~
-#   ! != * *= / /= ^ ^= = ==
-#   # ##
-#   % %= %> %: %:%:
-#   & &= &&
-#   | |= ||
-#   + += ++
-#   - -= -- ->
-#   . ...
-#   : :>
-#   < <% <: << <<= <=
-#   > >= >> >>=
-
-# The BAD_* tokens are not part of the official definition of pp-tokens;
-# they match unclosed strings, character constants, and block comments,
-# so that the regex engine doesn't have to backtrack all the way to the
-# beginning of a broken construct and then emit dozens of junk tokens.
-
-PP_TOKEN_RE_ = re.compile(r"""
-    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
-   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
-   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
-   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
-   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
-   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
-   |(?P<LINE_COMMENT>  //[^\r\n]*)
-   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
-   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
-   |(?P<PUNCTUATOR>
-       [,;?~(){}\[\]]
-     | [!*/^=]=?
-     | \#\#?
-     | %(?:[=>]|:(?:%:)?)?
-     | &[=&]?
-     |\|[=|]?
-     |\+[=+]?
-     | -[=->]?
-     |\.(?:\.\.)?
-     | :>?
-     | <(?:[%:]|<(?:=|<=?)?)?
-     | >(?:=|>=?)?)
-   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
-   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
-   |(?P<OTHER>         .)
-""", re.DOTALL | re.VERBOSE)
-
-HEADER_NAME_RE_ = re.compile(r"""
-    < [^>\r\n]+ >
-  | " [^"\r\n]+ "
-""", re.DOTALL | re.VERBOSE)
-
-ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
-
-# based on the sample code in the Python re documentation
-Token_ = collections.namedtuple("Token", (
-    "kind", "text", "line", "column", "context"))
-Token_.__doc__ = """
-   One C preprocessing token, comment, or chunk of whitespace.
-   'kind' identifies the token type, which will be one of:
-       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
-       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
-       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
-       handled within tokenize_c, below.
-
-   'text' is the sequence of source characters making up the token;
-       no decoding whatsoever is performed.
-
-   'line' and 'column' give the position of the first character of the
-      token within the source file.  They are both 1-based.
-
-   'context' indicates whether or not this token occurred within a
-      preprocessing directive; it will be None for running text,
-      '<null>' for the leading '#' of a directive line (because '#'
-      all by itself on a line is a "null directive"), or the name of
-      the directive for tokens within a directive line, starting with
-      the IDENT for the name itself.
-"""
-
-def tokenize_c(file_contents, reporter):
-    """Yield a series of Token objects, one for each preprocessing
-       token, comment, or chunk of whitespace within FILE_CONTENTS.
-       The REPORTER object is expected to have one method,
-       reporter.error(token, message), which will be called to
-       indicate a lexical error at the position of TOKEN.
-       If MESSAGE contains the four-character sequence '{!r}', that
-       is expected to be replaced by repr(token.text).
-    """
+# Make glibc Python modules available.
+sys.path.append(os.path.dirname(os.path.realpath(__file__)))
 
-    Token = Token_
-    PP_TOKEN_RE = PP_TOKEN_RE_
-    ENDLINE_RE = ENDLINE_RE_
-    HEADER_NAME_RE = HEADER_NAME_RE_
-
-    line_num = 1
-    line_start = 0
-    pos = 0
-    limit = len(file_contents)
-    directive = None
-    at_bol = True
-    while pos < limit:
-        if directive == "include":
-            mo = HEADER_NAME_RE.match(file_contents, pos)
-            if mo:
-                kind = "HEADER_NAME"
-                directive = "after_include"
-            else:
-                mo = PP_TOKEN_RE.match(file_contents, pos)
-                kind = mo.lastgroup
-                if kind != "WHITESPACE":
-                    directive = "after_include"
-        else:
-            mo = PP_TOKEN_RE.match(file_contents, pos)
-            kind = mo.lastgroup
-
-        text = mo.group()
-        line = line_num
-        column = mo.start() - line_start
-        adj_line_start = 0
-        # only these kinds can contain a newline
-        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
-                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
-            for tmo in ENDLINE_RE.finditer(text):
-                line_num += 1
-                adj_line_start = tmo.end()
-            if adj_line_start:
-                line_start = mo.start() + adj_line_start
-
-        # Track whether or not we are scanning a preprocessing directive.
-        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
-            at_bol = True
-            directive = None
-        else:
-            if kind == "PUNCTUATOR" and text == "#" and at_bol:
-                directive = "<null>"
-            elif kind == "IDENT" and directive == "<null>":
-                directive = text
-            at_bol = False
-
-        # Report ill-formed tokens and rewrite them as their well-formed
-        # equivalents, so downstream processing doesn't have to know about them.
-        # (Rewriting instead of discarding provides better error recovery.)
-        if kind == "BAD_BLOCK_COM":
-            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
-                           "unclosed block comment")
-            text += "*/"
-            kind = "BLOCK_COMMENT"
-        elif kind == "BAD_STRING":
-            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
-                           "unclosed string")
-            text += "\""
-            kind = "STRING"
-        elif kind == "BAD_CHARCONST":
-            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
-                           "unclosed char constant")
-            text += "'"
-            kind = "CHARCONST"
-
-        tok = Token(kind, text, line, column+1,
-                    "include" if directive == "after_include" else directive)
-        # Do not complain about OTHER tokens inside macro definitions.
-        # $ and @ appear in macros defined by headers intended to be
-        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
-        if kind == "OTHER" and directive != "define":
-            self.error(tok, "stray {!r} in program")
-
-        yield tok
-        pos = mo.end()
+import glibcpp
 
 #
 # Base and generic classes for individual checks.
@@ -446,7 +267,7 @@ class HeaderChecker:
 
         typedef_checker = ObsoleteTypedefChecker(self, self.fname)
 
-        for tok in tokenize_c(contents, self):
+        for tok in glibcpp.tokenize_c(contents, self):
             typedef_checker.examine(tok)
 
 def main():
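Before the new file itself, a short sketch of what the tokenizer's
directive tracking produces, assuming glibcpp is importable as above.
The Quiet class is hypothetical; the expected output in the comments
follows from the Token docstring and directive logic below.

    import glibcpp

    class Quiet:
        def error(self, token, message):
            pass                # no lexical errors expected here

    for tok in glibcpp.tokenize_c('#include <stdio.h>\n', Quiet()):
        print(tok.kind, repr(tok.text), tok.context)
    # PUNCTUATOR '#' <null>
    # IDENT 'include' include
    # WHITESPACE ' ' include
    # HEADER_NAME '<stdio.h>' include
    # WHITESPACE '\n' None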
diff --git a/scripts/glibcpp.py b/scripts/glibcpp.py
new file mode 100644
index 0000000000000000..b44c6a4392dde8ce
--- /dev/null
+++ b/scripts/glibcpp.py
@@ -0,0 +1,212 @@
+#! /usr/bin/python3
+# Approximation to C preprocessing.
+# Copyright (C) 2019-2022 Free Software Foundation, Inc.
+# This file is part of the GNU C Library.
+#
+# The GNU C Library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# The GNU C Library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with the GNU C Library; if not, see
+# <https://www.gnu.org/licenses/>.
+
+"""
+Simplified lexical analyzer for C preprocessing tokens.
+
+Does not implement trigraphs.
+
+Does not implement backslash-newline in the middle of any lexical
+item other than a string literal.
+
+Does not implement universal-character-names in identifiers.
+
+Treats prefixed strings (e.g. L"...") as two tokens (L and "...").
+
+Accepts non-ASCII characters only within comments and strings.
+"""
+
+import collections
+import re
+
+# Caution: The order of the outermost alternation matters.
+# STRING must be before BAD_STRING, CHARCONST before BAD_CHARCONST,
+# BLOCK_COMMENT before BAD_BLOCK_COM before PUNCTUATOR, and OTHER must
+# be last.
+# Caution: There should be no capturing groups other than the named
+# captures in the outermost alternation.
+
+# For reference, these are all of the C punctuators as of C11:
+#   [ ] ( ) { } , ; ? ~
+#   ! != * *= / /= ^ ^= = ==
+#   # ##
+#   % %= %> %: %:%:
+#   & &= &&
+#   | |= ||
+#   + += ++
+#   - -= -- ->
+#   . ...
+#   : :>
+#   < <% <: << <<= <=
+#   > >= >> >>=
+
+# The BAD_* tokens are not part of the official definition of pp-tokens;
+# they match unclosed strings, character constants, and block comments,
+# so that the regex engine doesn't have to backtrack all the way to the
+# beginning of a broken construct and then emit dozens of junk tokens.
+
+PP_TOKEN_RE_ = re.compile(r"""
+    (?P<STRING>        \"(?:[^\"\\\r\n]|\\(?:[\r\n -~]|\r\n))*\")
+   |(?P<BAD_STRING>    \"(?:[^\"\\\r\n]|\\[ -~])*)
+   |(?P<CHARCONST>     \'(?:[^\'\\\r\n]|\\(?:[\r\n -~]|\r\n))*\')
+   |(?P<BAD_CHARCONST> \'(?:[^\'\\\r\n]|\\[ -~])*)
+   |(?P<BLOCK_COMMENT> /\*(?:\*(?!/)|[^*])*\*/)
+   |(?P<BAD_BLOCK_COM> /\*(?:\*(?!/)|[^*])*\*?)
+   |(?P<LINE_COMMENT>  //[^\r\n]*)
+   |(?P<IDENT>         [_a-zA-Z][_a-zA-Z0-9]*)
+   |(?P<PP_NUMBER>     \.?[0-9](?:[0-9a-df-oq-zA-DF-OQ-Z_.]|[eEpP][+-]?)*)
+   |(?P<PUNCTUATOR>
+       [,;?~(){}\[\]]
+     | [!*/^=]=?
+     | \#\#?
+     | %(?:[=>]|:(?:%:)?)?
+     | &[=&]?
+     |\|[=|]?
+     |\+[=+]?
+     | -[=->]?
+     |\.(?:\.\.)?
+     | :>?
+     | <(?:[%:]|<(?:=|<=?)?)?
+     | >(?:=|>=?)?)
+   |(?P<ESCNL>         \\(?:\r|\n|\r\n))
+   |(?P<WHITESPACE>    [ \t\n\r\v\f]+)
+   |(?P<OTHER>         .)
+""", re.DOTALL | re.VERBOSE)
+
+HEADER_NAME_RE_ = re.compile(r"""
+    < [^>\r\n]+ >
+  | " [^"\r\n]+ "
+""", re.DOTALL | re.VERBOSE)
+
+ENDLINE_RE_ = re.compile(r"""\r|\n|\r\n""")
+
+# based on the sample code in the Python re documentation
+Token_ = collections.namedtuple("Token", (
+    "kind", "text", "line", "column", "context"))
+Token_.__doc__ = """
+   One C preprocessing token, comment, or chunk of whitespace.
+   'kind' identifies the token type, which will be one of:
+       STRING, CHARCONST, BLOCK_COMMENT, LINE_COMMENT, IDENT,
+       PP_NUMBER, PUNCTUATOR, ESCNL, WHITESPACE, HEADER_NAME,
+       or OTHER.  The BAD_* alternatives in PP_TOKEN_RE_ are
+       handled within tokenize_c, below.
+
+   'text' is the sequence of source characters making up the token;
+       no decoding whatsoever is performed.
+
+   'line' and 'column' give the position of the first character of the
+      token within the source file.  They are both 1-based.
+
+   'context' indicates whether or not this token occurred within a
+      preprocessing directive; it will be None for running text,
+      '<null>' for the leading '#' of a directive line (because '#'
+      all by itself on a line is a "null directive"), or the name of
+      the directive for tokens within a directive line, starting with
+      the IDENT for the name itself.
+"""
+
+def tokenize_c(file_contents, reporter):
+    """Yield a series of Token objects, one for each preprocessing
+       token, comment, or chunk of whitespace within FILE_CONTENTS.
+       The REPORTER object is expected to have one method,
+       reporter.error(token, message), which will be called to
+       indicate a lexical error at the position of TOKEN.
+       If MESSAGE contains the four-character sequence '{!r}', that
+       is expected to be replaced by repr(token.text).
+    """
+
+    Token = Token_
+    PP_TOKEN_RE = PP_TOKEN_RE_
+    ENDLINE_RE = ENDLINE_RE_
+    HEADER_NAME_RE = HEADER_NAME_RE_
+
+    line_num = 1
+    line_start = 0
+    pos = 0
+    limit = len(file_contents)
+    directive = None
+    at_bol = True
+    while pos < limit:
+        if directive == "include":
+            mo = HEADER_NAME_RE.match(file_contents, pos)
+            if mo:
+                kind = "HEADER_NAME"
+                directive = "after_include"
+            else:
+                mo = PP_TOKEN_RE.match(file_contents, pos)
+                kind = mo.lastgroup
+                if kind != "WHITESPACE":
+                    directive = "after_include"
+        else:
+            mo = PP_TOKEN_RE.match(file_contents, pos)
+            kind = mo.lastgroup
+
+        text = mo.group()
+        line = line_num
+        column = mo.start() - line_start
+        adj_line_start = 0
+        # only these kinds can contain a newline
+        if kind in ("WHITESPACE", "BLOCK_COMMENT", "LINE_COMMENT",
+                    "STRING", "CHARCONST", "BAD_BLOCK_COM", "ESCNL"):
+            for tmo in ENDLINE_RE.finditer(text):
+                line_num += 1
+                adj_line_start = tmo.end()
+            if adj_line_start:
+                line_start = mo.start() + adj_line_start
+
+        # Track whether or not we are scanning a preprocessing directive.
+        if kind == "LINE_COMMENT" or (kind == "WHITESPACE" and adj_line_start):
+            at_bol = True
+            directive = None
+        else:
+            if kind == "PUNCTUATOR" and text == "#" and at_bol:
+                directive = "<null>"
+            elif kind == "IDENT" and directive == "<null>":
+                directive = text
+            at_bol = False
+
+        # Report ill-formed tokens and rewrite them as their well-formed
+        # equivalents, so downstream processing doesn't have to know about them.
+        # (Rewriting instead of discarding provides better error recovery.)
+        if kind == "BAD_BLOCK_COM":
+            reporter.error(Token("BAD_BLOCK_COM", "", line, column+1, ""),
+                           "unclosed block comment")
+            text += "*/"
+            kind = "BLOCK_COMMENT"
+        elif kind == "BAD_STRING":
+            reporter.error(Token("BAD_STRING", "", line, column+1, ""),
+                           "unclosed string")
+            text += "\""
+            kind = "STRING"
+        elif kind == "BAD_CHARCONST":
+            reporter.error(Token("BAD_CHARCONST", "", line, column+1, ""),
+                           "unclosed char constant")
+            text += "'"
+            kind = "CHARCONST"
+
+        tok = Token(kind, text, line, column+1,
+                    "include" if directive == "after_include" else directive)
+        # Do not complain about OTHER tokens inside macro definitions.
+        # $ and @ appear in macros defined by headers intended to be
+        # included from assembly language, e.g. sysdeps/mips/sys/asm.h.
+        if kind == "OTHER" and directive != "define":
+            reporter.error(tok, "stray {!r} in program")
+
+        yield tok
+        pos = mo.end()
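To round this off, a sketch of the BAD_* recovery path above:
tokenizing an unclosed block comment reports one error and yields an
already-repaired token.  The Collect class is hypothetical; the
expected values in the comments follow from the code above.

    import glibcpp

    class Collect:
        def __init__(self):
            self.errors = []
        def error(self, token, message):
            self.errors.append((token.line, token.column, message))

    reporter = Collect()
    toks = list(glibcpp.tokenize_c("/* never closed", reporter))
    # reporter.errors == [(1, 1, 'unclosed block comment')]
    # toks[0].kind == 'BLOCK_COMMENT' and toks[0].text ends in '*/';
    # the BAD_BLOCK_COM match was rewritten, so downstream consumers
    # never see BAD_* kinds.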