https://github.com/zeyi2 updated https://github.com/llvm/llvm-project/pull/166072

>From 7b4e4172fc3cea8e8194aa3544f2c2ec30e3616a Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 22:56:53 +0800
Subject: [PATCH 1/5] [clang-tidy][docs] Implement alphabetical order check

---
 .../clang-tidy-alphabetical-order-check.py    | 301 ++++++++++++++++++
 .../infrastructure/alphabetical-order.cpp     |   6 +
 clang-tools-extra/test/lit.cfg.py             |   1 +
 3 files changed, 308 insertions(+)
 create mode 100644 clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
 create mode 100644 clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp

diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
new file mode 100644
index 0000000000000..321663bb7d577
--- /dev/null
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -0,0 +1,301 @@
+#!/usr/bin/env python3
+
+"""
+Normalize clang-tidy docs with deterministic sorting for linting/tests.
+
+Subcommands:
+  - checks-list: Sort entries in docs/clang-tidy/checks/list.rst csv-table.
+  - release-notes: Sort key sections in docs/ReleaseNotes.rst and de-duplicate
+                   entries in "Changes in existing checks".
+
+Usage:
+  clang-tidy-alphabetical-order-check.py <subcommand> [-i <input rst>] [-o <output rst>] [--fix]
+
+Flags:
+  -i/--input   Input file.
+  -o/--output  Write normalized content here; omit to write to stdout.
+  --fix        Rewrite the input file in place. Cannot be combined with -o/--output.
+"""
+
+import argparse
+import io
+import os
+import re
+import sys
+from typing import List, Optional, Sequence, Tuple
+
+DOC_LABEL_RN_RE = re.compile(r":doc:`(?P<label>[^`<]+)\s*(?:<[^>]+>)?`")
+DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")
+
+
+def script_dir() -> str:
+    return os.path.dirname(os.path.abspath(__file__))
+
+
+def read_text(path: str) -> List[str]:
+    with io.open(path, "r", encoding="utf-8") as f:
+        return f.read().splitlines(True)
+
+
+def write_text(path: str, content: str) -> None:
+    with io.open(path, "w", encoding="utf-8", newline="") as f:
+        f.write(content)
+
+
+def normalize_list_rst(lines: List[str]) -> str:
+    out: List[str] = []
+    i = 0
+    n = len(lines)
+    while i < n:
+        out.append(lines[i])
+        if lines[i].lstrip().startswith(".. csv-table::"):
+            i += 1
+            break
+        i += 1
+
+    while i < n and (lines[i].startswith(" ") or lines[i].strip() == ""):
+        if DOC_LINE_RE.match(lines[i]):
+            break
+        out.append(lines[i])
+        i += 1
+
+    entries: List[str] = []
+    while i < n and lines[i].startswith(" "):
+        # Collect every indented row as-is; key_for below sorts rows that
+        # carry a :doc: reference by label and pushes any other indented
+        # line after them.
+        entries.append(lines[i])
+        i += 1
+
+    def key_for(line: str):
+        m = DOC_LINE_RE.match(line)
+        if not m:
+            return (1, "")
+        return (0, m.group("label"))
+
+    entries_sorted = sorted(entries, key=key_for)
+    out.extend(entries_sorted)
+    out.extend(lines[i:])
+
+    return "".join(out)
+
+
+def run_checks_list(
+    inp: Optional[str], out_path: Optional[str], fix: bool
+) -> int:
+    if not inp:
+        inp = os.path.normpath(
+            os.path.join(
+                script_dir(),
+                "..",
+                "..",
+                "docs",
+                "clang-tidy",
+                "checks",
+                "list.rst",
+            )
+        )
+    lines = read_text(inp)
+    normalized = normalize_list_rst(lines)
+    if fix and out_path:
+        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
+        return 2
+    if fix:
+        original = "".join(lines)
+        if original != normalized:
+            write_text(inp, normalized)
+        return 0
+    if out_path:
+        write_text(out_path, normalized)
+        return 0
+    sys.stdout.write(normalized)
+    return 0
+
+
+def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    for i in range(len(lines) - 1):
+        if lines[i].rstrip("\n") == title:
+            underline = lines[i + 1].rstrip("\n")
+            if (
+                underline
+                and set(underline) == {"^"}
+                and len(underline) >= len(title)
+            ):
+                return i
+    return None
+
+
+def extract_label(text: str) -> str:
+    m = DOC_LABEL_RN_RE.search(text)
+    return m.group("label") if m else text
+
+
+def is_bullet_start(line: str) -> bool:
+    return line.startswith("- ")
+
+
+def collect_bullet_blocks(
+    lines: Sequence[str], start: int, end: int
+) -> Tuple[List[str], List[Tuple[str, List[str]]], List[str]]:
+    i = start
+    n = end
+    first_bullet = i
+    while first_bullet < n and not is_bullet_start(lines[first_bullet]):
+        first_bullet += 1
+    prefix = list(lines[i:first_bullet])
+
+    blocks: List[Tuple[str, List[str]]] = []
+    i = first_bullet
+    while i < n:
+        if not is_bullet_start(lines[i]):
+            break
+        bstart = i
+        i += 1
+        while i < n and not is_bullet_start(lines[i]):
+            if (
+                i + 1 < n
+                and set(lines[i + 1].rstrip("\n")) == {"^"}
+                and lines[i].strip()
+            ):
+                break
+            i += 1
+        block = list(lines[bstart:i])
+        key = extract_label(block[0])
+        blocks.append((key, block))
+
+    suffix = list(lines[i:n])
+    return prefix, blocks, suffix
+
+
+def sort_and_dedup_blocks(
+    blocks: List[Tuple[str, List[str]]], dedup: bool = False
+) -> List[List[str]]:
+    seen = set()
+    filtered: List[Tuple[str, List[str]]] = []
+    for key, block in blocks:
+        if dedup:
+            if key in seen:
+                continue
+            seen.add(key)
+        filtered.append((key, block))
+    filtered.sort(key=lambda kb: kb[0])
+    return [b for _, b in filtered]
+
+
+def normalize_release_notes(lines: List[str]) -> str:
+    sections = [
+        ("New checks", False),
+        ("New check aliases", False),
+        ("Changes in existing checks", True),
+    ]
+
+    out = list(lines)
+
+    for idx in range(len(sections) - 1, -1, -1):
+        title, dedup = sections[idx]
+        h_start = find_heading(out, title)
+
+        if h_start is None:
+            continue
+
+        sec_start = h_start + 2
+
+        if idx + 1 < len(sections):
+            next_title = sections[idx + 1][0]
+            h_end = find_heading(out, next_title)
+            if h_end is None:
+                h_end = sec_start
+                while h_end + 1 < len(out):
+                    if out[h_end].strip() and set(
+                        out[h_end + 1].rstrip("\n")
+                    ) == {"^"}:
+                        break
+                    h_end += 1
+            sec_end = h_end
+        else:
+            h_end = sec_start
+            while h_end + 1 < len(out):
+                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {
+                    "^"
+                }:
+                    break
+                h_end += 1
+            sec_end = h_end
+
+        prefix, blocks, suffix = collect_bullet_blocks(out, sec_start, sec_end)
+        sorted_blocks = sort_and_dedup_blocks(blocks, dedup=dedup)
+
+        new_section: List[str] = []
+        new_section.extend(prefix)
+        for i_b, b in enumerate(sorted_blocks):
+            if i_b > 0 and (
+                not new_section
+                or (new_section and new_section[-1].strip() != "")
+            ):
+                new_section.append("\n")
+            new_section.extend(b)
+        new_section.extend(suffix)
+
+        out = out[:sec_start] + new_section + out[sec_end:]
+
+    return "".join(out)
+
+
+def run_release_notes(
+    inp: Optional[str], out_path: Optional[str], fix: bool
+) -> int:
+    if not inp:
+        inp = os.path.normpath(
+            os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
+        )
+    lines = read_text(inp)
+    normalized = normalize_release_notes(lines)
+    if fix and out_path:
+        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
+        return 2
+    if fix:
+        original = "".join(lines)
+        if original != normalized:
+            write_text(inp, normalized)
+        return 0
+    if out_path:
+        write_text(out_path, normalized)
+        return 0
+    sys.stdout.write(normalized)
+    return 0
+
+
+def main(argv: List[str]) -> int:
+    ap = argparse.ArgumentParser()
+    sub = ap.add_subparsers(dest="cmd", required=True)
+
+    ap_checks = sub.add_parser(
+        "checks-list", help="normalize clang-tidy checks list.rst"
+    )
+    ap_checks.add_argument("-i", "--input", dest="inp", default=None)
+    ap_checks.add_argument("-o", "--output", dest="out", default=None)
+    ap_checks.add_argument(
+        "--fix", action="store_true", help="rewrite the input file in place"
+    )
+
+    ap_rn = sub.add_parser(
+        "release-notes", help="normalize ReleaseNotes.rst sections"
+    )
+    ap_rn.add_argument("-i", "--input", dest="inp", default=None)
+    ap_rn.add_argument("-o", "--output", dest="out", default=None)
+    ap_rn.add_argument(
+        "--fix", action="store_true", help="rewrite the input file in place"
+    )
+
+    args = ap.parse_args(argv)
+
+    if args.cmd == "checks-list":
+        return run_checks_list(args.inp, args.out, args.fix)
+    if args.cmd == "release-notes":
+        return run_release_notes(args.inp, args.out, args.fix)
+
+    ap.error("unknown command")
+
+
+if __name__ == "__main__":
+    main(sys.argv[1:])
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
new file mode 100644
index 0000000000000..4a2598b93942b
--- /dev/null
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -0,0 +1,6 @@
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst -o %t.list
+// RUN: diff --strip-trailing-cr %t.list \
+// RUN:   %S/../../../docs/clang-tidy/checks/list.rst
+
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst -o %t.rn
+// RUN: diff --strip-trailing-cr %t.rn %S/../../../docs/ReleaseNotes.rst
diff --git a/clang-tools-extra/test/lit.cfg.py b/clang-tools-extra/test/lit.cfg.py
index c1da37d61bd61..c39ea29329674 100644
--- a/clang-tools-extra/test/lit.cfg.py
+++ b/clang-tools-extra/test/lit.cfg.py
@@ -57,6 +57,7 @@
 if config.clang_tidy_custom_check:
     config.available_features.add("custom-check")
 python_exec = shlex.quote(config.python_executable)
+config.substitutions.append(("%python", python_exec))
 check_clang_tidy = os.path.join(
     config.test_source_root, "clang-tidy", "check_clang_tidy.py"
 )

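For readers skimming the series, the following is a minimal, self-contained sketch of the row sorting that normalize_list_rst from this patch applies to the list.rst csv-table. The two sample rows are made up for illustration; only the regex and the key function mirror the patch.

    import re

    # Same row pattern as the patch: a csv-table line starting with a :doc: reference.
    DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")

    # Hypothetical csv-table rows, deliberately out of order.
    rows = [
        '   :doc:`bugprone-use-after-move <bugprone/use-after-move>`, "Yes"\n',
        '   :doc:`abseil-string-find-startswith <abseil/string-find-startswith>`, "Yes"\n',
    ]

    def key_for(line):
        # Rows with a :doc: label sort by label; anything else sorts after them.
        m = DOC_LINE_RE.match(line)
        return (0, m.group("label")) if m else (1, "")

    for row in sorted(rows, key=key_for):
        print(row, end="")
    # Prints the abseil row before the bugprone row.
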
>From c08b734ec6337afb2fbfb45fb7574ea8cf82add1 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 23:25:39 +0800
Subject: [PATCH 2/5] fix format

---
 .../clang-tidy-alphabetical-order-check.py    | 41 +++++++++----------
 .../infrastructure/alphabetical-order.cpp     |  7 +---
 2 files changed, 21 insertions(+), 27 deletions(-)

diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 321663bb7d577..680f21ec0e02c 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -1,6 +1,18 @@
 #!/usr/bin/env python3
+#
+# ===-----------------------------------------------------------------------===#
+#
+# Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
+# See https://llvm.org/LICENSE.txt for license information.
+# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
+#
+# ===-----------------------------------------------------------------------===#
 
 """
+
+ClangTidy Alphabetical Order Checker
+====================================
+
 Normalize clang-tidy docs with deterministic sorting for linting/tests.
 
 Subcommands:
@@ -80,9 +92,7 @@ def key_for(line: str):
     return "".join(out)
 
 
-def run_checks_list(
-    inp: Optional[str], out_path: Optional[str], fix: bool
-) -> int:
+def run_checks_list(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
     if not inp:
         inp = os.path.normpath(
             os.path.join(
@@ -116,11 +126,7 @@ def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
     for i in range(len(lines) - 1):
         if lines[i].rstrip("\n") == title:
             underline = lines[i + 1].rstrip("\n")
-            if (
-                underline
-                and set(underline) == {"^"}
-                and len(underline) >= len(title)
-            ):
+            if underline and set(underline) == {"^"} and len(underline) >= len(title):
                 return i
     return None
 
@@ -206,18 +212,14 @@ def normalize_release_notes(lines: List[str]) -> str:
             if h_end is None:
                 h_end = sec_start
                 while h_end + 1 < len(out):
-                    if out[h_end].strip() and set(
-                        out[h_end + 1].rstrip("\n")
-                    ) == {"^"}:
+                    if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
                         break
                     h_end += 1
             sec_end = h_end
         else:
             h_end = sec_start
             while h_end + 1 < len(out):
-                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {
-                    "^"
-                }:
+                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
                     break
                 h_end += 1
             sec_end = h_end
@@ -229,8 +231,7 @@ def normalize_release_notes(lines: List[str]) -> str:
         new_section.extend(prefix)
         for i_b, b in enumerate(sorted_blocks):
             if i_b > 0 and (
-                not new_section
-                or (new_section and new_section[-1].strip() != "")
+                    not new_section or (new_section and new_section[-1].strip() != "")
             ):
                 new_section.append("\n")
             new_section.extend(b)
@@ -241,9 +242,7 @@ def normalize_release_notes(lines: List[str]) -> str:
     return "".join(out)
 
 
-def run_release_notes(
-    inp: Optional[str], out_path: Optional[str], fix: bool
-) -> int:
+def run_release_notes(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
     if not inp:
         inp = os.path.normpath(
             os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
@@ -278,9 +277,7 @@ def main(argv: List[str]) -> int:
         "--fix", action="store_true", help="rewrite the input file in place"
     )
 
-    ap_rn = sub.add_parser(
-        "release-notes", help="normalize ReleaseNotes.rst sections"
-    )
+    ap_rn = sub.add_parser("release-notes", help="normalize ReleaseNotes.rst 
sections")
     ap_rn.add_argument("-i", "--input", dest="inp", default=None)
     ap_rn.add_argument("-o", "--output", dest="out", default=None)
     ap_rn.add_argument(
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
index 4a2598b93942b..0ac1484a00561 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -1,6 +1,3 @@
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst -o %t.list
-// RUN: diff --strip-trailing-cr %t.list \
-// RUN:   %S/../../../docs/clang-tidy/checks/list.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst | diff --strip-trailing-cr - %S/../../../docs/clang-tidy/checks/list.rst
 
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst -o %t.rn
-// RUN: diff --strip-trailing-cr %t.rn %S/../../../docs/ReleaseNotes.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst | diff --strip-trailing-cr - %S/../../../docs/ReleaseNotes.rst

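For context on the RUN lines above: the %python placeholder they use comes from the substitution registered in lit.cfg.py in the first patch. Conceptually (a rough illustration only, not lit's actual implementation; the interpreter path is hypothetical), lit expands such placeholders by simple replacement before running the command:

    # Rough sketch of lit-style substitution on a RUN line.
    substitutions = [("%python", "/usr/bin/python3")]  # hypothetical interpreter path

    run_line = (
        "%python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py "
        "checks-list -i %S/../../../docs/clang-tidy/checks/list.rst"
    )
    for placeholder, value in substitutions:
        run_line = run_line.replace(placeholder, value)
    print(run_line)  # %python is now the interpreter; lit expands %S, %t, etc. similarly.
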
>From 20b66111f4ba000a5cbcba45b62674e42a4f1f1e Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Sun, 2 Nov 2025 23:28:13 +0800
Subject: [PATCH 3/5] ~

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py      | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 680f21ec0e02c..fbb55efa536ff 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -231,7 +231,7 @@ def normalize_release_notes(lines: List[str]) -> str:
         new_section.extend(prefix)
         for i_b, b in enumerate(sorted_blocks):
             if i_b > 0 and (
-                    not new_section or (new_section and new_section[-1].strip() != "")
+                not new_section or (new_section and new_section[-1].strip() != "")
             ):
                 new_section.append("\n")
             new_section.extend(b)

>From c7dc5e99207804473cdee184bc8b435874fbf5c4 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Mon, 3 Nov 2025 11:36:52 +0800
Subject: [PATCH 4/5] refactor

---
 .../clang-tidy-alphabetical-order-check.py    | 259 +++++++++---------
 .../infrastructure/alphabetical-order.cpp     |   6 +-
 2 files changed, 126 insertions(+), 139 deletions(-)

diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index fbb55efa536ff..58d93dcf31235 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -15,18 +15,13 @@
 
 Normalize clang-tidy docs with deterministic sorting for linting/tests.
 
-Subcommands:
-  - checks-list: Sort entries in docs/clang-tidy/checks/list.rst csv-table.
-  - release-notes: Sort key sections in docs/ReleaseNotes.rst and de-duplicate
-                   entries in "Changes in existing checks".
-
-Usage:
-  clang-tidy-alphabetical-order-check.py <subcommand> [-i <input rst>] [-o <output rst>] [--fix]
+Behavior:
+- Sort entries in docs/clang-tidy/checks/list.rst csv-table.
+- Sort key sections in docs/ReleaseNotes.rst. Does not remove duplicate
+  entries; developers should merge duplicates manually when needed.
 
 Flags:
-  -i/--input   Input file.
-  -o/--output  Write normalized content here; omit to write to stdout.
-  --fix        Rewrite the input file in place. Cannot be combined with -o/--output.
+  -o/--output  Write normalized content to this path instead of updating docs.
 """
 
 import argparse
@@ -36,7 +31,13 @@
 import sys
 from typing import List, Optional, Sequence, Tuple
 
+# Matches a :doc:`label <path>` or :doc:`label` reference anywhere in text and
+# captures the label. Used to sort ReleaseNotes bullet items alphabetically
+# by their label.
 DOC_LABEL_RN_RE = re.compile(r":doc:`(?P<label>[^`<]+)\s*(?:<[^>]+>)?`")
+
+# Matches a single csv-table row line in list.rst that begins with a :doc:
+# reference, capturing the label. Used to extract the sort key per row.
 DOC_LINE_RE = re.compile(r"^\s*:doc:`(?P<label>[^`<]+?)\s*<[^>]+>`.*$")
 
 
@@ -55,6 +56,12 @@ def write_text(path: str, content: str) -> None:
 
 
 def normalize_list_rst(lines: List[str]) -> str:
+    """Return normalized content of checks list.rst from given lines.
+
+    Input: full file content split into lines.
+    Output: single string with csv-table rows sorted by :doc: label while
+            preserving header/leading comments and trailing content.
+    """
     out: List[str] = []
     i = 0
     n = len(lines)
@@ -92,37 +99,15 @@ def key_for(line: str):
     return "".join(out)
 
 
-def run_checks_list(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
-    if not inp:
-        inp = os.path.normpath(
-            os.path.join(
-                script_dir(),
-                "..",
-                "..",
-                "docs",
-                "clang-tidy",
-                "checks",
-                "list.rst",
-            )
-        )
-    lines = read_text(inp)
-    normalized = normalize_list_rst(lines)
-    if fix and out_path:
-        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
-        return 2
-    if fix:
-        original = "".join(lines)
-        if original != normalized:
-            write_text(inp, normalized)
-        return 0
-    if out_path:
-        write_text(out_path, normalized)
-        return 0
-    sys.stdout.write(normalized)
-    return 0
+def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    """Find heading start index for a section underlined with ^ characters.
 
+    The function looks for a line equal to `title` followed by a line that
+    consists solely of ^, which matches the ReleaseNotes style for subsection
+    headings used here.
 
-def find_heading(lines: Sequence[str], title: str) -> Optional[int]:
+    Returns index of the title line, or None if not found.
+    """
     for i in range(len(lines) - 1):
         if lines[i].rstrip("\n") == title:
             underline = lines[i + 1].rstrip("\n")
@@ -173,125 +158,125 @@ def collect_bullet_blocks(
     return prefix, blocks, suffix
 
 
-def sort_and_dedup_blocks(
-    blocks: List[Tuple[str, List[str]]], dedup: bool = False
-) -> List[List[str]]:
-    seen = set()
-    filtered: List[Tuple[str, List[str]]] = []
-    for key, block in blocks:
-        if dedup:
-            if key in seen:
-                continue
-            seen.add(key)
-        filtered.append((key, block))
-    filtered.sort(key=lambda kb: kb[0])
-    return [b for _, b in filtered]
+def sort_blocks(blocks: List[Tuple[str, List[str]]]) -> List[List[str]]:
+    """Return blocks sorted deterministically by their extracted label.
 
+    Duplicates are preserved; merging is left to authors to handle manually.
+    """
+    return [b for _, b in sorted(blocks, key=lambda kb: kb[0])]
 
-def normalize_release_notes(lines: List[str]) -> str:
-    sections = [
-        ("New checks", False),
-        ("New check aliases", False),
-        ("Changes in existing checks", True),
-    ]
 
-    out = list(lines)
+def _find_section_bounds(
+    lines: Sequence[str], title: str, next_title: Optional[str]
+) -> Optional[Tuple[int, int, int]]:
+    """Return (h_start, sec_start, sec_end) for section `title`.
 
-    for idx in range(len(sections) - 1, -1, -1):
-        title, dedup = sections[idx]
-        h_start = find_heading(out, title)
-
-        if h_start is None:
-            continue
-
-        sec_start = h_start + 2
-
-        if idx + 1 < len(sections):
-            next_title = sections[idx + 1][0]
-            h_end = find_heading(out, next_title)
-            if h_end is None:
-                h_end = sec_start
-                while h_end + 1 < len(out):
-                    if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
-                        break
-                    h_end += 1
-            sec_end = h_end
-        else:
+    - h_start: index of the section title line
+    - sec_start: index of the first content line after underline
+    - sec_end: index of the first line of the next section title (or end)
+    """
+    h_start = find_heading(lines, title)
+    if h_start is None:
+        return None
+
+    sec_start = h_start + 2
+
+    # Determine end of section either from next_title or by scanning.
+    if next_title is not None:
+        h_end = find_heading(lines, next_title)
+        if h_end is None:
+            # Scan forward to the next heading-like underline.
             h_end = sec_start
-            while h_end + 1 < len(out):
-                if out[h_end].strip() and set(out[h_end + 1].rstrip("\n")) == {"^"}:
+            while h_end + 1 < len(lines):
+                if lines[h_end].strip() and set(lines[h_end + 1].rstrip("\n")) == {"^"}:
                     break
                 h_end += 1
-            sec_end = h_end
+        sec_end = h_end
+    else:
+        # Scan to end or until a heading underline is found.
+        h_end = sec_start
+        while h_end + 1 < len(lines):
+            if lines[h_end].strip() and set(lines[h_end + 1].rstrip("\n")) == {"^"}:
+                break
+            h_end += 1
+        sec_end = h_end
 
-        prefix, blocks, suffix = collect_bullet_blocks(out, sec_start, sec_end)
-        sorted_blocks = sort_and_dedup_blocks(blocks, dedup=dedup)
+    return h_start, sec_start, sec_end
 
-        new_section: List[str] = []
-        new_section.extend(prefix)
-        for i_b, b in enumerate(sorted_blocks):
-            if i_b > 0 and (
-                not new_section or (new_section and new_section[-1].strip() != "")
-            ):
-                new_section.append("\n")
-            new_section.extend(b)
-        new_section.extend(suffix)
 
-        out = out[:sec_start] + new_section + out[sec_end:]
+def _normalize_release_notes_section(
+    lines: List[str], title: str, next_title: Optional[str]
+) -> List[str]:
+    """Normalize a single release-notes section and return updated lines."""
+    bounds = _find_section_bounds(lines, title, next_title)
+    if bounds is None:
+        return lines
+    _, sec_start, sec_end = bounds
+
+    prefix, blocks, suffix = collect_bullet_blocks(lines, sec_start, sec_end)
+    sorted_blocks = sort_blocks(blocks)
+
+    new_section: List[str] = []
+    new_section.extend(prefix)
+    for i_b, b in enumerate(sorted_blocks):
+        if i_b > 0 and (not new_section or (new_section and new_section[-1].strip() != "")):
+            new_section.append("\n")
+        new_section.extend(b)
+    new_section.extend(suffix)
+
+    return lines[:sec_start] + new_section + lines[sec_end:]
+
+
+def normalize_release_notes(lines: List[str]) -> str:
+    sections = ["New checks", "New check aliases", "Changes in existing 
checks"]
+
+    out = list(lines)
+
+    for idx in range(len(sections) - 1, -1, -1):
+        title = sections[idx]
+        next_title = sections[idx + 1] if idx + 1 < len(sections) else None
+        out = _normalize_release_notes_section(out, title, next_title)
 
     return "".join(out)
 
 
-def run_release_notes(inp: Optional[str], out_path: Optional[str], fix: bool) -> int:
-    if not inp:
-        inp = os.path.normpath(
-            os.path.join(script_dir(), "..", "..", "docs", "ReleaseNotes.rst")
-        )
-    lines = read_text(inp)
-    normalized = normalize_release_notes(lines)
-    if fix and out_path:
-        sys.stderr.write("error: --fix cannot be used together with 
--output\n")
-        return 2
-    if fix:
-        original = "".join(lines)
-        if original != normalized:
-            write_text(inp, normalized)
-        return 0
-    if out_path:
-        write_text(out_path, normalized)
-        return 0
-    sys.stdout.write(normalized)
-    return 0
+def _default_paths() -> Tuple[str, str]:
+    base = os.path.normpath(os.path.join(script_dir(), "..", ".."))
+    list_doc = os.path.join(base, "docs", "clang-tidy", "checks", "list.rst")
+    rn_doc = os.path.join(base, "docs", "ReleaseNotes.rst")
+    return list_doc, rn_doc
 
 
 def main(argv: List[str]) -> int:
     ap = argparse.ArgumentParser()
-    sub = ap.add_subparsers(dest="cmd", required=True)
-
-    ap_checks = sub.add_parser(
-        "checks-list", help="normalize clang-tidy checks list.rst"
-    )
-    ap_checks.add_argument("-i", "--input", dest="inp", default=None)
-    ap_checks.add_argument("-o", "--output", dest="out", default=None)
-    ap_checks.add_argument(
-        "--fix", action="store_true", help="rewrite the input file in place"
-    )
-
-    ap_rn = sub.add_parser("release-notes", help="normalize ReleaseNotes.rst 
sections")
-    ap_rn.add_argument("-i", "--input", dest="inp", default=None)
-    ap_rn.add_argument("-o", "--output", dest="out", default=None)
-    ap_rn.add_argument(
-        "--fix", action="store_true", help="rewrite the input file in place"
-    )
-
+    ap.add_argument("-o", "--output", dest="out", default=None)
     args = ap.parse_args(argv)
 
-    if args.cmd == "checks-list":
-        return run_checks_list(args.inp, args.out, args.fix)
-    if args.cmd == "release-notes":
-        return run_release_notes(args.inp, args.out, args.fix)
+    list_doc, rn_doc = _default_paths()
 
-    ap.error("unknown command")
+    if args.out:
+        out_path = args.out
+        out_lower = os.path.basename(out_path).lower()
+        if "release" in out_lower:
+            lines = read_text(rn_doc)
+            normalized = normalize_release_notes(lines)
+            write_text(out_path, normalized)
+            return 0
+        else:
+            lines = read_text(list_doc)
+            normalized = normalize_list_rst(lines)
+            write_text(out_path, normalized)
+            return 0
+
+    list_lines = read_text(list_doc)
+    rn_lines = read_text(rn_doc)
+    list_norm = normalize_list_rst(list_lines)
+    rn_norm = normalize_release_notes(rn_lines)
+    if "".join(list_lines) != list_norm:
+        write_text(list_doc, list_norm)
+    if "".join(rn_lines) != rn_norm:
+        write_text(rn_doc, rn_norm)
+    return 0
 
 
 if __name__ == "__main__":
diff --git a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
index 0ac1484a00561..c238884007595 100644
--- a/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
+++ b/clang-tools-extra/test/clang-tidy/infrastructure/alphabetical-order.cpp
@@ -1,3 +1,5 @@
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py checks-list -i %S/../../../docs/clang-tidy/checks/list.rst | diff --strip-trailing-cr - %S/../../../docs/clang-tidy/checks/list.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py -o %t.clang-tidy-checks-list.rst
+// RUN: diff --strip-trailing-cr %t.clang-tidy-checks-list.rst %S/../../../docs/clang-tidy/checks/list.rst
 
-// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py release-notes -i %S/../../../docs/ReleaseNotes.rst | diff --strip-trailing-cr - %S/../../../docs/ReleaseNotes.rst
+// RUN: %python %S/../../../clang-tidy/tool/clang-tidy-alphabetical-order-check.py -o %t.ReleaseNotes.rst
+// RUN: diff --strip-trailing-cr %t.ReleaseNotes.rst %S/../../../docs/ReleaseNotes.rst

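To make the behavior change in this refactor concrete, here is a small sketch of sort_blocks on made-up (label, lines) pairs as collect_bullet_blocks would produce them; unlike the earlier sort_and_dedup_blocks, duplicate labels are kept and left for authors to merge by hand. The bullet text below is invented for illustration.

    from typing import List, Tuple

    # Same helper as in the refactored script: sort by label, keep duplicates.
    def sort_blocks(blocks: List[Tuple[str, List[str]]]) -> List[List[str]]:
        return [b for _, b in sorted(blocks, key=lambda kb: kb[0])]

    # Hypothetical release-notes bullet blocks, one label duplicated.
    blocks = [
        ("modernize-use-override", ["- Improved :doc:`modernize-use-override` ...\n"]),
        ("bugprone-assert-side-effect", ["- Improved :doc:`bugprone-assert-side-effect` ...\n"]),
        ("modernize-use-override", ["- Improved :doc:`modernize-use-override` again ...\n"]),
    ]

    for block in sort_blocks(blocks):
        print("".join(block), end="")
    # bugprone-assert-side-effect prints first; both modernize-use-override
    # entries remain, in their original relative order (sorted() is stable).
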
>From 555beb058a7ac8118c0603690a4570dc51b74733 Mon Sep 17 00:00:00 2001
From: mtx <[email protected]>
Date: Mon, 3 Nov 2025 11:51:23 +0800
Subject: [PATCH 5/5] fix format

---
 .../clang-tidy/tool/clang-tidy-alphabetical-order-check.py    | 4 +++-
 1 file changed, 3 insertions(+), 1 deletion(-)

diff --git a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
index 58d93dcf31235..80e0450c5987c 100644
--- a/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
+++ b/clang-tools-extra/clang-tidy/tool/clang-tidy-alphabetical-order-check.py
@@ -219,7 +219,9 @@ def _normalize_release_notes_section(
     new_section: List[str] = []
     new_section.extend(prefix)
     for i_b, b in enumerate(sorted_blocks):
-        if i_b > 0 and (not new_section or (new_section and new_section[-1].strip() != "")):
+        if i_b > 0 and (
+            not new_section or (new_section and new_section[-1].strip() != "")
+        ):
             new_section.append("\n")
         new_section.extend(b)
     new_section.extend(suffix)
