This is an automated email from the ASF dual-hosted git repository.

xiaoxiang pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/nuttx.git

commit 412644fcf2d3f65fc4c6f2a4ae46ddc24364c219
Author: Xu Xingliang <[email protected]>
AuthorDate: Fri Nov 1 10:29:04 2024 +0800

    gdb: optimize memory commands
    
    Tested on a complex project, results are promising.
    
    Command                 Time cost(s)           Time saving(s)   Performance 
Boost
                            Before      After
    memleak                 39.494172   22.366415   17.127757       1.8
    memdump                 41.872441   26.458386   15.414055       1.6
    memdump -a 0x1234       28.116294   1.114119    27.002175       25.2
    memdump --no-backtrace    N/A       1.114119
    memmap                  7.973809    6.836468    1.137341        1.2
    
    Signed-off-by: Xu Xingliang <[email protected]>
---
 tools/gdb/nuttxgdb/memdump.py      | 1310 ++++++++++++------------------------
 tools/gdb/nuttxgdb/mm.py           |  637 ++++++++++++++++++
 tools/gdb/nuttxgdb/prefix.py       |    7 +
 tools/gdb/nuttxgdb/protocols/mm.py |  120 ++++
 tools/gdb/nuttxgdb/utils.py        |    7 +
 5 files changed, 1195 insertions(+), 886 deletions(-)

diff --git a/tools/gdb/nuttxgdb/memdump.py b/tools/gdb/nuttxgdb/memdump.py
index d65734781f..eeddb86493 100644
--- a/tools/gdb/nuttxgdb/memdump.py
+++ b/tools/gdb/nuttxgdb/memdump.py
@@ -22,888 +22,352 @@
 
 import argparse
 import bisect
+import json
 import time
+from collections import defaultdict
+from os import path
+from typing import Dict, Generator, List, Protocol, Tuple
 
 import gdb
 
-from . import utils
-from .lists import NxSQueue
-from .utils import get_long_type, get_symbol_value, lookup_type, read_ulong
-
-MM_ALLOC_BIT = 0x1
-MM_PREVFREE_BIT = 0x2
-MM_MASK_BIT = MM_ALLOC_BIT | MM_PREVFREE_BIT
-MEMPOOL_MAGIC_ALLOC = 0x55555555
-
-PID_MM_ORPHAN = -6
-PID_MM_BIGGEST = -5
-PID_MM_FREE = -4
-PID_MM_ALLOC = -3
-PID_MM_LEAK = -2
-PID_MM_MEMPOOL = -1
-
-mm_allocnode_type = lookup_type("struct mm_allocnode_s")
-sizeof_size_t = lookup_type("size_t").sizeof
-mempool_backtrace_type = lookup_type("struct mempool_backtrace_s")
-
-CONFIG_MM_BACKTRACE = get_symbol_value("CONFIG_MM_BACKTRACE")
-CONFIG_MM_DFAULT_ALIGNMENT = get_symbol_value("CONFIG_MM_DFAULT_ALIGNMENT")
-
-
-def align_up(size, align) -> int:
-    """Align the size to the specified alignment"""
-    return (size + (align - 1)) & ~(align - 1)
-
-
-def mm_nodesize(size) -> int:
-    """Return the real size of a memory node"""
-    return size & ~MM_MASK_BIT
-
-
-def mm_node_is_alloc(size) -> bool:
-    """Return node is allocated according to recorded size"""
-    return size & MM_ALLOC_BIT != 0
-
-
-def mm_prevnode_is_free(size) -> bool:
-    """Return prevnode is free according to recorded size"""
-    return size & MM_PREVFREE_BIT != 0
-
-
-def mm_foreach(heap):
-    """Iterate over a heap, yielding each node"""
-    nregions = get_symbol_value("CONFIG_MM_REGIONS")
-    heapstart = heap["mm_heapstart"]
-    heapend = heap["mm_heapend"]
-
-    for region in range(0, nregions):
-        start = heapstart[region]
-        end = heapend[region]
-        node = start
-        while node <= end:
-            yield node
-            next = int(node) + mm_nodesize(node["size"])
-            next = gdb.Value(next).cast(mm_allocnode_type.pointer())
-            if node == next:
-                gdb.write(f"Error: maybe have memory fault on {hex(node)}\n")
-                break
-            node = next
-
-
-def mm_dumpnode(node, count, align, simple, detail, alive):
-    if node["size"] & MM_ALLOC_BIT != 0:
-        charnode = int(node)
-        if not alive:
-            # if pid is not alive put a red asterisk.
-            gdb.write("\x1b[33;1m*\x1b[m")
-
-        if not detail:
-            gdb.write("%*d" % (6 if alive else 5, count))
-
-        gdb.write(
-            "%6d%12u%12u%#*x"
-            % (
-                node["pid"],
-                mm_nodesize(node["size"]),
-                node["seqno"],
-                align,
-                charnode + mm_allocnode_type.sizeof,
-            )
-        )
-
-        if node.type.has_key("backtrace"):
-            firstrow = True
-            for backtrace in utils.ArrayIterator(node["backtrace"]):
-                if int(backtrace) == 0:
-                    break
-
-                if simple:
-                    gdb.write(" %0#*x" % (align, int(backtrace)))
-                else:
-                    if firstrow:
-                        firstrow = False
-                    else:
-                        if not detail:
-                            gdb.write(" " * 6)
-                        gdb.write(" " * (6 + 12 + 12 + align))
-                    gdb.write(
-                        "  [%0#*x] %-20s %s:%d\n"
-                        % (
-                            align,
-                            int(backtrace),
-                            backtrace.format_string(
-                                raw=False, symbols=True, address=False
-                            ),
-                            gdb.find_pc_line(backtrace).symtab,
-                            gdb.find_pc_line(backtrace).line,
-                        )
-                    )
-
-    else:
-        charnode = int(node)
-        gdb.write(
-            "%12u%#*x"
-            % (
-                mm_nodesize(node["size"]),
-                align,
-                charnode + mm_allocnode_type.sizeof,
-            )
+from . import mm, utils
+
+
+class MMNodeDump(Protocol):
+    """Node information protocol for dump"""
+
+    address: int  # Note that address should be in type of int
+    nodesize: int
+    seqno: int
+    pid: int
+    backtrace: Tuple[int]
+    is_free: bool
+    from_pool: bool
+    overhead: int
+
+    def contains(self, addr: int) -> bool: ...
+
+    def read_memory(self) -> memoryview: ...
+
+
+def dump_nodes(
+    heaps: List[mm.MMHeap],
+    pid=None,
+    nodesize=None,
+    used=None,
+    free=None,
+    seqmin=None,
+    seqmax=None,
+    orphan=None,
+    no_heap=False,
+    no_pool=False,
+    no_pid=None,
+) -> Generator[MMNodeDump, None, None]:
+    def filter_node(node: MMNodeDump) -> bool:
+        return (
+            (pid is None or node.pid == pid)
+            and (no_pid is None or node.pid != no_pid)
+            and (nodesize is None or node.nodesize == nodesize)
+            and (not used or not node.is_free)
+            and (not free or node.is_free)
+            and (seqmin is None or node.seqno >= seqmin)
+            and (seqmax is None or node.seqno <= seqmax)
+            and (not orphan or node.is_orphan)
         )
 
-    gdb.write("\n")
-
-
-def mempool_multiple_foreach(mpool):
-    """Iterate over all pools in a mempool, yielding each pool"""
-    i = 0
-    while i < mpool["npools"]:
-        pool = mpool["pools"] + i
-        yield pool
-        i += 1
-
-
-def mempool_realblocksize(pool):
-    """Return the real block size of a mempool"""
+    if not no_heap:
+        yield from (node for heap in heaps for node in filter(filter_node, 
heap.nodes))
 
-    if CONFIG_MM_DFAULT_ALIGNMENT:
-        mempool_align = CONFIG_MM_DFAULT_ALIGNMENT
-    else:
-        mempool_align = 2 * sizeof_size_t
-
-    if CONFIG_MM_BACKTRACE >= 0:
-        return align_up(
-            pool["blocksize"] + mempool_backtrace_type.sizeof,
-            mempool_align,
+    if not no_pool:
+        yield from (
+            blk
+            for pool in mm.get_pools(heaps)
+            for blk in filter(filter_node, pool.blks)
         )
-    else:
-        return pool["blocksize"]
-
-
-def get_backtrace(node):
-
-    backtrace_list = []
-    max = node["backtrace"].type.range()[1]
-    for x in range(0, max):
-        if node["backtrace"][x] != 0:
-            backtrace_list.append(int(node["backtrace"][x]))
-        else:
-            break
-
-    return tuple(backtrace_list)
-
 
-def record_backtrace(node, size, backtrace_dict):
-    if node.type.has_key("backtrace"):
-        backtrace = get_backtrace(node)
-        if (backtrace, int(node["pid"])) not in backtrace_dict.keys():
-            info = {}
-            info["node"] = node
-            info["count"] = 1
-            info["size"] = size
-            info["pid"] = node["pid"]
-            backtrace_dict[(backtrace, int(node["pid"]))] = info
-        else:
-            backtrace_dict[(backtrace, int(node["pid"]))]["count"] += 1
 
-    return backtrace_dict
+def group_nodes(
+    nodes: List[MMNodeDump], grouped: Dict[MMNodeDump, List[MMNodeDump]] = None
+) -> Dict[MMNodeDump, List[MMNodeDump]]:
+    grouped = grouped or defaultdict(list)
+    for node in nodes:
+        grouped[node].append(node)
+    return grouped
 
 
-def get_count(element):
-    return element["count"]
-
-
-def mempool_foreach(pool):
-    """Iterate over all block in a mempool"""
-
-    sq_entry_type = lookup_type("sq_entry_t")
-
-    blocksize = mempool_realblocksize(pool)
-    if pool["ibase"] != 0:
-        nblk = pool["interruptsize"] / blocksize
-        while nblk > 0:
-            bufaddr = gdb.Value(pool["ibase"] + nblk * blocksize + 
pool["blocksize"])
-            buf = bufaddr.cast(mempool_backtrace_type.pointer())
-            yield buf
-            nblk -= 1
-
-    for entry in NxSQueue(pool["equeue"]):
-        nblk = (pool["expandsize"] - sq_entry_type.sizeof) / blocksize
-        base = int(entry) - nblk * blocksize
-        while nblk > 0:
-            nblk -= 1
-            bufaddr = gdb.Value(base + nblk * blocksize + pool["blocksize"])
-            buf = bufaddr.cast(mempool_backtrace_type.pointer())
-            yield buf
-
-
-def mempool_dumpbuf(buf, blksize, count, align, simple, detail, alive):
-    charnode = gdb.Value(buf).cast(lookup_type("char").pointer())
-
-    if not alive:
-        # if pid is not alive put a red asterisk.
-        gdb.write("\x1b[33;1m*\x1b[m")
-
-    if not detail:
-        gdb.write("%*d" % (6 if alive else 5, count))
-
+def print_node(node: MMNodeDump, alive, count=1, formatter=None, 
no_backtrace=False):
+    formatter = formatter or "{:>1} {:>4} {:>12} {:>12} {:>12} {:>14} {:>18} 
{:}\n"
     gdb.write(
-        "%6d%12u%12u%#*x"
-        % (
-            buf["pid"],
-            blksize,
-            buf["seqno"],
-            align,
-            (int)(charnode - blksize),
+        formatter.format(
+            "\x1b[33;1m*\x1b[m" if not alive else "",
+            "P" if node.from_pool else "H",
+            count,
+            node.pid,
+            node.nodesize,
+            node.seqno,
+            hex(node.address),
+            "",
         )
     )
 
-    if buf.type.has_key("backtrace"):
-        max = buf["backtrace"].type.range()[1]
-        firstrow = True
-        for x in range(0, max):
-            backtrace = int(buf["backtrace"][x])
-            if backtrace == 0:
-                break
-
-            if simple:
-                gdb.write(" %0#*x" % (align, backtrace))
-            else:
-                if firstrow:
-                    firstrow = False
-                else:
-                    if not detail:
-                        gdb.write(" " * 6)
-                    gdb.write(" " * (6 + 12 + 12 + align))
-                gdb.write(
-                    "  [%0#*x] %-20s %s:%d\n"
-                    % (
-                        align,
-                        backtrace,
-                        buf["backtrace"][x].format_string(
-                            raw=False, symbols=True, address=False
-                        ),
-                        gdb.find_pc_line(backtrace).symtab,
-                        gdb.find_pc_line(backtrace).line,
-                    )
-                )
-
-    gdb.write("\n")
+    if mm.CONFIG_MM_BACKTRACE and not no_backtrace:
+        leading = formatter.format("", "", "", "", "", "", "", "")[:-1]
+        btformat = leading + "{1:<48}{2}\n"
+        if node.backtrace and node.backtrace[0]:
+            gdb.write(f"{utils.Backtrace(node.backtrace, 
formatter=btformat)}\n")
 
 
-class HeapNode:
-    def __init__(self, gdb_node, nextfree=False):
-        self.gdb_node = gdb_node
+def print_header(formatter=None):
+    formatter = formatter or "{:>1} {:>4} {:>12} {:>12} {:>12} {:>14} {:>18} 
{:}\n"
+    head = ("", "Pool", "CNT", "PID", "Size", "Seqno", "Address", "Backtrace")
+    gdb.write(formatter.format(*head))
 
-        record_size = gdb_node["size"]
 
-        try:
-            seqno = gdb_node["seqno"]
-        except gdb.error:
-            seqno = 0
-
-        try:
-            node_pid = gdb_node["pid"]
-        except gdb.error:
-            node_pid = 0
-
-        self.size = mm_nodesize(record_size)
-        self.alloc = mm_node_is_alloc(record_size)
-        self.seqno = seqno
-        self.pid = node_pid
-        self.base = int(gdb_node)
-        self.prevfree = mm_prevnode_is_free(record_size)
-        self.nextfree = nextfree
-
-    def __lt__(self, other):
-        return self.size < other.size
-
-    def inside_sequence(self, seqmin, seqmax):
-        return self.seqno >= seqmin and self.seqno <= seqmax
-
-    def contains_address(self, address):
-        return address >= self.base and address < self.base + self.size
-
-    def is_orphan(self):
-        return self.prevfree or self.nextfree
-
-    def dump(self, detail, simple, align, check_alive, backtrace_dict):
-        if detail:
-            mm_dumpnode(
-                self.gdb_node,
-                1,
-                align,
-                simple,
-                detail,
-                check_alive(self.pid),
-            )
-        else:
-            backtrace_dict = record_backtrace(self.gdb_node, self.size, 
backtrace_dict)
+def get_heaps(args_heap=None) -> List[mm.MMHeap]:
+    if args_heap:
+        return [mm.MMHeap(gdb.parse_and_eval(args_heap))]
+    return mm.get_heaps()
 
 
-class Memdump(gdb.Command):
-    """Dump the heap and mempool memory"""
+class MMDump(gdb.Command):
+    """Dump memory manager heap"""
 
     def __init__(self):
-        super().__init__("memdump", gdb.COMMAND_USER)
-
-    def check_alive(self, pid):
-        return self.pidhash[pid & self.npidhash - 1] != 0
-
-    def mempool_dump(self, mpool, pid, seqmin, seqmax, address, simple, 
detail):
-        """Dump the mempool memory"""
-        for pool in mempool_multiple_foreach(mpool):
-            if pid == PID_MM_FREE:
-                for entry in NxSQueue(pool["queue"]):
-                    gdb.write("%12u%#*x\n" % (pool["blocksize"], self.align, 
entry))
-                    self.aordblks += 1
-                    self.uordblks += mempool_realblocksize(pool)
-
-                for entry in NxSQueue(pool["iqueue"]):
-                    gdb.write("%12u%#*x\n" % (pool["blocksize"], self.align, 
entry))
-                    self.aordblks += 1
-                    self.uordblks += mempool_realblocksize(pool)
-            else:
-                for buf in mempool_foreach(pool):
-                    if (
-                        (pid == buf["pid"] or pid == PID_MM_ALLOC)
-                        and (buf["seqno"] >= seqmin and buf["seqno"] < seqmax)
-                        and buf["magic"] == MEMPOOL_MAGIC_ALLOC
-                    ):
-                        charnode = int(buf)
-                        if detail:
-                            mempool_dumpbuf(
-                                buf,
-                                pool["blocksize"],
-                                1,
-                                self.align,
-                                simple,
-                                detail,
-                                self.check_alive(buf["pid"]),
-                            )
-                        else:
-                            self.backtrace_dict = record_backtrace(
-                                buf, pool["blocksize"], self.backtrace_dict
-                            )
-                        if address and (
-                            address < charnode
-                            and address >= charnode - pool["blocksize"]
-                        ):
-                            mempool_dumpbuf(
-                                buf,
-                                pool["blocksize"],
-                                1,
-                                self.align,
-                                simple,
-                                detail,
-                                self.check_alive(buf["pid"]),
-                            )
-                            gdb.write(
-                                "\nThe address 0x%x found belongs to"
-                                "the mempool node with base address 0x%x\n"
-                                % (address, charnode)
-                            )
-                            print_node = "p *(struct mempool_backtrace_s 
*)0x%x" % (
-                                charnode
-                            )
-                            gdb.write(print_node + "\n")
-                            gdb.execute(print_node)
-                            return True
-                        self.aordblks += 1
-                        self.uordblks += mempool_realblocksize(pool)
-        return False
-
-    def memnode_dump(self, node):
-        self.aordblks += 1
-        self.uordblks += node.size
-        node.dump(
-            detail=self.detail,
-            simple=self.simple,
-            align=self.align,
-            check_alive=self.check_alive,
-            backtrace_dict=self.backtrace_dict,
+        super().__init__("mm dump", gdb.COMMAND_USER)
+        # define memdump as mm dump
+        utils.alias("memdump", "mm dump")
+
+    def find(self, heaps: List[mm.MMHeap], addr):
+        """Find the node that contains the address"""
+        # Search pools firstly.
+        for pool in mm.get_pools(heaps):
+            if blk := pool.find(addr):
+                return blk
+
+        # Search heaps
+        for heap in heaps:
+            if node := heap.find(addr):
+                return node
+
+    def parse_args(self, arg):
+        parser = argparse.ArgumentParser(description=self.__doc__)
+        parser.add_argument(
+            "-a",
+            "--address",
+            type=str,
+            default=None,
+            help="The address to inspect",
         )
 
-    def memdump_tail(self, detail, simple):
-        if not detail:
-            output = [v for v in self.backtrace_dict.values()]
-            output.sort(key=get_count, reverse=True)
-            for node in output:
-                if node["node"].type == mm_allocnode_type.pointer():
-                    mm_dumpnode(
-                        node["node"],
-                        node["count"],
-                        self.align,
-                        simple,
-                        detail,
-                        self.check_alive(node["pid"]),
-                    )
-                else:
-                    mempool_dumpbuf(
-                        node["node"],
-                        node["size"],
-                        node["count"],
-                        self.align,
-                        simple,
-                        detail,
-                        self.check_alive(node["pid"]),
-                    )
-
-        gdb.write("%12s%12s\n" % ("Total Blks", "Total Size"))
-        gdb.write("%12d%12d\n" % (self.aordblks, self.uordblks))
-
-    def memdump(self, pid, seqmin, seqmax, address, simple, detail, 
biggest_top=30):
-        """Dump the heap memory"""
-
-        self.simple = simple
-        self.detail = detail
-
-        alloc_node = []
-        free_node = []
-        mempool_node = []
-
-        heap = gdb.parse_and_eval("g_mmheap")
-        if heap.type.has_key("mm_mpool"):
-            if self.mempool_dump(
-                heap["mm_mpool"], pid, seqmin, seqmax, address, simple, detail
-            ):
-                return
-
-        prev_node = None
-
-        for gdb_node in mm_foreach(heap):
-            node = HeapNode(gdb_node)
-
-            if prev_node:
-                prev_node.nextfree = not node.alloc
-
-            prev_node = node
-
-            if not node.inside_sequence(seqmin, seqmax):
-                continue
-
-            if address:
-                if node.contains_address(address):
-                    gdb.write(
-                        "\nThe address 0x%x found belongs to"
-                        "the memory node with base address 0x%x\n"
-                        % (address, node.base)
-                    )
-                    print_node = "p *(struct mm_allocnode_s *)0x%x" % 
(node.base)
-                    gdb.write(print_node + "\n")
-                    gdb.execute(print_node)
-                    return
-
-            if node.pid == PID_MM_MEMPOOL:
-                mempool_node.append(node)
-            elif node.alloc:
-                alloc_node.append(node)
-            else:
-                free_node.append(node)
-
-        title_dict = {
-            PID_MM_ALLOC: "Dump all used memory node info, use 
'\x1b[33;1m*\x1b[m' mark pid does not exist:\n",
-            PID_MM_MEMPOOL: "Dump mempool:\n",
-            PID_MM_FREE: "Dump all free memory node info:\n",
-            PID_MM_BIGGEST: f"Dump biggest allocated top {biggest_top}\n",
-            PID_MM_ORPHAN: "Dump allocated orphan nodes\n",
-        }
-
-        if pid in title_dict.keys():
-            title = title_dict[pid]
-        elif pid >= 0:
-            title = title_dict[PID_MM_ALLOC]
-        else:
-            title = "Dump unspecific\n"
-
-        gdb.write(title)
-        if not detail:
-            gdb.write("%6s" % ("CNT"))
-        gdb.write(
-            "%6s%12s%12s%8s%8s%8s\n"
-            % ("PID", "Size", "Sequence", str(self.align), "Address", 
"Callstack")
+        parser.add_argument(
+            "--heap",
+            type=str,
+            default=None,
+            help="Which heap to inspect",
         )
 
-        if pid == PID_MM_FREE:
-            self.detail = True
-            for node in free_node:
-                self.memnode_dump(node)
-        elif pid == PID_MM_ALLOC:
-            for node in alloc_node:
-                self.memnode_dump(node)
-        elif pid == PID_MM_BIGGEST:
-            sorted_alloc = sorted(alloc_node)[-biggest_top:]
-            for node in sorted_alloc:
-                self.memnode_dump(node)
-        elif pid == PID_MM_ORPHAN:
-            for node in alloc_node:
-                if node.is_orphan():
-                    self.memnode_dump(node)
-        elif pid == PID_MM_MEMPOOL:
-            for node in mempool_node:
-                self.memnode_dump(node)
-        elif pid >= 0:
-            for node in alloc_node:
-                if node.pid == pid:
-                    self.memnode_dump(node)
-
-        self.memdump_tail(detail, simple)
-
-    def complete(self, text, word):
-        return gdb.COMPLETE_SYMBOL
-
-    def parse_arguments(self, argv):
-        parser = argparse.ArgumentParser(description="memdump command")
-        parser.add_argument("-p", "--pid", type=str, help="Thread PID, -1 for 
mempool")
-        parser.add_argument("-a", "--addr", type=str, help="Query memory 
address")
-        parser.add_argument("-i", "--min", type=str, help="Minimum value")
-        parser.add_argument("-x", "--max", type=str, help="Maximum value")
-        parser.add_argument("--used", action="store_true", help="Used flag")
+        parser.add_argument(
+            "-p", "--pid", type=int, default=None, help="Thread PID, -1 for 
mempool"
+        )
+        parser.add_argument(
+            "-i", "--min", type=int, default=None, help="Minimum sequence 
number"
+        )
+        parser.add_argument(
+            "-x", "--max", type=int, default=None, help="Maximum sequence 
number"
+        )
         parser.add_argument("--free", action="store_true", help="Free flag")
         parser.add_argument("--biggest", action="store_true", help="biggest 
allocated")
-        parser.add_argument("--top", type=str, help="biggest top n, default 
30")
         parser.add_argument(
-            "--orphan", action="store_true", help="orphan allocated(neighbor 
of free)"
+            "--orphan", action="store_true", help="Filter nodes that are 
orphan"
         )
         parser.add_argument(
-            "-d",
-            "--detail",
+            "--top", type=int, default=None, help="biggest top n, default to 
all"
+        )
+        parser.add_argument(
+            "--size", type=int, default=None, help="Node block size filter."
+        )
+        parser.add_argument(
+            "--no-pool",
+            "--nop",
             action="store_true",
-            help="Output details of each node",
-            default=False,
+            help="Exclude dump from memory pool",
+        )
+        parser.add_argument(
+            "--no-heap", "--noh", action="store_true", help="Exclude heap dump"
         )
         parser.add_argument(
-            "-s",
-            "--simple",
+            "--no-group", "--nog", action="store_true", help="Do not group the 
nodes"
+        )
+        parser.add_argument(
+            "--no-backtrace",
+            "--nob",
             action="store_true",
-            help="Simplified Output",
-            default=False,
+            help="Do not print backtrace",
+        )
+
+        # add option to sort the node by size or count
+        parser.add_argument(
+            "--sort",
+            type=str,
+            choices=["size", "nodesize", "count", "seq", "address"],
+            default="count",
+            help="sort the node by size(nodesize * count), nodesize,  count or 
sequence number",
         )
 
-        if argv[0] == "":
-            argv = None
         try:
-            args = parser.parse_args(argv)
+            return parser.parse_args(gdb.string_to_argv(arg))
         except SystemExit:
-            return None
-
-        return {
-            "pid": int(args.pid, 0) if args.pid else None,
-            "seqmin": int(args.min, 0) if args.min else 0,
-            "seqmax": int(args.max, 0) if args.max else 0xFFFFFFFF,
-            "used": args.used,
-            "free": args.free,
-            "addr": int(utils.parse_arg(args.addr)) if args.addr else None,
-            "simple": args.simple,
-            "detail": args.detail,
-            "biggest": args.biggest,
-            "orphan": args.orphan,
-            "top": int(args.top) if args.top else 30,
-        }
-
-    def invoke(self, args, from_tty):
-        if sizeof_size_t == 4:
-            self.align = 11
-        else:
-            self.align = 19
-
-        arg = self.parse_arguments(args.split(" "))
-
-        if arg is None:
             return
 
-        pid = PID_MM_ALLOC
-        if arg["used"]:
-            pid = PID_MM_ALLOC
-        elif arg["free"]:
-            pid = PID_MM_FREE
-        elif arg["biggest"]:
-            pid = PID_MM_BIGGEST
-        elif arg["orphan"]:
-            pid = PID_MM_ORPHAN
-        elif arg["pid"]:
-            pid = arg["pid"]
-        if CONFIG_MM_BACKTRACE <= 0:
-            arg["detail"] = True
-
-        self.aordblks = 0
-        self.uordblks = 0
-        self.backtrace_dict = {}
-        self.npidhash = gdb.parse_and_eval("g_npidhash")
-        self.pidhash = gdb.parse_and_eval("g_pidhash")
-        self.memdump(
-            pid,
-            arg["seqmin"],
-            arg["seqmax"],
-            arg["addr"],
-            arg["simple"],
-            arg["detail"],
-            arg["top"],
-        )
-
-
-class Memleak(gdb.Command):
-    """Memleak check"""
-
-    def __init__(self):
-        self.elf = utils.import_check(
-            "elftools.elf.elffile", "ELFFile", "Plase pip install pyelftools\n"
-        )
-        if not self.elf:
+    def invoke(self, arg: str, from_tty: bool) -> None:
+        if not (args := self.parse_args(arg)):
             return
 
-        super().__init__("memleak", gdb.COMMAND_USER)
-
-    def check_alive(self, pid):
-        return self.pidhash[pid & self.npidhash - 1] != 0
+        heaps = (
+            [mm.MMHeap(gdb.parse_and_eval(args.heap))] if args.heap else 
mm.get_heaps()
+        )
+        pids = [int(tcb["pid"]) for tcb in utils.get_tcbs()]
 
-    def next_ptr(self):
-        inf = gdb.selected_inferior()
-        heap = gdb.parse_and_eval("g_mmheap")
-        longsize = get_long_type().sizeof
-        region = get_symbol_value("CONFIG_MM_REGIONS")
-        regions = []
+        print_header()
 
-        for i in range(0, region):
-            start = int(heap["mm_heapstart"][i])
-            end = int(heap["mm_heapend"][i])
-            regions.append({"start": start, "end": end})
+        def printnode(node, count):
+            print_node(node, node.pid in pids, count, 
no_backtrace=args.no_backtrace)
 
-        # Search global variables
-        for objfile in gdb.objfiles():
-            gdb.write(f"Searching global symbol in: {objfile.filename}\n")
-            elf = self.elf.load_from_path(objfile.filename)
-            symtab = elf.get_section_by_name(".symtab")
-            for symbol in symtab.iter_symbols():
-                if symbol["st_info"]["type"] != "STT_OBJECT":
-                    continue
-
-                if symbol["st_size"] < longsize:
-                    continue
+        if args.address:
+            addr = int(gdb.parse_and_eval(args.address))
+            # Address specified, find and return directly.
+            node = None
+            for pool in mm.get_pools(heaps):
+                if node := pool.find(addr):
+                    break
 
-                global_size = symbol["st_size"] // longsize * longsize
-                global_mem = inf.read_memory(symbol["st_value"], global_size)
-                while global_size:
-                    global_size = global_size - longsize
-                    ptr = read_ulong(global_mem, global_size)
-                    for region in regions:
-                        if ptr >= region["start"] and ptr < region["end"]:
-                            yield ptr
-                            break
-
-        gdb.write("Searching in grey memory\n")
-        for node in self.grey_list:
-            addr = node["addr"]
-            mem = inf.read_memory(addr, node["size"])
-            i = 0
-            while i < node["size"]:
-                ptr = read_ulong(mem, i)
-                for region in regions:
-                    if ptr >= region["start"] and ptr < region["end"]:
-                        yield ptr
-                        break
-                i = i + longsize
-
-    def collect_white_dict(self):
-        white_dict = {}
-        allocnode_size = mm_allocnode_type.sizeof
-
-        # collect all user malloc ptr
-
-        heap = gdb.parse_and_eval("g_mmheap")
-        for node in mm_foreach(heap):
-            if node["size"] & MM_ALLOC_BIT != 0 and node["pid"] != 
PID_MM_MEMPOOL:
-                addr = int(node) + allocnode_size
-
-                node_dict = {}
-                node_dict["node"] = node
-                node_dict["size"] = mm_nodesize(node["size"]) - allocnode_size
-                node_dict["addr"] = addr
-                white_dict[int(addr)] = node_dict
-
-        if heap.type.has_key("mm_mpool"):
-            for pool in mempool_multiple_foreach(heap["mm_mpool"]):
-                for buf in mempool_foreach(pool):
-                    if buf["magic"] == MEMPOOL_MAGIC_ALLOC:
-                        addr = int(buf) - pool["blocksize"]
-
-                        buf_dict = {}
-                        buf_dict["node"] = buf
-                        buf_dict["size"] = pool["blocksize"]
-                        buf_dict["addr"] = addr
-                        white_dict[int(addr)] = buf_dict
-
-        return white_dict
+            if node or (node := self.find(heaps, addr)):
+                printnode(node, 1)
+                source = "Pool" if node.from_pool else "Heap"
+                print(f"{addr: #x} found belongs to {source}, 
node@{node.address:#x}")
+            else:
+                print(f"Address {addr:#x} not found in any heap")
+            return
 
-    def parse_arguments(self, argv):
-        parser = argparse.ArgumentParser(description="memleak command")
-        parser.add_argument(
-            "-s",
-            "--simple",
-            action="store_true",
-            help="Simplified Output",
-            default=False,
-        )
-        parser.add_argument(
-            "-d",
-            "--detail",
-            action="store_true",
-            help="Output details of each node",
-            default=False,
-        )
+        filters = {
+            "pid": args.pid,
+            "nodesize": args.size,
+            "used": not args.free,
+            "free": args.free,
+            "seqmin": args.min,
+            "seqmax": args.max,
+            "orphan": args.orphan,
+        }
 
-        if argv[0] == "":
-            argv = None
-        try:
-            args = parser.parse_args(argv)
-        except SystemExit:
-            return None
+        heap_nodes = dump_nodes(heaps, **filters, no_heap=args.no_heap, 
no_pool=True)
+        pool_nodes = dump_nodes(heaps, **filters, no_heap=True, 
no_pool=args.no_pool)
 
-        return {"simple": args.simple, "detail": args.detail}
+        if args.biggest:
+            # Find the biggest nodes, only applicable to heaps
+            nodes = sorted(
+                heap_nodes,
+                key=lambda node: node.nodesize,
+                reverse=True,
+            )
+            for node in nodes[: args.top]:
+                print(f"node@{node.address}: {node}")
+            return
 
-    def diagnose(self, *args, **kwargs):
-        output = gdb.execute("memleak", to_string=True)
-        return {
-            "title": "Memory Leak Report",
-            "command": "memleak",
-            "details": output,
+        sort_method = {
+            "count": lambda node: 1,
+            "size": lambda node: node.nodesize,
+            "nodesize": lambda node: node.nodesize,
+            "seq": lambda node: node.seqno,
+            "address": lambda node: node.address,
         }
 
-    def invoke(self, args, from_tty):
-        if sizeof_size_t == 4:
-            align = 11
-        else:
-            align = 19
+        def sort_nodes(nodes):
+            nodes = sorted(nodes, key=sort_method[args.sort], reverse=True)
+            if args.top is not None:
+                nodes = nodes[: args.top] if args.top > 0 else nodes[args.top 
:]
+            return nodes
 
-        arg = self.parse_arguments(args.split(" "))
+        if args.no_group:
+            # Print nodes without grouping
+            nodes = list(heap_nodes)
+            nodes.extend(pool_nodes)
 
-        if arg is None:
-            return
+            for node in sort_nodes(nodes):
+                printnode(node, 1)
 
-        if CONFIG_MM_BACKTRACE < 0:
-            gdb.write("Need to set CONFIG_MM_BACKTRACE to 8 or 16 better.\n")
+            gdb.write(f"Total blks: {len(nodes)}\n")
             return
-        elif CONFIG_MM_BACKTRACE == 0:
-            gdb.write("CONFIG_MM_BACKTRACE is 0, no backtrace available\n")
 
-        start = last = time.time()
-        white_dict = self.collect_white_dict()
+        # Finally group the nodes and then print
 
-        self.grey_list = []
-        gdb.write("Searching for leaked memory, please wait a moment\n")
-        last = time.time()
+        grouped: Dict[MMNodeDump, MMNodeDump] = defaultdict(list)
+        grouped = group_nodes(heap_nodes)
+        grouped = group_nodes(pool_nodes, grouped)
 
-        sorted_keys = sorted(white_dict.keys())
-        for ptr in self.next_ptr():
-            # Find a closest addres in white_dict
-            pos = bisect.bisect_right(sorted_keys, ptr)
-            if pos == 0:
-                continue
-            grey_key = sorted_keys[pos - 1]
-            if grey_key in white_dict and ptr < grey_key + 
white_dict[grey_key]["size"]:
-                self.grey_list.append(white_dict[grey_key])
-                del white_dict[grey_key]
-
-        # All white node is leak
+        # Replace the count and size to count grouped nodes
+        sort_method["count"] = lambda node: len(grouped[node])
+        sort_method["size"] = lambda node: node.nodesize * len(grouped[node])
+        total_blk = total_size = 0
+        for node in sort_nodes(grouped.keys()):
+            count = len(grouped[node])
+            total_blk += count
+            if node.pid != mm.PID_MM_MEMPOOL:
+                total_size += count * node.nodesize
+            printnode(node, count)
 
-        gdb.write(f"Search all memory use {(time.time() - last):.2f} 
seconds\n")
+        print(f"Total {total_blk} blks, {total_size} bytes")
 
-        gdb.write("\n")
-        if len(white_dict) == 0:
-            gdb.write("All nodes have references, no memory leak!\n")
-            return
 
-        gdb.write("Leak catch!, use '\x1b[33;1m*\x1b[m' mark pid does not 
exist:\n")
+class MMfrag(gdb.Command):
+    """Show memory fragmentation rate"""
 
-        if CONFIG_MM_BACKTRACE > 0 and not arg["detail"]:
-            gdb.write("%6s" % ("CNT"))
+    def __init__(self):
+        super().__init__("mm frag", gdb.COMMAND_USER)
+        utils.alias("memfrag", "mm frag")
 
-        gdb.write(
-            "%6s%12s%12s%*s %s\n"
-            % ("PID", "Size", "Sequence", align, "Address", "Callstack")
+    def invoke(self, args, from_tty):
+        parser = argparse.ArgumentParser(description=self.__doc__)
+        parser.add_argument(
+            "--heap",
+            type=str,
+            default=None,
+            help="Which heap to inspect",
         )
 
-        self.npidhash = gdb.parse_and_eval("g_npidhash")
-        self.pidhash = gdb.parse_and_eval("g_pidhash")
+        try:
+            args = parser.parse_args(gdb.string_to_argv(args))
+        except SystemExit:
+            return None
 
-        if CONFIG_MM_BACKTRACE > 0 and not arg["detail"]:
+        for heap in get_heaps(args.heap):
+            nodes = list(
+                sorted(heap.nodes_free(), key=lambda node: node.nodesize, 
reverse=True)
+            )
+            if not nodes:
+                gdb.write(f"{heap}: no free nodes\n")
+                continue
 
-            # Filter same backtrace
+            freesize = sum(node.nodesize for node in nodes)
+            remaining = freesize
+            fragrate = 0
 
-            backtrace_dict = {}
-            for addr in white_dict.keys():
-                backtrace_dict = record_backtrace(
-                    white_dict[addr]["node"], white_dict[addr]["size"], 
backtrace_dict
+            for node in nodes:
+                fragrate += (1 - (node.nodesize / remaining)) * (
+                    node.nodesize / freesize
                 )
+                remaining -= node.nodesize
 
-            leaksize = 0
-            leaklist = []
-            for node in backtrace_dict.values():
-                leaklist.append(node)
-
-            # sort by count
-            leaklist.sort(key=get_count, reverse=True)
-
-            i = 0
-            for node in leaklist:
-                if node["node"].type == mm_allocnode_type.pointer():
-                    mm_dumpnode(
-                        node["node"],
-                        node["count"],
-                        align,
-                        arg["simple"],
-                        arg["detail"],
-                        self.check_alive(node["pid"]),
-                    )
-                else:
-                    mempool_dumpbuf(
-                        node["node"],
-                        node["size"],
-                        node["count"],
-                        align,
-                        arg["simple"],
-                        arg["detail"],
-                        self.check_alive(node["pid"]),
-                    )
-
-                leaksize += node["count"] * node["size"]
-                i += 1
-
-            gdb.write(
-                f"Alloc {len(white_dict)} count,\
-have {i} some backtrace leak, total leak memory is {int(leaksize)} bytes\n"
-            )
-        else:
-            leaksize = 0
-            for node in white_dict.values():
-                if node["node"].type == mm_allocnode_type.pointer():
-                    mm_dumpnode(
-                        node["node"],
-                        1,
-                        align,
-                        arg["simple"],
-                        True,
-                        self.check_alive(node["pid"]),
-                    )
-                else:
-                    mempool_dumpbuf(
-                        node["node"],
-                        node["size"],
-                        1,
-                        align,
-                        arg["simple"],
-                        True,
-                        self.check_alive(node["pid"]),
-                    )
-                leaksize += node["size"]
-
+            fragrate = fragrate * 1000
             gdb.write(
-                f"Alloc {len(white_dict)} count, total leak memory is 
{int(leaksize)} bytes\n"
+                f"{heap.name}@{heap.address:#x}, fragmentation 
rate:{fragrate:.2f},"
+                f" heapsize: {heap.heapsize}, free size: {freesize},"
+                f" free count: {len(nodes)}, largest: {nodes[0].nodesize}\n"
             )
 
-        gdb.write(f"Finished in {(time.time() - start):.2f} seconds\n")
 
+class MMMap(gdb.Command):
+    """Generate memory map image to visualize memory layout"""
 
-class Memmap(gdb.Command):
     def __init__(self):
         self.np = utils.import_check("numpy", errmsg="Please pip install 
numpy\n")
         self.plt = utils.import_check(
@@ -913,111 +377,185 @@ class Memmap(gdb.Command):
         if not self.np or not self.plt or not self.math:
             return
 
-        super().__init__("memmap", gdb.COMMAND_USER)
-
-    def save_memory_map(self, mallinfo, output_file):
-        mallinfo = sorted(mallinfo, key=lambda item: item["addr"])
-        start = mallinfo[0]["addr"]
-        size = mallinfo[-1]["addr"] - start
+        super().__init__("mm map", gdb.COMMAND_USER)
+        utils.alias("memmap", "mm map")
 
+    def save_memory_map(self, nodes: List[MMNodeDump], output_file):
+        mallinfo = sorted(nodes, key=lambda node: node.address)
+        start = mallinfo[0].address
+        size = mallinfo[-1].address - start
         order = self.math.ceil(size**0.5)
         img = self.np.zeros([order, order])
 
         for node in mallinfo:
-            addr = node["addr"]
-            size = node["size"]
+            addr = node.address
+            size = node.nodesize
             start_index = addr - start
             end_index = start_index + size
-            img.flat[start_index:end_index] = 1 + 
self.math.log2(node["sequence"] + 1)
+            img.flat[start_index:end_index] = 1 + self.math.log2(node.seqno + 
1)
 
         self.plt.imsave(output_file, img, cmap=self.plt.get_cmap("Greens"))
 
-    def allocinfo(self):
-        info = []
-        heap = gdb.parse_and_eval("g_mmheap")
-        for node in mm_foreach(heap):
-            if node["size"] & MM_ALLOC_BIT != 0:
-                allocnode = gdb.Value(node).cast(lookup_type("char").pointer())
-                info.append(
-                    {
-                        "addr": int(allocnode),
-                        "size": int(mm_nodesize(node["size"])),
-                        "sequence": int(node["seqno"]),
-                    }
-                )
-        return info
-
     def parse_arguments(self, argv):
-        parser = argparse.ArgumentParser(description="memdump command")
+        parser = argparse.ArgumentParser(description=self.__doc__)
+        parser.add_argument(
+            "-o", "--output", type=str, default=None, help="img output file"
+        )
         parser.add_argument(
-            "-o", "--output", type=str, default="memmap", help="img output 
file"
+            "--heap", type=str, help="Which heap's pool to show", default=None
         )
-        if argv[0] == "":
-            argv = None
+
         try:
             args = parser.parse_args(argv)
         except SystemExit:
             return None
-        return args.output
+
+        return args
 
     def invoke(self, args, from_tty):
-        output_file = self.parse_arguments(args.split(" "))
-        meminfo = self.allocinfo()
-        self.save_memory_map(meminfo, output_file + ".png")
+        if not (args := self.parse_arguments(gdb.string_to_argv(args))):
+            return
 
+        for heap in get_heaps(args.heap):
+            name = heap.name or f"heap@{heap.address:#x}"
+            output = args.output or f"{name}.png"
+            self.save_memory_map(heap.nodes_used(), output)
+            gdb.write(f"Memory map saved to {output}\n")
 
-class Memfrag(gdb.Command):
-    def __init__(self):
-        super().__init__("memfrag", gdb.COMMAND_USER)
 
-    def parse_arguments(self, argv):
-        parser = argparse.ArgumentParser(description="memfrag command")
-        parser.add_argument(
-            "-d", "--detail", action="store_true", help="Output details"
+class GlobalNode(MMNodeDump):
+    def __init__(self, address: int, nodesize: int):
+        self.address = address
+        self.nodesize = nodesize
+        self.pid = None
+        self.seqno = None
+        self.overhead = 0
+        self.backtrace = ()
+
+    def __repr__(self):
+        return f"GlobalVar@{self.address:x}:{self.nodesize}Bytes"
+
+    def contains(self, addr: int) -> bool:
+        pass
+
+    def read_memory(self) -> memoryview:
+        return gdb.selected_inferior().read_memory(self.address, self.nodesize)
+
+
+class MMLeak(gdb.Command):
+    """Dump memory manager heap"""
+
+    def __init__(self):
+        self.elf = utils.import_check(
+            "elftools.elf.elffile", "ELFFile", "Please pip install 
pyelftools\n"
         )
-        if argv[0] == "":
-            argv = None
-        try:
-            args = parser.parse_args(argv)
-        except SystemExit:
-            return None
-        return args.detail
-
-    def freeinfo(self):
-        info = []
-        heap = gdb.parse_and_eval("g_mmheap")
-        for node in mm_foreach(heap):
-            if node["size"] & MM_ALLOC_BIT == 0:
-                freenode = gdb.Value(node).cast(lookup_type("char").pointer())
-                info.append(
-                    {
-                        "addr": int(freenode),
-                        "size": int(mm_nodesize(node["size"])),
-                    }
-                )
-        return info
+        if not self.elf:
+            return
 
-    def invoke(self, args, from_tty):
-        detail = self.parse_arguments(args.split(" "))
-        info = self.freeinfo()
-
-        info = sorted(info, key=lambda item: item["size"], reverse=True)
-        if detail:
-            for node in info:
-                gdb.write(f"addr: {node['addr']}, size: {node['size']}\n")
-
-        heapsize = gdb.parse_and_eval("*g_mmheap")["mm_heapsize"]
-        freesize = sum(node["size"] for node in info)
-        remaining = freesize
-        fragrate = 0
-
-        for node in info:
-            fragrate += (1 - (node["size"] / remaining)) * (node["size"] / 
freesize)
-            remaining -= node["size"]
-
-        fragrate = fragrate * 1000
-        gdb.write(f"memory fragmentation rate: {fragrate:.2f}\n")
-        gdb.write(
-            f"heap size: {heapsize}, free size: {freesize}, uordblks:"
-            f"{info.__len__()} largest block: {info[0]['size']} \n"
+        super().__init__("mm leak", gdb.COMMAND_USER)
+        utils.alias("memleak", "mm leak")
+
+    def global_nodes(self) -> List[GlobalNode]:
+        cache = path.join(
+            path.dirname(path.abspath(gdb.objfiles()[0].filename)),
+            f"{utils.get_elf_md5()}-globals.json",
         )
+
+        nodes: List[GlobalNode] = []
+
+        if path.isfile(cache):
+            with open(cache, "r") as f:
+                variables = json.load(f)
+                for var in variables:
+                    nodes.append(GlobalNode(var["address"], var["size"]))
+                return nodes
+
+        longsize = utils.get_long_type().sizeof
+        for objfile in gdb.objfiles():
+            elf = self.elf.load_from_path(objfile.filename)
+            symtab = elf.get_section_by_name(".symtab")
+            symbols = filter(
+                lambda s: s["st_info"]["type"] == "STT_OBJECT"
+                and s["st_size"] >= longsize,
+                symtab.iter_symbols(),
+            )
+
+            for symbol in symbols:
+                size = symbol["st_size"] // longsize * longsize
+                address = symbol["st_value"]
+                nodes.append(GlobalNode(address, size))
+
+        with open(cache, "w") as f:
+            variables = [
+                {"address": node.address, "size": node.nodesize} for node in 
nodes
+            ]
+            str = utils.jsonify(variables)
+            f.write(str)
+
+        return nodes
+
+    def invoke(self, arg: str, from_tty: bool) -> None:
+        heaps = get_heaps("g_mmheap")
+        pids = [int(tcb["pid"]) for tcb in utils.get_tcbs()]
+
+        def is_pid_alive(pid):
+            return pid in pids
+
+        t = time.time()
+        print("Loading globals from elf...", flush=True, end="")
+        good_nodes = self.global_nodes()  # Global memory are all good.
+        print(f" {time.time() - t:.2f}s", flush=True, end="\n")
+
+        nodes_dict: Dict[int, MMNodeDump] = {}
+        sorted_addr = set()
+        t = time.time()
+        print("Gather memory nodes...", flush=True, end="")
+        for node in dump_nodes(heaps, used=True, no_pid=mm.PID_MM_MEMPOOL):
+            nodes_dict[node.address] = node
+            sorted_addr.add(node.address)
+
+        sorted_addr = sorted(sorted_addr)
+        print(f" {time.time() - t:.2f}s", flush=True, end="\n")
+
+        regions = [
+            {"start": start.address, "end": end.address}
+            for heap in heaps
+            for start, end in heap.regions
+        ]
+
+        longsize = utils.get_long_type().sizeof
+
+        def pointers(node: MMNodeDump) -> Generator[int, None, None]:
+            # Return all possible pointers stored in this node
+            size = node.nodesize - node.overhead
+            memory = node.read_memory()
+            while size > 0:
+                size -= longsize
+                ptr = int.from_bytes(memory[size : size + longsize], "little")
+                if any(region["start"] <= ptr < region["end"] for region in 
regions):
+                    yield ptr
+
+        print("Leak analyzing...", flush=True, end="")
+        t = time.time()
+        for good in good_nodes:
+            for ptr in pointers(good):
+                if not (idx := bisect.bisect_right(sorted_addr, ptr)):
+                    continue
+
+                node = nodes_dict[sorted_addr[idx - 1]]
+                if node.contains(ptr):
+                    del sorted_addr[idx - 1]
+                    good_nodes.append(node)
+
+        print(f" {time.time() - t:.2f}s", flush=True, end="\n")
+
+        leak_nodes = group_nodes(nodes_dict[addr] for addr in sorted_addr)
+        print_header()
+        total_blk = total_size = 0
+        for node in leak_nodes.keys():
+            count = len(leak_nodes[node])
+            total_blk += count
+            total_size += count * node.nodesize
+            print_node(node, is_pid_alive(node.pid), 
count=len(leak_nodes[node]))
+
+        print(f"Leaked {total_blk} blks, {total_size} bytes")
diff --git a/tools/gdb/nuttxgdb/mm.py b/tools/gdb/nuttxgdb/mm.py
new file mode 100644
index 0000000000..c1943bbc68
--- /dev/null
+++ b/tools/gdb/nuttxgdb/mm.py
@@ -0,0 +1,637 @@
+############################################################################
+# tools/gdb/nuttxgdb/mm.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+from __future__ import annotations
+
+import argparse
+from typing import Generator, List, Tuple
+
+import gdb
+
+from . import lists, utils
+from .protocols import mm as p
+from .utils import Value
+
+CONFIG_MM_BACKTRACE = utils.get_symbol_value("CONFIG_MM_BACKTRACE")
+CONFIG_MM_BACKTRACE = -1 if CONFIG_MM_BACKTRACE is None else 
int(CONFIG_MM_BACKTRACE)
+
+
+PID_MM_INVALID = -100
+PID_MM_MEMPOOL = -1
+
+
+class MemPoolBlock:
+    """
+    Memory pool block instance.
+    """
+
+    MAGIC_ALLOC = 0x5555_5555
+
+    mempool_backtrace_s = utils.lookup_type("struct mempool_backtrace_s")
+
+    def __init__(self, addr: int, blocksize: int, overhead: int) -> None:
+        """
+        Initialize the memory pool block instance.
+        block: must be start address of the block,
+        blocksize: block size without backtrace overhead,
+        overhead: backtrace overhead size.
+        """
+        self.overhead = overhead
+        self.from_pool = True
+        self.is_orphan = False
+        self.address = addr
+        self.blocksize = int(blocksize)
+        self.nodesize = int(blocksize) + self.overhead
+        # Lazy evaluation
+        self._backtrace = self._pid = self._seqno = self._magic = self._blk = 
None
+
+    def __repr__(self) -> str:
+        return 
f"block@{hex(self.address)},size:{self.blocksize},seqno:{self.seqno},pid:{self.pid}"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    def __hash__(self) -> int:
+        return hash((self.pid, self.nodesize, self.backtrace))
+
+    def __eq__(self, value: MemPoolBlock) -> bool:
+        return (
+            self.pid == value.pid
+            and self.nodesize == value.nodesize
+            and self.backtrace == value.backtrace
+        )
+
+    def contains(self, address: int) -> bool:
+        """Check if the address is in block's range, excluding overhead"""
+        return self.address <= address < self.address + self.blocksize
+
+    @property
+    def blk(self) -> p.MemPoolBlock:
+        if not self._blk:
+            addr = int(self.address) + self.blocksize
+            self._blk = (
+                
gdb.Value(addr).cast(self.mempool_backtrace_s.pointer()).dereference()
+            )
+        return self._blk
+
+    @property
+    def is_free(self) -> bool:
+        if not self._magic:
+            self._magic = int(self.blk["magic"])
+
+        return CONFIG_MM_BACKTRACE and self._magic != self.MAGIC_ALLOC
+
+    @property
+    def seqno(self) -> int:
+        if not self._seqno:
+            self._seqno = int(self.blk["seqno"]) if CONFIG_MM_BACKTRACE >= 0 
else -100
+        return self._seqno
+
+    @property
+    def pid(self) -> int:
+        if not self._pid:
+            self._pid = (
+                int(self.blk["pid"]) if CONFIG_MM_BACKTRACE >= 0 else 
PID_MM_INVALID
+            )
+        return self._pid
+
+    @property
+    def backtrace(self) -> Tuple[int]:
+        if CONFIG_MM_BACKTRACE <= 0:
+            return ()
+
+        if not self._backtrace:
+            self._backtrace = tuple(
+                int(self.blk["backtrace"][i]) for i in 
range(CONFIG_MM_BACKTRACE)
+            )
+        return self._backtrace
+
+    def read_memory(self) -> memoryview:
+        return gdb.selected_inferior().read_memory(self.address, 
self.blocksize)
+
+
+class MemPool(Value, p.MemPool):
+    """
+    Memory pool instance.
+    """
+
+    def __init__(self, mpool: Value, name=None) -> None:
+        if mpool.type.code == gdb.TYPE_CODE_PTR:
+            mpool = mpool.dereference()
+        super().__init__(mpool)
+        self._blksize = None
+        self._nfree = None
+        self._nifree = None
+        self._overhead = None
+
+    def __repr__(self) -> str:
+        return 
f"{self.name}@{hex(self.address)},size:{self.size}/{self['blocksize']},nused:{self.nused},nfree:{self.nfree}"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    @property
+    def name(self) -> str:
+        try:
+            return self.procfs.name.string()
+        except Exception:
+            return "<noname>"
+
+    @property
+    def memranges(self) -> Generator[Tuple[int, int], None, None]:
+        """Memory ranges of the pool"""
+        sq_entry_t = utils.lookup_type("sq_entry_t")
+        blksize = self.size
+
+        if self.ibase:
+            blks = int(self.interruptsize) // blksize
+            base = int(self.ibase)
+            yield (base, base + blks * blksize)
+
+        if not self.equeue.head:
+            return None
+
+        # First queue has size of initialsize
+        ninit = int(self.initialsize)
+        ninit = ninit and (ninit - sq_entry_t.sizeof) // blksize
+        nexpand = (int(self.expandsize) - sq_entry_t.sizeof) // blksize
+
+        for entry in lists.NxSQueue(self.equeue):
+            blks = ninit or nexpand
+            ninit = 0
+            yield (int(entry) - blks * blksize, int(entry))
+
+    @property
+    def size(self) -> int:
+        """Real block size including backtrace overhead"""
+        if not self._blksize:
+            blksize = self["blocksize"]
+            backtrace = utils.get_symbol_value("CONFIG_MM_BACKTRACE")
+            if CONFIG_MM_BACKTRACE is not None and backtrace >= 0:
+                mempool_backtrace_s = utils.lookup_type("struct 
mempool_backtrace_s")
+                size_t = utils.lookup_type("size_t")
+                align = (
+                    utils.get_symbol_value("CONFIG_MM_DEFAULT_ALIGNMENT")
+                    or 2 * size_t.sizeof
+                )
+                blksize = blksize + mempool_backtrace_s.sizeof
+                blksize = (blksize + align - 1) & ~(align - 1)
+            self._blksize = int(blksize)
+        return self._blksize
+
+    @property
+    def overhead(self) -> int:
+        if not self._overhead:
+            self._overhead = self.size - int(self["blocksize"])
+        return self._overhead
+
+    @property
+    def nwaiter(self) -> int:
+        return -int(self.waitsem.semcount) if self.wait and self.expandsize == 
0 else 0
+
+    @property
+    def nused(self) -> int:
+        return int(self.nalloc)
+
+    @property
+    def free(self) -> int:
+        return (self.nfree + self.nifree) * self.size
+
+    @property
+    def nfree(self) -> int:
+        if not self._nfree:
+            self._nfree = lists.sq_count(self.queue)
+        return self._nfree + self.nifree
+
+    @property
+    def nifree(self) -> int:
+        """Interrupt pool free blocks count"""
+        if not self._nifree:
+            self._nifree = lists.sq_count(self.iqueue)
+        return self._nifree
+
+    @property
+    def total(self) -> int:
+        nqueue = lists.sq_count(self.equeue)
+        sq_entry_t = utils.lookup_type("sq_entry_t")
+        blocks = self.nused + self.nfree
+        return int(nqueue * sq_entry_t.sizeof + blocks * self.size)
+
+    @property
+    def blks(self) -> Generator[MemPoolBlock, None, None]:
+        """Iterate over all blocks in the pool"""
+        sq_entry_t = utils.lookup_type("sq_entry_t")
+        blksize = self.size  # Real block size including backtrace overhead
+        blocksize = self["blocksize"]
+
+        def iterate(entry, nblocks):
+            base = int(entry) - nblocks * blksize
+            while nblocks > 0:
+                yield MemPoolBlock(base, blocksize, self.overhead)
+                base += blksize
+                nblocks -= 1
+
+        if self.ibase:
+            blks = int(self.interruptsize) // blksize
+            yield from iterate(self.ibase + blks * blksize, blks)
+
+        if not self.equeue.head:
+            return None
+
+        # First queue has size of initialsize
+        ninit = int(self.initialsize)
+        ninit = ninit and (ninit - sq_entry_t.sizeof) // blksize
+        nexpand = (int(self.expandsize) - sq_entry_t.sizeof) // blksize
+
+        for entry in lists.NxSQueue(self.equeue):
+            yield from iterate(entry, ninit or nexpand)
+            ninit = 0
+
+    def contains(self, address: int) -> Tuple[bool, Value]:
+        ranges = self.memranges
+        if not ranges:
+            return False, None
+
+        for start, end in ranges:
+            if start <= address < end:
+                return True, None
+
+    def find(self, address: int) -> Value:
+        """Find the block that contains the given address"""
+        sq_entry_t = utils.lookup_type("sq_entry_t")
+        blksize = self.size
+        blocksize = self["blocksize"]
+
+        def get_blk(base):
+            blkstart = base + (address - base) // blksize * blksize
+            return MemPoolBlock(blkstart, blocksize, self.overhead)
+
+        if self.ibase:
+            # Check if it belongs to interrupt pool
+            blks = int(self.interruptsize) // blksize
+            base = int(self.ibase)
+            if base <= address < base + blks * blksize:
+                return get_blk(base)
+
+        if not self.equeue.head:
+            return None
+
+        # First queue has size of initialsize
+        ninit = int(self.initialsize)
+        ninit = ninit and (ninit - sq_entry_t.sizeof) // blksize
+        nexpand = (int(self.expandsize) - sq_entry_t.sizeof) // blksize
+
+        for entry in lists.NxSQueue(self.equeue):
+            blks = ninit or nexpand
+            ninit = 0
+            base = int(entry) - blks * blksize
+            if base <= address < int(entry):
+                return get_blk(base)
+
+    def blks_free(self) -> Generator[MemPoolBlock, None, None]:
+        """Iterate over all free blocks in the pool"""
+        blocksize = self["blocksize"]
+        for entry in lists.NxSQueue(self.queue):
+            yield MemPoolBlock(int(entry), blocksize, self.overhead)
+
+    def blks_used(self) -> Generator[MemPoolBlock, None, None]:
+        """Iterate over all used blocks in the pool"""
+        return filter(lambda blk: not blk.is_free, self.blks)
+
+
+class MemPoolMultiple(Value, p.MemPoolMultiple):
+    """
+    Multiple level memory pool instance.
+    """
+
+    def __init__(self, mpool: Value, name=None) -> None:
+        if mpool.type.code == gdb.TYPE_CODE_PTR:
+            mpool = mpool.dereference()
+        super().__init__(mpool)
+
+    def __repr__(self) -> str:
+        return f"Multiple Level Memory Pool: {self.address}"
+
+    def __str__(self) -> str:
+        return self.__repr__()
+
+    @property
+    def pools(self) -> Generator[MemPool, None, None]:
+        for pool in utils.ArrayIterator(self["pools"], self.npools):
+            yield MemPool(pool)
+
+    @property
+    def free(self) -> int:
+        return sum(pool.free for pool in self.pools)
+
+
class MMNode(gdb.Value, p.MMFreeNode):
    """
    One memory node in the memory manager heap, either free or allocated.
    The instance is always dereferenced to the actual node.
    """

    MM_ALLOC_BIT = 0x1
    MM_PREVFREE_BIT = 0x2
    MM_MASK_BIT = MM_ALLOC_BIT | MM_PREVFREE_BIT
    MM_SIZEOF_ALLOCNODE = utils.sizeof("struct mm_allocnode_s")
    MM_ALLOCNODE_OVERHEAD = MM_SIZEOF_ALLOCNODE - utils.sizeof("mmsize_t")

    def __init__(self, node: gdb.Value):
        if node.type.code == gdb.TYPE_CODE_PTR:
            node = node.dereference()
        # Lazily populated caches; reading gdb.Value fields is expensive.
        self._backtrace = None
        self._address = None
        self._nodesize = None
        super().__init__(node)

    def __repr__(self):
        # e.g. "0x1234(A/F) size:32/- seq:5 pid:3" -- A=allocated, F=free
        return (
            f"{hex(self.address)}({'F' if self.is_free else 'A'}{'F' if self.is_prev_free else 'A'})"
            f" size:{self.nodesize}/{self.prevsize if self.is_prev_free else '-'}"
            f" seq:{self.seqno} pid:{self.pid} "
        )

    def __str__(self) -> str:
        return self.__repr__()

    def __hash__(self) -> int:
        # Nodes with the same owner, size and backtrace hash (and compare)
        # equal so they can be grouped, e.g. for leak aggregation.
        return hash((self.pid, self.nodesize, self.backtrace))

    def __eq__(self, value: MMNode) -> bool:
        return (
            self.pid == value.pid
            and self.nodesize == value.nodesize
            and self.backtrace == value.backtrace
        )

    def contains(self, address):
        """Check if the address is in node's range, excluding overhead"""
        # NOTE(review): the upper bound excludes the last OVERHEAD bytes,
        # while read_memory()/usersize treat [addr+overhead, addr+nodesize)
        # as the payload -- confirm which bound is intended.
        return (
            self.address + self.overhead
            <= address
            < self.address + self.nodesize - MMNode.MM_ALLOCNODE_OVERHEAD
        )

    def read_memory(self):
        """Read this node's user payload from the inferior."""
        addr = int(self.address) + MMNode.MM_ALLOCNODE_OVERHEAD
        size = self.nodesize - MMNode.MM_ALLOCNODE_OVERHEAD
        return gdb.selected_inferior().read_memory(addr, size)

    @property
    def address(self) -> int:
        """Change 'void *' to int"""
        if not self._address:
            self._address = int(super().address)
        return self._address

    @property
    def prevsize(self) -> int:
        """Size of preceding chunk, with the flag bits masked off"""
        return int(self["preceding"]) & ~MMNode.MM_MASK_BIT

    @property
    def nodesize(self) -> int:
        """Size of this chunk, including overhead"""
        if not self._nodesize:
            self._nodesize = int(self["size"]) & ~MMNode.MM_MASK_BIT
        return self._nodesize

    @property
    def usersize(self) -> int:
        """Size of this chunk, excluding overhead"""
        return self.nodesize - MMNode.MM_ALLOCNODE_OVERHEAD

    @property
    def flink(self):
        # Only free node has flink and blink
        return self["flink"] if self.is_free else None

    @property
    def blink(self):
        # Only free node has flink and blink
        return self["blink"] if self.is_free else None

    @property
    def pid(self) -> int:
        # Only available when CONFIG_MM_BACKTRACE >= 0
        if CONFIG_MM_BACKTRACE >= 0:
            return int(self["pid"])
        return PID_MM_INVALID

    @property
    def seqno(self) -> int:
        # Allocation sequence number; -1 when backtrace support is off.
        return int(self["seqno"]) if CONFIG_MM_BACKTRACE >= 0 else -1

    @property
    def backtrace(self) -> Tuple[int, ...]:
        """Allocation backtrace as a tuple of PCs; empty when disabled."""
        if CONFIG_MM_BACKTRACE <= 0:
            return ()

        if not self._backtrace:
            self._backtrace = tuple(
                int(self["backtrace"][i]) for i in range(CONFIG_MM_BACKTRACE)
            )
        return self._backtrace

    @property
    def prevnode(self) -> MMNode:
        """The node immediately before this one (valid when is_prev_free)."""
        # Fix: "presize" is not a member of struct mm_allocnode_s; the
        # preceding chunk size is the "preceding" field with the flag bits
        # masked off, i.e. the prevsize property.
        addr = int(self.address) - self.prevsize
        type = utils.lookup_type("struct mm_allocnode_s").pointer()
        return MMNode(gdb.Value(addr).cast(type))

    @property
    def nextnode(self) -> MMNode:
        """The node immediately following this one in memory."""
        addr = int(self.address) + self.nodesize
        type = utils.lookup_type("struct mm_allocnode_s").pointer()
        # Use gdb.Value for better performance
        return MMNode(gdb.Value(addr).cast(type))

    @property
    def is_free(self) -> bool:
        return not self["size"] & MMNode.MM_ALLOC_BIT

    @property
    def is_prev_free(self) -> bool:
        return self["preceding"] & MMNode.MM_PREVFREE_BIT

    @property
    def is_orphan(self) -> bool:
        # Report orphaned node and node likely to be orphaned (free-used-used-free)
        return self.is_prev_free or self.nextnode.is_free

    @property
    def from_pool(self) -> bool:
        # Heap nodes never come from a mempool; presumably pool blocks
        # report True for this -- verify against MemPoolBlock.
        return False

    @property
    def overhead(self) -> int:
        return MMNode.MM_ALLOCNODE_OVERHEAD
+
+
class MMHeap(Value, p.MMHeap):
    """A single memory manager heap, possibly spanning several regions."""

    def __init__(self, heap: Value, name=None) -> None:
        # Normalize to the dereferenced struct when given a pointer.
        if heap.type.code == gdb.TYPE_CODE_PTR:
            heap = heap.dereference()
        super().__init__(heap)

        self.name = name or "<noname>"

    def __repr__(self) -> str:
        kb = int(self.heapsize) / 1024
        return f"{self.name}@{self.address}, {self.nregions}regions, {kb:.1f}kB"

    def __str__(self) -> str:
        return repr(self)

    @property
    def curused(self) -> int:
        """Bytes currently allocated from this heap."""
        return int(self.mm_curused)

    @property
    def heapsize(self) -> int:
        """Total managed size of this heap in bytes."""
        return int(self.mm_heapsize)

    @property
    def free(self) -> int:
        """Bytes still available in this heap."""
        return self.heapsize - self.curused

    @property
    def nregions(self) -> int:
        # mm_nregions may be absent in some configurations; assume 1 then.
        return int(utils.get_field(self, "mm_nregions", default=1))

    @property
    def regions(self):
        """Yield a (start, end) node pair for each heap region."""
        count = self.nregions
        starts = utils.ArrayIterator(self.mm_heapstart, count)
        ends = utils.ArrayIterator(self.mm_heapend, count)
        for begin, stop in zip(starts, ends):
            yield MMNode(begin), MMNode(stop)

    @property
    def nodes(self) -> Generator[MMNode, None, None]:
        """Walk every node of every region, end node included."""
        for begin, stop in self.regions:
            current = begin
            while current.address <= stop.address:
                yield current
                current = current.nextnode

    def nodes_free(self) -> Generator[MMNode, None, None]:
        """All nodes that are currently free."""
        return (node for node in self.nodes if node.is_free)

    def nodes_used(self) -> Generator[MMNode, None, None]:
        """All nodes that are currently allocated."""
        return (node for node in self.nodes if not node.is_free)

    def contains(self, address: int) -> bool:
        """True when the address falls inside any region of this heap."""
        for begin, stop in self.regions:
            if begin.address <= address < stop.address:
                return True
        return False

    def find(self, address: int) -> MMNode:
        """Return the node covering address, or None when not found."""
        for node in self.nodes:
            if node.address <= address < node.address + node.nodesize:
                return node
+
+
def get_heaps() -> List[MMHeap]:
    """Collect all known heaps, preferring the g_procfs_meminfo registry."""
    meminfo: p.ProcfsMeminfoEntry = utils.gdb_eval_or_none("g_procfs_meminfo")
    if not meminfo:
        # No procfs registry available: fall back to the global heap symbol.
        heap = gdb.parse_and_eval("g_mmheap")
        return [MMHeap(heap)] if heap else []

    # Walk the singly linked list of registered heaps.
    heaps: List[MMHeap] = []
    entry = meminfo
    while entry:
        heaps.append(MMHeap(entry.heap, name=entry.name.string()))
        entry = entry.next
    return heaps
+
+
def get_pools(heaps: List[Value] = None) -> Generator[MemPool, None, None]:
    """Yield every memory pool of the given heaps.

    Args:
        heaps: heaps to inspect; when None or empty, all heaps from
            get_heaps() are used.
    """
    # Fix: the old signature used a mutable default argument ([]), a
    # classic Python pitfall.  None is the safe sentinel and preserves the
    # behavior, since `heaps or get_heaps()` treats None and [] the same.
    for heap in heaps or get_heaps():
        # Skip heaps that have no multiple-level pool attached.
        if not (mm_pool := heap.mm_mpool):
            continue

        mpool = MemPoolMultiple(mm_pool)
        for pool in mpool.pools:
            yield pool
+
+
class MMHeapInfo(gdb.Command):
    """Show basic heap information"""

    def __init__(self):
        super().__init__("mm heap", gdb.COMMAND_USER)

    def invoke(self, arg: str, from_tty: bool) -> None:
        """Print each heap with its node count and region address ranges."""
        for heap in get_heaps():
            # Count lazily instead of len(list(...)): walking the node
            # chain is slow, no need to also materialize every node.
            count = sum(1 for _ in heap.nodes)
            regions = [(start.address, end.address) for start, end in heap.regions]
            gdb.write(f"{heap} - has {count} nodes, regions:")
            gdb.write(" ".join(f"{hex(start)}~{hex(end)}" for start, end in regions))
            gdb.write("\n")
+
+
class MMPoolInfo(gdb.Command):
    """Show basic memory pool information"""

    # Fix: the docstring doubles as the gdb help text for "mm pool"; it was
    # copy-pasted from MMHeapInfo and wrongly said "heap information".

    def __init__(self):
        super().__init__("mm pool", gdb.COMMAND_USER)
        # Keep the legacy "mempool" spelling working as an alias.
        utils.alias("mempool", "mm pool")

    def invoke(self, arg: str, from_tty: bool) -> None:
        """Dump a table of all pools: sizes, usage and waiter counts."""
        parser = argparse.ArgumentParser(description="Dump memory pool information.")
        parser.add_argument(
            "--heap", type=str, help="Which heap's pool to show", default=None
        )

        try:
            args = parser.parse_args(gdb.string_to_argv(arg))
        except SystemExit:
            # argparse exits on --help or bad arguments; stay inside gdb.
            return

        heaps = [gdb.parse_and_eval(args.heap)] if args.heap else get_heaps()
        if not (pools := list(get_pools(heaps))):
            gdb.write("No pools found.\n")
            return

        count = len(pools)
        gdb.write(f"Total {count} pools\n")

        # Name column width: longest pool name plus room for "@0x12345678".
        name_max = max(len(pool.name) for pool in pools) + 11
        formatter = "{:>%d} {:>11} {:>9} {:>9} {:>9} {:>9} {:>9}\n" % name_max
        head = ("", "total", "bsize", "nused", "nfree", "nifree", "nwaiter")

        gdb.write(formatter.format(*head))
        for pool in pools:
            gdb.write(
                formatter.format(
                    f"{pool.name}@{pool.address:#x}",
                    pool.total,
                    pool.size,
                    pool.nused,
                    pool.nfree,
                    pool.nifree,
                    pool.nwaiter,
                )
            )
diff --git a/tools/gdb/nuttxgdb/prefix.py b/tools/gdb/nuttxgdb/prefix.py
index 2f135acbf0..903d69bf25 100644
--- a/tools/gdb/nuttxgdb/prefix.py
+++ b/tools/gdb/nuttxgdb/prefix.py
@@ -28,3 +28,10 @@ class ForeachPrefix(gdb.Command):
 
     def __init__(self):
         super(ForeachPrefix, self).__init__("foreach", gdb.COMMAND_USER, 
prefix=True)
+
+
class MMPrefixCommand(gdb.Command):
    """Memory manager related commands prefix."""

    def __init__(self):
        # Register "mm" as a prefix so sub-commands like "mm heap" and
        # "mm pool" can attach to it.
        super(MMPrefixCommand, self).__init__("mm", gdb.COMMAND_USER, prefix=True)
diff --git a/tools/gdb/nuttxgdb/protocols/mm.py 
b/tools/gdb/nuttxgdb/protocols/mm.py
new file mode 100644
index 0000000000..96d9ae66a6
--- /dev/null
+++ b/tools/gdb/nuttxgdb/protocols/mm.py
@@ -0,0 +1,120 @@
+############################################################################
+# tools/gdb/nuttxgdb/protocols/mm.py
+#
+# Licensed to the Apache Software Foundation (ASF) under one or more
+# contributor license agreements.  See the NOTICE file distributed with
+# this work for additional information regarding copyright ownership.  The
+# ASF licenses this file to you under the Apache License, Version 2.0 (the
+# "License"); you may not use this file except in compliance with the
+# License.  You may obtain a copy of the License at
+#
+#   http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.  See the
+# License for the specific language governing permissions and limitations
+# under the License.
+#
+############################################################################
+
+from __future__ import annotations
+
+from typing import List
+
+from .value import Value
+
+
class ProcfsMeminfoEntry(Value):
    """struct procfs_meminfo_entry_s

    One entry of the procfs meminfo registry used to enumerate heaps.
    """

    name: Value  # heap name; read via name.string()
    heap: Value  # pointer to the heap's struct mm_heap_s
    next: ProcfsMeminfoEntry  # next registry entry (singly linked list)
+
+
class MMAllocNode(Value):
    """struct mm_allocnode_s

    Header of an allocated heap chunk.
    """

    preceding: Value  # size of the preceding chunk; low bits carry flags
    size: Value  # size of this chunk; low bits carry alloc/prev-free flags
    pid: Value  # owning task (present when CONFIG_MM_BACKTRACE >= 0)
    seqno: Value  # allocation sequence number (CONFIG_MM_BACKTRACE >= 0)
    backtrace: Value  # allocation call stack (CONFIG_MM_BACKTRACE > 0)
+
+
class MMFreeNode(Value):
    """struct mm_freenode_s

    Header of a free heap chunk; extends the alloc node with the
    free-list links.
    """

    preceding: Value  # size of the preceding chunk; low bits carry flags
    size: Value  # size of this chunk; low bits carry alloc/prev-free flags
    pid: Value  # owning task (present when CONFIG_MM_BACKTRACE >= 0)
    seqno: Value  # allocation sequence number (CONFIG_MM_BACKTRACE >= 0)
    backtrace: Value  # allocation call stack (CONFIG_MM_BACKTRACE > 0)
    flink: MMFreeNode  # next node on the free list (free nodes only)
    blink: MMFreeNode  # previous node on the free list (free nodes only)
+
+
class MMHeap(Value):
    """struct mm_heap_s

    The memory manager heap descriptor.
    """

    mm_lock: Value  # heap lock; not inspected by the gdb tooling
    mm_heapsize: Value  # total managed size of the heap in bytes
    mm_maxused: Value  # presumably the high-water mark of usage -- verify
    mm_curused: Value  # bytes currently allocated
    mm_heapstart: List[MMAllocNode]  # first node of each region
    mm_heapend: List[MMAllocNode]  # terminating node of each region
    mm_nregions: Value  # number of regions; may be absent in some configs
    mm_nodelist: Value  # free node lists; not inspected by the gdb tooling
+
+
class MemPool(Value):
    """struct mempool_s

    A fixed-block-size memory pool.
    """

    initialsize: Value  # size of the initially allocated chunk
    interruptsize: Value  # size reserved for interrupt-context allocation
    expandsize: Value  # size of each on-demand expansion chunk
    wait: Value  # whether allocation may block -- verify semantics
    priv: Value  # opaque user data for the callbacks
    alloc: Value  # backing allocator callback
    free: Value  # backing free callback
    check: Value  # check callback -- semantics not visible here
    ibase: Value  # base of the interrupt-reserved area
    queue: Value  # sq queue of free blocks
    iqueue: Value  # sq queue of interrupt-reserved free blocks
    equeue: Value  # sq queue of expansion chunks
    nalloc: Value  # number of blocks currently allocated -- verify
    lock: Value  # pool lock
    waitsem: Value  # semaphore used when wait is enabled
    procfs: Value  # procfs registration entry
+
+
class MemPoolMultiple(Value):
    """struct mempool_multiple_s

    A multiple-level pool: an array of mempool_s with increasing block
    sizes plus bookkeeping for expansion and lookup.
    """

    pools: List[MemPool]  # the sub-pools, npools entries
    npools: Value  # number of sub-pools
    expandsize: Value  # size of each expansion chunk
    minpoolsize: Value  # smallest sub-pool block size -- verify
    arg: Value  # opaque argument passed to the callbacks
    alloc: Value  # backing allocator callback
    alloc_size: Value  # callback returning an allocation's size -- verify
    free: Value  # backing free callback
    alloced: Value  # bytes handed out by the backing allocator -- verify
    delta: Value  # spacing between sub-pool block sizes -- verify
    lock: Value  # lock protecting the shared state
    chunk_queue: Value  # queue of allocated chunks
    chunk_size: Value  # size of each chunk
    dict_used: Value  # number of dictionary entries in use
    dict_col_num_log2: Value  # log2 of dictionary column count
    dict_row_num: Value  # dictionary row count
    dict: Value  # lookup dictionary for out-of-pool frees -- verify
+
+
class MemPoolBlock(Value):
    """struct mempool_backtrace_s

    Per-block trailer recording who allocated a pool block.
    """

    magic: Value  # allocation marker (MEMPOOL_MAGIC_ALLOC when in use)
    pid: Value  # owning task id
    seqno: Value  # allocation sequence number
    backtrace: Value  # allocation call stack
diff --git a/tools/gdb/nuttxgdb/utils.py b/tools/gdb/nuttxgdb/utils.py
index a4c0eebfb9..66237c82ff 100644
--- a/tools/gdb/nuttxgdb/utils.py
+++ b/tools/gdb/nuttxgdb/utils.py
@@ -500,6 +500,13 @@ def parse_arg(arg: str) -> Union[gdb.Value, int]:
         return None
 
 
def alias(name, command):
    """Define a gdb command alias, ignoring failures.

    Args:
        name: the alias to create.
        command: the command the alias expands to.
    """
    try:
        gdb.execute(f"alias {name} = {command}")
    except gdb.error:
        # Best effort: gdb raises when the alias already exists, e.g. if
        # the scripts are re-sourced within the same session.
        pass
+
+
 def nitems(array):
     array_type = array.type
     element_type = array_type.target()

Reply via email to