This is an automated email from the ASF dual-hosted git repository.

SpriCoder pushed a commit to branch fs/inner-view
in repository https://gitbox.apache.org/repos/asf/iotdb.git

commit d0672836c6ff1df5dc812f55ab71ac9c123e8f9e
Author: spricoder <[email protected]>
AuthorDate: Thu Apr 30 17:58:29 2026 +0800

    use sidecar file
---
 .../plans/2026-04-29-cli-filesystem-mode.md        | 110 +++++++--
 .../specs/2026-04-29-cli-filesystem-mode-design.md | 238 +++++++++++++------
 .../org/apache/iotdb/cli/fs/FilesystemShell.java   | 186 +++++++++++++--
 .../iotdb/cli/fs/command/FilesystemCommand.java    |   6 +
 .../cli/fs/command/FilesystemCommandParser.java    |  59 ++++-
 .../org/apache/iotdb/cli/fs/node/FsNodeType.java   |   3 +
 .../cli/fs/provider/FilesystemSchemaProvider.java  |   8 +
 .../provider/TableFilesystemMutationProvider.java  |  18 +-
 .../fs/provider/TableFilesystemSchemaProvider.java | 257 ++++++++++++++++++++-
 .../apache/iotdb/cli/fs/FilesystemShellTest.java   | 181 +++++++++++++--
 .../fs/command/FilesystemCommandParserTest.java    |  48 ++++
 .../TableFilesystemMutationProviderTest.java       |  24 +-
 .../TableFilesystemSchemaProviderTest.java         |  88 ++++++-
 13 files changed, 1078 insertions(+), 148 deletions(-)

diff --git a/docs/superpowers/plans/2026-04-29-cli-filesystem-mode.md 
b/docs/superpowers/plans/2026-04-29-cli-filesystem-mode.md
index fff6ef69730..081a5ce8748 100644
--- a/docs/superpowers/plans/2026-04-29-cli-filesystem-mode.md
+++ b/docs/superpowers/plans/2026-04-29-cli-filesystem-mode.md
@@ -23,7 +23,9 @@
 
 > **For agentic workers:** REQUIRED SUB-SKILL: Use 
 > superpowers:subagent-driven-development (recommended) or 
 > superpowers:executing-plans to implement this plan task-by-task. Steps use 
 > checkbox (`- [ ]`) syntax for tracking.
 
-**Goal:** Add an explicit read-only filesystem mode to IoTDB CLI while 
preserving default SQL CLI behavior.
+**Goal:** Add an explicit filesystem mode to IoTDB CLI while preserving 
default SQL CLI behavior.
+Filesystem mode is read-only by default; table mode has an opt-in minimal 
write loop behind
+`--fs_write_mode enabled`.
 
 **Architecture:** Add a small `org.apache.iotdb.cli.fs` subsystem with path 
parsing, command parsing, typed nodes, and tree/table schema providers backed 
by JDBC SQL. Existing `Cli` keeps ownership of startup parsing and dispatches 
to filesystem mode only when `--access_mode filesystem` is passed.
 
@@ -42,9 +44,12 @@
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/node/FsNode.java`: 
typed filesystem node.
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/sql/SqlExecutor.java`: 
minimal JDBC query abstraction.
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/sql/SqlRow.java`: row 
data object for tests and providers.
-- Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java`:
 read-only provider interface.
+- Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java`:
 schema and data read provider interface.
+- Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemMutationProvider.java`:
 write-gated mutation provider interface.
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TreeFilesystemSchemaProvider.java`:
 tree SQL mapping.
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProvider.java`:
 table SQL mapping.
+- Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProvider.java`:
 table write mapping.
+- Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/UnsupportedFilesystemMutationProvider.java`:
 unsupported mutation provider.
 - Create 
`iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/FilesystemShell.java`: 
command execution surface and interactive shell.
 - Test 
`iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/path/FsPathTest.java`.
 - Test 
`iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParserTest.java`.
@@ -58,17 +63,52 @@ These notes capture follow-up implementation experience for 
quickly resuming thi
 
 - The current filesystem mode commands are implemented in-process by 
`FilesystemCommandParser` and
   `FilesystemShell`; they do not call `/bin/ls`, `/bin/cat`, or 
`java.nio.file.FileSystem`.
-- Keep user-visible output Unix-like:
-  - `ls` prints names only.
+- `FilesystemCommandParser` currently parses `sql <statement>`, but 
`FilesystemShell` does not
+  execute it and will report `Unsupported filesystem command: SQL`. Raw SQL 
should be run in SQL
+  access mode until this command is explicitly implemented.
+- Keep user-visible output strictly aligned with standard Unix/POSIX 
filesystem command semantics.
+  Any deviation is a bug unless it is explicitly documented as a temporary 
compatibility exception.
+  - `ls` prints names only. The baseline output should be one entry per line, 
matching `ls -1`; do
+    not use comma-separated output.
+  - `ls -a` and `ll -a` include `.` and `..`; `ll` reuses `ls` option parsing 
as the long-listing
+    alias.
   - `tree` prints indented names only.
-  - `cat` and `paste` print tab-separated values.
+  - `cat /db/table.csv` prints CSV records. Legacy compatibility table/column 
paths and `paste`
+    continue to print tab-separated values.
+  - `cut -d, -f2,3 /db/table.csv` is the Unix-compatible multi-field 
projection form for CSV-first
+    table files. It is delimiter-based text cutting, not CSV quote parsing and 
not a database
+    column-selection dialect.
+  - `less` and `more` are non-interactive read aliases today; they print 
readable content with the
+    default read limit.
   - `stat` is the place to show metadata.
-- Do not add filesystem command dialects such as `cat --columns` or `select`. 
Multi-column reads use
+- Do not add filesystem command dialects such as `cat --columns` or `select`. 
Multi-column reads
+  should use `cut` for CSV-first table files; legacy column paths may still use
   `paste /db/table/col1 /db/table/col2`.
 - Table provider can optimize Unix-looking commands internally:
-  - `cat /db/table` -> `SELECT * FROM db.table LIMIT 20`
-  - `cat /db/table/col` -> `SELECT col FROM db.table LIMIT 20`
-  - `paste /db/table/col1 /db/table/col2` -> `SELECT col1, col2 FROM db.table 
LIMIT 20`
+  - Table mode is CSV-first: a database is a directory, and each table is 
exposed as
+    `/db/table.csv` with `/db/table.schema` and `/db/table.meta` sidecar 
regular files.
+  - `ls /db` should list `table.csv`, `table.schema`, and `table.meta` entries 
for each table.
+  - `cat /db/table.csv` -> `SELECT * FROM db.table LIMIT 20`, formatted as CSV.
+  - `cut -d, -f2,3 /db/table.csv` -> delimiter-based field selection over the 
CSV records.
+  - `cat /db/table.schema` -> `DESC db.table DETAILS`, formatted as CSV with 
IoTDB result columns
+    preserved.
+  - `cat /db/table.meta` -> `SHOW TABLES DETAILS FROM db`, filtered to the 
table and formatted as
+    CSV with IoTDB result columns preserved.
+  - Legacy `/db/table/column` paths may remain as compatibility paths or 
migration sources.
+  - Legacy `cat /db/table/col` -> `SELECT col FROM db.table LIMIT 20`
+  - Legacy `paste /db/table/col1 /db/table/col2` -> `SELECT col1, col2 FROM 
db.table LIMIT 20`
+- Table-mode write boundaries are intentionally narrow and only active with
+  `--fs_write_mode enabled`:
+  - `mkdir /db` creates a database.
+  - `rm /db/table.csv` drops a table.
+  - `mv /db/t1.csv /db/t2.csv` renames a table inside the same database.
+  - Forbid `rm` or `mv` of `/db/table.schema` and `/db/table.meta`.
+  - Forbid `rm /db`.
+  - Forbid cross-database rename such as `mv /db1/t.csv /db2/t.csv`.
+- Tree-mode writes remain unsupported even when `--fs_write_mode enabled` is 
set.
+- Filesystem completion is mode-aware after login: filesystem mode installs
+  `FilesystemShell.createCompleter()`, which completes command names at the 
first word and path
+  children later, appending `/` to directories.
 - Interactive filesystem command errors must be handled at the single-command 
loop level. A
   `SQLException` from `FilesystemShell.execute()` should print `<command>: 
<message>` and continue
   the prompt, not bubble out to `receiveCommands()` and exit the CLI.
@@ -77,6 +117,34 @@ These notes capture follow-up implementation experience for 
quickly resuming thi
   the server returned `550`; the unchecked propagation exited the CLI. Keep a 
regression test for
   this behavior.
 
+## Supported Command Quick Reference
+
+| Command | Description | Example |
+| --- | --- | --- |
+| `pwd` | Print the current filesystem path. | `pwd` |
+| `ls [-a\|-l\|-la] [path]` | List child names; `-a` includes dot entries and 
`-l` enables long listing. | `ls -la /db` |
+| `ll [-a] [path]` | Long listing alias with read-only permissions in output. 
| `ll -a /db` |
+| `cd <path>` | Change directory when the target is a directory node. | `cd 
/db` |
+| `stat [path]` | Print filesystem-style metadata for a node. | `stat 
/db/table.csv` |
+| `cat <path>...` | Print readable paths sequentially. | `cat /db/table.csv` |
+| `head [-n lines] <path>` | Print the first rows or text lines; short form 
such as `-5` is accepted. | `head -n 5 /db/table.csv` |
+| `tail [-n lines] <path>` | Print the last rows or text lines where 
supported. | `tail -n 5 /db/table.csv` |
+| `wc -l <path>` | Print logical count and path. | `wc -l /db/table.csv` |
+| `grep <pattern> <path>` | Print rows or lines containing a literal 
substring. | `grep spricoder /db/table.csv` |
+| `find [path] [-name name]` | Recursively print matching paths; `-name` is 
exact node-name matching. | `find /db -name table.csv` |
+| `less <path>` | Non-interactive read alias using the default read limit. | 
`less /db/table.csv` |
+| `more <path>` | Non-interactive read alias using the default read limit. | 
`more /db/table.schema` |
+| `file <path>` | Print `directory`, `regular file`, or `unknown`. | `file 
/db/table.meta` |
+| `du <path>` | Print logical count and path using provider count. | `du 
/db/table.csv` |
+| `cut -d<delimiter> -f<fields> <path>` | Delimiter-based Unix field 
selection; supports lists and closed ranges. | `cut -d, -f2,3 /db/table.csv` |
+| `paste <path>...` | Print multiple readable paths side by side; table mode 
supports legacy same-table column paths. | `paste /db/table/key 
/db/table/value` |
+| `tree [-L depth] [path]` | Print descendants with indentation and names 
only. | `tree -L 2 /db` |
+| `mkdir <path>` | Write-gated; in table mode with writes enabled, creates a 
database. | `mkdir /newdb` |
+| `rm <path>` | Write-gated; in table mode with writes enabled, only table CSV 
drop is allowed. | `rm /db/table.csv` |
+| `mv <source> <target>` | Write-gated; in table mode with writes enabled, 
only same-database table CSV rename is allowed. | `mv /db/t1.csv /db/t2.csv` |
+| `help` | Print filesystem-mode help. | `help` |
+| `exit` / `quit` | Exit filesystem mode. | `exit` |
+
 ## Subagent Usage Notes
 
 All later work on this feature may use subagents to accelerate execution. 
Prefer subagents when the
@@ -168,7 +236,7 @@ test commands listed below.
 - The broader filesystem-mode focused suite is:
 
   ```bash
-  mvn test -o -nsu 
-Dtest=AbstractCliTest,CliFilesystemModeTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest,FilesystemShellTest,JdbcSqlExecutorTest
+  mvn test -o -nsu 
-Dtest=AbstractCliTest,CliFilesystemModeTest,JlineUtilsTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest,TableFilesystemMutationProviderTest,FilesystemShellTest,JdbcSqlExecutorTest
   ```
 
 - Maven/Develocity may print `Operation not permitted` stack traces for writes 
under
@@ -249,7 +317,11 @@ Expected: all tree provider tests pass.
 
 - [ ] **Step 1: Write failing `TableFilesystemSchemaProviderTest`**
 
-Use a mocked `SqlExecutor` to verify `list(/)`, `list(/db)`, 
`list(/db/table)`, `describe(/db/table/col)`, and `read(/db/table/col)` issue 
expected SQL and return typed nodes.
+Use a mocked `SqlExecutor` to verify `list(/)`, `list(/db)`, 
`describe(/db/table.csv)`,
+`read(/db/table.csv)`, `describe(/db/table.schema)`, 
`readLines(/db/table.schema)`,
+`describe(/db/table.meta)`, and `readLines(/db/table.meta)` issue expected SQL 
and return typed nodes
+or text lines.
+Also cover legacy `/db/table/col` paths if compatibility behavior remains 
enabled.
 
 - [ ] **Step 2: Run test to verify it fails**
 
@@ -258,7 +330,10 @@ Expected: compilation failure or missing behavior.
 
 - [ ] **Step 3: Implement table provider**
 
-Implement database/table/column mapping with centralized identifier rendering.
+Implement CSV-first database/table sidecar mapping with centralized identifier 
rendering:
+`/db/table.csv` is the table data regular file, `/db/table.schema` is the 
schema sidecar regular
+file, and `/db/table.meta` is the metadata sidecar regular file. Keep 
`/db/table/column` only as a
+legacy compatibility path or migration source.
 
 - [ ] **Step 4: Run test to verify it passes**
 
@@ -282,22 +357,23 @@ Add the long option and route filesystem mode to 
`FilesystemShell` without chang
 
 - [ ] **Step 4: Run focused tests**
 
-Run: `mvn test -pl iotdb-client/cli 
-Dtest=AbstractCliTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest`
+Run: `mvn test -pl iotdb-client/cli 
-Dtest=AbstractCliTest,CliFilesystemModeTest,JlineUtilsTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest,TableFilesystemMutationProviderTest,FilesystemShellTest,JdbcSqlExecutorTest`
 Expected: all focused tests pass.
 
 ### Task 6: Verification
 
 - [ ] **Step 1: Run CLI module unit tests**
 
-Run: `mvn test -pl iotdb-client/cli`
-Expected: CLI module unit tests pass, or any unrelated pre-existing failure is 
documented with output.
+Run: `mvn test -o -nsu 
-Dtest=AbstractCliTest,CliFilesystemModeTest,JlineUtilsTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest,TableFilesystemMutationProviderTest,FilesystemShellTest,JdbcSqlExecutorTest`
 from `iotdb-client/cli`.
+Expected: focused filesystem-mode unit tests pass, or any unrelated 
pre-existing failure is
+documented with output.
 
 - [ ] **Step 2: Run formatting**
 
-Run: `mvn spotless:apply -pl iotdb-client/cli`
+Run: `mvn spotless:apply -o -nsu` from `iotdb-client/cli`.
 Expected: formatting applied without errors.
 
 - [ ] **Step 3: Re-run focused tests after formatting**
 
-Run: `mvn test -pl iotdb-client/cli 
-Dtest=AbstractCliTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest`
+Run: `mvn test -o -nsu 
-Dtest=AbstractCliTest,CliFilesystemModeTest,JlineUtilsTest,FsPathTest,FilesystemCommandParserTest,TreeFilesystemSchemaProviderTest,TableFilesystemSchemaProviderTest,TableFilesystemMutationProviderTest,FilesystemShellTest,JdbcSqlExecutorTest`
 from `iotdb-client/cli`.
 Expected: all focused tests pass.
diff --git a/docs/superpowers/specs/2026-04-29-cli-filesystem-mode-design.md 
b/docs/superpowers/specs/2026-04-29-cli-filesystem-mode-design.md
index 0d40b79a167..2de2d8a71c6 100644
--- a/docs/superpowers/specs/2026-04-29-cli-filesystem-mode-design.md
+++ b/docs/superpowers/specs/2026-04-29-cli-filesystem-mode-design.md
@@ -27,9 +27,14 @@ Extend the IoTDB CLI with an explicit filesystem access mode 
that lets users bro
 metadata through directory-like commands while preserving the existing SQL CLI 
behavior by
 default.
 
-The first version is read-only. The architecture must still leave clear 
extension points for
-future write operations such as creating databases, creating tables, dropping 
schema objects, or
-writing data.
+Filesystem-mode command behavior and output must strictly follow standard 
Unix/POSIX filesystem
+command semantics wherever an equivalent command exists. A deviation is 
treated as a bug unless it
+is explicitly documented as a temporary compatibility exception.
+
+Filesystem mode is read-only by default. Table mode also exposes a minimal, 
opt-in write loop
+behind `--fs_write_mode enabled`; unsupported or unsafe write paths must fail 
with filesystem-style
+errors. The architecture must still leave clear extension points for richer 
future write operations
+such as creating tables, altering schema objects, or writing data rows.
 
 ## Non-Goals
 
@@ -37,7 +42,9 @@ writing data.
 - Do not change the default SQL CLI behavior.
 - Do not make filesystem commands available implicitly in the existing SQL 
mode.
 - Do not bypass server-side SQL, permissions, dialect handling, timeout 
handling, or SSL handling.
-- Do not implement write operations in the first version.
+- Do not expose broad write operations by default. Writes are disabled unless
+  `--fs_write_mode enabled` is set, and the current writable surface is 
intentionally limited to
+  table-mode database creation, table drop, and same-database table rename.
 
 ## Compatibility Requirements
 
@@ -52,6 +59,8 @@ Backward compatibility is a hard requirement.
   established meanings in CLI and tool scripts.
 - The new mode must be selected explicitly with a long option such as
   `--access_mode filesystem`; the default remains `sql`.
+- Filesystem writes must be selected explicitly with `--fs_write_mode 
enabled`; the default remains
+  `disabled`.
 - In filesystem mode, `-e` executes a filesystem command such as `ls /`, not a 
SQL statement. This
   distinction must be documented in help output.
 
@@ -91,6 +100,9 @@ Add a new access mode to the existing `Cli` entry point:
 - `--access_mode sql`: default. Runs the current SQL CLI path.
 - `--access_mode filesystem`: runs the new filesystem shell after the existing 
authentication and
   JDBC connection setup.
+- `--fs_write_mode disabled`: default. Filesystem write commands return 
read-only errors.
+- `--fs_write_mode enabled`: enables the table-mode mutation provider. 
Tree-mode writes still use
+  the unsupported mutation provider.
 
 The current `Cli` class remains responsible for command-line option parsing 
and connection setup.
 After the mode is known, it dispatches to either the existing SQL command loop 
or the new
@@ -131,14 +143,30 @@ on children, while `stat` performs more detailed 
resolution.
 
 ### Table Model Mapping
 
-When `sql_dialect=table`, filesystem paths map to relational schema objects.
+When `sql_dialect=table`, filesystem paths use a CSV-first sidecar model. A 
database is exposed as
+a directory. Each table is exposed as a data regular file named `<table>.csv`, 
with adjacent
+sidecar regular files named `<table>.schema` and `<table>.meta`.
+
+The table data file is the primary path for user data operations:
+
+- `/<database>/<table>.csv`: table rows as CSV-like tabular content.
+- `/<database>/<table>.schema`: sidecar schema content rendered as CSV from
+  `DESC <database>.<table> DETAILS`, preserving the column names and values 
returned by IoTDB.
+- `/<database>/<table>.meta`: sidecar table metadata rendered as CSV from the 
table row returned by
+  `SHOW TABLES DETAILS FROM <database>`, preserving the column names and 
values returned by IoTDB.
+
+The legacy column-oriented path `/<database>/<table>/<column>` is not the 
primary table-model
+filesystem abstraction. It may remain as a compatibility path for existing 
read behavior during a
+migration period, or be documented as the migration source when moving users 
to the sidecar model.
 
 | Filesystem Path | IoTDB Object | Node Type | Discovery |
 | --- | --- | --- | --- |
 | `/` | Virtual root | `VIRTUAL_ROOT` | `SHOW DATABASES` |
-| `/<database>` | Database | `TABLE_DATABASE` | `SHOW DATABASES` |
-| `/<database>/<table>` | Table or view | `TABLE_TABLE` / `TABLE_VIEW` | `SHOW 
TABLES DETAILS FROM <database>` |
-| `/<database>/<table>/<column>` | Column | `TABLE_COLUMN` | `DESC 
<database>.<table> DETAILS` |
+| `/<database>` | Database directory | `TABLE_DATABASE` | `SHOW DATABASES` |
+| `/<database>/<table>.csv` | Table data regular file | `TABLE_DATA_FILE` | 
`SHOW TABLES FROM <database>` |
+| `/<database>/<table>.schema` | Schema sidecar regular file | 
`TABLE_SCHEMA_FILE` | Exists via `SHOW TABLES FROM <database>`; content via 
`DESC <database>.<table> DETAILS` |
+| `/<database>/<table>.meta` | Metadata sidecar regular file | 
`TABLE_META_FILE` | Exists via `SHOW TABLES FROM <database>`; content via `SHOW 
TABLES DETAILS FROM <database>` |
+| `/<database>/<table>/<column>` | Legacy column compatibility path | 
`TABLE_COLUMN` | `DESC <database>.<table> DETAILS` |
 
 Table-model devices from `SHOW DEVICES FROM <table>` are not part of the first 
version's base path
 hierarchy because they are data-instance-oriented rather than 
schema-container-oriented. They can
@@ -151,31 +179,51 @@ be added later as a virtual directory such as 
`/<database>/<table>/.devices`.
 - Relative paths are resolved against the current directory.
 - Attempts to navigate above `/` resolve to `/`.
 - Tree-model paths must begin at `/root` for real IoTDB metadata.
-- Table-model paths use `/database/table/column`.
-- Wildcard paths are not treated as filesystem nodes in the first version. 
Users can run wildcard
-  SQL through `sql <statement>`.
+- Table-model paths use `/database/table.csv`, `/database/table.schema`, and
+  `/database/table.meta`. The database component is a directory; the table 
data and sidecar paths
+  are regular files.
+- Legacy table-model paths of the form `/database/table/column` are 
compatibility paths or
+  migration inputs, not the primary table filesystem model.
+- Wildcard paths are not treated as filesystem nodes in the first version. 
Users should run
+  wildcard SQL through normal SQL mode.
 - SQL escaping and identifier quoting must be centralized in provider helper 
methods. Command
   implementations must not hand-build SQL strings for IoTDB identifiers.
 
-## Commands
-
-The first version supports a compact read-only command set.
-
-| Command | Behavior |
-| --- | --- |
-| `pwd` | Print the current filesystem path. |
-| `ls [path]` | List child nodes for a directory. |
-| `cd <path>` | Change current directory if the target is a directory node. |
-| `stat [path]` | Print node type and metadata. |
-| `cat <path>` | Print file-like schema content for a leaf node. |
-| `paste <path>...` | Print multiple file-like paths side by side, following 
Unix `paste` semantics. |
-| `tree [path] [-L depth]` | Recursively list children with an explicit or 
default depth limit. |
-| `sql <statement>` | Execute a raw SQL statement through the existing SQL 
result printer. |
-| `help` | Print filesystem-mode help. |
-| `exit` / `quit` | Exit filesystem mode. |
-
-Unsupported write-oriented commands can be reserved for future use and return 
a clear read-only
-message if introduced before write support.
+## Supported Command Reference
+
+Filesystem mode commands are implemented in-process by 
`FilesystemCommandParser` and
+`FilesystemShell`. They are not delegated to `/bin/ls`, `/bin/cat`, 
`/bin/cut`, or
+`java.nio.file.FileSystem`, but their visible syntax and output should match 
Unix command
+semantics wherever the same command exists. Provider support can still vary by 
dialect and path.
+
+| Command | Description | Example |
+| --- | --- | --- |
+| `pwd` | Print the current filesystem path. | `pwd` |
+| `ls [-a\|-l\|-la] [path]` | List child names, one per line. `-a` includes `.` 
and `..`; `-l` uses long listing output. | `ls -a /db` |
+| `ll [-a] [path]` | Long listing alias. Uses read-only permissions by 
default: directories as `dr-xr-xr-x`, files as `-r--r--r--`. | `ll -a /db` |
+| `cd <path>` | Change the current directory only if the target is a directory 
node. | `cd /db` |
+| `stat [path]` | Print filesystem-style metadata, including path, Unix type, 
and provider metadata. | `stat /db/table.csv` |
+| `cat <path>...` | Print one or more readable paths sequentially. Table 
`.csv`, `.schema`, and `.meta` sidecars print CSV lines; legacy table/column 
paths print tab-separated values. | `cat /db/table.csv` |
+| `head [-n lines] <path>` | Print the first rows or text lines for a readable 
path. Short numeric form such as `head -5 <path>` is also parsed. | `head -n 5 
/db/table.csv` |
+| `tail [-n lines] <path>` | Print the last rows or text lines where the 
provider supports tail. Table `.csv` uses `ORDER BY time DESC LIMIT n` 
internally and returns original order. | `tail -n 5 /db/table.csv` |
+| `wc -l <path>` | Print logical row or line count plus path. Only `-l` is 
supported. | `wc -l /db/table.csv` |
+| `grep <pattern> <path>` | Print lines or rows containing the literal 
pattern. This is substring matching, not regular-expression matching. | `grep 
spricoder /db/table.csv` |
+| `find [path] [-name name]` | Recursively list the starting path and 
descendants whose node name exactly matches `name`; without `-name`, it prints 
all visited paths. | `find /db -name table.csv` |
+| `less <path>` | Current implementation prints readable content like `cat` 
with the default read limit; it is not an interactive pager. | `less 
/db/table.csv` |
+| `more <path>` | Current implementation prints readable content like `cat` 
with the default read limit; it is not an interactive pager. | `more 
/db/table.schema` |
+| `file <path>` | Print the Unix type for the path: `directory`, `regular 
file`, or `unknown`. | `file /db/table.meta` |
+| `du <path>` | Print logical count plus path, using the provider count 
operation. | `du /db/table.csv` |
+| `cut -d<delimiter> -f<fields> <path>` | Apply Unix delimiter-based field 
selection to each line. The delimiter must be one character. Field lists and 
closed ranges such as `2,3` and `1-2` are supported. | `cut -d, -f2,3 
/db/table.csv` |
+| `paste <path>...` | Read multiple file-like paths side by side. Table mode 
currently supports legacy same-table column paths and optimizes them to one SQL 
projection. | `paste /db/table/key /db/table/value` |
+| `tree [-L depth] [path]` | Recursively print descendants with indentation 
and names only. `-L` limits recursion depth. | `tree -L 2 /db` |
+| `mkdir <path>` | Write-gated command. With table mode and `--fs_write_mode 
enabled`, `mkdir /db` creates a database. Otherwise it returns a read-only or 
unsupported error. | `mkdir /newdb` |
+| `rm <path>` | Write-gated command. With table mode and `--fs_write_mode 
enabled`, only `rm /db/table.csv` is allowed and maps to table drop. | `rm 
/db/table.csv` |
+| `mv <source> <target>` | Write-gated command. With table mode and 
`--fs_write_mode enabled`, only same-database table CSV rename is allowed. | 
`mv /db/t1.csv /db/t2.csv` |
+| `help` | Print filesystem-mode help. | `help` |
+| `exit` / `quit` | Exit filesystem mode. | `exit` |
+
+The parser currently recognizes `sql <statement>`, but `FilesystemShell` does 
not execute it yet.
+Raw SQL should be run in the default SQL access mode.
 
 ## Command Mapping
 
@@ -196,24 +244,39 @@ message if introduced before write support.
 | Operation | SQL/API Mapping |
 | --- | --- |
 | `ls /` | `SHOW DATABASES`. |
-| `ls /db` | `SHOW TABLES FROM db`. |
-| `ls /db/table` | `DESC db.table`. |
+| `ls /db` | `SHOW TABLES FROM db`, formatted as `table.csv`, `table.schema`, 
and `table.meta` entries for each table. |
 | `stat /db` | `SHOW DATABASES DETAILS`, filtered to the database. |
-| `stat /db/table` | `SHOW TABLES DETAILS FROM db`, filtered to the table. |
-| `stat /db/table/col` | `DESC db.table DETAILS`, filtered to the column. |
-| `cat /db/table/col` | `DESC db.table DETAILS`, formatted as schema text for 
the column. |
+| `stat /db/table.csv` | `SHOW TABLES FROM db`, filtered to the table and 
rendered as filesystem metadata for the data file. |
+| `stat /db/table.schema` | `SHOW TABLES FROM db`, filtered to the table and 
rendered as filesystem metadata for the schema sidecar. |
+| `stat /db/table.meta` | `SHOW TABLES FROM db`, filtered to the table and 
rendered as filesystem metadata for the metadata sidecar. |
+| `cat /db/table.csv` | `SELECT * FROM db.table LIMIT <limit>`, formatted as 
CSV records. |
+| `cut -d, -f2,3 /db/table.csv` | Delimiter-based text field projection over 
the CSV records. |
+| `cat /db/table.schema` | `DESC db.table DETAILS`, formatted as CSV with 
IoTDB result columns preserved. |
+| `cat /db/table.meta` | `SHOW TABLES DETAILS FROM db`, filtered to the table 
and formatted as CSV with IoTDB result columns preserved. |
+| `stat /db/table/col` | Legacy compatibility: `DESC db.table DETAILS`, 
filtered to the column. |
+| `cat /db/table/col` | Legacy compatibility: `SELECT col FROM db.table LIMIT 
<limit>`. |
 
 ## Unix Output Semantics
 
-Filesystem mode should keep command output close to standard Unix command 
behavior. Avoid exposing
-internal implementation types or Java debug-style structures in normal command 
output.
+Filesystem mode must keep command output aligned with standard Unix command 
behavior. Avoid
+exposing internal implementation types or Java debug-style structures in 
normal command output.
 
-- `ls` prints child names only, one entry per line.
+- `ls` prints child names only. The baseline implementation should use one 
entry per line, matching
+  `ls -1`; it must not introduce comma-separated output or database-specific 
listing dialects.
+- `ls -a` and `ll -a` include `.` and `..` before normal entries.
 - `tree` prints the hierarchy with indentation and names only.
-- `cat` prints row content as tab-separated values, one row per line.
+- `cat` prints regular file content without Java object formatting. For table 
data files this is
+  CSV; for legacy compatibility table/column paths this remains tab-separated 
row values.
+- `cut -d, -f2,3 /db/table.csv` is the Unix-compatible way to project fields 
from table CSV
+  content. It performs delimiter-based text cutting like Unix `cut`; it does 
not parse CSV quoting
+  or introduce table-specific column-selection flags.
 - `paste` prints multiple file-like paths side by side as tab-separated values.
+- `less` and `more` are currently non-interactive read aliases with the 
default read limit.
 - `stat` is the command that may expose typed metadata, because Unix `stat` is 
explicitly about
   object metadata.
+- `mkdir`, `rm`, and `mv` are write-gated. In default mode they report a 
read-only filesystem.
+  When enabled, unsupported levels must report an invalid filesystem write 
operation instead of
+  falling through to broad SQL execution.
 - Error output should follow a command-prefixed style such as `cat: <message>` 
or
   `cd: <path>: Not a directory`.
 
@@ -221,19 +284,38 @@ This means provider-internal node types such as 
`TABLE_DATABASE`, `TABLE_COLUMN`
 `TREE_DATABASE` must not appear in `ls` or `tree` output. Similarly, 
`SqlRow.asMap().toString()`
 must not be used for `cat` or `paste` output.
 
+## Completion Semantics
+
+Interactive completion must be mode-aware:
+
+- SQL mode keeps the existing SQL completer.
+- Filesystem mode installs `FilesystemShell.createCompleter()`.
+- At the first word, filesystem completion suggests filesystem commands.
+- At later words, filesystem completion lists children from the relevant path, 
filters by prefix,
+  and appends `/` to directory candidates.
+- Completion failures are ignored so TAB never interrupts the interactive 
session.
+
 ## Read Semantics
 
 Table-mode read behavior is currently:
 
-- `cat /db/table` maps to `SELECT * FROM db.table LIMIT <limit>`.
-- `cat /db/table/column` maps to `SELECT column FROM db.table LIMIT <limit>`.
-- `paste /db/table/col1 /db/table/col2` maps to
+- `cat /db/table.csv` maps to `SELECT * FROM db.table LIMIT <limit>`.
+- `cat /db/table.schema` maps to `DESC db.table DETAILS` and preserves IoTDB 
result columns in CSV.
+- `cat /db/table.meta` maps to `SHOW TABLES DETAILS FROM db`, filters to the 
table row, and
+  preserves IoTDB result columns in CSV.
+- Legacy `cat /db/table/column` may map to `SELECT column FROM db.table LIMIT 
<limit>` during the
+  migration period.
+- Legacy `paste /db/table/col1 /db/table/col2` may map to
   `SELECT col1, col2 FROM db.table LIMIT <limit>` when all paths are columns 
from the same table.
 
 Although `paste` is implemented through one optimized SQL query for table 
mode, the user-facing
 semantics remain Unix-like: users pass multiple file paths to a standard Unix 
command rather than
 using a database-specific `select` command or `cat --columns` dialect.
 
+For CSV-first table files, multi-column projection should prefer Unix `cut` 
syntax such as
+`cut -d, -f2,3 /db/table.csv`. The implementation may later optimize this 
internally, but the
+public interface must remain the standard `cut` form.
+
 In interactive filesystem mode, a single command failure must not terminate 
the CLI session. For
 example, if `cat time` is resolved from `/testtest` to `/testtest/time`, table 
mode treats that as a
 table path and may receive a server error such as `550: Table 'testtest.time' 
does not exist`. That
@@ -244,39 +326,61 @@ error should be printed as `cat: 550: ...`, then the 
prompt should continue.
 New code should live under `org.apache.iotdb.cli.fs`.
 
 - `FilesystemShell`: filesystem-mode command loop and `-e` single-command 
execution.
-- `command/*`: command parsing and command handlers for `pwd`, `ls`, `cd`, 
`stat`, `cat`, `tree`,
-  `sql`, and `help`.
+- `command/*`: command parsing and command value objects for filesystem 
commands such as `pwd`,
+  `ls`, `cd`, `stat`, `cat`, `cut`, `paste`, `tree`, and `help`.
 - `path/FsPath`: path normalization and resolution for absolute and relative 
paths.
 - `node/FsNode`, `node/FsNodeType`, `node/FsNodeMetadata`: typed metadata 
model.
-- `provider/FilesystemSchemaProvider`: read-only provider interface.
+- `provider/FilesystemSchemaProvider`: schema and data read provider interface.
+- `provider/FilesystemMutationProvider`: write-gated mutation provider 
interface.
 - `provider/TreeFilesystemSchemaProvider`: tree-model SQL mapping.
 - `provider/TableFilesystemSchemaProvider`: table-model SQL mapping.
+- `provider/TableFilesystemMutationProvider`: opt-in table-model write mapping.
+- `provider/UnsupportedFilesystemMutationProvider`: mutation provider used 
when writes are disabled
+  at the provider layer or unsupported by the dialect.
 - `sql/SqlExecutor`: JDBC statement execution and result extraction helpers.
 - `print/FilesystemPrinter`: text output for filesystem commands.
 
 Existing SQL CLI code should not be broadly refactored. The implementation 
should only add the
-small hooks needed to select filesystem mode and to let `sql <statement>` 
reuse existing SQL result
-printing.
+small hooks needed to select filesystem mode, install the mode-specific 
completer, and route
+filesystem commands through the provider layer. Raw SQL execution remains the 
responsibility of SQL
+access mode until a filesystem-mode `sql` command is explicitly implemented.
 
 ## Provider Interface Direction
 
-The first version needs read-only operations:
+Filesystem mode separates reads from mutations. The schema provider owns read 
operations:
 
-- `resolve(FsPath path)`
 - `list(FsPath path)`
 - `describe(FsPath path)`
 - `read(FsPath path, int limit)`
+- `readLines(FsPath path, int limit)` for text sidecars such as `.csv`, 
`.schema`, and `.meta`
+- `tail(FsPath path, int limit)` / `tailLines(FsPath path, int limit)` where 
the provider supports
+  tail
+- `count(FsPath path)` where the provider supports logical count
+- `read(List<FsPath> paths, int limit)` for provider-optimized multi-path 
reads such as legacy
+  table-column `paste`
+
+The mutation provider owns the current write-gated operations:
+
+- `mkdir(FsPath path)`
+- `remove(FsPath path)`
+- `move(FsPath source, FsPath target)`
+
+When writes are disabled, `FilesystemShell` rejects `mkdir`, `rm`, and `mv` 
before calling the
+mutation provider. When writes are enabled, table mode uses 
`TableFilesystemMutationProvider`; tree
+mode still uses `UnsupportedFilesystemMutationProvider`.
+
+The current table-model write boundary is intentionally narrow:
 
-The interface should be shaped so future writes can be added without changing 
command parsing:
+- `mkdir /db` creates a database directory.
+- `rm /db/table.csv` drops the table data file and therefore the table.
+- `mv /db/t1.csv /db/t2.csv` renames a table within the same database.
 
-- `create(FsPath path, CreateOptions options)`
-- `delete(FsPath path, DeleteOptions options)`
-- `rename(FsPath source, FsPath target)`
-- `write(FsPath path, FsWriteContent content, WriteOptions options)`
+The sidecar files are metadata views, not independently writable objects:
 
-The first version exposes only read methods in the public provider interface. 
Write operation names
-remain documented here as future extension points, and command handlers for 
unsupported write-like
-commands return a read-only error instead of calling provider methods.
+- `rm /db/table.schema` and `rm /db/table.meta` are forbidden.
+- `mv /db/table.schema ...` and `mv /db/table.meta ...` are forbidden.
+- `rm /db` is forbidden; database deletion must remain an explicit SQL 
operation or a separately
+  designed filesystem command with stronger safeguards.
+- Cross-database table rename, such as `mv /db1/t.csv /db2/t.csv`, is 
forbidden.
 
 ## Dependency Strategy
 
@@ -285,7 +389,7 @@ Use existing, proven dependencies already present in the 
CLI module:
 - `commons-cli` for startup argument parsing.
 - JLine for terminal input, history, and autosuggestion.
 - JDBC and IoTDB SQL for metadata access.
-- Existing IoTDB result printing where raw SQL output is required.
+- Existing IoTDB result printing if a future filesystem-mode raw SQL command 
is added.
 
 Do not add a shell framework, FUSE dependency, or local filesystem abstraction 
library for the
 first version. The only custom path logic should be the IoTDB-specific mapping 
from slash paths to
@@ -330,14 +434,18 @@ Unit tests should cover the behavior without needing a 
live IoTDB instance where
 
 - `FsPath` tests for absolute paths, relative paths, `.`, `..`, empty input, 
and attempts to move
   above root.
-- Command parser tests for valid and invalid forms of `ls`, `cd`, `stat`, 
`cat`, `tree -L`, `sql`,
-  `help`, `exit`, and `quit`.
+- Command parser tests for valid and invalid forms of all supported shell 
commands, including
+  listing options, `head`/`tail` limits, `wc -l`, `find -name`, `cut`, 
`paste`, write-gated
+  commands, and the parser-only `sql` form.
 - Provider tests with a mocked `SqlExecutor`, verifying tree-mode path-to-SQL 
mapping.
-- Provider tests with a mocked `SqlExecutor`, verifying table-mode path-to-SQL 
mapping.
+- Provider tests with a mocked `SqlExecutor`, verifying table-mode path-to-SQL 
mapping, CSV
+  sidecars, IoTDB-preserved `.schema`/`.meta` content, multi-column legacy 
reads, and table
+  mutation restrictions.
 - CLI option tests extending existing CLI unit coverage for default 
`access_mode`, filesystem
-  mode, and invalid mode values.
+  mode, invalid mode values, and `fs_write_mode`.
 - Shell tests proving SQL mode remains the default and filesystem mode 
dispatches to
-  `FilesystemShell`.
+  `FilesystemShell`, keeps single-command failures inside the prompt loop, and 
preserves Unix-like
+  output.
 
 Integration tests against a real IoTDB cluster can be added after the 
unit-tested shell behavior is
 stable.
@@ -349,5 +457,5 @@ stable.
 - Identifier escaping bugs: centralize escaping in provider helpers.
 - Backward compatibility regressions: make `sql` the default access mode and 
keep SQL-mode command
   processing unchanged.
-- Future write support complexity: route all object semantics through typed 
nodes and providers,
-  not through command-specific SQL construction.
+- Write support complexity: keep writes opt-in and narrow, and route object 
semantics through typed
+  paths and mutation providers rather than broad command-specific SQL 
construction.
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/FilesystemShell.java 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/FilesystemShell.java
index 8929c964524..3ac9eca9d94 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/FilesystemShell.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/FilesystemShell.java
@@ -40,6 +40,7 @@ import java.util.ArrayList;
 import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.regex.Pattern;
 
 public class FilesystemShell {
 
@@ -47,7 +48,8 @@ public class FilesystemShell {
   private static final List<String> COMMANDS =
       Arrays.asList(
           "pwd", "ls", "ll", "cd", "stat", "cat", "head", "tail", "wc", 
"grep", "find", "less",
-          "more", "file", "du", "mkdir", "rm", "mv", "paste", "tree", "help", 
"exit", "quit");
+          "more", "file", "du", "mkdir", "rm", "mv", "cut", "paste", "tree", 
"help", "exit",
+          "quit");
 
   private final CliContext ctx;
   private final FilesystemSchemaProvider provider;
@@ -77,10 +79,10 @@ public class FilesystemShell {
         ctx.getPrinter().println(currentPath.toString());
         return true;
       case LS:
-        printNodes(provider.list(resolve(command.getPath())));
+        printNodes(provider.list(resolve(command.getPath())), 
isAllOption(command));
         return true;
       case LL:
-        printLongNodes(provider.list(resolve(command.getPath())));
+        printLongNodes(provider.list(resolve(command.getPath())), 
isAllOption(command));
         return true;
       case CD:
         changeDirectory(command.getPath());
@@ -92,10 +94,10 @@ public class FilesystemShell {
         printSequentialReads(command.getPaths(), DEFAULT_READ_LIMIT);
         return true;
       case HEAD:
-        printRows(provider.read(resolve(command.getPath()), 
command.getLimit()));
+        printHead(command.getPath(), command.getLimit());
         return true;
       case TAIL:
-        printRows(provider.tail(resolve(command.getPath()), 
command.getLimit()));
+        printTail(command.getPath(), command.getLimit());
         return true;
       case WC:
         printLineCount(command.getPath());
@@ -108,7 +110,7 @@ public class FilesystemShell {
         return true;
       case LESS:
       case MORE:
-        printRows(provider.read(resolve(command.getPath()), 
DEFAULT_READ_LIMIT));
+        printReadable(command.getPath(), DEFAULT_READ_LIMIT);
         return true;
       case FILE:
         printFile(command.getPath());
@@ -125,6 +127,9 @@ public class FilesystemShell {
       case MV:
         move(command.getPaths());
         return true;
+      case CUT:
+        printCut(command.getPath(), command.getOption(), command.getPattern());
+        return true;
       case PASTE:
         printRows(provider.read(resolve(command.getPaths()), 
DEFAULT_READ_LIMIT));
         return true;
@@ -196,29 +201,31 @@ public class FilesystemShell {
     return resolvedPaths;
   }
 
-  private void printNodes(List<FsNode> nodes) {
-    StringBuilder builder = new StringBuilder();
-    for (FsNode node : nodes) {
-      if (builder.length() > 0) {
-        builder.append(',');
-      }
-      builder.append(node.getName());
+  private void printNodes(List<FsNode> nodes, boolean all) {
+    if (all) {
+      ctx.getPrinter().println(".");
+      ctx.getPrinter().println("..");
     }
-    if (builder.length() > 0) {
-      ctx.getPrinter().println(builder.toString());
+    for (FsNode node : nodes) {
+      ctx.getPrinter().println(node.getName());
     }
   }
 
-  private void printLongNodes(List<FsNode> nodes) {
+  private void printLongNodes(List<FsNode> nodes, boolean all) {
+    if (all) {
+      ctx.getPrinter().println(longMode(FsNodeType.VIRTUAL_ROOT) + "  1 iotdb 
iotdb 0 .");
+      ctx.getPrinter().println(longMode(FsNodeType.VIRTUAL_ROOT) + "  1 iotdb 
iotdb 0 ..");
+    }
     for (FsNode node : nodes) {
       ctx.getPrinter().println(longMode(node.getType()) + "  1 iotdb iotdb 0 " 
+ node.getName());
     }
   }
 
   private void printNode(FsNode node) {
-    ctx.getPrinter().println(node.getName() + "\t" + node.getType());
+    ctx.getPrinter().println("File: " + node.getPath());
+    ctx.getPrinter().println("Type: " + unixType(node.getType()));
     for (Map.Entry<String, String> entry : node.getMetadata().entrySet()) {
-      ctx.getPrinter().println(entry.getKey() + "\t" + entry.getValue());
+      ctx.getPrinter().println(entry.getKey() + ": " + entry.getValue());
     }
   }
 
@@ -228,10 +235,38 @@ public class FilesystemShell {
     }
   }
 
+  private void printLines(List<String> lines) {
+    for (String line : lines) {
+      ctx.getPrinter().println(line);
+    }
+  }
+
   private void printSequentialReads(List<String> paths, int limit) throws 
SQLException {
     for (String path : paths) {
-      printRows(provider.read(resolve(path), limit));
+      printReadable(path, limit);
+    }
+  }
+
+  private void printReadable(String path, int limit) throws SQLException {
+    FsPath resolvedPath = resolve(path);
+    if (isTextFile(resolvedPath)) {
+      printLines(provider.readLines(resolvedPath, limit));
+      return;
+    }
+    printRows(provider.read(resolvedPath, limit));
+  }
+
+  private void printHead(String path, int limit) throws SQLException {
+    printReadable(path, limit);
+  }
+
+  private void printTail(String path, int limit) throws SQLException {
+    FsPath resolvedPath = resolve(path);
+    if (isTextFile(resolvedPath)) {
+      printLines(provider.tailLines(resolvedPath, limit));
+      return;
     }
+    printRows(provider.tail(resolvedPath, limit));
   }
 
   private void printLineCount(String path) throws SQLException {
@@ -240,7 +275,16 @@ public class FilesystemShell {
   }
 
   private void printMatchingRows(String path, String pattern) throws 
SQLException {
-    for (SqlRow row : provider.read(resolve(path), DEFAULT_READ_LIMIT)) {
+    FsPath resolvedPath = resolve(path);
+    if (isTextFile(resolvedPath)) {
+      for (String line : provider.readLines(resolvedPath, DEFAULT_READ_LIMIT)) 
{
+        if (line.contains(pattern)) {
+          ctx.getPrinter().println(line);
+        }
+      }
+      return;
+    }
+    for (SqlRow row : provider.read(resolvedPath, DEFAULT_READ_LIMIT)) {
       String line = joinValues(row);
       if (line.contains(pattern)) {
         ctx.getPrinter().println(line);
@@ -248,6 +292,24 @@ public class FilesystemShell {
     }
   }
 
+  private void printCut(String path, String delimiter, String fields) throws 
SQLException {
+    FsPath resolvedPath = resolve(path);
+    for (String line : readableLines(resolvedPath, DEFAULT_READ_LIMIT)) {
+      ctx.getPrinter().println(cutLine(line, delimiter, fields));
+    }
+  }
+
+  private List<String> readableLines(FsPath path, int limit) throws 
SQLException {
+    if (isTextFile(path)) {
+      return provider.readLines(path, limit);
+    }
+    List<String> lines = new ArrayList<>();
+    for (SqlRow row : provider.read(path, limit)) {
+      lines.add(joinValues(row));
+    }
+    return lines;
+  }
+
   private void printFind(FsPath path, String pattern) throws SQLException {
     FsNode node = provider.describe(path);
     if (matchesFind(node, pattern)) {
@@ -267,7 +329,8 @@ public class FilesystemShell {
 
   private void printFile(String path) throws SQLException {
     FsPath resolvedPath = resolve(path);
-    ctx.getPrinter().println(resolvedPath + ": " + 
provider.describe(resolvedPath).getType());
+    ctx.getPrinter()
+        .println(resolvedPath + ": " + 
unixType(provider.describe(resolvedPath).getType()));
   }
 
   private void printDiskUsage(String path) throws SQLException {
@@ -340,6 +403,7 @@ public class FilesystemShell {
     ctx.getPrinter().println("mkdir <path>");
     ctx.getPrinter().println("rm <path>");
     ctx.getPrinter().println("mv <source> <target>");
+    ctx.getPrinter().println("cut -d<delimiter> -f<fields> <path>");
     ctx.getPrinter().println("paste <path>...");
     ctx.getPrinter().println("tree [-L depth] [path]");
     ctx.getPrinter().println("exit");
@@ -363,6 +427,86 @@ public class FilesystemShell {
     return "-r--r--r--";
   }
 
+  private static String unixType(FsNodeType type) {
+    if (isDirectory(type)) {
+      return "directory";
+    }
+    if (type == FsNodeType.UNKNOWN) {
+      return "unknown";
+    }
+    return "regular file";
+  }
+
+  private static boolean isAllOption(FilesystemCommand command) {
+    return "-a".equals(command.getOption());
+  }
+
+  private static String cutLine(String line, String delimiter, String fields) {
+    if (!line.contains(delimiter)) {
+      return line;
+    }
+    String[] values = line.split(Pattern.quote(delimiter), -1);
+    boolean[] selected = selectedFields(fields, values.length);
+    StringBuilder builder = new StringBuilder();
+    for (int i = 0; i < values.length; i++) {
+      if (!selected[i]) {
+        continue;
+      }
+      if (builder.length() > 0) {
+        builder.append(delimiter);
+      }
+      builder.append(values[i]);
+    }
+    return builder.toString();
+  }
+
+  private static boolean[] selectedFields(String fields, int fieldCount) {
+    boolean[] selected = new boolean[fieldCount];
+    for (String field : fields.split(",")) {
+      selectField(field.trim(), selected);
+    }
+    return selected;
+  }
+
+  private static void selectField(String field, boolean[] selected) {
+    if (field.isEmpty()) {
+      return;
+    }
+    int dash = field.indexOf('-');
+    if (dash < 0) {
+      selectFieldNumber(field, selected);
+      return;
+    }
+    int start = parsePositiveInt(field.substring(0, dash));
+    int end = parsePositiveInt(field.substring(dash + 1));
+    if (start <= 0 || end <= 0 || start > end) {
+      return;
+    }
+    for (int i = start; i <= end && i <= selected.length; i++) {
+      selected[i - 1] = true;
+    }
+  }
+
+  private static void selectFieldNumber(String field, boolean[] selected) {
+    int fieldNumber = parsePositiveInt(field);
+    if (fieldNumber > 0 && fieldNumber <= selected.length) {
+      selected[fieldNumber - 1] = true;
+    }
+  }
+
+  private static int parsePositiveInt(String value) {
+    try {
+      return Integer.parseInt(value);
+    } catch (NumberFormatException e) {
+      return -1;
+    }
+  }
+
+  private static boolean isTextFile(FsPath path) {
+    String fileName = path.getFileName();
+    return fileName.endsWith(".csv") || fileName.endsWith(".schema") || 
fileName.endsWith(".meta");
+  }
+
   private class FilesystemCompleter implements Completer {
 
     @Override
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommand.java
 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommand.java
index 67aa6561d41..df86856ab80 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommand.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommand.java
@@ -43,6 +43,7 @@ public class FilesystemCommand {
     MKDIR,
     RM,
     MV,
+    CUT,
     PASTE,
     TREE,
     SQL,
@@ -117,6 +118,11 @@ public class FilesystemCommand {
         type, path, Collections.singletonList(path), -1, -1, "", pattern, "", 
"");
   }
 
+  public static FilesystemCommand cut(String delimiter, String fields, String 
path) {
+    return new FilesystemCommand(
+        Type.CUT, path, Collections.singletonList(path), -1, -1, delimiter, 
fields, "", "");
+  }
+
   public static FilesystemCommand tree(String path, int depth) {
     return new FilesystemCommand(
         Type.TREE, path, Collections.singletonList(path), depth, -1, "", "", 
"", "");
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParser.java
 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParser.java
index 61000c17ee3..62d267153b7 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParser.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParser.java
@@ -26,6 +26,7 @@ import java.util.Locale;
 public class FilesystemCommandParser {
 
   private static final String DEFAULT_PATH = ".";
+  private static final String DEFAULT_CUT_DELIMITER = "\t";
   private static final int DEFAULT_TREE_DEPTH = Integer.MAX_VALUE;
   private static final int DEFAULT_HEAD_LIMIT = 10;
 
@@ -54,10 +55,10 @@ public class FilesystemCommandParser {
     String[] tokens = line.split("\\s+");
     String command = tokens[0].toLowerCase(Locale.ROOT);
     if ("ls".equals(command)) {
-      return parseLs(tokens);
+      return parseList(tokens, false);
     }
     if ("ll".equals(command)) {
-      return FilesystemCommand.path(FilesystemCommand.Type.LL, 
pathArgument(tokens));
+      return parseList(tokens, true);
     }
     if ("cd".equals(command)) {
       return FilesystemCommand.path(FilesystemCommand.Type.CD, 
pathArgument(tokens));
@@ -104,6 +105,9 @@ public class FilesystemCommandParser {
     if ("mv".equals(command)) {
       return parseMv(tokens);
     }
+    if ("cut".equals(command)) {
+      return parseCut(tokens);
+    }
     if ("paste".equals(command)) {
       return parsePaste(tokens);
     }
@@ -132,6 +136,46 @@ public class FilesystemCommandParser {
     return FilesystemCommand.paths(FilesystemCommand.Type.PASTE, paths);
   }
 
+  private static FilesystemCommand parseCut(String[] tokens) {
+    String delimiter = DEFAULT_CUT_DELIMITER;
+    String fields = "";
+    String path = "";
+
+    for (int i = 1; i < tokens.length; i++) {
+      String token = tokens[i];
+      if ("-d".equals(token)) {
+        if (i + 1 >= tokens.length) {
+          return FilesystemCommand.invalid("Missing cut delimiter");
+        }
+        delimiter = tokens[++i];
+      } else if (token.startsWith("-d") && token.length() > 2) {
+        delimiter = token.substring(2);
+      } else if ("-f".equals(token)) {
+        if (i + 1 >= tokens.length) {
+          return FilesystemCommand.invalid("Missing cut fields");
+        }
+        fields = tokens[++i];
+      } else if (token.startsWith("-f") && token.length() > 2) {
+        fields = token.substring(2);
+      } else if (token.startsWith("-")) {
+        return FilesystemCommand.invalid("Unsupported cut option: " + token);
+      } else {
+        path = token;
+      }
+    }
+
+    if (delimiter.length() != 1) {
+      return FilesystemCommand.invalid("Cut delimiter must be a single 
character");
+    }
+    if (fields.isEmpty()) {
+      return FilesystemCommand.invalid("Missing cut fields");
+    }
+    if (path.isEmpty()) {
+      return FilesystemCommand.invalid("Missing cut path");
+    }
+    return FilesystemCommand.cut(delimiter, fields, path);
+  }
+
   private static FilesystemCommand parseRm(String[] tokens) {
     if (tokens.length < 2) {
       return FilesystemCommand.invalid("Missing rm path");
@@ -152,9 +196,10 @@ public class FilesystemCommandParser {
     return FilesystemCommand.paths(FilesystemCommand.Type.MV, paths);
   }
 
-  private static FilesystemCommand parseLs(String[] tokens) {
-    FilesystemCommand.Type type = FilesystemCommand.Type.LS;
+  private static FilesystemCommand parseList(String[] tokens, boolean 
longMode) {
+    FilesystemCommand.Type type = longMode ? FilesystemCommand.Type.LL : 
FilesystemCommand.Type.LS;
     String path = DEFAULT_PATH;
+    boolean all = false;
 
     for (int i = 1; i < tokens.length; i++) {
       String token = tokens[i];
@@ -163,7 +208,9 @@ public class FilesystemCommandParser {
           char option = token.charAt(j);
           if (option == 'l') {
             type = FilesystemCommand.Type.LL;
-          } else if (option != 'a') {
+          } else if (option == 'a') {
+            all = true;
+          } else {
             return FilesystemCommand.invalid("Unsupported ls option: -" + 
option);
           }
         }
@@ -171,7 +218,7 @@ public class FilesystemCommandParser {
         path = token;
       }
     }
-    return FilesystemCommand.path(type, path);
+    return FilesystemCommand.option(type, all ? "-a" : "", path);
   }
 
   private static FilesystemCommand parseCat(String[] tokens) {
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/node/FsNodeType.java 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/node/FsNodeType.java
index a38ac575520..df63ef5c817 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/node/FsNodeType.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/node/FsNodeType.java
@@ -30,5 +30,8 @@ public enum FsNodeType {
   TABLE_TABLE,
   TABLE_VIEW,
   TABLE_COLUMN,
+  TABLE_DATA_FILE,
+  TABLE_SCHEMA_FILE,
+  TABLE_META_FILE,
   UNKNOWN
 }
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java
 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java
index de92c9bf983..ac015bd19ae 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/FilesystemSchemaProvider.java
@@ -34,10 +34,18 @@ public interface FilesystemSchemaProvider {
 
   List<SqlRow> read(FsPath path, int limit) throws SQLException;
 
+  default List<String> readLines(FsPath path, int limit) throws SQLException {
+    throw new SQLException("Path is not readable as text: " + path);
+  }
+
   default List<SqlRow> tail(FsPath path, int limit) throws SQLException {
     throw new SQLException("Path does not support tail: " + path);
   }
 
+  default List<String> tailLines(FsPath path, int limit) throws SQLException {
+    throw new SQLException("Path does not support tail: " + path);
+  }
+
   default long count(FsPath path) throws SQLException {
     throw new SQLException("Path does not support count: " + path);
   }
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProvider.java
 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProvider.java
index 153bd53d9ae..d8220fee7f7 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProvider.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProvider.java
@@ -29,6 +29,7 @@ public class TableFilesystemMutationProvider implements 
FilesystemMutationProvid
 
   private static final String INVALID_WRITE_OPERATION =
       "Invalid filesystem write operation for this path";
+  private static final String CSV_SUFFIX = ".csv";
 
   private final SqlExecutor executor;
 
@@ -46,7 +47,7 @@ public class TableFilesystemMutationProvider implements 
FilesystemMutationProvid
 
   @Override
   public void remove(FsPath path) throws SQLException {
-    if (path.getSegments().size() != 2) {
+    if (!isDataFile(path)) {
       throw invalidOperation();
     }
     executor.execute("DROP TABLE " + toTablePath(path));
@@ -54,13 +55,13 @@ public class TableFilesystemMutationProvider implements 
FilesystemMutationProvid
 
   @Override
   public void move(FsPath source, FsPath target) throws SQLException {
-    if (source.getSegments().size() != 2 || target.getSegments().size() != 2) {
+    if (!isDataFile(source) || !isDataFile(target)) {
       throw invalidOperation();
     }
     if (!parent(source).equals(parent(target))) {
       throw invalidOperation();
     }
-    executor.execute("ALTER TABLE " + toTablePath(source) + " RENAME TO " + 
target.getFileName());
+    executor.execute("ALTER TABLE " + toTablePath(source) + " RENAME TO " + 
tableName(target));
   }
 
   private static SQLException invalidOperation() {
@@ -69,7 +70,16 @@ public class TableFilesystemMutationProvider implements 
FilesystemMutationProvid
 
   private static String toTablePath(FsPath path) {
     List<String> segments = path.getSegments();
-    return segments.get(0) + "." + segments.get(1);
+    return segments.get(0) + "." + tableName(path);
+  }
+
+  private static boolean isDataFile(FsPath path) {
+    return path.getSegments().size() == 2 && 
path.getFileName().endsWith(CSV_SUFFIX);
+  }
+
+  private static String tableName(FsPath path) {
+    String fileName = path.getFileName();
+    return fileName.substring(0, fileName.length() - CSV_SUFFIX.length());
   }
 
   private static FsPath parent(FsPath path) {
diff --git 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProvider.java
 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProvider.java
index d735fa99a8b..879145b69d4 100644
--- 
a/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProvider.java
+++ 
b/iotdb-client/cli/src/main/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProvider.java
@@ -25,13 +25,33 @@ import org.apache.iotdb.cli.fs.path.FsPath;
 import org.apache.iotdb.cli.fs.sql.SqlExecutor;
 import org.apache.iotdb.cli.fs.sql.SqlRow;
 
+import org.apache.commons.csv.CSVFormat;
+import org.apache.commons.csv.CSVPrinter;
+
+import java.io.IOException;
+import java.io.StringWriter;
 import java.sql.SQLException;
 import java.util.ArrayList;
 import java.util.Collections;
+import java.util.LinkedHashMap;
 import java.util.List;
+import java.util.Map;
 
 public class TableFilesystemSchemaProvider implements FilesystemSchemaProvider 
{
 
+  private enum TableFileKind {
+    DATA_CSV,
+    SCHEMA,
+    META,
+    UNKNOWN
+  }
+
+  private static final String CSV_SUFFIX = ".csv";
+  private static final String SCHEMA_SUFFIX = ".schema";
+  private static final String META_SUFFIX = ".meta";
+  private static final CSVFormat CSV_FORMAT =
+      CSVFormat.DEFAULT.builder().setRecordSeparator("").build();
+
   private final SqlExecutor executor;
 
   public TableFilesystemSchemaProvider(SqlExecutor executor) {
@@ -45,9 +65,12 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
       return listDatabases();
     }
     if (depth == 1) {
-      return listTables(path);
+      return listTableFiles(path);
     }
     if (depth == 2) {
+      if (parseTableFile(path).kind != TableFileKind.UNKNOWN) {
+        return new ArrayList<>();
+      }
       return listColumns(path);
     }
     return new ArrayList<>();
@@ -63,6 +86,10 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
       return describeDatabase(path);
     }
     if (depth == 2) {
+      TableFileRef file = parseTableFile(path);
+      if (file.kind != TableFileKind.UNKNOWN) {
+        return describeTableFile(path, file);
+      }
       return describeTable(path);
     }
     String columnName = path.getFileName();
@@ -77,7 +104,11 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
   @Override
   public List<SqlRow> read(FsPath path, int limit) throws SQLException {
     int depth = path.getSegments().size();
-    if (depth == 2) {
+    TableFileRef file = parseTableFile(path);
+    if (depth == 2 && file.kind == TableFileKind.DATA_CSV) {
+      return executor.query("SELECT * FROM " + file.toTablePath() + " LIMIT " 
+ limit);
+    }
+    if (depth == 2 && file.kind == TableFileKind.UNKNOWN) {
       return executor.query("SELECT * FROM " + toTablePath(path) + " LIMIT " + 
limit);
     }
     if (depth == 3) {
@@ -88,11 +119,32 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
     throw new SQLException("Path is not readable: " + path);
   }
 
+  @Override
+  public List<String> readLines(FsPath path, int limit) throws SQLException {
+    TableFileRef file = parseTableFile(path);
+    if (file.kind == TableFileKind.DATA_CSV) {
+      return rowsToCsvLines(
+          executor.query("SELECT * FROM " + file.toTablePath() + " LIMIT " + 
limit));
+    }
+    if (file.kind == TableFileKind.SCHEMA) {
+      return rowsToCsvLines(executor.query("DESC " + file.toTablePath() + " 
DETAILS"));
+    }
+    if (file.kind == TableFileKind.META) {
+      return metaLines(file);
+    }
+    throw new SQLException("Path is not readable as text: " + path);
+  }
+
   @Override
   public List<SqlRow> tail(FsPath path, int limit) throws SQLException {
     int depth = path.getSegments().size();
+    TableFileRef file = parseTableFile(path);
     List<SqlRow> rows;
-    if (depth == 2) {
+    if (depth == 2 && file.kind == TableFileKind.DATA_CSV) {
+      rows =
+          executor.query(
+              "SELECT * FROM " + file.toTablePath() + " ORDER BY time DESC 
LIMIT " + limit);
+    } else if (depth == 2 && file.kind == TableFileKind.UNKNOWN) {
       rows =
           executor.query(
               "SELECT * FROM " + toTablePath(path) + " ORDER BY time DESC 
LIMIT " + limit);
@@ -113,11 +165,27 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
     return rows;
   }
 
+  @Override
+  public List<String> tailLines(FsPath path, int limit) throws SQLException {
+    TableFileRef file = parseTableFile(path);
+    if (file.kind != TableFileKind.DATA_CSV) {
+      throw new SQLException("Path does not support tail: " + path);
+    }
+    List<SqlRow> rows =
+        executor.query(
+            "SELECT * FROM " + file.toTablePath() + " ORDER BY time DESC LIMIT 
" + limit);
+    Collections.reverse(rows);
+    return rowsToCsvLines(rows);
+  }
+
   @Override
   public long count(FsPath path) throws SQLException {
     int depth = path.getSegments().size();
+    TableFileRef file = parseTableFile(path);
     List<SqlRow> rows;
-    if (depth == 2) {
+    if (depth == 2 && file.kind == TableFileKind.DATA_CSV) {
+      rows = executor.query("SELECT COUNT(*) FROM " + file.toTablePath());
+    } else if (depth == 2 && file.kind == TableFileKind.UNKNOWN) {
       rows = executor.query("SELECT COUNT(*) FROM " + toTablePath(path));
     } else if (depth == 3) {
       String tablePath = toTablePath(parent(path));
@@ -174,18 +242,63 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
     return new FsNode(table, path, FsNodeType.UNKNOWN);
   }
 
+  private FsNode describeTableFile(FsPath path, TableFileRef file) throws 
SQLException {
+    for (String table : listTableNames(parent(path))) {
+      if (table.equals(file.table)) {
+        return new FsNode(file.fileName(), path, file.nodeType(), 
file.metadata());
+      }
+    }
+    return new FsNode(path.getFileName(), path, FsNodeType.UNKNOWN);
+  }
+
   private List<FsNode> listTables(FsPath databasePath) throws SQLException {
+    List<FsNode> nodes = new ArrayList<>();
+    for (String table : listTableNames(databasePath)) {
+      nodes.add(
+          new FsNode(
+              table,
+              FsPath.absolute("/" + databasePath.getFileName() + "/" + table),
+              FsNodeType.TABLE_TABLE));
+    }
+    return nodes;
+  }
+
+  private List<FsNode> listTableFiles(FsPath databasePath) throws SQLException 
{
     String database = databasePath.getFileName();
     List<FsNode> nodes = new ArrayList<>();
+    for (String table : listTableNames(databasePath)) {
+      nodes.add(
+          new FsNode(
+              table + CSV_SUFFIX,
+              FsPath.absolute("/" + database + "/" + table + CSV_SUFFIX),
+              FsNodeType.TABLE_DATA_FILE,
+              tableFileMetadata(database, table, TableFileKind.DATA_CSV)));
+      nodes.add(
+          new FsNode(
+              table + SCHEMA_SUFFIX,
+              FsPath.absolute("/" + database + "/" + table + SCHEMA_SUFFIX),
+              FsNodeType.TABLE_SCHEMA_FILE,
+              tableFileMetadata(database, table, TableFileKind.SCHEMA)));
+      nodes.add(
+          new FsNode(
+              table + META_SUFFIX,
+              FsPath.absolute("/" + database + "/" + table + META_SUFFIX),
+              FsNodeType.TABLE_META_FILE,
+              tableFileMetadata(database, table, TableFileKind.META)));
+    }
+    return nodes;
+  }
+
+  private List<String> listTableNames(FsPath databasePath) throws SQLException 
{
+    String database = databasePath.getFileName();
+    List<String> tables = new ArrayList<>();
     for (SqlRow row : executor.query("SHOW TABLES FROM " + database)) {
       String table = row.get("TableName");
       if (table != null) {
-        nodes.add(
-            new FsNode(
-                table, FsPath.absolute("/" + database + "/" + table), 
FsNodeType.TABLE_TABLE));
+        tables.add(table);
       }
     }
-    return nodes;
+    return tables;
   }
 
   private List<FsNode> listColumns(FsPath tablePath) throws SQLException {
@@ -212,6 +325,75 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
     return segments.get(0) + "." + segments.get(1);
   }
 
+  private List<String> metaLines(TableFileRef file) throws SQLException {
+    List<SqlRow> rows = new ArrayList<>();
+    for (SqlRow row : executor.query("SHOW TABLES DETAILS FROM " + 
file.database)) {
+      if (file.table.equals(row.get("TableName"))) {
+        rows.add(row);
+      }
+    }
+    return rowsToCsvLines(rows);
+  }
+
+  private static List<String> rowsToCsvLines(List<SqlRow> rows) throws 
SQLException {
+    if (rows.isEmpty()) {
+      return new ArrayList<>();
+    }
+    List<String> lines = new ArrayList<>();
+    List<String> headers = new ArrayList<>(rows.get(0).asMap().keySet());
+    lines.add(csvRecord(headers));
+    for (SqlRow row : rows) {
+      List<String> values = new ArrayList<>();
+      for (String header : headers) {
+        values.add(row.get(header));
+      }
+      lines.add(csvRecord(values));
+    }
+    return lines;
+  }
+
+  private static String csvRecord(List<String> values) throws SQLException {
+    try {
+      StringWriter writer = new StringWriter();
+      try (CSVPrinter printer = new CSVPrinter(writer, CSV_FORMAT)) {
+        printer.printRecord(values);
+      }
+      return writer.toString();
+    } catch (IOException e) {
+      throw new SQLException("Failed to format CSV output", e);
+    }
+  }
+
+  private static TableFileRef parseTableFile(FsPath path) {
+    List<String> segments = path.getSegments();
+    if (segments.size() != 2) {
+      return TableFileRef.unknown(path);
+    }
+    String fileName = segments.get(1);
+    if (fileName.endsWith(CSV_SUFFIX)) {
+      return new TableFileRef(
+          segments.get(0), removeSuffix(fileName, CSV_SUFFIX), 
TableFileKind.DATA_CSV);
+    }
+    if (fileName.endsWith(SCHEMA_SUFFIX)) {
+      return new TableFileRef(
+          segments.get(0), removeSuffix(fileName, SCHEMA_SUFFIX), 
TableFileKind.SCHEMA);
+    }
+    if (fileName.endsWith(META_SUFFIX)) {
+      return new TableFileRef(
+          segments.get(0), removeSuffix(fileName, META_SUFFIX), 
TableFileKind.META);
+    }
+    return TableFileRef.unknown(path);
+  }
+
+  private static String removeSuffix(String value, String suffix) {
+    return value.substring(0, value.length() - suffix.length());
+  }
+
+  private static Map<String, String> tableFileMetadata(
+      String database, String table, TableFileKind kind) {
+    return new TableFileRef(database, table, kind).metadata();
+  }
+
   private static String columnList(List<FsPath> paths) {
     StringBuilder builder = new StringBuilder();
     for (FsPath path : paths) {
@@ -241,4 +423,63 @@ public class TableFilesystemSchemaProvider implements 
FilesystemSchemaProvider {
     }
     return FsPath.absolute(builder.toString());
   }
+
+  private static class TableFileRef {
+    private final String database;
+    private final String table;
+    private final TableFileKind kind;
+
+    private TableFileRef(String database, String table, TableFileKind kind) {
+      this.database = database;
+      this.table = table;
+      this.kind = kind;
+    }
+
+    private static TableFileRef unknown(FsPath path) {
+      List<String> segments = path.getSegments();
+      String database = segments.isEmpty() ? "" : segments.get(0);
+      String table = segments.size() < 2 ? "" : segments.get(1);
+      return new TableFileRef(database, table, TableFileKind.UNKNOWN);
+    }
+
+    private String fileName() {
+      switch (kind) {
+        case DATA_CSV:
+          return table + CSV_SUFFIX;
+        case SCHEMA:
+          return table + SCHEMA_SUFFIX;
+        case META:
+          return table + META_SUFFIX;
+        case UNKNOWN:
+        default:
+          return table;
+      }
+    }
+
+    private String toTablePath() {
+      return database + "." + table;
+    }
+
+    private FsNodeType nodeType() {
+      switch (kind) {
+        case DATA_CSV:
+          return FsNodeType.TABLE_DATA_FILE;
+        case SCHEMA:
+          return FsNodeType.TABLE_SCHEMA_FILE;
+        case META:
+          return FsNodeType.TABLE_META_FILE;
+        case UNKNOWN:
+        default:
+          return FsNodeType.UNKNOWN;
+      }
+    }
+
+    private Map<String, String> metadata() {
+      Map<String, String> metadata = new LinkedHashMap<>();
+      metadata.put("database", database);
+      metadata.put("table", table);
+      metadata.put("format", kind == TableFileKind.DATA_CSV ? "csv" : 
kind.name().toLowerCase());
+      return metadata;
+    }
+  }
 }
diff --git 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/FilesystemShellTest.java
 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/FilesystemShellTest.java
index 7461aad1367..3911b65f382 100644
--- 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/FilesystemShellTest.java
+++ 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/FilesystemShellTest.java
@@ -46,6 +46,7 @@ import java.util.Arrays;
 import java.util.List;
 import java.util.stream.Collectors;
 
+import static org.junit.Assert.assertEquals;
 import static org.junit.Assert.assertFalse;
 import static org.junit.Assert.assertTrue;
 import static org.mockito.Mockito.times;
@@ -91,7 +92,8 @@ public class FilesystemShellTest {
 
     assertTrue(shell.execute("ls /"));
 
-    assertTrue(out.toString().contains("root,test"));
+    assertEquals("root" + System.lineSeparator() + "test" + 
System.lineSeparator(), out.toString());
+    assertFalse(out.toString().contains(","));
     assertFalse(out.toString().contains("TREE_ROOT"));
     verify(provider).list(FsPath.absolute("/"));
   }
@@ -126,6 +128,42 @@ public class FilesystemShellTest {
     verify(provider).list(FsPath.absolute("/"));
   }
 
+  @Test
+  public void executeLsAllPrintsDotEntries() throws SQLException {
+    when(provider.list(FsPath.absolute("/")))
+        .thenReturn(
+            Arrays.asList(
+                new FsNode("testtest", FsPath.absolute("/testtest"), 
FsNodeType.TABLE_DATABASE)));
+
+    assertTrue(shell.execute("ls -a /"));
+
+    assertEquals(
+        "."
+            + System.lineSeparator()
+            + ".."
+            + System.lineSeparator()
+            + "testtest"
+            + System.lineSeparator(),
+        out.toString());
+    verify(provider).list(FsPath.absolute("/"));
+  }
+
+  @Test
+  public void executeLlAllPrintsDotEntriesInLongListing() throws SQLException {
+    when(provider.list(FsPath.absolute("/")))
+        .thenReturn(
+            Arrays.asList(
+                new FsNode("testtest", FsPath.absolute("/testtest"), 
FsNodeType.TABLE_DATABASE)));
+
+    assertTrue(shell.execute("ll -a /"));
+
+    assertTrue(out.toString().contains("dr-xr-xr-x  1 iotdb iotdb 0 ."));
+    assertTrue(out.toString().contains("dr-xr-xr-x  1 iotdb iotdb 0 .."));
+    assertTrue(out.toString().contains("dr-xr-xr-x  1 iotdb iotdb 0 
testtest"));
+    assertFalse(out.toString().contains("-a"));
+    verify(provider).list(FsPath.absolute("/"));
+  }
+
   @Test
   public void executeCdUpdatesCurrentPath() throws SQLException {
     when(provider.describe(FsPath.absolute("/root")))
@@ -155,12 +193,13 @@ public class FilesystemShellTest {
     shell = new FilesystemShell(shellContext(), provider, mutationProvider, 
true);
 
     assertTrue(shell.execute("mkdir /db1"));
-    assertTrue(shell.execute("rm /db1/table1"));
-    assertTrue(shell.execute("mv /db1/table1 /db1/table2"));
+    assertTrue(shell.execute("rm /db1/table1.csv"));
+    assertTrue(shell.execute("mv /db1/table1.csv /db1/table2.csv"));
 
     verify(mutationProvider).mkdir(FsPath.absolute("/db1"));
-    verify(mutationProvider).remove(FsPath.absolute("/db1/table1"));
-    verify(mutationProvider).move(FsPath.absolute("/db1/table1"), 
FsPath.absolute("/db1/table2"));
+    verify(mutationProvider).remove(FsPath.absolute("/db1/table1.csv"));
+    verify(mutationProvider)
+        .move(FsPath.absolute("/db1/table1.csv"), 
FsPath.absolute("/db1/table2.csv"));
   }
 
   @Test
@@ -194,6 +233,18 @@ public class FilesystemShellTest {
     verify(provider).read(FsPath.absolute("/db1/table1"), 20);
   }
 
+  @Test
+  public void executeCatPrintsCsvFileLines() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 20))
+        .thenReturn(Arrays.asList("Time,tag1,s1", "1,a,42"));
+
+    assertTrue(shell.execute("cat /db1/table1.csv"));
+
+    assertTrue(out.toString().contains("Time,tag1,s1"));
+    assertTrue(out.toString().contains("1,a,42"));
+    verify(provider).readLines(FsPath.absolute("/db1/table1.csv"), 20);
+  }
+
   @Test
   public void executeCatReadsMultiplePathsSequentially() throws SQLException {
     when(provider.read(FsPath.absolute("/db1/table1/tag1"), 20))
@@ -220,6 +271,18 @@ public class FilesystemShellTest {
     verify(provider).read(FsPath.absolute("/db1/table1"), 5);
   }
 
+  @Test
+  public void executeHeadReadsCsvFileLinesWithLimit() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 5))
+        .thenReturn(Arrays.asList("Time,tag1,s1", "1,a,42"));
+
+    assertTrue(shell.execute("head -n 5 /db1/table1.csv"));
+
+    assertTrue(out.toString().contains("Time,tag1,s1"));
+    assertTrue(out.toString().contains("1,a,42"));
+    verify(provider).readLines(FsPath.absolute("/db1/table1.csv"), 5);
+  }
+
   @Test
   public void executeTailReadsPathWithLimit() throws SQLException {
     when(provider.tail(FsPath.absolute("/db1/table1"), 3))
@@ -231,14 +294,38 @@ public class FilesystemShellTest {
     verify(provider).tail(FsPath.absolute("/db1/table1"), 3);
   }
 
+  @Test
+  public void executeTailReadsCsvFileLinesWithLimit() throws SQLException {
+    when(provider.tailLines(FsPath.absolute("/db1/table1.csv"), 3))
+        .thenReturn(Arrays.asList("Time,tag1,s1", "2,b,43"));
+
+    assertTrue(shell.execute("tail -n 3 /db1/table1.csv"));
+
+    assertTrue(out.toString().contains("Time,tag1,s1"));
+    assertTrue(out.toString().contains("2,b,43"));
+    verify(provider).tailLines(FsPath.absolute("/db1/table1.csv"), 3);
+  }
+
   @Test
   public void executeWcLineCountPrintsCountAndPath() throws SQLException {
-    when(provider.count(FsPath.absolute("/db1/table1"))).thenReturn(2L);
+    when(provider.count(FsPath.absolute("/db1/table1.csv"))).thenReturn(2L);
+
+    assertTrue(shell.execute("wc -l /db1/table1.csv"));
 
-    assertTrue(shell.execute("wc -l /db1/table1"));
+    assertTrue(out.toString().contains("2 /db1/table1.csv"));
+    verify(provider).count(FsPath.absolute("/db1/table1.csv"));
+  }
+
+  @Test
+  public void executeGrepFiltersCsvFileLines() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 20))
+        .thenReturn(Arrays.asList("Time,tag1,s1", "1,spricoder,42", 
"2,other,43"));
+
+    assertTrue(shell.execute("grep spricoder /db1/table1.csv"));
 
-    assertTrue(out.toString().contains("2 /db1/table1"));
-    verify(provider).count(FsPath.absolute("/db1/table1"));
+    assertTrue(out.toString().contains("1,spricoder,42"));
+    assertFalse(out.toString().contains("2,other,43"));
+    verify(provider).readLines(FsPath.absolute("/db1/table1.csv"), 20);
   }
 
   @Test
@@ -293,24 +380,57 @@ public class FilesystemShellTest {
   }
 
   @Test
-  public void executeFilePrintsNodeType() throws SQLException {
+  public void executeLessAndMoreReadCsvFileLines() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 20))
+        .thenReturn(Arrays.asList("Time,tag1,s1", "1,a,42"));
+
+    assertTrue(shell.execute("less /db1/table1.csv"));
+    assertTrue(shell.execute("more /db1/table1.csv"));
+
+    assertTrue(out.toString().contains("Time,tag1,s1"));
+    assertTrue(out.toString().contains("1,a,42"));
+    verify(provider, times(2)).readLines(FsPath.absolute("/db1/table1.csv"), 
20);
+  }
+
+  @Test
+  public void executeStatPrintsUnixStyleMetadata() throws SQLException {
+    when(provider.describe(FsPath.absolute("/db1/table1.csv")))
+        .thenReturn(
+            new FsNode(
+                "table1.csv",
+                FsPath.absolute("/db1/table1.csv"),
+                FsNodeType.TABLE_DATA_FILE,
+                java.util.Collections.singletonMap("table", "table1")));
+
+    assertTrue(shell.execute("stat /db1/table1.csv"));
+
+    assertTrue(out.toString().contains("File: /db1/table1.csv"));
+    assertTrue(out.toString().contains("Type: regular file"));
+    assertTrue(out.toString().contains("table: table1"));
+    assertFalse(out.toString().contains("TABLE_DATA_FILE"));
+    verify(provider).describe(FsPath.absolute("/db1/table1.csv"));
+  }
+
+  @Test
+  public void executeFilePrintsUnixFileType() throws SQLException {
     when(provider.describe(FsPath.absolute("/db1/table1")))
         .thenReturn(new FsNode("table1", FsPath.absolute("/db1/table1"), 
FsNodeType.TABLE_TABLE));
 
     assertTrue(shell.execute("file /db1/table1"));
 
-    assertTrue(out.toString().contains("/db1/table1: TABLE_TABLE"));
+    assertTrue(out.toString().contains("/db1/table1: directory"));
+    assertFalse(out.toString().contains("TABLE_TABLE"));
     verify(provider).describe(FsPath.absolute("/db1/table1"));
   }
 
   @Test
   public void executeDuPrintsLogicalSizeAndPath() throws SQLException {
-    when(provider.count(FsPath.absolute("/db1/table1"))).thenReturn(2L);
+    when(provider.count(FsPath.absolute("/db1/table1.csv"))).thenReturn(2L);
 
-    assertTrue(shell.execute("du /db1/table1"));
+    assertTrue(shell.execute("du /db1/table1.csv"));
 
-    assertTrue(out.toString().contains("2\t/db1/table1"));
-    verify(provider).count(FsPath.absolute("/db1/table1"));
+    assertTrue(out.toString().contains("2\t/db1/table1.csv"));
+    verify(provider).count(FsPath.absolute("/db1/table1.csv"));
   }
 
   @Test
@@ -330,6 +450,37 @@ public class FilesystemShellTest {
             20);
   }
 
+  @Test
+  public void executeCutSelectsCsvFieldsByNumber() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 20))
+        .thenReturn(Arrays.asList("time,key,value", "1,spricoder,2.0", 
"2,other,3.0"));
+
+    assertTrue(shell.execute("cut -d, -f2,3 /db1/table1.csv"));
+
+    assertEquals(
+        "key,value"
+            + System.lineSeparator()
+            + "spricoder,2.0"
+            + System.lineSeparator()
+            + "other,3.0"
+            + System.lineSeparator(),
+        out.toString());
+    verify(provider).readLines(FsPath.absolute("/db1/table1.csv"), 20);
+  }
+
+  @Test
+  public void executeCutSelectsCsvFieldRange() throws SQLException {
+    when(provider.readLines(FsPath.absolute("/db1/table1.csv"), 20))
+        .thenReturn(Arrays.asList("time,key,value", "1,spricoder,2.0"));
+
+    assertTrue(shell.execute("cut -d, -f1-2 /db1/table1.csv"));
+
+    assertEquals(
+        "time,key" + System.lineSeparator() + "1,spricoder" + 
System.lineSeparator(),
+        out.toString());
+    verify(provider).readLines(FsPath.absolute("/db1/table1.csv"), 20);
+  }
+
   @Test
   public void completerCompletesChildrenFromCurrentDirectory() throws 
SQLException {
     when(provider.list(FsPath.absolute("/")))
diff --git 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParserTest.java
 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParserTest.java
index 7224876d239..8039335d797 100644
--- 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParserTest.java
+++ 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/command/FilesystemCommandParserTest.java
@@ -43,12 +43,31 @@ public class FilesystemCommandParserTest {
     assertEquals("/db1", command.getPath());
   }
 
+  @Test
+  public void parseLlAllOptionAsCurrentDirectoryLongListCommand() {
+    FilesystemCommand command = FilesystemCommandParser.parse("ll -a");
+
+    assertEquals(FilesystemCommand.Type.LL, command.getType());
+    assertEquals(".", command.getPath());
+    assertEquals("-a", command.getOption());
+  }
+
+  @Test
+  public void parseLlCombinedOptionsAndPath() {
+    FilesystemCommand command = FilesystemCommandParser.parse("ll -al /db1");
+
+    assertEquals(FilesystemCommand.Type.LL, command.getType());
+    assertEquals("/db1", command.getPath());
+    assertEquals("-a", command.getOption());
+  }
+
   @Test
   public void parseLsLongOptionAsLongListCommand() {
     FilesystemCommand command = FilesystemCommandParser.parse("ls -la /db1");
 
     assertEquals(FilesystemCommand.Type.LL, command.getType());
     assertEquals("/db1", command.getPath());
+    assertEquals("-a", command.getOption());
   }
 
   @Test
@@ -146,6 +165,35 @@ public class FilesystemCommandParserTest {
     assertEquals("/db1/table1/s1", command.getPaths().get(1));
   }
 
+  @Test
+  public void parseCutDelimiterFieldsAndPath() {
+    FilesystemCommand command = FilesystemCommandParser.parse("cut -d, -f2,3 
/db1/table1.csv");
+
+    assertEquals(FilesystemCommand.Type.CUT, command.getType());
+    assertEquals(",", command.getOption());
+    assertEquals("2,3", command.getPattern());
+    assertEquals("/db1/table1.csv", command.getPath());
+  }
+
+  @Test
+  public void parseCutSeparatedOptionArguments() {
+    FilesystemCommand command = FilesystemCommandParser.parse("cut -d , -f 1-2 
/db1/table1.csv");
+
+    assertEquals(FilesystemCommand.Type.CUT, command.getType());
+    assertEquals(",", command.getOption());
+    assertEquals("1-2", command.getPattern());
+    assertEquals("/db1/table1.csv", command.getPath());
+  }
+
+  @Test
+  public void parseCutRequiresFieldsAndPath() {
+    assertEquals(
+        FilesystemCommand.Type.INVALID,
+        FilesystemCommandParser.parse("cut -d, /db1/table1.csv").getType());
+    assertEquals(
+        FilesystemCommand.Type.INVALID, FilesystemCommandParser.parse("cut 
-f2,3").getType());
+  }
+
   @Test
   public void parseWriteCommands() {
     FilesystemCommand mkdir = FilesystemCommandParser.parse("mkdir /db1");
diff --git 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProviderTest.java
 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProviderTest.java
index 955046688c4..2403bffffb3 100644
--- 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProviderTest.java
+++ 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemMutationProviderTest.java
@@ -55,26 +55,29 @@ public class TableFilesystemMutationProviderTest {
   @Test
   public void mkdirRejectsRootAndTableLevel() throws SQLException {
     assertInvalidOperation(() -> provider.mkdir(FsPath.absolute("/")));
-    assertInvalidOperation(() -> 
provider.mkdir(FsPath.absolute("/db1/table1")));
+    assertInvalidOperation(() -> 
provider.mkdir(FsPath.absolute("/db1/table1.csv")));
   }
 
   @Test
-  public void removeTableDropsTable() throws SQLException {
-    provider.remove(FsPath.absolute("/db1/table1"));
+  public void removeTableCsvDropsTable() throws SQLException {
+    provider.remove(FsPath.absolute("/db1/table1.csv"));
 
     verify(executor).execute("DROP TABLE db1.table1");
   }
 
   @Test
-  public void removeRejectsRootDatabaseAndColumnLevel() throws SQLException {
+  public void removeRejectsRootDatabaseSchemaMetaAndLegacyTableLevel() throws 
SQLException {
     assertInvalidOperation(() -> provider.remove(FsPath.absolute("/")));
     assertInvalidOperation(() -> provider.remove(FsPath.absolute("/db1")));
+    assertInvalidOperation(() -> 
provider.remove(FsPath.absolute("/db1/table1")));
+    assertInvalidOperation(() -> 
provider.remove(FsPath.absolute("/db1/table1.schema")));
+    assertInvalidOperation(() -> 
provider.remove(FsPath.absolute("/db1/table1.meta")));
     assertInvalidOperation(() -> 
provider.remove(FsPath.absolute("/db1/table1/s1")));
   }
 
   @Test
-  public void moveTableRenamesTableInSameDatabase() throws SQLException {
-    provider.move(FsPath.absolute("/db1/table1"), 
FsPath.absolute("/db1/table2"));
+  public void moveTableCsvRenamesTableInSameDatabase() throws SQLException {
+    provider.move(FsPath.absolute("/db1/table1.csv"), 
FsPath.absolute("/db1/table2.csv"));
 
     verify(executor).execute("ALTER TABLE db1.table1 RENAME TO table2");
   }
@@ -82,10 +85,17 @@ public class TableFilesystemMutationProviderTest {
   @Test
   public void moveRejectsUnsafeLevelsAndCrossDatabaseRename() throws 
SQLException {
     assertInvalidOperation(() -> provider.move(FsPath.absolute("/db1"), 
FsPath.absolute("/db2")));
+    assertInvalidOperation(
+        () -> provider.move(FsPath.absolute("/db1/table1"), 
FsPath.absolute("/db1/table2")));
+    assertInvalidOperation(
+        () ->
+            provider.move(
+                FsPath.absolute("/db1/table1.schema"), 
FsPath.absolute("/db1/table2.schema")));
     assertInvalidOperation(
         () -> provider.move(FsPath.absolute("/db1/table1/s1"), 
FsPath.absolute("/db1/table1/s2")));
     assertInvalidOperation(
-        () -> provider.move(FsPath.absolute("/db1/table1"), 
FsPath.absolute("/db2/table1")));
+        () ->
+            provider.move(FsPath.absolute("/db1/table1.csv"), 
FsPath.absolute("/db2/table1.csv")));
   }
 
   private static void assertInvalidOperation(SqlOperation operation) throws 
SQLException {
diff --git 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProviderTest.java
 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProviderTest.java
index 9d7c1bb6715..b99a30761d7 100644
--- 
a/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProviderTest.java
+++ 
b/iotdb-client/cli/src/test/java/org/apache/iotdb/cli/fs/provider/TableFilesystemSchemaProviderTest.java
@@ -71,13 +71,31 @@ public class TableFilesystemSchemaProviderTest {
 
     List<FsNode> children = provider.list(FsPath.absolute("/db1"));
 
-    assertEquals(2, children.size());
-    assertEquals("/db1/table1", children.get(0).getPath().toString());
-    assertEquals(FsNodeType.TABLE_TABLE, children.get(0).getType());
-    assertEquals("/db1/view1", children.get(1).getPath().toString());
+    assertEquals(6, children.size());
+    assertEquals("table1.csv", children.get(0).getName());
+    assertEquals("/db1/table1.csv", children.get(0).getPath().toString());
+    assertEquals(FsNodeType.TABLE_DATA_FILE, children.get(0).getType());
+    assertEquals("table1.schema", children.get(1).getName());
+    assertEquals(FsNodeType.TABLE_SCHEMA_FILE, children.get(1).getType());
+    assertEquals("table1.meta", children.get(2).getName());
+    assertEquals(FsNodeType.TABLE_META_FILE, children.get(2).getType());
+    assertEquals("view1.csv", children.get(3).getName());
     verify(executor).query("SHOW TABLES FROM db1");
   }
 
+  @Test
+  public void describeTableCsvReturnsDataFileNode() throws SQLException {
+    when(executor.query("SHOW TABLES FROM db1"))
+        .thenReturn(SqlRow.list(SqlRow.of("TableName", "table1")));
+
+    FsNode node = provider.describe(FsPath.absolute("/db1/table1.csv"));
+
+    assertEquals("table1.csv", node.getName());
+    assertEquals(FsNodeType.TABLE_DATA_FILE, node.getType());
+    assertEquals("table1", node.getMetadata().get("table"));
+    assertEquals("csv", node.getMetadata().get("format"));
+  }
+
   @Test
   public void listTableReturnsColumns() throws SQLException {
     when(executor.query("DESC db1.table1 DETAILS"))
@@ -171,6 +189,66 @@ public class TableFilesystemSchemaProviderTest {
     verify(executor).query("SELECT * FROM db1.table1 LIMIT 5");
   }
 
+  @Test
+  public void readTableCsvReturnsCsvLinesWithHeader() throws SQLException {
+    when(executor.query("SELECT * FROM db1.table1 LIMIT 5"))
+        .thenReturn(SqlRow.list(SqlRow.of("Time", "1", "tag1", "a", "s1", 
"42")));
+
+    List<String> lines = 
provider.readLines(FsPath.absolute("/db1/table1.csv"), 5);
+
+    assertEquals("Time,tag1,s1", lines.get(0));
+    assertEquals("1,a,42", lines.get(1));
+    verify(executor).query("SELECT * FROM db1.table1 LIMIT 5");
+  }
+
+  @Test
+  public void readTableSchemaReturnsIoTDBDescCsvLines() throws SQLException {
+    when(executor.query("DESC db1.table1 DETAILS"))
+        .thenReturn(
+            SqlRow.list(
+                SqlRow.of("ColumnName", "tag1", "DataType", "STRING", 
"Category", "TAG"),
+                SqlRow.of("ColumnName", "s1", "DataType", "INT32", "Category", 
"FIELD")));
+
+    List<String> lines = 
provider.readLines(FsPath.absolute("/db1/table1.schema"), 5);
+
+    assertEquals("ColumnName,DataType,Category", lines.get(0));
+    assertEquals("tag1,STRING,TAG", lines.get(1));
+    assertEquals("s1,INT32,FIELD", lines.get(2));
+    verify(executor).query("DESC db1.table1 DETAILS");
+  }
+
+  @Test
+  public void readTableMetaReturnsIoTDBTableCsvLines() throws SQLException {
+    when(executor.query("SHOW TABLES DETAILS FROM db1"))
+        .thenReturn(
+            SqlRow.list(
+                SqlRow.of(
+                    "TableName",
+                    "table1",
+                    "TTL(ms)",
+                    "3600000",
+                    "Status",
+                    "USING",
+                    "Comment",
+                    "main table"),
+                SqlRow.of(
+                    "TableName",
+                    "table2",
+                    "TTL(ms)",
+                    "INF",
+                    "Status",
+                    "USING",
+                    "Comment",
+                    "archive")));
+
+    List<String> lines = 
provider.readLines(FsPath.absolute("/db1/table1.meta"), 5);
+
+    assertEquals("TableName,TTL(ms),Status,Comment", lines.get(0));
+    assertEquals("table1,3600000,USING,main table", lines.get(1));
+    assertEquals(2, lines.size());
+    verify(executor).query("SHOW TABLES DETAILS FROM db1");
+  }
+
   @Test
   public void tailTableSelectsNewestRowsAndReturnsOriginalOrder() throws 
SQLException {
     when(executor.query("SELECT * FROM db1.table1 ORDER BY time DESC LIMIT 2"))
@@ -191,7 +269,7 @@ public class TableFilesystemSchemaProviderTest {
     when(executor.query("SELECT COUNT(*) FROM db1.table1"))
         .thenReturn(SqlRow.list(SqlRow.of("count", "2")));
 
-    long count = provider.count(FsPath.absolute("/db1/table1"));
+    long count = provider.count(FsPath.absolute("/db1/table1.csv"));
 
     assertEquals(2L, count);
     verify(executor).query("SELECT COUNT(*) FROM db1.table1");


Reply via email to