This is an automated email from the ASF dual-hosted git repository.
lichaoyong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-doris.git
The following commit(s) were added to refs/heads/master by this push:
new 6c4d7c6 [Feature] Add QueryDetail to store query statistics. (#3744)
6c4d7c6 is described below
commit 6c4d7c60dd03386c4186c7a4a1eb1d609ded8278
Author: lichaoyong <[email protected]>
AuthorDate: Mon Jun 15 18:16:54 2020 +0800
[Feature] Add QueryDetail to store query statistics. (#3744)
1. Store the query statistics in memory.
2. Support a RESTful interface to get the statistics.
---
be/src/http/action/stream_load.cpp | 4 +-
be/src/runtime/client_cache.cpp | 4 +-
be/src/runtime/memory/chunk_allocator.cpp | 8 +-
be/src/runtime/mysql_result_writer.cpp | 9 +-
be/src/runtime/tmp_file_mgr.cc | 2 +-
be/src/util/doris_metrics.cpp | 5 +-
be/src/util/doris_metrics.h | 170 ++++++++++-----------
be/src/util/metrics.cpp | 12 +-
be/src/util/metrics.h | 7 +-
be/src/util/system_metrics.cpp | 12 +-
be/src/util/thrift_server.cpp | 4 +-
be/test/http/metrics_action_test.cpp | 2 +-
be/test/util/doris_metrics_test.cpp | 12 --
be/test/util/new_metrics_test.cpp | 18 +--
docs/.vuepress/sidebar/en.js | 4 +
docs/.vuepress/sidebar/zh-CN.js | 4 +
.../http-actions/connection-action.md | 42 +++++
.../http-actions/profile-action.md | 120 +++++++++++++++
.../http-actions/query-detail-action.md | 61 ++++++++
.../http-actions/show-data-action.md | 35 +++++
.../http-actions/connection-action.md | 42 +++++
.../http-actions/profile-action.md | 120 +++++++++++++++
.../http-actions/query-detail-action.md | 59 +++++++
.../http-actions/show-data-action.md | 35 +++++
fe/src/main/java/org/apache/doris/PaloFe.java | 1 -
.../org/apache/doris/common/ThreadPoolManager.java | 2 +-
.../apache/doris/common/util/RuntimeProfile.java | 2 +-
.../java/org/apache/doris/http/HttpServer.java | 8 +
.../apache/doris/http/rest/ConnectionAction.java | 61 ++++++++
.../org/apache/doris/http/rest/ProfileAction.java | 60 ++++++++
.../apache/doris/http/rest/QueryDetailAction.java | 56 +++++++
.../org/apache/doris/http/rest/ShowDataAction.java | 88 +++++++++++
.../org/apache/doris/master/ReportHandler.java | 2 +-
.../main/java/org/apache/doris/metric/Metric.java | 7 +-
.../java/org/apache/doris/metric/MetricRepo.java | 48 +++---
.../java/org/apache/doris/qe/ConnectContext.java | 12 ++
.../java/org/apache/doris/qe/ConnectProcessor.java | 10 +-
.../main/java/org/apache/doris/qe/QueryDetail.java | 129 ++++++++++++++++
.../java/org/apache/doris/qe/QueryDetailQueue.java | 69 +++++++++
.../java/org/apache/doris/qe/StmtExecutor.java | 18 ++-
.../doris/common/util/RuntimeProfileTest.java | 2 +-
.../org/apache/doris/qe/QueryDetailQueueTest.java | 73 +++++++++
42 files changed, 1266 insertions(+), 173 deletions(-)
diff --git a/be/src/http/action/stream_load.cpp
b/be/src/http/action/stream_load.cpp
index 1322e85..798e702 100644
--- a/be/src/http/action/stream_load.cpp
+++ b/be/src/http/action/stream_load.cpp
@@ -59,10 +59,10 @@
namespace doris {
-METRIC_DEFINE_INT_COUNTER(streaming_load_requests_total, MetricUnit::NUMBER);
+METRIC_DEFINE_INT_COUNTER(streaming_load_requests_total, MetricUnit::REQUESTS);
METRIC_DEFINE_INT_COUNTER(streaming_load_bytes, MetricUnit::BYTES);
METRIC_DEFINE_INT_COUNTER(streaming_load_duration_ms,
MetricUnit::MILLISECONDS);
-METRIC_DEFINE_INT_GAUGE(streaming_load_current_processing, MetricUnit::NUMBER);
+METRIC_DEFINE_INT_GAUGE(streaming_load_current_processing,
MetricUnit::REQUESTS);
#ifdef BE_TEST
TStreamLoadPutResult k_stream_load_put_result;
diff --git a/be/src/runtime/client_cache.cpp b/be/src/runtime/client_cache.cpp
index 7144684..832678d 100644
--- a/be/src/runtime/client_cache.cpp
+++ b/be/src/runtime/client_cache.cpp
@@ -216,12 +216,12 @@ void ClientCacheHelper::init_metrics(MetricRegistry*
metrics, const std::string&
// usage, but ensures that _metrics_enabled is published.
boost::lock_guard<boost::mutex> lock(_lock);
- _used_clients.reset(new IntGauge(MetricUnit::NUMBER));
+ _used_clients.reset(new IntGauge(MetricUnit::NOUNIT));
metrics->register_metric("thrift_used_clients",
MetricLabels().add("name", key_prefix),
_used_clients.get());
- _opened_clients.reset(new IntGauge(MetricUnit::NUMBER));
+ _opened_clients.reset(new IntGauge(MetricUnit::NOUNIT));
metrics->register_metric("thrift_opened_clients",
MetricLabels().add("name", key_prefix),
_opened_clients.get());
diff --git a/be/src/runtime/memory/chunk_allocator.cpp
b/be/src/runtime/memory/chunk_allocator.cpp
index c5e68f2..1ccbc5c 100644
--- a/be/src/runtime/memory/chunk_allocator.cpp
+++ b/be/src/runtime/memory/chunk_allocator.cpp
@@ -34,10 +34,10 @@ namespace doris {
ChunkAllocator* ChunkAllocator::_s_instance = nullptr;
-static IntCounter local_core_alloc_count(MetricUnit::NUMBER);
-static IntCounter other_core_alloc_count(MetricUnit::NUMBER);
-static IntCounter system_alloc_count(MetricUnit::NUMBER);
-static IntCounter system_free_count(MetricUnit::NUMBER);
+static IntCounter local_core_alloc_count(MetricUnit::NOUNIT);
+static IntCounter other_core_alloc_count(MetricUnit::NOUNIT);
+static IntCounter system_alloc_count(MetricUnit::NOUNIT);
+static IntCounter system_free_count(MetricUnit::NOUNIT);
static IntCounter system_alloc_cost_ns(MetricUnit::NANOSECONDS);
static IntCounter system_free_cost_ns(MetricUnit::NANOSECONDS);
diff --git a/be/src/runtime/mysql_result_writer.cpp
b/be/src/runtime/mysql_result_writer.cpp
index 036ec4a..0c8cc43 100644
--- a/be/src/runtime/mysql_result_writer.cpp
+++ b/be/src/runtime/mysql_result_writer.cpp
@@ -61,11 +61,10 @@ Status MysqlResultWriter::init(RuntimeState* state) {
}
void MysqlResultWriter::_init_profile() {
- RuntimeProfile* profile =
_parent_profile->create_child("MySQLResultWriter", true, true);
- _append_row_batch_timer = ADD_TIMER(profile, "AppendBatchTime");
- _convert_tuple_timer = ADD_CHILD_TIMER(profile, "TupleConvertTime",
"AppendBatchTime");
- _result_send_timer = ADD_CHILD_TIMER(profile, "ResultRendTime",
"AppendBatchTime");
- _sent_rows_counter = ADD_COUNTER(profile, "NumSentRows", TUnit::UNIT);
+ _append_row_batch_timer = ADD_TIMER(_parent_profile, "AppendBatchTime");
+ _convert_tuple_timer = ADD_CHILD_TIMER(_parent_profile,
"TupleConvertTime", "AppendBatchTime");
+ _result_send_timer = ADD_CHILD_TIMER(_parent_profile, "ResultRendTime",
"AppendBatchTime");
+ _sent_rows_counter = ADD_COUNTER(_parent_profile, "NumSentRows",
TUnit::UNIT);
}
Status MysqlResultWriter::_add_one_row(TupleRow* row) {
diff --git a/be/src/runtime/tmp_file_mgr.cc b/be/src/runtime/tmp_file_mgr.cc
index badc903..d286cea 100644
--- a/be/src/runtime/tmp_file_mgr.cc
+++ b/be/src/runtime/tmp_file_mgr.cc
@@ -119,7 +119,7 @@ Status TmpFileMgr::init_custom(
}
DCHECK(metrics != NULL);
- _num_active_scratch_dirs_metric.reset(new IntGauge(MetricUnit::NUMBER));
+ _num_active_scratch_dirs_metric.reset(new IntGauge(MetricUnit::NOUNIT));
metrics->register_metric("active_scratch_dirs",
_num_active_scratch_dirs_metric.get());
//_active_scratch_dirs_metric = metrics->register_metric(new
SetMetric<std::string>(
// TMP_FILE_MGR_ACTIVE_SCRATCH_DIRS_LIST,
diff --git a/be/src/util/doris_metrics.cpp b/be/src/util/doris_metrics.cpp
index 0853bd4..a90b0d0 100644
--- a/be/src/util/doris_metrics.cpp
+++ b/be/src/util/doris_metrics.cpp
@@ -34,11 +34,9 @@ DorisMetrics::DorisMetrics() : _name("doris_be"),
_hook_name("doris_metrics"), _
REGISTER_DORIS_METRIC(fragment_requests_total);
REGISTER_DORIS_METRIC(fragment_request_duration_us);
REGISTER_DORIS_METRIC(http_requests_total);
- REGISTER_DORIS_METRIC(http_request_duration_us);
REGISTER_DORIS_METRIC(http_request_send_bytes);
REGISTER_DORIS_METRIC(query_scan_bytes);
REGISTER_DORIS_METRIC(query_scan_rows);
- REGISTER_DORIS_METRIC(ranges_processed_total);
REGISTER_DORIS_METRIC(memtable_flush_total);
REGISTER_DORIS_METRIC(memtable_flush_duration_us);
@@ -180,7 +178,6 @@ DorisMetrics::DorisMetrics() : _name("doris_be"),
_hook_name("doris_metrics"), _
REGISTER_DORIS_METRIC(disk_sync_total);
REGISTER_DORIS_METRIC(blocks_open_reading);
REGISTER_DORIS_METRIC(blocks_open_writing);
- REGISTER_DORIS_METRIC(blocks_push_remote_duration_us);
}
void DorisMetrics::initialize(
@@ -196,7 +193,7 @@ void DorisMetrics::initialize(
_metrics.register_metric("disks_avail_capacity",
MetricLabels().add("path", path), gauge);
gauge = disks_data_used_capacity.set_key(path, MetricUnit::BYTES);
_metrics.register_metric("disks_data_used_capacity",
MetricLabels().add("path", path), gauge);
- gauge = disks_state.set_key(path, MetricUnit::BYTES);
+ gauge = disks_state.set_key(path, MetricUnit::NOUNIT);
_metrics.register_metric("disks_state", MetricLabels().add("path",
path), gauge);
}
diff --git a/be/src/util/doris_metrics.h b/be/src/util/doris_metrics.h
index e6775b5..1e802e2 100644
--- a/be/src/util/doris_metrics.h
+++ b/be/src/util/doris_metrics.h
@@ -55,67 +55,65 @@ private:
class DorisMetrics {
public:
// counters
- METRIC_DEFINE_INT_COUNTER(fragment_requests_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(fragment_requests_total,
MetricUnit::REQUESTS);
METRIC_DEFINE_INT_COUNTER(fragment_request_duration_us,
MetricUnit::MICROSECONDS);
- METRIC_DEFINE_INT_COUNTER(http_requests_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(http_request_duration_us,
MetricUnit::MICROSECONDS);
+ METRIC_DEFINE_INT_COUNTER(http_requests_total, MetricUnit::REQUESTS);
METRIC_DEFINE_INT_COUNTER(http_request_send_bytes, MetricUnit::BYTES);
METRIC_DEFINE_INT_COUNTER(query_scan_bytes, MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(query_scan_rows, MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(ranges_processed_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(push_requests_success_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(push_requests_fail_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(query_scan_rows, MetricUnit::ROWS);
+ METRIC_DEFINE_INT_COUNTER(push_requests_success_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(push_requests_fail_total,
MetricUnit::REQUESTS);
METRIC_DEFINE_INT_COUNTER(push_request_duration_us,
MetricUnit::MICROSECONDS);
METRIC_DEFINE_INT_COUNTER(push_request_write_bytes, MetricUnit::BYTES);
METRIC_DEFINE_INT_COUNTER(push_request_write_rows, MetricUnit::ROWS);
- METRIC_DEFINE_INT_COUNTER(create_tablet_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(create_tablet_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(drop_tablet_requests_total,
MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(report_all_tablets_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_all_tablets_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_tablet_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_tablet_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_disk_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_disk_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_task_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(report_task_requests_failed,
MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(schema_change_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(schema_change_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(create_rollup_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(create_rollup_requests_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(storage_migrate_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(delete_requests_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(delete_requests_failed, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(clone_requests_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(clone_requests_failed, MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(finish_task_requests_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(finish_task_requests_failed,
MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(base_compaction_request_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(base_compaction_request_failed,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(cumulative_compaction_request_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(cumulative_compaction_request_failed,
MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(base_compaction_deltas_total,
MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(create_tablet_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(create_tablet_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(drop_tablet_requests_total,
MetricUnit::REQUESTS);
+
+ METRIC_DEFINE_INT_COUNTER(report_all_tablets_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_all_tablets_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_tablet_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_tablet_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_disk_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_disk_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_task_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(report_task_requests_failed,
MetricUnit::REQUESTS);
+
+ METRIC_DEFINE_INT_COUNTER(schema_change_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(schema_change_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(create_rollup_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(create_rollup_requests_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(storage_migrate_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(delete_requests_total, MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(delete_requests_failed, MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(clone_requests_total, MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(clone_requests_failed, MetricUnit::REQUESTS);
+
+ METRIC_DEFINE_INT_COUNTER(finish_task_requests_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(finish_task_requests_failed,
MetricUnit::REQUESTS);
+
+ METRIC_DEFINE_INT_COUNTER(base_compaction_request_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(base_compaction_request_failed,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(cumulative_compaction_request_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(cumulative_compaction_request_failed,
MetricUnit::REQUESTS);
+
+ METRIC_DEFINE_INT_COUNTER(base_compaction_deltas_total,
MetricUnit::ROWSETS);
METRIC_DEFINE_INT_COUNTER(base_compaction_bytes_total,
MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(cumulative_compaction_deltas_total,
MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(cumulative_compaction_deltas_total,
MetricUnit::ROWSETS);
METRIC_DEFINE_INT_COUNTER(cumulative_compaction_bytes_total,
MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(publish_task_request_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(publish_task_failed_total,
MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(meta_write_request_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(meta_write_request_duration_us,
MetricUnit::MICROSECONDS);
- METRIC_DEFINE_INT_COUNTER(meta_read_request_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(meta_read_request_duration_us,
MetricUnit::MICROSECONDS);
+ METRIC_DEFINE_INT_COUNTER(publish_task_request_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(publish_task_failed_total,
MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(meta_write_request_total, MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(meta_write_request_duration_us,
MetricUnit::MICROSECONDS);
+ METRIC_DEFINE_INT_COUNTER(meta_read_request_total, MetricUnit::REQUESTS);
+ METRIC_DEFINE_INT_COUNTER(meta_read_request_duration_us,
MetricUnit::MICROSECONDS);
+
// Counters for segment_v2
// -----------------------
// total number of segments read
- METRIC_DEFINE_INT_COUNTER(segment_read_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(segment_read_total, MetricUnit::OPERATIONS);
// total number of rows in queried segments (before index pruning)
METRIC_DEFINE_INT_COUNTER(segment_row_total, MetricUnit::ROWS);
// total number of rows selected by short key index
@@ -123,24 +121,24 @@ public:
// total number of rows selected by zone map index
METRIC_DEFINE_INT_COUNTER(segment_rows_read_by_zone_map,
MetricUnit::ROWS);
- METRIC_DEFINE_INT_COUNTER(txn_begin_request_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(txn_commit_request_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(txn_rollback_request_total,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(txn_exec_plan_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(txn_begin_request_total,
MetricUnit::OPERATIONS);
+ METRIC_DEFINE_INT_COUNTER(txn_commit_request_total,
MetricUnit::OPERATIONS);
+ METRIC_DEFINE_INT_COUNTER(txn_rollback_request_total,
MetricUnit::OPERATIONS);
+ METRIC_DEFINE_INT_COUNTER(txn_exec_plan_total, MetricUnit::OPERATIONS);
METRIC_DEFINE_INT_COUNTER(stream_receive_bytes_total,
MetricUnit::BYTES);
METRIC_DEFINE_INT_COUNTER(stream_load_rows_total, MetricUnit::ROWS);
METRIC_DEFINE_INT_COUNTER(load_rows_total, MetricUnit::ROWS);
METRIC_DEFINE_INT_COUNTER(load_bytes_total, MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(memtable_flush_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(memtable_flush_total, MetricUnit::OPERATIONS);
METRIC_DEFINE_INT_COUNTER(memtable_flush_duration_us,
MetricUnit::MICROSECONDS);
// Gauges
METRIC_DEFINE_INT_GAUGE(memory_pool_bytes_total, MetricUnit::BYTES);
- METRIC_DEFINE_INT_GAUGE(process_thread_num, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(process_fd_num_used, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(process_fd_num_limit_soft, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(process_fd_num_limit_hard, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_GAUGE(process_thread_num, MetricUnit::NOUNIT);
+ METRIC_DEFINE_INT_GAUGE(process_fd_num_used, MetricUnit::NOUNIT);
+ METRIC_DEFINE_INT_GAUGE(process_fd_num_limit_soft, MetricUnit::NOUNIT);
+ METRIC_DEFINE_INT_GAUGE(process_fd_num_limit_hard, MetricUnit::NOUNIT);
IntGaugeMetricsMap disks_total_capacity;
IntGaugeMetricsMap disks_avail_capacity;
IntGaugeMetricsMap disks_data_used_capacity;
@@ -149,46 +147,44 @@ public:
// the max compaction score of all tablets.
// Record base and cumulative scores separately, because
// we need to get the larger of the two.
- METRIC_DEFINE_INT_GAUGE(tablet_cumulative_max_compaction_score,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(tablet_base_max_compaction_score,
MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_GAUGE(tablet_cumulative_max_compaction_score,
MetricUnit::NOUNIT);
+ METRIC_DEFINE_INT_GAUGE(tablet_base_max_compaction_score,
MetricUnit::NOUNIT);
// The following metrics will be calculated
// by metric calculator
- METRIC_DEFINE_INT_GAUGE(push_request_write_bytes_per_second,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(query_scan_bytes_per_second,
MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_GAUGE(push_request_write_bytes_per_second,
MetricUnit::BYTES);
+ METRIC_DEFINE_INT_GAUGE(query_scan_bytes_per_second, MetricUnit::BYTES);
METRIC_DEFINE_INT_GAUGE(max_disk_io_util_percent, MetricUnit::PERCENT);
- METRIC_DEFINE_INT_GAUGE(max_network_send_bytes_rate,
MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(max_network_receive_bytes_rate,
MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_GAUGE(max_network_send_bytes_rate, MetricUnit::BYTES);
+ METRIC_DEFINE_INT_GAUGE(max_network_receive_bytes_rate,
MetricUnit::BYTES);
// Metrics related with BlockManager
- METRIC_DEFINE_INT_COUNTER(readable_blocks_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(writable_blocks_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(blocks_created_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_COUNTER(blocks_deleted_total, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_COUNTER(readable_blocks_total, MetricUnit::BLOCKS);
+ METRIC_DEFINE_INT_COUNTER(writable_blocks_total, MetricUnit::BLOCKS);
+ METRIC_DEFINE_INT_COUNTER(blocks_created_total, MetricUnit::OPERATIONS);
+ METRIC_DEFINE_INT_COUNTER(blocks_deleted_total, MetricUnit::OPERATIONS);
METRIC_DEFINE_INT_COUNTER(bytes_read_total, MetricUnit::BYTES);
METRIC_DEFINE_INT_COUNTER(bytes_written_total, MetricUnit::BYTES);
- METRIC_DEFINE_INT_COUNTER(disk_sync_total, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(blocks_open_reading, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(blocks_open_writing, MetricUnit::NUMBER);
-
- METRIC_DEFINE_INT_COUNTER(blocks_push_remote_duration_us,
MetricUnit::MICROSECONDS);
+ METRIC_DEFINE_INT_COUNTER(disk_sync_total, MetricUnit::OPERATIONS);
+ METRIC_DEFINE_INT_GAUGE(blocks_open_reading, MetricUnit::BLOCKS);
+ METRIC_DEFINE_INT_GAUGE(blocks_open_writing, MetricUnit::BLOCKS);
// Size of some global containers
- METRIC_DEFINE_UINT_GAUGE(rowset_count_generated_and_in_use,
MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(unused_rowsets_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(broker_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(data_stream_receiver_count,
MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(fragment_endpoint_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(active_scan_context_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(plan_fragment_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(load_channel_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(result_buffer_block_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(result_block_queue_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(routine_load_task_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(small_file_cache_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(stream_load_pipe_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(brpc_endpoint_stub_count, MetricUnit::NUMBER);
- METRIC_DEFINE_UINT_GAUGE(tablet_writer_count, MetricUnit::NUMBER);
+ METRIC_DEFINE_UINT_GAUGE(rowset_count_generated_and_in_use,
MetricUnit::ROWSETS);
+ METRIC_DEFINE_UINT_GAUGE(unused_rowsets_count, MetricUnit::ROWSETS);
+ METRIC_DEFINE_UINT_GAUGE(broker_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(data_stream_receiver_count,
MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(fragment_endpoint_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(active_scan_context_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(plan_fragment_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(load_channel_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(result_buffer_block_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(result_block_queue_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(routine_load_task_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(small_file_cache_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(stream_load_pipe_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(brpc_endpoint_stub_count, MetricUnit::NOUNIT);
+ METRIC_DEFINE_UINT_GAUGE(tablet_writer_count, MetricUnit::NOUNIT);
static DorisMetrics* instance() {
static DorisMetrics instance;
diff --git a/be/src/util/metrics.cpp b/be/src/util/metrics.cpp
index b702489..7ad9433 100644
--- a/be/src/util/metrics.cpp
+++ b/be/src/util/metrics.cpp
@@ -59,10 +59,18 @@ const char* unit_name(MetricUnit unit) {
return "bytes";
case MetricUnit::ROWS:
return "rows";
- case MetricUnit::NUMBER:
- return "number";
case MetricUnit::PERCENT:
return "percent";
+ case MetricUnit::REQUESTS:
+ return "requests";
+ case MetricUnit::OPERATIONS:
+ return "operations";
+ case MetricUnit::BLOCKS:
+ return "blocks";
+ case MetricUnit::ROWSETS:
+ return "rowsets";
+ case MetricUnit::CONNECTIONS:
+ return "connections";
default:
return "nounit";
}
diff --git a/be/src/util/metrics.h b/be/src/util/metrics.h
index 4a22f2a..f6e9bdd 100644
--- a/be/src/util/metrics.h
+++ b/be/src/util/metrics.h
@@ -54,8 +54,13 @@ enum class MetricUnit {
SECONDS,
BYTES,
ROWS,
- NUMBER,
PERCENT,
+ REQUESTS,
+ OPERATIONS,
+ BLOCKS,
+ ROWSETS,
+ CONNECTIONS,
+ PACKETS,
NOUNIT
};
diff --git a/be/src/util/system_metrics.cpp b/be/src/util/system_metrics.cpp
index ba9e91a..79d8c6b 100644
--- a/be/src/util/system_metrics.cpp
+++ b/be/src/util/system_metrics.cpp
@@ -49,10 +49,10 @@ struct MemoryMetrics {
};
struct DiskMetrics {
- METRIC_DEFINE_INT_LOCK_COUNTER(reads_completed, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_LOCK_COUNTER(reads_completed, MetricUnit::OPERATIONS);
METRIC_DEFINE_INT_LOCK_COUNTER(bytes_read, MetricUnit::BYTES);
METRIC_DEFINE_INT_LOCK_COUNTER(read_time_ms, MetricUnit::MILLISECONDS);
- METRIC_DEFINE_INT_LOCK_COUNTER(writes_completed, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_LOCK_COUNTER(writes_completed, MetricUnit::OPERATIONS);
METRIC_DEFINE_INT_LOCK_COUNTER(bytes_written, MetricUnit::BYTES);
METRIC_DEFINE_INT_LOCK_COUNTER(write_time_ms, MetricUnit::MILLISECONDS);
METRIC_DEFINE_INT_LOCK_COUNTER(io_time_ms, MetricUnit::MILLISECONDS);
@@ -61,9 +61,9 @@ struct DiskMetrics {
struct NetMetrics {
METRIC_DEFINE_INT_LOCK_COUNTER(receive_bytes, MetricUnit::BYTES);
- METRIC_DEFINE_INT_LOCK_COUNTER(receive_packets, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_LOCK_COUNTER(receive_packets, MetricUnit::PACKETS);
METRIC_DEFINE_INT_LOCK_COUNTER(send_bytes, MetricUnit::BYTES);
- METRIC_DEFINE_INT_LOCK_COUNTER(send_packets, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_LOCK_COUNTER(send_packets, MetricUnit::PACKETS);
};
// metrics read from /proc/net/snmp
@@ -75,8 +75,8 @@ struct SnmpMetrics {
};
struct FileDescriptorMetrics {
- METRIC_DEFINE_INT_GAUGE(fd_num_limit, MetricUnit::NUMBER);
- METRIC_DEFINE_INT_GAUGE(fd_num_used, MetricUnit::NUMBER);
+ METRIC_DEFINE_INT_GAUGE(fd_num_limit, MetricUnit::NOUNIT);
+ METRIC_DEFINE_INT_GAUGE(fd_num_used, MetricUnit::NOUNIT);
};
SystemMetrics::SystemMetrics() {
diff --git a/be/src/util/thrift_server.cpp b/be/src/util/thrift_server.cpp
index 9985732..2931c76 100644
--- a/be/src/util/thrift_server.cpp
+++ b/be/src/util/thrift_server.cpp
@@ -276,12 +276,12 @@ ThriftServer::ThriftServer(
_session_handler(NULL) {
if (metrics != NULL) {
_metrics_enabled = true;
- _current_connections.reset(new IntGauge(MetricUnit::NUMBER));
+ _current_connections.reset(new IntGauge(MetricUnit::CONNECTIONS));
metrics->register_metric("thrift_current_connections",
MetricLabels().add("name", name),
_current_connections.get());
- _connections_total.reset(new IntCounter(MetricUnit::NUMBER));
+ _connections_total.reset(new IntCounter(MetricUnit::CONNECTIONS));
metrics->register_metric("thrift_connections_total",
MetricLabels().add("name", name),
_connections_total.get());
diff --git a/be/test/http/metrics_action_test.cpp
b/be/test/http/metrics_action_test.cpp
index 6bbab74..417696e 100644
--- a/be/test/http/metrics_action_test.cpp
+++ b/be/test/http/metrics_action_test.cpp
@@ -56,7 +56,7 @@ TEST_F(MetricsActionTest, prometheus_output) {
IntGauge cpu_idle(MetricUnit::PERCENT);
cpu_idle.set_value(50);
registry.register_metric("cpu_idle", &cpu_idle);
- IntCounter put_requests_total(MetricUnit::NUMBER);
+ IntCounter put_requests_total(MetricUnit::NOUNIT);
put_requests_total.increment(2345);
registry.register_metric("requests_total",
MetricLabels().add("type", "put").add("path",
"/sports"),
diff --git a/be/test/util/doris_metrics_test.cpp
b/be/test/util/doris_metrics_test.cpp
index 76a6a2e..cf728b4 100644
--- a/be/test/util/doris_metrics_test.cpp
+++ b/be/test/util/doris_metrics_test.cpp
@@ -100,12 +100,6 @@ TEST_F(DorisMetricsTest, Normal) {
ASSERT_STREQ("102", metric->to_string().c_str());
}
{
- DorisMetrics::instance()->http_request_duration_us.increment(103);
- auto metric = metrics->get_metric("http_request_duration_us");
- ASSERT_TRUE(metric != nullptr);
- ASSERT_STREQ("103", metric->to_string().c_str());
- }
- {
DorisMetrics::instance()->http_request_send_bytes.increment(104);
auto metric = metrics->get_metric("http_request_send_bytes");
ASSERT_TRUE(metric != nullptr);
@@ -124,12 +118,6 @@ TEST_F(DorisMetricsTest, Normal) {
ASSERT_STREQ("105", metric->to_string().c_str());
}
{
- DorisMetrics::instance()->ranges_processed_total.increment(13);
- auto metric = metrics->get_metric("ranges_processed_total");
- ASSERT_TRUE(metric != nullptr);
- ASSERT_STREQ("13", metric->to_string().c_str());
- }
- {
DorisMetrics::instance()->push_requests_success_total.increment(106);
auto metric = metrics->get_metric("push_requests_total",
MetricLabels().add("status",
"SUCCESS"));
diff --git a/be/test/util/new_metrics_test.cpp
b/be/test/util/new_metrics_test.cpp
index 649593b..d8a7860 100644
--- a/be/test/util/new_metrics_test.cpp
+++ b/be/test/util/new_metrics_test.cpp
@@ -36,7 +36,7 @@ public:
TEST_F(MetricsTest, Counter) {
{
- IntCounter counter(MetricUnit::NUMBER);
+ IntCounter counter(MetricUnit::NOUNIT);
ASSERT_EQ(0, counter.value());
counter.increment(100);
ASSERT_EQ(100, counter.value());
@@ -44,7 +44,7 @@ TEST_F(MetricsTest, Counter) {
ASSERT_STREQ("100", counter.to_string().c_str());
}
{
- DoubleCounter counter(MetricUnit::NUMBER);
+ DoubleCounter counter(MetricUnit::NOUNIT);
ASSERT_EQ(0.0, counter.value());
counter.increment(1.23);
ASSERT_EQ(1.23, counter.value());
@@ -65,7 +65,7 @@ void mt_updater(IntCounter* counter, std::atomic<uint64_t>*
used_time) {
}
TEST_F(MetricsTest, CounterPerf) {
- IntCounter counter(MetricUnit::NUMBER);
+ IntCounter counter(MetricUnit::NOUNIT);
volatile int64_t sum = 0;
{
@@ -91,7 +91,7 @@ TEST_F(MetricsTest, CounterPerf) {
ASSERT_EQ(100000000, counter.value());
ASSERT_EQ(100000000, sum);
{
- IntCounter mt_counter(MetricUnit::NUMBER);
+ IntCounter mt_counter(MetricUnit::NOUNIT);
std::vector<std::thread> updaters;
std::atomic<uint64_t> used_time(0);
for (int i = 0; i < 8; ++i) {
@@ -108,7 +108,7 @@ TEST_F(MetricsTest, CounterPerf) {
TEST_F(MetricsTest, Gauge) {
{
- IntGauge gauge(MetricUnit::NUMBER);
+ IntGauge gauge(MetricUnit::NOUNIT);
ASSERT_EQ(0, gauge.value());
gauge.set_value(100);
ASSERT_EQ(100, gauge.value());
@@ -116,7 +116,7 @@ TEST_F(MetricsTest, Gauge) {
ASSERT_STREQ("100", gauge.to_string().c_str());
}
{
- DoubleGauge gauge(MetricUnit::NUMBER);
+ DoubleGauge gauge(MetricUnit::NOUNIT);
ASSERT_EQ(0.0, gauge.value());
gauge.set_value(1.23);
ASSERT_EQ(1.23, gauge.value());
@@ -205,9 +205,9 @@ private:
};
TEST_F(MetricsTest, MetricCollector) {
- IntCounter puts(MetricUnit::NUMBER);
+ IntCounter puts(MetricUnit::NOUNIT);
puts.increment(101);
- IntCounter gets(MetricUnit::NUMBER);
+ IntCounter gets(MetricUnit::NOUNIT);
gets.increment(201);
MetricCollector collector;
ASSERT_TRUE(collector.add_metic(MetricLabels().add("type", "put"), &puts));
@@ -216,7 +216,7 @@ TEST_F(MetricsTest, MetricCollector) {
{
// Can't add different type to one collector
- IntGauge post(MetricUnit::NUMBER);
+ IntGauge post(MetricUnit::NOUNIT);
ASSERT_FALSE(collector.add_metic(MetricLabels().add("type", "post"),
&post));
}
diff --git a/docs/.vuepress/sidebar/en.js b/docs/.vuepress/sidebar/en.js
index a1607be..9f5d0c1 100644
--- a/docs/.vuepress/sidebar/en.js
+++ b/docs/.vuepress/sidebar/en.js
@@ -77,6 +77,10 @@ module.exports = [
"fe-get-log-file",
"get-label-state",
"restore-tablet",
+ "profile-action",
+ "connection-action",
+ "query-detail-action",
+ "show-data-action",
],
sidebarDepth: 1,
},
diff --git a/docs/.vuepress/sidebar/zh-CN.js b/docs/.vuepress/sidebar/zh-CN.js
index 6c5e77f..7e33c48 100644
--- a/docs/.vuepress/sidebar/zh-CN.js
+++ b/docs/.vuepress/sidebar/zh-CN.js
@@ -84,6 +84,10 @@ module.exports = [
"fe-get-log-file",
"get-label-state",
"restore-tablet",
+ "profile-action",
+ "connection-action",
+ "query-detail-action",
+ "show-data-action",
],
sidebarDepth: 1,
},
diff --git a/docs/en/administrator-guide/http-actions/connection-action.md
b/docs/en/administrator-guide/http-actions/connection-action.md
new file mode 100644
index 0000000..d7d81e0
--- /dev/null
+++ b/docs/en/administrator-guide/http-actions/connection-action.md
@@ -0,0 +1,42 @@
+---
+{
+ "title": "CONNECTION",
+ "language": "en"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# CONNECTION
+
+To get current query_id from connection
+
+```
+curl -X GET http://fe_host:fe_http_port/api/connection?connection_id=123
+```
+
+If connection_id does not exist, return 404 NOT FOUND ERROR
+
+If connection_id exists, return last query_id belongs to connection_id
+```
+{
+ "query_id" : "9133b7efa92a44c8-8ed4b44772ec2a0c"
+}
+```
diff --git a/docs/en/administrator-guide/http-actions/profile-action.md
b/docs/en/administrator-guide/http-actions/profile-action.md
new file mode 100644
index 0000000..7851fa6
--- /dev/null
+++ b/docs/en/administrator-guide/http-actions/profile-action.md
@@ -0,0 +1,120 @@
+---
+{
+    "title": "PROFILE",
+ "language": "en"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# PROFILE
+
+To get query profile using query_id
+
+```
+curl -X GET http://fe_host:fe_http_port/api/profile?query_id=123
+```
+
+If query_id does not exist, return 404 NOT FOUND ERROR
+
+If query_id exists, return query profile like this
+```
+Query:
+ Summary:
+ - Query ID: a0a9259df9844029-845331577440a3bd
+ - Start Time: 2020-06-15 14:10:05
+ - End Time: 2020-06-15 14:10:05
+ - Total: 8ms
+ - Query Type: Query
+ - Query State: EOF
+ - Doris Version: trunk
+ - User: root
+ - Default Db: default_cluster:test
+ - Sql Statement: select * from table1
+ Execution Profile a0a9259df9844029-845331577440a3bd:(Active: 7.315ms, %
non-child: 100.00%)
+ Fragment 0:
+ Instance a0a9259df9844029-845331577440a3be
(host=TNetworkAddress(hostname:172.26.108.176, port:9560)):(Active: 1.523ms, %
non-child: 0.24%)
+ - MemoryLimit: 2.00 GB
+ - PeakUsedReservation: 0.00
+ - PeakMemoryUsage: 72.00 KB
+ - RowsProduced: 5
+ - AverageThreadTokens: 0.00
+ - PeakReservation: 0.00
+ BlockMgr:
+ - BlocksCreated: 0
+ - BlockWritesOutstanding: 0
+ - BytesWritten: 0.00
+ - TotalEncryptionTime: 0ns
+ - BufferedPins: 0
+ - TotalReadBlockTime: 0ns
+ - TotalBufferWaitTime: 0ns
+ - BlocksRecycled: 0
+ - TotalIntegrityCheckTime: 0ns
+ - MaxBlockSize: 8.00 MB
+ DataBufferSender
(dst_fragment_instance_id=a0a9259df9844029-845331577440a3be):
+ - AppendBatchTime: 9.23us
+ - ResultRendTime: 956ns
+ - TupleConvertTime: 5.735us
+ - NumSentRows: 5
+ OLAP_SCAN_NODE (id=0):(Active: 1.506ms, % non-child: 20.59%)
+ - TotalRawReadTime: 0ns
+ - CompressedBytesRead: 6.47 KB
+ - PeakMemoryUsage: 0.00
+ - RowsPushedCondFiltered: 0
+ - ScanRangesComplete: 0
+ - ScanTime: 25.195us
+ - BitmapIndexFilterTimer: 0ns
+ - BitmapIndexFilterCount: 0
+ - NumScanners: 65
+ - RowsStatsFiltered: 0
+ - VectorPredEvalTime: 0ns
+ - BlockSeekTime: 1.299ms
+ - RawRowsRead: 1.91K (1910)
+ - ScannerThreadsVoluntaryContextSwitches: 0
+ - RowsDelFiltered: 0
+ - IndexLoadTime: 911.104us
+ - NumDiskAccess: 1
+ - ScannerThreadsTotalWallClockTime: 0ns
+ - MaterializeTupleTime: 0ns
+ - ScannerThreadsUserTime: 0ns
+ - ScannerThreadsSysTime: 0ns
+ - TotalPagesNum: 0
+ - RowsReturnedRate: 3.319K /sec
+ - BlockLoadTime: 539.289us
+ - CachedPagesNum: 0
+ - BlocksLoad: 384
+ - UncompressedBytesRead: 0.00
+ - RowsBloomFilterFiltered: 0
+ - TabletCount : 1
+ - RowsReturned: 5
+ - ScannerThreadsInvoluntaryContextSwitches: 0
+ - DecompressorTimer: 0ns
+ - RowsVectorPredFiltered: 0
+ - ReaderInitTime: 6.498ms
+ - RowsRead: 5
+ - PerReadThreadRawHdfsThroughput: 0.0 /sec
+ - BlockFetchTime: 4.318ms
+ - ShowHintsTime: 0ns
+ - TotalReadThroughput: 0.0 /sec
+ - IOTimer: 1.154ms
+ - BytesRead: 48.49 KB
+ - BlockConvertTime: 97.539us
+ - BlockSeekCount: 0
+```
diff --git a/docs/en/administrator-guide/http-actions/query-detail-action.md
b/docs/en/administrator-guide/http-actions/query-detail-action.md
new file mode 100644
index 0000000..b4b5f0e
--- /dev/null
+++ b/docs/en/administrator-guide/http-actions/query-detail-action.md
@@ -0,0 +1,61 @@
+---
+{
+ "title": "QUERY DETAIL",
+ "language": "en"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# QUERY DETAIL
+
+Collect the query details from FE. You should set the event_time.
+FE will return the query detail after the event_time.
+The unit of event_time is milliseconds.
+
+```
+curl -X GET
http://fe_host:fe_http_port/api/query_detail?event_time=1592054515284
+```
+
+The query details will be returned as JSON
+```
+[
+ {
+ "eventTime": 1592201405063,
+ "queryId": "a0a9259df9844029-845331577440a3bd",
+ "startTime": 1592201405055,
+ "endTime": 1592201405063,
+ "latency": 8,
+ "state": "FINISHED",
+ "database": "test",
+ "sql": "select * from table1"
+ },
+ {
+ "eventTime": 1592201420842,
+ "queryId": "21cd79c3e1634e8a-bdac090c7e7bcc36",
+ "startTime": 1592201420834,
+ "endTime": 1592201420842,
+ "latency": 8,
+ "state": "FINISHED",
+ "database": "test",
+ "sql": "select * from table1"
+ }
+]
+```
diff --git a/docs/en/administrator-guide/http-actions/show-data-action.md
b/docs/en/administrator-guide/http-actions/show-data-action.md
new file mode 100644
index 0000000..5e767ff
--- /dev/null
+++ b/docs/en/administrator-guide/http-actions/show-data-action.md
@@ -0,0 +1,35 @@
+---
+{
+ "title": "SHOW DATA",
+ "language": "en"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# SHOW DATA
+
+To get the total data size occupied by the cluster
+
+```
+curl -X GET http://fe_host:fe_http_port/api/show_data
+```
+
+The return value is the total size of the cluster
diff --git a/docs/zh-CN/administrator-guide/http-actions/connection-action.md
b/docs/zh-CN/administrator-guide/http-actions/connection-action.md
new file mode 100644
index 0000000..3852575
--- /dev/null
+++ b/docs/zh-CN/administrator-guide/http-actions/connection-action.md
@@ -0,0 +1,42 @@
+---
+{
+ "title": "CONNECTION",
+ "language": "zh-CN"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# CONNECTION
+
+通过connection_id, 获取当前连接的query_id
+
+```
+curl -X GET http://fe_host:fe_http_port/api/connection?connection_id=123
+```
+
+如果connection_id不存在,直接返回404 NOT FOUND错误
+
+如果connection_id存在,会返回当前connection_id上一个进行的query_id
+```
+{
+ "query_id" : "9133b7efa92a44c8-8ed4b44772ec2a0c"
+}
+```
diff --git a/docs/zh-CN/administrator-guide/http-actions/profile-action.md
b/docs/zh-CN/administrator-guide/http-actions/profile-action.md
new file mode 100644
index 0000000..84bde4c
--- /dev/null
+++ b/docs/zh-CN/administrator-guide/http-actions/profile-action.md
@@ -0,0 +1,120 @@
+---
+{
+    "title": "PROFILE",
+ "language": "zh-CN"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# PROFILE
+
+通过query_id获取query profile
+
+```
+curl -X GET http://fe_host:fe_http_port/api/profile?query_id=123
+```
+
+如果query_id不存在, 直接返回404 NOT FOUND错误
+
+如果query_id存在,返回下列文本的profile
+```
+Query:
+ Summary:
+ - Query ID: a0a9259df9844029-845331577440a3bd
+ - Start Time: 2020-06-15 14:10:05
+ - End Time: 2020-06-15 14:10:05
+ - Total: 8ms
+ - Query Type: Query
+ - Query State: EOF
+ - Doris Version: trunk
+ - User: root
+ - Default Db: default_cluster:test
+ - Sql Statement: select * from table1
+ Execution Profile a0a9259df9844029-845331577440a3bd:(Active: 7.315ms, %
non-child: 100.00%)
+ Fragment 0:
+ Instance a0a9259df9844029-845331577440a3be
(host=TNetworkAddress(hostname:172.26.108.176, port:9560)):(Active: 1.523ms, %
non-child: 0.24%)
+ - MemoryLimit: 2.00 GB
+ - PeakUsedReservation: 0.00
+ - PeakMemoryUsage: 72.00 KB
+ - RowsProduced: 5
+ - AverageThreadTokens: 0.00
+ - PeakReservation: 0.00
+ BlockMgr:
+ - BlocksCreated: 0
+ - BlockWritesOutstanding: 0
+ - BytesWritten: 0.00
+ - TotalEncryptionTime: 0ns
+ - BufferedPins: 0
+ - TotalReadBlockTime: 0ns
+ - TotalBufferWaitTime: 0ns
+ - BlocksRecycled: 0
+ - TotalIntegrityCheckTime: 0ns
+ - MaxBlockSize: 8.00 MB
+ DataBufferSender
(dst_fragment_instance_id=a0a9259df9844029-845331577440a3be):
+ - AppendBatchTime: 9.23us
+ - ResultRendTime: 956ns
+ - TupleConvertTime: 5.735us
+ - NumSentRows: 5
+ OLAP_SCAN_NODE (id=0):(Active: 1.506ms, % non-child: 20.59%)
+ - TotalRawReadTime: 0ns
+ - CompressedBytesRead: 6.47 KB
+ - PeakMemoryUsage: 0.00
+ - RowsPushedCondFiltered: 0
+ - ScanRangesComplete: 0
+ - ScanTime: 25.195us
+ - BitmapIndexFilterTimer: 0ns
+ - BitmapIndexFilterCount: 0
+ - NumScanners: 65
+ - RowsStatsFiltered: 0
+ - VectorPredEvalTime: 0ns
+ - BlockSeekTime: 1.299ms
+ - RawRowsRead: 1.91K (1910)
+ - ScannerThreadsVoluntaryContextSwitches: 0
+ - RowsDelFiltered: 0
+ - IndexLoadTime: 911.104us
+ - NumDiskAccess: 1
+ - ScannerThreadsTotalWallClockTime: 0ns
+ - MaterializeTupleTime: 0ns
+ - ScannerThreadsUserTime: 0ns
+ - ScannerThreadsSysTime: 0ns
+ - TotalPagesNum: 0
+ - RowsReturnedRate: 3.319K /sec
+ - BlockLoadTime: 539.289us
+ - CachedPagesNum: 0
+ - BlocksLoad: 384
+ - UncompressedBytesRead: 0.00
+ - RowsBloomFilterFiltered: 0
+ - TabletCount : 1
+ - RowsReturned: 5
+ - ScannerThreadsInvoluntaryContextSwitches: 0
+ - DecompressorTimer: 0ns
+ - RowsVectorPredFiltered: 0
+ - ReaderInitTime: 6.498ms
+ - RowsRead: 5
+ - PerReadThreadRawHdfsThroughput: 0.0 /sec
+ - BlockFetchTime: 4.318ms
+ - ShowHintsTime: 0ns
+ - TotalReadThroughput: 0.0 /sec
+ - IOTimer: 1.154ms
+ - BytesRead: 48.49 KB
+ - BlockConvertTime: 97.539us
+ - BlockSeekCount: 0
+```
diff --git a/docs/zh-CN/administrator-guide/http-actions/query-detail-action.md
b/docs/zh-CN/administrator-guide/http-actions/query-detail-action.md
new file mode 100644
index 0000000..da07e3f
--- /dev/null
+++ b/docs/zh-CN/administrator-guide/http-actions/query-detail-action.md
@@ -0,0 +1,59 @@
+---
+{
+ "title": "QUERY DETAIL",
+ "language": "zh-CN"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# QUERY DETAIL
+
+从FE获取所有的查询细节,获取关于查询执行的相关信息。
+FE会返回在event_time之后的查询细节,其中event_time单位会精确到毫秒。
+
+```
+curl -X GET
http://fe_host:fe_http_port/api/query_detail?event_time=1592054515284
+```
+
+查询信息会以JSON格式返回。
+```
+[
+ {
+ "eventTime": 1592201405063,
+ "queryId": "a0a9259df9844029-845331577440a3bd",
+ "startTime": 1592201405055,
+ "endTime": 1592201405063,
+ "latency": 8,
+ "state": "FINISHED",
+ "database": "test",
+ "sql": "select * from table1"
+ },
+ {
+ "eventTime": 1592201420842,
+ "queryId": "21cd79c3e1634e8a-bdac090c7e7bcc36",
+ "startTime": 1592201420834,
+ "endTime": 1592201420842,
+ "latency": 8,
+ "state": "FINISHED",
+ "database": "test",
+ "sql": "select * from table1"
+ }
+]
diff --git a/docs/zh-CN/administrator-guide/http-actions/show-data-action.md
b/docs/zh-CN/administrator-guide/http-actions/show-data-action.md
new file mode 100644
index 0000000..178bfcc
--- /dev/null
+++ b/docs/zh-CN/administrator-guide/http-actions/show-data-action.md
@@ -0,0 +1,35 @@
+---
+{
+ "title": "SHOW DATA",
+ "language": "zh-CN"
+}
+---
+
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements. See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership. The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License. You may obtain a copy of the License at
+
+ http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied. See the License for the
+specific language governing permissions and limitations
+under the License.
+-->
+
+# SHOW DATA
+
+获取当前集群占用的总空间大小
+
+```
+curl -X GET http://fe_host:fe_http_port/api/show_data
+```
+
+返回值就是集群的总数据大小
diff --git a/fe/src/main/java/org/apache/doris/PaloFe.java
b/fe/src/main/java/org/apache/doris/PaloFe.java
index 2f01788..2fbae1f 100644
--- a/fe/src/main/java/org/apache/doris/PaloFe.java
+++ b/fe/src/main/java/org/apache/doris/PaloFe.java
@@ -73,7 +73,6 @@ public class PaloFe {
}
CommandLineOptions cmdLineOpts = parseArgs(args);
- System.out.println(cmdLineOpts.toString());
try {
// pid file
diff --git a/fe/src/main/java/org/apache/doris/common/ThreadPoolManager.java
b/fe/src/main/java/org/apache/doris/common/ThreadPoolManager.java
index ef9b088..c7dafeb 100644
--- a/fe/src/main/java/org/apache/doris/common/ThreadPoolManager.java
+++ b/fe/src/main/java/org/apache/doris/common/ThreadPoolManager.java
@@ -69,7 +69,7 @@ public class ThreadPoolManager {
public static void registerThreadPoolMetric(String poolName,
ThreadPoolExecutor threadPool) {
for (String poolMetricType : poolMerticTypes) {
- GaugeMetric<Integer> gauge = new
GaugeMetric<Integer>("thread_pool", MetricUnit.NUMBER, "thread_pool
statistics") {
+ GaugeMetric<Integer> gauge = new
GaugeMetric<Integer>("thread_pool", MetricUnit.NOUNIT, "thread_pool
statistics") {
@Override
public Integer getValue() {
String metricType = this.getLabels().get(1).getValue();
diff --git a/fe/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
b/fe/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
index a6ee648..d462c08 100644
--- a/fe/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
+++ b/fe/src/main/java/org/apache/doris/common/util/RuntimeProfile.java
@@ -204,7 +204,7 @@ public class RuntimeProfile {
// 2. info String
for (String key : this.infoStringsDisplayOrder) {
- builder.append(prefix).append(" ").append(key).append(": ")
+ builder.append(prefix).append(" - ").append(key).append(": ")
.append(this.infoStrings.get(key)).append("\n");
}
diff --git a/fe/src/main/java/org/apache/doris/http/HttpServer.java
b/fe/src/main/java/org/apache/doris/http/HttpServer.java
index 37d7c5e..1d4dcc4 100644
--- a/fe/src/main/java/org/apache/doris/http/HttpServer.java
+++ b/fe/src/main/java/org/apache/doris/http/HttpServer.java
@@ -42,6 +42,7 @@ import org.apache.doris.http.meta.MetaService.VersionAction;
import org.apache.doris.http.rest.BootstrapFinishAction;
import org.apache.doris.http.rest.CancelStreamLoad;
import org.apache.doris.http.rest.CheckDecommissionAction;
+import org.apache.doris.http.rest.ConnectionAction;
import org.apache.doris.http.rest.GetDdlStmtAction;
import org.apache.doris.http.rest.GetLoadInfoAction;
import org.apache.doris.http.rest.GetLogFileAction;
@@ -58,8 +59,11 @@ import org.apache.doris.http.rest.MultiDesc;
import org.apache.doris.http.rest.MultiList;
import org.apache.doris.http.rest.MultiStart;
import org.apache.doris.http.rest.MultiUnload;
+import org.apache.doris.http.rest.ProfileAction;
+import org.apache.doris.http.rest.QueryDetailAction;
import org.apache.doris.http.rest.RowCountAction;
import org.apache.doris.http.rest.SetConfigAction;
+import org.apache.doris.http.rest.ShowDataAction;
import org.apache.doris.http.rest.ShowMetaInfoAction;
import org.apache.doris.http.rest.ShowProcAction;
import org.apache.doris.http.rest.ShowRuntimeInfoAction;
@@ -152,6 +156,10 @@ public class HttpServer {
ColocateMetaService.BucketSeqAction.registerAction(controller);
ColocateMetaService.ColocateMetaAction.registerAction(controller);
ColocateMetaService.MarkGroupStableAction.registerAction(controller);
+ ProfileAction.registerAction(controller);
+ QueryDetailAction.registerAction(controller);
+ ConnectionAction.registerAction(controller);
+ ShowDataAction.registerAction(controller);
// meta service action
File imageDir = MetaHelper.getMasterImageDir();
diff --git a/fe/src/main/java/org/apache/doris/http/rest/ConnectionAction.java
b/fe/src/main/java/org/apache/doris/http/rest/ConnectionAction.java
new file mode 100644
index 0000000..2d486be
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/http/rest/ConnectionAction.java
@@ -0,0 +1,61 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.http.rest;
+
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import org.apache.doris.common.util.DebugUtil;
+import org.apache.doris.http.ActionController;
+import org.apache.doris.http.BaseRequest;
+import org.apache.doris.http.BaseResponse;
+import org.apache.doris.http.IllegalArgException;
+import org.apache.doris.qe.ConnectContext;
+import org.apache.doris.service.ExecuteEnv;
+
+// This class is used to get current query_id of connection_id.
+// Every connection holds at most one query at every point.
+// So we can get the query_id first, and then get the query by query_id.
+public class ConnectionAction extends RestBaseAction {
+ public ConnectionAction(ActionController controller) {
+ super(controller);
+ }
+
+ public static void registerAction (ActionController controller) throws
IllegalArgException {
+ controller.registerHandler(HttpMethod.GET, "/api/connection", new
ConnectionAction(controller));
+ }
+
+ @Override
+ public void execute(BaseRequest request, BaseResponse response) {
+ String connStr = request.getSingleParameter("connection_id");
+ if (connStr == null) {
+ response.getContent().append("not valid parameter");
+ sendResult(request, response, HttpResponseStatus.BAD_REQUEST);
+ return;
+ }
+ long connectionId = Long.valueOf(connStr.trim());
+ ConnectContext context =
ExecuteEnv.getInstance().getScheduler().getContext(connectionId);
+ if (context == null || context.queryId() == null) {
+ response.getContent().append("connection id " + connectionId + "
not found.");
+ sendResult(request, response, HttpResponseStatus.NOT_FOUND);
+ return;
+ }
+ String queryId = DebugUtil.printId(context.queryId());
+        response.getContent().append("{\"query_id\" : \"" + queryId + "\"}");
+ sendResult(request, response);
+ }
+}
diff --git a/fe/src/main/java/org/apache/doris/http/rest/ProfileAction.java
b/fe/src/main/java/org/apache/doris/http/rest/ProfileAction.java
new file mode 100644
index 0000000..0bb35ed
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/http/rest/ProfileAction.java
@@ -0,0 +1,60 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.http.rest;
+
+import org.apache.doris.common.util.ProfileManager;
+import org.apache.doris.http.ActionController;
+import org.apache.doris.http.BaseRequest;
+import org.apache.doris.http.BaseResponse;
+import org.apache.doris.http.IllegalArgException;
+
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpResponseStatus;
+
+// This class is a RESTFUL interface to get query profile.
+// It will be used in query monitor to collect profiles.
+// Usage:
+// wget http://fe_host:fe_http_port/api/profile?query_id=123456
+public class ProfileAction extends RestBaseAction {
+
+ public ProfileAction(ActionController controller) {
+ super(controller);
+ }
+
+ public static void registerAction (ActionController controller) throws
IllegalArgException {
+ controller.registerHandler(HttpMethod.GET, "/api/profile", new
ProfileAction(controller));
+ }
+
+ @Override
+ public void execute(BaseRequest request, BaseResponse response) {
+ String queryId = request.getSingleParameter("query_id");
+ if (queryId == null) {
+ response.getContent().append("not valid parameter");
+ sendResult(request, response, HttpResponseStatus.BAD_REQUEST);
+ return;
+ }
+ String queryProfileStr =
ProfileManager.getInstance().getProfile(queryId);
+ if (queryProfileStr != null) {
+ response.getContent().append(queryProfileStr);
+ sendResult(request, response);
+ } else {
+ response.getContent().append("query id " + queryId + " not
found.");
+ sendResult(request, response, HttpResponseStatus.NOT_FOUND);
+ }
+ }
+}
diff --git a/fe/src/main/java/org/apache/doris/http/rest/QueryDetailAction.java
b/fe/src/main/java/org/apache/doris/http/rest/QueryDetailAction.java
new file mode 100644
index 0000000..c12a640
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/http/rest/QueryDetailAction.java
@@ -0,0 +1,56 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.http.rest;
+
+import com.google.gson.Gson;
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import java.util.List;
+import org.apache.doris.http.ActionController;
+import org.apache.doris.http.BaseRequest;
+import org.apache.doris.http.BaseResponse;
+import org.apache.doris.http.IllegalArgException;
+import org.apache.doris.qe.QueryDetail;
+import org.apache.doris.qe.QueryDetailQueue;
+
+public class QueryDetailAction extends RestBaseAction {
+
+ public QueryDetailAction(ActionController controller) {
+ super(controller);
+ }
+
+ public static void registerAction (ActionController controller) throws
IllegalArgException {
+ controller.registerHandler(HttpMethod.GET, "/api/query_detail", new
QueryDetailAction(controller));
+ }
+
+ @Override
+ public void execute(BaseRequest request, BaseResponse response) {
+ String eventTimeStr = request.getSingleParameter("event_time");
+ if (eventTimeStr == null) {
+ response.getContent().append("not valid parameter");
+ sendResult(request, response, HttpResponseStatus.BAD_REQUEST);
+ return;
+ }
+ long eventTime = Long.valueOf(eventTimeStr.trim());
+ List<QueryDetail> queryDetails =
QueryDetailQueue.getQueryDetails(eventTime);
+ Gson gson = new Gson();
+ String json_string = gson.toJson(queryDetails);
+ response.getContent().append(json_string);
+ sendResult(request, response);
+ }
+}
diff --git a/fe/src/main/java/org/apache/doris/http/rest/ShowDataAction.java
b/fe/src/main/java/org/apache/doris/http/rest/ShowDataAction.java
new file mode 100644
index 0000000..2f4e77a
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/http/rest/ShowDataAction.java
@@ -0,0 +1,88 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.http.rest;
+
+import io.netty.handler.codec.http.HttpMethod;
+import io.netty.handler.codec.http.HttpResponseStatus;
+import org.apache.doris.catalog.Catalog;
+import org.apache.doris.catalog.Database;
+import org.apache.doris.catalog.OlapTable;
+import org.apache.doris.catalog.Table;
+import org.apache.doris.catalog.Table.TableType;
+import org.apache.doris.http.ActionController;
+import org.apache.doris.http.BaseRequest;
+import org.apache.doris.http.BaseResponse;
+import org.apache.doris.http.IllegalArgException;
+
+import io.netty.handler.codec.http.HttpMethod;
+import java.util.concurrent.ConcurrentHashMap;
+import java.util.List;
+import org.apache.logging.log4j.LogManager;
+import org.apache.logging.log4j.Logger;
+
+public class ShowDataAction extends RestBaseAction {
+ private static final Logger LOG =
LogManager.getLogger(ShowDataAction.class);
+
+ public ShowDataAction(ActionController controller) {
+ super(controller);
+ }
+
+ public static void registerAction (ActionController controller) throws
IllegalArgException {
+ controller.registerHandler(HttpMethod.GET, "/api/show_data", new
ShowDataAction(controller));
+ }
+
+ public long getDataSizeOfDatabase(Database db) {
+ long totalSize = 0;
+ db.readLock();
+ // sort by table name
+ List<Table> tables = db.getTables();
+ for (Table table : tables) {
+ if (table.getType() != TableType.OLAP) {
+ continue;
+ }
+
+ long tableSize = ((OlapTable)table).getDataSize();
+ totalSize += tableSize;
+ } // end for tables
+ db.readUnlock();
+ return totalSize;
+ }
+
+ @Override
+ public void execute(BaseRequest request, BaseResponse response) {
+ String dbName = request.getSingleParameter("db");
+ ConcurrentHashMap<String, Database> fullNameToDb =
Catalog.getCurrentCatalog().getFullNameToDb();
+ long totalSize = 0;
+ if (dbName != null) {
+ Database db = fullNameToDb.get("default_cluster:"+dbName);
+ if (db == null) {
+ response.getContent().append("database " + dbName + " not
found.");
+ sendResult(request, response, HttpResponseStatus.NOT_FOUND);
+ return;
+ }
+ totalSize = getDataSizeOfDatabase(db);
+ } else {
+ for (Database db : fullNameToDb.values()) {
+ LOG.info("database name: {}", db.getFullName());
+ totalSize += getDataSizeOfDatabase(db);
+ }
+ }
+ response.getContent().append(String.valueOf(totalSize));
+ sendResult(request, response);
+ }
+}
diff --git a/fe/src/main/java/org/apache/doris/master/ReportHandler.java
b/fe/src/main/java/org/apache/doris/master/ReportHandler.java
index 18550af..ca7b8d5 100644
--- a/fe/src/main/java/org/apache/doris/master/ReportHandler.java
+++ b/fe/src/main/java/org/apache/doris/master/ReportHandler.java
@@ -100,7 +100,7 @@ public class ReportHandler extends Daemon {
public ReportHandler() {
GaugeMetric<Long> gaugeQueueSize = new GaugeMetric<Long>(
- "report_queue_size", MetricUnit.NUMBER, "report queue size") {
+ "report_queue_size", MetricUnit.NOUNIT, "report queue size") {
@Override
public Long getValue() {
return (long) reportQueue.size();
diff --git a/fe/src/main/java/org/apache/doris/metric/Metric.java
b/fe/src/main/java/org/apache/doris/metric/Metric.java
index e5e029c..b6e2a0e 100644
--- a/fe/src/main/java/org/apache/doris/metric/Metric.java
+++ b/fe/src/main/java/org/apache/doris/metric/Metric.java
@@ -33,8 +33,13 @@ public abstract class Metric<T> {
SECONDS,
BYTES,
ROWS,
- NUMBER,
PERCENT,
+ REQUESTS,
+ OPERATIONS,
+ BLOCKS,
+ ROWSETS,
+ CONNECTIONS,
+ PACKETS,
NOUNIT
};
diff --git a/fe/src/main/java/org/apache/doris/metric/MetricRepo.java
b/fe/src/main/java/org/apache/doris/metric/MetricRepo.java
index 82407bf..2bc8434 100644
--- a/fe/src/main/java/org/apache/doris/metric/MetricRepo.java
+++ b/fe/src/main/java/org/apache/doris/metric/MetricRepo.java
@@ -98,7 +98,7 @@ public final class MetricRepo {
for (EtlJobType jobType : EtlJobType.values()) {
for (JobState state : JobState.values()) {
GaugeMetric<Long> gauge = (GaugeMetric<Long>) new
GaugeMetric<Long>("job",
- MetricUnit.NUMBER, "job statistics") {
+ MetricUnit.NOUNIT, "job statistics") {
@Override
public Long getValue() {
if (!Catalog.getCurrentCatalog().isMaster()) {
@@ -122,7 +122,7 @@ public final class MetricRepo {
}
GaugeMetric<Long> gauge = (GaugeMetric<Long>) new
GaugeMetric<Long>("job",
- MetricUnit.NUMBER, "job statistics") {
+ MetricUnit.NOUNIT, "job statistics") {
@Override
public Long getValue() {
if (!Catalog.getCurrentCatalog().isMaster()) {
@@ -146,7 +146,7 @@ public final class MetricRepo {
// connections
GaugeMetric<Integer> conections = (GaugeMetric<Integer>) new
GaugeMetric<Integer>(
- "connection_total", MetricUnit.NUMBER, "total connections") {
+ "connection_total", MetricUnit.CONNECTIONS, "total
connections") {
@Override
public Integer getValue() {
return
ExecuteEnv.getInstance().getScheduler().getConnectionNum();
@@ -156,7 +156,7 @@ public final class MetricRepo {
// journal id
GaugeMetric<Long> maxJournalId = (GaugeMetric<Long>) new
GaugeMetric<Long>(
- "max_journal_id", MetricUnit.NUMBER, "max journal id of this
frontends") {
+ "max_journal_id", MetricUnit.NOUNIT, "max journal id of this
frontends") {
@Override
public Long getValue() {
EditLog editLog = Catalog.getCurrentCatalog().getEditLog();
@@ -170,7 +170,7 @@ public final class MetricRepo {
// scheduled tablet num
GaugeMetric<Long> scheduledTabletNum = (GaugeMetric<Long>) new
GaugeMetric<Long>(
- "scheduled_tablet_num", MetricUnit.NUMBER, "number of tablets
being scheduled") {
+ "scheduled_tablet_num", MetricUnit.NOUNIT, "number of tablets
being scheduled") {
@Override
public Long getValue() {
if (!Catalog.getCurrentCatalog().isMaster()) {
@@ -183,50 +183,50 @@ public final class MetricRepo {
// qps, rps and error rate
// these metrics should be set an init value, in case that metric
calculator is not running
- GAUGE_QUERY_PER_SECOND = new GaugeMetricImpl<>("qps",
MetricUnit.NUMBER, "query per second");
+ GAUGE_QUERY_PER_SECOND = new GaugeMetricImpl<>("qps",
MetricUnit.NOUNIT, "query per second");
GAUGE_QUERY_PER_SECOND.setValue(0.0);
PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_QUERY_PER_SECOND);
- GAUGE_REQUEST_PER_SECOND = new GaugeMetricImpl<>("rps",
MetricUnit.NUMBER, "request per second");
+ GAUGE_REQUEST_PER_SECOND = new GaugeMetricImpl<>("rps",
MetricUnit.NOUNIT, "request per second");
GAUGE_REQUEST_PER_SECOND.setValue(0.0);
PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_REQUEST_PER_SECOND);
- GAUGE_QUERY_ERR_RATE = new GaugeMetricImpl<>("query_err_rate",
MetricUnit.NUMBER, "query error rate");
+ GAUGE_QUERY_ERR_RATE = new GaugeMetricImpl<>("query_err_rate",
MetricUnit.NOUNIT, "query error rate");
PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_QUERY_ERR_RATE);
GAUGE_QUERY_ERR_RATE.setValue(0.0);
GAUGE_MAX_TABLET_COMPACTION_SCORE = new
GaugeMetricImpl<>("max_tablet_compaction_score",
- MetricUnit.NUMBER, "max tablet compaction score of all
backends");
+ MetricUnit.NOUNIT, "max tablet compaction score of all
backends");
PALO_METRIC_REGISTER.addPaloMetrics(GAUGE_MAX_TABLET_COMPACTION_SCORE);
GAUGE_MAX_TABLET_COMPACTION_SCORE.setValue(0L);
// 2. counter
- COUNTER_REQUEST_ALL = new LongCounterMetric("request_total",
MetricUnit.NUMBER, "total request");
+ COUNTER_REQUEST_ALL = new LongCounterMetric("request_total",
MetricUnit.REQUESTS, "total request");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_REQUEST_ALL);
- COUNTER_QUERY_ALL = new LongCounterMetric("query_total",
MetricUnit.NUMBER, "total query");
+ COUNTER_QUERY_ALL = new LongCounterMetric("query_total",
MetricUnit.REQUESTS, "total query");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_ALL);
- COUNTER_QUERY_ERR = new LongCounterMetric("query_err",
MetricUnit.NUMBER, "total error query");
+ COUNTER_QUERY_ERR = new LongCounterMetric("query_err",
MetricUnit.REQUESTS, "total error query");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_QUERY_ERR);
- COUNTER_LOAD_ADD = new LongCounterMetric("load_add",
MetricUnit.NUMBER, "total load submit");
+ COUNTER_LOAD_ADD = new LongCounterMetric("load_add",
MetricUnit.REQUESTS, "total load submit");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_LOAD_ADD);
- COUNTER_LOAD_FINISHED = new LongCounterMetric("load_finished",
MetricUnit.NUMBER, "total load finished");
+ COUNTER_LOAD_FINISHED = new LongCounterMetric("load_finished",
MetricUnit.REQUESTS, "total load finished");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_LOAD_FINISHED);
- COUNTER_EDIT_LOG_WRITE = new LongCounterMetric("edit_log_write",
MetricUnit.NUMBER, "counter of edit log write into bdbje");
+ COUNTER_EDIT_LOG_WRITE = new LongCounterMetric("edit_log_write",
MetricUnit.OPERATIONS, "counter of edit log write into bdbje");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_WRITE);
- COUNTER_EDIT_LOG_READ = new LongCounterMetric("edit_log_read",
MetricUnit.NUMBER, "counter of edit log read from bdbje");
+ COUNTER_EDIT_LOG_READ = new LongCounterMetric("edit_log_read",
MetricUnit.OPERATIONS, "counter of edit log read from bdbje");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_READ);
COUNTER_EDIT_LOG_SIZE_BYTES = new
LongCounterMetric("edit_log_size_bytes", MetricUnit.BYTES, "size of edit log");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_EDIT_LOG_SIZE_BYTES);
- COUNTER_IMAGE_WRITE = new LongCounterMetric("image_write",
MetricUnit.NUMBER, "counter of image generated");
+ COUNTER_IMAGE_WRITE = new LongCounterMetric("image_write",
MetricUnit.OPERATIONS, "counter of image generated");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_WRITE);
- COUNTER_IMAGE_PUSH = new LongCounterMetric("image_push",
MetricUnit.NUMBER,
+ COUNTER_IMAGE_PUSH = new LongCounterMetric("image_push",
MetricUnit.OPERATIONS,
"counter of image succeeded in pushing to other frontends");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_IMAGE_PUSH);
- COUNTER_TXN_REJECT = new LongCounterMetric("txn_reject",
MetricUnit.NUMBER, "counter of rejected transactions");
+ COUNTER_TXN_REJECT = new LongCounterMetric("txn_reject",
MetricUnit.REQUESTS, "counter of rejected transactions");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_REJECT);
- COUNTER_TXN_BEGIN = new LongCounterMetric("txn_begin",
MetricUnit.NUMBER, "counter of begining transactions");
+ COUNTER_TXN_BEGIN = new LongCounterMetric("txn_begin",
MetricUnit.REQUESTS, "counter of begining transactions");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_BEGIN);
- COUNTER_TXN_SUCCESS = new LongCounterMetric("txn_success",
MetricUnit.NUMBER, "counter of success transactions");
+ COUNTER_TXN_SUCCESS = new LongCounterMetric("txn_success",
MetricUnit.REQUESTS, "counter of success transactions");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_SUCCESS);
- COUNTER_TXN_FAILED = new LongCounterMetric("txn_failed",
MetricUnit.NUMBER, "counter of failed transactions");
+ COUNTER_TXN_FAILED = new LongCounterMetric("txn_failed",
MetricUnit.REQUESTS, "counter of failed transactions");
PALO_METRIC_REGISTER.addPaloMetrics(COUNTER_TXN_FAILED);
COUNTER_ROUTINE_LOAD_ROWS = new LongCounterMetric("routine_load_rows",
MetricUnit.ROWS, "total rows of routine load");
@@ -296,7 +296,7 @@ public final class MetricRepo {
// tablet number of each backends
GaugeMetric<Long> tabletNum = (GaugeMetric<Long>) new
GaugeMetric<Long>(TABLET_NUM,
- MetricUnit.NUMBER, "tablet number") {
+ MetricUnit.NOUNIT, "tablet number") {
@Override
public Long getValue() {
if (!Catalog.getCurrentCatalog().isMaster()) {
@@ -310,7 +310,7 @@ public final class MetricRepo {
// max compaction score of tablets on each backends
GaugeMetric<Long> tabletMaxCompactionScore = (GaugeMetric<Long>)
new GaugeMetric<Long>(
- TABLET_MAX_COMPACTION_SCORE, MetricUnit.NUMBER,
+ TABLET_MAX_COMPACTION_SCORE, MetricUnit.NOUNIT,
"tablet max compaction score") {
@Override
public Long getValue() {
diff --git a/fe/src/main/java/org/apache/doris/qe/ConnectContext.java
b/fe/src/main/java/org/apache/doris/qe/ConnectContext.java
index 6094818..8627087 100644
--- a/fe/src/main/java/org/apache/doris/qe/ConnectContext.java
+++ b/fe/src/main/java/org/apache/doris/qe/ConnectContext.java
@@ -25,6 +25,7 @@ import org.apache.doris.mysql.MysqlChannel;
import org.apache.doris.mysql.MysqlCommand;
import org.apache.doris.mysql.MysqlSerializer;
import org.apache.doris.plugin.AuditEvent.AuditEventBuilder;
+import org.apache.doris.qe.QueryDetail;
import org.apache.doris.thrift.TResourceInfo;
import org.apache.doris.thrift.TUniqueId;
@@ -97,6 +98,8 @@ public class ConnectContext {
protected String remoteIP;
+ protected QueryDetail queryDetail;
+
public static ConnectContext get() {
return threadLocalInfo.get();
}
@@ -135,6 +138,7 @@ public class ConnectContext {
if (channel != null) {
remoteIP = mysqlChannel.getRemoteIp();
}
+ queryDetail = null;
}
public long getStmtId() {
@@ -161,6 +165,14 @@ public class ConnectContext {
this.remoteIP = remoteIP;
}
+ public void setQueryDetail(QueryDetail queryDetail) {
+ this.queryDetail = queryDetail;
+ }
+
+ public QueryDetail getQueryDetail() {
+ return queryDetail;
+ }
+
public AuditEventBuilder getAuditEventBuilder() {
return auditEventBuilder;
}
diff --git a/fe/src/main/java/org/apache/doris/qe/ConnectProcessor.java
b/fe/src/main/java/org/apache/doris/qe/ConnectProcessor.java
index 74833ab..100f2b1 100644
--- a/fe/src/main/java/org/apache/doris/qe/ConnectProcessor.java
+++ b/fe/src/main/java/org/apache/doris/qe/ConnectProcessor.java
@@ -43,6 +43,8 @@ import org.apache.doris.mysql.MysqlSerializer;
import org.apache.doris.mysql.MysqlServerStatusFlag;
import org.apache.doris.plugin.AuditEvent.EventType;
import org.apache.doris.proto.PQueryStatistics;
+import org.apache.doris.qe.QueryDetail;
+import org.apache.doris.qe.QueryDetailQueue;
import org.apache.doris.service.FrontendOptions;
import org.apache.doris.thrift.TMasterOpRequest;
import org.apache.doris.thrift.TMasterOpResult;
@@ -106,7 +108,8 @@ public class ConnectProcessor {
private void auditAfterExec(String origStmt, StatementBase parsedStmt,
PQueryStatistics statistics) {
// slow query
- long elapseMs = System.currentTimeMillis() - ctx.getStartTime();
+ long endTime = System.currentTimeMillis();
+ long elapseMs = endTime - ctx.getStartTime();
ctx.getAuditEventBuilder().setEventType(EventType.AFTER_QUERY)
.setState(ctx.getState().toString()).setQueryTime(elapseMs)
@@ -127,6 +130,11 @@ public class ConnectProcessor {
MetricRepo.HISTO_QUERY_LATENCY.update(elapseMs);
}
ctx.getAuditEventBuilder().setIsQuery(true);
+ ctx.getQueryDetail().setEventTime(endTime);
+ ctx.getQueryDetail().setEndTime(endTime);
+ ctx.getQueryDetail().setLatency(elapseMs);
+ ctx.getQueryDetail().setState(QueryDetail.QueryMemState.FINISHED);
+ QueryDetailQueue.addOrUpdateQueryDetail(ctx.getQueryDetail());
} else {
ctx.getAuditEventBuilder().setIsQuery(false);
}
diff --git a/fe/src/main/java/org/apache/doris/qe/QueryDetail.java
b/fe/src/main/java/org/apache/doris/qe/QueryDetail.java
new file mode 100644
index 0000000..e87c464
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/qe/QueryDetail.java
@@ -0,0 +1,129 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.qe;
+
public class QueryDetail {
    public enum QueryMemState {
        RUNNING,
        FINISHED,
        FAILED,
        CANCELLED
    };

    // When a query is received, FE constructs a QueryDetail object with
    // queryId, startTime and sql set, and state set to RUNNING.
    // After the query finishes, endTime and latency are set and state is
    // updated to FINISHED/FAILED/CANCELLED according to the execution result.
    // So one query is inserted as an item and updated upon completion. To
    // distinguish the two events, an extra field named eventTime is kept.
    private long eventTime;
    private String queryId;
    private long startTime;
    // endTime and latency are updated when the query finishes.
    // Their default value is minus one (-1) while the query is running.
    private long endTime;
    private long latency;
    private QueryMemState state;
    private String database;
    private String sql;

    /**
     * @param database cluster-qualified database name ("cluster:db"); the
     *                 cluster prefix is stripped. An unqualified name is kept
     *                 as-is; null/empty becomes "".
     */
    public QueryDetail(long eventTime, String queryId, long startTime,
                       long endTime, long latency, QueryMemState state,
                       String database, String sql) {
        this.eventTime = eventTime;
        this.queryId = queryId;
        this.startTime = startTime;
        this.endTime = endTime;
        this.latency = latency;
        this.state = state;
        if (database == null || database.isEmpty()) {
            this.database = "";
        } else {
            // Eliminate the cluster name prefix if present. The original
            // split(":")[1] threw ArrayIndexOutOfBoundsException for any
            // non-empty name without a ':' separator.
            int sep = database.indexOf(':');
            this.database = (sep >= 0) ? database.substring(sep + 1) : database;
        }
        this.sql = sql;
    }

    public void setEventTime(long eventTime) {
        this.eventTime = eventTime;
    }

    public long getEventTime() {
        return eventTime;
    }

    public void setQueryId(String queryId) {
        this.queryId = queryId;
    }

    public String getQueryId() {
        return queryId;
    }

    public void setStartTime(long startTime) {
        this.startTime = startTime;
    }

    public long getStartTime() {
        return startTime;
    }

    public void setEndTime(long endTime) {
        this.endTime = endTime;
    }

    public long getEndTime() {
        return endTime;
    }

    public void setLatency(long latency) {
        this.latency = latency;
    }

    public long getLatency() {
        return latency;
    }

    public void setState(QueryMemState state) {
        this.state = state;
    }

    public QueryMemState getState() {
        return state;
    }

    public void setDatabase(String database) {
        this.database = database;
    }

    public String getDatabase() {
        return database;
    }

    public void setSql(String sql) {
        this.sql = sql;
    }

    public String getSql() {
        return sql;
    }
}
diff --git a/fe/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
b/fe/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
new file mode 100644
index 0000000..97f26d5
--- /dev/null
+++ b/fe/src/main/java/org/apache/doris/qe/QueryDetailQueue.java
@@ -0,0 +1,69 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.qe;
+
+import com.google.common.collect.Lists;
+import com.google.common.collect.Maps;
+import java.util.Iterator;
+import java.util.LinkedList;
+import java.util.List;
+import java.util.Map;
+import org.apache.doris.qe.QueryDetail;
+
+// Queue of QueryDetail.
+// It's used to collect queries for monitor.
+// The default copacity is 10000.
+public class QueryDetailQueue {
+ private static Map<String, QueryDetail> runningQueries = Maps.newHashMap();
+ private static LinkedList<QueryDetail> totalQueries = new
LinkedList<QueryDetail>();
+ private static int queryCapacity = 10000;
+
+ public static synchronized void addOrUpdateQueryDetail(QueryDetail
queryDetail) {
+ if (runningQueries.get(queryDetail.getQueryId()) == null) {
+ if (queryDetail.getState() == QueryDetail.QueryMemState.RUNNING) {
+ runningQueries.put(queryDetail.getQueryId(), queryDetail);
+ totalQueries.add(queryDetail);
+ } else {
+ totalQueries.add(queryDetail);
+ }
+ } else {
+ if (queryDetail.getState() != QueryDetail.QueryMemState.RUNNING) {
+ QueryDetail qDetail =
runningQueries.remove(queryDetail.getQueryId());
+ qDetail.setLatency(queryDetail.getLatency());
+ qDetail.setState(queryDetail.getState());
+ }
+ }
+ if (totalQueries.size() > queryCapacity) {
+ QueryDetail qDetail = totalQueries.remove();
+ runningQueries.remove(qDetail.getQueryId());
+ }
+ }
+
+ public static synchronized List<QueryDetail> getQueryDetails(long
eventTime) {
+ List<QueryDetail> results = Lists.newArrayList();
+ Iterator<QueryDetail> it = totalQueries.iterator();
+ while(it.hasNext()) {
+ QueryDetail queryDetail = it.next();
+ if (queryDetail.getEventTime() > eventTime) {
+ results.add(queryDetail);
+ }
+ }
+ return results;
+ }
+
+};
diff --git a/fe/src/main/java/org/apache/doris/qe/StmtExecutor.java
b/fe/src/main/java/org/apache/doris/qe/StmtExecutor.java
index ec4d529..876f2f6 100644
--- a/fe/src/main/java/org/apache/doris/qe/StmtExecutor.java
+++ b/fe/src/main/java/org/apache/doris/qe/StmtExecutor.java
@@ -64,6 +64,8 @@ import org.apache.doris.mysql.MysqlSerializer;
import org.apache.doris.mysql.privilege.PrivPredicate;
import org.apache.doris.planner.Planner;
import org.apache.doris.proto.PQueryStatistics;
+import org.apache.doris.qe.QueryDetail;
+import org.apache.doris.qe.QueryDetailQueue;
import org.apache.doris.qe.QueryState.MysqlStateType;
import org.apache.doris.rewrite.ExprRewriter;
import org.apache.doris.rpc.RpcException;
@@ -223,6 +225,10 @@ public class StmtExecutor {
long beginTimeInNanoSecond = TimeUtils.getStartTime();
context.setStmtId(STMT_ID_GENERATOR.incrementAndGet());
+
+ // set query id
+ UUID uuid = UUID.randomUUID();
+ context.setQueryId(new TUniqueId(uuid.getMostSignificantBits(),
uuid.getLeastSignificantBits()));
try {
// analyze this query
analyze(context.getSessionVariable().toThrift());
@@ -340,7 +346,6 @@ public class StmtExecutor {
profile.computeTimeInChildProfile();
StringBuilder builder = new StringBuilder();
profile.prettyPrint(builder, "");
- System.out.println(builder.toString());
ProfileManager.getInstance().pushProfile(profile);
}
@@ -559,9 +564,14 @@ public class StmtExecutor {
context.getMysqlChannel().reset();
QueryStmt queryStmt = (QueryStmt) parsedStmt;
- // assign query id before explain query return
- UUID uuid = UUID.randomUUID();
- context.setQueryId(new TUniqueId(uuid.getMostSignificantBits(),
uuid.getLeastSignificantBits()));
+ QueryDetail queryDetail = new QueryDetail(context.getStartTime(),
+
DebugUtil.printId(context.queryId()),
+ context.getStartTime(), -1,
-1,
+
QueryDetail.QueryMemState.RUNNING,
+ context.getDatabase(),
+ originStmt.originStmt);
+ context.setQueryDetail(queryDetail);
+ QueryDetailQueue.addOrUpdateQueryDetail(queryDetail);
if (queryStmt.isExplain()) {
String explainString =
planner.getExplainString(planner.getFragments(), TExplainLevel.VERBOSE);
diff --git
a/fe/src/test/java/org/apache/doris/common/util/RuntimeProfileTest.java
b/fe/src/test/java/org/apache/doris/common/util/RuntimeProfileTest.java
index b1c10ef..6ca409c 100644
--- a/fe/src/test/java/org/apache/doris/common/util/RuntimeProfileTest.java
+++ b/fe/src/test/java/org/apache/doris/common/util/RuntimeProfileTest.java
@@ -95,7 +95,7 @@ public class RuntimeProfileTest {
StringBuilder builder = new StringBuilder();
profile.prettyPrint(builder, "");
Assert.assertEquals(builder.toString(),
- "profileName:\n key: value4\n key3: value3\n");
+ "profileName:\n - key: value4\n - key3: value3\n");
}
@Test
diff --git a/fe/src/test/java/org/apache/doris/qe/QueryDetailQueueTest.java
b/fe/src/test/java/org/apache/doris/qe/QueryDetailQueueTest.java
new file mode 100644
index 0000000..cd7b707
--- /dev/null
+++ b/fe/src/test/java/org/apache/doris/qe/QueryDetailQueueTest.java
@@ -0,0 +1,73 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+package org.apache.doris.qe;
+
+import com.google.common.collect.Lists;
+import com.google.gson.Gson;
+
+import org.apache.doris.qe.QueryDetail;
+import org.apache.doris.qe.QueryDetailQueue;
+import org.junit.Assert;
+import org.junit.Before;
+import org.junit.Ignore;
+import org.junit.Test;
+
+import java.util.List;
+
public class QueryDetailQueueTest {
    // End-to-end check of QueryDetailQueue: insert a RUNNING query, verify the
    // eventTime filter of getQueryDetails, then mark it FINISHED and verify the
    // stored entry was updated in place (same queryId, new state/latency).
    // NOTE(review): the expected JSON strings depend on Gson emitting fields in
    // declaration order and on QueryDetailQueue's static state being empty when
    // this test runs — confirm if tests are ever parallelized.
    @Test
    public void testQueryDetailQueue() {
        // Fixed timestamp so the serialized JSON below is deterministic.
        long eventTime = 1592208814796L;
        QueryDetail queryDetail = new QueryDetail(eventTime, "219a2d5443c542d4-8fc938db37c892e3",
                                                  eventTime, -1, -1, QueryDetail.QueryMemState.RUNNING,
                                                  "default_cluster:testDb", "select * from table1 limit 1");
        QueryDetailQueue.addOrUpdateQueryDetail(queryDetail);

        // getQueryDetails returns only entries with eventTime strictly greater
        // than the argument, so querying at the exact eventTime yields nothing.
        List<QueryDetail> queryDetails = QueryDetailQueue.getQueryDetails(eventTime);
        Assert.assertTrue(queryDetails.size() == 0);

        queryDetails = QueryDetailQueue.getQueryDetails(eventTime - 1);
        Assert.assertTrue(queryDetails.size() == 1);

        // The cluster prefix "default_cluster:" must have been stripped from
        // the database field.
        Gson gson = new Gson();
        String json_string = gson.toJson(queryDetails);
        String query_detail_string = "[{\"eventTime\":1592208814796,"
                                   + "\"queryId\":\"219a2d5443c542d4-8fc938db37c892e3\","
                                   + "\"startTime\":1592208814796,\"endTime\":-1,\"latency\":-1,"
                                   + "\"state\":\"RUNNING\",\"database\":\"testDb\","
                                   + "\"sql\":\"select * from table1 limit 1\"}]";
        Assert.assertEquals(json_string, query_detail_string);

        // Simulate query completion: bump eventTime so the update is visible
        // to pollers that already saw the RUNNING event.
        queryDetail.setEventTime(eventTime + 1);
        queryDetail.setEndTime(eventTime + 1);
        queryDetail.setLatency(1);
        queryDetail.setState(QueryDetail.QueryMemState.FINISHED);
        QueryDetailQueue.addOrUpdateQueryDetail(queryDetail);

        // The queue still holds one entry (updated in place, not duplicated),
        // now visible past the original eventTime.
        queryDetails = QueryDetailQueue.getQueryDetails(eventTime);
        Assert.assertTrue(queryDetails.size() == 1);

        json_string = gson.toJson(queryDetails);
        query_detail_string = "[{\"eventTime\":1592208814797,"
                            + "\"queryId\":\"219a2d5443c542d4-8fc938db37c892e3\","
                            + "\"startTime\":1592208814796,\"endTime\":1592208814797,"
                            + "\"latency\":1,\"state\":\"FINISHED\",\"database\":\"testDb\","
                            + "\"sql\":\"select * from table1 limit 1\"}]";
        Assert.assertEquals(json_string, query_detail_string);
    }
}
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]