This is an automated email from the ASF dual-hosted git repository.
dataroaring pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris.git
The following commit(s) were added to refs/heads/master by this push:
new 63a9a886f5 [enhance](S3) add s3 bvar metrics for all s3 operation (#22105)
63a9a886f5 is described below
commit 63a9a886f551b7b7baee11e73eb70194c7bdb949
Author: AlexYue <[email protected]>
AuthorDate: Sun Jul 30 21:09:17 2023 +0800
[enhance](S3) add s3 bvar metrics for all s3 operation (#22105)
---
be/src/io/fs/s3_file_reader.cpp | 2 ++
be/src/io/fs/s3_file_system.cpp | 11 +++++++++++
be/src/io/fs/s3_file_writer.cpp | 6 ++++++
be/src/util/s3_util.cpp | 13 +++++++++++++
be/src/util/s3_util.h | 16 ++++++++++++++++
5 files changed, 48 insertions(+)
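
For readers unfamiliar with bvar, here is a minimal, self-contained sketch of the counting pattern this patch applies: define a bvar::Adder<uint64_t> counter and bump it with operator<< immediately after each AWS SDK call. Everything named demo_* below is illustrative and not part of the patch; only bvar::Adder, its (prefix, name) constructor, operator<<, and get_value() are the actual library API.

    // Sketch only; assumes the brpc/bvar library is available.
    #include <bvar/reducer.h>

    #include <cstdint>
    #include <iostream>

    // The patch defines counters like this in s3_util.cpp and exposes them
    // through extern declarations in s3_util.h so callers in io/fs/ can bump them.
    bvar::Adder<uint64_t> demo_s3_get_total("demo_s3_get", "total_num");

    void demo_get_object() {
        // ... issue the SDK call here, e.g. client->GetObject(request) ...
        demo_s3_get_total << 1;  // count the request regardless of its outcome
    }

    int main() {
        demo_get_object();
        std::cout << demo_s3_get_total.get_value() << std::endl;  // prints 1
        return 0;
    }
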
diff --git a/be/src/io/fs/s3_file_reader.cpp b/be/src/io/fs/s3_file_reader.cpp
index 2403b2497e..950f9cff17 100644
--- a/be/src/io/fs/s3_file_reader.cpp
+++ b/be/src/io/fs/s3_file_reader.cpp
@@ -33,6 +33,7 @@
#include "common/compiler_util.h" // IWYU pragma: keep
#include "io/fs/s3_common.h"
#include "util/doris_metrics.h"
+#include "util/s3_util.h"
namespace doris {
namespace io {
@@ -95,6 +96,7 @@ Status S3FileReader::read_at_impl(size_t offset, Slice result, size_t* bytes_rea
return Status::InternalError("init s3 client error");
}
auto outcome = client->GetObject(request);
+ s3_bvar::s3_get_total << 1;
if (!outcome.IsSuccess()) {
return Status::IOError("failed to read from {}: {}", _path.native(),
outcome.GetError().GetMessage());
diff --git a/be/src/io/fs/s3_file_system.cpp b/be/src/io/fs/s3_file_system.cpp
index ca4fd0bda8..8bde016aae 100644
--- a/be/src/io/fs/s3_file_system.cpp
+++ b/be/src/io/fs/s3_file_system.cpp
@@ -161,6 +161,7 @@ Status S3FileSystem::delete_file_impl(const Path& file) {
request.WithBucket(_s3_conf.bucket).WithKey(key);
auto outcome = client->DeleteObject(request);
+ s3_bvar::s3_delete_total << 1;
if (outcome.IsSuccess() ||
outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::NOT_FOUND) {
return Status::OK();
@@ -184,6 +185,7 @@ Status S3FileSystem::delete_directory_impl(const Path& dir) {
bool is_trucated = false;
do {
auto outcome = client->ListObjectsV2(request);
+ s3_bvar::s3_list_total << 1;
if (!outcome.IsSuccess()) {
return Status::IOError("failed to list objects when delete dir {}:
{}", dir.native(),
error_msg(prefix, outcome));
@@ -199,6 +201,7 @@ Status S3FileSystem::delete_directory_impl(const Path& dir) {
del.WithObjects(std::move(objects)).SetQuiet(true);
delete_request.SetDelete(std::move(del));
auto delete_outcome = client->DeleteObjects(delete_request);
+ s3_bvar::s3_delete_total << 1;
if (!delete_outcome.IsSuccess()) {
return Status::IOError("failed to delete dir {}: {}",
dir.native(),
error_msg(prefix, delete_outcome));
@@ -243,6 +246,7 @@ Status S3FileSystem::batch_delete_impl(const std::vector<Path>& remote_files) {
del.WithObjects(std::move(objects)).SetQuiet(true);
delete_request.SetDelete(std::move(del));
auto delete_outcome = client->DeleteObjects(delete_request);
+ s3_bvar::s3_delete_total << 1;
if (UNLIKELY(!delete_outcome.IsSuccess())) {
return Status::IOError(
"failed to delete objects: {}",
@@ -268,6 +272,7 @@ Status S3FileSystem::exists_impl(const Path& path, bool* res) const {
request.WithBucket(_s3_conf.bucket).WithKey(key);
auto outcome = client->HeadObject(request);
+ s3_bvar::s3_head_total << 1;
if (outcome.IsSuccess()) {
*res = true;
} else if (outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::NOT_FOUND) {
@@ -288,6 +293,7 @@ Status S3FileSystem::file_size_impl(const Path& file, int64_t* file_size) const
request.WithBucket(_s3_conf.bucket).WithKey(key);
auto outcome = client->HeadObject(request);
+ s3_bvar::s3_head_total << 1;
if (outcome.IsSuccess()) {
*file_size = outcome.GetResult().GetContentLength();
} else {
@@ -314,6 +320,7 @@ Status S3FileSystem::list_impl(const Path& dir, bool only_file, std::vector<File
bool is_trucated = false;
do {
auto outcome = client->ListObjectsV2(request);
+ s3_bvar::s3_list_total << 1;
if (!outcome.IsSuccess()) {
return Status::IOError("failed to list {}: {}", dir.native(),
error_msg(prefix, outcome));
@@ -430,6 +437,7 @@ Status S3FileSystem::direct_upload_impl(const Path& remote_file, const std::stri
remote_file.native());
}
Aws::S3::Model::PutObjectOutcome response = _client->PutObject(request);
+ s3_bvar::s3_put_total << 1;
if (response.IsSuccess()) {
return Status::OK();
} else {
@@ -450,6 +458,7 @@ Status S3FileSystem::download_impl(const Path& remote_file, const Path& local_fi
Aws::S3::Model::GetObjectRequest request;
request.WithBucket(_s3_conf.bucket).WithKey(key);
Aws::S3::Model::GetObjectOutcome response = _client->GetObject(request);
+ s3_bvar::s3_get_total << 1;
if (response.IsSuccess()) {
Aws::OFStream local_file_s;
local_file_s.open(local_file, std::ios::out | std::ios::binary);
@@ -473,6 +482,7 @@ Status S3FileSystem::direct_download_impl(const Path& remote, std::string* conte
GET_KEY(key, remote);
request.WithBucket(_s3_conf.bucket).WithKey(key);
Aws::S3::Model::GetObjectOutcome response = _client->GetObject(request);
+ s3_bvar::s3_get_total << 1;
if (response.IsSuccess()) {
std::stringstream ss;
ss << response.GetResult().GetBody().rdbuf();
@@ -493,6 +503,7 @@ Status S3FileSystem::copy(const Path& src, const Path& dst) {
.WithKey(dst_key)
.WithBucket(_s3_conf.bucket);
Aws::S3::Model::CopyObjectOutcome response = _client->CopyObject(request);
+ s3_bvar::s3_copy_object_total << 1;
if (response.IsSuccess()) {
return Status::OK();
} else {
diff --git a/be/src/io/fs/s3_file_writer.cpp b/be/src/io/fs/s3_file_writer.cpp
index 4b70aaf344..6b7bd6ad8e 100644
--- a/be/src/io/fs/s3_file_writer.cpp
+++ b/be/src/io/fs/s3_file_writer.cpp
@@ -50,6 +50,7 @@
#include "util/defer_op.h"
#include "util/doris_metrics.h"
#include "util/runtime_profile.h"
+#include "util/s3_util.h"
namespace Aws {
namespace S3 {
@@ -112,6 +113,7 @@ Status S3FileWriter::_create_multi_upload_request() {
create_request.SetContentType("application/octet-stream");
auto outcome = _client->CreateMultipartUpload(create_request);
+ s3_bvar::s3_multi_part_upload_total << 1;
if (outcome.IsSuccess()) {
_upload_id = outcome.GetResult().GetUploadId();
@@ -151,6 +153,7 @@ Status S3FileWriter::abort() {
AbortMultipartUploadRequest request;
request.WithBucket(_bucket).WithKey(_key).WithUploadId(_upload_id);
auto outcome = _client->AbortMultipartUpload(request);
+ s3_bvar::s3_multi_part_upload_total << 1;
if (outcome.IsSuccess() ||
outcome.GetError().GetErrorType() == Aws::S3::S3Errors::NO_SUCH_UPLOAD ||
outcome.GetError().GetResponseCode() == Aws::Http::HttpResponseCode::NOT_FOUND) {
@@ -267,6 +270,7 @@ void S3FileWriter::_upload_one_part(int64_t part_num, S3FileBuffer& buf) {
upload_request.SetContentType("application/octet-stream");
auto upload_part_callable = _client->UploadPartCallable(upload_request);
+ s3_bvar::s3_multi_part_upload_total << 1;
UploadPartOutcome upload_part_outcome = upload_part_callable.get();
if (!upload_part_outcome.IsSuccess()) {
@@ -317,6 +321,7 @@ Status S3FileWriter::_complete() {
complete_request.WithMultipartUpload(completed_upload);
auto compute_outcome = _client->CompleteMultipartUpload(complete_request);
+ s3_bvar::s3_multi_part_upload_total << 1;
if (!compute_outcome.IsSuccess()) {
auto s = Status::IOError(
@@ -356,6 +361,7 @@ void S3FileWriter::_put_object(S3FileBuffer& buf) {
request.SetContentLength(buf.get_size());
request.SetContentType("application/octet-stream");
auto response = _client->PutObject(request);
+ s3_bvar::s3_put_total << 1;
if (!response.IsSuccess()) {
_st = Status::InternalError("Error: [{}:{}, responseCode:{}]",
response.GetError().GetExceptionName(),
diff --git a/be/src/util/s3_util.cpp b/be/src/util/s3_util.cpp
index 9c93832209..07a6a72768 100644
--- a/be/src/util/s3_util.cpp
+++ b/be/src/util/s3_util.cpp
@@ -23,6 +23,7 @@
#include <aws/core/utils/logging/LogSystemInterface.h>
#include <aws/core/utils/memory/stl/AWSStringStream.h>
#include <aws/s3/S3Client.h>
+#include <bvar/reducer.h>
#include <util/string_util.h>
#include <atomic>
@@ -37,6 +38,18 @@
namespace doris {
+namespace s3_bvar {
+bvar::Adder<uint64_t> s3_get_total("s3_get", "total_num");
+bvar::Adder<uint64_t> s3_put_total("s3_put", "total_num");
+bvar::Adder<uint64_t> s3_delete_total("s3_delete", "total_num");
+bvar::Adder<uint64_t> s3_head_total("s3_head", "total_num");
+bvar::Adder<uint64_t> s3_multi_part_upload_total("s3_multi_part_upload", "total_num");
+bvar::Adder<uint64_t> s3_list_total("s3_list", "total_num");
+bvar::Adder<uint64_t> s3_list_object_versions_total("s3_list_object_versions", "total_num");
+bvar::Adder<uint64_t> s3_get_bucket_version_total("s3_get_bucket_version", "total_num");
+bvar::Adder<uint64_t> s3_copy_object_total("s3_copy_object", "total_num");
+}; // namespace s3_bvar
+
class DorisAWSLogger final : public Aws::Utils::Logging::LogSystemInterface {
public:
DorisAWSLogger() : _log_level(Aws::Utils::Logging::LogLevel::Info) {}
diff --git a/be/src/util/s3_util.h b/be/src/util/s3_util.h
index 9611026ecc..1f00a82aa9 100644
--- a/be/src/util/s3_util.h
+++ b/be/src/util/s3_util.h
@@ -36,9 +36,25 @@ namespace S3 {
class S3Client;
} // namespace S3
} // namespace Aws
+namespace bvar {
+template <typename T>
+class Adder;
+}
namespace doris {
+namespace s3_bvar {
+extern bvar::Adder<uint64_t> s3_get_total;
+extern bvar::Adder<uint64_t> s3_put_total;
+extern bvar::Adder<uint64_t> s3_delete_total;
+extern bvar::Adder<uint64_t> s3_head_total;
+extern bvar::Adder<uint64_t> s3_multi_part_upload_total;
+extern bvar::Adder<uint64_t> s3_list_total;
+extern bvar::Adder<uint64_t> s3_list_object_versions_total;
+extern bvar::Adder<uint64_t> s3_get_bucket_version_total;
+extern bvar::Adder<uint64_t> s3_copy_object_total;
+}; // namespace s3_bvar
+
class S3URI;
const static std::string S3_AK = "AWS_ACCESS_KEY";
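
A note on reading the new counters (hedged, since exposure depends on how the BE process is configured): bvar variables constructed with a (prefix, name) pair are typically exported under the combined name, e.g. s3_get_total_num, on the brpc /vars endpoint when a brpc server is running in the process, and they can also be read in code via get_value(). A small sketch, assuming util/s3_util.h is included; the helper function name is illustrative only:

    #include <bvar/reducer.h>  // full bvar::Adder definition (s3_util.h only forward-declares it)

    #include <cstdint>
    #include <iostream>

    #include "util/s3_util.h"

    // Illustrative helper, not part of the patch.
    void dump_s3_get_count() {
        // bvar::Adder<T>::get_value() returns the current accumulated sum.
        uint64_t n = doris::s3_bvar::s3_get_total.get_value();
        std::cout << "S3 GetObject requests so far: " << n << std::endl;
    }
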