This is an automated email from the ASF dual-hosted git repository.
gehafearless pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/incubator-pegasus.git
The following commit(s) were added to refs/heads/master by this push:
new 18ddcec14 refactor(fmt): Use operator<< instead of explicit to_string()
(#1874)
18ddcec14 is described below
commit 18ddcec14aaa4c71d8ecbfd135131cf95ee13715
Author: Yingchun Lai <[email protected]>
AuthorDate: Tue Jan 30 16:27:13 2024 +0800
refactor(fmt): Use operator<< instead of explicit to_string() (#1874)
There are no functional changes; this is only a refactor to simplify the code.
---
src/block_service/block_service_manager.cpp | 4 +-
src/block_service/test/hdfs_service_test.cpp | 9 +--
src/client/replication_ddl_client.cpp | 24 +++----
src/failure_detector/failure_detector.cpp | 2 +-
src/http/test/main.cpp | 3 +-
src/meta/backup_engine.cpp | 26 ++++---
src/meta/cluster_balance_policy.cpp | 8 +--
src/meta/duplication/duplication_info.h | 9 +++
src/meta/duplication/meta_duplication_service.cpp | 12 ++--
src/meta/load_balance_policy.cpp | 14 ++--
src/meta/meta_backup_service.cpp | 56 +++++++--------
src/meta/meta_bulk_load_ingestion_context.cpp | 4 +-
src/meta/meta_bulk_load_service.cpp | 79 ++++++++++------------
src/meta/meta_http_service.cpp | 4 +-
src/meta/meta_service.cpp | 4 +-
src/meta/partition_guardian.cpp | 4 +-
src/meta/server_state.cpp | 33 +++++----
src/meta/test/duplication_info_test.cpp | 2 +-
src/meta/test/meta_duplication_service_test.cpp | 3 +-
src/meta/test/meta_test_base.cpp | 8 +--
src/meta/test/misc/misc.cpp | 4 +-
src/redis_protocol/proxy_lib/redis_parser.cpp | 2 +-
src/replica/backup/replica_backup_server.cpp | 2 +-
src/replica/bulk_load/replica_bulk_loader.cpp | 45 ++++++------
src/replica/duplication/duplication_sync_timer.cpp | 4 +-
src/replica/duplication/mutation_batch.cpp | 2 +-
src/replica/duplication/replica_follower.cpp | 11 ++-
src/replica/replica_2pc.cpp | 6 +-
src/replica/replica_check.cpp | 2 +-
src/replica/replica_context.cpp | 2 +-
src/replica/replica_disk_migrator.cpp | 8 +--
src/replica/replica_learn.cpp | 12 ++--
src/replica/replica_restore.cpp | 7 +-
src/replica/replica_stub.cpp | 22 ++----
src/replica/split/replica_split_manager.cpp | 21 +++---
src/replica/storage/simple_kv/test/case.cpp | 29 +++-----
src/replica/storage/simple_kv/test/case.h | 6 ++
src/replica/storage/simple_kv/test/common.cpp | 19 +++---
src/replica/storage/simple_kv/test/common.h | 4 ++
src/replica/test/mutation_log_test.cpp | 4 +-
src/replica/test/replica_disk_migrate_test.cpp | 33 ++++-----
src/runtime/fault_injector.cpp | 4 +-
src/runtime/tracer.cpp | 4 +-
src/security/client_negotiation.cpp | 2 +-
src/security/negotiation_manager.cpp | 5 +-
src/security/server_negotiation.cpp | 4 +-
src/server/hotspot_partition_calculator.cpp | 7 +-
src/server/pegasus_server_impl.cpp | 13 ++--
src/server/pegasus_server_write.cpp | 11 ++-
src/utils/blob.h | 8 +++
50 files changed, 293 insertions(+), 318 deletions(-)
diff --git a/src/block_service/block_service_manager.cpp
b/src/block_service/block_service_manager.cpp
index b771a432b..f77361cac 100644
--- a/src/block_service/block_service_manager.cpp
+++ b/src/block_service/block_service_manager.cpp
@@ -161,7 +161,7 @@ error_code block_service_manager::download_file(const
std::string &remote_dir,
create_block_file_sync(remote_file_name, false /*ignore file meta*/,
fs, &tracker);
error_code err = create_resp.err;
if (err != ERR_OK) {
- LOG_ERROR("create file({}) failed with error({})", remote_file_name,
err.to_string());
+ LOG_ERROR("create file({}) failed with error({})", remote_file_name,
err);
return err;
}
block_file_ptr bf = create_resp.file_handle;
@@ -180,7 +180,7 @@ error_code block_service_manager::download_file(const
std::string &remote_dir,
}
LOG_INFO("download file({}) succeed, file_size = {}, md5 = {}",
- local_file_name.c_str(),
+ local_file_name,
resp.downloaded_size,
resp.file_md5);
download_file_size = resp.downloaded_size;
diff --git a/src/block_service/test/hdfs_service_test.cpp
b/src/block_service/test/hdfs_service_test.cpp
index 638901822..54a1337b3 100644
--- a/src/block_service/test/hdfs_service_test.cpp
+++ b/src/block_service/test/hdfs_service_test.cpp
@@ -26,6 +26,7 @@
#include <memory>
#include <string>
#include <vector>
+#include <fmt/core.h>
#include "block_service/block_service.h"
#include "block_service/hdfs/hdfs_service.h"
@@ -232,7 +233,7 @@ TEST_P(HDFSClientTest, test_upload_and_download)
ASSERT_TRUE(dsn::ERR_OK == rem_resp.err || dsn::ERR_OBJECT_NOT_FOUND ==
rem_resp.err);
// 2. create file.
- printf("create and upload: %s.\n", kRemoteTestFile.c_str());
+ fmt::print("create and upload: {}.\n", kRemoteTestFile);
create_file_response cf_resp;
s->create_file(create_file_request{kRemoteTestFile, true},
LPC_TEST_HDFS,
@@ -266,7 +267,7 @@ TEST_P(HDFSClientTest, test_upload_and_download)
// 5. download file.
download_response d_resp;
- printf("test download %s.\n", kRemoteTestFile.c_str());
+ fmt::print("test download {}.\n", kRemoteTestFile);
s->create_file(create_file_request{kRemoteTestFile, false},
LPC_TEST_HDFS,
[&cf_resp](const create_file_response &resp) { cf_resp =
resp; },
@@ -370,7 +371,7 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download)
p->upload(upload_request{local_file_names[i]},
LPC_TEST_HDFS,
[p, &local_file_names, &files_size, i](const
upload_response &resp) {
- printf("file %s upload finished.\n",
local_file_names[i].c_str());
+ fmt::print("file {} upload finished.\n",
local_file_names[i]);
ASSERT_EQ(dsn::ERR_OK, resp.err);
ASSERT_EQ(files_size[i], resp.uploaded_size);
ASSERT_EQ(files_size[i], p->get_size());
@@ -406,7 +407,7 @@ TEST_P(HDFSClientTest, test_concurrent_upload_download)
LPC_TEST_HDFS,
[&files_md5sum, &downloaded_file_names, &files_size, i, p](
const download_response &dr) {
- printf("file %s download finished\n",
downloaded_file_names[i].c_str());
+ fmt::print("file {} download finished\n",
downloaded_file_names[i]);
ASSERT_EQ(dsn::ERR_OK, dr.err);
ASSERT_EQ(files_size[i], dr.downloaded_size);
ASSERT_EQ(files_size[i], p->get_size());
diff --git a/src/client/replication_ddl_client.cpp
b/src/client/replication_ddl_client.cpp
index e08661305..a98dcbf8e 100644
--- a/src/client/replication_ddl_client.cpp
+++ b/src/client/replication_ddl_client.cpp
@@ -114,8 +114,7 @@ dsn::error_code
replication_ddl_client::wait_app_ready(const std::string &app_na
if (query_task->error() != dsn::ERR_OK) {
std::cout << "create app " << app_name
- << " failed: [query] call server error: " <<
query_task->error().to_string()
- << std::endl;
+ << " failed: [query] call server error: " <<
query_task->error() << std::endl;
return query_task->error();
}
@@ -123,8 +122,7 @@ dsn::error_code
replication_ddl_client::wait_app_ready(const std::string &app_na
::dsn::unmarshall(query_task->get_response(), query_resp);
if (query_resp.err != dsn::ERR_OK) {
std::cout << "create app " << app_name
- << " failed: [query] received server error: " <<
query_resp.err.to_string()
- << std::endl;
+ << " failed: [query] received server error: " <<
query_resp.err << std::endl;
return query_resp.err;
}
CHECK_EQ(partition_count, query_resp.partition_count);
@@ -196,15 +194,13 @@ dsn::error_code replication_ddl_client::create_app(const
std::string &app_name,
if (resp_task->error() != dsn::ERR_OK) {
std::cout << "create app " << app_name
- << " failed: [create] call server error: " <<
resp_task->error().to_string()
- << std::endl;
+ << " failed: [create] call server error: " <<
resp_task->error() << std::endl;
return resp_task->error();
}
if (resp.err != dsn::ERR_OK) {
std::cout << "create app " << app_name
- << " failed: [create] received server error: " <<
resp.err.to_string()
- << std::endl;
+ << " failed: [create] received server error: " << resp.err
<< std::endl;
return resp.err;
}
@@ -911,7 +907,7 @@ dsn::error_code replication_ddl_client::do_recovery(const
std::vector<rpc_addres
for (const dsn::rpc_address &node : replica_nodes) {
if (std::find(req->recovery_set.begin(), req->recovery_set.end(),
node) !=
req->recovery_set.end()) {
- out << "duplicate replica node " << node.to_string() << ", just
ingore it" << std::endl;
+ out << "duplicate replica node " << node << ", just ignore it" <<
std::endl;
} else {
req->recovery_set.push_back(node);
}
@@ -929,7 +925,7 @@ dsn::error_code replication_ddl_client::do_recovery(const
std::vector<rpc_addres
out << "Node list:" << std::endl;
out << "=============================" << std::endl;
for (auto &node : req->recovery_set) {
- out << node.to_string() << std::endl;
+ out << node << std::endl;
}
out << "=============================" << std::endl;
@@ -950,7 +946,7 @@ dsn::error_code replication_ddl_client::do_recovery(const
std::vector<rpc_addres
} else {
configuration_recovery_response resp;
dsn::unmarshall(response_task->get_response(), resp);
- out << "Recover result: " << resp.err.to_string() << std::endl;
+ out << "Recover result: " << resp.err << std::endl;
if (!resp.hint_message.empty()) {
out << "=============================" << std::endl;
out << resp.hint_message;
@@ -1099,7 +1095,7 @@ dsn::error_code
replication_ddl_client::disable_backup_policy(const std::string
std::cout << "disable backup policy failed: " << resp.hint_message <<
std::endl;
return resp.err;
} else {
- std::cout << "disable policy result: " << resp.err.to_string() <<
std::endl;
+ std::cout << "disable policy result: " << resp.err << std::endl;
if (!resp.hint_message.empty()) {
std::cout << "=============================" << std::endl;
std::cout << resp.hint_message << std::endl;
@@ -1131,7 +1127,7 @@ dsn::error_code
replication_ddl_client::enable_backup_policy(const std::string &
std::cout << "policy is under backup, please try disable later" <<
std::endl;
return ERR_OK;
} else {
- std::cout << "enable policy result: " << resp.err.to_string() <<
std::endl;
+ std::cout << "enable policy result: " << resp.err << std::endl;
if (!resp.hint_message.empty()) {
std::cout << "=============================" << std::endl;
std::cout << resp.hint_message << std::endl;
@@ -1280,7 +1276,7 @@ replication_ddl_client::update_backup_policy(const
std::string &policy_name,
std::cout << "modify backup policy failed: " << resp.hint_message <<
std::endl;
return resp.err;
} else {
- std::cout << "Modify policy result: " << resp.err.to_string() <<
std::endl;
+ std::cout << "Modify policy result: " << resp.err << std::endl;
if (!resp.hint_message.empty()) {
std::cout << "=============================" << std::endl;
std::cout << resp.hint_message << std::endl;
diff --git a/src/failure_detector/failure_detector.cpp
b/src/failure_detector/failure_detector.cpp
index 9baa237cf..af264d21f 100644
--- a/src/failure_detector/failure_detector.cpp
+++ b/src/failure_detector/failure_detector.cpp
@@ -344,7 +344,7 @@ std::string failure_detector::get_allow_list(const
std::vector<std::string> &arg
for (auto iter = _allow_list.begin(); iter != _allow_list.end(); ++iter) {
if (iter != _allow_list.begin())
oss << ",";
- oss << iter->to_string();
+ oss << *iter;
}
return oss.str();
}
diff --git a/src/http/test/main.cpp b/src/http/test/main.cpp
index a020e9d35..bde70c9d0 100644
--- a/src/http/test/main.cpp
+++ b/src/http/test/main.cpp
@@ -73,8 +73,7 @@ private:
std::string postfix;
if (target_method == dsn::http_method::POST) {
- postfix = " ";
- postfix += req.body.to_string();
+ postfix = fmt::format(" {}", req.body);
}
resp.body =
diff --git a/src/meta/backup_engine.cpp b/src/meta/backup_engine.cpp
index c7d3769ce..88791eb5e 100644
--- a/src/meta/backup_engine.cpp
+++ b/src/meta/backup_engine.cpp
@@ -188,7 +188,7 @@ void backup_engine::backup_app_partition(const gpid &pid)
LOG_WARNING(
"backup_id({}): partition {} doesn't have a primary now, retry to
backup it later.",
_cur_backup.backup_id,
- pid.to_string());
+ pid);
tasking::enqueue(LPC_DEFAULT_CALLBACK,
&_tracker,
[this, pid]() { backup_app_partition(pid); },
@@ -211,8 +211,8 @@ void backup_engine::backup_app_partition(const gpid &pid)
LOG_INFO("backup_id({}): send backup request to partition {}, target_addr
= {}",
_cur_backup.backup_id,
- pid.to_string(),
- partition_primary.to_string());
+ pid,
+ partition_primary);
backup_rpc rpc(std::move(req), RPC_COLD_BACKUP, 10000_ms, 0,
pid.thread_hash());
rpc.call(
partition_primary, &_tracker, [this, rpc, pid,
partition_primary](error_code err) mutable {
@@ -231,8 +231,8 @@ inline void
backup_engine::handle_replica_backup_failed(const backup_response &r
LOG_ERROR("backup_id({}): backup for partition {} failed, response.err:
{}",
_cur_backup.backup_id,
- pid.to_string(),
- response.err.to_string());
+ pid,
+ response.err);
zauto_lock l(_lock);
// if one partition fail, the whole backup plan fail.
_is_backup_failed = true;
@@ -278,8 +278,8 @@ void backup_engine::on_backup_reply(const error_code err,
LOG_ERROR("backup_id({}): backup request to server {} failed, error:
{}, retry to "
"send backup request.",
_cur_backup.backup_id,
- primary.to_string(),
- rep_error.to_string());
+ primary,
+ rep_error);
retry_backup(pid);
return;
};
@@ -287,9 +287,7 @@ void backup_engine::on_backup_reply(const error_code err,
if (response.progress == cold_backup_constant::PROGRESS_FINISHED) {
CHECK_EQ(response.pid, pid);
CHECK_EQ(response.backup_id, _cur_backup.backup_id);
- LOG_INFO("backup_id({}): backup for partition {} completed.",
- _cur_backup.backup_id,
- pid.to_string());
+ LOG_INFO("backup_id({}): backup for partition {} completed.",
_cur_backup.backup_id, pid);
{
zauto_lock l(_lock);
_backup_status[pid.get_partition_index()] =
backup_status::COMPLETED;
@@ -302,8 +300,8 @@ void backup_engine::on_backup_reply(const error_code err,
LOG_INFO("backup_id({}): receive backup response for partition {} from
server {}, now "
"progress {}, retry to send backup request.",
_cur_backup.backup_id,
- pid.to_string(),
- primary.to_string(),
+ pid,
+ primary,
response.progress);
retry_backup(pid);
@@ -320,7 +318,7 @@ void backup_engine::write_backup_info()
LOG_ERROR(
"backup_id({}): write backup info failed, error {}, do not try
again for this error.",
_cur_backup.backup_id,
- err.to_string());
+ err);
zauto_lock l(_lock);
_is_backup_failed = true;
return;
@@ -364,7 +362,7 @@ error_code backup_engine::start()
LOG_ERROR("backup_id({}): backup meta data for app {} failed, error
{}",
_cur_backup.backup_id,
_cur_backup.app_id,
- err.to_string());
+ err);
return err;
}
for (int i = 0; i < _backup_status.size(); ++i) {
diff --git a/src/meta/cluster_balance_policy.cpp
b/src/meta/cluster_balance_policy.cpp
index bc6fe64de..febfff535 100644
--- a/src/meta/cluster_balance_policy.cpp
+++ b/src/meta/cluster_balance_policy.cpp
@@ -370,7 +370,7 @@ bool cluster_balance_policy::pick_up_move(const
cluster_migration_info &cluster_
auto index = rand() % max_load_disk_set.size();
auto max_load_disk = *select_random(max_load_disk_set, index);
LOG_INFO("most load disk({}) on node({}) is picked, has {} partition",
- max_load_disk.node.to_string(),
+ max_load_disk.node,
max_load_disk.disk_tag,
max_load_disk.partitions.size());
for (const auto &node_addr : min_nodes) {
@@ -384,14 +384,14 @@ bool cluster_balance_policy::pick_up_move(const
cluster_migration_info &cluster_
move_info.type = cluster_info.type;
LOG_INFO("partition[{}] will migrate from {} to {}",
picked_pid,
- max_load_disk.node.to_string(),
- node_addr.to_string());
+ max_load_disk.node,
+ node_addr);
return true;
}
}
LOG_INFO("can not find a partition(app_id={}) from random max load
disk(node={}, disk={})",
app_id,
- max_load_disk.node.to_string(),
+ max_load_disk.node,
max_load_disk.disk_tag);
return false;
}
diff --git a/src/meta/duplication/duplication_info.h
b/src/meta/duplication/duplication_info.h
index 99b4115af..279a57ce6 100644
--- a/src/meta/duplication/duplication_info.h
+++ b/src/meta/duplication/duplication_info.h
@@ -20,6 +20,7 @@
#include <fmt/core.h>
#include <algorithm>
#include <cstdint>
+#include <iosfwd>
#include <map>
#include <memory>
#include <string>
@@ -34,6 +35,7 @@
#include "utils/blob.h"
#include "utils/error_code.h"
#include "utils/fmt_logging.h"
+#include "utils/fmt_utils.h"
#include "utils/zlocks.h"
namespace dsn {
@@ -189,6 +191,11 @@ public:
// To json encoded string.
std::string to_string() const;
+ friend std::ostream &operator<<(std::ostream &os, const duplication_info
&di)
+ {
+ return os << di.to_string();
+ }
+
const char *log_prefix() const { return prefix_for_log.c_str(); }
private:
@@ -256,3 +263,5 @@ extern bool json_decode(const dsn::json::JsonObject &in,
duplication_fail_mode::
} // namespace replication
} // namespace dsn
+
+USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::duplication_info);
diff --git a/src/meta/duplication/meta_duplication_service.cpp
b/src/meta/duplication/meta_duplication_service.cpp
index fd1d034f1..094dda698 100644
--- a/src/meta/duplication/meta_duplication_service.cpp
+++ b/src/meta/duplication/meta_duplication_service.cpp
@@ -214,7 +214,7 @@ void
meta_duplication_service::do_add_duplication(std::shared_ptr<app_state> &ap
{
const auto err = dup->start(rpc.request().is_duplicating_checkpoint);
if (dsn_unlikely(err != ERR_OK)) {
- LOG_ERROR("start dup[{}({})] failed: err = {}", app->app_name,
dup->id, err.to_string());
+ LOG_ERROR("start dup[{}({})] failed: err = {}", app->app_name,
dup->id, err);
return;
}
blob value = dup->to_json_blob();
@@ -275,7 +275,7 @@ void
meta_duplication_service::duplication_sync(duplication_sync_rpc rpc)
node_state *ns = get_node_state(_state->_nodes, request.node, false);
if (ns == nullptr) {
- LOG_WARNING("node({}) is not found in meta server",
request.node.to_string());
+ LOG_WARNING("node({}) is not found in meta server", request.node);
response.err = ERR_OBJECT_NOT_FOUND;
return;
}
@@ -397,8 +397,8 @@ void
meta_duplication_service::create_follower_app_for_duplication(
dup->follower_cluster_name,
dup->app_name,
duplication_status_to_string(dup->status()),
- create_err.to_string(),
- update_err.to_string());
+ create_err,
+ update_err);
}
});
}
@@ -481,7 +481,7 @@ void
meta_duplication_service::check_follower_app_if_create_completed(
dup->follower_cluster_name,
dup->app_name,
duplication_status_to_string(dup->status()),
- query_err.to_string(),
+ query_err,
update_err);
}
});
@@ -616,7 +616,7 @@ void
meta_duplication_service::do_restore_duplication_progress(
if (!buf2int64(value.to_string_view(), confirmed_decree)) {
LOG_ERROR("[{}] invalid confirmed_decree {} on
partition_idx {}",
dup->log_prefix(),
- value.to_string(),
+ value,
partition_idx);
return; // fail fast
}
diff --git a/src/meta/load_balance_policy.cpp b/src/meta/load_balance_policy.cpp
index 61e846479..59d30dd2e 100644
--- a/src/meta/load_balance_policy.cpp
+++ b/src/meta/load_balance_policy.cpp
@@ -45,7 +45,7 @@ void dump_disk_load(app_id id, const rpc_address &node, bool
only_primary, const
{
std::ostringstream load_string;
load_string << std::endl << "<<<<<<<<<<" << std::endl;
- load_string << "load for " << node.to_string() << ", "
+ load_string << "load for " << node << ", "
<< "app id: " << id;
if (only_primary) {
load_string << ", only for primary";
@@ -68,7 +68,7 @@ bool calc_disk_load(node_mapper &nodes,
{
load.clear();
const node_state *ns = get_node_state(nodes, node, false);
- CHECK_NOTNULL(ns, "can't find node({}) from node_state", node.to_string());
+ CHECK_NOTNULL(ns, "can't find node({}) from node_state", node);
auto add_one_replica_to_disk_load = [&](const gpid &pid) {
LOG_DEBUG("add gpid({}) to node({}) disk load", pid, node);
@@ -107,7 +107,7 @@ get_node_loads(const std::shared_ptr<app_state> &app,
nodes, apps, app->app_id, iter->first, only_primary,
node_loads[iter->first])) {
LOG_WARNING(
"stop the balancer as some replica infos aren't collected,
node({}), app({})",
- iter->first.to_string(),
+ iter->first,
app->get_logname());
return node_loads;
}
@@ -259,7 +259,7 @@ bool
load_balance_policy::move_primary(std::unique_ptr<flow_path> path)
if (!calc_disk_load(
nodes, apps, path->_app->app_id, address_vec[current], true,
*current_load)) {
LOG_WARNING("stop move primary as some replica infos aren't collected,
node({}), app({})",
- address_vec[current].to_string(),
+ address_vec[current],
path->_app->get_logname());
return false;
}
@@ -271,7 +271,7 @@ bool
load_balance_policy::move_primary(std::unique_ptr<flow_path> path)
if (!calc_disk_load(nodes, apps, path->_app->app_id, from, true,
*prev_load)) {
LOG_WARNING(
"stop move primary as some replica infos aren't collected,
node({}), app({})",
- from.to_string(),
+ from,
path->_app->get_logname());
return false;
}
@@ -730,9 +730,7 @@ gpid
copy_replica_operation::select_partition(migration_list *result)
int id_max = *_ordered_address_ids.rbegin();
const node_state &ns = _nodes.find(_address_vec[id_max])->second;
- CHECK(partitions != nullptr && !partitions->empty(),
- "max load({}) shouldn't empty",
- ns.addr().to_string());
+ CHECK(partitions != nullptr && !partitions->empty(), "max load({})
shouldn't empty", ns.addr());
return select_max_load_gpid(partitions, result);
}
diff --git a/src/meta/meta_backup_service.cpp b/src/meta/meta_backup_service.cpp
index 1f2879140..2ed5425c5 100644
--- a/src/meta/meta_backup_service.cpp
+++ b/src/meta/meta_backup_service.cpp
@@ -194,7 +194,7 @@ void policy_context::start_backup_app_meta_unlocked(int32_t
app_id)
_is_backup_failed = true;
LOG_ERROR("write {} failed, err = {}, don't try again when got
this error.",
remote_file->file_name(),
- resp.err.to_string());
+ resp.err);
return;
} else {
LOG_WARNING("write {} failed, reason({}), try it later",
@@ -310,7 +310,7 @@ void
policy_context::write_backup_app_finish_flag_unlocked(int32_t app_id,
_is_backup_failed = true;
LOG_ERROR("write {} failed, err = {}, don't try again when got
this error.",
remote_file->file_name(),
- resp.err.to_string());
+ resp.err);
return;
} else {
LOG_WARNING("write {} failed, reason({}), try it later",
@@ -417,7 +417,7 @@ void policy_context::write_backup_info_unlocked(const
backup_info &b_info,
_is_backup_failed = true;
LOG_ERROR("write {} failed, err = {}, don't try again when got
this error.",
remote_file->file_name(),
- resp.err.to_string());
+ resp.err);
return;
} else {
LOG_WARNING("write {} failed, reason({}), try it later",
@@ -444,8 +444,8 @@ bool
policy_context::update_partition_progress_unlocked(gpid pid,
LOG_WARNING(
"{}: backup of partition {} has been finished, ignore the backup
response from {} ",
_backup_sig,
- pid.to_string(),
- source.to_string());
+ pid,
+ source);
return true;
}
@@ -455,18 +455,17 @@ bool
policy_context::update_partition_progress_unlocked(gpid pid,
_backup_sig,
local_progress,
progress,
- source.to_string(),
- pid.to_string());
+ source,
+ pid);
}
local_progress = progress;
- LOG_DEBUG(
- "{}: update partition {} backup progress to {}.", _backup_sig,
pid.to_string(), progress);
+ LOG_DEBUG("{}: update partition {} backup progress to {}.", _backup_sig,
pid, progress);
if (local_progress == cold_backup_constant::PROGRESS_FINISHED) {
LOG_INFO("{}: finish backup for partition {}, the app has {}
unfinished backup "
"partition now.",
_backup_sig,
- pid.to_string(),
+ pid,
_progress.unfinished_partitions_per_app[pid.get_app_id()]);
// update the progress-chain: partition => app =>
current_backup_instance
@@ -509,7 +508,7 @@ void policy_context::start_backup_partition_unlocked(gpid
pid)
if (partition_primary.is_invalid()) {
LOG_WARNING("{}: partition {} doesn't have a primary now, retry to
backup it later",
_backup_sig,
- pid.to_string());
+ pid);
tasking::enqueue(LPC_DEFAULT_CALLBACK,
&_tracker,
[this, pid]() {
@@ -537,8 +536,8 @@ void policy_context::start_backup_partition_unlocked(gpid
pid)
});
LOG_INFO("{}: send backup command to partition {}, target_addr = {}",
_backup_sig,
- pid.to_string(),
- partition_primary.to_string());
+ pid,
+ partition_primary);
_backup_service->get_meta_service()->send_request(request,
partition_primary, rpc_callback);
}
@@ -547,10 +546,8 @@ void policy_context::on_backup_reply(error_code err,
gpid pid,
const rpc_address &primary)
{
- LOG_INFO("{}: receive backup response for partition {} from server {}.",
- _backup_sig,
- pid.to_string(),
- primary.to_string());
+ LOG_INFO(
+ "{}: receive backup response for partition {} from server {}.",
_backup_sig, pid, primary);
if (err == dsn::ERR_OK && response.err == dsn::ERR_OK) {
CHECK_EQ_MSG(response.policy_name,
_policy.policy_name,
@@ -574,8 +571,8 @@ void policy_context::on_backup_reply(error_code err,
LOG_WARNING("{}: got a backup response of partition {} from server
{}, whose backup id "
"{} is smaller than current backup id {}, maybe it is
a stale message",
_backup_sig,
- pid.to_string(),
- primary.to_string(),
+ pid,
+ primary,
response.backup_id,
_cur_backup.backup_id);
} else {
@@ -592,18 +589,18 @@ void policy_context::on_backup_reply(error_code err,
LOG_ERROR("{}: backup got error {} for partition {} from {}, don't try
again when got "
"this error.",
_backup_sig.c_str(),
- response.err.to_string(),
- pid.to_string(),
- primary.to_string());
+ response.err,
+ pid,
+ primary);
return;
} else {
LOG_WARNING(
"{}: backup got error for partition {} from {}, rpc error {},
response error {}",
_backup_sig.c_str(),
- pid.to_string(),
- primary.to_string(),
- err.to_string(),
- response.err.to_string());
+ pid,
+ primary,
+ err,
+ response.err);
}
// retry to backup the partition.
@@ -699,10 +696,7 @@ void
policy_context::sync_backup_to_remote_storage_unlocked(const backup_info &b
0,
_backup_service->backup_option().meta_retry_delay_ms);
} else {
- CHECK(false,
- "{}: we can't handle this right now, error({})",
- _backup_sig,
- err.to_string());
+ CHECK(false, "{}: we can't handle this right now, error({})",
_backup_sig, err);
}
};
@@ -1114,7 +1108,7 @@ void
backup_service::start_create_policy_meta_root(dsn::task_ptr callback)
0,
_opt.meta_retry_delay_ms);
} else {
- CHECK(false, "we can't handle this error({}) right now",
err.to_string());
+ CHECK(false, "we can't handle this error({}) right now", err);
}
});
}
diff --git a/src/meta/meta_bulk_load_ingestion_context.cpp
b/src/meta/meta_bulk_load_ingestion_context.cpp
index 8ab82495a..fbb2c775e 100644
--- a/src/meta/meta_bulk_load_ingestion_context.cpp
+++ b/src/meta/meta_bulk_load_ingestion_context.cpp
@@ -90,7 +90,7 @@ bool ingestion_context::node_context::check_if_add(const
std::string &disk_tag)
auto max_node_ingestion_count = FLAGS_bulk_load_node_max_ingesting_count;
if (node_ingesting_count >= max_node_ingestion_count) {
LOG_WARNING("node[{}] has {} partition executing ingestion, max_count
= {}",
- address.to_string(),
+ address,
node_ingesting_count,
max_node_ingestion_count);
return false;
@@ -99,7 +99,7 @@ bool ingestion_context::node_context::check_if_add(const
std::string &disk_tag)
auto max_disk_ingestion_count =
get_max_disk_ingestion_count(max_node_ingestion_count);
if (disk_ingesting_counts[disk_tag] >= max_disk_ingestion_count) {
LOG_WARNING("node[{}] disk[{}] has {} partition executing ingestion,
max_count = {}",
- address.to_string(),
+ address,
disk_tag,
disk_ingesting_counts[disk_tag],
max_disk_ingestion_count);
diff --git a/src/meta/meta_bulk_load_service.cpp
b/src/meta/meta_bulk_load_service.cpp
index 1f3cc05d4..8648cf989 100644
--- a/src/meta/meta_bulk_load_service.cpp
+++ b/src/meta/meta_bulk_load_service.cpp
@@ -220,7 +220,7 @@ bulk_load_service::check_bulk_load_request_params(const
start_bulk_load_request
LOG_ERROR("failed to read file({}) on remote provider({}), error = {}",
remote_path,
file_provider,
- r_resp.err.to_string());
+ r_resp.err);
hint_msg = "read bulk_load_info failed";
return r_resp.err;
}
@@ -331,7 +331,7 @@ void
bulk_load_service::create_partition_bulk_load_dir(const std::string &app_na
get_partition_bulk_load_path(pid),
std::move(value),
[app_name, pid, partition_count, rpc, pinfo, this]() {
- LOG_DEBUG("app({}) create partition({}) bulk_load_info", app_name,
pid.to_string());
+ LOG_DEBUG("app({}) create partition({}) bulk_load_info", app_name,
pid);
{
zauto_write_lock l(_lock);
_partition_bulk_load_info[pid] = pinfo;
@@ -440,7 +440,7 @@ void bulk_load_service::partition_bulk_load(const
std::string &app_name, const g
LOG_INFO("send bulk load request to node({}), app({}), partition({}),
partition "
"status = {}, remote provider = {}, cluster_name = {},
remote_root_path = {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(req->meta_bulk_load_status),
@@ -468,8 +468,8 @@ void
bulk_load_service::on_partition_bulk_load_reply(error_code err,
"app({}), partition({}) failed to receive bulk load response from
node({}), error = {}",
app_name,
pid,
- primary_addr.to_string(),
- err.to_string());
+ primary_addr,
+ err);
try_rollback_to_downloading(app_name, pid);
try_resend_bulk_load_request(app_name, pid);
return;
@@ -480,8 +480,8 @@ void
bulk_load_service::on_partition_bulk_load_reply(error_code err,
"app({}), partition({}) doesn't exist or has invalid state on
node({}), error = {}",
app_name,
pid,
- primary_addr.to_string(),
- response.err.to_string());
+ primary_addr,
+ response.err);
try_rollback_to_downloading(app_name, pid);
try_resend_bulk_load_request(app_name, pid);
return;
@@ -491,7 +491,7 @@ void
bulk_load_service::on_partition_bulk_load_reply(error_code err,
LOG_WARNING(
"node({}) has enough replicas downloading, wait for next round to
send bulk load "
"request for app({}), partition({})",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid);
try_resend_bulk_load_request(app_name, pid);
@@ -503,8 +503,8 @@ void
bulk_load_service::on_partition_bulk_load_reply(error_code err,
"{}, primary status = {}",
app_name,
pid,
- primary_addr.to_string(),
- response.err.to_string(),
+ primary_addr,
+ response.err,
dsn::enum_to_string(response.primary_bulk_load_status));
handle_bulk_load_failed(pid.get_app_id(), response.err);
try_resend_bulk_load_request(app_name, pid);
@@ -524,7 +524,7 @@ void
bulk_load_service::on_partition_bulk_load_reply(error_code err,
LOG_WARNING(
"receive out-date response from node({}), app({}), partition({}),
request ballot = "
"{}, current ballot= {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
request.ballot,
@@ -592,7 +592,7 @@ void bulk_load_service::handle_app_downloading(const
bulk_load_response &respons
LOG_WARNING(
"receive bulk load response from node({}) app({}), partition({}),
primary_status({}), "
"but total_download_progress is not set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status));
@@ -605,11 +605,11 @@ void bulk_load_service::handle_app_downloading(const
bulk_load_response &respons
!bulk_load_states.__isset.download_status) {
LOG_WARNING("receive bulk load response from node({}) app({}),
partition({}), "
"primary_status({}), but node({}) progress or status
is not set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
- kv.first.to_string());
+ kv.first);
return;
}
// check partition download status
@@ -618,7 +618,7 @@ void bulk_load_service::handle_app_downloading(const
bulk_load_response &respons
"downloading files, error = {}",
app_name,
pid,
- kv.first.to_string(),
+ kv.first,
bulk_load_states.download_status);
error_code err = ERR_UNKNOWN;
@@ -644,7 +644,7 @@ void bulk_load_service::handle_app_downloading(const
bulk_load_response &respons
int32_t total_progress = response.total_download_progress;
LOG_INFO("receive bulk load response from node({}) app({}) partition({}),
primary_status({}), "
"total_download_progress = {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
@@ -673,7 +673,7 @@ void bulk_load_service::handle_app_ingestion(const
bulk_load_response &response,
if (!response.__isset.is_group_ingestion_finished) {
LOG_WARNING("receive bulk load response from node({}) app({})
partition({}), "
"primary_status({}), but is_group_ingestion_finished is
not set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status));
@@ -685,19 +685,17 @@ void bulk_load_service::handle_app_ingestion(const
bulk_load_response &response,
if (!bulk_load_states.__isset.ingest_status) {
LOG_WARNING("receive bulk load response from node({}) app({})
partition({}), "
"primary_status({}), but node({}) ingestion_status is
not set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
- kv.first.to_string());
+ kv.first);
return;
}
if (bulk_load_states.ingest_status == ingestion_status::IS_FAILED) {
- LOG_ERROR("app({}) partition({}) on node({}) ingestion failed",
- app_name,
- pid,
- kv.first.to_string());
+ LOG_ERROR(
+ "app({}) partition({}) on node({}) ingestion failed",
app_name, pid, kv.first);
finish_ingestion(pid);
handle_bulk_load_failed(pid.get_app_id(), ERR_INGESTION_FAILED);
return;
@@ -706,7 +704,7 @@ void bulk_load_service::handle_app_ingestion(const
bulk_load_response &response,
LOG_INFO("receive bulk load response from node({}) app({}) partition({}),
primary_status({}), "
"is_group_ingestion_finished = {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
@@ -733,7 +731,7 @@ void bulk_load_service::handle_bulk_load_finish(const
bulk_load_response &respon
if (!response.__isset.is_group_bulk_load_context_cleaned_up) {
LOG_WARNING("receive bulk load response from node({}) app({})
partition({}), "
"primary_status({}), but
is_group_bulk_load_context_cleaned_up is not set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status));
@@ -744,11 +742,11 @@ void bulk_load_service::handle_bulk_load_finish(const
bulk_load_response &respon
if (!kv.second.__isset.is_cleaned_up) {
LOG_WARNING("receive bulk load response from node({}) app({}),
partition({}), "
"primary_status({}), but node({}) is_cleaned_up is not
set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
- kv.first.to_string());
+ kv.first);
return;
}
}
@@ -759,7 +757,7 @@ void bulk_load_service::handle_bulk_load_finish(const
bulk_load_response &respon
LOG_WARNING(
"receive bulk load response from node({}) app({})
partition({}), current partition "
"has already been cleaned up",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid);
return;
@@ -770,7 +768,7 @@ void bulk_load_service::handle_bulk_load_finish(const
bulk_load_response &respon
bool group_cleaned_up = response.is_group_bulk_load_context_cleaned_up;
LOG_INFO("receive bulk load response from node({}) app({}) partition({}),
primary status = {}, "
"is_group_bulk_load_context_cleaned_up = {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
@@ -814,7 +812,7 @@ void bulk_load_service::handle_app_pausing(const
bulk_load_response &response,
if (!response.__isset.is_group_bulk_load_paused) {
LOG_WARNING("receive bulk load response from node({}) app({})
partition({}), "
"primary_status({}), but is_group_bulk_load_paused is not
set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status));
@@ -825,11 +823,11 @@ void bulk_load_service::handle_app_pausing(const
bulk_load_response &response,
if (!kv.second.__isset.is_paused) {
LOG_WARNING("receive bulk load response from node({}) app({}),
partition({}), "
"primary_status({}), but node({}) is_paused is not
set",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
- kv.first.to_string());
+ kv.first);
return;
}
}
@@ -837,7 +835,7 @@ void bulk_load_service::handle_app_pausing(const
bulk_load_response &response,
bool is_group_paused = response.is_group_bulk_load_paused;
LOG_INFO("receive bulk load response from node({}) app({}) partition({}),
primary status = {}, "
"is_group_bulk_load_paused = {}",
- primary_addr.to_string(),
+ primary_addr,
app_name,
pid,
dsn::enum_to_string(response.primary_bulk_load_status),
@@ -1303,10 +1301,7 @@ void bulk_load_service::send_ingestion_request(const
std::string &app_name,
on_partition_ingestion_reply(err, std::move(resp), app_name, pid,
primary_addr);
});
_meta_svc->send_request(msg, primary_addr, rpc_callback);
- LOG_INFO("send ingest_request to node({}), app({}) partition({})",
- primary_addr.to_string(),
- app_name,
- pid);
+ LOG_INFO("send ingest_request to node({}), app({}) partition({})",
primary_addr, app_name, pid);
}
// ThreadPool: THREAD_POOL_DEFAULT
@@ -1326,7 +1321,7 @@ void
bulk_load_service::on_partition_ingestion_reply(error_code err,
"repeated request",
app_name,
pid,
- primary_addr.to_string());
+ primary_addr);
return;
}
@@ -1335,7 +1330,7 @@ void
bulk_load_service::on_partition_ingestion_reply(error_code err,
LOG_ERROR("app({}) partition({}) on node({}) ingestion files failed,
error = {}",
app_name,
pid,
- primary_addr.to_string(),
+ primary_addr,
err);
tasking::enqueue(
LPC_META_STATE_NORMAL,
@@ -1350,7 +1345,7 @@ void
bulk_load_service::on_partition_ingestion_reply(error_code err,
"{}, retry it later",
app_name,
pid,
- primary_addr.to_string(),
+ primary_addr,
resp.rocksdb_error);
tasking::enqueue(LPC_BULK_LOAD_INGESTION,
_meta_svc->tracker(),
@@ -1368,7 +1363,7 @@ void
bulk_load_service::on_partition_ingestion_reply(error_code err,
"error = {}",
app_name,
pid,
- primary_addr.to_string(),
+ primary_addr,
resp.err,
resp.rocksdb_error);
@@ -1384,7 +1379,7 @@ void
bulk_load_service::on_partition_ingestion_reply(error_code err,
LOG_INFO("app({}) partition({}) receive ingestion response from node({})
succeed",
app_name,
pid,
- primary_addr.to_string());
+ primary_addr);
}
// ThreadPool: THREAD_POOL_META_STATE
diff --git a/src/meta/meta_http_service.cpp b/src/meta/meta_http_service.cpp
index 4fdde1aef..19f3e5345 100644
--- a/src/meta/meta_http_service.cpp
+++ b/src/meta/meta_http_service.cpp
@@ -102,7 +102,7 @@ void meta_http_service::get_app_handler(const http_request
&req, http_response &
return;
}
if (response.err != dsn::ERR_OK) {
- resp.body = response.err.to_string();
+ resp.body = response.err;
resp.status_code = http_status_code::kInternalServerError;
return;
}
@@ -226,7 +226,7 @@ void meta_http_service::list_app_handler(const http_request
&req, http_response
_service->_state->list_apps(request, response);
if (response.err != dsn::ERR_OK) {
- resp.body = response.err.to_string();
+ resp.body = response.err;
resp.status_code = http_status_code::kInternalServerError;
return;
}
diff --git a/src/meta/meta_service.cpp b/src/meta/meta_service.cpp
index 0acd4890a..8fc64c1ba 100644
--- a/src/meta/meta_service.cpp
+++ b/src/meta/meta_service.cpp
@@ -729,7 +729,7 @@ void
meta_service::on_query_cluster_info(configuration_cluster_info_rpc rpc)
for (size_t i = 0; i < _opts.meta_servers.size(); ++i) {
if (i != 0)
oss << ",";
- oss << _opts.meta_servers[i].to_string();
+ oss << _opts.meta_servers[i];
}
response.values.push_back(oss.str());
@@ -774,7 +774,7 @@ void
meta_service::on_query_configuration_by_index(configuration_query_by_index_
_state->query_configuration_by_index(rpc.request(), response);
if (ERR_OK == response.err) {
LOG_INFO("client {} queried an available app {} with appid {}",
- rpc.dsn_request()->header->from_address.to_string(),
+ rpc.dsn_request()->header->from_address,
rpc.request().app_name,
response.app_id);
}
diff --git a/src/meta/partition_guardian.cpp b/src/meta/partition_guardian.cpp
index 7e97f1874..0c161067d 100644
--- a/src/meta/partition_guardian.cpp
+++ b/src/meta/partition_guardian.cpp
@@ -527,7 +527,7 @@ pc_status
partition_guardian::on_missing_secondary(meta_view &view, const dsn::g
for (int i = 0; i < cc.dropped.size(); ++i) {
if (i != 0)
oss << ",";
- oss << cc.dropped[i].node.to_string();
+ oss << cc.dropped[i].node;
}
LOG_INFO(
"gpid({}): try to choose node in dropped list, dropped_list({}),
prefered_dropped({})",
@@ -730,7 +730,7 @@ partition_guardian::ctrl_assign_secondary_black_list(const
std::vector<std::stri
++iter) {
if (iter != _assign_secondary_black_list.begin())
oss << ",";
- oss << iter->to_string();
+ oss << *iter;
}
return oss.str();
}
diff --git a/src/meta/server_state.cpp b/src/meta/server_state.cpp
index 89241f399..9cb3e5ccc 100644
--- a/src/meta/server_state.cpp
+++ b/src/meta/server_state.cpp
@@ -878,7 +878,7 @@ void
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
LOG_WARNING(
"notify node({}) to gc replica({}) because it is
useless partition "
"which is caused by cancel split",
- request.node.to_string(),
+ request.node,
rep.pid);
} else {
// app is not recognized or partition is not recognized
@@ -944,7 +944,7 @@ void
server_state::on_config_sync(configuration_query_by_node_rpc rpc)
}
LOG_INFO("send config sync response to {}, err({}), partitions_count({}), "
"gc_replicas_count({})",
- request.node.to_string(),
+ request.node,
response.err,
response.partitions.size(),
response.gc_replicas.size());
@@ -2309,12 +2309,11 @@ server_state::sync_apps_from_replica_nodes(const
std::vector<dsn::rpc_address> &
query_replica_errors[i] = err;
std::ostringstream oss;
if (skip_bad_nodes) {
- oss << "WARNING: collect app and replica info from node("
- << replica_nodes[i].to_string() << ") failed with err(" <<
err.to_string()
- << "), skip the bad node" << std::endl;
+ oss << "WARNING: collect app and replica info from node(" <<
replica_nodes[i]
+ << ") failed with err(" << err << "), skip the bad node"
<< std::endl;
} else {
- oss << "ERROR: collect app and replica info from node("
- << replica_nodes[i].to_string() << ") failed with err(" <<
err.to_string()
+ oss << "ERROR: collect app and replica info from node(" <<
replica_nodes[i]
+ << ") failed with err(" << err
<< "), you can skip it by set skip_bad_nodes option" <<
std::endl;
}
hint_message += oss.str();
@@ -3194,7 +3193,7 @@ void
server_state::get_max_replica_count(configuration_get_max_replica_count_rpc
response.max_replica_count = 0;
LOG_WARNING("failed to get max_replica_count: app_name={},
error_code={}, hint_message={}",
app_name,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3205,7 +3204,7 @@ void
server_state::get_max_replica_count(configuration_get_max_replica_count_rpc
"hint_message={}",
app_name,
app->app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3239,7 +3238,7 @@ void
server_state::set_max_replica_count(configuration_set_max_replica_count_rpc
LOG_WARNING(
"failed to set max_replica_count: app_name={}, error_code={},
hint_message={}",
app_name,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3252,7 +3251,7 @@ void
server_state::set_max_replica_count(configuration_set_max_replica_count_rpc
"hint_message={}",
app_name,
app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3266,7 +3265,7 @@ void
server_state::set_max_replica_count(configuration_set_max_replica_count_rpc
"hint_message={}",
app_name,
app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3281,7 +3280,7 @@ void
server_state::set_max_replica_count(configuration_set_max_replica_count_rpc
"failed to set max_replica_count: app_name={}, app_id={},
error_code={}, message={}",
app_name,
app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3292,7 +3291,7 @@ void
server_state::set_max_replica_count(configuration_set_max_replica_count_rpc
"failed to set max_replica_count: app_name={}, app_id={},
error_code={}, message={}",
app_name,
app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3335,7 +3334,7 @@ void
server_state::set_max_replica_count_env_updating(std::shared_ptr<app_state>
"hint_message={}",
app->app_name,
app->app_id,
- response.err.to_string(),
+ response.err,
response.hint_message);
return;
}
@@ -3435,7 +3434,7 @@ void
server_state::do_update_max_replica_count(std::shared_ptr<app_state> &app,
"An error that can't be handled occurs while updating
partition-level"
"max_replica_count: error_code={}, app_name={}, app_id={}, "
"partition_index={}, partition_count={},
new_max_replica_count={}",
- ec.to_string(),
+ ec,
app_name,
app->app_id,
i,
@@ -3684,7 +3683,7 @@ void
server_state::on_update_partition_max_replica_count_on_remote_reply(
LOG_INFO("reply for updating partition-level max_replica_count on remote
storage: "
"error_code={}, app_name={}, app_id={}, partition_id={},
new_max_replica_count={}, "
"new_ballot={}",
- ec.to_string(),
+ ec,
app->app_name,
app->app_id,
partition_index,
diff --git a/src/meta/test/duplication_info_test.cpp
b/src/meta/test/duplication_info_test.cpp
index 6ac67d090..9383958e4 100644
--- a/src/meta/test/duplication_info_test.cpp
+++ b/src/meta/test/duplication_info_test.cpp
@@ -170,7 +170,7 @@ public:
auto dup_sptr = duplication_info::decode_from_blob(
1, 1, "temp", 4, "/meta_test/101/duplication/1", json);
- ASSERT_TRUE(dup_sptr->equals_to(dup)) << dup_sptr->to_string() << " "
<< dup.to_string();
+ ASSERT_TRUE(dup_sptr->equals_to(dup)) << *dup_sptr << " " << dup;
blob new_json =
blob::create_from_bytes(boost::replace_all_copy(json.to_string(),
"DS_APP", "DS_FOO"));
diff --git a/src/meta/test/meta_duplication_service_test.cpp
b/src/meta/test/meta_duplication_service_test.cpp
index 2d5d389ca..644c8c17c 100644
--- a/src/meta/test/meta_duplication_service_test.cpp
+++ b/src/meta/test/meta_duplication_service_test.cpp
@@ -254,8 +254,7 @@ public:
auto &dup = kv.second;
ASSERT_TRUE(after.find(dupid) != after.end());
- ASSERT_TRUE(dup->equals_to(*after[dupid])) << dup->to_string()
<< std::endl
- <<
after[dupid]->to_string();
+ ASSERT_TRUE(dup->equals_to(*after[dupid])) << *dup <<
std::endl << *after[dupid];
}
}
}
diff --git a/src/meta/test/meta_test_base.cpp b/src/meta/test/meta_test_base.cpp
index 9b0a86545..720efcfee 100644
--- a/src/meta/test/meta_test_base.cpp
+++ b/src/meta/test/meta_test_base.cpp
@@ -153,7 +153,7 @@ std::vector<rpc_address>
meta_test_base::ensure_enough_alive_nodes(int min_node_
LOG_DEBUG("already exists {} alive nodes: ", nodes.size());
for (const auto &node : nodes) {
- LOG_DEBUG(" {}", node.to_string());
+ LOG_DEBUG(" {}", node);
}
// ensure that _ms->_alive_set is identical with _ss->_nodes
@@ -177,7 +177,7 @@ std::vector<rpc_address>
meta_test_base::ensure_enough_alive_nodes(int min_node_
LOG_DEBUG("created {} alive nodes: ", nodes.size());
for (const auto &node : nodes) {
- LOG_DEBUG(" {}", node.to_string());
+ LOG_DEBUG(" {}", node);
}
return nodes;
}
@@ -199,7 +199,7 @@ void meta_test_base::create_app(const std::string &name,
uint32_t partition_coun
auto result = fake_create_app(_ss.get(), req);
fake_wait_rpc(result, resp);
- ASSERT_EQ(resp.err, ERR_OK) << resp.err.to_string() << " " << name;
+ ASSERT_EQ(resp.err, ERR_OK) << resp.err << " " << name;
// wait for the table to create
ASSERT_TRUE(_ss->spin_wait_staging(30));
@@ -215,7 +215,7 @@ void meta_test_base::drop_app(const std::string &name)
auto result = fake_drop_app(_ss.get(), req);
fake_wait_rpc(result, resp);
- ASSERT_EQ(resp.err, ERR_OK) << resp.err.to_string() << " " << name;
+ ASSERT_EQ(resp.err, ERR_OK) << resp.err << " " << name;
ASSERT_TRUE(_ss->spin_wait_staging(30));
}
diff --git a/src/meta/test/misc/misc.cpp b/src/meta/test/misc/misc.cpp
index 25c16d675..500c7aaf1 100644
--- a/src/meta/test/misc/misc.cpp
+++ b/src/meta/test/misc/misc.cpp
@@ -74,9 +74,9 @@ void verbose_apps(const app_mapper &input_apps)
std::cout << apps.first << " " << app->partition_count << std::endl;
for (int i = 0; i < app->partition_count; ++i) {
std::cout << app->partitions[i].secondaries.size() + 1 << " "
- << app->partitions[i].primary.to_string();
+ << app->partitions[i].primary;
for (int j = 0; j < app->partitions[i].secondaries.size(); ++j) {
- std::cout << " " <<
app->partitions[i].secondaries[j].to_string();
+ std::cout << " " << app->partitions[i].secondaries[j];
}
std::cout << std::endl;
}
diff --git a/src/redis_protocol/proxy_lib/redis_parser.cpp
b/src/redis_protocol/proxy_lib/redis_parser.cpp
index e37dafab0..baf9aed8b 100644
--- a/src/redis_protocol/proxy_lib/redis_parser.cpp
+++ b/src/redis_protocol/proxy_lib/redis_parser.cpp
@@ -931,7 +931,7 @@ void redis_parser::counter_internal(message_entry &entry)
LOG_WARNING_PREFIX("command {} seqid({}) with invalid 'increment':
{}",
command,
entry.sequence_id,
- entry.request.sub_requests[2].data.to_string());
+ entry.request.sub_requests[2].data);
simple_error_reply(entry,
fmt::format("wrong type of argument 'increment
'for '{}'", command));
return;
diff --git a/src/replica/backup/replica_backup_server.cpp
b/src/replica/backup/replica_backup_server.cpp
index 23de8972b..fba320e94 100644
--- a/src/replica/backup/replica_backup_server.cpp
+++ b/src/replica/backup/replica_backup_server.cpp
@@ -95,7 +95,7 @@ void replica_backup_server::on_cold_backup(backup_rpc rpc)
void replica_backup_server::on_clear_cold_backup(const backup_clear_request
&request)
{
LOG_INFO("receive clear cold backup request: backup({}.{})",
- request.pid.to_string(),
+ request.pid,
request.policy_name.c_str());
replica_ptr rep = _stub->get_replica(request.pid);
diff --git a/src/replica/bulk_load/replica_bulk_loader.cpp
b/src/replica/bulk_load/replica_bulk_loader.cpp
index b92a98885..ba32e4106 100644
--- a/src/replica/bulk_load/replica_bulk_loader.cpp
+++ b/src/replica/bulk_load/replica_bulk_loader.cpp
@@ -197,7 +197,7 @@ void replica_bulk_loader::broadcast_group_bulk_load(const
bulk_load_request &met
request->meta_bulk_load_status = meta_req.meta_bulk_load_status;
request->remote_root_path = meta_req.remote_root_path;
- LOG_INFO_PREFIX("send group_bulk_load_request to {}",
addr.to_string());
+ LOG_INFO_PREFIX("send group_bulk_load_request to {}", addr);
group_bulk_load_rpc rpc(
std::move(request), RPC_GROUP_BULK_LOAD, 0_ms, 0,
get_gpid().thread_hash());
@@ -242,7 +242,7 @@ void replica_bulk_loader::on_group_bulk_load(const
group_bulk_load_request &requ
LOG_INFO_PREFIX("receive group_bulk_load request, primary address = {},
ballot = {}, "
"meta bulk_load_status = {}, local bulk_load_status = {}",
- request.config.primary.to_string(),
+ request.config.primary,
request.config.ballot,
enum_to_string(request.meta_bulk_load_status),
enum_to_string(_status));
@@ -278,17 +278,16 @@ void
replica_bulk_loader::on_group_bulk_load_reply(error_code err,
_replica->_primary_states.group_bulk_load_pending_replies.erase(req.target_address);
if (err != ERR_OK) {
- LOG_ERROR_PREFIX("failed to receive group_bulk_load_reply from {},
error = {}",
- req.target_address.to_string(),
- err.to_string());
+ LOG_ERROR_PREFIX(
+ "failed to receive group_bulk_load_reply from {}, error = {}",
req.target_address, err);
_replica->_primary_states.reset_node_bulk_load_states(req.target_address);
return;
}
if (resp.err != ERR_OK) {
LOG_ERROR_PREFIX("receive group_bulk_load response from {} failed,
error = {}",
- req.target_address.to_string(),
- resp.err.to_string());
+ req.target_address,
+ resp.err);
_replica->_primary_states.reset_node_bulk_load_states(req.target_address);
return;
}
@@ -296,7 +295,7 @@ void
replica_bulk_loader::on_group_bulk_load_reply(error_code err,
if (req.config.ballot != get_ballot()) {
LOG_ERROR_PREFIX("recevied wrong group_bulk_load response from {},
request ballot = {}, "
"current ballot = {}",
- req.target_address.to_string(),
+ req.target_address,
req.config.ballot,
get_ballot());
_replica->_primary_states.reset_node_bulk_load_states(req.target_address);
@@ -495,8 +494,7 @@ void replica_bulk_loader::download_files(const std::string
&provider_name,
if (err != ERR_OK && err != ERR_PATH_ALREADY_EXIST) {
try_decrease_bulk_load_download_count();
_download_status.store(err);
- LOG_ERROR_PREFIX("download bulk load metadata file failed, error =
{}",
- err.to_string());
+ LOG_ERROR_PREFIX("download bulk load metadata file failed, error =
{}", err);
return;
}
@@ -507,7 +505,7 @@ void replica_bulk_loader::download_files(const std::string
&provider_name,
if (err != ERR_OK) {
try_decrease_bulk_load_download_count();
_download_status.store(err);
- LOG_ERROR_PREFIX("parse bulk load metadata failed, error = {}",
err.to_string());
+ LOG_ERROR_PREFIX("parse bulk load metadata failed, error = {}",
err);
return;
}
}
@@ -574,7 +572,7 @@ void replica_bulk_loader::download_sst_file(const
std::string &remote_dir,
try_decrease_bulk_load_download_count();
_download_status.store(ec);
}
- LOG_ERROR_PREFIX("failed to download file({}), error = {}",
f_meta.name, ec.to_string());
+ LOG_ERROR_PREFIX("failed to download file({}), error = {}",
f_meta.name, ec);
METRIC_VAR_INCREMENT(bulk_load_download_file_failed_count);
return;
}
@@ -924,7 +922,7 @@ void
replica_bulk_loader::report_group_download_progress(/*out*/ bulk_load_respo
}
response.group_bulk_load_state[_replica->_primary_states.membership.primary] =
primary_state;
LOG_INFO_PREFIX("primary = {}, download progress = {}%, status = {}",
- _replica->_primary_states.membership.primary.to_string(),
+ _replica->_primary_states.membership.primary,
primary_state.download_progress,
primary_state.download_status);
@@ -937,7 +935,7 @@ void
replica_bulk_loader::report_group_download_progress(/*out*/ bulk_load_respo
error_code s_status =
secondary_state.__isset.download_status ?
secondary_state.download_status : ERR_OK;
LOG_INFO_PREFIX("secondary = {}, download progress = {}%, status={}",
- target_address.to_string(),
+ target_address,
s_progress,
s_status);
response.group_bulk_load_state[target_address] = secondary_state;
@@ -964,7 +962,7 @@ void
replica_bulk_loader::report_group_ingestion_status(/*out*/ bulk_load_respon
primary_state.__set_ingest_status(_replica->_app->get_ingestion_status());
response.group_bulk_load_state[_replica->_primary_states.membership.primary] =
primary_state;
LOG_INFO_PREFIX("primary = {}, ingestion status = {}",
- _replica->_primary_states.membership.primary.to_string(),
+ _replica->_primary_states.membership.primary,
enum_to_string(primary_state.ingest_status));
bool is_group_ingestion_finish =
@@ -977,9 +975,8 @@ void
replica_bulk_loader::report_group_ingestion_status(/*out*/ bulk_load_respon
ingestion_status::type ingest_status =
secondary_state.__isset.ingest_status
?
secondary_state.ingest_status
:
ingestion_status::IS_INVALID;
- LOG_INFO_PREFIX("secondary = {}, ingestion status={}",
- target_address.to_string(),
- enum_to_string(ingest_status));
+ LOG_INFO_PREFIX(
+ "secondary = {}, ingestion status={}", target_address,
enum_to_string(ingest_status));
response.group_bulk_load_state[target_address] = secondary_state;
is_group_ingestion_finish &= (ingest_status ==
ingestion_status::IS_SUCCEED);
}
@@ -1008,7 +1005,7 @@ void
replica_bulk_loader::report_group_cleaned_up(bulk_load_response &response)
primary_state.__set_is_cleaned_up(is_cleaned_up());
response.group_bulk_load_state[_replica->_primary_states.membership.primary] =
primary_state;
LOG_INFO_PREFIX("primary = {}, bulk load states cleaned_up = {}",
- _replica->_primary_states.membership.primary.to_string(),
+ _replica->_primary_states.membership.primary,
primary_state.is_cleaned_up);
bool group_flag = (primary_state.is_cleaned_up) &&
@@ -1019,9 +1016,8 @@ void
replica_bulk_loader::report_group_cleaned_up(bulk_load_response &response)
_replica->_primary_states.secondary_bulk_load_states[target_address];
bool is_cleaned_up =
secondary_state.__isset.is_cleaned_up ?
secondary_state.is_cleaned_up : false;
- LOG_INFO_PREFIX("secondary = {}, bulk load states cleaned_up = {}",
- target_address.to_string(),
- is_cleaned_up);
+ LOG_INFO_PREFIX(
+ "secondary = {}, bulk load states cleaned_up = {}",
target_address, is_cleaned_up);
response.group_bulk_load_state[target_address] = secondary_state;
group_flag &= is_cleaned_up;
}
@@ -1044,7 +1040,7 @@ void
replica_bulk_loader::report_group_is_paused(bulk_load_response &response)
primary_state.__set_is_paused(_status == bulk_load_status::BLS_PAUSED);
response.group_bulk_load_state[_replica->_primary_states.membership.primary] =
primary_state;
LOG_INFO_PREFIX("primary = {}, bulk_load is_paused = {}",
- _replica->_primary_states.membership.primary.to_string(),
+ _replica->_primary_states.membership.primary,
primary_state.is_paused);
bool group_is_paused =
@@ -1054,8 +1050,7 @@ void
replica_bulk_loader::report_group_is_paused(bulk_load_response &response)
partition_bulk_load_state secondary_state =
_replica->_primary_states.secondary_bulk_load_states[target_address];
bool is_paused = secondary_state.__isset.is_paused ?
secondary_state.is_paused : false;
- LOG_INFO_PREFIX(
- "secondary = {}, bulk_load is_paused = {}",
target_address.to_string(), is_paused);
+ LOG_INFO_PREFIX("secondary = {}, bulk_load is_paused = {}",
target_address, is_paused);
response.group_bulk_load_state[target_address] = secondary_state;
group_is_paused &= is_paused;
}
diff --git a/src/replica/duplication/duplication_sync_timer.cpp
b/src/replica/duplication/duplication_sync_timer.cpp
index 5a33681b3..0682bd431 100644
--- a/src/replica/duplication/duplication_sync_timer.cpp
+++ b/src/replica/duplication/duplication_sync_timer.cpp
@@ -83,7 +83,7 @@ void duplication_sync_timer::run()
duplication_sync_rpc rpc(std::move(req), RPC_CM_DUPLICATION_SYNC, 3_s);
rpc_address meta_server_address(_stub->get_meta_server_address());
- LOG_INFO("duplication_sync to meta({})", meta_server_address.to_string());
+ LOG_INFO("duplication_sync to meta({})", meta_server_address);
zauto_lock l(_lock);
_rpc_task =
@@ -99,7 +99,7 @@ void
duplication_sync_timer::on_duplication_sync_reply(error_code err,
err = resp.err;
}
if (err != ERR_OK) {
- LOG_ERROR("on_duplication_sync_reply: err({})", err.to_string());
+ LOG_ERROR("on_duplication_sync_reply: err({})", err);
} else {
update_duplication_map(resp.dup_map);
}
diff --git a/src/replica/duplication/mutation_batch.cpp
b/src/replica/duplication/mutation_batch.cpp
index 8d226d078..8b7a815fc 100644
--- a/src/replica/duplication/mutation_batch.cpp
+++ b/src/replica/duplication/mutation_batch.cpp
@@ -116,7 +116,7 @@ error_s mutation_batch::add(mutation_ptr mu)
return FMT_ERR(
ERR_INVALID_DATA,
"failed to add mutation [err:{}, logged:{}, decree:{},
committed:{}, start_decree:{}]",
- ec.to_string(),
+ ec,
mu->is_logged(),
mu->get_decree(),
mu->data.header.last_committed_decree,
diff --git a/src/replica/duplication/replica_follower.cpp
b/src/replica/duplication/replica_follower.cpp
index 141493022..10a09c783 100644
--- a/src/replica/duplication/replica_follower.cpp
+++ b/src/replica/duplication/replica_follower.cpp
@@ -137,8 +137,7 @@ error_code
replica_follower::update_master_replica_config(error_code err, query_
{
error_code err_code = err != ERR_OK ? err : resp.err;
if (dsn_unlikely(err_code != ERR_OK)) {
- LOG_ERROR_PREFIX(
- "query master[{}] config failed: {}", master_replica_name(),
err_code.to_string());
+ LOG_ERROR_PREFIX("query master[{}] config failed: {}",
master_replica_name(), err_code);
return err_code;
}
@@ -176,8 +175,8 @@ error_code
replica_follower::update_master_replica_config(error_code err, query_
LOG_INFO_PREFIX(
"query master[{}] config successfully and update local config:
remote={}, gpid={}",
master_replica_name(),
- _master_replica_config.primary.to_string(),
- _master_replica_config.pid.to_string());
+ _master_replica_config.primary,
+ _master_replica_config.pid);
return ERR_OK;
}
@@ -206,7 +205,7 @@ error_code replica_follower::nfs_copy_checkpoint(error_code
err, learn_response
if (dsn_unlikely(err_code != ERR_OK)) {
LOG_ERROR_PREFIX("query master[{}] replica checkpoint info failed, err
= {}",
master_replica_name(),
- err_code.to_string());
+ err_code);
return err_code;
}
@@ -254,7 +253,7 @@ void replica_follower::nfs_copy_remote_files(const
rpc_address &remote_node,
LOG_ERROR_PREFIX("nfs copy master[{}] checkpoint failed:
checkpoint = {}, err = {}",
master_replica_name(),
remote_dir,
- err.to_string());
+ err);
return;
}
LOG_INFO_PREFIX("nfs copy master[{}] checkpoint completed:
checkpoint = {}, size = {}",
diff --git a/src/replica/replica_2pc.cpp b/src/replica/replica_2pc.cpp
index 31be50c7f..0e335d49b 100644
--- a/src/replica/replica_2pc.cpp
+++ b/src/replica/replica_2pc.cpp
@@ -135,7 +135,7 @@ void replica::on_client_write(dsn::message_ex *request,
bool ignore_throttling)
"client from {} write request body size exceed threshold, request
= [{}], "
"request_body_size "
"= {}, FLAGS_max_allowed_write_size = {}, it will be rejected!",
- request->header->from_address.to_string(),
+ request->header->from_address,
request_info,
request->body_size(),
FLAGS_max_allowed_write_size);
@@ -147,8 +147,8 @@ void replica::on_client_write(dsn::message_ex *request,
bool ignore_throttling)
task_spec *spec = task_spec::get(request->rpc_code());
if (dsn_unlikely(nullptr == spec || request->rpc_code() ==
TASK_CODE_INVALID)) {
LOG_ERROR("recv message with unhandled rpc name {} from {}, trace_id =
{}",
- request->rpc_code().to_string(),
- request->header->from_address.to_string(),
+ request->rpc_code(),
+ request->header->from_address,
request->header->trace_id);
response_client_write(request, ERR_HANDLER_NOT_FOUND);
return;
diff --git a/src/replica/replica_check.cpp b/src/replica/replica_check.cpp
index f5a4f5f58..ae12c2bff 100644
--- a/src/replica/replica_check.cpp
+++ b/src/replica/replica_check.cpp
@@ -174,7 +174,7 @@ void replica::on_group_check(const group_check_request
&request,
LOG_INFO_PREFIX("process group check, primary = {}, ballot = {}, status =
{}, "
"last_committed_decree = {}, confirmed_decree = {}",
- request.config.primary.to_string(),
+ request.config.primary,
request.config.ballot,
enum_to_string(request.config.status),
request.last_committed_decree,
diff --git a/src/replica/replica_context.cpp b/src/replica/replica_context.cpp
index 5c882d153..f55746316 100644
--- a/src/replica/replica_context.cpp
+++ b/src/replica/replica_context.cpp
@@ -177,7 +177,7 @@ bool primary_context::secondary_disk_abnormal() const
if (kv.second != disk_status::NORMAL) {
LOG_INFO("partition[{}] secondary[{}] disk space is {}",
membership.pid,
- kv.first.to_string(),
+ kv.first,
enum_to_string(kv.second));
return true;
}
diff --git a/src/replica/replica_disk_migrator.cpp
b/src/replica/replica_disk_migrator.cpp
index 4ad9cb98c..0017c5872 100644
--- a/src/replica/replica_disk_migrator.cpp
+++ b/src/replica/replica_disk_migrator.cpp
@@ -221,7 +221,7 @@ bool
replica_disk_migrator::migrate_replica_checkpoint(const replica_disk_migrat
LOG_ERROR_PREFIX("disk migration(origin={}, target={}) sync_checkpoint
failed({})",
req.origin_disk,
req.target_disk,
- sync_checkpoint_err.to_string());
+ sync_checkpoint_err);
reset_status();
return false;
}
@@ -234,7 +234,7 @@ bool
replica_disk_migrator::migrate_replica_checkpoint(const replica_disk_migrat
req.origin_disk,
req.target_disk,
_target_data_dir,
- copy_checkpoint_err.to_string(),
+ copy_checkpoint_err,
_target_replica_dir);
reset_status();
utils::filesystem::remove_path(_target_replica_dir);
@@ -259,7 +259,7 @@ bool replica_disk_migrator::migrate_replica_app_info(const
replica_disk_migrate_
LOG_ERROR_PREFIX("disk migration(origin={}, target={}) stores app init
info failed({})",
req.origin_disk,
req.target_disk,
- store_init_info_err.to_string());
+ store_init_info_err);
reset_status();
return false;
}
@@ -271,7 +271,7 @@ bool replica_disk_migrator::migrate_replica_app_info(const
replica_disk_migrate_
LOG_ERROR_PREFIX("disk migration(origin={}, target={}) stores app info
failed({})",
req.origin_disk,
req.target_disk,
- store_info_err.to_string());
+ store_info_err);
reset_status();
return false;
}
diff --git a/src/replica/replica_learn.cpp b/src/replica/replica_learn.cpp
index c69076ac9..1fc4a7ada 100644
--- a/src/replica/replica_learn.cpp
+++ b/src/replica/replica_learn.cpp
@@ -581,9 +581,9 @@ void replica::on_learn_reply(error_code err, learn_request
&&req, learn_response
"{}, learn_start_decree = {}, last_commit_decree = {},
current_learning_status = "
"{} ",
req.signature,
- resp.config.primary.to_string(),
+ resp.config.primary,
_potential_secondary_states.duration_ms(),
- resp.err.to_string(),
+ resp.err,
resp.last_committed_decree,
resp.prepare_start_decree,
enum_to_string(resp.type),
@@ -1408,7 +1408,7 @@ void replica::on_add_learner(const group_check_request
&request)
{
LOG_INFO_PREFIX("process add learner, primary = {}, ballot ={}, status
={}, "
"last_committed_decree = {}, duplicating = {}",
- request.config.primary.to_string(),
+ request.config.primary,
request.config.ballot,
enum_to_string(request.config.status),
request.last_committed_decree,
@@ -1554,7 +1554,7 @@ error_code
replica::apply_learned_state_from_private_log(learn_state &state)
_potential_secondary_states.learning_version,
duplicating,
step_back,
- _config.primary.to_string(),
+ _config.primary,
_potential_secondary_states.duration_ms(),
state.files.size(),
_potential_secondary_states.first_learn_start_decree,
@@ -1585,7 +1585,7 @@ error_code
replica::apply_learned_state_from_private_log(learn_state &state)
"learned_to_decree_included({}) >
last_committed_decree({}), commit to "
"to_decree_included",
_potential_secondary_states.learning_version,
- _config.primary.to_string(),
+ _config.primary,
state.to_decree_included,
last_committed_decree());
plist.commit(state.to_decree_included, COMMIT_TO_DECREE_SOFT);
@@ -1595,7 +1595,7 @@ error_code
replica::apply_learned_state_from_private_log(learn_state &state)
"learn_duration ={} ms, apply in-buffer private logs
done, "
"replay_count ={}, app_committed_decree = {}",
_potential_secondary_states.learning_version,
- _config.primary.to_string(),
+ _config.primary,
_potential_secondary_states.duration_ms(),
replay_count,
_app->last_committed_decree());
diff --git a/src/replica/replica_restore.cpp b/src/replica/replica_restore.cpp
index 47ef20d2b..d4e268cc7 100644
--- a/src/replica/replica_restore.cpp
+++ b/src/replica/replica_restore.cpp
@@ -255,9 +255,8 @@ dsn::error_code replica::find_valid_checkpoint(const
configuration_restore_reque
->wait();
if (create_response.err != dsn::ERR_OK) {
- LOG_ERROR("{}: create file of block_service failed, reason {}",
- name(),
- create_response.err.to_string());
+ LOG_ERROR(
+ "{}: create file of block_service failed, reason {}", name(),
create_response.err);
return create_response.err;
}
@@ -274,7 +273,7 @@ dsn::error_code replica::find_valid_checkpoint(const
configuration_restore_reque
LOG_ERROR("{}: read file {} failed, reason {}",
name(),
create_response.file_handle->file_name(),
- r.err.to_string());
+ r.err);
return r.err;
}
diff --git a/src/replica/replica_stub.cpp b/src/replica/replica_stub.cpp
index 5e3f90971..1a54b5281 100644
--- a/src/replica/replica_stub.cpp
+++ b/src/replica/replica_stub.cpp
@@ -1689,9 +1689,9 @@ void replica_stub::open_replica(
configuration_update->config.last_committed_decree == 0,
"{}@{}: cannot load replica({}.{}), ballot = {}, "
"last_committed_decree = {}, but it does not existed!",
- id.to_string(),
+ id,
_primary_address_str,
- id.to_string(),
+ id,
app.app_type.c_str(),
configuration_update->config.ballot,
configuration_update->config.last_committed_decree);
@@ -1731,25 +1731,17 @@ void replica_stub::open_replica(
LOG_WARNING(
"{}@{}: open replica failed, erase from opening replicas", id,
_primary_address_str);
zauto_write_lock l(_replicas_lock);
- CHECK_GT_MSG(_opening_replicas.erase(id),
- 0,
- "replica {} is not in _opening_replicas",
- id.to_string());
+ CHECK_GT_MSG(_opening_replicas.erase(id), 0, "replica {} is not in
_opening_replicas", id);
METRIC_VAR_DECREMENT(opening_replicas);
return;
}
{
zauto_write_lock l(_replicas_lock);
- CHECK_GT_MSG(_opening_replicas.erase(id),
- 0,
- "replica {} is not in _opening_replicas",
- id.to_string());
+ CHECK_GT_MSG(_opening_replicas.erase(id), 0, "replica {} is not in
_opening_replicas", id);
METRIC_VAR_DECREMENT(opening_replicas);
- CHECK(_replicas.find(id) == _replicas.end(),
- "replica {} is already in _replicas",
- id.to_string());
+ CHECK(_replicas.find(id) == _replicas.end(), "replica {} is already in
_replicas", id);
_replicas.insert(replicas::value_type(rep->get_gpid(), rep));
METRIC_VAR_INCREMENT(total_replicas);
@@ -2355,7 +2347,7 @@ replica_stub::exec_command_on_replica(const
std::vector<std::string> &args,
std::stringstream query_state;
query_state << processed << " processed, " << not_found << " not found";
for (auto &kv : results) {
- query_state << "\n " << kv.first.to_string() << "@" <<
_primary_address_str;
+ query_state << "\n " << kv.first << "@" << _primary_address_str;
if (kv.second.first != partition_status::PS_INVALID)
query_state << "@" << (kv.second.first ==
partition_status::PS_PRIMARY ? "P" : "S");
query_state << " : " << kv.second.second;
@@ -2663,7 +2655,7 @@ void replica_stub::on_group_bulk_load(group_bulk_load_rpc
rpc)
"meta_bulk_load_status = {}",
request.config.pid,
_primary_address_str,
- request.config.primary.to_string(),
+ request.config.primary,
request.config.ballot,
enum_to_string(request.meta_bulk_load_status));
diff --git a/src/replica/split/replica_split_manager.cpp
b/src/replica/split/replica_split_manager.cpp
index ab5e0ac39..8df996e5d 100644
--- a/src/replica/split/replica_split_manager.cpp
+++ b/src/replica/split/replica_split_manager.cpp
@@ -148,7 +148,7 @@ void replica_split_manager::parent_start_split(
_child_gpid,
_child_init_ballot,
enum_to_string(status()),
- request.config.primary.to_string());
+ request.config.primary);
tasking::enqueue(LPC_CREATE_CHILD,
tracker(),
@@ -618,7 +618,7 @@ void replica_split_manager::child_notify_catch_up() // on
child partition
LOG_INFO_PREFIX("send notification to primary parent[{}@{}], ballot={}",
_replica->_split_states.parent_gpid,
- _replica->_config.primary.to_string(),
+ _replica->_config.primary,
get_ballot());
notify_catch_up_rpc rpc(std::move(request),
@@ -649,7 +649,7 @@ void replica_split_manager::child_notify_catch_up() // on
child partition
}
LOG_INFO_PREFIX("notify primary parent[{}@{}] catch up succeed",
_replica->_split_states.parent_gpid,
- _replica->_config.primary.to_string());
+ _replica->_config.primary);
});
}
@@ -683,7 +683,7 @@ void replica_split_manager::parent_handle_child_catch_up(
response.err = ERR_OK;
LOG_INFO_PREFIX("receive catch_up request from {}@{}, current ballot={}",
request.child_gpid,
- request.child_address.to_string(),
+ request.child_address,
request.child_ballot);
_replica->_primary_states.caught_up_children.insert(request.child_address);
@@ -803,7 +803,7 @@ void
replica_split_manager::parent_send_update_partition_count_request(
LOG_INFO_PREFIX(
"send update child group partition count request to node({}), new
partition_count = {}",
- address.to_string(),
+ address,
new_partition_count);
update_child_group_partition_count_rpc rpc(std::move(request),
RPC_SPLIT_UPDATE_CHILD_PARTITION_COUNT,
@@ -903,7 +903,7 @@ void
replica_split_manager::on_update_child_group_partition_count_reply(
if (error == ERR_TIMEOUT) {
LOG_WARNING_PREFIX(
"failed to update child node({}) partition_count, error = {}, wait
and retry",
- request.target_address.to_string(),
+ request.target_address,
error);
tasking::enqueue(
LPC_PARTITION_SPLIT,
@@ -920,7 +920,7 @@ void
replica_split_manager::on_update_child_group_partition_count_reply(
if (error != ERR_OK) {
LOG_ERROR_PREFIX("failed to update child node({}) partition_count({}),
error = {}",
- request.target_address.to_string(),
+ request.target_address,
request.new_partition_count,
error);
parent_handle_split_error("on_update_child_group_partition_count_reply
error", true);
@@ -928,7 +928,7 @@ void
replica_split_manager::on_update_child_group_partition_count_reply(
}
LOG_INFO_PREFIX("update node({}) child({}) partition_count({}) succeed",
- request.target_address.to_string(),
+ request.target_address,
request.child_pid,
request.new_partition_count);
@@ -1509,7 +1509,7 @@ void
replica_split_manager::parent_send_notify_stop_request(
LOG_INFO_PREFIX("group {} split succeed, send notify_stop_request to meta
server({})",
meta_split_status == split_status::PAUSING ? "pause" :
"cancel",
- meta_address.to_string());
+ meta_address);
notify_stop_split_rpc rpc(
std::move(req), RPC_CM_NOTIFY_STOP_SPLIT, 0_ms, 0,
get_gpid().thread_hash());
rpc.call(meta_address, tracker(), [this, rpc](error_code ec) mutable {
@@ -1532,8 +1532,7 @@ void replica_split_manager::query_child_state() // on
primary parent
request->partition_count = _replica->_app_info.partition_count;
rpc_address meta_address(_stub->_failure_detector->get_servers());
- LOG_INFO_PREFIX("send query child partition state request to meta
server({})",
- meta_address.to_string());
+ LOG_INFO_PREFIX("send query child partition state request to meta
server({})", meta_address);
query_child_state_rpc rpc(
std::move(request), RPC_CM_QUERY_CHILD_STATE, 0_ms, 0,
get_gpid().thread_hash());
_replica->_primary_states.query_child_task =
diff --git a/src/replica/storage/simple_kv/test/case.cpp
b/src/replica/storage/simple_kv/test/case.cpp
index a69e3129a..644638137 100644
--- a/src/replica/storage/simple_kv/test/case.cpp
+++ b/src/replica/storage/simple_kv/test/case.cpp
@@ -263,12 +263,7 @@ std::string exit_case_line::to_string() const
bool exit_case_line::parse(const std::string &params) { return true; }
-std::string state_case_line::to_string() const
-{
- std::ostringstream oss;
- oss << name() << ":" << _state.to_string();
- return oss.str();
-}
+std::string state_case_line::to_string() const { return fmt::format("{}:{}",
name(), _state); }
bool state_case_line::parse(const std::string &params) { return
_state.from_string(params); }
@@ -285,12 +280,7 @@ bool state_case_line::check_state(const state_snapshot
&cur_state, bool &forward
return false;
}
-std::string config_case_line::to_string() const
-{
- std::ostringstream oss;
- oss << name() << ":" << _config.to_string();
- return oss.str();
-}
+std::string config_case_line::to_string() const { return fmt::format("{}:{}",
name(), _config); }
bool config_case_line::parse(const std::string &params) { return
_config.from_string(params); }
@@ -678,7 +668,10 @@ void event_on_aio_enqueue::init(aio_task *tsk)
_transferred_size =
boost::lexical_cast<std::string>(tsk->get_transferred_size());
}
-std::string event_case_line::to_string() const { return name() + ":" +
_event_cond->to_string(); }
+std::string event_case_line::to_string() const
+{
+ return fmt::format("{}:{}", name(), *_event_cond);
+}
bool event_case_line::parse(const std::string &params)
{
@@ -749,11 +742,11 @@ std::string client_case_line::to_string() const
break;
}
case end_write: {
- oss << "id=" << _id << ",err=" << _err.to_string() << ",resp=" <<
_write_resp;
+ oss << "id=" << _id << ",err=" << _err << ",resp=" << _write_resp;
break;
}
case end_read: {
- oss << "id=" << _id << ",err=" << _err.to_string() << ",resp=" <<
_read_resp;
+ oss << "id=" << _id << ",err=" << _err << ",resp=" << _read_resp;
break;
}
case replica_config: {
@@ -1050,7 +1043,7 @@ void test_case::forward()
output(cl->to_string());
print(cl, "");
}
- LOG_INFO("=== on_case_forward:[{}]{}", cl->line_no(),
cl->to_string());
+ LOG_INFO("=== on_case_forward:[{}]{}", cl->line_no(), *cl);
}
_next++;
if (_next >= _case_lines.size()) {
@@ -1331,7 +1324,7 @@ void test_case::on_config_change(const parti_config
&last, const parti_config &c
_null_loop_count = 0; // reset null loop count
- std::string buf = std::string(config_case_line::NAME()) + ":" +
cur.to_string();
+ std::string buf = fmt::format("{}:{}", config_case_line::NAME(), cur);
LOG_INFO("=== on_config_change: {}", cur);
if (check_skip(true)) {
@@ -1367,7 +1360,7 @@ void test_case::on_state_change(const state_snapshot
&last, const state_snapshot
_null_loop_count = 0; // reset null loop count
- std::string buf = std::string(state_case_line::NAME()) + ":" +
cur.to_string();
+ std::string buf = fmt::format("{}:{}", state_case_line::NAME(), cur);
LOG_INFO("=== on_state_change: {}\n{}", cur, cur.diff_string(last));
if (check_skip(true)) {
diff --git a/src/replica/storage/simple_kv/test/case.h
b/src/replica/storage/simple_kv/test/case.h
index 4450b9134..3f549ebd1 100644
--- a/src/replica/storage/simple_kv/test/case.h
+++ b/src/replica/storage/simple_kv/test/case.h
@@ -74,6 +74,11 @@ public:
virtual std::string to_string() const = 0;
virtual bool parse(const std::string &params) = 0;
+ friend std::ostream &operator<<(std::ostream &os, const case_line &cl)
+ {
+ return os << cl.to_string();
+ }
+
private:
int _line_no;
};
@@ -499,4 +504,5 @@ private:
}
}
+USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::case_line);
USER_DEFINED_STRUCTURE_FORMATTER(::dsn::replication::test::event);
diff --git a/src/replica/storage/simple_kv/test/common.cpp
b/src/replica/storage/simple_kv/test/common.cpp
index cdc3ed115..621218faf 100644
--- a/src/replica/storage/simple_kv/test/common.cpp
+++ b/src/replica/storage/simple_kv/test/common.cpp
@@ -152,8 +152,8 @@ bool replica_id::from_string(const std::string &str)
std::string replica_state::to_string() const
{
std::stringstream oss;
- oss << "{" << id.to_string() << "," <<
partition_status_to_short_string(status) << "," << ballot
- << "," << last_committed_decree;
+ oss << "{" << id << "," << partition_status_to_short_string(status) << ","
<< ballot << ","
+ << last_committed_decree;
if (last_durable_decree != -1)
oss << "," << last_durable_decree;
oss << "}";
@@ -189,7 +189,7 @@ std::string state_snapshot::to_string() const
const replica_state &s = kv.second;
if (i != 0)
oss << ",";
- oss << s.to_string();
+ oss << s;
i++;
}
oss << "}";
@@ -237,29 +237,28 @@ std::string state_snapshot::diff_string(const
state_snapshot &other) const
oss << "{" << std::endl;
while (oth_it != oth.end() && cur_it != cur.end()) {
if (oth_it->first < cur_it->first) {
- oss << del_mark << oth_it->second.to_string() << std::endl;
+ oss << del_mark << oth_it->second << std::endl;
++oth_it;
} else if (cur_it->first < oth_it->first) {
- oss << add_mark << cur_it->second.to_string() << std::endl;
+ oss << add_mark << cur_it->second << std::endl;
++cur_it;
} else {
CHECK_EQ(oth_it->first, cur_it->first);
if (oth_it->second != cur_it->second) {
- oss << chg_mark << cur_it->second.to_string()
- << " <= " << oth_it->second.to_string() << std::endl;
+ oss << chg_mark << cur_it->second << " <= " << oth_it->second
<< std::endl;
} else {
- oss << unc_mark << cur_it->second.to_string() << std::endl;
+ oss << unc_mark << cur_it->second << std::endl;
}
++oth_it;
++cur_it;
}
}
while (oth_it != oth.end()) {
- oss << del_mark << oth_it->second.to_string() << std::endl;
+ oss << del_mark << oth_it->second << std::endl;
++oth_it;
}
while (cur_it != cur.end()) {
- oss << add_mark << cur_it->second.to_string() << std::endl;
+ oss << add_mark << cur_it->second << std::endl;
++cur_it;
}
oss << "}";
diff --git a/src/replica/storage/simple_kv/test/common.h
b/src/replica/storage/simple_kv/test/common.h
index f042d2b21..b1ad6c666 100644
--- a/src/replica/storage/simple_kv/test/common.h
+++ b/src/replica/storage/simple_kv/test/common.h
@@ -128,6 +128,10 @@ struct replica_state
bool operator!=(const replica_state &o) const { return !(*this == o); }
std::string to_string() const;
bool from_string(const std::string &str);
+ friend std::ostream &operator<<(std::ostream &os, const replica_state &rs)
+ {
+ return os << rs.to_string();
+ }
};
struct state_snapshot
diff --git a/src/replica/test/mutation_log_test.cpp
b/src/replica/test/mutation_log_test.cpp
index 283f11767..5d2f339b7 100644
--- a/src/replica/test/mutation_log_test.cpp
+++ b/src/replica/test/mutation_log_test.cpp
@@ -381,7 +381,7 @@ public:
error_code ec;
log_file_ptr file = log_file::open_read(log_file_path.c_str(), ec);
- ASSERT_EQ(ec, ERR_OK) << ec.to_string();
+ ASSERT_EQ(ec, ERR_OK) << ec;
int64_t end_offset;
int mutation_index = -1;
@@ -397,7 +397,7 @@ public:
return true;
},
end_offset);
- ASSERT_EQ(ec, ERR_HANDLE_EOF) << ec.to_string();
+ ASSERT_EQ(ec, ERR_HANDLE_EOF) << ec;
}
}
diff --git a/src/replica/test/replica_disk_migrate_test.cpp
b/src/replica/test/replica_disk_migrate_test.cpp
index 08ee075d9..2436b8352 100644
--- a/src/replica/test/replica_disk_migrate_test.cpp
+++ b/src/replica/test/replica_disk_migrate_test.cpp
@@ -235,27 +235,24 @@ TEST_P(replica_disk_migrate_test,
disk_migrate_replica_run)
request.pid = dsn::gpid(app_info_1.app_id, 2);
request.origin_disk = "tag_1";
request.target_disk = "tag_empty_1";
- set_replica_dir(request.pid,
- fmt::format("./{}/{}.replica", request.origin_disk,
request.pid.to_string()));
+ set_replica_dir(request.pid, fmt::format("./{}/{}.replica",
request.origin_disk, request.pid));
set_migration_status(request.pid, disk_migration_status::MOVING);
- const std::string kTargetReplicaDir = fmt::format(
- "./{}/{}.replica.disk.migrate.tmp/", request.target_disk,
request.pid.to_string());
+ const std::string kTargetReplicaDir =
+ fmt::format("./{}/{}.replica.disk.migrate.tmp/", request.target_disk,
request.pid);
- const std::string kTargetDataDir = fmt::format(
- "./{}/{}.replica.disk.migrate.tmp/data/rdb/", request.target_disk,
request.pid.to_string());
+ const std::string kTargetDataDir =
+ fmt::format("./{}/{}.replica.disk.migrate.tmp/data/rdb/",
request.target_disk, request.pid);
const std::string kTargetCheckPointFile =
fmt::format("./{}/{}.replica.disk.migrate.tmp/data/rdb/checkpoint.file",
request.target_disk,
- request.pid.to_string());
+ request.pid);
const std::string kTargetInitInfoFile =
fmt::format("./{}/{}.replica.disk.migrate.tmp/{}",
request.target_disk,
-
request.pid.to_string(),
+ request.pid,
replica_init_info::kInitInfo);
- const std::string kTargetAppInfoFile =
fmt::format("./{}/{}.replica.disk.migrate.tmp/{}",
- request.target_disk,
- request.pid.to_string(),
- replica::kAppInfo);
+ const std::string kTargetAppInfoFile = fmt::format(
+ "./{}/{}.replica.disk.migrate.tmp/{}", request.target_disk,
request.pid, replica::kAppInfo);
init_migration_target_dir(fake_migrate_rpc);
ASSERT_TRUE(utils::filesystem::directory_exists(kTargetDataDir));
@@ -316,13 +313,13 @@ TEST_P(replica_disk_migrate_test,
disk_migrate_replica_update)
request.target_disk = "tag_empty_1";
const std::string kReplicaOriginDir =
- fmt::format("./{}/{}.replica", request.origin_disk,
request.pid.to_string());
- const std::string kReplicaNewTempDir = fmt::format(
- "./{}/{}.replica.disk.migrate.tmp/", request.target_disk,
request.pid.to_string());
- const std::string kReplicaOriginSuffixDir = fmt::format(
- "./{}/{}.replica.disk.migrate.ori/", request.origin_disk,
request.pid.to_string());
+ fmt::format("./{}/{}.replica", request.origin_disk, request.pid);
+ const std::string kReplicaNewTempDir =
+ fmt::format("./{}/{}.replica.disk.migrate.tmp/", request.target_disk,
request.pid);
+ const std::string kReplicaOriginSuffixDir =
+ fmt::format("./{}/{}.replica.disk.migrate.ori/", request.origin_disk,
request.pid);
const std::string kReplicaNewDir =
- fmt::format("./{}/{}.replica/", request.target_disk,
request.pid.to_string());
+ fmt::format("./{}/{}.replica/", request.target_disk, request.pid);
utils::filesystem::create_directory(kReplicaOriginDir);
utils::filesystem::create_directory(kReplicaNewTempDir);
diff --git a/src/runtime/fault_injector.cpp b/src/runtime/fault_injector.cpp
index b47f7d04f..653c052a4 100644
--- a/src/runtime/fault_injector.cpp
+++ b/src/runtime/fault_injector.cpp
@@ -33,6 +33,7 @@
#include <vector>
#include "aio/aio_task.h"
+#include "fmt/core.h"
#include "runtime/rpc/rpc_message.h"
#include "runtime/task/task.h"
#include "runtime/task/task_code.h"
@@ -322,8 +323,7 @@ void fault_injector::install(service_spec &spec)
if (i == TASK_CODE_INVALID)
continue;
- std::string section_name =
- std::string("task.") + std::string(dsn::task_code(i).to_string());
+ std::string section_name = fmt::format("task.{}", dsn::task_code(i));
task_spec *spec = task_spec::get(i);
CHECK_NOTNULL(spec, "");
diff --git a/src/runtime/tracer.cpp b/src/runtime/tracer.cpp
index 008ef4388..3de063e08 100644
--- a/src/runtime/tracer.cpp
+++ b/src/runtime/tracer.cpp
@@ -34,6 +34,7 @@
#include <vector>
#include "aio/aio_task.h"
+#include "fmt/core.h"
#include "fmt/format.h"
#include "runtime/global_config.h"
#include "runtime/rpc/rpc_message.h"
@@ -302,8 +303,7 @@ void tracer::install(service_spec &spec)
if (i == TASK_CODE_INVALID)
continue;
- std::string section_name =
- std::string("task.") + std::string(dsn::task_code(i).to_string());
+ std::string section_name = fmt::format("task.{}", dsn::task_code(i));
task_spec *spec = task_spec::get(i);
CHECK_NOTNULL(spec, "");
diff --git a/src/security/client_negotiation.cpp
b/src/security/client_negotiation.cpp
index 9d19b82fb..68a954baa 100644
--- a/src/security/client_negotiation.cpp
+++ b/src/security/client_negotiation.cpp
@@ -41,7 +41,7 @@ namespace security {
client_negotiation::client_negotiation(rpc_session_ptr session) :
negotiation(session)
{
- _name = fmt::format("CLIENT_NEGOTIATION(SERVER={})",
_session->remote_address().to_string());
+ _name = fmt::format("CLIENT_NEGOTIATION(SERVER={})",
_session->remote_address());
}
void client_negotiation::start()
diff --git a/src/security/negotiation_manager.cpp
b/src/security/negotiation_manager.cpp
index 4f7105049..26c60a337 100644
--- a/src/security/negotiation_manager.cpp
+++ b/src/security/negotiation_manager.cpp
@@ -24,7 +24,6 @@
#include "http/http_server.h"
#include "negotiation_utils.h"
#include "runtime/rpc/network.h"
-#include "runtime/rpc/rpc_address.h"
#include "runtime/rpc/rpc_message.h"
#include "runtime/task/task_code.h"
#include "security_types.h"
@@ -144,8 +143,8 @@ std::shared_ptr<negotiation>
negotiation_manager::get_negotiation(negotiation_rp
auto it = _negotiations.find(rpc.dsn_request()->io_session);
if (it == _negotiations.end()) {
LOG_INFO("negotiation was removed for msg: {}, {}",
- rpc.dsn_request()->rpc_code().to_string(),
- rpc.remote_address().to_string());
+ rpc.dsn_request()->rpc_code(),
+ rpc.remote_address());
return nullptr;
}
diff --git a/src/security/server_negotiation.cpp
b/src/security/server_negotiation.cpp
index b85edca63..2785ff84f 100644
--- a/src/security/server_negotiation.cpp
+++ b/src/security/server_negotiation.cpp
@@ -41,7 +41,7 @@ DSN_DECLARE_string(service_name);
server_negotiation::server_negotiation(rpc_session_ptr session) :
negotiation(session)
{
- _name = fmt::format("SERVER_NEGOTIATION(CLIENT={})",
_session->remote_address().to_string());
+ _name = fmt::format("SERVER_NEGOTIATION(CLIENT={})",
_session->remote_address());
}
void server_negotiation::start()
@@ -143,7 +143,7 @@ void server_negotiation::do_challenge(negotiation_rpc rpc,
error_s err_s, const
if (!err_s.is_ok() && err_s.code() != ERR_SASL_INCOMPLETE) {
LOG_WARNING("{}: negotiation failed, with err = {}, msg = {}",
_name,
- err_s.code().to_string(),
+ err_s.code(),
err_s.description());
fail_negotiation();
return;
diff --git a/src/server/hotspot_partition_calculator.cpp
b/src/server/hotspot_partition_calculator.cpp
index 379254033..2d028c828 100644
--- a/src/server/hotspot_partition_calculator.cpp
+++ b/src/server/hotspot_partition_calculator.cpp
@@ -22,18 +22,17 @@
#include <algorithm>
#include <cmath>
+#include "absl/strings/string_view.h"
#include "client/replication_ddl_client.h"
#include "common/gpid.h"
#include "common/serialization_helper/dsn.layer2_types.h"
#include "perf_counter/perf_counter.h"
-#include "runtime/rpc/rpc_address.h"
#include "server/hotspot_partition_stat.h"
#include "shell/command_executor.h"
#include "utils/error_code.h"
#include "utils/fail_point.h"
#include "utils/flags.h"
#include "utils/fmt_logging.h"
-#include "absl/strings/string_view.h"
struct row_data;
@@ -232,13 +231,13 @@ void
hotspot_partition_calculator::send_detect_hotkey_request(
(hotkey_type == dsn::replication::hotkey_type::WRITE) ? "write" :
"read",
app_name,
partition_index,
- target_address.to_string());
+ target_address);
if (error != dsn::ERR_OK) {
LOG_ERROR("Hotkey detect rpc sending failed, in {}.{}, error_hint:{}",
app_name,
partition_index,
- error.to_string());
+ error);
}
if (resp.err != dsn::ERR_OK) {
diff --git a/src/server/pegasus_server_impl.cpp
b/src/server/pegasus_server_impl.cpp
index 56f8ff583..3fd549f81 100644
--- a/src/server/pegasus_server_impl.cpp
+++ b/src/server/pegasus_server_impl.cpp
@@ -44,6 +44,7 @@
#include <ostream>
#include <set>
+#include "absl/strings/string_view.h"
#include "base/idl_utils.h" // IWYU pragma: keep
#include "base/pegasus_key_schema.h"
#include "base/pegasus_utils.h"
@@ -62,7 +63,6 @@
#include "rrdb/rrdb.code.definition.h"
#include "rrdb/rrdb_types.h"
#include "runtime/api_layer1.h"
-#include "runtime/rpc/rpc_address.h"
#include "runtime/rpc/rpc_message.h"
#include "runtime/task/async_calls.h"
#include "runtime/task/task_code.h"
@@ -81,7 +81,6 @@
#include "utils/fmt_logging.h"
#include "utils/ports.h"
#include "utils/string_conv.h"
-#include "absl/strings/string_view.h"
#include "utils/strings.h"
#include "utils/threadpool_code.h"
#include "utils/token_bucket_throttling_controller.h"
@@ -878,7 +877,7 @@ void pegasus_server_impl::on_batch_get(batch_get_rpc rpc)
if (request.keys.empty()) {
response.error = rocksdb::Status::kInvalidArgument;
LOG_ERROR_PREFIX("Invalid argument for batch_get from {}: 'keys' field
in request is empty",
- rpc.remote_address().to_string());
+ rpc.remote_address());
_cu_calculator->add_batch_get_cu(rpc.dsn_request(), response.error,
response.data);
return;
}
@@ -932,12 +931,12 @@ void pegasus_server_impl::on_batch_get(batch_get_rpc rpc)
if (FLAGS_rocksdb_verbose_log) {
LOG_ERROR_PREFIX(
"rocksdb get failed for batch_get from {}: error = {},
key size = {}",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
status.ToString(),
request.keys.size());
} else {
LOG_ERROR_PREFIX("rocksdb get failed for batch_get from {}:
error = {}",
- rpc.remote_address().to_string(),
+ rpc.remote_address(),
status.ToString());
}
@@ -1748,7 +1747,7 @@ dsn::error_code pegasus_server_impl::start(int argc, char
**argv)
last_flushed);
auto err = async_checkpoint(false);
if (err != dsn::ERR_OK) {
- LOG_ERROR_PREFIX("create checkpoint failed, error = {}",
err.to_string());
+ LOG_ERROR_PREFIX("create checkpoint failed, error = {}", err);
release_db();
return err;
}
@@ -2038,7 +2037,7 @@ private:
::dsn::error_code err =
copy_checkpoint_to_dir_unsafe(tmp_dir.c_str(), &checkpoint_decree,
flush_memtable);
if (err != ::dsn::ERR_OK) {
- LOG_ERROR_PREFIX("copy_checkpoint_to_dir_unsafe failed with err = {}",
err.to_string());
+ LOG_ERROR_PREFIX("copy_checkpoint_to_dir_unsafe failed with err = {}",
err);
return ::dsn::ERR_LOCAL_APP_FAILURE;
}
diff --git a/src/server/pegasus_server_write.cpp
b/src/server/pegasus_server_write.cpp
index 6c98971cb..4cc1039ab 100644
--- a/src/server/pegasus_server_write.cpp
+++ b/src/server/pegasus_server_write.cpp
@@ -33,7 +33,6 @@
#include "pegasus_server_write.h"
#include "pegasus_utils.h"
#include "rrdb/rrdb.code.definition.h"
-#include "runtime/rpc/rpc_address.h"
#include "runtime/rpc/rpc_holder.h"
#include "runtime/rpc/rpc_message.h"
#include "server/pegasus_write_service.h"
@@ -84,7 +83,7 @@ int
pegasus_server_write::on_batched_write_requests(dsn::message_ex **requests,
} catch (TTransportException &ex) {
METRIC_VAR_INCREMENT(corrupt_writes);
LOG_ERROR_PREFIX("pegasus not batch write handler failed, from = {},
exception = {}",
- requests[0]->header->from_address.to_string(),
+ requests[0]->header->from_address,
ex.what());
return rocksdb::Status::kOk;
}
@@ -120,15 +119,15 @@ int
pegasus_server_write::on_batched_writes(dsn::message_ex **requests, int coun
} else {
if (_non_batch_write_handlers.find(rpc_code) !=
_non_batch_write_handlers.end()) {
- LOG_FATAL("rpc code not allow batch: {}",
rpc_code.to_string());
+ LOG_FATAL("rpc code not allow batch: {}", rpc_code);
} else {
- LOG_FATAL("rpc code not handled: {}",
rpc_code.to_string());
+ LOG_FATAL("rpc code not handled: {}", rpc_code);
}
}
} catch (TTransportException &ex) {
METRIC_VAR_INCREMENT(corrupt_writes);
LOG_ERROR_PREFIX("pegasus batch writes handler failed, from =
{}, exception = {}",
- requests[i]->header->from_address.to_string(),
+ requests[i]->header->from_address,
ex.what());
}
@@ -171,7 +170,7 @@ void pegasus_server_write::request_key_check(int64_t decree,
LOG_INFO_ROCKSDB("Write",
"decree: {}, code: {}, hash_key: {}, sort_key: {}",
decree,
- msg->local_rpc_code.to_string(),
+ msg->local_rpc_code,
utils::c_escape_sensitive_string(hash_key),
utils::c_escape_sensitive_string(sort_key));
}
diff --git a/src/utils/blob.h b/src/utils/blob.h
index 29b24f619..f05e33524 100644
--- a/src/utils/blob.h
+++ b/src/utils/blob.h
@@ -33,6 +33,7 @@
#include <thrift/protocol/TBinaryProtocol.h>
#include <thrift/protocol/TProtocol.h>
+#include "utils/fmt_utils.h"
#include "utils.h"
namespace dsn {
@@ -158,6 +159,11 @@ public:
return std::string(_data, _length);
}
+ friend std::ostream &operator<<(std::ostream &os, const blob &bb)
+ {
+ return os << bb.to_string();
+ }
+
absl::string_view to_string_view() const { return absl::string_view(_data,
_length); }
uint32_t read(::apache::thrift::protocol::TProtocol *iprot);
@@ -214,3 +220,5 @@ inline uint32_t
blob::write(apache::thrift::protocol::TProtocol *oprot) const
}
} // namespace dsn
+
+USER_DEFINED_STRUCTURE_FORMATTER(::dsn::blob);
---------------------------------------------------------------------
To unsubscribe, e-mail: [email protected]
For additional commands, e-mail: [email protected]