This is an automated email from the ASF dual-hosted git repository.

zwoop pushed a commit to branch 8.0.x
in repository https://gitbox.apache.org/repos/asf/trafficserver.git

commit b0237c585f8492da07a1868e6788ff0ea8187c2c
Author: Fei Deng <duke8...@gmail.com>
AuthorDate: Thu Jun 14 10:36:55 2018 -0500

    thread counters
    
    (cherry picked from commit f7a8a9d747d2ea633edc0d6a2468824177651abc)
---
 iocore/eventsystem/I_EventProcessor.h    | 13 +++++++------
 iocore/eventsystem/UnixEventProcessor.cc | 10 +++++-----
 proxy/http/HttpProxyServerMain.cc        |  5 +----
 proxy/http/HttpProxyServerMain.h         |  1 -
 src/traffic_server/traffic_server.cc     |  3 +--
 5 files changed, 14 insertions(+), 18 deletions(-)

diff --git a/iocore/eventsystem/I_EventProcessor.h b/iocore/eventsystem/I_EventProcessor.h
index f85a0c5..c95a97d 100644
--- a/iocore/eventsystem/I_EventProcessor.h
+++ b/iocore/eventsystem/I_EventProcessor.h
@@ -27,6 +27,7 @@
 #include "I_Continuation.h"
 #include "I_Processor.h"
 #include "I_Event.h"
+#include <atomic>
 
 #ifdef TS_MAX_THREADS_IN_EACH_THREAD_TYPE
 constexpr int MAX_THREADS_IN_EACH_TYPE = TS_MAX_THREADS_IN_EACH_THREAD_TYPE;
@@ -301,12 +302,12 @@ public:
   /// Data kept for each thread group.
   /// The thread group ID is the index into an array of these and so is not stored explicitly.
   struct ThreadGroupDescriptor {
-    ats_scoped_str _name;         ///< Name for the thread group.
-    int _count;                   ///< # of threads of this type.
-    int _next_round_robin;        ///< Index of thread to use for events assigned to this group.
-    Que(Event, link) _spawnQueue; ///< Events to dispatch when thread is spawned.
-    /// The actual threads in this group.
-    EThread *_thread[MAX_THREADS_IN_EACH_TYPE];
+    std::string _name;                               ///< Name for the thread group.
+    int _count                = 0;                   ///< # of threads of this type.
+    std::atomic<int> _started = 0;                   ///< # of started threads of this type.
+    int _next_round_robin     = 0;                   ///< Index of thread to use for events assigned to this group.
+    Que(Event, link) _spawnQueue;                    ///< Events to dispatch when thread is spawned.
+    EThread *_thread[MAX_THREADS_IN_EACH_TYPE] = {}; ///< The actual threads in this group.
   };
 
   /// Storage for per group data.
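
A side note on the new field: the default member initializer "std::atomic<int> _started = 0;" is copy-initialization of a std::atomic, which compiles only under C++17's guaranteed copy elision; older standards would need the brace form "std::atomic<int> _started{0};". A minimal standalone sketch of such a counter (hypothetical names, not ATS code):

#include <atomic>

struct GroupCounter {
  std::atomic<int> started = 0; // C++17: the prvalue initializes the member directly

  // Each thread of the group calls this once as it comes up; fetch_add
  // returns the prior value, so adding 1 yields this thread's ordinal.
  int note_started() { return started.fetch_add(1, std::memory_order_relaxed) + 1; }
};
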
diff --git a/iocore/eventsystem/UnixEventProcessor.cc b/iocore/eventsystem/UnixEventProcessor.cc
index 74ec302..d2e86c7 100644
--- a/iocore/eventsystem/UnixEventProcessor.cc
+++ b/iocore/eventsystem/UnixEventProcessor.cc
@@ -278,7 +278,6 @@ EventProcessor::EventProcessor() : thread_initializer(this)
 {
   ink_zero(all_ethreads);
   ink_zero(all_dthreads);
-  ink_zero(thread_group);
   ink_mutex_init(&dedicated_thread_spawn_mutex);
   // Because ET_NET is compile time set to 0 it *must* be the first type registered.
   this->register_event_type("ET_NET");
@@ -329,7 +328,7 @@ EventProcessor::register_event_type(char const *name)
   ThreadGroupDescriptor *tg = &(thread_group[n_thread_groups++]);
   ink_release_assert(n_thread_groups <= MAX_EVENT_TYPES); // check for overflow
 
-  tg->_name = ats_strdup(name);
+  tg->_name = name;
   return n_thread_groups - 1;
 }
 
@@ -377,13 +376,13 @@ EventProcessor::spawn_event_threads(EventType ev_type, int n_threads, size_t sta
   // the group. Some thread set up depends on knowing the total number of threads but that can't be
   // safely updated until all the EThread instances are created and stored in the table.
   for (i = 0; i < n_threads; ++i) {
-    Debug("iocore_thread_start", "Created %s thread #%d", tg->_name.get(), i + 
1);
-    snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", tg->_name.get(), i);
+    Debug("iocore_thread_start", "Created %s thread #%d", tg->_name.c_str(), i 
+ 1);
+    snprintf(thr_name, MAX_THREAD_NAME_LENGTH, "[%s %d]", tg->_name.c_str(), 
i);
     void *stack = Thread_Affinity_Initializer.alloc_stack(tg->_thread[i], 
stacksize);
     tg->_thread[i]->start(thr_name, stack, stacksize);
   }
 
-  Debug("iocore_thread", "Created thread group '%s' id %d with %d threads", 
tg->_name.get(), ev_type, n_threads);
+  Debug("iocore_thread", "Created thread group '%s' id %d with %d threads", 
tg->_name.c_str(), ev_type, n_threads);
 
   return ev_type; // useless but not sure what would be better.
 }
@@ -395,6 +394,7 @@ EventProcessor::initThreadState(EThread *t)
 {
   // Run all thread type initialization continuations that match the event types for this thread.
   for (int i = 0; i < MAX_EVENT_TYPES; ++i) {
+    thread_group[i]._started++;
     if (t->is_event_type(i)) { // that event type done here, roll thread start events of that type.
       // To avoid race conditions on the event in the spawn queue, create a local one to actually send.
       // Use the spawn queue event as a read only model.
diff --git a/proxy/http/HttpProxyServerMain.cc b/proxy/http/HttpProxyServerMain.cc
index 5e40a4d..ab17791 100644
--- a/proxy/http/HttpProxyServerMain.cc
+++ b/proxy/http/HttpProxyServerMain.cc
@@ -49,8 +49,6 @@ HttpSessionAccept *plugin_http_transparent_accept = nullptr;
 static SLL<SSLNextProtocolAccept> ssl_plugin_acceptors;
 static Ptr<ProxyMutex> ssl_plugin_mutex;
 
-// used to keep count of how many et_net threads we have started
-std::atomic<int> started_et_net_threads;
 std::mutex proxyServerMutex;
 std::condition_variable proxyServerCheck;
 bool et_net_threads_ready = false;
@@ -316,8 +314,7 @@ init_accept_HttpProxyServer(int n_accept_threads)
 void
 init_HttpProxyServer(EThread *)
 {
-  auto check_et_net_num = ++started_et_net_threads;
-  if (check_et_net_num == num_of_net_threads) {
+  if (eventProcessor.thread_group[ET_NET]._started == num_of_net_threads) {
     std::unique_lock<std::mutex> lock(proxyServerMutex);
     et_net_threads_ready = true;
     lock.unlock();
diff --git a/proxy/http/HttpProxyServerMain.h b/proxy/http/HttpProxyServerMain.h
index c21a363..33f19ec 100644
--- a/proxy/http/HttpProxyServerMain.h
+++ b/proxy/http/HttpProxyServerMain.h
@@ -51,7 +51,6 @@ void start_HttpProxyServerBackDoor(int port, int accept_threads = 0);
 
 NetProcessor::AcceptOptions make_net_accept_options(const HttpProxyPort *port, unsigned nthreads);
 
-extern std::atomic<int> started_et_net_threads;
 extern std::mutex proxyServerMutex;
 extern std::condition_variable proxyServerCheck;
 extern bool et_net_threads_ready;
diff --git a/src/traffic_server/traffic_server.cc b/src/traffic_server/traffic_server.cc
index ad4ecce..e444097 100644
--- a/src/traffic_server/traffic_server.cc
+++ b/src/traffic_server/traffic_server.cc
@@ -1800,8 +1800,7 @@ main(int /* argc ATS_UNUSED */, const char **argv)
   ink_dns_init(makeModuleVersion(HOSTDB_MODULE_MAJOR_VERSION, HOSTDB_MODULE_MINOR_VERSION, PRIVATE_MODULE_HEADER));
   ink_split_dns_init(makeModuleVersion(1, 0, PRIVATE_MODULE_HEADER));
 
-  naVecMutex             = new_ProxyMutex();
-  started_et_net_threads = 0;
+  naVecMutex = new_ProxyMutex();
 
   // Do the inits for NetProcessors that use ET_NET threads. MUST be before starting those threads.
   netProcessor.init();
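
Taken together, the patch moves the "how many threads of this group have started" bookkeeping from the file-local started_et_net_threads atomic in HttpProxyServerMain.cc into the per-group _started field on EventProcessor::ThreadGroupDescriptor, and init_HttpProxyServer() now compares that field against num_of_net_threads before flipping et_net_threads_ready and signaling proxyServerCheck. The following standalone sketch compresses the same handshake into one program (illustrative names only; the increment and the readiness check are fused here, whereas the patch performs them in separate hooks):

#include <atomic>
#include <condition_variable>
#include <cstdio>
#include <mutex>
#include <thread>
#include <vector>

static std::atomic<int> started{0};          // plays the role of ThreadGroupDescriptor::_started
static std::mutex server_mutex;              // plays the role of proxyServerMutex
static std::condition_variable server_check; // plays the role of proxyServerCheck
static bool threads_ready = false;           // plays the role of et_net_threads_ready

// Per-thread init hook: bump the counter; the last thread to start
// flips the flag under the mutex and wakes the waiting startup code.
static void init_thread_state(int expected)
{
  if (started.fetch_add(1) + 1 == expected) {
    {
      std::lock_guard<std::mutex> lock(server_mutex);
      threads_ready = true;
    }
    server_check.notify_all();
  }
}

int main()
{
  constexpr int expected = 4; // stand-in for num_of_net_threads
  std::vector<std::thread> workers;
  for (int i = 0; i < expected; ++i) {
    workers.emplace_back(init_thread_state, expected);
  }

  // Startup path: block until every worker has checked in.
  {
    std::unique_lock<std::mutex> lock(server_mutex);
    server_check.wait(lock, [] { return threads_ready; });
  }
  std::printf("all %d threads started\n", started.load());

  for (auto &w : workers) {
    w.join();
  }
  return 0;
}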
