This is an automated email from the ASF dual-hosted git repository.

tarmstrong pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/impala.git

commit 0a2261f3c237d9a7c77e8c1a19d95a2318dbbe1d
Author: Bikramjeet Vig <bikramjeet....@cloudera.com>
AuthorDate: Wed Jan 2 15:48:37 2019 -0800

    IMPALA-8092: Add an admission controller debug page
    
    This patch adds a new debug page "admission" that provides the
    following details about resource pools:
    - Pool configuration
    - Relevant pool stats
    - Queued Queries in order of being queued (local to the coordinator)
    - Running Queries (local to this coordinator)
    - Histogram of the distribution of peak memory used by queries admitted
      to the pool
    
    The aforementioned details can also be viewed for a single resource
    pool using a search string, and are also available as a JSON object
    from the same HTTP endpoint.
    
    Testing:
    - Added a test that checks that the admission debug page loads
    correctly and that the HTTP endpoint for resetting informational
    stats works as expected.
    - Did manual stress testing by running the e2e tests and constantly
    fetching the admission debug page.
    
    Change-Id: Iff055d9709ea1bcc2f492adcde92241b6149f766
    Reviewed-on: http://gerrit.cloudera.org:8080/12244
    Reviewed-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
    Tested-by: Impala Public Jenkins <impala-public-jenk...@cloudera.com>
---
 be/src/catalog/catalog-server.cc          |   2 +-
 be/src/rpc/rpc-trace.cc                   |   2 +-
 be/src/runtime/coordinator.cc             |   3 +-
 be/src/scheduling/admission-controller.cc | 155 +++++++++++++-
 be/src/scheduling/admission-controller.h  |  64 +++++-
 be/src/service/impala-http-handler.cc     | 104 ++++++++-
 be/src/service/impala-http-handler.h      |  61 ++++++
 be/src/statestore/statestore.cc           |   4 +-
 be/src/util/default-path-handlers.cc      |  10 +-
 be/src/util/logging-support.cc            |   2 +-
 be/src/util/metrics.cc                    |   2 +-
 be/src/util/thread.cc                     |   3 +-
 be/src/util/webserver-test.cc             |  14 +-
 be/src/util/webserver.h                   |   2 +-
 tests/webserver/test_web_pages.py         |  47 ++++
 www/admission_controller.tmpl             | 343 ++++++++++++++++++++++++++++++
 www/common-header.tmpl                    |   7 +-
 17 files changed, 787 insertions(+), 38 deletions(-)

diff --git a/be/src/catalog/catalog-server.cc b/be/src/catalog/catalog-server.cc
index c4ee301..02b2703 100644
--- a/be/src/catalog/catalog-server.cc
+++ b/be/src/catalog/catalog-server.cc
@@ -281,7 +281,7 @@ Status CatalogServer::Start() {
 
 void CatalogServer::RegisterWebpages(Webserver* webserver) {
   webserver->RegisterUrlCallback(CATALOG_WEB_PAGE, CATALOG_TEMPLATE,
-      [this](const auto& args, auto* doc) { this->CatalogUrlCallback(args, 
doc); });
+      [this](const auto& args, auto* doc) { this->CatalogUrlCallback(args, 
doc); }, true);
   webserver->RegisterUrlCallback(CATALOG_OBJECT_WEB_PAGE, 
CATALOG_OBJECT_TEMPLATE,
       [this](const auto& args, auto* doc) { 
this->CatalogObjectsUrlCallback(args, doc); },
       false);
diff --git a/be/src/rpc/rpc-trace.cc b/be/src/rpc/rpc-trace.cc
index 425312f..e2193a1 100644
--- a/be/src/rpc/rpc-trace.cc
+++ b/be/src/rpc/rpc-trace.cc
@@ -82,7 +82,7 @@ void impala::InitRpcEventTracing(Webserver* webserver, 
RpcMgr* rpc_mgr) {
   if (webserver != nullptr) {
     Webserver::UrlCallback json = bind<void>(
         mem_fn(&RpcEventHandlerManager::JsonCallback), handler_manager.get(), 
_1, _2);
-    webserver->RegisterUrlCallback("/rpcz", "rpcz.tmpl", json);
+    webserver->RegisterUrlCallback("/rpcz", "rpcz.tmpl", json, true);
 
     Webserver::UrlCallback reset = bind<void>(
         mem_fn(&RpcEventHandlerManager::ResetCallback), handler_manager.get(), 
_1, _2);
diff --git a/be/src/runtime/coordinator.cc b/be/src/runtime/coordinator.cc
index 0142be9..21da855 100644
--- a/be/src/runtime/coordinator.cc
+++ b/be/src/runtime/coordinator.cc
@@ -852,7 +852,8 @@ void Coordinator::ReleaseAdmissionControlResources() {
   AdmissionController* admission_controller =
       ExecEnv::GetInstance()->admission_controller();
   DCHECK(admission_controller != nullptr);
-  admission_controller->ReleaseQuery(schedule_);
+  admission_controller->ReleaseQuery(
+      schedule_, 
ComputeQueryResourceUtilization().peak_per_host_mem_consumption);
   query_events_->MarkEvent("Released admission control resources");
 }
 
diff --git a/be/src/scheduling/admission-controller.cc 
b/be/src/scheduling/admission-controller.cc
index 13d9adc..848060e 100644
--- a/be/src/scheduling/admission-controller.cc
+++ b/be/src/scheduling/admission-controller.cc
@@ -26,6 +26,7 @@
 #include "runtime/exec-env.h"
 #include "runtime/mem-tracker.h"
 #include "scheduling/scheduler.h"
+#include "util/bit-util.h"
 #include "util/debug-util.h"
 #include "util/pretty-printer.h"
 #include "util/runtime-profile-counters.h"
@@ -40,6 +41,10 @@ DEFINE_int64(queue_wait_timeout_ms, 60 * 1000, "Maximum 
amount of time (in "
 
 namespace impala {
 
+const int64_t AdmissionController::PoolStats::HISTOGRAM_NUM_OF_BINS = 128;
+const int64_t AdmissionController::PoolStats::HISTOGRAM_BIN_SIZE = 1024L * 
1024L * 1024L;
+const double AdmissionController::PoolStats::EMA_MULTIPLIER = 0.2;
+
 /// Convenience method.
 string PrintBytes(int64_t value) {
   return PrettyPrinter::Print(value, TUnit::BYTES);
@@ -285,7 +290,8 @@ void AdmissionController::PoolStats::Admit(const 
QuerySchedule& schedule) {
   metrics_.total_admitted->Increment(1L);
 }
 
-void AdmissionController::PoolStats::Release(const QuerySchedule& schedule) {
+void AdmissionController::PoolStats::Release(
+    const QuerySchedule& schedule, int64_t peak_mem_consumption) {
   int64_t cluster_mem_admitted = schedule.GetClusterMemoryToAdmit();
   DCHECK_GT(cluster_mem_admitted, 0);
   local_mem_admitted_ -= cluster_mem_admitted;
@@ -301,6 +307,10 @@ void AdmissionController::PoolStats::Release(const 
QuerySchedule& schedule) {
   DCHECK_GE(local_stats_.num_admitted_running, 0);
   DCHECK_GE(agg_num_running_, 0);
   DCHECK_GE(local_mem_admitted_, 0);
+  int64_t histogram_bucket =
+      BitUtil::RoundUp(peak_mem_consumption, HISTOGRAM_BIN_SIZE) / 
HISTOGRAM_BIN_SIZE;
+  histogram_bucket = std::max(std::min(histogram_bucket, 
HISTOGRAM_NUM_OF_BINS), 1L) - 1;
+  peak_mem_histogram_[histogram_bucket] = 
++(peak_mem_histogram_[histogram_bucket]);
 }
 
 void AdmissionController::PoolStats::Queue(const QuerySchedule& schedule) {
@@ -634,6 +644,7 @@ Status 
AdmissionController::SubmitForAdmission(QuerySchedule* schedule,
       }
       VLOG_QUERY << "Admitted query id=" << PrintId(schedule->query_id());
       AdmitQuery(schedule, false);
+      stats->UpdateWaitTime(0);
       VLOG_RPC << "Final: " << stats->DebugString();
       return Status::OK();
     }
@@ -683,7 +694,7 @@ Status 
AdmissionController::SubmitForAdmission(QuerySchedule* schedule,
     RequestQueue* queue = &request_queue_map_[pool_name];
     pools_for_updates_.insert(pool_name);
     PoolStats* stats = GetPoolStats(pool_name);
-    stats->metrics()->time_in_queue_ms->Increment(wait_time_ms);
+    stats->UpdateWaitTime(wait_time_ms);
     if (outcome == AdmissionOutcome::REJECTED_OR_TIMED_OUT) {
       queue->Remove(&queue_node);
       stats->Dequeue(*schedule, true);
@@ -713,12 +724,13 @@ Status 
AdmissionController::SubmitForAdmission(QuerySchedule* schedule,
   }
 }
 
-void AdmissionController::ReleaseQuery(const QuerySchedule& schedule) {
+void AdmissionController::ReleaseQuery(
+    const QuerySchedule& schedule, int64_t peak_mem_consumption) {
   const string& pool_name = schedule.request_pool();
   {
     lock_guard<mutex> lock(admission_ctrl_lock_);
     PoolStats* stats = GetPoolStats(pool_name);
-    stats->Release(schedule);
+    stats->Release(schedule, peak_mem_consumption);
     UpdateHostMemAdmitted(schedule, -schedule.per_backend_mem_to_admit());
     pools_for_updates_.insert(pool_name);
     VLOG_RPC << "Released query id=" << PrintId(schedule.query_id()) << " "
@@ -1067,6 +1079,141 @@ void AdmissionController::AdmitQuery(QuerySchedule* 
schedule, bool was_queued) {
       PROFILE_INFO_KEY_ADMITTED_MEM, 
PrintBytes(schedule->GetClusterMemoryToAdmit()));
 }
 
+void AdmissionController::PoolToJsonLocked(const string& pool_name,
+    rapidjson::Value* resource_pools, rapidjson::Document* document) {
+  PoolStatsMap::iterator it = pool_stats_.find(pool_name);
+  if (it == pool_stats_.end()) return;
+  PoolStats* stats = &it->second;
+  RequestQueue& queue = request_queue_map_[pool_name];
+  // Get the pool stats
+  using namespace rapidjson;
+  Value pool_info_json(kObjectType);
+  stats->ToJson(&pool_info_json, document);
+
+  // Get the queued queries
+  Value queued_queries(kArrayType);
+  queue.Iterate([&queued_queries, document](QueueNode* node) {
+    QuerySchedule* schedule = node->schedule;
+    Value query_info(kObjectType);
+    Value query_id(PrintId(schedule->query_id()).c_str(), 
document->GetAllocator());
+    query_info.AddMember("query_id", query_id, document->GetAllocator());
+    query_info.AddMember(
+        "mem_limit", schedule->per_backend_mem_limit(), 
document->GetAllocator());
+    query_info.AddMember("mem_limit_to_admit", 
schedule->per_backend_mem_to_admit(),
+        document->GetAllocator());
+    query_info.AddMember("num_backends", 
schedule->per_backend_exec_params().size(),
+        document->GetAllocator());
+    queued_queries.PushBack(query_info, document->GetAllocator());
+    return true;
+  });
+  pool_info_json.AddMember("queued_queries", queued_queries, 
document->GetAllocator());
+
+  // Get the queued reason for the query at the head of the queue.
+  if (!queue.empty()) {
+    Value head_queued_reason(
+        queue.head()
+            ->profile->GetInfoString(PROFILE_INFO_KEY_LAST_QUEUED_REASON)
+            ->c_str(),
+        document->GetAllocator());
+    pool_info_json.AddMember(
+        "head_queued_reason", head_queued_reason, document->GetAllocator());
+  }
+
+  resource_pools->PushBack(pool_info_json, document->GetAllocator());
+}
+
+void AdmissionController::PoolToJson(const string& pool_name,
+    rapidjson::Value* resource_pools, rapidjson::Document* document) {
+  lock_guard<mutex> lock(admission_ctrl_lock_);
+  PoolToJsonLocked(pool_name, resource_pools, document);
+}
+
+void AdmissionController::AllPoolsToJson(
+    rapidjson::Value* resource_pools, rapidjson::Document* document) {
+  lock_guard<mutex> lock(admission_ctrl_lock_);
+  for (const PoolConfigMap::value_type& entry : pool_config_map_) {
+    const string& pool_name = entry.first;
+    PoolToJsonLocked(pool_name, resource_pools, document);
+  }
+}
+
+void AdmissionController::PoolStats::UpdateWaitTime(int64_t wait_time_ms) {
+  metrics()->time_in_queue_ms->Increment(wait_time_ms);
+  if (wait_time_ms_ema_ == 0) {
+    wait_time_ms_ema_ = wait_time_ms;
+    return;
+  }
+  wait_time_ms_ema_ =
+      wait_time_ms_ema_ * (1 - EMA_MULTIPLIER) + wait_time_ms * EMA_MULTIPLIER;
+}
+
+void AdmissionController::PoolStats::ToJson(
+    rapidjson::Value* pool, rapidjson::Document* document) const {
+  using namespace rapidjson;
+  Value pool_name(name_.c_str(), document->GetAllocator());
+  pool->AddMember("pool_name", pool_name, document->GetAllocator());
+  pool->AddMember(
+      "agg_num_running", metrics_.agg_num_running->GetValue(), 
document->GetAllocator());
+  pool->AddMember(
+      "agg_num_queued", metrics_.agg_num_queued->GetValue(), 
document->GetAllocator());
+  pool->AddMember("agg_mem_reserved", metrics_.agg_mem_reserved->GetValue(),
+      document->GetAllocator());
+  pool->AddMember("local_mem_admitted", 
metrics_.local_mem_admitted->GetValue(),
+      document->GetAllocator());
+  pool->AddMember(
+      "total_admitted", metrics_.total_admitted->GetValue(), 
document->GetAllocator());
+  pool->AddMember(
+      "total_rejected", metrics_.total_rejected->GetValue(), 
document->GetAllocator());
+  pool->AddMember(
+      "total_timed_out", metrics_.total_timed_out->GetValue(), 
document->GetAllocator());
+  pool->AddMember("pool_max_mem_resources", 
metrics_.pool_max_mem_resources->GetValue(),
+      document->GetAllocator());
+  pool->AddMember("pool_max_requests", metrics_.pool_max_requests->GetValue(),
+      document->GetAllocator());
+  pool->AddMember(
+      "pool_max_queued", metrics_.pool_max_queued->GetValue(), 
document->GetAllocator());
+  pool->AddMember("max_query_mem_limit", 
metrics_.max_query_mem_limit->GetValue(),
+      document->GetAllocator());
+  pool->AddMember("min_query_mem_limit", 
metrics_.min_query_mem_limit->GetValue(),
+      document->GetAllocator());
+  pool->AddMember("clamp_mem_limit_query_option",
+      metrics_.clamp_mem_limit_query_option->GetValue(), 
document->GetAllocator());
+  pool->AddMember("wait_time_ms_ema", wait_time_ms_ema_, 
document->GetAllocator());
+  Value histogram(kArrayType);
+  for (int bucket = 0; bucket < peak_mem_histogram_.size(); bucket++) {
+    Value histogram_elem(kArrayType);
+    histogram_elem.PushBack(bucket, document->GetAllocator());
+    histogram_elem.PushBack(peak_mem_histogram_[bucket], 
document->GetAllocator());
+    histogram.PushBack(histogram_elem, document->GetAllocator());
+  }
+  pool->AddMember("peak_mem_usage_histogram", histogram, 
document->GetAllocator());
+}
+
+void AdmissionController::ResetPoolInformationalStats(const string& pool_name) 
{
+  lock_guard<mutex> lock(admission_ctrl_lock_);
+  PoolStatsMap::iterator it = pool_stats_.find(pool_name);
+  if(it == pool_stats_.end()) return;
+  it->second.ResetInformationalStats();
+}
+
+void AdmissionController::ResetAllPoolInformationalStats() {
+  lock_guard<mutex> lock(admission_ctrl_lock_);
+  for (auto& it: pool_stats_) it.second.ResetInformationalStats();
+}
+
+void AdmissionController::PoolStats::ResetInformationalStats() {
+  std::fill(peak_mem_histogram_.begin(), peak_mem_histogram_.end(), 0);
+  wait_time_ms_ema_ = 0.0;
+  // Reset only metrics keeping track of totals since last reset.
+  metrics()->total_admitted->SetValue(0);
+  metrics()->total_rejected->SetValue(0);
+  metrics()->total_queued->SetValue(0);
+  metrics()->total_dequeued->SetValue(0);
+  metrics()->total_timed_out->SetValue(0);
+  metrics()->total_released->SetValue(0);
+  metrics()->time_in_queue_ms->SetValue(0);
+}
+
 void AdmissionController::PoolStats::InitMetrics() {
   metrics_.total_admitted = parent_->metrics_group_->AddCounter(
       TOTAL_ADMITTED_METRIC_KEY_FORMAT, 0, name_);
diff --git a/be/src/scheduling/admission-controller.h 
b/be/src/scheduling/admission-controller.h
index 5b741b9..0fa1596 100644
--- a/be/src/scheduling/admission-controller.h
+++ b/be/src/scheduling/admission-controller.h
@@ -248,11 +248,29 @@ class AdmissionController {
   /// is cancelled or failed). This should be called for all requests that have
   /// been submitted via AdmitQuery().
   /// This does not block.
-  void ReleaseQuery(const QuerySchedule& schedule);
+  void ReleaseQuery(const QuerySchedule& schedule, int64_t 
peak_mem_consumption);
 
   /// Registers the request queue topic with the statestore.
   Status Init();
 
+  /// Serializes relevant stats, configurations and information associated 
with queued
+  /// queries for the resource pool identified by 'pool_name' to JSON by 
adding members to
+  /// 'resource_pools'. Is a no-op if a pool with name 'pool_name' does not 
exist or no
+  /// queries have been submitted to that pool yet.
+  void PoolToJson(const string& pool_name, rapidjson::Value* resource_pools,
+      rapidjson::Document* document);
+
+  /// Serializes relevant stats, configurations and information associated 
with queued
+  /// queries for every resource pool (to which queries have been submitted at 
least once)
+  /// to JSON by adding members to 'resource_pools'.
+  void AllPoolsToJson(rapidjson::Value* resource_pools, rapidjson::Document* 
document);
+
+  /// Calls ResetInformationalStats on the pool identified by 'pool_name'.
+  void ResetPoolInformationalStats(const string& pool_name);
+
+  /// Calls ResetInformationalStats on all pools.
+  void ResetAllPoolInformationalStats();
+
  private:
   class PoolStats;
   friend class PoolStats;
@@ -332,7 +350,8 @@ class AdmissionController {
 
     PoolStats(AdmissionController* parent, const std::string& name)
       : name_(name), parent_(parent), agg_num_running_(0), agg_num_queued_(0),
-        agg_mem_reserved_(0), local_mem_admitted_(0) {
+        agg_mem_reserved_(0), local_mem_admitted_(0), wait_time_ms_ema_(0.0) {
+      peak_mem_histogram_.resize(HISTOGRAM_NUM_OF_BINS, 0);
       InitMetrics();
     }
 
@@ -346,7 +365,7 @@ class AdmissionController {
     /// The following methods update the pool stats when the request 
represented by
     /// schedule is admitted, released, queued, or dequeued.
     void Admit(const QuerySchedule& schedule);
-    void Release(const QuerySchedule& schedule);
+    void Release(const QuerySchedule& schedule, int64_t peak_mem_consumption);
     void Queue(const QuerySchedule& schedule);
     void Dequeue(const QuerySchedule& schedule, bool timed_out);
 
@@ -379,6 +398,19 @@ class AdmissionController {
 
     PoolMetrics* metrics() { return &metrics_; }
     std::string DebugString() const;
+
+    /// Updates the metric keeping track of total time in queue and the 
exponential
+    /// moving average of query wait time for all queries submitted to this 
pool.
+    void UpdateWaitTime(int64_t wait_time_ms);
+
+    /// Serializes relevant stats and configurations to JSON by adding members 
to 'pool'.
+    void ToJson(rapidjson::Value* pool, rapidjson::Document* document) const;
+
+    /// Resets the informational stats like those keeping track of absolute
+    /// values(totals), the peak query memory histogram, and the exponential 
moving
+    /// average of wait time.
+    void ResetInformationalStats();
+
    private:
     const std::string name_;
     AdmissionController* parent_;
@@ -417,6 +449,21 @@ class AdmissionController {
 
     /// Per-pool metrics, created by InitMetrics().
     PoolMetrics metrics_;
+
+    /// A histogram of the peak memory used by a query among all hosts. Its a 
vector of
+    /// size 'HISTOGRAM_NUM_OF_BINS' and every i-th element represents the 
number of
+    /// queries that had recorded a peak memory between (i, i+1] * 
HISTOGRAM_BIN_SIZE
+    /// Bytes, except for the last one that represents a memory range of
+    /// (HISTOGRAM_NUM_OF_BINS - 1, infinity) * HISTOGRAM_BIN_SIZE Bytes.
+    std::vector<int64_t> peak_mem_histogram_;
+    static const int64_t HISTOGRAM_NUM_OF_BINS;
+    static const int64_t HISTOGRAM_BIN_SIZE;
+
+    /// Keeps track of exponential moving average of all queries submitted to 
this pool
+    /// that were not rejected. A weighting multiplier of value 
'EMA_MULTIPLIER' is used.
+    double wait_time_ms_ema_;
+    static const double EMA_MULTIPLIER;
+
     void InitMetrics();
   };
 
@@ -547,10 +594,15 @@ class AdmissionController {
   /// behind invalidity.
   bool IsPoolConfigValid(const TPoolConfig& pool_cfg, std::string* reason);
 
-  // Sets the per host mem limit and mem admitted in the schedule and does the 
necessary
-  // accounting and logging on successful submission.
-  // Caller must hold 'admission_ctrl_lock_'.
+  /// Sets the per host mem limit and mem admitted in the schedule and does 
the necessary
+  /// accounting and logging on successful submission.
+  /// Caller must hold 'admission_ctrl_lock_'.
   void AdmitQuery(QuerySchedule* schedule, bool was_queued);
+
+  /// Same as PoolToJson() but requires 'admission_ctrl_lock_' to be held by 
the caller.
+  /// Is a helper method used by both PoolToJson() and AllPoolsToJson()
+  void PoolToJsonLocked(const string& pool_name, rapidjson::Value* 
resource_pools,
+      rapidjson::Document* document);
 };
 
 }
diff --git a/be/src/service/impala-http-handler.cc 
b/be/src/service/impala-http-handler.cc
index 38cd6a4..394c41e 100644
--- a/be/src/service/impala-http-handler.cc
+++ b/be/src/service/impala-http-handler.cc
@@ -20,6 +20,8 @@
 #include <sstream>
 #include <boost/lexical_cast.hpp>
 #include <boost/thread/mutex.hpp>
+#include <rapidjson/stringbuffer.h>
+#include <rapidjson/prettywriter.h>
 #include <gutil/strings/substitute.h>
 
 #include "catalog/catalog-util.h"
@@ -33,6 +35,7 @@
 #include "service/impala-server.h"
 #include "service/client-request-state.h"
 #include "service/frontend.h"
+#include "scheduling/admission-controller.h"
 #include "thrift/protocol/TDebugProtocol.h"
 #include "util/coding-util.h"
 #include "util/logging-support.h"
@@ -82,19 +85,19 @@ void ImpalaHttpHandler::RegisterHandlers(Webserver* 
webserver) {
   DCHECK(webserver != NULL);
 
   webserver->RegisterUrlCallback("/backends", "backends.tmpl",
-      MakeCallback(this, &ImpalaHttpHandler::BackendsHandler));
+      MakeCallback(this, &ImpalaHttpHandler::BackendsHandler), true);
 
   webserver->RegisterUrlCallback("/hadoop-varz", "hadoop-varz.tmpl",
-      MakeCallback(this, &ImpalaHttpHandler::HadoopVarzHandler));
+      MakeCallback(this, &ImpalaHttpHandler::HadoopVarzHandler), true);
 
   webserver->RegisterUrlCallback("/queries", "queries.tmpl",
-      MakeCallback(this, &ImpalaHttpHandler::QueryStateHandler));
+      MakeCallback(this, &ImpalaHttpHandler::QueryStateHandler), true);
 
   webserver->RegisterUrlCallback("/sessions", "sessions.tmpl",
-      MakeCallback(this, &ImpalaHttpHandler::SessionsHandler));
+      MakeCallback(this, &ImpalaHttpHandler::SessionsHandler), true);
 
   webserver->RegisterUrlCallback("/catalog", "catalog.tmpl",
-      MakeCallback(this, &ImpalaHttpHandler::CatalogHandler));
+      MakeCallback(this, &ImpalaHttpHandler::CatalogHandler), true);
 
   webserver->RegisterUrlCallback("/catalog_object", "catalog_object.tmpl",
       MakeCallback(this, &ImpalaHttpHandler::CatalogObjectsHandler), false);
@@ -139,6 +142,12 @@ void ImpalaHttpHandler::RegisterHandlers(Webserver* 
webserver) {
       [this](const auto& args, auto* doc) {
         this->QuerySummaryHandler(false, false, args, doc); }, false);
 
+    webserver->RegisterUrlCallback("/admission", "admission_controller.tmpl",
+      MakeCallback(this, &ImpalaHttpHandler::AdmissionStateHandler), true);
+
+    webserver->RegisterUrlCallback("/resource_pool_reset", "",
+      MakeCallback(this, &ImpalaHttpHandler::ResetResourcePoolStatsHandler), 
false);
+
   RegisterLogLevelCallbacks(webserver, true);
 }
 
@@ -434,7 +443,6 @@ void ImpalaHttpHandler::QueryStateHandler(const 
Webserver::ArgumentMap& args,
   document->AddMember("query_locations", query_locations, 
document->GetAllocator());
 }
 
-
 void ImpalaHttpHandler::SessionsHandler(const Webserver::ArgumentMap& args,
     Document* document) {
   lock_guard<mutex> l(server_->session_state_map_lock_);
@@ -854,3 +862,87 @@ void ImpalaHttpHandler::BackendsHandler(const 
Webserver::ArgumentMap& args,
   }
   document->AddMember("backends", backends_list, document->GetAllocator());
 }
+
+void ImpalaHttpHandler::AdmissionStateHandler(
+    const Webserver::ArgumentMap& args, Document* document) {
+  Webserver::ArgumentMap::const_iterator pool_name_arg = 
args.find("pool_name");
+  bool get_all_pools = (pool_name_arg == args.end());
+  Value resource_pools(kArrayType);
+  if(get_all_pools){
+    ExecEnv::GetInstance()->admission_controller()->AllPoolsToJson(
+        &resource_pools, document);
+  } else {
+    ExecEnv::GetInstance()->admission_controller()->PoolToJson(
+        pool_name_arg->second, &resource_pools, document);
+  }
+
+  // Now get running queries from CRS map.
+  struct QueryInfo {
+    TUniqueId query_id;
+    int64_t mem_limit;
+    int64_t mem_limit_for_admission;
+    unsigned long num_backends;
+  };
+  unordered_map<string, vector<QueryInfo>> running_queries;
+  server_->client_request_state_map_.DoFuncForAllEntries([&running_queries](
+      const std::shared_ptr<ClientRequestState>& request_state) {
+    // Make sure only queries past admission control are added.
+    auto query_state = request_state->operation_state();
+    if (query_state != TOperationState::INITIALIZED_STATE
+        && query_state != TOperationState::PENDING_STATE
+        && request_state->schedule() != nullptr)
+      running_queries[request_state->request_pool()].push_back(
+          {request_state->query_id(), 
request_state->schedule()->per_backend_mem_limit(),
+              request_state->schedule()->per_backend_mem_to_admit(),
+              static_cast<unsigned long>(
+                  
request_state->schedule()->per_backend_exec_params().size())});
+  });
+
+  // Add the running queries to the resource_pools json.
+  for (int i = 0; i < resource_pools.Size(); i++) {
+    DCHECK(resource_pools[i].IsObject());
+    Value::MemberIterator it = 
resource_pools[i].GetObject().FindMember("pool_name");
+    DCHECK(it != resource_pools[i].GetObject().MemberEnd());
+    DCHECK(it->value.IsString());
+    string pool_name(it->value.GetString());
+    // Now add running queries to the json.
+    auto query_list = running_queries.find(pool_name);
+    if (query_list == running_queries.end()) continue;
+    vector<QueryInfo>& info_array = query_list->second;
+    Value queries_in_pool(rapidjson::kArrayType);
+    for (QueryInfo info : info_array) {
+      Value query_info(rapidjson::kObjectType);
+      Value query_id(PrintId(info.query_id).c_str(), document->GetAllocator());
+      query_info.AddMember("query_id", query_id, document->GetAllocator());
+      query_info.AddMember("mem_limit", info.mem_limit, 
document->GetAllocator());
+      query_info.AddMember(
+          "mem_limit_to_admit", info.mem_limit_for_admission, 
document->GetAllocator());
+      query_info.AddMember("num_backends", info.num_backends, 
document->GetAllocator());
+      queries_in_pool.PushBack(query_info, document->GetAllocator());
+    }
+    resource_pools[i].GetObject().AddMember(
+        "running_queries", queries_in_pool, document->GetAllocator());
+  }
+  // In order to embed a plain json inside the webpage generated by mustache, 
we need
+  // to stringify it and write it out as a json element.
+  rapidjson::StringBuffer strbuf;
+  PrettyWriter<rapidjson::StringBuffer> writer(strbuf);
+  resource_pools.Accept(writer);
+  Value raw_json(strbuf.GetString(), document->GetAllocator());
+  document->AddMember("resource_pools_plain_json", raw_json, 
document->GetAllocator());
+  document->AddMember("resource_pools", resource_pools, 
document->GetAllocator());
+  // Indicator that helps render UI elements based on this condition.
+  document->AddMember("get_all_pools", get_all_pools, 
document->GetAllocator());
+}
+
+void ImpalaHttpHandler::ResetResourcePoolStatsHandler(
+    const Webserver::ArgumentMap& args, Document* document) {
+  Webserver::ArgumentMap::const_iterator pool_name_arg = 
args.find("pool_name");
+  bool reset_all_pools = (pool_name_arg == args.end());
+  if (reset_all_pools) {
+    
ExecEnv::GetInstance()->admission_controller()->ResetAllPoolInformationalStats();
+  } else {
+    
ExecEnv::GetInstance()->admission_controller()->ResetPoolInformationalStats(
+        pool_name_arg->second);
+  }
+}
diff --git a/be/src/service/impala-http-handler.h 
b/be/src/service/impala-http-handler.h
index f2492ff..aa69e6e 100644
--- a/be/src/service/impala-http-handler.h
+++ b/be/src/service/impala-http-handler.h
@@ -164,6 +164,67 @@ class ImpalaHttpHandler {
   ///   }
   /// ]
   void BackendsHandler(const Webserver::ArgumentMap& args, 
rapidjson::Document* document);
+
+  /// Json callback for /admission_controller, which prints relevant details 
for all
+  /// resource pools.
+  ///"resource_pools": [
+  ///  {
+  ///    "pool_name": "default-pool",
+  ///    "agg_num_running": 1,
+  ///    "agg_num_queued": 4,
+  ///    "agg_mem_reserved": 10382760,
+  ///    "local_mem_admitted": 10382760,
+  ///    "local_num_admitted_running": 1,
+  ///    "local_num_queued": 4,
+  ///    "local_backend_mem_reserved": 10382760,
+  ///    "local_backend_mem_usage": 16384,
+  ///    "pool_max_mem_resources": 10485760,
+  ///    "pool_max_requests": 10,
+  ///    "pool_max_queued": 10,
+  ///    "max_query_mem_limit": 0,
+  ///    "min_query_mem_limit": 0,
+  ///    "clamp_mem_limit_query_option": true,
+  ///    "wait_time_ms_EMA": 325.4,
+  ///    "histogram": [
+  ///      [
+  ///        0,
+  ///        3
+  ///      ],
+  ///      .
+  ///      .
+  ///      [
+  ///        127,
+  ///        1
+  ///      ]
+  ///    ],
+  ///    "queued_queries": [
+  ///      {
+  ///        "query_id": "6f49e509bfa5b347:207d8ef900000000",
+  ///        "mem_limit": 10382760,
+  ///        "mem_limit_to_admit": 10382760,
+  ///        "num_backends": 1
+  ///      }
+  ///    ],
+  ///    "head_queued_reason": "<...>",
+  ///    "running_queries": [
+  ///      {
+  ///        "query_id": "b94cf355d6df041c:ba3b91400000000",
+  ///        "mem_limit": 10382760,
+  ///        "mem_limit_to_admit": 10382760,
+  ///        "num_backends": 1
+  ///      }
+  ///    ]
+  ///  }
+  ///]
+  void AdmissionStateHandler(
+      const Webserver::ArgumentMap& args, rapidjson::Document* document);
+
+  /// Resets resource pool informational statistics. Takes an optional 
argument:
+  /// 'pool_name'. If its not specified, all resource pool's informational 
statistics are
+  /// reset otherwise it resets the statistics for a single pool identified by 
the
+  /// supplied argument. Produces no JSON output.
+  void ResetResourcePoolStatsHandler(
+      const Webserver::ArgumentMap& args, rapidjson::Document* document);
 };
 
 }
diff --git a/be/src/statestore/statestore.cc b/be/src/statestore/statestore.cc
index 6f088d9..28dd95e 100644
--- a/be/src/statestore/statestore.cc
+++ b/be/src/statestore/statestore.cc
@@ -487,12 +487,12 @@ void Statestore::RegisterWebpages(Webserver* webserver) {
   Webserver::UrlCallback topics_callback =
       bind<void>(mem_fn(&Statestore::TopicsHandler), this, _1, _2);
   webserver->RegisterUrlCallback("/topics", "statestore_topics.tmpl",
-      topics_callback);
+      topics_callback, true);
 
   Webserver::UrlCallback subscribers_callback =
       bind<void>(&Statestore::SubscribersHandler, this, _1, _2);
   webserver->RegisterUrlCallback("/subscribers", "statestore_subscribers.tmpl",
-      subscribers_callback);
+      subscribers_callback, true);
 
   RegisterLogLevelCallbacks(webserver, false);
 }
diff --git a/be/src/util/default-path-handlers.cc 
b/be/src/util/default-path-handlers.cc
index f0fbe60..4458b83 100644
--- a/be/src/util/default-path-handlers.cc
+++ b/be/src/util/default-path-handlers.cc
@@ -299,20 +299,20 @@ void RootHandler(const Webserver::ArgumentMap& args, 
Document* document) {
 
 void AddDefaultUrlCallbacks(Webserver* webserver, MemTracker* 
process_mem_tracker,
     MetricGroup* metric_group) {
-  webserver->RegisterUrlCallback("/logs", "logs.tmpl", LogsHandler);
-  webserver->RegisterUrlCallback("/varz", "flags.tmpl", FlagsHandler);
+  webserver->RegisterUrlCallback("/logs", "logs.tmpl", LogsHandler, true);
+  webserver->RegisterUrlCallback("/varz", "flags.tmpl", FlagsHandler, true);
   if (JniUtil::is_jvm_inited()) {
     // JmxHandler outputs a plain JSON string and does not require a template 
to
     // render. However RawUrlCallback only supports PLAIN content type.
     // (TODO): Switch to RawUrlCallback when it supports JSON content-type.
-    webserver->RegisterUrlCallback("/jmx", "raw_text.tmpl", JmxHandler);
+    webserver->RegisterUrlCallback("/jmx", "raw_text.tmpl", JmxHandler, true);
   }
   if (process_mem_tracker != NULL) {
     auto callback = [process_mem_tracker, metric_group]
         (const Webserver::ArgumentMap& args, Document* doc) {
       MemUsageHandler(process_mem_tracker, metric_group, args, doc);
     };
-    webserver->RegisterUrlCallback("/memz", "memz.tmpl", callback);
+    webserver->RegisterUrlCallback("/memz", "memz.tmpl", callback, true);
   }
 
 #if !defined(ADDRESS_SANITIZER) && !defined(THREAD_SANITIZER)
@@ -326,7 +326,7 @@ void AddDefaultUrlCallbacks(Webserver* webserver, 
MemTracker* process_mem_tracke
     [](const Webserver::ArgumentMap& args, Document* doc) {
       RootHandler(args, doc);
     };
-  webserver->RegisterUrlCallback("/", "root.tmpl", root_handler);
+  webserver->RegisterUrlCallback("/", "root.tmpl", root_handler, true);
 }
 
 }
diff --git a/be/src/util/logging-support.cc b/be/src/util/logging-support.cc
index b050f37..8281487 100644
--- a/be/src/util/logging-support.cc
+++ b/be/src/util/logging-support.cc
@@ -305,7 +305,7 @@ void LoggingSupport::DeleteOldLogs(const string& 
path_pattern, int max_log_files
 void RegisterLogLevelCallbacks(Webserver* webserver, bool 
register_log4j_handlers) {
   webserver->RegisterUrlCallback("/log_level", "log_level.tmpl",
       MakeCallback([](const Webserver::ArgumentMap& args, Document* 
document){},
-      register_log4j_handlers));
+      register_log4j_handlers), true);
   webserver->RegisterUrlCallback("/set_glog_level", "log_level.tmpl",
       MakeCallback(&SetGlogLevelCallback, register_log4j_handlers), false);
   webserver->RegisterUrlCallback("/reset_glog_level", "log_level.tmpl",
diff --git a/be/src/util/metrics.cc b/be/src/util/metrics.cc
index 03834a4..dd0fd35 100644
--- a/be/src/util/metrics.cc
+++ b/be/src/util/metrics.cc
@@ -90,7 +90,7 @@ Status MetricGroup::Init(Webserver* webserver) {
 
     Webserver::UrlCallback json_callback =
         bind<void>(mem_fn(&MetricGroup::TemplateCallback), this, _1, _2);
-    webserver->RegisterUrlCallback("/metrics", "metrics.tmpl", json_callback);
+    webserver->RegisterUrlCallback("/metrics", "metrics.tmpl", json_callback, 
true);
   }
 
   return Status::OK();
diff --git a/be/src/util/thread.cc b/be/src/util/thread.cc
index 2bee232..0d5cd3a 100644
--- a/be/src/util/thread.cc
+++ b/be/src/util/thread.cc
@@ -380,7 +380,8 @@ void RegisterUrlCallbacks(bool include_jvm_threads, 
Webserver* webserver) {
       (const Webserver::ArgumentMap& args, Document* doc) {
     ThreadOverviewUrlCallback(include_jvm_threads, args, doc);
   };
-  webserver->RegisterUrlCallback(THREADS_WEB_PAGE, THREADS_TEMPLATE, 
overview_callback);
+  webserver->RegisterUrlCallback(
+      THREADS_WEB_PAGE, THREADS_TEMPLATE, overview_callback, true);
 
   auto group_callback = [] (const Webserver::ArgumentMap& args, Document* doc) 
{
     thread_manager->ThreadGroupUrlCallback(args, doc);
diff --git a/be/src/util/webserver-test.cc b/be/src/util/webserver-test.cc
index c1071f4..f89ffe2 100644
--- a/be/src/util/webserver-test.cc
+++ b/be/src/util/webserver-test.cc
@@ -116,7 +116,7 @@ TEST(Webserver, ArgsTest) {
   const string ARGS_TEST_PATH = "/args-test";
   bool success = false;
   Webserver::UrlCallback callback = bind<void>(AssertArgsCallback, &success , 
_1, _2);
-  webserver.RegisterUrlCallback(ARGS_TEST_PATH, "json-test.tmpl", callback);
+  webserver.RegisterUrlCallback(ARGS_TEST_PATH, "json-test.tmpl", callback, 
true);
 
   ASSERT_OK(webserver.Start());
   stringstream contents;
@@ -147,11 +147,11 @@ TEST(Webserver, JsonTest) {
   const string RAW_TEXT_PATH = "/text";
   const string NO_TEMPLATE_PATH = "/no-template";
   Webserver::UrlCallback callback = bind<void>(JsonCallback, false, _1, _2);
-  webserver.RegisterUrlCallback(JSON_TEST_PATH, "json-test.tmpl", callback);
-  webserver.RegisterUrlCallback(NO_TEMPLATE_PATH, "doesnt-exist.tmpl", 
callback);
+  webserver.RegisterUrlCallback(JSON_TEST_PATH, "json-test.tmpl", callback, 
true);
+  webserver.RegisterUrlCallback(NO_TEMPLATE_PATH, "doesnt-exist.tmpl", 
callback, true);
 
   Webserver::UrlCallback text_callback = bind<void>(JsonCallback, true, _1, 
_2);
-  webserver.RegisterUrlCallback(RAW_TEXT_PATH, "json-test.tmpl", 
text_callback);
+  webserver.RegisterUrlCallback(RAW_TEXT_PATH, "json-test.tmpl", 
text_callback, true);
   ASSERT_OK(webserver.Start());
 
   stringstream contents;
@@ -187,7 +187,7 @@ TEST(Webserver, EscapingTest) {
 
   const string JSON_TEST_PATH = "/json-test";
   Webserver::UrlCallback callback = bind<void>(JsonCallback, false, _1, _2);
-  webserver.RegisterUrlCallback(JSON_TEST_PATH, "json-test.tmpl", callback);
+  webserver.RegisterUrlCallback(JSON_TEST_PATH, "json-test.tmpl", callback, 
true);
   ASSERT_OK(webserver.Start());
   stringstream contents;
   ASSERT_OK(HttpGet("localhost", FLAGS_webserver_port, JSON_TEST_PATH, 
&contents));
@@ -363,7 +363,7 @@ TEST(Webserver, NoFrameEmbeddingTest) {
   const string FRAME_TEST_PATH = "/frames_test";
   Webserver webserver(FLAGS_webserver_port);
   Webserver::UrlCallback callback = bind<void>(FrameCallback, _1, _2);
-  webserver.RegisterUrlCallback(FRAME_TEST_PATH, "raw_text.tmpl", callback);
+  webserver.RegisterUrlCallback(FRAME_TEST_PATH, "raw_text.tmpl", callback, 
true);
   ASSERT_OK(webserver.Start());
   stringstream contents;
   ASSERT_OK(HttpGet("localhost", FLAGS_webserver_port,
@@ -378,7 +378,7 @@ TEST(Webserver, FrameAllowEmbeddingTest) {
       ScopedFlagSetter<string>::Make(&FLAGS_webserver_x_frame_options, 
"ALLOWALL");
   Webserver webserver(FLAGS_webserver_port);
   Webserver::UrlCallback callback = bind<void>(FrameCallback, _1, _2);
-  webserver.RegisterUrlCallback(FRAME_TEST_PATH, "raw_text.tmpl", callback);
+  webserver.RegisterUrlCallback(FRAME_TEST_PATH, "raw_text.tmpl", callback, 
true);
   ASSERT_OK(webserver.Start());
   stringstream contents;
   ASSERT_OK(HttpGet("localhost", FLAGS_webserver_port,
diff --git a/be/src/util/webserver.h b/be/src/util/webserver.h
index 1651d88..9f403cb 100644
--- a/be/src/util/webserver.h
+++ b/be/src/util/webserver.h
@@ -90,7 +90,7 @@ class Webserver {
   /// The path of the template file is relative to the webserver's document
   /// root.
   void RegisterUrlCallback(const std::string& path, const std::string& 
template_filename,
-      const UrlCallback& callback, bool is_on_nav_bar = true);
+      const UrlCallback& callback, bool is_on_nav_bar);
 
   /// Register a 'raw' url callback that produces a bytestream as output. This 
should only
   /// be used for URLs that want to return binary data; non-HTML callbacks 
that want to
diff --git a/tests/webserver/test_web_pages.py 
b/tests/webserver/test_web_pages.py
index 61455d9..18f83b1 100644
--- a/tests/webserver/test_web_pages.py
+++ b/tests/webserver/test_web_pages.py
@@ -42,6 +42,8 @@ class TestWebPage(ImpalaTestSuite):
   THREAD_GROUP_URL = "http://localhost:{0}/thread-group";
   METRICS_URL = "http://localhost:{0}/metrics";
   JMX_URL = "http://localhost:{0}/jmx";
+  ADMISSION_URL = "http://localhost:{0}/admission";
+  RESET_RESOURCE_POOL_STATS_URL = "http://localhost:{0}/resource_pool_reset";
 
   # log4j changes do not apply to the statestore since it doesn't
   # have an embedded JVM. So we make two sets of ports to test the
@@ -410,3 +412,48 @@ class TestWebPage(ImpalaTestSuite):
     after = get_svc_metrics(SVC_NAME)
 
     assert before != after
+
+  @pytest.mark.execute_serially
+  def test_admission_page(self):
+    """Sanity check for the admission debug page's http end points (both 
admission and
+    reset stats end points)."""
+    # Make sure at least one query is submitted to the default pool since 
impala startup,
+    # so that it shows up in the admission control debug page. Checks for both 
with and
+    # without the pool_name search string.
+    self.client.execute("select 1")
+    response_json = self.__fetch_resource_pools_json()
+    assert response_json[0]['pool_name'] == "default-pool"
+
+    response_json = self.__fetch_resource_pools_json("default-pool")
+    assert response_json[0]['pool_name'] == "default-pool"
+
+    # Make sure the reset informational stats endpoint works, both with and 
without the
+    # pool_name search string.
+    assert response_json[0]['total_admitted'] > 0
+    self.get_and_check_status(
+      self.RESET_RESOURCE_POOL_STATS_URL + "?pool_name=default-pool",
+      ports_to_test=[25000])
+    response_json = self.__fetch_resource_pools_json("default-pool")
+    assert response_json[0]['total_admitted'] == 0
+
+    self.client.execute("select 1")
+    response_json = self.__fetch_resource_pools_json("default-pool")
+    assert response_json[0]['total_admitted'] > 0
+    self.get_and_check_status(self.RESET_RESOURCE_POOL_STATS_URL, 
ports_to_test=[25000])
+    response_json = self.__fetch_resource_pools_json("default-pool")
+    assert response_json[0]['total_admitted'] == 0
+
+  def __fetch_resource_pools_json(self, pool_name=None):
+    """Helper method used to fetch the resource pool json from the admission 
debug page.
+    If a 'pool_name' is passed to this method, it adds the pool_name search 
string to the
+    http request."""
+    search_string = "?json"
+    if pool_name is not None:
+      search_string += "&pool_name=" + pool_name
+    responses = self.get_and_check_status(self.ADMISSION_URL + search_string,
+                                          ports_to_test=[25000])
+    assert len(responses) == 1
+    response_json = json.loads(responses[0].text)
+    assert 'resource_pools' in response_json
+    assert len(response_json['resource_pools']) == 1
+    return response_json['resource_pools']
diff --git a/www/admission_controller.tmpl b/www/admission_controller.tmpl
new file mode 100644
index 0000000..79d1f7d
--- /dev/null
+++ b/www/admission_controller.tmpl
@@ -0,0 +1,343 @@
+<!--
+Licensed to the Apache Software Foundation (ASF) under one
+or more contributor license agreements.  See the NOTICE file
+distributed with this work for additional information
+regarding copyright ownership.  The ASF licenses this file
+to you under the Apache License, Version 2.0 (the
+"License"); you may not use this file except in compliance
+with the License.  You may obtain a copy of the License at
+
+  http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing,
+software distributed under the License is distributed on an
+"AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+KIND, either express or implied.  See the License for the
+specific language governing permissions and limitations
+under the License.
+
+Example of json received from the impala server
+"resource_pools_plain_json": <..the whole json below in text.>
+"resource_pools": [
+        {
+            "pool_name": "default-pool",
+            "agg_num_running": 1,
+            "agg_num_queued": 4,
+            "agg_mem_reserved": 10382760,
+            "local_mem_admitted": 10382760,
+            "local_num_admitted_running": 1,
+            "local_num_queued": 4,
+            "local_backend_mem_reserved": 10382760,
+            "local_backend_mem_usage": 16384,
+            "pool_max_mem_resources": 10485760,
+            "pool_max_requests": 10,
+            "pool_max_queued": 10,
+            "max_query_mem_limit": 0,
+            "min_query_mem_limit": 0,
+            "clamp_mem_limit_query_option": true,
+            "wait_time_ms_EMA": 0.0,
+            "histogram": [
+                [
+                    0,
+                    0
+                ],
+                .
+                .
+                .
+                [
+                    127,
+                    0
+                ]
+            ],
+            "queued_queries": [
+                {
+                    "query_id": "6f49e509bfa5b347:207d8ef900000000",
+                    "mem_limit": 10382760,
+                    "mem_limit_to_admit": 10382760,
+                    "num_backends": 1
+                },
+                {
+                    "query_id": "854f954e79f79d87:18483b9400000000",
+                    "mem_limit": 10382760,
+                    "mem_limit_to_admit": 10382760,
+                    "num_backends": 1
+                },
+                {
+                    "query_id": "45421dce8bf5664f:6865a45200000000",
+                    "mem_limit": 10382760,
+                    "mem_limit_to_admit": 10382760,
+                    "num_backends": 1
+                },
+                {
+                    "query_id": "e249aecff1bf3372:d5527a2700000000",
+                    "mem_limit": 10382760,
+                    "mem_limit_to_admit": 10382760,
+                    "num_backends": 1
+                }
+            ],
+            "head_queued_reason": "Not enough aggregate memory available in 
pool default-pool with max mem resources 10.00 MB. Needed 9.90 MB but only 
100.59 KB was available.",
+            "running_queries": [
+                {
+                    "query_id": "b94cf355d6df041c:ba3b91400000000",
+                    "mem_limit": 10382760,
+                    "mem_limit_to_admit": 10382760,
+                    "num_backends": 1
+                }
+            ]
+        }
+    ]
+-->
+{{> www/common-header.tmpl }}
+<script 
src="https://cdnjs.cloudflare.com/ajax/libs/Chart.js/2.7.3/Chart.min.js"; 
type="text/javascript"></script>
+<script type="text/javascript">
+window.onload = function() {
+  renderGraph();
+  formatMemoryColumns();
+};
+
+// Picks up all the canvas elements associated with each resource pool and 
renders its
+// peak memory usage histogram.
+function renderGraph() {
+  var plainJson = document.getElementById("resource_pools_plain_json");
+  var json = JSON.parse(plainJson.innerText);
+  var canvases = document.getElementsByTagName("canvas");
+
+  for (var pool_idx = 0; pool_idx < json.length; pool_idx++){
+    var histogram = json[pool_idx]["peak_mem_usage_histogram"]
+    var pool_name = json[pool_idx]['pool_name'];
+    var hist_labels = new Array();
+    var hist_values = new Array();
+    for (var i = 0; i < histogram.length; i++) {
+      var hist_elem = histogram[i];
+      //hist_labels.push((hist_elem[0]-1) + " - " + hist_elem[0] + " GB");
+      hist_labels.push((hist_elem[0]+1) + " GB");
+      hist_values.push(hist_elem[1]);
+    }
+    hist_labels[hist_labels.length-1] += " and above"
+    // Render the bar chart now.
+    var chart_Data = {
+            labels: hist_labels,
+            datasets: [{
+                label: 'Number of Queries',
+                backgroundColor: '#2E6595', // Impala logo's color
+                data: hist_values
+            }],
+      };
+
+    var ctx = canvases[pool_name].getContext('2d');
+      window.myBar = new Chart(ctx, {
+          type: 'bar',
+          data: chart_Data,
+          options: {
+              responsive: true,
+              legend: {
+                  position: 'top',
+              },
+              title: {
+                  display: true,
+                  text: 'Peak Memory per Host'
+              }
+          }
+      });
+  }
+}
+
+// Picks up all the elements classified as memory and replaces each one with its
+// pretty printed
+// value.
+function formatMemoryColumns() {
+  var cols = document.getElementsByClassName("memory");
+  for (var idx = 0; idx < cols.length; idx++) {
+    cols[idx].innerText = formatMemory(cols[idx].innerText);
+  }
+}
+
+var memory_key = [
+  {'unit': 'B', 'val': 1},
+  {'unit': 'KB', 'val': 1024},
+  {'unit': 'MB', 'val': 1048576},
+  {'unit': 'GB', 'val': 1073741824}
+]
+
+// Helper method that takes in a value (in bytes) and outputs its pretty 
printed value.
+function formatMemory(val) {
+  var mem_bytes = parseInt(val);
+  for (var idx = memory_key.length - 1; idx >= 0; idx--) {
+    var result = parseFloat(mem_bytes / memory_key[idx].val);
+    if (result < 1) continue;
+    return result.toFixed(2) + " " + memory_key[idx].unit;
+  }
+  return "0 B";
+}
+
+function reset_all() {
+  if (!confirm('Are you sure you want to reset stats for all resource pools 
?')) return;
+  var xhr = new XMLHttpRequest();
+  xhr.open('GET', "/resource_pool_reset", true);
+  xhr.send();
+  window.location.reload(true);
+}
+
+function reset_method(pool_name) {
+  if (!confirm('Are you sure you want to reset stats for ' + pool_name +' ?')) 
return;
+  var xhr = new XMLHttpRequest();
+  xhr.open('GET', "/resource_pool_reset?pool_name=" + pool_name, true);
+  xhr.send();
+  window.location.reload(true);
+}
+</script>
+
+<h2>Admission Controller
+  {{?get_all_pools}}
+  <button class="btn btn-warning btn-xs" onClick="reset_all();">
+    Reset informational stats for all pools
+  </button>
+  {{/get_all_pools}}
+</h2>
+{{^get_all_pools}}
+<p id="show_all_pools" class="lead">
+  <a href='/admission'> < Show all Resource Pools</a>
+</p>
+{{/get_all_pools}}
+<p class="lead">This page lists all resource pools to which queries have been 
submitted
+  at least once and their corresponding state and statistics.</p>
+{{#resource_pools}}
+<div class="container-fluid">
+  <h3><a href='/admission?pool_name={{pool_name}}'>{{pool_name}}</a></h3>
+
+  <h4>Pool Config</h4>
+  <table class='table table-hover table-border'>
+    <tr>
+      <th>Property</th>
+      <th>Value</th>
+    </tr>
+    <tr>
+      <td>Max memory (cluster wide)</td>
+      <td class='memory'>{{pool_max_mem_resources}}</td>
+    </tr>
+    <tr>
+      <td>Max concurrent queries</td>
+      <td>{{pool_max_requests}}</td>
+    </tr>
+    <tr>
+      <td>Max queue size</td>
+      <td>{{pool_max_queued}}</td>
+    </tr>
+    <tr>
+      <td><b>Min</b> Query MEM_LIMIT range</td>
+      <td class='memory'>{{min_query_mem_limit}}</td>
+    </tr>
+    <tr>
+      <td><b>Max</b> Query MEM_LIMIT range</td>
+      <td class='memory'>{{max_query_mem_limit}}</td>
+    </tr>
+    <tr>
+      <td>Clamp MEM_LIMIT query option</td>
+      <td>{{clamp_mem_limit_query_option}}</td>
+    </tr>
+  </table>
+
+  <h4>Queued queries in order of being queued (submitted to this 
coordinator)</h4>
+  <table class='table table-hover table-border'>
+    <tr>
+      <th>Query ID</th>
+      <th>Memory limit</th>
+      <th>Memory limit used for admission</th>
+      <th>Num of backends it will run on</th>
+      <th>Details</th>
+    </tr>
+    {{#queued_queries}}
+    <tr>
+      <td>{{query_id}}</td>
+      <td class='memory'>{{mem_limit}}</td>
+      <td class='memory'>{{mem_limit_to_admit}}</td>
+      <td>{{num_backends}}</td>
+      <td><a href='/query_plan?query_id={{query_id}}'>Details</a></td>
+    </tr>
+    {{/queued_queries}}
+  </table>
+
+  <h4>Running queries (submitted to this coordinator)</h4>
+  <table class='table table-hover table-border'>
+    <tr>
+      <th>Query ID</th>
+      <th>Memory limit</th>
+      <th>Memory limit used for admission</th>
+      <th>Num of backends it will run on</th>
+      <th>Details</th>
+    </tr>
+    {{#running_queries}}
+    <tr>
+      <td>{{query_id}}</td>
+      <td class='memory'>{{mem_limit}}</td>
+      <td class='memory'>{{mem_limit_to_admit}}</td>
+      <td>{{num_backends}}</td>
+      <td><a href='/query_plan?query_id={{query_id}}'>Details</a></td>
+    </tr>
+    {{/running_queries}}
+  </table>
+
+  <h4>Pool stats
+    <button class="btn btn-warning btn-xs" 
onClick="reset_method('{{pool_name}}');">
+      Reset informational stats
+    </button>
+  </h4>
+  <table class='table table-hover table-border'>
+    <tr>
+      <th>Property</th>
+      <th>Value</th>
+      <th>Limit / Max value</th>
+    </tr>
+    <tr>
+      <td>Total queries <b>admitted</b> by this coordinator</td>
+      <td colspan='2'>{{total_admitted}}</td>
+    </tr>
+    <tr>
+      <td>Total queries <b>rejected</b> by this coordinator</td>
+      <td colspan='2'>{{total_rejected}}</td>
+    </tr>
+    <tr>
+      <td>Total queries <b>timed out</b> on this coordinator</td>
+      <td colspan='2'>{{total_timed_out}}</td>
+    </tr>
+    <tr>
+      <td>Queries currently running</td>
+      <td>{{agg_num_running}}</td>
+      <td>{{pool_max_requests}}</td>
+    </tr>
+    <tr>
+      <td>Queries currently queued</td>
+      <td>{{agg_num_queued}}</td>
+      <td>{{pool_max_queued}}</td>
+    </tr>
+    <tr>
+      <td>Total memory reserved across cluster</td>
+      <td class='memory'>{{agg_mem_reserved}}</td>
+      <td class='memory'>{{pool_max_mem_resources}}</td>
+    </tr>
+    <tr>
+      <td>Memory admitted on this coordinator</td>
+      <td class='memory'>{{local_mem_admitted}}</td>
+      <td class='memory'>{{pool_max_mem_resources}}</td>
+    </tr>
+    <tr>
+      <td>Queued reason of query at the head of the queue</td>
+      <td colspan='2'>{{head_queued_reason}}</td>
+    </tr>
+    <tr>
+      <td>Time in queue (exponential moving average)</td>
+      <td colspan='2'>{{wait_time_ms_ema}} ms</td>
+    </tr>
+    <tr>
+      <td colspan='3'>
+        <canvas id="{{pool_name}}" style="border:1px solid"></canvas>
+      </td>
+    </tr>
+  </table>
+</div>
+{{/resource_pools}}
+
+
+<div id="resource_pools_plain_json" style="display: none;">
+    {{resource_pools_plain_json}}
+</div>
+{{> www/common-footer.tmpl }}
diff --git a/www/common-header.tmpl b/www/common-header.tmpl
index 7bf669c..35c1648 100644
--- a/www/common-header.tmpl
+++ b/www/common-header.tmpl
@@ -28,11 +28,16 @@ common-footer.tmpl) }}
     <script type="text/javascript" src="/www/datatables.min.js"></script>
     <link href='/www/bootstrap/css/bootstrap.min.css' rel='stylesheet' 
media='screen'>
     <style>
+      @media (min-width: 1300px) {
+        #nav-options {
+            width: 1250px;
+        }
+      }
     </style>
   </head>
   <body>
     <header class="navbar navbar-default navbar-static-top" id="top" 
role="banner">
-      <div class="container">
+      <div id="nav-options" class="container">
         <div class="navbar-header">
           <a class='navbar-brand' href='/'>{{ __common__.process-name }}</a>
         </div>

Reply via email to