This is an automated email from the ASF dual-hosted git repository.
janhoy pushed a commit to branch branch_10_0
in repository https://gitbox.apache.org/repos/asf/solr.git
The following commit(s) were added to refs/heads/branch_10_0 by this push:
new a23b43db855 SOLR-18004 Admin UI nodes view to parse Prometheus metrics (#3908)
a23b43db855 is described below
commit a23b43db855ef5e797484adc682e5037985e103a
Author: Jan Høydahl <[email protected]>
AuthorDate: Thu Dec 4 11:53:35 2025 +0100
SOLR-18004 Admin UI nodes view to parse Prometheus metrics (#3908)
Co-authored-by: David Smiley <[email protected]>
Co-authored-by: Copilot <[email protected]>
Co-authored-by: Claude Code <[email protected]>
(cherry picked from commit b4f228b37bee176e8ab0adada4d11c3854b01a5e)
---
.../SOLR-18004-frontend-parse-prometheus.yml | 10 +
.../solr/handler/admin/AdminHandlersProxy.java | 182 +++++++---
.../solr/response/PrometheusResponseWriter.java | 21 ++
solr/webapp/web/index.html | 2 +
solr/webapp/web/js/angular/controllers/cloud.js | 379 ++++++++++++++-------
solr/webapp/web/js/angular/controllers/plugins.js | 2 +-
solr/webapp/web/js/angular/metrics-extractor.js | 166 +++++++++
solr/webapp/web/js/angular/prometheus-parser.js | 178 ++++++++++
solr/webapp/web/js/angular/services.js | 29 +-
solr/webapp/web/partials/cloud.html | 14 +-
10 files changed, 787 insertions(+), 196 deletions(-)
diff --git a/changelog/unreleased/SOLR-18004-frontend-parse-prometheus.yml b/changelog/unreleased/SOLR-18004-frontend-parse-prometheus.yml
new file mode 100644
index 00000000000..ee2902d5a62
--- /dev/null
+++ b/changelog/unreleased/SOLR-18004-frontend-parse-prometheus.yml
@@ -0,0 +1,10 @@
+# See https://github.com/apache/solr/blob/main/dev-docs/changelog.adoc
+title: Admin UI "Nodes" view now works with the new Prometheus formatted metrics endpoint
+type: fixed # added, changed, fixed, deprecated, removed, dependency_update, security, other
+authors:
+ - name: Jan Høydahl
+ url: https://home.apache.org/phonebook.html?uid=janhoy
+ - name: David Smiley
+links:
+ - name: SOLR-18004
+ url: https://issues.apache.org/jira/browse/SOLR-18004
diff --git a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java b/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
index 0e0806022c7..5f253c4ec4e 100644
--- a/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
+++ b/solr/core/src/java/org/apache/solr/handler/admin/AdminHandlersProxy.java
@@ -33,6 +33,7 @@ import java.util.concurrent.TimeoutException;
import org.apache.solr.client.solrj.SolrRequest;
import org.apache.solr.client.solrj.SolrServerException;
import org.apache.solr.client.solrj.request.GenericSolrRequest;
+import org.apache.solr.client.solrj.response.InputStreamResponseParser;
import org.apache.solr.cloud.ZkController;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.params.ModifiableSolrParams;
@@ -51,57 +52,56 @@ import org.slf4j.LoggerFactory;
public class AdminHandlersProxy {
private static final Logger log =
LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final String PARAM_NODES = "nodes";
+ private static final String PARAM_NODE = "node";
+ private static final long PROMETHEUS_FETCH_TIMEOUT_SECONDS = 10;
- // Proxy this request to a different remote node if 'node' parameter is provided
+ /** Proxy this request to a different remote node if 'node' or 'nodes' parameter is provided */
public static boolean maybeProxyToNodes(
SolrQueryRequest req, SolrQueryResponse rsp, CoreContainer container)
throws IOException, SolrServerException, InterruptedException {
- String nodeNames = req.getParams().get(PARAM_NODES);
- if (nodeNames == null || nodeNames.isEmpty()) {
- return false; // local request
- }
- if (!container.isZooKeeperAware()) {
- throw new SolrException(
- SolrException.ErrorCode.BAD_REQUEST,
- "Parameter " + PARAM_NODES + " only supported in Cloud mode");
- }
-
- Set<String> nodes;
String pathStr = req.getPath();
+ ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
- Set<String> liveNodes =
- container.getZkController().zkStateReader.getClusterState().getLiveNodes();
+ // Check if response format is Prometheus/OpenMetrics
+ String wt = params.get("wt");
+ boolean isPrometheusFormat = "prometheus".equals(wt) || "openmetrics".equals(wt);
- if (nodeNames.equals("all")) {
- nodes = liveNodes;
- log.debug("All live nodes requested");
+ if (isPrometheusFormat) {
+ // Prometheus format: use singular 'node' parameter for single-node proxy
+ String nodeName = req.getParams().get(PARAM_NODE);
+ if (nodeName == null || nodeName.isEmpty()) {
+ return false; // No node parameter, handle locally
+ }
+
+ params.remove(PARAM_NODE);
+ handlePrometheusSingleNode(nodeName, pathStr, params, container, rsp);
} else {
- nodes = new HashSet<>(Arrays.asList(nodeNames.split(",")));
- for (String nodeName : nodes) {
- if (!nodeName.matches("^[^/:]+:\\d+_[\\w/]+$")) {
- throw new SolrException(
- SolrException.ErrorCode.BAD_REQUEST,
- "Parameter " + PARAM_NODES + " has wrong format");
- }
-
- if (!liveNodes.contains(nodeName)) {
- throw new SolrException(
- SolrException.ErrorCode.BAD_REQUEST,
- "Requested node " + nodeName + " is not part of cluster");
- }
+ // Other formats (JSON/XML): use plural 'nodes' parameter for multi-node aggregation
+ String nodeNames = req.getParams().get(PARAM_NODES);
+ if (nodeNames == null || nodeNames.isEmpty()) {
+ return false; // No nodes parameter, handle locally
}
- log.debug("Nodes requested: {}", nodes);
- }
- if (log.isDebugEnabled()) {
- log.debug("{} parameter {} specified on {} request", PARAM_NODES,
nodeNames, pathStr);
+
+ params.remove(PARAM_NODES);
+ Set<String> nodes = resolveNodes(nodeNames, container);
+ handleNamedListFormat(nodes, pathStr, params, container.getZkController(), rsp);
}
- ModifiableSolrParams params = new ModifiableSolrParams(req.getParams());
- params.remove(PARAM_NODES);
+ return true;
+ }
+
+ /** Handle non-Prometheus formats using the existing NamedList approach. */
+ private static void handleNamedListFormat(
+ Set<String> nodes,
+ String pathStr,
+ SolrParams params,
+ ZkController zkController,
+ SolrQueryResponse rsp) {
+
Map<String, Future<NamedList<Object>>> responses = new LinkedHashMap<>();
for (String node : nodes) {
- responses.put(node, callRemoteNode(node, pathStr, params, container.getZkController()));
+ responses.put(node, callRemoteNode(node, pathStr, params, zkController));
}
for (Map.Entry<String, Future<NamedList<Object>>> entry : responses.entrySet()) {
@@ -109,29 +109,115 @@ public class AdminHandlersProxy {
NamedList<Object> resp = entry.getValue().get(10, TimeUnit.SECONDS);
rsp.add(entry.getKey(), resp);
} catch (ExecutionException ee) {
- log.warn("Exception when fetching result from node {}",
entry.getKey(), ee);
+ log.warn(
+ "Exception when fetching result from node {}", entry.getKey(),
ee.getCause()); // nowarn
} catch (TimeoutException te) {
- log.warn("Timeout when fetching result from node {}", entry.getKey(),
te);
+ log.warn("Timeout when fetching result from node {}", entry.getKey());
+ } catch (InterruptedException e) {
+ log.warn("Interrupted when fetching result from node {}",
entry.getKey());
+ Thread.currentThread().interrupt();
+ break; // stop early
}
}
if (log.isDebugEnabled()) {
- log.debug(
- "Fetched response from {} nodes: {}", responses.keySet().size(), responses.keySet());
+ log.debug("Fetched response from {} nodes: {}", responses.size(), responses.keySet());
}
- return true;
}
/** Makes a remote request asynchronously. */
public static CompletableFuture<NamedList<Object>> callRemoteNode(
- String nodeName, String uriPath, SolrParams params, ZkController zkController)
- throws IOException, SolrServerException {
+ String nodeName, String uriPath, SolrParams params, ZkController zkController) {
+
+ // Validate that the node exists in the cluster
+ if (!zkController.zkStateReader.getClusterState().getLiveNodes().contains(nodeName)) {
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST,
+ "Requested node " + nodeName + " is not part of cluster");
+ }
+
log.debug("Proxying {} request to node {}", uriPath, nodeName);
URI baseUri =
URI.create(zkController.zkStateReader.getBaseUrlForNodeName(nodeName));
SolrRequest<?> proxyReq = new GenericSolrRequest(SolrRequest.METHOD.GET, uriPath, params);
- return zkController
- .getCoreContainer()
- .getDefaultHttpSolrClient()
- .requestWithBaseUrl(baseUri.toString(), c -> c.requestAsync(proxyReq));
+ // Set response parser based on wt parameter to ensure correct format is used
+ String wt = params.get("wt");
+ if ("prometheus".equals(wt) || "openmetrics".equals(wt)) {
+ proxyReq.setResponseParser(new InputStreamResponseParser(wt));
+ }
+
+ try {
+ return zkController
+ .getCoreContainer()
+ .getDefaultHttpSolrClient()
+ .requestWithBaseUrl(baseUri.toString(), c -> c.requestAsync(proxyReq));
+ } catch (SolrServerException | IOException e) {
+ // requestWithBaseUrl declares it throws these but it actually depends on the lambda
+ assert false : "requestAsync doesn't throw; it returns a Future";
+ throw new RuntimeException(e);
+ }
+ }
+
+ /**
+ * Resolve node names from the "nodes" parameter into a set of live node names.
+ *
+ * @param nodeNames the value of the "nodes" parameter ("all" or comma-separated node names)
+ * @param container the CoreContainer
+ * @return set of resolved node names
+ * @throws SolrException if node format is invalid
+ */
+ private static Set<String> resolveNodes(String nodeNames, CoreContainer container) {
+ Set<String> liveNodes =
+ container.getZkController().zkStateReader.getClusterState().getLiveNodes();
+
+ if (nodeNames.equals("all")) {
+ log.debug("All live nodes requested");
+ return liveNodes;
+ }
+
+ Set<String> nodes = new HashSet<>(Arrays.asList(nodeNames.split(",")));
+ for (String nodeName : nodes) {
+ if (!nodeName.matches("^[^/:]+:\\d+_[\\w/]+$")) {
+ throw new SolrException(
+ SolrException.ErrorCode.BAD_REQUEST, "Parameter " + PARAM_NODES + " has wrong format");
+ }
+ }
+ log.debug("Nodes requested: {}", nodes);
+ return nodes;
+ }
+
+ /**
+ * Handle Prometheus format by proxying to a single node.
+ *
+ * @param nodeName the name of the single node to proxy to
+ * @param pathStr the request path
+ * @param params the request parameters (with 'node' parameter already removed)
+ * @param container the CoreContainer
+ * @param rsp the response to populate
+ */
+ private static void handlePrometheusSingleNode(
+ String nodeName,
+ String pathStr,
+ ModifiableSolrParams params,
+ CoreContainer container,
+ SolrQueryResponse rsp)
+ throws IOException, SolrServerException {
+
+ // Keep wt=prometheus for the remote request so MetricsHandler accepts it
+ // The InputStreamResponseParser will return the Prometheus text in a "stream" key
+ Future<NamedList<Object>> response =
+ callRemoteNode(nodeName, pathStr, params, container.getZkController());
+
+ try {
+ try {
+ NamedList<Object> resp = response.get(PROMETHEUS_FETCH_TIMEOUT_SECONDS, TimeUnit.SECONDS);
+ rsp.getValues().addAll(resp);
+ } catch (ExecutionException e) {
+ throw e.getCause();
+ }
+ } catch (IOException | SolrServerException | RuntimeException | Error e) {
+ throw e;
+ } catch (Throwable t) { // unlikely?
+ throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, t);
+ }
}
}
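For illustration only (not part of this commit), the single-node proxy added above can be exercised from a browser or script roughly like this; the '/solr' context path and the node name are placeholder values:

    // Fetch Prometheus-formatted metrics for one specific cluster node via the proxy.
    // The 'node' parameter is validated against live nodes and removed before forwarding.
    fetch('/solr/admin/metrics?wt=prometheus&node=127.0.0.1:8983_solr')
      .then(function (res) { return res.text(); })
      .then(function (text) {
        // 'text' is the raw Prometheus exposition text streamed back from the remote node
        console.log(text.split('\n').length + ' metric lines received');
      });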
diff --git a/solr/core/src/java/org/apache/solr/response/PrometheusResponseWriter.java b/solr/core/src/java/org/apache/solr/response/PrometheusResponseWriter.java
index a5f69e1632b..01a2af19421 100644
--- a/solr/core/src/java/org/apache/solr/response/PrometheusResponseWriter.java
+++ b/solr/core/src/java/org/apache/solr/response/PrometheusResponseWriter.java
@@ -22,7 +22,9 @@ import io.prometheus.metrics.expositionformats.OpenMetricsTextFormatWriter;
import io.prometheus.metrics.expositionformats.PrometheusTextFormatWriter;
import io.prometheus.metrics.model.snapshots.MetricSnapshots;
import java.io.IOException;
+import java.io.InputStream;
import java.io.OutputStream;
+import java.nio.charset.StandardCharsets;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.handler.admin.MetricsHandler;
import org.apache.solr.request.SolrQueryRequest;
@@ -41,7 +43,26 @@ public class PrometheusResponseWriter implements QueryResponseWriter {
OutputStream out, SolrQueryRequest request, SolrQueryResponse response, String contentType)
throws IOException {
+ // Check if we have pre-formatted Prometheus text (from single-node proxy)
+ if (response.getValues().get("stream") instanceof InputStream stream) {
+ try {
+ stream.transferTo(out);
+ } finally {
+ stream.close();
+ }
+ return;
+ }
+
+ if (response.getException() != null) {
+ out.write(response.getException().toString().getBytes(StandardCharsets.UTF_8));
+ return;
+ }
+
+ // Otherwise handle MetricSnapshots
var metrics = response.getValues().get("metrics");
+ if (metrics == null) {
+ throw new IOException("No metrics found in response");
+ }
MetricSnapshots snapshots = (MetricSnapshots) metrics;
if (writeOpenMetricsFormat(request)) {
new OpenMetricsTextFormatWriter(false, true).write(out, snapshots);
diff --git a/solr/webapp/web/index.html b/solr/webapp/web/index.html
index 7e692e0efcd..4689d5bb485 100644
--- a/solr/webapp/web/index.html
+++ b/solr/webapp/web/index.html
@@ -74,6 +74,8 @@ limitations under the License.
<script src="libs/jssha-3.3.1-sha256.min.js?_=${version}"></script>
<script src="js/angular/app.js?_=${version}"></script>
<script src="js/angular/services.js?_=${version}"></script>
+ <script src="js/angular/prometheus-parser.js?_=${version}"></script>
+ <script src="js/angular/metrics-extractor.js?_=${version}"></script>
<script src="js/angular/permissions.js?_=${version}"></script>
<script src="js/angular/controllers/index.js?_=${version}"></script>
<script src="js/angular/controllers/login.js?_=${version}"></script>
diff --git a/solr/webapp/web/js/angular/controllers/cloud.js b/solr/webapp/web/js/angular/controllers/cloud.js
index b766c9a6e6b..5bce7213b4a 100644
--- a/solr/webapp/web/js/angular/controllers/cloud.js
+++ b/solr/webapp/web/js/angular/controllers/cloud.js
@@ -16,7 +16,7 @@
*/
solrAdminApp.controller('CloudController',
- function($scope, $location, Zookeeper, Constants, Collections, System, Metrics, ZookeeperStatus) {
+ function($scope, $location, Zookeeper, Constants, Collections, System, Metrics, MetricsExtractor, ZookeeperStatus) {
$scope.showDebug = false;
@@ -37,7 +37,7 @@ solrAdminApp.controller('CloudController',
graphSubController($scope, Zookeeper, false);
} else if (view === "nodes") {
$scope.resetMenu("cloud-nodes", Constants.IS_ROOT_PAGE);
- nodesSubController($scope, Collections, System, Metrics);
+ nodesSubController($scope, Collections, System, Metrics, MetricsExtractor);
} else if (view === "zkstatus") {
$scope.resetMenu("cloud-zkstatus", Constants.IS_ROOT_PAGE);
zkStatusSubController($scope, ZookeeperStatus, false);
@@ -107,7 +107,7 @@ function isNumeric(n) {
return !isNaN(parseFloat(n)) && isFinite(n);
}
-var nodesSubController = function($scope, Collections, System, Metrics) {
+var nodesSubController = function($scope, Collections, System, Metrics, MetricsExtractor) {
$scope.pageSize = 10;
$scope.showNodes = true;
$scope.showTree = false;
@@ -150,25 +150,25 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
$scope.from = Math.max(0, $scope.from - parseInt($scope.pageSize));
$scope.reload();
};
-
+
// Checks if this node is the first (alphabetically) for a given host. Used to decide rowspan in table
$scope.isFirstNodeForHost = function(node) {
- var hostName = node.split(":")[0];
+ var hostName = node.split(":")[0];
var nodesInHost = $scope.filteredNodes.filter(function (node) {
return node.split(":")[0] === hostName;
});
return nodesInHost[0] === node;
};
-
+
// Returns the first live node for this host, to make sure we pick host-level metrics from a live node
$scope.firstLiveNodeForHost = function(key) {
- var hostName = key.split(":")[0];
+ var hostName = key.split(":")[0];
var liveNodesInHost = $scope.filteredNodes.filter(function (key) {
return key.split(":")[0] === hostName;
}).filter(function (key) {
return $scope.live_nodes.includes(key);
});
- return liveNodesInHost.length > 0 ? liveNodesInHost[0] : key;
+ return liveNodesInHost.length > 0 ? liveNodesInHost[0] : key;
};
// Initializes the cluster state, list of nodes, collections etc
@@ -227,7 +227,7 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
ensureNodeInHosts(node, hosts);
}
- // Make sure nodes are sorted alphabetically to align with rowspan in table
+ // Make sure nodes are sorted alphabetically to align with rowspan in table
for (var host in hosts) {
hosts[host].nodes.sort();
}
@@ -249,7 +249,7 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
/*
Reload will fetch data for the current page of the table and thus refresh numbers.
- It is also called whenever a filter or paging action is executed
+ It is also called whenever a filter or paging action is executed
*/
$scope.reload = function() {
var nodes = $scope.nodes;
@@ -307,7 +307,7 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
case "health":
}
-
+
if (filteredNodes) {
// If filtering is active, calculate what hosts contain the nodes that match the filters
isFiltered = true;
@@ -322,7 +322,7 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
}
filteredNodes.sort();
filteredHosts.sort();
-
+
// Find what hosts & nodes (from the filtered set) that should be displayed on current page
for (var id = $scope.from ; id < $scope.from + pageSize && filteredHosts[id] ; id++) {
var hostName = filteredHosts[id];
@@ -335,9 +335,9 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
nodesToShow = nodesToShow.concat(hosts[hostName]['nodes']);
}
}
- nodesParam = nodesToShow.filter(function (node) {
- return live_nodes.includes(node);
- }).join(',');
+ var liveNodesToShow = nodesToShow.filter(function (node) {
+ return live_nodes.includes(node);
+ });
var deadNodes = nodesToShow.filter(function (node) {
return !live_nodes.includes(node);
});
@@ -353,7 +353,7 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
Fetch system info for all selected nodes
Pick the data we want to display and add it to the node-centric data structure
*/
- System.get({"nodes": nodesParam}, function (systemResponse) {
+ System.get({"nodes": liveNodesToShow.join(',')}, function (systemResponse) {
for (var node in systemResponse) {
if (node in nodes) {
var s = systemResponse[node];
@@ -391,121 +391,182 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
});
/*
- Fetch metrics for all selected nodes. Only pull the metrics that we'll show to save bandwidth
- Pick the data we want to display and add it to the node-centric data structure
+ Fetch metrics for all selected nodes in parallel. Make one request per node.
+ Only pull the metrics that we'll show to save bandwidth.
*/
- Metrics.get({
- "nodes": nodesParam,
- "prefix": "CONTAINER.fs,org.eclipse.jetty.server.handler.DefaultHandler.get-requests,INDEX.sizeInBytes,SEARCHER.searcher.numDocs,SEARCHER.searcher.deletedDocs,SEARCHER.searcher.warmupTime"
+ var metricsNameParam = "solr_disk_space_megabytes,solr_core_index_size_megabytes,solr_core_indexsearcher_index_num_docs,solr_core_indexsearcher_index_docs,solr_core_indexsearcher_open_time_milliseconds";
+
+ // Create array of promises (one per node)
+ var metricsPromises = [];
+ liveNodesToShow.forEach(function(node) {
+ var promise = Metrics.get({
+ node: node,
+ name: metricsNameParam
+ }).$promise.then(
+ function(response) {
+ // Success - return the parsed metrics with node identifier
+ return {
+ node: node,
+ metrics: response.metrics,
+ success: true
+ };
},
- function (metricsResponse) {
- for (var node in metricsResponse) {
- if (node in nodes) {
- var m = metricsResponse[node];
- nodes[node]['metrics'] = m;
- var diskTotal = m.metrics['solr.node']['CONTAINER.fs.totalSpace'];
- var diskFree = m.metrics['solr.node']['CONTAINER.fs.usableSpace'];
- var diskPercentage = Math.floor((diskTotal - diskFree) / diskTotal * 100);
- nodes[node]['diskUsedPct'] = diskPercentage;
- nodes[node]['diskUsedPctStyle'] = styleForPct(diskPercentage);
- nodes[node]['diskTotal'] = bytesToSize(diskTotal);
- nodes[node]['diskFree'] = bytesToSize(diskFree);
-
- var r = m.metrics['solr.jetty']['org.eclipse.jetty.server.handler.DefaultHandler.get-requests'];
- nodes[node]['req'] = r.count;
- nodes[node]['req1minRate'] = Math.floor(r['1minRate'] * 100) / 100;
- nodes[node]['req5minRate'] = Math.floor(r['5minRate'] * 100) / 100;
- nodes[node]['req15minRate'] = Math.floor(r['15minRate'] * 100) / 100;
- nodes[node]['reqp75_ms'] = Math.floor(r['p75_ms']);
- nodes[node]['reqp95_ms'] = Math.floor(r['p95_ms']);
- nodes[node]['reqp99_ms'] = Math.floor(r['p99_ms']);
-
- // These are the cores we _expect_ to find on this node according to the CLUSTERSTATUS
- var cores = nodes[node]['cores'];
- var indexSizeTotal = 0;
- var indexSizeMax = 0;
- var docsTotal = 0;
- var graphData = [];
- for (let coreId in cores) {
- var core = cores[coreId];
- if (core['shard_state'] !== 'active' || core['state'] !== 'active') {
- // If core state is not active, display the real state, or if shard is inactive, display that
- var labelState = (core['state'] !== 'active') ? core['state'] : core['shard_state'];
- core['label'] += "_(" + labelState + ")";
- }
- var coreMetricName = "solr.core." + core['collection'] + "." + core['shard'] + "." + core['replica'];
- var coreMetric = m.metrics[coreMetricName];
- // we may not actually get metrics back for every expected core (the core may be down)
- if (coreMetric) {
- var size = coreMetric['INDEX.sizeInBytes'];
- size = (typeof size !== 'undefined') ? size : 0;
- core['sizeInBytes'] = size;
- core['size'] = bytesToSize(size);
- indexSizeTotal = indexSizeTotal + size;
- indexSizeMax = size > indexSizeMax ? size : indexSizeMax;
- var numDocs = coreMetric['SEARCHER.searcher.numDocs'];
- numDocs = (typeof numDocs !== 'undefined') ? numDocs : 0;
- core['numDocs'] = numDocs;
- core['numDocsHuman'] = numDocsHuman(numDocs);
- core['avgSizePerDoc'] = bytesToSize(numDocs === 0 ? 0 : size / numDocs);
- var deletedDocs = coreMetric['SEARCHER.searcher.deletedDocs'];
- deletedDocs = (typeof deletedDocs !== 'undefined') ? deletedDocs : 0;
- core['deletedDocs'] = deletedDocs;
- core['deletedDocsHuman'] = numDocsHuman(deletedDocs);
- var warmupTime = coreMetric['SEARCHER.searcher.warmupTime'];
- warmupTime = (typeof warmupTime !== 'undefined') ? warmupTime : 0;
- core['warmupTime'] = warmupTime;
- docsTotal += core['numDocs'];
- }
- }
- for (let coreId in cores) {
- var core = cores[coreId];
- var graphObj = {};
- graphObj['label'] = core['label'];
- graphObj['size'] = core['sizeInBytes'];
- graphObj['sizeHuman'] = core['size'];
- graphObj['pct'] = (core['sizeInBytes'] / indexSizeMax) * 100;
- graphData.push(graphObj);
- }
- if (cores) {
- cores.sort(function (a, b) {
- return b.sizeInBytes - a.sizeInBytes
- });
- }
- graphData.sort(function (a, b) {
- return b.size - a.size
- });
- nodes[node]['graphData'] = graphData;
- nodes[node]['numDocs'] = numDocsHuman(docsTotal);
- nodes[node]['sizeInBytes'] = indexSizeTotal;
- nodes[node]['size'] = bytesToSize(indexSizeTotal);
- nodes[node]['sizePerDoc'] = docsTotal === 0 ? '0b' : bytesToSize(indexSizeTotal / docsTotal);
-
- // Build the d3 powered bar chart
- $('#chart' + nodes[node]['id']).empty();
- var chart = d3.select('#chart' + nodes[node]['id']).append('div').attr('class', 'chart');
- // Add one div per bar which will group together both labels and bars
bars
- var g = chart.selectAll('div')
- .data(nodes[node]['graphData']).enter()
- .append('div');
-
- // Add the bars
- var bars = g.append("div")
- .attr("class", "rect")
- .text(function (d) {
- return d.label + ':\u00A0\u00A0' + d.sizeHuman;
- });
-
- // Execute the transition to show the bars
- bars.transition()
- .ease('elastic')
- .style('width', function (d) {
- return d.pct + '%';
- });
+ function(error) {
+ // Failure - log and return error marker
+ console.error('Failed to fetch metrics from node ' + node + ':', error);
+ return {
+ node: node,
+ success: false,
+ error: error
+ };
+ }
+ );
+ metricsPromises.push(promise);
+ });
+
+ // Wait for all requests to complete (success or failure)
+ Promise.all(metricsPromises).then(function(results) {
+ // Separate successful and failed results
+ var successfulResults = results.filter(function(r) { return r.success; });
+ var failedResults = results.filter(function(r) { return !r.success; });
+
+ // Log any failures
+ if (failedResults.length > 0) {
+ console.warn('Failed to fetch metrics from ' + failedResults.length + ' node(s):',
+ failedResults.map(function(r) { return r.node; }));
+ }
+
+ // If all nodes failed, show error state
+ if (successfulResults.length === 0) {
+ console.error('Failed to fetch metrics from all nodes');
+ $scope.metricsError = true;
+ return;
+ }
+
+ // Merge all successful metrics responses, passing node info along
+ var parsedMetrics = mergePrometheusMetrics(successfulResults);
+
+ if (!parsedMetrics) {
+ console.error('Failed to merge metrics');
+ $scope.metricsError = true;
+ return;
+ }
+
+ // Now process the merged metrics the same way as before
+ for (var i = 0; i < nodesToShow.length; i++) {
+ var node = nodesToShow[i];
+ if (!nodes[node]) continue;
+
+ nodes[node]['metrics'] = parsedMetrics;
+
+ // Extract disk metrics with node filter
+ var diskMetrics = MetricsExtractor.extractDiskMetrics(parsedMetrics, { node: node });
+ if (diskMetrics) {
+ var diskTotal = diskMetrics.totalSpace || 0;
+ var diskFree = diskMetrics.usableSpace || 0;
+ var diskPercentage = diskTotal > 0 ? Math.floor((diskTotal - diskFree) / diskTotal * 100) : 0;
+ nodes[node]['diskUsedPct'] = diskPercentage;
+ nodes[node]['diskUsedPctStyle'] = styleForPct(diskPercentage);
+ nodes[node]['diskTotal'] = bytesToSize(diskTotal);
+ nodes[node]['diskFree'] = bytesToSize(diskFree);
+ }
+
+ // These are the cores we _expect_ to find on this node according to the CLUSTERSTATUS
+ var cores = nodes[node]['cores'];
+ if (!cores || typeof cores !== 'object') {
+ cores = {};
+ nodes[node]['cores'] = cores;
+ }
+ var indexSizeTotal = 0;
+ var indexSizeMax = 0;
+ var docsTotal = 0;
+ var graphData = [];
+
+ for (var coreId in cores) {
+ var core = cores[coreId];
+
+ if (core['shard_state'] !== 'active' || core['state'] !== 'active') {
+ // If core state is not active, display the real state, or if shard is inactive, display that
+ var labelState = (core['state'] !== 'active') ? core['state'] : core['shard_state'];
+ core['label'] += "_(" + labelState + ")";
}
+
+ // Build full core name for label matching
+ // Prometheus metrics use format: "collection_shard_replica"
+ var fullCoreName = core['collection'] + '_' + core['shard'] + '_' + core['replica'];
+ var coreLabels = { core: fullCoreName, node: node };
+
+ // Extract metrics using helpers (with node filter)
+ var size = MetricsExtractor.extractCoreIndexSize(parsedMetrics, coreLabels);
+ var searcherMetrics = MetricsExtractor.extractSearcherMetrics(parsedMetrics, coreLabels);
+
+ core['sizeInBytes'] = size;
+ core['size'] = bytesToSize(size);
+ indexSizeTotal = indexSizeTotal + size;
+ indexSizeMax = size > indexSizeMax ? size : indexSizeMax;
+
+ var numDocs = searcherMetrics.numDocs || 0;
+ core['numDocs'] = numDocs;
+ core['numDocsHuman'] = numDocsHuman(numDocs);
+ core['avgSizePerDoc'] = bytesToSize(numDocs === 0 ? 0 : size / numDocs);
+
+ var deletedDocs = searcherMetrics.deletedDocs || 0;
+ core['deletedDocs'] = deletedDocs;
+ core['deletedDocsHuman'] = numDocsHuman(deletedDocs);
+
+ var warmupTime = searcherMetrics.warmupTime || 0;
+ core['warmupTime'] = warmupTime;
+
+ docsTotal += core['numDocs'];
}
- });
+
+ for (var coreId in cores) {
+ var core = cores[coreId];
+ var graphObj = {};
+ graphObj['label'] = core['label'];
+ graphObj['size'] = core['sizeInBytes'];
+ graphObj['sizeHuman'] = core['size'];
+ graphObj['pct'] = indexSizeMax > 0 ? (core['sizeInBytes'] / indexSizeMax) * 100 : 0;
+ graphData.push(graphObj);
+ }
+
+ // Note: cores is an object (key-value pairs), not an array, so we cannot sort it directly.
+ // The sorting is handled separately for graphData which is an array.
+
+ graphData.sort(function (a, b) {
+ return b.size - a.size
+ });
+
+ nodes[node]['graphData'] = graphData;
+ nodes[node]['numDocs'] = numDocsHuman(docsTotal);
+ nodes[node]['sizeInBytes'] = indexSizeTotal;
+ nodes[node]['size'] = bytesToSize(indexSizeTotal);
+ nodes[node]['sizePerDoc'] = docsTotal === 0 ? '0b' : bytesToSize(indexSizeTotal / docsTotal);
+
+ // Build the d3 powered bar chart
+ $('#chart' + nodes[node]['id']).empty();
+ var chart = d3.select('#chart' + nodes[node]['id']).append('div').attr('class', 'chart');
+
+ // Add one div per bar which will group together both labels and bars
+ var g = chart.selectAll('div')
+ .data(nodes[node]['graphData']).enter()
+ .append('div');
+
+ // Add the bars
+ var bars = g.append("div")
+ .attr("class", "rect")
+ .text(function (d) {
+ return d.label + ':\u00A0\u00A0' + d.sizeHuman;
+ });
+
+ // Execute the transition to show the bars
+ bars.transition()
+ .ease('elastic')
+ .style('width', function (d) {
+ return d.pct + '%';
+ });
+ }
+ });
$scope.nodes = nodes;
$scope.hosts = hosts;
$scope.live_nodes = live_nodes;
@@ -514,6 +575,62 @@ var nodesSubController = function($scope, Collections, System, Metrics) {
$scope.filteredNodes = filteredNodes;
$scope.filteredHosts = filteredHosts;
};
+
+ /**
+ * Merge multiple Prometheus metrics objects into a single object.
+ * Each result has {node: nodeName, metrics: {...}}
+ * Merging combines all samples from all sources under the same metric names,
+ * and adds a 'node' label to each sample to track which node it came from.
+ *
+ * @param {Array} resultsArray - Array of {node, metrics} objects
+ * @returns {Object} Merged metrics object
+ */
+ function mergePrometheusMetrics(resultsArray) {
+ var merged = {};
+
+ resultsArray.forEach(function(result) {
+ if (!result || !result.metrics) return;
+
+ var nodeName = result.node;
+ var metrics = result.metrics;
+
+ for (var metricName in metrics) {
+ if (!metrics.hasOwnProperty(metricName)) continue;
+
+ var metric = metrics[metricName];
+
+ if (!merged[metricName]) {
+ // First time seeing this metric - initialize
+ merged[metricName] = {
+ type: metric.type,
+ help: metric.help,
+ samples: []
+ };
+ }
+
+ // Add all samples from this metric, injecting the node label
+ if (metric.samples && Array.isArray(metric.samples)) {
+ metric.samples.forEach(function(sample) {
+ // Create a copy of the sample with the node label added
+ var sampleWithNode = {
+ metricName: sample.metricName,
+ labels: Object.assign({}, sample.labels || {}, {node: nodeName}),
+ value: sample.value,
+ metricSuffix: sample.metricSuffix
+ };
+ if (sample.timestamp !== undefined) {
+ sampleWithNode.timestamp = sample.timestamp;
+ }
+ merged[metricName].samples.push(sampleWithNode);
+ });
+ }
+ }
+ });
+
+ return merged;
+ }
+
+ // Initialize cluster state
$scope.initClusterState();
};
@@ -525,7 +642,7 @@ var zkStatusSubController = function($scope, ZookeeperStatus) {
$scope.tree = {};
$scope.showData = false;
$scope.showDetails = false;
-
+
$scope.toggleDetails = function() {
$scope.showDetails = !$scope.showDetails === true;
};
@@ -535,8 +652,8 @@ var zkStatusSubController = function($scope, ZookeeperStatus) {
$scope.zkState = data.zkStatus;
$scope.mainKeys = ["ok", "clientPort", "secureClientPort",
"zk_server_state", "zk_version",
"zk_approximate_data_size", "zk_znode_count",
"zk_num_alive_connections"];
- $scope.detailKeys = ["dataDir", "dataLogDir",
- "zk_avg_latency", "zk_max_file_descriptor_count", "zk_watch_count",
+ $scope.detailKeys = ["dataDir", "dataLogDir",
+ "zk_avg_latency", "zk_max_file_descriptor_count", "zk_watch_count",
"zk_packets_sent", "zk_packets_received",
"tickTime", "maxClientCnxns", "minSessionTimeout",
"maxSessionTimeout"];
$scope.ensembleMainKeys = ["serverId", "electionPort", "quorumPort",
"role"];
@@ -597,7 +714,7 @@ var treeSubController = function($scope, Zookeeper) {
/**
* Translates seconds into human readable format of seconds, minutes, hours,
days, and years
- *
+ *
* @param {number} seconds The number of seconds to be processed
* @return {string} The phrase describing the amount of time
*/
diff --git a/solr/webapp/web/js/angular/controllers/plugins.js b/solr/webapp/web/js/angular/controllers/plugins.js
index d3abd35a903..bf0dd9bcac7 100644
--- a/solr/webapp/web/js/angular/controllers/plugins.js
+++ b/solr/webapp/web/js/angular/controllers/plugins.js
@@ -34,7 +34,7 @@ solrAdminApp.controller('PluginsController',
var type = $location.search().type;
- Metrics.prometheus(params, function (response) {
+ Metrics.raw(params, function (response) {
$scope.types = getPluginTypesFromMetrics(response.data, type);
$scope.type = getSelectedType($scope.types, type);
diff --git a/solr/webapp/web/js/angular/metrics-extractor.js b/solr/webapp/web/js/angular/metrics-extractor.js
new file mode 100644
index 00000000000..2eb07c0022c
--- /dev/null
+++ b/solr/webapp/web/js/angular/metrics-extractor.js
@@ -0,0 +1,166 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Metrics extraction helper for Solr Admin UI
+ *
+ * Provides helper functions to extract specific metric values from
+ * parsed Prometheus metrics data.
+ */
+
+(function() {
+ 'use strict';
+
+ angular.module('solrAdminApp').factory('MetricsExtractor', function() {
+
+ /**
+ * Find a metric sample by label filters
+ * @param {Object} metric - Parsed metric object with samples array
+ * @param {Object} labelFilters - Object with label key-value pairs to match
+ * @returns {Object|null} Matching sample or null
+ */
+ function findSample(metric, labelFilters) {
+ if (!metric || !metric.samples) return null;
+
+ for (var i = 0; i < metric.samples.length; i++) {
+ var sample = metric.samples[i];
+ var matches = true;
+
+ for (var key in labelFilters) {
+ if (labelFilters.hasOwnProperty(key)) {
+ if (!sample.labels || sample.labels[key] !== labelFilters[key]) {
+ matches = false;
+ break;
+ }
+ }
+ }
+
+ if (matches) {
+ return sample;
+ }
+ }
+
+ return null;
+ }
+
+ /**
+ * Extract disk metrics (total and usable space)
+ * @param {Object} parsedMetrics - Parsed Prometheus metrics
+ * @param {Object} labelFilters - Optional additional label filters (e.g., {node: "nodeName"})
+ * @returns {Object} Object with totalSpace and usableSpace in bytes
+ */
+ function extractDiskMetrics(parsedMetrics, labelFilters) {
+ var diskMetric = parsedMetrics['solr_disk_space_megabytes'];
+ if (!diskMetric) {
+ return { totalSpace: 0, usableSpace: 0 };
+ }
+
+ // Merge standard filters with optional filters
+ var totalFilters = { category: 'CONTAINER', type: 'total_space' };
+ var usableFilters = { category: 'CONTAINER', type: 'usable_space' };
+
+ if (labelFilters) {
+ for (var key in labelFilters) {
+ if (labelFilters.hasOwnProperty(key)) {
+ totalFilters[key] = labelFilters[key];
+ usableFilters[key] = labelFilters[key];
+ }
+ }
+ }
+
+ var totalSample = findSample(diskMetric, totalFilters);
+ var usableSample = findSample(diskMetric, usableFilters);
+
+ return {
+ totalSpace: totalSample ? totalSample.value * 1024 * 1024 : 0, // MB to bytes
+ usableSpace: usableSample ? usableSample.value * 1024 * 1024 : 0 // MB to bytes
+ };
+ }
+
+ /**
+ * Extract core index size
+ * @param {Object} parsedMetrics - Parsed Prometheus metrics
+ * @param {Object} coreLabels - Labels to identify the core (must include 'core')
+ * @returns {number} Index size in bytes
+ */
+ function extractCoreIndexSize(parsedMetrics, coreLabels) {
+ var indexSizeMetric = parsedMetrics['solr_core_index_size_megabytes'];
+ if (!indexSizeMetric) return 0;
+
+ var sample = findSample(indexSizeMetric, coreLabels);
+ return sample ? sample.value * 1024 * 1024 : 0; // MB to bytes
+ }
+
+ /**
+ * Extract searcher metrics (numDocs, deletedDocs, warmupTime)
+ * @param {Object} parsedMetrics - Parsed Prometheus metrics
+ * @param {Object} coreLabels - Labels to identify the core (must include 'core')
+ * @returns {Object} Object with numDocs, deletedDocs, warmupTime
+ */
+ function extractSearcherMetrics(parsedMetrics, coreLabels) {
+ var numDocsMetric = parsedMetrics['solr_core_indexsearcher_index_num_docs'];
+ var totalDocsMetric = parsedMetrics['solr_core_indexsearcher_index_docs'];
+ var openTimeMetric = parsedMetrics['solr_core_indexsearcher_open_time_milliseconds'];
+
+ var numDocsSample = findSample(numDocsMetric, coreLabels);
+ var totalDocsSample = findSample(totalDocsMetric, coreLabels);
+
+ var numDocs = numDocsSample ? numDocsSample.value : 0;
+ var totalDocs = totalDocsSample ? totalDocsSample.value : 0;
+ var deletedDocs = totalDocs - numDocs;
+
+ // For warmup time, look for _sum sample from the histogram
+ var warmupTime = 0;
+ if (openTimeMetric) {
+ for (var i = 0; i < openTimeMetric.samples.length; i++) {
+ var sample = openTimeMetric.samples[i];
+
+ // Check if labels match
+ var labelsMatch = true;
+ for (var key in coreLabels) {
+ if (coreLabels.hasOwnProperty(key)) {
+ if (!sample.labels || sample.labels[key] !== coreLabels[key]) {
+ labelsMatch = false;
+ break;
+ }
+ }
+ }
+
+ // Check if this is the _sum sample
+ if (labelsMatch && sample.metricSuffix === '_sum') {
+ warmupTime = sample.value;
+ break;
+ }
+ }
+ }
+
+ return {
+ numDocs: numDocs,
+ deletedDocs: deletedDocs,
+ warmupTime: warmupTime
+ };
+ }
+
+ // Export public API
+ return {
+ extractDiskMetrics: extractDiskMetrics,
+ extractCoreIndexSize: extractCoreIndexSize,
+ extractSearcherMetrics: extractSearcherMetrics,
+ findSample: findSample
+ };
+ });
+})();
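A minimal usage sketch for the MetricsExtractor factory above (the factory would be injected into a controller; the node and core names are invented examples):

    // Disk totals for one node; values come back in bytes (converted from megabytes)
    var disk = MetricsExtractor.extractDiskMetrics(parsedMetrics, { node: 'host1:8983_solr' });
    console.log((disk.totalSpace - disk.usableSpace) + ' bytes used');

    // Core-level metrics are matched on the "collection_shard_replica" core label
    var searcher = MetricsExtractor.extractSearcherMetrics(
        parsedMetrics, { core: 'techproducts_shard1_replica_n1', node: 'host1:8983_solr' });
    console.log(searcher.numDocs + ' docs, ' + searcher.deletedDocs + ' deleted');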
diff --git a/solr/webapp/web/js/angular/prometheus-parser.js b/solr/webapp/web/js/angular/prometheus-parser.js
new file mode 100644
index 00000000000..ef79633c839
--- /dev/null
+++ b/solr/webapp/web/js/angular/prometheus-parser.js
@@ -0,0 +1,178 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements. See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License. You may obtain a copy of the License at
+ *
+ * http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+/**
+ * Prometheus text format parser for Solr Admin UI
+ *
+ * Parses Prometheus exposition format (text-based format for metrics)
+ * into a structured JavaScript object for consumption by the Admin UI.
+ */
+
+(function() {
+ 'use strict';
+
+ angular.module('solrAdminApp').factory('PrometheusParser', function() {
+
+ /**
+ * Parse Prometheus text format into structured JavaScript object
+ * @param {string} prometheusText - Raw Prometheus format text
+ * @returns {Object} Parsed metrics object keyed by metric name
+ */
+ function parsePrometheusFormat(prometheusText) {
+ if (!prometheusText || typeof prometheusText !== 'string') {
+ return {};
+ }
+
+ var metrics = {};
+ var lines = prometheusText.split('\n');
+ var currentMetricName = null;
+ var currentMetricType = null;
+ var currentMetricHelp = null;
+
+ for (var i = 0; i < lines.length; i++) {
+ var line = lines[i].trim();
+
+ // Skip empty lines
+ if (!line) continue;
+
+ // Parse HELP comments - use regex for robust parsing
+ if (line.indexOf('# HELP ') === 0) {
+ var helpMatch = line.match(/^# HELP ([a-zA-Z_:][a-zA-Z0-9_:]*)\s+(.*)$/);
+ if (helpMatch) {
+ currentMetricName = helpMatch[1];
+ currentMetricHelp = helpMatch[2];
+ }
+ }
+ // Parse TYPE comments
+ else if (line.indexOf('# TYPE ') === 0) {
+ var typeParts = line.substring(7).split(' ');
+ currentMetricName = typeParts[0];
+ currentMetricType = typeParts[1];
+
+ // Initialize metric entry
+ if (!metrics[currentMetricName]) {
+ metrics[currentMetricName] = {
+ type: currentMetricType,
+ help: currentMetricHelp || '',
+ samples: []
+ };
+ }
+ }
+ // Skip other comments
+ else if (line.charAt(0) === '#') {
+ continue;
+ }
+ // Parse metric sample
+ else {
+ var sample = parseMetricLine(line);
+ if (sample && sample.metricName) {
+ var baseMetricName = sample.metricName;
+ var metricSuffix = null;
+
+ // Only strip suffixes for histogram and summary types
+ // Check if metric name has known suffixes
+ if (sample.metricName.indexOf('_sum') === sample.metricName.length - 4) {
+ baseMetricName = sample.metricName.substring(0, sample.metricName.length - 4);
+ metricSuffix = '_sum';
+ } else if (sample.metricName.indexOf('_count') === sample.metricName.length - 6) {
+ baseMetricName = sample.metricName.substring(0, sample.metricName.length - 6);
+ metricSuffix = '_count';
+ } else if (sample.metricName.indexOf('_bucket') === sample.metricName.length - 7) {
+ baseMetricName = sample.metricName.substring(0, sample.metricName.length - 7);
+ metricSuffix = '_bucket';
+ } else if (sample.metricName.indexOf('_total') === sample.metricName.length - 6) {
+ // Handle _total suffix for summary metrics
+ baseMetricName = sample.metricName.substring(0, sample.metricName.length - 6);
+ metricSuffix = '_total';
+ }
+
+ // Check if base metric exists with histogram/summary type
+ var shouldGroup = false;
+ if (metricSuffix && metrics[baseMetricName]) {
+ var baseType = metrics[baseMetricName].type;
+ shouldGroup = (baseType === 'histogram' || baseType === 'summary');
+ }
+
+ // Use base name if we should group, otherwise use full name
+ var targetMetricName = (shouldGroup || metricSuffix) ? baseMetricName : sample.metricName;
+
+ if (!metrics[targetMetricName]) {
+ metrics[targetMetricName] = {
+ type: currentMetricType || 'unknown',
+ help: currentMetricHelp || '',
+ samples: []
+ };
+ }
+
+ // Add suffix info to sample if present
+ if (metricSuffix) {
+ sample.metricSuffix = metricSuffix;
+ }
+
+ metrics[targetMetricName].samples.push(sample);
+ }
+ }
+ }
+
+ return metrics;
+ }
+
+ /**
+ * Parse a single metric line
+ * @param {string} line - Metric line (e.g., 'metric_name{label1="val1"} 123.45' or with timestamp)
+ * @returns {Object|null} Parsed sample or null
+ */
+ function parseMetricLine(line) {
+ // Regex to match: metric_name{labels} value [timestamp]
+ // or: metric_name value [timestamp]
+ // The timestamp is optional and is a Unix timestamp in milliseconds
+ // The value pattern [^\s]+ matches scientific notation (e.g., 1.23e-4) and special values (NaN, +Inf, -Inf).
+ // The label set is matched as everything between { and }, allowing for escaped braces inside quoted label values.
+ var match = line.match(/^([a-zA-Z_:][a-zA-Z0-9_:]*)(?:\{((?:"(?:[^"\\]|\\.)*"|[^}])*)\})?\s+([^\s]+)(?:\s+\d+)?$/);
+
+ if (!match) return null;
+
+ var metricName = match[1];
+ var labelsStr = match[2] || '';
+ var value = parseFloat(match[3]);
+
+ // Parse labels
+ var labels = {};
+ if (labelsStr) {
+ // Match label="value" patterns - only allow valid Prometheus escape sequences (\\, \", \n)
+ var labelRegex = /([a-zA-Z_][a-zA-Z0-9_]*)="((?:[^"\\]|\\[\\"n])*)"/g;
+ var labelMatch;
+ while ((labelMatch = labelRegex.exec(labelsStr)) !== null) {
+ // Unescape label values - must unescape \\ first to avoid double-unescaping
+ var labelValue = labelMatch[2].replace(/\\\\/g, '\\').replace(/\\"/g, '"').replace(/\\n/g, '\n');
+ labels[labelMatch[1]] = labelValue;
+ }
+ }
+
+ return {
+ metricName: metricName,
+ labels: labels,
+ value: value
+ };
+ }
+
+ // Export public API
+ return {
+ parse: parsePrometheusFormat
+ };
+ });
+})();
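For orientation, a small sketch of what PrometheusParser.parse() returns for a couple of lines of exposition text (the metric and its values are made up):

    var text =
      '# HELP solr_disk_space_megabytes Disk space\n' +
      '# TYPE solr_disk_space_megabytes gauge\n' +
      'solr_disk_space_megabytes{category="CONTAINER",type="total_space"} 512000\n';
    var parsed = PrometheusParser.parse(text);
    // parsed['solr_disk_space_megabytes'] =>
    //   { type: 'gauge', help: 'Disk space', samples: [
    //       { metricName: 'solr_disk_space_megabytes',
    //         labels: { category: 'CONTAINER', type: 'total_space' }, value: 512000 } ] }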
diff --git a/solr/webapp/web/js/angular/services.js b/solr/webapp/web/js/angular/services.js
index 265873ac9fb..67eaa42e21f 100644
--- a/solr/webapp/web/js/angular/services.js
+++ b/solr/webapp/web/js/angular/services.js
@@ -22,17 +22,28 @@ solrAdminServices.factory('System',
return $resource('admin/info/system', {"wt":"json", "nodes": "@nodes", "_":Date.now()});
}])
.factory('Metrics',
- ['$resource', function($resource) {
- return $resource('admin/metrics', {"wt":"json", "nodes": "@nodes", "prefix":"@prefix", "core":"@core", "_":Date.now()}, {
- "prometheus": {
- method: 'GET',
- params: {wt: 'prometheus', core: '@core'},
- transformResponse: function(data) {
- return {data: data};
+ ['$resource', 'PrometheusParser', function($resource, PrometheusParser) {
+ return $resource('admin/metrics', {"wt":"prometheus", "node": "@node", "_":Date.now()}, {
+ get: {
+ method: 'GET',
+ transformResponse: function(data) {
+ // Parse the merged Prometheus text response
+ try {
+ return {metrics: PrometheusParser.parse(data)};
+ } catch (e) {
+ return {metrics: {}, error: e.message};
}
}
- });
- }])
+ },
+ "raw": {
+ method: 'GET',
+ params: {wt: 'prometheus', core: '@core'},
+ transformResponse: function(data) {
+ return {data: data};
+ }
+ }
+ });
+ }])
.factory('CollectionsV2',
function() {
solrApi.ApiClient.instance.basePath = '/api';
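As a reference for callers, a hedged sketch of the reworked Metrics factory in use (node and metric names are example values): 'get' returns metrics parsed by PrometheusParser, while 'raw' keeps the plain Prometheus text for the plugins view.

    Metrics.get({ node: 'host1:8983_solr', name: 'solr_disk_space_megabytes' },
      function (response) {
        // response.metrics is the object produced by PrometheusParser.parse()
        console.log(Object.keys(response.metrics));
      });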
diff --git a/solr/webapp/web/partials/cloud.html b/solr/webapp/web/partials/cloud.html
index 01d63499ee3..d5715fbaff3 100644
--- a/solr/webapp/web/partials/cloud.html
+++ b/solr/webapp/web/partials/cloud.html
@@ -37,7 +37,7 @@ limitations under the License.
<div>Ensemble size: {{zkState.ensembleSize}}</div>
<div>Ensemble mode: {{zkState.mode}}</div>
<div>Dynamic reconfig enabled: {{zkState.dynamicReconfig}}</div>
-
+
<table id="zk-table">
<thead>
<tr>
@@ -144,7 +144,7 @@ limitations under the License.
<th>CPU</th>
<th>Heap</th>
<th>Disk usage</th>
- <th>Requests</th>
+ <th ng-if="false">Requests</th>
<th>Collections</th>
<th>Replicas</th>
</tr>
@@ -154,7 +154,7 @@ limitations under the License.
<td rowspan="{{hosts[h.host].nodes.length}}"
ng-show="isFirstNodeForHost(key)">
<div class="host-name">{{h.host}}</div>
<span class="host-spec" ng-show="!showDetails[h.host]">
- <span title="{{h.system.system.uptime}}">{{h.system.system.name}}</span>
+ <span title="{{h.system.system.uptime}}">{{h.system.system.name}}</span>
<span title="free: {{h.memFree}}">{{h.memTotal}}</span>
<span title="{{h.system.jvm.name}}
{{h.system.jvm.version}}">Java {{h.system.jvm.spec.version}}</span>
<br/>Load: {{h.loadAvg}}
@@ -162,7 +162,7 @@ limitations under the License.
<div class="host-spec" ng-show="showDetails[h.host]">
{{h.system.system.name}} {{h.system.system.version}},
{{h.system.system.availableProcessors}}cpu<br/>
Uptime: {{h.uptime}}<br/>
- <span title="Used: {{h.memUsed}} - includes OS file-cache,
and it is normal for it to approach 100%">Memory: {{h.memTotal}}</span><br/>
+ <span title="Used: {{h.memUsed}} - includes OS file-cache,
and it is normal for it to approach 100%">Memory: {{h.memTotal}}</span><br/>
File descriptors:
{{h.openFileDescriptorCount}}/{{h.maxFileDescriptorCount}}<br/>
Disk: <span class="{{h.diskUsedPctStyle}}" title="Nodes may
use other disks too">{{h.diskTotal}} used: {{h.diskUsedPct}}%</span><br/>
Load: {{h.loadAvg}}
@@ -212,8 +212,8 @@ limitations under the License.
<div id="chart{{n.id}}" ng-show="showDetails[key] &&
!n.dead"></div>
</div>
</td>
- <td ng-class="{'dead-node': n.dead}"><div class="node-requests" title="1minRate: {{n.req1minRate}} 5minRate: {{n.req5minRate}} 15minRate: {{n.req15minRate}} p75: {{n.reqp75_ms}} p99: {{n.reqp99_ms}}" ng-show="!n.dead">
- RPM: {{n.req15minRate}}<br/>p95: {{n.reqp95_ms}}ms</div>
+ <td ng-if="false" ng-class="{'dead-node': n.dead}">
+ <!-- TODO: Bring back RPM and p99 in 10.x -->
</td>
<td ng-class="{'dead-node': n.dead}">
<div ng-show="!n.collections">(none)</div>
@@ -243,7 +243,7 @@ limitations under the License.
</tbody>
</table>
</div>
-
+
<div graph data="graphData" leaf-count="leafCount"
helper-data="helperData" id="graph-content" class="content clearfix"
ng-show="showGraph">
<div id="canvas"></div>