This is an automated email from the ASF dual-hosted git repository. brandonwilliams pushed a commit to branch cassandra-3.11 in repository https://gitbox.apache.org/repos/asf/cassandra.git
commit c87064147595f2f1260d0bb197afc5bc968db45d Merge: 681b6ca103 fd9f07dab8 Author: Brandon Williams <brandonwilli...@apache.org> AuthorDate: Fri Aug 18 05:08:36 2023 -0500 Merge branch 'cassandra-3.0' into cassandra-3.11 CHANGES.txt | 5 +++++ src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java | 1 + .../org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java | 2 ++ .../org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java | 1 + 4 files changed, 9 insertions(+) diff --cc CHANGES.txt index 8456e8cad4,ea9b1024e3..b8f489b183 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,13 -1,5 +1,18 @@@ -3.0.30 ++3.11.17 ++Merged from 3.0: + * Fix missing speculative retries in tablestats (CASSANDRA-18767) ++ ++ +3.11.16 + * Moved jflex from runtime to build dependencies (CASSANDRA-18664) + * Fix CAST function for float to decimal (CASSANDRA-18647) + * Suppress CVE-2022-45688 (CASSANDRA-18643) + * Remove unrepaired SSTables from garbage collection when only_purge_repaired_tombstones is true (CASSANDRA-14204) + * Wait for live endpoints in gossip waiting to settle (CASSANDRA-18543) + * Fix error message handling when trying to use CLUSTERING ORDER with non-clustering column (CASSANDRA-17818 + * Add keyspace and table name to exception message during ColumnSubselection deserialization (CASSANDRA-18346) + * Remove unnecessary String.format invocation in QueryProcessor when getting a prepared statement from cache (CASSANDRA-17202) +Merged from 3.0: * Fix Requires for Java for RPM package (CASSANDRA-18751) * Fix CQLSH online help topic link (CASSANDRA-17534) * Remove unused suppressions (CASSANDRA-18724) diff --cc src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java index 87bc527608,0000000000..8a32caf88e mode 100644,000000..100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/StatsTable.java @@@ -1,66 -1,0 +1,67 @@@ +/* + * Licensed to the Apache Software 
Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ + +package org.apache.cassandra.tools.nodetool.stats; + +import java.util.ArrayList; +import java.util.List; + +public class StatsTable +{ + public String name; + public boolean isIndex; + public boolean isLeveledSstable = false; + public Object sstableCount; + public String spaceUsedLive; + public String spaceUsedTotal; + public String spaceUsedBySnapshotsTotal; + public boolean offHeapUsed = false; + public String offHeapMemoryUsedTotal; + public Object sstableCompressionRatio; + public Object numberOfPartitionsEstimate; + public Object memtableCellCount; + public String memtableDataSize; + public boolean memtableOffHeapUsed = false; + public String memtableOffHeapMemoryUsed; + public Object memtableSwitchCount; ++ public Object speculativeRetries; + public long localReadCount; + public double localReadLatencyMs; + public long localWriteCount; + public double localWriteLatencyMs; + public Object pendingFlushes; + public Object bloomFilterFalsePositives; + public Object bloomFilterFalseRatio; + public String bloomFilterSpaceUsed; + public boolean bloomFilterOffHeapUsed = false; + public String bloomFilterOffHeapMemoryUsed; + public boolean indexSummaryOffHeapUsed = false; + public String 
indexSummaryOffHeapMemoryUsed; + public boolean compressionMetadataOffHeapUsed = false; + public String compressionMetadataOffHeapMemoryUsed; + public long compactedPartitionMinimumBytes; + public long compactedPartitionMaximumBytes; + public long compactedPartitionMeanBytes; + public double percentRepaired; + public double averageLiveCellsPerSliceLastFiveMinutes; + public long maximumLiveCellsPerSliceLastFiveMinutes; + public double averageTombstonesPerSliceLastFiveMinutes; + public long maximumTombstonesPerSliceLastFiveMinutes; + public String droppedMutations; + public List<String> sstablesInEachLevel = new ArrayList<>(); +} diff --cc src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java index 300895f288,0000000000..35b2481c1a mode 100644,000000..100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsHolder.java @@@ -1,372 -1,0 +1,374 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.tools.nodetool.stats; + +import java.util.*; + +import javax.management.InstanceNotFoundException; + +import com.google.common.collect.ArrayListMultimap; + +import org.apache.cassandra.db.*; +import org.apache.cassandra.io.util.*; +import org.apache.cassandra.metrics.*; +import org.apache.cassandra.tools.*; + +public class TableStatsHolder implements StatsHolder +{ + public final List<StatsKeyspace> keyspaces; + public final int numberOfTables; + + public TableStatsHolder(NodeProbe probe, boolean humanReadable, boolean ignore, List<String> tableNames) + { + this.keyspaces = new ArrayList<>(); + this.numberOfTables = probe.getNumberOfTables(); + this.initializeKeyspaces(probe, humanReadable, ignore, tableNames); + } + + @Override + public Map<String, Object> convert2Map() + { + HashMap<String, Object> mpRet = new HashMap<>(); + mpRet.put("total_number_of_tables", numberOfTables); + for (StatsKeyspace keyspace : keyspaces) + { + // store each keyspace's metrics to map + HashMap<String, Object> mpKeyspace = new HashMap<>(); + mpKeyspace.put("read_latency", keyspace.readLatency()); + mpKeyspace.put("read_count", keyspace.readCount); + mpKeyspace.put("read_latency_ms", keyspace.readLatency()); + mpKeyspace.put("write_count", keyspace.writeCount); + mpKeyspace.put("write_latency_ms", keyspace.writeLatency()); + mpKeyspace.put("pending_flushes", keyspace.pendingFlushes); + + // store each table's metrics to map + List<StatsTable> tables = keyspace.tables; + Map<String, Map<String, Object>> mpTables = new HashMap<>(); + for (StatsTable table : tables) + { + Map<String, Object> mpTable = new HashMap<>(); + + mpTable.put("sstable_count", table.sstableCount); + mpTable.put("sstables_in_each_level", table.sstablesInEachLevel); + mpTable.put("space_used_live", table.spaceUsedLive); + mpTable.put("space_used_total", table.spaceUsedTotal); + mpTable.put("space_used_by_snapshots_total", table.spaceUsedBySnapshotsTotal); + if (table.offHeapUsed) 
+ mpTable.put("off_heap_memory_used_total", table.offHeapMemoryUsedTotal); + mpTable.put("sstable_compression_ratio", table.sstableCompressionRatio); + mpTable.put("number_of_partitions_estimate", table.numberOfPartitionsEstimate); + mpTable.put("memtable_cell_count", table.memtableCellCount); + mpTable.put("memtable_data_size", table.memtableDataSize); + if (table.memtableOffHeapUsed) + mpTable.put("memtable_off_heap_memory_used", table.memtableOffHeapMemoryUsed); + mpTable.put("memtable_switch_count", table.memtableSwitchCount); ++ mpTable.put("speculative_retries", table.speculativeRetries); + mpTable.put("local_read_count", table.localReadCount); + mpTable.put("local_read_latency_ms", String.format("%01.3f", table.localReadLatencyMs)); + mpTable.put("local_write_count", table.localWriteCount); + mpTable.put("local_write_latency_ms", String.format("%01.3f", table.localWriteLatencyMs)); + mpTable.put("pending_flushes", table.pendingFlushes); + mpTable.put("percent_repaired", table.percentRepaired); + mpTable.put("bloom_filter_false_positives", table.bloomFilterFalsePositives); + mpTable.put("bloom_filter_false_ratio", String.format("%01.5f", table.bloomFilterFalseRatio)); + mpTable.put("bloom_filter_space_used", table.bloomFilterSpaceUsed); + if (table.bloomFilterOffHeapUsed) + mpTable.put("bloom_filter_off_heap_memory_used", table.bloomFilterOffHeapMemoryUsed); + if (table.indexSummaryOffHeapUsed) + mpTable.put("index_summary_off_heap_memory_used", table.indexSummaryOffHeapMemoryUsed); + if (table.compressionMetadataOffHeapUsed) + mpTable.put("compression_metadata_off_heap_memory_used", + table.compressionMetadataOffHeapMemoryUsed); + mpTable.put("compacted_partition_minimum_bytes", table.compactedPartitionMinimumBytes); + mpTable.put("compacted_partition_maximum_bytes", table.compactedPartitionMaximumBytes); + mpTable.put("compacted_partition_mean_bytes", table.compactedPartitionMeanBytes); + mpTable.put("average_live_cells_per_slice_last_five_minutes", + 
table.averageLiveCellsPerSliceLastFiveMinutes); + mpTable.put("maximum_live_cells_per_slice_last_five_minutes", + table.maximumLiveCellsPerSliceLastFiveMinutes); + mpTable.put("average_tombstones_per_slice_last_five_minutes", + table.averageTombstonesPerSliceLastFiveMinutes); + mpTable.put("maximum_tombstones_per_slice_last_five_minutes", + table.maximumTombstonesPerSliceLastFiveMinutes); + mpTable.put("dropped_mutations", table.droppedMutations); + + mpTables.put(table.name, mpTable); + } + mpKeyspace.put("tables", mpTables); + mpRet.put(keyspace.name, mpKeyspace); + } + return mpRet; + } + + private void initializeKeyspaces(NodeProbe probe, boolean humanReadable, boolean ignore, List<String> tableNames) + { + OptionFilter filter = new OptionFilter(ignore, tableNames); + ArrayListMultimap<String, ColumnFamilyStoreMBean> selectedTableMbeans = ArrayListMultimap.create(); + Map<String, StatsKeyspace> keyspaceStats = new HashMap<>(); + + // get a list of table stores + Iterator<Map.Entry<String, ColumnFamilyStoreMBean>> tableMBeans = probe.getColumnFamilyStoreMBeanProxies(); + + while (tableMBeans.hasNext()) + { + Map.Entry<String, ColumnFamilyStoreMBean> entry = tableMBeans.next(); + String keyspaceName = entry.getKey(); + ColumnFamilyStoreMBean tableProxy = entry.getValue(); + + if (filter.isKeyspaceIncluded(keyspaceName)) + { + StatsKeyspace stats = keyspaceStats.get(keyspaceName); + if (stats == null) + { + stats = new StatsKeyspace(probe, keyspaceName); + keyspaceStats.put(keyspaceName, stats); + } + stats.add(tableProxy); + + if (filter.isTableIncluded(keyspaceName, tableProxy.getTableName())) + selectedTableMbeans.put(keyspaceName, tableProxy); + } + } + + // make sure all specified keyspace and tables exist + filter.verifyKeyspaces(probe.getKeyspaces()); + filter.verifyTables(); + + // get metrics of keyspace + for (Map.Entry<String, Collection<ColumnFamilyStoreMBean>> entry : selectedTableMbeans.asMap().entrySet()) + { + String keyspaceName = entry.getKey(); 
+ Collection<ColumnFamilyStoreMBean> tables = entry.getValue(); + StatsKeyspace statsKeyspace = keyspaceStats.get(keyspaceName); + + // get metrics of table statistics for this keyspace + for (ColumnFamilyStoreMBean table : tables) + { + String tableName = table.getTableName(); + StatsTable statsTable = new StatsTable(); + statsTable.name = tableName; + statsTable.isIndex = tableName.contains("."); + statsTable.sstableCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveSSTableCount"); + int[] leveledSStables = table.getSSTableCountPerLevel(); + if (leveledSStables != null) + { + statsTable.isLeveledSstable = true; + + for (int level = 0; level < leveledSStables.length; level++) + { + int count = leveledSStables[level]; + long maxCount = 4L; // for L0 + if (level > 0) + maxCount = (long) Math.pow(table.getLevelFanoutSize(), level); + // show max threshold for level when exceeded + statsTable.sstablesInEachLevel.add(count + ((count > maxCount) ? "/" + maxCount : "")); + } + } + + Long memtableOffHeapSize = null; + Long bloomFilterOffHeapSize = null; + Long indexSummaryOffHeapSize = null; + Long compressionMetadataOffHeapSize = null; + Long offHeapSize = null; + Double percentRepaired = null; + + try + { + memtableOffHeapSize = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableOffHeapSize"); + bloomFilterOffHeapSize = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterOffHeapMemoryUsed"); + indexSummaryOffHeapSize = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "IndexSummaryOffHeapMemoryUsed"); + compressionMetadataOffHeapSize = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "CompressionMetadataOffHeapMemoryUsed"); + offHeapSize = memtableOffHeapSize + bloomFilterOffHeapSize + indexSummaryOffHeapSize + compressionMetadataOffHeapSize; + percentRepaired = (Double) probe.getColumnFamilyMetric(keyspaceName, tableName, "PercentRepaired"); + } + catch (RuntimeException e) + { + // 
offheap-metrics introduced in 2.1.3 - older versions do not have the appropriate mbeans + if (!(e.getCause() instanceof InstanceNotFoundException)) + throw e; + } + + statsTable.spaceUsedLive = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveDiskSpaceUsed"), humanReadable); + statsTable.spaceUsedTotal = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "TotalDiskSpaceUsed"), humanReadable); + statsTable.spaceUsedBySnapshotsTotal = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "SnapshotsSize"), humanReadable); + if (offHeapSize != null) + { + statsTable.offHeapUsed = true; + statsTable.offHeapMemoryUsedTotal = format(offHeapSize, humanReadable); + + } + if (percentRepaired != null) + { + statsTable.percentRepaired = Math.round(100 * percentRepaired) / 100.0; + } + statsTable.sstableCompressionRatio = probe.getColumnFamilyMetric(keyspaceName, tableName, "CompressionRatio"); + Object estimatedPartitionCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "EstimatedPartitionCount"); + if (Long.valueOf(-1L).equals(estimatedPartitionCount)) + { + estimatedPartitionCount = 0L; + } + statsTable.numberOfPartitionsEstimate = estimatedPartitionCount; + + statsTable.memtableCellCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableColumnsCount"); + statsTable.memtableDataSize = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableLiveDataSize"), humanReadable); + if (memtableOffHeapSize != null) + { + statsTable.memtableOffHeapUsed = true; + statsTable.memtableOffHeapMemoryUsed = format(memtableOffHeapSize, humanReadable); + } + statsTable.memtableSwitchCount = probe.getColumnFamilyMetric(keyspaceName, tableName, "MemtableSwitchCount"); ++ statsTable.speculativeRetries = probe.getColumnFamilyMetric(keyspaceName, tableName, "SpeculativeRetries"); + statsTable.localReadCount = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, 
"ReadLatency")).getCount(); + + double localReadLatency = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "ReadLatency")).getMean() / 1000; + double localRLatency = localReadLatency > 0 ? localReadLatency : Double.NaN; + statsTable.localReadLatencyMs = localRLatency; + statsTable.localWriteCount = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "WriteLatency")).getCount(); + + double localWriteLatency = ((CassandraMetricsRegistry.JmxTimerMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "WriteLatency")).getMean() / 1000; + double localWLatency = localWriteLatency > 0 ? localWriteLatency : Double.NaN; + statsTable.localWriteLatencyMs = localWLatency; + statsTable.pendingFlushes = probe.getColumnFamilyMetric(keyspaceName, tableName, "PendingFlushes"); + + statsTable.bloomFilterFalsePositives = probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterFalsePositives"); + statsTable.bloomFilterFalseRatio = probe.getColumnFamilyMetric(keyspaceName, tableName, "RecentBloomFilterFalseRatio"); + statsTable.bloomFilterSpaceUsed = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "BloomFilterDiskSpaceUsed"), humanReadable); + + if (bloomFilterOffHeapSize != null) + { + statsTable.bloomFilterOffHeapUsed = true; + statsTable.bloomFilterOffHeapMemoryUsed = format(bloomFilterOffHeapSize, humanReadable); + } + + if (indexSummaryOffHeapSize != null) + { + statsTable.indexSummaryOffHeapUsed = true; + statsTable.indexSummaryOffHeapMemoryUsed = format(indexSummaryOffHeapSize, humanReadable); + } + if (compressionMetadataOffHeapSize != null) + { + statsTable.compressionMetadataOffHeapUsed = true; + statsTable.compressionMetadataOffHeapMemoryUsed = format(compressionMetadataOffHeapSize, humanReadable); + } + statsTable.compactedPartitionMinimumBytes = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MinPartitionSize"); + 
statsTable.compactedPartitionMaximumBytes = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MaxPartitionSize"); + statsTable.compactedPartitionMeanBytes = (Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "MeanPartitionSize"); + + CassandraMetricsRegistry.JmxHistogramMBean histogram = (CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "LiveScannedHistogram"); + statsTable.averageLiveCellsPerSliceLastFiveMinutes = histogram.getMean(); + statsTable.maximumLiveCellsPerSliceLastFiveMinutes = histogram.getMax(); + + histogram = (CassandraMetricsRegistry.JmxHistogramMBean) probe.getColumnFamilyMetric(keyspaceName, tableName, "TombstoneScannedHistogram"); + statsTable.averageTombstonesPerSliceLastFiveMinutes = histogram.getMean(); + statsTable.maximumTombstonesPerSliceLastFiveMinutes = histogram.getMax(); + statsTable.droppedMutations = format((Long) probe.getColumnFamilyMetric(keyspaceName, tableName, "DroppedMutations"), humanReadable); + statsKeyspace.tables.add(statsTable); + } + keyspaces.add(statsKeyspace); + } + } + + private String format(long bytes, boolean humanReadable) + { + return humanReadable ? FileUtils.stringifyFileSize(bytes) : Long.toString(bytes); + } + + /** + * Used for filtering keyspaces and tables to be displayed using the tablestats command. 
+ */ + private static class OptionFilter + { + private final Map<String, List<String>> filter = new HashMap<>(); + private final Map<String, List<String>> verifier = new HashMap<>(); // Same as filter initially, but we remove tables every time we've checked them for inclusion + // in isTableIncluded() so that we detect if those table requested don't exist (verifyTables()) + private final List<String> filterList = new ArrayList<>(); + private final boolean ignoreMode; + + OptionFilter(boolean ignoreMode, List<String> filterList) + { + this.filterList.addAll(filterList); + this.ignoreMode = ignoreMode; + + for (String s : filterList) + { + String[] keyValues = s.split("\\.", 2); + + // build the map that stores the keyspaces and tables to use + if (!filter.containsKey(keyValues[0])) + { + filter.put(keyValues[0], new ArrayList<>()); + verifier.put(keyValues[0], new ArrayList<>()); + } + + if (keyValues.length == 2) + { + filter.get(keyValues[0]).add(keyValues[1]); + verifier.get(keyValues[0]).add(keyValues[1]); + } + } + } + + public boolean isTableIncluded(String keyspace, String table) + { + // supplying empty params list is treated as wanting to display all keyspaces and tables + if (filterList.isEmpty()) + return !ignoreMode; + + List<String> tables = filter.get(keyspace); + + // no such keyspace is in the map + if (tables == null) + return ignoreMode; + // only a keyspace with no tables was supplied + // so ignore or include (based on the flag) every column family in specified keyspace + else if (tables.isEmpty()) + return !ignoreMode; + + // keyspace exists, and it contains specific table + verifier.get(keyspace).remove(table); + return ignoreMode ^ tables.contains(table); + } + + public boolean isKeyspaceIncluded(String keyspace) + { + // supplying empty params list is treated as wanting to display all keyspaces and tables + if (filterList.isEmpty()) + return !ignoreMode; + + // Note that if there is any table for the keyspace, we want to include the keyspace 
regardless + * of the ignoreMode, since the ignoreMode then applies to the table inside the keyspace but the + * keyspace itself is not ignored + return filter.get(keyspace) != null || ignoreMode; + } + + public void verifyKeyspaces(List<String> keyspaces) + { + for (String ks : verifier.keySet()) + if (!keyspaces.contains(ks)) + throw new IllegalArgumentException("Unknown keyspace: " + ks); + } + + public void verifyTables() + { + for (String ks : filter.keySet()) + if (!verifier.get(ks).isEmpty()) + throw new IllegalArgumentException("Unknown tables: " + verifier.get(ks) + " in keyspace: " + ks); + } + } +} diff --cc src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java index e1e7b428d9,0000000000..dbca4bf8e6 mode 100644,000000..100644 --- a/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java +++ b/src/java/org/apache/cassandra/tools/nodetool/stats/TableStatsPrinter.java @@@ -1,114 -1,0 +1,115 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ + +package org.apache.cassandra.tools.nodetool.stats; + +import java.io.PrintStream; +import java.util.List; + +public class TableStatsPrinter +{ + public static StatsPrinter from(String format) + { + switch (format) + { + case "json": + return new StatsPrinter.JsonPrinter(); + case "yaml": + return new StatsPrinter.YamlPrinter(); + default: + return new DefaultPrinter(); + } + } + + private static class DefaultPrinter implements StatsPrinter<TableStatsHolder> + { + @Override + public void print(TableStatsHolder data, PrintStream out) + { + out.println("Total number of tables: " + data.numberOfTables); + out.println("----------------"); + + List<StatsKeyspace> keyspaces = data.keyspaces; + for (StatsKeyspace keyspace : keyspaces) + { + // print each keyspace's information + out.println("Keyspace : " + keyspace.name); + out.println("\tRead Count: " + keyspace.readCount); + out.println("\tRead Latency: " + keyspace.readLatency() + " ms"); + out.println("\tWrite Count: " + keyspace.writeCount); + out.println("\tWrite Latency: " + keyspace.writeLatency() + " ms"); + out.println("\tPending Flushes: " + keyspace.pendingFlushes); + + // print each table's information + List<StatsTable> tables = keyspace.tables; + for (StatsTable table : tables) + { + out.println("\t\tTable" + (table.isIndex ? 
" (index): " : ": ") + table.name); + out.println("\t\tSSTable count: " + table.sstableCount); + if (table.isLeveledSstable) + out.println("\t\tSSTables in each level: [" + String.join(", ", + table.sstablesInEachLevel) + "]"); + + out.println("\t\tSpace used (live): " + table.spaceUsedLive); + out.println("\t\tSpace used (total): " + table.spaceUsedTotal); + out.println("\t\tSpace used by snapshots (total): " + table.spaceUsedBySnapshotsTotal); + + if (table.offHeapUsed) + out.println("\t\tOff heap memory used (total): " + table.offHeapMemoryUsedTotal); + out.println("\t\tSSTable Compression Ratio: " + table.sstableCompressionRatio); + out.println("\t\tNumber of partitions (estimate): " + table.numberOfPartitionsEstimate); + out.println("\t\tMemtable cell count: " + table.memtableCellCount); + out.println("\t\tMemtable data size: " + table.memtableDataSize); + + if (table.memtableOffHeapUsed) + out.println("\t\tMemtable off heap memory used: " + table.memtableOffHeapMemoryUsed); + out.println("\t\tMemtable switch count: " + table.memtableSwitchCount); ++ out.println("\t\tSpeculative retries: " + table.speculativeRetries); + out.println("\t\tLocal read count: " + table.localReadCount); + out.printf("\t\tLocal read latency: %01.3f ms%n", table.localReadLatencyMs); + out.println("\t\tLocal write count: " + table.localWriteCount); + out.printf("\t\tLocal write latency: %01.3f ms%n", table.localWriteLatencyMs); + out.println("\t\tPending flushes: " + table.pendingFlushes); + out.println("\t\tPercent repaired: " + table.percentRepaired); + + out.println("\t\tBloom filter false positives: " + table.bloomFilterFalsePositives); + out.printf("\t\tBloom filter false ratio: %01.5f%n", table.bloomFilterFalseRatio); + out.println("\t\tBloom filter space used: " + table.bloomFilterSpaceUsed); + + if (table.bloomFilterOffHeapUsed) + out.println("\t\tBloom filter off heap memory used: " + table.bloomFilterOffHeapMemoryUsed); + if (table.indexSummaryOffHeapUsed) + 
out.println("\t\tIndex summary off heap memory used: " + table.indexSummaryOffHeapMemoryUsed); + if (table.compressionMetadataOffHeapUsed) + out.println("\t\tCompression metadata off heap memory used: " + table.compressionMetadataOffHeapMemoryUsed); + + out.println("\t\tCompacted partition minimum bytes: " + table.compactedPartitionMinimumBytes); + out.println("\t\tCompacted partition maximum bytes: " + table.compactedPartitionMaximumBytes); + out.println("\t\tCompacted partition mean bytes: " + table.compactedPartitionMeanBytes); + out.println("\t\tAverage live cells per slice (last five minutes): " + table.averageLiveCellsPerSliceLastFiveMinutes); + out.println("\t\tMaximum live cells per slice (last five minutes): " + table.maximumLiveCellsPerSliceLastFiveMinutes); + out.println("\t\tAverage tombstones per slice (last five minutes): " + table.averageTombstonesPerSliceLastFiveMinutes); + out.println("\t\tMaximum tombstones per slice (last five minutes): " + table.maximumTombstonesPerSliceLastFiveMinutes); + out.println("\t\tDropped Mutations: " + table.droppedMutations); + out.println(""); + } + out.println("----------------"); + } + } + } +} --------------------------------------------------------------------- To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org For additional commands, e-mail: commits-h...@cassandra.apache.org