This is an automated email from the ASF dual-hosted git repository.

edimitrova pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git
commit 1315d0c96f4625a76296f58d431f97669e5178c2 Author: Ekaterina Dimitrova <ekaterina.dimitr...@datastax.com> AuthorDate: Thu Feb 3 22:28:41 2022 -0500 Transfer parameters to the newly introduced configuration framework (5) patch by Ekaterina Dimitrova; reviewed by Caleb Rackliffe, David Capwell, Michael Semb Wever and Benjamin Lerer for CASSANDRA-15234 --- conf/cassandra.yaml | 40 +-- src/java/org/apache/cassandra/config/Config.java | 79 +++--- .../cassandra/config/DatabaseDescriptor.java | 272 ++++++++++++--------- .../config/SmallestDataStorageMebibytes.java | 11 + .../cassandra/cql3/statements/BatchStatement.java | 4 +- src/java/org/apache/cassandra/db/ColumnIndex.java | 12 +- src/java/org/apache/cassandra/db/Memtable.java | 4 +- .../org/apache/cassandra/db/RowIndexEntry.java | 16 +- .../apache/cassandra/db/marshal/AbstractType.java | 2 +- .../org/apache/cassandra/io/sstable/IndexInfo.java | 2 +- .../org/apache/cassandra/io/util/FileUtils.java | 10 +- .../apache/cassandra/repair/ValidationManager.java | 2 +- .../cassandra/service/ActiveRepairService.java | 20 +- .../service/ActiveRepairServiceMBean.java | 5 + .../apache/cassandra/service/StorageService.java | 18 +- .../apache/cassandra/streaming/StreamSession.java | 2 +- .../tools/nodetool/GetColumnIndexSize.java | 2 +- .../tools/nodetool/SetColumnIndexSize.java | 6 +- test/conf/cassandra-murmur.yaml | 2 +- test/conf/cassandra-old.yaml | 3 +- ...dra-sslcontextfactory-invalidconfiguration.yaml | 2 +- test/conf/cassandra-sslcontextfactory.yaml | 2 +- test/conf/unit-test-conf/test-native-port.yaml | 2 +- .../cassandra/distributed/impl/InstanceConfig.java | 2 +- .../distributed/test/LargeColumnTest.java | 8 +- .../cassandra/simulator/ClusterSimulation.java | 2 +- .../cassandra/config/DatabaseDescriptorTest.java | 39 +-- .../LoadOldYAMLBackwardCompatibilityTest.java | 13 +- .../config/YamlConfigurationLoaderTest.java | 8 +- .../db/compaction/CompactionsCQLTest.java | 6 +- .../io/sstable/SSTableWriterTestBase.java | 2 +- .../org/apache/cassandra/repair/ValidatorTest.java | 16 +- .../cassandra/service/ClientWarningsTest.java | 2 +- .../cassandra/service/ProtocolBetaVersionTest.java | 2 +- .../tools/nodetool/SetGetColumnIndexSizeTest.java | 12 +- .../cassandra/transport/CQLConnectionTest.java | 4 +- 36 files changed, 365 insertions(+), 269 deletions(-) diff --git a/conf/cassandra.yaml b/conf/cassandra.yaml index 3d8168b..0c4acb8 100644 --- a/conf/cassandra.yaml +++ b/conf/cassandra.yaml @@ -582,8 +582,8 @@ concurrent_materialized_view_writes: 32 # accepting writes when the limit is exceeded until a flush completes, # and will trigger a flush based on memtable_cleanup_threshold # If omitted, Cassandra will set both to 1/4 the size of the heap. -# memtable_heap_space_in_mb: 2048 -# memtable_offheap_space_in_mb: 2048 +# memtable_heap_space: 2048MiB +# memtable_offheap_space: 2048MiB # memtable_cleanup_threshold is deprecated. The default calculation # is the only reasonable choice. See the comments on memtable_flush_writers @@ -620,7 +620,7 @@ memtable_allocation_type: heap_buffers # # For more details see https://issues.apache.org/jira/browse/CASSANDRA-14096. # -# repair_session_space_in_mb: +# repair_session_space: # Total space to use for commit logs on disk. # @@ -771,7 +771,7 @@ native_transport_port: 9042 # The maximum size of allowed frame. Frame (requests) larger than this will # be rejected as invalid. The default is 16MB. If you're changing this parameter, # you may want to adjust max_value_size_in_mb accordingly. 
This should be positive and less than 2048. -# native_transport_max_frame_size_in_mb: 16 +# native_transport_max_frame_size: 16MiB # The maximum number of concurrent client connections. # The default is -1, which means unlimited. @@ -836,7 +836,7 @@ rpc_keepalive: true # /proc/sys/net/ipv4/tcp_wmem # /proc/sys/net/ipv4/tcp_wmem # and 'man tcp' -# internode_socket_send_buffer_size_in_bytes: +# internode_socket_send_buffer_size: # Uncomment to set socket buffer size for internode communication # Note that when setting this, the buffer size is limited by net.core.wmem_max @@ -878,7 +878,7 @@ snapshot_links_per_second: 0 # - but, Cassandra will keep the collation index in memory for hot # rows (as part of the key cache), so a larger granularity means # you can cache more hot rows -column_index_size_in_kb: 64 +column_index_size: 64KiB # Per sstable indexed key cache entries (the collation index in memory # mentioned above) exceeding this size will not be held on heap. @@ -887,7 +887,7 @@ column_index_size_in_kb: 64 # # Note that this size refers to the size of the # serialized index information and not the size of the partition. -column_index_cache_size_in_kb: 2 +column_index_cache_size: 2KiB # Number of simultaneous compactions to allow, NOT including # validation "compactions" for anti-entropy repair. Simultaneous @@ -1011,17 +1011,17 @@ request_timeout: 10000ms # See (CASSANDRA-14358) for details. # # The amount of time to wait for internode tcp connections to establish. -# internode_tcp_connect_timeout_in_ms: 2000 +# internode_tcp_connect_timeout: 2000ms # # The amount of time unacknowledged data is allowed on a connection before we throw out the connection # Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 # (it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 # which picks up the OS default and configure the net.ipv4.tcp_retries2 sysctl to be ~8. -# internode_tcp_user_timeout_in_ms: 30000 +# internode_tcp_user_timeout: 30000ms # The amount of time unacknowledged data is allowed on a streaming connection. # The default is 5 minutes. Increase it or set it to 0 in order to increase the timeout. -# internode_streaming_tcp_user_timeout_in_ms: 300000 +# internode_streaming_tcp_user_timeout: 300000ms # Global, per-endpoint and per-connection limits imposed on messages queued for delivery to other nodes # and waiting to be processed on arrival from other nodes in the cluster. These limits are applied to the on-wire @@ -1029,7 +1029,7 @@ request_timeout: 10000ms # # The basic per-link limit is consumed in isolation before any endpoint or global limit is imposed. # Each node-pair has three links: urgent, small and large. So any given node may have a maximum of -# N*3*(internode_application_send_queue_capacity_in_bytes+internode_application_receive_queue_capacity_in_bytes) +# N*3*(internode_application_send_queue_capacity+internode_application_receive_queue_capacity) # messages queued without any coordination between them although in practice, with token-aware routing, only RF*tokens # nodes should need to communicate with significant bandwidth. # @@ -1038,12 +1038,12 @@ request_timeout: 10000ms # The global limit is imposed on all messages exceeding the per-link limit, simultaneously with the per-endpoint limit, # on all links to or from any node in the cluster. 
# -# internode_application_send_queue_capacity_in_bytes: 4194304 #4MiB -# internode_application_send_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB -# internode_application_send_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB -# internode_application_receive_queue_capacity_in_bytes: 4194304 #4MiB -# internode_application_receive_queue_reserve_endpoint_capacity_in_bytes: 134217728 #128MiB -# internode_application_receive_queue_reserve_global_capacity_in_bytes: 536870912 #512MiB +# internode_application_send_queue_capacity: 4MiB +# internode_application_send_queue_reserve_endpoint_capacity: 128MiB +# internode_application_send_queue_reserve_global_capacity: 512MiB +# internode_application_receive_queue_capacity: 4MiB +# internode_application_receive_queue_reserve_endpoint_capacity: 128MiB +# internode_application_receive_queue_reserve_global_capacity: 512MiB # How long before a node logs slow queries. Select queries that take longer than @@ -1385,10 +1385,10 @@ replica_filtering_protection: # Log WARN on any multiple-partition batch size exceeding this value. 5KiB per batch by default. # Caution should be taken on increasing the size of this threshold as it can lead to node instability. -batch_size_warn_threshold_in_kb: 5 +batch_size_warn_threshold: 5KiB # Fail any multiple-partition batch exceeding this value. 50KiB (10x warn threshold) by default. -batch_size_fail_threshold_in_kb: 50 +batch_size_fail_threshold: 50KiB # Log WARN on any batches not of type LOGGED that span across more partitions than this limit unlogged_batch_across_partitions_warn_threshold: 10 @@ -1411,7 +1411,7 @@ compaction_tombstone_warning_threshold: 100000 # Maximum size of any value in SSTables. Safety measure to detect SSTable corruption # early. Any value size larger than this threshold will result in marking an SSTable # as corrupted. This should be positive and less than 2048. -# max_value_size_in_mb: 256 +# max_value_size: 256MiB # ** Impact on keyspace creation ** # If replication factor is not mentioned as part of keyspace creation, default_keyspace_rf would apply. 
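The yaml migration above is mechanical: each bare number in an implicit unit becomes the same quantity with an explicit suffix, parsed into the new spec types. A minimal sketch of that equivalence — the demo class itself is hypothetical, the constructors and accessors are the ones visible in the Config.java diff below, and the Cassandra tree is assumed to be on the classpath:

import org.apache.cassandra.config.DataStorageSpec;
import org.apache.cassandra.config.SmallestDataStorageKibibytes;
import org.apache.cassandra.config.SmallestDataStorageMebibytes;

public class UnitEquivalenceSketch
{
    public static void main(String[] args)
    {
        // old: column_index_size_in_kb: 64        new: column_index_size: 64KiB
        SmallestDataStorageKibibytes columnIndexSize = new SmallestDataStorageKibibytes("64KiB");
        System.out.println(columnIndexSize.toBytesAsInt());   // 65536

        // old: native_transport_max_frame_size_in_mb: 16     new: native_transport_max_frame_size: 16MiB
        SmallestDataStorageMebibytes frameSize = new SmallestDataStorageMebibytes("16MiB");
        System.out.println(frameSize.toMebibytesAsInt());     // 16

        // old: internode_application_send_queue_capacity_in_bytes: 4194304   new: 4MiB
        DataStorageSpec sendQueueCapacity = new DataStorageSpec("4MiB");
        System.out.println(sendQueueCapacity.toBytes());      // 4194304
    }
}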
diff --git a/src/java/org/apache/cassandra/config/Config.java b/src/java/org/apache/cassandra/config/Config.java index 6c22d5f..7ee8c42 100644 --- a/src/java/org/apache/cassandra/config/Config.java +++ b/src/java/org/apache/cassandra/config/Config.java @@ -148,14 +148,17 @@ public class Config public Integer concurrent_replicates = null; public int memtable_flush_writers = 0; - public Integer memtable_heap_space_in_mb; - public Integer memtable_offheap_space_in_mb; + @Replaces(oldName = "memtable_heap_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE, deprecated = true) + public SmallestDataStorageMebibytes memtable_heap_space; + @Replaces(oldName = "memtable_offheap_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE, deprecated = true) + public SmallestDataStorageMebibytes memtable_offheap_space; public Float memtable_cleanup_threshold = null; // Limit the maximum depth of repair session merkle trees @Deprecated public volatile Integer repair_session_max_tree_depth = null; - public volatile Integer repair_session_space_in_mb = null; + @Replaces(oldName = "repair_session_space_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE, deprecated = true) + public volatile SmallestDataStorageMebibytes repair_session_space = null; public volatile boolean use_offheap_merkle_trees = true; @@ -183,39 +186,52 @@ public class Config public String broadcast_rpc_address; public boolean rpc_keepalive = true; - public Integer internode_max_message_size_in_bytes; + @Replaces(oldName = "internode_max_message_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated=true) + public DataStorageSpec internode_max_message_size; - @Replaces(oldName = "internode_send_buff_size_in_bytes", deprecated = true) - public int internode_socket_send_buffer_size_in_bytes = 0; - @Replaces(oldName = "internode_recv_buff_size_in_bytes", deprecated = true) - public int internode_socket_receive_buffer_size_in_bytes = 0; + @Replaces(oldName = "internode_socket_send_buffer_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + @Replaces(oldName = "internode_send_buff_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_socket_send_buffer_size = new DataStorageSpec("0B"); + @Replaces(oldName = "internode_socket_receive_buffer_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + @Replaces(oldName = "internode_recv_buff_size_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_socket_receive_buffer_size = new DataStorageSpec("0B"); - // TODO: derive defaults from system memory settings? 
- public int internode_application_send_queue_capacity_in_bytes = 1 << 22; // 4MiB - public int internode_application_send_queue_reserve_endpoint_capacity_in_bytes = 1 << 27; // 128MiB - public int internode_application_send_queue_reserve_global_capacity_in_bytes = 1 << 29; // 512MiB + @Replaces(oldName = "internode_application_send_queue_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_send_queue_capacity = new DataStorageSpec("4MiB"); + @Replaces(oldName = "internode_application_send_queue_reserve_endpoint_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_send_queue_reserve_endpoint_capacity = new DataStorageSpec("128MiB"); + @Replaces(oldName = "internode_application_send_queue_reserve_global_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_send_queue_reserve_global_capacity = new DataStorageSpec("512MiB"); - public int internode_application_receive_queue_capacity_in_bytes = 1 << 22; // 4MiB - public int internode_application_receive_queue_reserve_endpoint_capacity_in_bytes = 1 << 27; // 128MiB - public int internode_application_receive_queue_reserve_global_capacity_in_bytes = 1 << 29; // 512MiB + // TODO: derive defaults from system memory settings? + @Replaces(oldName = "internode_application_receive_queue_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_receive_queue_capacity = new DataStorageSpec("4MiB"); + @Replaces(oldName = "internode_application_receive_queue_reserve_endpoint_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_receive_queue_reserve_endpoint_capacity = new DataStorageSpec("128MiB"); + @Replaces(oldName = "internode_application_receive_queue_reserve_global_capacity_in_bytes", converter = Converters.BYTES_DATASTORAGE, deprecated = true) + public DataStorageSpec internode_application_receive_queue_reserve_global_capacity = new DataStorageSpec("512MiB"); // Defensive settings for protecting Cassandra from true network partitions. See (CASSANDRA-14358) for details. // The amount of time to wait for internode tcp connections to establish. - public volatile int internode_tcp_connect_timeout_in_ms = 2000; + @Replaces(oldName = "internode_tcp_connect_timeout_in_ms", converter = Converters.MILLIS_DURATION, deprecated = true) + public volatile SmallestDurationMilliseconds internode_tcp_connect_timeout = new SmallestDurationMilliseconds("2s"); // The amount of time unacknowledged data is allowed on a connection before we throw out the connection // Note this is only supported on Linux + epoll, and it appears to behave oddly above a setting of 30000 // (it takes much longer than 30s) as of Linux 4.12. If you want something that high set this to 0 // (which picks up the OS default) and configure the net.ipv4.tcp_retries2 sysctl to be ~8. - public volatile int internode_tcp_user_timeout_in_ms = 30000; - // Similar to internode_tcp_user_timeout_in_ms but used specifically for streaming connection. 
+ @Replaces(oldName = "internode_tcp_user_timeout_in_ms", converter = Converters.MILLIS_DURATION, deprecated = true) + public volatile SmallestDurationMilliseconds internode_tcp_user_timeout = new SmallestDurationMilliseconds("30s"); + // Similar to internode_tcp_user_timeout but used specifically for streaming connection. // The default is 5 minutes. Increase it or set it to 0 in order to increase the timeout. - public volatile int internode_streaming_tcp_user_timeout_in_ms = 300_000; // 5 minutes + @Replaces(oldName = "internode_streaming_tcp_user_timeout_in_ms", converter = Converters.MILLIS_DURATION, deprecated = true) + public volatile SmallestDurationMilliseconds internode_streaming_tcp_user_timeout = new SmallestDurationMilliseconds("300s"); // 5 minutes public boolean start_native_transport = true; public int native_transport_port = 9042; public Integer native_transport_port_ssl = null; public int native_transport_max_threads = 128; - public int native_transport_max_frame_size_in_mb = 16; + @Replaces(oldName = "native_transport_max_frame_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE, deprecated = true) + public SmallestDataStorageMebibytes native_transport_max_frame_size = new SmallestDataStorageMebibytes("16MiB"); public volatile long native_transport_max_concurrent_connections = -1L; public volatile long native_transport_max_concurrent_connections_per_ip = -1L; public boolean native_transport_flush_in_batches_legacy = false; @@ -224,27 +240,34 @@ public class Config public volatile long native_transport_max_concurrent_requests_in_bytes = -1L; public volatile boolean native_transport_rate_limiting_enabled = false; public volatile int native_transport_max_requests_per_second = 1000000; + // not exposed in the yaml public int native_transport_receive_queue_capacity_in_bytes = 1 << 20; // 1MiB @Deprecated public Integer native_transport_max_negotiable_protocol_version = null; /** - * Max size of values in SSTables, in MegaBytes. - * Default is the same as the native protocol frame limit: 256Mb. + * Max size of values in SSTables, in MebiBytes. + * Default is the same as the native protocol frame limit: 256MiB. * See AbstractType for how it is used. 
*/ - public int max_value_size_in_mb = 256; + @Replaces(oldName = "max_value_size_in_mb", converter = Converters.MEBIBYTES_DATA_STORAGE, deprecated = true) + public SmallestDataStorageMebibytes max_value_size = new SmallestDataStorageMebibytes("256MiB"); public boolean snapshot_before_compaction = false; public boolean auto_snapshot = true; public volatile long snapshot_links_per_second = 0; /* if the size of columns or super-columns are more than this, indexing will kick in */ - public volatile int column_index_size_in_kb = 64; - public volatile int column_index_cache_size_in_kb = 2; - public volatile int batch_size_warn_threshold_in_kb = 5; - public volatile int batch_size_fail_threshold_in_kb = 50; + @Replaces(oldName = "column_index_size_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true) + public volatile SmallestDataStorageKibibytes column_index_size = new SmallestDataStorageKibibytes("64KiB"); + @Replaces(oldName = "column_index_cache_size_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true) + public volatile SmallestDataStorageKibibytes column_index_cache_size = new SmallestDataStorageKibibytes("2KiB"); + @Replaces(oldName = "batch_size_warn_threshold_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true) + public volatile SmallestDataStorageKibibytes batch_size_warn_threshold = new SmallestDataStorageKibibytes("5KiB"); + @Replaces(oldName = "batch_size_fail_threshold_in_kb", converter = Converters.KIBIBYTES_DATASTORAGE, deprecated = true) + public volatile SmallestDataStorageKibibytes batch_size_fail_threshold = new SmallestDataStorageKibibytes("50KiB"); + public Integer unlogged_batch_across_partitions_warn_threshold = 10; public volatile Integer concurrent_compactors; @Replaces(oldName = "compaction_throughput_mb_per_sec", converter = Converters.MEBIBYTES_PER_SECOND_DATA_RATE, deprecated = true) @@ -355,8 +378,6 @@ public class Config public int cache_load_timeout_seconds = 30; - public Long paxos_cache_size_in_mb = null; - private static boolean isClientMode = false; private static Supplier<Config> overrideLoadConfig = null; diff --git a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java index d573def..3b1ef66 100644 --- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java +++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java @@ -79,7 +79,7 @@ import static java.util.concurrent.TimeUnit.MILLISECONDS; import static org.apache.cassandra.config.CassandraRelevantProperties.OS_ARCH; import static org.apache.cassandra.config.CassandraRelevantProperties.SUN_ARCH_DATA_MODEL; import static org.apache.cassandra.io.util.FileUtils.ONE_GB; -import static org.apache.cassandra.io.util.FileUtils.ONE_MB; +import static org.apache.cassandra.io.util.FileUtils.ONE_MIB; import static org.apache.cassandra.utils.Clock.Global.logInitializationOutcome; public class DatabaseDescriptor @@ -469,24 +469,22 @@ public class DatabaseDescriptor if (conf.file_cache_round_up == null) conf.file_cache_round_up = conf.disk_optimization_strategy == Config.DiskOptimizationStrategy.spinning; - if (conf.memtable_offheap_space_in_mb == null) - conf.memtable_offheap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)); - if (conf.memtable_offheap_space_in_mb < 0) - throw new ConfigurationException("memtable_offheap_space_in_mb must be positive, but was " + conf.memtable_offheap_space_in_mb, false); + if (conf.memtable_offheap_space == null) + 
conf.memtable_offheap_space = SmallestDataStorageMebibytes.inMebibytes( (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))); // for the moment, we default to twice as much on-heap space as off-heap, as heap overhead is very large - if (conf.memtable_heap_space_in_mb == null) - conf.memtable_heap_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)); - if (conf.memtable_heap_space_in_mb <= 0) - throw new ConfigurationException("memtable_heap_space_in_mb must be positive, but was " + conf.memtable_heap_space_in_mb, false); - logger.info("Global memtable on-heap threshold is enabled at {}MB", conf.memtable_heap_space_in_mb); - if (conf.memtable_offheap_space_in_mb == 0) + if (conf.memtable_heap_space == null) + conf.memtable_heap_space = SmallestDataStorageMebibytes.inMebibytes((int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))); + if (conf.memtable_heap_space.toMebibytesAsInt() == 0) + throw new ConfigurationException("memtable_heap_space must be positive, but was " + conf.memtable_heap_space, false); + logger.info("Global memtable on-heap threshold is enabled at {}", conf.memtable_heap_space); + if (conf.memtable_offheap_space.toMebibytesAsInt() == 0) logger.info("Global memtable off-heap threshold is disabled, HeapAllocator will be used instead"); else - logger.info("Global memtable off-heap threshold is enabled at {}MB", conf.memtable_offheap_space_in_mb); + logger.info("Global memtable off-heap threshold is enabled at {}", conf.memtable_offheap_space); if (conf.repair_session_max_tree_depth != null) { - logger.warn("repair_session_max_tree_depth has been deprecated and should be removed from cassandra.yaml. Use repair_session_space_in_mb instead"); + logger.warn("repair_session_max_tree_depth has been deprecated and should be removed from cassandra.yaml. 
Use repair_session_space instead"); if (conf.repair_session_max_tree_depth < 10) throw new ConfigurationException("repair_session_max_tree_depth should not be < 10, but was " + conf.repair_session_max_tree_depth); if (conf.repair_session_max_tree_depth > 20) @@ -497,27 +495,30 @@ public class DatabaseDescriptor conf.repair_session_max_tree_depth = 20; } - if (conf.repair_session_space_in_mb == null) - conf.repair_session_space_in_mb = Math.max(1, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576))); + if (conf.repair_session_space == null) + conf.repair_session_space = SmallestDataStorageMebibytes.inMebibytes(Math.max(1, (int) (Runtime.getRuntime().maxMemory() / (16 * 1048576)))); - if (conf.repair_session_space_in_mb < 1) - throw new ConfigurationException("repair_session_space_in_mb must be > 0, but was " + conf.repair_session_space_in_mb); - else if (conf.repair_session_space_in_mb > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))) - logger.warn("A repair_session_space_in_mb of " + conf.repair_session_space_in_mb + " megabytes is likely to cause heap pressure"); + if (conf.repair_session_space.toMebibytes() < 1) + throw new ConfigurationException("repair_session_space must be > 0, but was " + conf.repair_session_space); + else if (conf.repair_session_space.toMebibytesAsInt() > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))) + logger.warn("A repair_session_space of " + conf.repair_session_space + " is likely to cause heap pressure"); checkForLowestAcceptedTimeouts(conf); - checkValidForByteConversion(conf.native_transport_max_frame_size_in_mb, - "native_transport_max_frame_size_in_mb", ByteUnit.MEBI_BYTES); - - checkValidForByteConversion(conf.column_index_size_in_kb, - "column_index_size_in_kb", ByteUnit.KIBI_BYTES); - - checkValidForByteConversion(conf.column_index_cache_size_in_kb, - "column_index_cache_size_in_kb", ByteUnit.KIBI_BYTES); + long valueInBytes = conf.native_transport_max_frame_size.toBytes(); + if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE) + { + throw new ConfigurationException(String.format("%s must be a positive value < %dB, but was %dB", + "native_transport_max_frame_size", + conf.native_transport_max_frame_size.getUnit() + .convert(Integer.MAX_VALUE, DataStorageSpec.DataStorageUnit.BYTES), + valueInBytes), + false); + } - checkValidForByteConversion(conf.batch_size_warn_threshold_in_kb, - "batch_size_warn_threshold_in_kb", ByteUnit.KIBI_BYTES); + checkValidForByteConversion(conf.column_index_size, "column_index_size"); + checkValidForByteConversion(conf.column_index_cache_size, "column_index_cache_size"); + checkValidForByteConversion(conf.batch_size_warn_threshold, "batch_size_warn_threshold"); if (conf.native_transport_max_negotiable_protocol_version != null) logger.warn("The configuration option native_transport_max_negotiable_protocol_version has been deprecated " + @@ -770,11 +771,11 @@ public class DatabaseDescriptor if (conf.snapshot_links_per_second < 0) throw new ConfigurationException("snapshot_links_per_second must be >= 0"); - if (conf.max_value_size_in_mb <= 0) - throw new ConfigurationException("max_value_size_in_mb must be positive", false); - else if (conf.max_value_size_in_mb >= 2048) - throw new ConfigurationException("max_value_size_in_mb must be smaller than 2048, but was " - + conf.max_value_size_in_mb, false); + if (conf.max_value_size.toMebibytesAsInt() == 0) + throw new ConfigurationException("max_value_size must be positive", false); + else if (conf.max_value_size.toMebibytesAsInt() >= 2048) + throw 
new ConfigurationException("max_value_size must be smaller than 2048, but was " + + conf.max_value_size.toString(), false); switch (conf.disk_optimization_strategy) { @@ -796,26 +797,30 @@ public class DatabaseDescriptor throw new ConfigurationException("legacy_ssl_storage_port_enabled is true (enabled) with internode encryption disabled (none). Enable encryption or disable the legacy ssl storage port."); } } - Integer maxMessageSize = conf.internode_max_message_size_in_bytes; - if (maxMessageSize != null) + + if (conf.internode_max_message_size != null) { - if (maxMessageSize > conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes) - throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_receive_queue_reserve_endpoint_capacity_in_bytes", false); + long maxMessageSize = conf.internode_max_message_size.toBytes(); + + if (maxMessageSize > conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytes()) + throw new ConfigurationException("internode_max_message_size must no exceed internode_application_receive_queue_reserve_endpoint_capacity", false); - if (maxMessageSize > conf.internode_application_receive_queue_reserve_global_capacity_in_bytes) - throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_receive_queue_reserve_global_capacity_in_bytes", false); + if (maxMessageSize > conf.internode_application_receive_queue_reserve_global_capacity.toBytes()) + throw new ConfigurationException("internode_max_message_size must no exceed internode_application_receive_queue_reserve_global_capacity", false); - if (maxMessageSize > conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes) - throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_send_queue_reserve_endpoint_capacity_in_bytes", false); + if (maxMessageSize > conf.internode_application_send_queue_reserve_endpoint_capacity.toBytes()) + throw new ConfigurationException("internode_max_message_size must no exceed internode_application_send_queue_reserve_endpoint_capacity", false); - if (maxMessageSize > conf.internode_application_send_queue_reserve_global_capacity_in_bytes) - throw new ConfigurationException("internode_max_message_size_in_mb must no exceed internode_application_send_queue_reserve_global_capacity_in_bytes", false); + if (maxMessageSize > conf.internode_application_send_queue_reserve_global_capacity.toBytes()) + throw new ConfigurationException("internode_max_message_size must no exceed internode_application_send_queue_reserve_global_capacity", false); } else { - conf.internode_max_message_size_in_bytes = - Math.min(conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes, - conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes); + long maxMessageSizeInBytes = + Math.min(conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytes(), + conf.internode_application_send_queue_reserve_endpoint_capacity.toBytes()); + + conf.internode_max_message_size = DataStorageSpec.inBytes(maxMessageSizeInBytes); } validateMaxConcurrentAutoUpgradeTasksConf(conf.max_concurrent_automatic_sstable_upgrades); @@ -901,7 +906,7 @@ public class DatabaseDescriptor static int calculateDefaultSpaceInMB(String type, String path, String setting, int preferredSizeInMB, long totalSpaceInBytes, long totalSpaceNumerator, long totalSpaceDenominator) { - final long totalSizeInMB = totalSpaceInBytes / ONE_MB; + final 
long totalSizeInMB = totalSpaceInBytes / ONE_MIB; final int minSizeInMB = Ints.saturatedCast(totalSpaceNumerator * totalSizeInMB / totalSpaceDenominator); if (minSizeInMB < preferredSizeInMB) @@ -1061,13 +1066,46 @@ public class DatabaseDescriptor @VisibleForTesting static void checkForLowestAcceptedTimeouts(Config conf) { - conf.read_request_timeout = max("read_request_timeout", conf.read_request_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.range_request_timeout = max("range_request_timeout", conf.range_request_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.request_timeout = max("request_timeout", conf.request_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.write_request_timeout = max("write_request_timeout", conf.write_request_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.cas_contention_timeout = max("cas_contention_timeout", conf.cas_contention_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.counter_write_request_timeout = max("counter_write_request_timeout", conf.counter_write_request_timeout, LOWEST_ACCEPTED_TIMEOUT); - conf.truncate_request_timeout = max("truncate_request_timeout", conf.truncate_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + if (conf.read_request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("read_request_timeout", conf.read_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.read_request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.range_request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("range_request_timeout", conf.range_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.range_request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("request_timeout", conf.request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.write_request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("write_request_timeout", conf.write_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.write_request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.cas_contention_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("cas_contention_timeout", conf.cas_contention_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.cas_contention_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.counter_write_request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("counter_write_request_timeout", conf.counter_write_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.counter_write_request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } + + if (conf.truncate_request_timeout.toMillisecondsAsInt() < LOWEST_ACCEPTED_TIMEOUT.toMillisecondsAsInt()) + { + logInfo("truncate_request_timeout", conf.truncate_request_timeout, LOWEST_ACCEPTED_TIMEOUT); + conf.truncate_request_timeout = LOWEST_ACCEPTED_TIMEOUT; + } } private static void logInfo(String property, SmallestDurationMilliseconds actualValue, SmallestDurationMilliseconds lowestAcceptedValue) @@ -1378,12 +1416,12 @@ public class DatabaseDescriptor public static int getMaxValueSize() { - return conf.max_value_size_in_mb * 1024 * 1024; + return conf.max_value_size.toBytesAsInt(); } public static void setMaxValueSize(int maxValueSizeInBytes) { - conf.max_value_size_in_mb = maxValueSizeInBytes / 1024 / 1024; + conf.max_value_size = 
SmallestDataStorageMebibytes.inBytes(maxValueSizeInBytes); } /** @@ -1470,54 +1508,56 @@ public class DatabaseDescriptor public static int getColumnIndexSize() { - return (int) ByteUnit.KIBI_BYTES.toBytes(conf.column_index_size_in_kb); + return conf.column_index_size.toBytesAsInt(); } - public static int getColumnIndexSizeInKB() + public static int getColumnIndexSizeInKiB() { - return conf.column_index_size_in_kb; + return conf.column_index_size.toKibibytesAsInt(); } public static void setColumnIndexSize(int val) { - checkValidForByteConversion(val, "column_index_size_in_kb", ByteUnit.KIBI_BYTES); - conf.column_index_size_in_kb = val; + SmallestDataStorageKibibytes memory = SmallestDataStorageKibibytes.inKibibytes(val); + checkValidForByteConversion(memory, "column_index_size"); + conf.column_index_size = SmallestDataStorageKibibytes.inKibibytes(val); } public static int getColumnIndexCacheSize() { - return (int) ByteUnit.KIBI_BYTES.toBytes(conf.column_index_cache_size_in_kb); + return conf.column_index_cache_size.toBytesAsInt(); } - public static int getColumnIndexCacheSizeInKB() + public static int getColumnIndexCacheSizeInKiB() { - return conf.column_index_cache_size_in_kb; + return conf.column_index_cache_size.toKibibytesAsInt(); } public static void setColumnIndexCacheSize(int val) { - checkValidForByteConversion(val, "column_index_cache_size_in_kb", ByteUnit.KIBI_BYTES); - conf.column_index_cache_size_in_kb = val; + SmallestDataStorageKibibytes memory = SmallestDataStorageKibibytes.inKibibytes(val); + checkValidForByteConversion(memory, "column_index_cache_size"); + conf.column_index_cache_size = SmallestDataStorageKibibytes.inKibibytes(val); } public static int getBatchSizeWarnThreshold() { - return (int) ByteUnit.KIBI_BYTES.toBytes(conf.batch_size_warn_threshold_in_kb); + return conf.batch_size_warn_threshold.toBytesAsInt(); } - public static int getBatchSizeWarnThresholdInKB() + public static int getBatchSizeWarnThresholdInKiB() { - return conf.batch_size_warn_threshold_in_kb; + return conf.batch_size_warn_threshold.toKibibytesAsInt(); } public static long getBatchSizeFailThreshold() { - return ByteUnit.KIBI_BYTES.toBytes(conf.batch_size_fail_threshold_in_kb); + return conf.batch_size_fail_threshold.toBytesAsInt(); } public static int getBatchSizeFailThresholdInKB() { - return conf.batch_size_fail_threshold_in_kb; + return conf.batch_size_fail_threshold.toKibibytesAsInt(); } public static int getUnloggedBatchAcrossPartitionsWarnThreshold() @@ -1525,15 +1565,16 @@ public class DatabaseDescriptor return conf.unlogged_batch_across_partitions_warn_threshold; } - public static void setBatchSizeWarnThresholdInKB(int threshold) + public static void setBatchSizeWarnThresholdInKiB(int threshold) { - checkValidForByteConversion(threshold, "batch_size_warn_threshold_in_kb", ByteUnit.KIBI_BYTES); - conf.batch_size_warn_threshold_in_kb = threshold; + SmallestDataStorageKibibytes storage = SmallestDataStorageKibibytes.inKibibytes(threshold); + checkValidForByteConversion(storage, "batch_size_warn_threshold"); + conf.batch_size_warn_threshold = SmallestDataStorageKibibytes.inKibibytes(threshold); } public static void setBatchSizeFailThresholdInKB(int threshold) { - conf.batch_size_fail_threshold_in_kb = threshold; + conf.batch_size_fail_threshold = SmallestDataStorageKibibytes.inKibibytes(threshold); } public static Collection<String> getInitialTokens() @@ -2198,83 +2239,83 @@ public class DatabaseDescriptor public static int getInternodeSocketSendBufferSizeInBytes() { - return 
conf.internode_socket_send_buffer_size_in_bytes; + return conf.internode_socket_send_buffer_size.toBytesAsInt(); } public static int getInternodeSocketReceiveBufferSizeInBytes() { - return conf.internode_socket_receive_buffer_size_in_bytes; + return conf.internode_socket_receive_buffer_size.toBytesAsInt(); } public static int getInternodeApplicationSendQueueCapacityInBytes() { - return conf.internode_application_send_queue_capacity_in_bytes; + return conf.internode_application_send_queue_capacity.toBytesAsInt(); } public static int getInternodeApplicationSendQueueReserveEndpointCapacityInBytes() { - return conf.internode_application_send_queue_reserve_endpoint_capacity_in_bytes; + return conf.internode_application_send_queue_reserve_endpoint_capacity.toBytesAsInt(); } public static int getInternodeApplicationSendQueueReserveGlobalCapacityInBytes() { - return conf.internode_application_send_queue_reserve_global_capacity_in_bytes; + return conf.internode_application_send_queue_reserve_global_capacity.toBytesAsInt(); } public static int getInternodeApplicationReceiveQueueCapacityInBytes() { - return conf.internode_application_receive_queue_capacity_in_bytes; + return conf.internode_application_receive_queue_capacity.toBytesAsInt(); } public static int getInternodeApplicationReceiveQueueReserveEndpointCapacityInBytes() { - return conf.internode_application_receive_queue_reserve_endpoint_capacity_in_bytes; + return conf.internode_application_receive_queue_reserve_endpoint_capacity.toBytesAsInt(); } public static int getInternodeApplicationReceiveQueueReserveGlobalCapacityInBytes() { - return conf.internode_application_receive_queue_reserve_global_capacity_in_bytes; + return conf.internode_application_receive_queue_reserve_global_capacity.toBytesAsInt(); } public static int getInternodeTcpConnectTimeoutInMS() { - return conf.internode_tcp_connect_timeout_in_ms; + return conf.internode_tcp_connect_timeout.toMillisecondsAsInt(); } public static void setInternodeTcpConnectTimeoutInMS(int value) { - conf.internode_tcp_connect_timeout_in_ms = value; + conf.internode_tcp_connect_timeout = SmallestDurationMilliseconds.inMilliseconds(value); } public static int getInternodeTcpUserTimeoutInMS() { - return conf.internode_tcp_user_timeout_in_ms; + return conf.internode_tcp_user_timeout.toMillisecondsAsInt(); } public static void setInternodeTcpUserTimeoutInMS(int value) { - conf.internode_tcp_user_timeout_in_ms = value; + conf.internode_tcp_user_timeout = SmallestDurationMilliseconds.inMilliseconds(value); } public static int getInternodeStreamingTcpUserTimeoutInMS() { - return conf.internode_streaming_tcp_user_timeout_in_ms; + return conf.internode_streaming_tcp_user_timeout.toMillisecondsAsInt(); } public static void setInternodeStreamingTcpUserTimeoutInMS(int value) { - conf.internode_streaming_tcp_user_timeout_in_ms = value; + conf.internode_streaming_tcp_user_timeout = SmallestDurationMilliseconds.inMilliseconds(value); } public static int getInternodeMaxMessageSizeInBytes() { - return conf.internode_max_message_size_in_bytes; + return conf.internode_max_message_size.toBytesAsInt(); } @VisibleForTesting public static void setInternodeMaxMessageSizeInBytes(int value) { - conf.internode_max_message_size_in_bytes = value; + conf.internode_max_message_size = DataStorageSpec.inBytes(value); } public static boolean startNativeTransport() @@ -2320,12 +2361,12 @@ public class DatabaseDescriptor public static int getNativeTransportMaxFrameSize() { - return (int) 
ByteUnit.MEBI_BYTES.toBytes(conf.native_transport_max_frame_size_in_mb); + return conf.native_transport_max_frame_size.toBytesAsInt(); } public static void setNativeTransportMaxFrameSize(int bytes) { - conf.native_transport_max_frame_size_in_mb = (int) ByteUnit.MEBI_BYTES.fromBytes(bytes); + conf.native_transport_max_frame_size = SmallestDataStorageMebibytes.inBytes(bytes); } public static long getNativeTransportMaxConcurrentConnections() @@ -2953,14 +2994,14 @@ public class DatabaseDescriptor return conf.inter_dc_tcp_nodelay; } - public static long getMemtableHeapSpaceInMb() + public static long getMemtableHeapSpaceInMiB() { - return conf.memtable_heap_space_in_mb; + return conf.memtable_heap_space.toMebibytes(); } - public static long getMemtableOffheapSpaceInMb() + public static long getMemtableOffheapSpaceInMiB() { - return conf.memtable_offheap_space_in_mb; + return conf.memtable_offheap_space.toMebibytes(); } public static Config.MemtableAllocationType getMemtableAllocationType() @@ -2984,21 +3025,21 @@ public class DatabaseDescriptor conf.repair_session_max_tree_depth = depth; } - public static int getRepairSessionSpaceInMegabytes() + public static int getRepairSessionSpaceInMiB() { - return conf.repair_session_space_in_mb; + return conf.repair_session_space.toMebibytesAsInt(); } - public static void setRepairSessionSpaceInMegabytes(int sizeInMegabytes) + public static void setRepairSessionSpaceInMiB(int sizeInMiB) { - if (sizeInMegabytes < 1) - throw new ConfigurationException("Cannot set repair_session_space_in_mb to " + sizeInMegabytes + - " < 1 megabyte"); - else if (sizeInMegabytes > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))) - logger.warn("A repair_session_space_in_mb of " + conf.repair_session_space_in_mb + - " megabytes is likely to cause heap pressure."); + if (sizeInMiB < 1) + throw new ConfigurationException("Cannot set repair_session_space to " + sizeInMiB + + " < 1 mebibyte"); + else if (sizeInMiB > (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576))) + logger.warn("A repair_session_space of " + conf.repair_session_space + + " is likely to cause heap pressure."); - conf.repair_session_space_in_mb = sizeInMegabytes; + conf.repair_session_space = SmallestDataStorageMebibytes.inMebibytes(sizeInMiB); } public static Float getMemtableCleanupThreshold() @@ -3420,11 +3461,18 @@ public class DatabaseDescriptor /** * Ensures passed in configuration value is positive and will not overflow when converted to Bytes */ - private static void checkValidForByteConversion(int val, final String name, final ByteUnit unit) + private static void checkValidForByteConversion(final SmallestDataStorageKibibytes value, String name) { - if (val < 0 || unit.willOverflowInBytes(val)) - throw new ConfigurationException(String.format("%s must be positive value < %d, but was %d", - name, unit.overflowThreshold(), val), false); + long valueInBytes = value.toBytes(); + if (valueInBytes < 0 || valueInBytes > Integer.MAX_VALUE) + { + throw new ConfigurationException(String.format("%s must be a positive value < %dB, but was %dB", + name, + value.getUnit() + .convert(Integer.MAX_VALUE, DataStorageSpec.DataStorageUnit.BYTES), + valueInBytes), + false); + } } public static int getValidationPreviewPurgeHeadStartInSec() diff --git a/src/java/org/apache/cassandra/config/SmallestDataStorageMebibytes.java b/src/java/org/apache/cassandra/config/SmallestDataStorageMebibytes.java index 8f54936..514d1bc 100644 --- a/src/java/org/apache/cassandra/config/SmallestDataStorageMebibytes.java +++ 
b/src/java/org/apache/cassandra/config/SmallestDataStorageMebibytes.java @@ -52,4 +52,15 @@ public final class SmallestDataStorageMebibytes extends DataStorageSpec { return new SmallestDataStorageMebibytes(mebibytes, DataStorageSpec.DataStorageUnit.MEBIBYTES); } + + /** + * Creates a {@code SmallestDataStorageMebibytes} of the specified amount of bytes. + * + * @param bytes the amount of bytes + * @return a data storage + */ + public static SmallestDataStorageMebibytes inBytes(long bytes) + { + return new SmallestDataStorageMebibytes(bytes, DataStorageSpec.DataStorageUnit.BYTES); + } } diff --git a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java index dabe05e..b7e9c07 100644 --- a/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/BatchStatement.java @@ -350,9 +350,9 @@ public class BatchStatement implements CQLStatement if (size > failThreshold) { Tracing.trace(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold), - FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)"); + FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold)"); logger.error(format, tableNames, FBUtilities.prettyPrintMemory(size), FBUtilities.prettyPrintMemory(failThreshold), - FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold_in_kb)"); + FBUtilities.prettyPrintMemory(size - failThreshold), " (see batch_size_fail_threshold)"); throw new InvalidRequestException("Batch too large"); } else if (logger.isWarnEnabled()) diff --git a/src/java/org/apache/cassandra/db/ColumnIndex.java b/src/java/org/apache/cassandra/db/ColumnIndex.java index b872300..15405bd 100644 --- a/src/java/org/apache/cassandra/db/ColumnIndex.java +++ b/src/java/org/apache/cassandra/db/ColumnIndex.java @@ -36,16 +36,16 @@ import org.apache.cassandra.utils.ByteBufferUtil; /** * Column index builder used by {@link org.apache.cassandra.io.sstable.format.big.BigTableWriter}. - * For index entries that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}, + * For index entries that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size}, * this uses the serialization logic as in {@link RowIndexEntry}. */ public class ColumnIndex { - // used, if the row-index-entry reaches config column_index_cache_size_in_kb + // used, if the row-index-entry reaches config column_index_cache_size private DataOutputBuffer buffer; // used to track the size of the serialized size of row-index-entry (unused for buffer) private int indexSamplesSerializedSize; - // used, until the row-index-entry reaches config column_index_cache_size_in_kb + // used, until the row-index-entry reaches config column_index_cache_size private final List<IndexInfo> indexSamples = new ArrayList<>(); private DataOutputBuffer reusableBuffer; @@ -199,8 +199,8 @@ public class ColumnIndex } columnIndexCount++; - // First, we collect the IndexInfo objects until we reach Config.column_index_cache_size_in_kb in an ArrayList. - // When column_index_cache_size_in_kb is reached, we switch to byte-buffer mode. + // First, we collect the IndexInfo objects until we reach Config.column_index_cache_size in an ArrayList. + // When column_index_cache_size is reached, we switch to byte-buffer mode. 
if (buffer == null) { indexSamplesSerializedSize += idxSerializer.serializedSize(cIndexInfo); @@ -285,7 +285,7 @@ public class ColumnIndex } // If we serialize the IndexInfo objects directly in the code above into 'buffer', // we have to write the offsets to these here. The offsets have already been collected - // in indexOffsets[]. buffer is != null, if it exceeds Config.column_index_cache_size_in_kb. + // in indexOffsets[]. buffer is != null, if it exceeds Config.column_index_cache_size. // In the other case, when buffer==null, the offsets are serialized in RowIndexEntry.IndexedEntry.serialize(). if (buffer != null) RowIndexEntry.Serializer.serializeOffsets(buffer, indexOffsets, columnIndexCount); diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java index c755591..21bf31a 100644 --- a/src/java/org/apache/cassandra/db/Memtable.java +++ b/src/java/org/apache/cassandra/db/Memtable.java @@ -87,8 +87,8 @@ public class Memtable implements Comparable<Memtable> private static MemtablePool createMemtableAllocatorPool() { - long heapLimit = DatabaseDescriptor.getMemtableHeapSpaceInMb() << 20; - long offHeapLimit = DatabaseDescriptor.getMemtableOffheapSpaceInMb() << 20; + long heapLimit = DatabaseDescriptor.getMemtableHeapSpaceInMiB() << 20; + long offHeapLimit = DatabaseDescriptor.getMemtableOffheapSpaceInMiB() << 20; final float cleaningThreshold = DatabaseDescriptor.getMemtableCleanupThreshold(); final MemtableCleaner cleaner = ColumnFamilyStore::flushLargestMemtable; switch (DatabaseDescriptor.getMemtableAllocationType()) diff --git a/src/java/org/apache/cassandra/db/RowIndexEntry.java b/src/java/org/apache/cassandra/db/RowIndexEntry.java index 895bea9..153dbac 100644 --- a/src/java/org/apache/cassandra/db/RowIndexEntry.java +++ b/src/java/org/apache/cassandra/db/RowIndexEntry.java @@ -76,7 +76,7 @@ import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics; * samples</i> (list of {@link IndexInfo} objects) and those who don't. * For each <i>portion</i> of data for a single partition in the data file, * an index sample is created. The size of that <i>portion</i> is defined - * by {@link org.apache.cassandra.config.Config#column_index_size_in_kb}. + * by {@link org.apache.cassandra.config.Config#column_index_size}. * </p> * <p> * Index entries with less than 2 index samples will just store the @@ -97,9 +97,9 @@ import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics; * "acceptable" amount of index samples per partition and those * with an "enormous" amount of index samples. The barrier * is controlled by the configuration parameter - * {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}. + * {@link org.apache.cassandra.config.Config#column_index_cache_size}. * Index entries with a total serialized size of index samples up to - * {@code column_index_cache_size_in_kb} will be held in an array. + * {@code column_index_cache_size} will be held in an array. * Index entries exceeding that value will always be accessed from * disk. 
* </p> @@ -110,9 +110,9 @@ import static org.apache.cassandra.metrics.CassandraMetricsRegistry.Metrics; * <li>{@link RowIndexEntry} just stores the offset in the data file.</li> * <li>{@link IndexedEntry} is for index entries with index samples * and used for both current and legacy sstables, which do not exceed - * {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb}.</li> + * {@link org.apache.cassandra.config.Config#column_index_cache_size}.</li> * <li>{@link ShallowIndexedEntry} is for index entries with index samples - * that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb} + * that exceed {@link org.apache.cassandra.config.Config#column_index_cache_size} * for sstables with an offset table to the index samples.</li> * </ul> * <p> @@ -193,7 +193,7 @@ public class RowIndexEntry<T> implements IMeasurableMemory * @param headerLength deletion time of {@link RowIndexEntry} * @param columnIndexCount number of {@link IndexInfo} entries in the {@link RowIndexEntry} * @param indexedPartSize serialized size of all serialized {@link IndexInfo} objects and their offsets - * @param indexSamples list with IndexInfo offsets (if total serialized size is less than {@link org.apache.cassandra.config.Config#column_index_cache_size_in_kb} + * @param indexSamples list with IndexInfo offsets (if total serialized size is less than {@link org.apache.cassandra.config.Config#column_index_cache_size} * @param offsets offsets of IndexInfo offsets * @param idxInfoSerializer the {@link IndexInfo} serializer */ @@ -205,14 +205,14 @@ public class RowIndexEntry<T> implements IMeasurableMemory { // If the "partition building code" in BigTableWriter.append() via ColumnIndex returns a list // of IndexInfo objects, which is the case if the serialized size is less than - // Config.column_index_cache_size_in_kb, AND we have more than one IndexInfo object, we + // Config.column_index_cache_size, AND we have more than one IndexInfo object, we // construct an IndexedEntry object. (note: indexSamples.size() and columnIndexCount have the same meaning) if (indexSamples != null && indexSamples.size() > 1) return new IndexedEntry(dataFilePosition, deletionTime, headerLength, indexSamples.toArray(new IndexInfo[indexSamples.size()]), offsets, indexedPartSize, idxInfoSerializer); // Here we have to decide whether we have serialized IndexInfo objects that exceeds - // Config.column_index_cache_size_in_kb (not exceeding case covered above). + // Config.column_index_cache_size (not exceeding case covered above). // Such a "big" indexed-entry is represented as a shallow one. 
if (columnIndexCount > 1) return new ShallowIndexedEntry(dataFilePosition, indexFilePosition, diff --git a/src/java/org/apache/cassandra/db/marshal/AbstractType.java b/src/java/org/apache/cassandra/db/marshal/AbstractType.java index 3a18f76..c8eca39 100644 --- a/src/java/org/apache/cassandra/db/marshal/AbstractType.java +++ b/src/java/org/apache/cassandra/db/marshal/AbstractType.java @@ -514,7 +514,7 @@ public abstract class AbstractType<T> implements Comparator<ByteBuffer>, Assignm if (l > maxValueSize) throw new IOException(String.format("Corrupt value length %d encountered, as it exceeds the maximum of %d, " + - "which is set via max_value_size_in_mb in cassandra.yaml", + "which is set via max_value_size in cassandra.yaml", l, maxValueSize)); return accessor.read(in, l); diff --git a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java index e744150..fa0fb2c 100644 --- a/src/java/org/apache/cassandra/io/sstable/IndexInfo.java +++ b/src/java/org/apache/cassandra/io/sstable/IndexInfo.java @@ -35,7 +35,7 @@ import org.apache.cassandra.utils.ObjectSizes; /** * {@code IndexInfo} is embedded in the indexed version of {@link RowIndexEntry}. - * Each instance roughly covers a range of {@link org.apache.cassandra.config.Config#column_index_size_in_kb column_index_size_in_kb} kB + * Each instance roughly covers a range of {@link org.apache.cassandra.config.Config#column_index_size column_index_size} KiB * and contains the first and last clustering value (or slice bound), its offset in the data file and width in the data file. * <p> * Each {@code IndexInfo} object is serialized as follows. diff --git a/src/java/org/apache/cassandra/io/util/FileUtils.java b/src/java/org/apache/cassandra/io/util/FileUtils.java index ea54497..063e3fb 100644 --- a/src/java/org/apache/cassandra/io/util/FileUtils.java +++ b/src/java/org/apache/cassandra/io/util/FileUtils.java @@ -76,8 +76,8 @@ public final class FileUtils private static final Logger logger = LoggerFactory.getLogger(FileUtils.class); public static final long ONE_KB = 1024; - public static final long ONE_MB = 1024 * ONE_KB; - public static final long ONE_GB = 1024 * ONE_MB; + public static final long ONE_MIB = 1024 * ONE_KB; + public static final long ONE_GB = 1024 * ONE_MIB; public static final long ONE_TB = 1024 * ONE_GB; private static final DecimalFormat df = new DecimalFormat("#.##"); @@ -405,7 +405,7 @@ public final class FileUtils } else if (value.endsWith(" MiB")) { - result = Math.round(Double.valueOf(value.replace(" MiB", "")) * ONE_MB); + result = Math.round(Double.valueOf(value.replace(" MiB", "")) * ONE_MIB); return result; } else if (value.endsWith(" bytes")) @@ -434,9 +434,9 @@ public final class FileUtils String val = df.format(d); return val + " GiB"; } - else if ( value >= ONE_MB ) + else if (value >= ONE_MIB) { - d = value / ONE_MB; + d = value / ONE_MIB; String val = df.format(d); return val + " MiB"; } diff --git a/src/java/org/apache/cassandra/repair/ValidationManager.java b/src/java/org/apache/cassandra/repair/ValidationManager.java index 7077dd3..2897aea 100644 --- a/src/java/org/apache/cassandra/repair/ValidationManager.java +++ b/src/java/org/apache/cassandra/repair/ValidationManager.java @@ -57,7 +57,7 @@ public class ValidationManager // The repair coordinator must hold RF trees in memory at once, so a given validation compaction can only // use 1 / RF of the allowed space. 
- long availableBytes = (DatabaseDescriptor.getRepairSessionSpaceInMegabytes() * 1048576) / + long availableBytes = (DatabaseDescriptor.getRepairSessionSpaceInMiB() * 1048576) / cfs.keyspace.getReplicationStrategy().getReplicationFactor().allReplicas; for (Range<Token> range : ranges) diff --git a/src/java/org/apache/cassandra/service/ActiveRepairService.java b/src/java/org/apache/cassandra/service/ActiveRepairService.java index cc72430..ae57bf9 100644 --- a/src/java/org/apache/cassandra/service/ActiveRepairService.java +++ b/src/java/org/apache/cassandra/service/ActiveRepairService.java @@ -238,16 +238,28 @@ public class ActiveRepairService implements IEndpointStateChangeSubscriber, IFai consistent.local.cancelSession(sessionID, force); } - @Override + @Deprecated public void setRepairSessionSpaceInMegabytes(int sizeInMegabytes) { - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(sizeInMegabytes); + DatabaseDescriptor.setRepairSessionSpaceInMiB(sizeInMegabytes); } - @Override + @Deprecated public int getRepairSessionSpaceInMegabytes() { - return DatabaseDescriptor.getRepairSessionSpaceInMegabytes(); + return DatabaseDescriptor.getRepairSessionSpaceInMiB(); + } + + @Override + public void setRepairSessionSpaceInMebibytes(int sizeInMebibytes) + { + DatabaseDescriptor.setRepairSessionSpaceInMiB(sizeInMebibytes); + } + + @Override + public int getRepairSessionSpaceInMebibytes() + { + return DatabaseDescriptor.getRepairSessionSpaceInMiB(); } public List<CompositeData> getRepairStats(List<String> schemaArgs, String rangeString) diff --git a/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java b/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java index b68cb6f..3f03602 100644 --- a/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java +++ b/src/java/org/apache/cassandra/service/ActiveRepairServiceMBean.java @@ -29,9 +29,14 @@ public interface ActiveRepairServiceMBean public List<Map<String, String>> getSessions(boolean all, String rangesStr); public void failSession(String session, boolean force); + @Deprecated public void setRepairSessionSpaceInMegabytes(int sizeInMegabytes); + @Deprecated public int getRepairSessionSpaceInMegabytes(); + public void setRepairSessionSpaceInMebibytes(int sizeInMebibytes); + public int getRepairSessionSpaceInMebibytes(); + public boolean getUseOffheapMerkleTrees(); public void setUseOffheapMerkleTrees(boolean value); diff --git a/src/java/org/apache/cassandra/service/StorageService.java b/src/java/org/apache/cassandra/service/StorageService.java index 4be4fcf..4133119 100644 --- a/src/java/org/apache/cassandra/service/StorageService.java +++ b/src/java/org/apache/cassandra/service/StorageService.java @@ -5891,25 +5891,25 @@ public class StorageService extends NotificationBroadcasterSupport implements IE public int getColumnIndexSizeInKB() { - return DatabaseDescriptor.getColumnIndexSizeInKB(); + return DatabaseDescriptor.getColumnIndexSizeInKiB(); } public void setColumnIndexSize(int columnIndexSizeInKB) { - int oldValueInKB = DatabaseDescriptor.getColumnIndexSizeInKB(); + int oldValueInKiB = DatabaseDescriptor.getColumnIndexSizeInKiB(); DatabaseDescriptor.setColumnIndexSize(columnIndexSizeInKB); - logger.info("Updated column_index_size_in_kb to {} KiB (was {} KiB)", columnIndexSizeInKB, oldValueInKB); + logger.info("Updated column_index_size to {} KiB (was {} KiB)", columnIndexSizeInKB, oldValueInKiB); } public int getColumnIndexCacheSize() { - return DatabaseDescriptor.getColumnIndexCacheSizeInKB(); + return 
DatabaseDescriptor.getColumnIndexCacheSizeInKiB(); } public void setColumnIndexCacheSize(int cacheSizeInKB) { DatabaseDescriptor.setColumnIndexCacheSize(cacheSizeInKB); - logger.info("Updated column_index_cache_size_in_kb to {}", cacheSizeInKB); + logger.info("Updated column_index_cache_size to {}", cacheSizeInKB); } public int getBatchSizeFailureThreshold() @@ -5920,18 +5920,18 @@ public class StorageService extends NotificationBroadcasterSupport implements IE public void setBatchSizeFailureThreshold(int threshold) { DatabaseDescriptor.setBatchSizeFailThresholdInKB(threshold); - logger.info("updated batch_size_fail_threshold_in_kb to {}", threshold); + logger.info("updated batch_size_fail_threshold to {}", threshold); } public int getBatchSizeWarnThreshold() { - return DatabaseDescriptor.getBatchSizeWarnThresholdInKB(); + return DatabaseDescriptor.getBatchSizeWarnThresholdInKiB(); } public void setBatchSizeWarnThreshold(int threshold) { - DatabaseDescriptor.setBatchSizeWarnThresholdInKB(threshold); - logger.info("Updated batch_size_warn_threshold_in_kb to {}", threshold); + DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(threshold); + logger.info("Updated batch_size_warn_threshold to {}", threshold); } public int getInitialRangeTombstoneListAllocationSize() diff --git a/src/java/org/apache/cassandra/streaming/StreamSession.java b/src/java/org/apache/cassandra/streaming/StreamSession.java index 02255dd..00e3bf1 100644 --- a/src/java/org/apache/cassandra/streaming/StreamSession.java +++ b/src/java/org/apache/cassandra/streaming/StreamSession.java @@ -826,7 +826,7 @@ public class StreamSession implements IEndpointStateChangeSubscriber "The time taken ({} ms) for processing the incoming stream message ({})" + " exceeded internode streaming TCP user timeout ({} ms).\n" + "The streaming connection might be closed due to tcp user timeout.\n" + - "Try to increase the internode_streaming_tcp_user_timeout_in_ms" + + "Try to increase the internode_streaming_tcp_user_timeout" + " or set it to 0 to use system defaults.", latencyMs, message, timeout); } diff --git a/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java b/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java index 806d815..6925932 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java +++ b/src/java/org/apache/cassandra/tools/nodetool/GetColumnIndexSize.java @@ -28,6 +28,6 @@ public class GetColumnIndexSize extends NodeToolCmd @Override protected void execute(NodeProbe probe) { - probe.output().out.println("Current value for column_index_size_in_kb: " + probe.getColumnIndexSizeInKB() + " KiB"); + probe.output().out.println("Current value for column_index_size: " + probe.getColumnIndexSizeInKB() + " KiB"); } } diff --git a/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java b/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java index 85a066d..fe5d8b3 100644 --- a/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java +++ b/src/java/org/apache/cassandra/tools/nodetool/SetColumnIndexSize.java @@ -27,12 +27,12 @@ import org.apache.cassandra.tools.NodeTool.NodeToolCmd; public class SetColumnIndexSize extends NodeToolCmd { @SuppressWarnings("UnusedDeclaration") - @Arguments(title = "column_index_size_in_kb", usage = "<value_in_kib>", description = "Value in KiB", required = true) - private int columnIndexSizeInKB; + @Arguments(title = "column_index_size", usage = "<value_in_kib>", description = "Value in KiB", required = true) + private 
int columnIndexSizeInKiB; @Override protected void execute(NodeProbe probe) { - probe.setColumnIndexSize(columnIndexSizeInKB); + probe.setColumnIndexSize(columnIndexSizeInKiB); } } diff --git a/test/conf/cassandra-murmur.yaml b/test/conf/cassandra-murmur.yaml index 06507e4..9da6917 100644 --- a/test/conf/cassandra-murmur.yaml +++ b/test/conf/cassandra-murmur.yaml @@ -16,7 +16,7 @@ listen_address: 127.0.0.1 storage_port: 7012 start_native_transport: true native_transport_port: 9042 -column_index_size_in_kb: 4 +column_index_size: 4KiB saved_caches_directory: build/test/cassandra/saved_caches data_file_directories: - build/test/cassandra/data diff --git a/test/conf/cassandra-old.yaml b/test/conf/cassandra-old.yaml index aa5572c..11b79f2 100644 --- a/test/conf/cassandra-old.yaml +++ b/test/conf/cassandra-old.yaml @@ -20,7 +20,7 @@ storage_port: 7012 ssl_storage_port: 17012 start_native_transport: true native_transport_port: 9042 -column_index_size_in_kb: 4 +column_index_size: 4KiB saved_caches_directory: build/test/cassandra/saved_caches data_file_directories: - build/test/cassandra/data @@ -54,3 +54,4 @@ enable_drop_compact_storage: true file_cache_enabled: true internode_send_buff_size_in_bytes: 5 internode_recv_buff_size_in_bytes: 5 +max_hint_window_in_ms: 10800000 diff --git a/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml b/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml index e528d27..b11f248 100644 --- a/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml +++ b/test/conf/cassandra-sslcontextfactory-invalidconfiguration.yaml @@ -38,7 +38,7 @@ storage_port: 7012 ssl_storage_port: 17012 start_native_transport: true native_transport_port: 9042 -column_index_size_in_kb: 4 +column_index_size: 4KiB saved_caches_directory: build/test/cassandra/saved_caches data_file_directories: - build/test/cassandra/data diff --git a/test/conf/cassandra-sslcontextfactory.yaml b/test/conf/cassandra-sslcontextfactory.yaml index 26cea35..6e55b49 100644 --- a/test/conf/cassandra-sslcontextfactory.yaml +++ b/test/conf/cassandra-sslcontextfactory.yaml @@ -38,7 +38,7 @@ storage_port: 7012 ssl_storage_port: 17012 start_native_transport: true native_transport_port: 9042 -column_index_size_in_kb: 4 +column_index_size: 4KiB saved_caches_directory: build/test/cassandra/saved_caches data_file_directories: - build/test/cassandra/data diff --git a/test/conf/unit-test-conf/test-native-port.yaml b/test/conf/unit-test-conf/test-native-port.yaml index 8db2ad4..d3c5516 100644 --- a/test/conf/unit-test-conf/test-native-port.yaml +++ b/test/conf/unit-test-conf/test-native-port.yaml @@ -20,7 +20,7 @@ storage_port: 7010 ssl_storage_port: 7011 start_native_transport: true native_transport_port_ssl: 9142 -column_index_size_in_kb: 4 +column_index_size: 4KiB saved_caches_directory: build/test/cassandra/saved_caches data_file_directories: - build/test/cassandra/data diff --git a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java index 7574318..d6bc7ad 100644 --- a/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java +++ b/test/distributed/org/apache/cassandra/distributed/impl/InstanceConfig.java @@ -93,7 +93,7 @@ public class InstanceConfig implements IInstanceConfig .set("concurrent_reads", 2) .set("memtable_flush_writers", 1) .set("concurrent_compactors", 1) - .set("memtable_heap_space_in_mb", 10) + .set("memtable_heap_space", "10MiB") .set("commitlog_sync", "batch") 
.set("storage_port", storage_port) .set("native_transport_port", native_transport_port) diff --git a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java index 7a59876..34432dc 100644 --- a/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java +++ b/test/distributed/org/apache/cassandra/distributed/test/LargeColumnTest.java @@ -29,8 +29,6 @@ import org.slf4j.LoggerFactory; import org.apache.cassandra.distributed.api.ConsistencyLevel; import org.apache.cassandra.distributed.api.ICluster; -import static java.util.concurrent.TimeUnit.SECONDS; - // TODO: this test should be removed after running in-jvm dtests is set up via the shared API repository public class LargeColumnTest extends TestBaseImpl { @@ -68,11 +66,11 @@ public class LargeColumnTest extends TestBaseImpl .withNodes(nodes) .withConfig(config -> config.set("commitlog_segment_size_in_mb", (columnSize * 3) >> 20) - .set("internode_application_send_queue_reserve_endpoint_capacity_in_bytes", columnSize * 2) - .set("internode_application_send_queue_reserve_global_capacity_in_bytes", columnSize * 3) + .set("internode_application_send_queue_reserve_endpoint_capacity", String.format("%dB", (columnSize * 2))) + .set("internode_application_send_queue_reserve_global_capacity", String.format("%dB", (columnSize * 3))) .set("write_request_timeout", "30s") .set("read_request_timeout", "30s") - .set("memtable_heap_space_in_mb", 1024) + .set("memtable_heap_space", "1024MiB") ) .start())) { diff --git a/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java b/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java index c56a5a7..74c9356 100644 --- a/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java +++ b/test/simulator/main/org/apache/cassandra/simulator/ClusterSimulation.java @@ -642,7 +642,7 @@ public class ClusterSimulation<S extends Simulation> implements AutoCloseable .set("write_request_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.writeTimeoutNanos))) .set("cas_contention_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.contentionTimeoutNanos))) .set("request_timeout", String.format("%dms", NANOSECONDS.toMillis(builder.requestTimeoutNanos))) - .set("memtable_heap_space_in_mb", 1) + .set("memtable_heap_space", "1mb") .set("memtable_allocation_type", builder.memoryListener != null ? 
"unslabbed_heap_buffers_logged" : "heap_buffers") .set("file_cache_size_in_mb", 16) .set("use_deterministic_table_id", true) diff --git a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java index 5aabc80..0d13654 100644 --- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java +++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java @@ -198,7 +198,7 @@ public class DatabaseDescriptorTest } @Test - public void testRpcAddress() throws Exception + public void testRpcAddress() { Config testConfig = DatabaseDescriptor.loadConfig(); testConfig.rpc_address = suitableInterface.getInterfaceAddresses().get(0).getAddress().getHostAddress(); @@ -282,7 +282,7 @@ public class DatabaseDescriptorTest try { DatabaseDescriptor.setColumnIndexCacheSize(-1); - fail("Should have received a ConfigurationException column_index_cache_size_in_kb = -1"); + fail("Should have received a ConfigurationException column_index_cache_size = -1"); } catch (ConfigurationException ignored) { } Assert.assertEquals(2048, DatabaseDescriptor.getColumnIndexCacheSize()); @@ -290,7 +290,7 @@ public class DatabaseDescriptorTest try { DatabaseDescriptor.setColumnIndexCacheSize(2 * 1024 * 1024); - fail("Should have received a ConfigurationException column_index_cache_size_in_kb = 2GiB"); + fail("Should have received a ConfigurationException column_index_cache_size= 2GiB"); } catch (ConfigurationException ignored) { } Assert.assertEquals(2048, DatabaseDescriptor.getColumnIndexCacheSize()); @@ -298,7 +298,7 @@ public class DatabaseDescriptorTest try { DatabaseDescriptor.setColumnIndexSize(-1); - fail("Should have received a ConfigurationException column_index_size_in_kb = -1"); + fail("Should have received a ConfigurationException column_index_size = -1"); } catch (ConfigurationException ignored) { } Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize()); @@ -306,23 +306,23 @@ public class DatabaseDescriptorTest try { DatabaseDescriptor.setColumnIndexSize(2 * 1024 * 1024); - fail("Should have received a ConfigurationException column_index_size_in_kb = 2GiB"); + fail("Should have received a ConfigurationException column_index_size = 2GiB"); } catch (ConfigurationException ignored) { } Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize()); try { - DatabaseDescriptor.setBatchSizeWarnThresholdInKB(-1); - fail("Should have received a ConfigurationException batch_size_warn_threshold_in_kb = -1"); + DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(-1); + fail("Should have received a ConfigurationException batch_size_warn_threshold = -1"); } catch (ConfigurationException ignored) { } Assert.assertEquals(5120, DatabaseDescriptor.getBatchSizeWarnThreshold()); try { - DatabaseDescriptor.setBatchSizeWarnThresholdInKB(2 * 1024 * 1024); - fail("Should have received a ConfigurationException batch_size_warn_threshold_in_kb = 2GiB"); + DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(2 * 1024 * 1024); + fail("Should have received a ConfigurationException batch_size_warn_threshold = 2GiB"); } catch (ConfigurationException ignored) { } Assert.assertEquals(4096, DatabaseDescriptor.getColumnIndexSize()); @@ -333,7 +333,8 @@ public class DatabaseDescriptorTest { Config testConfig = new Config(); - SmallestDurationMilliseconds greaterThanLowestTimeout = SmallestDurationMilliseconds.inMilliseconds(DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT.toMilliseconds() + 1); + SmallestDurationMilliseconds greaterThanLowestTimeout = 
SmallestDurationMilliseconds + .inMilliseconds(DatabaseDescriptor.LOWEST_ACCEPTED_TIMEOUT.toMilliseconds() + 1); testConfig.read_request_timeout = greaterThanLowestTimeout; testConfig.range_request_timeout = greaterThanLowestTimeout; @@ -376,32 +377,32 @@ public class DatabaseDescriptorTest @Test public void testRepairSessionMemorySizeToggles() { - int previousSize = DatabaseDescriptor.getRepairSessionSpaceInMegabytes(); + int previousSize = DatabaseDescriptor.getRepairSessionSpaceInMiB(); try { Assert.assertEquals((Runtime.getRuntime().maxMemory() / (1024 * 1024) / 16), - DatabaseDescriptor.getRepairSessionSpaceInMegabytes()); + DatabaseDescriptor.getRepairSessionSpaceInMiB()); int targetSize = (int) (Runtime.getRuntime().maxMemory() / (1024 * 1024) / 4) + 1; - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(targetSize); - Assert.assertEquals(targetSize, DatabaseDescriptor.getRepairSessionSpaceInMegabytes()); + DatabaseDescriptor.setRepairSessionSpaceInMiB(targetSize); + Assert.assertEquals(targetSize, DatabaseDescriptor.getRepairSessionSpaceInMiB()); - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(10); - Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMegabytes()); + DatabaseDescriptor.setRepairSessionSpaceInMiB(10); + Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMiB()); try { - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(0); + DatabaseDescriptor.setRepairSessionSpaceInMiB(0); fail("Should have received a ConfigurationException for depth of 9"); } catch (ConfigurationException ignored) { } - Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMegabytes()); + Assert.assertEquals(10, DatabaseDescriptor.getRepairSessionSpaceInMiB()); } finally { - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(previousSize); + DatabaseDescriptor.setRepairSessionSpaceInMiB(previousSize); } } diff --git a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java index 885f28f..ad7abb8 100644 --- a/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java +++ b/test/unit/org/apache/cassandra/config/LoadOldYAMLBackwardCompatibilityTest.java @@ -22,6 +22,7 @@ import org.junit.BeforeClass; import org.junit.Test; import static org.junit.Assert.assertEquals; +import static org.junit.Assert.assertNull; import static org.junit.Assert.assertTrue; import static org.junit.Assert.assertFalse; @@ -39,11 +40,9 @@ public class LoadOldYAMLBackwardCompatibilityTest public void testConfigurationLoaderBackwardCompatibility() { Config config = DatabaseDescriptor.loadConfig(); - //Confirm parameters were successfully read with their old names and the default values in cassandra-old.yaml - /*assertEquals(5, config.internode_socket_send_buffer_size_in_bytes); - assertEquals(5, config.internode_socket_receive_buffer_size_in_bytes); + assertEquals(DurationSpec.inMilliseconds(10800000), config.max_hint_window); - assertEquals(DurationSpec.inHours(0), config.max_hint_window);*/ + assertEquals(DurationSpec.inHours(3), config.max_hint_window); assertEquals(DurationSpec.inMilliseconds(0), config.native_transport_idle_timeout); assertEquals(DurationSpec.inMilliseconds(10000), config.request_timeout); assertEquals(DurationSpec.inMilliseconds(5000), config.read_request_timeout); @@ -54,7 +53,7 @@ public class LoadOldYAMLBackwardCompatibilityTest assertEquals(DurationSpec.inMilliseconds(60000), config.truncate_request_timeout); 
assertEquals(DurationSpec.inSeconds(300), config.streaming_keep_alive_period); assertEquals(DurationSpec.inMilliseconds(500), config.slow_query_log_timeout); - /*assertNull(config.memtable_heap_space); + assertNull(config.memtable_heap_space); assertNull(config.memtable_offheap_space); assertNull( config.repair_session_space); assertEquals(DataStorageSpec.inBytes(4194304), config.internode_application_send_queue_capacity); @@ -68,9 +67,9 @@ public class LoadOldYAMLBackwardCompatibilityTest assertEquals(DurationSpec.inMilliseconds(300000), config.internode_streaming_tcp_user_timeout); assertEquals(DataStorageSpec.inMebibytes(16), config.native_transport_max_frame_size); assertEquals(DataStorageSpec.inMebibytes(256), config.max_value_size); - assertEquals(DataStorageSpec.inKibibytes(64), config.column_index_size); + assertEquals(DataStorageSpec.inKibibytes(4), config.column_index_size); assertEquals(DataStorageSpec.inKibibytes(2), config.column_index_cache_size); - assertEquals(DataStorageSpec.inKibibytes(5), config.batch_size_warn_threshold);*/ + assertEquals(DataStorageSpec.inKibibytes(5), config.batch_size_warn_threshold); assertEquals(DataRateSpec.inMebibytesPerSecond(64), config.compaction_throughput); //assertEquals(DataStorageSpec.inMebibytes(50), config.min_free_space_per_drive); assertEquals(DataRateSpec.inMebibytesPerSecond(23841858).toString(), config.stream_throughput_outbound.toString()); diff --git a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java index 06671df..ae2bf5f 100644 --- a/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java +++ b/test/unit/org/apache/cassandra/config/YamlConfigurationLoaderTest.java @@ -91,8 +91,8 @@ public class YamlConfigurationLoaderTest .put("commitlog_sync", commitLogSync) .put("seed_provider", seedProvider) .put("client_encryption_options", encryptionOptions) - .put("internode_send_buff_size_in_bytes", 5) - .put("internode_recv_buff_size_in_bytes", 5) + .put("internode_socket_send_buffer_size", "5B") + .put("internode_socket_receive_buffer_size", "5B") .build(); Config config = YamlConfigurationLoader.fromMap(map, Config.class); @@ -101,8 +101,8 @@ public class YamlConfigurationLoaderTest assertEquals(seedProvider, config.seed_provider); // Check a parameterized class assertEquals(false, config.client_encryption_options.optional); // Check a nested object assertEquals(true, config.client_encryption_options.enabled); // Check a nested object - assertEquals(5, config.internode_socket_send_buffer_size_in_bytes); // Check names backward compatibility (CASSANDRA-17141) - assertEquals(5, config.internode_socket_receive_buffer_size_in_bytes); // Check names backward compatibility (CASSANDRA-17141) + assertEquals(new DataStorageSpec("5B"), config.internode_socket_send_buffer_size); // Check names backward compatibility (CASSANDRA-17141 and CASSANDRA-15234) + assertEquals(new DataStorageSpec("5B"), config.internode_socket_receive_buffer_size); // Check names backward compatibility (CASSANDRA-17141 and CASSANDRA-15234) } @Test diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java index 48caef6..25ec662 100644 --- a/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java +++ b/test/unit/org/apache/cassandra/db/compaction/CompactionsCQLTest.java @@ -354,7 +354,7 @@ public class CompactionsCQLTest extends CQLTester { // write enough 
data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt row deletion DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception); - int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKB(); + int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKiB(); DatabaseDescriptor.setColumnIndexSize(1024); prepareWide(); RowUpdateBuilder.deleteRowAt(getCurrentColumnFamilyStore().metadata(), System.currentTimeMillis() * 1000, -1, 22, 33).apply(); @@ -369,7 +369,7 @@ public class CompactionsCQLTest extends CQLTester { // write enough data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt standard tombstone DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception); - int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKB(); + int maxSizePre = DatabaseDescriptor.getColumnIndexSizeInKiB(); DatabaseDescriptor.setColumnIndexSize(1024); prepareWide(); RowUpdateBuilder rub = new RowUpdateBuilder(getCurrentColumnFamilyStore().metadata(), -1, System.currentTimeMillis() * 1000, 22).clustering(33).delete("b"); @@ -385,7 +385,7 @@ public class CompactionsCQLTest extends CQLTester { // write enough data to make sure we use an IndexedReader when doing a read, and make sure it fails when reading a corrupt range tombstone DatabaseDescriptor.setCorruptedTombstoneStrategy(Config.CorruptedTombstoneStrategy.exception); - final int maxSizePreKB = DatabaseDescriptor.getColumnIndexSizeInKB(); + final int maxSizePreKB = DatabaseDescriptor.getColumnIndexSizeInKiB(); DatabaseDescriptor.setColumnIndexSize(1024); prepareWide(); RangeTombstone rt = new RangeTombstone(Slice.ALL, new DeletionTime(System.currentTimeMillis(), -1)); diff --git a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java index 10bb783..3ad09fb 100644 --- a/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java +++ b/test/unit/org/apache/cassandra/io/sstable/SSTableWriterTestBase.java @@ -73,7 +73,7 @@ public class SSTableWriterTestBase extends SchemaLoader SchemaLoader.standardCFMD(KEYSPACE, CF_SMALL_MAX_VALUE)); maxValueSize = DatabaseDescriptor.getMaxValueSize(); - DatabaseDescriptor.setMaxValueSize(1024 * 1024); // set max value size to 1MB + DatabaseDescriptor.setMaxValueSize(1024 * 1024); // set max value size to 1MiB } @AfterClass diff --git a/test/unit/org/apache/cassandra/repair/ValidatorTest.java b/test/unit/org/apache/cassandra/repair/ValidatorTest.java index 4df2140..db9ce31 100644 --- a/test/unit/org/apache/cassandra/repair/ValidatorTest.java +++ b/test/unit/org/apache/cassandra/repair/ValidatorTest.java @@ -66,7 +66,7 @@ import static org.junit.Assert.assertTrue; public class ValidatorTest { private static final long TEST_TIMEOUT = 60; //seconds - private static int testSizeMegabytes; + private static int testSizeMebibytes; private static final String keyspace = "ValidatorTest"; private static final String columnFamily = "Standard1"; @@ -80,20 +80,20 @@ public class ValidatorTest KeyspaceParams.simple(1), SchemaLoader.standardCFMD(keyspace, columnFamily)); partitioner = Schema.instance.getTableMetadata(keyspace, columnFamily).partitioner; - testSizeMegabytes = DatabaseDescriptor.getRepairSessionSpaceInMegabytes(); + testSizeMebibytes = DatabaseDescriptor.getRepairSessionSpaceInMiB(); } @After public void tearDown() { MessagingService.instance().outboundSink.clear(); 
- DatabaseDescriptor.setRepairSessionSpaceInMegabytes(testSizeMegabytes); + DatabaseDescriptor.setRepairSessionSpaceInMiB(testSizeMebibytes); } @Before public void setup() { - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(testSizeMegabytes); + DatabaseDescriptor.setRepairSessionSpaceInMiB(testSizeMebibytes); } @Test @@ -233,7 +233,7 @@ ValidatorTest ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnFamily); cfs.clearUnsafe(); - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(1); + DatabaseDescriptor.setRepairSessionSpaceInMiB(1); // disable compaction while flushing cfs.disableAutoCompaction(); @@ -292,7 +292,7 @@ ValidatorTest ColumnFamilyStore cfs = ks.getColumnFamilyStore(columnFamily); cfs.clearUnsafe(); - DatabaseDescriptor.setRepairSessionSpaceInMegabytes(1); + DatabaseDescriptor.setRepairSessionSpaceInMiB(1); // disable compaction while flushing cfs.disableAutoCompaction(); @@ -328,7 +328,7 @@ ValidatorTest Message message = outgoingMessageSink.get(TEST_TIMEOUT, TimeUnit.SECONDS); MerkleTrees trees = ((ValidationResponse) message.payload).trees; - // Should have 4 trees each with a depth of on average 10 (since each range should have gotten 0.25 megabytes) + // Should have 4 trees each with an average depth of 10 (since each range should have gotten 0.25 mebibytes) Iterator<Map.Entry<Range<Token>, MerkleTree>> iterator = trees.iterator(); int numTrees = 0; double totalResolution = 0; @@ -347,7 +347,7 @@ assertEquals(trees.rowCount(), 1 << 14); assertEquals(4, numTrees); - // With a single tree and a megabyte we should had a total resolution of 2^12 leaves; with multiple + // With a single tree and a mebibyte we should have had a total resolution of 2^12 leaves; with multiple // ranges we should get similar overall resolution, but not more.
assertTrue(totalResolution > (1 << 11) && totalResolution < (1 << 13)); } diff --git a/test/unit/org/apache/cassandra/service/ClientWarningsTest.java b/test/unit/org/apache/cassandra/service/ClientWarningsTest.java index ee652e2..e7a34e0 100644 --- a/test/unit/org/apache/cassandra/service/ClientWarningsTest.java +++ b/test/unit/org/apache/cassandra/service/ClientWarningsTest.java @@ -60,7 +60,7 @@ public class ClientWarningsTest extends CQLTester public static void setUp() { requireNetwork(); - DatabaseDescriptor.setBatchSizeWarnThresholdInKB(1); + DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(1); } @Test diff --git a/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java b/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java index a7551f4..bfb464d 100644 --- a/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java +++ b/test/unit/org/apache/cassandra/service/ProtocolBetaVersionTest.java @@ -35,7 +35,7 @@ public class ProtocolBetaVersionTest extends CQLTester public static void setUp() { requireNetwork(); - DatabaseDescriptor.setBatchSizeWarnThresholdInKB(1); + DatabaseDescriptor.setBatchSizeWarnThresholdInKiB(1); } private ProtocolVersion getBetaVersion() diff --git a/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java index 67b519e..b30cf06 100644 --- a/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java +++ b/test/unit/org/apache/cassandra/tools/nodetool/SetGetColumnIndexSizeTest.java @@ -42,7 +42,7 @@ public class SetGetColumnIndexSizeTest extends CQLTester @Test public void testNull() { - assertSetInvalidColumnIndexSize(null, "Required parameters are missing: column_index_size_in_kb", 1); + assertSetInvalidColumnIndexSize(null, "Required parameters are missing: column_index_size", 1); } @Test @@ -66,20 +66,20 @@ public class SetGetColumnIndexSizeTest extends CQLTester @Test public void testNegative() { - assertSetInvalidColumnIndexSize("-7", "column_index_size_in_kb must be positive value < 2097152, but was -7", 2); + assertSetInvalidColumnIndexSize("-7", "Invalid data storage: value must be positive, but was -7", 2); } @Test public void testInvalidValue() { - assertSetInvalidColumnIndexSize("2097152", "column_index_size_in_kb must be positive value < 2097152, but was 2097152", 2); + assertSetInvalidColumnIndexSize("2097152", "column_index_size must be positive value < 2097151B, but was 2147483648B", 2); } @Test public void testUnparseable() { - assertSetInvalidColumnIndexSize("1.2", "column_index_size_in_kb: can not convert \"1.2\" to a int", 1); - assertSetInvalidColumnIndexSize("value", "column_index_size_in_kb: can not convert \"value\" to a int", 1); + assertSetInvalidColumnIndexSize("1.2", "column_index_size: can not convert \"1.2\" to a int", 1); + assertSetInvalidColumnIndexSize("value", "column_index_size: can not convert \"value\" to a int", 1); } private static void assertSetGetValidColumnIndexSize(int columnIndexSizeInKB) @@ -105,6 +105,6 @@ public class SetGetColumnIndexSizeTest extends CQLTester { ToolResult tool = invokeNodetool("getcolumnindexsize"); tool.assertOnCleanExit(); - assertThat(tool.getStdout()).contains("Current value for column_index_size_in_kb: " + expected + " KiB"); + assertThat(tool.getStdout()).contains("Current value for column_index_size: " + expected + " KiB"); } } diff --git a/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java 
b/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java index ac32e6f..86de587 100644 --- a/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java +++ b/test/unit/org/apache/cassandra/transport/CQLConnectionTest.java @@ -59,7 +59,7 @@ import org.apache.cassandra.utils.concurrent.NonBlockingRateLimiter; import org.apache.cassandra.utils.concurrent.Condition; import static org.apache.cassandra.config.EncryptionOptions.TlsEncryptionPolicy.UNENCRYPTED; -import static org.apache.cassandra.io.util.FileUtils.ONE_MB; +import static org.apache.cassandra.io.util.FileUtils.ONE_MIB; import static org.apache.cassandra.net.FramingTest.randomishBytes; import static org.apache.cassandra.transport.Flusher.MAX_FRAMED_PAYLOAD_SIZE; import static org.apache.cassandra.utils.concurrent.Condition.newOneTimeCondition; @@ -107,7 +107,7 @@ public class CQLConnectionTest // set connection-local queue size to 0 so that all capacity is allocated from reserves DatabaseDescriptor.setNativeTransportReceiveQueueCapacityInBytes(0); // set transport to max frame size possible - DatabaseDescriptor.setNativeTransportMaxFrameSize(256 * (int) ONE_MB); + DatabaseDescriptor.setNativeTransportMaxFrameSize(256 * (int) ONE_MIB); } @Test
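As a quick reference for the renames exercised in the tests above, a minimal before/after cassandra.yaml sketch assembled from the settings this patch touches (values are illustrative, not recommendations):

    # before (old name, implicit unit)        # after (new name, explicit unit suffix)
    column_index_size_in_kb: 4                column_index_size: 4KiB
    memtable_heap_space_in_mb: 10             memtable_heap_space: 10MiB
    internode_send_buff_size_in_bytes: 5      internode_socket_send_buffer_size: 5B

The old spellings continue to load for backward compatibility (see LoadOldYAMLBackwardCompatibilityTest and YamlConfigurationLoaderTest above), while new-style values carry an explicit unit suffix such as B, KiB or MiB.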