This is an automated email from the ASF dual-hosted git repository.

mck pushed a commit to branch trunk
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 1122fcfeaa127de40a3ff4dbfd0e74a4e8783d52
Merge: a04ccf3 d9e1af8
Author: Mick Semb Wever <m...@apache.org>
AuthorDate: Tue Nov 17 22:46:04 2020 +0100

    Merge branch 'cassandra-3.11' into trunk

 CHANGES.txt                                        |  1 +
 NEWS.txt                                           |  3 +
 src/java/org/apache/cassandra/config/Config.java   |  2 +-
 .../cassandra/config/DatabaseDescriptor.java       | 27 ++++++--
 .../config/DatabaseDescriptorRefTest.java          |  2 +-
 .../cassandra/config/DatabaseDescriptorTest.java   | 78 ++++++++++++++++++++++
 6 files changed, 107 insertions(+), 6 deletions(-)

diff --cc CHANGES.txt
index 83b8a61,80b1532..69ccf55
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,47 -1,13 +1,48 @@@
 -3.11.10
 +4.0-beta4
 + * Upgrade JNA to 5.6.0, dropping support for <=glibc-2.6 systems 
(CASSANDRA-16212)
 + * Add saved Host IDs to TokenMetadata at startup (CASSANDRA-16246)
 + * Ensure that CacheMetrics.requests is picked up by the metric reporter 
(CASSANDRA-16228)
 + * Add a ratelimiter to snapshot creation and deletion (CASSANDRA-13019)
 + * Produce consistent tombstone for reads to avoid digest mismatch 
(CASSANDRA-15369)
 + * Fix SSTableloader issue when restoring a table named backups 
(CASSANDRA-16235)
 + * Invalid serialized size for responses caused by increasing message time by 
1ms which caused extra bytes in size calculation (CASSANDRA-16103)
 + * Throw BufferOverflowException from DataOutputBuffer for better visibility 
(CASSANDRA-16214)
 + * TLS connections to the storage port on a node without server encryption 
configured causes java.io.IOException accessing missing keystore 
(CASSANDRA-16144)
 +Merged from 3.11:
  Merged from 3.0:
+  * Improved check of num_tokens against the length of initial_token 
(CASSANDRA-14477)
   * Fix a race condition on ColumnFamilyStore and TableMetrics 
(CASSANDRA-16228)
   * Remove the SEPExecutor blocking behavior (CASSANDRA-16186)
 - * Fix invalid cell value skipping when reading from disk (CASSANDRA-16223)
 + * Wait for schema agreement when bootstrapping (CASSANDRA-15158)
   * Prevent invoking enable/disable gossip when not in NORMAL (CASSANDRA-16146)
  
 -3.11.9
 - * Synchronize Keyspace instance store/clear (CASSANDRA-16210)
 +4.0-beta3
 + * Segregate Network and Chunk Cache BufferPools and Recirculate Partially 
Freed Chunks (CASSANDRA-15229)
 + * Fail truncation requests when they fail on a replica (CASSANDRA-16208)
 + * Move compact storage validation earlier in startup process 
(CASSANDRA-16063)
 + * Fix ByteBufferAccessor cast exceptions are thrown when trying to query a 
virtual table (CASSANDRA-16155)
 + * Consolidate node liveness check for forced repair (CASSANDRA-16113)
 + * Use unsigned short in ValueAccessor.sliceWithShortLength (CASSANDRA-16147)
 + * Abort repairs when getting a truncation request (CASSANDRA-15854)
 + * Remove bad assert when getting active compactions for an sstable 
(CASSANDRA-15457)
 + * Avoid failing compactions with very large partitions (CASSANDRA-15164)
 + * Prevent NPE in StreamMessage in type lookup (CASSANDRA-16131)
 + * Avoid invalid state transition exception during incremental repair 
(CASSANDRA-16067)
 + * Allow zero padding in timestamp serialization (CASSANDRA-16105)
 + * Add byte array backed cells (CASSANDRA-15393)
 + * Correctly handle pending ranges with adjacent range movements 
(CASSANDRA-14801)
 + * Avoid adding localhost when streaming trivial ranges (CASSANDRA-16099)
 + * Add nodetool getfullquerylog (CASSANDRA-15988)
 + * Fix yaml format and alignment in tpstats (CASSANDRA-11402)
 + * Avoid trying to keep track of RTs for endpoints we won't write to during 
read repair (CASSANDRA-16084)
 + * When compaction gets interrupted, the exception should include the 
compactionId (CASSANDRA-15954)
 + * Make Table/Keyspace Metric Names Consistent With Each Other 
(CASSANDRA-15909)
 + * Mutating sstable component may race with entire-sstable-streaming(ZCS) 
causing checksum validation failure (CASSANDRA-15861)
 + * NPE thrown while updating speculative execution time if keyspace is 
removed during task execution (CASSANDRA-15949)
 + * Show the progress of data streaming and index build (CASSANDRA-15406)
 + * Add flag to disable chunk cache and disable by default (CASSANDRA-16036)
 + * Upgrade to snakeyaml >= 1.26 version for CVE-2017-18640 fix 
(CASSANDRA-16150)
 +Merged from 3.11:
   * Fix ColumnFilter to avoid querying cells of unselected complex columns 
(CASSANDRA-15977)
   * Fix memory leak in CompressedChunkReader (CASSANDRA-15880)
   * Don't attempt value skipping with mixed version cluster (CASSANDRA-15833)
diff --cc NEWS.txt
index de472c4,d16bcce..cf33df9
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -33,249 -42,29 +33,252 @@@ restore snapshots created with the prev
  'sstableloader' tool. You can upgrade the file format of your snapshots
  using the provided 'sstableupgrade' tool.
  
 -3.11.10
 -=====
 +4.0
 +===
 +
 +New features
 +------------
 +    - Nodes will now bootstrap all intra-cluster connections at startup by 
default and wait
 +      10 seconds for all but one node in the local data center to be 
connected and marked
 +      UP in gossip. This prevents nodes from coordinating requests and 
failing because they
 +      aren't able to connect to the cluster fast enough. 
block_for_peers_timeout_in_secs in
 +      cassandra.yaml can be used to configure how long to wait (or whether to 
wait at all)
 +      and block_for_peers_in_remote_dcs can be used to also block on all but 
one node in
 +      each remote DC as well. See CASSANDRA-14297 and CASSANDRA-13993 for 
more information.
 +    - *Experimental* support for Transient Replication and Cheap Quorums 
introduced by CASSANDRA-14404
 +      The intended audience for this functionality is expert users of 
Cassandra who are prepared
 +      to validate every aspect of the database for their application and 
deployment practices. Future
 +      releases of Cassandra will make this feature suitable for a wider 
audience.
 +    - *Experimental* support for Java 11 has been added. JVM options that 
differ between or are
 +      specific for Java 8 and 11 have been moved from jvm.options into 
jvm8.options and jvm11.options.
 +      IMPORTANT: Running C* on Java 11 is *experimental* and you do so at your 
own risk.
 +    - LCS now respects the max_threshold parameter when compacting - this was 
hard coded to 32
 +      before, but now it is possible to do bigger compactions when compacting 
from L0 to L1.
 +      This also applies to STCS-compactions in L0 - if there are more than 32 
sstables in L0
 +      we will compact at most max_threshold sstables in an L0 STCS 
compaction. See CASSANDRA-14388
 +      for more information.
 +    - There is now an option to automatically upgrade sstables after 
Cassandra upgrade, enable
 +      either in `cassandra.yaml:automatic_sstable_upgrade` or via JMX during 
runtime. See
 +      CASSANDRA-14197.
 +    - `nodetool refresh` has been deprecated in favour of `nodetool import` - 
see CASSANDRA-6719
 +      for details
 +    - An experimental option to compare all merkle trees together has been 
added - for example, in
 +      a 3 node cluster with 2 replicas identical and 1 out-of-date, with this 
option enabled, the
 +      out-of-date replica will only stream a single copy from up-to-date 
replica. Enable it by adding
 +      "-os" to nodetool repair. See CASSANDRA-3200.
 +    - The currentTimestamp, currentDate, currentTime and currentTimeUUID 
functions have been added.
 +      See CASSANDRA-13132
 +    - Support for arithmetic operations between `timestamp`/`date` and 
`duration` has been added.
 +      See CASSANDRA-11936
 +    - Support for arithmetic operations on number has been added. See 
CASSANDRA-11935
 +    - Preview expected streaming required for a repair (nodetool repair 
--preview), and validate the
 +      consistency of repaired data between nodes (nodetool repair 
--validate). See CASSANDRA-13257
 +    - Support for selecting Map values and Set elements has been added for 
SELECT queries. See CASSANDRA-7396
 +    - Change-Data-Capture has been modified to make CommitLogSegments 
available
 +      immediately upon creation via hard-linking the files. This means that 
incomplete
 +      segments will be available in cdc_raw rather than fully flushed. See 
documentation
 +      and CASSANDRA-12148 for more detail.
 +    - The initial build of materialized views can be parallelized. The number 
of concurrent builder
 +      threads is specified by the property 
`cassandra.yaml:concurrent_materialized_view_builders`.
 +      This property can be modified at runtime through both JMX and the new 
`setconcurrentviewbuilders`
 +      and `getconcurrentviewbuilders` nodetool commands. See CASSANDRA-12245 
for more details.
 +    - There is now a binary full query log based on Chronicle Queue that can 
be controlled using
 +      nodetool enablefullquerylog, disablefullquerylog, and 
resetfullquerylog. The log
 +      contains all queries invoked, approximate time they were invoked, any 
parameters necessary
 +      to bind wildcard values, and all query options. A human readable 
version of the log can be
 +      dumped or tailed using the new bin/fqltool utility. The full query log 
is designed to be safe
 +      to use in production and limits utilization of heap memory and disk 
space with limits
 +      you can specify when enabling the log.
 +      See nodetool and fqltool help text for more information.
 +    - SSTableDump now supports the -l option to output each partition as its 
own json object
 +      See CASSANDRA-13848 for more detail
 +    - Metric for coordinator writes per table has been added. See 
CASSANDRA-14232
 +    - Nodetool cfstats now has options to sort by various metrics as well as 
limit results.
 +    - Operators can restrict login user activity to one or more datacenters. 
See `network_authorizer`
 +      in cassandra.yaml, and the docs for create and alter role statements. 
CASSANDRA-13985
 +    - Roles altered from login=true to login=false will prevent existing 
connections from executing any
 +      statements after the cache has been refreshed. CASSANDRA-13985
 +    - Support for audit logging of database activity. If enabled, logs every 
incoming
 +      CQL command request, Authentication (successful as well as unsuccessful 
login) to a node.
 +    - Faster streaming of entire SSTables using ZeroCopy APIs. If enabled, 
Cassandra will use stream
 +      entire SSTables, significantly speeding up transfers. Any streaming 
related operations will see
 +      corresponding improvement. See CASSANDRA-14556.
 +    - NetworkTopologyStrategy now supports auto-expanding the 
replication_factor
 +      option into all available datacenters at CREATE or ALTER time. For 
example,
 +      specifying replication_factor: 3 translates to three replicas in every
 +      datacenter. This auto-expansion will _only add_ datacenters for safety.
 +      See CASSANDRA-14303 for more details.
 +    - Added Python 3 support so cqlsh and cqlshlib is now compatible with 
Python 2.7 and Python 3.6.
 +      Added --python option to cqlsh so users can specify the path to their 
chosen Python interpreter.
 +      See CASSANDRA-10190 for details.
 +    - Support for server side DESCRIBE statements has been added. See 
CASSANDRA-14825
 +    - It is now possible to rate limit snapshot creation/clearing. See 
CASSANDRA-13019
 +
  Upgrading
  ---------
 +    - SSTables for tables using a frozen UDT written by C* 3.0 appear as 
corrupted.
 +
 +      Background: The serialization-header in the -Statistics.db sstable 
component contains the type information
 +      of the table columns. C* 3.0 wrote incorrect type information for 
frozen UDTs by omitting the
 +      "frozen" information. Non-frozen UDTs were introduced by CASSANDRA-7423 
in C* 3.6. Since then, the missing
 +      "frozen" information leads to deserialization issues that result in 
CorruptSSTableExceptions, potentially other
 +      exceptions as well.
 +
 +      As a mitigation, the sstable serialization-headers are rewritten to 
contain the missing "frozen" information for
 +      UDTs once, when an upgrade from C* 3.0 is detected. This migration does 
not touch snapshots or backups.
 +
 +      The sstablescrub tool now performs a check of the sstable 
serialization-header against the schema. A mismatch of
 +      the types in the serialization-header and the schema will cause 
sstablescrub to error out and stop by default.
 +      See the new `-e` option. `-e off` disables the new validation code. `-e 
fix` or `-e fix-only`, e.g.
 +      `sstablescrub -e fix keyspace table`, will validate the 
serialization-header, rewrite the non-frozen UDTs
 +      in the serialization-header to frozen UDTs, if that matches the schema, 
and continue with scrub.
 +      See `sstablescrub -h`.
 +      (CASSANDRA-15035)
 +    - CASSANDRA-13241 lowered the default chunk_length_in_kb for compressed 
tables from
 +      64kb to 16kb. For highly compressible data this can have a noticeable 
impact
 +      on space utilization. You may want to consider manually specifying this 
value.
 +    - Additional columns have been added to system_distributed.repair_history,
 +      system_traces.sessions and system_traces.events. As a result select 
queries
 +      against these tables - including queries against tracing tables 
performed
 +      automatically by the drivers and cqlsh - will fail and generate an 
error in the log
 +      during upgrade when the cluster is mixed version. On 3.x side this will 
also lead
 +      to broken internode connections and lost messages.
 +      Cassandra versions 3.0.20 and 3.11.6 pre-add these columns (see 
CASSANDRA-15385),
 +      so please make sure to upgrade to those versions or higher before 
upgrading to
 +      4.0 for query tracing to not cause any issues during the upgrade to 4.0.
 +    - Timestamp ties between values resolve differently: if either value has 
a TTL,
 +      this value always wins. This is to provide consistent reconciliation 
before
 +      and after the value expires into a tombstone.
 +    - Cassandra 4.0 removed support for COMPACT STORAGE tables. All Compact 
Tables
 +      have to be migrated using `ALTER ... DROP COMPACT STORAGE` statement in 
3.0/3.11.
 +      Cassandra starting 4.0 will not start if flags indicate that the table 
is non-CQL.
 +      Syntax for creating compact tables is also deprecated.
 +    - Support for legacy auth tables in the system_auth keyspace (users,
 +      permissions, credentials) and the migration code has been removed. 
Migration
 +      of these legacy auth tables must have been completed before the upgrade 
to
 +      4.0 and the legacy tables must have been removed. See the 'Upgrading' 
section
 +      for version 2.2 for migration instructions.
 +    - Cassandra 4.0 removed support for the deprecated Thrift interface. 
Amongst
 +      other things, this implies the removal of all yaml options related to 
thrift
 +      ('start_rpc', rpc_port, ...).
 +    - Cassandra 4.0 removed support for any pre-3.0 format. This means you
 +      cannot upgrade from a 2.x version to 4.0 directly, you have to upgrade 
to
 +      a 3.0.x/3.x version first (and run upgradesstable). In particular, this
 +      mean Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you
 +      will need to upgrade those sstable in 3.0.x/3.x first.
 +    - Upgrades from 3.0.x or 3.x are supported since 3.0.13 or 3.11.0, 
previous
 +      versions will causes issues during rolling upgrades (CASSANDRA-13274).
 +    - Cassandra will no longer allow invalid keyspace replication options, 
such
 +      as invalid datacenter names for NetworkTopologyStrategy. Operators MUST
 +      add new nodes to a datacenter before they can set ALTER or CREATE
 +      keyspace replication policies using that datacenter. Existing keyspaces
 +      will continue to operate, but CREATE and ALTER will validate that all
 +      datacenters specified exist in the cluster.
 +    - Cassandra 4.0 fixes a problem with incremental repair which caused 
repaired
 +      data to be inconsistent between nodes. The fix changes the behavior of 
both
 +      full and incremental repairs. For full repairs, data is no longer marked
 +      repaired. For incremental repairs, anticompaction is run at the 
beginning
 +      of the repair, instead of at the end. If incremental repair was being 
used
 +      prior to upgrading, a full repair should be run after upgrading to 
resolve
 +      any inconsistencies.
 +    - Config option index_interval has been removed (it was deprecated since 
2.0)
 +    - Deprecated repair JMX APIs are removed.
 +    - The version of snappy-java has been upgraded to 1.1.2.6
 +    - The minimum value for internode message timeouts is 10ms. Previously, 
any
 +      positive value was allowed. See cassandra.yaml entries like
 +      read_request_timeout_in_ms for more details.
 +    - Cassandra 4.0 allows a single port to be used for both secure and 
insecure
 +      connections between cassandra nodes (CASSANDRA-10404). See the yaml for
 +      specific property changes, and see the security doc for full details.
 +    - Due to the parallelization of the initial build of materialized views,
 +      the per token range view building status is stored in the new table
 +      `system.view_builds_in_progress`. The old table 
`system.views_builds_in_progress`
 +      is no longer used and can be removed. See CASSANDRA-12245 for more 
details.
 +    - Config option commitlog_sync_batch_window_in_ms has been deprecated as 
its
 +      documentation has been incorrect and the setting itself near useless.
 +      Batch mode remains a valid commit log mode, however.
 +    - There is a new commit log mode, group, which is similar to batch mode
 +      but blocks for up to a configurable number of milliseconds between disk 
flushes.
 +    - nodetool clearsnapshot now requires the --all flag to remove all 
snapshots.
 +      Previous behavior would delete all snapshots by default.
 +    - Nodes are now identified by a combination of IP, and storage port.
 +      Existing JMX APIs, nodetool, and system tables continue to work
 +      and accept/return just an IP, but there is a new
 +      version of each that works with the full unambiguous identifier.
 +      You should prefer these over the deprecated ambiguous versions that only
 +      work with an IP. This was done to support multiple instances per IP.
 +      Additionally we are moving to only using a single port for encrypted and
 +      unencrypted traffic and if you want multiple instances per IP you must
 +      first switch encrypted traffic to the storage port and not a separate
 +      encrypted port. If you want to use multiple instances per IP
 +      with SSL you will need to use StartTLS on storage_port and set
 +      outgoing_encrypted_port_source to gossip so outbound connections
 +      know what port to connect to for each instance. Before changing
 +      storage port or native port at nodes you must first upgrade the entire 
cluster
 +      and clients to 4.0 so they can handle the port not being consistent 
across
 +      the cluster.
 +    - Names of AWS regions/availability zones have been cleaned up to more 
correctly
 +      match the Amazon names. There is now a new option in 
conf/cassandra-rackdc.properties
 +      that lets users enable the correct names for new clusters, or use the 
legacy
 +      names for existing clusters. See conf/cassandra-rackdc.properties for 
details.
 +    - Background repair has been removed. dclocal_read_repair_chance and
 +      read_repair_chance table options have been removed and are now rejected.
 +      See CASSANDRA-13910 for details.
 +    - Internode TCP connections that do not ack segments for 30s will now
 +      be automatically detected and closed via the Linux TCP_USER_TIMEOUT
 +      socket option. This should be exceedingly rare, but AWS networks (and
 +      other stateful firewalls) apparently suffer from this issue. You can
 +      tune the timeouts on TCP connection and segment ack via the
 +      `cassandra.yaml:internode_tcp_connect_timeout_in_ms` and
 +      `cassandra.yaml:internode_tcp_user_timeout_in_ms` options respectively.
 +      See CASSANDRA-14358 for details.
 +    - repair_session_space_in_mb setting has been added to cassandra.yaml to 
allow operators to reduce
 +      merkle tree size if repair is creating too much heap pressure. The 
repair_session_max_tree_depth
 +      setting added in 3.0.19 and 3.11.5 is deprecated in favor of this 
setting. See CASSANDRA-14096
 +    - The flags 'enable_materialized_views' and 'enable_sasi_indexes' in 
cassandra.yaml
 +      have been set as false by default. Operators should modify them to 
allow the
 +      creation of new views and SASI indexes, the existing ones will continue 
working.
 +      See CASSANDRA-14866 for details.
 +    - CASSANDRA-15216 - The flag 'cross_node_timeout' has been set as true by 
default.
 +      This change is done under the assumption that users have setup NTP on
 +      their clusters or otherwise synchronize their clocks, and that clocks 
are
 +      mostly in sync, since this is a requirement for general correctness of
 +      last write wins.
 +    - CASSANDRA-15257 removed the joda time dependency.  Any time formats
 +      passed will now need to conform to java.time.format.DateTimeFormatter.
 +      Most notably, days and months must be two digits, and years exceeding
 +      four digits need to be prefixed with a plus or minus sign.
 +    - cqlsh now returns a non-zero code in case of errors. This is a backward 
incompatible change so it may
 +      break existing scripts that rely on the current behavior. See 
CASSANDRA-15623 for more details.
 +    - Updated the default compaction_throughput_mb_per_sec to 64. The 
original
 +      default (16) was meant for spinning disk volumes.  See CASSANDRA-14902 
for details.
 +    - Custom compaction strategies must now handle getting sstables 
added/removed notifications for
 +      sstables already added/removed - see CASSANDRA-14103 for details.
 +    - Support for JNA with glibc 2.6 and earlier has been removed. Centos 5, 
Debian 4, and Ubuntu 7.10 operating systems
 +      must be first upgraded. See CASSANDRA-16212 for more.
+     - In cassandra.yaml, num_tokens must be defined if initial_token is 
defined.
+       If it is not defined, or not equal to the number of tokens defined in 
initial_token,
+       the node will not start. See CASSANDRA-14477 for details.
  
 -3.11.9
 -======
 -Upgrading
 ----------
 -   - Custom compaction strategies must handle getting sstables added/removed 
notifications for
 -     sstables already added/removed - see CASSANDRA-14103 for details. This 
has been a requirement
 -     for correct operation since 3.11.0 due to an issue in 
CompactionStrategyManager.
  
 -3.11.7
 -======
 +Deprecation
 +-----------
  
 -Upgrading
 ----------
 -    - Nothing specific to this release, but please see previous upgrading 
sections,
 -      especially if you are upgrading from 3.0.
 +    - The JMX MBean org.apache.cassandra.db:type=BlacklistedDirectories has 
been
 +      deprecated in favor of 
org.apache.cassandra.db:type=DisallowedDirectories
 +      and will be removed in a subsequent major version.
 +
 +
 +Materialized Views
 +-------------------
 +    - Following a discussion regarding concerns about the design and safety 
of Materialized Views, the C* development
 +      community no longer recommends them for production use, and considers 
them experimental. Warnings messages will
 +      now be logged when they are created. (See 
https://www.mail-archive.com/dev@cassandra.apache.org/msg11511.html)
 +    - An 'enable_materialized_views' flag has been added to cassandra.yaml to 
allow operators to prevent creation of
 +      views
 +    - CREATE MATERIALIZED VIEW syntax has become stricter. Partition key 
columns are no longer implicitly considered
 +      to be NOT NULL, and no base primary key columns get automatically 
included in view definition. You have to
 +      specify them explicitly now.
  
  3.11.6
  ======
diff --cc src/java/org/apache/cassandra/config/Config.java
index 759a41a,e68b6be..cde7d53
--- a/src/java/org/apache/cassandra/config/Config.java
+++ b/src/java/org/apache/cassandra/config/Config.java
@@@ -83,13 -78,9 +83,13 @@@ public class Confi
  
      /* initial token in the ring */
      public String initial_token;
-     public int num_tokens = 1;
+     public Integer num_tokens;
      /** Triggers automatic allocation of tokens if set, using the replication 
strategy of the referenced keyspace */
      public String allocate_tokens_for_keyspace = null;
 +    /** Triggers automatic allocation of tokens if set, based on the provided 
replica count for a datacenter */
 +    public Integer allocate_tokens_for_local_replication_factor = null;
 +
 +    public long native_transport_idle_timeout_in_ms = 0L;
  
      public volatile long request_timeout_in_ms = 10000L;
  
diff --cc src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index 2be169a,cbf42b9..2eac706
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@@ -352,9 -327,13 +352,9 @@@ public class DatabaseDescripto
  
          applyAddressConfig();
  
 -        applyThriftHSHA();
 -
          applySnitch();
  
-         applyInitialTokens();
 -        applyRequestScheduler();
 -
+         applyTokensConfig();
  
          applySeedProvider();
  
@@@ -674,13 -616,7 +674,13 @@@
          if (conf.concurrent_compactors <= 0)
              throw new ConfigurationException("concurrent_compactors should be 
strictly greater than 0, but was " + conf.concurrent_compactors, false);
  
 +        applyConcurrentValidations(conf);
 +        applyRepairCommandPoolSize(conf);
 +
 +        if (conf.concurrent_materialized_view_builders <= 0)
 +            throw new 
ConfigurationException("concurrent_materialized_view_builders should be 
strictly greater than 0, but was " + 
conf.concurrent_materialized_view_builders, false);
 +
-         if (conf.num_tokens > MAX_NUM_TOKENS)
+         if (conf.num_tokens != null && conf.num_tokens > MAX_NUM_TOKENS)
              throw new ConfigurationException(String.format("A maximum number 
of %d tokens per node is supported", MAX_NUM_TOKENS), false);
  
          try
@@@ -1024,58 -942,12 +1024,63 @@@
              throw new ConfigurationException("The seed provider lists no 
seeds.", false);
      }
  
 +    @VisibleForTesting
 +    static void checkForLowestAcceptedTimeouts(Config conf)
 +    {
 +        if(conf.read_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("read_request_timeout_in_ms", 
conf.read_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.read_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.range_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("range_request_timeout_in_ms", 
conf.range_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.range_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("request_timeout_in_ms", conf.request_timeout_in_ms, 
LOWEST_ACCEPTED_TIMEOUT);
 +           conf.request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.write_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("write_request_timeout_in_ms", 
conf.write_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.write_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.cas_contention_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("cas_contention_timeout_in_ms", 
conf.cas_contention_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.cas_contention_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.counter_write_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("counter_write_request_timeout_in_ms", 
conf.counter_write_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.counter_write_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +
 +        if(conf.truncate_request_timeout_in_ms < LOWEST_ACCEPTED_TIMEOUT)
 +        {
 +           logInfo("truncate_request_timeout_in_ms", 
conf.truncate_request_timeout_in_ms, LOWEST_ACCEPTED_TIMEOUT);
 +           conf.truncate_request_timeout_in_ms = LOWEST_ACCEPTED_TIMEOUT;
 +        }
 +    }
 +
 +    private static void logInfo(String property, long actualValue, long 
lowestAcceptedValue)
 +    {
 +        logger.info("found {}::{} less than lowest acceptable value {}, 
continuing with {}", property, actualValue, lowestAcceptedValue, 
lowestAcceptedValue);
 +    }
 +
-     public static void applyInitialTokens()
+     public static void applyTokensConfig()
+     {
+         applyTokensConfig(conf);
+     }
+ 
+     static void applyTokensConfig(Config conf)
      {
          if (conf.initial_token != null)
          {
@@@ -1086,8 -968,53 +1101,12 @@@
              for (String token : tokens)
                  partitioner.getTokenFactory().validate(token);
          }
+         else if (conf.num_tokens == null)
+         {
+             conf.num_tokens = 1;
+         }
      }
  
 -    // Maybe safe for clients + tools
 -    public static void applyRequestScheduler()
 -    {
 -        /* Request Scheduler setup */
 -        requestSchedulerOptions = conf.request_scheduler_options;
 -        if (conf.request_scheduler != null)
 -        {
 -            try
 -            {
 -                if (requestSchedulerOptions == null)
 -                {
 -                    requestSchedulerOptions = new RequestSchedulerOptions();
 -                }
 -                Class<?> cls = Class.forName(conf.request_scheduler);
 -                requestScheduler = (IRequestScheduler) 
cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions);
 -            }
 -            catch (ClassNotFoundException e)
 -            {
 -                throw new ConfigurationException("Invalid Request Scheduler 
class " + conf.request_scheduler, false);
 -            }
 -            catch (Exception e)
 -            {
 -                throw new ConfigurationException("Unable to instantiate 
request scheduler", e);
 -            }
 -        }
 -        else
 -        {
 -            requestScheduler = new NoScheduler();
 -        }
 -
 -        if (conf.request_scheduler_id == RequestSchedulerId.keyspace)
 -        {
 -            requestSchedulerId = conf.request_scheduler_id;
 -        }
 -        else
 -        {
 -            // Default to Keyspace
 -            requestSchedulerId = RequestSchedulerId.keyspace;
 -        }
 -    }
 -
      // definitely not safe for tools + clients - implicitly instantiates 
StorageService
      public static void applySnitch()
      {
diff --cc test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
index 181adbf,6e865ae..7dc84f6
--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
+++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorRefTest.java
@@@ -257,7 -216,8 +257,7 @@@ public class DatabaseDescriptorRefTes
          for (String methodName : new String[]{
              "clientInitialization",
              "applyAddressConfig",
-             "applyInitialTokens",
 -            "applyThriftHSHA",
+             "applyTokensConfig",
              // no seed provider in default configuration for clients
              // "applySeedProvider",
              // definitely not safe for clients - implicitly instantiates 
schema
diff --cc test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
index 2992a60,4c6b3d7..1995103
--- a/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
+++ b/test/unit/org/apache/cassandra/config/DatabaseDescriptorTest.java
@@@ -436,70 -323,80 +436,148 @@@ public class DatabaseDescriptorTes
      }
  
      @Test
 +    public void testCalculateDefaultSpaceInMB()
 +    {
 +        // check the calculated lower limit is used for a small storage volume
 +        int preferredInMB = 667;
 +        int numerator = 2;
 +        int denominator = 3;
 +        int spaceInBytes = 999 * 1024 * 1024;
 +
 +        assertEquals(666, // total size is less than preferred, so return 
lower limit
 +                     DatabaseDescriptor.calculateDefaultSpaceInMB("type", 
"/path", "setting_name", preferredInMB, spaceInBytes, numerator, denominator));
 +
 +        // check preferred size is used for a small storage volume
 +        preferredInMB = 100;
 +        numerator = 1;
 +        denominator = 3;
 +        spaceInBytes = 999 * 1024 * 1024;
 +
 +        assertEquals(100, // total size is more than preferred so keep the 
configured limit
 +                     DatabaseDescriptor.calculateDefaultSpaceInMB("type", 
"/path", "setting_name", preferredInMB, spaceInBytes, numerator, denominator));
 +    }
 +
 +    @Test
 +    public void testConcurrentValidations()
 +    {
 +        Config conf = new Config();
 +        conf.concurrent_compactors = 8;
 +        // if concurrent_validations is < 1 (including being unset) it should 
default to concurrent_compactors
 +        assertThat(conf.concurrent_validations).isLessThan(1);
 +        DatabaseDescriptor.applyConcurrentValidations(conf);
 +        
assertThat(conf.concurrent_validations).isEqualTo(conf.concurrent_compactors);
 +
 +        // otherwise, it must be <= concurrent_compactors
 +        conf.concurrent_validations = conf.concurrent_compactors + 1;
 +        try
 +        {
 +            DatabaseDescriptor.applyConcurrentValidations(conf);
 +            fail("Expected exception");
 +        }
 +        catch (ConfigurationException e)
 +        {
 +            assertThat(e.getMessage()).isEqualTo("To set 
concurrent_validations > concurrent_compactors, " +
 +                                                 "set the system property 
cassandra.allow_unlimited_concurrent_validations=true");
 +        }
 +
 +        // unless we disable that check (done with a system property at 
startup or via JMX)
 +        DatabaseDescriptor.allowUnlimitedConcurrentValidations = true;
 +        conf.concurrent_validations = conf.concurrent_compactors + 1;
 +        DatabaseDescriptor.applyConcurrentValidations(conf);
 +        
assertThat(conf.concurrent_validations).isEqualTo(conf.concurrent_compactors + 
1);
 +    }
 +
 +    @Test
 +    public void testRepairCommandPoolSize()
 +    {
 +        Config conf = new Config();
 +        conf.concurrent_validations = 3;
 +        // if repair_command_pool_size is < 1 (including being unset) it 
should default to concurrent_validations
 +        assertThat(conf.repair_command_pool_size).isLessThan(1);
 +        DatabaseDescriptor.applyRepairCommandPoolSize(conf);
 +        
assertThat(conf.repair_command_pool_size).isEqualTo(conf.concurrent_validations);
 +
 +        // but it can be overridden
 +        conf.repair_command_pool_size = conf.concurrent_validations + 1;
 +        DatabaseDescriptor.applyRepairCommandPoolSize(conf);
 +        
assertThat(conf.repair_command_pool_size).isEqualTo(conf.concurrent_validations 
+ 1);
 +    }
++
++    @Test
+     public void 
testApplyInitialTokensInitialTokensSetNumTokensSetAndDoesMatch()
+     {
+         Config config = DatabaseDescriptor.loadConfig();
+         config.initial_token = "0,256,1024";
+         config.num_tokens = 3;
+ 
+         try
+         {
+             DatabaseDescriptor.applyTokensConfig(config);
+             Assert.assertEquals(Integer.valueOf(3), config.num_tokens);
+             Assert.assertEquals(3, 
DatabaseDescriptor.tokensFromString(config.initial_token).size());
+         }
+         catch (ConfigurationException e)
+         {
+             Assert.fail("number of tokens in initial_token=0,256,1024 does 
not match num_tokens = 3");
+         }
+     }
+ 
+     @Test
+     public void 
testApplyInitialTokensInitialTokensSetNumTokensSetAndDoesntMatch()
+     {
+         Config config = DatabaseDescriptor.loadConfig();
+         config.initial_token = "0,256,1024";
+         config.num_tokens = 10;
+ 
+         try
+         {
+             DatabaseDescriptor.applyTokensConfig(config);
+ 
+             Assert.fail("initial_token = 0,256,1024 and num_tokens = 10 but 
applyInitialTokens() did not fail!");
+         }
+         catch (ConfigurationException ex)
+         {
+             Assert.assertEquals("The number of initial tokens (by 
initial_token) specified (3) is different from num_tokens value (10)",
+                                 ex.getMessage());
+         }
+     }
+ 
+     @Test
+     public void testApplyInitialTokensInitialTokensSetNumTokensNotSet()
+     {
+         Config config = DatabaseDescriptor.loadConfig();
+         config.initial_token = "0,256,1024";
+ 
+         try
+         {
+             DatabaseDescriptor.applyTokensConfig(config);
+             Assert.fail("setting initial_token and not setting num_tokens is 
invalid");
+         }
+         catch (ConfigurationException ex)
+         {
+             Assert.assertEquals("initial_token was set but num_tokens is 
not!", ex.getMessage());
+         }
+     }
+ 
+     @Test
+     public void testApplyInitialTokensInitialTokensNotSetNumTokensSet()
+     {
+         Config config = DatabaseDescriptor.loadConfig();
+         config.num_tokens = 3;
+ 
+         DatabaseDescriptor.applyTokensConfig(config);
+ 
+         Assert.assertEquals(Integer.valueOf(3), config.num_tokens);
+         
Assert.assertTrue(DatabaseDescriptor.tokensFromString(config.initial_token).isEmpty());
+     }
+ 
+     @Test
+     public void testApplyInitialTokensInitialTokensNotSetNumTokensNotSet()
+     {
+         Config config = DatabaseDescriptor.loadConfig();
+         DatabaseDescriptor.applyTokensConfig(config);
+ 
+         Assert.assertEquals(Integer.valueOf(1), config.num_tokens);
+         
Assert.assertTrue(DatabaseDescriptor.tokensFromString(config.initial_token).isEmpty());
+     }
  }


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org

Reply via email to