Merge branch 'cassandra-3.0' into trunk

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d45f323e
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d45f323e
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d45f323e

Branch: refs/heads/trunk
Commit: d45f323eb972c6fec146e5cfa84fdc47eb8aa5eb
Parents: b80ef9b f2c5ad7
Author: Jeff Jirsa <jeff.ji...@crowdstrike.com>
Authored: Tue Sep 27 18:16:55 2016 -0700
Committer: Jeff Jirsa <jeff.ji...@crowdstrike.com>
Committed: Tue Sep 27 18:26:17 2016 -0700

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 NEWS.txt                                        |  3 +
 .../locator/AbstractReplicationStrategy.java    |  2 +-
 .../locator/NetworkTopologyStrategy.java        | 41 ++++++++++++++
 .../org/apache/cassandra/cql3/CQLTester.java    | 11 ++++
 .../validation/entities/SecondaryIndexTest.java | 10 ----
 .../cql3/validation/operations/AlterTest.java   | 47 +++++++++++++++-
 .../cql3/validation/operations/CreateTest.java  | 59 ++++++++++++++++++++
 .../apache/cassandra/dht/BootStrapperTest.java  |  8 +++
 .../org/apache/cassandra/service/MoveTest.java  |  9 ++-
 10 files changed, 176 insertions(+), 15 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 75e7d2a,6edc491..7a5d73a
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -88,58 -33,12 +88,59 @@@ Merged from 3.0
   * Disk failure policy should not be invoked on out of space (CASSANDRA-12385)
   * Calculate last compacted key on startup (CASSANDRA-6216)
   * Add schema to snapshot manifest, add USING TIMESTAMP clause to ALTER TABLE statements (CASSANDRA-7190)
 + * If CF has no clustering columns, any row cache is full partition cache (CASSANDRA-12499)
++ * Reject invalid replication settings when creating or altering a keyspace (CASSANDRA-12681)
 +Merged from 2.2:
 + * Make Collections deserialization more robust (CASSANDRA-12618)
 + * Fix exceptions when enabling gossip on nodes that haven't joined the ring (CASSANDRA-12253)
 + * Fix authentication problem when invoking cqlsh copy from a SOURCE command (CASSANDRA-12642)
 + * Decrement pending range calculator jobs counter in finally block
 + * cqlshlib tests: increase default execute timeout (CASSANDRA-12481)
 + * Forward writes to replacement node when replace_address != broadcast_address (CASSANDRA-8523)
 + * Fail repair on non-existing table (CASSANDRA-12279)
 + * Enable repair -pr and -local together (fix regression of CASSANDRA-7450) (CASSANDRA-12522)
 +
 +
 +3.8, 3.9
 + * Fix value skipping with counter columns (CASSANDRA-11726)
 + * Fix nodetool tablestats missing SSTable count (CASSANDRA-12205)
 + * Fixed flaky SSTablesIteratedTest (CASSANDRA-12282)
 + * Fixed flaky SSTableRewriterTest: check file counts before calling validateCFS (CASSANDRA-12348)
 + * cqlsh: Fix handling of $$-escaped strings (CASSANDRA-12189)
 + * Fix SSL JMX requiring truststore containing server cert (CASSANDRA-12109)
 + * RTE from new CDC column breaks in flight queries (CASSANDRA-12236)
 + * Fix hdr logging for single operation workloads (CASSANDRA-12145)
 + * Fix SASI PREFIX search in CONTAINS mode with partial terms (CASSANDRA-12073)
 + * Increase size of flushExecutor thread pool (CASSANDRA-12071)
 + * Partial revert of CASSANDRA-11971, cannot recycle buffer in SP.sendMessagesToNonlocalDC (CASSANDRA-11950)
 + * Upgrade netty to 4.0.39 (CASSANDRA-12032, CASSANDRA-12034)
 + * Improve details in compaction log message (CASSANDRA-12080)
 + * Allow unset values in CQLSSTableWriter (CASSANDRA-11911)
 + * Chunk cache to request compressor-compatible buffers if pool space is exhausted (CASSANDRA-11993)
 + * Remove DatabaseDescriptor dependencies from SequentialWriter (CASSANDRA-11579)
 + * Move skip_stop_words filter before stemming (CASSANDRA-12078)
 + * Support seek() in EncryptedFileSegmentInputStream (CASSANDRA-11957)
 + * SSTable tools mishandling LocalPartitioner (CASSANDRA-12002)
 + * When SEPWorker assigned work, set thread name to match pool (CASSANDRA-11966)
 + * Add cross-DC latency metrics (CASSANDRA-11596)
 + * Allow terms in selection clause (CASSANDRA-10783)
 + * Add bind variables to trace (CASSANDRA-11719)
 + * Switch counter shards' clock to timestamps (CASSANDRA-9811)
 + * Introduce HdrHistogram and response/service/wait separation to stress tool (CASSANDRA-11853)
 + * entry-weighers in QueryProcessor should respect partitionKeyBindIndexes field (CASSANDRA-11718)
 + * Support older ant versions (CASSANDRA-11807)
 + * Estimate compressed on disk size when deciding if sstable size limit reached (CASSANDRA-11623)
 + * cassandra-stress profiles should support case sensitive schemas (CASSANDRA-11546)
 + * Remove DatabaseDescriptor dependency from FileUtils (CASSANDRA-11578)
 + * Faster streaming (CASSANDRA-9766)
 + * Add prepared query parameter to trace for "Execute CQL3 prepared query" session (CASSANDRA-11425)
 + * Add repaired percentage metric (CASSANDRA-11503)
 + * Add Change-Data-Capture (CASSANDRA-8844)
 +Merged from 3.0:
 + * Fix paging for 2.x to 3.x upgrades (CASSANDRA-11195)
   * Fix clean interval not sent to commit log for empty memtable flush (CASSANDRA-12436)
   * Fix potential resource leak in RMIServerSocketFactoryImpl (CASSANDRA-12331)
 - * Backport CASSANDRA-12002 (CASSANDRA-12177)
   * Make sure compaction stats are updated when compaction is interrupted (CASSANDRA-12100)
 - * Fix potential bad messaging service message for paged range reads
 -   within mixed-version 3.x clusters (CASSANDRA-12249)
   * Change commitlog and sstables to track dirty and clean intervals (CASSANDRA-11828)
   * NullPointerException during compaction on table with static columns (CASSANDRA-12336)
   * Fixed ConcurrentModificationException when reading metrics in GraphiteReporter (CASSANDRA-11823)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index ad0f2be,b97a420..9ab7c26
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -13,120 -13,19 +13,123 @@@ restore snapshots created with the prev
  'sstableloader' tool. You can upgrade the file format of your snapshots
  using the provided 'sstableupgrade' tool.
  
 -3.0.10
 -=====
 +3.10
 +====
  
 -Upgrading
 ----------
 -   - To protect against accidental data loss, cassandra no longer allows 
 -     users to set arbitrary datacenter names for NetworkTopologyStrategy. 
 -     Cassandra will allow users to continue using existing keyspaces
 -     with invalid datacenter names, but will validat DC names on CREATE and
 -     ALTER
 +New features
 +------------
 +   - Runtime modification of concurrent_compactors is now available via nodetool
 +   - Support for the assignment operators +=/-= has been added for update queries.
 +   - An Index implementation may now provide a task which runs prior to joining
 +     the ring. See CASSANDRA-12039
 +   - Filtering on partition key columns is now also supported for queries without
 +     secondary indexes.
 +   - A slow query log has been added: slow queries will be logged at DEBUG level.
 +     For more details refer to CASSANDRA-12403 and slow_query_log_timeout_in_ms
 +     in cassandra.yaml.
 +   - Support for GROUP BY queries has been added.
 +   - A new compaction-stress tool has been added to test the throughput of compaction
 +     for any cassandra-stress user schema.  See compaction-stress help for how to use it.
 +   - Compaction can now take into account overlapping tables that don't take part
 +     in the compaction to look for deleted or overwritten data in the compacted tables.
 +     When such data is found, it can be safely discarded, which in turn should enable
 +     the removal of tombstones over that data.
 +
 +     The behavior can be engaged in two ways:
 +       - as a "nodetool garbagecollect -g CELL/ROW" operation, which applies
 +         single-table compaction on all sstables to discard deleted data in one step.
 +       - as a "provide_overlapping_tombstones:CELL/ROW/NONE" compaction strategy flag,
 +         which uses overlapping tables as a source of deletions/overwrites during all
 +         compactions.
 +     The argument specifies the granularity at which deleted data is to be found:
 +       - If ROW is specified, only whole deleted rows (or sets of rows) will be
 +         discarded.
 +       - If CELL is specified, any columns whose value is overwritten or deleted
 +         will also be discarded.
 +       - NONE (default) specifies the old behavior: overlapping tables are not used to
 +         decide when to discard data.
 +     Which option to use depends on your workload; both ROW and CELL increase the
 +     disk load on compaction (especially with the size-tiered compaction strategy),
 +     with CELL being more resource-intensive. Both should lead to better read
 +     performance if deleting rows (resp. overwriting or deleting cells) is common.
 +   - Prepared statements are now persisted in the table prepared_statements in
 +     the system keyspace. Upon startup, this table is used to preload all
 +     previously prepared statements - i.e. in many cases clients do not need to
 +     re-prepare statements against restarted nodes.
 +   - cqlsh can now connect to older Cassandra versions by downgrading the native
 +     protocol version. Please note that this is currently not part of our release
 +     testing and, as a consequence, it is not guaranteed to work in all cases.
 +     See CASSANDRA-12150 for more details.
 +   - Snapshots that are automatically taken before a table is dropped or truncated
 +     will have a "dropped" or "truncated" prefix on their snapshot tag name.
 +   - Metrics are exposed for successful and failed authentication attempts.
 +     These can be located using the object names org.apache.cassandra.metrics:type=Client,name=AuthSuccess
 +     and org.apache.cassandra.metrics:type=Client,name=AuthFailure respectively.
 +   - Add support to "unset" JSON fields in prepared statements by specifying DEFAULT UNSET.
 +     See CASSANDRA-11424 for details
 +   - Allow TTL with null value on insert and update. It will be treated as equivalent to inserting a 0.
 +
 +Upgrading
 +---------
 +    - Request timeouts in cassandra.yaml (read_request_timeout_in_ms, etc) now apply to the
 +      "full" request time on the coordinator.  Previously, they only covered the time from
 +      when the coordinator sent a message to a replica until the time that the replica
 +      responded.  Additionally, the previous behavior was to reset the timeout when performing
 +      a read repair, making a second read to fix a short read, and when subranges were read
 +      as part of a range scan or secondary index query.  In 3.10 and higher, the timeout
 +      is no longer reset for these "subqueries".  The entire request must complete within
 +      the specified timeout.  As a consequence, your timeouts may need to be adjusted
 +      to account for this.  See CASSANDRA-12256 for more details.
 +    - Logs written to stdout are now consistent with logs written to files.
 +      Time is now local (it was UTC on the console and local in files). Date, thread, file
 +      and line info were added to stdout. (see CASSANDRA-12004)
 +    - The 'clientutil' jar, which has been somewhat broken on the 3.x branch, is no longer provided.
 +      The features provided by that jar are provided by any good java driver and we advise relying on drivers rather than on
 +      that jar, but if you need that jar for backward compatibility until you do so, you should use the version provided
 +      on a previous Cassandra branch, like the 3.0 branch (by design, the functionality provided by that jar is stable
 +      across versions, so using the 3.0 jar for a client connecting to 3.x should work without issues).
 +    - (Tools development) DatabaseDescriptor no longer implicitly starts up components/services like
 +      commit log replay. This may break existing 3rd party tools and clients. In order to start up
 +      a standalone tool or client application, use the DatabaseDescriptor.toolInitialization() or
 +      DatabaseDescriptor.clientInitialization() methods. Tool initialization sets up the partitioner,
 +      snitch, and encryption context. Client initialization just applies the configuration but does not
 +      set up anything. Instead of using Config.setClientMode() or Config.isClientMode(), which are
 +      now deprecated, use one of the appropriate new methods in DatabaseDescriptor.
 +    - Application layer keep-alives were added to the streaming protocol to prevent idle incoming connections from
 +      timing out and failing the stream session (CASSANDRA-11839). This effectively deprecates the streaming_socket_timeout_in_ms
 +      property in favor of streaming_keep_alive_period_in_secs. See cassandra.yaml for more details about this property.
++    - Cassandra will no longer allow invalid keyspace replication options, such as invalid datacenter names for
++      NetworkTopologyStrategy. Existing keyspaces will continue to operate, but CREATE and ALTER will validate that
++      all datacenters specified exist in the cluster.
 +
 +3.8
 +===
  
 -3.0.9
 -=====
 +New features
 +------------
 +   - Shared pool threads are now named according to the stage they are executing
 +     tasks for. Thread names mentioned in traced queries change accordingly.
 +   - A new option has been added to cassandra-stress "-rate fixed={number}/s"
 +     that forces a scheduled rate of operations/sec over time. Using this, stress can
 +     accurately account for coordinated omission from the stress process.
 +   - The cassandra-stress "-rate limit=" option has been renamed to "-rate throttle="
 +   - hdr histograms have been added to stress runs; their output can be saved to disk using the
 +     "-log hdrfile=" option. This histogram includes response/service/wait times when used with the
 +     fixed or throttle rate options.  The histogram file can be plotted on
 +     http://hdrhistogram.github.io/HdrHistogram/plotFiles.html
 +   - TimeWindowCompactionStrategy has been added. This has proven to be a better approach
 +     to time series compaction and new tables should use this instead of DTCS. See
 +     CASSANDRA-9666 for details.
 +   - Change-Data-Capture is now available. See cassandra.yaml for cdc-specific flags and
 +     a brief explanation of on-disk locations for archived data in CommitLog form. This can
 +     be enabled via ALTER TABLE ... WITH cdc=true.
 +     Upon flush, CommitLogSegments containing data for CDC-enabled tables are moved to
 +     the data/cdc_raw directory until removed by the user, and writes to CDC-enabled tables
 +     will be rejected with a WriteTimeoutException once cdc_total_space_in_mb is reached
 +     between unflushed CommitLogSegments and cdc_raw.
 +     NOTE: CDC is disabled by default in the .yaml file. Do not enable CDC on a mixed-version
 +     cluster, as it will lead to exceptions which can interrupt traffic. Once all nodes
 +     have been upgraded to 3.8 it is safe to enable this feature and restart the cluster.
  
  Upgrading
  ---------

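As a quick illustration of the garbage-collecting compaction note in the NEWS.txt hunk above, here is a minimal sketch in the CQLTester style used by the tests in this commit. The 'provide_overlapping_tombstones' option name and its ROW/CELL/NONE values are quoted from that note; the class name, test name, and table layout below are hypothetical and not part of this patch.

    import org.junit.Test;

    import org.apache.cassandra.cql3.CQLTester;

    public class TombstoneGCOptionExample extends CQLTester
    {
        @Test
        public void testEnableRowLevelTombstoneGC() throws Throwable
        {
            createTable("CREATE TABLE %s (k int, c int, v int, PRIMARY KEY (k, c))");

            // Per the NEWS.txt entry, the flag is set per table as a compaction option;
            // ROW discards whole deleted rows, CELL also discards overwritten cells.
            execute("ALTER TABLE %s WITH compaction = " +
                    "{'class': 'SizeTieredCompactionStrategy', 'provide_overlapping_tombstones': 'ROW'}");
        }
    }
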
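The "(Tools development)" upgrade note above names DatabaseDescriptor.toolInitialization() and clientInitialization() but shows no call site, so here is a minimal sketch of a standalone tool using it. The tool class itself is hypothetical; only the DatabaseDescriptor methods come from the note.

    import org.apache.cassandra.config.DatabaseDescriptor;

    public class MyOfflineTool   // hypothetical standalone tool
    {
        public static void main(String[] args)
        {
            // Replaces the old implicit bootstrap / Config.setClientMode(): sets up the
            // partitioner, snitch and encryption context without starting services
            // such as commit log replay.
            DatabaseDescriptor.toolInitialization();

            // ... tool logic goes here ...
        }
    }
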
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/src/java/org/apache/cassandra/locator/AbstractReplicationStrategy.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
index 756b689,78f5b06..442e6cf
--- a/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
+++ b/src/java/org/apache/cassandra/locator/NetworkTopologyStrategy.java
@@@ -27,8 -28,8 +28,9 @@@ import org.apache.cassandra.config.Data
  import org.apache.cassandra.exceptions.ConfigurationException;
  import org.apache.cassandra.dht.Token;
  import org.apache.cassandra.locator.TokenMetadata.Topology;
+ import org.apache.cassandra.service.StorageService;
  import org.apache.cassandra.utils.FBUtilities;
 +import org.apache.cassandra.utils.Pair;
  
  import com.google.common.collect.Multimap;
  
@@@ -214,16 -185,55 +216,55 @@@ public class NetworkTopologyStrategy ex
          return datacenters.keySet();
      }
  
 -    public void validateOptions() throws ConfigurationException
 -    {
 -        for (Entry<String, String> e : this.configOptions.entrySet())
 -        {
 -            if (e.getKey().equalsIgnoreCase("replication_factor"))
 -                throw new ConfigurationException("replication_factor is an option for SimpleStrategy, not NetworkTopologyStrategy");
 -            validateReplicationFactor(e.getValue());
 -        }
 -    }
 -
+     /*
+      * (non-javadoc) Method to generate the list of valid data center names to be used to validate the replication parameters during CREATE / ALTER keyspace operations.
+      * All peers of the current node are fetched from {@link TokenMetadata} and then a set is built by fetching the DC name of each peer.
+      * @return a set of valid DC names
+      */
+     private static Set<String> buildValidDataCentersSet()
+     {
+         final Set<String> validDataCenters = new HashSet<>();
+         final IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
+ 
+         // Add data center of localhost.
+         validDataCenters.add(snitch.getDatacenter(FBUtilities.getBroadcastAddress()));
+         // Fetch and add DCs of all peers.
+         for (final InetAddress peer : StorageService.instance.getTokenMetadata().getAllEndpoints())
+         {
+             validDataCenters.add(snitch.getDatacenter(peer));
+         }
+ 
+         return validDataCenters;
+     }
+ 
+     public Collection<String> recognizedOptions()
+     {
+         // only valid options are valid DC names.
+         return buildValidDataCentersSet();
+     }
+ 
+     protected void validateExpectedOptions() throws ConfigurationException
+     {
+         // Do not accept query with no data centers specified.
+         if (this.configOptions.isEmpty())
+         {
+             throw new ConfigurationException("Configuration for at least one datacenter must be present");
+         }
+ 
+         // Validate the data center names
+         super.validateExpectedOptions();
+     }
+ 
 +    public void validateOptions() throws ConfigurationException
 +    {
 +        for (Entry<String, String> e : this.configOptions.entrySet())
 +        {
 +            if (e.getKey().equalsIgnoreCase("replication_factor"))
 +                throw new ConfigurationException("replication_factor is an option for SimpleStrategy, not NetworkTopologyStrategy");
 +            validateReplicationFactor(e.getValue());
 +        }
 +    }
 +
      @Override
      public boolean hasSameSettings(AbstractReplicationStrategy other)
      {

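The matching two-line change to AbstractReplicationStrategy is not included in this message, so the following is only a hedged sketch of how the recognizedOptions() override above is presumably consumed: every configured replication option key must be one of the recognized names (for NetworkTopologyStrategy, the datacenters known to the snitch and TokenMetadata), otherwise the keyspace definition is rejected. The class, method, and message wording here are illustrative, not quoted from the patch.

    import java.util.Collection;
    import java.util.Map;

    import org.apache.cassandra.exceptions.ConfigurationException;

    final class ReplicationOptionCheckSketch
    {
        // Reject any option whose key is not a recognized name; with the override above,
        // the recognized names are the live DC names, so a typo'd or unknown DC fails fast.
        static void validateExpectedOptions(Map<String, String> configOptions,
                                            Collection<String> recognizedOptions) throws ConfigurationException
        {
            for (String key : configOptions.keySet())
            {
                if (!recognizedOptions.contains(key))
                    throw new ConfigurationException("Unrecognized replication option: " + key);
            }
        }
    }
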
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/cql3/CQLTester.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/CQLTester.java
index 3bb753f,69a0b79..29d51a8
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@@ -57,6 -56,8 +57,7 @@@ import org.apache.cassandra.dht.Murmur3
  import org.apache.cassandra.exceptions.ConfigurationException;
  import org.apache.cassandra.exceptions.SyntaxException;
  import org.apache.cassandra.io.util.FileUtils;
+ import org.apache.cassandra.locator.AbstractEndpointSnitch;
 -import org.apache.cassandra.locator.IEndpointSnitch;
  import org.apache.cassandra.serializers.TypeSerializer;
  import org.apache.cassandra.service.ClientState;
  import org.apache.cassandra.service.QueryState;
@@@ -113,8 -114,11 +116,16 @@@ public abstract class CQLTeste
          }
          PROTOCOL_VERSIONS = builder.build();
  
 -        // Once per-JVM is enough
 -        prepareServer();
 -
          nativeAddr = InetAddress.getLoopbackAddress();
  
++        // Register an EndpointSnitch which returns fixed values for test.
++        DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch()
++        {
++            @Override public String getRack(InetAddress endpoint) { return RACK1; }
++            @Override public String getDatacenter(InetAddress endpoint) { return DATA_CENTER; }
++            @Override public int compareEndpoints(InetAddress target, InetAddress a1, InetAddress a2) { return 0; }
++        });
++
          try
          {
              try (ServerSocket serverSocket = new ServerSocket(0))

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/cql3/validation/entities/SecondaryIndexTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
index c9be678,48108cd..672b6eb
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/AlterTest.java
@@@ -218,7 -218,7 +218,7 @@@ public class AlterTest extends CQLTeste
          assertRowsIgnoringOrderAndExtra(execute("SELECT keyspace_name, durable_writes, replication FROM system_schema.keyspaces"),
                     row(KEYSPACE, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
                     row(KEYSPACE_PER_TEST, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")),
-                    row(ks1, false, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", "dc1", "1")),
 -                   row(ks1, false, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER , "1")),
++                   row(ks1, false, map("class", "org.apache.cassandra.locator.NetworkTopologyStrategy", DATA_CENTER, "1")),
                     row(ks2, true, map("class", "org.apache.cassandra.locator.SimpleStrategy", "replication_factor", "1")));
  
          execute("USE " + ks1);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
index da0824f,0781169..4957b18
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/CreateTest.java
@@@ -23,9 -24,10 +24,10 @@@ import java.util.UUID
  
  import org.junit.Test;
  
 -
  import org.apache.cassandra.config.CFMetaData;
+ import org.apache.cassandra.config.DatabaseDescriptor;
  import org.apache.cassandra.config.Schema;
 +import org.apache.cassandra.config.SchemaConstants;
  import org.apache.cassandra.cql3.CQLTester;
  import org.apache.cassandra.db.Mutation;
  import org.apache.cassandra.db.partitions.Partition;
@@@ -495,6 -526,34 +526,34 @@@ public class CreateTest extends CQLTest
      }
  
      @Test
+     // tests CASSANDRA-4278
+     public void testHyphenDatacenters() throws Throwable
+     {
+         IEndpointSnitch snitch = DatabaseDescriptor.getEndpointSnitch();
+ 
+         // Register an EndpointSnitch which returns fixed values for test.
+         DatabaseDescriptor.setEndpointSnitch(new AbstractEndpointSnitch()
+         {
+             @Override
+             public String getRack(InetAddress endpoint) { return RACK1; }
+ 
+             @Override
+             public String getDatacenter(InetAddress endpoint) { return "us-east-1"; }
+
+             @Override
+             public int compareEndpoints(InetAddress target, InetAddress a1, InetAddress a2) { return 0; }
+         });
+
+         execute("CREATE KEYSPACE Foo WITH replication = { 'class' : 'NetworkTopologyStrategy', 'us-east-1' : 1 };");
+ 
+         // Restore the previous EndpointSnitch
+         DatabaseDescriptor.setEndpointSnitch(snitch);
+ 
 -        // Clean up
++        // clean up
+         execute("DROP KEYSPACE IF EXISTS Foo");
+     }
+ 
+     @Test
      // tests CASSANDRA-9565
      public void testDoubleWith() throws Throwable
      {

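A hypothetical companion to testHyphenDatacenters above (not part of this commit), sketching the negative case of CASSANDRA-12681: with the snitch registered by CQLTester reporting only DATA_CENTER, naming an unknown datacenter should now be refused. The method name, keyspace name, and the exact exception surfaced through execute() are assumptions.

    @Test
    public void testUnknownDatacenterRejected() throws Throwable
    {
        try
        {
            // 'no_such_dc' is not reported by the registered snitch, so the keyspace-level
            // validation added in this commit is expected to reject it.
            execute("CREATE KEYSPACE invalid_dc_ks WITH replication = " +
                    "{ 'class' : 'NetworkTopologyStrategy', 'no_such_dc' : 1 };");
            org.junit.Assert.fail("CREATE KEYSPACE with an unknown datacenter should have been rejected");
        }
        catch (Exception expected)
        {
            // Expected: the exact exception type depends on how CQLTester surfaces the
            // underlying ConfigurationException.
        }
        finally
        {
            execute("DROP KEYSPACE IF EXISTS invalid_dc_ks");
        }
    }
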
http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/dht/BootStrapperTest.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/d45f323e/test/unit/org/apache/cassandra/service/MoveTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/service/MoveTest.java
index 05757c0,6c07a47..e8d5ccd
--- a/test/unit/org/apache/cassandra/service/MoveTest.java
+++ b/test/unit/org/apache/cassandra/service/MoveTest.java
@@@ -82,9 -82,8 +82,9 @@@ public class MoveTes
       * So instead of extending SchemaLoader, we call its method below.
       */
      @BeforeClass
-     public static void setup() throws ConfigurationException
+     public static void setup() throws Exception
      {
 +        DatabaseDescriptor.daemonInitialization();
          oldPartitioner = StorageService.instance.setPartitionerUnsafe(partitioner);
          SchemaLoader.loadSchema();
          SchemaLoader.schemaDefinition("MoveTest");
@@@ -140,6 -139,11 +140,11 @@@
              }
          });
  
+         final TokenMetadata tmd = StorageService.instance.getTokenMetadata();
 -        tmd.clearUnsafe();
 -        tmd.updateHostId(UUID.randomUUID(), InetAddress.getByName("127.0.0.1"));
 -        tmd.updateHostId(UUID.randomUUID(), InetAddress.getByName("127.0.0.2"));
++        tmd.clearUnsafe();
++        tmd.updateHostId(UUID.randomUUID(), InetAddress.getByName("127.0.0.1"));
++        tmd.updateHostId(UUID.randomUUID(), InetAddress.getByName("127.0.0.2"));
+ 
          KeyspaceMetadata keyspace =  KeyspaceMetadata.create(keyspaceName,
                                                               KeyspaceParams.nts(configOptions(replicas)),
                                                               Tables.of(CFMetaData.Builder.create(keyspaceName, "CF1")
