Merge branch 'cassandra-2.1' into cassandra-2.2

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7cab3272
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7cab3272
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7cab3272

Branch: refs/heads/cassandra-2.2
Commit: 7cab3272455bdd16b639c510416ae339a8613414
Parents: 751e4f9 711870e
Author: Aleksey Yeschenko <alek...@apache.org>
Authored: Wed Oct 14 15:54:25 2015 +0100
Committer: Aleksey Yeschenko <alek...@apache.org>
Committed: Wed Oct 14 15:54:25 2015 +0100

----------------------------------------------------------------------
 CHANGES.txt                                          |  1 +
 NEWS.txt                                             | 12 ++++++++++++
 conf/cassandra-rackdc.properties                     |  4 ++--
 conf/cassandra.yaml                                  |  3 +++
 src/java/org/apache/cassandra/db/SystemKeyspace.java | 15 +++++++++++++++
 .../apache/cassandra/service/CassandraDaemon.java    | 13 +++++++++++++
 6 files changed, 46 insertions(+), 2 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 6aa16bc,9a82780..57c6ff9
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,5 -1,5 +1,6 @@@
 -2.1.12
 +2.2.4
 +Merged from 2.1:
+  * Don't allow startup if the node's rack has changed (CASSANDRA-10242)
   * (cqlsh) show partial trace if incomplete after max_trace_wait (CASSANDRA-7645)
  
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index f9fbe43,fadd541..a235a1d
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -13,160 -13,27 +13,172 @@@ restore snapshots created with the prev
  'sstableloader' tool. You can upgrade the file format of your snapshots
  using the provided 'sstableupgrade' tool.
  
 -2.1.12
 -======
++2.2.4
++=====
+ 
 -New features
 -------------
++Operations
++----------
+     - Switching racks is no longer an allowed operation on a node which has
+       data. Instead, the node will need to be wiped and bootstrapped. If
+       moving from the SimpleSnitch, make sure the rack containing all current
 -      nodes is named "rack1". To override this behavior when manually wiping
 -      the node and bootstrapping, use -Dcassandra.ignore_rack=true.
++      nodes is named "rack1".
+ 
+ 
 -2.1.11
 +2.2.3
  =====
  
  Upgrading
  ---------
 -    - Nothing specific to this release, but please see 2.1 if you are upgrading
 +    - Nothing specific to this release, but please see 2.2 if you are upgrading
        from a previous version.
  
+ 
 +2.2.2
 +=====
 +
 +Upgrading
 +---------
 +    - Versions 1 and 2 of the native protocol are now deprecated and support
 +      will be removed in Cassandra 3.0. You are encouraged to upgrade to a
 +      client driver using version 3 of the native protocol.
 +
 +Changed Defaults
 +----------------
 +   - commitlog_total_space_in_mb will use the smaller of 8192 and 1/4
 +     of the total space of the commitlog volume. (Before: always used
 +     8192)
 +   - Incremental repair is on by default since 2.2.0; run full repairs by
 +     providing the '-full' parameter to nodetool repair.
 +   - Parallel repairs are the default since 2.2.0; run sequential repairs
 +     by providing the '-seq' parameter to nodetool repair.
 +   - The following INFO logs were reduced to DEBUG level and will now show
 +     in debug.log instead of system.log:
 +      - Memtable flushing actions
 +      - Commit log replayed files
 +      - Compacted sstables
 +      - SSTable opening (SSTableReader)
 +
 +New features
 +------------
 +   - Custom QueryHandlers can retrieve the column specifications for the bound
 +     variables from QueryOptions by using the hasColumnSpecifications()
 +     and getColumnSpecifications() methods.
 +   - A new default asynchronous log appender, debug.log, was created in addition
 +     to the system.log appender in order to provide more detailed debug logging.
 +     To disable debug logging, comment out the ASYNCDEBUGLOG
 +     appender in conf/logback.xml. See CASSANDRA-10241 for more information.
 +
 +
 +2.2.1
 +=====
 +
 +Upgrading
 +---------
 +    - Nothing specific to this release, but please see 2.2 if you are upgrading
 +      from a previous version.
 +
 +New features
 +------------
 +   - COUNT(*) and COUNT(1) can be selected with other columns or functions
 +
 +
 +2.2
 +===
 +
 +New features
 +------------
 +   - The LIMIT clause now applies only to the number of rows returned to the user,
 +     not to the number of rows queried. As a consequence, queries using aggregates
 +     are no longer impacted by the LIMIT clause.
 +   - Very large batches will now be rejected (defaults to 50kb). This
 +     can be customized by modifying batch_size_fail_threshold_in_kb.
 +   - Selecting columns, scalar functions, UDT fields, writetime or ttl together
 +     with aggregates is now possible. The values returned for the columns,
 +     scalar functions, UDT fields, writetime and ttl will be the ones from
 +     the first row matching the query.
 +   - Windows is now a supported platform. PowerShell execution for startup scripts
 +     is highly recommended and can be enabled via an administrator command prompt
 +     with: 'powershell set-executionpolicy unrestricted'
 +   - It is now possible to do major compactions when using leveled compaction.
 +     Doing that will take all sstables and compact them out in levels. The
 +     levels will be non-overlapping, so this is still not something you want
 +     to do very often since it might cause more compactions for a while.
 +     It is also possible to split output when doing a major compaction with
 +     STCS - files will be split into sizes of 50%, 25%, 12.5%, etc. of the total size.
 +     This might be a bit better than old major compactions, which created one big
 +     file on disk.
 +   - A new tool, bin/sstableverify, has been added; it checks for errors/bitrot
 +     in all sstables. Unlike scrub, it is a non-invasive tool.
 +   - Authentication & Authorization APIs have been updated to introduce
 +     roles. Roles and the permissions granted to them are inherited, supporting
 +     role-based access control. The role concept supersedes that of users,
 +     and CQL constructs such as CREATE USER are deprecated but retained for
 +     compatibility. The requirement to explicitly create roles in Cassandra
 +     even when auth is handled by an external system has been removed, so
 +     authentication & authorization can be delegated to such systems in their
 +     entirety.
 +   - In addition to the above, roles are also first-class resources and can be the
 +     subject of permissions. Users (roles) can now be granted permissions on other
 +     roles, including CREATE, ALTER, DROP & AUTHORIZE, which removes the need for
 +     superuser privileges in order to perform user/role management operations.
 +   - Creators of database resources (keyspaces, tables, roles) are now automatically
 +     granted all permissions on them (if the IAuthorizer implementation supports
 +     this).
 +   - The SSTable file name format has changed: it no longer includes the keyspace/CF
 +     name. Also, each secondary index now has its own directory under its parent's
 +     directory.
 +   - Support for user-defined functions and user-defined aggregates has
 +     been added to CQL.
 +     ************************************************************************
 +     IMPORTANT NOTE: user-defined functions can be used to execute
 +     arbitrary and possibly evil code in Cassandra 2.2, and are
 +     therefore disabled by default.  To enable UDFs edit
 +     cassandra.yaml and set enable_user_defined_functions to true.
 +
 +     CASSANDRA-9402 will add a security manager for UDFs in Cassandra
 +     3.0.  This will inherently be backwards-incompatible with any 2.2
 +     UDFs that perform insecure operations such as opening a socket or
 +     writing to the filesystem.
 +     ************************************************************************
 +   - Row-cache is now fully off-heap.
 +   - jemalloc is now automatically preloaded and used on Linux and OS-X if
 +     installed.
 +   - Please ensure on Unix platforms that there is no libjnidispatch.so
 +     installed which is accessible by Cassandra. Old versions of
 +     libjna packages (< 4.0.0) will cause problems - e.g. Debian Wheezy
 +     contains libjna version 3.2.x.
 +   - The node now stays up when streaming fails during bootstrapping. You can
 +     use the new `nodetool bootstrap resume` command to continue streaming after
 +     resolving the issue.
 +   - Protocol version 4 specifies that bind variables do not require a
 +     value when executing a statement. Bind variables without a value are
 +     called 'unset'. An 'unset' bind variable is serialized as the int
 +     value '-2' with no following bytes.
 +     In an EXECUTE or BATCH request, an unset bind value does not modify the
 +     value and does not create a tombstone; an unset bind ttl is treated as
 +     'unlimited', an unset bind timestamp is treated as 'now', and an unset
 +     bind counter operation does not change the counter value.
 +     Unset tuple fields, UDT fields and map keys are not allowed.
 +     In a QUERY request, an unset limit is treated as 'unlimited'.
 +     WHERE clauses with an unset partition column, clustering column
 +     or index column are not allowed.
 +   - New `ByteType` (cql tinyint). 1-byte signed integer
 +   - New `ShortType` (cql smallint). 2-byte signed integer
 +   - New `SimpleDateType` (cql date). 4-byte unsigned integer
 +   - New `TimeType` (cql time). 8-byte long
 +   - The toDate(timeuuid), toTimestamp(timeuuid) and toUnixTimestamp(timeuuid)
 +     functions have been added to allow converting a timeuuid into the date
 +     type, the timestamp type and a raw bigint value.
 +     The functions unixTimestampOf(timeuuid) and dateOf(timeuuid) have been deprecated.
 +   - The toDate(timestamp) and toUnixTimestamp(timestamp) functions have been added
 +     to allow converting a timestamp into the date type and a raw bigint value.
 +   - The toTimestamp(date) and toUnixTimestamp(date) functions have been added
 +     to allow converting a date into the timestamp type and a raw bigint value.
 +   - SizeTieredCompactionStrategy parameter cold_reads_to_omit has been removed.
 +   - The default JVM flag -XX:+PerfDisableSharedMem will cause the following JVM
 +     tools to stop working: jps, jstack, jinfo, jmc and jcmd, as well as 3rd-party
 +     tools like Jolokia. If you wish to use these tools, you can comment this
 +     flag out in cassandra-env.{sh,ps1}.
 +
 +
  2.1.10
  =====
  
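A note on the timeuuid/timestamp/date conversion functions listed in the NEWS.txt hunk above: they are ordinary CQL selectors, so any client can call them once the cluster runs 2.2. A minimal sketch using the DataStax Java driver (3.x accessors assumed; the ks.events table is illustrative, not part of this commit):

    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.ResultSet;
    import com.datastax.driver.core.Row;
    import com.datastax.driver.core.Session;

    public class TimeuuidConversionSketch
    {
        public static void main(String[] args)
        {
            try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = cluster.connect())
            {
                // Assumes a table ks.events (id timeuuid PRIMARY KEY).
                ResultSet rs = session.execute(
                    "SELECT toDate(id) AS d, toTimestamp(id) AS ts, toUnixTimestamp(id) AS raw FROM ks.events");
                for (Row row : rs)
                    // toUnixTimestamp(...) yields the raw bigint value described above.
                    System.out.println(row.getDate("d") + " " + row.getTimestamp("ts") + " " + row.getLong("raw"));
            }
        }
    }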

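The protocol-v4 'unset' bind rules described in the NEWS.txt hunk are easiest to see from the client side. A minimal sketch using the DataStax Java driver (3.x API and protocol v4 assumed; contact point, keyspace and table are illustrative, not part of this commit) - the third variable is deliberately never bound:

    import com.datastax.driver.core.BoundStatement;
    import com.datastax.driver.core.Cluster;
    import com.datastax.driver.core.PreparedStatement;
    import com.datastax.driver.core.Session;

    public class UnsetBindSketch
    {
        public static void main(String[] args)
        {
            try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
                 Session session = cluster.connect())
            {
                // Assumes a table ks.t (k text PRIMARY KEY, v int, w text).
                PreparedStatement ps = session.prepare(
                    "INSERT INTO ks.t (k, v, w) VALUES (?, ?, ?)");

                BoundStatement bound = ps.bind();
                bound.setString("k", "key-1");
                bound.setInt("v", 42);
                // 'w' is left unbound: under protocol v4 it is sent as 'unset'
                // (int length -2, no bytes), so any existing value of 'w' is
                // neither overwritten nor tombstoned. Under protocol v3 or
                // earlier, unbound variables are rejected instead (exact
                // behavior varies by driver version).
                session.execute(bound);
            }
        }
    }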
http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/conf/cassandra-rackdc.properties
----------------------------------------------------------------------
diff --cc conf/cassandra-rackdc.properties
index f0a0d55,f85646e..2ea6043
--- a/conf/cassandra-rackdc.properties
+++ b/conf/cassandra-rackdc.properties
@@@ -16,8 -16,11 +16,8 @@@
  
  # These properties are used with GossipingPropertyFileSnitch and will
  # indicate the rack and dc for this node
 -#
 -# When upgrading from SimpleSnitch, you will need to set your initial machines
 -# to have rack=rack1
--dc=DC1
--rack=RAC1
++dc=dc1
++rack=rack1
  
  # Add a suffix to a datacenter name. Used by the Ec2Snitch and Ec2MultiRegionSnitch
  # to append a string to the EC2 region name.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/conf/cassandra.yaml
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/src/java/org/apache/cassandra/db/SystemKeyspace.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/SystemKeyspace.java
index 308edcd,72ee270..62bf59e
--- a/src/java/org/apache/cassandra/db/SystemKeyspace.java
+++ b/src/java/org/apache/cassandra/db/SystemKeyspace.java
@@@ -881,6 -777,126 +881,21 @@@ public final class SystemKeyspac
          return hostId;
      }
  
+     /**
 -     * @param cfName The name of the ColumnFamily responsible for part of the schema (keyspace, ColumnFamily, columns)
 -     * @return CFS responsible to hold low-level serialized schema
 -     */
 -    public static ColumnFamilyStore schemaCFS(String cfName)
 -    {
 -        return Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(cfName);
 -    }
 -
 -    public static List<Row> serializedSchema()
 -    {
 -        List<Row> schema = new ArrayList<>();
 -
 -        for (String cf : allSchemaCfs)
 -            schema.addAll(serializedSchema(cf));
 -
 -        return schema;
 -    }
 -
 -    /**
 -     * @param schemaCfName The name of the ColumnFamily responsible for part of the schema (keyspace, ColumnFamily, columns)
 -     * @return low-level schema representation (each row represents individual Keyspace or ColumnFamily)
++     * Gets the stored rack for the local node, or null if none has been set yet.
+      */
 -    public static List<Row> serializedSchema(String schemaCfName)
 -    {
 -        Token minToken = StorageService.getPartitioner().getMinimumToken();
 -
 -        return schemaCFS(schemaCfName).getRangeSlice(new Range<RowPosition>(minToken.minKeyBound(), minToken.maxKeyBound()),
 -                                                     null,
 -                                                     new IdentityQueryFilter(),
 -                                                     Integer.MAX_VALUE,
 -                                                     System.currentTimeMillis());
 -    }
 -
 -    public static Collection<Mutation> serializeSchema()
++    public static String getRack()
+     {
 -        Map<DecoratedKey, Mutation> mutationMap = new HashMap<>();
++        String req = "SELECT rack FROM system.%s WHERE key='%s'";
++        UntypedResultSet result = executeInternal(String.format(req, LOCAL, LOCAL));
+ 
 -        for (String cf : allSchemaCfs)
 -            serializeSchema(mutationMap, cf);
++        // Look up the Rack (return it if found)
++        if (!result.isEmpty() && result.one().has("rack"))
++            return result.one().getString("rack");
+ 
 -        return mutationMap.values();
 -    }
 -
 -    private static void serializeSchema(Map<DecoratedKey, Mutation> mutationMap, String schemaCfName)
 -    {
 -        for (Row schemaRow : serializedSchema(schemaCfName))
 -        {
 -            if (Schema.ignoredSchemaRow(schemaRow))
 -                continue;
 -
 -            Mutation mutation = mutationMap.get(schemaRow.key);
 -            if (mutation == null)
 -            {
 -                mutation = new Mutation(Keyspace.SYSTEM_KS, schemaRow.key.getKey());
 -                mutationMap.put(schemaRow.key, mutation);
 -            }
 -
 -            mutation.add(schemaRow.cf);
 -        }
 -    }
 -
 -    public static Map<DecoratedKey, ColumnFamily> getSchema(String schemaCfName, Set<String> keyspaces)
 -    {
 -        Map<DecoratedKey, ColumnFamily> schema = new HashMap<>();
 -
 -        for (String keyspace : keyspaces)
 -        {
 -            Row schemaEntity = readSchemaRow(schemaCfName, keyspace);
 -            if (schemaEntity.cf != null)
 -                schema.put(schemaEntity.key, schemaEntity.cf);
 -        }
 -
 -        return schema;
 -    }
 -
 -    public static ByteBuffer getSchemaKSKey(String ksName)
 -    {
 -        return AsciiType.instance.fromString(ksName);
 -    }
 -
 -    /**
 -     * Fetches a subset of schema (table data, columns metadata or triggers) for the keyspace.
 -     *
 -     * @param schemaCfName the schema table to get the data from (schema_keyspaces, schema_columnfamilies, schema_columns or schema_triggers)
 -     * @param ksName the keyspace of the tables we are interested in
 -     * @return a Row containing the schema data of a particular type for the keyspace
 -     */
 -    public static Row readSchemaRow(String schemaCfName, String ksName)
 -    {
 -        DecoratedKey key = StorageService.getPartitioner().decorateKey(getSchemaKSKey(ksName));
 -
 -        ColumnFamilyStore schemaCFS = SystemKeyspace.schemaCFS(schemaCfName);
 -        ColumnFamily result = schemaCFS.getColumnFamily(QueryFilter.getIdentityFilter(key, schemaCfName, System.currentTimeMillis()));
 -
 -        return new Row(key, result);
 -    }
 -
 -    /**
 -     * Fetches a subset of schema (table data, columns metadata or triggers) for the keyspace+table pair.
 -     *
 -     * @param schemaCfName the schema table to get the data from (schema_columnfamilies, schema_columns or schema_triggers)
 -     * @param ksName the keyspace of the table we are interested in
 -     * @param cfName the table we are interested in
 -     * @return a Row containing the schema data of a particular type for the 
table
 -     */
 -    public static Row readSchemaRow(String schemaCfName, String ksName, String cfName)
 -    {
 -        DecoratedKey key = StorageService.getPartitioner().decorateKey(getSchemaKSKey(ksName));
 -        ColumnFamilyStore schemaCFS = SystemKeyspace.schemaCFS(schemaCfName);
 -        Composite prefix = schemaCFS.getComparator().make(cfName);
 -        ColumnFamily cf = schemaCFS.getColumnFamily(key,
 -                                                    prefix,
 -                                                    prefix.end(),
 -                                                    false,
 -                                                    Integer.MAX_VALUE,
 -                                                    System.currentTimeMillis());
 -        return new Row(key, cf);
++        return null;
+     }
+ 
      public static PaxosState loadPaxosState(ByteBuffer key, CFMetaData metadata)
      {
          String req = "SELECT * FROM system.%s WHERE row_key = ? AND cf_id = 
?";

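The SystemKeyspace hunk above shows only the read path; the write that persists the rack into system.local is among the commit's 15 added lines but is not visible in this excerpt. A minimal sketch of what such a write could look like, in the style of the surrounding class (the updateRack name and the exact CQL are assumptions, not code from this commit):

    // Hypothetical counterpart to getRack(); method name and CQL are
    // assumed, not taken from this commit. Persists the snitch-reported
    // rack so the startup check in CassandraDaemon has a stored value
    // to compare against on the next boot.
    public static void updateRack(String rack)
    {
        String req = "INSERT INTO system.%s (key, rack) VALUES ('%s', ?)";
        executeInternal(String.format(req, LOCAL, LOCAL), rack);
    }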
http://git-wip-us.apache.org/repos/asf/cassandra/blob/7cab3272/src/java/org/apache/cassandra/service/CassandraDaemon.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/service/CassandraDaemon.java
index 2a23550,17553f3..9e38e06
--- a/src/java/org/apache/cassandra/service/CassandraDaemon.java
+++ b/src/java/org/apache/cassandra/service/CassandraDaemon.java
@@@ -231,6 -319,6 +231,19 @@@ public class CassandraDaemo
          }
  
          Keyspace.setInitialized();
++
++        String storedRack = SystemKeyspace.getRack();
++        if (storedRack != null)
++        {
++            String currentRack = DatabaseDescriptor.getEndpointSnitch().getRack(FBUtilities.getBroadcastAddress());
++            if (!storedRack.equals(currentRack))
++            {
++                logger.error("Cannot start node if snitch's rack differs from previous rack. " +
++                             "Please fix the snitch or wipe and rebootstrap this node.");
++                System.exit(100);
++            }
++        }
++
          // initialize keyspaces
          for (String keyspaceName : Schema.instance.getKeyspaces())
          {

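The 2.1 side of the NEWS.txt hunk above mentions a -Dcassandra.ignore_rack=true override for this startup check, while the 2.2 text drops that sentence; whether 2.2 honors the flag is not visible in this excerpt. If it does, gating the comparison would look roughly like the fragment below, in the context of the CassandraDaemon hunk above (a sketch, not the commit's code):

    // Sketch only: Boolean.getBoolean(name) is true iff the system property
    // is set to "true", e.g. via -Dcassandra.ignore_rack=true. Whether 2.2
    // checks this flag is an assumption based on the 2.1 NEWS.txt text.
    String storedRack = SystemKeyspace.getRack();
    if (storedRack != null && !Boolean.getBoolean("cassandra.ignore_rack"))
    {
        String currentRack = DatabaseDescriptor.getEndpointSnitch().getRack(FBUtilities.getBroadcastAddress());
        if (!storedRack.equals(currentRack))
        {
            logger.error("Cannot start node if snitch's rack differs from previous rack. " +
                         "Please fix the snitch or wipe and rebootstrap this node.");
            System.exit(100);
        }
    }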