Author: [email protected]
Date: Tue Jun 12 16:42:24 2012
New Revision: 2483

Log:
[AMDATUCASSANDRA-202] Merged fix for 0.3.0 back to 0.2.5

Modified:
   branches/amdatu-cassandra-0.2.5/cassandra-application/pom.xml
   
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonActivatorImpl.java
   
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/resources/conf/cassandra.yaml
   
branches/amdatu-cassandra-0.2.5/cassandra-application/src/test/resources/cassandra.yaml
   
branches/amdatu-cassandra-0.2.5/config/src/main/resources/org.amdatu.cassandra.launcher.cfg

Modified: branches/amdatu-cassandra-0.2.5/cassandra-application/pom.xml
==============================================================================
--- branches/amdatu-cassandra-0.2.5/cassandra-application/pom.xml       
(original)
+++ branches/amdatu-cassandra-0.2.5/cassandra-application/pom.xml       Tue Jun 
12 16:42:24 2012
@@ -27,23 +27,27 @@
   <name>Amdatu Cassandra - Apache Cassandra Application</name>
   <description>This bundle embeds the Apache Cassandra libraries and exports 
the Cassandra Daemon as an OSGi service</description>
 
+  <properties>
+    <cassandra.version>1.0.10</cassandra.version>
+  </properties>
+
   <dependencies>
     <dependency>
       <groupId>org.apache.cassandra</groupId>
-      <artifactId>apache-cassandra</artifactId>
+      <artifactId>cassandra-all</artifactId>
       <version>${cassandra.version}</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
       <groupId>org.apache.cassandra</groupId>
-      <artifactId>apache-cassandra-thrift</artifactId>
+      <artifactId>cassandra-thrift</artifactId>
       <version>${cassandra.version}</version>
       <scope>compile</scope>
     </dependency>
     <dependency>
-      <groupId>libthrift</groupId>
+      <groupId>org.apache.thrift</groupId>
       <artifactId>libthrift</artifactId>
-      <version>0.6.0-2</version>
+      <version>0.6.1-2</version>
       <scope>compile</scope>
       <exclusions>
         <exclusion>
@@ -119,26 +123,13 @@
               !javax.jmdns,
               !javax.jms,
               !javax.mail.*,
-              !javax.servlet.jsp,
               !javax.swing.*,
-              !jline,
               !joptsimple,
               !junit.framework,
-              !org.apache.commons.cli,
-              !org.apache.commons.codec.*,
-              !org.apache.commons.httpclient.*,
-              !org.apache.commons.logging.*,
-              !org.apache.commons.net.ftp,
-              !org.apache.http.*,
-              !org.apache.jasper.*,
               !org.apache.tools.ant.*,
               !org.apache.velocity.*,
               !org.jboss.netty.*,
               !org.joda.time,
-              !org.znerd.xmlenc,
-              !org.mortbay.*,
-              !org.kosmix.kosmosfs.access,
-              !org.jets3t.*,
               *
             </Import-Package>
             <Export-Package>

Modified: 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonActivatorImpl.java
==============================================================================
--- 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonActivatorImpl.java
      (original)
+++ 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/java/org/amdatu/cassandra/application/service/CassandraDaemonActivatorImpl.java
      Tue Jun 12 16:42:24 2012
@@ -15,21 +15,24 @@
  */
 package org.amdatu.cassandra.application.service;
 
-import org.amdatu.cassandra.application.CassandraServerConfigurationService;
-
 import java.util.ArrayList;
 import java.util.List;
 import java.util.concurrent.Future;
 import java.util.concurrent.ThreadPoolExecutor;
 import java.util.concurrent.TimeUnit;
 
+import org.amdatu.cassandra.application.CassandraServerConfigurationService;
 import org.apache.cassandra.concurrent.Stage;
 import org.apache.cassandra.concurrent.StageManager;
-import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.KSMetaData;
+import org.apache.cassandra.config.Schema;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.Table;
 import org.apache.cassandra.db.commitlog.CommitLog;
+import org.apache.cassandra.gms.Gossiper;
+import org.apache.cassandra.net.MessagingService;
+import org.apache.cassandra.service.StorageProxy;
+import org.apache.cassandra.service.StorageService;
 import org.apache.cassandra.thrift.CassandraDaemon;
 import org.apache.cassandra.utils.FBUtilities;
 import org.osgi.service.log.LogService;
@@ -104,28 +107,44 @@
             // StorageService that contains the shutdown hook with the code 
snippet below verifies if the mutation
             // stage has already been terminated before terminating it.
             ThreadPoolExecutor mutationStage = 
StageManager.getStage(Stage.MUTATION);
-            if (!mutationStage.isShutdown()) {
-                mutationStage.shutdown();
-                mutationStage.awaitTermination(30, TimeUnit.SECONDS);
-
-                List<Future<?>> flushes = new ArrayList<Future<?>>();
-                for (Table table : Table.all()) {
-                    KSMetaData ksm = 
DatabaseDescriptor.getKSMetaData(table.name);
-                    if (!ksm.durableWrites) {
-                        for (ColumnFamilyStore cfs : 
table.getColumnFamilyStores())
-                        {
-                            Future<?> future = cfs.forceFlush();
-                            if (future != null) {
-                                flushes.add(future);
-                            }
+            if (mutationStage.isShutdown()) {
+                return; // drained already
+            }
+
+            StorageService.instance.stopRPCServer();
+            StorageService.optionalTasks.shutdown();
+            Gossiper.instance.stop();
+
+            // In-progress writes originating here could generate hints to be 
written, so shut down MessagingService
+            // before mutation stage, so we can get all the hints saved before 
shutting down
+            MessagingService.instance().shutdown();
+            mutationStage.shutdown();
+            mutationStage.awaitTermination(3600, TimeUnit.SECONDS);
+            StorageProxy.instance.verifyNoHintsInProgress();
+
+            List<Future<?>> flushes = new ArrayList<Future<?>>();
+            for (Table table : Table.all()) {
+                KSMetaData ksm = Schema.instance.getKSMetaData(table.name);
+                if (!ksm.durableWrites) {
+                    for (ColumnFamilyStore cfs : 
table.getColumnFamilyStores()) {
+                        Future<?> future = cfs.forceFlush();
+                        if (future != null) {
+                            flushes.add(future);
                         }
                     }
                 }
-                FBUtilities.waitOnFutures(flushes);
+            }
+            FBUtilities.waitOnFutures(flushes);
+
+            CommitLog.instance.shutdownBlocking();
 
-                CommitLog.instance.shutdownBlocking();
-                m_logService.log(LogService.LOG_INFO, "Cassandra Daemon 
shutdown completed.");
+            // wait for miscellaneous tasks like sstable and commitlog segment 
deletion
+            StorageService.tasks.shutdown();
+            if (!StorageService.tasks.awaitTermination(1, TimeUnit.MINUTES)) {
+                m_logService.log(LogService.LOG_WARNING,
+                    "Miscellaneous task executor still busy after one minute; 
proceeding with shutdown");
             }
+            m_logService.log(LogService.LOG_INFO, "Cassandra Daemon shutdown 
completed.");
         }
         catch (InterruptedException e) {
             m_logService.log(LogService.LOG_ERROR, "Could not properly 
terminate Cassandra mutation stage", e);

Modified: 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/resources/conf/cassandra.yaml
==============================================================================
--- 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/resources/conf/cassandra.yaml
        (original)
+++ 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/main/resources/conf/cassandra.yaml
        Tue Jun 12 16:42:24 2012
@@ -92,9 +92,6 @@
 # saved caches
 saved_caches_directory: ${org.amdatu.core.cassandra.application/savedcachesdir}
 
-# Size to allow commitlog to grow to before creating a new segment
-commitlog_rotation_threshold_in_mb: 128
-
 # commitlog_sync may be either "periodic" or "batch."
 # When in batch mode, Cassandra won't ack writes until the commit log
 # has been fsynced to disk.  It will wait up to
@@ -110,8 +107,8 @@
 commitlog_sync: periodic
 commitlog_sync_period_in_ms: 10000
 
-# any class that implements the SeedProvider interface and has a constructor 
that takes a Map<String, String> of
-# parameters will do.
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
 seed_provider:
     # Addresses of hosts that are deemed contact points.
     # Cassandra nodes use this list of hosts to find each other and learn
@@ -161,11 +158,15 @@
 concurrent_writes: 32
 
 # Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.  Prefer using this to
-# the older, per-ColumnFamily memtable flush thresholds.
+# memtable when this much memory is used.
 # If omitted, Cassandra will set it to 1/3 of the heap.
-# If set to 0, only the old flush thresholds are used.
-memtable_total_space_in_mb: 0
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
 
 # This sets the amount of memtable flush writer threads.  These will
 # be blocked by disk io, and each one will hold a memtable in memory
@@ -186,6 +187,10 @@
 # TCP port, for commands and data
 storage_port: ${org.amdatu.core.cassandra.application/storage_port}
 
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7001
+
 # Address to bind to and tell other Cassandra nodes to connect to. You
 # _must_ change this if you want multiple nodes to be able to
 # communicate!
@@ -198,6 +203,10 @@
 # Setting this to 0.0.0.0 is always wrong.
 listen_address: ${org.amdatu.core.cassandra.application/listen_address}
 
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
 # The address to bind the Thrift RPC service to -- clients connect
 # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
 # you want Thrift to listen on all interfaces.
@@ -270,9 +279,11 @@
 # is a data format change.
 snapshot_before_compaction: false
 
-# change this to increase the compaction thread's priority.  In java, 1 is the
-# lowest priority and that is our default. The highest allowed is 5.
-# compaction_thread_priority: 1
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you 
will
+# lose data on truncation or drop.
+auto_snapshot: true
 
 # Add column indexes to a row after its contents reach this size.
 # Increase if your column values are large, or if you have a very large
@@ -289,17 +300,27 @@
 in_memory_compaction_limit_in_mb: 64
 
 # Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair. This defaults to
-# the number of cores. This can help preserve read performance in a
-# mixed read/write workload, by mitigating the tendency of small
-# sstables to accumulate during a single long running compactions. The
-# default is usually fine and if you experience problems with
-# compaction running too slowly or too fast, you should look at
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
 # compaction_throughput_mb_per_sec first.
 #
-# Uncomment to make compaction mono-threaded.
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
 #concurrent_compactors: 1
 
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
 # Throttles compaction to the given total throughput across the entire
 # system. The faster you insert data, the faster you need to compact in
 # order to keep the sstable count down, but in general, setting this to
@@ -313,34 +334,73 @@
 # key caches.
 compaction_preheat_key_cache: true
 
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
 # Time to wait for a reply from other nodes before failing the command
 rpc_timeout_in_ms: 10000
 
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming an important amount of
+# data, so you should avoid setting the value too low.
+# Default value is 0, which never times out streams.
+# streaming_socket_timeout_in_ms: 0
+
 # phi value that must be reached for a host to be marked down.
 # most users should never need to adjust this.
 # phi_convict_threshold: 8
 
 # endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch, which will let Cassandra know enough
-# about your network topology to route requests efficiently.
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
 # Out of the box, Cassandra provides
-#  - org.apache.cassandra.locator.SimpleSnitch:
+#  - SimpleSnitch:
 #    Treats Strategy order as proximity. This improves cache locality
 #    when disabling read repair, which can further improve throughput.
-#  - org.apache.cassandra.locator.RackInferringSnitch:
+#    Only appropriate for single-datacenter deployments.
+#  - PropertyFileSnitch:
 #    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's
-#    IP address, respectively
-# org.apache.cassandra.locator.PropertyFileSnitch:
-#  - Proximity is determined by rack and data center, which are
 #    explicitly configured in cassandra-topology.properties.
-endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively.  Unless this happens to match your
+#    deployment conventions (as it did Facebook's), this is best used
+#    as an example of writing a custom Snitch class.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region.  Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the Datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
 
-# dynamic_snitch -- This boolean controls whether the above snitch is
-# wrapped with a dynamic snitch, which will monitor read latencies
-# and avoid reading from hosts that have slowed (due to compaction,
-# for instance)
-dynamic_snitch: true
 # controls how often to perform the more expensive part of host score
 # calculation
 dynamic_snitch_update_interval_in_ms: 100
@@ -354,7 +414,7 @@
 # expressed as a double which represents a percentage.  Thus, a value of
 # 0.2 means Cassandra would continue to prefer the static snitch values
 # until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.0
+dynamic_snitch_badness_threshold: 0.1
 
 # request_scheduler -- Set this to a class that implements
 # RequestScheduler, which will schedule incoming client requests
@@ -413,14 +473,23 @@
 # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
 # suite for authentication, key exchange and encryption of the actual data 
transfers.
 # NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
 #
 # The passwords used in these options must match the passwords used when 
generating
 # the keystore and truststore.  For instructions on generating these files, 
see:
 # 
http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
 encryption_options:
     internode_encryption: none
     keystore: conf/.keystore
     keystore_password: cassandra
     truststore: conf/.truststore
-    truststore_password: cassandra
\ No newline at end of file
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: 
[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]

Modified: 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/test/resources/cassandra.yaml
==============================================================================
--- 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/test/resources/cassandra.yaml
     (original)
+++ 
branches/amdatu-cassandra-0.2.5/cassandra-application/src/test/resources/cassandra.yaml
     Tue Jun 12 16:42:24 2012
@@ -1,24 +1,9 @@
-# Copyright (c) 2010, 2011 The Amdatu Foundation
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.verning permissions and limitations
-# under the License.
-
 # Cassandra storage config YAML
 
-#NOTE !!!!!!!! NOTE
-# See http://wiki.apache.org/cassandra/StorageConfiguration for
-# full explanations of configuration directives
-#NOTE !!!!!!!! NOTE
+# NOTE:
+#   See http://wiki.apache.org/cassandra/StorageConfiguration for
+#   full explanations of configuration directives
+# /NOTE
 
 # The name of the cluster. This is mainly used to prevent machines in
 # one logical cluster from joining another.
@@ -36,21 +21,13 @@
 # a random token, which will lead to hot spots.
 initial_token: 0
 
-# Set to true to make new [non-seed] nodes automatically migrate data
-# to themselves from the pre-existing nodes in the cluster.  Defaults
-# to false because you can only bootstrap N machines at a time from
-# an existing cluster of N, so if you are bringing up a cluster of
-# 10 machines with 3 seeds you would have to do it in stages.  Leaving
-# this off for the initial start simplifies that.
-auto_bootstrap: false
-
 # See http://wiki.apache.org/cassandra/HintedHandoff
 hinted_handoff_enabled: true
 # this defines the maximum amount of time a dead host will have hints
 # generated.  After it has been dead this long, hints will be dropped.
 max_hint_window_in_ms: 3600000 # one hour
-# Sleep this long after delivering each row or row fragment
-hinted_handoff_throttle_delay_in_ms: 50
+# Sleep this long after delivering each hint
+hinted_handoff_throttle_delay_in_ms: 1
 
 # authentication backend, implementing IAuthenticator; used to identify users
 authenticator: org.apache.cassandra.auth.AllowAllAuthenticator
@@ -92,9 +69,6 @@
 # saved caches
 saved_caches_directory: target/cassandra_work/saved_caches
 
-# Size to allow commitlog to grow to before creating a new segment
-commitlog_rotation_threshold_in_mb: 128
-
 # commitlog_sync may be either "periodic" or "batch."
 # When in batch mode, Cassandra won't ack writes until the commit log
 # has been fsynced to disk.  It will wait up to
@@ -110,8 +84,8 @@
 commitlog_sync: periodic
 commitlog_sync_period_in_ms: 10000
 
-# any class that implements the SeedProvider interface and has a constructor 
that takes a Map<String, String> of
-# parameters will do.
+# any class that implements the SeedProvider interface and has a
+# constructor that takes a Map<String, String> of parameters will do.
 seed_provider:
     # Addresses of hosts that are deemed contact points.
     # Cassandra nodes use this list of hosts to find each other and learn
@@ -161,11 +135,15 @@
 concurrent_writes: 32
 
 # Total memory to use for memtables.  Cassandra will flush the largest
-# memtable when this much memory is used.  Prefer using this to
-# the older, per-ColumnFamily memtable flush thresholds.
+# memtable when this much memory is used.
 # If omitted, Cassandra will set it to 1/3 of the heap.
-# If set to 0, only the old flush thresholds are used.
-memtable_total_space_in_mb: 0
+# memtable_total_space_in_mb: 2048
+
+# Total space to use for commitlogs.
+# If space gets above this value (it will round up to the next nearest
+# segment multiple), Cassandra will flush every dirty CF in the oldest
+# segment and remove it.
+# commitlog_total_space_in_mb: 4096
 
 # This sets the amount of memtable flush writer threads.  These will
 # be blocked by disk io, and each one will hold a memtable in memory
@@ -186,6 +164,10 @@
 # TCP port, for commands and data
 storage_port: 7001
 
+# SSL port, for encrypted communication.  Unused unless enabled in
+# encryption_options
+ssl_storage_port: 7002
+
 # Address to bind to and tell other Cassandra nodes to connect to. You
 # _must_ change this if you want multiple nodes to be able to
 # communicate!
@@ -198,6 +180,10 @@
 # Setting this to 0.0.0.0 is always wrong.
 listen_address: 127.0.0.1
 
+# Address to broadcast to other Cassandra nodes
+# Leaving this blank will set it to the same value as listen_address
+# broadcast_address: 1.2.3.4
+
 # The address to bind the Thrift RPC service to -- clients connect
 # here. Unlike ListenAddress above, you *can* specify 0.0.0.0 here if
 # you want Thrift to listen on all interfaces.
@@ -205,42 +191,42 @@
 # Leaving this blank has the same effect it does for ListenAddress,
 # (i.e. it will be based on the configured hostname of the node).
 rpc_address: 127.0.0.1
-
 # port for Thrift to listen for clients on
 rpc_port: 9161
 
 # enable or disable keepalive on rpc connections
 rpc_keepalive: true
 
-# Cassandra provides you with a variety of options for RPC Server
-# sync  -> Creates one thread per connection but with a configurable number of
-#           threads.  This can be expensive in memory used for thread stack for
-#           a large enough number of clients.  (Hence, connection pooling is
-#           very, very strongly recommended.)
+# Cassandra provides three options for the RPC Server:
+#
+# sync  -> One connection per thread in the rpc pool (see below).
+#          For a very large number of clients, memory will be your limiting
+#          factor; on a 64 bit JVM, 128KB is the minimum stack size per thread.
+#          Connection pooling is very, very strongly recommended.
 #
 # async -> Nonblocking server implementation with one thread to serve
-#           rpc connections.  This is not recommended for high throughput use
-#           cases.
+#          rpc connections.  This is not recommended for high throughput use
+#          cases. Async has been tested to be about 50% slower than sync
+#          or hsha and is deprecated: it will be removed in the next major 
release.
+#
+# hsha  -> Stands for "half synchronous, half asynchronous." The rpc thread 
pool
+#          (see below) is used to manage requests, but the threads are 
multiplexed
+#          across the different clients.
 #
-# hsha  -> half sync and half async implementation with configurable number
-#           of worker threads (For managing connections).  IO Management is
-#           done by a set of threads currently equal to the number of
-#           processors in the system. The number of threads in the threadpool
-#           is configured via rpc_min_threads and rpc_max_threads.  (Connection
-#           pooling is strongly recommended in this case too.)
-
+# The default is sync because on Windows hsha is about 30% slower.  On Linux,
+# sync/hsha performance is about the same, with hsha of course using less 
memory.
 rpc_server_type: sync
 
 # Uncomment rpc_min|max|thread to set request pool size.
 # You would primarily set max for the sync server to safeguard against
 # misbehaved clients; if you do hit the max, Cassandra will block until one
-# disconnects before accepting more.  The defaults are min of 16 and max
+# disconnects before accepting more.  The defaults for sync are min of 16 and 
max
 # unlimited.
 #
-# For the Hsha server, you would set the max so that a fair amount of resources
-# are provided to the other working threads on the server.
+# For the Hsha server, the min and max both default to quadruple the number of
+# CPU cores.
 #
-# This configuration is not used for the async server.
+# This configuration is ignored by the async server.
 #
 # rpc_min_threads: 16
 # rpc_max_threads: 2048
@@ -270,9 +256,11 @@
 # is a data format change.
 snapshot_before_compaction: false
 
-# change this to increase the compaction thread's priority.  In java, 1 is the
-# lowest priority and that is our default. The highest allowed is 5.
-# compaction_thread_priority: 1
+# Whether or not a snapshot is taken of the data before keyspace truncation
+# or dropping of column families. The STRONGLY advised default of true
+# should be used to provide data safety. If you set this flag to false, you 
will
+# lose data on truncation or drop.
+auto_snapshot: true
 
 # Add column indexes to a row after its contents reach this size.
 # Increase if your column values are large, or if you have a very large
@@ -289,17 +277,27 @@
 in_memory_compaction_limit_in_mb: 64
 
 # Number of simultaneous compactions to allow, NOT including
-# validation "compactions" for anti-entropy repair. This defaults to
-# the number of cores. This can help preserve read performance in a
-# mixed read/write workload, by mitigating the tendency of small
-# sstables to accumulate during a single long running compactions. The
-# default is usually fine and if you experience problems with
-# compaction running too slowly or too fast, you should look at
+# validation "compactions" for anti-entropy repair.  Simultaneous
+# compactions can help preserve read performance in a mixed read/write
+# workload, by mitigating the tendency of small sstables to accumulate
+# during a single long running compactions. The default is usually
+# fine and if you experience problems with compaction running too
+# slowly or too fast, you should look at
 # compaction_throughput_mb_per_sec first.
 #
-# Uncomment to make compaction mono-threaded.
+# This setting has no effect on LeveledCompactionStrategy.
+#
+# concurrent_compactors defaults to the number of cores.
+# Uncomment to make compaction mono-threaded, the pre-0.8 default.
 #concurrent_compactors: 1
 
+# Multi-threaded compaction. When enabled, each compaction will use
+# up to one thread per core, plus one thread per sstable being merged.
+# This is usually only useful for SSD-based hardware: otherwise,
+# your concern is usually to get compaction to do LESS i/o (see:
+# compaction_throughput_mb_per_sec), not more.
+multithreaded_compaction: false
+
 # Throttles compaction to the given total throughput across the entire
 # system. The faster you insert data, the faster you need to compact in
 # order to keep the sstable count down, but in general, setting this to
@@ -313,34 +311,73 @@
 # key caches.
 compaction_preheat_key_cache: true
 
+# Throttles all outbound streaming file transfers on this node to the
+# given total throughput in Mbps. This is necessary because Cassandra does
+# mostly sequential IO when streaming data during bootstrap or repair, which
+# can lead to saturating the network connection and degrading rpc performance.
+# When unset, the default is 400 Mbps or 50 MB/s.
+# stream_throughput_outbound_megabits_per_sec: 400
+
 # Time to wait for a reply from other nodes before failing the command
 rpc_timeout_in_ms: 10000
 
+# Enable socket timeout for streaming operation.
+# When a timeout occurs during streaming, streaming is retried from the start
+# of the current file. This *can* involve re-streaming a significant amount of
+# data, so you should avoid setting the value too low.
+# The default value is 0, which means streams never time out.
+# streaming_socket_timeout_in_ms: 0
+
 # phi value that must be reached for a host to be marked down.
 # most users should never need to adjust this.
 # phi_convict_threshold: 8
 
 # endpoint_snitch -- Set this to a class that implements
-# IEndpointSnitch, which will let Cassandra know enough
-# about your network topology to route requests efficiently.
+# IEndpointSnitch.  The snitch has two functions:
+# - it teaches Cassandra enough about your network topology to route
+#   requests efficiently
+# - it allows Cassandra to spread replicas around your cluster to avoid
+#   correlated failures. It does this by grouping machines into
+#   "datacenters" and "racks."  Cassandra will do its best not to have
+#   more than one replica on the same "rack" (which may not actually
+#   be a physical location)
+#
+# IF YOU CHANGE THE SNITCH AFTER DATA IS INSERTED INTO THE CLUSTER,
+# YOU MUST RUN A FULL REPAIR, SINCE THE SNITCH AFFECTS WHERE REPLICAS
+# ARE PLACED.
+#
 # Out of the box, Cassandra provides
-#  - org.apache.cassandra.locator.SimpleSnitch:
+#  - SimpleSnitch:
 #    Treats Strategy order as proximity. This improves cache locality
 #    when disabling read repair, which can further improve throughput.
-#  - org.apache.cassandra.locator.RackInferringSnitch:
+#    Only appropriate for single-datacenter deployments.
+#  - PropertyFileSnitch:
 #    Proximity is determined by rack and data center, which are
-#    assumed to correspond to the 3rd and 2nd octet of each node's
-#    IP address, respectively
-# org.apache.cassandra.locator.PropertyFileSnitch:
-#  - Proximity is determined by rack and data center, which are
 #    explicitly configured in cassandra-topology.properties.
-endpoint_snitch: org.apache.cassandra.locator.SimpleSnitch
+#  - RackInferringSnitch:
+#    Proximity is determined by rack and data center, which are
+#    assumed to correspond to the 3rd and 2nd octet of each node's
+#    IP address, respectively.  Unless this happens to match your
+#    deployment conventions (as it did Facebook's), this is best used
+#    as an example of writing a custom Snitch class.
+#  - Ec2Snitch:
+#    Appropriate for EC2 deployments in a single Region.  Loads Region
+#    and Availability Zone information from the EC2 API. The Region is
+#    treated as the Datacenter, and the Availability Zone as the rack.
+#    Only private IPs are used, so this will not work across multiple
+#    Regions.
+#  - Ec2MultiRegionSnitch:
+#    Uses public IPs as broadcast_address to allow cross-region
+#    connectivity.  (Thus, you should set seed addresses to the public
+#    IP as well.) You will need to open the storage_port or
+#    ssl_storage_port on the public IP firewall.  (For intra-Region
+#    traffic, Cassandra will switch to the private IP after
+#    establishing a connection.)
+#
+# You can use a custom Snitch by setting this to the full class name
+# of the snitch, which will be assumed to be on your classpath.
+endpoint_snitch: SimpleSnitch
 
-# dynamic_snitch -- This boolean controls whether the above snitch is
-# wrapped with a dynamic snitch, which will monitor read latencies
-# and avoid reading from hosts that have slowed (due to compaction,
-# for instance)
-dynamic_snitch: true
 # controls how often to perform the more expensive part of host score
 # calculation
 dynamic_snitch_update_interval_in_ms: 100
@@ -354,7 +391,7 @@
 # expressed as a double which represents a percentage.  Thus, a value of
 # 0.2 means Cassandra would continue to prefer the static snitch values
 # until the pinned host was 20% worse than the fastest.
-dynamic_snitch_badness_threshold: 0.0
+dynamic_snitch_badness_threshold: 0.1
 
 # request_scheduler -- Set this to a class that implements
 # RequestScheduler, which will schedule incoming client requests
@@ -413,14 +450,23 @@
 # users generate their own keys) TLS_RSA_WITH_AES_128_CBC_SHA as the cipher
 # suite for authentication, key exchange and encryption of the actual data 
transfers.
 # NOTE: No custom encryption options are enabled at the moment
-# The available internode options are : all, none
+# The available internode options are : all, none, dc, rack
+#
+# If set to dc cassandra will encrypt the traffic between the DCs
+# If set to rack cassandra will encrypt the traffic between the racks
 #
 # The passwords used in these options must match the passwords used when 
generating
 # the keystore and truststore.  For instructions on generating these files, 
see:
 # 
http://download.oracle.com/javase/6/docs/technotes/guides/security/jsse/JSSERefGuide.html#CreateKeystore
+#
 encryption_options:
     internode_encryption: none
     keystore: conf/.keystore
     keystore_password: cassandra
     truststore: conf/.truststore
-    truststore_password: cassandra
\ No newline at end of file
+    truststore_password: cassandra
+    # More advanced defaults below:
+    # protocol: TLS
+    # algorithm: SunX509
+    # store_type: JKS
+    # cipher_suites: 
[TLS_RSA_WITH_AES_128_CBC_SHA,TLS_RSA_WITH_AES_256_CBC_SHA]

Modified: 
branches/amdatu-cassandra-0.2.5/config/src/main/resources/org.amdatu.cassandra.launcher.cfg
==============================================================================
--- 
branches/amdatu-cassandra-0.2.5/config/src/main/resources/org.amdatu.cassandra.launcher.cfg
 (original)
+++ 
branches/amdatu-cassandra-0.2.5/config/src/main/resources/org.amdatu.cassandra.launcher.cfg
 Tue Jun 12 16:42:24 2012
@@ -102,7 +102,7 @@
 # The version of the specified target Cassandra release. Defaults to 1.0.10. 
To upgrade or downgrade the
 # Cassandra version used by the launcher, modify this version to the target 
version and the download_url
 # to a http or file URL to the target release.
-download_version=0.8.10
+download_version=1.0.10
 
 # URL where the launcher can download the target Cassandra release from. If 
the URL does not contain
 # a protocol, it is assumed that this URL points to a file on disk. The URL 
should point to a
@@ -110,4 +110,4 @@
 # Examples:
 #   
http://apache.cs.uu.nl/dist/cassandra/1.1.0/apache-cassandra-1.1.0-bin.tar.gz
 #   c:/downloads/apache-cassandra-1.1.0-bin.tar
-download_url=http://mirrors.supportex.net/apache/cassandra/0.8.10/apache-cassandra-0.8.10-bin.tar.gz
\ No newline at end of file
+download_url=http://apache.xl-mirror.nl/cassandra/1.0.10/apache-cassandra-1.0.10-bin.tar.gz
\ No newline at end of file
_______________________________________________
Amdatu-commits mailing list
[email protected]
http://lists.amdatu.org/mailman/listinfo/amdatu-commits

Reply via email to