Merge branch 'cassandra-1.2' into cassandra-2.0

Conflicts:
	CHANGES.txt
	src/java/org/apache/cassandra/config/DatabaseDescriptor.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/a26ac36a
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/a26ac36a
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/a26ac36a

Branch: refs/heads/cassandra-2.1.0
Commit: a26ac36a76b02d16cee04cc8d6bd0996e6760a3e
Parents: 60eab4e eb92a9f
Author: Brandon Williams <brandonwilli...@apache.org>
Authored: Fri Aug 1 10:46:38 2014 -0500
Committer: Brandon Williams <brandonwilli...@apache.org>
Committed: Fri Aug 1 10:46:38 2014 -0500

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../cassandra/config/DatabaseDescriptor.java    |  2 +-
 .../cassandra/locator/SimpleSeedProvider.java   | 36 ++++++++++++--------
 3 files changed, 23 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/a26ac36a/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 33bab82,0ad02c1..a5b49c5
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,63 -1,14 +1,64 @@@
-1.2.19
+2.0.10
+ * Fix truncate to always flush (CASSANDRA-7511)
+ * Remove shuffle and taketoken (CASSANDRA-7601)
+ * Switch liveRatio-related log messages to DEBUG (CASSANDRA-7467)
+ * (cqlsh) Add tab-completion for CREATE/DROP USER IF [NOT] EXISTS (CASSANDRA-7611)
+ * Always merge ranges owned by a single node (CASSANDRA-6930)
+ * Pig support for hadoop CqlInputFormat (CASSANDRA-6454)
+ * Fix ReversedType(DateType) mapping to native protocol (CASSANDRA-7576)
+ * (Windows) force range-based repair to non-sequential mode (CASSANDRA-7541)
+ * Fix range merging when DES scores are zero (CASSANDRA-7535)
+ * Warn when SSL certificates have expired (CASSANDRA-7528)
+ * Workaround JVM NPE on JMX bind failure (CASSANDRA-7254)
+ * Fix race in FileCacheService RemovalListener (CASSANDRA-7278)
+ * Fix inconsistent use of consistencyForCommit that allowed LOCAL_QUORUM
+   operations to incorrectly become full QUORUM (CASSANDRA-7345)
+ * Properly handle unrecognized opcodes and flags (CASSANDRA-7440)
+ * (Hadoop) close CqlRecordWriter clients when finished (CASSANDRA-7459)
+ * Make sure high level sstables get compacted (CASSANDRA-7414)
+ * Fix AssertionError when using empty clustering columns and static columns
+   (CASSANDRA-7455)
+ * Add inter_dc_stream_throughput_outbound_megabits_per_sec (CASSANDRA-6596)
+ * Add option to disable STCS in L0 (CASSANDRA-6621)
+ * Fix error when doing reversed queries with static columns (CASSANDRA-7490)
+ * Backport CASSANDRA-6747 (CASSANDRA-7560)
+ * Track max/min timestamps for range tombstones (CASSANDRA-7647)
+ * Fix NPE when listing saved caches dir (CASSANDRA-7632)
+Merged from 1.2:
+ * SimpleSeedProvider no longer caches seeds forever (CASSANDRA-7663)
  * Set correct stream ID on responses when non-Exception Throwables
    are thrown while handling native protocol messages (CASSANDRA-7470)
- * Fix row size miscalculation in LazilyCompactedRow (CASSANDRA-7543)
-1.2.18
- * Support Thrift tables clustering columns on CqlPagingInputFormat (CASSANDRA-7445)
- * Fix compilation with java 6 broke by CASSANDRA-7147
-1.2.17
+2.0.9
+ * Fix CC#collectTimeOrderedData() tombstone optimisations (CASSANDRA-7394)
+ * Fix assertion error in CL.ANY timeout handling (CASSANDRA-7364)
+ * Handle empty CFs in Memtable#maybeUpdateLiveRatio() (CASSANDRA-7401)
+ * Fix native protocol CAS batches (CASSANDRA-7337)
+ * Add per-CF range read request latency metrics (CASSANDRA-7338)
+ * Fix NPE in StreamTransferTask.createMessageForRetry() (CASSANDRA-7323)
+ * Add conditional CREATE/DROP USER support (CASSANDRA-7264)
+ * Swap local and global default read repair chances (CASSANDRA-7320)
+ * Add missing iso8601 patterns for date strings (CASSANDRA-6973)
+ * Support selecting multiple rows in a partition using IN (CASSANDRA-6875)
+ * cqlsh: always emphasize the partition key in DESC output (CASSANDRA-7274)
+ * Copy compaction options to make sure they are reloaded (CASSANDRA-7290)
+ * Add option to do more aggressive tombstone compactions (CASSANDRA-6563)
+ * Don't try to compact already-compacting files in HHOM (CASSANDRA-7288)
+ * Add authentication support to shuffle (CASSANDRA-6484)
+ * Cqlsh counts non-empty lines for "Blank lines" warning (CASSANDRA-7325)
+ * Make StreamSession#closeSession() idempotent (CASSANDRA-7262)
+ * Fix infinite loop on exception while streaming (CASSANDRA-7330)
+ * Reference sstables before populating key cache (CASSANDRA-7234)
+ * Account for range tombstones in min/max column names (CASSANDRA-7235)
+ * Improve sub range repair validation (CASSANDRA-7317)
+ * Accept subtypes for function results, type casts (CASSANDRA-6766)
+ * Support DISTINCT for static columns and fix behaviour when DISTINCT is
+   not used (CASSANDRA-7305).
+ * Refuse range queries with strict bounds on compact tables since they
+   are broken (CASSANDRA-7059)
+Merged from 1.2:
+ * Expose global ColumnFamily metrics (CASSANDRA-7273)
  * cqlsh: Fix CompositeType columns in DESCRIBE TABLE output (CASSANDRA-7399)
  * Expose global ColumnFamily metrics (CASSANDRA-7273)
  * Handle possible integer overflow in FastByteArrayOutputStream (CASSANDRA-7373)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a26ac36a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/config/DatabaseDescriptor.java
index d4c1f26,3079283..836edfd
--- a/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
+++ b/src/java/org/apache/cassandra/config/DatabaseDescriptor.java
@@@ -119,386 -117,424 +119,386 @@@ public class DatabaseDescripto
          }
          catch (Exception e)
          {
-             ClassLoader loader = DatabaseDescriptor.class.getClassLoader();
-             url = loader.getResource(configUrl);
-             if (url == null)
-                 throw new ConfigurationException("Cannot locate " + configUrl);
+             logger.error("Fatal error during configuration loading", e);
+             System.err.println(e.getMessage() + "\nFatal error during configuration loading; unable to start. See log for stacktrace.");
+             System.exit(1);
          }
-
-         return url;
      }

-     static
+     @VisibleForTesting
-     static Config loadConfig() throws ConfigurationException
++    public static Config loadConfig() throws ConfigurationException
      {
-         if (Config.getLoadYaml())
-             loadYaml();
-         else
-             conf = new Config();
+         String loaderClass = System.getProperty("cassandra.config.loader");
+         ConfigurationLoader loader = loaderClass == null
+                                    ? new YamlConfigurationLoader()
+                                    : FBUtilities.<ConfigurationLoader>construct(loaderClass, "configuration loading");
+         return loader.loadConfig();
      }
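For context on the hunk above: loadConfig() now delegates to a pluggable ConfigurationLoader. When the cassandra.config.loader system property names a class, FBUtilities.construct() instantiates it reflectively (so it needs a public no-arg constructor); otherwise the default YamlConfigurationLoader parses cassandra.yaml. A minimal sketch of a custom loader, assuming the 2.0-era interface exposes only the single method used here (Config loadConfig() throws ConfigurationException); the class and package names are hypothetical:

    package com.example.config; // hypothetical package

    import org.apache.cassandra.config.Config;
    import org.apache.cassandra.config.ConfigurationLoader;
    import org.apache.cassandra.exceptions.ConfigurationException;

    // Builds a Config in code instead of parsing cassandra.yaml.
    public class FixedConfigLoader implements ConfigurationLoader
    {
        public Config loadConfig() throws ConfigurationException
        {
            Config config = new Config();                // starts from Config's field defaults
            config.cluster_name = "Test Cluster";        // hypothetical overrides
            config.commitlog_sync = Config.CommitLogSync.periodic;
            config.commitlog_sync_period_in_ms = 10000;  // must satisfy applyConfig()'s checks below
            return config;
        }
    }

Such a loader would be selected at startup with -Dcassandra.config.loader=com.example.config.FixedConfigLoader.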
-     static void loadYaml()
+
+     private static void applyConfig(Config config) throws ConfigurationException
      {
-         try
-         {
-             URL url = getStorageConfigURL();
-             logger.info("Loading settings from " + url);
-             InputStream input;
-             try
-             {
-                 input = url.openStream();
-             }
-             catch (IOException e)
-             {
-                 // getStorageConfigURL should have ruled this out
-                 throw new AssertionError(e);
-             }
-             org.yaml.snakeyaml.constructor.Constructor constructor = new org.yaml.snakeyaml.constructor.Constructor(Config.class);
-             TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class);
-             seedDesc.putMapPropertyType("parameters", String.class, String.class);
-             constructor.addTypeDescription(seedDesc);
-             Yaml yaml = new Yaml(new Loader(constructor));
-             conf = (Config)yaml.load(input);
+         conf = config;

-             logger.info("Data files directories: " + Arrays.toString(conf.data_file_directories));
-             logger.info("Commit log directory: " + conf.commitlog_directory);
+         logger.info("Data files directories: " + Arrays.toString(conf.data_file_directories));
+         logger.info("Commit log directory: " + conf.commitlog_directory);

-             if (conf.commitlog_sync == null)
-             {
-                 throw new ConfigurationException("Missing required directive CommitLogSync");
-             }
+         if (conf.commitlog_sync == null)
+         {
+             throw new ConfigurationException("Missing required directive CommitLogSync");
+         }

-             if (conf.commitlog_sync == Config.CommitLogSync.batch)
-             {
-                 if (conf.commitlog_sync_batch_window_in_ms == null)
-                 {
-                     throw new ConfigurationException("Missing value for commitlog_sync_batch_window_in_ms: Double expected.");
-                 }
-                 else if (conf.commitlog_sync_period_in_ms != null)
-                 {
-                     throw new ConfigurationException("Batch sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_batch_window_in_ms when using batch sync");
-                 }
-                 logger.debug("Syncing log with a batch window of " + conf.commitlog_sync_batch_window_in_ms);
-             }
-             else
+         if (conf.commitlog_sync == Config.CommitLogSync.batch)
+         {
+             if (conf.commitlog_sync_batch_window_in_ms == null)
              {
-                 if (conf.commitlog_sync_period_in_ms == null)
-                 {
-                     throw new ConfigurationException("Missing value for commitlog_sync_period_in_ms: Integer expected");
-                 }
-                 else if (conf.commitlog_sync_batch_window_in_ms != null)
-                 {
-                     throw new ConfigurationException("commitlog_sync_period_in_ms specified, but commitlog_sync_batch_window_in_ms found. Only specify commitlog_sync_period_in_ms when using periodic sync.");
-                 }
-                 logger.debug("Syncing log with a period of " + conf.commitlog_sync_period_in_ms);
+                 throw new ConfigurationException("Missing value for commitlog_sync_batch_window_in_ms: Double expected.");
              }
-
-             if (conf.commitlog_total_space_in_mb == null)
-                 conf.commitlog_total_space_in_mb = System.getProperty("os.arch").contains("64") ? 1024 : 32;
-
-             /* evaluate the DiskAccessMode Config directive, which also affects indexAccessMode selection */
-             if (conf.disk_access_mode == Config.DiskAccessMode.auto)
+             else if (conf.commitlog_sync_period_in_ms != null)
              {
-                 conf.disk_access_mode = System.getProperty("os.arch").contains("64") ? Config.DiskAccessMode.mmap : Config.DiskAccessMode.standard;
-                 indexAccessMode = conf.disk_access_mode;
-                 logger.info("DiskAccessMode 'auto' determined to be " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+                 throw new ConfigurationException("Batch sync specified, but commitlog_sync_period_in_ms found. Only specify commitlog_sync_batch_window_in_ms when using batch sync");
              }
-             else if (conf.disk_access_mode == Config.DiskAccessMode.mmap_index_only)
+             logger.debug("Syncing log with a batch window of " + conf.commitlog_sync_batch_window_in_ms);
+         }
+         else
+         {
+             if (conf.commitlog_sync_period_in_ms == null)
              {
-                 conf.disk_access_mode = Config.DiskAccessMode.standard;
-                 indexAccessMode = Config.DiskAccessMode.mmap;
-                 logger.info("DiskAccessMode is " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+                 throw new ConfigurationException("Missing value for commitlog_sync_period_in_ms: Integer expected");
              }
-             else
+             else if (conf.commitlog_sync_batch_window_in_ms != null)
              {
-                 indexAccessMode = conf.disk_access_mode;
-                 logger.info("DiskAccessMode is " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+                 throw new ConfigurationException("commitlog_sync_period_in_ms specified, but commitlog_sync_batch_window_in_ms found. Only specify commitlog_sync_period_in_ms when using periodic sync.");
              }
+             logger.debug("Syncing log with a period of " + conf.commitlog_sync_period_in_ms);
+         }
-             logger.info("disk_failure_policy is " + conf.disk_failure_policy);
+         if (conf.commitlog_total_space_in_mb == null)
+             conf.commitlog_total_space_in_mb = hasLargeAddressSpace() ? 1024 : 32;

-             /* Authentication and authorization backend, implementing IAuthenticator and IAuthorizer */
-             if (conf.authenticator != null)
-                 authenticator = FBUtilities.newAuthenticator(conf.authenticator);
+         /* evaluate the DiskAccessMode Config directive, which also affects indexAccessMode selection */
+         if (conf.disk_access_mode == Config.DiskAccessMode.auto)
+         {
+             conf.disk_access_mode = hasLargeAddressSpace() ? Config.DiskAccessMode.mmap : Config.DiskAccessMode.standard;
+             indexAccessMode = conf.disk_access_mode;
+             logger.info("DiskAccessMode 'auto' determined to be " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+         }
+         else if (conf.disk_access_mode == Config.DiskAccessMode.mmap_index_only)
+         {
+             conf.disk_access_mode = Config.DiskAccessMode.standard;
+             indexAccessMode = Config.DiskAccessMode.mmap;
+             logger.info("DiskAccessMode is " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+         }
+         else
+         {
+             indexAccessMode = conf.disk_access_mode;
+             logger.info("DiskAccessMode is " + conf.disk_access_mode + ", indexAccessMode is " + indexAccessMode );
+         }

-             if (conf.authority != null)
-             {
-                 logger.warn("Please rename 'authority' to 'authorizer' in cassandra.yaml");
-                 if (!conf.authority.equals("org.apache.cassandra.auth.AllowAllAuthority"))
-                     throw new ConfigurationException("IAuthority interface has been deprecated,"
-                                                      + " please implement IAuthorizer instead.");
-             }
+         logger.info("disk_failure_policy is " + conf.disk_failure_policy);
+         logger.info("commit_failure_policy is " + conf.commit_failure_policy);

-             if (conf.authorizer != null)
-                 authorizer = FBUtilities.newAuthorizer(conf.authorizer);
+         /* Authentication and authorization backend, implementing IAuthenticator and IAuthorizer */
+         if (conf.authenticator != null)
+             authenticator = FBUtilities.newAuthenticator(conf.authenticator);

-             if (authenticator instanceof AllowAllAuthenticator && !(authorizer instanceof AllowAllAuthorizer))
-                 throw new ConfigurationException("AllowAllAuthenticator can't be used with " + conf.authorizer);
+         if (conf.authorizer != null)
+             authorizer = FBUtilities.newAuthorizer(conf.authorizer);

-             if (conf.internode_authenticator != null)
-                 internodeAuthenticator = FBUtilities.construct(conf.internode_authenticator, "internode_authenticator");
-             else
-                 internodeAuthenticator = new AllowAllInternodeAuthenticator();
+         if (authenticator instanceof AllowAllAuthenticator && !(authorizer instanceof AllowAllAuthorizer))
+             throw new ConfigurationException("AllowAllAuthenticator can't be used with " + conf.authorizer);

-             authenticator.validateConfiguration();
-             authorizer.validateConfiguration();
-             internodeAuthenticator.validateConfiguration();
+         if (conf.internode_authenticator != null)
+             internodeAuthenticator = FBUtilities.construct(conf.internode_authenticator, "internode_authenticator");
+         else
+             internodeAuthenticator = new AllowAllInternodeAuthenticator();

-             /* Hashing strategy */
-             if (conf.partitioner == null)
-             {
-                 throw new ConfigurationException("Missing directive: partitioner");
-             }
+         authenticator.validateConfiguration();
+         authorizer.validateConfiguration();
+         internodeAuthenticator.validateConfiguration();

-             try
-             {
-                 partitioner = FBUtilities.newPartitioner(System.getProperty("cassandra.partitioner", conf.partitioner));
-             }
-             catch (Exception e)
-             {
-                 throw new ConfigurationException("Invalid partitioner class " + conf.partitioner);
-             }
-             paritionerName = partitioner.getClass().getCanonicalName();
+         /* Hashing strategy */
+         if (conf.partitioner == null)
+         {
+             throw new ConfigurationException("Missing directive: partitioner");
+         }
+         try
+         {
+             partitioner = FBUtilities.newPartitioner(System.getProperty("cassandra.partitioner", conf.partitioner));
+         }
+         catch (Exception e)
+         {
+             throw new ConfigurationException("Invalid partitioner class " + conf.partitioner);
+         }
+         paritionerName = partitioner.getClass().getCanonicalName();

-             if (conf.max_hint_window_in_ms == null)
-             {
-                 throw new ConfigurationException("max_hint_window_in_ms cannot be set to null");
-             }
+         if (conf.max_hint_window_in_ms == null)
+         {
+             throw new ConfigurationException("max_hint_window_in_ms cannot be set to null");
+         }

-             /* phi convict threshold for FailureDetector */
-             if (conf.phi_convict_threshold < 5 || conf.phi_convict_threshold > 16)
-             {
-                 throw new ConfigurationException("phi_convict_threshold must be between 5 and 16");
-             }
+         /* phi convict threshold for FailureDetector */
+         if (conf.phi_convict_threshold < 5 || conf.phi_convict_threshold > 16)
+         {
+             throw new ConfigurationException("phi_convict_threshold must be between 5 and 16");
+         }

-             /* Thread per pool */
-             if (conf.concurrent_reads != null && conf.concurrent_reads < 2)
-             {
-                 throw new ConfigurationException("concurrent_reads must be at least 2");
-             }
+         /* Thread per pool */
+         if (conf.concurrent_reads != null && conf.concurrent_reads < 2)
+         {
+             throw new ConfigurationException("concurrent_reads must be at least 2");
+         }

-             if (conf.concurrent_writes != null && conf.concurrent_writes < 2)
-             {
-                 throw new ConfigurationException("concurrent_writes must be at least 2");
-             }
+         if (conf.concurrent_writes != null && conf.concurrent_writes < 2)
+         {
+             throw new ConfigurationException("concurrent_writes must be at least 2");
+         }

-             if (conf.concurrent_replicates != null && conf.concurrent_replicates < 2)
-             {
-                 throw new ConfigurationException("concurrent_replicates must be at least 2");
-             }
+         if (conf.concurrent_replicates != null && conf.concurrent_replicates < 2)
+         {
+             throw new ConfigurationException("concurrent_replicates must be at least 2");
+         }
+
+         if (conf.file_cache_size_in_mb == null)
+             conf.file_cache_size_in_mb = Math.min(512, (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576)));

-             if (conf.memtable_total_space_in_mb == null)
-                 conf.memtable_total_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (3 * 1048576));
-             if (conf.memtable_total_space_in_mb <= 0)
-                 throw new ConfigurationException("memtable_total_space_in_mb must be positive");
-             logger.info("Global memtable threshold is enabled at {}MB", conf.memtable_total_space_in_mb);
+         if (conf.memtable_total_space_in_mb == null)
+             conf.memtable_total_space_in_mb = (int) (Runtime.getRuntime().maxMemory() / (4 * 1048576));
+         if (conf.memtable_total_space_in_mb <= 0)
+             throw new ConfigurationException("memtable_total_space_in_mb must be positive");
+         logger.info("Global memtable threshold is enabled at {}MB", conf.memtable_total_space_in_mb);

-             /* Memtable flush writer threads */
-             if (conf.memtable_flush_writers != null && conf.memtable_flush_writers < 1)
+         /* Memtable flush writer threads */
+         if (conf.memtable_flush_writers != null && conf.memtable_flush_writers < 1)
+         {
+             throw new ConfigurationException("memtable_flush_writers must be at least 1");
+         }
+         else if (conf.memtable_flush_writers == null)
+         {
+             conf.memtable_flush_writers = conf.data_file_directories.length;
+         }
+
+         /* Local IP or hostname to bind services to */
+         if (conf.listen_address != null)
+         {
+             if (conf.listen_address.equals("0.0.0.0"))
+                 throw new ConfigurationException("listen_address cannot be 0.0.0.0!");
+             try
              {
-                 throw new ConfigurationException("memtable_flush_writers must be at least 1");
+                 listenAddress = InetAddress.getByName(conf.listen_address);
              }
-             else if (conf.memtable_flush_writers == null)
+             catch (UnknownHostException e)
              {
-                 conf.memtable_flush_writers = conf.data_file_directories.length;
+                 throw new ConfigurationException("Unknown listen_address '" + conf.listen_address + "'");
              }
+         }

-             /* Local IP or hostname to bind services to */
-             if (conf.listen_address != null)
+         /* Gossip Address to broadcast */
+         if (conf.broadcast_address != null)
+         {
+             if (conf.broadcast_address.equals("0.0.0.0"))
              {
-                 if (conf.listen_address.equals("0.0.0.0"))
-                     throw new ConfigurationException("listen_address cannot be 0.0.0.0!");
-
-                 try
-                 {
-                     listenAddress = InetAddress.getByName(conf.listen_address);
-                 }
-                 catch (UnknownHostException e)
-                 {
-                     throw new ConfigurationException("Unknown listen_address '" + conf.listen_address + "'");
-                 }
+                 throw new ConfigurationException("broadcast_address cannot be 0.0.0.0!");
              }

-             /* Gossip Address to broadcast */
-             if (conf.broadcast_address != null)
+             try
              {
-                 if (conf.broadcast_address.equals("0.0.0.0"))
-                 {
-                     throw new ConfigurationException("broadcast_address cannot be 0.0.0.0!");
-                 }
-
-                 try
-                 {
-                     broadcastAddress = InetAddress.getByName(conf.broadcast_address);
-                 }
-                 catch (UnknownHostException e)
-                 {
-                     throw new ConfigurationException("Unknown broadcast_address '" + conf.broadcast_address + "'");
-                 }
+                 broadcastAddress = InetAddress.getByName(conf.broadcast_address);
+             }
+             catch (UnknownHostException e)
+             {
+                 throw new ConfigurationException("Unknown broadcast_address '" + conf.broadcast_address + "'");
              }
+         }

-             /* Local IP or hostname to bind RPC server to */
-             if (conf.rpc_address != null)
+         /* Local IP or hostname to bind RPC server to */
+         if (conf.rpc_address != null)
+         {
+             try
              {
-                 try
-                 {
-                     rpcAddress = InetAddress.getByName(conf.rpc_address);
-                 }
-                 catch (UnknownHostException e)
-                 {
-                     throw new ConfigurationException("Unknown host in rpc_address " + conf.rpc_address);
-                 }
+                 rpcAddress = InetAddress.getByName(conf.rpc_address);
              }
-             else
+             catch (UnknownHostException e)
              {
-                 rpcAddress = FBUtilities.getLocalAddress();
+                 throw new ConfigurationException("Unknown host in rpc_address " + conf.rpc_address);
              }
+         }
+         else
+         {
+             rpcAddress = FBUtilities.getLocalAddress();
+         }

-             if (conf.thrift_framed_transport_size_in_mb <= 0)
-                 throw new ConfigurationException("thrift_framed_transport_size_in_mb must be positive");
+         if (conf.thrift_framed_transport_size_in_mb <= 0)
+             throw new ConfigurationException("thrift_framed_transport_size_in_mb must be positive");

-             /* end point snitch */
-             if (conf.endpoint_snitch == null)
-             {
-                 throw new ConfigurationException("Missing endpoint_snitch directive");
-             }
-             snitch = createEndpointSnitch(conf.endpoint_snitch);
-             EndpointSnitchInfo.create();
+         if (conf.native_transport_max_frame_size_in_mb <= 0)
+             throw new ConfigurationException("native_transport_max_frame_size_in_mb must be positive");

-             localDC = snitch.getDatacenter(FBUtilities.getBroadcastAddress());
-             localComparator = new Comparator<InetAddress>()
+         /* end point snitch */
+         if (conf.endpoint_snitch == null)
+         {
+             throw new ConfigurationException("Missing endpoint_snitch directive");
+         }
+         snitch = createEndpointSnitch(conf.endpoint_snitch);
+         EndpointSnitchInfo.create();
+
+         localDC = snitch.getDatacenter(FBUtilities.getBroadcastAddress());
+         localComparator = new Comparator<InetAddress>()
+         {
+             public int compare(InetAddress endpoint1, InetAddress endpoint2)
              {
-                 public int compare(InetAddress endpoint1, InetAddress endpoint2)
-                 {
-                     boolean local1 = localDC.equals(snitch.getDatacenter(endpoint1));
-                     boolean local2 = localDC.equals(snitch.getDatacenter(endpoint2));
-                     if (local1 && !local2)
-                         return -1;
-                     if (local2 && !local1)
-                         return 1;
-                     return 0;
-                 }
-             };
+                 boolean local1 = localDC.equals(snitch.getDatacenter(endpoint1));
+                 boolean local2 = localDC.equals(snitch.getDatacenter(endpoint2));
+                 if (local1 && !local2)
+                     return -1;
+                 if (local2 && !local1)
+                     return 1;
+                 return 0;
+             }
+         };
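The localComparator above only separates "local DC" from "everything else", so a stable sort moves local-datacenter endpoints to the front while leaving the relative order of everything else untouched. A self-contained demo of that ordering, with datacenter names standing in for endpoints so no snitch is needed (all values hypothetical):

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.Collections;
    import java.util.Comparator;
    import java.util.List;

    // Toy model of the DC-local ordering used by localComparator.
    final class LocalFirstDemo
    {
        public static void main(String[] args)
        {
            final String localDC = "DC1";
            Comparator<String> localComparator = new Comparator<String>()
            {
                public int compare(String dc1, String dc2)
                {
                    boolean local1 = localDC.equals(dc1);
                    boolean local2 = localDC.equals(dc2);
                    if (local1 && !local2)
                        return -1;
                    if (local2 && !local1)
                        return 1;
                    return 0; // ties keep their relative order (Collections.sort is stable)
                }
            };

            List<String> replicas = new ArrayList<String>(Arrays.asList("DC2", "DC1", "DC3", "DC1"));
            Collections.sort(replicas, localComparator);
            System.out.println(replicas); // [DC1, DC1, DC2, DC3]
        }
    }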
-             /* Request Scheduler setup */
-             requestSchedulerOptions = conf.request_scheduler_options;
-             if (conf.request_scheduler != null)
+         /* Request Scheduler setup */
+         requestSchedulerOptions = conf.request_scheduler_options;
+         if (conf.request_scheduler != null)
+         {
+             try
              {
-                 try
+                 if (requestSchedulerOptions == null)
                  {
-                     if (requestSchedulerOptions == null)
-                     {
-                         requestSchedulerOptions = new RequestSchedulerOptions();
-                     }
-                     Class<?> cls = Class.forName(conf.request_scheduler);
-                     requestScheduler = (IRequestScheduler) cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions);
-                 }
-                 catch (ClassNotFoundException e)
-                 {
-                     throw new ConfigurationException("Invalid Request Scheduler class " + conf.request_scheduler);
-                 }
-                 catch (Exception e)
-                 {
-                     throw new ConfigurationException("Unable to instantiate request scheduler", e);
+                     requestSchedulerOptions = new RequestSchedulerOptions();
                  }
+                 Class<?> cls = Class.forName(conf.request_scheduler);
+                 requestScheduler = (IRequestScheduler) cls.getConstructor(RequestSchedulerOptions.class).newInstance(requestSchedulerOptions);
              }
-             else
+             catch (ClassNotFoundException e)
              {
-                 requestScheduler = new NoScheduler();
+                 throw new ConfigurationException("Invalid Request Scheduler class " + conf.request_scheduler);
              }
-
-             if (conf.request_scheduler_id == RequestSchedulerId.keyspace)
-             {
-                 requestSchedulerId = conf.request_scheduler_id;
-             }
-             else
+             catch (Exception e)
              {
-                 // Default to Keyspace
-                 requestSchedulerId = RequestSchedulerId.keyspace;
+                 throw new ConfigurationException("Unable to instantiate request scheduler", e);
              }
+         }
+         else
+         {
+             requestScheduler = new NoScheduler();
+         }

-             if (logger.isDebugEnabled() && conf.auto_bootstrap != null)
-             {
-                 logger.debug("setting auto_bootstrap to " + conf.auto_bootstrap);
-             }
+         if (conf.request_scheduler_id == RequestSchedulerId.keyspace)
+         {
+             requestSchedulerId = conf.request_scheduler_id;
+         }
+         else
+         {
+             // Default to Keyspace
+             requestSchedulerId = RequestSchedulerId.keyspace;
+         }

-             logger.info((conf.multithreaded_compaction ? "" : "Not ") + "using multi-threaded compaction");
+         if (logger.isDebugEnabled() && conf.auto_bootstrap != null)
+         {
+             logger.debug("setting auto_bootstrap to " + conf.auto_bootstrap);
+         }

-             if (conf.in_memory_compaction_limit_in_mb != null && conf.in_memory_compaction_limit_in_mb <= 0)
-             {
-                 throw new ConfigurationException("in_memory_compaction_limit_in_mb must be a positive integer");
-             }
+         logger.info((conf.multithreaded_compaction ? "" : "Not ") + "using multi-threaded compaction");

-             if (conf.concurrent_compactors == null)
-                 conf.concurrent_compactors = FBUtilities.getAvailableProcessors();
+         if (conf.in_memory_compaction_limit_in_mb != null && conf.in_memory_compaction_limit_in_mb <= 0)
+         {
+             throw new ConfigurationException("in_memory_compaction_limit_in_mb must be a positive integer");
+         }

-             if (conf.concurrent_compactors <= 0)
-                 throw new ConfigurationException("concurrent_compactors should be strictly greater than 0");
+         if (conf.concurrent_compactors == null)
+             conf.concurrent_compactors = FBUtilities.getAvailableProcessors();

-             /* data file and commit log directories. they get created later, when they're needed. */
-             if (conf.commitlog_directory != null && conf.data_file_directories != null && conf.saved_caches_directory != null)
-             {
-                 for (String datadir : conf.data_file_directories)
-                 {
-                     if (datadir.equals(conf.commitlog_directory))
-                         throw new ConfigurationException("commitlog_directory must not be the same as any data_file_directories");
-                     if (datadir.equals(conf.saved_caches_directory))
-                         throw new ConfigurationException("saved_caches_directory must not be the same as any data_file_directories");
-                 }
+         if (conf.concurrent_compactors <= 0)
+             throw new ConfigurationException("concurrent_compactors should be strictly greater than 0");

-                 if (conf.commitlog_directory.equals(conf.saved_caches_directory))
-                     throw new ConfigurationException("saved_caches_directory must not be the same as the commitlog_directory");
-             }
-             else
+         /* data file and commit log directories. they get created later, when they're needed. */
+         if (conf.commitlog_directory != null && conf.data_file_directories != null && conf.saved_caches_directory != null)
+         {
+             for (String datadir : conf.data_file_directories)
              {
-                 if (conf.commitlog_directory == null)
-                     throw new ConfigurationException("commitlog_directory missing");
-                 if (conf.data_file_directories == null)
-                     throw new ConfigurationException("data_file_directories missing; at least one data directory must be specified");
-                 if (conf.saved_caches_directory == null)
-                     throw new ConfigurationException("saved_caches_directory missing");
+                 if (datadir.equals(conf.commitlog_directory))
+                     throw new ConfigurationException("commitlog_directory must not be the same as any data_file_directories");
+                 if (datadir.equals(conf.saved_caches_directory))
+                     throw new ConfigurationException("saved_caches_directory must not be the same as any data_file_directories");
              }

-             if (conf.initial_token != null)
-                 for (String token : tokensFromString(conf.initial_token))
-                     partitioner.getTokenFactory().validate(token);
+             if (conf.commitlog_directory.equals(conf.saved_caches_directory))
+                 throw new ConfigurationException("saved_caches_directory must not be the same as the commitlog_directory");
+         }
+         else
+         {
+             if (conf.commitlog_directory == null)
+                 throw new ConfigurationException("commitlog_directory missing");
+             if (conf.data_file_directories == null)
+                 throw new ConfigurationException("data_file_directories missing; at least one data directory must be specified");
+             if (conf.saved_caches_directory == null)
+                 throw new ConfigurationException("saved_caches_directory missing");
+         }

-             if (conf.num_tokens > MAX_NUM_TOKENS)
-                 throw new ConfigurationException(String.format("A maximum number of %d tokens per node is supported", MAX_NUM_TOKENS));
+         if (conf.initial_token != null)
+             for (String token : tokensFromString(conf.initial_token))
+                 partitioner.getTokenFactory().validate(token);

-             try
-             {
-                 // if key_cache_size_in_mb option was set to "auto" then size of the cache should be "min(5% of Heap (in MB), 100MB)
-                 keyCacheSizeInMB = (conf.key_cache_size_in_mb == null)
-                     ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)), 100)
-                     : conf.key_cache_size_in_mb;
+         if (conf.num_tokens == null)
+             conf.num_tokens = 1;
+         else if (conf.num_tokens > MAX_NUM_TOKENS)
+             throw new ConfigurationException(String.format("A maximum number of %d tokens per node is supported", MAX_NUM_TOKENS));

-                 if (keyCacheSizeInMB < 0)
-                     throw new NumberFormatException(); // to escape duplicating error message
-             }
-             catch (NumberFormatException e)
-             {
-                 throw new ConfigurationException("key_cache_size_in_mb option was set incorrectly to '"
-                                                  + conf.key_cache_size_in_mb + "', supported values are <integer> >= 0.");
-             }
+         try
+         {
+             // if key_cache_size_in_mb option was set to "auto" then size of the cache should be "min(5% of Heap (in MB), 100MB)
+             keyCacheSizeInMB = (conf.key_cache_size_in_mb == null)
+                 ? Math.min(Math.max(1, (int) (Runtime.getRuntime().totalMemory() * 0.05 / 1024 / 1024)), 100)
+                 : conf.key_cache_size_in_mb;
+
+             if (keyCacheSizeInMB < 0)
+                 throw new NumberFormatException(); // to escape duplicating error message
+         }
+         catch (NumberFormatException e)
+         {
+             throw new ConfigurationException("key_cache_size_in_mb option was set incorrectly to '"
+                                              + conf.key_cache_size_in_mb + "', supported values are <integer> >= 0.");
+         }
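To make the "auto" key cache sizing above concrete: keyCacheSizeInMB = min(max(1, 5% of heap in MB), 100). For an 8 GB heap, 5% is roughly 409 MB, so the 100 MB cap applies; for a 512 MB heap the result is 25 MB. A short sketch of the arithmetic with a hypothetical heap size (the real code consults Runtime.getRuntime().totalMemory()):

    public class KeyCacheSizing
    {
        public static void main(String[] args)
        {
            long heapBytes = 8L * 1024 * 1024 * 1024;                         // pretend totalMemory() reports 8 GB
            int fivePercentMB = (int) (heapBytes * 0.05 / 1024 / 1024);       // 409
            int keyCacheSizeInMB = Math.min(Math.max(1, fivePercentMB), 100); // capped at 100
            System.out.println(keyCacheSizeInMB);                             // prints 100
        }
    }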
-             rowCacheProvider = FBUtilities.newCacheProvider(conf.row_cache_provider);
+         memoryAllocator = FBUtilities.newOffHeapAllocator(conf.memory_allocator);

-             if(conf.encryption_options != null)
-             {
-                 logger.warn("Please rename encryption_options as server_encryption_options in the yaml");
-                 //operate under the assumption that server_encryption_options is not set in yaml rather than both
-                 conf.server_encryption_options = conf.encryption_options;
-             }
+         if(conf.encryption_options != null)
+         {
+             logger.warn("Please rename encryption_options as server_encryption_options in the yaml");
+             //operate under the assumption that server_encryption_options is not set in yaml rather than both
+             conf.server_encryption_options = conf.encryption_options;
+         }

-             String allocatorClass = conf.memtable_allocator;
-             if (!allocatorClass.contains("."))
-                 allocatorClass = "org.apache.cassandra.utils." + allocatorClass;
-             memtableAllocator = FBUtilities.classForName(allocatorClass, "allocator");
+         String allocatorClass = conf.memtable_allocator;
+         if (!allocatorClass.contains("."))
+             allocatorClass = "org.apache.cassandra.utils." + allocatorClass;
+         memtableAllocator = FBUtilities.classForName(allocatorClass, "allocator");

-             // Hardcoded system tables
-             List<KSMetaData> systemKeyspaces = Arrays.asList(KSMetaData.systemKeyspace(), KSMetaData.traceKeyspace());
-             assert systemKeyspaces.size() == Schema.systemKeyspaceNames.size();
-             for (KSMetaData ksmd : systemKeyspaces)
-             {
-                 // install the definition
-                 for (CFMetaData cfm : ksmd.cfMetaData().values())
-                     Schema.instance.load(cfm);
-                 Schema.instance.setTableDefinition(ksmd);
-             }
+         // Hardcoded system keyspaces
+         List<KSMetaData> systemKeyspaces = Arrays.asList(KSMetaData.systemKeyspace());
+         assert systemKeyspaces.size() == Schema.systemKeyspaceNames.size();
+         for (KSMetaData ksmd : systemKeyspaces)
+             Schema.instance.load(ksmd);

-             /* Load the seeds for node contact points */
-             if (conf.seed_provider == null)
-             {
-                 throw new ConfigurationException("seeds configuration is missing; a minimum of one seed is required.");
-             }
-             try
-             {
-                 Class<?> seedProviderClass = Class.forName(conf.seed_provider.class_name);
-                 seedProvider = (SeedProvider)seedProviderClass.getConstructor(Map.class).newInstance(conf.seed_provider.parameters);
-             }
-             // there are about 5 checked exceptions that could be thrown here.
-             catch (Exception e)
-             {
-                 logger.error("Fatal configuration error", e);
-                 System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
-                 System.exit(1);
-             }
-             if (seedProvider.getSeeds().size() == 0)
-                 throw new ConfigurationException("The seed provider lists no seeds.");
+         /* Load the seeds for node contact points */
+         if (conf.seed_provider == null)
+         {
+             throw new ConfigurationException("seeds configuration is missing; a minimum of one seed is required.");
          }
-         catch (ConfigurationException e)
+         try
          {
-             logger.error("Fatal configuration error", e);
-             System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
-             System.exit(1);
+             Class<?> seedProviderClass = Class.forName(conf.seed_provider.class_name);
+             seedProvider = (SeedProvider)seedProviderClass.getConstructor(Map.class).newInstance(conf.seed_provider.parameters);
          }
-         catch (YAMLException e)
+         // there are about 5 checked exceptions that could be thrown here.
+         catch (Exception e)
          {
-             logger.error("Fatal configuration error error", e);
-             System.err.println(e.getMessage() + "\nInvalid yaml; unable to start server. See log for stacktrace.");
+             logger.error("Fatal configuration error", e);
+             System.err.println(e.getMessage() + "\nFatal configuration error; unable to start server. See log for stacktrace.");
              System.exit(1);
          }
+         if (seedProvider.getSeeds().size() == 0)
+             throw new ConfigurationException("The seed provider lists no seeds.");
      }

      private static IEndpointSnitch createEndpointSnitch(String snitchClassName) throws ConfigurationException

http://git-wip-us.apache.org/repos/asf/cassandra/blob/a26ac36a/src/java/org/apache/cassandra/locator/SimpleSeedProvider.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/locator/SimpleSeedProvider.java
index a3031fa,9f491f3..a9ca15b
--- a/src/java/org/apache/cassandra/locator/SimpleSeedProvider.java
+++ b/src/java/org/apache/cassandra/locator/SimpleSeedProvider.java
@@@ -31,12 -39,28 +39,21 @@@ public class SimpleSeedProvider impleme
  {
      private static final Logger logger = LoggerFactory.getLogger(SimpleSeedProvider.class);

-     private final List<InetAddress> seeds;
+     public SimpleSeedProvider(Map<String, String> args) {}

-     public SimpleSeedProvider(Map<String, String> args)
+     public List<InetAddress> getSeeds()
      {
-         String[] hosts = args.get("seeds").split(",", -1);
-         seeds = new ArrayList<InetAddress>(hosts.length);
-         InputStream input;
++        Config conf;
+         try
+         {
-             URL url = DatabaseDescriptor.getStorageConfigURL();
-             input = url.openStream();
++            conf = DatabaseDescriptor.loadConfig();
+         }
+         catch (Exception e)
+         {
+             throw new AssertionError(e);
+         }
-         org.yaml.snakeyaml.constructor.Constructor constructor = new org.yaml.snakeyaml.constructor.Constructor(Config.class);
-         TypeDescription seedDesc = new TypeDescription(SeedProviderDef.class);
-         seedDesc.putMapPropertyType("parameters", String.class, String.class);
-         constructor.addTypeDescription(seedDesc);
-         Yaml yaml = new Yaml(new Loader(constructor));
-         Config conf = (Config)yaml.load(input);
+         String[] hosts = conf.seed_provider.parameters.get("seeds").split(",", -1);
+         List<InetAddress> seeds = new ArrayList<InetAddress>(hosts.length);
          for (String host : hosts)
          {
              try
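The SimpleSeedProvider hunk above is the CASSANDRA-7663 fix merged from 1.2: the seed list is no longer resolved once in the constructor and cached forever; getSeeds() now re-reads the configuration through the new DatabaseDescriptor.loadConfig() on every call, so seed changes in cassandra.yaml can take effect without a restart. A hypothetical provider following the same no-caching pattern, assuming only the SeedProvider surface visible in this diff (a Map<String, String> constructor plus getSeeds()); the environment variable used as the seed source is invented for illustration:

    import java.net.InetAddress;
    import java.net.UnknownHostException;
    import java.util.ArrayList;
    import java.util.List;
    import java.util.Map;

    import org.apache.cassandra.locator.SeedProvider;

    // Nothing is cached in the constructor; the seed list is rebuilt on every call.
    public class EnvSeedProvider implements SeedProvider
    {
        public EnvSeedProvider(Map<String, String> args) {}

        public List<InetAddress> getSeeds()
        {
            String value = System.getenv("CASSANDRA_SEEDS"); // hypothetical source, re-read each call
            if (value == null)
                value = "127.0.0.1";
            List<InetAddress> seeds = new ArrayList<InetAddress>();
            for (String host : value.split(",", -1))
            {
                try
                {
                    seeds.add(InetAddress.getByName(host.trim()));
                }
                catch (UnknownHostException e)
                {
                    // skip unresolvable entries rather than failing the whole list
                }
            }
            return seeds;
        }
    }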