Merge branch 'cassandra-2.1' into cassandra-2.2
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/da4b9716
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/da4b9716
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/da4b9716

Branch: refs/heads/trunk
Commit: da4b97164a89d57990ef19b553cb4a25937efe4a
Parents: 3851670 882adf0
Author: Sylvain Lebresne <sylv...@datastax.com>
Authored: Fri Nov 20 14:00:35 2015 +0100
Committer: Sylvain Lebresne <sylv...@datastax.com>
Committed: Fri Nov 20 14:00:35 2015 +0100

----------------------------------------------------------------------
 CHANGES.txt                                  |  2 ++
 .../hadoop/ColumnFamilyRecordReader.java     | 35 +++++++++++---------
 2 files changed, 21 insertions(+), 16 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/da4b9716/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 867226f,9e2869e..17c5047
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,17 -1,6 +1,19 @@@
-2.1.12
+2.2.4
+ * Don't do anticompaction after subrange repair (CASSANDRA-10422)
+ * Fix SimpleDateType type compatibility (CASSANDRA-10027)
+ * (Hadoop) fix splits calculation (CASSANDRA-10640)
+ * (Hadoop) ensure that Cluster instances are always closed (CASSANDRA-10058)
+ * (cqlsh) show partial trace if incomplete after max_trace_wait (CASSANDRA-7645)
+ * Use most up-to-date version of schema for system tables (CASSANDRA-10652)
+ * Deprecate memory_allocator in cassandra.yaml (CASSANDRA-10581,10628)
+ * Expose phi values from failure detector via JMX and tweak debug
+   and trace logging (CASSANDRA-9526)
+ * Fix RangeNamesQueryPager (CASSANDRA-10509)
+ * Deprecate Pig support (CASSANDRA-10542)
+ * Reduce contention getting instances of CompositeType (CASSANDRA-10433)
+Merged from 2.1:
+ * Try next replica if not possible to connect to primary replica on
+   ColumnFamilyRecordReader (CASSANDRA-2388)
  * Limit window size in DTCS (CASSANDRA-10280)
  * sstableloader does not use MAX_HEAP_SIZE env parameter (CASSANDRA-10188)
  * (cqlsh) Improve COPY TO performance and error handling (CASSANDRA-9304)


http://git-wip-us.apache.org/repos/asf/cassandra/blob/da4b9716/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
index 97dc497,dc44a43..9d1d10c
--- a/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
+++ b/src/java/org/apache/cassandra/hadoop/ColumnFamilyRecordReader.java
@@@ -152,25 -151,28 +152,28 @@@ public class ColumnFamilyRecordReader e
          if (batchSize < 2)
              throw new IllegalArgumentException("Minimum batchSize is 2. Suggested batchSize is 100 or more");
  
-         try
-         {
-             if (client != null)
-                 return;
-
-             // create connection using thrift
-             String location = getLocation();
-
-             int port = ConfigHelper.getInputRpcPort(conf);
-             client = ColumnFamilyInputFormat.createAuthenticatedClient(location, port, conf);
+         String[] locations = getLocations();
+         int port = ConfigHelper.getInputRpcPort(conf);
-         }
-         catch (Exception e)
+         Exception lastException = null;
+         for (String location : locations)
          {
-             throw new RuntimeException(e);
+             try
+             {
+                 client = ColumnFamilyInputFormat.createAuthenticatedClient(location, port, conf);
+                 break;
+             }
+             catch (Exception e)
+             {
+                 lastException = e;
+                 logger.warn("Failed to create authenticated client to {}:{}", location , port);
+             }
          }
+         if (client == null && lastException != null)
+             throw new RuntimeException(lastException);
  
          iter = widerows ? new WideRowIterator() : new StaticRowIterator();
-         logger.debug("created {}", iter);
+         logger.trace("created {}", iter);
      }
  
      public boolean nextKeyValue() throws IOException
@@@ -210,10 -212,10 +213,10 @@@
                  }
              }
          }
-         return split.getLocations()[0];
+         return split.getLocations();
      }
  
-     private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<ByteBuffer, Cell>>>
+     private abstract class RowIterator extends AbstractIterator<Pair<ByteBuffer, SortedMap<ByteBuffer, Column>>>
      {
          protected List<KeySlice> rows;
          protected int totalRead = 0;
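
For readers who only want the shape of the change without walking the merged diff: the patch replaces the single getLocation() lookup in initialize() with a loop over all of the split's locations, remembering the last exception and failing only after every replica has been tried. The standalone sketch below reproduces that failover loop under stated assumptions; the Connection class and connect() method are hypothetical stand-ins for the Thrift client and ColumnFamilyInputFormat.createAuthenticatedClient(), not actual Cassandra APIs.

    import java.util.List;

    public class ReplicaFailoverSketch
    {
        // Hypothetical connection handle standing in for the Thrift client.
        static final class Connection
        {
            final String host;
            final int port;
            Connection(String host, int port) { this.host = host; this.port = port; }
        }

        // Stand-in for ColumnFamilyInputFormat.createAuthenticatedClient(host, port, conf);
        // hosts prefixed with "down-" simulate an unreachable replica.
        static Connection connect(String host, int port) throws Exception
        {
            if (host.startsWith("down-"))
                throw new Exception("connection refused: " + host + ":" + port);
            return new Connection(host, port);
        }

        // Mirrors the patched initialize(): try each split location in order
        // instead of failing hard on the first one.
        static Connection connectToAnyReplica(List<String> locations, int port)
        {
            Exception lastException = null;
            for (String location : locations)
            {
                try
                {
                    return connect(location, port);
                }
                catch (Exception e)
                {
                    lastException = e;
                    System.err.printf("Failed to connect to %s:%d, trying next replica%n", location, port);
                }
            }
            // Surface the failure only after every replica has been tried.
            throw new RuntimeException("No replica reachable", lastException);
        }

        public static void main(String[] args)
        {
            Connection c = connectToAnyReplica(List.of("down-10.0.0.1", "10.0.0.2"), 9160);
            System.out.println("Connected to " + c.host + ":" + c.port);
        }
    }

The design choice matches the CHANGES.txt entry for CASSANDRA-2388: a Hadoop task reading a split should degrade to another replica that owns the same data rather than fail because the first listed location happens to be down.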