[2/2] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: src/java/org/apache/cassandra/db/ColumnFamily.java src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/98e6b08c Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/98e6b08c Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/98e6b08c Branch: refs/heads/trunk Commit: 98e6b08c71871666c5d7a206aedaa416ef1cf765 Parents: 7ce5e06 9fb44ee Author: Marcus Eriksson Authored: Thu Feb 6 08:57:46 2014 +0100 Committer: Marcus Eriksson Committed: Thu Feb 6 08:57:46 2014 +0100 -- .../org/apache/cassandra/db/ColumnFamily.java | 8 +++ .../db/compaction/LazilyCompactedRow.java | 11 .../cassandra/io/sstable/SSTableWriter.java | 10 .../cassandra/tools/SSTableMetadataViewer.java | 60 4 files changed, 66 insertions(+), 23 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/db/ColumnFamily.java -- diff --cc src/java/org/apache/cassandra/db/ColumnFamily.java index 2df3fbf,ec6a395..00f0e35 --- a/src/java/org/apache/cassandra/db/ColumnFamily.java +++ b/src/java/org/apache/cassandra/db/ColumnFamily.java @@@ -423,16 -418,26 +423,24 @@@ public abstract class ColumnFamily impl int maxLocalDeletionTime = Integer.MIN_VALUE; List minColumnNamesSeen = Collections.emptyList(); List maxColumnNamesSeen = Collections.emptyList(); - -if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) - tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime); -Iterator it = deletionInfo().rangeIterator(); -while (it.hasNext()) +for (Cell cell : this) { -RangeTombstone rangeTombstone = it.next(); -tombstones.update(rangeTombstone.getLocalDeletionTime()); -} - -for (Column column : this) -{ -minTimestampSeen = Math.min(minTimestampSeen, column.minTimestamp()); -maxTimestampSeen = Math.max(maxTimestampSeen, column.maxTimestamp()); -maxLocalDeletionTime = Math.max(maxLocalDeletionTime, column.getLocalDeletionTime()); -int deletionTime = column.getLocalDeletionTime(); ++if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) ++ tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime); ++Iterator it = deletionInfo().rangeIterator(); ++while (it.hasNext()) ++{ ++RangeTombstone rangeTombstone = it.next(); ++tombstones.update(rangeTombstone.getLocalDeletionTime()); ++} +minTimestampSeen = Math.min(minTimestampSeen, cell.minTimestamp()); +maxTimestampSeen = Math.max(maxTimestampSeen, cell.maxTimestamp()); +maxLocalDeletionTime = Math.max(maxLocalDeletionTime, cell.getLocalDeletionTime()); +int deletionTime = cell.getLocalDeletionTime(); if (deletionTime < Integer.MAX_VALUE) tombstones.update(deletionTime); -minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, column.name, metadata.comparator); -maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, column.name, metadata.comparator); +minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, cell.name, metadata.comparator); +maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, cell.name, metadata.comparator); } return new ColumnStats(getColumnCount(), minTimestampSeen, maxTimestampSeen, maxLocalDeletionTime, tombstones, minColumnNamesSeen, maxColumnNamesSeen); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java -- 
diff --cc src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java index 02901a3,e10fb2c..89181e6 --- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java +++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java @@@ -261,17 -265,25 +261,28 @@@ public class LazilyCompactedRow extend container.clear(); return null; } -Column reduced = purged.iterator().next(); -container.clear(); + -// PrecompactedRow.removeDeleted has only checked the top-level CF delet
[1/2] git commit: Account for range and row tombstones in tombstone drop time histogram.
Updated Branches: refs/heads/trunk 7ce5e062e -> 98e6b08c7 Account for range and row tombstones in tombstone drop time histogram. Patch by marcuse, reviewed by jbellis for CASSANDRA-6522 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9fb44ee5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9fb44ee5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9fb44ee5 Branch: refs/heads/trunk Commit: 9fb44ee55bd14043f307cc320ecc277010a42953 Parents: e59ef16 Author: Marcus Eriksson Authored: Thu Feb 6 08:13:24 2014 +0100 Committer: Marcus Eriksson Committed: Thu Feb 6 08:52:07 2014 +0100 -- .../org/apache/cassandra/db/ColumnFamily.java | 10 ++ .../db/compaction/LazilyCompactedRow.java | 10 +- .../cassandra/io/sstable/SSTableWriter.java | 10 ++ .../cassandra/tools/SSTableMetadataViewer.java | 36 ++-- 4 files changed, 54 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/db/ColumnFamily.java -- diff --git a/src/java/org/apache/cassandra/db/ColumnFamily.java b/src/java/org/apache/cassandra/db/ColumnFamily.java index 2c00071..ec6a395 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamily.java +++ b/src/java/org/apache/cassandra/db/ColumnFamily.java @@ -418,6 +418,16 @@ public abstract class ColumnFamily implements Iterable, IRowCacheEntry int maxLocalDeletionTime = Integer.MIN_VALUE; List minColumnNamesSeen = Collections.emptyList(); List maxColumnNamesSeen = Collections.emptyList(); + +if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) + tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime); +Iterator it = deletionInfo().rangeIterator(); +while (it.hasNext()) +{ +RangeTombstone rangeTombstone = it.next(); +tombstones.update(rangeTombstone.getLocalDeletionTime()); +} + for (Column column : this) { minTimestampSeen = Math.min(minTimestampSeen, column.minTimestamp()); http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java -- diff --git a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java index 998f8cc..e10fb2c 100644 --- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java +++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java @@ -275,7 +275,15 @@ public class LazilyCompactedRow extends AbstractCompactedRow implements Iterable indexer.remove(reduced); return null; } - +int localDeletionTime = purged.deletionInfo().getTopLevelDeletion().localDeletionTime; +if (localDeletionTime < Integer.MAX_VALUE) +tombstones.update(localDeletionTime); +Iterator rangeTombstoneIterator = purged.deletionInfo().rangeIterator(); +while (rangeTombstoneIterator.hasNext()) +{ +RangeTombstone rangeTombstone = rangeTombstoneIterator.next(); +tombstones.update(rangeTombstone.getLocalDeletionTime()); +} columns++; minTimestampSeen = Math.min(minTimestampSeen, reduced.minTimestamp()); maxTimestampSeen = Math.max(maxTimestampSeen, reduced.maxTimestamp()); http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java -- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java index 81b3c27..6528ced 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java +++ 
b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java @@ -235,6 +235,16 @@ public class SSTableWriter extends SSTable if (version.hasRowSizeAndColumnCount) columnCount = in.readInt(); +if (cf.deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) + tombstones.update(cf.deletionInfo().getTopLevelDeletion().localDeletionTime); + +Iterator rangeTombstoneIterator = cf.deletionInfo().rangeIterator(); +while (rangeTombstoneIterator.hasNext()) +{ +RangeTombstone rangeTombstone = rangeTombstoneIterator.next(); +tombst
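
The change is the same in all three touched files: besides per-cell deletion times, the row-level (top-level) tombstone and every range tombstone now feed the drop-time histogram. Below is a minimal sketch of that pattern, assuming the 2.0-era internal types the diff touches (ColumnFamily, DeletionInfo, RangeTombstone, StreamingHistogram); it restates the diff above rather than being a drop-in class.

{code}
// Sketch of the CASSANDRA-6522 pattern, restated from the diff above.
// Assumes Cassandra 2.0's internal types; on trunk the same loop iterates
// Cell instead of Column.
import java.util.Iterator;

import org.apache.cassandra.db.Column;
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.RangeTombstone;
import org.apache.cassandra.utils.StreamingHistogram;

final class TombstoneHistogramUpdater
{
    static void update(ColumnFamily cf, StreamingHistogram tombstones)
    {
        // row-level (top-level) tombstone, if the partition itself was deleted;
        // Integer.MAX_VALUE means "no tombstone"
        int topLevel = cf.deletionInfo().getTopLevelDeletion().localDeletionTime;
        if (topLevel < Integer.MAX_VALUE)
            tombstones.update(topLevel);

        // each range tombstone contributes its local deletion time as well
        Iterator<RangeTombstone> it = cf.deletionInfo().rangeIterator();
        while (it.hasNext())
            tombstones.update(it.next().getLocalDeletionTime());

        // per-cell deletion times were already accounted for before this patch
        for (Column column : cf)
        {
            int deletionTime = column.getLocalDeletionTime();
            if (deletionTime < Integer.MAX_VALUE)
                tombstones.update(deletionTime);
        }
    }
}
{code}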
git commit: Account for range and row tombstones in tombstone drop time histogram.
Updated Branches: refs/heads/cassandra-2.0 e59ef16bf -> 9fb44ee55 Account for range and row tombstones in tombstone drop time histogram. Patch by marcuse, reviewed by jbellis for CASSANDRA-6522 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9fb44ee5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9fb44ee5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9fb44ee5 Branch: refs/heads/cassandra-2.0 Commit: 9fb44ee55bd14043f307cc320ecc277010a42953 Parents: e59ef16 Author: Marcus Eriksson Authored: Thu Feb 6 08:13:24 2014 +0100 Committer: Marcus Eriksson Committed: Thu Feb 6 08:52:07 2014 +0100 -- .../org/apache/cassandra/db/ColumnFamily.java | 10 ++ .../db/compaction/LazilyCompactedRow.java | 10 +- .../cassandra/io/sstable/SSTableWriter.java | 10 ++ .../cassandra/tools/SSTableMetadataViewer.java | 36 ++-- 4 files changed, 54 insertions(+), 12 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/db/ColumnFamily.java -- diff --git a/src/java/org/apache/cassandra/db/ColumnFamily.java b/src/java/org/apache/cassandra/db/ColumnFamily.java index 2c00071..ec6a395 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamily.java +++ b/src/java/org/apache/cassandra/db/ColumnFamily.java @@ -418,6 +418,16 @@ public abstract class ColumnFamily implements Iterable, IRowCacheEntry int maxLocalDeletionTime = Integer.MIN_VALUE; List minColumnNamesSeen = Collections.emptyList(); List maxColumnNamesSeen = Collections.emptyList(); + +if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) + tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime); +Iterator it = deletionInfo().rangeIterator(); +while (it.hasNext()) +{ +RangeTombstone rangeTombstone = it.next(); +tombstones.update(rangeTombstone.getLocalDeletionTime()); +} + for (Column column : this) { minTimestampSeen = Math.min(minTimestampSeen, column.minTimestamp()); http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java -- diff --git a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java index 998f8cc..e10fb2c 100644 --- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java +++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java @@ -275,7 +275,15 @@ public class LazilyCompactedRow extends AbstractCompactedRow implements Iterable indexer.remove(reduced); return null; } - +int localDeletionTime = purged.deletionInfo().getTopLevelDeletion().localDeletionTime; +if (localDeletionTime < Integer.MAX_VALUE) +tombstones.update(localDeletionTime); +Iterator rangeTombstoneIterator = purged.deletionInfo().rangeIterator(); +while (rangeTombstoneIterator.hasNext()) +{ +RangeTombstone rangeTombstone = rangeTombstoneIterator.next(); +tombstones.update(rangeTombstone.getLocalDeletionTime()); +} columns++; minTimestampSeen = Math.min(minTimestampSeen, reduced.minTimestamp()); maxTimestampSeen = Math.max(maxTimestampSeen, reduced.maxTimestamp()); http://git-wip-us.apache.org/repos/asf/cassandra/blob/9fb44ee5/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java -- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java index 81b3c27..6528ced 100644 --- 
a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java @@ -235,6 +235,16 @@ public class SSTableWriter extends SSTable if (version.hasRowSizeAndColumnCount) columnCount = in.readInt(); +if (cf.deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) + tombstones.update(cf.deletionInfo().getTopLevelDeletion().localDeletionTime); + +Iterator rangeTombstoneIterator = cf.deletionInfo().rangeIterator(); +while (rangeTombstoneIterator.hasNext()) +{ +RangeTombstone rangeTombstone = rangeTombstoneIterator.next(); +
[1/2] git commit: Correctly handle null in conditions with TTL
Updated Branches: refs/heads/trunk fc01759f4 -> 7ce5e062e Correctly handle null in conditions with TTL patch by slebresne; reviewed by iamaleksey for CASSANDRA-6623 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e59ef16b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e59ef16b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e59ef16b Branch: refs/heads/trunk Commit: e59ef16bfcb3bd019202fc12bedeb04302066540 Parents: 58e9481 Author: Sylvain Lebresne Authored: Thu Feb 6 08:36:12 2014 +0100 Committer: Sylvain Lebresne Committed: Thu Feb 6 08:36:12 2014 +0100 -- CHANGES.txt | 1 + .../cql3/statements/ModificationStatement.java | 109 +++ .../apache/cassandra/service/CASConditions.java | 38 +++ .../apache/cassandra/service/StorageProxy.java | 75 ++--- .../cassandra/thrift/CassandraServer.java | 64 ++- 5 files changed, 197 insertions(+), 90 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/e59ef16b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index bba5f20..7ba8044 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,4 +1,5 @@ 2.0.6 + * Correctly handle null with IF conditions and TTL (CASSANDRA-6623) Merged from 1.2: * Fix partition and range deletes not triggering flush (CASSANDRA-6655) http://git-wip-us.apache.org/repos/asf/cassandra/blob/e59ef16b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java index c0bf428..2567043 100644 --- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java @@ -27,19 +27,20 @@ import org.apache.cassandra.config.CFMetaData; import org.apache.cassandra.cql3.*; import org.apache.cassandra.db.*; import org.apache.cassandra.db.filter.ColumnSlice; +import org.apache.cassandra.db.filter.IDiskAtomFilter; import org.apache.cassandra.db.filter.SliceQueryFilter; import org.apache.cassandra.db.marshal.CompositeType; import org.apache.cassandra.db.marshal.UTF8Type; import org.apache.cassandra.db.marshal.ListType; import org.apache.cassandra.db.marshal.BooleanType; import org.apache.cassandra.exceptions.*; +import org.apache.cassandra.service.CASConditions; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.service.StorageProxy; import org.apache.cassandra.thrift.ThriftValidation; import org.apache.cassandra.transport.messages.ResultMessage; import org.apache.cassandra.utils.Pair; -import org.apache.cassandra.utils.ByteBufferUtil; /* * Abstract parent class of individual modifications, i.e. INSERT, UPDATE and DELETE. @@ -415,16 +416,17 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF UpdateParameters updParams = new UpdateParameters(cfm, variables, queryState.getTimestamp(), getTimeToLive(variables), null); ColumnFamily updates = updateForKey(key, clusteringPrefix, updParams); -// When building the conditions, we should not use the TTL. It's not useful, and if a very low ttl (1 seconds) is used, it's possible -// for it to expire before actually build the conditions which would break since we would then test for the presence of tombstones. 
-UpdateParameters condParams = new UpdateParameters(cfm, variables, queryState.getTimestamp(), 0, null); -ColumnFamily expected = buildConditions(key, clusteringPrefix, condParams); +// It's cleaner to use the query timestamp below, but it's in seconds while the conditions expects microseconds, so just +// put it back in millis (we don't really lose precision because the ultimate consumer, Column.isLive, re-divide it). +long now = queryState.getTimestamp() * 1000; +CASConditions conditions = ifNotExists + ? new NotExistCondition(clusteringPrefix, now) + : new ColumnsConditions(clusteringPrefix, cfm, key, columnConditions, variables, now); ColumnFamily result = StorageProxy.cas(keyspace(), columnFamily(), key, - clusteringPrefix, - expected, + conditions,
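
The new file in the summary above, CASConditions.java, is not shown in this digest. Here is a hedged sketch of roughly what such an abstraction has to expose: a read filter for the cells the conditions need, plus a predicate evaluated against the current partition at CAS time (which is what lets a TTL'd column be judged when the check actually runs). The exact method names are an assumption, not the committed interface.

{code}
// Hedged sketch of a CAS-conditions abstraction; method names are assumptions,
// not necessarily the API committed in CASSANDRA-6623.
import org.apache.cassandra.db.ColumnFamily;
import org.apache.cassandra.db.filter.IDiskAtomFilter;
import org.apache.cassandra.exceptions.InvalidRequestException;

public interface CASConditions
{
    /** Filter selecting the cells that must be read to evaluate the conditions. */
    IDiskAtomFilter readFilter();

    /** Whether the conditions hold for the current (possibly empty) partition state. */
    boolean appliesTo(ColumnFamily current) throws InvalidRequestException;
}
{code}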
[2/2] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java src/java/org/apache/cassandra/service/StorageProxy.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7ce5e062 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7ce5e062 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7ce5e062 Branch: refs/heads/trunk Commit: 7ce5e062ed602dd1c9593a03b554c11ff3cc52d5 Parents: fc01759 e59ef16 Author: Sylvain Lebresne Authored: Thu Feb 6 08:50:34 2014 +0100 Committer: Sylvain Lebresne Committed: Thu Feb 6 08:50:34 2014 +0100 -- CHANGES.txt | 1 + .../cql3/statements/ModificationStatement.java | 109 +++ .../apache/cassandra/service/CASConditions.java | 38 +++ .../apache/cassandra/service/StorageProxy.java | 76 ++--- .../cassandra/thrift/CassandraServer.java | 65 ++- 5 files changed, 200 insertions(+), 89 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/7ce5e062/CHANGES.txt -- diff --cc CHANGES.txt index a139fdc,7ba8044..657bf9e --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,34 -1,5 +1,35 @@@ +2.1 + * add listsnapshots command to nodetool (CASSANDRA-5742) + * Introduce AtomicBTreeColumns (CASSANDRA-6271) + * Multithreaded commitlog (CASSANDRA-3578) + * allocate fixed index summary memory pool and resample cold index summaries + to use less memory (CASSANDRA-5519) + * Removed multithreaded compaction (CASSANDRA-6142) + * Parallelize fetching rows for low-cardinality indexes (CASSANDRA-1337) + * change logging from log4j to logback (CASSANDRA-5883) + * switch to LZ4 compression for internode communication (CASSANDRA-5887) + * Stop using Thrift-generated Index* classes internally (CASSANDRA-5971) + * Remove 1.2 network compatibility code (CASSANDRA-5960) + * Remove leveled json manifest migration code (CASSANDRA-5996) + * Remove CFDefinition (CASSANDRA-6253) + * Use AtomicIntegerFieldUpdater in RefCountedMemory (CASSANDRA-6278) + * User-defined types for CQL3 (CASSANDRA-5590) + * Use of o.a.c.metrics in nodetool (CASSANDRA-5871, 6406) + * Batch read from OTC's queue and cleanup (CASSANDRA-1632) + * Secondary index support for collections (CASSANDRA-4511, 6383) + * SSTable metadata(Stats.db) format change (CASSANDRA-6356) + * Push composites support in the storage engine + (CASSANDRA-5417, CASSANDRA-6520) + * Add snapshot space used to cfstats (CASSANDRA-6231) + * Add cardinality estimator for key count estimation (CASSANDRA-5906) + * CF id is changed to be non-deterministic. 
Data dir/key cache are created + uniquely for CF id (CASSANDRA-5202) + * New counters implementation (CASSANDRA-6504) + * Replace UnsortedColumns usage with ArrayBackedSortedColumns (CASSANDRA-6630) + + 2.0.6 + * Correctly handle null with IF conditions and TTL (CASSANDRA-6623) Merged from 1.2: * Fix partition and range deletes not triggering flush (CASSANDRA-6655) http://git-wip-us.apache.org/repos/asf/cassandra/blob/7ce5e062/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java -- diff --cc src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java index e551187,2567043..f35690b --- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java @@@ -24,13 -24,13 +24,14 @@@ import org.github.jamm.MemoryMeter import org.apache.cassandra.auth.Permission; import org.apache.cassandra.config.CFMetaData; +import org.apache.cassandra.config.ColumnDefinition; import org.apache.cassandra.cql3.*; import org.apache.cassandra.db.*; +import org.apache.cassandra.db.composites.CBuilder; +import org.apache.cassandra.db.composites.Composite; import org.apache.cassandra.db.filter.ColumnSlice; + import org.apache.cassandra.db.filter.IDiskAtomFilter; import org.apache.cassandra.db.filter.SliceQueryFilter; -import org.apache.cassandra.db.marshal.CompositeType; -import org.apache.cassandra.db.marshal.UTF8Type; import org.apache.cassandra.db.marshal.ListType; import org.apache.cassandra.db.marshal.BooleanType; import org.apache.cassandra.exceptions.*; @@@ -533,29 -539,96 +535,96 @@@ public abstract class ModificationState } else { -rm = new RowMutation(cfm.ksName, key, cf); +mutation = new Mutation(cfm.ksName, key, cf); } -return isCounter() ? new CounterMutation(rm, cl) : rm; +return isCounter() ? new CounterMutation(mutation, cl) :
git commit: Correctly handle null in conditions with TTL
Updated Branches: refs/heads/cassandra-2.0 58e948185 -> e59ef16bf Correctly handle null in conditions with TTL patch by slebresne; reviewed by iamaleksey for CASSANDRA-6623 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e59ef16b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e59ef16b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e59ef16b Branch: refs/heads/cassandra-2.0 Commit: e59ef16bfcb3bd019202fc12bedeb04302066540 Parents: 58e9481 Author: Sylvain Lebresne Authored: Thu Feb 6 08:36:12 2014 +0100 Committer: Sylvain Lebresne Committed: Thu Feb 6 08:36:12 2014 +0100 -- CHANGES.txt | 1 + .../cql3/statements/ModificationStatement.java | 109 +++ .../apache/cassandra/service/CASConditions.java | 38 +++ .../apache/cassandra/service/StorageProxy.java | 75 ++--- .../cassandra/thrift/CassandraServer.java | 64 ++- 5 files changed, 197 insertions(+), 90 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/e59ef16b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index bba5f20..7ba8044 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,4 +1,5 @@ 2.0.6 + * Correctly handle null with IF conditions and TTL (CASSANDRA-6623) Merged from 1.2: * Fix partition and range deletes not triggering flush (CASSANDRA-6655) http://git-wip-us.apache.org/repos/asf/cassandra/blob/e59ef16b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java index c0bf428..2567043 100644 --- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java @@ -27,19 +27,20 @@ import org.apache.cassandra.config.CFMetaData; import org.apache.cassandra.cql3.*; import org.apache.cassandra.db.*; import org.apache.cassandra.db.filter.ColumnSlice; +import org.apache.cassandra.db.filter.IDiskAtomFilter; import org.apache.cassandra.db.filter.SliceQueryFilter; import org.apache.cassandra.db.marshal.CompositeType; import org.apache.cassandra.db.marshal.UTF8Type; import org.apache.cassandra.db.marshal.ListType; import org.apache.cassandra.db.marshal.BooleanType; import org.apache.cassandra.exceptions.*; +import org.apache.cassandra.service.CASConditions; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.service.StorageProxy; import org.apache.cassandra.thrift.ThriftValidation; import org.apache.cassandra.transport.messages.ResultMessage; import org.apache.cassandra.utils.Pair; -import org.apache.cassandra.utils.ByteBufferUtil; /* * Abstract parent class of individual modifications, i.e. INSERT, UPDATE and DELETE. @@ -415,16 +416,17 @@ public abstract class ModificationStatement implements CQLStatement, MeasurableF UpdateParameters updParams = new UpdateParameters(cfm, variables, queryState.getTimestamp(), getTimeToLive(variables), null); ColumnFamily updates = updateForKey(key, clusteringPrefix, updParams); -// When building the conditions, we should not use the TTL. It's not useful, and if a very low ttl (1 seconds) is used, it's possible -// for it to expire before actually build the conditions which would break since we would then test for the presence of tombstones. 
-UpdateParameters condParams = new UpdateParameters(cfm, variables, queryState.getTimestamp(), 0, null); -ColumnFamily expected = buildConditions(key, clusteringPrefix, condParams); +// It's cleaner to use the query timestamp below, but it's in seconds while the conditions expects microseconds, so just +// put it back in millis (we don't really lose precision because the ultimate consumer, Column.isLive, re-divide it). +long now = queryState.getTimestamp() * 1000; +CASConditions conditions = ifNotExists + ? new NotExistCondition(clusteringPrefix, now) + : new ColumnsConditions(clusteringPrefix, cfm, key, columnConditions, variables, now); ColumnFamily result = StorageProxy.cas(keyspace(), columnFamily(), key, - clusteringPrefix, - expected, +
[jira] [Commented] (CASSANDRA-6662) Sort/reconcile cells in ArrayBackedSortedColumns only when an accessor is called
[ https://issues.apache.org/jira/browse/CASSANDRA-6662?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13893005#comment-13893005 ] Aleksey Yeschenko commented on CASSANDRA-6662: -- Once implemented, do we actually *need* TreeMapBackedSortedColumns anymore? > Sort/reconcile cells in ArrayBackedSortedColumns only when an accessor is > called > > > Key: CASSANDRA-6662 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6662 > Project: Cassandra > Issue Type: Improvement >Reporter: Aleksey Yeschenko >Assignee: Aleksey Yeschenko > Fix For: 2.1 > > > To avoid poor performance with huge numbers of cells added out of order > (which should be rare, but *can* happen with certain batch scenarios) we > should make ABSC only sort/reconcile its cells when an actual accessor is > actually called, delaying sorting until the very end. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
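
A standalone sketch of the deferred-sort idea under discussion (illustrative only, not the actual ArrayBackedSortedColumns code): additions merely record that order was violated, and sorting (plus, in the real implementation, reconciling duplicate cells) happens once, on the first accessor call.

{code}
// Illustrative sketch of "sort/reconcile only when an accessor is called";
// names and structure are not the real ABSC implementation.
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.List;

final class LazySortedCells<C>
{
    private final List<C> cells = new ArrayList<C>();
    private final Comparator<C> comparator;
    private boolean sorted = true; // an empty list is trivially sorted

    LazySortedCells(Comparator<C> comparator)
    {
        this.comparator = comparator;
    }

    void add(C cell)
    {
        // appending stays O(1); we only note whether order was violated,
        // so big out-of-order batches don't pay per-insert sorting costs
        if (!cells.isEmpty() && comparator.compare(cells.get(cells.size() - 1), cell) > 0)
            sorted = false;
        cells.add(cell);
    }

    List<C> sortedCells()
    {
        // the deferred work happens here, on first access; the real ABSC would
        // also merge (reconcile) cells that ended up with the same name
        if (!sorted)
        {
            Collections.sort(cells, comparator);
            sorted = true;
        }
        return cells;
    }
}
{code}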
git commit: Actually remove UnsortedColumns (CASSANDRA-6630 followup)
Updated Branches: refs/heads/trunk 0b738073a -> fc01759f4 Actually remove UnsortedColumns (CASSANDRA-6630 followup) Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/fc01759f Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/fc01759f Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/fc01759f Branch: refs/heads/trunk Commit: fc01759f41f253f7c850ed17e939b610812925e5 Parents: 0b73807 Author: Aleksey Yeschenko Authored: Thu Feb 6 07:01:46 2014 +0300 Committer: Aleksey Yeschenko Committed: Thu Feb 6 07:02:15 2014 +0300 -- .../apache/cassandra/db/UnsortedColumns.java| 145 --- 1 file changed, 145 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/fc01759f/src/java/org/apache/cassandra/db/UnsortedColumns.java -- diff --git a/src/java/org/apache/cassandra/db/UnsortedColumns.java b/src/java/org/apache/cassandra/db/UnsortedColumns.java deleted file mode 100644 index ddd7827..000 --- a/src/java/org/apache/cassandra/db/UnsortedColumns.java +++ /dev/null @@ -1,145 +0,0 @@ -/* - * Licensed to the Apache Software Foundation (ASF) under one - * or more contributor license agreements. See the NOTICE file - * distributed with this work for additional information - * regarding copyright ownership. The ASF licenses this file - * to you under the Apache License, Version 2.0 (the - * "License"); you may not use this file except in compliance - * with the License. You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package org.apache.cassandra.db; - -import java.util.ArrayList; -import java.util.Collection; -import java.util.Iterator; - -import com.google.common.base.Function; -import com.google.common.collect.Iterables; - -import org.apache.cassandra.config.CFMetaData; -import org.apache.cassandra.db.composites.CellName; -import org.apache.cassandra.db.filter.ColumnSlice; -import org.apache.cassandra.utils.memory.AbstractAllocator; - -/** - * A ColumnFamily that allows inserting in any order, even unsorted. - * - * Operations that require sorting (getSortedColumns) or that cannot be efficient without it - * (replace, getColumn, etc.) are not supported. 
- */ -public class UnsortedColumns extends AbstractThreadUnsafeSortedColumns -{ -private final ArrayList cells; - -public static final Factory factory = new Factory() -{ -public UnsortedColumns create(CFMetaData metadata, boolean insertReversed) -{ -assert !insertReversed; -return new UnsortedColumns(metadata); -} -}; - -private UnsortedColumns(CFMetaData metadata) -{ -this(metadata, new ArrayList()); -} - -private UnsortedColumns(CFMetaData metadata, ArrayList cells) -{ -super(metadata); -this.cells = cells; -} - -public Factory getFactory() -{ -return factory; -} - -public ColumnFamily cloneMe() -{ -return new UnsortedColumns(metadata, new ArrayList(cells)); -} - -public boolean isInsertReversed() -{ -return false; -} - -public void clear() -{ -cells.clear(); -} - -public void addColumn(Cell cell, AbstractAllocator allocator) -{ -cells.add(cell); -} - -public void addAll(ColumnFamily cm, AbstractAllocator allocator, Function transformation) -{ -delete(cm.deletionInfo()); -for (Cell cell : cm) -addColumn(cell); -} - -public Iterator iterator() -{ -return cells.iterator(); -} - -public boolean replace(Cell oldCell, Cell newCell) -{ -throw new UnsupportedOperationException(); -} - -public Cell getColumn(CellName name) -{ -throw new UnsupportedOperationException(); -} - -public Iterable getColumnNames() -{ -return Iterables.transform(cells, new Function() -{ -public CellName apply(Cell cell) -{ -return cell.name; -} -}); -} - -public Collection getSortedColumns() -{ -throw new UnsupportedOperationException(); -} - -public Collection getReverseSortedColumns() -{ -throw new UnsupportedOperationException(); -} - -public int getColumnCount() -{ -return cells.size(); -} - -public Iterator iterator(Colu
[jira] [Commented] (CASSANDRA-5351) Avoid repairing already-repaired data by default
[ https://issues.apache.org/jira/browse/CASSANDRA-5351?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13893003#comment-13893003 ] Yuki Morishita commented on CASSANDRA-5351: --- Pushed update to https://github.com/yukim/cassandra/commits/5351-3 I removed fullRepair flag from message and ParentRepairSession. I think we are ready to test. If you don't need to add more, let's rebase on the latest trunk, squash the commits and attach the patch here. I'll keep on testing... > Avoid repairing already-repaired data by default > > > Key: CASSANDRA-5351 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5351 > Project: Cassandra > Issue Type: Task > Components: Core >Reporter: Jonathan Ellis >Assignee: Lyuben Todorov > Labels: repair > Fix For: 2.1 > > Attachments: 5351_node1.log, 5351_node2.log, 5351_node3.log, > 5351_nodetool.log > > > Repair has always built its merkle tree from all the data in a columnfamily, > which is guaranteed to work but is inefficient. > We can improve this by remembering which sstables have already been > successfully repaired, and only repairing sstables new since the last repair. > (This automatically makes CASSANDRA-3362 much less of a problem too.) > The tricky part is, compaction will (if not taught otherwise) mix repaired > data together with non-repaired. So we should segregate unrepaired sstables > from the repaired ones. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6663) Connecting to a Raspberry PI Cassandra Cluster crashes the node being connected to
[ https://issues.apache.org/jira/browse/CASSANDRA-6663?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] ian mccrae updated CASSANDRA-6663: -- Description: I have a working 4x node Raspberry Pi cluster and # DevCenter happily connects to this (...which has an option to turn Snappy compression off) # ...however the Python Driver fails to connect and crashes the node being connected to with the errors in the error-log below. There appears to be a problem with Snappy compression (not supported on the Raspberry Pi). So I also tried "compression = None" with the same result. How might I fix this? *Python Code* >>> from cassandra.cluster import Cluster >>> cluster = Cluster(['192.168.200.151'], compression = None) >>> session = cluster.connect() *Error Log* {quote} Traceback (most recent call last): File "", line 1, in session = cluster.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 471, in connect self.control_connection.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1351, in connect self._set_new_connection(self._reconnect_internal()) File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1386, in _reconnect_internal raise NoHostAvailable("Unable to connect to any servers", errors) NoHostAvailable: ('Unable to connect to any servers', {'192.168.200.151': ConnectionShutdown('Connection to 192.168.200.151 is closed',)) {quote} *A Dump of the "cluster" class attributes* {quote} >>> pprint(vars(cluster)) {'_core_connections_per_host': {0: 2, 1: 1}, '_is_setup': True, '_is_shutdown': True, '_listener_lock': , '_listeners': set([]), '_lock': <_RLock owner=None count=0>, '_max_connections_per_host': {0: 8, 1: 2}, '_max_requests_per_connection': {0: 100, 1: 100}, '_min_requests_per_connection': {0: 5, 1: 5}, '_prepared_statements': , 'compression': None, 'contact_points': ['192.168.200.151'], 'control_connection': , 'control_connection_timeout': 2.0, 'cql_version': None, 'executor': , 'load_balancing_policy': , 'max_schema_agreement_wait': 10, 'metadata': , 'metrics_enabled': False, 'port': 9042, 'scheduler': , 'sessions': <_weakrefset.WeakSet object at 0x106148750>, 'sockopts': None, 'ssl_options': None} >>> {quote} was: I have a working 4x node Raspberry Pi cluster and # DevCenter happily connects to this (...which has an option to turn Snappy compression off) # ...however the Python Driver fails to connect and crashes the node being connected to with the errors in the error-log below. There appears to be a problem with Snappy compression (not supported on the Raspberry Pi). So I also tried "compression = None" with the same result. How might I fix this? 
*Python Code* >>> from cassandra.cluster import Cluster >>> cluster = Cluster(['192.168.200.151'], compression = None) >>> session = cluster.connect() *Error Log* {quote} Traceback (most recent call last): File "", line 1, in session = cluster.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 471, in connect self.control_connection.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1351, in connect self._set_new_connection(self._reconnect_internal()) File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1386, in _reconnect_internal raise NoHostAvailable("Unable to connect to any servers", errors) NoHostAvailable: ('Unable to connect to any servers', {'192.168.200.151': ConnectionShutdown('Connection to 192.168.200.151 is closed',)) {quote} > Connecting to a Raspberry PI Cassandra Cluster crashes the node being > connected to > -- > > Key: CASSANDRA-6663 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6663 > Project: Cassandra > Issue Type: Bug > Components: Drivers (now out of tree) > Environment: 4x node Raspberry PI cluster > Macbook running Idle 2.7 >Reporter: ian mccrae > > I have a working 4x node Raspberry Pi cluster and > # DevCenter happily connects to this (...which has an option to turn Snappy > compression off) > # ...however the Python Driver fails to connect and crashes the node being > connected to with the errors in the error-log below. > There appears to be a problem with Snappy compression (not supported on the > Raspberry Pi). So I also tried "compression = None" with the same result. > How might I fix this? > *Python Code* > >>> from cassandra.cluster import Cluster > >>> cluster = Cluster(['192.168.200
[jira] [Commented] (CASSANDRA-6663) Connecting to a Raspberry PI Cassandra Cluster crashes the node being connected to
[ https://issues.apache.org/jira/browse/CASSANDRA-6663?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892974#comment-13892974 ] Brandon Williams commented on CASSANDRA-6663: - We're going to need to see a server-side error to get anywhere here. > Connecting to a Raspberry PI Cassandra Cluster crashes the node being > connected to > -- > > Key: CASSANDRA-6663 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6663 > Project: Cassandra > Issue Type: Bug > Components: Drivers (now out of tree) > Environment: 4x node Raspberry PI cluster > Macbook running Idle 2.7 >Reporter: ian mccrae > > I have a working 4x node Raspberry Pi cluster and > # DevCenter happily connects to this (...which has an option to turn Snappy > compression off) > # ...however the Python Driver fails to connect and crashes the node being > connected to with the errors in the error-log below. > There appears to be a problem with Snappy compression (not supported on the > Raspberry Pi). So I also tried "compression = None" with the same result. > How might I fix this? > *Python Code* > >>> from cassandra.cluster import Cluster > >>> cluster = Cluster(['192.168.200.151'], compression = None) > >>> session = cluster.connect() > *Error Log* > {quote} > Traceback (most recent call last): > File "", line 1, in > session = cluster.connect() > File > "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", > line 471, in connect > self.control_connection.connect() > File > "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", > line 1351, in connect > self._set_new_connection(self._reconnect_internal()) > File > "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", > line 1386, in _reconnect_internal > raise NoHostAvailable("Unable to connect to any servers", errors) > NoHostAvailable: ('Unable to connect to any servers', {'192.168.200.151': > ConnectionShutdown('Connection to 192.168.200.151 is closed',)) > {quote} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6663) Connecting to a Raspberry PI Cassandra Cluster crashes the node being connected to
ian mccrae created CASSANDRA-6663: - Summary: Connecting to a Raspberry PI Cassandra Cluster crashes the node being connected to Key: CASSANDRA-6663 URL: https://issues.apache.org/jira/browse/CASSANDRA-6663 Project: Cassandra Issue Type: Bug Components: Drivers (now out of tree) Environment: 4x node Raspberry PI cluster Macbook running Idle 2.7 Reporter: ian mccrae I have a working 4x node Raspberry Pi cluster and # DevCenter happily connects to this (...which has an option to turn Snappy compression off) # ...however the Python Driver fails to connect and crashes the node being connected to with the errors in the error-log below. There appears to be a problem with Snappy compression (not supported on the Raspberry Pi). So I also tried "compression = None" with the same result. How might I fix this? *Python Code* >>> from cassandra.cluster import Cluster >>> cluster = Cluster(['192.168.200.151'], compression = None) >>> session = cluster.connect() *Error Log* {quote} Traceback (most recent call last): File "", line 1, in session = cluster.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 471, in connect self.control_connection.connect() File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1351, in connect self._set_new_connection(self._reconnect_internal()) File "/Library/Frameworks/Python.framework/Versions/2.7/lib/python2.7/site-packages/cassandra/cluster.py", line 1386, in _reconnect_internal raise NoHostAvailable("Unable to connect to any servers", errors) NoHostAvailable: ('Unable to connect to any servers', {'192.168.200.151': ConnectionShutdown('Connection to 192.168.200.151 is closed',)) {quote} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892955#comment-13892955 ] Aleksey Yeschenko commented on CASSANDRA-6561: -- I think I'd prefer {code} BEGIN BATCH UPDATE test SET v='foobar' WHERE id=0 AND k='k1'; UPDATE test SET v='barfoo' WHERE id=0 AND k='k2'; UPDATE test SET version=3 WHERE id=0; APPLY BATCH IF version=1; {code} to {code} BEGIN BATCH UPDATE test SET v='foobar' WHERE id=0 AND k='k1'; UPDATE test SET v='barfoo' WHERE id=0 AND k='k2'; UPDATE test SET version=3 WHERE id=0 IF version=1; APPLY BATCH {code} To make it clear that only one condition applies to the whole modified partition. Maybe even make it BEGIN CAS BATCH, for explicitness sake, even though we can infer it from IF. (moving IF outside also makes it easier to support IF NOT EXISTS properly). > Static columns in CQL3 > -- > > Key: CASSANDRA-6561 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 > Project: Cassandra > Issue Type: New Feature >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne > Fix For: 2.0.6 > > > I'd like to suggest the following idea for adding "static" columns to CQL3. > I'll note that the basic idea has been suggested by jhalliday on irc but the > rest of the details are mine and I should be blamed for anything stupid in > what follows. > Let me start with a rational: there is 2 main family of CF that have been > historically used in Thrift: static ones and dynamic ones. CQL3 handles both > family through the presence or not of clustering columns. There is however > some cases where mixing both behavior has its use. I like to think of those > use cases as 3 broad category: > # to denormalize small amounts of not-entirely-static data in otherwise > static entities. It's say "tags" for a product or "custom properties" in a > user profile. This is why we've added CQL3 collections. Importantly, this is > the *only* use case for which collections are meant (which doesn't diminishes > their usefulness imo, and I wouldn't disagree that we've maybe not > communicated this too well). > # to optimize fetching both a static entity and related dynamic ones. Say you > have blog posts, and each post has associated comments (chronologically > ordered). *And* say that a very common query is "fetch a post and its 50 last > comments". In that case, it *might* be beneficial to store a blog post > (static entity) in the same underlying CF than it's comments for performance > reason. So that "fetch a post and it's 50 last comments" is just one slice > internally. > # you want to CAS rows of a dynamic partition based on some partition > condition. This is the same use case than why CASSANDRA-5633 exists for. > As said above, 1) is already covered by collections, but 2) and 3) are not > (and > I strongly believe collections are not the right fit, API wise, for those). > Also, note that I don't want to underestimate the usefulness of 2). In most > cases, using a separate table for the blog posts and the comments is The > Right Solution, and trying to do 2) is premature optimisation. Yet, when used > properly, that kind of optimisation can make a difference, so I think having > a relatively native solution for it in CQL3 could make sense. > Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have > the feeling that static columns actually are a more natural approach (in term > of API). That's arguably more of a personal opinion/feeling though. 
> So long story short, CQL3 lacks a way to mix both some "static" and "dynamic" > rows in the same partition of the same CQL3 table, and I think such a tool > could have it's use. > The proposal is thus to allow "static" columns. Static columns would only > make sense in table with clustering columns (the "dynamic" ones). A static > column value would be static to the partition (all rows of the partition > would share the value for such column). The syntax would just be: > {noformat} > CREATE TABLE t ( > k text, > s text static, > i int, > v text, > PRIMARY KEY (k, i) > ) > {noformat} > then you'd get: > {noformat} > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm shared", 0, "foo"); > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm still shared", 1, "bar"); > SELECT * FROM t; > k | s | i |v > > k0 | "I'm still shared" | 0 | "bar" > k0 | "I'm still shared" | 1 | "foo" > {noformat} > There would be a few semantic details to decide on regarding deletions, ttl, > etc. but let's see if we agree it's a good idea first before ironing those > out. > One last point is the implementation. Though I do think this idea has merits, > it's definitively not useful enough to justify rewrit
git commit: naming
Updated Branches:
  refs/heads/trunk 0f83a8610 -> 0b738073a

naming

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/0b738073
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/0b738073
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/0b738073

Branch: refs/heads/trunk
Commit: 0b738073af5213f41af530414f8bc88201da5248
Parents: 0f83a86
Author: Dave Brosius
Authored: Wed Feb 5 21:51:25 2014 -0500
Committer: Dave Brosius
Committed: Wed Feb 5 21:53:10 2014 -0500

--
 .../apache/cassandra/service/pager/AbstractQueryPagerTest.java | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)
--

http://git-wip-us.apache.org/repos/asf/cassandra/blob/0b738073/test/unit/org/apache/cassandra/service/pager/AbstractQueryPagerTest.java
--

diff --git a/test/unit/org/apache/cassandra/service/pager/AbstractQueryPagerTest.java b/test/unit/org/apache/cassandra/service/pager/AbstractQueryPagerTest.java
index 9df9f6d..03a651d 100644
--- a/test/unit/org/apache/cassandra/service/pager/AbstractQueryPagerTest.java
+++ b/test/unit/org/apache/cassandra/service/pager/AbstractQueryPagerTest.java
@@ -34,7 +34,7 @@ import org.apache.cassandra.utils.ByteBufferUtil;
 public class AbstractQueryPagerTest
 {
     @Test
-    public void DiscardFirstTest()
+    public void discardFirstTest()
     {
         TestPager pager = new TestPager();
         List rows = Arrays.asList(createRow("r1", 1),
@@ -69,7 +69,7 @@
     }

     @Test
-    public void DiscardLastTest()
+    public void discardLastTest()
     {
         TestPager pager = new TestPager();
         List rows = Arrays.asList(createRow("r1", 2),
git commit: update stream-lib for HLL++ serialization thread-safety
Updated Branches: refs/heads/trunk 812504713 -> 0f83a8610 update stream-lib for HLL++ serialization thread-safety Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/0f83a861 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/0f83a861 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/0f83a861 Branch: refs/heads/trunk Commit: 0f83a8610bc083c706ce9ebe82d8ed295612d877 Parents: 8125047 Author: Yuki Morishita Authored: Wed Feb 5 20:44:20 2014 -0600 Committer: Yuki Morishita Committed: Wed Feb 5 20:44:20 2014 -0600 -- build.xml | 4 +- lib/licenses/stream-2.5.1.txt | 202 - lib/licenses/stream-2.5.2.txt | 202 + lib/stream-2.5.1.jar | Bin 152692 -> 0 bytes lib/stream-2.5.2.jar | Bin 0 -> 152489 bytes 5 files changed, 204 insertions(+), 204 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/0f83a861/build.xml -- diff --git a/build.xml b/build.xml index 0583012..54d6226 100644 --- a/build.xml +++ b/build.xml @@ -387,7 +387,7 @@ - + @@ -471,7 +471,7 @@ - + http://git-wip-us.apache.org/repos/asf/cassandra/blob/0f83a861/lib/licenses/stream-2.5.1.txt -- diff --git a/lib/licenses/stream-2.5.1.txt b/lib/licenses/stream-2.5.1.txt deleted file mode 100644 index c8dc677..000 --- a/lib/licenses/stream-2.5.1.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 -http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. 
- - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue t
[jira] [Assigned] (CASSANDRA-6649) CQL: disallow counter update with "USING TIMESTAMP" and "USING TTL"
[ https://issues.apache.org/jira/browse/CASSANDRA-6649?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ]

Aleksey Yeschenko reassigned CASSANDRA-6649:

Assignee: Aleksey Yeschenko

> CQL: disallow counter update with "USING TIMESTAMP" and "USING TTL"
> ---
>
> Key: CASSANDRA-6649
> URL: https://issues.apache.org/jira/browse/CASSANDRA-6649
> Project: Cassandra
> Issue Type: Bug
> Reporter: Sylvain Lebresne
> Assignee: Aleksey Yeschenko
> Priority: Minor
> Fix For: 1.2.15, 2.0.6
>
> Timestamps are not used by counters and TTLs are not supported, but it appears
> we don't reject counter updates that have "USING TIMESTAMP X" or "USING TTL X".
> We should, since both are nonsensical (the value is completely ignored currently).
> Note: we should also refuse "USING TIMESTAMP" on "DELETE" statements on counter
> tables: even though we kind of do use a timestamp internally, it's more of an
> implementation detail and in fact may go away with CASSANDRA-6506 (there is also
> nothing clever you can do with it by providing it client side).
> Note bis: strictly speaking, doing that could break a few users that were setting
> those thinking it does something. I think that the lack of validation is more of
> a bug and that users who think it's doing something probably ought to know it's
> not sooner rather than later, but I could be fine with just warning in the log
> file for 1.2 and 2.0, and only rejecting in 2.1 if someone thinks it's safer.

--
This message was sent by Atlassian JIRA
(v6.1.5#6160)
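
Wherever the check ends up, the shape is simple: statement validation rejects the attributes outright for counter tables. A hypothetical sketch follows; the method names and error-message wording are assumptions, not the eventual patch.

{code}
// Hypothetical validation sketch for CASSANDRA-6649; not the actual
// ModificationStatement code or its final error messages.
import org.apache.cassandra.exceptions.InvalidRequestException;

final class CounterAttributeValidation
{
    static void validate(boolean isCounterTable, boolean timestampSet, boolean ttlSet)
            throws InvalidRequestException
    {
        if (!isCounterTable)
            return;
        if (timestampSet)
            throw new InvalidRequestException("Cannot provide custom timestamp for counter updates");
        if (ttlSet)
            throw new InvalidRequestException("Cannot provide custom TTL for counter updates");
    }
}
{code}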
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892935#comment-13892935 ] Aleksey Yeschenko commented on CASSANDRA-6561: -- Only just skimmed it a little. I don't think is_static boolean is the way to go - I'd prefer new ColumnDefinition.Kind.STATIC here. Also not sure about the BATCH syntax here (but don't have a better solution in mind, yet). > Static columns in CQL3 > -- > > Key: CASSANDRA-6561 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 > Project: Cassandra > Issue Type: New Feature >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne > Fix For: 2.0.6 > > > I'd like to suggest the following idea for adding "static" columns to CQL3. > I'll note that the basic idea has been suggested by jhalliday on irc but the > rest of the details are mine and I should be blamed for anything stupid in > what follows. > Let me start with a rational: there is 2 main family of CF that have been > historically used in Thrift: static ones and dynamic ones. CQL3 handles both > family through the presence or not of clustering columns. There is however > some cases where mixing both behavior has its use. I like to think of those > use cases as 3 broad category: > # to denormalize small amounts of not-entirely-static data in otherwise > static entities. It's say "tags" for a product or "custom properties" in a > user profile. This is why we've added CQL3 collections. Importantly, this is > the *only* use case for which collections are meant (which doesn't diminishes > their usefulness imo, and I wouldn't disagree that we've maybe not > communicated this too well). > # to optimize fetching both a static entity and related dynamic ones. Say you > have blog posts, and each post has associated comments (chronologically > ordered). *And* say that a very common query is "fetch a post and its 50 last > comments". In that case, it *might* be beneficial to store a blog post > (static entity) in the same underlying CF than it's comments for performance > reason. So that "fetch a post and it's 50 last comments" is just one slice > internally. > # you want to CAS rows of a dynamic partition based on some partition > condition. This is the same use case than why CASSANDRA-5633 exists for. > As said above, 1) is already covered by collections, but 2) and 3) are not > (and > I strongly believe collections are not the right fit, API wise, for those). > Also, note that I don't want to underestimate the usefulness of 2). In most > cases, using a separate table for the blog posts and the comments is The > Right Solution, and trying to do 2) is premature optimisation. Yet, when used > properly, that kind of optimisation can make a difference, so I think having > a relatively native solution for it in CQL3 could make sense. > Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have > the feeling that static columns actually are a more natural approach (in term > of API). That's arguably more of a personal opinion/feeling though. > So long story short, CQL3 lacks a way to mix both some "static" and "dynamic" > rows in the same partition of the same CQL3 table, and I think such a tool > could have it's use. > The proposal is thus to allow "static" columns. Static columns would only > make sense in table with clustering columns (the "dynamic" ones). A static > column value would be static to the partition (all rows of the partition > would share the value for such column). 
The syntax would just be: > {noformat} > CREATE TABLE t ( > k text, > s text static, > i int, > v text, > PRIMARY KEY (k, i) > ) > {noformat} > then you'd get: > {noformat} > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm shared", 0, "foo"); > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm still shared", 1, "bar"); > SELECT * FROM t; > k | s | i |v > > k0 | "I'm still shared" | 0 | "foo" > k0 | "I'm still shared" | 1 | "bar" > {noformat} > There would be a few semantic details to decide on regarding deletions, ttl, > etc. but let's see if we agree it's a good idea first before ironing those > out. > One last point is the implementation. Though I do think this idea has merits, > it's definitely not useful enough to justify rewriting the storage engine > for it. But I think we can support this relatively easily (emphasis on > "relatively" :)), which is probably the main reason why I like the approach. > Namely, internally, we can store static columns as cells whose clustering > column values are empty. So in terms of cells, the partition of my example > would look like: > {noformat} > "k0" : [ > (:"s" -> "I'm still shared"), // the static column > (0:"" -> "")
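For illustration only, here is a minimal, self-contained Java sketch of the storage idea described above: cell names are (clustering value, column name) pairs, and a static cell uses an empty clustering value so it sorts before every row of the partition. The class and key layout are stand-ins, not Cassandra's actual cell classes.
{code}
import java.util.*;

// Toy model of the proposed layout; the empty clustering value makes the
// static cell the first cell of the partition, so a single internal slice
// can return it together with any range of rows.
public class StaticCellLayoutSketch
{
    public static void main(String[] args)
    {
        // Key = [clusteringValue, columnName]; an empty clustering value marks a static cell.
        Comparator<List<String>> byComponents = (a, b) -> {
            int c = a.get(0).compareTo(b.get(0));
            return c != 0 ? c : a.get(1).compareTo(b.get(1));
        };
        NavigableMap<List<String>, String> partition = new TreeMap<>(byComponents);

        partition.put(Arrays.asList("", "s"), "I'm still shared"); // the static cell
        partition.put(Arrays.asList("0", "v"), "foo");
        partition.put(Arrays.asList("1", "v"), "bar");

        // Prints the static cell first, then the regular rows in clustering order.
        partition.forEach((name, value) -> System.out.println(name + " -> " + value));
    }
}
{code}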
[jira] [Commented] (CASSANDRA-5631) NPE when creating column family shortly after multinode startup
[ https://issues.apache.org/jira/browse/CASSANDRA-5631?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892925#comment-13892925 ] Aleksey Yeschenko commented on CASSANDRA-5631: -- bq. I think a fix would be to catch the NPEs above, and then, on learning about a new keyspace, check to see if any CFs should have been created for that keyspace. This sounds reasonable to me. Another way would be to send the keyspace mutation serialized along with any column families created/altered messages, so that there will never be an NPE there in the first place. This had actually come up before. Will have a look. > NPE when creating column family shortly after multinode startup > --- > > Key: CASSANDRA-5631 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5631 > Project: Cassandra > Issue Type: Bug > Components: Core >Affects Versions: 1.2.0 >Reporter: Martin Serrano >Assignee: Aleksey Yeschenko > > I'm testing a 2-node cluster and creating a column family right after the > nodes startup. I am using the Astyanax client. Sometimes column family > creation fails and I see NPEs on the cassandra server: > {noformat} > 2013-06-12 14:55:31,773 ERROR CassandraDaemon [MigrationStage:1] - Exception > in thread Thread[MigrationStage:1,5,main] > java.lang.NullPointerException > at org.apache.cassandra.db.DefsTable.addColumnFamily(DefsTable.java:510) > at > org.apache.cassandra.db.DefsTable.mergeColumnFamilies(DefsTable.java:444) > at org.apache.cassandra.db.DefsTable.mergeSchema(DefsTable.java:354) > at > org.apache.cassandra.db.DefinitionsUpdateVerbHandler$1.runMayThrow(DefinitionsUpdateVerbHandler.java:55) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) > at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) > at java.util.concurrent.FutureTask.run(FutureTask.java:166) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:722) > {noformat} > {noformat} > 2013-06-12 14:55:31,880 ERROR CassandraDaemon [MigrationStage:1] - Exception > in thread Thread[MigrationStage:1,5,main] > java.lang.NullPointerException > at > org.apache.cassandra.db.DefsTable.mergeColumnFamilies(DefsTable.java:475) > at org.apache.cassandra.db.DefsTable.mergeSchema(DefsTable.java:354) > at > org.apache.cassandra.db.DefinitionsUpdateVerbHandler$1.runMayThrow(DefinitionsUpdateVerbHandler.java:55) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) > at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) > at java.util.concurrent.FutureTask.run(FutureTask.java:166) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:722) > {noformat} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
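A rough sketch of the "catch the NPE and replay once the keyspace is known" approach suggested above, in plain Java. All class and method names here are hypothetical placeholders, not the real DefsTable/Schema APIs; the point is only the defer-and-replay pattern.
{code}
import java.util.*;
import java.util.concurrent.*;

public class DeferredCfMergeSketch
{
    private final Set<String> knownKeyspaces = ConcurrentHashMap.newKeySet();
    private final Map<String, List<String>> pendingCfsByKeyspace = new ConcurrentHashMap<>();

    public void onColumnFamilyAnnounced(String keyspace, String cfName)
    {
        if (knownKeyspaces.contains(keyspace))
            createColumnFamily(keyspace, cfName);
        else
            // Keyspace definition has not arrived yet; remember the CF instead of
            // dereferencing a missing keyspace definition and throwing an NPE.
            pendingCfsByKeyspace.computeIfAbsent(keyspace, k -> new CopyOnWriteArrayList<>()).add(cfName);
    }

    public void onKeyspaceAnnounced(String keyspace)
    {
        knownKeyspaces.add(keyspace);
        // Replay any CF definitions that raced ahead of their keyspace.
        for (String cfName : pendingCfsByKeyspace.getOrDefault(keyspace, Collections.emptyList()))
            createColumnFamily(keyspace, cfName);
        pendingCfsByKeyspace.remove(keyspace);
    }

    private void createColumnFamily(String keyspace, String cfName)
    {
        System.out.println("creating " + keyspace + "." + cfName);
    }
}
{code}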
[jira] [Commented] (CASSANDRA-6656) Exception logging
[ https://issues.apache.org/jira/browse/CASSANDRA-6656?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892919#comment-13892919 ] Ding Yuan commented on CASSANDRA-6656: -- Thanks Mikhail. Updated the patch. > Exception logging > - > > Key: CASSANDRA-6656 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6656 > Project: Cassandra > Issue Type: Improvement > Components: Core, Tools >Reporter: Ding Yuan >Assignee: Ding Yuan >Priority: Trivial > Fix For: 2.1 > > Attachments: trunk-6656-v2.txt, trunk-6656.txt > > > Reporting a few cases where informative exceptions can be silently swallowed. > Attaching a proposed patch. > = > Case 1 > Line: 95, File: "org/apache/cassandra/utils/Hex.java" > An actual failure in the underlying constructor will be lost. > Propose to log it. > {noformat} > try > { > s = stringConstructor.newInstance(0, c.length, c); > + } > + catch (InvocationTargetException ite) { > + // The underlying constructor failed. Unwrapping the > exception. > + logger.info("Underlying constructor throws exception: ", > ite.getCause()); > } > catch (Exception e) > { > // Swallowing as we'll just use a copying constructor > } > return s == null ? new String(c) : s; > {noformat} > == > = > Case 2 > Line: 192, File: "org/apache/cassandra/db/marshal/DynamicCompositeType.java" > The actual cause of comparator error can be lost as it can fail in multiple > locations. > {noformat} > AbstractType comparator = null; > int header = getShortLength(bb); > if ((header & 0x8000) == 0) > { > ByteBuffer value = getBytes(bb, header); > try > { > comparator = TypeParser.parse(ByteBufferUtil.string(value)); > } > catch (Exception e) > { > <--- can fail here > // we'll deal with this below since comparator == null > } > } > else > { > comparator = aliases.get((byte)(header & 0xFF)); > <--- can fail here > } > if (comparator == null) > throw new MarshalException("Cannot find comparator for component > " + i); > {noformat} > Propose to log the exception. > == > = > Case 3 > Line: 239, File: "org/apache/cassandra/tools/NodeProbe.java" > Exception ignored in finally. Propose log them with debug or trace. > {noformat} > 232: finally > 233: { > 234: try > 235: { > 236: ssProxy.removeNotificationListener(runner); > 236: ssProxy.removeNotificationListener(runner); > 237: jmxc.removeConnectionNotificationListener(runner); > 238: } > 239: catch (Throwable ignored) {} > 240: } > {noformat} > Similar case is at line 264 in the same file. > == -- This message was sent by Atlassian JIRA (v6.1.5#6160)
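As a small illustration of the Case 3 suggestion: the cleanup failure is still swallowed so the caller is unaffected, but the cause is recorded at debug level instead of being dropped. This is a self-contained sketch; the connection and listener arguments are placeholders, not the real NodeProbe members.
{code}
import javax.management.MBeanServerConnection;
import javax.management.NotificationListener;
import javax.management.ObjectName;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class ListenerCleanupSketch
{
    private static final Logger logger = LoggerFactory.getLogger(ListenerCleanupSketch.class);

    static void removeQuietly(MBeanServerConnection connection, ObjectName name, NotificationListener listener)
    {
        try
        {
            connection.removeNotificationListener(name, listener);
        }
        catch (Throwable t)
        {
            // Previously: catch (Throwable ignored) {} -- the cause was lost entirely.
            logger.debug("Failed to remove notification listener", t);
        }
    }
}
{code}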
[jira] [Updated] (CASSANDRA-6656) Exception logging
[ https://issues.apache.org/jira/browse/CASSANDRA-6656?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Ding Yuan updated CASSANDRA-6656: - Attachment: trunk-6656-v2.txt > Exception logging > - > > Key: CASSANDRA-6656 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6656 > Project: Cassandra > Issue Type: Improvement > Components: Core, Tools >Reporter: Ding Yuan >Assignee: Ding Yuan >Priority: Trivial > Fix For: 2.1 > > Attachments: trunk-6656-v2.txt, trunk-6656.txt > > > Reporting a few cases where informative exceptions can be silently swallowed. > Attaching a proposed patch. > = > Case 1 > Line: 95, File: "org/apache/cassandra/utils/Hex.java" > An actual failure in the underlying constructor will be lost. > Propose to log it. > {noformat} > try > { > s = stringConstructor.newInstance(0, c.length, c); > + } > + catch (InvocationTargetException ite) { > + // The underlying constructor failed. Unwrapping the > exception. > + logger.info("Underlying constructor throws exception: ", > ite.getCause()); > } > catch (Exception e) > { > // Swallowing as we'll just use a copying constructor > } > return s == null ? new String(c) : s; > {noformat} > == > = > Case 2 > Line: 192, File: "org/apache/cassandra/db/marshal/DynamicCompositeType.java" > The actual cause of comparator error can be lost as it can fail in multiple > locations. > {noformat} > AbstractType comparator = null; > int header = getShortLength(bb); > if ((header & 0x8000) == 0) > { > ByteBuffer value = getBytes(bb, header); > try > { > comparator = TypeParser.parse(ByteBufferUtil.string(value)); > } > catch (Exception e) > { > <--- can fail here > // we'll deal with this below since comparator == null > } > } > else > { > comparator = aliases.get((byte)(header & 0xFF)); > <--- can fail here > } > if (comparator == null) > throw new MarshalException("Cannot find comparator for component > " + i); > {noformat} > Propose to log the exception. > == > = > Case 3 > Line: 239, File: "org/apache/cassandra/tools/NodeProbe.java" > Exception ignored in finally. Propose log them with debug or trace. > {noformat} > 232: finally > 233: { > 234: try > 235: { > 236: ssProxy.removeNotificationListener(runner); > 236: ssProxy.removeNotificationListener(runner); > 237: jmxc.removeConnectionNotificationListener(runner); > 238: } > 239: catch (Throwable ignored) {} > 240: } > {noformat} > Similar case is at line 264 in the same file. > == -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6662) Sort/reconcile cells in ArrayBackedSortedColumns only when an accessor is called
Aleksey Yeschenko created CASSANDRA-6662: Summary: Sort/reconcile cells in ArrayBackedSortedColumns only when an accessor is called Key: CASSANDRA-6662 URL: https://issues.apache.org/jira/browse/CASSANDRA-6662 Project: Cassandra Issue Type: Improvement Reporter: Aleksey Yeschenko Assignee: Aleksey Yeschenko Fix For: 2.1 To avoid poor performance with huge numbers of cells added out of order (which should be rare, but *can* happen with certain batch scenarios) we should make ABSC only sort/reconcile its cells when an accessor is actually called, delaying sorting until the very end. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
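A minimal sketch of the idea in this ticket: append cells in whatever order they arrive and only sort (and reconcile duplicates) the first time an accessor needs the sorted view. This is illustrative Java using strings as stand-ins for cells, not the real ArrayBackedSortedColumns API.
{code}
import java.util.*;

public class LazySortedCells
{
    private final List<String> cells = new ArrayList<>(); // stand-in for Cell
    private final Comparator<String> comparator;
    private boolean sortedAndReconciled = true;

    public LazySortedCells(Comparator<String> comparator)
    {
        this.comparator = comparator;
    }

    public void add(String cell)
    {
        // O(1) append even for out-of-order input (e.g. large unsorted batches).
        if (!cells.isEmpty() && comparator.compare(cells.get(cells.size() - 1), cell) > 0)
            sortedAndReconciled = false;
        cells.add(cell);
    }

    public List<String> getSorted()
    {
        maybeSortAndReconcile();
        return Collections.unmodifiableList(cells);
    }

    private void maybeSortAndReconcile()
    {
        if (sortedAndReconciled)
            return;
        cells.sort(comparator);
        // Real code would also merge cells with equal names here (reconcile).
        sortedAndReconciled = true;
    }
}
{code}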
git commit: Replace UnsortedColumns usage with ArrayBackedSortedColumns
Updated Branches: refs/heads/trunk fe4247e58 -> 812504713 Replace UnsortedColumns usage with ArrayBackedSortedColumns patch by Aleksey Yeschenko; reviewed by Sylvain Lebresne for CASSANDRA-6630 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/81250471 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/81250471 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/81250471 Branch: refs/heads/trunk Commit: 812504713523c2b8fbff394fbf4448ea30b5e4a3 Parents: fe4247e Author: Aleksey Yeschenko Authored: Thu Feb 6 04:57:56 2014 +0300 Committer: Aleksey Yeschenko Committed: Thu Feb 6 04:57:56 2014 +0300 -- CHANGES.txt | 1 + .../cql3/statements/UpdateStatement.java| 2 +- .../cassandra/db/ArrayBackedSortedColumns.java | 14 -- .../apache/cassandra/db/AtomicBTreeColumns.java | 9 +--- .../org/apache/cassandra/db/ColumnFamily.java | 6 ++- src/java/org/apache/cassandra/db/Mutation.java | 2 +- .../apache/cassandra/service/paxos/Commit.java | 5 ++- .../service/paxos/PrepareResponse.java | 10 +++-- .../cassandra/thrift/CassandraServer.java | 4 +- .../apache/cassandra/db/CounterCacheTest.java | 2 +- .../cassandra/db/CounterMutationTest.java | 46 +++- 11 files changed, 56 insertions(+), 45 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/81250471/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 7d628b5..a139fdc 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -25,6 +25,7 @@ * CF id is changed to be non-deterministic. Data dir/key cache are created uniquely for CF id (CASSANDRA-5202) * New counters implementation (CASSANDRA-6504) + * Replace UnsortedColumns usage with ArrayBackedSortedColumns (CASSANDRA-6630) 2.0.6 http://git-wip-us.apache.org/repos/asf/cassandra/blob/81250471/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java index 1102c09..6ed0e33 100644 --- a/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/UpdateStatement.java @@ -99,7 +99,7 @@ public class UpdateStatement extends ModificationStatement public ColumnFamily updateForKey(ByteBuffer key, Composite prefix, UpdateParameters params) throws InvalidRequestException { -ColumnFamily cf = UnsortedColumns.factory.create(cfm); +ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cfm); addUpdateForKey(cf, key, prefix, params); return cf; } http://git-wip-us.apache.org/repos/asf/cassandra/blob/81250471/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java -- diff --git a/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java b/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java index 7bcbe25..b81e403 100644 --- a/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java +++ b/src/java/org/apache/cassandra/db/ArrayBackedSortedColumns.java @@ -91,16 +91,6 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns return pos >= 0 ? cells.get(pos) : null; } -/** - * AddColumn throws an exception if the cell added does not sort after - * the last cell in the map. - * The reasoning is that this implementation can get slower if too much - * insertions are done in unsorted order and right now we only use it when - * *all* insertion (with this method) are done in sorted order. 
The - * assertion throwing is thus a protection against performance regression - * without knowing about (we can revisit that decision later if we have - * use cases where most insert are in sorted order but a few are not). - */ public void addColumn(Cell cell, AbstractAllocator allocator) { if (cells.isEmpty()) @@ -109,11 +99,7 @@ public class ArrayBackedSortedColumns extends AbstractThreadUnsafeSortedColumns return; } -// Fast path if inserting at the tail int c = internalComparator().compare(cells.get(getColumnCount() - 1).name(), cell.name()); -// note that we want an assertion here (see addColumn javadoc), but we also want that if -// assertion are disabled, addColumn works correctly with unsorted input -assert c <= 0 : "Added cell does not sort as the " + (reversed ? "first" : "last") + " cell"; if (c < 0)
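The hunk above (truncated in this digest) keeps the tail comparison but drops the sorted-input assertion. A rough sketch of the overall approach, under the assumption that the common case is an append at the tail and the occasional out-of-order cell is handled by a binary-search insert or an in-place merge; names are illustrative, not the real ArrayBackedSortedColumns code.
{code}
import java.util.*;

public class SortedAppendSketch
{
    private final List<String> cells = new ArrayList<>();
    private final Comparator<String> comparator = Comparator.naturalOrder();

    public void addCell(String cell)
    {
        if (cells.isEmpty() || comparator.compare(cells.get(cells.size() - 1), cell) < 0)
        {
            cells.add(cell); // fast path: cell sorts after the current tail
            return;
        }
        int pos = Collections.binarySearch(cells, cell, comparator);
        if (pos >= 0)
            cells.set(pos, reconcile(cells.get(pos), cell)); // same name: merge in place
        else
            cells.add(-pos - 1, cell); // out-of-order: insert at the computed slot
    }

    private String reconcile(String existing, String incoming)
    {
        return incoming; // placeholder for timestamp-based reconciliation
    }
}
{code}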
[jira] [Commented] (CASSANDRA-6653) Attempting to bootstrap causes nodes to lock up in GC
[ https://issues.apache.org/jira/browse/CASSANDRA-6653?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892887#comment-13892887 ] Keith Wright commented on CASSANDRA-6653: - The GC logs do show that the application paused for 16+ seconds: Total time for which application threads were stopped: 16.5111290 seconds Seems to me thats an unhealthy GC pause. What additional logging would be helpful to diagnose this issue beyond looking at CPU usage? Thanks! > Attempting to bootstrap causes nodes to lock up in GC > - > > Key: CASSANDRA-6653 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6653 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: VNodes using Murmur3 >Reporter: Keith Wright > > We have been struggling with the inability to bootstrap nodes into our 1.2.13 > environment with Vnodes using centos 6.4 with Java 7. We have an 8 node > cluster (32 GB RAM, dual hex core, SSDs, 8 GB heap with 1200 MB eden space, > RF3) with around 1 TB per node using murmur3. When we go to bootstrap a new > node this is what we see: > - Bootstrapping node assigns tokens and requests data from cluster > - 5-6 nodes within the cluster begin to stream data > - Around 2 minutes after bootstrap start, between 1 and 4 nodes (sometimes > the bootstrapping node and sometimes not) become unresponsive in par new GCs > - IF no nodes go down during the first 5 minutes of bootstrap, then the > bootstrap will succeed without issue > GC mired nodes tend to recover after a minute or two but the receiving node > stops attempting to get more data from the nodes > - Bootstrap eventually fails (after streaming all the data from nodes that > did not go down) with Unable to Fetch Ranges > We have tried the following and it appears that sometimes a bootstrap will > succeed (perhaps 1 in 10) but with no discernible pattern: > - Increase phi_convict to 16 > - Restart all nodes prior to bootstrap (to ensure heap is as “clean” as > possible) > - Stop production load against the cluster (to reduce par new churn); after 5 > minutes we know if the bootstrap will succeed so we then re-enable load > - Distribute soft interrupts across all CPUs > Below is an output from the GC log of the bootstrapping node when it was > stuck in GC. 
> {Heap before GC invocations=109 (full 0): > par new generation total 1105920K, used 1021140K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 100% used [0x0005fae0, 0x000636e0, > 0x000636e0) > from space 122880K, 31% used [0x00063e60, 0x000640b350f0, > 0x000645e0) > to space 122880K, 0% used [0x000636e0, 0x000636e0, > 0x00063e60) > concurrent mark-sweep generation total 7159808K, used 3826815K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurrent-mark-sweep perm gen total 24512K, used 24368K > [0x0007fae0, 0x0007fc5f, 0x0008) > 2014-02-05T13:27:49.621+: 210.242: [GC 210.242: [ParNew: > 1021140K->122880K(1105920K), 0.2963210 secs] 4847955K->4024095K(8265728K), > 0.2965270 secs] [Times: user=4.97 sys=0.00, real=0.30 secs] > Heap after GC invocations=110 (full 0): > par new generation total 1105920K, used 122880K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 0% used [0x0005fae0, 0x0005fae0, > 0x000636e0) > from space 122880K, 100% used [0x000636e0, 0x00063e60, > 0x00063e60) > to space 122880K, 0% used [0x00063e60, 0x00063e60, > 0x000645e0) > concurrent mark-sweep generation total 7159808K, used 3901215K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurrent-mark-sweep perm gen total 24512K, used 24368K > [0x0007fae0, 0x0007fc5f, 0x0008) > } > Total time for which application threads were stopped: 0.2968550 seconds > Application time: 1.5953840 seconds > Total time for which application threads were stopped: 0.0002040 seconds > Application time: 0.510 seconds > Relevant portion of GC log from non-bootstrapping node: > {Heap before GC invocations=518 (full 1): > par new generation total 1105920K, used 17921K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 1% used [0x0005fae0, 0x0005fbf29db8, > 0x000636e0) > from space 122880K, 0% used [0x000636e0, 0x000636e56938, > 0x00063e60) > to space 122880K, 0% used [0x00063e60, 0x00063e60, > 0x000645e0) > concurrent mark-sweep generation total 7159808K, used 6367511K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurre
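For the kind of diagnosis being discussed, one might scan the GC log for long stop-the-world pauses rather than eyeballing it. The sketch below is offered only as an illustration; it assumes the "Total time for which application threads were stopped" lines printed with -XX:+PrintGCApplicationStoppedTime, as in the log excerpt above, and the one-second threshold is arbitrary.
{code}
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

public class LongPauseScanner
{
    private static final Pattern STOPPED =
        Pattern.compile("Total time for which application threads were stopped: ([0-9.]+) seconds");

    public static void main(String[] args) throws IOException
    {
        double thresholdSeconds = 1.0;
        // args[0] is the path to the GC log file.
        for (String line : Files.readAllLines(Paths.get(args[0])))
        {
            Matcher m = STOPPED.matcher(line);
            if (m.find() && Double.parseDouble(m.group(1)) >= thresholdSeconds)
                System.out.println(line); // flag pauses at or above the threshold
        }
    }
}
{code}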
[jira] [Commented] (CASSANDRA-6572) Workload recording / playback
[ https://issues.apache.org/jira/browse/CASSANDRA-6572?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892875#comment-13892875 ] Aleksey Yeschenko commented on CASSANDRA-6572: -- bq. Do you have any strong feelings here Aleksey Yeschenko on the QP/SP divide? This belongs to QP. > Workload recording / playback > - > > Key: CASSANDRA-6572 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6572 > Project: Cassandra > Issue Type: New Feature > Components: Core, Tools >Reporter: Jonathan Ellis >Assignee: Lyuben Todorov > Fix For: 2.0.6 > > > "Write sample mode" gets us part way to testing new versions against a real > world workload, but we need an easy way to test the query side as well. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6624) General protection fault
[ https://issues.apache.org/jira/browse/CASSANDRA-6624?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Aleksey Yeschenko updated CASSANDRA-6624: - Fix Version/s: (was: 2.0.3) 2.0.6 > General protection fault > > > Key: CASSANDRA-6624 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6624 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Linux s41083 3.2.0-58-generic #88-Ubuntu SMP Tue Dec 3 > 17:37:58 UTC 2013 x86_64 x86_64 x86_64 GNU/Linux > java version "1.7.0_51" > Java(TM) SE Runtime Environment (build 1.7.0_51-b13) > Java HotSpot(TM) 64-Bit Server VM (build 24.51-b03, mixed mode) >Reporter: Mateusz Gajewski >Priority: Critical > Fix For: 2.0.6 > > Attachments: system.log > > > Hi, > Yesterday I got a General Protection Fault in the cassandra 2.0.3 process while > stress testing it. > Jan 26 23:19:43 s41083 kernel: [461545.017756] java[192074] general > protection ip:7fea558c6ae7 sp:7fe959844bf0 error:0 in > libc-2.15.so[7fea5588d000+1b5000] > It just died while doing compaction and restarted a couple of times several > minutes after that. > System log attached -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-4851) CQL3: improve support for paginating over composites
[ https://issues.apache.org/jira/browse/CASSANDRA-4851?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892862#comment-13892862 ] Aleksey Yeschenko commented on CASSANDRA-4851: -- In SelectStatement, what's the purpose of {code} boolean hasTuple = false; boolean hasRestrictedNotTuple = false {code} ? Is that a leftover from a previous iteration? (neither is used) > CQL3: improve support for paginating over composites > > > Key: CASSANDRA-4851 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4851 > Project: Cassandra > Issue Type: Improvement > Components: API >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne >Priority: Minor > Fix For: 2.0.6 > > Attachments: 4851.txt > > > Consider the following table: > {noformat} > CREATE TABLE test ( > k int, > c1 int, > c2 int, > PRIMARY KEY (k, c1, c2) > ) > {noformat} > with the following data: > {noformat} > k | c1 | c2 > > 0 | 0 | 0 > 0 | 0 | 1 > 0 | 1 | 0 > 0 | 1 | 1 > {noformat} > Currently, CQL3 allows slicing over either c1 or c2: > {noformat} > SELECT * FROM test WHERE k = 0 AND c1 > 0 AND c1 < 2 > SELECT * FROM test WHERE k = 0 AND c1 = 1 AND c2 > 0 AND c2 < 2 > {noformat} > but you cannot express a query that returns the 3 last records. Indeed, for > that you would need to do a query like say: > {noformat} > SELECT * FROM test WHERE k = 0 AND ((c1 = 0 AND c2 > 0) OR c1 > 0) > {noformat} > but we don't support that. > This can make it hard to paginate over, say, all records for {{k = 0}} (I'm > saying "can" because if the value for c2 cannot be very large, an easy > workaround could be to paginate by entire value of c1, which you can do). > For the case where you only paginate to avoid OOMing on a query, > CASSANDRA-4415 will do that and is probably the best solution. However, there > may be cases where the pagination is, say, user (as in, the user of your > application) triggered. > I note that one solution would be to add the OR support at least in cases like > the one above. That's definitely doable but on the other side, we won't be > able to support full-blown OR, so it may not be very natural that we support > seemingly random combinations of OR and not others. > Another solution would be to allow the following syntax: > {noformat} > SELECT * FROM test WHERE k = 0 AND (c1, c2) > (0, 0) > {noformat} > which would literally mean that you want records where the values of c1 and > c2 taken as a tuple are lexicographically greater than the tuple (0, 0). This > is less SQL-like (though maybe some SQL stores have that; it's a fairly standard thing > to have imo?), but would be much simpler to implement and probably to use too. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
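To make the lexicographic semantics behind the proposed "(c1, c2) > (0, 0)" syntax concrete, here is a plain-Java illustration (not CQL or Cassandra internals): a row qualifies when its clustering tuple compares greater than the bound, component by component.
{code}
import java.util.Arrays;
import java.util.List;

public class TupleCompareSketch
{
    static int compareTuples(List<Integer> a, List<Integer> b)
    {
        // Compare component by component; the first difference decides.
        for (int i = 0; i < Math.min(a.size(), b.size()); i++)
        {
            int c = Integer.compare(a.get(i), b.get(i));
            if (c != 0)
                return c;
        }
        return Integer.compare(a.size(), b.size());
    }

    public static void main(String[] args)
    {
        List<List<Integer>> rows = Arrays.asList(
            Arrays.asList(0, 0), Arrays.asList(0, 1), Arrays.asList(1, 0), Arrays.asList(1, 1));
        List<Integer> bound = Arrays.asList(0, 0);
        // Prints the three "last" rows from the example: [0, 1], [1, 0], [1, 1]
        rows.stream().filter(r -> compareTuples(r, bound) > 0).forEach(System.out::println);
    }
}
{code}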
[jira] [Resolved] (CASSANDRA-6653) Attempting to bootstrap causes nodes to lock up in GC
[ https://issues.apache.org/jira/browse/CASSANDRA-6653?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-6653. --- Resolution: Not A Problem I don't see anything here to suggest a Cassandra bug, or even that GC is particularly sad. When you take this back to the mailing list, consider looking at CPU usage: if it's GC-bound, there will be a GC thread consuming 100% of a core pretty much constantly. > Attempting to bootstrap causes nodes to lock up in GC > - > > Key: CASSANDRA-6653 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6653 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: VNodes using Murmur3 >Reporter: Keith Wright > > We have been struggling with the inability to bootstrap nodes into our 1.2.13 > environment with Vnodes using centos 6.4 with Java 7. We have an 8 node > cluster (32 GB RAM, dual hex core, SSDs, 8 GB heap with 1200 MB eden space, > RF3) with around 1 TB per node using murmur3. When we go to bootstrap a new > node this is what we see: > - Bootstrapping node assigns tokens and requests data from cluster > - 5-6 nodes within the cluster begin to stream data > - Around 2 minutes after bootstrap start, between 1 and 4 nodes (sometimes > the bootstrapping node and sometimes not) become unresponsive in par new GCs > - IF no nodes go down during the first 5 minutes of bootstrap, then the > bootstrap will succeed without issue > GC mired nodes tend to recover after a minute or two but the receiving node > stops attempting to get more data from the nodes > - Bootstrap eventually fails (after streaming all the data from nodes that > did not go down) with Unable to Fetch Ranges > We have tried the following and it appears that sometimes a bootstrap will > succeed (perhaps 1 in 10) but with no discernible pattern: > - Increase phi_convict to 16 > - Restart all nodes prior to bootstrap (to ensure heap is as “clean” as > possible) > - Stop production load against the cluster (to reduce par new churn); after 5 > minutes we know if the bootstrap will succeed so we then re-enable load > - Distribute soft interrupts across all CPUs > Below is an output from the GC log of the bootstrapping node when it was > stuck in GC. 
> {Heap before GC invocations=109 (full 0): > par new generation total 1105920K, used 1021140K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 100% used [0x0005fae0, 0x000636e0, > 0x000636e0) > from space 122880K, 31% used [0x00063e60, 0x000640b350f0, > 0x000645e0) > to space 122880K, 0% used [0x000636e0, 0x000636e0, > 0x00063e60) > concurrent mark-sweep generation total 7159808K, used 3826815K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurrent-mark-sweep perm gen total 24512K, used 24368K > [0x0007fae0, 0x0007fc5f, 0x0008) > 2014-02-05T13:27:49.621+: 210.242: [GC 210.242: [ParNew: > 1021140K->122880K(1105920K), 0.2963210 secs] 4847955K->4024095K(8265728K), > 0.2965270 secs] [Times: user=4.97 sys=0.00, real=0.30 secs] > Heap after GC invocations=110 (full 0): > par new generation total 1105920K, used 122880K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 0% used [0x0005fae0, 0x0005fae0, > 0x000636e0) > from space 122880K, 100% used [0x000636e0, 0x00063e60, > 0x00063e60) > to space 122880K, 0% used [0x00063e60, 0x00063e60, > 0x000645e0) > concurrent mark-sweep generation total 7159808K, used 3901215K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurrent-mark-sweep perm gen total 24512K, used 24368K > [0x0007fae0, 0x0007fc5f, 0x0008) > } > Total time for which application threads were stopped: 0.2968550 seconds > Application time: 1.5953840 seconds > Total time for which application threads were stopped: 0.0002040 seconds > Application time: 0.510 seconds > Relevant portion of GC log from non-bootstrapping node: > {Heap before GC invocations=518 (full 1): > par new generation total 1105920K, used 17921K [0x0005fae0, > 0x000645e0, 0x000645e0) > eden space 983040K, 1% used [0x0005fae0, 0x0005fbf29db8, > 0x000636e0) > from space 122880K, 0% used [0x000636e0, 0x000636e56938, > 0x00063e60) > to space 122880K, 0% used [0x00063e60, 0x00063e60, > 0x000645e0) > concurrent mark-sweep generation total 7159808K, used 6367511K > [0x000645e0, 0x0007fae0, 0x0007fae0) > concurrent-mark-sweep perm gen total 29888K, used
[jira] [Updated] (CASSANDRA-6653) Attempting to bootstrap causes nodes to lock up in GC
[ https://issues.apache.org/jira/browse/CASSANDRA-6653?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6653: -- Description: We have been struggling with the inability to bootstrap nodes into our 1.2.13 environment with Vnodes using centos 6.4 with Java 7. We have an 8 node cluster (32 GB RAM, dual hex core, SSDs, 8 GB heap with 1200 MB eden space, RF3) with around 1 TB per node using murmur3. When we go to bootstrap a new node this is what we see: - Bootstrapping node assigns tokens and requests data from cluster - 5-6 nodes within the cluster begin to stream data - Around 2 minutes after bootstrap start, between 1 and 4 nodes (sometimes the bootstrapping node and sometimes not) become unresponsive in par new GCs - IF no nodes go down during the first 5 minutes of bootstrap, then the bootstrap will succeed without issue GC mired nodes tend to recover after a minute or two but the receiving node stops attempting to get more data from the nodes - Bootstrap eventually fails (after streaming all the data from nodes that did not go down) with Unable to Fetch Ranges We have tried the following and it appears that sometimes a bootstrap will succeed (perhaps 1 in 10) but with no discernible pattern: - Increase phi_convict to 16 - Restart all nodes prior to bootstrap (to ensure heap is as “clean” as possible) - Stop production load against the cluster (to reduce par new churn); after 5 minutes we know if the bootstrap will succeed so we then re-enable load - Distribute soft interrupts across all CPUs Below is an output from the GC log of the bootstrapping node when it was stuck in GC. {Heap before GC invocations=109 (full 0): par new generation total 1105920K, used 1021140K [0x0005fae0, 0x000645e0, 0x000645e0) eden space 983040K, 100% used [0x0005fae0, 0x000636e0, 0x000636e0) from space 122880K, 31% used [0x00063e60, 0x000640b350f0, 0x000645e0) to space 122880K, 0% used [0x000636e0, 0x000636e0, 0x00063e60) concurrent mark-sweep generation total 7159808K, used 3826815K [0x000645e0, 0x0007fae0, 0x0007fae0) concurrent-mark-sweep perm gen total 24512K, used 24368K [0x0007fae0, 0x0007fc5f, 0x0008) 2014-02-05T13:27:49.621+: 210.242: [GC 210.242: [ParNew: 1021140K->122880K(1105920K), 0.2963210 secs] 4847955K->4024095K(8265728K), 0.2965270 secs] [Times: user=4.97 sys=0.00, real=0.30 secs] Heap after GC invocations=110 (full 0): par new generation total 1105920K, used 122880K [0x0005fae0, 0x000645e0, 0x000645e0) eden space 983040K, 0% used [0x0005fae0, 0x0005fae0, 0x000636e0) from space 122880K, 100% used [0x000636e0, 0x00063e60, 0x00063e60) to space 122880K, 0% used [0x00063e60, 0x00063e60, 0x000645e0) concurrent mark-sweep generation total 7159808K, used 3901215K [0x000645e0, 0x0007fae0, 0x0007fae0) concurrent-mark-sweep perm gen total 24512K, used 24368K [0x0007fae0, 0x0007fc5f, 0x0008) } Total time for which application threads were stopped: 0.2968550 seconds Application time: 1.5953840 seconds Total time for which application threads were stopped: 0.0002040 seconds Application time: 0.510 seconds Relevant portion of GC log from non-bootstrapping node: {Heap before GC invocations=518 (full 1): par new generation total 1105920K, used 17921K [0x0005fae0, 0x000645e0, 0x000645e0) eden space 983040K, 1% used [0x0005fae0, 0x0005fbf29db8, 0x000636e0) from space 122880K, 0% used [0x000636e0, 0x000636e56938, 0x00063e60) to space 122880K, 0% used [0x00063e60, 0x00063e60, 0x000645e0) concurrent mark-sweep generation total 7159808K, used 6367511K [0x000645e0, 
0x0007fae0, 0x0007fae0) concurrent-mark-sweep perm gen total 29888K, used 29784K [0x0007fae0, 0x0007fcb3, 0x0008) 2014-02-04T16:16:44.471+: 945.646: [GC 945.646: [ParNew: 17921K->364K(1105920K), 0.0090810 secs]945.655: [CMS2014-02-04T16:16:48.373+: 949.548: [CMS-concurrent-sweep: 3.938/4.362 secs] [Times: user=9.10 sys=0.19, real=4.36 secs] (concurrent mode failure): 6367540K->4453666K(7159808K), 16.4971830 secs] 6385433K->4453666K(8265728K), [CMS Perm : 29784K->29740K(29888K)], 16.5083610 secs] [Times: user=16.61 sys=0.00, real=16.50 secs] Heap after GC invocations=519 (full 2): par new generation total 1105920K, used 0K [0x0005fae0, 0x000645e0, 0x000645e0) eden space 983040K, 0% used [0x0005fae0, 0x0005fae0, 0x000636e0) from space 122880K, 0% used [0x00063e60, 0x000
[jira] [Resolved] (CASSANDRA-5359) AssertionError in HintedHandoff and CompactionExecutor
[ https://issues.apache.org/jira/browse/CASSANDRA-5359?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-5359. --- Resolution: Won't Fix > AssertionError in HintedHandoff and CompactionExecutor > -- > > Key: CASSANDRA-5359 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5359 > Project: Cassandra > Issue Type: Bug >Affects Versions: 1.2.2 > Environment: Centos 6.4 > 3 Nodes > RF 3 > Vnodes > Murmur3Partitioner >Reporter: Joey Imbasciano > > Seeing this on 2 of the 3 nodes pretty consistently. The stack traces seem to > happen consistently together, and I believe they are related. > HintedHandoff > {noformat} > ERROR [HintedHandoff:3] 2013-03-18 20:58:58,643 CassandraDaemon.java (line > 132) Exception in thread Thread[HintedHandoff:3,1,main] > java.lang.RuntimeException: java.util.concurrent.ExecutionException: > java.lang.AssertionError: originally calculated column size of 4 > 53516860 but now it is 453517031 > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpointInternal(HintedHandOffManager.java:406) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:252) > at > org.apache.cassandra.db.HintedHandOffManager.access$300(HintedHandOffManager.java:89) > at > org.apache.cassandra.db.HintedHandOffManager$4.runMayThrow(HintedHandOffManager.java:459) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(Unknown > Source) > at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) > at java.lang.Thread.run(Unknown Source) > Caused by: java.util.concurrent.ExecutionException: java.lang.AssertionError: > originally calculated column size of 453516860 but now > it is 453517031 > at java.util.concurrent.FutureTask$Sync.innerGet(Unknown Source) > at java.util.concurrent.FutureTask.get(Unknown Source) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpointInternal(HintedHandOffManager.java:402) > ... 7 more > Caused by: java.lang.AssertionError: originally calculated column size of > 453516860 but now it is 453517031 > at > org.apache.cassandra.db.compaction.LazilyCompactedRow.write(LazilyCompactedRow.java:135) > at > org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:159) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:163) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:59) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:62) > at > org.apache.cassandra.db.compaction.CompactionManager$7.runMayThrow(CompactionManager.java:422) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) > at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) > at java.util.concurrent.FutureTask.run(Unknown Source) > ... 
3 more > ERROR [CompactionExecutor:8] 2013-03-18 20:58:58,645 CassandraDaemon.java > (line 132) Exception in thread Thread[CompactionExecutor:8, > 1,main] > java.lang.AssertionError: originally calculated column size of 453516860 but > now it is 453517031 > at > org.apache.cassandra.db.compaction.LazilyCompactedRow.write(LazilyCompactedRow.java:135) > at > org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:159) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:163) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:59) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:62) > at > org.apache.cassandra.db.compaction.CompactionManager$7.runMayThrow(CompactionManager.java:422) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) > at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) > at java.util.concurrent.FutureTask.run(Unknown Source) >
[jira] [Commented] (CASSANDRA-5359) AssertionError in HintedHandoff and CompactionExecutor
[ https://issues.apache.org/jira/browse/CASSANDRA-5359?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892784#comment-13892784 ] Jonathan Ellis commented on CASSANDRA-5359: --- As mentioned, CASSANDRA-4180 should fix this in 2.0+. > AssertionError in HintedHandoff and CompactionExecutor > -- > > Key: CASSANDRA-5359 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5359 > Project: Cassandra > Issue Type: Bug >Affects Versions: 1.2.2 > Environment: Centos 6.4 > 3 Nodes > RF 3 > Vnodes > Murmur3Partitioner >Reporter: Joey Imbasciano > > Seeing this on 2 of the 3 nodes pretty consistently. The stack traces seem to > happen consistently together, and I believe they are related. > HintedHandoff > {noformat} > ERROR [HintedHandoff:3] 2013-03-18 20:58:58,643 CassandraDaemon.java (line > 132) Exception in thread Thread[HintedHandoff:3,1,main] > java.lang.RuntimeException: java.util.concurrent.ExecutionException: > java.lang.AssertionError: originally calculated column size of 4 > 53516860 but now it is 453517031 > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpointInternal(HintedHandOffManager.java:406) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:252) > at > org.apache.cassandra.db.HintedHandOffManager.access$300(HintedHandOffManager.java:89) > at > org.apache.cassandra.db.HintedHandOffManager$4.runMayThrow(HintedHandOffManager.java:459) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(Unknown > Source) > at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source) > at java.lang.Thread.run(Unknown Source) > Caused by: java.util.concurrent.ExecutionException: java.lang.AssertionError: > originally calculated column size of 453516860 but now > it is 453517031 > at java.util.concurrent.FutureTask$Sync.innerGet(Unknown Source) > at java.util.concurrent.FutureTask.get(Unknown Source) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpointInternal(HintedHandOffManager.java:402) > ... 7 more > Caused by: java.lang.AssertionError: originally calculated column size of > 453516860 but now it is 453517031 > at > org.apache.cassandra.db.compaction.LazilyCompactedRow.write(LazilyCompactedRow.java:135) > at > org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:159) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:163) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:59) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:62) > at > org.apache.cassandra.db.compaction.CompactionManager$7.runMayThrow(CompactionManager.java:422) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) > at java.util.concurrent.FutureTask$Sync.innerRun(Unknown Source) > at java.util.concurrent.FutureTask.run(Unknown Source) > ... 
3 more > ERROR [CompactionExecutor:8] 2013-03-18 20:58:58,645 CassandraDaemon.java > (line 132) Exception in thread Thread[CompactionExecutor:8, > 1,main] > java.lang.AssertionError: originally calculated column size of 453516860 but > now it is 453517031 > at > org.apache.cassandra.db.compaction.LazilyCompactedRow.write(LazilyCompactedRow.java:135) > at > org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:159) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:163) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:59) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:62) > at > org.apache.cassandra.db.compaction.CompactionManager$7.runMayThrow(CompactionManager.java:422) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at java.util.concurrent.Executors$RunnableAdapter.call(Unknown Source) > at java.util.concurrent.FutureTask$Sync.innerRun(Un
[jira] [Updated] (CASSANDRA-6654) Droppable tombstones are not being removed from LCS table despite being above 20%
[ https://issues.apache.org/jira/browse/CASSANDRA-6654?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6654: -- Assignee: Ryan McGuire > Droppable tombstones are not being removed from LCS table despite being above > 20% > - > > Key: CASSANDRA-6654 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6654 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: 1.2.13 VNodes with murmur3 >Reporter: Keith Wright >Assignee: Ryan McGuire > Attachments: Screen Shot 2014-02-05 at 9.38.20 AM.png > > > JMX is showing that one of our CQL3 LCS tables has a droppable tombstone > ratio above 20% and increasing (currently at 28%). Compactions are not > falling behind and we are using the OOTB setting for this feature so I would > expect not to go above 20% (will attach screen shot from JMX). Table > description: > CREATE TABLE global_user ( > user_id timeuuid, > app_id int, > type text, > name text, > extra_param map, > last timestamp, > paid boolean, > sku_time map, > values map, > PRIMARY KEY (user_id, app_id, type, name) > ) WITH > bloom_filter_fp_chance=0.10 AND > caching='KEYS_ONLY' AND > comment='' AND > dclocal_read_repair_chance=0.00 AND > gc_grace_seconds=86400 AND > read_repair_chance=0.10 AND > replicate_on_write='true' AND > populate_io_cache_on_flush='false' AND > compaction={'sstable_size_in_mb': '160', 'class': > 'LeveledCompactionStrategy'} AND > compression={'chunk_length_kb': '8', 'crc_check_chance': '0.1', > 'sstable_compression': 'LZ4Compressor'}; -- This message was sent by Atlassian JIRA (v6.1.5#6160)
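As a back-of-the-envelope illustration of the metric being discussed: a tombstone counts as "droppable" once its local deletion time is older than gc_grace_seconds, and the ratio is droppable tombstones over total cells. The sketch below is illustrative only; the real estimate comes from per-sstable metadata, not a cell list.
{code}
import java.util.List;

public class DroppableTombstoneSketch
{
    // Each cell is modeled as [tombstoneFlag, localDeletionTimeSeconds].
    static double droppableRatio(List<long[]> cells, long nowSeconds, long gcGraceSeconds)
    {
        long droppable = 0;
        for (long[] cell : cells)
        {
            boolean isTombstone = cell[0] == 1;
            long localDeletionTime = cell[1];
            // Droppable once gc_grace has elapsed since the deletion was recorded.
            if (isTombstone && localDeletionTime + gcGraceSeconds < nowSeconds)
                droppable++;
        }
        return cells.isEmpty() ? 0.0 : (double) droppable / cells.size();
    }
}
{code}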
[jira] [Commented] (CASSANDRA-6654) Droppable tombstones are not being removed from LCS table despite being above 20%
[ https://issues.apache.org/jira/browse/CASSANDRA-6654?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892775#comment-13892775 ] Jonathan Ellis commented on CASSANDRA-6654: --- Ryan, can your team reproduce? > Droppable tombstones are not being removed from LCS table despite being above > 20% > - > > Key: CASSANDRA-6654 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6654 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: 1.2.13 VNodes with murmur3 >Reporter: Keith Wright >Assignee: Ryan McGuire > Attachments: Screen Shot 2014-02-05 at 9.38.20 AM.png > > > JMX is showing that one of our CQL3 LCS tables has a droppable tombstone > ratio above 20% and increasing (currently at 28%). Compactions are not > falling behind and we are using the OOTB setting for this feature so I would > expect not to go above 20% (will attach screen shot from JMX). Table > description: > CREATE TABLE global_user ( > user_id timeuuid, > app_id int, > type text, > name text, > extra_param map, > last timestamp, > paid boolean, > sku_time map, > values map, > PRIMARY KEY (user_id, app_id, type, name) > ) WITH > bloom_filter_fp_chance=0.10 AND > caching='KEYS_ONLY' AND > comment='' AND > dclocal_read_repair_chance=0.00 AND > gc_grace_seconds=86400 AND > read_repair_chance=0.10 AND > replicate_on_write='true' AND > populate_io_cache_on_flush='false' AND > compaction={'sstable_size_in_mb': '160', 'class': > 'LeveledCompactionStrategy'} AND > compression={'chunk_length_kb': '8', 'crc_check_chance': '0.1', > 'sstable_compression': 'LZ4Compressor'}; -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Issue Comment Deleted] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6283: Comment: was deleted (was: I am out of the office from Tuesday Jan 28th to Wednesday Feb 19th. For Hector subjects, please contact Maurice MILLS and Marc AUBRY. For Ted2, Frederic WARMAN and TED2DEV. For Titan, Marc AURY and EQD PRS DEV. This message and any attachments (the "message") is intended solely for the intended addressees and is confidential. If you receive this message in error,or are not the intended recipient(s), please delete it and any copies from your systems and immediately notify the sender. Any unauthorized view, use that does not comply with its purpose, dissemination or disclosure, either whole or partial, is prohibited. Since the internet cannot guarantee the integrity of this message which may not be reliable, BNP PARIBAS (and its subsidiaries) shall not be liable for the message if modified, changed or falsified. Do not print this message unless it is necessary,consider the environment. -- Ce message et toutes les pieces jointes (ci-apres le "message") sont etablis a l'intention exclusive de ses destinataires et sont confidentiels. Si vous recevez ce message par erreur ou s'il ne vous est pas destine, merci de le detruire ainsi que toute copie de votre systeme et d'en avertir immediatement l'expediteur. Toute lecture non autorisee, toute utilisation de ce message qui n'est pas conforme a sa destination, toute diffusion ou toute publication, totale ou partielle, est interdite. L'Internet ne permettant pas d'assurer l'integrite de ce message electronique susceptible d'alteration, BNP Paribas (et ses filiales) decline(nt) toute responsabilite au titre de ce message dans l'hypothese ou il aurait ete modifie, deforme ou falsifie. N'imprimez ce message que si necessaire, pensez a l'environnement. ) > Windows 7 data files keept open / can't be deleted after compaction. > > > Key: CASSANDRA-6283 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Windows 7 (32) / Java 1.7.0.45 >Reporter: Andreas Schnitzerling >Assignee: Joshua McKenzie > Labels: compaction > Fix For: 1.2.15 > > Attachments: leakdetect.patch, screenshot-1.jpg, system.log > > > Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't > help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause > is: Opened file handles seem to be lost and not closed properly. Win 7 > blames, that another process is still using the file (but its obviously > cassandra). Only restart of the server makes the files deleted. But after > heavy using (changes) of tables, there are about 24K files in the data folder > (instead of 35 after every restart) and Cassandra crashes. I experiminted and > I found out, that a finalizer fixes the problem. So after GC the files will > be deleted (not optimal, but working fine). It runs now 2 days continously > without problem. Possible fix/test: > I wrote the following finalizer at the end of class > org.apache.cassandra.io.util.RandomAccessReader: > {code:title=RandomAccessReader.java|borderStyle=solid} > @Override > protected void finalize() throws Throwable { > deallocate(); > super.finalize(); > } > {code} > Can somebody test / develop / patch it? Thx. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892771#comment-13892771 ] Ngoc Minh Vo commented on CASSANDRA-6283: - I am out of the office from Tuesday Jan 28th to Wednesday Feb 19th. For Hector subjects, please contact Maurice MILLS and Marc AUBRY. For Ted2, Frederic WARMAN and TED2DEV. For Titan, Marc AURY and EQD PRS DEV. This message and any attachments (the "message") is intended solely for the intended addressees and is confidential. If you receive this message in error,or are not the intended recipient(s), please delete it and any copies from your systems and immediately notify the sender. Any unauthorized view, use that does not comply with its purpose, dissemination or disclosure, either whole or partial, is prohibited. Since the internet cannot guarantee the integrity of this message which may not be reliable, BNP PARIBAS (and its subsidiaries) shall not be liable for the message if modified, changed or falsified. Do not print this message unless it is necessary,consider the environment. -- Ce message et toutes les pieces jointes (ci-apres le "message") sont etablis a l'intention exclusive de ses destinataires et sont confidentiels. Si vous recevez ce message par erreur ou s'il ne vous est pas destine, merci de le detruire ainsi que toute copie de votre systeme et d'en avertir immediatement l'expediteur. Toute lecture non autorisee, toute utilisation de ce message qui n'est pas conforme a sa destination, toute diffusion ou toute publication, totale ou partielle, est interdite. L'Internet ne permettant pas d'assurer l'integrite de ce message electronique susceptible d'alteration, BNP Paribas (et ses filiales) decline(nt) toute responsabilite au titre de ce message dans l'hypothese ou il aurait ete modifie, deforme ou falsifie. N'imprimez ce message que si necessaire, pensez a l'environnement. > Windows 7 data files keept open / can't be deleted after compaction. > > > Key: CASSANDRA-6283 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Windows 7 (32) / Java 1.7.0.45 >Reporter: Andreas Schnitzerling >Assignee: Joshua McKenzie > Labels: compaction > Fix For: 1.2.15 > > Attachments: leakdetect.patch, screenshot-1.jpg, system.log > > > Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't > help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause > is: Opened file handles seem to be lost and not closed properly. Win 7 > blames, that another process is still using the file (but its obviously > cassandra). Only restart of the server makes the files deleted. But after > heavy using (changes) of tables, there are about 24K files in the data folder > (instead of 35 after every restart) and Cassandra crashes. I experiminted and > I found out, that a finalizer fixes the problem. So after GC the files will > be deleted (not optimal, but working fine). It runs now 2 days continously > without problem. Possible fix/test: > I wrote the following finalizer at the end of class > org.apache.cassandra.io.util.RandomAccessReader: > {code:title=RandomAccessReader.java|borderStyle=solid} > @Override > protected void finalize() throws Throwable { > deallocate(); > super.finalize(); > } > {code} > Can somebody test / develop / patch it? Thx. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Assigned] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis reassigned CASSANDRA-6283: - Assignee: Joshua McKenzie > Windows 7 data files keept open / can't be deleted after compaction. > > > Key: CASSANDRA-6283 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Windows 7 (32) / Java 1.7.0.45 >Reporter: Andreas Schnitzerling >Assignee: Joshua McKenzie > Labels: compaction > Fix For: 1.2.15 > > Attachments: leakdetect.patch, screenshot-1.jpg, system.log > > > Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't > help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause > is: Opened file handles seem to be lost and not closed properly. Win 7 > blames, that another process is still using the file (but its obviously > cassandra). Only restart of the server makes the files deleted. But after > heavy using (changes) of tables, there are about 24K files in the data folder > (instead of 35 after every restart) and Cassandra crashes. I experiminted and > I found out, that a finalizer fixes the problem. So after GC the files will > be deleted (not optimal, but working fine). It runs now 2 days continously > without problem. Possible fix/test: > I wrote the following finalizer at the end of class > org.apache.cassandra.io.util.RandomAccessReader: > {code:title=RandomAccessReader.java|borderStyle=solid} > @Override > protected void finalize() throws Throwable { > deallocate(); > super.finalize(); > } > {code} > Can somebody test / develop / patch it? Thx. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
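For context, a toy illustration of the workaround described in the report: keep a finalizer as a safety net so that a reader whose owner forgot to close it still releases its Windows file handle once the GC collects it. This is a stand-alone example with an ordinary RandomAccessFile, not the real RandomAccessReader, and relying on GC this way only delays deletion rather than fixing the underlying leak.
{code}
import java.io.IOException;
import java.io.RandomAccessFile;

public class GuardedReader implements AutoCloseable
{
    private final RandomAccessFile file;

    public GuardedReader(String path) throws IOException
    {
        this.file = new RandomAccessFile(path, "r");
    }

    @Override
    public void close() throws IOException
    {
        file.close(); // the normal, prompt path
    }

    @Override
    protected void finalize() throws Throwable
    {
        try
        {
            file.close(); // safety net only; close() is idempotent
        }
        finally
        {
            super.finalize();
        }
    }
}
{code}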
[jira] [Commented] (CASSANDRA-6100) Nodetool clearsnapshot throws exception
[ https://issues.apache.org/jira/browse/CASSANDRA-6100?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892768#comment-13892768 ] Jonathan Ellis commented on CASSANDRA-6100: --- looks like CASSANDRA-4050 to me > Nodetool clearsnapshot throws exception > --- > > Key: CASSANDRA-6100 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6100 > Project: Cassandra > Issue Type: Bug > Components: Tools > Environment: Windows >Reporter: Vaibhav Khanduja >Priority: Minor > > Nodetool snapshot option gives a mechanism to create a snapshot. Nodetool > clearsnapshot gives a mechanism to clear the snapshot or delete the hardlinks > created during snapshot. On windows snapshot deletion does not happen as one > of the sstable table file *index* is locked. > The problem is similar to what is reported here: > http://marc.info/?l=cassandra-user&m=133171026922848 > http://mail-archives.apache.org/mod_mbox/cassandra-user/201203.mbox/%3c35177e23-2e1b-4020-a634-1c595502d...@gmail.com%3E -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Resolved] (CASSANDRA-6100) Nodetool clearsnapshot throws exception
[ https://issues.apache.org/jira/browse/CASSANDRA-6100?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-6100. --- Resolution: Duplicate > Nodetool clearsnapshot throws exception > --- > > Key: CASSANDRA-6100 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6100 > Project: Cassandra > Issue Type: Bug > Components: Tools > Environment: Windows >Reporter: Vaibhav Khanduja >Priority: Minor > > Nodetool snapshot option gives a mechanism to create a snapshot. Nodetool > clearsnapshot gives a mechanism to clear the snapshot or delete the hardlinks > created during snapshot. On windows snapshot deletion does not happen as one > of the sstable table file *index* is locked. > The problem is similar to what is reported here: > http://marc.info/?l=cassandra-user&m=133171026922848 > http://mail-archives.apache.org/mod_mbox/cassandra-user/201203.mbox/%3c35177e23-2e1b-4020-a634-1c595502d...@gmail.com%3E -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Assigned] (CASSANDRA-4050) Unable to remove snapshot files on Windows while original sstables are live
[ https://issues.apache.org/jira/browse/CASSANDRA-4050?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis reassigned CASSANDRA-4050: - Assignee: Joshua McKenzie > Unable to remove snapshot files on Windows while original sstables are live > --- > > Key: CASSANDRA-4050 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4050 > Project: Cassandra > Issue Type: Bug > Environment: Windows 7 >Reporter: Jim Newsham >Assignee: Joshua McKenzie >Priority: Minor > > I'm using Cassandra 1.0.8, on Windows 7. When I take a snapshot of the > database, I find that I am unable to delete the snapshot directory (i.e., dir > named "{datadir}\{keyspacename}\snapshots\{snapshottag}") while Cassandra is > running: "The action can't be completed because the folder or a file in it > is open in another program. Close the folder or file and try again" [in > Windows Explorer]. If I terminate Cassandra, then I can delete the directory > with no problem. > I expect to be able to move or delete the snapshotted files while Cassandra > is running, as this should not affect the runtime operation of Cassandra. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-5591) Windows failure renaming LCS json.
[ https://issues.apache.org/jira/browse/CASSANDRA-5591?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892765#comment-13892765 ] Jonathan Ellis commented on CASSANDRA-5591: --- manifest is gone in 2.0+ > Windows failure renaming LCS json. > -- > > Key: CASSANDRA-5591 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5591 > Project: Cassandra > Issue Type: Bug > Components: Core >Affects Versions: 1.2.4 > Environment: Windows >Reporter: Jeremiah Jordan > > Had someone report that on Windows, under load, the LCS json file sometimes > fails to be renamed. > {noformat} > ERROR [CompactionExecutor:1] 2013-05-23 14:43:55,848 CassandraDaemon.java > (line 174) Exception in thread Thread[CompactionExecutor:1,1,main] > java.lang.RuntimeException: Failed to rename C:\development\tools\DataStax > Community\data\data\zzz\zzz\zzz.json to C:\development\tools\DataStax > Community\data\data\zzz\zzz\zzz-old.json > at > org.apache.cassandra.io.util.FileUtils.renameWithConfirm(FileUtils.java:133) > at > org.apache.cassandra.db.compaction.LeveledManifest.serialize(LeveledManifest.java:617) > at > org.apache.cassandra.db.compaction.LeveledManifest.promote(LeveledManifest.java:229) > at > org.apache.cassandra.db.compaction.LeveledCompactionStrategy.handleNotification(LeveledCompactionStrategy.java:155) > at > org.apache.cassandra.db.DataTracker.notifySSTablesChanged(DataTracker.java:410) > at > org.apache.cassandra.db.DataTracker.replaceCompactedSSTables(DataTracker.java:223) > at > org.apache.cassandra.db.ColumnFamilyStore.replaceCompactedSSTables(ColumnFamilyStore.java:991) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:230) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:58) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:60) > at > org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:188) > at > java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:439) > at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) > at java.util.concurrent.FutureTask.run(FutureTask.java:138) > at > java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:895) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:918) > at java.lang.Thread.run(Thread.java:662) > {noformat} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Resolved] (CASSANDRA-5591) Windows failure renaming LCS json.
[ https://issues.apache.org/jira/browse/CASSANDRA-5591?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-5591. --- Resolution: Won't Fix > Windows failure renaming LCS json. > -- > > Key: CASSANDRA-5591 > URL: https://issues.apache.org/jira/browse/CASSANDRA-5591 > Project: Cassandra > Issue Type: Bug > Components: Core >Affects Versions: 1.2.4 > Environment: Windows >Reporter: Jeremiah Jordan > > Had someone report that on Windows, under load, the LCS json file sometimes > fails to be renamed. > {noformat} > ERROR [CompactionExecutor:1] 2013-05-23 14:43:55,848 CassandraDaemon.java > (line 174) Exception in thread Thread[CompactionExecutor:1,1,main] > java.lang.RuntimeException: Failed to rename C:\development\tools\DataStax > Community\data\data\zzz\zzz\zzz.json to C:\development\tools\DataStax > Community\data\data\zzz\zzz\zzz-old.json > at > org.apache.cassandra.io.util.FileUtils.renameWithConfirm(FileUtils.java:133) > at > org.apache.cassandra.db.compaction.LeveledManifest.serialize(LeveledManifest.java:617) > at > org.apache.cassandra.db.compaction.LeveledManifest.promote(LeveledManifest.java:229) > at > org.apache.cassandra.db.compaction.LeveledCompactionStrategy.handleNotification(LeveledCompactionStrategy.java:155) > at > org.apache.cassandra.db.DataTracker.notifySSTablesChanged(DataTracker.java:410) > at > org.apache.cassandra.db.DataTracker.replaceCompactedSSTables(DataTracker.java:223) > at > org.apache.cassandra.db.ColumnFamilyStore.replaceCompactedSSTables(ColumnFamilyStore.java:991) > at > org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:230) > at > org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) > at > org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) > at > org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:58) > at > org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:60) > at > org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:188) > at > java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:439) > at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:303) > at java.util.concurrent.FutureTask.run(FutureTask.java:138) > at > java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:895) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:918) > at java.lang.Thread.run(Thread.java:662) > {noformat} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6590) Gossip does not heal after a temporary partition at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6590?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892699#comment-13892699 ] Brandon Williams edited comment on CASSANDRA-6590 at 2/5/14 10:43 PM: -- I'm not sure why the block in handleMajorStateChange moved, but because the endpoint state is added before that so the check for it will never be null, so it always says the node restarted (and we should keep the 'UP' message there to keep it easy to look for) even though it's the first time it's been seen. I think the if (!localState.isAlive()) check is problematic, because while it got rid of the repeated UP messages, it also seemed to introduce a race situation where sometimes some nodes would end up in a cluster by themselves. I briefly tried making Echo verbs droppable in CASSANDRA-6661 instead, but that didn't help, so I'm not sure why we're seemingly building these requests up, or if something else is making realMarkAlive fire so much. Finally, I think we'll need a separate yaml option, since removing things in a minor is kind of mean to upgraders who don't catch it and their server won't start. was (Author: brandon.williams): I'm not sure why the block in handleMajorStateChange, but because the endpoint state is added before that the check for it will never be null, so it always says the node restarted (and we should keep the 'UP' message there to keep it easy to look for) even though it's the first time it's been seen. I think the if (!localState.isAlive()) check is problematic, because while it got rid of the repeated UP messages, it also seem to introduce a race situation where sometimes some nodes would end up in a cluster by themselves. I briefly tried making Echo verbs droppable in CASSANDRA-6661 instead, but that didn't help, so I'm not sure why we're seemingly building these requests up, or if something else is making realMarkAlive fire so much. Finally, I think we'll need a separate yaml option, since removing things in a minor is kind of mean to upgraders who don't catch it and their server won't start. > Gossip does not heal after a temporary partition at startup > --- > > Key: CASSANDRA-6590 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6590 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Vijay > Fix For: 2.0.6 > > Attachments: 0001-CASSANDRA-6590.patch, 0001-logging-for-6590.patch, > 6590_disable_echo.txt > > > See CASSANDRA-6571 for background. If a node is partitioned on startup when > the echo command is sent, but then the partition heals, the halves of the > partition will never mark each other up despite being able to communicate. > This stems from CASSANDRA-3533. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6590) Gossip does not heal after a temporary partition at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6590?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892699#comment-13892699 ] Brandon Williams commented on CASSANDRA-6590: - I'm not sure why the block in handleMajorStateChange, but because the endpoint state is added before that the check for it will never be null, so it always says the node restarted (and we should keep the 'UP' message there to keep it easy to look for) even though it's the first time it's been seen. I think the if (!localState.isAlive()) check is problematic, because while it got rid of the repeated UP messages, it also seem to introduce a race situation where sometimes some nodes would end up in a cluster by themselves. I briefly tried making Echo verbs droppable in CASSANDRA-6661 instead, but that didn't help, so I'm not sure why we're seemingly building these requests up, or if something else is making realMarkAlive fire so much. Finally, I think we'll need a separate yaml option, since removing things in a minor is kind of mean to upgraders who don't catch it and their server won't start. > Gossip does not heal after a temporary partition at startup > --- > > Key: CASSANDRA-6590 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6590 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Vijay > Fix For: 2.0.6 > > Attachments: 0001-CASSANDRA-6590.patch, 0001-logging-for-6590.patch, > 6590_disable_echo.txt > > > See CASSANDRA-6571 for background. If a node is partitioned on startup when > the echo command is sent, but then the partition heals, the halves of the > partition will never mark each other up despite being able to communicate. > This stems from CASSANDRA-3533. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Reopened] (CASSANDRA-6661) echo verbs should be droppable
[ https://issues.apache.org/jira/browse/CASSANDRA-6661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams reopened CASSANDRA-6661: - > echo verbs should be droppable > -- > > Key: CASSANDRA-6661 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > > Because this: > {noformat} > INFO 21:37:05,611 Handshaking version with /10.208.8.63 > INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 > INFO 21:37:10,615 Handshaking version with /10.208.8.63 > INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 > INFO 21:37:15,628 Handshaking version with /10.208.8.63 > INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 > INFO 21:37:20,629 Handshaking version with /10.208.8.63 > INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 > INFO 21:37:25,631 Handshaking version with /10.208.8.63 > INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 > INFO 21:37:30,632 Handshaking version with /10.208.8.63 > INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 > INFO 21:37:35,641 Handshaking version with /10.208.8.63 > INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 > INFO 21:37:40,642 Handshaking version with /10.208.8.63 > INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 > INFO 21:37:45,644 Handshaking version with /10.208.8.63 > INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 > INFO 21:37:50,651 Handshaking version with /10.208.8.63 > INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 > INFO 21:37:55,655 Handshaking version with /10.208.8.63 > INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 > INFO 21:38:00,657 Handshaking version with /10.208.8.63 > INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 > INFO 21:38:05,658 Handshaking version with /10.208.8.63 > INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 > INFO 21:38:10,660 Handshaking version with /10.208.8.63 > INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 > INFO 21:38:15,740 Handshaking version with /10.208.8.63 > INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 > INFO 21:38:20,742 Handshaking version with /10.208.8.63 > INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 > INFO 21:38:25,746 Handshaking version with /10.208.8.63 > INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 > INFO 21:38:30,747 Handshaking version with /10.208.8.63 > INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 > INFO 21:38:35,749 Handshaking version with /10.208.8.63 > INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 > INFO 21:38:40,750 Handshaking version with /10.208.8.63 > INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress 
/10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:4
[jira] [Resolved] (CASSANDRA-6661) echo verbs should be droppable
[ https://issues.apache.org/jira/browse/CASSANDRA-6661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams resolved CASSANDRA-6661. - Resolution: Duplicate Fix Version/s: (was: 2.0.6) > echo verbs should be droppable > -- > > Key: CASSANDRA-6661 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > > Because this: > {noformat} > INFO 21:37:05,611 Handshaking version with /10.208.8.63 > INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 > INFO 21:37:10,615 Handshaking version with /10.208.8.63 > INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 > INFO 21:37:15,628 Handshaking version with /10.208.8.63 > INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 > INFO 21:37:20,629 Handshaking version with /10.208.8.63 > INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 > INFO 21:37:25,631 Handshaking version with /10.208.8.63 > INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 > INFO 21:37:30,632 Handshaking version with /10.208.8.63 > INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 > INFO 21:37:35,641 Handshaking version with /10.208.8.63 > INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 > INFO 21:37:40,642 Handshaking version with /10.208.8.63 > INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 > INFO 21:37:45,644 Handshaking version with /10.208.8.63 > INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 > INFO 21:37:50,651 Handshaking version with /10.208.8.63 > INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 > INFO 21:37:55,655 Handshaking version with /10.208.8.63 > INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 > INFO 21:38:00,657 Handshaking version with /10.208.8.63 > INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 > INFO 21:38:05,658 Handshaking version with /10.208.8.63 > INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 > INFO 21:38:10,660 Handshaking version with /10.208.8.63 > INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 > INFO 21:38:15,740 Handshaking version with /10.208.8.63 > INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 > INFO 21:38:20,742 Handshaking version with /10.208.8.63 > INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 > INFO 21:38:25,746 Handshaking version with /10.208.8.63 > INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 > INFO 21:38:30,747 Handshaking version with /10.208.8.63 > INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 > INFO 21:38:35,749 Handshaking version with /10.208.8.63 > INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 > INFO 21:38:40,750 Handshaking version with /10.208.8.63 > INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress 
/10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INF
[jira] [Resolved] (CASSANDRA-6661) echo verbs should be droppable
[ https://issues.apache.org/jira/browse/CASSANDRA-6661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams resolved CASSANDRA-6661. - Resolution: Fixed Decided this is easier to handle as part of CASSANDRA-6590 > echo verbs should be droppable > -- > > Key: CASSANDRA-6661 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Fix For: 2.0.6 > > > Because this: > {noformat} > INFO 21:37:05,611 Handshaking version with /10.208.8.63 > INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 > INFO 21:37:10,615 Handshaking version with /10.208.8.63 > INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 > INFO 21:37:15,628 Handshaking version with /10.208.8.63 > INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 > INFO 21:37:20,629 Handshaking version with /10.208.8.63 > INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 > INFO 21:37:25,631 Handshaking version with /10.208.8.63 > INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 > INFO 21:37:30,632 Handshaking version with /10.208.8.63 > INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 > INFO 21:37:35,641 Handshaking version with /10.208.8.63 > INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 > INFO 21:37:40,642 Handshaking version with /10.208.8.63 > INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 > INFO 21:37:45,644 Handshaking version with /10.208.8.63 > INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 > INFO 21:37:50,651 Handshaking version with /10.208.8.63 > INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 > INFO 21:37:55,655 Handshaking version with /10.208.8.63 > INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 > INFO 21:38:00,657 Handshaking version with /10.208.8.63 > INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 > INFO 21:38:05,658 Handshaking version with /10.208.8.63 > INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 > INFO 21:38:10,660 Handshaking version with /10.208.8.63 > INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 > INFO 21:38:15,740 Handshaking version with /10.208.8.63 > INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 > INFO 21:38:20,742 Handshaking version with /10.208.8.63 > INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 > INFO 21:38:25,746 Handshaking version with /10.208.8.63 > INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 > INFO 21:38:30,747 Handshaking version with /10.208.8.63 > INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 > INFO 21:38:35,749 Handshaking version with /10.208.8.63 > INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 > INFO 21:38:40,750 Handshaking version with /10.208.8.63 > INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 
is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:
[jira] [Commented] (CASSANDRA-6656) Exception logging
[ https://issues.apache.org/jira/browse/CASSANDRA-6656?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892620#comment-13892620 ] Mikhail Stepura commented on CASSANDRA-6656: [~d.yuan] My comments * Case1 (Hex.java): Do you really need a full stack trace there? (you use {{error(String, Throwable)}} version) * Case2 (DynamicCompositeType.java) Use parameterized messages instead of strings concatenation (http://www.slf4j.org/faq.html#logging_performance ) * Case3 (NodeProbe.java): Use provided {{PrintStream out}} instead of calling {{System.out}} directly > Exception logging > - > > Key: CASSANDRA-6656 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6656 > Project: Cassandra > Issue Type: Improvement > Components: Core, Tools >Reporter: Ding Yuan >Assignee: Ding Yuan >Priority: Trivial > Fix For: 2.1 > > Attachments: trunk-6656.txt > > > Reporting a few cases where informative exceptions can be silently swallowed. > Attaching a proposed patch. > = > Case 1 > Line: 95, File: "org/apache/cassandra/utils/Hex.java" > An actual failure in the underlying constructor will be lost. > Propose to log it. > {noformat} > try > { > s = stringConstructor.newInstance(0, c.length, c); > + } > + catch (InvocationTargetException ite) { > + // The underlying constructor failed. Unwrapping the > exception. > + logger.info("Underlying constructor throws exception: ", > ite.getCause()); > } > catch (Exception e) > { > // Swallowing as we'll just use a copying constructor > } > return s == null ? new String(c) : s; > {noformat} > == > = > Case 2 > Line: 192, File: "org/apache/cassandra/db/marshal/DynamicCompositeType.java" > The actual cause of comparator error can be lost as it can fail in multiple > locations. > {noformat} > AbstractType comparator = null; > int header = getShortLength(bb); > if ((header & 0x8000) == 0) > { > ByteBuffer value = getBytes(bb, header); > try > { > comparator = TypeParser.parse(ByteBufferUtil.string(value)); > } > catch (Exception e) > { > <--- can fail here > // we'll deal with this below since comparator == null > } > } > else > { > comparator = aliases.get((byte)(header & 0xFF)); > <--- can fail here > } > if (comparator == null) > throw new MarshalException("Cannot find comparator for component > " + i); > {noformat} > Propose to log the exception. > == > = > Case 3 > Line: 239, File: "org/apache/cassandra/tools/NodeProbe.java" > Exception ignored in finally. Propose log them with debug or trace. > {noformat} > 232: finally > 233: { > 234: try > 235: { > 236: ssProxy.removeNotificationListener(runner); > 236: ssProxy.removeNotificationListener(runner); > 237: jmxc.removeConnectionNotificationListener(runner); > 238: } > 239: catch (Throwable ignored) {} > 240: } > {noformat} > Similar case is at line 264 in the same file. > == -- This message was sent by Atlassian JIRA (v6.1.5#6160)
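A small illustration of the three review points above (stack-trace logging, parameterized slf4j messages, and writing to a provided stream instead of System.out). The class and method names are invented for the example; only the slf4j and java.io calls are real APIs.
{code}
import java.io.PrintStream;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

// Hypothetical helper used only to illustrate the review nits above.
public class LoggingStyleExample
{
    private static final Logger logger = LoggerFactory.getLogger(LoggingStyleExample.class);

    private final PrintStream out; // output stream handed in by the caller

    public LoggingStyleExample(PrintStream out)
    {
        this.out = out;
    }

    public void demo(Throwable cause, int component)
    {
        // Case 1: error(String, Throwable) logs the full stack trace; if only the
        // reason matters, log just the message.
        logger.info("Underlying constructor failed: {}", cause.getMessage());

        // Case 2: parameterized message -- the string is only assembled when the
        // level is enabled, unlike "..." + component concatenation.
        logger.debug("Cannot find comparator for component {}", component);

        // Case 3: send user-facing output to the provided stream, not System.out.
        out.println("done");
    }
}
{code}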
[jira] [Updated] (CASSANDRA-6661) echo verbs should be droppable
[ https://issues.apache.org/jira/browse/CASSANDRA-6661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6661: Priority: Minor (was: Major) > echo verbs should be droppable > -- > > Key: CASSANDRA-6661 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Fix For: 2.0.6 > > > Because this: > {noformat} > INFO 21:37:05,611 Handshaking version with /10.208.8.63 > INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 > INFO 21:37:10,615 Handshaking version with /10.208.8.63 > INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 > INFO 21:37:15,628 Handshaking version with /10.208.8.63 > INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 > INFO 21:37:20,629 Handshaking version with /10.208.8.63 > INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 > INFO 21:37:25,631 Handshaking version with /10.208.8.63 > INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 > INFO 21:37:30,632 Handshaking version with /10.208.8.63 > INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 > INFO 21:37:35,641 Handshaking version with /10.208.8.63 > INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 > INFO 21:37:40,642 Handshaking version with /10.208.8.63 > INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 > INFO 21:37:45,644 Handshaking version with /10.208.8.63 > INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 > INFO 21:37:50,651 Handshaking version with /10.208.8.63 > INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 > INFO 21:37:55,655 Handshaking version with /10.208.8.63 > INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 > INFO 21:38:00,657 Handshaking version with /10.208.8.63 > INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 > INFO 21:38:05,658 Handshaking version with /10.208.8.63 > INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 > INFO 21:38:10,660 Handshaking version with /10.208.8.63 > INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 > INFO 21:38:15,740 Handshaking version with /10.208.8.63 > INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 > INFO 21:38:20,742 Handshaking version with /10.208.8.63 > INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 > INFO 21:38:25,746 Handshaking version with /10.208.8.63 > INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 > INFO 21:38:30,747 Handshaking version with /10.208.8.63 > INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 > INFO 21:38:35,749 Handshaking version with /10.208.8.63 > INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 > INFO 21:38:40,750 Handshaking version with /10.208.8.63 > INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress 
/10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO
[jira] [Updated] (CASSANDRA-6661) echo verbs should be droppable
[ https://issues.apache.org/jira/browse/CASSANDRA-6661?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6661: Component/s: Core Since Version: 2.0 beta 1 Fix Version/s: 2.0.6 > echo verbs should be droppable > -- > > Key: CASSANDRA-6661 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams > Fix For: 2.0.6 > > > Because this: > {noformat} > INFO 21:37:05,611 Handshaking version with /10.208.8.63 > INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 > INFO 21:37:10,615 Handshaking version with /10.208.8.63 > INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 > INFO 21:37:15,628 Handshaking version with /10.208.8.63 > INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 > INFO 21:37:20,629 Handshaking version with /10.208.8.63 > INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 > INFO 21:37:25,631 Handshaking version with /10.208.8.63 > INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 > INFO 21:37:30,632 Handshaking version with /10.208.8.63 > INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 > INFO 21:37:35,641 Handshaking version with /10.208.8.63 > INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 > INFO 21:37:40,642 Handshaking version with /10.208.8.63 > INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 > INFO 21:37:45,644 Handshaking version with /10.208.8.63 > INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 > INFO 21:37:50,651 Handshaking version with /10.208.8.63 > INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 > INFO 21:37:55,655 Handshaking version with /10.208.8.63 > INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 > INFO 21:38:00,657 Handshaking version with /10.208.8.63 > INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 > INFO 21:38:05,658 Handshaking version with /10.208.8.63 > INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 > INFO 21:38:10,660 Handshaking version with /10.208.8.63 > INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 > INFO 21:38:15,740 Handshaking version with /10.208.8.63 > INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 > INFO 21:38:20,742 Handshaking version with /10.208.8.63 > INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 > INFO 21:38:25,746 Handshaking version with /10.208.8.63 > INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 > INFO 21:38:30,747 Handshaking version with /10.208.8.63 > INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 > INFO 21:38:35,749 Handshaking version with /10.208.8.63 > INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 > INFO 21:38:40,750 Handshaking version with /10.208.8.63 > INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 
InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP > INFO 21:38:41,454 InetAddress /10.208.8.63 is
[jira] [Created] (CASSANDRA-6661) echo verbs should be droppable
Brandon Williams created CASSANDRA-6661: --- Summary: echo verbs should be droppable Key: CASSANDRA-6661 URL: https://issues.apache.org/jira/browse/CASSANDRA-6661 Project: Cassandra Issue Type: Bug Reporter: Brandon Williams Assignee: Brandon Williams Because this: {noformat} INFO 21:37:05,611 Handshaking version with /10.208.8.63 INFO 21:37:10,611 Cannot handshake version with /10.208.8.63 INFO 21:37:10,615 Handshaking version with /10.208.8.63 INFO 21:37:15,616 Cannot handshake version with /10.208.8.63 INFO 21:37:15,628 Handshaking version with /10.208.8.63 INFO 21:37:20,628 Cannot handshake version with /10.208.8.63 INFO 21:37:20,629 Handshaking version with /10.208.8.63 INFO 21:37:25,630 Cannot handshake version with /10.208.8.63 INFO 21:37:25,631 Handshaking version with /10.208.8.63 INFO 21:37:30,631 Cannot handshake version with /10.208.8.63 INFO 21:37:30,632 Handshaking version with /10.208.8.63 INFO 21:37:35,633 Cannot handshake version with /10.208.8.63 INFO 21:37:35,641 Handshaking version with /10.208.8.63 INFO 21:37:40,641 Cannot handshake version with /10.208.8.63 INFO 21:37:40,642 Handshaking version with /10.208.8.63 INFO 21:37:45,643 Cannot handshake version with /10.208.8.63 INFO 21:37:45,644 Handshaking version with /10.208.8.63 INFO 21:37:50,644 Cannot handshake version with /10.208.8.63 INFO 21:37:50,651 Handshaking version with /10.208.8.63 INFO 21:37:55,651 Cannot handshake version with /10.208.8.63 INFO 21:37:55,655 Handshaking version with /10.208.8.63 INFO 21:38:00,655 Cannot handshake version with /10.208.8.63 INFO 21:38:00,657 Handshaking version with /10.208.8.63 INFO 21:38:05,657 Cannot handshake version with /10.208.8.63 INFO 21:38:05,658 Handshaking version with /10.208.8.63 INFO 21:38:10,658 Cannot handshake version with /10.208.8.63 INFO 21:38:10,660 Handshaking version with /10.208.8.63 INFO 21:38:15,660 Cannot handshake version with /10.208.8.63 INFO 21:38:15,740 Handshaking version with /10.208.8.63 INFO 21:38:20,740 Cannot handshake version with /10.208.8.63 INFO 21:38:20,742 Handshaking version with /10.208.8.63 INFO 21:38:25,742 Cannot handshake version with /10.208.8.63 INFO 21:38:25,746 Handshaking version with /10.208.8.63 INFO 21:38:30,746 Cannot handshake version with /10.208.8.63 INFO 21:38:30,747 Handshaking version with /10.208.8.63 INFO 21:38:35,747 Cannot handshake version with /10.208.8.63 INFO 21:38:35,749 Handshaking version with /10.208.8.63 INFO 21:38:40,749 Cannot handshake version with /10.208.8.63 INFO 21:38:40,750 Handshaking version with /10.208.8.63 INFO 21:38:41,442 InetAddress /10.208.8.63 is now UP INFO 21:38:41,446 InetAddress /10.208.8.63 is now UP INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP INFO 21:38:41,447 InetAddress /10.208.8.63 is now UP INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP INFO 21:38:41,448 InetAddress /10.208.8.63 is now UP INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP INFO 21:38:41,449 InetAddress /10.208.8.63 is now UP INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP INFO 21:38:41,450 InetAddress /10.208.8.63 is now UP INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP INFO 21:38:41,451 InetAddress /10.208.8.63 is now UP INFO 21:38:41,451 InetAddress /10.208.8.63 
is now UP INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP INFO 21:38:41,452 InetAddress /10.208.8.63 is now UP INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP INFO 21:38:41,453 InetAddress /10.208.8.63 is now UP INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP INFO 21:38:41,454 InetAddress /10.208.8.63 is now UP INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP INFO 21:38:41,456 InetAddress /10.208.8.63 is now UP INFO 21:38:41,456 InetAddress /10.208.8.63 is now UP INFO 21:38:41,455 InetAddress /10.208.8.63 is now UP {noformat} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892585#comment-13892585 ] Aleksey Yeschenko commented on CASSANDRA-6561: -- [~slebresne] now that 6623 is +1d, can you commit it and rebase 6561 before I review it? I'll go review CASSANDRA-4851 for now. > Static columns in CQL3 > -- > > Key: CASSANDRA-6561 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 > Project: Cassandra > Issue Type: New Feature >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne > Fix For: 2.0.6 > > > I'd like to suggest the following idea for adding "static" columns to CQL3. > I'll note that the basic idea has been suggested by jhalliday on irc but the > rest of the details are mine and I should be blamed for anything stupid in > what follows. > Let me start with a rational: there is 2 main family of CF that have been > historically used in Thrift: static ones and dynamic ones. CQL3 handles both > family through the presence or not of clustering columns. There is however > some cases where mixing both behavior has its use. I like to think of those > use cases as 3 broad category: > # to denormalize small amounts of not-entirely-static data in otherwise > static entities. It's say "tags" for a product or "custom properties" in a > user profile. This is why we've added CQL3 collections. Importantly, this is > the *only* use case for which collections are meant (which doesn't diminishes > their usefulness imo, and I wouldn't disagree that we've maybe not > communicated this too well). > # to optimize fetching both a static entity and related dynamic ones. Say you > have blog posts, and each post has associated comments (chronologically > ordered). *And* say that a very common query is "fetch a post and its 50 last > comments". In that case, it *might* be beneficial to store a blog post > (static entity) in the same underlying CF than it's comments for performance > reason. So that "fetch a post and it's 50 last comments" is just one slice > internally. > # you want to CAS rows of a dynamic partition based on some partition > condition. This is the same use case than why CASSANDRA-5633 exists for. > As said above, 1) is already covered by collections, but 2) and 3) are not > (and > I strongly believe collections are not the right fit, API wise, for those). > Also, note that I don't want to underestimate the usefulness of 2). In most > cases, using a separate table for the blog posts and the comments is The > Right Solution, and trying to do 2) is premature optimisation. Yet, when used > properly, that kind of optimisation can make a difference, so I think having > a relatively native solution for it in CQL3 could make sense. > Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have > the feeling that static columns actually are a more natural approach (in term > of API). That's arguably more of a personal opinion/feeling though. > So long story short, CQL3 lacks a way to mix both some "static" and "dynamic" > rows in the same partition of the same CQL3 table, and I think such a tool > could have it's use. > The proposal is thus to allow "static" columns. Static columns would only > make sense in table with clustering columns (the "dynamic" ones). A static > column value would be static to the partition (all rows of the partition > would share the value for such column). 
The syntax would just be: > {noformat} > CREATE TABLE t ( > k text, > s text static, > i int, > v text, > PRIMARY KEY (k, i) > ) > {noformat} > then you'd get: > {noformat} > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm shared", 0, "foo"); > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm still shared", 1, "bar"); > SELECT * FROM t; > k | s | i |v > > k0 | "I'm still shared" | 0 | "bar" > k0 | "I'm still shared" | 1 | "foo" > {noformat} > There would be a few semantic details to decide on regarding deletions, ttl, > etc. but let's see if we agree it's a good idea first before ironing those > out. > One last point is the implementation. Though I do think this idea has merits, > it's definitively not useful enough to justify rewriting the storage engine > for it. But I think we can support this relatively easily (emphasis on > "relatively" :)), which is probably the main reason why I like the approach. > Namely, internally, we can store static columns as cells whose clustering > column values are empty. So in terms of cells, the partition of my example > would look like: > {noformat} > "k0" : [ > (:"s" -> "I'm still shared"), // the static column > (0:"" -> "") // row marker > (0:"v" -> "bar") > (1:"" -> "") // row marker > (1:"v
[jira] [Commented] (CASSANDRA-6623) Null in a cell caused by expired TTL does not work with IF clause (in CQL3)
[ https://issues.apache.org/jira/browse/CASSANDRA-6623?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892577#comment-13892577 ] Aleksey Yeschenko commented on CASSANDRA-6623: -- LGTM Nits: - I'd rather see {code}if (!(c != null && c.isLive(now) && c.value().equals(e.value()))){code} rewritten as {code}if (c == null || c.isMarkedForDelete(now) || !c.value().equals(e.value())){code} in ThriftCASConditions (as it is more or less in ColumnsConditions, apparently) - hasLiveColumns() in ColumnsCondition is dead code - ByteBufferUtil in ModificationStatement; ColumnNameBuilder, NamesQueryFilter, and SliceQueryFilter in SP are now unused imports > Null in a cell caused by expired TTL does not work with IF clause (in CQL3) > --- > > Key: CASSANDRA-6623 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6623 > Project: Cassandra > Issue Type: Bug > Components: Tests > Environment: One cluster with two nodes on a Linux and a Windows > system. cqlsh 4.1.0 | Cassandra 2.0.4 | CQL spec 3.1.1 | Thrift protocol > 19.39.0. CQL3 Column Family >Reporter: Csaba Seres >Assignee: Sylvain Lebresne >Priority: Minor > Fix For: 2.0.6 > > Attachments: 6623.txt > > > IF onecell=null clause does not work if the onecell has got its null value > from an expired TTL. If onecell is updated with null value (UPDATE) then IF > onecell=null works fine. > This bug is not present when you create a table with COMPACT STORAGE > directive. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
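For the record, the two forms in the nit above are logically equivalent by De Morgan, given that isMarkedForDelete(now) is the negation of isLive(now). The small self-contained check below treats the three predicates as independent booleans and verifies the two forms agree on every combination; it is illustrative only and uses no Cassandra types.
{code}
// Illustrative truth-table check of the rewrite suggested above.
public class ConditionRewriteCheck
{
    public static void main(String[] args)
    {
        for (int mask = 0; mask < 8; mask++)
        {
            boolean isNull = (mask & 1) != 0;      // c == null
            boolean isLive = (mask & 2) != 0;      // c.isLive(now); marked-for-delete is its negation
            boolean valuesEqual = (mask & 4) != 0; // c.value().equals(e.value())

            boolean original = !(!isNull && isLive && valuesEqual);
            boolean rewrite = isNull || !isLive || !valuesEqual;

            if (original != rewrite)
                throw new AssertionError("forms disagree for mask " + mask);
        }
        System.out.println("both forms agree on all 8 combinations");
    }
}
{code}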
[jira] [Commented] (CASSANDRA-6660) Make node tool command take a password file
[ https://issues.apache.org/jira/browse/CASSANDRA-6660?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892570#comment-13892570 ] Brandon Williams commented on CASSANDRA-6660: - Historically (CASSANDRA-5316, CASSANDRA-6279) we've felt that if you need to get this fancy, it's time to be using your own tool, but I wouldn't be against adding it if we had a patch. Probably the best way to do this is with a simple property file that can be specified on the command line. > Make node tool command take a password file > --- > > Key: CASSANDRA-6660 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6660 > Project: Cassandra > Issue Type: Improvement >Reporter: Vishy Kasar > > We are sending the jmx password in the clear to the node tool command in > production. This is a security risk. Any one doing a 'ps' can see the clear > password. Can we change the node tool command to also take a password file > argument. This file will list the JMX user and passwords. Example below: > cat /cassandra/run/10003004.jmxpasswd > monitorRole abc > controlRole def > Based on the user name provided, node tool can pick up the right password. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
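A rough sketch of the "simple property file" idea suggested above. The whitespace-separated user/password format from the description parses naturally with java.util.Properties; the class name and any command-line flag for pointing nodetool at such a file are assumptions for illustration, not an existing option.
{code}
import java.io.FileInputStream;
import java.io.IOException;
import java.util.Properties;

// Hypothetical helper: look up a JMX password for a user from a local file
// such as /cassandra/run/10003004.jmxpasswd ("monitorRole abc" per line).
public class JmxPasswordFile
{
    public static String lookup(String path, String user) throws IOException
    {
        Properties props = new Properties();
        try (FileInputStream in = new FileInputStream(path))
        {
            props.load(in); // whitespace between key and value is accepted
        }
        return props.getProperty(user); // null if the user is not listed
    }

    public static void main(String[] args) throws IOException
    {
        // e.g. java JmxPasswordFile /cassandra/run/10003004.jmxpasswd monitorRole
        System.out.println(lookup(args[0], args[1]));
    }
}
{code}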
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Attachment: 6658-v2.txt v2 removes the constant CASSANDRA-4925 added and uses TimeUnit instead. > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Attachments: 6658-v2.txt, 6658.txt > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
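An illustrative before/after for the TimeUnit change described in the comment above; the constant name and the one-second value are placeholders, not the actual code the patch touches.
{code}
import java.util.concurrent.TimeUnit;

public class IntervalExample
{
    // before (hypothetical): a hand-written millisecond constant
    // private static final int INTERVAL_IN_MS = 1000;

    // after: derive the value with TimeUnit where it is needed
    private static final long INTERVAL_IN_MS = TimeUnit.SECONDS.toMillis(1);

    public static void main(String[] args)
    {
        System.out.println(INTERVAL_IN_MS); // 1000
    }
}
{code}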
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892512#comment-13892512 ] Nicolas Favre-Felix commented on CASSANDRA-6561: Thanks Sylvain. One more thing: it seems that the "static" suffix is not currently added to the column definition printed by "DESCRIBE TABLE foo;" > Static columns in CQL3 > -- > > Key: CASSANDRA-6561 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 > Project: Cassandra > Issue Type: New Feature >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne > Fix For: 2.0.6 > > > I'd like to suggest the following idea for adding "static" columns to CQL3. > I'll note that the basic idea has been suggested by jhalliday on irc but the > rest of the details are mine and I should be blamed for anything stupid in > what follows. > Let me start with a rational: there is 2 main family of CF that have been > historically used in Thrift: static ones and dynamic ones. CQL3 handles both > family through the presence or not of clustering columns. There is however > some cases where mixing both behavior has its use. I like to think of those > use cases as 3 broad category: > # to denormalize small amounts of not-entirely-static data in otherwise > static entities. It's say "tags" for a product or "custom properties" in a > user profile. This is why we've added CQL3 collections. Importantly, this is > the *only* use case for which collections are meant (which doesn't diminishes > their usefulness imo, and I wouldn't disagree that we've maybe not > communicated this too well). > # to optimize fetching both a static entity and related dynamic ones. Say you > have blog posts, and each post has associated comments (chronologically > ordered). *And* say that a very common query is "fetch a post and its 50 last > comments". In that case, it *might* be beneficial to store a blog post > (static entity) in the same underlying CF than it's comments for performance > reason. So that "fetch a post and it's 50 last comments" is just one slice > internally. > # you want to CAS rows of a dynamic partition based on some partition > condition. This is the same use case than why CASSANDRA-5633 exists for. > As said above, 1) is already covered by collections, but 2) and 3) are not > (and > I strongly believe collections are not the right fit, API wise, for those). > Also, note that I don't want to underestimate the usefulness of 2). In most > cases, using a separate table for the blog posts and the comments is The > Right Solution, and trying to do 2) is premature optimisation. Yet, when used > properly, that kind of optimisation can make a difference, so I think having > a relatively native solution for it in CQL3 could make sense. > Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have > the feeling that static columns actually are a more natural approach (in term > of API). That's arguably more of a personal opinion/feeling though. > So long story short, CQL3 lacks a way to mix both some "static" and "dynamic" > rows in the same partition of the same CQL3 table, and I think such a tool > could have it's use. > The proposal is thus to allow "static" columns. Static columns would only > make sense in table with clustering columns (the "dynamic" ones). A static > column value would be static to the partition (all rows of the partition > would share the value for such column). 
The syntax would just be: > {noformat} > CREATE TABLE t ( > k text, > s text static, > i int, > v text, > PRIMARY KEY (k, i) > ) > {noformat} > then you'd get: > {noformat} > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm shared", 0, "foo"); > INSERT INTO t(k, s, i, v) VALUES ("k0", "I'm still shared", 1, "bar"); > SELECT * FROM t; > k | s | i |v > > k0 | "I'm still shared" | 0 | "bar" > k0 | "I'm still shared" | 1 | "foo" > {noformat} > There would be a few semantic details to decide on regarding deletions, ttl, > etc. but let's see if we agree it's a good idea first before ironing those > out. > One last point is the implementation. Though I do think this idea has merits, > it's definitively not useful enough to justify rewriting the storage engine > for it. But I think we can support this relatively easily (emphasis on > "relatively" :)), which is probably the main reason why I like the approach. > Namely, internally, we can store static columns as cells whose clustering > column values are empty. So in terms of cells, the partition of my example > would look like: > {noformat} > "k0" : [ > (:"s" -> "I'm still shared"), // the static column > (0:"" -> "") // row marker > (0:"v" -> "bar") > (1:"" -> "")
[jira] [Commented] (CASSANDRA-6622) Streaming session failures during node replace using replace_address
[ https://issues.apache.org/jira/browse/CASSANDRA-6622?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892506#comment-13892506 ] Ravi Prasad commented on CASSANDRA-6622: I'm seeing FailureDetector notifying listeners every second invoked through GossiperTask's doStatusCheck(). Tested sleeping for RING_DELAY (instead of BROADCAST_INTERVAL) before bootstrap, works without any stream session closure. > Streaming session failures during node replace using replace_address > > > Key: CASSANDRA-6622 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6622 > Project: Cassandra > Issue Type: Bug > Environment: RHEL6, cassandra-2.0.4 >Reporter: Ravi Prasad >Assignee: Brandon Williams > Attachments: 0001-don-t-signal-restart-of-dead-states.txt, > 6622-2.0.txt, logs.tgz > > > When using replace_address, Gossiper ApplicationState is set to hibernate, > which is a down state. We are seeing that the peer nodes are seeing streaming > plan request even before the Gossiper on them marks the replacing node as > dead. As a result, streaming on peer nodes convicts the replacing node by > closing the stream handler. > I think, making the StorageService thread on the replacing node, sleep for > BROADCAST_INTERVAL before bootstrapping, would avoid this scenario. > Relevant logs from peer node (see that the Gossiper on peer node mark the > replacing node as down, 2 secs after the streaming init request): > {noformat} > INFO [STREAM-INIT-/x.x.x.x:46436] 2014-01-26 20:42:24,388 > StreamResultFuture.java (line 116) [Stream > #5c6cd940-86ca-11e3-90a0-411b913c0e88] Received streaming plan for Bootstrap > > INFO [GossipTasks:1] 2014-01-26 20:42:25,240 StreamResultFuture.java (line > 181) [Stream #5c6cd940-86ca-11e3-90a0-411b913c0e88] Session with /x.x.x.x is > complete > WARN [GossipTasks:1] 2014-01-26 20:42:25,240 StreamResultFuture.java (line > 210) [Stream #5c6cd940-86ca-11e3-90a0-411b913c0e88] Stream failed > INFO [GossipStage:1] 2014-01-26 20:42:25,242 Gossiper.java (line 850) > InetAddress /x.x.x.x is now DOWN > ERROR [STREAM-IN-/x.x.x.x] 2014-01-26 20:42:25,766 StreamSession.java (line > 410) [Stream #5c6cd940-86ca-11e3-90a0-411b913c0e88] Streaming error occurred > java.lang.RuntimeException: Outgoing stream handler has been closed > at > org.apache.cassandra.streaming.ConnectionHandler.sendMessage(ConnectionHandler.java:175) > at > org.apache.cassandra.streaming.StreamSession.prepare(StreamSession.java:436) > at > org.apache.cassandra.streaming.StreamSession.messageReceived(StreamSession.java:358) > at > org.apache.cassandra.streaming.ConnectionHandler$IncomingMessageHandler.run(ConnectionHandler.java:293) > at java.lang.Thread.run(Thread.java:722) > INFO [STREAM-IN-/x.x.x.x] 2014-01-26 20:42:25,768 StreamResultFuture.java > (line 181) [Stream #5c6cd940-86ca-11e3-90a0-411b913c0e88] Session with > /x.x.x.x is complete > WARN [STREAM-IN-/x.x.x.x] 2014-01-26 20:42:25,768 StreamResultFuture.java > (line 210) [Stream #5c6cd940-86ca-11e3-90a0-411b913c0e88] Stream failed > {noformat} -- This message was sent by Atlassian JIRA (v6.1.5#6160)
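A sketch of the workaround reported above: pausing for RING_DELAY (rather than BROADCAST_INTERVAL) before starting bootstrap so that peers' gossip and failure-detector state can settle. The class is a stand-in, and exactly where the sleep would go in StorageService is an assumption; only the cassandra.ring_delay_ms property name and its 30s default come from the project.
{code}
import java.util.concurrent.TimeUnit;

public class ReplaceAddressDelayExample
{
    // stand-in for StorageService.RING_DELAY (30s default, overridable via -Dcassandra.ring_delay_ms)
    private static final long RING_DELAY_MS = Long.getLong("cassandra.ring_delay_ms", 30_000L);

    public static void sleepBeforeBootstrap()
    {
        try
        {
            // give peers time to process the replacing node's 'hibernate' state
            // before any streaming plan is sent
            TimeUnit.MILLISECONDS.sleep(RING_DELAY_MS);
        }
        catch (InterruptedException e)
        {
            Thread.currentThread().interrupt();
        }
        // ... bootstrap / streaming would start here
    }
}
{code}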
[jira] [Commented] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892488#comment-13892488 ] Anne Sullivan commented on CASSANDRA-6528: -- We can reproduce the tombstone_fail_threshold exception on HintedHandoff replay consistently. We're using C* 2.0.4. NodeB goes down, and NodeA starts storing hints for it. Around 400K hints are written, and then NodeB comes back online. NodeA starts replaying hints for NodeB. It replays >100K hints, and then NodeB goes down again. When NodeB comes back online, the next HH replay fails with the tombstone_fail_threshold error. > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) 
> {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892475#comment-13892475 ] Machiel Groeneveld commented on CASSANDRA-6528: --- Only doing inserts (query below), no updates. Not sure about inserting null values, will get back on that. BEGIN BATCH insert into visits ( id, cookie_uuid, uuid_hash, default_cpc, cookie_uuid, external_click_id, session_id, visitor_ip, user_agent, shop_product_id, channel_id, shop_id, shop_category_id, type, medium_id, campaign_id, channel_affiliate_id, disabled, has_referer, known_visitor, marketing, created_at, updated_at, time_id) VALUES(?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?) USING TTL 7776000 insert into visits_by_cookie (visit_id, time_id, cookie_uuid, shop_id, created_at, enabled_visit) VALUES(?, ?, ?, ?, ?, ?) USING TTL 7776000 insert into visits_by_hash (visit_id, time_id, uuid_hash, shop_id, created_at) VALUES(?, ?, ?, ?, ?) USING TTL 7776000 APPLY BATCH > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6660) Make node tool command take a password file
Vishy Kasar created CASSANDRA-6660: -- Summary: Make node tool command take a password file Key: CASSANDRA-6660 URL: https://issues.apache.org/jira/browse/CASSANDRA-6660 Project: Cassandra Issue Type: Improvement Reporter: Vishy Kasar We are sending the JMX password in the clear to the nodetool command in production. This is a security risk. Anyone doing a 'ps' can see the clear password. Can we change the nodetool command to also take a password file argument? This file will list the JMX users and their passwords. Example below: cat /cassandra/run/10003004.jmxpasswd monitorRole abc controlRole def Based on the user name provided, nodetool can pick up the right password. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
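A minimal sketch of how such a password file could be consumed (a hypothetical helper, not existing nodetool code), assuming the {{user password}} per-line format shown in the example above:
{code}
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.Files;
import java.nio.file.Paths;

// Read a JMX password file ("user password" per line, as in the example above) and
// return the password for the requested user, so it never has to appear on the
// command line where 'ps' would expose it.
public class JmxPasswordFile
{
    public static String passwordFor(String passwordFilePath, String jmxUser) throws IOException
    {
        for (String line : Files.readAllLines(Paths.get(passwordFilePath), StandardCharsets.UTF_8))
        {
            String trimmed = line.trim();
            if (trimmed.isEmpty() || trimmed.startsWith("#"))
                continue; // skip blanks and comments
            String[] parts = trimmed.split("\\s+", 2);
            if (parts.length == 2 && parts[0].equals(jmxUser))
                return parts[1];
        }
        throw new IllegalArgumentException("No password found for JMX user " + jmxUser);
    }
}
{code}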
[jira] [Commented] (CASSANDRA-4757) Expose bulk loading progress/status over jmx
[ https://issues.apache.org/jira/browse/CASSANDRA-4757?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892474#comment-13892474 ] Nick Bailey commented on CASSANDRA-4757: Patch LGTM. Haven't tested it out though. > Expose bulk loading progress/status over jmx > > > Key: CASSANDRA-4757 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4757 > Project: Cassandra > Issue Type: Improvement >Reporter: Nick Bailey >Assignee: Tyler Hobbs >Priority: Minor > Labels: lhf > Fix For: 2.0.6 > > Attachments: 4757-2.0.patch, CASSANDRA-4757.txt, CASSANDRA-4757v2.txt > > > The bulk loading interface should be exposing some progress or status > information over jmx. This shouldn't be too difficult and should be exposed > in a way that the information is available whether you are using the separate > sstableloader utility or calling the bulkload jmx call. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6659) Allow "intercepting" query by user provided custom classes
[ https://issues.apache.org/jira/browse/CASSANDRA-6659?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6659: -- Reviewer: Benjamin Coverston Tagging [~bcoverston] for review. > Allow "intercepting" query by user provided custom classes > -- > > Key: CASSANDRA-6659 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6659 > Project: Cassandra > Issue Type: Improvement >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne >Priority: Minor > Attachments: 6659.txt > > > The idea for this ticket is to abstract the main execution methods of > QueryProcessor into an interface, something like: > {noformat} > public interface QueryHandler > { > public ResultSet process(String query, QueryState state, QueryOptions > options); > public ResultMessage.Prepared prepare(String query, QueryState state); > public ResultSet processPrepared(CQLStatement statement, QueryState > state, QueryOptions options); > public ResultSet processBatch(BatchStatement statement, QueryState state, > BatchQueryOptions options); > } > {noformat} > and to allow users to provide a specific class of their own (implementing > said interface) to which the native protocol would handoff queries to (so by > default queries would go to QueryProcessor, but you would have a way to use a > custom class instead). > A typical use case for that could be to allow some form of custom logging of > incoming queries and/or of their results. But this could probably also have > some application for testing as one could have a handler that completely > bypass QueryProcessor if you want, say, do perf regression tests for a given > driver (and don't want to actually execute the query as you're perf testing > the driver, not C*) without needing to patch the sources. Those being just > examples, the mechanism is generic enough to allow for other ideas. > Most importantly, it requires very little code in C*. As for how users would > register their "handler", it can be as simple as a startup flag indicating > the class to use, or a yaml setting, or both. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892450#comment-13892450 ] Jonathan Ellis commented on CASSANDRA-6658: --- Nit: shouldn't we use TimeUnit to convert instead of hardcoding extra constants? > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Attachments: 6658.txt > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6651) Repair hanging
[ https://issues.apache.org/jira/browse/CASSANDRA-6651?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6651: -- Reproduced In: 2.0.3 Fix Version/s: (was: 2.0.3) Assignee: Yuki Morishita > Repair hanging > -- > > Key: CASSANDRA-6651 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6651 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Eitan Eibschutz >Assignee: Yuki Morishita > > Hi, > We have a 12 node cluster in PROD environment and we've noticed that repairs > are never finishing. The behavior that we've observed is that a repair > process will run until at some point it hangs and no other processing is > happening. > For example, at the moment, I have a repair process that has been running for > two days and not finishing: > nodetool tpstats is showing 2 active and 2 pending AntiEntropySessions > nodetool compactionstats is showing: > pending tasks: 0 > Active compaction remaining time :n/a > nodetools netstats is showing: > Mode: NORMAL > Not sending any streams. > Read Repair Statistics: > Attempted: 0 > Mismatch (Blocking): 142110 > Mismatch (Background): 0 > Pool NameActive Pending Completed > Commandsn/a 0 107589657 > Responses n/a 0 116430785 > The last entry that I see in the log is: > INFO [AntiEntropySessions:18] 2014-02-03 04:01:39,145 RepairJob.java (line > 116) [repair #ae78c6c0-8c2b-11e3-b950-c3b81a36bc9b] requesting merkle trees > for MyCF (to [/x.x.x.x, /y.y.y.y, /z.z.z.z]) > The repair started at 4am so it stopped after 1:40 minute. > On node y.y.y.y I can see this in the log: > INFO [MiscStage:1] 2014-02-03 04:01:38,985 ColumnFamilyStore.java (line 740) > Enqueuing flush of Memtable-MyCF@1290890489(2176/5931 serialized/live bytes, > 32 ops) > INFO [FlushWriter:411] 2014-02-03 04:01:38,986 Memtable.java (line 333) > Writing Memtable-MyCF@1290890489(2176/5931 serialized/live bytes, 32 ops) > INFO [FlushWriter:411] 2014-02-03 04:01:39,048 Memtable.java (line 373) > Completed flushing > /var/lib/cassandra/main-db/data/MyKS/MyCF/MyKS-MyCF-jb-518-Data.db (1789 > bytes) for commitlog position ReplayPosition(segmentId=1390437013339, > position=21868792) > INFO [ScheduledTasks:1] 2014-02-03 05:00:04,794 ColumnFamilyStore.java (line > 740) Enqueuing flush of Memtable-compaction_history@1649414699(1635/17360 > serialized/live bytes, 42 ops) > So for some reason the merkle tree for this CF is never sent back to the node > being repaired and it's hanging. > I've also noticed that sometimes, restarting node y.y.y.y will cause the > repair to resume. > Another observation is that sometimes when restarting y.y.y.y it will not > start with these errors: > ERROR 16:34:18,485 Exception encountered during startup > java.lang.IllegalStateException: Unfinished compactions reference missing > sstables. This should never happen since compactions are marked finished > before we start removing the old sstables. > at > org.apache.cassandra.db.ColumnFamilyStore.removeUnfinishedCompactionLeftovers(ColumnFamilyStore.java:495) > at > org.apache.cassandra.service.CassandraDaemon.setup(CassandraDaemon.java:264) > at > org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:461) > at > org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:504) > java.lang.IllegalStateException: Unfinished compactions reference missing > sstables. This should never happen since compactions are marked finished > before we start removing the old sstables. 
> at > org.apache.cassandra.db.ColumnFamilyStore.removeUnfinishedCompactionLeftovers(ColumnFamilyStore.java:495) > at > org.apache.cassandra.service.CassandraDaemon.setup(CassandraDaemon.java:264) > at > org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:461) > at > org.apache.cassandra.service.CassandraDaemon.main(CassandraDaemon.java:504) > Exception encountered during startup: Unfinished compactions reference > missing sstables. This should never happen since compactions are marked > finished before we start removing the old sstables. > And it will only restart after manually cleaning the compactions_in-progress > folder. > I'm not sure if these two issues are related but we've seen both on all the > nodes in our cluster. > I'll be happy to provide more info if needed as we are not sure what could > cause this behavior. > Another thing in our environment is that some of the Cassandra nodes have > more than one network interface and RPC is listening on 0.0.0.0, not sure if > it has anything to do with this. > Thanks, > Eitan -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6656) Exception logging
[ https://issues.apache.org/jira/browse/CASSANDRA-6656?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-6656: -- Reviewer: Mikhail Stepura Priority: Trivial (was: Major) Fix Version/s: 2.1 Assignee: Ding Yuan Can you review, [~mishail]? > Exception logging > - > > Key: CASSANDRA-6656 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6656 > Project: Cassandra > Issue Type: Improvement > Components: Core, Tools >Reporter: Ding Yuan >Assignee: Ding Yuan >Priority: Trivial > Fix For: 2.1 > > Attachments: trunk-6656.txt > > > Reporting a few cases where informative exceptions can be silently swallowed. > Attaching a proposed patch. > = > Case 1 > Line: 95, File: "org/apache/cassandra/utils/Hex.java" > An actual failure in the underlying constructor will be lost. > Propose to log it. > {noformat} > try > { > s = stringConstructor.newInstance(0, c.length, c); > + } > + catch (InvocationTargetException ite) { > + // The underlying constructor failed. Unwrapping the > exception. > + logger.info("Underlying constructor throws exception: ", > ite.getCause()); > } > catch (Exception e) > { > // Swallowing as we'll just use a copying constructor > } > return s == null ? new String(c) : s; > {noformat} > == > = > Case 2 > Line: 192, File: "org/apache/cassandra/db/marshal/DynamicCompositeType.java" > The actual cause of comparator error can be lost as it can fail in multiple > locations. > {noformat} > AbstractType comparator = null; > int header = getShortLength(bb); > if ((header & 0x8000) == 0) > { > ByteBuffer value = getBytes(bb, header); > try > { > comparator = TypeParser.parse(ByteBufferUtil.string(value)); > } > catch (Exception e) > { > <--- can fail here > // we'll deal with this below since comparator == null > } > } > else > { > comparator = aliases.get((byte)(header & 0xFF)); > <--- can fail here > } > if (comparator == null) > throw new MarshalException("Cannot find comparator for component > " + i); > {noformat} > Propose to log the exception. > == > = > Case 3 > Line: 239, File: "org/apache/cassandra/tools/NodeProbe.java" > Exception ignored in finally. Propose log them with debug or trace. > {noformat} > 232: finally > 233: { > 234: try > 235: { > 236: ssProxy.removeNotificationListener(runner); > 236: ssProxy.removeNotificationListener(runner); > 237: jmxc.removeConnectionNotificationListener(runner); > 238: } > 239: catch (Throwable ignored) {} > 240: } > {noformat} > Similar case is at line 264 in the same file. > == -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6657) Log the newsize value alongside the heap size at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6657?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892441#comment-13892441 ] Jonathan Ellis commented on CASSANDRA-6657: --- Suspect you will need to find the MemoryPoolMBean corresponding to young gen. > Log the newsize value alongside the heap size at startup > > > Key: CASSANDRA-6657 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6657 > Project: Cassandra > Issue Type: Wish > Components: Core >Reporter: Jeremy Hanna >Assignee: Lyuben Todorov >Priority: Trivial > > It would be nice to have the newsize value logged alongside the heap size at > startup to more easily track down problems. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
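A sketch of that suggestion using the standard java.lang.management API (the pool-name matching is a heuristic since young-gen pool names differ by collector, and Eden is only part of the new gen, which also includes the survivor spaces):
{code}
import java.lang.management.ManagementFactory;
import java.lang.management.MemoryPoolMXBean;

// Log the young-gen (Eden) pool size alongside the heap size at startup.
public class LogNewSize
{
    public static void main(String[] args)
    {
        long heapMax = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
        long edenMax = -1;
        for (MemoryPoolMXBean pool : ManagementFactory.getMemoryPoolMXBeans())
        {
            // "Par Eden Space" (ParNew/CMS), "PS Eden Space" (ParallelGC), "G1 Eden Space" (G1), ...
            if (pool.getName().contains("Eden"))
                edenMax = pool.getUsage().getMax();
        }
        System.out.printf("Heap size: %d bytes; young gen (Eden) size: %d bytes%n", heapMax, edenMax);
    }
}
{code}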
[jira] [Assigned] (CASSANDRA-6657) Log the newsize value alongside the heap size at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6657?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis reassigned CASSANDRA-6657: - Assignee: Lyuben Todorov > Log the newsize value alongside the heap size at startup > > > Key: CASSANDRA-6657 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6657 > Project: Cassandra > Issue Type: Wish > Components: Core >Reporter: Jeremy Hanna >Assignee: Lyuben Todorov >Priority: Trivial > > It would be nice to have the newsize value logged alongside the heap size at > startup to more easily track down problems. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-6360: --- Attachment: 6360-2.0-v2.patch v2 patch (and [branch|https://github.com/thobbs/cassandra/tree/CASSANDRA-6360-rebase]) is rebased for the latest cassandra-2.0. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > Attachments: 6360-2.0-v2.patch, 6360-2.0.patch > > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6659) Allow "intercepting" query by user provided custom classes
[ https://issues.apache.org/jira/browse/CASSANDRA-6659?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Sylvain Lebresne updated CASSANDRA-6659: Attachment: 6659.txt Attaching a fairly trivial patch to implement this (the patch is against 2.0 because it has virtually no chance of breaking anything existing, so why not). Note that the patch removes the pre- and post-execution hooks from QueryProcessor, as those were only there for external tools and, unless I'm missing something obvious, the mechanism here is strictly more general. > Allow "intercepting" query by user provided custom classes > -- > > Key: CASSANDRA-6659 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6659 > Project: Cassandra > Issue Type: Improvement >Reporter: Sylvain Lebresne >Assignee: Sylvain Lebresne >Priority: Minor > Attachments: 6659.txt > > > The idea for this ticket is to abstract the main execution methods of > QueryProcessor into an interface, something like: > {noformat} > public interface QueryHandler > { > public ResultSet process(String query, QueryState state, QueryOptions > options); > public ResultMessage.Prepared prepare(String query, QueryState state); > public ResultSet processPrepared(CQLStatement statement, QueryState > state, QueryOptions options); > public ResultSet processBatch(BatchStatement statement, QueryState state, > BatchQueryOptions options); > } > {noformat} > and to allow users to provide a specific class of their own (implementing > said interface) to which the native protocol would handoff queries to (so by > default queries would go to QueryProcessor, but you would have a way to use a > custom class instead). > A typical use case for that could be to allow some form of custom logging of > incoming queries and/or of their results. But this could probably also have > some application for testing as one could have a handler that completely > bypass QueryProcessor if you want, say, do perf regression tests for a given > driver (and don't want to actually execute the query as you're perf testing > the driver, not C*) without needing to patch the sources. Those being just > examples, the mechanism is generic enough to allow for other ideas. > Most importantly, it requires very little code in C*. As for how users would > register their "handler", it can be as simple as a startup flag indicating > the class to use, or a yaml setting, or both. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6659) Allow "intercepting" query by user provided custom classes
Sylvain Lebresne created CASSANDRA-6659: --- Summary: Allow "intercepting" query by user provided custom classes Key: CASSANDRA-6659 URL: https://issues.apache.org/jira/browse/CASSANDRA-6659 Project: Cassandra Issue Type: Improvement Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Priority: Minor The idea for this ticket is to abstract the main execution methods of QueryProcessor into an interface, something like: {noformat} public interface QueryHandler { public ResultSet process(String query, QueryState state, QueryOptions options); public ResultMessage.Prepared prepare(String query, QueryState state); public ResultSet processPrepared(CQLStatement statement, QueryState state, QueryOptions options); public ResultSet processBatch(BatchStatement statement, QueryState state, BatchQueryOptions options); } {noformat} and to allow users to provide a specific class of their own (implementing said interface) to which the native protocol would hand off queries (so by default queries would go to QueryProcessor, but you would have a way to use a custom class instead). A typical use case for that could be to allow some form of custom logging of incoming queries and/or of their results. But this could probably also have some application for testing, as one could have a handler that completely bypasses QueryProcessor if you want to, say, run perf regression tests for a given driver (and don't want to actually execute the query as you're perf testing the driver, not C*) without needing to patch the sources. Those being just examples, the mechanism is generic enough to allow for other ideas. Most importantly, it requires very little code in C*. As for how users would register their "handler", it can be as simple as a startup flag indicating the class to use, or a yaml setting, or both. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
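For illustration, a user-provided handler implementing the interface sketched above could look roughly like the following: it logs queries and then delegates to another handler. The types and the registration mechanism are only as proposed in this ticket, so treat this as a sketch rather than code against a specific Cassandra version.
{code}
// Decorator-style handler: log, then delegate to whatever the default handler is.
// QueryHandler, QueryState, QueryOptions, etc. are the types sketched in the ticket.
public class LoggingQueryHandler implements QueryHandler
{
    private static final org.slf4j.Logger logger =
            org.slf4j.LoggerFactory.getLogger(LoggingQueryHandler.class);

    private final QueryHandler delegate;

    public LoggingQueryHandler(QueryHandler delegate)
    {
        this.delegate = delegate;
    }

    public ResultSet process(String query, QueryState state, QueryOptions options)
    {
        logger.info("CQL query: {}", query);
        return delegate.process(query, state, options);
    }

    public ResultMessage.Prepared prepare(String query, QueryState state)
    {
        logger.info("Preparing: {}", query);
        return delegate.prepare(query, state);
    }

    public ResultSet processPrepared(CQLStatement statement, QueryState state, QueryOptions options)
    {
        return delegate.processPrepared(statement, state, options);
    }

    public ResultSet processBatch(BatchStatement statement, QueryState state, BatchQueryOptions options)
    {
        return delegate.processBatch(statement, state, options);
    }
}
{code}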
[jira] [Updated] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-6360: --- Attachment: 6360-2.0.patch Actually, since the 2.1 output uses percentiles instead of an "offsets" column, I think it's fine as is. I'm attaching 6360-2.0.patch in case we want to improve this just for the remainder of 2.0, but since things are improved in 2.1, it's not terribly important to me. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > Attachments: 6360-2.0.patch > > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
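For readers unfamiliar with the percentile-style output mentioned above, here is a generic sketch (not Cassandra's EstimatedHistogram implementation) of how a percentile is estimated from histogram bucket offsets and counts:
{code}
// Walk the buckets until the cumulative count crosses the requested fraction of the
// total, then report that bucket's upper bound as the percentile estimate.
public class HistogramPercentile
{
    public static long percentile(long[] bucketOffsets, long[] bucketCounts, double percentile)
    {
        long total = 0;
        for (long c : bucketCounts)
            total += c;
        if (total == 0)
            return 0;

        long threshold = (long) Math.ceil(percentile * total);
        long seen = 0;
        for (int i = 0; i < bucketCounts.length; i++)
        {
            seen += bucketCounts[i];
            if (seen >= threshold)
                return bucketOffsets[i];
        }
        return bucketOffsets[bucketOffsets.length - 1];
    }

    public static void main(String[] args)
    {
        long[] offsets = { 1, 2, 3, 5, 8, 12 }; // bucket upper bounds (e.g. latency in ms)
        long[] counts  = { 0, 10, 50, 30, 9, 1 };
        System.out.println("p95 ~= " + percentile(offsets, counts, 0.95)); // prints 8
    }
}
{code}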
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892421#comment-13892421 ] Benedict commented on CASSANDRA-6360: - bq. since things are improved in 2.1, it's not terribly important to me. +1 > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > Attachments: 6360-2.0.patch > > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892413#comment-13892413 ] Brandon Williams commented on CASSANDRA-6360: - Oh, I thought you meant print as many columns as possible. We have a lot more than two columns, they're just not shown in Tyler's example, and there isn't always a logical pairing between them. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > Attachments: 6360-2.0.patch > > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892408#comment-13892408 ] Benedict commented on CASSANDRA-6360: - bq. The problem is you'd have to detect how wide the terminal is to know if you've gone too far ?? Just printing the two columns we have should be fine for basically any terminal. Anyone with a terminal with < ~30 characters can deal with it. Like they would for the compact format (where we assume ~100 chars) > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892405#comment-13892405 ] Brandon Williams commented on CASSANDRA-6360: - The problem is you'd have to detect how wide the terminal is to know if you've gone too far, otherwise the output is going to be unreadable when it wraps. I'm fine with it the way it is. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892403#comment-13892403 ] Benedict commented on CASSANDRA-6360: - It involves a lot of scrolling, which isn't ideal for terminal use as things stand. I think it would be nice to either print them side-by-side, or to coalesce adjacent buckets that contain little data. But I don't feel strongly about it, no. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6360) Make nodetool cfhistograms output easily understandable
[ https://issues.apache.org/jira/browse/CASSANDRA-6360?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892394#comment-13892394 ] Tyler Hobbs commented on CASSANDRA-6360: bq. Might be nice to print them side by side? If you don't feel strongly about it, I'd prefer to not do this just to save time, although I agree it might look better. > Make nodetool cfhistograms output easily understandable > --- > > Key: CASSANDRA-6360 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6360 > Project: Cassandra > Issue Type: Improvement > Components: Tools >Reporter: Tyler Hobbs >Assignee: Tyler Hobbs >Priority: Trivial > > Almost nobody understands the cfhistograms output without googling it. By > default, we shouldn't share an axis across all metrics. We can still provide > the current format with a --compact option. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-4757) Expose bulk loading progress/status over jmx
[ https://issues.apache.org/jira/browse/CASSANDRA-4757?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-4757: --- Reviewer: Nick Bailey (was: Yuki Morishita) > Expose bulk loading progress/status over jmx > > > Key: CASSANDRA-4757 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4757 > Project: Cassandra > Issue Type: Improvement >Reporter: Nick Bailey >Assignee: Tyler Hobbs >Priority: Minor > Labels: lhf > Fix For: 2.0.6 > > Attachments: 4757-2.0.patch, CASSANDRA-4757.txt, CASSANDRA-4757v2.txt > > > The bulk loading interface should be exposing some progress or status > information over jmx. This shouldn't be too difficult and should be exposed > in a way that the information is available whether you are using the separate > sstableloader utility or calling the bulkload jmx call. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-4757) Expose bulk loading progress/status over jmx
[ https://issues.apache.org/jira/browse/CASSANDRA-4757?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Tyler Hobbs updated CASSANDRA-4757: --- Attachment: 4757-2.0.patch 4757-2.0.patch (and [branch|https://github.com/thobbs/cassandra/tree/CASSANDRA-4757]) makes the following changes: * Add a bulkLoadAsync method to StorageServiceMBean which returns the string version of the planID * Print the planID before loading starts in sstableloader * sstableloader already had a {{--no-progress}} option, but it was being ignored, so I fixed that as well > Expose bulk loading progress/status over jmx > > > Key: CASSANDRA-4757 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4757 > Project: Cassandra > Issue Type: Improvement >Reporter: Nick Bailey >Assignee: Tyler Hobbs >Priority: Minor > Labels: lhf > Fix For: 2.0.6 > > Attachments: 4757-2.0.patch, CASSANDRA-4757.txt, CASSANDRA-4757v2.txt > > > The bulk loading interface should be exposing some progress or status > information over jmx. This shouldn't be too difficult and should be exposed > in a way that the information is available whether you are using the separate > sstableloader utility or calling the bulkload jmx call. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
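A sketch of driving the new method from a JMX client (standard JMX plumbing; the bulkLoadAsync operation is the one added by the patch above, and the host/port and sstable directory are placeholders):
{code}
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

// Start a bulk load via StorageService's JMX interface and print the returned
// streaming planID, which can then be used to follow the session's progress.
public class BulkLoadJmxClient
{
    public static void main(String[] args) throws Exception
    {
        JMXServiceURL url = new JMXServiceURL(
                "service:jmx:rmi:///jndi/rmi://127.0.0.1:7199/jmxrmi"); // default C* JMX port
        try (JMXConnector connector = JMXConnectorFactory.connect(url))
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            ObjectName storageService = new ObjectName("org.apache.cassandra.db:type=StorageService");
            String planId = (String) mbs.invoke(storageService,
                                                "bulkLoadAsync",
                                                new Object[]{ "/path/to/keyspace/table-sstables" },
                                                new String[]{ String.class.getName() });
            System.out.println("Bulk load started, streaming planID = " + planId);
        }
    }
}
{code}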
[jira] [Comment Edited] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892315#comment-13892315 ] Machiel Groeneveld edited comment on CASSANDRA-6528 at 2/5/14 6:10 PM: --- I have the same issue, after inserting 216258 records (sharing the same partition key) in a new database (I reinstalled Cassandra) I couldn't run a select query (something like 'select * from partition_key = x'). Also a count(*) on the table gives me tombstone warnings. I'm not expecting any tombstones as they are all inserts (not 100% sure about possible overwriting though) In the log I get org.apache.cassandra.db.filter.TombstoneOverwhelmingException was (Author: machielg): I have the same issue, after inserting 216258 records (in one row) in a new database (I removed all files in the data directory files before starting) I couldn't run a select query (something like 'select * from partition_key = x') In the log I get org.apache.cassandra.db.filter.TombstoneOverwhelmingException > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > 
java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Attachment: 6658.txt > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Attachments: 6658.txt > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892380#comment-13892380 ] Aleksey Yeschenko commented on CASSANDRA-6528: -- Example insert query. Do you ever insert, or update set NULL? > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Attachment: (was: 6658.txt) > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Attachments: 6658.txt > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892376#comment-13892376 ] Machiel Groeneveld edited comment on CASSANDRA-6528 at 2/5/14 6:07 PM: --- create table IF NOT EXISTS visits.visits( id text, cookie_uuid text, cookie_uuid text, external_click_id text, session_id text, visitor_ip text, user_agent text, uuid_hash text, shop_product_id int, channel_id int, shop_id int, shop_category_id int, type int, medium_id int, campaign_id int, channel_affiliate_id int, default_cpc float, created_at timestamp, updated_at timestamp, time_id int, disabled int, has_referer boolean, known_visitor boolean, marketing boolean, primary key(time_id, id)); SELECT * FROM VISITS was (Author: machielg): create table IF NOT EXISTS visits.visits( id text, cookie_uuid text, cookie_uuid text, external_click_id text, session_id text, visitor_ip text, user_agent text, uuid_hash text, shop_product_id int, channel_id int, shop_id int, shop_category_id int, type int, medium_id int, campaign_id int, channel_affiliate_id int, default_cpc float, created_at timestamp, updated_at timestamp, time_id int, disabled int, has_referer boolean, known_visitor boolean, marketing boolean, primary key(time_id, id)); > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. 
Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#61
[jira] [Commented] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892376#comment-13892376 ] Machiel Groeneveld commented on CASSANDRA-6528: --- create table IF NOT EXISTS visits.visits( id text, cookie_uuid text, cookie_uuid text, external_click_id text, session_id text, visitor_ip text, user_agent text, uuid_hash text, shop_product_id int, channel_id int, shop_id int, shop_category_id int, type int, medium_id int, campaign_id int, channel_affiliate_id int, default_cpc float, created_at timestamp, updated_at timestamp, time_id int, disabled int, has_referer boolean, known_visitor boolean, marketing boolean, primary key(time_id, id)); > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family 
CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Attachment: 6658.txt At least one problem is that in CASSANDRA-6385 we never converted the initial value to nanos, so when we divide by the mean while the only sample is the initial value (and the mean is therefore equal to it), the resulting phi is inflated. > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Attachments: 6658.txt > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
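A simplified illustration (not the actual FailureDetector/ArrivalWindow code) of why mixing a millisecond-scale initial value with nanosecond intervals inflates phi, and of the TimeUnit-based conversion suggested in the review comment above:
{code}
import java.util.concurrent.TimeUnit;

// Heartbeat intervals are recorded in nanoseconds, but the bootstrap "initial value"
// was left in milliseconds. While that initial value is the only sample, the mean is
// roughly a million times too small, so phi = interval / mean explodes and the peer
// is briefly convicted (the UP -> DOWN -> UP flap described in this ticket).
public class PhiUnitsSketch
{
    static final long INITIAL_VALUE_MS = 2_000; // assumed 2s bootstrap interval, in millis

    static double phi(long intervalNanos, double meanNanos)
    {
        return intervalNanos / meanNanos; // PHI_FACTOR scaling omitted for clarity
    }

    public static void main(String[] args)
    {
        long intervalNanos = TimeUnit.SECONDS.toNanos(1); // a normal 1s heartbeat gap

        double buggyMean = INITIAL_VALUE_MS;                                // millis mixed with nanos
        double fixedMean = TimeUnit.MILLISECONDS.toNanos(INITIAL_VALUE_MS); // converted, as suggested

        System.out.printf("phi with ms initial value:    %.1f%n", phi(intervalNanos, buggyMean)); // huge -> flap
        System.out.printf("phi with nanos initial value: %.2f%n", phi(intervalNanos, fixedMean)); // ~0.5 -> fine
    }
}
{code}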
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Since Version: 2.0.3 (was: 2.0.4) > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[2/3] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt src/java/org/apache/cassandra/db/AtomicSortedColumns.java src/java/org/apache/cassandra/db/DeletionInfo.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/58e94818 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/58e94818 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/58e94818 Branch: refs/heads/trunk Commit: 58e948185e214dbdc68e4ce533edb4dfa5430b51 Parents: 49bb972 adcb713 Author: Sylvain Lebresne Authored: Wed Feb 5 18:42:00 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:42:00 2014 +0100 -- CHANGES.txt | 5 + .../org/apache/cassandra/db/AtomicSortedColumns.java | 7 ++- src/java/org/apache/cassandra/db/DeletionInfo.java| 14 ++ src/java/org/apache/cassandra/db/Memtable.java| 10 ++ 4 files changed, 31 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/58e94818/CHANGES.txt -- diff --cc CHANGES.txt index 9599e56,cfdd148..bba5f20 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,29 -1,21 +1,34 @@@ -1.2.16 ++2.0.6 ++Merged from 1.2: + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + -1.2.15 - * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - + -1.2.14 - * Reverted code to limit CQL prepared statement cache by size (CASSANDRA-6592) - * add cassandra.default_messaging_version property to allow easier - upgrading from 1.1 (CASSANDRA-6619) - * Allow executing CREATE statements multiple times (CASSANDRA-6471) - * Don't send confusing info with timeouts (CASSANDRA-6491) - * Don't resubmit counter mutation runnables internally (CASSANDRA-6427) - * Don't drop local mutations without a hint (CASSANDRA-6510) - * Don't allow null max_hint_window_in_ms (CASSANDRA-6419) - * Validate SliceRange start and finish lengths (CASSANDRA-6521) +2.0.5 + * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) + * Add ks.cf names to tombstone logging (CASSANDRA-6597) + * Use LOCAL_QUORUM for LWT operations at LOCAL_SERIAL (CASSANDRA-6495) + * Wait for gossip to settle before accepting client connections (CASSANDRA-4288) + * Delete unfinished compaction incrementally (CASSANDRA-6086) + * Allow specifying custom secondary index options in CQL3 (CASSANDRA-6480) + * Improve replica pinning for cache efficiency in DES (CASSANDRA-6485) + * Fix LOCAL_SERIAL from thrift (CASSANDRA-6584) + * Don't special case received counts in CAS timeout exceptions (CASSANDRA-6595) + * Add support for 2.1 global counter shards (CASSANDRA-6505) + * Fix NPE when streaming connection is not yet established (CASSANDRA-6210) + * Avoid rare duplicate read repair triggering (CASSANDRA-6606) + * Fix paging discardFirst (CASSANDRA-6555) + * Fix ArrayIndexOutOfBoundsException in 2ndary index query (CASSANDRA-6470) + * Release sstables upon rebuilding 2i (CASSANDRA-6635) + * Add AbstractCompactionStrategy.startup() method (CASSANDRA-6637) + * SSTableScanner may skip rows during cleanup (CASSANDRA-6638) + * sstables from stalled repair sessions can resurrect deleted data (CASSANDRA-6503) + * Switch stress to use ITransportFactory (CASSANDRA-6641) + * Fix IllegalArgumentException during prepare (CASSANDRA-6592) + * Fix possible loss of 2ndary index entries during compaction (CASSANDRA-6517) + * Fix direct Memory on architectures that do not support unaligned long access + 
(CASSANDRA-6628) + * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) +Merged from 1.2: * fsync compression metadata (CASSANDRA-6531) * Validate CF existence on execution for prepared statement (CASSANDRA-6535) * Add ability to throttle batchlog replay (CASSANDRA-6550) http://git-wip-us.apache.org/repos/asf/cassandra/blob/58e94818/src/java/org/apache/cassandra/db/AtomicSortedColumns.java -- diff --cc src/java/org/apache/cassandra/db/AtomicSortedColumns.java index 1c0bf1b,d6c861b..d3a979c --- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java @@@ -178,19 -194,15 +178,24 @@@ public class AtomicSortedColumns extend { sizeDelta = 0; current = ref.get(); - DeletionInfo newDelInfo = current.deletionInfo.copy().add(cm.deletionInfo()); + DeletionInfo newDelInfo = current.deletionInfo; -if (cm.getDeletionInfo().mayModif
[1/2] git commit: Fix partition and range deletes not triggering flush
Updated Branches: refs/heads/cassandra-2.0 49bb972c6 -> 58e948185 Fix partition and range deletes not triggering flush patch by benedict; reviewed by slebresne for CASSANDRA-6655 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/adcb713d Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/adcb713d Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/adcb713d Branch: refs/heads/cassandra-2.0 Commit: adcb713d597302a868b6224a87ea6ce38e718e5d Parents: 16efdf4 Author: Sylvain Lebresne Authored: Wed Feb 5 18:34:37 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:34:37 2014 +0100 -- CHANGES.txt | 3 +++ .../org/apache/cassandra/db/AtomicSortedColumns.java | 7 ++- src/java/org/apache/cassandra/db/DeletionInfo.java| 14 ++ src/java/org/apache/cassandra/db/Memtable.java| 10 ++ 4 files changed, 29 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 0989dc4..cfdd148 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,6 @@ +1.2.16 + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + 1.2.15 * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/AtomicSortedColumns.java -- diff --git a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java index 9803544..d6c861b 100644 --- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java @@ -194,7 +194,12 @@ public class AtomicSortedColumns implements ISortedColumns { sizeDelta = 0; current = ref.get(); -DeletionInfo newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +DeletionInfo newDelInfo = current.deletionInfo; +if (cm.getDeletionInfo().mayModify(newDelInfo)) +{ +newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +sizeDelta += newDelInfo.dataSize() - current.deletionInfo.dataSize(); +} modified = new Holder(current.map.clone(), newDelInfo); for (IColumn column : cm.getSortedColumns()) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/DeletionInfo.java -- diff --git a/src/java/org/apache/cassandra/db/DeletionInfo.java b/src/java/org/apache/cassandra/db/DeletionInfo.java index e486eeb..91af9fd 100644 --- a/src/java/org/apache/cassandra/db/DeletionInfo.java +++ b/src/java/org/apache/cassandra/db/DeletionInfo.java @@ -216,6 +216,20 @@ public class DeletionInfo return size + (ranges == null ? 0 : ranges.dataSize()); } +public int rangeCount() +{ +return ranges == null ? 0 : ranges.size(); +} + +/** + * Whether this deletion info may modify the provided one if added to it. 
+ */ +public boolean mayModify(DeletionInfo delInfo) +{ +return topLevel.markedForDeleteAt > delInfo.topLevel.markedForDeleteAt +|| ranges == null; +} + @Override public String toString() { http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/Memtable.java -- diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java index 817561b..b229060 100644 --- a/src/java/org/apache/cassandra/db/Memtable.java +++ b/src/java/org/apache/cassandra/db/Memtable.java @@ -192,6 +192,7 @@ public class Memtable { ColumnFamily previous = columnFamilies.get(key); +long sizeDelta = 0; if (previous == null) { // AtomicSortedColumns doesn't work for super columns (see #3821) @@ -199,14 +200,15 @@ public class Memtable // We'll add the columns later. This avoids wasting works if we get beaten in the putIfAbsent previous = columnFamilies.putIfAbsent(new DecoratedKey(key.token, allocator.clone(key.key)), empty); if (previous == null) +{ previous = empty; +sizeDelta += empty.deletionInfo().dataSize(); +
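In plain terms, the change above addresses mutations that carry only a partition or range delete: previously only added columns contributed to the memtable's size accounting, so such mutations looked free. The patch sizes the merged DeletionInfo and folds the difference into sizeDelta, guarded by mayModify() so an unchanged deletion info is not copied. Below is a hedged, stand-alone sketch of that accounting idea; the types and the merge rule are simplified stand-ins, not the real AtomicSortedColumns/DeletionInfo classes.

{code}
// Stand-alone sketch of counting deletion-info growth toward memtable occupancy.
// Class and field names are simplified stand-ins, not the actual Cassandra types.
public class DeletionAccountingSketch
{
    static class DeletionInfo
    {
        long markedForDeleteAt;
        int rangeDataSize;

        int dataSize() { return 8 /* top-level timestamp */ + rangeDataSize; }

        boolean mayModify(DeletionInfo existing)
        {
            // Only worth merging if we are newer or carry range tombstones (simplified rule).
            return markedForDeleteAt > existing.markedForDeleteAt || rangeDataSize > 0;
        }

        DeletionInfo mergedWith(DeletionInfo other)
        {
            DeletionInfo merged = new DeletionInfo();
            merged.markedForDeleteAt = Math.max(markedForDeleteAt, other.markedForDeleteAt);
            merged.rangeDataSize = rangeDataSize + other.rangeDataSize;
            return merged;
        }
    }

    /** Returns the extra bytes a delete-only mutation should add to the occupancy counter. */
    static long applyDelete(DeletionInfo current, DeletionInfo incoming)
    {
        long sizeDelta = 0;
        if (incoming.mayModify(current))
        {
            DeletionInfo updated = current.mergedWith(incoming); // would replace the holder's deletion info
            // This delta is what was previously dropped on the floor,
            // leaving delete-heavy memtables looking empty.
            sizeDelta += updated.dataSize() - current.dataSize();
        }
        return sizeDelta;
    }
}
{code}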
[1/3] git commit: Fix partition and range deletes not triggering flush
Updated Branches: refs/heads/trunk 58d1a4f81 -> fe4247e58 Fix partition and range deletes not triggering flush patch by benedict; reviewed by slebresne for CASSANDRA-6655 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/adcb713d Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/adcb713d Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/adcb713d Branch: refs/heads/trunk Commit: adcb713d597302a868b6224a87ea6ce38e718e5d Parents: 16efdf4 Author: Sylvain Lebresne Authored: Wed Feb 5 18:34:37 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:34:37 2014 +0100 -- CHANGES.txt | 3 +++ .../org/apache/cassandra/db/AtomicSortedColumns.java | 7 ++- src/java/org/apache/cassandra/db/DeletionInfo.java| 14 ++ src/java/org/apache/cassandra/db/Memtable.java| 10 ++ 4 files changed, 29 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 0989dc4..cfdd148 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,6 @@ +1.2.16 + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + 1.2.15 * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/AtomicSortedColumns.java -- diff --git a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java index 9803544..d6c861b 100644 --- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java @@ -194,7 +194,12 @@ public class AtomicSortedColumns implements ISortedColumns { sizeDelta = 0; current = ref.get(); -DeletionInfo newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +DeletionInfo newDelInfo = current.deletionInfo; +if (cm.getDeletionInfo().mayModify(newDelInfo)) +{ +newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +sizeDelta += newDelInfo.dataSize() - current.deletionInfo.dataSize(); +} modified = new Holder(current.map.clone(), newDelInfo); for (IColumn column : cm.getSortedColumns()) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/DeletionInfo.java -- diff --git a/src/java/org/apache/cassandra/db/DeletionInfo.java b/src/java/org/apache/cassandra/db/DeletionInfo.java index e486eeb..91af9fd 100644 --- a/src/java/org/apache/cassandra/db/DeletionInfo.java +++ b/src/java/org/apache/cassandra/db/DeletionInfo.java @@ -216,6 +216,20 @@ public class DeletionInfo return size + (ranges == null ? 0 : ranges.dataSize()); } +public int rangeCount() +{ +return ranges == null ? 0 : ranges.size(); +} + +/** + * Whether this deletion info may modify the provided one if added to it. 
+ */ +public boolean mayModify(DeletionInfo delInfo) +{ +return topLevel.markedForDeleteAt > delInfo.topLevel.markedForDeleteAt +|| ranges == null; +} + @Override public String toString() { http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/Memtable.java -- diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java index 817561b..b229060 100644 --- a/src/java/org/apache/cassandra/db/Memtable.java +++ b/src/java/org/apache/cassandra/db/Memtable.java @@ -192,6 +192,7 @@ public class Memtable { ColumnFamily previous = columnFamilies.get(key); +long sizeDelta = 0; if (previous == null) { // AtomicSortedColumns doesn't work for super columns (see #3821) @@ -199,14 +200,15 @@ public class Memtable // We'll add the columns later. This avoids wasting works if we get beaten in the putIfAbsent previous = columnFamilies.putIfAbsent(new DecoratedKey(key.token, allocator.clone(key.key)), empty); if (previous == null) +{ previous = empty; +sizeDelta += empty.deletionInfo().dataSize(); +} }
[2/2] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: CHANGES.txt src/java/org/apache/cassandra/db/AtomicSortedColumns.java src/java/org/apache/cassandra/db/DeletionInfo.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/58e94818 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/58e94818 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/58e94818 Branch: refs/heads/cassandra-2.0 Commit: 58e948185e214dbdc68e4ce533edb4dfa5430b51 Parents: 49bb972 adcb713 Author: Sylvain Lebresne Authored: Wed Feb 5 18:42:00 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:42:00 2014 +0100 -- CHANGES.txt | 5 + .../org/apache/cassandra/db/AtomicSortedColumns.java | 7 ++- src/java/org/apache/cassandra/db/DeletionInfo.java| 14 ++ src/java/org/apache/cassandra/db/Memtable.java| 10 ++ 4 files changed, 31 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/58e94818/CHANGES.txt -- diff --cc CHANGES.txt index 9599e56,cfdd148..bba5f20 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,29 -1,21 +1,34 @@@ -1.2.16 ++2.0.6 ++Merged from 1.2: + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + -1.2.15 - * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - + -1.2.14 - * Reverted code to limit CQL prepared statement cache by size (CASSANDRA-6592) - * add cassandra.default_messaging_version property to allow easier - upgrading from 1.1 (CASSANDRA-6619) - * Allow executing CREATE statements multiple times (CASSANDRA-6471) - * Don't send confusing info with timeouts (CASSANDRA-6491) - * Don't resubmit counter mutation runnables internally (CASSANDRA-6427) - * Don't drop local mutations without a hint (CASSANDRA-6510) - * Don't allow null max_hint_window_in_ms (CASSANDRA-6419) - * Validate SliceRange start and finish lengths (CASSANDRA-6521) +2.0.5 + * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) + * Add ks.cf names to tombstone logging (CASSANDRA-6597) + * Use LOCAL_QUORUM for LWT operations at LOCAL_SERIAL (CASSANDRA-6495) + * Wait for gossip to settle before accepting client connections (CASSANDRA-4288) + * Delete unfinished compaction incrementally (CASSANDRA-6086) + * Allow specifying custom secondary index options in CQL3 (CASSANDRA-6480) + * Improve replica pinning for cache efficiency in DES (CASSANDRA-6485) + * Fix LOCAL_SERIAL from thrift (CASSANDRA-6584) + * Don't special case received counts in CAS timeout exceptions (CASSANDRA-6595) + * Add support for 2.1 global counter shards (CASSANDRA-6505) + * Fix NPE when streaming connection is not yet established (CASSANDRA-6210) + * Avoid rare duplicate read repair triggering (CASSANDRA-6606) + * Fix paging discardFirst (CASSANDRA-6555) + * Fix ArrayIndexOutOfBoundsException in 2ndary index query (CASSANDRA-6470) + * Release sstables upon rebuilding 2i (CASSANDRA-6635) + * Add AbstractCompactionStrategy.startup() method (CASSANDRA-6637) + * SSTableScanner may skip rows during cleanup (CASSANDRA-6638) + * sstables from stalled repair sessions can resurrect deleted data (CASSANDRA-6503) + * Switch stress to use ITransportFactory (CASSANDRA-6641) + * Fix IllegalArgumentException during prepare (CASSANDRA-6592) + * Fix possible loss of 2ndary index entries during compaction (CASSANDRA-6517) + * Fix direct Memory on architectures that do not support unaligned long access + 
(CASSANDRA-6628) + * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) +Merged from 1.2: * fsync compression metadata (CASSANDRA-6531) * Validate CF existence on execution for prepared statement (CASSANDRA-6535) * Add ability to throttle batchlog replay (CASSANDRA-6550) http://git-wip-us.apache.org/repos/asf/cassandra/blob/58e94818/src/java/org/apache/cassandra/db/AtomicSortedColumns.java -- diff --cc src/java/org/apache/cassandra/db/AtomicSortedColumns.java index 1c0bf1b,d6c861b..d3a979c --- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java @@@ -178,19 -194,15 +178,24 @@@ public class AtomicSortedColumns extend { sizeDelta = 0; current = ref.get(); - DeletionInfo newDelInfo = current.deletionInfo.copy().add(cm.deletionInfo()); + DeletionInfo newDelInfo = current.deletionInfo; -if (cm.getDeletionInfo().
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: CHANGES.txt src/java/org/apache/cassandra/db/AtomicSortedColumns.java src/java/org/apache/cassandra/db/Memtable.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/fe4247e5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/fe4247e5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/fe4247e5 Branch: refs/heads/trunk Commit: fe4247e589714d9ea183187c0538b6446f16ffca Parents: 58d1a4f 58e9481 Author: Sylvain Lebresne Authored: Wed Feb 5 18:51:40 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:51:40 2014 +0100 -- CHANGES.txt | 8 ++- .../apache/cassandra/db/AtomicBTreeColumns.java | 22 .../org/apache/cassandra/db/DeletionInfo.java | 14 + src/java/org/apache/cassandra/db/Memtable.java | 4 +--- 4 files changed, 30 insertions(+), 18 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/fe4247e5/CHANGES.txt -- diff --cc CHANGES.txt index 0690c38,bba5f20..7d628b5 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -1,40 -1,7 +1,36 @@@ +2.1 + * add listsnapshots command to nodetool (CASSANDRA-5742) + * Introduce AtomicBTreeColumns (CASSANDRA-6271) + * Multithreaded commitlog (CASSANDRA-3578) + * allocate fixed index summary memory pool and resample cold index summaries + to use less memory (CASSANDRA-5519) + * Removed multithreaded compaction (CASSANDRA-6142) + * Parallelize fetching rows for low-cardinality indexes (CASSANDRA-1337) + * change logging from log4j to logback (CASSANDRA-5883) + * switch to LZ4 compression for internode communication (CASSANDRA-5887) + * Stop using Thrift-generated Index* classes internally (CASSANDRA-5971) + * Remove 1.2 network compatibility code (CASSANDRA-5960) + * Remove leveled json manifest migration code (CASSANDRA-5996) + * Remove CFDefinition (CASSANDRA-6253) + * Use AtomicIntegerFieldUpdater in RefCountedMemory (CASSANDRA-6278) + * User-defined types for CQL3 (CASSANDRA-5590) + * Use of o.a.c.metrics in nodetool (CASSANDRA-5871, 6406) + * Batch read from OTC's queue and cleanup (CASSANDRA-1632) + * Secondary index support for collections (CASSANDRA-4511, 6383) + * SSTable metadata(Stats.db) format change (CASSANDRA-6356) + * Push composites support in the storage engine + (CASSANDRA-5417, CASSANDRA-6520) + * Add snapshot space used to cfstats (CASSANDRA-6231) + * Add cardinality estimator for key count estimation (CASSANDRA-5906) + * CF id is changed to be non-deterministic. Data dir/key cache are created + uniquely for CF id (CASSANDRA-5202) + * New counters implementation (CASSANDRA-6504) + + 2.0.6 - * Fix direct Memory on architectures that do not support unaligned long access -(CASSANDRA-6628) - * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) Merged from 1.2: - * Move handling of migration event source to solve bootstrap race. 
(CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + 2.0.5 * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) http://git-wip-us.apache.org/repos/asf/cassandra/blob/fe4247e5/src/java/org/apache/cassandra/db/AtomicBTreeColumns.java -- diff --cc src/java/org/apache/cassandra/db/AtomicBTreeColumns.java index 238bb7c,000..fd7d4bc mode 100644,00..100644 --- a/src/java/org/apache/cassandra/db/AtomicBTreeColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicBTreeColumns.java @@@ -1,457 -1,0 +1,461 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.cassandra.db; + +import java.util.AbstractCollection; +import java.util.ArrayList; +import java.util.Arrays; +import java.
[jira] [Commented] (CASSANDRA-4757) Expose bulk loading progress/status over jmx
[ https://issues.apache.org/jira/browse/CASSANDRA-4757?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892350#comment-13892350 ] Tyler Hobbs commented on CASSANDRA-4757: bq. My only other question would be if the building indexes step of bulk loading would be included in that progress as well. In 2.0 index building is the last step before marking a streaming task as complete, so it's included. > Expose bulk loading progress/status over jmx > > > Key: CASSANDRA-4757 > URL: https://issues.apache.org/jira/browse/CASSANDRA-4757 > Project: Cassandra > Issue Type: Improvement >Reporter: Nick Bailey >Assignee: Tyler Hobbs >Priority: Minor > Labels: lhf > Fix For: 2.0.6 > > Attachments: CASSANDRA-4757.txt, CASSANDRA-4757v2.txt > > > The bulk loading interface should be exposing some progress or status > information over jmx. This shouldn't be too difficult and should be exposed > in a way that the information is available whether you are using the separate > sstableloader utility or calling the bulkload jmx call. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
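For anyone who wants to watch that progress from outside the node once this lands: streaming (and therefore bulk load) state is published through JMX, so a small client can connect to the node's JMX port and inspect the streaming MBeans. The sketch below uses only the standard javax.management API; the host, the 7199 port and the org.apache.cassandra.streaming MBean domain are assumptions to adapt to the cluster being monitored, and a real monitor would read specific attributes rather than just listing names.

{code}
// Hedged sketch: listing Cassandra streaming MBeans over JMX with the standard JDK API.
// Host, port and the MBean domain pattern are assumptions; adjust to the node being watched.
import javax.management.MBeanServerConnection;
import javax.management.ObjectName;
import javax.management.remote.JMXConnector;
import javax.management.remote.JMXConnectorFactory;
import javax.management.remote.JMXServiceURL;

public class StreamProgressProbe
{
    public static void main(String[] args) throws Exception
    {
        String url = "service:jmx:rmi:///jndi/rmi://localhost:7199/jmxrmi"; // assumed JMX endpoint
        JMXConnector connector = JMXConnectorFactory.connect(new JMXServiceURL(url));
        try
        {
            MBeanServerConnection mbs = connector.getMBeanServerConnection();
            // Query everything under the (assumed) streaming domain and dump the names.
            for (ObjectName name : mbs.queryNames(new ObjectName("org.apache.cassandra.streaming:*"), null))
                System.out.println("streaming MBean: " + name);
        }
        finally
        {
            connector.close();
        }
    }
}
{code}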
[jira] [Commented] (CASSANDRA-6651) Repair hanging
[ https://issues.apache.org/jira/browse/CASSANDRA-6651?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892345#comment-13892345 ] Thunder Stumpges commented on CASSANDRA-6651: - FWIW we have this exact same issue. We are running 2.0.3 on a 3 node cluster. It has happened multiple times, and happens more times than not when running nodetool repair. There is nearly always one or more AntiEntropySessions remaining according to tpstats. One strange thing about the behavior I see is that the output of nodetool compactionstats returns 0 active compactions, yet when restarting, we get the exception about "Unfinished compactions reference missing sstables." It does seem like these two issues are related. Another thing I see sometimes in the ouput from nodetool repair is the following message: [2014-02-04 14:07:30,858] Starting repair command #7, repairing 768 ranges for keyspace thunder_test [2014-02-04 14:08:30,862] Lost notification. You should check server log for repair status of keyspace thunder_test [2014-02-04 14:08:30,870] Starting repair command #8, repairing 768 ranges for keyspace doan_synset [2014-02-04 14:09:30,874] Lost notification. You should check server log for repair status of keyspace doan_synset When this happens, it starts the next repair session immediately rather than waiting for the current one to finish. This doesn't however seem to always correlate to a hung session. My logs don't look much/any different from the OP, but I'd be glad to provide any more details that might be helpful. We will be upgrading to 2.0.4 in the next couple days and I will report back if we see any difference in behavior. > Repair hanging > -- > > Key: CASSANDRA-6651 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6651 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Eitan Eibschutz > Fix For: 2.0.3 > > > Hi, > We have a 12 node cluster in PROD environment and we've noticed that repairs > are never finishing. The behavior that we've observed is that a repair > process will run until at some point it hangs and no other processing is > happening. > For example, at the moment, I have a repair process that has been running for > two days and not finishing: > nodetool tpstats is showing 2 active and 2 pending AntiEntropySessions > nodetool compactionstats is showing: > pending tasks: 0 > Active compaction remaining time :n/a > nodetools netstats is showing: > Mode: NORMAL > Not sending any streams. > Read Repair Statistics: > Attempted: 0 > Mismatch (Blocking): 142110 > Mismatch (Background): 0 > Pool NameActive Pending Completed > Commandsn/a 0 107589657 > Responses n/a 0 116430785 > The last entry that I see in the log is: > INFO [AntiEntropySessions:18] 2014-02-03 04:01:39,145 RepairJob.java (line > 116) [repair #ae78c6c0-8c2b-11e3-b950-c3b81a36bc9b] requesting merkle trees > for MyCF (to [/x.x.x.x, /y.y.y.y, /z.z.z.z]) > The repair started at 4am so it stopped after 1:40 minute. 
> On node y.y.y.y I can see this in the log: > INFO [MiscStage:1] 2014-02-03 04:01:38,985 ColumnFamilyStore.java (line 740) > Enqueuing flush of Memtable-MyCF@1290890489(2176/5931 serialized/live bytes, > 32 ops) > INFO [FlushWriter:411] 2014-02-03 04:01:38,986 Memtable.java (line 333) > Writing Memtable-MyCF@1290890489(2176/5931 serialized/live bytes, 32 ops) > INFO [FlushWriter:411] 2014-02-03 04:01:39,048 Memtable.java (line 373) > Completed flushing > /var/lib/cassandra/main-db/data/MyKS/MyCF/MyKS-MyCF-jb-518-Data.db (1789 > bytes) for commitlog position ReplayPosition(segmentId=1390437013339, > position=21868792) > INFO [ScheduledTasks:1] 2014-02-03 05:00:04,794 ColumnFamilyStore.java (line > 740) Enqueuing flush of Memtable-compaction_history@1649414699(1635/17360 > serialized/live bytes, 42 ops) > So for some reason the merkle tree for this CF is never sent back to the node > being repaired and it's hanging. > I've also noticed that sometimes, restarting node y.y.y.y will cause the > repair to resume. > Another observation is that sometimes when restarting y.y.y.y it will not > start with these errors: > ERROR 16:34:18,485 Exception encountered during startup > java.lang.IllegalStateException: Unfinished compactions reference missing > sstables. This should never happen since compactions are marked finished > before we start removing the old sstables. > at > org.apache.cassandra.db.ColumnFamilyStore.removeUnfinishedCompactionLeftovers(ColumnFamilyStore.java:495) > at > org.apache.cassandra.service.CassandraDaemon.setup(CassandraDaemon.java:264) > at > org.apache.cassandra.service.CassandraDaemon.activate(CassandraDaemon.java:461) > at > org.
[jira] [Commented] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892335#comment-13892335 ] Aleksey Yeschenko commented on CASSANDRA-6528: -- bq. I have the same issue, after inserting 216258 records (in one row) in a new database Your schema and an example query? > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. 
-- This message was sent by Atlassian JIRA (v6.1.5#6160)
git commit: Fix partition and range deletes not triggering flush
Updated Branches: refs/heads/cassandra-1.2 16efdf4a0 -> adcb713d5 Fix partition and range deletes not triggering flush patch by benedict; reviewed by slebresne for CASSANDRA-6655 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/adcb713d Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/adcb713d Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/adcb713d Branch: refs/heads/cassandra-1.2 Commit: adcb713d597302a868b6224a87ea6ce38e718e5d Parents: 16efdf4 Author: Sylvain Lebresne Authored: Wed Feb 5 18:34:37 2014 +0100 Committer: Sylvain Lebresne Committed: Wed Feb 5 18:34:37 2014 +0100 -- CHANGES.txt | 3 +++ .../org/apache/cassandra/db/AtomicSortedColumns.java | 7 ++- src/java/org/apache/cassandra/db/DeletionInfo.java| 14 ++ src/java/org/apache/cassandra/db/Memtable.java| 10 ++ 4 files changed, 29 insertions(+), 5 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 0989dc4..cfdd148 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -1,3 +1,6 @@ +1.2.16 + * Fix partition and range deletes not triggering flush (CASSANDRA-6655) + 1.2.15 * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/AtomicSortedColumns.java -- diff --git a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java index 9803544..d6c861b 100644 --- a/src/java/org/apache/cassandra/db/AtomicSortedColumns.java +++ b/src/java/org/apache/cassandra/db/AtomicSortedColumns.java @@ -194,7 +194,12 @@ public class AtomicSortedColumns implements ISortedColumns { sizeDelta = 0; current = ref.get(); -DeletionInfo newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +DeletionInfo newDelInfo = current.deletionInfo; +if (cm.getDeletionInfo().mayModify(newDelInfo)) +{ +newDelInfo = current.deletionInfo.copy().add(cm.getDeletionInfo()); +sizeDelta += newDelInfo.dataSize() - current.deletionInfo.dataSize(); +} modified = new Holder(current.map.clone(), newDelInfo); for (IColumn column : cm.getSortedColumns()) http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/DeletionInfo.java -- diff --git a/src/java/org/apache/cassandra/db/DeletionInfo.java b/src/java/org/apache/cassandra/db/DeletionInfo.java index e486eeb..91af9fd 100644 --- a/src/java/org/apache/cassandra/db/DeletionInfo.java +++ b/src/java/org/apache/cassandra/db/DeletionInfo.java @@ -216,6 +216,20 @@ public class DeletionInfo return size + (ranges == null ? 0 : ranges.dataSize()); } +public int rangeCount() +{ +return ranges == null ? 0 : ranges.size(); +} + +/** + * Whether this deletion info may modify the provided one if added to it. 
+ */ +public boolean mayModify(DeletionInfo delInfo) +{ +return topLevel.markedForDeleteAt > delInfo.topLevel.markedForDeleteAt +|| ranges == null; +} + @Override public String toString() { http://git-wip-us.apache.org/repos/asf/cassandra/blob/adcb713d/src/java/org/apache/cassandra/db/Memtable.java -- diff --git a/src/java/org/apache/cassandra/db/Memtable.java b/src/java/org/apache/cassandra/db/Memtable.java index 817561b..b229060 100644 --- a/src/java/org/apache/cassandra/db/Memtable.java +++ b/src/java/org/apache/cassandra/db/Memtable.java @@ -192,6 +192,7 @@ public class Memtable { ColumnFamily previous = columnFamilies.get(key); +long sizeDelta = 0; if (previous == null) { // AtomicSortedColumns doesn't work for super columns (see #3821) @@ -199,14 +200,15 @@ public class Memtable // We'll add the columns later. This avoids wasting works if we get beaten in the putIfAbsent previous = columnFamilies.putIfAbsent(new DecoratedKey(key.token, allocator.clone(key.key)), empty); if (previous == null) +{ previous = empty; +sizeDelta += empty.deletionInfo().dataSize(); +
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Since Version: 2.0.4 Fix Version/s: (was: 2.0.4) > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6658) Nodes flap once at starup
Brandon Williams created CASSANDRA-6658: --- Summary: Nodes flap once at starup Key: CASSANDRA-6658 URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 Project: Cassandra Issue Type: Bug Components: Core Reporter: Brandon Williams Assignee: Brandon Williams Priority: Minor Fix For: 2.0.4 Upon initially seeing each other, a node will mark another UP, then DOWN, then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6658) Nodes flap once at startup
[ https://issues.apache.org/jira/browse/CASSANDRA-6658?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams updated CASSANDRA-6658: Summary: Nodes flap once at startup (was: Nodes flap once at starup) > Nodes flap once at startup > -- > > Key: CASSANDRA-6658 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6658 > Project: Cassandra > Issue Type: Bug > Components: Core >Reporter: Brandon Williams >Assignee: Brandon Williams >Priority: Minor > Fix For: 2.0.4 > > > Upon initially seeing each other, a node will mark another UP, then DOWN, > then UP again. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6656) Exception logging
[ https://issues.apache.org/jira/browse/CASSANDRA-6656?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Mikhail Stepura updated CASSANDRA-6656: --- Fix Version/s: (was: 2.0.4) > Exception logging > - > > Key: CASSANDRA-6656 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6656 > Project: Cassandra > Issue Type: Improvement > Components: Core, Tools >Reporter: Ding Yuan > Attachments: trunk-6656.txt > > > Reporting a few cases where informative exceptions can be silently swallowed. > Attaching a proposed patch. > = > Case 1 > Line: 95, File: "org/apache/cassandra/utils/Hex.java" > An actual failure in the underlying constructor will be lost. > Propose to log it. > {noformat} > try > { > s = stringConstructor.newInstance(0, c.length, c); > + } > + catch (InvocationTargetException ite) { > + // The underlying constructor failed. Unwrapping the > exception. > + logger.info("Underlying constructor throws exception: ", > ite.getCause()); > } > catch (Exception e) > { > // Swallowing as we'll just use a copying constructor > } > return s == null ? new String(c) : s; > {noformat} > == > = > Case 2 > Line: 192, File: "org/apache/cassandra/db/marshal/DynamicCompositeType.java" > The actual cause of comparator error can be lost as it can fail in multiple > locations. > {noformat} > AbstractType comparator = null; > int header = getShortLength(bb); > if ((header & 0x8000) == 0) > { > ByteBuffer value = getBytes(bb, header); > try > { > comparator = TypeParser.parse(ByteBufferUtil.string(value)); > } > catch (Exception e) > { > <--- can fail here > // we'll deal with this below since comparator == null > } > } > else > { > comparator = aliases.get((byte)(header & 0xFF)); > <--- can fail here > } > if (comparator == null) > throw new MarshalException("Cannot find comparator for component > " + i); > {noformat} > Propose to log the exception. > == > = > Case 3 > Line: 239, File: "org/apache/cassandra/tools/NodeProbe.java" > Exception ignored in finally. Propose log them with debug or trace. > {noformat} > 232: finally > 233: { > 234: try > 235: { > 236: ssProxy.removeNotificationListener(runner); > 236: ssProxy.removeNotificationListener(runner); > 237: jmxc.removeConnectionNotificationListener(runner); > 238: } > 239: catch (Throwable ignored) {} > 240: } > {noformat} > Similar case is at line 264 in the same file. > == -- This message was sent by Atlassian JIRA (v6.1.5#6160)
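The three cases share the same anti-pattern: a broad catch that discards the exception, which hides the root cause whenever the fallback path also misbehaves. Below is a generic, hedged sketch of the direction the patch proposes, logging swallowed exceptions at a low level instead of dropping them; slf4j is assumed (which trunk already uses via logback), and the helper itself is hypothetical.

{code}
// Generic sketch of the "log instead of swallow" pattern proposed in the patch.
// The cleanup target is a placeholder; only the logging discipline is the point.
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class QuietCleanup
{
    private static final Logger logger = LoggerFactory.getLogger(QuietCleanup.class);

    /** Runs best-effort cleanup; failures are logged at trace level rather than discarded. */
    static void closeQuietly(AutoCloseable resource)
    {
        if (resource == null)
            return;
        try
        {
            resource.close();
        }
        catch (Throwable t)
        {
            // Previously: catch (Throwable ignored) {} -- the cause was simply lost.
            logger.trace("Ignoring exception during cleanup of {}", resource, t);
        }
    }
}
{code}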
[jira] [Comment Edited] (CASSANDRA-6528) TombstoneOverwhelmingException is thrown while populating data in recently truncated CF
[ https://issues.apache.org/jira/browse/CASSANDRA-6528?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892315#comment-13892315 ] Machiel Groeneveld edited comment on CASSANDRA-6528 at 2/5/14 5:14 PM: --- I have the same issue, after inserting 216258 records (in one row) in a new database (I removed all files in the data directory files before starting) I couldn't run a select query (something like 'select * from partition_key = x') In the log I get org.apache.cassandra.db.filter.TombstoneOverwhelmingException was (Author: machielg): I have the same issue, after inserting 216258 records (in one row) in a new database (I removed all files in the data directory files before starting) I couldn't run a select query (something like 'select * from partition_key = x') > TombstoneOverwhelmingException is thrown while populating data in recently > truncated CF > --- > > Key: CASSANDRA-6528 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6528 > Project: Cassandra > Issue Type: Bug > Components: Core > Environment: Cassadra 2.0.3, Linux, 6 nodes >Reporter: Nikolai Grigoriev >Priority: Minor > > I am running some performance tests and recently I had to flush the data from > one of the tables and repopulate it. I have about 30M rows with a few columns > in each, about 5kb per row in in total. In order to repopulate the data I do > "truncate " from CQLSH and then relaunch the test. The test simply > inserts the data in the table, does not read anything. Shortly after > restarting the data generator I see this on one of the nodes: > {code} > INFO [HintedHandoff:655] 2013-12-26 16:45:42,185 HintedHandOffManager.java > (line 323) Started hinted handoff f > or host: 985c8a08-3d92-4fad-a1d1-7135b2b9774a with IP: /10.5.45.158 > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 SliceQueryFilter.java (line > 200) Scanned ove > r 10 tombstones; query aborted (see tombstone_fail_threshold) > ERROR [HintedHandoff:655] 2013-12-26 16:45:42,680 CassandraDaemon.java (line > 187) Exception in thread Thread[HintedHandoff:655,1,main] > org.apache.cassandra.db.filter.TombstoneOverwhelmingException > at > org.apache.cassandra.db.filter.SliceQueryFilter.collectReducedColumns(SliceQueryFilter.java:201) > at > org.apache.cassandra.db.filter.QueryFilter.collateColumns(QueryFilter.java:122) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:80) > at > org.apache.cassandra.db.filter.QueryFilter.collateOnDiskAtom(QueryFilter.java:72) > at > org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:297) > at > org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:56) > at > org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1487) > at > org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1306) > at > org.apache.cassandra.db.HintedHandOffManager.doDeliverHintsToEndpoint(HintedHandOffManager.java:351) > at > org.apache.cassandra.db.HintedHandOffManager.deliverHintsToEndpoint(HintedHandOffManager.java:309) > at > org.apache.cassandra.db.HintedHandOffManager.access$4(HintedHandOffManager.java:281) > at > org.apache.cassandra.db.HintedHandOffManager$4.run(HintedHandOffManager.java:530) > at > java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) > at > java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) > at java.lang.Thread.run(Thread.java:724) > INFO [OptionalTasks:1] 2013-12-26 
16:45:53,946 MeteredFlusher.java (line 63) > flushing high-traffic column family CFS(Keyspace='test_jmeter', > ColumnFamily='test_profiles') (estimated 192717267 bytes) > {code} > I am inserting the data with CL=1. > It seems to be happening every time I do it. But I do not see any errors on > the client side and the node seems to continue operating, this is why I think > it is not a major issue. Maybe not an issue at all, but the message is logged > as ERROR. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6655) Writing mostly deletes to a Memtable results in undercounting the table's occupancy so it may not flush
[ https://issues.apache.org/jira/browse/CASSANDRA-6655?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13892316#comment-13892316 ] Sylvain Lebresne commented on CASSANDRA-6655: - bq. Want me to repatch, or do you want to ninja it in? I can change while committing I guess. > Writing mostly deletes to a Memtable results in undercounting the table's > occupancy so it may not flush > --- > > Key: CASSANDRA-6655 > URL: https://issues.apache.org/jira/browse/CASSANDRA-6655 > Project: Cassandra > Issue Type: Improvement >Reporter: Benedict >Assignee: Benedict >Priority: Minor > Fix For: 2.0.5, 2.0.6 > > Attachments: tmp-2.1.patch, tmp.patch > > > In the extreme case of only deletes the memtable will never flush, and we > will OOM. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
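The failure mode in the summary comes down to simple accounting: if the flush decision is driven by a counter that only grows when columns are added, a stream of pure deletes keeps piling tombstones onto the heap while the counter stays near zero, so the threshold is never crossed and the node eventually OOMs. A toy illustration with made-up numbers follows; the threshold and per-tombstone cost are assumptions, not Cassandra's real values.

{code}
// Toy illustration of why a delete-only workload never crosses a flush threshold
// when deletions are not counted. All numbers are made up for the example.
public class FlushAccountingSketch
{
    static final long FLUSH_THRESHOLD_BYTES = 64L * 1024 * 1024;

    public static void main(String[] args)
    {
        long countedOccupancy = 0;   // what the old accounting saw
        long actualHeapHeld   = 0;   // what the JVM actually held

        int tombstoneSize = 40;      // assumed per-tombstone overhead in bytes
        for (long i = 0; i < 10_000_000; i++)
        {
            actualHeapHeld += tombstoneSize;
            // old accounting: a delete carries no columns, so nothing was added to countedOccupancy
        }

        System.out.println("counted: " + countedOccupancy + " bytes -> flush? "
                           + (countedOccupancy > FLUSH_THRESHOLD_BYTES));
        System.out.println("actual:  " + actualHeapHeld + " bytes held in memory (eventually OOM)");
    }
}
{code}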