git commit: Properly validate cellname max size in CQL3
Repository: cassandra Updated Branches: refs/heads/trunk 110990401 - 9ea99491e Properly validate cellname max size in CQL3 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9ea99491 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9ea99491 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9ea99491 Branch: refs/heads/trunk Commit: 9ea99491e846db82777bfdc740e409b827c07238 Parents: 1109904 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 09:57:07 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 09:57:18 2014 +0100 -- .../org/apache/cassandra/cql3/QueryProcessor.java | 18 +- .../apache/cassandra/cql3/UpdateParameters.java | 12 ++-- .../cql3/statements/SelectStatement.java | 2 +- 3 files changed, 16 insertions(+), 16 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/9ea99491/src/java/org/apache/cassandra/cql3/QueryProcessor.java -- diff --git a/src/java/org/apache/cassandra/cql3/QueryProcessor.java b/src/java/org/apache/cassandra/cql3/QueryProcessor.java index 7035f63..f2559e6 100644 --- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java +++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java @@ -34,8 +34,7 @@ import org.apache.cassandra.cql3.hooks.*; import org.apache.cassandra.cql3.statements.*; import org.apache.cassandra.transport.messages.ResultMessage; import org.apache.cassandra.db.*; -import org.apache.cassandra.db.composites.CellName; -import org.apache.cassandra.db.composites.Composite; +import org.apache.cassandra.db.composites.*; import org.apache.cassandra.exceptions.*; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; @@ -145,24 +144,25 @@ public class QueryProcessor } } -public static void validateCellNames(IterableCellName cellNames) throws InvalidRequestException +public static void validateCellNames(IterableCellName cellNames, 
CellNameType type) throws InvalidRequestException { for (CellName name : cellNames) -validateCellName(name); +validateCellName(name, type); } -public static void validateCellName(CellName name) throws InvalidRequestException +public static void validateCellName(CellName name, CellNameType type) throws InvalidRequestException { -validateComposite(name); +validateComposite(name, type); if (name.isEmpty()) throw new InvalidRequestException(Invalid empty value for clustering column of COMPACT TABLE); } -public static void validateComposite(Composite name) throws InvalidRequestException +public static void validateComposite(Composite name, CType type) throws InvalidRequestException { -if (name.dataSize() Cell.MAX_NAME_LENGTH) +long serializedSize = type.serializer().serializedSize(name, TypeSizes.NATIVE); +if (serializedSize Cell.MAX_NAME_LENGTH) throw new InvalidRequestException(String.format(The sum of all clustering columns is too long (%s %s), -name.dataSize(), +serializedSize, Cell.MAX_NAME_LENGTH)); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/9ea99491/src/java/org/apache/cassandra/cql3/UpdateParameters.java -- diff --git a/src/java/org/apache/cassandra/cql3/UpdateParameters.java b/src/java/org/apache/cassandra/cql3/UpdateParameters.java index a3553f4..87ce22e 100644 --- a/src/java/org/apache/cassandra/cql3/UpdateParameters.java +++ b/src/java/org/apache/cassandra/cql3/UpdateParameters.java @@ -54,27 +54,27 @@ public class UpdateParameters public Cell makeColumn(CellName name, ByteBuffer value) throws InvalidRequestException { -QueryProcessor.validateCellName(name); +QueryProcessor.validateCellName(name, metadata.comparator); return Cell.create(name, value, timestamp, ttl, metadata); } public Cell makeTombstone(CellName name) throws InvalidRequestException { -QueryProcessor.validateCellName(name); +QueryProcessor.validateCellName(name, metadata.comparator); return new DeletedCell(name, localDeletionTime, timestamp); } public RangeTombstone 
makeRangeTombstone(ColumnSlice slice) throws InvalidRequestException { -QueryProcessor.validateComposite(slice.start); -QueryProcessor.validateComposite(slice.finish); +
git commit: CQL3: improve support for paginating over composites
Repository: cassandra Updated Branches: refs/heads/cassandra-2.0 ea28d3689 - 652ec6a5c CQL3: improve support for paginating over composites patch by slebresne; reviewed by iamaleksey for CASSANDRA-4851 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/652ec6a5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/652ec6a5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/652ec6a5 Branch: refs/heads/cassandra-2.0 Commit: 652ec6a5c36feae346c71f0ff009ec3b8457448b Parents: ea28d36 Author: Sylvain Lebresne sylv...@datastax.com Authored: Thu Jan 30 16:11:35 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 10:30:29 2014 +0100 -- CHANGES.txt | 1 + doc/cql3/CQL.textile| 24 +- .../org/apache/cassandra/cql3/CFDefinition.java | 10 +-- .../cassandra/cql3/ColumnNameBuilder.java | 11 +-- src/java/org/apache/cassandra/cql3/Cql.g| 16 .../apache/cassandra/cql3/QueryProcessor.java | 2 +- .../org/apache/cassandra/cql3/Relation.java | 17 +++- .../cassandra/cql3/statements/Restriction.java | 24 +- .../cql3/statements/SelectStatement.java| 82 +++- .../cassandra/db/marshal/CompositeType.java | 61 +++ 10 files changed, 175 insertions(+), 73 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/652ec6a5/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 571b8dd..fd3b1b7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -12,6 +12,7 @@ * Stop CommitLogSegment.close() from calling sync() (CASSANDRA-6652) * Make commitlog failure handling configurable (CASSANDRA-6364) * Avoid overlaps in LCS (CASSANDRA-6688) + * improve support for paginating over composites (4851) Merged from 1.2: * Fix broken streams when replacing with same IP (CASSANDRA-6622) * Fix upgradesstables NPE for non-CF-based indexes (CASSANDRA-6645) http://git-wip-us.apache.org/repos/asf/cassandra/blob/652ec6a5/doc/cql3/CQL.textile -- diff --git a/doc/cql3/CQL.textile 
b/doc/cql3/CQL.textile index f82fc19..03b95e0 100644 --- a/doc/cql3/CQL.textile +++ b/doc/cql3/CQL.textile @@ -1,6 +1,6 @@ link rel=StyleSheet href=CQL.css type=text/css media=screen -h1. Cassandra Query Language (CQL) v3.1.4 +h1. Cassandra Query Language (CQL) v3.1.5 span id=tableOfContents @@ -619,10 +619,12 @@ bc(syntax).. where-clause ::= relation ( AND relation )* -relation ::= identifier ('=' | '' | '' | '=' | '=') term +relation ::= identifier op term + | '(' identifier (',' identifier)* ')' op '(' term (',' term)* ')' | identifier IN '(' ( term ( ',' term)* )? ')' - | TOKEN '(' identifier ( ',' identifer)* ')' ('=' | '' | '' | '=' | '=') term + | TOKEN '(' identifier ( ',' identifer)* ')' op term +op ::= '=' | '' | '' | '=' | '=' order-by ::= ordering ( ',' odering )* ordering ::= identifer ( ASC | DESC )? p. @@ -676,7 +678,7 @@ CREATE TABLE posts ( The following query is allowed: bc(sample). -SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John's Blog' AND posted_at = '2012-01-01' AND posted_at '2012-01-31' +SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John''s Blog' AND posted_at = '2012-01-01' AND posted_at '2012-01-31' But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are set): @@ -691,6 +693,16 @@ SELECT * FROM posts WHERE token(userid) token('tom') AND token(userid) token Moreover, the @IN@ relation is only allowed on the last column of the partition key and on the last column of the full primary key. +It is also possible to group @CLUSTERING COLUMNS@ together in a relation, for instance: + +bc(sample). +SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) ('John''s Blog', '2012-01-01') + +will request all rows that sorts after the one having John's Blog as @blog_tile@ and '2012-01-01' for @posted_at@ in the clustering order. 
In particular, rows having a @posted_at <= '2012-01-01'@ will be returned as long as their @blog_title > 'John''s Blog'@, which wouldn't be the case for: + +bc(sample). +SELECT * FROM posts WHERE userid='john doe' AND blog_title > 'John''s Blog' AND posted_at > '2012-01-01' + h4(#selectOrderBy). @order-by@ The @ORDER BY@ option allows to select the order of the returned results. It takes as argument a list of column names along with the order for the column (@ASC@ for ascendant and @DESC@ for descendant, omitting the order being equivalent
[jira] [Commented] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903159#comment-13903159 ] Piotr Kołaczkowski commented on CASSANDRA-6707: --- If it can deserialize, it should do it. If it cannot (unsupported version), it should bail out, shout at the user don't do this! do nodetool drain first! and terminate without touching any data. This way it is now, I'm pretty sure some of the customers will run into this, and telling them it was their fault wouldn't make them less unhappy. AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. 
{noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903159#comment-13903159 ] Piotr Kołaczkowski edited comment on CASSANDRA-6707 at 2/17/14 11:57 AM: - If it can deserialize, it should do it. If it cannot (unsupported version), it should bail out, shout at the user don't do this! do nodetool drain first! and terminate without touching any data. This way it is now, I'm pretty sure some of the customers will run into this, and telling them it was their fault wouldn't make them less unhappy. We're not mongo to penalize them with silent data loss. was (Author: pkolaczk): If it can deserialize, it should do it. If it cannot (unsupported version), it should bail out, shout at the user don't do this! do nodetool drain first! and terminate without touching any data. This way it is now, I'm pretty sure some of the customers will run into this, and telling them it was their fault wouldn't make them less unhappy. AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. 
{noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903163#comment-13903163 ] Piotr Kołaczkowski commented on CASSANDRA-6707: --- I tested again doing nodetool drain before upgrading every single node and counts were correct. [~thobbs] patch works fine and lgtm. +1 Shouldn't we move the commitlog replay problem to another ticket? AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. 
{noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Updated] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Piotr Kołaczkowski updated CASSANDRA-6707: -- Reviewer: Piotr Kołaczkowski AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. {noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at 
org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6689) Partially Off Heap Memtables
[ https://issues.apache.org/jira/browse/CASSANDRA-6689?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903185#comment-13903185 ] Benedict commented on CASSANDRA-6689: - Pushed a slightly revised version [here|https://github.com/belliottsmith/cassandra/tree/offheap1], as didn't override a couple of methods in the Cell heirarchy. It's also merged with latest trunk. Partially Off Heap Memtables Key: CASSANDRA-6689 URL: https://issues.apache.org/jira/browse/CASSANDRA-6689 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Benedict Assignee: Benedict Fix For: 2.1 Move the contents of ByteBuffers off-heap for records written to a memtable. (See comments for details) -- This message was sent by Atlassian JIRA (v6.1.5#6160)
git commit: Fix count(*) queries in a mixed cluster
Repository: cassandra Updated Branches: refs/heads/cassandra-2.0 652ec6a5c - 44cf4a66d Fix count(*) queries in a mixed cluster patch by Tyler Hobbs; reviewed by Piotr KoÅaczkowski for CASSANDRA-6707 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/44cf4a66 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/44cf4a66 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/44cf4a66 Branch: refs/heads/cassandra-2.0 Commit: 44cf4a66d157643297b7ab791a57f323432e28c5 Parents: 652ec6a Author: Aleksey Yeschenko alek...@apache.org Authored: Mon Feb 17 16:39:29 2014 +0300 Committer: Aleksey Yeschenko alek...@apache.org Committed: Mon Feb 17 16:39:29 2014 +0300 -- CHANGES.txt | 3 ++- .../cql3/statements/SelectStatement.java| 4 ++- .../apache/cassandra/net/MessagingService.java | 26 +++- 3 files changed, 30 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index fd3b1b7..c9fabd2 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -12,7 +12,8 @@ * Stop CommitLogSegment.close() from calling sync() (CASSANDRA-6652) * Make commitlog failure handling configurable (CASSANDRA-6364) * Avoid overlaps in LCS (CASSANDRA-6688) - * improve support for paginating over composites (4851) + * Improve support for paginating over composites (CASSANDRA-4851) + * Fix count(*) queries in a mixed cluster (CASSANDRA-6707) Merged from 1.2: * Fix broken streams when replacing with same IP (CASSANDRA-6622) * Fix upgradesstables NPE for non-CF-based indexes (CASSANDRA-6645) http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java index d42fd76..52a7c70 100644 --- 
a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java @@ -37,6 +37,7 @@ import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.*; import org.apache.cassandra.dht.*; import org.apache.cassandra.exceptions.*; +import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.service.StorageProxy; @@ -165,7 +166,8 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache int pageSize = options.getPageSize(); // A count query will never be paged for the user, but we always page it internally to avoid OOM. // If we user provided a pageSize we'll use that to page internally (because why not), otherwise we use our default -if (parameters.isCount pageSize = 0) +// Note that if there are some nodes in the cluster with a version less than 2.0, we can't use paging (CASSANDRA-6707). 
+if (parameters.isCount pageSize = 0 MessagingService.instance().allNodesAtLeast20) pageSize = DEFAULT_COUNT_PAGE_SIZE; if (pageSize = 0 || command == null || !QueryPagers.mayNeedPaging(command, pageSize)) http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/src/java/org/apache/cassandra/net/MessagingService.java -- diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java index 232cf6a..ad86bbd 100644 --- a/src/java/org/apache/cassandra/net/MessagingService.java +++ b/src/java/org/apache/cassandra/net/MessagingService.java @@ -73,6 +73,8 @@ public final class MessagingService implements MessagingServiceMBean public static final int VERSION_20 = 7; public static final int current_version = VERSION_20; +public boolean allNodesAtLeast20 = true; + /** * we preface every message with this number so the recipient can validate the sender is sane */ @@ -742,14 +744,36 @@ public final class MessagingService implements MessagingServiceMBean public int setVersion(InetAddress endpoint, int version) { logger.debug(Setting version {} for {}, version, endpoint); +if (version VERSION_20) +allNodesAtLeast20 = false; Integer v = versions.put(endpoint, version); + +// if the version was increased to 2.0 or later, see if all nodes are = 2.0 now +if (v != null v VERSION_20 version = VERSION_20) +
[jira] [Commented] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903238#comment-13903238 ] Aleksey Yeschenko commented on CASSANDRA-6707: -- Committed to 2.0, waiting for 2.0-trunk merge of another issue before committing to trunk. AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. 
{noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Resolved] (CASSANDRA-6707) AIOOBE when doing select count(*) from on a mixed cluster.
[ https://issues.apache.org/jira/browse/CASSANDRA-6707?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Aleksey Yeschenko resolved CASSANDRA-6707. -- Resolution: Fixed Fix Version/s: 2.0.6 Committed, thanks everybody. AIOOBE when doing select count(*) from on a mixed cluster. -- Key: CASSANDRA-6707 URL: https://issues.apache.org/jira/browse/CASSANDRA-6707 Project: Cassandra Issue Type: Bug Components: Core Environment: old nodes: Cassandra 1.2.16 from DSE 3.2.5 (unreleased) new node: Cassandra 2.0.5 from DSE 4.0.0 (unreleased) Reporter: Piotr Kołaczkowski Assignee: Tyler Hobbs Priority: Critical Fix For: 2.0.6 Attachments: 6707.patch After upgrading one node from 1.2 to 2.0, the following query fails with timeout: {noformat} Connected to test at localhost:9160. [cqlsh 4.1.0 | Cassandra 2.0.5.1-SNAPSHOT | CQL spec 3.1.1 | Thrift protocol 19.39.0] Use HELP for help. cqlsh select count(*) from cfs.sblocks; Request did not complete within rpc_timeout. {noformat} Table definition: {noformat} cqlsh describe columnfamily cfs.sblocks; CREATE TABLE sblocks ( key blob, column1 blob, value blob, PRIMARY KEY (key, column1) ) WITH COMPACT STORAGE AND bloom_filter_fp_chance=0.68 AND caching='KEYS_ONLY' AND comment='Stores blocks of information associated with a inode' AND dclocal_read_repair_chance=0.00 AND gc_grace_seconds=864000 AND index_interval=128 AND read_repair_chance=0.10 AND replicate_on_write='true' AND populate_io_cache_on_flush='true' AND default_time_to_live=0 AND speculative_retry='99.0PERCENTILE' AND memtable_flush_period_in_ms=0 AND compaction={'class': 'com.datastax.bdp.hadoop.cfs.compaction.CFSCompactionStrategy'} AND compression={}; {noformat} The 1.2 node reports the following error: {noformat} ERROR 08:38:02,006 Exception in thread Thread[Thread-32,5,main] java.lang.ArrayIndexOutOfBoundsException: 36 at org.apache.cassandra.net.MessageIn.read(MessageIn.java:59) at 
org.apache.cassandra.net.IncomingTcpConnection.receiveMessage(IncomingTcpConnection.java:208) at org.apache.cassandra.net.IncomingTcpConnection.handleModernVersion(IncomingTcpConnection.java:140) at org.apache.cassandra.net.IncomingTcpConnection.run(IncomingTcpConnection.java:83) {noformat} There were no errors during the upgrade. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[2/3] git commit: Fix count(*) queries in a mixed cluster
Fix count(*) queries in a mixed cluster patch by Tyler Hobbs; reviewed by Piotr KoÅaczkowski for CASSANDRA-6707 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/44cf4a66 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/44cf4a66 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/44cf4a66 Branch: refs/heads/trunk Commit: 44cf4a66d157643297b7ab791a57f323432e28c5 Parents: 652ec6a Author: Aleksey Yeschenko alek...@apache.org Authored: Mon Feb 17 16:39:29 2014 +0300 Committer: Aleksey Yeschenko alek...@apache.org Committed: Mon Feb 17 16:39:29 2014 +0300 -- CHANGES.txt | 3 ++- .../cql3/statements/SelectStatement.java| 4 ++- .../apache/cassandra/net/MessagingService.java | 26 +++- 3 files changed, 30 insertions(+), 3 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index fd3b1b7..c9fabd2 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -12,7 +12,8 @@ * Stop CommitLogSegment.close() from calling sync() (CASSANDRA-6652) * Make commitlog failure handling configurable (CASSANDRA-6364) * Avoid overlaps in LCS (CASSANDRA-6688) - * improve support for paginating over composites (4851) + * Improve support for paginating over composites (CASSANDRA-4851) + * Fix count(*) queries in a mixed cluster (CASSANDRA-6707) Merged from 1.2: * Fix broken streams when replacing with same IP (CASSANDRA-6622) * Fix upgradesstables NPE for non-CF-based indexes (CASSANDRA-6645) http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java index d42fd76..52a7c70 100644 --- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java +++ 
b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java @@ -37,6 +37,7 @@ import org.apache.cassandra.db.filter.*; import org.apache.cassandra.db.marshal.*; import org.apache.cassandra.dht.*; import org.apache.cassandra.exceptions.*; +import org.apache.cassandra.net.MessagingService; import org.apache.cassandra.service.ClientState; import org.apache.cassandra.service.QueryState; import org.apache.cassandra.service.StorageProxy; @@ -165,7 +166,8 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache int pageSize = options.getPageSize(); // A count query will never be paged for the user, but we always page it internally to avoid OOM. // If we user provided a pageSize we'll use that to page internally (because why not), otherwise we use our default -if (parameters.isCount pageSize = 0) +// Note that if there are some nodes in the cluster with a version less than 2.0, we can't use paging (CASSANDRA-6707). +if (parameters.isCount pageSize = 0 MessagingService.instance().allNodesAtLeast20) pageSize = DEFAULT_COUNT_PAGE_SIZE; if (pageSize = 0 || command == null || !QueryPagers.mayNeedPaging(command, pageSize)) http://git-wip-us.apache.org/repos/asf/cassandra/blob/44cf4a66/src/java/org/apache/cassandra/net/MessagingService.java -- diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java index 232cf6a..ad86bbd 100644 --- a/src/java/org/apache/cassandra/net/MessagingService.java +++ b/src/java/org/apache/cassandra/net/MessagingService.java @@ -73,6 +73,8 @@ public final class MessagingService implements MessagingServiceMBean public static final int VERSION_20 = 7; public static final int current_version = VERSION_20; +public boolean allNodesAtLeast20 = true; + /** * we preface every message with this number so the recipient can validate the sender is sane */ @@ -742,14 +744,36 @@ public final class MessagingService implements MessagingServiceMBean public int 
setVersion(InetAddress endpoint, int version) { logger.debug(Setting version {} for {}, version, endpoint); +if (version VERSION_20) +allNodesAtLeast20 = false; Integer v = versions.put(endpoint, version); + +// if the version was increased to 2.0 or later, see if all nodes are = 2.0 now +if (v != null v VERSION_20 version = VERSION_20) +refreshAllNodesAtLeast20(); + return v == null ? version : v; } public
[1/3] git commit: CQL3: improve support for paginating over composites
Repository: cassandra Updated Branches: refs/heads/trunk 9ea99491e - 4c727f6f9 CQL3: improve support for paginating over composites patch by slebresne; reviewed by iamaleksey for CASSANDRA-4851 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/652ec6a5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/652ec6a5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/652ec6a5 Branch: refs/heads/trunk Commit: 652ec6a5c36feae346c71f0ff009ec3b8457448b Parents: ea28d36 Author: Sylvain Lebresne sylv...@datastax.com Authored: Thu Jan 30 16:11:35 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 10:30:29 2014 +0100 -- CHANGES.txt | 1 + doc/cql3/CQL.textile| 24 +- .../org/apache/cassandra/cql3/CFDefinition.java | 10 +-- .../cassandra/cql3/ColumnNameBuilder.java | 11 +-- src/java/org/apache/cassandra/cql3/Cql.g| 16 .../apache/cassandra/cql3/QueryProcessor.java | 2 +- .../org/apache/cassandra/cql3/Relation.java | 17 +++- .../cassandra/cql3/statements/Restriction.java | 24 +- .../cql3/statements/SelectStatement.java| 82 +++- .../cassandra/db/marshal/CompositeType.java | 61 +++ 10 files changed, 175 insertions(+), 73 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/652ec6a5/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 571b8dd..fd3b1b7 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -12,6 +12,7 @@ * Stop CommitLogSegment.close() from calling sync() (CASSANDRA-6652) * Make commitlog failure handling configurable (CASSANDRA-6364) * Avoid overlaps in LCS (CASSANDRA-6688) + * improve support for paginating over composites (4851) Merged from 1.2: * Fix broken streams when replacing with same IP (CASSANDRA-6622) * Fix upgradesstables NPE for non-CF-based indexes (CASSANDRA-6645) http://git-wip-us.apache.org/repos/asf/cassandra/blob/652ec6a5/doc/cql3/CQL.textile -- diff --git a/doc/cql3/CQL.textile b/doc/cql3/CQL.textile index 
f82fc19..03b95e0 100644 --- a/doc/cql3/CQL.textile +++ b/doc/cql3/CQL.textile @@ -1,6 +1,6 @@ link rel=StyleSheet href=CQL.css type=text/css media=screen -h1. Cassandra Query Language (CQL) v3.1.4 +h1. Cassandra Query Language (CQL) v3.1.5 span id=tableOfContents @@ -619,10 +619,12 @@ bc(syntax).. where-clause ::= relation ( AND relation )* -relation ::= identifier ('=' | '' | '' | '=' | '=') term +relation ::= identifier op term + | '(' identifier (',' identifier)* ')' op '(' term (',' term)* ')' | identifier IN '(' ( term ( ',' term)* )? ')' - | TOKEN '(' identifier ( ',' identifer)* ')' ('=' | '' | '' | '=' | '=') term + | TOKEN '(' identifier ( ',' identifer)* ')' op term +op ::= '=' | '' | '' | '=' | '=' order-by ::= ordering ( ',' odering )* ordering ::= identifer ( ASC | DESC )? p. @@ -676,7 +678,7 @@ CREATE TABLE posts ( The following query is allowed: bc(sample). -SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John's Blog' AND posted_at = '2012-01-01' AND posted_at '2012-01-31' +SELECT entry_title, content FROM posts WHERE userid='john doe' AND blog_title='John''s Blog' AND posted_at = '2012-01-01' AND posted_at '2012-01-31' But the following one is not, as it does not select a contiguous set of rows (and we suppose no secondary indexes are set): @@ -691,6 +693,16 @@ SELECT * FROM posts WHERE token(userid) token('tom') AND token(userid) token Moreover, the @IN@ relation is only allowed on the last column of the partition key and on the last column of the full primary key. +It is also possible to group @CLUSTERING COLUMNS@ together in a relation, for instance: + +bc(sample). +SELECT * FROM posts WHERE userid='john doe' AND (blog_title, posted_at) ('John''s Blog', '2012-01-01') + +will request all rows that sorts after the one having John's Blog as @blog_tile@ and '2012-01-01' for @posted_at@ in the clustering order. 
In particular, rows having a @post_at = '2012-01-01'@ will be returned as long as their @blog_title 'John''s Blog'@, which wouldn't be the case for: + +bc(sample). +SELECT * FROM posts WHERE userid='john doe' AND blog_title 'John''s Blog' AND posted_at '2012-01-01' + h4(#selectOrderBy). @order-by@ The @ORDER BY@ option allows to select the order of the returned results. It takes as argument a list of column names along with the order for the column (@ASC@ for ascendant and @DESC@ for descendant, omitting the order being equivalent to @ASC@).
[3/3] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Conflicts: src/java/org/apache/cassandra/cql3/CFDefinition.java src/java/org/apache/cassandra/cql3/ColumnNameBuilder.java src/java/org/apache/cassandra/cql3/Cql.g src/java/org/apache/cassandra/cql3/statements/SelectStatement.java src/java/org/apache/cassandra/db/marshal/CompositeType.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4c727f6f Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4c727f6f Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4c727f6f Branch: refs/heads/trunk Commit: 4c727f6f97411d55e12d1b5615e5425d162b1ad8 Parents: 9ea9949 44cf4a6 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 15:02:44 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 15:02:44 2014 +0100 -- CHANGES.txt | 2 + doc/cql3/CQL.textile| 24 +- src/java/org/apache/cassandra/cql3/Cql.g| 16 .../apache/cassandra/cql3/QueryProcessor.java | 2 +- .../org/apache/cassandra/cql3/Relation.java | 17 +++- .../cassandra/cql3/statements/Restriction.java | 24 +- .../cql3/statements/SelectStatement.java| 85 +++- .../cassandra/db/marshal/CompositeType.java | 61 +++--- .../apache/cassandra/net/MessagingService.java | 26 +- 9 files changed, 197 insertions(+), 60 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4c727f6f/CHANGES.txt -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4c727f6f/doc/cql3/CQL.textile -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4c727f6f/src/java/org/apache/cassandra/cql3/Cql.g -- diff --cc src/java/org/apache/cassandra/cql3/Cql.g index 11291b6,6e7cf1c..55d8aac --- a/src/java/org/apache/cassandra/cql3/Cql.g +++ b/src/java/org/apache/cassandra/cql3/Cql.g @@@ -964,8 -881,22 +964,24 @@@ relation[ListRelation clauses { $clauses.add(new Relation(name, Relation.Type.IN, marker)); } | name=cident K_IN { Relation rel = Relation.createInRelation($name.id); 
} '(' ( f1=term { rel.addInValue(f1); } (',' fN=term { rel.addInValue(fN); } )* )? ')' { $clauses.add(rel); } +| name=cident K_CONTAINS { Relation.Type rt = Relation.Type.CONTAINS; } (K_KEY { rt = Relation.Type.CONTAINS_KEY; })? +t=term { $clauses.add(new Relation(name, rt, t)); } + | { + ListColumnIdentifier ids = new ArrayListColumnIdentifier(); + ListTerm.Raw terms = new ArrayListTerm.Raw(); + } + '(' n1=cident { ids.add(n1); } (',' ni=cident { ids.add(ni); })* ')' + type=relationType + '(' t1=term { terms.add(t1); } (',' ti=term { terms.add(ti); })* ')' + { + if (type == Relation.Type.IN) + addRecognitionError(Cannot use IN relation with tuple notation); + if (ids.size() != terms.size()) + addRecognitionError(String.format(Number of values ( + terms.size() + ) in tuple notation doesn't match the number of column names ( + ids.size() + ))); + else + for (int i = 0; i ids.size(); i++) + $clauses.add(new Relation(ids.get(i), type, terms.get(i), i == 0 ? null : ids.get(i-1))); + } | '(' relation[$clauses] ')' ; http://git-wip-us.apache.org/repos/asf/cassandra/blob/4c727f6f/src/java/org/apache/cassandra/cql3/QueryProcessor.java -- diff --cc src/java/org/apache/cassandra/cql3/QueryProcessor.java index f2559e6,167533f..5acb367 --- a/src/java/org/apache/cassandra/cql3/QueryProcessor.java +++ b/src/java/org/apache/cassandra/cql3/QueryProcessor.java @@@ -45,10 -44,10 +45,10 @@@ import org.apache.cassandra.utils.Seman public class QueryProcessor { - public static final SemanticVersion CQL_VERSION = new SemanticVersion(3.1.4); + public static final SemanticVersion CQL_VERSION = new SemanticVersion(3.1.5); private static final Logger logger = LoggerFactory.getLogger(QueryProcessor.class); -private static final MemoryMeter meter = new MemoryMeter(); +private static final MemoryMeter meter = new MemoryMeter().withGuessing(MemoryMeter.Guess.FALLBACK_BEST); private static final long MAX_CACHE_PREPARED_MEMORY = Runtime.getRuntime().maxMemory() / 256; private static final int 
MAX_CACHE_PREPARED_COUNT = 1;
git commit: Undo CASSANDRA-6707 from trunk
Repository: cassandra Updated Branches: refs/heads/trunk 4c727f6f9 - 8c6541715 Undo CASSANDRA-6707 from trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8c654171 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8c654171 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8c654171 Branch: refs/heads/trunk Commit: 8c6541715067a4ae9e3bb583c49d4b7ac0bb2fff Parents: 4c727f6 Author: Aleksey Yeschenko alek...@apache.org Authored: Mon Feb 17 17:08:23 2014 +0300 Committer: Aleksey Yeschenko alek...@apache.org Committed: Mon Feb 17 17:08:23 2014 +0300 -- .../cql3/statements/SelectStatement.java| 2 +- .../apache/cassandra/net/MessagingService.java | 26 +--- 2 files changed, 2 insertions(+), 26 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/8c654171/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java index 6b61ea5..e08b960 100644 --- a/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java +++ b/src/java/org/apache/cassandra/cql3/statements/SelectStatement.java @@ -166,7 +166,7 @@ public class SelectStatement implements CQLStatement, MeasurableForPreparedCache // A count query will never be paged for the user, but we always page it internally to avoid OOM. // If we user provided a pageSize we'll use that to page internally (because why not), otherwise we use our default // Note that if there are some nodes in the cluster with a version less than 2.0, we can't use paging (CASSANDRA-6707). 
-if (parameters.isCount pageSize = 0 MessagingService.instance().allNodesAtLeast20) +if (parameters.isCount pageSize = 0) pageSize = DEFAULT_COUNT_PAGE_SIZE; if (pageSize = 0 || command == null || !QueryPagers.mayNeedPaging(command, pageSize)) http://git-wip-us.apache.org/repos/asf/cassandra/blob/8c654171/src/java/org/apache/cassandra/net/MessagingService.java -- diff --git a/src/java/org/apache/cassandra/net/MessagingService.java b/src/java/org/apache/cassandra/net/MessagingService.java index 22bdbe8..9713576 100644 --- a/src/java/org/apache/cassandra/net/MessagingService.java +++ b/src/java/org/apache/cassandra/net/MessagingService.java @@ -74,8 +74,6 @@ public final class MessagingService implements MessagingServiceMBean public static final int VERSION_21 = 8; public static final int current_version = VERSION_21; -public boolean allNodesAtLeast20 = true; - /** * we preface every message with this number so the recipient can validate the sender is sane */ @@ -745,36 +743,14 @@ public final class MessagingService implements MessagingServiceMBean public int setVersion(InetAddress endpoint, int version) { logger.debug(Setting version {} for {}, version, endpoint); -if (version VERSION_20) -allNodesAtLeast20 = false; Integer v = versions.put(endpoint, version); - -// if the version was increased to 2.0 or later, see if all nodes are = 2.0 now -if (v != null v VERSION_20 version = VERSION_20) -refreshAllNodesAtLeast20(); - return v == null ? version : v; } public void resetVersion(InetAddress endpoint) { logger.debug(Reseting version for {}, endpoint); -Integer removed = versions.remove(endpoint); -if (removed != null removed = VERSION_20) -refreshAllNodesAtLeast20(); -} - -private void refreshAllNodesAtLeast20() -{ -for (Integer version: versions.values()) -{ -if (version VERSION_20) -{ -allNodesAtLeast20 = false; -return; -} -} -allNodesAtLeast20 = true; +versions.remove(endpoint); } public int getVersion(InetAddress endpoint)
[jira] [Created] (CASSANDRA-6714) Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss
Piotr Kołaczkowski created CASSANDRA-6714: - Summary: Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss Key: CASSANDRA-6714 URL: https://issues.apache.org/jira/browse/CASSANDRA-6714 Project: Cassandra Issue Type: Bug Components: Core Environment: Old node: Cassandra 1.2.15 (DSE) New node: Cassandra 2.0.5.1 (DSE) Reporter: Piotr Kołaczkowski Priority: Critical Accidentally forgetting to nodetool drain or flush before upgrading to 2.0 causes silent loss of unflushed data. No errors or warnings are reported in the system.log. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Assigned] (CASSANDRA-6714) Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss
[ https://issues.apache.org/jira/browse/CASSANDRA-6714?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Aleksey Yeschenko reassigned CASSANDRA-6714: Assignee: Aleksey Yeschenko Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss --- Key: CASSANDRA-6714 URL: https://issues.apache.org/jira/browse/CASSANDRA-6714 Project: Cassandra Issue Type: Bug Components: Core Environment: Old node: Cassandra 1.2.15 (DSE) New node: Cassandra 2.0.5.1 (DSE) Reporter: Piotr Kołaczkowski Assignee: Aleksey Yeschenko Priority: Critical Accidentally forgetting to nodetool drain or flush before upgrading to 2.0 causes silent loss of unflushed data. No errors or warnings are reported in the system.log. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6714) Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss
[ https://issues.apache.org/jira/browse/CASSANDRA-6714?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903258#comment-13903258 ] Piotr Kołaczkowski commented on CASSANDRA-6714: --- Discovered while testing CASSANDRA-6707. Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss --- Key: CASSANDRA-6714 URL: https://issues.apache.org/jira/browse/CASSANDRA-6714 Project: Cassandra Issue Type: Bug Components: Core Environment: Old node: Cassandra 1.2.15 (DSE) New node: Cassandra 2.0.5.1 (DSE) Reporter: Piotr Kołaczkowski Assignee: Aleksey Yeschenko Priority: Critical Accidentally forgetting to nodetool drain or flush before upgrading to 2.0 causes silent loss of unflushed data. No errors or warnings are reported in the system.log. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6714) Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss
[ https://issues.apache.org/jira/browse/CASSANDRA-6714?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903258#comment-13903258 ] Piotr Kołaczkowski edited comment on CASSANDRA-6714 at 2/17/14 2:11 PM: Discovered while testing CASSANDRA-6707. {quote} However, I will note that after upgrading the first node to 2.0, the results of select count(*) were consistent but incorrect (I got 957815 as the result). After upgrading the second node to 2.0 I got another consistent but incorrect result (912873). {quote} was (Author: pkolaczk): Discovered while testing CASSANDRA-6707. Upgrading from 1.2 to 2.0 without prior nodetool flush causes data loss --- Key: CASSANDRA-6714 URL: https://issues.apache.org/jira/browse/CASSANDRA-6714 Project: Cassandra Issue Type: Bug Components: Core Environment: Old node: Cassandra 1.2.15 (DSE) New node: Cassandra 2.0.5.1 (DSE) Reporter: Piotr Kołaczkowski Assignee: Aleksey Yeschenko Priority: Critical Accidentally forgetting to nodetool drain or flush before upgrading to 2.0 causes silent loss of unflushed data. No errors or warnings are reported in the system.log. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903298#comment-13903298 ] Sylvain Lebresne commented on CASSANDRA-6561: - {quote} This query should throw IRE, just like DELETEs do now. Typo in ModificationStatement in Invalid restriction on clustering column %s since %s statement only modify static columns - should be the statement %s modifies. {quote} Pushed a simple commit for those on the last branch (for ModificationStatement, I did fixed the message but kept the %s statement over the statement %s, I think since the UPDATE statement modifies ... reads better since the statement UPDATE modifies ...). bq. I think we should be doing more extensive validation for cas batches. I agree, though that part is slightly annoying. We can't find duplicates until we have the bound values since we can't tell which row a statement applies to in general before that time. And with the current code, conditions reuse the Operation code which makes it slightly harder to do cleanly. Besides, as it turns out, conditions validation is broken for collections currently. We allow stuff like {noformat} UPDTATE ... IF s = s + {2, 3} {noformat} which is non-sensical. And collection conditions is even more broken in the sense that even {noformat} UPDTATE ... IF s = {2, 3} {noformat} is not properly handled as we only check that the set contains 2 and 3, but not that it contains only those. In any case, I think the proper way to handle all this is to refactor stuffs a bit so that conditions don't reuse Operation which is not adapted. To a large extent, fixing collections mishandling is not specific to this issue, but it bothers me to do a dirty fix for just the duplicate validation issue alone so I pushed a last commit (still on the same branch) that fixes all that. I'll note that this refactor will makes it relatively easy to add support for stuff like: {noformat} UPDATE ... 
IF s CONTAINS 2; {noformat} or even {noformat} UPDATE ... IF v = 2; {noformat} which are surely nice to have, but that definitively belong to a followup ticket. Static columns in CQL3 -- Key: CASSANDRA-6561 URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 Project: Cassandra Issue Type: New Feature Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Fix For: 2.0.6 I'd like to suggest the following idea for adding static columns to CQL3. I'll note that the basic idea has been suggested by jhalliday on irc but the rest of the details are mine and I should be blamed for anything stupid in what follows. Let me start with a rational: there is 2 main family of CF that have been historically used in Thrift: static ones and dynamic ones. CQL3 handles both family through the presence or not of clustering columns. There is however some cases where mixing both behavior has its use. I like to think of those use cases as 3 broad category: # to denormalize small amounts of not-entirely-static data in otherwise static entities. It's say tags for a product or custom properties in a user profile. This is why we've added CQL3 collections. Importantly, this is the *only* use case for which collections are meant (which doesn't diminishes their usefulness imo, and I wouldn't disagree that we've maybe not communicated this too well). # to optimize fetching both a static entity and related dynamic ones. Say you have blog posts, and each post has associated comments (chronologically ordered). *And* say that a very common query is fetch a post and its 50 last comments. In that case, it *might* be beneficial to store a blog post (static entity) in the same underlying CF than it's comments for performance reason. So that fetch a post and it's 50 last comments is just one slice internally. # you want to CAS rows of a dynamic partition based on some partition condition. This is the same use case than why CASSANDRA-5633 exists for. 
As said above, 1) is already covered by collections, but 2) and 3) are not (and I strongly believe collections are not the right fit, API wise, for those). Also, note that I don't want to underestimate the usefulness of 2). In most cases, using a separate table for the blog posts and the comments is The Right Solution, and trying to do 2) is premature optimisation. Yet, when used properly, that kind of optimisation can make a difference, so I think having a relatively native solution for it in CQL3 could make sense. Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have the feeling that static columns actually are a more natural approach (in term of API). That's arguably more of a personal opinion/feeling though. So long story short, CQL3 lacks a way to mix both some static and
Git Push Summary
Repository: cassandra Updated Branches: refs/heads/cassandra-2.1 [created] 8c6541715
[1/2] Versions and licenses in preparation for 2.1.0-beta1 release
Repository: cassandra Updated Branches: refs/heads/trunk 8c6541715 - 9f1485623 http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java index b3cca10..235d143 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + import java.io.Serializable; import java.nio.ByteBuffer; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java index 2a8ff76..18f570c 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + import java.io.Serializable; import java.util.Arrays; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java index 995e7d6..289dd30 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + import java.util.ArrayList; import java.util.List; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMulti.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMulti.java
[jira] [Commented] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903340#comment-13903340 ] Joshua McKenzie commented on CASSANDRA-6283: I've reproduced leaked file handles on repair in a lab on 2.0.5 w/leakfinalizer.patch. Nodes start up without issue - I'm not seeing any LEAK or File-Not-Found on regular init, and the LEAK aren't showing up until repair kicks off. W/the finalizer patch repair runs through to completion. Andreas - have you had a chance to try out 2.0.5 w/the patch yet? lastly - the leaks I'm seeing look like they're all isolated to a single case - streaming data outbound during the repair process: ERROR [Finalizer] 2014-02-17 09:21:52,922 RandomAccessReader.java (line 399) LEAK finalizer had to clean up java.lang.Exception: RAR for C:\var\lib\cassandra\data\Keyspace1\Standard1\Keyspace1-Standard1-jb-41-CRC.db allocated at org.apache.cassandra.io.util.RandomAccessReader.init(RandomAccessReader.java:66) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:106) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:98) at org.apache.cassandra.io.util.DataIntegrityMetadata$ChecksumValidator.init(DataIntegrityMetadata.java:53) at org.apache.cassandra.io.util.DataIntegrityMetadata.checksumValidator(DataIntegrityMetadata.java:40) at org.apache.cassandra.streaming.StreamWriter.write(StreamWriter.java:76) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:59) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:42) at org.apache.cassandra.streaming.messages.StreamMessage.serialize(StreamMessage.java:45) at org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.sendMessage(ConnectionHandler.java:383) at org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.run(ConnectionHandler.java:355) at 
java.lang.Thread.run(Thread.java:744) Andreas - could you confirm whether or not this matches what you're seeing in your environment? I'm curious if this is a dangling file handle like we've seen in other related tickets or if this is perhaps a race condition on access Windows is intolerant of. Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 Project: Cassandra Issue Type: Bug Components: Core Environment: Windows 7 (32) / Java 1.7.0.45 Reporter: Andreas Schnitzerling Assignee: Joshua McKenzie Labels: compaction Fix For: 2.0.6 Attachments: leakdetect.patch, screenshot-1.jpg, system.log Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause is: Opened file handles seem to be lost and not closed properly. Win 7 blames, that another process is still using the file (but its obviously cassandra). Only restart of the server makes the files deleted. But after heavy using (changes) of tables, there are about 24K files in the data folder (instead of 35 after every restart) and Cassandra crashes. I experiminted and I found out, that a finalizer fixes the problem. So after GC the files will be deleted (not optimal, but working fine). It runs now 2 days continously without problem. Possible fix/test: I wrote the following finalizer at the end of class org.apache.cassandra.io.util.RandomAccessReader: {code:title=RandomAccessReader.java|borderStyle=solid} @Override protected void finalize() throws Throwable { deallocate(); super.finalize(); } {code} Can somebody test / develop / patch it? Thx. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903340#comment-13903340 ] Joshua McKenzie edited comment on CASSANDRA-6283 at 2/17/14 4:11 PM: - I've reproduced leaked file handles on repair in a lab on 2.0.5 w/leakfinalizer.patch. Nodes start up without issue - I'm not seeing any LEAK or File-Not-Found on regular init, and the LEAK aren't showing up until repair kicks off. W/the finalizer patch repair runs through to completion. Andreas - have you had a chance to try out 2.0.5 w/the patch yet? lastly - the leaks I'm seeing look like they're all isolated to a single case - streaming data outbound during the repair process: {code:title=error|borderStyle=solid} ERROR [Finalizer] 2014-02-17 09:21:52,922 RandomAccessReader.java (line 399) LEAK finalizer had to clean up java.lang.Exception: RAR for C:\var\lib\cassandra\data\Keyspace1\Standard1\Keyspace1-Standard1-jb-41-CRC.db allocated at org.apache.cassandra.io.util.RandomAccessReader.init(RandomAccessReader.java:66) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:106) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:98) at org.apache.cassandra.io.util.DataIntegrityMetadata$ChecksumValidator.init(DataIntegrityMetadata.java:53) at org.apache.cassandra.io.util.DataIntegrityMetadata.checksumValidator(DataIntegrityMetadata.java:40) at org.apache.cassandra.streaming.StreamWriter.write(StreamWriter.java:76) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:59) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:42) at org.apache.cassandra.streaming.messages.StreamMessage.serialize(StreamMessage.java:45) at org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.sendMessage(ConnectionHandler.java:383) at 
org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.run(ConnectionHandler.java:355) at java.lang.Thread.run(Thread.java:744) {code} Andreas - could you confirm whether or not this matches what you're seeing in your environment? I'm curious if this is a dangling file handle like we've seen in other related tickets or if this is perhaps a race condition on access Windows is intolerant of. was (Author: joshuamckenzie): I've reproduced leaked file handles on repair in a lab on 2.0.5 w/leakfinalizer.patch. Nodes start up without issue - I'm not seeing any LEAK or File-Not-Found on regular init, and the LEAK aren't showing up until repair kicks off. W/the finalizer patch repair runs through to completion. Andreas - have you had a chance to try out 2.0.5 w/the patch yet? lastly - the leaks I'm seeing look like they're all isolated to a single case - streaming data outbound during the repair process: ERROR [Finalizer] 2014-02-17 09:21:52,922 RandomAccessReader.java (line 399) LEAK finalizer had to clean up java.lang.Exception: RAR for C:\var\lib\cassandra\data\Keyspace1\Standard1\Keyspace1-Standard1-jb-41-CRC.db allocated at org.apache.cassandra.io.util.RandomAccessReader.init(RandomAccessReader.java:66) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:106) at org.apache.cassandra.io.util.RandomAccessReader.open(RandomAccessReader.java:98) at org.apache.cassandra.io.util.DataIntegrityMetadata$ChecksumValidator.init(DataIntegrityMetadata.java:53) at org.apache.cassandra.io.util.DataIntegrityMetadata.checksumValidator(DataIntegrityMetadata.java:40) at org.apache.cassandra.streaming.StreamWriter.write(StreamWriter.java:76) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:59) at org.apache.cassandra.streaming.messages.OutgoingFileMessage$1.serialize(OutgoingFileMessage.java:42) at org.apache.cassandra.streaming.messages.StreamMessage.serialize(StreamMessage.java:45) at 
org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.sendMessage(ConnectionHandler.java:383) at org.apache.cassandra.streaming.ConnectionHandler$OutgoingMessageHandler.run(ConnectionHandler.java:355) at java.lang.Thread.run(Thread.java:744) Andreas - could you confirm whether or not this matches what you're seeing in your environment? I'm curious if this is a dangling file handle like we've seen in other related tickets or if this is perhaps a race condition on access Windows is intolerant of. Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL:
[jira] [Created] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
julien campan created CASSANDRA-6715: Summary: nodetool Cfhistograms doesn't see native protocol queries Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac OS X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank you for your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[2/2] git commit: Update NOTICE file
Update NOTICE file Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/73dcdbdf Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/73dcdbdf Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/73dcdbdf Branch: refs/heads/trunk Commit: 73dcdbdf294d5e44f44e7e7eb17655bc40240a61 Parents: 1828b92 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 17:30:13 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 17:30:13 2014 +0100 -- NOTICE.txt | 8 1 file changed, 8 insertions(+) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/73dcdbdf/NOTICE.txt -- diff --git a/NOTICE.txt b/NOTICE.txt index 03cf45a..14ffad2 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -52,3 +52,11 @@ Written by Pavel Yaskevich. LMAX Disruptor (http://lmax-exchange.github.io/disruptor/) Copyright 2011 LMAX Ltd. + +Airline +(https://github.com/airlift/airline) +Copyright 2011, Dain Sundstrom d...@iq80.com +Copyright 2010, Cedric Beust ced...@beust.com + +HLL++ support provided by stream-lib +(https://github.com/addthis/stream-lib)
[1/2] git commit: Remove now unused dependency on snaptree
Repository: cassandra Updated Branches: refs/heads/trunk 9f1485623 - 73dcdbdf2 Remove now unused depency on snaptree Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/1828b929 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/1828b929 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/1828b929 Branch: refs/heads/trunk Commit: 1828b929c2ea11b3a39373cf50dcdb17eecf10d0 Parents: 9f14856 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 17:23:04 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 17:23:04 2014 +0100 -- NOTICE.txt | 4 - build.xml | 1 - lib/licenses/snaptree-0.1.txt | 776 --- lib/snaptree-0.1.jar| Bin 55066 - 0 bytes .../apache/cassandra/utils/LongBTreeTest.java | 26 +- 5 files changed, 6 insertions(+), 801 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/NOTICE.txt -- diff --git a/NOTICE.txt b/NOTICE.txt index b093f8e..03cf45a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -37,10 +37,6 @@ Streaming compression support provided by ning-compress (https://github.com/ning/compress) Copyright 2009-2010 Ning, Inc. -Alternative map implementation provided by SnapTree -(https://github.com/nbronson/snaptree) -Written by Nathan G. Bronson et al. 
- CQL Native transport uses Netty (https://netty.io/) Copyright (C) 2011 The Netty Project http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/build.xml -- diff --git a/build.xml b/build.xml index 7b7535e..1a8de44 100644 --- a/build.xml +++ b/build.xml @@ -384,7 +384,6 @@ dependency groupId=org.apache.cassandra artifactId=cassandra-thrift version=${version} / dependency groupId=com.yammer.metrics artifactId=metrics-core version=2.2.0 / dependency groupId=com.addthis.metrics artifactId=reporter-config version=2.1.0 / - dependency groupId=edu.stanford.ppl artifactId=snaptree version=0.1 / dependency groupId=org.mindrot artifactId=jbcrypt version=0.3m / dependency groupId=io.airlift artifactId=airline version=0.6 / dependency groupId=io.netty artifactId=netty version=3.6.6.Final / http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/lib/licenses/snaptree-0.1.txt -- diff --git a/lib/licenses/snaptree-0.1.txt b/lib/licenses/snaptree-0.1.txt deleted file mode 100644 index 07324c5..000 --- a/lib/licenses/snaptree-0.1.txt +++ /dev/null @@ -1,776 +0,0 @@ - - - -!DOCTYPE html -html - head -meta charset='utf-8' -meta http-equiv=X-UA-Compatible content=chrome=1 -titledoc/LICENSE at master from nbronson/snaptree - GitHub/title -link rel=search type=application/opensearchdescription+xml href=/opensearch.xml title=GitHub / -link rel=fluid-icon href=https://github.com/fluidicon.png; title=GitHub / - - - - -meta content=authenticity_token name=csrf-param / -meta content=kCKdrZvsCWGNIGaBTKBWlG4gYXwI636kkFF5Jfbr//c= name=csrf-token / - -link href=https://a248.e.akamai.net/assets.github.com/stylesheets/bundles/github-ce4abc8fb736cacb557664dcd8194a5486c74f6b.css; media=screen rel=stylesheet type=text/css / - - -script src=https://a248.e.akamai.net/assets.github.com/javascripts/bundles/jquery-6c2aad85e5c2becfaac6d62ce0f290d10fa1725e.js; type=text/javascript/script -script 
src=https://a248.e.akamai.net/assets.github.com/javascripts/bundles/github-724a1478428e953614c0459ba27f5d900fc109be.js; type=text/javascript/script - - - link rel='permalink' href='/nbronson/snaptree/blob/b198f84b0c927f6b5cdef080552fc26aa004d3ee/doc/LICENSE' - - -meta name=description content=snaptree - Concurrent TreeMap w/ efficient support for clone() and consistent iteration / - link href=https://github.com/nbronson/snaptree/commits/master.atom; rel=alternate title=Recent Commits to snaptree:master type=application/atom+xml / - - /head - - - body class=logged_out page-blob vis-public env-production - - - - - - div id=header class=true clearfix -div class=container class=clearfix - a class=site-logo href=https://github.com; -!--[if IE] -img alt=GitHub class=github-logo src=https://a248.e.akamai.net/assets.github.com/images/modules/header/logov7.png?1323882717; / -img alt=GitHub class=github-logo-hover
[4/4] git commit: Update NOTICE file
Update NOTICE file Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/73dcdbdf Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/73dcdbdf Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/73dcdbdf Branch: refs/heads/cassandra-2.1 Commit: 73dcdbdf294d5e44f44e7e7eb17655bc40240a61 Parents: 1828b92 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 17:30:13 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 17:30:13 2014 +0100 -- NOTICE.txt | 8 1 file changed, 8 insertions(+) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/73dcdbdf/NOTICE.txt -- diff --git a/NOTICE.txt b/NOTICE.txt index 03cf45a..14ffad2 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -52,3 +52,11 @@ Written by Pavel Yaskevich. LMAX Disruptor (http://lmax-exchange.github.io/disruptor/) Copyright 2011 LMAX Ltd. + +Airline +(https://github.com/airlift/airline) +Copyright 2011, Dain Sundstrom d...@iq80.com +Copyright 2010, Cedric Beust ced...@beust.com + +HLL++ support provided by stream-lib +(https://github.com/addthis/stream-lib)
[1/4] Versions and licenses in preparation for 2.1.0-beta1 release
Repository: cassandra Updated Branches: refs/heads/cassandra-2.1 8c6541715 - 73dcdbdf2 http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java index b3cca10..235d143 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsColumn.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + import java.io.Serializable; import java.nio.ByteBuffer; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java index 2a8ff76..18f570c 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommand.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. 
+ * + */ + import java.io.Serializable; import java.util.Arrays; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java -- diff --git a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java index 995e7d6..289dd30 100644 --- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java +++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMixed.java @@ -1,4 +1,25 @@ package org.apache.cassandra.stress.settings; +/* + * + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * License); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * AS IS BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY + * KIND, either express or implied. See the License for the + * specific language governing permissions and limitations + * under the License. + * + */ + import java.util.ArrayList; import java.util.List; http://git-wip-us.apache.org/repos/asf/cassandra/blob/9f148562/tools/stress/src/org/apache/cassandra/stress/settings/SettingsCommandMulti.java -- diff --git
[3/4] git commit: Remove now unused dependency on snaptree
Remove now unused depency on snaptree Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/1828b929 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/1828b929 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/1828b929 Branch: refs/heads/cassandra-2.1 Commit: 1828b929c2ea11b3a39373cf50dcdb17eecf10d0 Parents: 9f14856 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Feb 17 17:23:04 2014 +0100 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Feb 17 17:23:04 2014 +0100 -- NOTICE.txt | 4 - build.xml | 1 - lib/licenses/snaptree-0.1.txt | 776 --- lib/snaptree-0.1.jar| Bin 55066 - 0 bytes .../apache/cassandra/utils/LongBTreeTest.java | 26 +- 5 files changed, 6 insertions(+), 801 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/NOTICE.txt -- diff --git a/NOTICE.txt b/NOTICE.txt index b093f8e..03cf45a 100644 --- a/NOTICE.txt +++ b/NOTICE.txt @@ -37,10 +37,6 @@ Streaming compression support provided by ning-compress (https://github.com/ning/compress) Copyright 2009-2010 Ning, Inc. -Alternative map implementation provided by SnapTree -(https://github.com/nbronson/snaptree) -Written by Nathan G. Bronson et al. 
- CQL Native transport uses Netty (https://netty.io/) Copyright (C) 2011 The Netty Project http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/build.xml -- diff --git a/build.xml b/build.xml index 7b7535e..1a8de44 100644 --- a/build.xml +++ b/build.xml @@ -384,7 +384,6 @@ dependency groupId=org.apache.cassandra artifactId=cassandra-thrift version=${version} / dependency groupId=com.yammer.metrics artifactId=metrics-core version=2.2.0 / dependency groupId=com.addthis.metrics artifactId=reporter-config version=2.1.0 / - dependency groupId=edu.stanford.ppl artifactId=snaptree version=0.1 / dependency groupId=org.mindrot artifactId=jbcrypt version=0.3m / dependency groupId=io.airlift artifactId=airline version=0.6 / dependency groupId=io.netty artifactId=netty version=3.6.6.Final / http://git-wip-us.apache.org/repos/asf/cassandra/blob/1828b929/lib/licenses/snaptree-0.1.txt -- diff --git a/lib/licenses/snaptree-0.1.txt b/lib/licenses/snaptree-0.1.txt deleted file mode 100644 index 07324c5..000 --- a/lib/licenses/snaptree-0.1.txt +++ /dev/null @@ -1,776 +0,0 @@ - - - -!DOCTYPE html -html - head -meta charset='utf-8' -meta http-equiv=X-UA-Compatible content=chrome=1 -titledoc/LICENSE at master from nbronson/snaptree - GitHub/title -link rel=search type=application/opensearchdescription+xml href=/opensearch.xml title=GitHub / -link rel=fluid-icon href=https://github.com/fluidicon.png; title=GitHub / - - - - -meta content=authenticity_token name=csrf-param / -meta content=kCKdrZvsCWGNIGaBTKBWlG4gYXwI636kkFF5Jfbr//c= name=csrf-token / - -link href=https://a248.e.akamai.net/assets.github.com/stylesheets/bundles/github-ce4abc8fb736cacb557664dcd8194a5486c74f6b.css; media=screen rel=stylesheet type=text/css / - - -script src=https://a248.e.akamai.net/assets.github.com/javascripts/bundles/jquery-6c2aad85e5c2becfaac6d62ce0f290d10fa1725e.js; type=text/javascript/script -script 
src=https://a248.e.akamai.net/assets.github.com/javascripts/bundles/github-724a1478428e953614c0459ba27f5d900fc109be.js; type=text/javascript/script - - - link rel='permalink' href='/nbronson/snaptree/blob/b198f84b0c927f6b5cdef080552fc26aa004d3ee/doc/LICENSE' - - -meta name=description content=snaptree - Concurrent TreeMap w/ efficient support for clone() and consistent iteration / - link href=https://github.com/nbronson/snaptree/commits/master.atom; rel=alternate title=Recent Commits to snaptree:master type=application/atom+xml / - - /head - - - body class=logged_out page-blob vis-public env-production - - - - - - div id=header class=true clearfix -div class=container class=clearfix - a class=site-logo href=https://github.com; -!--[if IE] -img alt=GitHub class=github-logo src=https://a248.e.akamai.net/assets.github.com/images/modules/header/logov7.png?1323882717; / -img alt=GitHub class=github-logo-hover src=https://a248.e.akamai.net/assets.github.com/images/modules/header/logov7-hover.png?1324325359; / -![endif]-- -img alt=GitHub
Git Push Summary
Repository: cassandra Updated Tags: refs/tags/2.1.0-beta1-tentative [created] 73dcdbdf2
[4/9] git commit: Fix snapshot repair not snapshotting coordinator
Fix snapshot repair not snapshotting coordinator patch by yukim; reviewed by sankalp kohli for CASSANDRA-6713 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6dfca3d3 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6dfca3d3 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6dfca3d3 Branch: refs/heads/trunk Commit: 6dfca3d329e4be1c6d47a7791de0a349c164f2ad Parents: 7937ee3 Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:08:59 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:08:59 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/service/AntiEntropyService.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 964e10c..f146166 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -9,6 +9,7 @@ * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) 1.2.15 http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/src/java/org/apache/cassandra/service/AntiEntropyService.java -- diff --git a/src/java/org/apache/cassandra/service/AntiEntropyService.java b/src/java/org/apache/cassandra/service/AntiEntropyService.java index eafab67..f766303 100644 --- a/src/java/org/apache/cassandra/service/AntiEntropyService.java +++ b/src/java/org/apache/cassandra/service/AntiEntropyService.java @@ -849,7 +849,7 @@ public class AntiEntropyService allEndpoints.add(FBUtilities.getBroadcastAddress()); if (isSequential) -makeSnapshots(endpoints); +makeSnapshots(allEndpoints); for (InetAddress endpoint : allEndpoints) treeRequests.add(new TreeRequest(getName(), 
endpoint, range, new CFPair(tablename, cfname)));
[6/9] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: src/java/org/apache/cassandra/service/AntiEntropyService.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4b50b2b2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4b50b2b2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4b50b2b2 Branch: refs/heads/cassandra-2.1 Commit: 4b50b2b2e41aeda86c166059826e8eb1498b24fc Parents: 44cf4a6 6dfca3d Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:12:26 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:12:26 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/repair/RepairJob.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/CHANGES.txt -- diff --cc CHANGES.txt index c9fabd2,f146166..bdfec11 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -25,33 -9,24 +25,34 @@@ Merged from 1.2 * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) - -1.2.15 - * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - - -1.2.14 - * Reverted code to limit CQL prepared statement cache by size (CASSANDRA-6592) - * add cassandra.default_messaging_version property to allow easier - upgrading from 1.1 (CASSANDRA-6619) - * Allow executing CREATE statements multiple times (CASSANDRA-6471) - * Don't send confusing info with timeouts (CASSANDRA-6491) - * Don't resubmit counter mutation runnables internally (CASSANDRA-6427) - * Don't drop local mutations without a hint (CASSANDRA-6510) - * Don't allow null 
max_hint_window_in_ms (CASSANDRA-6419) - * Validate SliceRange start and finish lengths (CASSANDRA-6521) +2.0.5 + * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) + * Add ks.cf names to tombstone logging (CASSANDRA-6597) + * Use LOCAL_QUORUM for LWT operations at LOCAL_SERIAL (CASSANDRA-6495) + * Wait for gossip to settle before accepting client connections (CASSANDRA-4288) + * Delete unfinished compaction incrementally (CASSANDRA-6086) + * Allow specifying custom secondary index options in CQL3 (CASSANDRA-6480) + * Improve replica pinning for cache efficiency in DES (CASSANDRA-6485) + * Fix LOCAL_SERIAL from thrift (CASSANDRA-6584) + * Don't special case received counts in CAS timeout exceptions (CASSANDRA-6595) + * Add support for 2.1 global counter shards (CASSANDRA-6505) + * Fix NPE when streaming connection is not yet established (CASSANDRA-6210) + * Avoid rare duplicate read repair triggering (CASSANDRA-6606) + * Fix paging discardFirst (CASSANDRA-6555) + * Fix ArrayIndexOutOfBoundsException in 2ndary index query (CASSANDRA-6470) + * Release sstables upon rebuilding 2i (CASSANDRA-6635) + * Add AbstractCompactionStrategy.startup() method (CASSANDRA-6637) + * SSTableScanner may skip rows during cleanup (CASSANDRA-6638) + * sstables from stalled repair sessions can resurrect deleted data (CASSANDRA-6503) + * Switch stress to use ITransportFactory (CASSANDRA-6641) + * Fix IllegalArgumentException during prepare (CASSANDRA-6592) + * Fix possible loss of 2ndary index entries during compaction (CASSANDRA-6517) + * Fix direct Memory on architectures that do not support unaligned long access + (CASSANDRA-6628) + * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) +Merged from 1.2: * fsync compression metadata (CASSANDRA-6531) * Validate CF existence on execution for prepared statement (CASSANDRA-6535) * Add ability to throttle batchlog replay (CASSANDRA-6550) 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/src/java/org/apache/cassandra/repair/RepairJob.java -- diff --cc src/java/org/apache/cassandra/repair/RepairJob.java index 16daf4e,000..6705c95 mode 100644,00..100644 --- a/src/java/org/apache/cassandra/repair/RepairJob.java +++ b/src/java/org/apache/cassandra/repair/RepairJob.java @@@ -1,224 -1,0 +1,224 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache
[5/9] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: src/java/org/apache/cassandra/service/AntiEntropyService.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4b50b2b2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4b50b2b2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4b50b2b2 Branch: refs/heads/trunk Commit: 4b50b2b2e41aeda86c166059826e8eb1498b24fc Parents: 44cf4a6 6dfca3d Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:12:26 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:12:26 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/repair/RepairJob.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/CHANGES.txt -- diff --cc CHANGES.txt index c9fabd2,f146166..bdfec11 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -25,33 -9,24 +25,34 @@@ Merged from 1.2 * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) - -1.2.15 - * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - - -1.2.14 - * Reverted code to limit CQL prepared statement cache by size (CASSANDRA-6592) - * add cassandra.default_messaging_version property to allow easier - upgrading from 1.1 (CASSANDRA-6619) - * Allow executing CREATE statements multiple times (CASSANDRA-6471) - * Don't send confusing info with timeouts (CASSANDRA-6491) - * Don't resubmit counter mutation runnables internally (CASSANDRA-6427) - * Don't drop local mutations without a hint (CASSANDRA-6510) - * Don't allow null 
max_hint_window_in_ms (CASSANDRA-6419) - * Validate SliceRange start and finish lengths (CASSANDRA-6521) +2.0.5 + * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) + * Add ks.cf names to tombstone logging (CASSANDRA-6597) + * Use LOCAL_QUORUM for LWT operations at LOCAL_SERIAL (CASSANDRA-6495) + * Wait for gossip to settle before accepting client connections (CASSANDRA-4288) + * Delete unfinished compaction incrementally (CASSANDRA-6086) + * Allow specifying custom secondary index options in CQL3 (CASSANDRA-6480) + * Improve replica pinning for cache efficiency in DES (CASSANDRA-6485) + * Fix LOCAL_SERIAL from thrift (CASSANDRA-6584) + * Don't special case received counts in CAS timeout exceptions (CASSANDRA-6595) + * Add support for 2.1 global counter shards (CASSANDRA-6505) + * Fix NPE when streaming connection is not yet established (CASSANDRA-6210) + * Avoid rare duplicate read repair triggering (CASSANDRA-6606) + * Fix paging discardFirst (CASSANDRA-6555) + * Fix ArrayIndexOutOfBoundsException in 2ndary index query (CASSANDRA-6470) + * Release sstables upon rebuilding 2i (CASSANDRA-6635) + * Add AbstractCompactionStrategy.startup() method (CASSANDRA-6637) + * SSTableScanner may skip rows during cleanup (CASSANDRA-6638) + * sstables from stalled repair sessions can resurrect deleted data (CASSANDRA-6503) + * Switch stress to use ITransportFactory (CASSANDRA-6641) + * Fix IllegalArgumentException during prepare (CASSANDRA-6592) + * Fix possible loss of 2ndary index entries during compaction (CASSANDRA-6517) + * Fix direct Memory on architectures that do not support unaligned long access + (CASSANDRA-6628) + * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) +Merged from 1.2: * fsync compression metadata (CASSANDRA-6531) * Validate CF existence on execution for prepared statement (CASSANDRA-6535) * Add ability to throttle batchlog replay (CASSANDRA-6550) 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/src/java/org/apache/cassandra/repair/RepairJob.java -- diff --cc src/java/org/apache/cassandra/repair/RepairJob.java index 16daf4e,000..6705c95 mode 100644,00..100644 --- a/src/java/org/apache/cassandra/repair/RepairJob.java +++ b/src/java/org/apache/cassandra/repair/RepairJob.java @@@ -1,224 -1,0 +1,224 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License,
[9/9] git commit: Merge branch 'cassandra-2.0' into cassandra-2.1
Merge branch 'cassandra-2.0' into cassandra-2.1 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/c9a4bffe Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/c9a4bffe Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/c9a4bffe Branch: refs/heads/cassandra-2.1 Commit: c9a4bffee6b76fe340695403886ae18027972bf9 Parents: 73dcdbd 4b50b2b Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:12:38 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:12:38 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/repair/RepairJob.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/c9a4bffe/CHANGES.txt -- diff --cc CHANGES.txt index d9d0f20,bdfec11..d7bc77e --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -62,8 -25,8 +62,9 @@@ Merged from 1.2 * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) + 2.0.5 * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) * Add ks.cf names to tombstone logging (CASSANDRA-6597) http://git-wip-us.apache.org/repos/asf/cassandra/blob/c9a4bffe/src/java/org/apache/cassandra/repair/RepairJob.java --
[7/9] git commit: Merge branch 'cassandra-1.2' into cassandra-2.0
Merge branch 'cassandra-1.2' into cassandra-2.0 Conflicts: src/java/org/apache/cassandra/service/AntiEntropyService.java Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4b50b2b2 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4b50b2b2 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4b50b2b2 Branch: refs/heads/cassandra-2.0 Commit: 4b50b2b2e41aeda86c166059826e8eb1498b24fc Parents: 44cf4a6 6dfca3d Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:12:26 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:12:26 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/repair/RepairJob.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/CHANGES.txt -- diff --cc CHANGES.txt index c9fabd2,f146166..bdfec11 --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -25,33 -9,24 +25,34 @@@ Merged from 1.2 * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) - -1.2.15 - * Move handling of migration event source to solve bootstrap race (CASSANDRA-6648) - * Make sure compaction throughput value doesn't overflow with int math (CASSANDRA-6647) - - -1.2.14 - * Reverted code to limit CQL prepared statement cache by size (CASSANDRA-6592) - * add cassandra.default_messaging_version property to allow easier - upgrading from 1.1 (CASSANDRA-6619) - * Allow executing CREATE statements multiple times (CASSANDRA-6471) - * Don't send confusing info with timeouts (CASSANDRA-6491) - * Don't resubmit counter mutation runnables internally (CASSANDRA-6427) - * Don't drop local mutations without a hint (CASSANDRA-6510) - * Don't allow null 
max_hint_window_in_ms (CASSANDRA-6419) - * Validate SliceRange start and finish lengths (CASSANDRA-6521) +2.0.5 + * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) + * Add ks.cf names to tombstone logging (CASSANDRA-6597) + * Use LOCAL_QUORUM for LWT operations at LOCAL_SERIAL (CASSANDRA-6495) + * Wait for gossip to settle before accepting client connections (CASSANDRA-4288) + * Delete unfinished compaction incrementally (CASSANDRA-6086) + * Allow specifying custom secondary index options in CQL3 (CASSANDRA-6480) + * Improve replica pinning for cache efficiency in DES (CASSANDRA-6485) + * Fix LOCAL_SERIAL from thrift (CASSANDRA-6584) + * Don't special case received counts in CAS timeout exceptions (CASSANDRA-6595) + * Add support for 2.1 global counter shards (CASSANDRA-6505) + * Fix NPE when streaming connection is not yet established (CASSANDRA-6210) + * Avoid rare duplicate read repair triggering (CASSANDRA-6606) + * Fix paging discardFirst (CASSANDRA-6555) + * Fix ArrayIndexOutOfBoundsException in 2ndary index query (CASSANDRA-6470) + * Release sstables upon rebuilding 2i (CASSANDRA-6635) + * Add AbstractCompactionStrategy.startup() method (CASSANDRA-6637) + * SSTableScanner may skip rows during cleanup (CASSANDRA-6638) + * sstables from stalled repair sessions can resurrect deleted data (CASSANDRA-6503) + * Switch stress to use ITransportFactory (CASSANDRA-6641) + * Fix IllegalArgumentException during prepare (CASSANDRA-6592) + * Fix possible loss of 2ndary index entries during compaction (CASSANDRA-6517) + * Fix direct Memory on architectures that do not support unaligned long access + (CASSANDRA-6628) + * Let scrub optionally skip broken counter partitions (CASSANDRA-5930) +Merged from 1.2: * fsync compression metadata (CASSANDRA-6531) * Validate CF existence on execution for prepared statement (CASSANDRA-6535) * Add ability to throttle batchlog replay (CASSANDRA-6550) 
http://git-wip-us.apache.org/repos/asf/cassandra/blob/4b50b2b2/src/java/org/apache/cassandra/repair/RepairJob.java -- diff --cc src/java/org/apache/cassandra/repair/RepairJob.java index 16daf4e,000..6705c95 mode 100644,00..100644 --- a/src/java/org/apache/cassandra/repair/RepairJob.java +++ b/src/java/org/apache/cassandra/repair/RepairJob.java @@@ -1,224 -1,0 +1,224 @@@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache
[8/9] git commit: Merge branch 'cassandra-2.0' into cassandra-2.1
Merge branch 'cassandra-2.0' into cassandra-2.1 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/c9a4bffe Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/c9a4bffe Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/c9a4bffe Branch: refs/heads/trunk Commit: c9a4bffee6b76fe340695403886ae18027972bf9 Parents: 73dcdbd 4b50b2b Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:12:38 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:12:38 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/repair/RepairJob.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/c9a4bffe/CHANGES.txt -- diff --cc CHANGES.txt index d9d0f20,bdfec11..d7bc77e --- a/CHANGES.txt +++ b/CHANGES.txt @@@ -62,8 -25,8 +62,9 @@@ Merged from 1.2 * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) + 2.0.5 * Reduce garbage generated by bloom filter lookups (CASSANDRA-6609) * Add ks.cf names to tombstone logging (CASSANDRA-6597) http://git-wip-us.apache.org/repos/asf/cassandra/blob/c9a4bffe/src/java/org/apache/cassandra/repair/RepairJob.java --
[1/9] git commit: Fix snapshot repair not snapshotting coordinator
Repository: cassandra Updated Branches: refs/heads/cassandra-1.2 7937ee38f - 6dfca3d32 refs/heads/cassandra-2.0 44cf4a66d - 4b50b2b2e refs/heads/cassandra-2.1 73dcdbdf2 - c9a4bffee refs/heads/trunk 73dcdbdf2 - c9a4bffee Fix snapshot repair not snapshotting coordinator patch by yukim; reviewed by sankalp kohli for CASSANDRA-6713 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6dfca3d3 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6dfca3d3 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6dfca3d3 Branch: refs/heads/cassandra-1.2 Commit: 6dfca3d329e4be1c6d47a7791de0a349c164f2ad Parents: 7937ee3 Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:08:59 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:08:59 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/service/AntiEntropyService.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 964e10c..f146166 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -9,6 +9,7 @@ * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) 1.2.15 http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/src/java/org/apache/cassandra/service/AntiEntropyService.java -- diff --git a/src/java/org/apache/cassandra/service/AntiEntropyService.java b/src/java/org/apache/cassandra/service/AntiEntropyService.java index eafab67..f766303 100644 --- a/src/java/org/apache/cassandra/service/AntiEntropyService.java +++ b/src/java/org/apache/cassandra/service/AntiEntropyService.java @@ -849,7 +849,7 @@ public class 
AntiEntropyService allEndpoints.add(FBUtilities.getBroadcastAddress()); if (isSequential) -makeSnapshots(endpoints); +makeSnapshots(allEndpoints); for (InetAddress endpoint : allEndpoints) treeRequests.add(new TreeRequest(getName(), endpoint, range, new CFPair(tablename, cfname)));
[2/9] git commit: Fix snapshot repair not snapshotting coordinator
Fix snapshot repair not snapshotting coordinator patch by yukim; reviewed by sankalp kohli for CASSANDRA-6713 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6dfca3d3 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6dfca3d3 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6dfca3d3 Branch: refs/heads/cassandra-2.0 Commit: 6dfca3d329e4be1c6d47a7791de0a349c164f2ad Parents: 7937ee3 Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:08:59 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:08:59 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/service/AntiEntropyService.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 964e10c..f146166 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -9,6 +9,7 @@ * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) 1.2.15 http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/src/java/org/apache/cassandra/service/AntiEntropyService.java -- diff --git a/src/java/org/apache/cassandra/service/AntiEntropyService.java b/src/java/org/apache/cassandra/service/AntiEntropyService.java index eafab67..f766303 100644 --- a/src/java/org/apache/cassandra/service/AntiEntropyService.java +++ b/src/java/org/apache/cassandra/service/AntiEntropyService.java @@ -849,7 +849,7 @@ public class AntiEntropyService allEndpoints.add(FBUtilities.getBroadcastAddress()); if (isSequential) -makeSnapshots(endpoints); +makeSnapshots(allEndpoints); for (InetAddress endpoint : allEndpoints) treeRequests.add(new 
TreeRequest(getName(), endpoint, range, new CFPair(tablename, cfname)));
[3/9] git commit: Fix snapshot repair not snapshotting coordinator
Fix snapshot repair not snapshotting coordinator patch by yukim; reviewed by sankalp kohli for CASSANDRA-6713 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6dfca3d3 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6dfca3d3 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6dfca3d3 Branch: refs/heads/cassandra-2.1 Commit: 6dfca3d329e4be1c6d47a7791de0a349c164f2ad Parents: 7937ee3 Author: Yuki Morishita yu...@apache.org Authored: Mon Feb 17 11:08:59 2014 -0600 Committer: Yuki Morishita yu...@apache.org Committed: Mon Feb 17 11:08:59 2014 -0600 -- CHANGES.txt | 1 + src/java/org/apache/cassandra/service/AntiEntropyService.java | 2 +- 2 files changed, 2 insertions(+), 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 964e10c..f146166 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -9,6 +9,7 @@ * Use real node messaging versions for schema exchange decisions (CASSANDRA-6700) * IN on the last clustering columns + ORDER BY DESC yield no results (CASSANDRA-6701) * Fix SecondaryIndexManager#deleteFromIndexes() (CASSANDRA-6711) + * Fix snapshot repair not snapshotting coordinator itself (CASSANDRA-6713) 1.2.15 http://git-wip-us.apache.org/repos/asf/cassandra/blob/6dfca3d3/src/java/org/apache/cassandra/service/AntiEntropyService.java -- diff --git a/src/java/org/apache/cassandra/service/AntiEntropyService.java b/src/java/org/apache/cassandra/service/AntiEntropyService.java index eafab67..f766303 100644 --- a/src/java/org/apache/cassandra/service/AntiEntropyService.java +++ b/src/java/org/apache/cassandra/service/AntiEntropyService.java @@ -849,7 +849,7 @@ public class AntiEntropyService allEndpoints.add(FBUtilities.getBroadcastAddress()); if (isSequential) -makeSnapshots(endpoints); +makeSnapshots(allEndpoints); for (InetAddress endpoint : allEndpoints) treeRequests.add(new 
TreeRequest(getName(), endpoint, range, new CFPair(tablename, cfname)));
[jira] [Updated] (CASSANDRA-6566) Differencer should not run in AntiEntropy Stage
[ https://issues.apache.org/jira/browse/CASSANDRA-6566?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Yuki Morishita updated CASSANDRA-6566: -- Attachment: 6566-2.0-v3.txt V3 attached. Rebased and removed empty RepairJob#terminate method. I just leave the method name RepairSession#terminate as is for now. Differencer should not run in AntiEntropy Stage --- Key: CASSANDRA-6566 URL: https://issues.apache.org/jira/browse/CASSANDRA-6566 Project: Cassandra Issue Type: Improvement Components: Core Reporter: sankalp kohli Assignee: Yuki Morishita Priority: Minor Fix For: 2.0.6 Attachments: 6566-2.0-v2.txt, 6566-2.0-v3.txt, 6566-2.0.txt The Differencing currently runs in AntiEntropy stage. When there are lot of ranges which do not match, it takes sometime to compute the diff in ranges. Also with increase in Merkle tree height it will take even more time in case of large diffs. This causes other things to get blocked behind this. Also no other repair messages can be processed. Example: If a node is doing differencing for a repair, and Validation compaction is done for another repair, it needs to block to send the tree over till Differencing is done. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6566) Differencer should not run in AntiEntropy Stage
[ https://issues.apache.org/jira/browse/CASSANDRA-6566?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903416#comment-13903416 ] sankalp kohli commented on CASSANDRA-6566: -- Since differencers are now tracked as a Set with this change, why don't we make the set concurrent and remove the synchronize from the method. - I am fine dropping this change since it is hardly a problem. v3 looks good. Differencer should not run in AntiEntropy Stage --- Key: CASSANDRA-6566 URL: https://issues.apache.org/jira/browse/CASSANDRA-6566 Project: Cassandra Issue Type: Improvement Components: Core Reporter: sankalp kohli Assignee: Yuki Morishita Priority: Minor Fix For: 2.0.6 Attachments: 6566-2.0-v2.txt, 6566-2.0-v3.txt, 6566-2.0.txt The Differencing currently runs in AntiEntropy stage. When there are lot of ranges which do not match, it takes sometime to compute the diff in ranges. Also with increase in Merkle tree height it will take even more time in case of large diffs. This causes other things to get blocked behind this. Also no other repair messages can be processed. Example: If a node is doing differencing for a repair, and Validation compaction is done for another repair, it needs to block to send the tree over till Differencing is done. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6704) Create wide row scanners
[ https://issues.apache.org/jira/browse/CASSANDRA-6704?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903434#comment-13903434 ] Edward Capriolo commented on CASSANDRA-6704: Latest commit supports a yaml parameter. {code} dynamic_loading: - JAVA_LOCAL_CLASSPATH - GROOVY_CLASS_LOADER {code} You can use this to disable any dynamic loading. {code} @Test public void testIllegalDynamic() throws InvalidRequestException, UnavailableException, TimedOutException, TException{ FilterDesc d = new FilterDesc(); d.setSpec(CLOJURE_CLOSURE); d.setName(limit9); boolean noClojure = false; try{ server.create_filter(d); } catch (InvalidRequestException ex){ noClojure = true; } } {code} JAVA_LOCAL_CLASSPATH means allow mechanism to load using Class.forName Create wide row scanners Key: CASSANDRA-6704 URL: https://issues.apache.org/jira/browse/CASSANDRA-6704 Project: Cassandra Issue Type: New Feature Reporter: Edward Capriolo Assignee: Edward Capriolo The BigTable white paper demonstrates the use of scanners to iterate over rows and columns. http://static.googleusercontent.com/media/research.google.com/en/us/archive/bigtable-osdi06.pdf Because Cassandra does not have a primary sorting on row keys scanning over ranges of row keys is less useful. However we can use the scanner concept to operate on wide rows. For example many times a user wishes to do some custom processing inside a row and does not wish to carry the data across the network to do this processing. I have already implemented thrift methods to compile dynamic groovy code into Filters as well as some code that uses a Filter to page through and process data on the server side. https://github.com/edwardcapriolo/cassandra/compare/apache:trunk...trunk The following is a working code snippet. 
{code} @Test public void test_scanner() throws Exception { ColumnParent cp = new ColumnParent(); cp.setColumn_family(Standard1); ByteBuffer key = ByteBuffer.wrap(rscannerkey.getBytes()); for (char a='a'; a 'g'; a++){ Column c1 = new Column(); c1.setName((a+).getBytes()); c1.setValue(new byte [0]); c1.setTimestamp(System.nanoTime()); server.insert(key, cp, c1, ConsistencyLevel.ONE); } FilterDesc d = new FilterDesc(); d.setSpec(GROOVY_CLASS_LOADER); d.setName(limit3); d.setCode(import org.apache.cassandra.dht.* \n + import org.apache.cassandra.thrift.* \n + public class Limit3 implements SFilter { \n + public FilterReturn filter(ColumnOrSuperColumn col, ListColumnOrSuperColumn filtered) {\n+ filtered.add(col);\n+ return filtered.size() 3 ? FilterReturn.FILTER_MORE : FilterReturn.FILTER_DONE;\n+ } \n + }\n); server.create_filter(d); ScannerResult res = server.create_scanner(Standard1, limit3, key, ByteBuffer.wrap(a.getBytes())); Assert.assertEquals(3, res.results.size()); } {code} I am going to be working on this code over the next few weeks but I wanted to get the concept our early so the design can see some criticism. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6704) Create wide row scanners
[ https://issues.apache.org/jira/browse/CASSANDRA-6704?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903436#comment-13903436 ] Edward Capriolo commented on CASSANDRA-6704: the implementation looks like this. {code} @Override public void create_filter(FilterDesc desc) throws InvalidRequestException, UnavailableException, TimedOutException, TException { ClientState cState = state(); NitDesc.NitSpec spec = NitSpec.valueOf(desc.spec); if (!DatabaseDescriptor.getDynamicLoading().contains(spec)){ throw new InvalidRequestException(spec + is not in allowed list + DatabaseDescriptor.getDynamicLoading()); } {code} Create wide row scanners Key: CASSANDRA-6704 URL: https://issues.apache.org/jira/browse/CASSANDRA-6704 Project: Cassandra Issue Type: New Feature Reporter: Edward Capriolo Assignee: Edward Capriolo The BigTable white paper demonstrates the use of scanners to iterate over rows and columns. http://static.googleusercontent.com/media/research.google.com/en/us/archive/bigtable-osdi06.pdf Because Cassandra does not have a primary sorting on row keys scanning over ranges of row keys is less useful. However we can use the scanner concept to operate on wide rows. For example many times a user wishes to do some custom processing inside a row and does not wish to carry the data across the network to do this processing. I have already implemented thrift methods to compile dynamic groovy code into Filters as well as some code that uses a Filter to page through and process data on the server side. https://github.com/edwardcapriolo/cassandra/compare/apache:trunk...trunk The following is a working code snippet. 
{code} @Test public void test_scanner() throws Exception { ColumnParent cp = new ColumnParent(); cp.setColumn_family(Standard1); ByteBuffer key = ByteBuffer.wrap(rscannerkey.getBytes()); for (char a='a'; a 'g'; a++){ Column c1 = new Column(); c1.setName((a+).getBytes()); c1.setValue(new byte [0]); c1.setTimestamp(System.nanoTime()); server.insert(key, cp, c1, ConsistencyLevel.ONE); } FilterDesc d = new FilterDesc(); d.setSpec(GROOVY_CLASS_LOADER); d.setName(limit3); d.setCode(import org.apache.cassandra.dht.* \n + import org.apache.cassandra.thrift.* \n + public class Limit3 implements SFilter { \n + public FilterReturn filter(ColumnOrSuperColumn col, ListColumnOrSuperColumn filtered) {\n+ filtered.add(col);\n+ return filtered.size() 3 ? FilterReturn.FILTER_MORE : FilterReturn.FILTER_DONE;\n+ } \n + }\n); server.create_filter(d); ScannerResult res = server.create_scanner(Standard1, limit3, key, ByteBuffer.wrap(a.getBytes())); Assert.assertEquals(3, res.results.size()); } {code} I am going to be working on this code over the next few weeks but I wanted to get the concept our early so the design can see some criticism. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6285) LCS compaction failing with Exception
[ https://issues.apache.org/jira/browse/CASSANDRA-6285?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903449#comment-13903449 ] Nikolai Grigoriev commented on CASSANDRA-6285: -- I have started seeing these too. Surprisingly...after adding OpsCenter CE to my cluster. I do not see these associated with my own data. {code} java.lang.RuntimeException: Last written key DecoratedKey(3542937286762954312, 31302e332e34352e3135382d676574466c757368657350656e64696e67) = current key DecoratedKey(-2152912038130700738, 31302e332e34352e3135362d77696e7465726d7574655f6a6d657465722d776d5f6170706c69636174696f6e732d676574526563656e744 26c6f6f6d46) writing into /hadoop/disk1/cassandra/data/OpsCenter/rollups300/OpsCenter-rollups300-tmp-jb-5055-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:142) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:165) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:197) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) {code} LCS compaction failing with Exception - Key: CASSANDRA-6285 URL: 
https://issues.apache.org/jira/browse/CASSANDRA-6285 Project: Cassandra Issue Type: Bug Components: Core Environment: 4 nodes, shortly updated from 1.2.11 to 2.0.2 Reporter: David Sauer Assignee: Tyler Hobbs Fix For: 2.0.6 Attachments: compaction_test.py After altering everything to LCS the table OpsCenter.rollups60 amd one other none OpsCenter-Table got stuck with everything hanging around in L0. The compaction started and ran until the logs showed this: ERROR [CompactionExecutor:111] 2013-11-01 19:14:53,865 CassandraDaemon.java (line 187) Exception in thread Thread[CompactionExecutor:111,1,RMI Runtime] java.lang.RuntimeException: Last written key DecoratedKey(1326283851463420237, 37382e34362e3132382e3139382d6a7576616c69735f6e6f72785f696e6465785f323031335f31305f30382d63616368655f646f63756d656e74736c6f6f6b75702d676574426c6f6f6d46696c746572537061636555736564) = current key DecoratedKey(954210699457429663, 37382e34362e3132382e3139382d6a7576616c69735f6e6f72785f696e6465785f323031335f31305f30382d63616368655f646f63756d656e74736c6f6f6b75702d676574546f74616c4469736b5370616365557365640b0f) writing into /var/lib/cassandra/data/OpsCenter/rollups60/OpsCenter-rollups60-tmp-jb-58656-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:141) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:164) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$6.runMayThrow(CompactionManager.java:296) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at 
java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) Moving back to STC worked to keep the compactions running. Especially my own table I would like to move to LCS. After a major compaction with STC the move to LCS fails with the same Exception.
[jira] [Commented] (CASSANDRA-6283) Windows 7 data files keept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903474#comment-13903474 ] Andreas Schnitzerling commented on CASSANDRA-6283: -- Actually Im still running 2.0.5 Snapshot with my finalizer patch. During normal operation no fault, but I didn't check the logs in the last 2 weeks yet. The repair jobs I start with -par option. I will update to 2.0.5-rel with my patch and test repair w/o -par option and compare the logs to yours. Correct my plan, if necessary. Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 Project: Cassandra Issue Type: Bug Components: Core Environment: Windows 7 (32) / Java 1.7.0.45 Reporter: Andreas Schnitzerling Assignee: Joshua McKenzie Labels: compaction Fix For: 2.0.6 Attachments: leakdetect.patch, screenshot-1.jpg, system.log Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause is: Opened file handles seem to be lost and not closed properly. Win 7 blames, that another process is still using the file (but its obviously cassandra). Only restart of the server makes the files deleted. But after heavy using (changes) of tables, there are about 24K files in the data folder (instead of 35 after every restart) and Cassandra crashes. I experiminted and I found out, that a finalizer fixes the problem. So after GC the files will be deleted (not optimal, but working fine). It runs now 2 days continously without problem. Possible fix/test: I wrote the following finalizer at the end of class org.apache.cassandra.io.util.RandomAccessReader: {code:title=RandomAccessReader.java|borderStyle=solid} @Override protected void finalize() throws Throwable { deallocate(); super.finalize(); } {code} Can somebody test / develop / patch it? Thx. 
-- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
[ https://issues.apache.org/jira/browse/CASSANDRA-6715?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903509#comment-13903509 ] sankalp kohli commented on CASSANDRA-6715: -- just FYI. I tried this on latest trunk and it works. I started a one node new cluster and inserted and read a record from cqlsh. And I could see data in cfhistogram. nodetool Cfhistograms doesn't see native protocol queries -- Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac os X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank you for your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6572) Workload recording / playback
[ https://issues.apache.org/jira/browse/CASSANDRA-6572?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903514#comment-13903514 ] Lyuben Todorov commented on CASSANDRA-6572: --- Just an update with progress so far, [branch here|https://github.com/lyubent/cassandra/tree/query_rec]. Currently query recording is enabled using JMX via StorageService#enableQueryRecording. This will record every nth query and append it to the QueryLog. Currently the log is piggybacking on the commit_log directory but the long-term plan is to either add a setting to cassandra.yaml or modify {{enableQueryRecording}} to take another param that will be the directory for the log. Queries are stored in an append only log as suggested, once the log reaches 4MB (this should also be configurable, I'm thinking set a default and add a JMX function that can overload said default). Once the limit is reached, the log is renamed (a timestamp is added to the name) and a new log is created that will now store the new appends. As for the replaying, its still fairly basic, currently the entire query log file is read in a single operation (smarter approaches than storing a large collection of query strings are welcome), then the collection of queries is replayed sequentially. If there is too large a gap between queries, a timeout takes effect to avoid stalling the replay (again this should be configurable, currently the timeout is 10s). Replaying of the workload is invoked via JMX right now, but *workload replayer* tool will be in charge of handling the replaying, and also what cluster the logs get replayed to. 
Workload recording / playback - Key: CASSANDRA-6572 URL: https://issues.apache.org/jira/browse/CASSANDRA-6572 Project: Cassandra Issue Type: New Feature Components: Core, Tools Reporter: Jonathan Ellis Assignee: Lyuben Todorov Fix For: 2.0.6 Write sample mode gets us part way to testing new versions against a real world workload, but we need an easy way to test the query side as well. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
[ https://issues.apache.org/jira/browse/CASSANDRA-6715?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903522#comment-13903522 ] Brandon Williams commented on CASSANDRA-6715: - cqlsh still goes over thrift, not the native proto. nodetool Cfhistograms doesn't see native protocole queries -- Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac os X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank four your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
[ https://issues.apache.org/jira/browse/CASSANDRA-6715?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903526#comment-13903526 ] Jonathan Ellis commented on CASSANDRA-6715: --- Still, cfhistograms is not managed by CassandraDaemon which is the only Thrift-specific part. nodetool Cfhistograms doesn't see native protocole queries -- Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac os X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank four your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-3991) Investigate importance of jsvc in debian packages
[ https://issues.apache.org/jira/browse/CASSANDRA-3991?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903539#comment-13903539 ] Cyril Scetbon commented on CASSANDRA-3991: -- you should remove JVSC from init file as it's not used anymore : {code} [cyril:~/src/git/cassandra] trunk(110)+ 2 ± grep JSVC debian/init JSVC=/usr/bin/jsvc {code} Investigate importance of jsvc in debian packages - Key: CASSANDRA-3991 URL: https://issues.apache.org/jira/browse/CASSANDRA-3991 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Brandon Williams Assignee: Eric Evans Priority: Minor Fix For: 2.0.1 Attachments: 0001-CASSANDRA-3991-refactor-init-script-to-use-start-sto.patch jsvc seems to be buggy at best. For instance, if you set a small heap like 128M it seems to completely ignore this and use as much memory as it wants. I don't know what this is buying us over launching /usr/bin/cassandra directly like the redhat scripts do, but I've seen multiple complaints about its memory usage. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903542#comment-13903542 ] Nicolas Favre-Felix commented on CASSANDRA-6561: Thanks Sylvain for the new patches, it looks great. bq. This query should throw IRE, just like DELETEs do now. I think this is a problem since there is no way to retrieve both a CQL row and the partition's static columns in a single SELECT. This is an issue since partition-level isolation guarantees that you don't see partial updates within a partition; if the whole point of static columns is to have a consistent view of both clustered and unclustered data within a partition, not being able to fetch both in a single operation makes this isolation property useless. Keeping the example of bills that have or haven't been paid, here's a table definition: {code} CREATE TABLE bills ( user text, balance bigint static, expense_id bigint, amount bigint, item text, paid boolean, PRIMARY KEY (user, expense_id) ); CREATE INDEX unpaid ON bills (paid); {code} Let's create 2 expenses for a single user, with CAS updates: {code} BEGIN BATCH INSERT INTO bills (user, expense_id, amount, item, paid) values ('user1', 1000, 8, 'burrito', false); INSERT INTO bills (user, balance) VALUES ('user1', -8) IF NOT EXISTS; APPLY BATCH; BEGIN BATCH INSERT INTO bills (user, expense_id, amount, item, paid) values ('user1', 2000, 200, 'hotel room', false); UPDATE bills SET balance = -208 WHERE user='user1' IF balance = -8; APPLY BATCH; {code} They are both present: {code} SELECT * FROM bills WHERE user='user1'; user | expense_id | balance | amount | item | paid ---++-+++--- user1 | 1000 |-208 | 8 |burrito | False user1 | 2000 |-208 |200 | hotel room | False (2 rows) {code} The great thing about using a single partition that's updated with CAS is that all queries that read the full partition will always see a consistent view of the data, and respect our invariants – in our case, that 
the sum of the amount for all unpaid bills + the balance is equal to zero. We can pay bills using CAS too – let's pay for the burrito: {code} BEGIN BATCH UPDATE bills SET paid=true WHERE user='user1' AND expense_id=1000; UPDATE bills SET balance=-200 WHERE user='user1' IF balance=-208; APPLY BATCH; {code} This works, of course, and any client that retrieve the full partition would either see balance=-208 and all bills unpaid, or balance=-200 and one bill paid, but never anything else. If we don't return the balance with the bill in a single SELECT, we lose the isolation property and the query for the balance could be out of date with the query for the bills themselves (SELECT * FROM bills WHERE user='user1' AND paid=false\;) I'd argue that it is also confusing for users to see CQL rows with static columns filled in when they select the full partition but can't access the same data if they give the full PK coordinates of that row: you'd expect the second query to select a subset of the data extracted by the first. Static columns in CQL3 -- Key: CASSANDRA-6561 URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 Project: Cassandra Issue Type: New Feature Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Fix For: 2.0.6 I'd like to suggest the following idea for adding static columns to CQL3. I'll note that the basic idea has been suggested by jhalliday on irc but the rest of the details are mine and I should be blamed for anything stupid in what follows. Let me start with a rational: there is 2 main family of CF that have been historically used in Thrift: static ones and dynamic ones. CQL3 handles both family through the presence or not of clustering columns. There is however some cases where mixing both behavior has its use. I like to think of those use cases as 3 broad category: # to denormalize small amounts of not-entirely-static data in otherwise static entities. It's say tags for a product or custom properties in a user profile. 
This is why we've added CQL3 collections. Importantly, this is the *only* use case for which collections are meant (which doesn't diminishes their usefulness imo, and I wouldn't disagree that we've maybe not communicated this too well). # to optimize fetching both a static entity and related dynamic ones. Say you have blog posts, and each post has associated comments (chronologically ordered). *And* say that a very common query is fetch a post and its 50 last comments. In that case, it *might* be beneficial to store a blog post (static entity) in the same underlying CF than it's comments for performance reason. So that
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903562#comment-13903562 ] Aleksey Yeschenko commented on CASSANDRA-6561: -- [~nff] that's not the point of that change. The idea was for this to return the row and the static value, as expected: {code} cqlsh:ks1 SELECT * FROM bills WHERE user='user1' and expense_id = 1000; user | expense_id | balance | amount | item| paid ---++-++-+-- user1 | 1000 |-200 | 8 | burrito | True (1 rows) {code} But for this throw an IRE, to match UPDATE/DELETE behavior: {code} cqlsh:ks1 SELECT balance FROM bills WHERE user='user1' and expense_id = 1000; Bad Request: Cannot restrict clustering columns when selecting only static columns {code} And https://github.com/pcmanus/cassandra/commit/67b4f976b57399bcbb94b3b03eaaf71842b6e843 does exactly this. Your issue looks entirely unrelated to me (it's about static columns and 2i queries), but yeah, it's still an issue to be addressed in the next iteration. [~slebresne] Looks almost LGTM. Other than the Nicolas' issue with static columns and 2i, I have one more potential issue. While the conditions validation now works properly, I'm on the fence whether or not we should perform the same validation on the cells, too. Think {code} begin batch update foo set z = 13 where x = 'a' and y = 1 if z = 23; update foo set z = 12 where x = 'a' and y = 1 if z = 23; apply batch {code} z = 13 and z = 12 updates are in conflict here, but the batch itself will be applied, with z = 13 cell winning the reconcile. While this is what we do for regular batches, I'm *not sure* if we should be doing the same for CAS batches, and not validate update conflicts the same way we validate the conditions. 
Static columns in CQL3 -- Key: CASSANDRA-6561 URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 Project: Cassandra Issue Type: New Feature Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Fix For: 2.0.6 I'd like to suggest the following idea for adding static columns to CQL3. I'll note that the basic idea has been suggested by jhalliday on irc but the rest of the details are mine and I should be blamed for anything stupid in what follows. Let me start with a rational: there is 2 main family of CF that have been historically used in Thrift: static ones and dynamic ones. CQL3 handles both family through the presence or not of clustering columns. There is however some cases where mixing both behavior has its use. I like to think of those use cases as 3 broad category: # to denormalize small amounts of not-entirely-static data in otherwise static entities. It's say tags for a product or custom properties in a user profile. This is why we've added CQL3 collections. Importantly, this is the *only* use case for which collections are meant (which doesn't diminishes their usefulness imo, and I wouldn't disagree that we've maybe not communicated this too well). # to optimize fetching both a static entity and related dynamic ones. Say you have blog posts, and each post has associated comments (chronologically ordered). *And* say that a very common query is fetch a post and its 50 last comments. In that case, it *might* be beneficial to store a blog post (static entity) in the same underlying CF than it's comments for performance reason. So that fetch a post and it's 50 last comments is just one slice internally. # you want to CAS rows of a dynamic partition based on some partition condition. This is the same use case than why CASSANDRA-5633 exists for. As said above, 1) is already covered by collections, but 2) and 3) are not (and I strongly believe collections are not the right fit, API wise, for those). 
Also, note that I don't want to underestimate the usefulness of 2). In most cases, using a separate table for the blog posts and the comments is The Right Solution, and trying to do 2) is premature optimisation. Yet, when used properly, that kind of optimisation can make a difference, so I think having a relatively native solution for it in CQL3 could make sense. Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have the feeling that static columns actually are a more natural approach (in term of API). That's arguably more of a personal opinion/feeling though. So long story short, CQL3 lacks a way to mix both some static and dynamic rows in the same partition of the same CQL3 table, and I think such a tool could have it's use. The proposal is thus to allow static columns. Static columns would only make sense in table with clustering columns (the dynamic ones). A static column value would be static to the partition (all
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903573#comment-13903573 ] Aleksey Yeschenko commented on CASSANDRA-6561: -- That, and some nits: - ColumnCondition could use some bling (http://youtu.be/lWA2pjMjpBs) - CQL3CasConditions.ColumnsConditions doesn't need cfm. Also, one more diamond missing in CQL3CasConditions constructor. Static columns in CQL3 -- Key: CASSANDRA-6561 URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 Project: Cassandra Issue Type: New Feature Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Fix For: 2.0.6 I'd like to suggest the following idea for adding static columns to CQL3. I'll note that the basic idea has been suggested by jhalliday on irc but the rest of the details are mine and I should be blamed for anything stupid in what follows. Let me start with a rational: there is 2 main family of CF that have been historically used in Thrift: static ones and dynamic ones. CQL3 handles both family through the presence or not of clustering columns. There is however some cases where mixing both behavior has its use. I like to think of those use cases as 3 broad category: # to denormalize small amounts of not-entirely-static data in otherwise static entities. It's say tags for a product or custom properties in a user profile. This is why we've added CQL3 collections. Importantly, this is the *only* use case for which collections are meant (which doesn't diminishes their usefulness imo, and I wouldn't disagree that we've maybe not communicated this too well). # to optimize fetching both a static entity and related dynamic ones. Say you have blog posts, and each post has associated comments (chronologically ordered). *And* say that a very common query is fetch a post and its 50 last comments. In that case, it *might* be beneficial to store a blog post (static entity) in the same underlying CF than it's comments for performance reason. 
So that fetch a post and it's 50 last comments is just one slice internally. # you want to CAS rows of a dynamic partition based on some partition condition. This is the same use case than why CASSANDRA-5633 exists for. As said above, 1) is already covered by collections, but 2) and 3) are not (and I strongly believe collections are not the right fit, API wise, for those). Also, note that I don't want to underestimate the usefulness of 2). In most cases, using a separate table for the blog posts and the comments is The Right Solution, and trying to do 2) is premature optimisation. Yet, when used properly, that kind of optimisation can make a difference, so I think having a relatively native solution for it in CQL3 could make sense. Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have the feeling that static columns actually are a more natural approach (in term of API). That's arguably more of a personal opinion/feeling though. So long story short, CQL3 lacks a way to mix both some static and dynamic rows in the same partition of the same CQL3 table, and I think such a tool could have it's use. The proposal is thus to allow static columns. Static columns would only make sense in table with clustering columns (the dynamic ones). A static column value would be static to the partition (all rows of the partition would share the value for such column). The syntax would just be: {noformat} CREATE TABLE t ( k text, s text static, i int, v text, PRIMARY KEY (k, i) ) {noformat} then you'd get: {noformat} INSERT INTO t(k, s, i, v) VALUES (k0, I'm shared, 0, foo); INSERT INTO t(k, s, i, v) VALUES (k0, I'm still shared, 1, bar); SELECT * FROM t; k | s | i |v k0 | I'm still shared | 0 | bar k0 | I'm still shared | 1 | foo {noformat} There would be a few semantic details to decide on regarding deletions, ttl, etc. but let's see if we agree it's a good idea first before ironing those out. One last point is the implementation. 
Though I do think this idea has merits, it's definitively not useful enough to justify rewriting the storage engine for it. But I think we can support this relatively easily (emphasis on relatively :)), which is probably the main reason why I like the approach. Namely, internally, we can store static columns as cells whose clustering column values are empty. So in terms of cells, the partition of my example would look like: {noformat} k0 : [ (:s - I'm still shared), // the static column (0: - ) // row marker (0:v - bar) (1: - ) // row marker (1:v - foo) ] {noformat} Of course, using empty values for the
[2/7] git commit: remove deb init cruft
remove deb init cruft Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/972cffd5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/972cffd5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/972cffd5 Branch: refs/heads/cassandra-2.1 Commit: 972cffd540c9cbe895452f4ad47492315420c1a5 Parents: 4b50b2b Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:15 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:15 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/972cffd5/debian/init -- diff --git a/debian/init b/debian/init index d132441..85d363a 100644 --- a/debian/init +++ b/debian/init @@ -19,7 +19,6 @@ NAME=cassandra PIDFILE=/var/run/$NAME/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME CONFDIR=/etc/cassandra -JSVC=/usr/bin/jsvc WAIT_FOR_START=10 CASSANDRA_HOME=/usr/share/cassandra FD_LIMIT=10
[4/7] git commit: Merge branch 'cassandra-2.0' into cassandra-2.1
Merge branch 'cassandra-2.0' into cassandra-2.1 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2777e1e5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2777e1e5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2777e1e5 Branch: refs/heads/trunk Commit: 2777e1e5de3675f7b3ab7c1a20b53b0f18b4bf53 Parents: c9a4bff 972cffd Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:28 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:28 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/2777e1e5/debian/init --
[1/7] git commit: remove deb init cruft
Repository: cassandra Updated Branches: refs/heads/cassandra-2.0 4b50b2b2e - 972cffd54 refs/heads/cassandra-2.1 c9a4bffee - 2777e1e5d refs/heads/trunk c9a4bffee - dc1dad328 remove deb init cruft Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/972cffd5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/972cffd5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/972cffd5 Branch: refs/heads/cassandra-2.0 Commit: 972cffd540c9cbe895452f4ad47492315420c1a5 Parents: 4b50b2b Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:15 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:15 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/972cffd5/debian/init -- diff --git a/debian/init b/debian/init index d132441..85d363a 100644 --- a/debian/init +++ b/debian/init @@ -19,7 +19,6 @@ NAME=cassandra PIDFILE=/var/run/$NAME/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME CONFDIR=/etc/cassandra -JSVC=/usr/bin/jsvc WAIT_FOR_START=10 CASSANDRA_HOME=/usr/share/cassandra FD_LIMIT=10
[3/7] git commit: remove deb init cruft
remove deb init cruft Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/972cffd5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/972cffd5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/972cffd5 Branch: refs/heads/trunk Commit: 972cffd540c9cbe895452f4ad47492315420c1a5 Parents: 4b50b2b Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:15 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:15 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/972cffd5/debian/init -- diff --git a/debian/init b/debian/init index d132441..85d363a 100644 --- a/debian/init +++ b/debian/init @@ -19,7 +19,6 @@ NAME=cassandra PIDFILE=/var/run/$NAME/$NAME.pid SCRIPTNAME=/etc/init.d/$NAME CONFDIR=/etc/cassandra -JSVC=/usr/bin/jsvc WAIT_FOR_START=10 CASSANDRA_HOME=/usr/share/cassandra FD_LIMIT=10
[5/7] git commit: Merge branch 'cassandra-2.0' into cassandra-2.1
Merge branch 'cassandra-2.0' into cassandra-2.1 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2777e1e5 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2777e1e5 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2777e1e5 Branch: refs/heads/cassandra-2.1 Commit: 2777e1e5de3675f7b3ab7c1a20b53b0f18b4bf53 Parents: c9a4bff 972cffd Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:28 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:28 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/2777e1e5/debian/init --
[6/7] git commit: Merge branch 'cassandra-2.0' into trunk
Merge branch 'cassandra-2.0' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/c9798680 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/c9798680 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/c9798680 Branch: refs/heads/trunk Commit: c979868011e071cd56874761d0df023390d3d9ac Parents: c9a4bff 972cffd Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:08:45 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:08:45 2014 -0600 -- debian/init | 1 - 1 file changed, 1 deletion(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/c9798680/debian/init --
[7/7] git commit: Merge branch 'cassandra-2.1' into trunk
Merge branch 'cassandra-2.1' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/dc1dad32 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/dc1dad32 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/dc1dad32 Branch: refs/heads/trunk Commit: dc1dad32871ee41e3df5e132439429da0d918424 Parents: c979868 2777e1e Author: Brandon Williams brandonwilli...@apache.org Authored: Mon Feb 17 17:09:12 2014 -0600 Committer: Brandon Williams brandonwilli...@apache.org Committed: Mon Feb 17 17:09:12 2014 -0600 -- --
[jira] [Commented] (CASSANDRA-3991) Investigate importance of jsvc in debian packages
[ https://issues.apache.org/jira/browse/CASSANDRA-3991?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903595#comment-13903595 ] Brandon Williams commented on CASSANDRA-3991: - done. Investigate importance of jsvc in debian packages - Key: CASSANDRA-3991 URL: https://issues.apache.org/jira/browse/CASSANDRA-3991 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Brandon Williams Assignee: Eric Evans Priority: Minor Fix For: 2.0.1 Attachments: 0001-CASSANDRA-3991-refactor-init-script-to-use-start-sto.patch jsvc seems to be buggy at best. For instance, if you set a small heap like 128M it seems to completely ignore this and use as much memory as it wants. I don't know what this is buying us over launching /usr/bin/cassandra directly like the redhat scripts do, but I've seen multiple complaints about its memory usage. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6283) Windows 7 data files kept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903609#comment-13903609 ] Joshua McKenzie commented on CASSANDRA-6283: With -par on the test. Without -par I'm seeing the same errors you were seeing above indicating that snapshots have open handles: {code:title=Bar.java|borderStyle=solid} ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 Validator.java (line 242) Failed creating a merkle tree for [repair #99973cf0-982e-11e3-9370-639bcb1c8d6c on Keyspace1/Standard1, (-390084131511610885,-345083722760460 251]], /10.193.84.101 (see log for details) ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 CassandraDaemon.java (line 192) Exception in thread Thread[ValidationExecutor:3,1,main] FSWriteError in \var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db at org.apache.cassandra.io.util.FileUtils.deleteWithConfirm(FileUtils.java:120) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:382) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:378) at org.apache.cassandra.db.Directories.clearSnapshot(Directories.java:416) at org.apache.cassandra.db.ColumnFamilyStore.clearSnapshot(ColumnFamilyStore.java:1881) at org.apache.cassandra.db.compaction.CompactionManager.doValidationCompaction(CompactionManager.java:810) at org.apache.cassandra.db.compaction.CompactionManager.access$600(CompactionManager.java:62) at org.apache.cassandra.db.compaction.CompactionManager$8.call(CompactionManager.java:397) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:744) Caused by: java.nio.file.FileSystemException: 
\var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db: The process cannot access the file because it is being used by another process. {code} I want to confirm that repair w/-par on 2.0.5 doesn't give you much trouble, if any, and then tackle -par vs. non separately. Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 Project: Cassandra Issue Type: Bug Components: Core Environment: Windows 7 (32) / Java 1.7.0.45 Reporter: Andreas Schnitzerling Assignee: Joshua McKenzie Labels: compaction Fix For: 2.0.6 Attachments: leakdetect.patch, screenshot-1.jpg, system.log Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause is: Opened file handles seem to be lost and not closed properly. Win 7 blames, that another process is still using the file (but its obviously cassandra). Only restart of the server makes the files deleted. But after heavy using (changes) of tables, there are about 24K files in the data folder (instead of 35 after every restart) and Cassandra crashes. I experiminted and I found out, that a finalizer fixes the problem. So after GC the files will be deleted (not optimal, but working fine). It runs now 2 days continously without problem. Possible fix/test: I wrote the following finalizer at the end of class org.apache.cassandra.io.util.RandomAccessReader: {code:title=RandomAccessReader.java|borderStyle=solid} @Override protected void finalize() throws Throwable { deallocate(); super.finalize(); } {code} Can somebody test / develop / patch it? Thx. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Comment Edited] (CASSANDRA-6283) Windows 7 data files kept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903609#comment-13903609 ] Joshua McKenzie edited comment on CASSANDRA-6283 at 2/17/14 11:59 PM: -- With -par on the test. Without -par I'm seeing the same errors you were seeing above indicating that snapshots have open handles: {code:title=Non-par|borderStyle=solid} ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 Validator.java (line 242) Failed creating a merkle tree for [repair #99973cf0-982e-11e3-9370-639bcb1c8d6c on Keyspace1/Standard1, (-390084131511610885,-345083722760460 251]], /10.193.84.101 (see log for details) ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 CassandraDaemon.java (line 192) Exception in thread Thread[ValidationExecutor:3,1,main] FSWriteError in \var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db at org.apache.cassandra.io.util.FileUtils.deleteWithConfirm(FileUtils.java:120) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:382) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:378) at org.apache.cassandra.db.Directories.clearSnapshot(Directories.java:416) at org.apache.cassandra.db.ColumnFamilyStore.clearSnapshot(ColumnFamilyStore.java:1881) at org.apache.cassandra.db.compaction.CompactionManager.doValidationCompaction(CompactionManager.java:810) at org.apache.cassandra.db.compaction.CompactionManager.access$600(CompactionManager.java:62) at org.apache.cassandra.db.compaction.CompactionManager$8.call(CompactionManager.java:397) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:744) Caused by: java.nio.file.FileSystemException: 
\var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db: The process cannot access the file because it is being used by another process. {code} I want to confirm that repair w/-par on 2.0.5 doesn't give you much trouble, if any, and then tackle -par vs. non separately. was (Author: joshuamckenzie): With -par on the test. Without -par I'm seeing the same errors you were seeing above indicating that snapshots have open handles: {code:title=Bar.java|borderStyle=solid} ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 Validator.java (line 242) Failed creating a merkle tree for [repair #99973cf0-982e-11e3-9370-639bcb1c8d6c on Keyspace1/Standard1, (-390084131511610885,-345083722760460 251]], /10.193.84.101 (see log for details) ERROR [ValidationExecutor:3] 2014-02-17 17:52:57,092 CassandraDaemon.java (line 192) Exception in thread Thread[ValidationExecutor:3,1,main] FSWriteError in \var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db at org.apache.cassandra.io.util.FileUtils.deleteWithConfirm(FileUtils.java:120) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:382) at org.apache.cassandra.io.util.FileUtils.deleteRecursive(FileUtils.java:378) at org.apache.cassandra.db.Directories.clearSnapshot(Directories.java:416) at org.apache.cassandra.db.ColumnFamilyStore.clearSnapshot(ColumnFamilyStore.java:1881) at org.apache.cassandra.db.compaction.CompactionManager.doValidationCompaction(CompactionManager.java:810) at org.apache.cassandra.db.compaction.CompactionManager.access$600(CompactionManager.java:62) at org.apache.cassandra.db.compaction.CompactionManager$8.call(CompactionManager.java:397) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at 
java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:744) Caused by: java.nio.file.FileSystemException: \var\lib\cassandra\data\Keyspace1\Standard1\snapshots\99973cf0-982e-11e3-9370-639bcb1c8d6c\Keyspace1-Standard1-jb-37-Data.db: The process cannot access the file because it is being used by another process. {code} I want to confirm that repair w/-par on 2.0.5 doesn't give you much trouble, if any, and then tackle -par vs. non separately. Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 Project: Cassandra
[jira] [Resolved] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
[ https://issues.apache.org/jira/browse/CASSANDRA-6715?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Brandon Williams resolved CASSANDRA-6715. - Resolution: Cannot Reproduce Worked for me with the native proto. nodetool Cfhistograms doesn't see native protocole queries -- Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac os X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank four your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Created] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
Nikolai Grigoriev created CASSANDRA-6716: Summary: nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at 
sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138) at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252) at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819) at com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801) at javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487) at javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97) at javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328) at javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420) at javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848) at sun.reflect.GeneratedMethodAccessor38.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322) at sun.rmi.transport.Transport$1.run(Transport.java:177) at sun.rmi.transport.Transport$1.run(Transport.java:174) at java.security.AccessController.doPrivileged(Native Method) at sun.rmi.transport.Transport.serviceCall(Transport.java:173) at sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:553) at sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:808) at 
sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:667) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) {code} Also I have noticed that the files that are missing are often (or maybe always?) referred to in the log as follows: {quote} WARN 00:06:10,597 At level 3,
[jira] [Updated] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Nikolai Grigoriev updated CASSANDRA-6716: - Attachment: system.log.gz log from one of the nodes nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. 
One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138) at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252) at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819) at com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801) at 
javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487) at javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97) at javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328) at javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420) at javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848) at sun.reflect.GeneratedMethodAccessor38.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322) at sun.rmi.transport.Transport$1.run(Transport.java:177) at sun.rmi.transport.Transport$1.run(Transport.java:174) at java.security.AccessController.doPrivileged(Native Method) at sun.rmi.transport.Transport.serviceCall(Transport.java:173) at sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:553) at sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:808) at sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:667) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at
[jira] [Updated] (CASSANDRA-6440) Repair should allow repairing particular endpoints to reduce WAN usage.
[ https://issues.apache.org/jira/browse/CASSANDRA-6440?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] sankalp kohli updated CASSANDRA-6440: - Attachment: trunk_6440-v4.diff Rebased it with trunk. Repair should allow repairing particular endpoints to reduce WAN usage. Key: CASSANDRA-6440 URL: https://issues.apache.org/jira/browse/CASSANDRA-6440 Project: Cassandra Issue Type: New Feature Reporter: sankalp kohli Assignee: sankalp kohli Priority: Minor Attachments: 6440_repair.log, JIRA-6440-v2.diff, JIRA-6440-v3.diff, JIRA-6440.diff, trunk_6440-v4.diff The way we send out data that does not match over WAN can be improved. Example: Say there are four nodes(A,B,C,D) which are replica of a range we are repairing. A, B is in DC1 and C,D is in DC2. If A does not have the data which other replicas have, then we will have following streams 1) A to B and back 2) A to C and back(Goes over WAN) 3) A to D and back(Goes over WAN) One of the ways of doing it to reduce WAN traffic is this. 1) Repair A and B only with each other and C and D with each other starting at same time t. 2) Once these repairs have finished, A,B and C,D are in sync with respect to time t. 3) Now run a repair between A and C, the streams which are exchanged as a result of the diff will also be streamed to B and D via A and C(C and D behaves like a proxy to the streams). For a replication of DC1:2,DC2:2, the WAN traffic will get reduced by 50% and even more for higher replication factors. Another easy way to do this is to have repair command take nodes with which you want to repair with. Then we can do something like this. 1) Run repair between (A and B) and (C and D) 2) Run repair between (A and C) 3) Run repair between (A and B) and (C and D) But this will increase the traffic inside the DC as we wont be doing proxy. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6715) nodetool Cfhistograms doesn't see native protocol queries
[ https://issues.apache.org/jira/browse/CASSANDRA-6715?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903640#comment-13903640 ] sankalp kohli commented on CASSANDRA-6715: -- +1..worked for me as well using Datastax driver. nodetool Cfhistograms doesn't see native protocol queries -- Key: CASSANDRA-6715 URL: https://issues.apache.org/jira/browse/CASSANDRA-6715 Project: Cassandra Issue Type: Bug Components: Tools Environment: Mac OS X Reporter: julien campan Hi, When I successfully perform some operations in a table using the native protocol (read or write), these operations become invisible on cfhistograms (All the information is 0). If I'm using thrift, then operations are visible on cfhistograms. Thank you for your time -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903654#comment-13903654 ] sankalp kohli commented on CASSANDRA-6716: -- I am seeing this CASSANDRA-6285 in your logs as well. CassandraDaemon.java (line 192) Exception in thread Thread[CompactionExecutor:25,1,main] java.lang.RuntimeException: Last written key DecoratedKey(4020520808752189597, 31302e332e34352e3136312d6765744e6f6e4865617055736564) = current key DecoratedKey(-2471509717181461453, 31302e332e34352e3135380b0f00010004706470730c00010c00010c00010b0001003931302e332e34352e3135382d77696e7465726d7574655f6a6d657465) writing into /mnt/disk2/cassandra/data/OpsCenter/rollups60/OpsCenter-rollups60-tmp-jb-11559-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:142) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:165) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:197) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not 
exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at 
java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at
[jira] [Commented] (CASSANDRA-6285) LCS compaction failing with Exception
[ https://issues.apache.org/jira/browse/CASSANDRA-6285?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903655#comment-13903655 ] sankalp kohli commented on CASSANDRA-6285: -- This issue is also in logs attached in CASSANDRA-6716. LCS compaction failing with Exception - Key: CASSANDRA-6285 URL: https://issues.apache.org/jira/browse/CASSANDRA-6285 Project: Cassandra Issue Type: Bug Components: Core Environment: 4 nodes, shortly updated from 1.2.11 to 2.0.2 Reporter: David Sauer Assignee: Tyler Hobbs Fix For: 2.0.6 Attachments: compaction_test.py After altering everything to LCS the table OpsCenter.rollups60 amd one other none OpsCenter-Table got stuck with everything hanging around in L0. The compaction started and ran until the logs showed this: ERROR [CompactionExecutor:111] 2013-11-01 19:14:53,865 CassandraDaemon.java (line 187) Exception in thread Thread[CompactionExecutor:111,1,RMI Runtime] java.lang.RuntimeException: Last written key DecoratedKey(1326283851463420237, 37382e34362e3132382e3139382d6a7576616c69735f6e6f72785f696e6465785f323031335f31305f30382d63616368655f646f63756d656e74736c6f6f6b75702d676574426c6f6f6d46696c746572537061636555736564) = current key DecoratedKey(954210699457429663, 37382e34362e3132382e3139382d6a7576616c69735f6e6f72785f696e6465785f323031335f31305f30382d63616368655f646f63756d656e74736c6f6f6b75702d676574546f74616c4469736b5370616365557365640b0f) writing into /var/lib/cassandra/data/OpsCenter/rollups60/OpsCenter-rollups60-tmp-jb-58656-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:141) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:164) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at 
org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$6.runMayThrow(CompactionManager.java:296) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask.run(FutureTask.java:262) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) Moving back to STC worked to keep the compactions running. Especially my own table I would like to move to LCS. After a major compaction with STC the move to LCS fails with the same Exception. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903662#comment-13903662 ] Nikolai Grigoriev commented on CASSANDRA-6716: -- Yes, but I was not sure if the problem with missing sstables is the consequence of that issue. nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. 
One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138) at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252) at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819) at com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801) at 
javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487) at javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97) at javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328) at javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420) at javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848) at sun.reflect.GeneratedMethodAccessor38.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322) at sun.rmi.transport.Transport$1.run(Transport.java:177) at sun.rmi.transport.Transport$1.run(Transport.java:174) at java.security.AccessController.doPrivileged(Native Method) at sun.rmi.transport.Transport.serviceCall(Transport.java:173) at sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:553) at sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run0(TCPTransport.java:808) at sun.rmi.transport.tcp.TCPTransport$ConnectionHandler.run(TCPTransport.java:667) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
[jira] [Comment Edited] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903662#comment-13903662 ] Nikolai Grigoriev edited comment on CASSANDRA-6716 at 2/18/14 12:54 AM: Yes, but I was not sure if the problem with missing sstables is the consequence of that issue. And, unlike with that issue I did not upgrade from 1.2. was (Author: ngrigoriev): Yes, but I was not sure if the problem with missing sstables is the consequence of that issue. nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. 
One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138) at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252) at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819) at com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801) at 
javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487) at javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97) at javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328) at javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420) at javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848) at sun.reflect.GeneratedMethodAccessor38.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322) at sun.rmi.transport.Transport$1.run(Transport.java:177) at sun.rmi.transport.Transport$1.run(Transport.java:174) at java.security.AccessController.doPrivileged(Native Method) at sun.rmi.transport.Transport.serviceCall(Transport.java:173) at sun.rmi.transport.tcp.TCPTransport.handleMessages(TCPTransport.java:553) at
[jira] [Comment Edited] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903654#comment-13903654 ] sankalp kohli edited comment on CASSANDRA-6716 at 2/18/14 12:58 AM: I am seeing this CASSANDRA-6285 in your logs as well. Though looks unrelated CassandraDaemon.java (line 192) Exception in thread Thread[CompactionExecutor:25,1,main] java.lang.RuntimeException: Last written key DecoratedKey(4020520808752189597, 31302e332e34352e3136312d6765744e6f6e4865617055736564) = current key DecoratedKey(-2471509717181461453, 31302e332e34352e3135380b0f00010004706470730c00010c00010c00010b0001003931302e332e34352e3135382d77696e7465726d7574655f6a6d657465) writing into /mnt/disk2/cassandra/data/OpsCenter/rollups60/OpsCenter-rollups60-tmp-jb-11559-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:142) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:165) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:197) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) was (Author: kohlisankalp): I am seeing this 
CASSANDRA-6285 in your logs as well. CassandraDaemon.java (line 192) Exception in thread Thread[CompactionExecutor:25,1,main] java.lang.RuntimeException: Last written key DecoratedKey(4020520808752189597, 31302e332e34352e3136312d6765744e6f6e4865617055736564) = current key DecoratedKey(-2471509717181461453, 31302e332e34352e3135380b0f00010004706470730c00010c00010c00010b0001003931302e332e34352e3135382d77696e7465726d7574655f6a6d657465) writing into /mnt/disk2/cassandra/data/OpsCenter/rollups60/OpsCenter-rollups60-tmp-jb-11559-Data.db at org.apache.cassandra.io.sstable.SSTableWriter.beforeAppend(SSTableWriter.java:142) at org.apache.cassandra.io.sstable.SSTableWriter.append(SSTableWriter.java:165) at org.apache.cassandra.db.compaction.CompactionTask.runWith(CompactionTask.java:160) at org.apache.cassandra.io.util.DiskAwareRunnable.runMayThrow(DiskAwareRunnable.java:48) at org.apache.cassandra.utils.WrappedRunnable.run(WrappedRunnable.java:28) at org.apache.cassandra.db.compaction.CompactionTask.executeInternal(CompactionTask.java:60) at org.apache.cassandra.db.compaction.AbstractCompactionTask.execute(AbstractCompactionTask.java:59) at org.apache.cassandra.db.compaction.CompactionManager$BackgroundCompactionTask.run(CompactionManager.java:197) at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:471) at java.util.concurrent.FutureTask$Sync.innerRun(FutureTask.java:334) at java.util.concurrent.FutureTask.run(FutureTask.java:166) at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615) at java.lang.Thread.run(Thread.java:724) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai 
Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some
[jira] [Commented] (CASSANDRA-6692) AtomicBTreeColumns Improvements
[ https://issues.apache.org/jira/browse/CASSANDRA-6692?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903667#comment-13903667 ] Benedict commented on CASSANDRA-6692: - Are you sure? It doesn't appear to be applied to trunk, and I can't see the commit message in my email history AtomicBTreeColumns Improvements --- Key: CASSANDRA-6692 URL: https://issues.apache.org/jira/browse/CASSANDRA-6692 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Benedict Assignee: Benedict Priority: Minor Labels: easyfix, performance Fix For: 2.1 There are two improvements to make to the BTree code that should help: 1) It turns out Stack Allocation is more rubbish than we had hoped, and so the fast route actually allocates garbage. It's unlikely this reduces throughput, but the increased young-gen pressure is probably unwelcome. I propose to remove the fast route for now. 2) It is not uncommon to race to perform an update, so that the new values are actually out-of-date when we come to modify the tree. In this case the update should recognise that the original (portion of) the tree has not been modified, and simply return it, without allocating a new one. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[3/3] git commit: Merge branch 'cassandra-2.1' into trunk
Merge branch 'cassandra-2.1' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/434e0428 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/434e0428 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/434e0428 Branch: refs/heads/trunk Commit: 434e04281067b548712ff54501e76bcffa4d598e Parents: dc1dad3 5f82aa3 Author: Jonathan Ellis jbel...@apache.org Authored: Mon Feb 17 20:41:39 2014 -0600 Committer: Jonathan Ellis jbel...@apache.org Committed: Mon Feb 17 20:41:39 2014 -0600 -- CHANGES.txt | 2 +- .../org/apache/cassandra/utils/btree/BTree.java | 59 +- .../apache/cassandra/utils/btree/BTreeSet.java | 2 +- .../apache/cassandra/utils/btree/Builder.java | 6 +- .../cassandra/utils/btree/NodeBuilder.java | 85 +--- .../cassandra/utils/btree/UpdateFunction.java | 29 +++ .../apache/cassandra/utils/LongBTreeTest.java | 17 +++- 7 files changed, 106 insertions(+), 94 deletions(-) --
[2/3] git commit: optimize AtomicBTree patch by Benedict Elliott Smith; reviewed by jbellis for CASSANDRA-6692
optimize AtomicBTree patch by Benedict Elliott Smith; reviewed by jbellis for CASSANDRA-6692 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/5f82aa3b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/5f82aa3b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/5f82aa3b Branch: refs/heads/trunk Commit: 5f82aa3b03031c9aa7439b4cb745a6c5641a7b76 Parents: 2777e1e Author: Jonathan Ellis jbel...@apache.org Authored: Mon Feb 17 20:40:42 2014 -0600 Committer: Jonathan Ellis jbel...@apache.org Committed: Mon Feb 17 20:40:55 2014 -0600 -- CHANGES.txt | 2 +- .../org/apache/cassandra/utils/btree/BTree.java | 59 +- .../apache/cassandra/utils/btree/BTreeSet.java | 2 +- .../apache/cassandra/utils/btree/Builder.java | 6 +- .../cassandra/utils/btree/NodeBuilder.java | 85 +--- .../cassandra/utils/btree/UpdateFunction.java | 29 +++ .../apache/cassandra/utils/LongBTreeTest.java | 17 +++- 7 files changed, 106 insertions(+), 94 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/5f82aa3b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index d7bc77e..ea74c62 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -2,7 +2,7 @@ * Add flush directory distinct from compaction directories (CASSANDRA-6357) * Require JNA by default (CASSANDRA-6575) * add listsnapshots command to nodetool (CASSANDRA-5742) - * Introduce AtomicBTreeColumns (CASSANDRA-6271) + * Introduce AtomicBTreeColumns (CASSANDRA-6271, 6692) * Multithreaded commitlog (CASSANDRA-3578) * allocate fixed index summary memory pool and resample cold index summaries to use less memory (CASSANDRA-5519) http://git-wip-us.apache.org/repos/asf/cassandra/blob/5f82aa3b/src/java/org/apache/cassandra/utils/btree/BTree.java -- diff --git a/src/java/org/apache/cassandra/utils/btree/BTree.java b/src/java/org/apache/cassandra/utils/btree/BTree.java index 5368690..82f5574 100644 --- a/src/java/org/apache/cassandra/utils/btree/BTree.java 
+++ b/src/java/org/apache/cassandra/utils/btree/BTree.java @@ -128,7 +128,7 @@ public class BTree */ public static V Object[] update(Object[] btree, ComparatorV comparator, CollectionV updateWith, boolean updateWithIsSorted) { -return update(btree, comparator, updateWith, updateWithIsSorted, null); +return update(btree, comparator, updateWith, updateWithIsSorted, UpdateFunction.NoOp.Vinstance()); } /** @@ -154,63 +154,6 @@ public class BTree if (!updateWithIsSorted) updateWith = sorted(updateWith, comparator, updateWith.size()); -// if the b-tree is just a single root node, we can try a quick in-place merge -if (isLeaf(btree) btree.length + updateWith.size() QUICK_MERGE_LIMIT) -{ -// since updateWith is sorted, we can skip elements from earlier iterations tracked by this offset -int btreeOffset = 0; -int keyEnd = getLeafKeyEnd(btree); -Object[] merged = new Object[QUICK_MERGE_LIMIT]; -int mergedCount = 0; -for (V v : updateWith) -{ -// find the index i where v would belong in the original btree -int i = find(comparator, v, btree, btreeOffset, keyEnd); -boolean found = i = 0; -if (!found) -i = -i - 1; - -// copy original elements up to i into the merged array -int count = i - btreeOffset; -if (count 0) -{ -System.arraycopy(btree, btreeOffset, merged, mergedCount, count); -mergedCount += count; -btreeOffset = i; -} - -if (found) -{ -// apply replaceF if it matches an existing element -btreeOffset++; -if (updateF != null) -v = updateF.apply((V) btree[i], v); -} -else if (updateF != null) -{ -// new element but still need to apply replaceF to handle indexing and size-tracking -v = updateF.apply(v); -} - -merged[mergedCount++] = v; -} - -// copy any remaining original elements -if (btreeOffset keyEnd) -{ -int count = keyEnd - btreeOffset; -System.arraycopy(btree, btreeOffset, merged, mergedCount, count); -mergedCount += count; -} -
[1/3] git commit: optimize AtomicBTree patch by Benedict Elliott Smith; reviewed by jbellis for CASSANDRA-6692
Repository: cassandra Updated Branches: refs/heads/cassandra-2.1 2777e1e5d - 5f82aa3b0 refs/heads/trunk dc1dad328 - 434e04281 optimize AtomicBTree patch by Benedict Elliott Smith; reviewed by jbellis for CASSANDRA-6692 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/5f82aa3b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/5f82aa3b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/5f82aa3b Branch: refs/heads/cassandra-2.1 Commit: 5f82aa3b03031c9aa7439b4cb745a6c5641a7b76 Parents: 2777e1e Author: Jonathan Ellis jbel...@apache.org Authored: Mon Feb 17 20:40:42 2014 -0600 Committer: Jonathan Ellis jbel...@apache.org Committed: Mon Feb 17 20:40:55 2014 -0600 -- CHANGES.txt | 2 +- .../org/apache/cassandra/utils/btree/BTree.java | 59 +- .../apache/cassandra/utils/btree/BTreeSet.java | 2 +- .../apache/cassandra/utils/btree/Builder.java | 6 +- .../cassandra/utils/btree/NodeBuilder.java | 85 +--- .../cassandra/utils/btree/UpdateFunction.java | 29 +++ .../apache/cassandra/utils/LongBTreeTest.java | 17 +++- 7 files changed, 106 insertions(+), 94 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/5f82aa3b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index d7bc77e..ea74c62 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -2,7 +2,7 @@ * Add flush directory distinct from compaction directories (CASSANDRA-6357) * Require JNA by default (CASSANDRA-6575) * add listsnapshots command to nodetool (CASSANDRA-5742) - * Introduce AtomicBTreeColumns (CASSANDRA-6271) + * Introduce AtomicBTreeColumns (CASSANDRA-6271, 6692) * Multithreaded commitlog (CASSANDRA-3578) * allocate fixed index summary memory pool and resample cold index summaries to use less memory (CASSANDRA-5519) http://git-wip-us.apache.org/repos/asf/cassandra/blob/5f82aa3b/src/java/org/apache/cassandra/utils/btree/BTree.java -- diff --git a/src/java/org/apache/cassandra/utils/btree/BTree.java 
b/src/java/org/apache/cassandra/utils/btree/BTree.java index 5368690..82f5574 100644 --- a/src/java/org/apache/cassandra/utils/btree/BTree.java +++ b/src/java/org/apache/cassandra/utils/btree/BTree.java @@ -128,7 +128,7 @@ public class BTree */ public static V Object[] update(Object[] btree, ComparatorV comparator, CollectionV updateWith, boolean updateWithIsSorted) { -return update(btree, comparator, updateWith, updateWithIsSorted, null); +return update(btree, comparator, updateWith, updateWithIsSorted, UpdateFunction.NoOp.Vinstance()); } /** @@ -154,63 +154,6 @@ public class BTree if (!updateWithIsSorted) updateWith = sorted(updateWith, comparator, updateWith.size()); -// if the b-tree is just a single root node, we can try a quick in-place merge -if (isLeaf(btree) btree.length + updateWith.size() QUICK_MERGE_LIMIT) -{ -// since updateWith is sorted, we can skip elements from earlier iterations tracked by this offset -int btreeOffset = 0; -int keyEnd = getLeafKeyEnd(btree); -Object[] merged = new Object[QUICK_MERGE_LIMIT]; -int mergedCount = 0; -for (V v : updateWith) -{ -// find the index i where v would belong in the original btree -int i = find(comparator, v, btree, btreeOffset, keyEnd); -boolean found = i = 0; -if (!found) -i = -i - 1; - -// copy original elements up to i into the merged array -int count = i - btreeOffset; -if (count 0) -{ -System.arraycopy(btree, btreeOffset, merged, mergedCount, count); -mergedCount += count; -btreeOffset = i; -} - -if (found) -{ -// apply replaceF if it matches an existing element -btreeOffset++; -if (updateF != null) -v = updateF.apply((V) btree[i], v); -} -else if (updateF != null) -{ -// new element but still need to apply replaceF to handle indexing and size-tracking -v = updateF.apply(v); -} - -merged[mergedCount++] = v; -} - -// copy any remaining original elements -if (btreeOffset keyEnd) -{ -int count = keyEnd - btreeOffset;
[jira] [Commented] (CASSANDRA-6692) AtomicBTreeColumns Improvements
[ https://issues.apache.org/jira/browse/CASSANDRA-6692?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903714#comment-13903714 ] Jonathan Ellis commented on CASSANDRA-6692: --- re-committed and pushed this time. AtomicBTreeColumns Improvements --- Key: CASSANDRA-6692 URL: https://issues.apache.org/jira/browse/CASSANDRA-6692 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Benedict Assignee: Benedict Priority: Minor Labels: easyfix, performance Fix For: 2.1 There are two improvements to make to the BTree code that should help: 1) It turns out Stack Allocation is more rubbish than we had hoped, and so the fast route actually allocates garbage. It's unlikely this reduces throughput, but the increased young-gen pressure is probably unwelcome. I propose to remove the fast route for now. 2) It is not uncommon to race to perform an update, so that the new values are actually out-of-date when we come to modify the tree. In this case the update should recognise that the original (portion of) the tree has not been modified, and simply return it, without allocating a new one. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6283) Windows 7 data files kept open / can't be deleted after compaction.
[ https://issues.apache.org/jira/browse/CASSANDRA-6283?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903719#comment-13903719 ] graham sanderson commented on CASSANDRA-6283: - Unless I'm missing something, the code at StreamWriter.java:76 {code} validator = DataIntegrityMetadata.checksumValidator(sstable.descriptor); {code} creates a file reference that is never closed, which seems like it is a bug on all platforms - of course it might not manifest in the same way (especially if multiple threads/timing are involved). Windows 7 data files keept open / can't be deleted after compaction. Key: CASSANDRA-6283 URL: https://issues.apache.org/jira/browse/CASSANDRA-6283 Project: Cassandra Issue Type: Bug Components: Core Environment: Windows 7 (32) / Java 1.7.0.45 Reporter: Andreas Schnitzerling Assignee: Joshua McKenzie Labels: compaction Fix For: 2.0.6 Attachments: leakdetect.patch, screenshot-1.jpg, system.log Files cannot be deleted, patch CASSANDRA-5383 (Win7 deleting problem) doesn't help on Win-7 on Cassandra 2.0.2. Even 2.1 Snapshot is not running. The cause is: Opened file handles seem to be lost and not closed properly. Win 7 blames, that another process is still using the file (but its obviously cassandra). Only restart of the server makes the files deleted. But after heavy using (changes) of tables, there are about 24K files in the data folder (instead of 35 after every restart) and Cassandra crashes. I experiminted and I found out, that a finalizer fixes the problem. So after GC the files will be deleted (not optimal, but working fine). It runs now 2 days continously without problem. Possible fix/test: I wrote the following finalizer at the end of class org.apache.cassandra.io.util.RandomAccessReader: {code:title=RandomAccessReader.java|borderStyle=solid} @Override protected void finalize() throws Throwable { deallocate(); super.finalize(); } {code} Can somebody test / develop / patch it? Thx. 
-- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-5918) Remove CQL2 entirely from Cassandra 3.0
[ https://issues.apache.org/jira/browse/CASSANDRA-5918?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanel&focusedCommentId=13903759#comment-13903759 ] Aleksey Yeschenko commented on CASSANDRA-5918: -- Pushed the commit to https://github.com/iamaleksey/cassandra/commits/5918. Not sure what to do with thrift definitions (besides obviously bumping the major). Throw IRE for all the CQL2 methods (as in the commit), or get rid of them altogether, erasing the last and only traces of CQL2 existence (other than NEWS and CHANGES). Remove CQL2 entirely from Cassandra 3.0 --- Key: CASSANDRA-5918 URL: https://issues.apache.org/jira/browse/CASSANDRA-5918 Project: Cassandra Issue Type: Improvement Reporter: Aleksey Yeschenko Assignee: Aleksey Yeschenko Priority: Minor Labels: cql Fix For: 3.0 CQL2 is officially no longer worked on since 1.2. cqlsh no longer supports CQL2 as of Cassandra 2.0. It's probably the time to deprecate CQL2 in 2.0 and to remove it entirely in 2.2 - there is nothing in CQL2 now that can't be done via CQL3 and two versions advance warning is plenty of time for those few still using CQL2 to switch to CQL3. -- This message was sent by Atlassian JIRA (v6.1.5#6160)
[jira] [Commented] (CASSANDRA-6716) nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist)
[ https://issues.apache.org/jira/browse/CASSANDRA-6716?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903783#comment-13903783 ] Ravi Prasad commented on CASSANDRA-6716: Do you see the missing file(s) compacted before in the logs ? I've been seeing ocassional FileNotFoundException during compaction running 2.0.5, but not able to reproduce consistently. in our case, the missing file was compacted away, but for some reason they weren't cleaned up, and after restart, looked like the sstable was loaded opening a reference before getting cleaned up by cleanupCompactionleftovers. the issue goes away after another restart for me. Could be related to CASSANDRA-5151 and CASSANDRA-6086. nodetool scrub constantly fails with RuntimeException (Tried to hard link to file that does not exist) -- Key: CASSANDRA-6716 URL: https://issues.apache.org/jira/browse/CASSANDRA-6716 Project: Cassandra Issue Type: Bug Components: Core Environment: Cassandra 2.0.5 (built from source), Linux, 6 nodes, JDK 1.7 Reporter: Nikolai Grigoriev Attachments: system.log.gz It seems that since recently I have started getting a number of exceptions like File not found on all Cassandra nodes. Currently I am getting an exception like this every couple of seconds on each node, for different keyspaces and CFs. I have tried to restart the nodes, tried to scrub them. No luck so far. It seems that scrub cannot complete on any of these nodes, at some point it fails because of the file that it can't find. 
One one of the nodes currently the nodetool scrub command fails instantly and consistently with this exception: {code} # /opt/cassandra/bin/nodetool scrub Exception in thread main java.lang.RuntimeException: Tried to hard link to file that does not exist /mnt/disk5/cassandra/data/mykeyspace_jmeter/test_contacts/mykeyspace_jmeter-test_contacts-jb-28049-Data.db at org.apache.cassandra.io.util.FileUtils.createHardLink(FileUtils.java:75) at org.apache.cassandra.io.sstable.SSTableReader.createLinks(SSTableReader.java:1215) at org.apache.cassandra.db.ColumnFamilyStore.snapshotWithoutFlush(ColumnFamilyStore.java:1826) at org.apache.cassandra.db.ColumnFamilyStore.scrub(ColumnFamilyStore.java:1122) at org.apache.cassandra.service.StorageService.scrub(StorageService.java:2159) at sun.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at sun.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:57) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.Trampoline.invoke(MethodUtil.java:75) at sun.reflect.GeneratedMethodAccessor15.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.reflect.misc.MethodUtil.invoke(MethodUtil.java:279) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:112) at com.sun.jmx.mbeanserver.StandardMBeanIntrospector.invokeM2(StandardMBeanIntrospector.java:46) at com.sun.jmx.mbeanserver.MBeanIntrospector.invokeM(MBeanIntrospector.java:237) at com.sun.jmx.mbeanserver.PerInterface.invoke(PerInterface.java:138) at com.sun.jmx.mbeanserver.MBeanSupport.invoke(MBeanSupport.java:252) at com.sun.jmx.interceptor.DefaultMBeanServerInterceptor.invoke(DefaultMBeanServerInterceptor.java:819) at com.sun.jmx.mbeanserver.JmxMBeanServer.invoke(JmxMBeanServer.java:801) at 
javax.management.remote.rmi.RMIConnectionImpl.doOperation(RMIConnectionImpl.java:1487) at javax.management.remote.rmi.RMIConnectionImpl.access$300(RMIConnectionImpl.java:97) at javax.management.remote.rmi.RMIConnectionImpl$PrivilegedOperation.run(RMIConnectionImpl.java:1328) at javax.management.remote.rmi.RMIConnectionImpl.doPrivilegedOperation(RMIConnectionImpl.java:1420) at javax.management.remote.rmi.RMIConnectionImpl.invoke(RMIConnectionImpl.java:848) at sun.reflect.GeneratedMethodAccessor38.invoke(Unknown Source) at sun.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.lang.reflect.Method.invoke(Method.java:606) at sun.rmi.server.UnicastServerRef.dispatch(UnicastServerRef.java:322) at sun.rmi.transport.Transport$1.run(Transport.java:177) at sun.rmi.transport.Transport$1.run(Transport.java:174) at java.security.AccessController.doPrivileged(Native Method) at
[jira] [Commented] (CASSANDRA-6561) Static columns in CQL3
[ https://issues.apache.org/jira/browse/CASSANDRA-6561?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13903785#comment-13903785 ] Nicolas Favre-Felix commented on CASSANDRA-6561: [~slebresne], my point was not about 2i, but about the fact that partition-level isolation is only useful if during _read_ you can select both the static and the clustered columns. An isolation property is used to guarantee that readers do not see partially-applied updates. If readers have to issue two queries to select the static and the clusters columns, they cannot rely on any isolation. They _will_ see partially applied updates and have an inconsistent view of the partition. There is currently no way to select together both the static and clustered columns of a CQL row, even though this is how they are returned when the full partition is queried. There are two more points on this subject: # It seems to me that presenting CQL rows with the static columns for full-partition scans but not doing so in any other case displays a level of inconsistency in the API. # All of this is pretty easy to do with Thrift. Static columns in CQL3 -- Key: CASSANDRA-6561 URL: https://issues.apache.org/jira/browse/CASSANDRA-6561 Project: Cassandra Issue Type: New Feature Reporter: Sylvain Lebresne Assignee: Sylvain Lebresne Fix For: 2.0.6 I'd like to suggest the following idea for adding static columns to CQL3. I'll note that the basic idea has been suggested by jhalliday on irc but the rest of the details are mine and I should be blamed for anything stupid in what follows. Let me start with a rational: there is 2 main family of CF that have been historically used in Thrift: static ones and dynamic ones. CQL3 handles both family through the presence or not of clustering columns. There is however some cases where mixing both behavior has its use. 
I like to think of those use cases as 3 broad category: # to denormalize small amounts of not-entirely-static data in otherwise static entities. It's say tags for a product or custom properties in a user profile. This is why we've added CQL3 collections. Importantly, this is the *only* use case for which collections are meant (which doesn't diminishes their usefulness imo, and I wouldn't disagree that we've maybe not communicated this too well). # to optimize fetching both a static entity and related dynamic ones. Say you have blog posts, and each post has associated comments (chronologically ordered). *And* say that a very common query is fetch a post and its 50 last comments. In that case, it *might* be beneficial to store a blog post (static entity) in the same underlying CF than it's comments for performance reason. So that fetch a post and it's 50 last comments is just one slice internally. # you want to CAS rows of a dynamic partition based on some partition condition. This is the same use case than why CASSANDRA-5633 exists for. As said above, 1) is already covered by collections, but 2) and 3) are not (and I strongly believe collections are not the right fit, API wise, for those). Also, note that I don't want to underestimate the usefulness of 2). In most cases, using a separate table for the blog posts and the comments is The Right Solution, and trying to do 2) is premature optimisation. Yet, when used properly, that kind of optimisation can make a difference, so I think having a relatively native solution for it in CQL3 could make sense. Regarding 3), though CASSANDRA-5633 would provide one solution for it, I have the feeling that static columns actually are a more natural approach (in term of API). That's arguably more of a personal opinion/feeling though. So long story short, CQL3 lacks a way to mix both some static and dynamic rows in the same partition of the same CQL3 table, and I think such a tool could have it's use. 
The proposal is thus to allow static columns. Static columns would only make sense in table with clustering columns (the dynamic ones). A static column value would be static to the partition (all rows of the partition would share the value for such column). The syntax would just be: {noformat} CREATE TABLE t ( k text, s text static, i int, v text, PRIMARY KEY (k, i) ) {noformat} then you'd get: {noformat} INSERT INTO t(k, s, i, v) VALUES (k0, I'm shared, 0, foo); INSERT INTO t(k, s, i, v) VALUES (k0, I'm still shared, 1, bar); SELECT * FROM t; k | s | i |v k0 | I'm still shared | 0 | bar k0 | I'm still shared | 1 | foo {noformat} There would be a few semantic details to decide on regarding deletions, ttl, etc. but let's see if we agree it's a good idea first before ironing those