git commit: Fix use of CQL3 functions with descending clustering order
Updated Branches: refs/heads/cassandra-1.2 9851b73fc - 7eae57aea Fix use of CQL3 functions with descending clustering order patch by slebresne; reviewed by iamaleksey for CASSANDRA-5472 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7eae57ae Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7eae57ae Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7eae57ae Branch: refs/heads/cassandra-1.2 Commit: 7eae57aeac291799d54d5a1b5a444e27336215f1 Parents: 9851b73 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Apr 29 09:29:22 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Apr 29 09:29:22 2013 +0200 -- CHANGES.txt|3 +- .../cassandra/cql3/statements/Selection.java | 42 -- 2 files changed, 38 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/7eae57ae/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index c843e5e..241ef7c 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -13,9 +13,10 @@ * Fix shutdown of binary protocol server (CASSANDRA-5507) * Fix repair -snapshot not working (CASSANDRA-5512) * Set isRunning flag later in binary protocol server (CASSANDRA-5467) + * Fix use of CQL3 functions with descencind clustering order (CASSANDRA-5472) Merged from 1.1 * Add retry mechanism to OTC for non-droppable_verbs (CASSANDRA-5393) - * Use allocator information to improve memtable memory usage estimate + * Use allocator information to improve memtable memory usage estimate (CASSANDRA-5497) * Fix trying to load deleted row into row cache on startup (CASSANDRA-4463) http://git-wip-us.apache.org/repos/asf/cassandra/blob/7eae57ae/src/java/org/apache/cassandra/cql3/statements/Selection.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/Selection.java b/src/java/org/apache/cassandra/cql3/statements/Selection.java index e4e59c5..64710be 100644 --- 
a/src/java/org/apache/cassandra/cql3/statements/Selection.java +++ b/src/java/org/apache/cassandra/cql3/statements/Selection.java @@ -92,7 +92,7 @@ public abstract class Selection throw new InvalidRequestException(String.format(Undefined name %s in selection clause, raw)); if (metadata != null) metadata.add(name); -return new SimpleSelector(addAndGetIndex(name, names), name.type); +return new SimpleSelector(name.toString(), addAndGetIndex(name, names), name.type); } else if (raw instanceof RawSelector.WritetimeOrTTL) { @@ -107,7 +107,7 @@ public abstract class Selection if (metadata != null) metadata.add(makeWritetimeOrTTLSpec(cfDef, tot)); -return new WritetimeOrTTLSelector(addAndGetIndex(name, names), tot.isWritetime); +return new WritetimeOrTTLSelector(name.toString(), addAndGetIndex(name, names), tot.isWritetime); } else { @@ -313,11 +313,13 @@ public abstract class Selection private static class SimpleSelector implements Selector { +private final String columnName; private final int idx; private final AbstractType? type; -public SimpleSelector(int idx, AbstractType? type) +public SimpleSelector(String columnName, int idx, AbstractType? 
type) { +this.columnName = columnName; this.idx = idx; this.type = type; } @@ -329,7 +331,13 @@ public abstract class Selection public boolean isAssignableTo(ColumnSpecification receiver) { -return type.equals(receiver.type); +return type.asCQL3Type().equals(receiver.type.asCQL3Type()); +} + +@Override +public String toString() +{ +return columnName; } } @@ -355,17 +363,33 @@ public abstract class Selection public boolean isAssignableTo(ColumnSpecification receiver) { -return fun.returnType().equals(receiver.type); +return fun.returnType().asCQL3Type().equals(receiver.type.asCQL3Type()); +} + +@Override +public String toString() +{ +StringBuilder sb = new StringBuilder(); +sb.append(fun.name()).append((); +for (int i = 0; i argSelectors.size(); i++) +{ +if (i 0) +sb.append(, ); +sb.append(argSelectors.get(i)); +} +return sb.append()).toString(); } } private static class
[2/2] git commit: Merge branch 'cassandra-1.2' into trunk
Merge branch 'cassandra-1.2' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/6a5495af Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/6a5495af Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/6a5495af Branch: refs/heads/trunk Commit: 6a5495afe4e89f3d9de8cb6c961104027774e2f0 Parents: 383d35e 7eae57a Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Apr 29 09:32:13 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Apr 29 09:32:13 2013 +0200 -- CHANGES.txt|3 +- .../cassandra/cql3/statements/Selection.java | 42 -- 2 files changed, 38 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6a5495af/CHANGES.txt -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/6a5495af/src/java/org/apache/cassandra/cql3/statements/Selection.java --
[1/2] git commit: Fix use of CQL3 functions with descending clustering order
Updated Branches: refs/heads/trunk 383d35ea5 - 6a5495afe Fix use of CQL3 functions with descending clustering order patch by slebresne; reviewed by iamaleksey for CASSANDRA-5472 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/7eae57ae Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/7eae57ae Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/7eae57ae Branch: refs/heads/trunk Commit: 7eae57aeac291799d54d5a1b5a444e27336215f1 Parents: 9851b73 Author: Sylvain Lebresne sylv...@datastax.com Authored: Mon Apr 29 09:29:22 2013 +0200 Committer: Sylvain Lebresne sylv...@datastax.com Committed: Mon Apr 29 09:29:22 2013 +0200 -- CHANGES.txt|3 +- .../cassandra/cql3/statements/Selection.java | 42 -- 2 files changed, 38 insertions(+), 7 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/7eae57ae/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index c843e5e..241ef7c 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -13,9 +13,10 @@ * Fix shutdown of binary protocol server (CASSANDRA-5507) * Fix repair -snapshot not working (CASSANDRA-5512) * Set isRunning flag later in binary protocol server (CASSANDRA-5467) + * Fix use of CQL3 functions with descencind clustering order (CASSANDRA-5472) Merged from 1.1 * Add retry mechanism to OTC for non-droppable_verbs (CASSANDRA-5393) - * Use allocator information to improve memtable memory usage estimate + * Use allocator information to improve memtable memory usage estimate (CASSANDRA-5497) * Fix trying to load deleted row into row cache on startup (CASSANDRA-4463) http://git-wip-us.apache.org/repos/asf/cassandra/blob/7eae57ae/src/java/org/apache/cassandra/cql3/statements/Selection.java -- diff --git a/src/java/org/apache/cassandra/cql3/statements/Selection.java b/src/java/org/apache/cassandra/cql3/statements/Selection.java index e4e59c5..64710be 100644 --- a/src/java/org/apache/cassandra/cql3/statements/Selection.java +++ 
b/src/java/org/apache/cassandra/cql3/statements/Selection.java @@ -92,7 +92,7 @@ public abstract class Selection throw new InvalidRequestException(String.format(Undefined name %s in selection clause, raw)); if (metadata != null) metadata.add(name); -return new SimpleSelector(addAndGetIndex(name, names), name.type); +return new SimpleSelector(name.toString(), addAndGetIndex(name, names), name.type); } else if (raw instanceof RawSelector.WritetimeOrTTL) { @@ -107,7 +107,7 @@ public abstract class Selection if (metadata != null) metadata.add(makeWritetimeOrTTLSpec(cfDef, tot)); -return new WritetimeOrTTLSelector(addAndGetIndex(name, names), tot.isWritetime); +return new WritetimeOrTTLSelector(name.toString(), addAndGetIndex(name, names), tot.isWritetime); } else { @@ -313,11 +313,13 @@ public abstract class Selection private static class SimpleSelector implements Selector { +private final String columnName; private final int idx; private final AbstractType? type; -public SimpleSelector(int idx, AbstractType? type) +public SimpleSelector(String columnName, int idx, AbstractType? type) { +this.columnName = columnName; this.idx = idx; this.type = type; } @@ -329,7 +331,13 @@ public abstract class Selection public boolean isAssignableTo(ColumnSpecification receiver) { -return type.equals(receiver.type); +return type.asCQL3Type().equals(receiver.type.asCQL3Type()); +} + +@Override +public String toString() +{ +return columnName; } } @@ -355,17 +363,33 @@ public abstract class Selection public boolean isAssignableTo(ColumnSpecification receiver) { -return fun.returnType().equals(receiver.type); +return fun.returnType().asCQL3Type().equals(receiver.type.asCQL3Type()); +} + +@Override +public String toString() +{ +StringBuilder sb = new StringBuilder(); +sb.append(fun.name()).append((); +for (int i = 0; i argSelectors.size(); i++) +{ +if (i 0) +sb.append(, ); +sb.append(argSelectors.get(i)); +} +return sb.append()).toString(); } } private static class WritetimeOrTTLSelector
[jira] [Commented] (CASSANDRA-2737) CQL: support IF EXISTS extension for DROP commands (table, keyspace, index)
[ https://issues.apache.org/jira/browse/CASSANDRA-2737?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644325#comment-13644325 ] Michał Michalski commented on CASSANDRA-2737: - Thanks for the reply, Sylvain! I definitely agree that my solution has some overhead and yours is much simpler, but one of the things I aimed for was to distinguish the critical exceptions from the ones that can be handled by such IF (NOT) EXISTS extensions, so - in the end - it could be possible to let the user know what really happened and, maybe, display some descriptive NOTICE message instead of displaying nothing (I was a bit inspired by how it's reported in PostgreSQL in a similar case). However, if you say it's not necessary, I'm fine with it :-) Additionally, in most of the cases validation checking if a KS/CF/Index exists (or not) happens outside the *Statement classes (for example for CFs it's done in MigrationManager.announceColumnFamilyDrop()) and I didn't like the idea of passing an additional parameter and handling it there - I preferred this decision (whether an exception should be thrown or not) to be made by the *Statement class itself, based on what was returned/thrown by MigrationManager. One thing I missed when coding was the fact that for KS/CF, when reporting an existing / nonexistent KS/CF, ConfigurationException is thrown (index-related code throws InvalidRequestException, which looks like a little inconsistency to me, by the way), so I could have reused it, instead of creating new ones. 
Anyway, I see your point :-) Tomorrow I'm leaving for three weeks and when I'm back I'll reimplement it taking your suggestions into account :-) CQL: support IF EXISTS extension for DROP commands (table, keyspace, index) --- Key: CASSANDRA-2737 URL: https://issues.apache.org/jira/browse/CASSANDRA-2737 Project: Cassandra Issue Type: New Feature Affects Versions: 0.8.0 Reporter: Cathy Daw Priority: Trivial Labels: cql Fix For: 2.0 Attachments: 2737-concept-v1.txt -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5517) Cassandra crashes at start with segmentation fault
[ https://issues.apache.org/jira/browse/CASSANDRA-5517?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644425#comment-13644425 ] Jeremy Hanna commented on CASSANDRA-5517: - Can you try to see if you can reproduce the problem while running with a Sun/Oracle 1.6 JDK (non-openjdk), one of the more recent versions? In the 1.6 line, openjdk is pretty behind the Sun/Oracle JDK. I wouldn't be surprised if it was just an oddity with openjdk. Cassandra crashes at start with segmentation fault -- Key: CASSANDRA-5517 URL: https://issues.apache.org/jira/browse/CASSANDRA-5517 Project: Cassandra Issue Type: Bug Components: Core Environment: VirtualBox 4.2.6 VM with 4GB RAM, Xubuntu 12.10 as host and guest OS. Cassandra 1.2.4 installed on guest as Debian package. Reporter: Sergey Naumov Sometimes Cassandra fails at start with segmentation fault: # /usr/sbin/cassandra -f xss = -ea -javaajent:/usr/share/cassandra/lib/jamm-0.2.5.jar -XX:+UseThreadPriorities -XX:ThreadPriorityPolicy=42 -Xms1024M -Xmx1024M -Xmn100M -XX:+HeapDumpOnOutOfMemoryError -Xss180k Segmentation fault It seems that not only me encountered this bug: http://snapwebsites.org/known-issues/cassandra-crashes-java-segmentation-fault Solution proposed on this link works. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5051) Allow automatic cleanup after gc_grace
[ https://issues.apache.org/jira/browse/CASSANDRA-5051?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644509#comment-13644509 ] Jonathan Ellis commented on CASSANDRA-5051: --- bq. V6 addresses the test concerns Why do half a dozen tests need initServer now? When I rip pendingranges out of getNodeRange, CPTest still passes: {code} . private List<Range<Token>> getNodeRange() { List<Range<Token>> ranges = new ArrayList<Range<Token>>(StorageService.instance.getLocalRanges(cfs.table.getName())); return ranges; } {code} Allow automatic cleanup after gc_grace -- Key: CASSANDRA-5051 URL: https://issues.apache.org/jira/browse/CASSANDRA-5051 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Brandon Williams Assignee: Vijay Labels: vnodes Fix For: 2.0 Attachments: 0001-5051-v4.patch, 0001-5051-v6.patch, 0001-5051-with-test-fixes.patch, 0001-CASSANDRA-5051.patch, 0002-5051-remove-upgradesstable.patch, 0002-5051-remove-upgradesstable-v4.patch, 0004-5051-additional-test-v4.patch, 5051-v2.txt When using vnodes, after adding a new node you have to run cleanup on all the machines, because you don't know which are affected and chances are it was most if not all of them. As an alternative to this intensive process, we could allow cleanup during compaction if the data is older than gc_grace (or perhaps some other time period since people tend to use gc_grace hacks to get rid of tombstones.) -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5443) Add CAS CQL support
[ https://issues.apache.org/jira/browse/CASSANDRA-5443?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Sylvain Lebresne updated CASSANDRA-5443: Attachment: 0005-add-UPDATE-.-IF-NOT-EXISTS-syntax.txt Adding a 5th patch for the 'IF NOT EXISTS' syntax. As a side note, I figured an 'IF EXISTS' notation for delete would make sense by symmetry, but that can't really be done with the current {{cas()}} method. We could add some sort of flag for that, but since I wasn't sure what was the best way to do that, I'm leaving that for later. Add CAS CQL support --- Key: CASSANDRA-5443 URL: https://issues.apache.org/jira/browse/CASSANDRA-5443 Project: Cassandra Issue Type: Sub-task Components: API, Core Reporter: Jonathan Ellis Assignee: Sylvain Lebresne Fix For: 2.0 Attachments: 0001-Refactor-Update-and-Delete-statement-to-extract-common.txt, 0002-Add-syntax-to-support-conditional-update-delete.txt, 0003-Handle-deleted-and-expiring-column-in-paxos-updates.txt, 0004-Support-tombstones-when-comparing-for-CAS.txt, 0005-add-UPDATE-.-IF-NOT-EXISTS-syntax.txt -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Comment Edited] (CASSANDRA-5051) Allow automatic cleanup after gc_grace
[ https://issues.apache.org/jira/browse/CASSANDRA-5051?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644509#comment-13644509 ] Jonathan Ellis edited comment on CASSANDRA-5051 at 4/29/13 2:24 PM: bq. V6 addressees the test concerns Why do half a dozen tests need initServer now? When I rip pendingranges out of getNodeRange, CPTest still passes: {code} . private ListRangeToken getNodeRange() { ListRangeToken ranges = new ArrayListRangeToken(StorageService.instance.getLocalRanges(cfs.table.getName())); return ranges; } {code} This may be because it's still hitting the empty ranges == all ranges code in isInRanges, which looks kind of fishy to me -- if we have the pending ranges in getNodeRange, we shouldn't need this extra piece. was (Author: jbellis): bq. V6 addressees the test concerns Why do half a dozen tests need initServer now? When I rip pendingranges out of getNodeRange, CPTest still passes: {code} . private ListRangeToken getNodeRange() { ListRangeToken ranges = new ArrayListRangeToken(StorageService.instance.getLocalRanges(cfs.table.getName())); return ranges; } {code} Allow automatic cleanup after gc_grace -- Key: CASSANDRA-5051 URL: https://issues.apache.org/jira/browse/CASSANDRA-5051 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Brandon Williams Assignee: Vijay Labels: vnodes Fix For: 2.0 Attachments: 0001-5051-v4.patch, 0001-5051-v6.patch, 0001-5051-with-test-fixes.patch, 0001-CASSANDRA-5051.patch, 0002-5051-remove-upgradesstable.patch, 0002-5051-remove-upgradesstable-v4.patch, 0004-5051-additional-test-v4.patch, 5051-v2.txt When using vnodes, after adding a new node you have to run cleanup on all the machines, because you don't know which are affected and chances are it was most if not all of them. 
As an alternative to this intensive process, we could allow cleanup during compaction if the data is older than gc_grace (or perhaps some other time period since people tend to use gc_grace hacks to get rid of tombstones.) -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5443) Add CAS CQL support
[ https://issues.apache.org/jira/browse/CASSANDRA-5443?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644514#comment-13644514 ] Sylvain Lebresne commented on CASSANDRA-5443: - btw, I've also pushed a dtest at https://github.com/riptano/cassandra-dtest/blob/master/cql_tests.py#L3044 Add CAS CQL support --- Key: CASSANDRA-5443 URL: https://issues.apache.org/jira/browse/CASSANDRA-5443 Project: Cassandra Issue Type: Sub-task Components: API, Core Reporter: Jonathan Ellis Assignee: Sylvain Lebresne Fix For: 2.0 Attachments: 0001-Refactor-Update-and-Delete-statement-to-extract-common.txt, 0002-Add-syntax-to-support-conditional-update-delete.txt, 0003-Handle-deleted-and-expiring-column-in-paxos-updates.txt, 0004-Support-tombstones-when-comparing-for-CAS.txt, 0005-add-UPDATE-.-IF-NOT-EXISTS-syntax.txt -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5273) Hanging system after OutOfMemory. Server cannot die due to uncaughtException handling
[ https://issues.apache.org/jira/browse/CASSANDRA-5273?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644517#comment-13644517 ] Jonathan Ellis commented on CASSANDRA-5273: --- bq. the threads that would have called System.exit I don't think we're very rigorous about calling Thread.setDaemon, so I think this will actually deadlock it -- System.exit will wait for daemon threads to die, and the daemon threads will park at the lock acquisition. Hanging system after OutOfMemory. Server cannot die due to uncaughtException handling - Key: CASSANDRA-5273 URL: https://issues.apache.org/jira/browse/CASSANDRA-5273 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.2.1 Environment: linux, 64 bit Reporter: Ignace Desimpel Assignee: Marcus Eriksson Priority: Minor Fix For: 1.2.5 Attachments: 0001-CASSANDRA-5273-add-timeouts-to-the-blocking-commitlo.patch, 0001-CASSANDRA-5273-add-timeouts-to-the-blocking-commitlo.patch, CassHangs.txt On out of memory exception, there is an uncaughtexception handler that is calling System.exit(). However, multiple threads are calling this handler causing a deadlock and the server cannot stop working. See http://www.mail-archive.com/user@cassandra.apache.org/msg27898.html. And see stack trace in attachement. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5150) sstable2json doesn't check SIGPIPE
[ https://issues.apache.org/jira/browse/CASSANDRA-5150?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644555#comment-13644555 ] Jonathan Ellis commented on CASSANDRA-5150: --- Do you actually see a performance difference when you redirect to a file, for instance? That's probably by far the most common use case. sstable2json doesn't check SIGPIPE -- Key: CASSANDRA-5150 URL: https://issues.apache.org/jira/browse/CASSANDRA-5150 Project: Cassandra Issue Type: Bug Components: Tools Affects Versions: 2.0 Reporter: Will Oberman Priority: Minor Labels: lhf Attachments: trunk-5150.txt I believe this explains the issue better than I can: http://stackoverflow.com/questions/11695500/how-do-i-get-java-to-exit-when-piped-to-head. Basically, I expected that if I did: sstable2json SSTABLE | other-process, and other-process had issues and/or died then the sstable2json process would die. It doesn't. My workaround is using mkfifo FILE, and having sstable2json write to FILE, other-process read from FILE, and a 3rd overall process make sure the other two processes are working. But, it would be _much_ simplier if sstable2json failed on SIGPIPE. I looks like the fix is to periodically check System.out.checkError() in the Java. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Created] (CASSANDRA-5522) Migrate FileStreamTask to Guava RateLimiter and remove homegrown Throttle
Jonathan Ellis created CASSANDRA-5522: - Summary: Migrate FileStreamTask to Guava RateLimiter and remove homegrown Throttle Key: CASSANDRA-5522 URL: https://issues.apache.org/jira/browse/CASSANDRA-5522 Project: Cassandra Issue Type: Task Components: Core Reporter: Jonathan Ellis Assignee: Yuki Morishita Fix For: 2.0 -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Comment Edited] (CASSANDRA-4316) Compaction Throttle too bursty with large rows
[ https://issues.apache.org/jira/browse/CASSANDRA-4316?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13643558#comment-13643558 ] Jonathan Ellis edited comment on CASSANDRA-4316 at 4/29/13 3:00 PM: bq. we [should] open a new ticket to move FST to RateLimiter and get rid of Throttle entirely Done: CASSANDRA-5522. was (Author: jbellis): Reminder from earlier: bq. we [should] open a new ticket to move FST to RateLimiter and get rid of Throttle entirely Compaction Throttle too bursty with large rows -- Key: CASSANDRA-4316 URL: https://issues.apache.org/jira/browse/CASSANDRA-4316 Project: Cassandra Issue Type: Improvement Components: Core Affects Versions: 0.8.0 Reporter: Wayne Lewis Assignee: Jonathan Ellis Fix For: 1.2.5 Attachments: 4316-1.2.txt, 4316-1.2-v2.txt, 4316-v3.txt In org.apache.cassandra.db.compaction.CompactionIterable the check for compaction throttling occurs once every 1000 rows. In our workload this is much too large as we have many large rows (16 - 100 MB). With a 100 MB row, about 100 GB is read (and possibly written) before the compaction throttle sleeps. This causes bursts of essentially unthrottled compaction IO followed by a long sleep which yields inconsistence performance and high error rates during the bursts. We applied a workaround to check throttle every row which solved our performance and error issues: line 116 in org.apache.cassandra.db.compaction.CompactionIterable: if ((row++ % 1000) == 0) replaced with if ((row++ % 1) == 0) I think the better solution is to calculate how often throttle should be checked based on the throttle rate to apply sleeps more consistently. E.g. if 16MB/sec is the limit then check for sleep after every 16MB is read so sleeps are spaced out about every second. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5432) Repair Freeze/Gossip Invisibility Issues 1.2.4
[ https://issues.apache.org/jira/browse/CASSANDRA-5432?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644558#comment-13644558 ] Jonathan Ellis commented on CASSANDRA-5432: --- Why does let's use the last-known location of this node cause problems? Repair Freeze/Gossip Invisibility Issues 1.2.4 -- Key: CASSANDRA-5432 URL: https://issues.apache.org/jira/browse/CASSANDRA-5432 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.2.4 Environment: Ubuntu 10.04.1 LTS C* 1.2.3 Sun Java 6 u43 JNA Enabled Not using VNodes Reporter: Arya Goudarzi Assignee: Vijay Priority: Critical Attachments: 0001-CASSANDRA-5432.patch Read comment 6. This description summarizes the repair issue only, but I believe there is a bigger problem going on with networking as described on that comment. Since I have upgraded our sandbox cluster, I am unable to run repair on any node and I am reaching our gc_grace seconds this weekend. Please help. So far, I have tried the following suggestions: - nodetool scrub - offline scrub - running repair on each CF separately. Didn't matter. All got stuck the same way. The repair command just gets stuck and the machine is idling. 
Only the following logs are printed for repair job: INFO [Thread-42214] 2013-04-05 23:30:27,785 StorageService.java (line 2379) Starting repair command #4, repairing 1 ranges for keyspace cardspring_production INFO [AntiEntropySessions:7] 2013-04-05 23:30:27,789 AntiEntropyService.java (line 652) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] new session: will sync /X.X.X.190, /X.X.X.43, /X.X.X.56 on range (1808575600,42535295865117307932921825930779602032] for keyspace_production.[comma separated list of CFs] INFO [AntiEntropySessions:7] 2013-04-05 23:30:27,790 AntiEntropyService.java (line 858) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] requesting merkle trees for BusinessConnectionIndicesEntries (to [/X.X.X.43, /X.X.X.56, /X.X.X.190]) INFO [AntiEntropyStage:1] 2013-04-05 23:30:28,086 AntiEntropyService.java (line 214) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] Received merkle tree for ColumnFamilyName from /X.X.X.43 INFO [AntiEntropyStage:1] 2013-04-05 23:30:28,147 AntiEntropyService.java (line 214) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] Received merkle tree for ColumnFamilyName from /X.X.X.56 Please advise. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-4324) Implement Lucene FST in for key index
[ https://issues.apache.org/jira/browse/CASSANDRA-4324?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644580#comment-13644580 ] Jason Rutherglen commented on CASSANDRA-4324: - What is the difference between the token and the key? The FST could theoretically cache (due to compression efficiency) every key/token (?) in RAM, removing the need for the key cache, and providing an extremely fine grained pointer to the underlying row value data. Implement Lucene FST in for key index - Key: CASSANDRA-4324 URL: https://issues.apache.org/jira/browse/CASSANDRA-4324 Project: Cassandra Issue Type: Improvement Reporter: Jason Rutherglen Assignee: Jason Rutherglen Priority: Minor Attachments: CASSANDRA-4324.patch, CASSANDRA-4324.patch, CASSANDRA-4324.patch, lucene-core-4.0-SNAPSHOT.jar The Lucene FST data structure offers a compact and fast system for indexing Cassandra keys. More keys may be loaded which in turn should seeks faster. * Update the IndexSummary class to make use of the Lucene FST, overriding the serialization mechanism. * Alter SSTableReader to make use of the FST seek mechanism -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-4324) Implement Lucene FST in for key index
[ https://issues.apache.org/jira/browse/CASSANDRA-4324?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644583#comment-13644583 ] Jonathan Ellis commented on CASSANDRA-4324: --- My point is that keys routed to a given node will be effectively a random subset of the keys in use (under RP/M3P), so unlike the tokens I don't think it's clear at all that they will be well-compressible by FST. Implement Lucene FST in for key index - Key: CASSANDRA-4324 URL: https://issues.apache.org/jira/browse/CASSANDRA-4324 Project: Cassandra Issue Type: Improvement Reporter: Jason Rutherglen Assignee: Jason Rutherglen Priority: Minor Attachments: CASSANDRA-4324.patch, CASSANDRA-4324.patch, CASSANDRA-4324.patch, lucene-core-4.0-SNAPSHOT.jar The Lucene FST data structure offers a compact and fast system for indexing Cassandra keys. More keys may be loaded which in turn should seeks faster. * Update the IndexSummary class to make use of the Lucene FST, overriding the serialization mechanism. * Alter SSTableReader to make use of the FST seek mechanism -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-4687) Exception: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk)
[ https://issues.apache.org/jira/browse/CASSANDRA-4687?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-4687: -- Priority: Minor (was: Major) Fix Version/s: (was: 1.2.5) Exception: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk) --- Key: CASSANDRA-4687 URL: https://issues.apache.org/jira/browse/CASSANDRA-4687 Project: Cassandra Issue Type: Bug Components: Core Environment: CentOS 6.3 64-bit, Oracle JRE 1.6.0.33 64-bit, single node cluster Reporter: Leonid Shalupov Priority: Minor Attachments: 4687-debugging.txt Under heavy write load sometimes cassandra fails with assertion error. git bisect leads to commit 295aedb278e7a495213241b66bc46d763fd4ce66. works fine if global key/row caches disabled in code. {quote} java.lang.AssertionError: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk) in /var/lib/cassandra/data/...-he-1-Data.db at org.apache.cassandra.db.columniterator.SSTableSliceIterator.init(SSTableSliceIterator.java:60) at org.apache.cassandra.db.filter.SliceQueryFilter.getSSTableColumnIterator(SliceQueryFilter.java:67) at org.apache.cassandra.db.filter.QueryFilter.getSSTableColumnIterator(QueryFilter.java:79) at org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:256) at org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:64) at org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1345) at org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1207) at org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1142) at org.apache.cassandra.db.Table.getRow(Table.java:378) at org.apache.cassandra.db.SliceFromReadCommand.getRow(SliceFromReadCommand.java:69) at org.apache.cassandra.service.StorageProxy$LocalReadRunnable.runMayThrow(StorageProxy.java:819) at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:1253) at 
java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) {quote} -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Resolved] (CASSANDRA-5506) Reduce memory consumption of IndexSummary
[ https://issues.apache.org/jira/browse/CASSANDRA-5506?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-5506. --- Resolution: Fixed Reduce memory consumption of IndexSummary - Key: CASSANDRA-5506 URL: https://issues.apache.org/jira/browse/CASSANDRA-5506 Project: Cassandra Issue Type: Improvement Components: Core Reporter: Nick Puz Assignee: Jonathan Ellis Fix For: 1.2.5 I am evaluating cassandra for a use case with many tiny rows which would result in a node with 1-3TB of storage having billions of rows. Before loading that much data I am hitting GC issues and when looking at the heap dump I noticed that 70+% of the memory was used by IndexSummaries. The two major issues seem to be: 1) that the positions are stored as an ArrayList<Long> which results in each position taking 24 bytes (class + flags + 8 byte long). This might make sense when the file is initially written but once it has been serialized it would be a lot more memory efficient to just have a long[] (really an int[] would be fine unless 2GB sstables are allowed). 2) The DecoratedKey for a byte[16] key takes 195 bytes -- this is for the overhead of the ByteBuffer in the key and overhead in the token. To somewhat work around the problem I have increased index_sample, but with this many rows that didn't really help; it starts to have diminishing returns. NOTE: This heap dump was from linux with a 64bit oracle vm. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Resolved] (CASSANDRA-5056) Drop legacy HintsColumnFamily, Migrations, Schema, and LocationInfo CFs after upgrading to 1.2
[ https://issues.apache.org/jira/browse/CASSANDRA-5056?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis resolved CASSANDRA-5056. --- Resolution: Won't Fix Fix Version/s: (was: 1.2.5) Removed from cfmetdata in 2.0 (CASSANDRA-5511); adding code to formally drop them doesn't seem worth it. Drop legacy HintsColumnFamily, Migrations, Schema, and LocationInfo CFs after upgrading to 1.2 -- Key: CASSANDRA-5056 URL: https://issues.apache.org/jira/browse/CASSANDRA-5056 Project: Cassandra Issue Type: Improvement Components: Core Affects Versions: 1.2.0 beta 1 Reporter: Jonathan Ellis Priority: Minor Once upgraded we don't need to keep these around anymore. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5522) Migrate FileStreamTask to Guava RateLimiter and remove homegrown Throttle
[ https://issues.apache.org/jira/browse/CASSANDRA-5522?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Yuki Morishita updated CASSANDRA-5522: -- Issue Type: Sub-task (was: Task) Parent: CASSANDRA-5286 Migrate FileStreamTask to Guava RateLimiter and remove homegrown Throttle - Key: CASSANDRA-5522 URL: https://issues.apache.org/jira/browse/CASSANDRA-5522 Project: Cassandra Issue Type: Sub-task Components: Core Reporter: Jonathan Ellis Assignee: Yuki Morishita Fix For: 2.0 -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-4687) Exception: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk)
[ https://issues.apache.org/jira/browse/CASSANDRA-4687?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644605#comment-13644605 ] Dirk Lachowski commented on CASSANDRA-4687: --- I see this error on an older (longer in use) cf after adding a new column definition via cqlsh and then adding values for existing keys for the new column. Exception: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk) --- Key: CASSANDRA-4687 URL: https://issues.apache.org/jira/browse/CASSANDRA-4687 Project: Cassandra Issue Type: Bug Components: Core Environment: CentOS 6.3 64-bit, Oracle JRE 1.6.0.33 64-bit, single node cluster Reporter: Leonid Shalupov Priority: Minor Attachments: 4687-debugging.txt Under heavy write load sometimes cassandra fails with assertion error. git bisect leads to commit 295aedb278e7a495213241b66bc46d763fd4ce66. works fine if global key/row caches disabled in code. {quote} java.lang.AssertionError: DecoratedKey(xxx, yyy) != DecoratedKey(zzz, kkk) in /var/lib/cassandra/data/...-he-1-Data.db at org.apache.cassandra.db.columniterator.SSTableSliceIterator.init(SSTableSliceIterator.java:60) at org.apache.cassandra.db.filter.SliceQueryFilter.getSSTableColumnIterator(SliceQueryFilter.java:67) at org.apache.cassandra.db.filter.QueryFilter.getSSTableColumnIterator(QueryFilter.java:79) at org.apache.cassandra.db.CollationController.collectAllData(CollationController.java:256) at org.apache.cassandra.db.CollationController.getTopLevelColumns(CollationController.java:64) at org.apache.cassandra.db.ColumnFamilyStore.getTopLevelColumns(ColumnFamilyStore.java:1345) at org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1207) at org.apache.cassandra.db.ColumnFamilyStore.getColumnFamily(ColumnFamilyStore.java:1142) at org.apache.cassandra.db.Table.getRow(Table.java:378) at org.apache.cassandra.db.SliceFromReadCommand.getRow(SliceFromReadCommand.java:69) at 
org.apache.cassandra.service.StorageProxy$LocalReadRunnable.runMayThrow(StorageProxy.java:819) at org.apache.cassandra.service.StorageProxy$DroppableRunnable.run(StorageProxy.java:1253) at java.util.concurrent.ThreadPoolExecutor$Worker.runTask(ThreadPoolExecutor.java:886) at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:908) at java.lang.Thread.run(Thread.java:662) {quote} -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5306) Improve Dsnitch Severity
[ https://issues.apache.org/jira/browse/CASSANDRA-5306?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644606#comment-13644606 ] Jonathan Ellis commented on CASSANDRA-5306: --- I guess that makes sense wasn't very clear, so +1. (Retargetting for 2.0 -- let's go ahead and rename it there. :) Improve Dsnitch Severity Key: CASSANDRA-5306 URL: https://issues.apache.org/jira/browse/CASSANDRA-5306 Project: Cassandra Issue Type: Improvement Reporter: Vijay Assignee: Vijay Priority: Minor Fix For: 1.2.5 Attachments: 0001-5306.patch This ticket is to continue the discussion in CASSANDRA-5255. Currently Dsnitch reports the Severity by calculating the amount of data compacting relative to load of the node. It will be nice to report Severity as a factor of load average, N/W Throughput and IO Wait instead. http://www.hyperic.com/products/sigar seem to have it (But personally i have not used it). -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5432) Repair Freeze/Gossip Invisibility Issues 1.2.4
[ https://issues.apache.org/jira/browse/CASSANDRA-5432?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644658#comment-13644658 ] Vijay commented on CASSANDRA-5432: -- The problem is that the private IP we need to communicate within a DC/region is not available until we gossip with the nodes. Since we don't have the private information but we do have the rest (DC/RACK), we are trying to connect via public IP. Removing that optimization forces us to assume it is in another DC and hence using public IP and SSL port; eventually when we receive the private IP we reset the status to use the right (private_ip) connection. You may ask why not store the private IP? Well, we could, but currently the reset connection (to private IP) logic is in the snitch. Repair Freeze/Gossip Invisibility Issues 1.2.4 -- Key: CASSANDRA-5432 URL: https://issues.apache.org/jira/browse/CASSANDRA-5432 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.2.4 Environment: Ubuntu 10.04.1 LTS C* 1.2.3 Sun Java 6 u43 JNA Enabled Not using VNodes Reporter: Arya Goudarzi Assignee: Vijay Priority: Critical Attachments: 0001-CASSANDRA-5432.patch Read comment 6. This description summarizes the repair issue only, but I believe there is a bigger problem going on with networking as described on that comment. Since I have upgraded our sandbox cluster, I am unable to run repair on any node and I am reaching our gc_grace seconds this weekend. Please help. So far, I have tried the following suggestions: - nodetool scrub - offline scrub - running repair on each CF separately. Didn't matter. All got stuck the same way. The repair command just gets stuck and the machine is idling.
Only the following logs are printed for repair job: INFO [Thread-42214] 2013-04-05 23:30:27,785 StorageService.java (line 2379) Starting repair command #4, repairing 1 ranges for keyspace cardspring_production INFO [AntiEntropySessions:7] 2013-04-05 23:30:27,789 AntiEntropyService.java (line 652) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] new session: will sync /X.X.X.190, /X.X.X.43, /X.X.X.56 on range (1808575600,42535295865117307932921825930779602032] for keyspace_production.[comma separated list of CFs] INFO [AntiEntropySessions:7] 2013-04-05 23:30:27,790 AntiEntropyService.java (line 858) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] requesting merkle trees for BusinessConnectionIndicesEntries (to [/X.X.X.43, /X.X.X.56, /X.X.X.190]) INFO [AntiEntropyStage:1] 2013-04-05 23:30:28,086 AntiEntropyService.java (line 214) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] Received merkle tree for ColumnFamilyName from /X.X.X.43 INFO [AntiEntropyStage:1] 2013-04-05 23:30:28,147 AntiEntropyService.java (line 214) [repair #cc5a9aa0-9e48-11e2-98ba-11bde7670242] Received merkle tree for ColumnFamilyName from /X.X.X.56 Please advise. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5051) Allow automatic cleanup after gc_grace
[ https://issues.apache.org/jira/browse/CASSANDRA-5051?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644665#comment-13644665 ] Vijay commented on CASSANDRA-5051: -- Hi Jonathan, we also need to remove the following for the test to fail... {quote} StorageService.instance.startBootstrapping(); {quote} in the test case, the empty ranges check is when the node is bootstrapping. I would rather be safe than sorry :) Allow automatic cleanup after gc_grace -- Key: CASSANDRA-5051 URL: https://issues.apache.org/jira/browse/CASSANDRA-5051 Project: Cassandra Issue Type: New Feature Components: Core Reporter: Brandon Williams Assignee: Vijay Labels: vnodes Fix For: 2.0 Attachments: 0001-5051-v4.patch, 0001-5051-v6.patch, 0001-5051-with-test-fixes.patch, 0001-CASSANDRA-5051.patch, 0002-5051-remove-upgradesstable.patch, 0002-5051-remove-upgradesstable-v4.patch, 0004-5051-additional-test-v4.patch, 5051-v2.txt When using vnodes, after adding a new node you have to run cleanup on all the machines, because you don't know which are affected and chances are it was most if not all of them. As an alternative to this intensive process, we could allow cleanup during compaction if the data is older than gc_grace (or perhaps some other time period since people tend to use gc_grace hacks to get rid of tombstones.) -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5099) Since 1.1, get_count sometimes returns value smaller than actual column count
[ https://issues.apache.org/jira/browse/CASSANDRA-5099?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-5099: -- Fix Version/s: 1.2.1 Since 1.1, get_count sometimes returns value smaller than actual column count - Key: CASSANDRA-5099 URL: https://issues.apache.org/jira/browse/CASSANDRA-5099 Project: Cassandra Issue Type: Bug Affects Versions: 1.1.7 Reporter: Jason Harvey Assignee: Yuki Morishita Fix For: 1.1.9, 1.2.1 Attachments: 5099-1.1.txt We have a CF where rows have thousands of TTLd columns. The columns are continually added at a regular rate, and TTL out after 15 minutes. We continually run a 'get_count' on these keys to get a count of the number of live columns. Since we upgraded from 1.0 to 1.1.7, get_count regularly returns much smaller values than are possible. For example, with roughly 15,000 columns that have well-distributed TTLs, running a get_count 10 times will result in 1 or 2 results that are up to half the actual column count. Using a normal 'get' to count those columns always results in proper values. For example: (all of these counts were run within a second or less of each other) {code} [default@reddit] count AccountsActiveBySR['2qh0u']; 13665 columns [default@reddit] count AccountsActiveBySR['2qh0u']; 13665 columns [default@reddit] count AccountsActiveBySR['2qh0u']; 13666 columns [default@reddit] count AccountsActiveBySR['2qh0u']; 3069 columns [default@reddit] count AccountsActiveBySR['2qh0u']; 13660 columns [default@reddit] count AccountsActiveBySR['2qh0u']; 13661 columns {code} I should note that this issue happens much more frequently with larger (10k columns) rows than smaller rows. It never seems to happen with rows having fewer than 1k columns. There are no supercolumns in use. The key names and column names are very short, and there are no column values. The CF is LCS, and due to the TTL only hovers around a few MB in size. 
GC grace is normally at zero, but the problem is consistent with non-zero gc grace times. It appears that there was an issue (CASSANDRA-4833) fixed in 1.1.7 regarding get_count. Some logic was added to prevent an infinite loop case. Could that change have resulted in this problem somehow? I can't find any other relevant 1.1 changes that might explain this issue. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5109) convert default marshallers list to map for better readability
[ https://issues.apache.org/jira/browse/CASSANDRA-5109?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-5109: -- Fix Version/s: 1.2.1 convert default marshallers list to map for better readability -- Key: CASSANDRA-5109 URL: https://issues.apache.org/jira/browse/CASSANDRA-5109 Project: Cassandra Issue Type: Improvement Components: Hadoop Affects Versions: 1.1.8 Reporter: Dave Brosius Assignee: Dave Brosius Priority: Trivial Fix For: 1.1.9, 1.2.1 Attachments: marshallers_map_rebase.txt, marshallers_map.txt Code uses index 0, 1, 2, 3 of a list to mean specific things.. difficult to read and is brittle, changed to a map. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5188) o.a.c.hadoop.ConfigHelper should support setting Thrift frame and max message sizes.
[ https://issues.apache.org/jira/browse/CASSANDRA-5188?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-5188: -- Fix Version/s: 1.2.2 o.a.c.hadoop.ConfigHelper should support setting Thrift frame and max message sizes. Key: CASSANDRA-5188 URL: https://issues.apache.org/jira/browse/CASSANDRA-5188 Project: Cassandra Issue Type: Bug Components: Hadoop Affects Versions: 1.1.9 Reporter: Pavel Yaskevich Assignee: Pavel Yaskevich Priority: Minor Fix For: 1.1.10, 1.2.2 Attachments: CASSANDRA-5188.patch Without such support people will be running into problems like https://github.com/thinkaurelius/faunus/issues/99 without any work around when custom frame and/or max message sizes are used. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[1/3] git commit: unbork changelog entries for merges from 1.1.9 and 1.1.10
Updated Branches: refs/heads/cassandra-1.2 7eae57aea - a44a3d4b8 refs/heads/trunk 6a5495afe - 011e8a060 unbork changelog entries for merges from 1.1.9 and 1.1.10 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/a44a3d4b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/a44a3d4b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/a44a3d4b Branch: refs/heads/cassandra-1.2 Commit: a44a3d4b86135356d4ea9b50d2cd990e5f474894 Parents: 7eae57a Author: Jonathan Ellis jbel...@apache.org Authored: Mon Apr 29 16:55:24 2013 -0500 Committer: Jonathan Ellis jbel...@apache.org Committed: Mon Apr 29 16:55:29 2013 -0500 -- CHANGES.txt | 35 ++- 1 files changed, 14 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/a44a3d4b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 241ef7c..3922cb3 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -132,6 +132,12 @@ Merged from 1.1: * cqlsh: Add username autocompletion (CASSANDRA-5231) * Fix CQL3 composite partition key error (CASSANDRA-5240) * Allow IN clause on last clustering key (CASSANDRA-5230) +Merged from 1.1: + * fix start key/end token validation for wide row iteration (CASSANDRA-5168) + * add ConfigHelper support for Thrift frame and max message sizes (CASSANDRA-5188) + * fix nodetool repair not fail on node down (CASSANDRA-5203) + * always collect tombstone hints (CASSANDRA-5068) + * Fix error when sourcing file in cqlsh (CASSANDRA-5235) 1.2.1 @@ -184,35 +190,22 @@ Merged from 1.1: * Fix cf name extraction from manifest in Directories.migrateFile() (CASSANDRA-5242) * Support pluggable internode authentication (CASSANDRA-5401) * Replace mistaken usage of commons-logging with slf4j (CASSANDRA-5464) - - -1.1.10 - * fix saved key cache not loading at startup (CASSANDRA-5166) - * fix ConcurrentModificationException in getBootstrapSource (CASSANDRA-5170) - * fix sstable maxtimestamp for row deletes and 
pre-1.1.1 sstables (CASSANDRA-5153) - * fix start key/end token validation for wide row iteration (CASSANDRA-5168) - * add ConfigHelper support for Thrift frame and max message sizes (CASSANDRA-5188) - * fix nodetool repair not fail on node down (CASSANDRA-5203) - * always collect tombstone hints (CASSANDRA-5068) - * Fix thread growth on node removal (CASSANDRA-5175) - * Fix error when sourcing file in cqlsh (CASSANDRA-5235) - * Make Ec2Region's datacenter name configurable (CASSANDRA-5155) - - -1.1.9 + * Ensure Jackson dependency matches lib (CASSANDRA-5126) + * Expose droppable tombstone ratio stats over JMX (CASSANDRA-5159) +Merged from 1.1: * Simplify CompressedRandomAccessReader to work around JDK FD bug (CASSANDRA-5088) * Improve handling a changing target throttle rate mid-compaction (CASSANDRA-5087) * Pig: correctly decode row keys in widerow mode (CASSANDRA-5098) * nodetool repair command now prints progress (CASSANDRA-4767) - * Ensure Jackson dependency matches lib (CASSANDRA-5126) * fix user defined compaction to run against 1.1 data directory (CASSANDRA-5118) * Fix CQL3 BATCH authorization caching (CASSANDRA-5145) * fix get_count returns incorrect value with TTL (CASSANDRA-5099) - * better handling for amid compaction failure (CASSANDRA-5137) - * calculate pending ranges asynchronously (CASSANDRA-5135) + * better handling for mid-compaction failure (CASSANDRA-5137) * convert default marshallers list to map for better readability (CASSANDRA-5109) - * Expose droppable tombstone ratio stats over JMX (CASSANDRA-5159) - + * fix ConcurrentModificationException in getBootstrapSource (CASSANDRA-5170) + * fix sstable maxtimestamp for row deletes and pre-1.1.1 sstables (CASSANDRA-5153) + * Fix thread growth on node removal (CASSANDRA-5175) + * Make Ec2Region's datacenter name configurable (CASSANDRA-5155) 1.2.0
[2/3] git commit: unbork changelog entries for merges from 1.1.9 and 1.1.10
unbork changelog entries for merges from 1.1.9 and 1.1.10 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/a44a3d4b Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/a44a3d4b Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/a44a3d4b Branch: refs/heads/trunk Commit: a44a3d4b86135356d4ea9b50d2cd990e5f474894 Parents: 7eae57a Author: Jonathan Ellis jbel...@apache.org Authored: Mon Apr 29 16:55:24 2013 -0500 Committer: Jonathan Ellis jbel...@apache.org Committed: Mon Apr 29 16:55:29 2013 -0500 -- CHANGES.txt | 35 ++- 1 files changed, 14 insertions(+), 21 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/a44a3d4b/CHANGES.txt -- diff --git a/CHANGES.txt b/CHANGES.txt index 241ef7c..3922cb3 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -132,6 +132,12 @@ Merged from 1.1: * cqlsh: Add username autocompletion (CASSANDRA-5231) * Fix CQL3 composite partition key error (CASSANDRA-5240) * Allow IN clause on last clustering key (CASSANDRA-5230) +Merged from 1.1: + * fix start key/end token validation for wide row iteration (CASSANDRA-5168) + * add ConfigHelper support for Thrift frame and max message sizes (CASSANDRA-5188) + * fix nodetool repair not fail on node down (CASSANDRA-5203) + * always collect tombstone hints (CASSANDRA-5068) + * Fix error when sourcing file in cqlsh (CASSANDRA-5235) 1.2.1 @@ -184,35 +190,22 @@ Merged from 1.1: * Fix cf name extraction from manifest in Directories.migrateFile() (CASSANDRA-5242) * Support pluggable internode authentication (CASSANDRA-5401) * Replace mistaken usage of commons-logging with slf4j (CASSANDRA-5464) - - -1.1.10 - * fix saved key cache not loading at startup (CASSANDRA-5166) - * fix ConcurrentModificationException in getBootstrapSource (CASSANDRA-5170) - * fix sstable maxtimestamp for row deletes and pre-1.1.1 sstables (CASSANDRA-5153) - * fix start key/end token validation for wide row iteration 
(CASSANDRA-5168) - * add ConfigHelper support for Thrift frame and max message sizes (CASSANDRA-5188) - * fix nodetool repair not fail on node down (CASSANDRA-5203) - * always collect tombstone hints (CASSANDRA-5068) - * Fix thread growth on node removal (CASSANDRA-5175) - * Fix error when sourcing file in cqlsh (CASSANDRA-5235) - * Make Ec2Region's datacenter name configurable (CASSANDRA-5155) - - -1.1.9 + * Ensure Jackson dependency matches lib (CASSANDRA-5126) + * Expose droppable tombstone ratio stats over JMX (CASSANDRA-5159) +Merged from 1.1: * Simplify CompressedRandomAccessReader to work around JDK FD bug (CASSANDRA-5088) * Improve handling a changing target throttle rate mid-compaction (CASSANDRA-5087) * Pig: correctly decode row keys in widerow mode (CASSANDRA-5098) * nodetool repair command now prints progress (CASSANDRA-4767) - * Ensure Jackson dependency matches lib (CASSANDRA-5126) * fix user defined compaction to run against 1.1 data directory (CASSANDRA-5118) * Fix CQL3 BATCH authorization caching (CASSANDRA-5145) * fix get_count returns incorrect value with TTL (CASSANDRA-5099) - * better handling for amid compaction failure (CASSANDRA-5137) - * calculate pending ranges asynchronously (CASSANDRA-5135) + * better handling for mid-compaction failure (CASSANDRA-5137) * convert default marshallers list to map for better readability (CASSANDRA-5109) - * Expose droppable tombstone ratio stats over JMX (CASSANDRA-5159) - + * fix ConcurrentModificationException in getBootstrapSource (CASSANDRA-5170) + * fix sstable maxtimestamp for row deletes and pre-1.1.1 sstables (CASSANDRA-5153) + * Fix thread growth on node removal (CASSANDRA-5175) + * Make Ec2Region's datacenter name configurable (CASSANDRA-5155) 1.2.0
[jira] [Updated] (CASSANDRA-5175) Unbounded (?) thread growth connecting to a removed node
[ https://issues.apache.org/jira/browse/CASSANDRA-5175?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Jonathan Ellis updated CASSANDRA-5175: -- Fix Version/s: 1.2.1 Unbounded (?) thread growth connecting to an removed node - Key: CASSANDRA-5175 URL: https://issues.apache.org/jira/browse/CASSANDRA-5175 Project: Cassandra Issue Type: Bug Components: Core Affects Versions: 1.1.8 Environment: EC2, JDK 7u9, Ubuntu 12.04.1 LTS Reporter: Janne Jalkanen Assignee: Vijay Priority: Minor Fix For: 1.1.10, 1.2.1 Attachments: 0001-CASSANDRA-5175.patch The following lines started repeating every minute in the log file {noformat} INFO [GossipStage:1] 2013-01-19 19:35:43,929 Gossiper.java (line 831) InetAddress /10.238.x.y is now dead. INFO [GossipStage:1] 2013-01-19 19:35:43,930 StorageService.java (line 1291) Removing token 170141183460469231731687303715884105718 for /10.238.x.y {noformat} Also, I got about 3000 threads which all look like this: {noformat} Name: WRITE-/10.238.x.y State: WAITING on java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject@1bb65c0f Total blocked: 0 Total waited: 3 Stack trace: sun.misc.Unsafe.park(Native Method) java.util.concurrent.locks.LockSupport.park(LockSupport.java:186) java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:2043) java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:442) org.apache.cassandra.net.OutboundTcpConnection.run(OutboundTcpConnection.java:104) {noformat} A new thread seems to be created every minute, and they never go away. The endpoint in question had been a part of the cluster weeks ago, and the node exhibiting the thread growth was added yesterday. Anyway, assassinating the endpoint in question stopped thread growth (but kept the existing threads running), so this isn't a huge issue. But I don't think the thread count is supposed to be increasing like this... -- This message is automatically generated by JIRA. 
If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Created] (CASSANDRA-5523) Prevent repair among the nodes of different version
Yuki Morishita created CASSANDRA-5523: - Summary: Prevent repair among the nodes of different version Key: CASSANDRA-5523 URL: https://issues.apache.org/jira/browse/CASSANDRA-5523 Project: Cassandra Issue Type: Bug Reporter: Yuki Morishita Assignee: Yuki Morishita Priority: Minor Fix For: 1.2.5 Since streaming file to the node of different version is not allowed, and in fact it would be the cause of repair hang, there is no point to allow repairing among the nodes of different versions. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5523) Prevent repair among the nodes of different version
[ https://issues.apache.org/jira/browse/CASSANDRA-5523?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] Yuki Morishita updated CASSANDRA-5523: -- Attachment: 5523-1.2.txt Patch to fail repair when the participants' versions are different. Note that we don't need to check if the node is pre-1.1 for sequential repair, I removed that check also. Prevent repair among the nodes of different version --- Key: CASSANDRA-5523 URL: https://issues.apache.org/jira/browse/CASSANDRA-5523 Project: Cassandra Issue Type: Bug Reporter: Yuki Morishita Assignee: Yuki Morishita Priority: Minor Fix For: 1.2.5 Attachments: 5523-1.2.txt Since streaming file to the node of different version is not allowed, and in fact it would be the cause of repair hang, there is no point to allow repairing among the nodes of different versions. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Created] (CASSANDRA-5524) Allow upgradesstables to be run against a specified directory
Tyler Hobbs created CASSANDRA-5524: -- Summary: Allow upgradesstables to be run against a specified directory Key: CASSANDRA-5524 URL: https://issues.apache.org/jira/browse/CASSANDRA-5524 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Tyler Hobbs Priority: Minor Currently, upgradesstables only modifies live SSTables. Because sstableloader cannot stream old SSTable formats, this makes it difficult to restore data from a snapshot taken in a previous major version of Cassandra. Allowing the user to specify a directory for upgradesstables would resolve this, but it may also be nice to upgrade SSTables in snapshot directories automatically or with a separate flag. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5523) Prevent repair among the nodes of different version
[ https://issues.apache.org/jira/browse/CASSANDRA-5523?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644982#comment-13644982 ] Jonathan Ellis commented on CASSANDRA-5523: --- I would change the message to different protocol versions or different major releases since different minor releases should be okay. Otherwise +1 Prevent repair among the nodes of different version --- Key: CASSANDRA-5523 URL: https://issues.apache.org/jira/browse/CASSANDRA-5523 Project: Cassandra Issue Type: Bug Affects Versions: 1.2.4 Reporter: Yuki Morishita Assignee: Yuki Morishita Priority: Minor Labels: repair Fix For: 1.2.5 Attachments: 5523-1.2.txt Since streaming file to the node of different version is not allowed, and in fact it would be the cause of repair hang, there is no point to allow repairing among the nodes of different versions. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5524) Allow upgradesstables to be run against a specified directory
[ https://issues.apache.org/jira/browse/CASSANDRA-5524?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13644987#comment-13644987 ] Jonathan Ellis commented on CASSANDRA-5524: --- Upgrading snapshots automatically sounds like a bad idea to me since it breaks the hard link and could thus cause a massive expansion in disk space used. If we're going to upgrade stuff in an arbitrary directory, sounds like we should really have an offline tool rather than round-trip it through JMX. Allow upgradesstables to be run against a specified directory - Key: CASSANDRA-5524 URL: https://issues.apache.org/jira/browse/CASSANDRA-5524 Project: Cassandra Issue Type: Improvement Components: Tools Reporter: Tyler Hobbs Priority: Minor Currently, upgradesstables only modifies live SSTables. Because sstableloader cannot stream old SSTable formats, this makes it difficult to restore data from a snapshot taken in a previous major version of Cassandra. Allowing the user to specify a directory for upgradesstables would resolve this, but it may also be nice to upgrade SSTables in snapshot directories automatically or with a separate flag. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5514) Allow timestamp hints
[ https://issues.apache.org/jira/browse/CASSANDRA-5514?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13645010#comment-13645010 ] Tupshin Harper commented on CASSANDRA-5514: --- See CASSANDRA-5518 for a previous ticket along similar lines. +1 on no additional syntax and Sylvain's general approach. +1000 on a time-series aware compaction strategy. That is where the big win will be. Allow timestamp hints - Key: CASSANDRA-5514 URL: https://issues.apache.org/jira/browse/CASSANDRA-5514 Project: Cassandra Issue Type: New Feature Components: API, Core Reporter: Jonathan Ellis Assignee: Marcus Eriksson Fix For: 2.0 Slice queries can't optimize based on timestamp except for rare cases (CASSANDRA-4116). However, many common queries involve an implicit time component, where the application author knows that he is only interested in data more recent than X, or older than Y. We could use the per-sstable max and min timestamps we track to avoid touching cold data if we could pass a hint to Cassandra about the time range we care about. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Created] (CASSANDRA-5525) Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load
John Watson created CASSANDRA-5525: -- Summary: Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load Key: CASSANDRA-5525 URL: https://issues.apache.org/jira/browse/CASSANDRA-5525 Project: Cassandra Issue Type: Bug Reporter: John Watson 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 
142.6 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 77.64 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 194.31 GB 256 25.0% 6d726cbf-147d-426e-a735-e14928c95e45 221.94 GB 256 33.3% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 87.61 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 101.02 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 172.44 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 108.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5525) Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load
[ https://issues.apache.org/jira/browse/CASSANDRA-5525?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] John Watson updated CASSANDRA-5525: --- Attachment: Screen Shot 2013-04-25 at 12.35.24 PM.png Actual disk space usage. Gray line is the joining node that ran out of disk space. Dark blue is the joining node I killed. Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load --- Key: CASSANDRA-5525 URL: https://issues.apache.org/jira/browse/CASSANDRA-5525 Project: Cassandra Issue Type: Bug Reporter: John Watson Attachments: Screen Shot 2013-04-25 at 12.35.24 PM.png 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) 
Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 77.64 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 194.31 GB 256 25.0% 6d726cbf-147d-426e-a735-e14928c95e45 221.94 GB 256 33.3% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 87.61 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 101.02 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 172.44 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 108.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Updated] (CASSANDRA-5525) Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load
[ https://issues.apache.org/jira/browse/CASSANDRA-5525?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] John Watson updated CASSANDRA-5525: --- Description: 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts in the live cluster when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on a 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 77.64 GB 256 
16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 194.31 GB 256 25.0% 6d726cbf-147d-426e-a735-e14928c95e45 221.94 GB 256 33.3% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 87.61 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 101.02 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 172.44 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 108.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} was: 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on a 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) 
Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%
[jira] [Updated] (CASSANDRA-5525) Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load
[ https://issues.apache.org/jira/browse/CASSANDRA-5525?page=com.atlassian.jira.plugin.system.issuetabpanels:all-tabpanel ] John Watson updated CASSANDRA-5525: --- Description: 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts in the live cluster when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 77.64 GB 256 
16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 194.31 GB 256 25.0% 6d726cbf-147d-426e-a735-e14928c95e45 221.94 GB 256 33.3% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 87.61 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 101.02 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 172.44 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 108.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} was: 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts in the live cluster when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on a 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) 
Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%
[jira] [Comment Edited] (CASSANDRA-5525) Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load
[ https://issues.apache.org/jira/browse/CASSANDRA-5525?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13645036#comment-13645036 ] John Watson edited comment on CASSANDRA-5525 at 4/29/13 11:25 PM: -- Actual disk space usage. !Screen Shot 2013-04-25 at 12.35.24 PM.png! Gray line is the joining node that ran out of disk space. Dark blue is the joining node I killed. was (Author: dctrwatson): Actual disk space usage. Gray line is the joining node that out of disk space. Dark blue is the joining node I killed. Adding nodes to 1.2 cluster w/ vnodes streamed more data than average node load --- Key: CASSANDRA-5525 URL: https://issues.apache.org/jira/browse/CASSANDRA-5525 Project: Cassandra Issue Type: Bug Reporter: John Watson Attachments: Screen Shot 2013-04-25 at 12.35.24 PM.png 12 node cluster upgraded from 1.1.9 to 1.2.3, enabled 'num_tokens: 256', restarted and ran upgradesstables and cleanup. Tried to join 2 additional nodes into the ring. However, 1 of the new nodes ran out of disk space. This started causing 'no host id' alerts in the live cluster when attempting to store hints for that node. {noformat} ERROR 10:12:02,408 Exception in thread Thread[MutationStage:190,5,main] java.lang.AssertionError: Missing host ID {noformat} The other node I killed to stop it from continuing to join. Since the live cluster was now in some sort of broken state dropping mutation messages on 3 nodes. This was fixed by restarting them, however 1 node never stopped, so had to decomm it (leaving the original cluster at 11 nodes.) 
Ring pre-join: {noformat} Load Tokens Owns (effective) Host ID 147.55 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 124.99 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 136.63 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 141.78 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 137.74 GB 256 16.7% 6d726cbf-147d-426e-a735-e14928c95e45 135.9 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 165.96 GB 256 16.7% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 135.41 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 143.38 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 178.05 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 194.92 GB 256 25.0% 361d7e31-b155-4ce1-8890-451b3ddf46cf 150.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} Ring after decomm bad node: {noformat} Load Tokens Owns (effective) Host ID 80.95 GB 256 16.7% 754f9f4c-4ba7-4495-97e7-1f5b6755cb27 87.15 GB 256 16.7% 93f4400a-09d9-4ca0-b6a6-9bcca2427450 98.16 GB 256 16.7% ff821e8e-b2ca-48a9-ac3f-8234b16329ce 142.6 GB 253 100.0%339c474f-cf19-4ada-9a47-8b10912d5eb3 77.64 GB 256 16.7% e59a02b3-8b91-4abd-990e-b3cb2a494950 194.31 GB 256 25.0% 6d726cbf-147d-426e-a735-e14928c95e45 221.94 GB 256 33.3% 83ca527c-60c5-4ea0-89a8-de53b92b99c8 87.61 GB 256 16.7% c3ea4026-551b-4a14-a346-480e8c1fe283 101.02 GB 256 16.7% df7ba879-74ad-400b-b371-91b45dcbed37 172.44 GB 256 25.0% 78192d73-be0b-4d49-a129-9bec0770efed 108.5 GB 256 16.7% 9889280a-1433-439e-bb84-6b7e7f44d761 {noformat} -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
[jira] [Commented] (CASSANDRA-5443) Add CAS CQL support
[ https://issues.apache.org/jira/browse/CASSANDRA-5443?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13645045#comment-13645045 ] Aleksey Yeschenko commented on CASSANDRA-5443: -- Mostly LGTM, the only issue I've spotted is that K_EXISTS has not been added to unreserved keywords. This is potentially breaking stuff (not sure about K_IF and K_NOT). Add CAS CQL support --- Key: CASSANDRA-5443 URL: https://issues.apache.org/jira/browse/CASSANDRA-5443 Project: Cassandra Issue Type: Sub-task Components: API, Core Reporter: Jonathan Ellis Assignee: Sylvain Lebresne Fix For: 2.0 Attachments: 0001-Refactor-Update-and-Delete-statement-to-extract-common.txt, 0002-Add-syntax-to-support-conditional-update-delete.txt, 0003-Handle-deleted-and-expiring-column-in-paxos-updates.txt, 0004-Support-tombstones-when-comparing-for-CAS.txt, 0005-add-UPDATE-.-IF-NOT-EXISTS-syntax.txt -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira
git commit: exclude *-sources.jar from the classpath
Updated Branches: refs/heads/cassandra-1.1 8bb5487ea - 50f49a640 exclude *-sources.jar from the classpath Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/50f49a64 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/50f49a64 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/50f49a64 Branch: refs/heads/cassandra-1.1 Commit: 50f49a64004b7203297fbc8669aff2d13be69373 Parents: 8bb5487 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 00:54:04 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 00:54:04 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/50f49a64/build.xml -- diff --git a/build.xml b/build.xml index c81af1f..ab4dd71 100644 --- a/build.xml +++ b/build.xml @@ -119,9 +119,11 @@ pathelement location=${build.classes.thrift} / fileset dir=${build.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset fileset dir=${build.dir.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset /path
[1/2] git commit: exclude *-sources.jar from the classpath
Updated Branches: refs/heads/cassandra-1.2 a44a3d4b8 - 4f5c84742 exclude *-sources.jar from the classpath Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/50f49a64 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/50f49a64 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/50f49a64 Branch: refs/heads/cassandra-1.2 Commit: 50f49a64004b7203297fbc8669aff2d13be69373 Parents: 8bb5487 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 00:54:04 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 00:54:04 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/50f49a64/build.xml -- diff --git a/build.xml b/build.xml index c81af1f..ab4dd71 100644 --- a/build.xml +++ b/build.xml @@ -119,9 +119,11 @@ pathelement location=${build.classes.thrift} / fileset dir=${build.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset fileset dir=${build.dir.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset /path
[2/2] git commit: Merge branch 'cassandra-1.1' into cassandra-1.2
Merge branch 'cassandra-1.1' into cassandra-1.2 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4f5c8474 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4f5c8474 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4f5c8474 Branch: refs/heads/cassandra-1.2 Commit: 4f5c84742aae68bcd2b6e864c54a15ddba0bddf8 Parents: a44a3d4 50f49a6 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 00:55:10 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 00:55:10 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4f5c8474/build.xml --
[2/3] git commit: Merge branch 'cassandra-1.1' into cassandra-1.2
Merge branch 'cassandra-1.1' into cassandra-1.2 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/4f5c8474 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/4f5c8474 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/4f5c8474 Branch: refs/heads/trunk Commit: 4f5c84742aae68bcd2b6e864c54a15ddba0bddf8 Parents: a44a3d4 50f49a6 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 00:55:10 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 00:55:10 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/4f5c8474/build.xml --
[1/3] git commit: exclude *-sources.jar from the classpath
Updated Branches: refs/heads/trunk 011e8a060 - 851fe6400 exclude *-sources.jar from the classpath Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/50f49a64 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/50f49a64 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/50f49a64 Branch: refs/heads/trunk Commit: 50f49a64004b7203297fbc8669aff2d13be69373 Parents: 8bb5487 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 00:54:04 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 00:54:04 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/50f49a64/build.xml -- diff --git a/build.xml b/build.xml index c81af1f..ab4dd71 100644 --- a/build.xml +++ b/build.xml @@ -119,9 +119,11 @@ pathelement location=${build.classes.thrift} / fileset dir=${build.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset fileset dir=${build.dir.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset /path
[3/3] git commit: Merge branch 'cassandra-1.2' into trunk
Merge branch 'cassandra-1.2' into trunk Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/851fe640 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/851fe640 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/851fe640 Branch: refs/heads/trunk Commit: 851fe64006f7d0493a9079d7520c70a1096c02b1 Parents: 011e8a0 4f5c847 Author: Dave Brosius dbros...@apache.org Authored: Tue Apr 30 01:01:55 2013 -0400 Committer: Dave Brosius dbros...@apache.org Committed: Tue Apr 30 01:01:55 2013 -0400 -- build.xml |2 ++ 1 files changed, 2 insertions(+), 0 deletions(-) -- http://git-wip-us.apache.org/repos/asf/cassandra/blob/851fe640/build.xml -- diff --cc build.xml index 8889794,c8407b2..ec2e90a --- a/build.xml +++ b/build.xml @@@ -122,12 -123,9 +123,13 @@@ /fileset fileset dir=${build.dir.lib} include name=**/*.jar / + exclude name=**/*-sources.jar/ /fileset /path + + path id=cobertura.classpath + pathelement location=${cobertura.classes.dir}/ + /path macrodef name=create-javadoc attribute name=destdir/
[jira] [Commented] (CASSANDRA-5514) Allow timestamp hints
[ https://issues.apache.org/jira/browse/CASSANDRA-5514?page=com.atlassian.jira.plugin.system.issuetabpanels:comment-tabpanelfocusedCommentId=13645255#comment-13645255 ] Marcus Eriksson commented on CASSANDRA-5514: [~tupshin] what ticket did you mean? 5518 looks quite unrelated Allow timestamp hints - Key: CASSANDRA-5514 URL: https://issues.apache.org/jira/browse/CASSANDRA-5514 Project: Cassandra Issue Type: New Feature Components: API, Core Reporter: Jonathan Ellis Assignee: Marcus Eriksson Fix For: 2.0 Slice queries can't optimize based on timestamp except for rare cases (CASSANDRA-4116). However, many common queries involve an implicit time component, where the application author knows that he is only interested in data more recent than X, or older than Y. We could use the per-sstable max and min timestamps we track to avoid touching cold data if we could pass a hint to Cassandra about the time range we care about. -- This message is automatically generated by JIRA. If you think it was sent incorrectly, please contact your JIRA administrators For more information on JIRA, see: http://www.atlassian.com/software/jira