Fix compaction race during columnfamily drop patch by Tyler Hobbs; reviewed by jbellis for CASSANDRA-5957
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/d40f3c8d Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/d40f3c8d Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/d40f3c8d Branch: refs/heads/trunk Commit: d40f3c8d7f826e88c4e2d23dfa2712b258b9857f Parents: 1797b49 Author: Jonathan Ellis <jbel...@apache.org> Authored: Mon Oct 14 10:35:10 2013 +0100 Committer: Jonathan Ellis <jbel...@apache.org> Committed: Mon Oct 14 10:35:10 2013 +0100 ---------------------------------------------------------------------- CHANGES.txt | 1 + .../org/apache/cassandra/db/DataTracker.java | 30 ++++++++++++++------ 2 files changed, 23 insertions(+), 8 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/d40f3c8d/CHANGES.txt ---------------------------------------------------------------------- diff --git a/CHANGES.txt b/CHANGES.txt index 7f43031..53bc848 100644 --- a/CHANGES.txt +++ b/CHANGES.txt @@ -23,6 +23,7 @@ * (Hadoop) Fetch no more than 128 splits in parallel (CASSANDRA-6169) * stress: add username/password authentication support (CASSANDRA-6068) * Fix indexed queries with row cache enabled on parent table (CASSANDRA-5732) + * Fix compaction race during columnfamily drop (CASSANDRA-5957) 1.2.10 http://git-wip-us.apache.org/repos/asf/cassandra/blob/d40f3c8d/src/java/org/apache/cassandra/db/DataTracker.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/DataTracker.java b/src/java/org/apache/cassandra/db/DataTracker.java index b2f52a9..4fe0a5e 100644 --- a/src/java/org/apache/cassandra/db/DataTracker.java +++ b/src/java/org/apache/cassandra/db/DataTracker.java @@ -193,7 +193,8 @@ public class DataTracker */ public void unmarkCompacting(Collection<SSTableReader> unmark) { - if (!cfstore.isValid()) + boolean isValid = 
cfstore.isValid(); + if (!isValid) { // We don't know if the original compaction succeeded or failed, which makes it difficult to know // if the sstable reference has already been released. @@ -210,6 +211,14 @@ public class DataTracker newView = currentView.unmarkCompacting(unmark); } while (!view.compareAndSet(currentView, newView)); + + if (!isValid) + { + // when the CFS is invalidated, it will call unreferenceSSTables(). However, unreferenceSSTables only deals + // with sstables that aren't currently being compacted. If there are ongoing compactions that finish or are + // interrupted after the CFS is invalidated, those sstables need to be unreferenced as well, so we do that here. + unreferenceSSTables(); + } } public void markCompacted(Collection<SSTableReader> sstables, OperationType compactionType) @@ -262,7 +271,7 @@ public class DataTracker return; } notifySSTablesChanged(notCompacting, Collections.<SSTableReader>emptySet(), OperationType.UNKNOWN); - postReplace(notCompacting, Collections.<SSTableReader>emptySet()); + postReplace(notCompacting, Collections.<SSTableReader>emptySet(), true); } /** @@ -305,7 +314,7 @@ public class DataTracker { if (!cfstore.isValid()) { - removeOldSSTablesSize(replacements); + removeOldSSTablesSize(replacements, false); replacements = Collections.emptyList(); } @@ -317,13 +326,13 @@ public class DataTracker } while (!view.compareAndSet(currentView, newView)); - postReplace(oldSSTables, replacements); + postReplace(oldSSTables, replacements, false); } - private void postReplace(Collection<SSTableReader> oldSSTables, Iterable<SSTableReader> replacements) + private void postReplace(Collection<SSTableReader> oldSSTables, Iterable<SSTableReader> replacements, boolean tolerateCompacted) { addNewSSTablesSize(replacements); - removeOldSSTablesSize(oldSSTables); + removeOldSSTablesSize(oldSSTables, tolerateCompacted); } private void addNewSSTablesSize(Iterable<SSTableReader> newSSTables) @@ -341,7 +350,7 @@ public class DataTracker } }
- private void removeOldSSTablesSize(Iterable<SSTableReader> oldSSTables) + private void removeOldSSTablesSize(Iterable<SSTableReader> oldSSTables, boolean tolerateCompacted) { for (SSTableReader sstable : oldSSTables) { @@ -351,8 +360,13 @@ public class DataTracker long size = sstable.bytesOnDisk(); StorageMetrics.load.dec(size); cfstore.metric.liveDiskSpaceUsed.dec(size); + + // tolerateCompacted will be true when the CFS is no longer valid (dropped). If there were ongoing + // compactions when it was invalidated, sstables may already be marked compacted, so we should + // tolerate that (see CASSANDRA-5957) boolean firstToCompact = sstable.markCompacted(); - assert firstToCompact : sstable + " was already marked compacted"; + assert (tolerateCompacted || firstToCompact) : sstable + " was already marked compacted"; + sstable.releaseReference(); } }