rename new sstables with correct generation at load time
patch by jbellis; reviewed by slebresne for CASSANDRA-3967


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/837ee0ec
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/837ee0ec
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/837ee0ec

Branch: refs/heads/cassandra-1.1.0
Commit: 837ee0ec5659afae2d3f138ee55d3bc7a60bdebf
Parents: 6423bfe
Author: Jonathan Ellis <jbel...@apache.org>
Authored: Thu Mar 8 14:00:47 2012 -0600
Committer: Jonathan Ellis <jbel...@apache.org>
Committed: Fri Mar 16 11:20:33 2012 -0500

----------------------------------------------------------------------
 .../org/apache/cassandra/db/ColumnFamilyStore.java |   93 ++++----------
 .../apache/cassandra/io/sstable/SSTableWriter.java |    7 +-
 2 files changed, 33 insertions(+), 67 deletions(-)
----------------------------------------------------------------------
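
Before the per-file diff, a minimal, self-contained sketch of the approach described in the commit message (hypothetical class and file names, plain java.nio, not the Cassandra implementation shown below): each newly dropped-in sstable is given a fresh generation number issued by the node's own counter and renamed before it is opened, instead of bumping the local counter to match whatever generations the imported files happen to carry.

    import java.io.IOException;
    import java.nio.file.DirectoryStream;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.nio.file.Paths;
    import java.util.concurrent.atomic.AtomicInteger;

    public class LoadTimeRenameSketch
    {
        // stand-in for ColumnFamilyStore.fileIndexGenerator; 42 is an arbitrary current value
        private static final AtomicInteger fileIndexGenerator = new AtomicInteger(42);

        public static void main(String[] args) throws IOException
        {
            Path dir = Paths.get(args.length > 0 ? args[0] : ".");
            // match only the -Data.db component here; a real loader walks every component
            try (DirectoryStream<Path> stream = Files.newDirectoryStream(dir, "*-Data.db"))
            {
                for (Path data : stream)
                {
                    // issue a generation from the local counter so it cannot collide with
                    // generations this node will use for its own flushes and compactions
                    int generation = fileIndexGenerator.incrementAndGet();
                    String renamed = data.getFileName().toString()
                                         .replaceFirst("-\\d+-Data\\.db$", "-" + generation + "-Data.db");
                    if (renamed.equals(data.getFileName().toString()))
                        continue; // name did not carry a recognizable generation; skip it
                    System.out.println("Renaming " + data.getFileName() + " to " + renamed);
                    Files.move(data, data.resolveSibling(renamed));
                    // the real code renames all components (-Index.db, -Filter.db, ...) to the
                    // new generation and only then opens an SSTableReader on the new descriptor
                }
            }
        }
    }
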


http://git-wip-us.apache.org/repos/asf/cassandra/blob/837ee0ec/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
index ef9bc90..7bc3c95 100644
--- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java
@@ -449,75 +449,59 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     {
         logger.info("Loading new SSTables for " + table.name + "/" + 
columnFamily + "...");
 
-        // current view over ColumnFamilyStore
-        DataTracker.View view = data.getView();
-        // descriptors of currently registered SSTables
         Set<Descriptor> currentDescriptors = new HashSet<Descriptor>();
-        // going to hold new SSTable view of the CFS containing old and new SSTables
-        Set<SSTableReader> sstables = new HashSet<SSTableReader>();
-        // get the max generation number, to prevent generation conflicts
-        int generation = 0;
-
-        for (SSTableReader reader : view.sstables)
-        {
-            sstables.add(reader); // first of all, add old SSTables
-            currentDescriptors.add(reader.descriptor);
-
-            if (reader.descriptor.generation > generation)
-                generation = reader.descriptor.generation;
-        }
-
-        SSTableReader reader;
-        // set to true if we have at least one new SSTable to load
-        boolean atLeastOneNew = false;
+        for (SSTableReader sstable : data.getView().sstables)
+            currentDescriptors.add(sstable.descriptor);
+        Set<SSTableReader> newSSTables = new HashSet<SSTableReader>();
 
         Directories.SSTableLister lister = directories.sstableLister().skipCompacted(true).skipTemporary(true);
-        for (Map.Entry<Descriptor, Set<Component>> rawSSTable : lister.list().entrySet())
+        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet())
         {
-            Descriptor descriptor = rawSSTable.getKey();
+            Descriptor descriptor = entry.getKey();
 
             if (currentDescriptors.contains(descriptor))
                 continue; // old (initialized) SSTable found, skipping
+            if (descriptor.temporary) // in the process of being written
+                continue;
 
             if (!descriptor.isCompatible())
                 throw new RuntimeException(String.format("Can't open incompatible SSTable! Current version %s, found file: %s",
                                                          Descriptor.CURRENT_VERSION,
                                                          descriptor));
 
-            logger.info("Initializing new SSTable {}", rawSSTable);
+            Descriptor newDescriptor = new Descriptor(descriptor.directory,
+                                                      descriptor.ksname,
+                                                      descriptor.cfname,
+                                                      fileIndexGenerator.incrementAndGet(),
+                                                      false);
+            logger.info("Renaming new SSTable {} to {}", descriptor, 
newDescriptor);
+            SSTableWriter.rename(descriptor, newDescriptor, entry.getValue());
 
+            SSTableReader reader;
             try
             {
-                Set<DecoratedKey> savedKeys = CacheService.instance.keyCache.readSaved(descriptor.ksname, descriptor.cfname);
-                reader = SSTableReader.open(rawSSTable.getKey(), rawSSTable.getValue(), savedKeys, data, metadata, partitioner);
+                reader = SSTableReader.open(newDescriptor, entry.getValue(), Collections.<DecoratedKey>emptySet(), data, metadata, partitioner);
             }
             catch (IOException e)
             {
-                SSTableReader.logOpenException(rawSSTable.getKey(), e);
+                SSTableReader.logOpenException(entry.getKey(), e);
                 continue;
             }
-
-            sstables.add(reader);
-
-            if (descriptor.generation > generation)
-                generation = descriptor.generation;
-
-            if (!atLeastOneNew) // set flag only once
-                atLeastOneNew = true;
+            newSSTables.add(reader);
         }
 
-        if (!atLeastOneNew)
+        if (newSSTables.isEmpty())
         {
             logger.info("No new SSTables where found for " + table.name + "/" 
+ columnFamily);
             return;
         }
 
-        logger.info("Loading new SSTables and building secondary indexes for " 
+ table.name + "/" + columnFamily + ": " + sstables);
-        SSTableReader.acquireReferences(sstables);
-        data.addSSTables(sstables);
+        logger.info("Loading new SSTables and building secondary indexes for " 
+ table.name + "/" + columnFamily + ": " + newSSTables);
+        SSTableReader.acquireReferences(newSSTables);
+        data.addSSTables(newSSTables);
         try
         {
-            indexManager.maybeBuildSecondaryIndexes(sstables, indexManager.getIndexedColumns());
+            indexManager.maybeBuildSecondaryIndexes(newSSTables, indexManager.getIndexedColumns());
         }
         catch (IOException e)
         {
@@ -525,20 +509,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
         }
         finally
         {
-            SSTableReader.releaseReferences(sstables);
-        }
-
-        if (fileIndexGenerator.get() < generation)
-        {
-            // we don't bother with CAS here since if the generations used in the new files overlap with
-            // files that we create during load, we're already screwed
-            logger.info("Setting up new generation: " + generation);
-            fileIndexGenerator.set(generation);
-        }
-        else
-        {
-            logger.warn("Largest generation seen in loaded sstables was {}, 
which may overlap with native sstable files (generation {}).",
-                        generation, fileIndexGenerator.get());
+            SSTableReader.releaseReferences(newSSTables);
         }
 
         logger.info("Done loading load new SSTables for " + table.name + "/" + 
columnFamily);
@@ -1621,22 +1592,12 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean
     }
 
     /**
-     * For testing.  no effort is made to clear historical memtables, nor for
-     * thread safety
+     * For testing.  No effort is made to clear historical or even the current memtables, nor for
+     * thread safety.  All we do is wipe the sstable containers clean, while leaving the actual
+     * data files present on disk.  (This allows tests to easily call loadNewSSTables on them.)
      */
     public void clearUnsafe()
     {
-        fileIndexGenerator.set(0); // Avoid unit test failures (see CASSANDRA-3735).
-
-        // Clear backups
-        Directories.SSTableLister lister = directories.sstableLister().onlyBackups(true);
-        for (Map.Entry<Descriptor, Set<Component>> entry : lister.list().entrySet())
-        {
-            Descriptor desc = entry.getKey();
-            for (Component comp : entry.getValue())
-                FileUtils.delete(desc.filenameFor(comp));
-        }
-
         for (ColumnFamilyStore cfs : concatWithIndexes())
             cfs.data.init();
     }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/837ee0ec/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
index 25c158c..5e99241 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
@@ -363,6 +363,12 @@ public class SSTableWriter extends SSTable
     static Descriptor rename(Descriptor tmpdesc, Set<Component> components)
     {
         Descriptor newdesc = tmpdesc.asTemporary(false);
+        rename(tmpdesc, newdesc, components);
+        return newdesc;
+    }
+
+    public static void rename(Descriptor tmpdesc, Descriptor newdesc, Set<Component> components)
+    {
         try
         {
             // do -Data last because -Data present should mean the sstable was completely renamed before crash
@@ -374,7 +380,6 @@ public class SSTableWriter extends SSTable
         {
             throw new IOError(e);
         }
-        return newdesc;
     }
 
     public long getFilePointer()
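
A note on the ordering enforced in rename() above: the -Data component is moved last so that, after a crash, the presence of a -Data file under the final name implies every other component was already renamed. A rough standalone illustration of that ordering (hypothetical helper and component names, not the SSTableWriter code):

    import java.io.IOException;
    import java.nio.file.Files;
    import java.nio.file.Path;
    import java.util.Set;

    public class RenameOrderSketch
    {
        // Rename every component of an sstable from oldPrefix-* to newPrefix-*,
        // leaving Data.db for last so a completed Data rename marks the whole
        // sstable as fully renamed.
        static void renameComponents(Path dir, String oldPrefix, String newPrefix, Set<String> components) throws IOException
        {
            for (String component : components)
                if (!component.equals("Data.db"))
                    Files.move(dir.resolve(oldPrefix + "-" + component),
                               dir.resolve(newPrefix + "-" + component));
            if (components.contains("Data.db"))
                Files.move(dir.resolve(oldPrefix + "-Data.db"),
                           dir.resolve(newPrefix + "-Data.db"));
        }
    }
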
