http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java 
b/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
index a037d90..8341e30 100644
--- a/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
+++ b/test/unit/org/apache/cassandra/db/SecondaryIndexTest.java
@@ -30,7 +30,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.schema.ColumnMetadata;
 import org.apache.cassandra.cql3.Operator;
 import org.apache.cassandra.cql3.statements.IndexTarget;
 import org.apache.cassandra.db.marshal.AbstractType;
@@ -40,6 +40,8 @@ import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.index.Index;
 import org.apache.cassandra.schema.IndexMetadata;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableMetadata;
+import org.apache.cassandra.schema.MigrationManager;
 import org.apache.cassandra.utils.ByteBufferUtil;
 import org.apache.cassandra.utils.FBUtilities;
 
@@ -81,10 +83,10 @@ public class SecondaryIndexTest
     {
         ColumnFamilyStore cfs = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(WITH_COMPOSITE_INDEX);
 
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k1").clustering("c").add("birthdate", 1L).add("notbirthdate", 
1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k2").clustering("c").add("birthdate", 2L).add("notbirthdate", 
2L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k3").clustering("c").add("birthdate", 1L).add("notbirthdate", 
2L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k4").clustering("c").add("birthdate", 3L).add("notbirthdate", 
2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k1").clustering("c").add("birthdate", 1L).add("notbirthdate", 
1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k2").clustering("c").add("birthdate", 2L).add("notbirthdate", 
2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k3").clustering("c").add("birthdate", 1L).add("notbirthdate", 
2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k4").clustering("c").add("birthdate", 3L).add("notbirthdate", 
2L).build().applyUnsafe();
 
         // basic single-expression query
         List<FilteredPartition> partitions = 
Util.getAll(Util.cmd(cfs).fromKeyExcl("k1").toKeyIncl("k3").columns("birthdate").build());
@@ -157,7 +159,7 @@ public class SecondaryIndexTest
 
         for (int i = 0; i < 100; i++)
         {
-            new RowUpdateBuilder(cfs.metadata, FBUtilities.timestampMicros(), 
"key" + i)
+            new RowUpdateBuilder(cfs.metadata(), 
FBUtilities.timestampMicros(), "key" + i)
                     .clustering("c")
                     .add("birthdate", 34L)
                     .add("notbirthdate", ByteBufferUtil.bytes((long) (i % 2)))
@@ -189,15 +191,15 @@ public class SecondaryIndexTest
     {
         ColumnFamilyStore cfs = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(WITH_COMPOSITE_INDEX);
         ByteBuffer bBB = ByteBufferUtil.bytes("birthdate");
-        ColumnDefinition bDef = cfs.metadata.getColumnDefinition(bBB);
+        ColumnMetadata bDef = cfs.metadata().getColumn(bBB);
         ByteBuffer col = ByteBufferUtil.bytes("birthdate");
 
         // Confirm addition works
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
         assertIndexedOne(cfs, col, 1L);
 
         // delete the column directly
-        RowUpdateBuilder.deleteRow(cfs.metadata, 1, "k1", "c").applyUnsafe();
+        RowUpdateBuilder.deleteRow(cfs.metadata(), 1, "k1", "c").applyUnsafe();
         assertIndexedNone(cfs, col, 1L);
 
         // verify that it's not being indexed under any other value either
@@ -205,26 +207,26 @@ public class SecondaryIndexTest
         assertNull(cfs.indexManager.getBestIndexFor(rc));
 
         // resurrect w/ a newer timestamp
-        new RowUpdateBuilder(cfs.metadata, 2, 
"k1").clustering("c").add("birthdate", 1L).build().apply();;
+        new RowUpdateBuilder(cfs.metadata(), 2, 
"k1").clustering("c").add("birthdate", 1L).build().apply();;
         assertIndexedOne(cfs, col, 1L);
 
         // verify that row and delete w/ older timestamp does nothing
-        RowUpdateBuilder.deleteRow(cfs.metadata, 1, "k1", "c").applyUnsafe();
+        RowUpdateBuilder.deleteRow(cfs.metadata(), 1, "k1", "c").applyUnsafe();
         assertIndexedOne(cfs, col, 1L);
 
         // similarly, column delete w/ older timestamp should do nothing
-        new RowUpdateBuilder(cfs.metadata, 1, 
"k1").clustering("c").delete(bDef).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1, 
"k1").clustering("c").delete(bDef).build().applyUnsafe();
         assertIndexedOne(cfs, col, 1L);
 
         // delete the entire row (w/ newer timestamp this time)
         // todo - checking the # of index searchers for the command is 
probably not the best thing to test here
-        RowUpdateBuilder.deleteRow(cfs.metadata, 3, "k1", "c").applyUnsafe();
+        RowUpdateBuilder.deleteRow(cfs.metadata(), 3, "k1", "c").applyUnsafe();
         rc = Util.cmd(cfs).build();
         assertNull(cfs.indexManager.getBestIndexFor(rc));
 
         // make sure obsolete mutations don't generate an index entry
         // todo - checking the # of index searchers for the command is 
probably not the best thing to test here
-        new RowUpdateBuilder(cfs.metadata, 3, 
"k1").clustering("c").add("birthdate", 1L).build().apply();;
+        new RowUpdateBuilder(cfs.metadata(), 3, 
"k1").clustering("c").add("birthdate", 1L).build().apply();;
         rc = Util.cmd(cfs).build();
         assertNull(cfs.indexManager.getBestIndexFor(rc));
     }
@@ -237,8 +239,8 @@ public class SecondaryIndexTest
         ByteBuffer col = ByteBufferUtil.bytes("birthdate");
 
         // create a row and update the birthdate value, test that the index 
query fetches the new version
-        new RowUpdateBuilder(cfs.metadata, 1, 
"testIndexUpdate").clustering("c").add("birthdate", 100L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 2, 
"testIndexUpdate").clustering("c").add("birthdate", 200L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1, 
"testIndexUpdate").clustering("c").add("birthdate", 100L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 2, 
"testIndexUpdate").clustering("c").add("birthdate", 200L).build().applyUnsafe();
 
         // Confirm old version fetch fails
         assertIndexedNone(cfs, col, 100L);
@@ -259,23 +261,23 @@ public class SecondaryIndexTest
         ByteBuffer col = ByteBufferUtil.bytes("birthdate");
 
         // create a row and update the birthdate value with an expiring column
-        new RowUpdateBuilder(cfs.metadata, 1L, 500, 
"K100").clustering("c").add("birthdate", 100L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1L, 500, 
"K100").clustering("c").add("birthdate", 100L).build().applyUnsafe();
         assertIndexedOne(cfs, col, 100L);
 
         // requires a 1s sleep because we calculate local expiry time as 
(now() / 1000) + ttl
         TimeUnit.SECONDS.sleep(1);
 
         // now overwrite with the same name/value/ttl, but the local expiry 
time will be different
-        new RowUpdateBuilder(cfs.metadata, 1L, 500, 
"K100").clustering("c").add("birthdate", 100L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1L, 500, 
"K100").clustering("c").add("birthdate", 100L).build().applyUnsafe();
         assertIndexedOne(cfs, col, 100L);
 
         // check that modifying the indexed value using the same timestamp 
behaves as expected
-        new RowUpdateBuilder(cfs.metadata, 1L, 500, 
"K101").clustering("c").add("birthdate", 101L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1L, 500, 
"K101").clustering("c").add("birthdate", 101L).build().applyUnsafe();
         assertIndexedOne(cfs, col, 101L);
 
         TimeUnit.SECONDS.sleep(1);
 
-        new RowUpdateBuilder(cfs.metadata, 1L, 500, 
"K101").clustering("c").add("birthdate", 102L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1L, 500, 
"K101").clustering("c").add("birthdate", 102L).build().applyUnsafe();
         // Confirm 101 is gone
         assertIndexedNone(cfs, col, 101L);
 
@@ -292,13 +294,13 @@ public class SecondaryIndexTest
         ByteBuffer col = ByteBufferUtil.bytes("birthdate");
 
         // create a row and update the "birthdate" value
-        new RowUpdateBuilder(cfs.metadata, 1, 
"k1").noRowMarker().add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1, 
"k1").noRowMarker().add("birthdate", 1L).build().applyUnsafe();
 
         // force a flush, so our index isn't being read from a memtable
         keyspace.getColumnFamilyStore(WITH_KEYS_INDEX).forceBlockingFlush();
 
         // now apply another update, but force the index update to be skipped
-        keyspace.apply(new RowUpdateBuilder(cfs.metadata, 2, 
"k1").noRowMarker().add("birthdate", 2L).build(),
+        keyspace.apply(new RowUpdateBuilder(cfs.metadata(), 2, 
"k1").noRowMarker().add("birthdate", 2L).build(),
                        true,
                        false);
 
@@ -311,7 +313,7 @@ public class SecondaryIndexTest
 
         // now, reset back to the original value, still skipping the index 
update, to
         // make sure the value was expunged from the index when it was 
discovered to be inconsistent
-        keyspace.apply(new RowUpdateBuilder(cfs.metadata, 3, 
"k1").noRowMarker().add("birthdate", 1L).build(),
+        keyspace.apply(new RowUpdateBuilder(cfs.metadata(), 3, 
"k1").noRowMarker().add("birthdate", 1L).build(),
                        true,
                        false);
         assertIndexedNone(cfs, col, 1L);
@@ -342,7 +344,7 @@ public class SecondaryIndexTest
         ByteBuffer col = ByteBufferUtil.bytes(colName);
 
         // create a row and update the author value
-        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata, 0, "k1");
+        RowUpdateBuilder builder = new RowUpdateBuilder(cfs.metadata(), 0, 
"k1");
         if (!isStatic)
             builder = builder.clustering("c");
         builder.add(colName, 10l).build().applyUnsafe();
@@ -355,7 +357,7 @@ public class SecondaryIndexTest
         assertIndexedOne(cfs, col, 10l);
 
         // now apply another update, but force the index update to be skipped
-        builder = new RowUpdateBuilder(cfs.metadata, 0, "k1");
+        builder = new RowUpdateBuilder(cfs.metadata(), 0, "k1");
         if (!isStatic)
             builder = builder.clustering("c");
         builder.add(colName, 20l);
@@ -371,7 +373,7 @@ public class SecondaryIndexTest
         // now, reset back to the original value, still skipping the index 
update, to
         // make sure the value was expunged from the index when it was 
discovered to be inconsistent
         // TODO: Figure out why this is re-inserting
-        builder = new RowUpdateBuilder(cfs.metadata, 2, "k1");
+        builder = new RowUpdateBuilder(cfs.metadata(), 2, "k1");
         if (!isStatic)
             builder = builder.clustering("c");
         builder.add(colName, 10L);
@@ -391,10 +393,10 @@ public class SecondaryIndexTest
         ByteBuffer colName = ByteBufferUtil.bytes("birthdate");
 
         // Insert indexed value.
-        new RowUpdateBuilder(cfs.metadata, 1, 
"k1").clustering("c").add("birthdate", 10l).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1, 
"k1").clustering("c").add("birthdate", 10l).build().applyUnsafe();
 
         // Now delete the value
-        RowUpdateBuilder.deleteRow(cfs.metadata, 2, "k1", "c").applyUnsafe();
+        RowUpdateBuilder.deleteRow(cfs.metadata(), 2, "k1", "c").applyUnsafe();
 
         // We want the data to be gcable, but even if gcGrace == 0, we still 
need to wait 1 second
         // since we won't gc on a tie.
@@ -414,10 +416,10 @@ public class SecondaryIndexTest
         ByteBuffer colName = ByteBufferUtil.bytes("birthdate");
 
         // Insert indexed value.
-        new RowUpdateBuilder(cfs.metadata, 1, "k1").add("birthdate", 
10l).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 1, "k1").add("birthdate", 
10l).build().applyUnsafe();
 
         // Now delete the value
-        RowUpdateBuilder.deleteRow(cfs.metadata, 2, "k1").applyUnsafe();
+        RowUpdateBuilder.deleteRow(cfs.metadata(), 2, "k1").applyUnsafe();
 
         // We want the data to be gcable, but even if gcGrace == 0, we still 
need to wait 1 second
         // since we won't gc on a tie.
@@ -436,14 +438,14 @@ public class SecondaryIndexTest
         ColumnFamilyStore cfs = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(WITH_COMPOSITE_INDEX);
         Mutation rm;
 
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk1").clustering("c").add("notbirthdate", 1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk2").clustering("c").add("birthdate", 1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk2").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk3").clustering("c").add("birthdate", 1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk3").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk4").clustering("c").add("birthdate", 1L).build().applyUnsafe();
-        new RowUpdateBuilder(cfs.metadata, 0, 
"kk4").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk1").clustering("c").add("notbirthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk2").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk2").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk3").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk3").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk4").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"kk4").clustering("c").add("notbirthdate", 2L).build().applyUnsafe();
 
         // basic single-expression query, limit 1
         Util.getOnlyRow(Util.cmd(cfs)
@@ -460,28 +462,39 @@ public class SecondaryIndexTest
         ColumnFamilyStore cfs = 
keyspace.getColumnFamilyStore(COMPOSITE_INDEX_TO_BE_ADDED);
 
         // create a row and update the birthdate value, test that the index 
query fetches the new version
-        new RowUpdateBuilder(cfs.metadata, 0, 
"k1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
+        new RowUpdateBuilder(cfs.metadata(), 0, 
"k1").clustering("c").add("birthdate", 1L).build().applyUnsafe();
 
         String indexName = "birthdate_index";
-        ColumnDefinition old = 
cfs.metadata.getColumnDefinition(ByteBufferUtil.bytes("birthdate"));
+        ColumnMetadata old = 
cfs.metadata().getColumn(ByteBufferUtil.bytes("birthdate"));
         IndexMetadata indexDef =
-            IndexMetadata.fromIndexTargets(cfs.metadata,
-                                           Collections.singletonList(new 
IndexTarget(old.name, IndexTarget.Type.VALUES)),
+            IndexMetadata.fromIndexTargets(
+            Collections.singletonList(new IndexTarget(old.name, 
IndexTarget.Type.VALUES)),
                                            indexName,
                                            IndexMetadata.Kind.COMPOSITES,
                                            Collections.EMPTY_MAP);
-        cfs.metadata.indexes(cfs.metadata.getIndexes().with(indexDef));
-        Future<?> future = cfs.indexManager.addIndex(indexDef);
-        future.get();
+
+        TableMetadata current = cfs.metadata();
+
+        TableMetadata updated =
+            current.unbuild()
+                   .indexes(current.indexes.with(indexDef))
+                   .build();
+        MigrationManager.announceTableUpdate(updated, true);
+
+        // wait for the index to be built
+        Index index = cfs.indexManager.getIndex(indexDef);
+        do
+        {
+            TimeUnit.MILLISECONDS.sleep(100);
+        }
+        while (!cfs.indexManager.isIndexQueryable(index));
 
         // we had a bug (CASSANDRA-2244) where index would get created but not 
flushed -- check for that
         // the way we find the index cfs is a bit convoluted at the moment
-        boolean flushed = false;
         ColumnFamilyStore indexCfs = cfs.indexManager.getIndex(indexDef)
                                                      .getBackingTable()
                                                      
.orElseThrow(throwAssert("Index not found"));
-        flushed = !indexCfs.getLiveSSTables().isEmpty();
-        assertTrue(flushed);
+        assertFalse(indexCfs.getLiveSSTables().isEmpty());
         assertIndexedOne(cfs, ByteBufferUtil.bytes("birthdate"), 1L);
 
         // validate that drop clears it out & rebuild works (CASSANDRA-2320)
@@ -490,7 +503,7 @@ public class SecondaryIndexTest
         assertFalse(cfs.getBuiltIndexes().contains(indexName));
 
         // rebuild & re-query
-        future = cfs.indexManager.addIndex(indexDef);
+        Future future = cfs.indexManager.addIndex(indexDef);
         future.get();
         assertIndexedOne(cfs, ByteBufferUtil.bytes("birthdate"), 1L);
     }
@@ -503,7 +516,7 @@ public class SecondaryIndexTest
         ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(WITH_KEYS_INDEX);
 
         for (int i = 0; i < 10; i++)
-            new RowUpdateBuilder(cfs.metadata, 0, "k" + 
i).noRowMarker().add("birthdate", 1l).build().applyUnsafe();
+            new RowUpdateBuilder(cfs.metadata(), 0, "k" + 
i).noRowMarker().add("birthdate", 1l).build().applyUnsafe();
 
         assertIndexedCount(cfs, ByteBufferUtil.bytes("birthdate"), 1l, 10);
         cfs.forceBlockingFlush();
@@ -520,7 +533,7 @@ public class SecondaryIndexTest
     }
     private void assertIndexedCount(ColumnFamilyStore cfs, ByteBuffer col, 
Object val, int count)
     {
-        ColumnDefinition cdef = cfs.metadata.getColumnDefinition(col);
+        ColumnMetadata cdef = cfs.metadata().getColumn(col);
 
         ReadCommand rc = Util.cmd(cfs).filterOn(cdef.name.toString(), 
Operator.EQ, ((AbstractType) cdef.cellValueType()).decompose(val)).build();
         Index.Searcher searcher = 
cfs.indexManager.getBestIndexFor(rc).searcherFor(rc);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java 
b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
index f89aa8d..1d4bdb6 100644
--- a/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
+++ b/test/unit/org/apache/cassandra/db/SinglePartitionSliceCommandTest.java
@@ -21,8 +21,6 @@
 package org.apache.cassandra.db;
 
 import java.io.IOException;
-import java.nio.ByteBuffer;
-import java.util.Collections;
 import java.util.Iterator;
 
 import org.junit.Assert;
@@ -34,13 +32,12 @@ import org.slf4j.Logger;
 import org.slf4j.LoggerFactory;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.cql3.ColumnIdentifier;
 import org.apache.cassandra.cql3.QueryProcessor;
-import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.db.filter.ClusteringIndexSliceFilter;
 import org.apache.cassandra.db.filter.ColumnFilter;
 import org.apache.cassandra.db.filter.DataLimits;
@@ -67,27 +64,27 @@ public class SinglePartitionSliceCommandTest
     private static final String KEYSPACE = "ks";
     private static final String TABLE = "tbl";
 
-    private static CFMetaData cfm;
-    private static ColumnDefinition v;
-    private static ColumnDefinition s;
+    private static TableMetadata metadata;
+    private static ColumnMetadata v;
+    private static ColumnMetadata s;
 
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
         DatabaseDescriptor.daemonInitialization();
 
-        cfm = CFMetaData.Builder.create(KEYSPACE, TABLE)
-                                .addPartitionKey("k", UTF8Type.instance)
-                                .addStaticColumn("s", UTF8Type.instance)
-                                .addClusteringColumn("i", IntegerType.instance)
-                                .addRegularColumn("v", UTF8Type.instance)
-                                .build();
+        metadata =
+            TableMetadata.builder(KEYSPACE, TABLE)
+                         .addPartitionKeyColumn("k", UTF8Type.instance)
+                         .addStaticColumn("s", UTF8Type.instance)
+                         .addClusteringColumn("i", IntegerType.instance)
+                         .addRegularColumn("v", UTF8Type.instance)
+                         .build();
 
         SchemaLoader.prepareServer();
-        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), cfm);
-        cfm = Schema.instance.getCFMetaData(KEYSPACE, TABLE);
-        v = cfm.getColumnDefinition(new ColumnIdentifier("v", true));
-        s = cfm.getColumnDefinition(new ColumnIdentifier("s", true));
+        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), 
metadata);
+        v = metadata.getColumn(new ColumnIdentifier("v", true));
+        s = metadata.getColumn(new ColumnIdentifier("s", true));
     }
 
     @Before
@@ -103,7 +100,7 @@ public class SinglePartitionSliceCommandTest
         Assert.assertTrue(ri.columns().contains(s));
         Row staticRow = ri.staticRow();
         Iterator<Cell> cellIterator = staticRow.cells().iterator();
-        Assert.assertTrue(staticRow.toString(cfm, true), 
cellIterator.hasNext());
+        Assert.assertTrue(staticRow.toString(metadata, true), 
cellIterator.hasNext());
         Cell cell = cellIterator.next();
         Assert.assertEquals(s, cell.column());
         Assert.assertEquals(ByteBufferUtil.bytesToHex(cell.value()), 
ByteBufferUtil.bytes("s"), cell.value());
@@ -113,14 +110,14 @@ public class SinglePartitionSliceCommandTest
     @Test
     public void staticColumnsAreReturned() throws IOException
     {
-        DecoratedKey key = cfm.decorateKey(ByteBufferUtil.bytes("k1"));
+        DecoratedKey key = 
metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
 
         QueryProcessor.executeInternal("INSERT INTO ks.tbl (k, s) VALUES 
('k1', 's')");
         Assert.assertFalse(QueryProcessor.executeInternal("SELECT s FROM 
ks.tbl WHERE k='k1'").isEmpty());
 
-        ColumnFilter columnFilter = 
ColumnFilter.selection(PartitionColumns.of(s));
+        ColumnFilter columnFilter = 
ColumnFilter.selection(RegularAndStaticColumns.of(s));
         ClusteringIndexSliceFilter sliceFilter = new 
ClusteringIndexSliceFilter(Slices.NONE, false);
-        ReadCommand cmd = new SinglePartitionReadCommand(false, 
MessagingService.VERSION_30, cfm,
+        ReadCommand cmd = new SinglePartitionReadCommand(false, 
MessagingService.VERSION_30, metadata,
                                                          
FBUtilities.nowInSeconds(),
                                                          columnFilter,
                                                          RowFilter.NONE,
@@ -155,7 +152,7 @@ public class SinglePartitionSliceCommandTest
         }
 
         // check (de)serialized iterator for sstable static cell
-        
Schema.instance.getColumnFamilyStoreInstance(cfm.cfId).forceBlockingFlush();
+        
Schema.instance.getColumnFamilyStoreInstance(metadata.id).forceBlockingFlush();
         try (ReadExecutionController executionController = 
cmd.executionController(); UnfilteredPartitionIterator pi = 
cmd.executeLocally(executionController))
         {
             response = ReadResponse.createDataResponse(pi, cmd);
@@ -173,12 +170,12 @@ public class SinglePartitionSliceCommandTest
     @Test
     public void toCQLStringIsSafeToCall() throws IOException
     {
-        DecoratedKey key = cfm.decorateKey(ByteBufferUtil.bytes("k1"));
+        DecoratedKey key = 
metadata.partitioner.decorateKey(ByteBufferUtil.bytes("k1"));
 
-        ColumnFilter columnFilter = 
ColumnFilter.selection(PartitionColumns.of(s));
+        ColumnFilter columnFilter = 
ColumnFilter.selection(RegularAndStaticColumns.of(s));
         Slice slice = Slice.make(ClusteringBound.BOTTOM, 
ClusteringBound.inclusiveEndOf(ByteBufferUtil.bytes("i1")));
-        ClusteringIndexSliceFilter sliceFilter = new 
ClusteringIndexSliceFilter(Slices.with(cfm.comparator, slice), false);
-        ReadCommand cmd = new SinglePartitionReadCommand(false, 
MessagingService.VERSION_30, cfm,
+        ClusteringIndexSliceFilter sliceFilter = new 
ClusteringIndexSliceFilter(Slices.with(metadata.comparator, slice), false);
+        ReadCommand cmd = new SinglePartitionReadCommand(false, 
MessagingService.VERSION_30, metadata,
                                                          
FBUtilities.nowInSeconds(),
                                                          columnFilter,
                                                          RowFilter.NONE,

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java 
b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
index d188821..feac1e9 100644
--- a/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
+++ b/test/unit/org/apache/cassandra/db/SystemKeyspaceTest.java
@@ -30,7 +30,7 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.SchemaConstants;
+import org.apache.cassandra.schema.SchemaConstants;
 import org.apache.cassandra.cql3.QueryProcessor;
 import org.apache.cassandra.cql3.UntypedResultSet;
 import org.apache.cassandra.dht.ByteOrderedPartitioner.BytesToken;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/TransformerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/TransformerTest.java 
b/test/unit/org/apache/cassandra/db/TransformerTest.java
index fe87af8..eda3a52 100644
--- a/test/unit/org/apache/cassandra/db/TransformerTest.java
+++ b/test/unit/org/apache/cassandra/db/TransformerTest.java
@@ -25,7 +25,7 @@ import org.junit.Test;
 
 import junit.framework.Assert;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.marshal.Int32Type;
@@ -44,18 +44,18 @@ public class TransformerTest
         DatabaseDescriptor.daemonInitialization();
     }
 
-    static final CFMetaData metadata = metadata();
+    static final TableMetadata metadata = metadata();
     static final DecoratedKey partitionKey = new BufferDecoratedKey(new 
Murmur3Partitioner.LongToken(0L), ByteBufferUtil.EMPTY_BYTE_BUFFER);
-    static final Row staticRow = 
BTreeRow.singleCellRow(Clustering.STATIC_CLUSTERING, new 
BufferCell(metadata.partitionColumns().columns(true).getSimple(0), 0L, 0, 0, 
ByteBufferUtil.bytes(-1), null));
+    static final Row staticRow = 
BTreeRow.singleCellRow(Clustering.STATIC_CLUSTERING, new 
BufferCell(metadata.regularAndStaticColumns().columns(true).getSimple(0), 0L, 
0, 0, ByteBufferUtil.bytes(-1), null));
 
-    static CFMetaData metadata()
+    static TableMetadata metadata()
     {
-        CFMetaData.Builder builder = CFMetaData.Builder.create("", "");
-        builder.addPartitionKey("pk", BytesType.instance);
-        builder.addClusteringColumn("c", Int32Type.instance);
-        builder.addStaticColumn("s", Int32Type.instance);
-        builder.addRegularColumn("v", Int32Type.instance);
-        return builder.build();
+        return TableMetadata.builder("", "")
+                            .addPartitionKeyColumn("pk", BytesType.instance)
+                            .addClusteringColumn("c", Int32Type.instance)
+                            .addStaticColumn("s", Int32Type.instance)
+                            .addRegularColumn("v", Int32Type.instance)
+                            .build();
     }
 
     // Mock Data
@@ -78,7 +78,7 @@ public class TransformerTest
             return (U) row(i);
         }
 
-        public CFMetaData metadata()
+        public TableMetadata metadata()
         {
             return metadata;
         }
@@ -88,9 +88,9 @@ public class TransformerTest
             return false;
         }
 
-        public PartitionColumns columns()
+        public RegularAndStaticColumns columns()
         {
-            return metadata.partitionColumns();
+            return metadata.regularAndStaticColumns();
         }
 
         public DecoratedKey partitionKey()
@@ -150,7 +150,7 @@ public class TransformerTest
     private static Row row(int i)
     {
         return BTreeRow.singleCellRow(Util.clustering(metadata.comparator, i),
-                                      new 
BufferCell(metadata.partitionColumns().columns(false).getSimple(0), 1L, 
BufferCell.NO_TTL, BufferCell.NO_DELETION_TIME, ByteBufferUtil.bytes(i), null));
+                                      new 
BufferCell(metadata.regularAndStaticColumns().columns(false).getSimple(0), 1L, 
BufferCell.NO_TTL, BufferCell.NO_DELETION_TIME, ByteBufferUtil.bytes(i), null));
     }
 
     // Transformations that check mock data ranges

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/VerifyTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/VerifyTest.java 
b/test/unit/org/apache/cassandra/db/VerifyTest.java
index 77096b9..b77d56d 100644
--- a/test/unit/org/apache/cassandra/db/VerifyTest.java
+++ b/test/unit/org/apache/cassandra/db/VerifyTest.java
@@ -21,7 +21,6 @@ package org.apache.cassandra.db;
 import com.google.common.base.Charsets;
 
 import org.apache.cassandra.OrderedJUnit4ClassRunner;
-import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.cache.ChunkCache;
 import org.apache.cassandra.UpdateBuilder;
@@ -48,6 +47,10 @@ import java.nio.file.Files;
 import java.util.zip.CRC32;
 import java.util.zip.CheckedInputStream;
 
+import static org.apache.cassandra.SchemaLoader.counterCFMD;
+import static org.apache.cassandra.SchemaLoader.createKeyspace;
+import static org.apache.cassandra.SchemaLoader.loadSchema;
+import static org.apache.cassandra.SchemaLoader.standardCFMD;
 import static org.junit.Assert.fail;
 
 @RunWith(OrderedJUnit4ClassRunner.class)
@@ -74,22 +77,22 @@ public class VerifyTest
     {
         CompressionParams compressionParameters = 
CompressionParams.snappy(32768);
 
-        SchemaLoader.loadSchema();
-        SchemaLoader.createKeyspace(KEYSPACE,
-                                    KeyspaceParams.simple(1),
-                                    SchemaLoader.standardCFMD(KEYSPACE, 
CF).compression(compressionParameters),
-                                    SchemaLoader.standardCFMD(KEYSPACE, 
CF2).compression(compressionParameters),
-                                    SchemaLoader.standardCFMD(KEYSPACE, CF3),
-                                    SchemaLoader.standardCFMD(KEYSPACE, CF4),
-                                    SchemaLoader.standardCFMD(KEYSPACE, 
CORRUPT_CF),
-                                    SchemaLoader.standardCFMD(KEYSPACE, 
CORRUPT_CF2),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
COUNTER_CF).compression(compressionParameters),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
COUNTER_CF2).compression(compressionParameters),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
COUNTER_CF3),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
COUNTER_CF4),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
CORRUPTCOUNTER_CF),
-                                    SchemaLoader.counterCFMD(KEYSPACE, 
CORRUPTCOUNTER_CF2),
-                                    SchemaLoader.standardCFMD(KEYSPACE, 
CF_UUID, 0, UUIDType.instance));
+        loadSchema();
+        createKeyspace(KEYSPACE,
+                       KeyspaceParams.simple(1),
+                       standardCFMD(KEYSPACE, 
CF).compression(compressionParameters),
+                       standardCFMD(KEYSPACE, 
CF2).compression(compressionParameters),
+                       standardCFMD(KEYSPACE, CF3),
+                       standardCFMD(KEYSPACE, CF4),
+                       standardCFMD(KEYSPACE, CORRUPT_CF),
+                       standardCFMD(KEYSPACE, CORRUPT_CF2),
+                       counterCFMD(KEYSPACE, 
COUNTER_CF).compression(compressionParameters),
+                       counterCFMD(KEYSPACE, 
COUNTER_CF2).compression(compressionParameters),
+                       counterCFMD(KEYSPACE, COUNTER_CF3),
+                       counterCFMD(KEYSPACE, COUNTER_CF4),
+                       counterCFMD(KEYSPACE, CORRUPTCOUNTER_CF),
+                       counterCFMD(KEYSPACE, CORRUPTCOUNTER_CF2),
+                       standardCFMD(KEYSPACE, CF_UUID, 0, UUIDType.instance));
     }
 
 
@@ -352,7 +355,7 @@ public class VerifyTest
     {
         for (int i = 0; i < partitionsPerSSTable; i++)
         {
-            UpdateBuilder.create(cfs.metadata, String.valueOf(i))
+            UpdateBuilder.create(cfs.metadata(), String.valueOf(i))
                          .newRow("c1").add("val", "1")
                          .newRow("c2").add("val", "2")
                          .apply();
@@ -365,7 +368,7 @@ public class VerifyTest
     {
         for (int i = 0; i < partitionsPerSSTable; i++)
         {
-            UpdateBuilder.create(cfs.metadata, String.valueOf(i))
+            UpdateBuilder.create(cfs.metadata(), String.valueOf(i))
                          .newRow("c1").add("val", 100L)
                          .apply();
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
index edff3b7..6eaf2c8 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogReaderTest.java
@@ -27,8 +27,8 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.config.CFMetaData;
-import org.apache.cassandra.config.ColumnDefinition;
+import org.apache.cassandra.schema.ColumnMetadata;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.Config;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
@@ -158,14 +158,14 @@ public class CommitLogReaderTest extends CQLTester
     }
 
     /**
-     * Since we have both cfm and non mixed into the CL, we ignore updates 
that aren't for the cfm the test handler
+     * Since we have both table and non mixed into the CL, we ignore updates 
that aren't for the table the test handler
      * is configured to check.
      * @param handler
      * @param offset integer offset of count we expect to see in record
      */
     private void confirmReadOrder(TestCLRHandler handler, int offset)
     {
-        ColumnDefinition cd = currentTableMetadata().getColumnDefinition(new 
ColumnIdentifier("data", false));
+        ColumnMetadata cd = currentTableMetadata().getColumn(new 
ColumnIdentifier("data", false));
         int i = 0;
         int j = 0;
         while (i + j < handler.seenMutationCount())
@@ -208,17 +208,17 @@ public class CommitLogReaderTest extends CQLTester
         public List<Mutation> seenMutations = new ArrayList<Mutation>();
         public boolean sawStopOnErrorCheck = false;
 
-        private final CFMetaData cfm;
+        private final TableMetadata metadata;
 
         // Accept all
         public TestCLRHandler()
         {
-            this.cfm = null;
+            this.metadata = null;
         }
 
-        public TestCLRHandler(CFMetaData cfm)
+        public TestCLRHandler(TableMetadata metadata)
         {
-            this.cfm = cfm;
+            this.metadata = metadata;
         }
 
         public boolean shouldSkipSegmentOnError(CommitLogReadException 
exception) throws IOException
@@ -234,7 +234,7 @@ public class CommitLogReaderTest extends CQLTester
 
         public void handleMutation(Mutation m, int size, int entryLocation, 
CommitLogDescriptor desc)
         {
-            if ((cfm == null) || (cfm != null && m.get(cfm) != null)) {
+            if ((metadata == null) || (metadata != null && m.get(metadata) != 
null)) {
                 seenMutations.add(m);
             }
         }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
index 68ce57d..3ae1ae4 100644
--- 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
+++ 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerCDCTest.java
@@ -29,7 +29,7 @@ import org.junit.Before;
 import org.junit.BeforeClass;
 import org.junit.Test;
 
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.cql3.CQLTester;
 import org.apache.cassandra.db.Keyspace;
@@ -61,7 +61,7 @@ public class CommitLogSegmentManagerCDCTest extends CQLTester
     {
         createTable("CREATE TABLE %s (idx int, data text, primary key(idx)) 
WITH cdc=true;");
         CommitLogSegmentManagerCDC cdcMgr = 
(CommitLogSegmentManagerCDC)CommitLog.instance.segmentManager;
-        CFMetaData cfm = currentTableMetadata();
+        TableMetadata cfm = currentTableMetadata();
 
         // Confirm that logic to check for whether or not we can allocate new 
CDC segments works
         Integer originalCDCSize = DatabaseDescriptor.getCDCSpaceInMB();
@@ -159,7 +159,7 @@ public class CommitLogSegmentManagerCDCTest extends 
CQLTester
         try
         {
             DatabaseDescriptor.setCDCSpaceInMB(16);
-            CFMetaData ccfm = 
Keyspace.open(keyspace()).getColumnFamilyStore(ct).metadata;
+            TableMetadata ccfm = 
Keyspace.open(keyspace()).getColumnFamilyStore(ct).metadata();
             // Spin until we hit CDC capacity and make sure we get a 
WriteTimeout
             try
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerTest.java 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerTest.java
index e22e86f..06513a5 100644
--- 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerTest.java
+++ 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogSegmentManagerTest.java
@@ -92,7 +92,7 @@ public class CommitLogSegmentManagerTest
 
         ColumnFamilyStore cfs1 = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
 
-        final Mutation m = new RowUpdateBuilder(cfs1.metadata, 0, 
"k").clustering("bytes")
+        final Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, 
"k").clustering("bytes")
                                                                       
.add("val", ByteBuffer.wrap(entropy))
                                                                       .build();
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
index 19305ac..f98dd6b 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogTest.java
@@ -37,7 +37,8 @@ import org.junit.runners.Parameterized.Parameters;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.config.ParameterizedClass;
 import org.apache.cassandra.config.Config.DiskFailurePolicy;
@@ -229,7 +230,7 @@ public class CommitLogTest
         ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
 
         // Roughly 32 MB mutation
-        Mutation m = new RowUpdateBuilder(cfs1.metadata, 0, "k")
+        Mutation m = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                      .clustering("bytes")
                      .add("val", 
ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
                      .build();
@@ -242,7 +243,7 @@ public class CommitLogTest
         CommitLog.instance.add(m);
 
         // Adding new mutation on another CF
-        Mutation m2 = new RowUpdateBuilder(cfs2.metadata, 0, "k")
+        Mutation m2 = new RowUpdateBuilder(cfs2.metadata(), 0, "k")
                       .clustering("bytes")
                       .add("val", ByteBuffer.allocate(4))
                       .build();
@@ -250,8 +251,8 @@ public class CommitLogTest
 
         assertEquals(2, 
CommitLog.instance.segmentManager.getActiveSegments().size());
 
-        UUID cfid2 = m2.getColumnFamilyIds().iterator().next();
-        CommitLog.instance.discardCompletedSegments(cfid2, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
+        TableId id2 = m2.getTableIds().iterator().next();
+        CommitLog.instance.discardCompletedSegments(id2, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
 
         // Assert we still have both our segments
         assertEquals(2, 
CommitLog.instance.segmentManager.getActiveSegments().size());
@@ -265,7 +266,7 @@ public class CommitLogTest
         ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
 
         // Roughly 32 MB mutation
-         Mutation rm = new RowUpdateBuilder(cfs1.metadata, 0, "k")
+         Mutation rm = new RowUpdateBuilder(cfs1.metadata(), 0, "k")
                   .clustering("bytes")
                   .add("val", 
ByteBuffer.allocate((DatabaseDescriptor.getCommitLogSegmentSize()/4) - 1))
                   .build();
@@ -277,14 +278,14 @@ public class CommitLogTest
         assertEquals(1, 
CommitLog.instance.segmentManager.getActiveSegments().size());
 
         // "Flush": this won't delete anything
-        UUID cfid1 = rm.getColumnFamilyIds().iterator().next();
+        TableId id1 = rm.getTableIds().iterator().next();
         CommitLog.instance.sync();
-        CommitLog.instance.discardCompletedSegments(cfid1, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
+        CommitLog.instance.discardCompletedSegments(id1, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
 
         assertEquals(1, 
CommitLog.instance.segmentManager.getActiveSegments().size());
 
         // Adding new mutation on another CF, large enough (including CL entry 
overhead) that a new segment is created
-        Mutation rm2 = new RowUpdateBuilder(cfs2.metadata, 0, "k")
+        Mutation rm2 = new RowUpdateBuilder(cfs2.metadata(), 0, "k")
                        .clustering("bytes")
                        .add("val", 
ByteBuffer.allocate(DatabaseDescriptor.getMaxMutationSize() - 200))
                        .build();
@@ -302,8 +303,8 @@ public class CommitLogTest
         // "Flush" second cf: The first segment should be deleted since we
         // didn't write anything on cf1 since last flush (and we flush cf2)
 
-        UUID cfid2 = rm2.getColumnFamilyIds().iterator().next();
-        CommitLog.instance.discardCompletedSegments(cfid2, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
+        TableId id2 = rm2.getTableIds().iterator().next();
+        CommitLog.instance.discardCompletedSegments(id2, 
CommitLogPosition.NONE, CommitLog.instance.getCurrentPosition());
 
         segments = CommitLog.instance.segmentManager.getActiveSegments();
 
@@ -315,9 +316,9 @@ public class CommitLogTest
 
     private String getDirtyCFIds(Collection<CommitLogSegment> segments)
     {
-        return "Dirty cfIds: <"
+        return "Dirty tableIds: <"
                + String.join(", ", segments.stream()
-                                           
.map(CommitLogSegment::getDirtyCFIDs)
+                                           
.map(CommitLogSegment::getDirtyTableIds)
                                            .flatMap(uuids -> uuids.stream())
                                            .distinct()
                                            .map(uuid -> 
uuid.toString()).collect(Collectors.toList()))
@@ -330,7 +331,7 @@ public class CommitLogTest
         // We don't want to allocate a size of 0 as this is optimized under 
the hood and our computation would
         // break testEqualRecordLimit
         int allocSize = 1;
-        Mutation rm = new RowUpdateBuilder(cfs.metadata, 0, key)
+        Mutation rm = new RowUpdateBuilder(cfs.metadata(), 0, key)
                       .clustering(colName)
                       .add("val", ByteBuffer.allocate(allocSize)).build();
 
@@ -358,7 +359,7 @@ public class CommitLogTest
     public void testEqualRecordLimit() throws Exception
     {
         ColumnFamilyStore cfs = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
-        Mutation rm = new RowUpdateBuilder(cfs.metadata, 0, "k")
+        Mutation rm = new RowUpdateBuilder(cfs.metadata(), 0, "k")
                       .clustering("bytes")
                       .add("val", ByteBuffer.allocate(getMaxRecordDataSize()))
                       .build();
@@ -370,7 +371,7 @@ public class CommitLogTest
     {
         Keyspace ks = Keyspace.open(KEYSPACE1);
         ColumnFamilyStore cfs = ks.getColumnFamilyStore(STANDARD1);
-        Mutation rm = new RowUpdateBuilder(cfs.metadata, 0, "k")
+        Mutation rm = new RowUpdateBuilder(cfs.metadata(), 0, "k")
                       .clustering("bytes")
                       .add("val", ByteBuffer.allocate(1 + 
getMaxRecordDataSize()))
                       .build();
@@ -540,10 +541,10 @@ public class CommitLogTest
             ColumnFamilyStore cfs1 = ks.getColumnFamilyStore(STANDARD1);
             ColumnFamilyStore cfs2 = ks.getColumnFamilyStore(STANDARD2);
 
-            new RowUpdateBuilder(cfs1.metadata, 0, 
"k").clustering("bytes").add("val", 
ByteBuffer.allocate(100)).build().applyUnsafe();
+            new RowUpdateBuilder(cfs1.metadata(), 0, 
"k").clustering("bytes").add("val", 
ByteBuffer.allocate(100)).build().applyUnsafe();
             cfs1.truncateBlocking();
             DatabaseDescriptor.setAutoSnapshot(prev);
-            Mutation m2 = new RowUpdateBuilder(cfs2.metadata, 0, "k")
+            Mutation m2 = new RowUpdateBuilder(cfs2.metadata(), 0, "k")
                           .clustering("bytes")
                           .add("val", 
ByteBuffer.allocate(DatabaseDescriptor.getCommitLogSegmentSize() / 4))
                           .build();
@@ -555,8 +556,8 @@ public class CommitLogTest
             CommitLogPosition position = 
CommitLog.instance.getCurrentPosition();
             for (Keyspace keyspace : Keyspace.system())
                 for (ColumnFamilyStore syscfs : 
keyspace.getColumnFamilyStores())
-                    
CommitLog.instance.discardCompletedSegments(syscfs.metadata.cfId, 
CommitLogPosition.NONE, position);
-            CommitLog.instance.discardCompletedSegments(cfs2.metadata.cfId, 
CommitLogPosition.NONE, position);
+                    
CommitLog.instance.discardCompletedSegments(syscfs.metadata().id, 
CommitLogPosition.NONE, position);
+            CommitLog.instance.discardCompletedSegments(cfs2.metadata().id, 
CommitLogPosition.NONE, position);
             assertEquals(1, 
CommitLog.instance.segmentManager.getActiveSegments().size());
         }
         finally
@@ -576,7 +577,7 @@ public class CommitLogTest
             
Assert.assertFalse(notDurableKs.getMetadata().params.durableWrites);
 
             ColumnFamilyStore cfs = 
notDurableKs.getColumnFamilyStore("Standard1");
-            new RowUpdateBuilder(cfs.metadata, 0, "key1")
+            new RowUpdateBuilder(cfs.metadata(), 0, "key1")
             .clustering("bytes").add("val", bytes("abcd"))
             .build()
             .applyUnsafe();
@@ -599,14 +600,14 @@ public class CommitLogTest
     {
         int cellCount = 0;
         ColumnFamilyStore cfs = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(STANDARD1);
-        final Mutation rm1 = new RowUpdateBuilder(cfs.metadata, 0, "k1")
+        final Mutation rm1 = new RowUpdateBuilder(cfs.metadata(), 0, "k1")
                              .clustering("bytes")
                              .add("val", bytes("this is a string"))
                              .build();
         cellCount += 1;
         CommitLog.instance.add(rm1);
 
-        final Mutation rm2 = new RowUpdateBuilder(cfs.metadata, 0, "k2")
+        final Mutation rm2 = new RowUpdateBuilder(cfs.metadata(), 0, "k2")
                              .clustering("bytes")
                              .add("val", bytes("this is a string"))
                              .build();
@@ -615,7 +616,7 @@ public class CommitLogTest
 
         CommitLog.instance.sync();
 
-        SimpleCountingReplayer replayer = new 
SimpleCountingReplayer(CommitLog.instance, CommitLogPosition.NONE, 
cfs.metadata);
+        SimpleCountingReplayer replayer = new 
SimpleCountingReplayer(CommitLog.instance, CommitLogPosition.NONE, 
cfs.metadata());
         List<String> activeSegments = 
CommitLog.instance.getActiveSegmentNames();
         Assert.assertFalse(activeSegments.isEmpty());
 
@@ -636,7 +637,7 @@ public class CommitLogTest
 
         for (int i = 0; i < max; i++)
         {
-            final Mutation rm1 = new RowUpdateBuilder(cfs.metadata, 0, "k" + 1)
+            final Mutation rm1 = new RowUpdateBuilder(cfs.metadata(), 0, "k" + 
1)
                                  .clustering("bytes")
                                  .add("val", bytes("this is a string"))
                                  .build();
@@ -652,7 +653,7 @@ public class CommitLogTest
 
         CommitLog.instance.sync();
 
-        SimpleCountingReplayer replayer = new 
SimpleCountingReplayer(CommitLog.instance, commitLogPosition, cfs.metadata);
+        SimpleCountingReplayer replayer = new 
SimpleCountingReplayer(CommitLog.instance, commitLogPosition, cfs.metadata());
         List<String> activeSegments = 
CommitLog.instance.getActiveSegmentNames();
         Assert.assertFalse(activeSegments.isEmpty());
 
@@ -665,15 +666,15 @@ public class CommitLogTest
     class SimpleCountingReplayer extends CommitLogReplayer
     {
         private final CommitLogPosition filterPosition;
-        private final CFMetaData metadata;
+        private final TableMetadata metadata;
         int cells;
         int skipped;
 
-        SimpleCountingReplayer(CommitLog commitLog, CommitLogPosition 
filterPosition, CFMetaData cfm)
+        SimpleCountingReplayer(CommitLog commitLog, CommitLogPosition 
filterPosition, TableMetadata metadata)
         {
             super(commitLog, filterPosition, Collections.emptyMap(), 
ReplayFilter.create());
             this.filterPosition = filterPosition;
-            this.metadata = cfm;
+            this.metadata = metadata;
         }
 
         @SuppressWarnings("resource")
@@ -694,7 +695,7 @@ public class CommitLogTest
             {
                 // Only process mutations for the CF's we're testing against, 
since we can't deterministically predict
                 // whether or not system keyspaces will be mutated during a 
test.
-                if (partitionUpdate.metadata().cfName.equals(metadata.cfName))
+                if (partitionUpdate.metadata().name.equals(metadata.name))
                 {
                     for (Row row : partitionUpdate)
                         cells += Iterables.size(row.cells());
@@ -716,7 +717,7 @@ public class CommitLogTest
 
             for (int i = 0 ; i < 5 ; i++)
             {
-                new RowUpdateBuilder(cfs.metadata, 0, "k")
+                new RowUpdateBuilder(cfs.metadata(), 0, "k")
                     .clustering("c" + i).add("val", ByteBuffer.allocate(100))
                     .build()
                     .apply();
@@ -759,7 +760,7 @@ public class CommitLogTest
 
         for (int i = 0 ; i < 5 ; i++)
         {
-            new RowUpdateBuilder(cfs.metadata, 0, "k")
+            new RowUpdateBuilder(cfs.metadata(), 0, "k")
                 .clustering("c" + i).add("val", ByteBuffer.allocate(100))
                 .build()
                 .apply();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
index e25d2f1..1ca9a80 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTest.java
@@ -24,7 +24,6 @@ package org.apache.cassandra.db.commitlog;
 import java.io.*;
 import java.nio.ByteBuffer;
 import java.util.Properties;
-import java.util.UUID;
 
 import junit.framework.Assert;
 
@@ -36,9 +35,11 @@ import org.junit.BeforeClass;
 import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.KeyspaceMetadata;
+import org.apache.cassandra.schema.TableId;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.db.rows.Cell;
 import org.apache.cassandra.db.rows.Row;
@@ -46,6 +47,7 @@ import org.apache.cassandra.db.marshal.AsciiType;
 import org.apache.cassandra.db.marshal.BytesType;
 import org.apache.cassandra.db.partitions.PartitionUpdate;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.Tables;
 import org.apache.cassandra.security.EncryptionContextGenerator;
 import org.apache.cassandra.utils.JVMStabilityInspector;
 import org.apache.cassandra.utils.KillerForTests;
@@ -76,12 +78,15 @@ public class CommitLogUpgradeTest
     private KillerForTests killerForTests;
     private boolean shouldBeKilled = false;
 
-    static CFMetaData metadata = CFMetaData.Builder.createDense(KEYSPACE, 
TABLE, false, false)
-                                                   .addPartitionKey("key", 
AsciiType.instance)
-                                                   .addClusteringColumn("col", 
AsciiType.instance)
-                                                   .addRegularColumn("val", 
BytesType.instance)
-                                                   .build()
-                                                   
.compression(SchemaLoader.getCompressionParameters());
+    static TableMetadata metadata =
+        TableMetadata.builder(KEYSPACE, TABLE)
+                     .isCompound(false)
+                     .isDense(true)
+                     .addPartitionKeyColumn("key", AsciiType.instance)
+                     .addClusteringColumn("col", AsciiType.instance)
+                     .addRegularColumn("val", BytesType.instance)
+                     .compression(SchemaLoader.getCompressionParameters())
+                     .build();
 
     @Before
     public void prepareToBeKilled()
@@ -107,9 +112,7 @@ public class CommitLogUpgradeTest
     public static void initialize()
     {
         SchemaLoader.loadSchema();
-        SchemaLoader.createKeyspace(KEYSPACE,
-                                    KeyspaceParams.simple(1),
-                                    metadata);
+        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), 
metadata);
         
DatabaseDescriptor.setEncryptionContext(EncryptionContextGenerator.createContext(true));
     }
 
@@ -123,13 +126,9 @@ public class CommitLogUpgradeTest
         String cfidString = prop.getProperty(CFID_PROPERTY);
         if (cfidString != null)
         {
-            UUID cfid = UUID.fromString(cfidString);
-            if (Schema.instance.getCF(cfid) == null)
-            {
-                CFMetaData cfm = Schema.instance.getCFMetaData(KEYSPACE, 
TABLE);
-                Schema.instance.unload(cfm);
-                Schema.instance.load(cfm.copy(cfid));
-            }
+            TableId tableId = TableId.fromString(cfidString);
+            if (Schema.instance.getTableMetadata(tableId) == null)
+                Schema.instance.load(KeyspaceMetadata.create(KEYSPACE, 
KeyspaceParams.simple(1), Tables.of(metadata.unbuild().id(tableId).build())));
         }
 
         Hasher hasher = new Hasher();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java 
b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
index 5a03f9f..d2ad42f 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/CommitLogUpgradeTestMaker.java
@@ -38,7 +38,7 @@ import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
 import org.apache.cassandra.UpdateBuilder;
 import org.apache.cassandra.config.DatabaseDescriptor;
-import org.apache.cassandra.config.Schema;
+import org.apache.cassandra.schema.Schema;
 import org.apache.cassandra.db.Mutation;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.util.FileUtils;
@@ -92,9 +92,7 @@ public class CommitLogUpgradeTestMaker
         }
 
         SchemaLoader.loadSchema();
-        SchemaLoader.createKeyspace(KEYSPACE,
-                                    KeyspaceParams.simple(1),
-                                    metadata);
+        SchemaLoader.createKeyspace(KEYSPACE, KeyspaceParams.simple(1), 
metadata);
     }
 
     public void makeLog() throws IOException, InterruptedException
@@ -134,7 +132,7 @@ public class CommitLogUpgradeTestMaker
             FileUtils.createHardLink(f, new File(dataDir, f.getName()));
 
         Properties prop = new Properties();
-        prop.setProperty(CFID_PROPERTY, Schema.instance.getId(KEYSPACE, 
TABLE).toString());
+        prop.setProperty(CFID_PROPERTY, 
Schema.instance.getTableMetadata(KEYSPACE, TABLE).id.toString());
         prop.setProperty(CELLS_PROPERTY, Integer.toString(cells));
         prop.setProperty(HASH_PROPERTY, Integer.toString(hash));
         prop.store(new FileOutputStream(new File(dataDir, PROPERTIES_FILE)),
@@ -236,7 +234,7 @@ public class CommitLogUpgradeTestMaker
                     rl.acquire();
                 ByteBuffer key = randomBytes(16, tlr);
 
-                UpdateBuilder builder = 
UpdateBuilder.create(Schema.instance.getCFMetaData(KEYSPACE, TABLE), 
Util.dk(key));
+                UpdateBuilder builder = 
UpdateBuilder.create(Schema.instance.getTableMetadata(KEYSPACE, TABLE), 
Util.dk(key));
 
                 for (int ii = 0; ii < numCells; ii++)
                 {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java 
b/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
index 413e716..b3dc070 100644
--- a/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
+++ b/test/unit/org/apache/cassandra/db/commitlog/SnapshotDeletingTest.java
@@ -25,7 +25,7 @@ import static org.junit.Assert.*;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -90,7 +90,7 @@ public class SnapshotDeletingTest
 
     private void populate(int rowCount) {
         long timestamp = System.currentTimeMillis();
-        CFMetaData cfm = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata;
+        TableMetadata cfm = 
Keyspace.open(KEYSPACE1).getColumnFamilyStore(CF_STANDARD1).metadata();
         for (int i = 0; i <= rowCount; i++)
         {
             DecoratedKey key = Util.dk(Integer.toString(i));

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
----------------------------------------------------------------------
diff --git 
a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java 
b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
index 399a935..5a7bfed 100644
--- a/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/AntiCompactionTest.java
@@ -27,12 +27,11 @@ import java.util.UUID;
 
 import com.google.common.collect.ImmutableSet;
 import com.google.common.collect.Iterables;
-import com.google.common.util.concurrent.RateLimiter;
 import org.junit.BeforeClass;
 import org.junit.After;
 import org.junit.Test;
 
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.*;
 import org.apache.cassandra.db.lifecycle.LifecycleTransaction;
 import org.apache.cassandra.db.rows.EncodingStats;
@@ -63,16 +62,14 @@ public class AntiCompactionTest
 {
     private static final String KEYSPACE1 = "AntiCompactionTest";
     private static final String CF = "AntiCompactionTest";
-    private static CFMetaData cfm;
+    private static TableMetadata metadata;
 
     @BeforeClass
     public static void defineSchema() throws ConfigurationException
     {
         SchemaLoader.prepareServer();
-        cfm = SchemaLoader.standardCFMD(KEYSPACE1, CF);
-        SchemaLoader.createKeyspace(KEYSPACE1,
-                                    KeyspaceParams.simple(1),
-                                    cfm);
+        metadata = SchemaLoader.standardCFMD(KEYSPACE1, CF).build();
+        SchemaLoader.createKeyspace(KEYSPACE1, KeyspaceParams.simple(1), metadata);
     }
 
     @After
@@ -169,11 +166,11 @@ public class AntiCompactionTest
         File dir = cfs.getDirectories().getDirectoryForNewSSTables();
         Descriptor desc = cfs.newSSTableDescriptor(dir);
 
-        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, new SerializationHeader(true, cfm, cfm.partitionColumns(), EncodingStats.NO_STATS)))
+        try (SSTableTxnWriter writer = SSTableTxnWriter.create(cfs, desc, 0, 0, new SerializationHeader(true, cfs.metadata(), cfs.metadata().regularAndStaticColumns(), EncodingStats.NO_STATS)))
         {
             for (int i = 0; i < count; i++)
             {
-                UpdateBuilder builder = UpdateBuilder.create(cfm, ByteBufferUtil.bytes(i));
+                UpdateBuilder builder = UpdateBuilder.create(metadata, ByteBufferUtil.bytes(i));
                 for (int j = 0; j < count * 5; j++)
                     builder.newRow("c" + j).add("val", "value1");
                 writer.append(builder.build().unfilteredIterator());
@@ -191,7 +188,7 @@ public class AntiCompactionTest
         for (int i = 0; i < 10; i++)
         {
             String localSuffix = Integer.toString(i);
-            new RowUpdateBuilder(cfm, System.currentTimeMillis(), localSuffix + "-" + Suffix)
+            new RowUpdateBuilder(metadata, System.currentTimeMillis(), localSuffix + "-" + Suffix)
                     .clustering("c")
                     .add("val", "val" + localSuffix)
                     .build()
@@ -328,7 +325,7 @@ public class AntiCompactionTest
         store.disableAutoCompaction();
         for (int i = 0; i < 10; i++)
         {
-            new RowUpdateBuilder(cfm, System.currentTimeMillis(), Integer.toString(i))
+            new RowUpdateBuilder(metadata, System.currentTimeMillis(), Integer.toString(i))
                 .clustering("c")
                 .add("val", "val")
                 .build()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
index f08ea97..e0f24f2 100644
--- a/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/BlacklistingCompactionsTest.java
@@ -91,7 +91,7 @@ public class BlacklistingCompactionsTest
     /**
      * Return a table metadata, we use types with fixed size to increase the chance of detecting corrupt data
      */
-    private static CFMetaData makeTable(String tableName)
+    private static TableMetadata.Builder makeTable(String tableName)
     {
         return SchemaLoader.standardCFMD(KEYSPACE1, tableName, 1, LongType.instance, LongType.instance, LongType.instance);
     }
@@ -131,7 +131,7 @@ public class BlacklistingCompactionsTest
         final ColumnFamilyStore cfs = keyspace.getColumnFamilyStore(tableName);
 
         final int ROWS_PER_SSTABLE = 10;
-        final int SSTABLES = cfs.metadata.params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
+        final int SSTABLES = cfs.metadata().params.minIndexInterval * 2 / ROWS_PER_SSTABLE;
         final int SSTABLES_TO_CORRUPT = 8;
 
         assertTrue(String.format("Not enough sstables (%d), expected at least %d sstables to corrupt", SSTABLES, SSTABLES_TO_CORRUPT),
@@ -150,7 +150,7 @@ public class BlacklistingCompactionsTest
             {
                 DecoratedKey key = Util.dk(String.valueOf(i));
                 long timestamp = j * ROWS_PER_SSTABLE + i;
-                new RowUpdateBuilder(cfs.metadata, timestamp, key.getKey())
+                new RowUpdateBuilder(cfs.metadata(), timestamp, key.getKey())
                         .clustering(Long.valueOf(i))
                         .add("val", Long.valueOf(i))
                         .build()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
index 1b400e8..14dc3be 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionControllerTest.java
@@ -28,7 +28,7 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
+import org.apache.cassandra.schema.TableMetadata;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
 import org.apache.cassandra.db.Keyspace;
@@ -59,16 +59,18 @@ public class CompactionControllerTest extends SchemaLoader
         SchemaLoader.prepareServer();
         SchemaLoader.createKeyspace(KEYSPACE,
                                     KeyspaceParams.simple(1),
-                                    CFMetaData.Builder.create(KEYSPACE, CF1, true, false, false)
-                                                      .addPartitionKey("pk", AsciiType.instance)
-                                                      .addClusteringColumn("ck", AsciiType.instance)
-                                                      .addRegularColumn("val", AsciiType.instance)
-                                                      .build(),
-                                    CFMetaData.Builder.create(KEYSPACE, CF2, true, false, false)
-                                                      .addPartitionKey("pk", AsciiType.instance)
-                                                      .addClusteringColumn("ck", AsciiType.instance)
-                                                      .addRegularColumn("val", AsciiType.instance)
-                                                      .build());
+                                    TableMetadata.builder(KEYSPACE, CF1)
+                                                 .isCompound(false)
+                                                 .isDense(true)
+                                                 .addPartitionKeyColumn("pk", AsciiType.instance)
+                                                 .addClusteringColumn("ck", AsciiType.instance)
+                                                 .addRegularColumn("val", AsciiType.instance),
+                                    TableMetadata.builder(KEYSPACE, CF2)
+                                                 .isCompound(false)
+                                                 .isDense(true)
+                                                 .addPartitionKeyColumn("pk", AsciiType.instance)
+                                                 .addClusteringColumn("ck", AsciiType.instance)
+                                                 .addRegularColumn("val", AsciiType.instance));
     }
 
     @Test
@@ -85,7 +87,7 @@ public class CompactionControllerTest extends SchemaLoader
         long timestamp3 = timestamp2 - 5; // oldest timestamp
 
         // add to first memtable
-        applyMutation(cfs.metadata, key, timestamp1);
+        applyMutation(cfs.metadata(), key, timestamp1);
 
         // check max purgeable timestamp without any sstables
         try(CompactionController controller = new CompactionController(cfs, null, 0))
@@ -99,7 +101,7 @@ public class CompactionControllerTest extends SchemaLoader
         Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables()); // first sstable is compacting
 
         // create another sstable
-        applyMutation(cfs.metadata, key, timestamp2);
+        applyMutation(cfs.metadata(), key, timestamp2);
         cfs.forceBlockingFlush();
 
         // check max purgeable timestamp when compacting the first sstable with and without a memtable
@@ -107,7 +109,7 @@ public class CompactionControllerTest extends SchemaLoader
         {
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp2);
 
-            applyMutation(cfs.metadata, key, timestamp3);
+            applyMutation(cfs.metadata(), key, timestamp3);
 
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3); //second sstable and second memtable
         }
@@ -118,9 +120,9 @@ public class CompactionControllerTest extends SchemaLoader
         //newest to oldest
         try (CompactionController controller = new CompactionController(cfs, null, 0))
         {
-            applyMutation(cfs.metadata, key, timestamp1);
-            applyMutation(cfs.metadata, key, timestamp2);
-            applyMutation(cfs.metadata, key, timestamp3);
+            applyMutation(cfs.metadata(), key, timestamp1);
+            applyMutation(cfs.metadata(), key, timestamp2);
+            applyMutation(cfs.metadata(), key, timestamp3);
 
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3); //memtable only
         }
@@ -130,9 +132,9 @@ public class CompactionControllerTest extends SchemaLoader
         //oldest to newest
         try (CompactionController controller = new CompactionController(cfs, null, 0))
         {
-            applyMutation(cfs.metadata, key, timestamp3);
-            applyMutation(cfs.metadata, key, timestamp2);
-            applyMutation(cfs.metadata, key, timestamp1);
+            applyMutation(cfs.metadata(), key, timestamp3);
+            applyMutation(cfs.metadata(), key, timestamp2);
+            applyMutation(cfs.metadata(), key, timestamp1);
 
             assertPurgeBoundary(controller.getPurgeEvaluator(key), timestamp3);
         }
@@ -152,14 +154,14 @@ public class CompactionControllerTest extends SchemaLoader
         long timestamp3 = timestamp2 - 5; // oldest timestamp
 
         // create sstable with tombstone that should be expired in no older timestamps
-        applyDeleteMutation(cfs.metadata, key, timestamp2);
+        applyDeleteMutation(cfs.metadata(), key, timestamp2);
         cfs.forceBlockingFlush();
 
         // first sstable with tombstone is compacting
         Set<SSTableReader> compacting = Sets.newHashSet(cfs.getLiveSSTables());
 
         // create another sstable with more recent timestamp
-        applyMutation(cfs.metadata, key, timestamp1);
+        applyMutation(cfs.metadata(), key, timestamp1);
         cfs.forceBlockingFlush();
 
         // second sstable is overlapping
@@ -173,13 +175,13 @@ public class CompactionControllerTest extends SchemaLoader
         assertEquals(compacting.iterator().next(), expired.iterator().next());
 
         // however if we add an older mutation to the memtable then the sstable should not be expired
-        applyMutation(cfs.metadata, key, timestamp3);
+        applyMutation(cfs.metadata(), key, timestamp3);
         expired = CompactionController.getFullyExpiredSSTables(cfs, compacting, overlapping, gcBefore);
         assertNotNull(expired);
         assertEquals(0, expired.size());
     }
 
-    private void applyMutation(CFMetaData cfm, DecoratedKey key, long timestamp)
+    private void applyMutation(TableMetadata cfm, DecoratedKey key, long timestamp)
     {
         ByteBuffer val = ByteBufferUtil.bytes(1L);
 
@@ -190,7 +192,7 @@ public class CompactionControllerTest extends SchemaLoader
         .applyUnsafe();
     }
 
-    private void applyDeleteMutation(CFMetaData cfm, DecoratedKey key, long timestamp)
+    private void applyDeleteMutation(TableMetadata cfm, DecoratedKey key, long timestamp)
     {
         new Mutation(PartitionUpdate.fullPartitionDelete(cfm, key, timestamp, FBUtilities.nowInSeconds()))
         .applyUnsafe();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/af3fe39d/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java b/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
index 3c8c1b0..99df52f 100644
--- a/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
+++ b/test/unit/org/apache/cassandra/db/compaction/CompactionIteratorTest.java
@@ -29,7 +29,6 @@ import org.junit.Test;
 
 import org.apache.cassandra.SchemaLoader;
 import org.apache.cassandra.Util;
-import org.apache.cassandra.config.CFMetaData;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.db.ColumnFamilyStore;
 import org.apache.cassandra.db.DecoratedKey;
@@ -40,8 +39,8 @@ import org.apache.cassandra.db.marshal.UTF8Type;
 import org.apache.cassandra.db.partitions.AbstractUnfilteredPartitionIterator;
 import org.apache.cassandra.db.rows.*;
 import org.apache.cassandra.io.sstable.ISSTableScanner;
-import org.apache.cassandra.io.sstable.metadata.MetadataCollector;
 import org.apache.cassandra.schema.KeyspaceParams;
+import org.apache.cassandra.schema.TableMetadata;
 
 public class CompactionIteratorTest
 {
@@ -52,7 +51,7 @@ public class CompactionIteratorTest
     private static final String CFNAME = "Integer1";
 
     static final DecoratedKey kk;
-    static final CFMetaData metadata;
+    static final TableMetadata metadata;
     private static final int RANGE = 1000;
     private static final int COUNT = 100;
 
@@ -71,7 +70,7 @@ public class CompactionIteratorTest
                                                                          1,
                                                                          UTF8Type.instance,
                                                                          Int32Type.instance,
-                                                                         Int32Type.instance));
+                                                                         Int32Type.instance).build());
     }
 
     // See org.apache.cassandra.db.rows.UnfilteredRowsGenerator.parse for the syntax used in these tests.
@@ -339,7 +338,7 @@ public class CompactionIteratorTest
         }
 
         @Override
-        public CFMetaData metadata()
+        public TableMetadata metadata()
         {
             return metadata;
         }

Reply via email to