Repository: cassandra Updated Branches: refs/heads/trunk cbde96724 -> 93bd9ec25
Remove 1.2 sstable support in 2.1 patch by slebresne; reviewed by iamaleksey for CASSANDRA-6869 Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/8e172c85 Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/8e172c85 Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/8e172c85 Branch: refs/heads/trunk Commit: 8e172c8563a995808a72a1a7e81a06f3c2a355ce Parents: 6844462 Author: Sylvain Lebresne <sylv...@datastax.com> Authored: Mon Mar 31 12:30:50 2014 +0200 Committer: Sylvain Lebresne <sylv...@datastax.com> Committed: Mon Mar 31 12:30:50 2014 +0200 ---------------------------------------------------------------------- CHANGES.txt | 1 + NEWS.txt | 10 ++-- .../org/apache/cassandra/config/CFMetaData.java | 10 ++-- src/java/org/apache/cassandra/db/Cell.java | 10 ---- .../apache/cassandra/db/ColumnFamilyStore.java | 2 +- .../db/columniterator/IndexedSliceReader.java | 53 +++----------------- .../db/columniterator/SSTableNamesIterator.java | 14 ++---- .../db/columniterator/SimpleSliceReader.java | 7 +-- .../cassandra/db/compaction/Scrubber.java | 23 ++------- .../apache/cassandra/io/sstable/Descriptor.java | 17 +------ .../io/sstable/SSTableIdentityIterator.java | 3 +- .../cassandra/io/sstable/SSTableReader.java | 6 +-- .../cassandra/io/sstable/SSTableScanner.java | 2 - .../cassandra/io/sstable/SSTableWriter.java | 11 +--- .../metadata/LegacyMetadataSerializer.java | 37 ++++---------- .../io/sstable/metadata/StatsMetadata.java | 36 +++++-------- .../apache/cassandra/tools/SSTableExport.java | 6 +-- .../cassandra/io/sstable/LegacySSTableTest.java | 4 +- 18 files changed, 60 insertions(+), 192 deletions(-) ---------------------------------------------------------------------- http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/CHANGES.txt ---------------------------------------------------------------------- diff --git a/CHANGES.txt b/CHANGES.txt index 196fa0d..c224c8f 100644 
--- a/CHANGES.txt +++ b/CHANGES.txt @@ -34,6 +34,7 @@ * Add multiple memory allocation options for memtables (CASSANDRA-6689) * Remove adjusted op rate from stress output (CASSANDRA-6921) * Add optimized CF.hasColumns() implementations (CASSANDRA-6941) + * Properly remove 1.2 sstable support in 2.1 (CASSANDRA-6869) Merged from 2.0: * Restrict Windows to parallel repairs (CASSANDRA-6907) * (Hadoop) Allow manually specifying start/end tokens in CFIF (CASSANDRA-6436) http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/NEWS.txt ---------------------------------------------------------------------- diff --git a/NEWS.txt b/NEWS.txt index 23f6522..7cb7565 100644 --- a/NEWS.txt +++ b/NEWS.txt @@ -33,11 +33,11 @@ New features Upgrading --------- - - Rolling upgrades from anything pre-2.0.6 is not supported. - - For leveled compaction users, 2.0 must be atleast started before - upgrading to 2.1 due to the fact that the old JSON leveled - manifest is migrated into the sstable metadata files on startup - in 2.0 and this code is gone from 2.1. + - Rolling upgrades from anything pre-2.0.6 is not supported. Furthermore + - Pre-2.0 sstables are not supported. This means that before upgrading + a node to 2.1, this node must be started on 2.0 and + 'nodetool upgradesstables' must be run (and this even in the case + of non-rolling upgrades). - For size-tiered compaction users, Cassandra now defaults to ignoring the coldest 5% of sstables. 
This can be customized with the cold_reads_to_omit compaction option; 0.0 omits nothing (the old http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/config/CFMetaData.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/config/CFMetaData.java b/src/java/org/apache/cassandra/config/CFMetaData.java index 8a4f147..1ca9880 100644 --- a/src/java/org/apache/cassandra/config/CFMetaData.java +++ b/src/java/org/apache/cassandra/config/CFMetaData.java @@ -1379,16 +1379,14 @@ public final class CFMetaData return (cfName + "_" + columnName + "_idx").replaceAll("\\W", ""); } - public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, int count, Descriptor.Version version) + public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, Descriptor.Version version) { - return getOnDiskIterator(in, count, ColumnSerializer.Flag.LOCAL, Integer.MIN_VALUE, version); + return getOnDiskIterator(in, ColumnSerializer.Flag.LOCAL, Integer.MIN_VALUE, version); } - public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, int count, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version) + public Iterator<OnDiskAtom> getOnDiskIterator(DataInput in, ColumnSerializer.Flag flag, int expireBefore, Descriptor.Version version) { - if (version.hasSuperColumns && cfType == ColumnFamilyType.Super) - return SuperColumns.onDiskIterator(in, count, flag, expireBefore, comparator); - return Cell.onDiskIterator(in, count, flag, expireBefore, version, comparator); + return Cell.onDiskIterator(in, flag, expireBefore, version, comparator); } public AtomDeserializer getOnDiskDeserializer(DataInput in, Descriptor.Version version) http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/Cell.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/Cell.java 
b/src/java/org/apache/cassandra/db/Cell.java index e807a21..8db9770 100644 --- a/src/java/org/apache/cassandra/db/Cell.java +++ b/src/java/org/apache/cassandra/db/Cell.java @@ -48,12 +48,7 @@ public class Cell implements OnDiskAtom private static final long EMPTY_SIZE = ObjectSizes.measure(new Cell(CellNames.simpleDense(ByteBuffer.allocate(1)))); - /** - * For 2.0-formatted sstables (where column count is not stored), @param count should be Integer.MAX_VALUE, - * and we will look for the end-of-row column name marker instead of relying on that. - */ public static Iterator<OnDiskAtom> onDiskIterator(final DataInput in, - final int count, final ColumnSerializer.Flag flag, final int expireBefore, final Descriptor.Version version, @@ -61,13 +56,8 @@ public class Cell implements OnDiskAtom { return new AbstractIterator<OnDiskAtom>() { - int i = 0; - protected OnDiskAtom computeNext() { - if (i++ >= count) - return endOfData(); - OnDiskAtom atom; try { http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/ColumnFamilyStore.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java index d66c501..b9cab4d 100644 --- a/src/java/org/apache/cassandra/db/ColumnFamilyStore.java +++ b/src/java/org/apache/cassandra/db/ColumnFamilyStore.java @@ -454,7 +454,7 @@ public class ColumnFamilyStore implements ColumnFamilyStoreMBean Descriptor desc = entry.getKey(); generations.add(desc.generation); if (!desc.isCompatible()) - throw new RuntimeException(String.format("Incompatible SSTable found. Current version %s is unable to read file: %s. Please run upgradesstables.", + throw new RuntimeException(String.format("Incompatible SSTable found. Current version %s is unable to read file: %s. 
Please run upgradesstables.", Descriptor.Version.CURRENT, desc)); } Collections.sort(generations); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java index 22fe5fa..7012321 100644 --- a/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java +++ b/src/java/org/apache/cassandra/db/columniterator/IndexedSliceReader.java @@ -26,7 +26,6 @@ import com.google.common.collect.AbstractIterator; import org.apache.cassandra.db.*; import org.apache.cassandra.db.composites.CellNameType; -import org.apache.cassandra.db.composites.CellNames; import org.apache.cassandra.db.composites.Composite; import org.apache.cassandra.db.filter.ColumnSlice; import org.apache.cassandra.io.sstable.CorruptSSTableException; @@ -113,8 +112,6 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA in.seek(rowEntry.position); } sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(file)); - if (sstable.descriptor.version.hasRowSizeAndColumnCount) - file.readLong(); } public ColumnFamily getColumnFamily() @@ -179,34 +176,6 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA } } - static int indexFor(SSTableReader sstable, Composite name, List<IndexHelper.IndexInfo> indexes, CellNameType comparator, boolean reversed, int startIdx) - { - // If it's a super CF and the sstable is from the old format, then the index will contain old format info, i.e. non composite - // SC names. 
So we need to 1) use only the SC name part of the comparator and 2) extract only that part from 'name' - if (sstable.metadata.isSuper() && sstable.descriptor.version.hasSuperColumns) - { - CellNameType scComparator = SuperColumns.scNameType(comparator); - Composite scName = CellNames.simpleDense(SuperColumns.scName(name)); - return IndexHelper.indexFor(scName, indexes, scComparator, reversed, startIdx); - } - return IndexHelper.indexFor(name, indexes, comparator, reversed, startIdx); - } - - static Composite forIndexComparison(SSTableReader sstable, Composite name) - { - // See indexFor above. - return sstable.metadata.isSuper() && sstable.descriptor.version.hasSuperColumns - ? CellNames.simpleDense(SuperColumns.scName(name)) - : name; - } - - static CellNameType comparatorForIndex(SSTableReader sstable, CellNameType comparator) - { - return sstable.metadata.isSuper() && sstable.descriptor.version.hasSuperColumns - ? SuperColumns.scNameType(comparator) - : comparator; - } - private abstract class BlockFetcher { protected int currentSliceIdx; @@ -247,22 +216,16 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA return !start.isEmpty() && comparator.compare(name, start) < 0; } - protected boolean isIndexEntryBeforeSliceStart(Composite name) - { - Composite start = currentStart(); - return !start.isEmpty() && comparatorForIndex(sstable, comparator).compare(name, forIndexComparison(sstable, start)) < 0; - } - protected boolean isColumnBeforeSliceFinish(OnDiskAtom column) { Composite finish = currentFinish(); return finish.isEmpty() || comparator.compare(column.name(), finish) <= 0; } - protected boolean isIndexEntryAfterSliceFinish(Composite name) + protected boolean isAfterSliceFinish(Composite name) { Composite finish = currentFinish(); - return !finish.isEmpty() && comparatorForIndex(sstable, comparator).compare(name, forIndexComparison(sstable, finish)) > 0; + return !finish.isEmpty() && comparator.compare(name, finish) > 0; } } @@ 
-293,7 +256,7 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA { while (++currentSliceIdx < slices.length) { - nextIndexIdx = indexFor(sstable, slices[currentSliceIdx].start, indexes, comparator, reversed, nextIndexIdx); + nextIndexIdx = IndexHelper.indexFor(slices[currentSliceIdx].start, indexes, comparator, reversed, nextIndexIdx); if (nextIndexIdx < 0 || nextIndexIdx >= indexes.size()) // no index block for that slice continue; @@ -302,12 +265,12 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA IndexInfo info = indexes.get(nextIndexIdx); if (reversed) { - if (!isIndexEntryBeforeSliceStart(info.lastName)) + if (!isBeforeSliceStart(info.lastName)) return true; } else { - if (!isIndexEntryAfterSliceFinish(info.firstName)) + if (!isAfterSliceFinish(info.firstName)) return true; } } @@ -480,10 +443,8 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA // We remenber when we are whithin a slice to avoid some comparison boolean inSlice = false; - int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? file.readInt() : Integer.MAX_VALUE; AtomDeserializer deserializer = emptyColumnFamily.metadata().getOnDiskDeserializer(file, sstable.descriptor.version); - int deserialized = 0; - while (deserializer.hasNext() && deserialized < columnCount) + while (deserializer.hasNext()) { // col is before slice // (If in slice, don't bother checking that until we change slice) @@ -491,7 +452,6 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA if (!inSlice && !start.isEmpty() && deserializer.compareNextTo(start) < 0) { deserializer.skipNext(); - ++deserialized; continue; } @@ -501,7 +461,6 @@ class IndexedSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskA { inSlice = true; addColumn(deserializer.readNext()); - ++deserialized; } // col is after slice. more slices? 
else http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java index 374dedb..224b63f 100644 --- a/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java +++ b/src/java/org/apache/cassandra/db/columniterator/SSTableNamesIterator.java @@ -109,8 +109,6 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement DecoratedKey keyInDisk = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(file)); assert keyInDisk.equals(key) : String.format("%s != %s in %s", keyInDisk, key, file.getPath()); - if (sstable.descriptor.version.hasRowSizeAndColumnCount) - file.readLong(); } indexList = indexEntry.columnsIndex(); @@ -137,8 +135,7 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement List<OnDiskAtom> result = new ArrayList<OnDiskAtom>(); if (indexList.isEmpty()) { - int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? 
file.readInt() : Integer.MAX_VALUE; - readSimpleColumns(file, columns, result, columnCount); + readSimpleColumns(file, columns, result); } else { @@ -149,9 +146,9 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement iter = result.iterator(); } - private void readSimpleColumns(FileDataInput file, SortedSet<CellName> columnNames, List<OnDiskAtom> result, int columnCount) + private void readSimpleColumns(FileDataInput file, SortedSet<CellName> columnNames, List<OnDiskAtom> result) { - Iterator<OnDiskAtom> atomIterator = cf.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version); + Iterator<OnDiskAtom> atomIterator = cf.metadata().getOnDiskIterator(file, sstable.descriptor.version); int n = 0; while (atomIterator.hasNext()) { @@ -186,13 +183,12 @@ public class SSTableNamesIterator extends AbstractIterator<OnDiskAtom> implement int lastIndexIdx = -1; for (CellName name : columnNames) { - int index = IndexedSliceReader.indexFor(sstable, name, indexList, comparator, false, lastIndexIdx); + int index = IndexHelper.indexFor(name, indexList, comparator, false, lastIndexIdx); if (index < 0 || index == indexList.size()) continue; IndexHelper.IndexInfo indexInfo = indexList.get(index); // Check the index block does contain the column names and that we haven't inserted this block yet. 
- if (IndexedSliceReader.comparatorForIndex(sstable, comparator).compare(IndexedSliceReader.forIndexComparison(sstable, name), indexInfo.firstName) < 0 - || index == lastIndexIdx) + if (comparator.compare(name, indexInfo.firstName) < 0 || index == lastIndexIdx) continue; ranges.add(indexInfo); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java b/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java index 7fb48e3..702bddc 100644 --- a/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java +++ b/src/java/org/apache/cassandra/db/columniterator/SimpleSliceReader.java @@ -64,17 +64,12 @@ class SimpleSliceReader extends AbstractIterator<OnDiskAtom> implements OnDiskAt this.needsClosing = false; } - Descriptor.Version version = sstable.descriptor.version; - // Skip key and data size ByteBufferUtil.skipShortLength(file); - if (version.hasRowSizeAndColumnCount) - file.readLong(); emptyColumnFamily = ArrayBackedSortedColumns.factory.create(sstable.metadata); emptyColumnFamily.delete(DeletionTime.serializer.deserialize(file)); - int columnCount = version.hasRowSizeAndColumnCount ? 
file.readInt() : Integer.MAX_VALUE; - atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, columnCount, sstable.descriptor.version); + atomIterator = emptyColumnFamily.metadata().getOnDiskIterator(file, sstable.descriptor.version); } catch (IOException e) { http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/db/compaction/Scrubber.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/db/compaction/Scrubber.java b/src/java/org/apache/cassandra/db/compaction/Scrubber.java index 0a1e8c4..01da2e1 100644 --- a/src/java/org/apache/cassandra/db/compaction/Scrubber.java +++ b/src/java/org/apache/cassandra/db/compaction/Scrubber.java @@ -130,11 +130,6 @@ public class Scrubber implements Closeable try { key = sstable.partitioner.decorateKey(ByteBufferUtil.readWithShortLength(dataFile)); - if (sstable.descriptor.version.hasRowSizeAndColumnCount) - { - dataSize = dataFile.readLong(); - outputHandler.debug(String.format("row %s is %s bytes", ByteBufferUtil.bytesToHex(key.key), dataSize)); - } } catch (Throwable th) { @@ -162,22 +157,12 @@ public class Scrubber implements Closeable long dataStartFromIndex = currentIndexKey == null ? -1 : rowStart + 2 + currentIndexKey.remaining(); - if (sstable.descriptor.version.hasRowSizeAndColumnCount) - dataStartFromIndex += 8; long dataSizeFromIndex = nextRowPositionFromIndex - dataStartFromIndex; - if (!sstable.descriptor.version.hasRowSizeAndColumnCount) - { - dataSize = dataSizeFromIndex; - // avoid an NPE if key is null - String keyName = key == null ? 
"(unreadable key)" : ByteBufferUtil.bytesToHex(key.key); - outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize)); - } - else - { - if (currentIndexKey != null) - outputHandler.debug(String.format("Index doublecheck: row %s is %s bytes", ByteBufferUtil.bytesToHex(currentIndexKey), dataSizeFromIndex)); - } + dataSize = dataSizeFromIndex; + // avoid an NPE if key is null + String keyName = key == null ? "(unreadable key)" : ByteBufferUtil.bytesToHex(key.key); + outputHandler.debug(String.format("row %s is %s bytes", keyName, dataSize)); assert currentIndexKey != null || indexFile.isEOF(); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/Descriptor.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/Descriptor.java b/src/java/org/apache/cassandra/io/sstable/Descriptor.java index 4803ae7..db6f13a 100644 --- a/src/java/org/apache/cassandra/io/sstable/Descriptor.java +++ b/src/java/org/apache/cassandra/io/sstable/Descriptor.java @@ -49,7 +49,6 @@ public class Descriptor // This needs to be at the begining for initialization sake public static final String current_version = "ka"; - // ic (1.2.5): omits per-row bloom filter of column names // ja (2.0.0): super columns are serialized as composites (note that there is no real format change, // this is mostly a marker to know if we should expect super columns or not. 
We do need // a major version bump however, because we should not allow streaming of super columns @@ -69,12 +68,6 @@ public class Descriptor private final String version; public final boolean isLatestVersion; - public final boolean hasSuperColumns; - public final boolean tracksMaxLocalDeletionTime; - public final boolean hasBloomFilterFPChance; - public final boolean offHeapSummaries; - public final boolean hasRowSizeAndColumnCount; - public final boolean tracksMaxMinColumnNames; public final boolean hasPostCompressionAdlerChecksums; public final boolean hasSamplingLevel; public final boolean newStatsFile; @@ -84,13 +77,7 @@ public class Descriptor public Version(String version) { this.version = version; - tracksMaxLocalDeletionTime = version.compareTo("ja") >= 0; isLatestVersion = version.compareTo(current_version) == 0; - hasSuperColumns = version.compareTo("ja") < 0; - hasBloomFilterFPChance = version.compareTo("ja") >= 0; - offHeapSummaries = version.compareTo("ja") >= 0; - hasRowSizeAndColumnCount = version.compareTo("ja") < 0; - tracksMaxMinColumnNames = version.compareTo("ja") >= 0; hasPostCompressionAdlerChecksums = version.compareTo("jb") >= 0; hasSamplingLevel = version.compareTo("ka") >= 0; newStatsFile = version.compareTo("ka") >= 0; @@ -110,7 +97,7 @@ public class Descriptor public boolean isCompatible() { - return version.compareTo("ic") >= 0 && version.charAt(0) <= CURRENT.version.charAt(0); + return version.compareTo("ja") >= 0 && version.charAt(0) <= CURRENT.version.charAt(0); } @Override @@ -249,7 +236,7 @@ public class Descriptor } if (!Version.validate(nexttok)) - throw new UnsupportedOperationException("SSTable " + name + " is too old to open. Upgrade to 1.2.5 first, and run upgradesstables"); + throw new UnsupportedOperationException("SSTable " + name + " is too old to open. 
Upgrade to 2.0 first, and run upgradesstables"); Version version = new Version(nexttok); nexttok = st.nextToken(); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java index ce4b670..b784a7e 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableIdentityIterator.java @@ -89,8 +89,7 @@ public class SSTableIdentityIterator implements Comparable<SSTableIdentityIterat try { columnFamily.delete(DeletionTime.serializer.deserialize(in)); - int columnCount = dataVersion.hasRowSizeAndColumnCount ? in.readInt() : Integer.MAX_VALUE; - atomIterator = columnFamily.metadata().getOnDiskIterator(in, columnCount, flag, expireBefore, dataVersion); + atomIterator = columnFamily.metadata().getOnDiskIterator(in, flag, expireBefore, dataVersion); } catch (IOException e) { http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/SSTableReader.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java index 94595b5..82a0bc8 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java @@ -525,14 +525,14 @@ public class SSTableReader extends SSTable implements Closeable // bf is enabled, but filter component is missing. 
load(true, true); } - else if (descriptor.version.hasBloomFilterFPChance && validation.bloomFilterFPChance != metadata.getBloomFilterFpChance()) + else if (validation.bloomFilterFPChance != metadata.getBloomFilterFpChance()) { // bf fp chance in sstable metadata and it has changed since compaction. load(true, true); } else { - // bf is enabled, but fp chance isn't present in metadata (pre-ja) OR matches the currently configured value. + // bf is enabled and fp chance matches the currently configured value. load(false, true); loadBloomFilter(); } @@ -655,7 +655,7 @@ public class SSTableReader extends SSTable implements Closeable public boolean loadSummary(SegmentedFile.Builder ibuilder, SegmentedFile.Builder dbuilder) { File summariesFile = new File(descriptor.filenameFor(Component.SUMMARY)); - if (!descriptor.version.offHeapSummaries || !summariesFile.exists()) + if (!summariesFile.exists()) return false; DataInputStream iStream = null; http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java index 911ef8c..2af68ae 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableScanner.java @@ -261,8 +261,6 @@ public class SSTableScanner implements ICompactionScanner { dfile.seek(currentEntry.position); ByteBufferUtil.readWithShortLength(dfile); // key - if (sstable.descriptor.version.hasRowSizeAndColumnCount) - dfile.readLong(); long dataSize = readEnd - dfile.getFilePointer(); return new SSTableIdentityIterator(sstable, dfile, currentKey, dataSize); } http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java 
---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java index e93deb5..1dc2c98 100644 --- a/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java +++ b/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java @@ -229,19 +229,10 @@ public class SSTableWriter extends SSTable StreamingHistogram tombstones = new StreamingHistogram(TOMBSTONE_HISTOGRAM_BIN_SIZE); ColumnFamily cf = ArrayBackedSortedColumns.factory.create(metadata); - // skip row size for version < ja - if (version.hasRowSizeAndColumnCount) - FileUtils.skipBytesFully(in, 8); - cf.delete(DeletionTime.serializer.deserialize(in)); ColumnIndex.Builder columnIndexer = new ColumnIndex.Builder(cf, key.key, dataFile.stream); - // read column count for version < ja - int columnCount = Integer.MAX_VALUE; - if (version.hasRowSizeAndColumnCount) - columnCount = in.readInt(); - if (cf.deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE) tombstones.update(cf.deletionInfo().getTopLevelDeletion().localDeletionTime); @@ -252,7 +243,7 @@ public class SSTableWriter extends SSTable tombstones.update(rangeTombstone.getLocalDeletionTime()); } - Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, columnCount, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version); + Iterator<OnDiskAtom> iter = metadata.getOnDiskIterator(in, ColumnSerializer.Flag.PRESERVE_SIZE, Integer.MIN_VALUE, version); try { while (iter.hasNext()) http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java b/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java index 9e97e2e..59f7be5 100644 --- 
a/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/LegacyMetadataSerializer.java @@ -38,8 +38,6 @@ import org.apache.cassandra.utils.StreamingHistogram; @Deprecated public class LegacyMetadataSerializer extends MetadataSerializer { - public static final double NO_BLOOM_FILTER_FP_CHANCE = -1.0; - /** * Legacy serialization is only used for SSTable level reset. */ @@ -96,8 +94,8 @@ public class LegacyMetadataSerializer extends MetadataSerializer ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in); long minTimestamp = in.readLong(); long maxTimestamp = in.readLong(); - int maxLocalDeletionTime = descriptor.version.tracksMaxLocalDeletionTime ? in.readInt() : Integer.MAX_VALUE; - double bloomFilterFPChance = descriptor.version.hasBloomFilterFPChance ? in.readDouble() : NO_BLOOM_FILTER_FP_CHANCE; + int maxLocalDeletionTime = in.readInt(); + double bloomFilterFPChance = in.readDouble(); double compressionRatio = in.readDouble(); String partitioner = in.readUTF(); int nbAncestors = in.readInt(); @@ -109,28 +107,15 @@ public class LegacyMetadataSerializer extends MetadataSerializer if (in.available() > 0) sstableLevel = in.readInt(); - List<ByteBuffer> minColumnNames; - List<ByteBuffer> maxColumnNames; - if (descriptor.version.tracksMaxMinColumnNames) - { - int colCount = in.readInt(); - minColumnNames = new ArrayList<>(colCount); - for (int i = 0; i < colCount; i++) - { - minColumnNames.add(ByteBufferUtil.readWithShortLength(in)); - } - colCount = in.readInt(); - maxColumnNames = new ArrayList<>(colCount); - for (int i = 0; i < colCount; i++) - { - maxColumnNames.add(ByteBufferUtil.readWithShortLength(in)); - } - } - else - { - minColumnNames = Collections.emptyList(); - maxColumnNames = Collections.emptyList(); - } + int colCount = in.readInt(); + List<ByteBuffer> minColumnNames = new ArrayList<>(colCount); + for (int i = 0; i < colCount; i++) + 
minColumnNames.add(ByteBufferUtil.readWithShortLength(in)); + + colCount = in.readInt(); + List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount); + for (int i = 0; i < colCount; i++) + maxColumnNames.add(ByteBufferUtil.readWithShortLength(in)); if (types.contains(MetadataType.VALIDATION)) components.put(MetadataType.VALIDATION, http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java index 8568925..1c3dfd5 100644 --- a/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java +++ b/src/java/org/apache/cassandra/io/sstable/metadata/StatsMetadata.java @@ -21,7 +21,6 @@ import java.io.DataInput; import java.io.IOException; import java.nio.ByteBuffer; import java.util.ArrayList; -import java.util.Collections; import java.util.List; import org.apache.commons.lang3.builder.EqualsBuilder; @@ -234,35 +233,24 @@ public class StatsMetadata extends MetadataComponent ReplayPosition replayPosition = ReplayPosition.serializer.deserialize(in); long minTimestamp = in.readLong(); long maxTimestamp = in.readLong(); - int maxLocalDeletionTime = version.tracksMaxLocalDeletionTime ? 
in.readInt() : Integer.MAX_VALUE; + int maxLocalDeletionTime = in.readInt(); double compressionRatio = in.readDouble(); StreamingHistogram tombstoneHistogram = StreamingHistogram.serializer.deserialize(in); int sstableLevel = in.readInt(); long repairedAt = 0; if (version.hasRepairedAt) repairedAt = in.readLong(); - List<ByteBuffer> minColumnNames; - List<ByteBuffer> maxColumnNames; - if (version.tracksMaxMinColumnNames) - { - int colCount = in.readInt(); - minColumnNames = new ArrayList<>(colCount); - for (int i = 0; i < colCount; i++) - { - minColumnNames.add(ByteBufferUtil.readWithShortLength(in)); - } - colCount = in.readInt(); - maxColumnNames = new ArrayList<>(colCount); - for (int i = 0; i < colCount; i++) - { - maxColumnNames.add(ByteBufferUtil.readWithShortLength(in)); - } - } - else - { - minColumnNames = Collections.emptyList(); - maxColumnNames = Collections.emptyList(); - } + + int colCount = in.readInt(); + List<ByteBuffer> minColumnNames = new ArrayList<>(colCount); + for (int i = 0; i < colCount; i++) + minColumnNames.add(ByteBufferUtil.readWithShortLength(in)); + + colCount = in.readInt(); + List<ByteBuffer> maxColumnNames = new ArrayList<>(colCount); + for (int i = 0; i < colCount; i++) + maxColumnNames.add(ByteBufferUtil.readWithShortLength(in)); + return new StatsMetadata(rowSizes, columnCounts, replayPosition, http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/src/java/org/apache/cassandra/tools/SSTableExport.java ---------------------------------------------------------------------- diff --git a/src/java/org/apache/cassandra/tools/SSTableExport.java b/src/java/org/apache/cassandra/tools/SSTableExport.java index cff6e71..bbc3494 100644 --- a/src/java/org/apache/cassandra/tools/SSTableExport.java +++ b/src/java/org/apache/cassandra/tools/SSTableExport.java @@ -308,12 +308,8 @@ public class SSTableExport dfile.seek(entry.position); ByteBufferUtil.readWithShortLength(dfile); // row key - if 
(sstable.descriptor.version.hasRowSizeAndColumnCount) - dfile.readLong(); // row size DeletionInfo deletionInfo = new DeletionInfo(DeletionTime.serializer.deserialize(dfile)); - int columnCount = sstable.descriptor.version.hasRowSizeAndColumnCount ? dfile.readInt() : Integer.MAX_VALUE; - - Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, columnCount, sstable.descriptor.version); + Iterator<OnDiskAtom> atomIterator = sstable.metadata.getOnDiskIterator(dfile, sstable.descriptor.version); checkStream(outs); http://git-wip-us.apache.org/repos/asf/cassandra/blob/8e172c85/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java ---------------------------------------------------------------------- diff --git a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java index ad9ce5b..c0c9d41 100644 --- a/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java +++ b/test/unit/org/apache/cassandra/io/sstable/LegacySSTableTest.java @@ -97,7 +97,7 @@ public class LegacySSTableTest extends SchemaLoader StorageService.instance.initServer(); for (File version : LEGACY_SSTABLE_ROOT.listFiles()) - if (Descriptor.Version.validate(version.getName())) + if (Descriptor.Version.validate(version.getName()) && new Descriptor.Version(version.getName()).isCompatible()) testStreaming(version.getName()); } @@ -135,7 +135,7 @@ public class LegacySSTableTest extends SchemaLoader public void testVersions() throws Throwable { for (File version : LEGACY_SSTABLE_ROOT.listFiles()) - if (Descriptor.Version.validate(version.getName())) + if (Descriptor.Version.validate(version.getName()) && new Descriptor.Version(version.getName()).isCompatible()) testVersion(version.getName()); }