Merge branch 'cassandra-2.0' into trunk

Conflicts:
	src/java/org/apache/cassandra/db/ColumnFamily.java
	src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/98e6b08c
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/98e6b08c
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/98e6b08c

Branch: refs/heads/trunk
Commit: 98e6b08c71871666c5d7a206aedaa416ef1cf765
Parents: 7ce5e06 9fb44ee
Author: Marcus Eriksson <marc...@apache.org>
Authored: Thu Feb 6 08:57:46 2014 +0100
Committer: Marcus Eriksson <marc...@apache.org>
Committed: Thu Feb 6 08:57:46 2014 +0100

----------------------------------------------------------------------
 .../org/apache/cassandra/db/ColumnFamily.java   |  8 +++
 .../db/compaction/LazilyCompactedRow.java       | 11 ++++
 .../cassandra/io/sstable/SSTableWriter.java     | 10 ++++
 .../cassandra/tools/SSTableMetadataViewer.java  | 60 ++++++++++++--------
 4 files changed, 66 insertions(+), 23 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/db/ColumnFamily.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ColumnFamily.java
index 2df3fbf,ec6a395..00f0e35
--- a/src/java/org/apache/cassandra/db/ColumnFamily.java
+++ b/src/java/org/apache/cassandra/db/ColumnFamily.java
@@@ -423,16 -418,26 +423,24 @@@ public abstract class ColumnFamily impl
          int maxLocalDeletionTime = Integer.MIN_VALUE;
          List<ByteBuffer> minColumnNamesSeen = Collections.emptyList();
          List<ByteBuffer> maxColumnNamesSeen = Collections.emptyList();
-
-         if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE)
-             tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime);
-         Iterator<RangeTombstone> it = deletionInfo().rangeIterator();
-         while (it.hasNext())
+         for (Cell cell : this)
          {
-             RangeTombstone rangeTombstone = it.next();
-             tombstones.update(rangeTombstone.getLocalDeletionTime());
-         }
-
-         for (Column column : this)
-         {
-             minTimestampSeen = Math.min(minTimestampSeen, column.minTimestamp());
-             maxTimestampSeen = Math.max(maxTimestampSeen, column.maxTimestamp());
-             maxLocalDeletionTime = Math.max(maxLocalDeletionTime, column.getLocalDeletionTime());
-             int deletionTime = column.getLocalDeletionTime();
++            if (deletionInfo().getTopLevelDeletion().localDeletionTime < Integer.MAX_VALUE)
++                tombstones.update(deletionInfo().getTopLevelDeletion().localDeletionTime);
++            Iterator<RangeTombstone> it = deletionInfo().rangeIterator();
++            while (it.hasNext())
++            {
++                RangeTombstone rangeTombstone = it.next();
++                tombstones.update(rangeTombstone.getLocalDeletionTime());
++            }
+             minTimestampSeen = Math.min(minTimestampSeen, cell.minTimestamp());
+             maxTimestampSeen = Math.max(maxTimestampSeen, cell.maxTimestamp());
+             maxLocalDeletionTime = Math.max(maxLocalDeletionTime, cell.getLocalDeletionTime());
+             int deletionTime = cell.getLocalDeletionTime();
              if (deletionTime < Integer.MAX_VALUE)
                  tombstones.update(deletionTime);
-             minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, column.name, metadata.comparator);
-             maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, column.name, metadata.comparator);
+             minColumnNamesSeen = ColumnNameHelper.minComponents(minColumnNamesSeen, cell.name, metadata.comparator);
+             maxColumnNamesSeen = ColumnNameHelper.maxComponents(maxColumnNamesSeen, cell.name, metadata.comparator);
          }
          return new ColumnStats(getColumnCount(), minTimestampSeen, maxTimestampSeen,
                                 maxLocalDeletionTime, tombstones, minColumnNamesSeen, maxColumnNamesSeen);
      }
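[Editor's note] The getColumnStats() resolution above folds top-level and range tombstone drop times into the single pass that already tracks per-cell timestamps and deletion times. Below is a minimal standalone sketch of that pattern, not Cassandra's code: SimpleCell and the TreeMap-based histogram are invented stand-ins for Cell, DeletionInfo, and the bounded StreamingHistogram the real code updates.

import java.util.Map;
import java.util.TreeMap;

// Sketch: walk every cell once, tracking min/max timestamps and bucketing
// local deletion times (tombstone drop times) into a histogram.
public class ColumnStatsSketch
{
    static final class SimpleCell
    {
        final long timestamp;
        final int localDeletionTime; // Integer.MAX_VALUE means "live"

        SimpleCell(long timestamp, int localDeletionTime)
        {
            this.timestamp = timestamp;
            this.localDeletionTime = localDeletionTime;
        }
    }

    public static void main(String[] args)
    {
        SimpleCell[] cells = {
            new SimpleCell(100L, Integer.MAX_VALUE), // live cell, no drop time
            new SimpleCell(200L, 1391670000),        // tombstone
            new SimpleCell(150L, 1391673600)         // tombstone
        };

        long minTimestampSeen = Long.MAX_VALUE;
        long maxTimestampSeen = Long.MIN_VALUE;
        // drop time -> number of tombstones expiring at that time
        Map<Integer, Long> tombstones = new TreeMap<>();

        for (SimpleCell cell : cells)
        {
            minTimestampSeen = Math.min(minTimestampSeen, cell.timestamp);
            maxTimestampSeen = Math.max(maxTimestampSeen, cell.timestamp);
            // Only cells with a real deletion time count as tombstones
            if (cell.localDeletionTime < Integer.MAX_VALUE)
                tombstones.merge(cell.localDeletionTime, 1L, Long::sum);
        }

        System.out.printf("min ts: %d, max ts: %d, histogram: %s%n",
                          minTimestampSeen, maxTimestampSeen, tombstones);
    }
}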
http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
index 02901a3,e10fb2c..89181e6
--- a/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
+++ b/src/java/org/apache/cassandra/db/compaction/LazilyCompactedRow.java
@@@ -261,17 -265,25 +261,28 @@@ public class LazilyCompactedRow extend
                  container.clear();
                  return null;
              }
-             Column reduced = purged.iterator().next();
-             container.clear();
+
-             // PrecompactedRow.removeDeleted has only checked the top-level CF deletion times,
-             // not the range tombstones. For that we use the columnIndexer tombstone tracker.
-             if (indexBuilder.tombstoneTracker().isDeleted(reduced))
-             {
-                 indexer.remove(reduced);
-                 return null;
-             }
-             int localDeletionTime = purged.deletionInfo().getTopLevelDeletion().localDeletionTime;
++            int localDeletionTime = container.deletionInfo().getTopLevelDeletion().localDeletionTime;
+             if (localDeletionTime < Integer.MAX_VALUE)
+                 tombstones.update(localDeletionTime);
-             Iterator<RangeTombstone> rangeTombstoneIterator = purged.deletionInfo().rangeIterator();
++            Iterator<RangeTombstone> rangeTombstoneIterator = container.deletionInfo().rangeIterator();
+             while (rangeTombstoneIterator.hasNext())
+             {
+                 RangeTombstone rangeTombstone = rangeTombstoneIterator.next();
+                 tombstones.update(rangeTombstone.getLocalDeletionTime());
+             }
++
+             Cell reduced = iter.next();
+             container.clear();
+
+             // removeDeleted have only checked the top-level CF deletion times,
+             // not the range tombstone. For that we use the columnIndexer tombstone tracker.
+             if (indexBuilder.tombstoneTracker().isDeleted(reduced))
+             {
+                 indexer.remove(reduced);
+                 return null;
+             }
+
              columns++;
              minTimestampSeen = Math.min(minTimestampSeen, reduced.minTimestamp());
              maxTimestampSeen = Math.max(maxTimestampSeen, reduced.maxTimestamp());
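[Editor's note] LazilyCompactedRow now updates the same drop-time histogram while rows are merged during compaction, which is what feeds the viewer's "Estimated droppable tombstones" line. Here is a sketch of how such a histogram can be reduced to that estimate, again with a TreeMap standing in for StreamingHistogram; the real ratio is also normalized against column counts, so this shows only the shape of the computation, not the exact formula.

import java.util.Map;
import java.util.TreeMap;

// Sketch: tombstones whose local deletion time falls before gcBefore are the
// ones a compaction running at that point in time could actually drop.
public class DroppableTombstoneSketch
{
    static long droppableBefore(TreeMap<Integer, Long> histogram, int gcBefore)
    {
        long droppable = 0;
        // headMap(gcBefore) is every bucket strictly before the cutoff
        for (Map.Entry<Integer, Long> entry : histogram.headMap(gcBefore).entrySet())
            droppable += entry.getValue();
        return droppable;
    }

    public static void main(String[] args)
    {
        TreeMap<Integer, Long> histogram = new TreeMap<>();
        histogram.put(1391670000, 3L); // 3 tombstones droppable at this epoch second
        histogram.put(1391756400, 5L);

        // Same cutoff the viewer uses: "now" in epoch seconds
        int gcBefore = (int) (System.currentTimeMillis() / 1000);
        System.out.printf("droppable tombstones: %d%n",
                          droppableBefore(histogram, gcBefore));
    }
}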
http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/io/sstable/SSTableWriter.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/98e6b08c/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
index a2f7b89,64720b5..0ab94c4
--- a/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
+++ b/src/java/org/apache/cassandra/tools/SSTableMetadataViewer.java
@@@ -17,9 -17,9 +17,10 @@@
   */
  package org.apache.cassandra.tools;

+ import java.io.File;
  import java.io.IOException;
  import java.io.PrintStream;
 +import java.util.EnumSet;
  import java.util.Map;

  import org.apache.cassandra.io.sstable.Descriptor;
@@@ -44,32 -44,29 +45,45 @@@ public class SSTableMetadataViewe
          for (String fname : args)
          {
-             Descriptor descriptor = Descriptor.fromFilename(fname);
-             Map<MetadataType, MetadataComponent> metadata = descriptor.getMetadataSerializer().deserialize(descriptor, EnumSet.allOf(MetadataType.class));
-             ValidationMetadata validation = (ValidationMetadata) metadata.get(MetadataType.VALIDATION);
-             StatsMetadata stats = (StatsMetadata) metadata.get(MetadataType.STATS);
-             CompactionMetadata compaction = (CompactionMetadata) metadata.get(MetadataType.COMPACTION);
-
-             out.printf("SSTable: %s%n", descriptor);
-             if (validation != null)
-             {
-                 out.printf("Partitioner: %s%n", validation.partitioner);
-                 out.printf("Bloom Filter FP chance: %f%n", validation.bloomFilterFPChance);
-             }
-             if (stats != null)
+             if (new File(fname).exists())
              {
-                 out.printf("Maximum timestamp: %s%n", stats.maxTimestamp);
-                 out.printf("SSTable max local deletion time: %s%n", stats.maxLocalDeletionTime);
-                 out.printf("Compression ratio: %s%n", stats.compressionRatio);
-                 out.printf("Estimated droppable tombstones: %s%n", stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000)));
-                 out.printf("SSTable Level: %d%n", stats.sstableLevel);
-                 out.println(stats.replayPosition);
-                 printHistograms(stats, out);
+                 Descriptor descriptor = Descriptor.fromFilename(fname);
-                 SSTableMetadata metadata = SSTableMetadata.serializer.deserialize(descriptor).left;
++                Map<MetadataType, MetadataComponent> metadata = descriptor.getMetadataSerializer().deserialize(descriptor, EnumSet.allOf(MetadataType.class));
++                ValidationMetadata validation = (ValidationMetadata) metadata.get(MetadataType.VALIDATION);
++                StatsMetadata stats = (StatsMetadata) metadata.get(MetadataType.STATS);
++                CompactionMetadata compaction = (CompactionMetadata) metadata.get(MetadataType.COMPACTION);
+
+                 out.printf("SSTable: %s%n", descriptor);
-                 out.printf("Partitioner: %s%n", metadata.partitioner);
-                 out.printf("Maximum timestamp: %s%n", metadata.maxTimestamp);
-                 out.printf("SSTable max local deletion time: %s%n", metadata.maxLocalDeletionTime);
-                 out.printf("Compression ratio: %s%n", metadata.compressionRatio);
-                 out.printf("Estimated droppable tombstones: %s%n", metadata.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000)));
-                 out.printf("SSTable Level: %d%n", metadata.sstableLevel);
-                 out.println(metadata.replayPosition);
-                 printHistograms(metadata, out);
-                 out.println("Estimated tombstone drop times:");
-                 for (Map.Entry<Double, Long> entry : metadata.estimatedTombstoneDropTime.getAsMap().entrySet())
++                if (validation != null)
+                 {
-                     out.printf("%-10s:%10s%n",entry.getKey().intValue(), entry.getValue());
++                    out.printf("Partitioner: %s%n", validation.partitioner);
++                    out.printf("Bloom Filter FP chance: %f%n", validation.bloomFilterFPChance);
++                }
++                if (stats != null)
++                {
++                    out.printf("Maximum timestamp: %s%n", stats.maxTimestamp);
++                    out.printf("SSTable max local deletion time: %s%n", stats.maxLocalDeletionTime);
++                    out.printf("Compression ratio: %s%n", stats.compressionRatio);
++                    out.printf("Estimated droppable tombstones: %s%n", stats.getEstimatedDroppableTombstoneRatio((int) (System.currentTimeMillis() / 1000)));
++                    out.printf("SSTable Level: %d%n", stats.sstableLevel);
++                    out.println(stats.replayPosition);
++                    out.println("Estimated tombstone drop times:%n");
++                    for (Map.Entry<Double, Long> entry : stats.estimatedTombstoneDropTime.getAsMap().entrySet())
++                    {
++                        out.printf("%-10s:%10s%n",entry.getKey().intValue(), entry.getValue());
++                    }
++                    printHistograms(stats, out);
++                }
++                if (compaction != null)
++                {
++                    out.printf("Ancestors: %s%n", compaction.ancestors.toString());
++                    out.printf("Estimated cardinality: %s%n", compaction.cardinalityEstimator.cardinality());
++
+                 }
              }
-             if (compaction != null)
+             else
              {
-                 out.printf("Ancestors: %s%n", compaction.ancestors.toString());
-                 out.printf("Estimated cardinality: %s%n", compaction.cardinalityEstimator.cardinality());
+                 out.println("No such file: " + fname);
              }
          }
      }
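[Editor's note] The viewer change has two parts: it guards each argument with a file-existence check before building a Descriptor, and it prints each metadata component (validation, stats, compaction) only if it was actually deserialized. A minimal sketch of that control flow follows; describeSSTable is a hypothetical stand-in for the Descriptor.fromFilename plus getMetadataSerializer().deserialize(...) calls in the real tool.

import java.io.File;

// Sketch: skip missing files with a message instead of throwing, and keep
// processing the remaining arguments.
public class MetadataViewerSketch
{
    static void describeSSTable(String fname)
    {
        // Stand-in for deserializing and printing the metadata components
        System.out.println("SSTable: " + fname);
    }

    public static void main(String[] args)
    {
        for (String fname : args)
        {
            if (new File(fname).exists())
                describeSSTable(fname);
            else
                System.out.println("No such file: " + fname);
        }
    }
}

With this guard, a nonexistent path prints "No such file: ..." and the tool moves on to the next argument, rather than failing part-way through the argument list.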