http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java index 0e1337c..8df6a95 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PName.java @@ -83,6 +83,32 @@ public interface PName { return 0; } }; + public static PName ENCODED_EMPTY_COLUMN_NAME = new PName() { + @Override + public String getString() { + return String.valueOf(QueryConstants.ENCODED_EMPTY_COLUMN_NAME); + } + + @Override + public byte[] getBytes() { + return QueryConstants.ENCODED_EMPTY_COLUMN_BYTES; + } + + @Override + public String toString() { + return getString(); + } + + @Override + public ImmutableBytesPtr getBytesPtr() { + return QueryConstants.ENCODED_EMPTY_COLUMN_BYTES_PTR; + } + + @Override + public int getEstimatedSize() { + return 0; + } + }; /** * Get the client-side, normalized name as referenced * in a SQL statement.
http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java index 01e8afe..d3b11b2 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTable.java @@ -17,7 +17,15 @@ */ package org.apache.phoenix.schema; +import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; + +import java.util.Collections; +import java.util.HashMap; import java.util.List; +import java.util.Map; +import java.util.Map.Entry; + +import javax.annotation.Nullable; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.Bytes; @@ -129,7 +137,7 @@ public interface PTable extends PMetaDataEntity { * Link from a view to its parent table */ PARENT_TABLE((byte)3); - + private final byte[] byteValue; private final byte serializedValue; @@ -153,6 +161,35 @@ public interface PTable extends PMetaDataEntity { return LinkType.values()[serializedValue-1]; } } + + public enum StorageScheme { + ENCODED_COLUMN_NAMES((byte)1), + NON_ENCODED_COLUMN_NAMES((byte)2), + COLUMNS_STORED_IN_SINGLE_CELL((byte)3); + + private final byte[] byteValue; + private final byte serializedValue; + + StorageScheme(byte serializedValue) { + this.serializedValue = serializedValue; + this.byteValue = Bytes.toBytes(this.name()); + } + + public byte[] getBytes() { + return byteValue; + } + + public byte getSerializedValue() { + return this.serializedValue; + } + + public static StorageScheme fromSerializedValue(byte serializedValue) { + if (serializedValue < 1 || serializedValue > StorageScheme.values().length) { + return null; + } + return StorageScheme.values()[serializedValue-1]; + } + } long getTimeStamp(); 
long getSequenceNumber(); @@ -208,7 +245,16 @@ public interface PTable extends PMetaDataEntity { * can be found * @throws AmbiguousColumnException if multiple columns are found with the given name */ - PColumn getColumn(String name) throws ColumnNotFoundException, AmbiguousColumnException; + PColumn getPColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException; + + /** + * Get the column with the given column qualifier. + * @param column qualifier bytes + * @return the PColumn with the given column qualifier + * @throws ColumnNotFoundException if no column with the given column qualifier can be found + * @throws AmbiguousColumnException if multiple columns are found with the given column qualifier + */ + PColumn getPColumnForColumnQualifier(byte[] cf, byte[] cq) throws ColumnNotFoundException, AmbiguousColumnException; /** * Get the PK column with the given name. @@ -345,7 +391,6 @@ public interface PTable extends PMetaDataEntity { */ int getRowTimestampColPos(); long getUpdateCacheFrequency(); - boolean isNamespaceMapped(); /** @@ -359,4 +404,92 @@ public interface PTable extends PMetaDataEntity { * you are also not allowed to delete the table */ boolean isAppendOnlySchema(); + StorageScheme getStorageScheme(); + EncodedCQCounter getEncodedCQCounter(); + + /** + * Class to help track encoded column qualifier counters per column family. 
+ */ + public class EncodedCQCounter { + + private final Map<String, Integer> familyCounters = new HashMap<>(); + + /** + * Copy factory method + * @param counterToCopy + * @return copy of the passed counter + */ + public static EncodedCQCounter copy(EncodedCQCounter counterToCopy) { + EncodedCQCounter cqCounter = new EncodedCQCounter(); + for (Entry<String, Integer> e : counterToCopy.values().entrySet()) { + cqCounter.setValue(e.getKey(), e.getValue()); + } + return cqCounter; + } + + public static final EncodedCQCounter NULL_COUNTER = new EncodedCQCounter() { + + @Override + public Integer getNextQualifier(String columnFamily) { + return null; + } + + @Override + public void setValue(String columnFamily, Integer value) { + } + + @Override + public boolean increment(String columnFamily) { + return false; + } + + @Override + public Map<String, Integer> values() { + return Collections.emptyMap(); + } + + }; + + /** + * Get the next qualifier to be used for the column family. + * This method also ends up initializing the counter if the + * column family doesn't already have one. + */ + @Nullable + public Integer getNextQualifier(String columnFamily) { + Integer counter = familyCounters.get(columnFamily); + if (counter == null) { + counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; + familyCounters.put(columnFamily, counter); + } + return counter; + } + + public void setValue(String columnFamily, Integer value) { + familyCounters.put(columnFamily, value); + } + + /** + * + * @param columnFamily + * @return true if the counter was incremented, false otherwise. 
+ */ + public boolean increment(String columnFamily) { + if (columnFamily == null) { + return false; + } + Integer counter = familyCounters.get(columnFamily); + if (counter == null) { + counter = ENCODED_CQ_COUNTER_INITIAL_VALUE; + } + counter++; + familyCounters.put(columnFamily, counter); + return true; + } + + public Map<String, Integer> values() { + return Collections.unmodifiableMap(familyCounters); + } + + } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java index 98a0b99..1134e06 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableImpl.java @@ -20,6 +20,7 @@ package org.apache.phoenix.schema; import static org.apache.phoenix.hbase.index.util.KeyValueBuilder.addQuietly; import static org.apache.phoenix.hbase.index.util.KeyValueBuilder.deleteQuietly; import static org.apache.phoenix.schema.SaltingUtil.SALTING_COLUMN; +import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodedColumnQualifier; import java.io.IOException; import java.sql.DriverManager; @@ -30,6 +31,7 @@ import java.util.Collections; import java.util.Iterator; import java.util.List; import java.util.Map; +import java.util.Map.Entry; import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.HConstants; @@ -41,11 +43,14 @@ import org.apache.hadoop.hbase.client.Put; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; import org.apache.hadoop.hbase.util.ByteStringer; import org.apache.hadoop.hbase.util.Bytes; +import org.apache.hadoop.hbase.util.Pair; import org.apache.phoenix.compile.ExpressionCompiler; import org.apache.phoenix.compile.StatementContext; import 
org.apache.phoenix.coprocessor.generated.PTableProtos; import org.apache.phoenix.exception.DataExceedsCapacityException; +import org.apache.phoenix.expression.ArrayConstructorExpression; import org.apache.phoenix.expression.Expression; +import org.apache.phoenix.expression.LiteralExpression; import org.apache.phoenix.hbase.index.util.ImmutableBytesPtr; import org.apache.phoenix.hbase.index.util.KeyValueBuilder; import org.apache.phoenix.index.IndexMaintainer; @@ -56,13 +61,16 @@ import org.apache.phoenix.parse.SQLParser; import org.apache.phoenix.protobuf.ProtobufUtil; import org.apache.phoenix.query.QueryConstants; import org.apache.phoenix.schema.RowKeySchema.RowKeySchemaBuilder; +import org.apache.phoenix.schema.tuple.BaseTuple; import org.apache.phoenix.schema.types.PBinary; import org.apache.phoenix.schema.types.PChar; import org.apache.phoenix.schema.types.PDataType; import org.apache.phoenix.schema.types.PDouble; import org.apache.phoenix.schema.types.PFloat; +import org.apache.phoenix.schema.types.PVarbinary; import org.apache.phoenix.schema.types.PVarchar; import org.apache.phoenix.util.ByteUtil; +import org.apache.phoenix.util.EncodedColumnsUtil; import org.apache.phoenix.util.PhoenixRuntime; import org.apache.phoenix.util.SchemaUtil; import org.apache.phoenix.util.SizedUtil; @@ -79,6 +87,7 @@ import com.google.common.collect.ListMultimap; import com.google.common.collect.Lists; import com.google.common.collect.Maps; + /** * * Base class for PTable implementors. 
Provides abstraction for @@ -106,7 +115,8 @@ public class PTableImpl implements PTable { private List<PColumnFamily> families; private Map<byte[], PColumnFamily> familyByBytes; private Map<String, PColumnFamily> familyByString; - private ListMultimap<String,PColumn> columnsByName; + private ListMultimap<String, PColumn> columnsByName; + private ListMultimap<Pair<String, Integer>, PColumn> kvColumnsByEncodedColumnNames; private PName pkName; private Integer bucketNum; private RowKeySchema rowKeySchema; @@ -138,6 +148,8 @@ public class PTableImpl implements PTable { private boolean isNamespaceMapped; private String autoPartitionSeqName; private boolean isAppendOnlySchema; + private StorageScheme storageScheme; + private EncodedCQCounter encodedCQCounter; public PTableImpl() { this.indexes = Collections.emptyList(); @@ -169,8 +181,9 @@ public class PTableImpl implements PTable { this.isNamespaceMapped = isNamespaceMapped; } + // For indexes stored in shared physical tables public PTableImpl(PName tenantId, PName schemaName, PName tableName, long timestamp, List<PColumnFamily> families, - List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped) throws SQLException { // For indexes stored in shared physical tables + List<PColumn> columns, List<PName> physicalNames, Short viewIndexId, boolean multiTenant, boolean isNamespaceMpped, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException { this.pkColumns = this.allColumns = Collections.emptyList(); this.rowKeySchema = RowKeySchema.EMPTY_SCHEMA; this.indexes = Collections.emptyList(); @@ -184,7 +197,7 @@ public class PTableImpl implements PTable { init(tenantId, this.schemaName, this.tableName, PTableType.INDEX, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, this.schemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, null, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, 
indexType, baseColumnCount, rowKeyOrderOptimizable, - isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMpped, null, false); + isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMpped, null, false, storageScheme, encodedCQCounter); } public PTableImpl(long timeStamp) { // For delete marker @@ -228,7 +241,7 @@ public class PTableImpl implements PTable { indexes, table.isImmutableRows(), physicalNames, table.getDefaultFamilyName(), viewStatement, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), updateCacheFrequency, - table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, long timeStamp, List<PTable> indexes, PName parentSchemaName, String viewStatement) throws SQLException { @@ -238,7 +251,7 @@ public class PTableImpl implements PTable { indexes, table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), viewStatement, table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), - table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, Collection<PColumn> 
columns) throws SQLException { @@ -248,7 +261,7 @@ public class PTableImpl implements PTable { table.getIndexes(), table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), - table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, Collection<PColumn> columns) throws SQLException { @@ -258,7 +271,7 @@ public class PTableImpl implements PTable { table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), - table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, Collection<PColumn> columns, boolean isImmutableRows) throws SQLException { @@ -268,7 +281,7 @@ public class PTableImpl implements PTable { table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), 
table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), - table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, long timeStamp, long sequenceNumber, Collection<PColumn> columns, boolean isImmutableRows, boolean isWalDisabled, @@ -279,7 +292,7 @@ public class PTableImpl implements PTable { table.getIndexes(), isImmutableRows, table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), isWalDisabled, isMultitenant, storeNulls, table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), isTransactional, updateCacheFrequency, table.getIndexDisableTimestamp(), - isNamespaceMapped, table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + isNamespaceMapped, table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, PIndexState state) throws SQLException { @@ -290,7 +303,7 @@ public class PTableImpl implements PTable { table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), - table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), 
table.isAppendOnlySchema()); + table.getIndexDisableTimestamp(), table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table, boolean rowKeyOrderOptimizable) throws SQLException { @@ -301,7 +314,7 @@ public class PTableImpl implements PTable { table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), rowKeyOrderOptimizable, table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), table.isNamespaceMapped(), - table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PTable table) throws SQLException { @@ -312,7 +325,7 @@ public class PTableImpl implements PTable { table.isImmutableRows(), table.getPhysicalNames(), table.getDefaultFamilyName(), table.getViewStatement(), table.isWALDisabled(), table.isMultiTenant(), table.getStoreNulls(), table.getViewType(), table.getViewIndexId(), table.getIndexType(), table.getBaseColumnCount(), table.rowKeyOrderOptimizable(), table.isTransactional(), table.getUpdateCacheFrequency(), table.getIndexDisableTimestamp(), - table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema()); + table.isNamespaceMapped(), table.getAutoPartitionSeqName(), table.isAppendOnlySchema(), table.getStorageScheme(), table.getEncodedCQCounter()); } public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type, @@ -321,12 +334,12 @@ public class PTableImpl implements PTable { boolean isImmutableRows, List<PName> physicalNames, PName defaultFamilyName, 
String viewExpression, boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, - long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema) throws SQLException { + long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException { return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName, dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, QueryConstants.BASE_TABLE_BASE_COLUMN_COUNT, rowKeyOrderOptimizable, isTransactional, - updateCacheFrequency,indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema); + updateCacheFrequency,indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, encodedCQCounter); } public static PTableImpl makePTable(PName tenantId, PName schemaName, PName tableName, PTableType type, @@ -336,13 +349,13 @@ public class PTableImpl implements PTable { boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, int baseColumnCount, long indexDisableTimestamp, boolean isNamespaceMapped, - String autoPartitionSeqName, boolean isAppendOnlySchema) + String autoPartitionSeqName, boolean isAppendOnlySchema, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException { return new PTableImpl(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, dataSchemaName, 
dataTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, isTransactional, updateCacheFrequency, - indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema); + indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, encodedCQCounter); } private PTableImpl(PName tenantId, PName schemaName, PName tableName, PTableType type, PIndexState state, @@ -351,11 +364,11 @@ public class PTableImpl implements PTable { List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType, int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, - long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema) throws SQLException { + long indexDisableTimestamp, boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException { init(tenantId, schemaName, tableName, type, state, timeStamp, sequenceNumber, pkName, bucketNum, columns, parentSchemaName, parentTableName, indexes, isImmutableRows, physicalNames, defaultFamilyName, viewExpression, disableWAL, multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, - isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema); + isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoPartitionSeqName, isAppendOnlySchema, storageScheme, encodedCQCounter); } @Override @@ -389,7 +402,7 @@ public class PTableImpl implements PTable { List<PTable> indexes, boolean isImmutableRows, 
List<PName> physicalNames, PName defaultFamilyName, String viewExpression, boolean disableWAL, boolean multiTenant, boolean storeNulls, ViewType viewType, Short viewIndexId, IndexType indexType , int baseColumnCount, boolean rowKeyOrderOptimizable, boolean isTransactional, long updateCacheFrequency, long indexDisableTimestamp, - boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema) throws SQLException { + boolean isNamespaceMapped, String autoPartitionSeqName, boolean isAppendOnlySchema, StorageScheme storageScheme, EncodedCQCounter encodedCQCounter) throws SQLException { Preconditions.checkNotNull(schemaName); Preconditions.checkArgument(tenantId==null || tenantId.getBytes().length > 0); // tenantId should be null or not empty int estimatedSize = SizedUtil.OBJECT_SIZE * 2 + 23 * SizedUtil.POINTER_SIZE + 4 * SizedUtil.INT_SIZE + 2 * SizedUtil.LONG_SIZE + 2 * SizedUtil.INT_OBJECT_SIZE + @@ -425,10 +438,12 @@ public class PTableImpl implements PTable { this.isNamespaceMapped = isNamespaceMapped; this.autoPartitionSeqName = autoPartitionSeqName; this.isAppendOnlySchema = isAppendOnlySchema; + this.storageScheme = storageScheme; List<PColumn> pkColumns; PColumn[] allColumns; this.columnsByName = ArrayListMultimap.create(columns.size(), 1); + this.kvColumnsByEncodedColumnNames = (EncodedColumnsUtil.usesEncodedColumnNames(storageScheme) ? ArrayListMultimap.<Pair<String, Integer>, PColumn>create(columns.size(), 1) : null); int numPKColumns = 0; if (bucketNum != null) { // Add salt column to allColumns and pkColumns, but don't add to @@ -454,7 +469,26 @@ public class PTableImpl implements PTable { if (Objects.equal(familyName, dupColumn.getFamilyName())) { count++; if (count > 1) { - throw new ColumnAlreadyExistsException(null, name.getString(), columnName); + throw new ColumnAlreadyExistsException(schemaName.getString(), name.getString(), columnName); + } + } + } + } + //TODO: samarth understand the implication of this. 
+ if (kvColumnsByEncodedColumnNames != null) { + Integer cq = column.getEncodedColumnQualifier(); + String cf = column.getFamilyName() != null ? column.getFamilyName().getString() : null; + if (cf != null && cq != null) { + Pair<String, Integer> pair = new Pair<>(cf, cq); + if (kvColumnsByEncodedColumnNames.put(pair, column)) { + int count = 0; + for (PColumn dupColumn : kvColumnsByEncodedColumnNames.get(pair)) { + if (Objects.equal(familyName, dupColumn.getFamilyName())) { + count++; + if (count > 1) { + throw new ColumnAlreadyExistsException(schemaName.getString(), name.getString(), columnName); + } + } } } } @@ -518,7 +552,7 @@ public class PTableImpl implements PTable { .orderedBy(Bytes.BYTES_COMPARATOR); for (int i = 0; i < families.length; i++) { Map.Entry<PName,List<PColumn>> entry = iterator.next(); - PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue()); + PColumnFamily family = new PColumnFamilyImpl(entry.getKey(), entry.getValue(), EncodedColumnsUtil.usesEncodedColumnNames(storageScheme)); families[i] = family; familyByString.put(family.getName().getString(), family); familyByBytes.put(family.getName().getBytes(), family); @@ -544,9 +578,9 @@ public class PTableImpl implements PTable { for (PName name : this.physicalNames) { estimatedSize += name.getEstimatedSize(); } - this.estimatedSize = estimatedSize; this.baseColumnCount = baseColumnCount; + this.encodedCQCounter = encodedCQCounter; } @Override @@ -736,7 +770,7 @@ public class PTableImpl implements PTable { } @Override - public PColumn getColumn(String name) throws ColumnNotFoundException, AmbiguousColumnException { + public PColumn getPColumnForColumnName(String name) throws ColumnNotFoundException, AmbiguousColumnException { List<PColumn> columns = columnsByName.get(name); int size = columns.size(); if (size == 0) { @@ -755,6 +789,38 @@ public class PTableImpl implements PTable { } return columns.get(0); } + + @Override + public PColumn getPColumnForColumnQualifier(byte[] 
cf, byte[] cq) throws ColumnNotFoundException, AmbiguousColumnException { + Preconditions.checkNotNull(cq); + if (!EncodedColumnsUtil.usesEncodedColumnNames(this) || cf == null) { + String columnName = (String)PVarchar.INSTANCE.toObject(cq); + return getPColumnForColumnName(columnName); + } else { + Integer qualifier = getEncodedColumnQualifier(cq); + String family = (String)PVarchar.INSTANCE.toObject(cf); + List<PColumn> columns = kvColumnsByEncodedColumnNames.get(new Pair<>(family, qualifier)); + int size = columns.size(); + if (size == 0) { + //TODO: samarth should we have a column qualifier not found exception? + throw new ColumnNotFoundException(Bytes.toString(cq)); + } + //TODO: samarth I am not convinced if need this logic. +// if (size > 1) { +// for (PColumn column : columns) { +// if (QueryConstants.DEFAULT_COLUMN_FAMILY.equals(column.getFamilyName().getString())) { +// // Allow ambiguity with PK column or column in the default column family, +// // since a PK column cannot be prefixed and a user would not know how to +// // prefix a column in the default column family. +// return column; +// } +// } +// //TODO: samarth should we have a column qualifier not found exception? 
+// throw new AmbiguousColumnException(columns.get(0).getName().getString()); +// } + return columns.get(0); + } + } /** * @@ -775,6 +841,8 @@ public class PTableImpl implements PTable { private Mutation deleteRow; private final long ts; private final boolean hasOnDupKey; + // map from column name to value + private Map<PColumn, byte[]> columnToValueMap; public PRowImpl(KeyValueBuilder kvBuilder, ImmutableBytesWritable key, long ts, Integer bucketNum, boolean hasOnDupKey) { this.kvBuilder = kvBuilder; @@ -787,7 +855,7 @@ public class PTableImpl implements PTable { this.keyPtr = new ImmutableBytesPtr(key); this.key = ByteUtil.copyKeyBytesIfNecessary(key); } - + this.columnToValueMap = Maps.newHashMapWithExpectedSize(1);//TODO: samarth size it properly newMutations(); } @@ -809,13 +877,49 @@ public class PTableImpl implements PTable { // Include only deleteRow mutation if present because it takes precedence over all others mutations.add(deleteRow); } else { + // store all columns for a given column family in a single cell instead of one column per cell in order to improve write performance + if (storageScheme == StorageScheme.COLUMNS_STORED_IN_SINGLE_CELL) { + Put put = new Put(this.key); + if (isWALDisabled()) { + put.setDurability(Durability.SKIP_WAL); + } + // the setValues Put contains one cell per column, we need to convert it to a Put that contains a cell with all columns for a given column family + for (PColumnFamily family : families) { + byte[] columnFamily = family.getName().getBytes(); + Collection<PColumn> columns = family.getColumns(); + int maxEncodedColumnQualifier = Integer.MIN_VALUE; + for (PColumn column : columns) { + maxEncodedColumnQualifier = Math.max(maxEncodedColumnQualifier, column.getEncodedColumnQualifier()); + } + byte[][] colValues = new byte[maxEncodedColumnQualifier+1][]; + for (PColumn column : columns) { + colValues[column.getEncodedColumnQualifier()] = columnToValueMap.get(column); + } + + List<Expression> children = 
Lists.newArrayListWithExpectedSize(columns.size()); + // create an expression list with all the columns + for (int i=0; i<colValues.length; ++i) { + children.add(new LiteralExpression(colValues[i]==null ? ByteUtil.EMPTY_BYTE_ARRAY : colValues[i] )); + } + // we use ArrayConstructorExpression to serialize multiple columns into a single byte[] + // construct the ArrayConstructorExpression with a variable length data type since columns can be of fixed or variable length + ArrayConstructorExpression arrayExpression = new ArrayConstructorExpression(children, PVarbinary.INSTANCE, rowKeyOrderOptimizable); + ImmutableBytesWritable ptr = new ImmutableBytesWritable(); + arrayExpression.evaluate(new BaseTuple() {}, ptr); + ImmutableBytesPtr colFamilyPtr = new ImmutableBytesPtr(columnFamily); + addQuietly(put, kvBuilder, kvBuilder.buildPut(keyPtr, + colFamilyPtr, colFamilyPtr, ts, ptr)); + } + setValues = put; + } // Because we cannot enforce a not null constraint on a KV column (since we don't know if the row exists when - // we upsert it), se instead add a KV that is always emtpy. This allows us to imitate SQL semantics given the + // we upsert it), so instead add a KV that is always empty. This allows us to imitate SQL semantics given the // way HBase works. 
+ Pair<byte[], byte[]> emptyKvInfo = EncodedColumnsUtil.getEmptyKeyValueInfo(PTableImpl.this); addQuietly(setValues, kvBuilder, kvBuilder.buildPut(keyPtr, SchemaUtil.getEmptyColumnFamilyPtr(PTableImpl.this), - QueryConstants.EMPTY_COLUMN_BYTES_PTR, ts, - QueryConstants.EMPTY_COLUMN_VALUE_BYTES_PTR)); + new ImmutableBytesPtr(emptyKvInfo.getFirst()), ts, + new ImmutableBytesPtr(emptyKvInfo.getSecond()))); mutations.add(setValues); if (!unsetValues.isEmpty()) { mutations.add(unsetValues); @@ -844,7 +948,8 @@ public class PTableImpl implements PTable { public void setValue(PColumn column, byte[] byteValue) { deleteRow = null; byte[] family = column.getFamilyName().getBytes(); - byte[] qualifier = column.getName().getBytes(); + byte[] qualifier = getColumnQualifier(column); + ImmutableBytesPtr qualifierPtr = new ImmutableBytesPtr(qualifier); PDataType<?> type = column.getDataType(); // Check null, since some types have no byte representation for null if (byteValue == null) { @@ -864,7 +969,7 @@ public class PTableImpl implements PTable { // case of updates occurring due to the execution of the clause. 
removeIfPresent(setValues, family, qualifier); deleteQuietly(unsetValues, kvBuilder, kvBuilder.buildDeleteColumns(keyPtr, column - .getFamilyName().getBytesPtr(), column.getName().getBytesPtr(), ts)); + .getFamilyName().getBytesPtr(), qualifierPtr, ts)); } else { ImmutableBytesWritable ptr = new ImmutableBytesWritable(byteValue); Integer maxLength = column.getMaxLength(); @@ -877,9 +982,17 @@ public class PTableImpl implements PTable { ptr.set(byteValue); type.pad(ptr, maxLength, sortOrder); removeIfPresent(unsetValues, family, qualifier); - addQuietly(setValues, kvBuilder, kvBuilder.buildPut(keyPtr, - column.getFamilyName().getBytesPtr(), column.getName().getBytesPtr(), + // store all columns for a given column family in a single cell instead of one column per cell in order to improve write performance + // we don't need to do anything with unsetValues as it is only used when storeNulls is false, storeNulls is always true when storeColsInSingleCell is true + if (storageScheme == StorageScheme.COLUMNS_STORED_IN_SINGLE_CELL) { + columnToValueMap.put(column, ptr.get()); + } + else { + removeIfPresent(unsetValues, family, qualifier); + addQuietly(setValues, kvBuilder, kvBuilder.buildPut(keyPtr, + column.getFamilyName().getBytesPtr(), qualifierPtr, ts, ptr)); + } } } @@ -912,6 +1025,11 @@ public class PTableImpl implements PTable { deleteRow.setDurability(Durability.SKIP_WAL); } } + + private byte[] getColumnQualifier(PColumn column) { + return EncodedColumnsUtil.getColumnQualifier(column, PTableImpl.this); + } + } @Override @@ -1072,116 +1190,126 @@ public class PTableImpl implements PTable { public IndexType getIndexType() { return indexType; } - + /** * Construct a PTable instance from ProtoBuffered PTable instance * @param table */ public static PTable createFromProto(PTableProtos.PTable table) { - PName tenantId = null; - if(table.hasTenantId()){ - tenantId = PNameFactory.newName(table.getTenantId().toByteArray()); - } - PName schemaName = 
PNameFactory.newName(table.getSchemaNameBytes().toByteArray()); - PName tableName = PNameFactory.newName(table.getTableNameBytes().toByteArray()); - PTableType tableType = PTableType.values()[table.getTableType().ordinal()]; - PIndexState indexState = null; - if (table.hasIndexState()) { - indexState = PIndexState.fromSerializedValue(table.getIndexState()); - } - Short viewIndexId = null; - if(table.hasViewIndexId()){ - viewIndexId = (short)table.getViewIndexId(); - } - IndexType indexType = IndexType.getDefault(); - if(table.hasIndexType()){ - indexType = IndexType.fromSerializedValue(table.getIndexType().toByteArray()[0]); - } - long sequenceNumber = table.getSequenceNumber(); - long timeStamp = table.getTimeStamp(); - long indexDisableTimestamp = table.getIndexDisableTimestamp(); - PName pkName = null; - if (table.hasPkNameBytes()) { - pkName = PNameFactory.newName(table.getPkNameBytes().toByteArray()); - } - int bucketNum = table.getBucketNum(); - List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); - for (PTableProtos.PColumn curPColumnProto : table.getColumnsList()) { - columns.add(PColumnImpl.createFromProto(curPColumnProto)); - } - List<PTable> indexes = Lists.newArrayListWithExpectedSize(table.getIndexesCount()); - for (PTableProtos.PTable curPTableProto : table.getIndexesList()) { - indexes.add(createFromProto(curPTableProto)); - } + PName tenantId = null; + if(table.hasTenantId()){ + tenantId = PNameFactory.newName(table.getTenantId().toByteArray()); + } + PName schemaName = PNameFactory.newName(table.getSchemaNameBytes().toByteArray()); + PName tableName = PNameFactory.newName(table.getTableNameBytes().toByteArray()); + PTableType tableType = PTableType.values()[table.getTableType().ordinal()]; + PIndexState indexState = null; + if (table.hasIndexState()) { + indexState = PIndexState.fromSerializedValue(table.getIndexState()); + } + Short viewIndexId = null; + if(table.hasViewIndexId()){ + viewIndexId = 
(short)table.getViewIndexId(); + } + IndexType indexType = IndexType.getDefault(); + if(table.hasIndexType()){ + indexType = IndexType.fromSerializedValue(table.getIndexType().toByteArray()[0]); + } + long sequenceNumber = table.getSequenceNumber(); + long timeStamp = table.getTimeStamp(); + long indexDisableTimestamp = table.getIndexDisableTimestamp(); + PName pkName = null; + if (table.hasPkNameBytes()) { + pkName = PNameFactory.newName(table.getPkNameBytes().toByteArray()); + } + int bucketNum = table.getBucketNum(); + List<PColumn> columns = Lists.newArrayListWithExpectedSize(table.getColumnsCount()); + for (PTableProtos.PColumn curPColumnProto : table.getColumnsList()) { + columns.add(PColumnImpl.createFromProto(curPColumnProto)); + } + List<PTable> indexes = Lists.newArrayListWithExpectedSize(table.getIndexesCount()); + for (PTableProtos.PTable curPTableProto : table.getIndexesList()) { + indexes.add(createFromProto(curPTableProto)); + } - boolean isImmutableRows = table.getIsImmutableRows(); - PName parentSchemaName = null; - PName parentTableName = null; - if (table.hasParentNameBytes()) { - parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName((table.getParentNameBytes().toByteArray()))); - parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(table.getParentNameBytes().toByteArray())); - } - PName defaultFamilyName = null; - if (table.hasDefaultFamilyName()) { - defaultFamilyName = PNameFactory.newName(table.getDefaultFamilyName().toByteArray()); - } - boolean disableWAL = table.getDisableWAL(); - boolean multiTenant = table.getMultiTenant(); - boolean storeNulls = table.getStoreNulls(); - boolean isTransactional = table.getTransactional(); - ViewType viewType = null; - String viewStatement = null; - List<PName> physicalNames = Collections.emptyList(); - if (tableType == PTableType.VIEW) { - viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]); - } - if(table.hasViewStatement()){ - 
viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray()); - } - if (tableType == PTableType.VIEW || viewIndexId != null) { - physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount()); - for(int i = 0; i < table.getPhysicalNamesCount(); i++){ - physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray())); + boolean isImmutableRows = table.getIsImmutableRows(); + PName parentSchemaName = null; + PName parentTableName = null; + if (table.hasParentNameBytes()) { + parentSchemaName = PNameFactory.newName(SchemaUtil.getSchemaNameFromFullName((table.getParentNameBytes().toByteArray()))); + parentTableName = PNameFactory.newName(SchemaUtil.getTableNameFromFullName(table.getParentNameBytes().toByteArray())); + } + PName defaultFamilyName = null; + if (table.hasDefaultFamilyName()) { + defaultFamilyName = PNameFactory.newName(table.getDefaultFamilyName().toByteArray()); + } + boolean disableWAL = table.getDisableWAL(); + boolean multiTenant = table.getMultiTenant(); + boolean storeNulls = table.getStoreNulls(); + boolean isTransactional = table.getTransactional(); + ViewType viewType = null; + String viewStatement = null; + List<PName> physicalNames = Collections.emptyList(); + if (tableType == PTableType.VIEW) { + viewType = ViewType.fromSerializedValue(table.getViewType().toByteArray()[0]); + } + if(table.hasViewStatement()){ + viewStatement = (String) PVarchar.INSTANCE.toObject(table.getViewStatement().toByteArray()); + } + if (tableType == PTableType.VIEW || viewIndexId != null) { + physicalNames = Lists.newArrayListWithExpectedSize(table.getPhysicalNamesCount()); + for(int i = 0; i < table.getPhysicalNamesCount(); i++) { + physicalNames.add(PNameFactory.newName(table.getPhysicalNames(i).toByteArray())); + } + } + int baseColumnCount = -1; + if (table.hasBaseColumnCount()) { + baseColumnCount = table.getBaseColumnCount(); } - } - - int baseColumnCount = -1; - if (table.hasBaseColumnCount()) { 
- baseColumnCount = table.getBaseColumnCount(); - } - boolean rowKeyOrderOptimizable = false; - if (table.hasRowKeyOrderOptimizable()) { - rowKeyOrderOptimizable = table.getRowKeyOrderOptimizable(); - } - long updateCacheFrequency = 0; - if (table.hasUpdateCacheFrequency()) { - updateCacheFrequency = table.getUpdateCacheFrequency(); - } - boolean isNamespaceMapped=false; - if (table.hasIsNamespaceMapped()) { - isNamespaceMapped = table.getIsNamespaceMapped(); - } - String autoParititonSeqName = null; - if (table.hasAutoParititonSeqName()) { - autoParititonSeqName = table.getAutoParititonSeqName(); - } - boolean isAppendOnlySchema = false; - if (table.hasIsAppendOnlySchema()) { - isAppendOnlySchema = table.getIsAppendOnlySchema(); - } - - try { - PTableImpl result = new PTableImpl(); - result.init(tenantId, schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName, - (bucketNum == NO_SALTING) ? null : bucketNum, columns, parentSchemaName, parentTableName, indexes, - isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL, - multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, - isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoParititonSeqName, isAppendOnlySchema); - return result; - } catch (SQLException e) { - throw new RuntimeException(e); // Impossible - } + boolean rowKeyOrderOptimizable = false; + if (table.hasRowKeyOrderOptimizable()) { + rowKeyOrderOptimizable = table.getRowKeyOrderOptimizable(); + } + long updateCacheFrequency = 0; + if (table.hasUpdateCacheFrequency()) { + updateCacheFrequency = table.getUpdateCacheFrequency(); + } + boolean isNamespaceMapped=false; + if (table.hasIsNamespaceMapped()) { + isNamespaceMapped = table.getIsNamespaceMapped(); + } + String autoParititonSeqName = null; + if (table.hasAutoParititonSeqName()) { + autoParititonSeqName = table.getAutoParititonSeqName(); + } + boolean isAppendOnlySchema = false; 
+ if (table.hasIsAppendOnlySchema()) { + isAppendOnlySchema = table.getIsAppendOnlySchema(); + } + StorageScheme storageScheme = null; + if (table.hasStorageScheme()) { + storageScheme = StorageScheme.fromSerializedValue(table.getStorageScheme().toByteArray()[0]); + } + EncodedCQCounter encodedColumnQualifierCounter = EncodedColumnsUtil.usesEncodedColumnNames(storageScheme) ? new EncodedCQCounter() : EncodedCQCounter.NULL_COUNTER; + if (table.getEncodedCQCountersList() != null) { + encodedColumnQualifierCounter = new EncodedCQCounter(); + for (org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter cqCounterFromProto : table.getEncodedCQCountersList()) { + encodedColumnQualifierCounter.setValue(cqCounterFromProto.getColFamily(), cqCounterFromProto.getCounter()); + } + } + + try { + PTableImpl result = new PTableImpl(); + result.init(tenantId, schemaName, tableName, tableType, indexState, timeStamp, sequenceNumber, pkName, + (bucketNum == NO_SALTING) ? null : bucketNum, columns, parentSchemaName, parentTableName, indexes, + isImmutableRows, physicalNames, defaultFamilyName, viewStatement, disableWAL, + multiTenant, storeNulls, viewType, viewIndexId, indexType, baseColumnCount, rowKeyOrderOptimizable, + isTransactional, updateCacheFrequency, indexDisableTimestamp, isNamespaceMapped, autoParititonSeqName, isAppendOnlySchema, storageScheme, encodedColumnQualifierCounter); + return result; + } catch (SQLException e) { + throw new RuntimeException(e); // Impossible + } } public static PTableProtos.PTable toProto(PTable table) { @@ -1259,10 +1387,22 @@ public class PTableImpl implements PTable { builder.setUpdateCacheFrequency(table.getUpdateCacheFrequency()); builder.setIndexDisableTimestamp(table.getIndexDisableTimestamp()); builder.setIsNamespaceMapped(table.isNamespaceMapped()); - if (table.getAutoPartitionSeqName()!= null) { + if (table.getAutoPartitionSeqName() != null) { builder.setAutoParititonSeqName(table.getAutoPartitionSeqName()); } 
builder.setIsAppendOnlySchema(table.isAppendOnlySchema()); + if (table.getStorageScheme() != null) { + builder.setStorageScheme(ByteStringer.wrap(new byte[]{table.getStorageScheme().getSerializedValue()})); + } + if (table.getEncodedCQCounter() != null) { + Map<String, Integer> values = table.getEncodedCQCounter().values(); + for (Entry<String, Integer> cqCounter : values.entrySet()) { + org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.Builder cqBuilder = org.apache.phoenix.coprocessor.generated.PTableProtos.EncodedCQCounter.newBuilder(); + cqBuilder.setColFamily(cqCounter.getKey()); + cqBuilder.setCounter(cqCounter.getValue()); + builder.addEncodedCQCounters(cqBuilder.build()); + } + } return builder.build(); } @@ -1332,4 +1472,14 @@ public class PTableImpl implements PTable { } else if (!key.equals(other.getKey())) return false; return true; } + + @Override + public StorageScheme getStorageScheme() { + return storageScheme; + } + + @Override + public EncodedCQCounter getEncodedCQCounter() { + return encodedCQCounter; + } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java index 42699d9..017c75d 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/PTableKey.java @@ -28,7 +28,11 @@ public class PTableKey { public PTableKey(PName tenantId, String name) { Preconditions.checkNotNull(name); this.tenantId = tenantId; - this.name = name; + if (name.indexOf(QueryConstants.NAMESPACE_SEPARATOR) != -1) { + this.name = name.replace(QueryConstants.NAMESPACE_SEPARATOR, QueryConstants.NAME_SEPARATOR); + } else { + this.name = name; + } } public PName getTenantId() { 
http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java index 19dd1c1..9336938 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/ProjectedColumn.java @@ -39,6 +39,7 @@ public class ProjectedColumn extends DelegateColumn { return name; } + @Override public PName getFamilyName() { return familyName; } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java index 734a9ed..23cfd1b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/SaltingUtil.java @@ -38,7 +38,7 @@ public class SaltingUtil { public static final String SALTING_COLUMN_NAME = "_SALT"; public static final String SALTED_ROW_KEY_NAME = "_SALTED_KEY"; public static final PColumnImpl SALTING_COLUMN = new PColumnImpl( - PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, SortOrder.getDefault(), 0, null, false, null, false, false); + PNameFactory.newName(SALTING_COLUMN_NAME), null, PBinary.INSTANCE, 1, 0, false, 0, SortOrder.getDefault(), 0, null, false, null, false, false, null); public static final RowKeySchema VAR_BINARY_SALTED_SCHEMA = new RowKeySchemaBuilder(2) .addField(SALTING_COLUMN, false, SortOrder.getDefault()) .addField(SchemaUtil.VAR_BINARY_DATUM, false, 
SortOrder.getDefault()).build(); http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java index a8dc487..8028eb2 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/BaseTuple.java @@ -17,11 +17,50 @@ */ package org.apache.phoenix.schema.tuple; +import java.util.List; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; + public abstract class BaseTuple implements Tuple { + @Override + public int size() { + throw new UnsupportedOperationException(); + } + + @Override + public boolean isImmutable() { + throw new UnsupportedOperationException(); + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell getValue(int index) { + throw new UnsupportedOperationException(); + } + + @Override + public Cell getValue(byte [] family, byte [] qualifier) { + throw new UnsupportedOperationException(); + } + + @Override + public boolean getValue(byte [] family, byte [] qualifier, ImmutableBytesWritable ptr) { + throw new UnsupportedOperationException(); + } @Override public long getSequenceValue(int index) { throw new UnsupportedOperationException(); } + + @Override + public void setKeyValues(List<Cell> values) { + throw new UnsupportedOperationException(); + } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java ---------------------------------------------------------------------- diff --git 
a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java index 58b1eda..3430f5b 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/DelegateTuple.java @@ -17,6 +17,8 @@ */ package org.apache.phoenix.schema.tuple; +import java.util.List; + import org.apache.hadoop.hbase.Cell; import org.apache.hadoop.hbase.io.ImmutableBytesWritable; @@ -61,4 +63,9 @@ public class DelegateTuple implements Tuple { public long getSequenceValue(int index) { return delegate.getSequenceValue(index); } + + @Override + public void setKeyValues(List<Cell> values) { + delegate.setKeyValues(values); + } } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java new file mode 100644 index 0000000..f39bb1f --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/EncodedColumnQualiferCellsList.java @@ -0,0 +1,569 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. + */ +package org.apache.phoenix.schema.tuple; + +import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.phoenix.query.QueryConstants.ENCODED_CQ_COUNTER_INITIAL_VALUE; +import static org.apache.phoenix.query.QueryConstants.ENCODED_EMPTY_COLUMN_NAME; +import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodedColumnQualifier; + +import java.util.Collection; +import java.util.ConcurrentModificationException; +import java.util.Iterator; +import java.util.List; +import java.util.ListIterator; +import java.util.NoSuchElementException; + +import javax.annotation.concurrent.NotThreadSafe; + +import org.apache.hadoop.hbase.Cell; +import org.apache.phoenix.schema.PTable.StorageScheme; + +/** + * List implementation that provides indexed based look up when the cell column qualifiers are positive numbers. + * These qualifiers are generated by using one of the column qualifier encoding schemes specified in {@link StorageScheme}. + * The api methods in this list assume that the caller wants to see + * and add only non null elements in the list. + * <p> + * Please note that this implementation doesn't implement all the optional methods of the + * {@link List} interface. Such unsupported methods could violate the basic invariance of the list that every cell with + * an encoded column qualifier has a fixed position in the list. + * </p> + * <p> + * An important performance characteristic of this list is that doing look up on the basis of index via {@link #get(int)} + * is an O(n) operation. 
This makes iterating through the list using {@link #get(int)} an O(n^2) operation. + * Instead, for iterating through the list, one should use the iterators created through {@link #iterator()} or + * {@link #listIterator()}. Do note that getting an element using {@link #getCellForColumnQualifier(int)} is an O(1) operation + * and should generally be the way for getting elements out of the list. + * </p> + */ +@NotThreadSafe +public class EncodedColumnQualiferCellsList implements List<Cell> { + + private int minQualifier; + private int maxQualifier; + private int nonReservedRangeOffset; + private final Cell[] array; + private int numNonNullElements; + private int firstNonNullElementIdx = -1; + private static final int RESERVED_RANGE_SIZE = ENCODED_CQ_COUNTER_INITIAL_VALUE - ENCODED_EMPTY_COLUMN_NAME; + // Used by iterators to figure out if the list was structurally modified. + private int modCount = 0; + + public EncodedColumnQualiferCellsList(int minQ, int maxQ) { + checkArgument(minQ <= maxQ, "Invalid arguments. Min: " + minQ + + ". Max: " + maxQ); + this.minQualifier = minQ; + this.maxQualifier = maxQ; + int size = 0; + if (maxQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + size = RESERVED_RANGE_SIZE; + } else if (minQ < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + size = (maxQ - minQ + 1); + } else { + size = RESERVED_RANGE_SIZE + (maxQ - minQ + 1); + } + this.array = new Cell[size]; + this.nonReservedRangeOffset = minQ > ENCODED_CQ_COUNTER_INITIAL_VALUE ? 
minQ - ENCODED_CQ_COUNTER_INITIAL_VALUE : 0; + } + + @Override + public int size() { + return numNonNullElements; + } + + @Override + public boolean isEmpty() { + return numNonNullElements == 0; + } + + @Override + public boolean contains(Object o) { + return indexOf(o) >= 0; + } + + @Override + public Object[] toArray() { + Object[] toReturn = new Object[numNonNullElements]; + int counter = 0; + if (numNonNullElements > 0) { + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + toReturn[counter++] = array[i]; + } + } + } + return toReturn; + } + + @Override + @SuppressWarnings("unchecked") + public <T> T[] toArray(T[] a) { + T[] toReturn = + (T[]) java.lang.reflect.Array.newInstance(a.getClass().getComponentType(), + numNonNullElements); + int counter = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + toReturn[counter++] = (T) array[i]; + } + } + return toReturn; + } + + @Override + public boolean add(Cell e) { + if (e == null) { + throw new NullPointerException(); + } + int columnQualifier = getEncodedColumnQualifier(e.getQualifierArray(), e.getQualifierOffset(), e.getQualifierLength()); + + checkQualifierRange(columnQualifier); + int idx = getArrayIndex(columnQualifier); + if (array[idx] == null) { + numNonNullElements++; + } + array[idx] = e; + if (firstNonNullElementIdx == -1) { + firstNonNullElementIdx = idx; + } else if (idx < firstNonNullElementIdx) { + firstNonNullElementIdx = idx; + } + modCount++; + /* + * Note that we don't care about equality of the element being added with the element + * already present at the index. 
+ */ + return true; + } + + @Override + public boolean remove(Object o) { + if (o == null) { + return false; + } + Cell e = (Cell) o; + int i = 0; + while (i < array.length) { + if (array[i] != null && array[i].equals(e)) { + array[i] = null; + numNonNullElements--; + if (numNonNullElements == 0) { + firstNonNullElementIdx = -1; + } else if (firstNonNullElementIdx == i) { + // the element being removed was the first non-null element we knew + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + firstNonNullElementIdx = i; + } else { + firstNonNullElementIdx = -1; + } + } + modCount++; + return true; + } + i++; + } + return false; + } + + @Override + public boolean containsAll(Collection<?> c) { + boolean containsAll = true; + Iterator<?> itr = c.iterator(); + while (itr.hasNext()) { + containsAll &= (indexOf(itr.next()) >= 0); + } + return containsAll; + } + + @Override + public boolean addAll(Collection<? extends Cell> c) { + boolean changed = false; + for (Cell cell : c) { + if (c == null) { + throw new NullPointerException(); + } + changed |= add(cell); + } + return changed; + } + + @Override + public boolean addAll(int index, Collection<? 
extends Cell> c) { + throwGenericUnsupportedOperationException(); + return false; + } + + @Override + public boolean removeAll(Collection<?> c) { /* Removes every element of c that is present, one remove(Object) at a time; true if anything was removed. */ + Iterator<?> itr = c.iterator(); + boolean changed = false; + while (itr.hasNext()) { + changed |= remove(itr.next()); + } + return changed; + } + + @Override + public boolean retainAll(Collection<?> collection) { /* Keeps only cells that are also present (equal cell, same qualifier) in collection; only EncodedColumnQualiferCellsList arguments are supported. */ + boolean changed = false; + // Optimize if the passed collection is an instance of EncodedColumnQualiferCellsList + if (collection instanceof EncodedColumnQualiferCellsList) { + EncodedColumnQualiferCellsList list = (EncodedColumnQualiferCellsList) collection; + ListIterator<Cell> listItr = this.listIterator(); + while (listItr.hasNext()) { + Cell cellInThis = listItr.next(); + int qualifier = getEncodedColumnQualifier(cellInThis.getQualifierArray(), + cellInThis.getQualifierOffset(), cellInThis.getQualifierLength()); + try { + Cell cellInParam = list.getCellForColumnQualifier(qualifier); + if (cellInParam != null && cellInParam.equals(cellInThis)) { + continue; + } + listItr.remove(); + changed = true; + } catch (IndexOutOfBoundsException expected) { /* exception used as control flow: a qualifier outside the other list's range simply means "not present" */ + // this could happen when the qualifier of cellInParam lies out of + // the range of this list. 
+ listItr.remove(); + changed = true; + } + } + } else { + throw new UnsupportedOperationException( + "Operation only supported for collections of type EncodedColumnQualiferCellsList"); + } + return changed; + } + + @Override + public void clear() { /* Structural modification: null out every slot and reset the bookkeeping counters. */ + for (int i = 0; i < array.length; i++) { + array[i] = null; + } + firstNonNullElementIdx = -1; + numNonNullElements = 0; + modCount++; + } + + @Override + public Cell get(int index) { /* Positional access is O(capacity): walks the sparse backing array counting non-null slots until index is reached. */ + rangeCheck(index); + int numNonNullElementsFound = 0; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + numNonNullElementsFound++; + if (numNonNullElementsFound == index + 1) { + return array[i]; + } + } + } + throw new IllegalStateException("There was no element present in the list at index " + + index + " even though number of elements in the list are " + size()); + } + + @Override + public Cell set(int index, Cell e) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public void add(int index, Cell element) { + throwGenericUnsupportedOperationException(); + } + + @Override + public Cell remove(int index) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public int indexOf(Object o) { /* Returns the ordinal position (count of preceding non-null slots), not the raw array slot. */ + if (o == null || isEmpty()) { + return -1; + } else { + int numNonNull = -1; + for (int i = 0; i < array.length; i++) { + if (array[i] != null) { + numNonNull++; + } + if (o.equals(array[i])) { + return numNonNull; + } + } + } + return -1; + } + + @Override + public int lastIndexOf(Object o) { /* Mirror of indexOf scanning from the tail; lastIndex counts down from size(). */ + if (o == null || isEmpty()) { + return -1; + } + int lastIndex = numNonNullElements; + for (int i = array.length - 1; i >= 0; i--) { + if (array[i] != null) { + lastIndex--; + } + if (o.equals(array[i])) { + return lastIndex; + } + } + return -1; + } + + @Override + public ListIterator<Cell> listIterator() { + return new ListItr(); + } + + @Override + public ListIterator<Cell> listIterator(int index) { + throwGenericUnsupportedOperationException(); + return null; + } + + 
@Override + public List<Cell> subList(int fromIndex, int toIndex) { + throwGenericUnsupportedOperationException(); + return null; + } + + @Override + public Iterator<Cell> iterator() { + return new Itr(); + } + + public Cell getCellForColumnQualifier(int columnQualifier) { /* O(1) lookup by encoded qualifier; returns null when no cell is stored for that qualifier. */ + checkQualifierRange(columnQualifier); + int idx = getArrayIndex(columnQualifier); + Cell c = array[idx]; + return c; + } + + public Cell getFirstCell() { /* NOTE(review): relies on firstNonNullElementIdx, which Itr.remove() below does not refresh — verify it cannot go stale after removals. */ + if (firstNonNullElementIdx == -1) { + throw new NoSuchElementException("No elements present in the list"); + } + return array[firstNonNullElementIdx]; + } + + private void checkQualifierRange(int qualifier) { + if (qualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + return; // space in the array for reserved range is always allocated. + } + if (qualifier < minQualifier || qualifier > maxQualifier) { + throw new IndexOutOfBoundsException("Qualifier " + qualifier + + " is out of the valid range - (" + minQualifier + ", " + maxQualifier + ")"); + } + } + + private void rangeCheck(int index) { + if (index < 0 || index >= size()) { + throw new IndexOutOfBoundsException(); + } + } + + private int getArrayIndex(int columnQualifier) { /* Reserved qualifiers (< ENCODED_CQ_COUNTER_INITIAL_VALUE) map 1:1 to slots; the rest are shifted down by nonReservedRangeOffset. */ + checkArgument(columnQualifier >= ENCODED_EMPTY_COLUMN_NAME); + if (columnQualifier < ENCODED_CQ_COUNTER_INITIAL_VALUE) { + return columnQualifier; + } + return columnQualifier - nonReservedRangeOffset; + } + + private void throwGenericUnsupportedOperationException() { + throw new UnsupportedOperationException( + "Operation cannot be supported because it potentially violates the invariance contract of this list implementation"); + } + + private class Itr implements Iterator<Cell> { /* Forward iterator that skips null slots of the sparse backing array. */ + protected int nextIndex = 0; + protected int lastRet = -1; + protected int expectedModCount = modCount; + + private Itr() { + moveForward(true); + } + + @Override + public boolean hasNext() { + return nextIndex != -1; + } + + @Override + public Cell next() { + checkForCoModification(); + if (!hasNext()) { + throw new 
NoSuchElementException(); + } + Cell next = array[nextIndex]; + lastRet = nextIndex; + moveForward(false); + modCount++; /* NOTE(review): next() bumps modCount although it is not a structural modification — any other live iterator over this list will then fail with ConcurrentModificationException; confirm this is intentional. */ + expectedModCount = modCount; + return next; + } + + @Override + public void remove() { + if (lastRet < 0) { + throw new IllegalStateException(); + } + checkForCoModification(); + array[lastRet] = null; + lastRet = -1; + numNonNullElements--; /* NOTE(review): firstNonNullElementIdx is not updated here (see getFirstCell) — verify. */ + modCount++; + expectedModCount = modCount; + } + + protected void moveForward(boolean init) { /* Advances nextIndex to the next non-null slot, or -1 when exhausted. */ + int i = init ? 0 : nextIndex + 1; + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + nextIndex = i; + } else { + nextIndex = -1; + } + } + + protected void checkForCoModification() { + if (modCount != expectedModCount) { + throw new ConcurrentModificationException(); + } + } + + } + + private class ListItr extends Itr implements ListIterator<Cell> { /* Bidirectional variant; previousIndex tracks the non-null slot before nextIndex. */ + private int previousIndex = -1; + + private ListItr() { + moveForward(true); + } + + @Override + public boolean hasNext() { + return nextIndex != -1; + } + + @Override + public boolean hasPrevious() { + return previousIndex != -1; + } + + @Override + public Cell previous() { + if (previousIndex == -1) { + throw new NoSuchElementException(); + } + checkForCoModification(); + lastRet = previousIndex; + movePointersBackward(); + return array[lastRet]; + } + + @Override + public int nextIndex() { + return nextIndex; + } + + @Override + public int previousIndex() { + return previousIndex; + } + + @Override + public void remove() { + if (lastRet == nextIndex) { + moveNextPointer(nextIndex); + } + super.remove(); + expectedModCount = modCount; + } + + @Override + public void set(Cell e) { /* set() only accepts a cell whose qualifier maps to the slot last returned — the list's position is fixed by the qualifier, not by the caller. */ + if (lastRet == -1) { + throw new IllegalStateException(); + } + int columnQualifier = getEncodedColumnQualifier(e.getQualifierArray(), e.getQualifierOffset(), e.getQualifierLength()); + int idx = getArrayIndex(columnQualifier); + if (idx != lastRet) { + throw new IllegalArgumentException("Cell " + e + " with column qualifier " + + columnQualifier 
+ " belongs at index " + idx + + ". It cannot be added at the position " + lastRet + + " to which the previous next() or previous() was pointing to."); + } + EncodedColumnQualiferCellsList.this.add(e); /* presumably add(e) stores e in the slot derived from its qualifier (== lastRet per the check above) — add(Cell) is declared earlier in this class; confirm. */ + expectedModCount = modCount; + } + + @Override + public void add(Cell e) { + throwGenericUnsupportedOperationException(); + } + + @Override + protected void moveForward(boolean init) { + if (!init) { + previousIndex = nextIndex; + } + int i = init ? 0 : nextIndex + 1; + moveNextPointer(i); + } + + private void moveNextPointer(int i) { + while (i < array.length && (array[i]) == null) { + i++; + } + if (i < array.length) { + nextIndex = i; + } else { + nextIndex = -1; + } + } + + private void movePointersBackward() { + nextIndex = previousIndex; + int i = previousIndex - 1; + movePreviousPointer(i); + } + + private void movePreviousPointer(int i) { + for (; i >= 0; i--) { + if (array[i] != null) { + previousIndex = i; + break; + } + } + if (i < 0) { + previousIndex = -1; + } + } + } + +} http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java index 53f155b..d946870 100644 --- a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/MultiKeyValueTuple.java @@ -36,6 +36,7 @@ public class MultiKeyValueTuple extends BaseTuple { } /** Caller must not modify the list that is passed here */ + @Override public void setKeyValues(List<Cell> values) { this.values = values; } http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java 
---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java new file mode 100644 index 0000000..08cafe0 --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedMultiKeyValueTuple.java @@ -0,0 +1,87 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.schema.tuple; + +import static com.google.common.base.Preconditions.checkArgument; +import static org.apache.phoenix.util.EncodedColumnsUtil.getEncodedColumnQualifier; + +import java.util.List; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; + +/** + * Tuple that uses the {@link EncodedColumnQualiferCellsList} as its backing store, so cells are addressed by their encoded column qualifier (position) rather than by comparing qualifier bytes. + */ +public class PositionBasedMultiKeyValueTuple extends BaseTuple { + private EncodedColumnQualiferCellsList values; + + public PositionBasedMultiKeyValueTuple() {} + + public PositionBasedMultiKeyValueTuple(List<Cell> values) { /* Only EncodedColumnQualiferCellsList is accepted; the List parameter type keeps the interface uniform with the other Tuple implementations. */ + checkArgument(values instanceof EncodedColumnQualiferCellsList, "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); + this.values = (EncodedColumnQualiferCellsList)values; + } + + /** Caller must not modify the list that is passed here */ + @Override + public void setKeyValues(List<Cell> values) { + checkArgument(values instanceof EncodedColumnQualiferCellsList, "PositionBasedMultiKeyValueTuple only works with lists of type EncodedColumnQualiferCellsList"); + this.values = (EncodedColumnQualiferCellsList)values; + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { /* Exposes the row key of the first cell without copying. */ + Cell value = values.getFirstCell(); + ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } + + @Override + public boolean isImmutable() { + return true; + } + + @Override + public Cell getValue(byte[] family, byte[] qualifier) { /* NOTE(review): the family argument is ignored — presumably valid because all cells belong to a single column family; confirm. */ + return values.getCellForColumnQualifier(getEncodedColumnQualifier(qualifier)); + } + + @Override + public String toString() { + return values.toString(); + } + + @Override + public int size() { + return values.size(); + } + + @Override + public Cell getValue(int index) { + return values.get(index); + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, + ImmutableBytesWritable ptr) { + Cell kv = getValue(family, qualifier); + if (kv == null) + return false; + ptr.set(kv.getValueArray(), 
kv.getValueOffset(), kv.getValueLength()); + return true; + }} http://git-wip-us.apache.org/repos/asf/phoenix/blob/670b53a9/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java ---------------------------------------------------------------------- diff --git a/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java new file mode 100644 index 0000000..109cfc3 --- /dev/null +++ b/phoenix-core/src/main/java/org/apache/phoenix/schema/tuple/PositionBasedResultTuple.java @@ -0,0 +1,126 @@ +/* + * Licensed to the Apache Software Foundation (ASF) under one + * or more contributor license agreements. See the NOTICE file + * distributed with this work for additional information + * regarding copyright ownership. The ASF licenses this file + * to you under the Apache License, Version 2.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software + * distributed under the License is distributed on an "AS IS" BASIS, + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + * See the License for the specific language governing permissions and + * limitations under the License. 
+ */ +package org.apache.phoenix.schema.tuple; + +import static com.google.common.base.Preconditions.checkArgument; + +import java.util.Iterator; +import java.util.List; + +import org.apache.hadoop.hbase.Cell; +import org.apache.hadoop.hbase.KeyValue; +import org.apache.hadoop.hbase.io.ImmutableBytesWritable; +import org.apache.hadoop.hbase.util.Bytes; +import org.apache.phoenix.util.EncodedColumnsUtil; + +public class PositionBasedResultTuple extends BaseTuple { /* Result tuple backed by an EncodedColumnQualiferCellsList: cells are fetched by encoded column qualifier (array position) instead of a search over qualifier bytes. */ + private final EncodedColumnQualiferCellsList cells; + + public PositionBasedResultTuple(List<Cell> list) { /* Only EncodedColumnQualiferCellsList is accepted; List keeps the signature uniform with other Tuple implementations. */ + checkArgument(list instanceof EncodedColumnQualiferCellsList, "Invalid list type"); + this.cells = (EncodedColumnQualiferCellsList)list; + } + + @Override + public void getKey(ImmutableBytesWritable ptr) { /* Exposes the row key of the first cell without copying. */ + Cell value = cells.getFirstCell(); + ptr.set(value.getRowArray(), value.getRowOffset(), value.getRowLength()); + } + + @Override + public boolean isImmutable() { + return true; + } + + @Override + public KeyValue getValue(byte[] family, byte[] qualifier) { /* NOTE(review): family is ignored, and ensureKeyValue may copy the cell into a KeyValue — confirm both are acceptable here. */ + int columnQualifier = EncodedColumnsUtil.getEncodedColumnQualifier(qualifier); + return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(cells.getCellForColumnQualifier(columnQualifier)); + } + + @Override + public String toString() { /* Debug dump in the same "keyvalues={...}" shape HBase Result uses. */ + StringBuilder sb = new StringBuilder(); + sb.append("keyvalues="); + if(this.cells == null || this.cells.isEmpty()) { + sb.append("NONE"); + return sb.toString(); + } + sb.append("{"); + boolean moreThanOne = false; + for(Cell kv : this.cells) { + if(moreThanOne) { + sb.append(", \n"); + } else { + moreThanOne = true; + } + sb.append(kv.toString()+"/value="+Bytes.toString(kv.getValueArray(), + kv.getValueOffset(), kv.getValueLength())); + } + sb.append("}\n"); + return sb.toString(); + } + + @Override + public int size() { + return cells.size(); + } + + @Override + public KeyValue getValue(int index) { /* index 0 takes the O(1) getFirstCell() fast path; other indexes use the O(capacity) positional scan in get(int). */ + return org.apache.hadoop.hbase.KeyValueUtil.ensureKeyValue(index == 0 ? 
cells.getFirstCell() : cells.get(index)); + } + + @Override + public boolean getValue(byte[] family, byte[] qualifier, + ImmutableBytesWritable ptr) { + KeyValue kv = getValue(family, qualifier); + if (kv == null) + return false; + ptr.set(kv.getValueArray(), kv.getValueOffset(), kv.getValueLength()); + return true; + } + + public Iterator<Cell> getTupleIterator() { + return new TupleIterator(cells.iterator()); + } + + private static class TupleIterator implements Iterator<Cell> { /* Thin pass-through over the backing list's iterator (static: no hidden reference to the enclosing tuple). */ + + private final Iterator<Cell> delegate; + private TupleIterator(Iterator<Cell> delegate) { + this.delegate = delegate; + } + + @Override + public boolean hasNext() { + return delegate.hasNext(); + } + + @Override + public Cell next() { + return delegate.next(); + } + + @Override + public void remove() { + delegate.remove(); + } + + } +}