Merge commit '0171259d05a2dd5ec901ccadda016361312b59a9' into cassandra-3.0

* commit '0171259d05a2dd5ec901ccadda016361312b59a9':
  Fix handling of clustering key > 64K
Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/40ab6312
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/40ab6312
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/40ab6312

Branch: refs/heads/cassandra-3.0
Commit: 40ab6312d014524908d064d022cc63f1a3adf149
Parents: d354ddc 0171259
Author: Sylvain Lebresne <sylv...@datastax.com>
Authored: Thu Jun 23 10:58:14 2016 +0200
Committer: Sylvain Lebresne <sylv...@datastax.com>
Committed: Thu Jun 23 11:03:16 2016 +0200

----------------------------------------------------------------------
 CHANGES.txt                                     |  1 +
 .../cql3/statements/ModificationStatement.java  | 10 ++++++
 .../cassandra/net/OutboundTcpConnection.java    |  3 +-
 .../apache/cassandra/utils/ByteBufferUtil.java  |  6 ++--
 .../org/apache/cassandra/cql3/CQLTester.java    |  2 ++
 .../cql3/validation/operations/InsertTest.java  | 37 +++++++++++++++-----
 .../cql3/validation/operations/SelectTest.java  | 21 +++++++++--
 7 files changed, 66 insertions(+), 14 deletions(-)
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/CHANGES.txt
----------------------------------------------------------------------
diff --cc CHANGES.txt
index 26f24b0,59a9794..d57fb7d
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,37 -1,9 +1,38 @@@
-2.2.7
+3.0.8
+ * Fix upgrading schema with super columns with non-text subcomparators (CASSANDRA-12023)
+ * Add TimeWindowCompactionStrategy (CASSANDRA-9666)
+Merged from 2.2:
  * Validate bloom_filter_fp_chance against lowest supported value when the table is created (CASSANDRA-11920)
- * RandomAccessReader: call isEOF() only when rebuffering, not for every read operation (CASSANDRA-12013)
  * Don't send erroneous NEW_NODE notifications on restart (CASSANDRA-11038)
  * StorageService shutdown hook should use a volatile variable (CASSANDRA-11984)
+Merged from 2.1:
++ * Prevent select statements with clustering key > 64k (CASSANDRA-11882)
+ * Fix clock skew corrupting other nodes with paxos (CASSANDRA-11991)
+ * Remove distinction between non-existing static columns and existing but null in LWTs (CASSANDRA-9842)
+ * Cache local ranges when calculating repair neighbors (CASSANDRA-11934)
+ * Allow LWT operation on static column with only partition keys (CASSANDRA-10532)
+ * Create interval tree over canonical sstables to avoid missing sstables during streaming (CASSANDRA-11886)
+ * cqlsh COPY FROM: shutdown parent cluster after forking, to avoid corrupting SSL connections (CASSANDRA-11749)
+
+
+3.0.7
+ * Fix legacy serialization of Thrift-generated non-compound range tombstones
+   when communicating with 2.x nodes (CASSANDRA-11930)
+ * Fix Directories instantiations where CFS.initialDirectories should be used (CASSANDRA-11849)
+ * Avoid referencing DatabaseDescriptor in AbstractType (CASSANDRA-11912)
+ * Fix sstables not being protected from removal during index build (CASSANDRA-11905)
+ * cqlsh: Suppress stack trace from Read/WriteFailures (CASSANDRA-11032)
+ * Remove unneeded code to repair index summaries that have
+   been improperly down-sampled (CASSANDRA-11127)
+ * Avoid WriteTimeoutExceptions during commit log replay due to materialized
+   view lock contention (CASSANDRA-11891)
+ * Prevent OOM failures on SSTable corruption, improve tests for corruption detection (CASSANDRA-9530)
+ * Use CFS.initialDirectories when clearing snapshots (CASSANDRA-11705)
+ * Allow compaction strategies to disable early open (CASSANDRA-11754)
+ * Refactor Materialized View code (CASSANDRA-11475)
+ * Update Java Driver (CASSANDRA-11615)
+Merged from 2.2:
  * Persist local metadata earlier in startup sequence (CASSANDRA-11742)
  * Run CommitLog tests with different compression settings (CASSANDRA-9039)
  * cqlsh: fix tab completion for case-sensitive identifiers (CASSANDRA-11664)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
index 12bade5,5ffcc8a..01c2ad1
--- a/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/ModificationStatement.java
@@@ -606,137 -726,49 +606,147 @@@ public abstract class ModificationState
       * @param now the current timestamp in microseconds to use if no timestamp is user provided.
       *
       * @return list of the mutations
-      * @throws InvalidRequestException on invalid requests
       */
      private Collection<? extends IMutation> getMutations(QueryOptions options, boolean local, long now)
-     throws RequestExecutionException, RequestValidationException
+     {
+         UpdatesCollector collector = new UpdatesCollector(Collections.singletonMap(cfm.cfId, updatedColumns), 1);
+         addUpdates(collector, options, local, now);
+         collector.validateIndexedColumns();
+
+         return collector.toMutations();
+     }
+
+     final void addUpdates(UpdatesCollector collector,
+                           QueryOptions options,
+                           boolean local,
+                           long now)
      {
          List<ByteBuffer> keys = buildPartitionKeyNames(options);
-         Composite clusteringPrefix = createClusteringPrefix(options);
-         UpdateParameters params = makeUpdateParameters(keys, clusteringPrefix, options, local, now);
+         if (type.allowClusteringColumnSlices()
+             && restrictions.hasClusteringColumnsRestriction()
+             && restrictions.isColumnRange())
+         {
+             Slices slices = createSlice(options);
+
+             // If all the ranges were invalid we do not need to do anything.
+             if (slices.isEmpty())
+                 return;
+
+             UpdateParameters params = makeUpdateParameters(keys,
+                                                            new ClusteringIndexSliceFilter(slices, false),
+                                                            options,
+                                                            DataLimits.NONE,
+                                                            local,
+                                                            now);
+             for (ByteBuffer key : keys)
+             {
+                 ThriftValidation.validateKey(cfm, key);
+                 DecoratedKey dk = cfm.decorateKey(key);
-         Collection<IMutation> mutations = new ArrayList<IMutation>(keys.size());
-         for (ByteBuffer key: keys)
+                 PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency());
+
+                 for (Slice slice : slices)
+                     addUpdateForKey(upd, slice, params);
+             }
+         }
+         else
          {
-             ThriftValidation.validateKey(cfm, key);
-             ColumnFamily cf = ArrayBackedSortedColumns.factory.create(cfm);
-             addUpdateForKey(cf, key, clusteringPrefix, params);
-             Mutation mut = new Mutation(cfm.ksName, key, cf);
+             NavigableSet<Clustering> clusterings = createClustering(options);
+
+             UpdateParameters params = makeUpdateParameters(keys, clusterings, options, local, now);
-             mutations.add(isCounter() ? new CounterMutation(mut, options.getConsistency()) : mut);
+             for (ByteBuffer key : keys)
+             {
+                 ThriftValidation.validateKey(cfm, key);
+                 DecoratedKey dk = cfm.decorateKey(key);
+
+                 PartitionUpdate upd = collector.getPartitionUpdate(cfm, dk, options.getConsistency());
+
+                 if (clusterings.isEmpty())
+                 {
+                     addUpdateForKey(upd, Clustering.EMPTY, params);
+                 }
+                 else
+                 {
+                     for (Clustering clustering : clusterings)
++                    {
++                        for (ByteBuffer c : clustering.getRawValues())
++                        {
++                            if (c != null && c.remaining() > FBUtilities.MAX_UNSIGNED_SHORT)
++                                throw new InvalidRequestException(String.format("Key length of %d is longer than maximum of %d",
++                                                                                clustering.dataSize(),
++                                                                                FBUtilities.MAX_UNSIGNED_SHORT));
++                        }
++
+                         addUpdateForKey(upd, clustering, params);
++                    }
+                 }
+             }
          }
-         return mutations;
      }

-     public UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
-                                                  Composite prefix,
-                                                  QueryOptions options,
-                                                  boolean local,
-                                                  long now)
-     throws RequestExecutionException, RequestValidationException
+     private Slices createSlice(QueryOptions options)
+     {
+         SortedSet<Slice.Bound> startBounds = restrictions.getClusteringColumnsBounds(Bound.START, options);
+         SortedSet<Slice.Bound> endBounds = restrictions.getClusteringColumnsBounds(Bound.END, options);
+
+         return toSlices(startBounds, endBounds);
+     }
+
+     private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
+                                                   NavigableSet<Clustering> clusterings,
+                                                   QueryOptions options,
+                                                   boolean local,
+                                                   long now)
+     {
+         if (clusterings.contains(Clustering.STATIC_CLUSTERING))
+             return makeUpdateParameters(keys,
+                                         new ClusteringIndexSliceFilter(Slices.ALL, false),
+                                         options,
+                                         DataLimits.cqlLimits(1),
+                                         local,
+                                         now);
+
+         return makeUpdateParameters(keys,
+                                     new ClusteringIndexNamesFilter(clusterings, false),
+                                     options,
+                                     DataLimits.NONE,
+                                     local,
+                                     now);
+     }
+
+     private UpdateParameters makeUpdateParameters(Collection<ByteBuffer> keys,
+                                                   ClusteringIndexFilter filter,
+                                                   QueryOptions options,
+                                                   DataLimits limits,
+                                                   boolean local,
+                                                   long now)
      {
          // Some lists operation requires reading
-         Map<ByteBuffer, CQL3Row> rows = readRequiredRows(keys, prefix, local, options.getConsistency());
-         return new UpdateParameters(cfm, options, getTimestamp(now, options), getTimeToLive(options), rows);
+         Map<DecoratedKey, Partition> lists = readRequiredLists(keys, filter, limits, local, options.getConsistency());
+         return new UpdateParameters(cfm, updatedColumns(), options, getTimestamp(now, options), getTimeToLive(options), lists);
      }

-     /**
-      * If there are conditions on the statement, this is called after the where clause and conditions have been
-      * processed to check that they are compatible.
-      * @throws InvalidRequestException
-      */
-     protected void validateWhereClauseForConditions() throws InvalidRequestException
+     private Slices toSlices(SortedSet<Slice.Bound> startBounds, SortedSet<Slice.Bound> endBounds)
      {
-         // no-op by default
+         assert startBounds.size() == endBounds.size();
+
+         Slices.Builder builder = new Slices.Builder(cfm.comparator);
+
+         Iterator<Slice.Bound> starts = startBounds.iterator();
+         Iterator<Slice.Bound> ends = endBounds.iterator();
+
+         while (starts.hasNext())
+         {
+             Slice slice = Slice.make(starts.next(), ends.next());
+             if (!slice.isEmpty(cfm.comparator))
+             {
+                 builder.add(slice);
+             }
+         }
+
+         return builder.build();
      }

      public static abstract class Parsed extends CFStatement

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/src/java/org/apache/cassandra/net/OutboundTcpConnection.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/utils/ByteBufferUtil.java
index 27f46b6,6c676e0..c1b0721
--- a/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
+++ b/src/java/org/apache/cassandra/utils/ByteBufferUtil.java
@@@ -306,7 -298,8 +306,8 @@@ public class ByteBufferUti
      public static void writeWithShortLength(ByteBuffer buffer, DataOutputPlus out) throws IOException
      {
          int length = buffer.remaining();
-         assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT : length;
-         assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT :
-             String.format("Attempted serializing to buffer exceeded maximum of %s bytes: %s", FBUtilities.MAX_UNSIGNED_SHORT, length);
++        assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT
++            : String.format("Attempted serializing to buffer exceeded maximum of %s bytes: %s", FBUtilities.MAX_UNSIGNED_SHORT, length);
          out.writeShort(length);
          out.write(buffer);
      }
@@@ -314,7 -307,8 +315,8 @@@
      public static void writeWithShortLength(byte[] buffer, DataOutput out) throws IOException
      {
          int length = buffer.length;
-         assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT : length;
-         assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT :
-             String.format("Attempted serializing to buffer exceeded maximum of %s bytes: %s", FBUtilities.MAX_UNSIGNED_SHORT, length);
++        assert 0 <= length && length <= FBUtilities.MAX_UNSIGNED_SHORT
++            : String.format("Attempted serializing to buffer exceeded maximum of %s bytes: %s", FBUtilities.MAX_UNSIGNED_SHORT, length);
          out.writeShort(length);
          out.write(buffer);
      }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/test/unit/org/apache/cassandra/cql3/CQLTester.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/CQLTester.java
index a7145fc,98b8e23..fe03db4
--- a/test/unit/org/apache/cassandra/cql3/CQLTester.java
+++ b/test/unit/org/apache/cassandra/cql3/CQLTester.java
@@@ -64,8 -68,6 +64,9 @@@ import org.apache.cassandra.transport.E
  import org.apache.cassandra.transport.Server;
  import org.apache.cassandra.transport.messages.ResultMessage;
  import org.apache.cassandra.utils.ByteBufferUtil;
++import org.apache.cassandra.utils.FBUtilities;
+
+import static junit.framework.Assert.assertNotNull;

  /**
   * Base class for CQL tests.
@@@ -77,9 -79,9 +78,10 @@@ public abstract class CQLTeste
      public static final String KEYSPACE = "cql_test_keyspace";
      public static final String KEYSPACE_PER_TEST = "cql_test_keyspace_alt";
      protected static final boolean USE_PREPARED_VALUES = Boolean.valueOf(System.getProperty("cassandra.test.use_prepared", "true"));
+     protected static final boolean REUSE_PREPARED = Boolean.valueOf(System.getProperty("cassandra.test.reuse_prepared", "true"));
      protected static final long ROW_CACHE_SIZE_IN_MB = Integer.valueOf(System.getProperty("cassandra.test.row_cache_size_in_mb", "0"));
      private static final AtomicInteger seqNumber = new AtomicInteger();
-     protected static final ByteBuffer TOO_BIG = ByteBuffer.allocate(1024 * 65);
++    protected static final ByteBuffer TOO_BIG = ByteBuffer.allocate(FBUtilities.MAX_UNSIGNED_SHORT + 1024);
      private static org.apache.cassandra.transport.Server server;
      protected static final int nativePort;

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
index 3c49989,1d532cb..a030613
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/InsertTest.java
@@@ -58,235 -59,20 +59,253 @@@ public class InsertTest extends CQLTest
      }

      @Test
-     public void testOverlyLargeInsertPK() throws Throwable
+     public void testInsert() throws Throwable
      {
-         createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY ((a), b))");
+         testInsert(false);
+         testInsert(true);
+     }
+
+     private void testInsert(boolean forceFlush) throws Throwable
+     {
+         createTable("CREATE TABLE %s (partitionKey int," +
+                     "clustering int," +
+                     "value int," +
+                     " PRIMARY KEY (partitionKey, clustering))");
+
+         execute("INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
+         execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+         flush(forceFlush);
+
+         assertRows(execute("SELECT * FROM %s"),
+                    row(0, 0, null),
+                    row(0, 1, 1));
+
+         // Missing primary key columns
+         assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                              "INSERT INTO %s (clustering, value) VALUES (0, 1)");
+         assertInvalidMessage("Some clustering keys are missing: clustering",
+                              "INSERT INTO %s (partitionKey, value) VALUES (0, 2)");
+
+         // multiple time the same value
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
+
+         // multiple time same primary key element in WHERE clause
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
+
+         // unknown identifiers
+         assertInvalidMessage("Unknown identifier clusteringx",
+                              "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
+
+         assertInvalidMessage("Unknown identifier valuex",
+                              "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
+     }
+
+     @Test
+     public void testInsertWithCompactFormat() throws Throwable
+     {
+         testInsertWithCompactFormat(false);
+         testInsertWithCompactFormat(true);
+     }
+
+     private void testInsertWithCompactFormat(boolean forceFlush) throws Throwable
+     {
+         createTable("CREATE TABLE %s (partitionKey int," +
+                     "clustering int," +
+                     "value int," +
+                     " PRIMARY KEY (partitionKey, clustering)) WITH COMPACT STORAGE");
+
+         execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 0, 0)");
+         execute("INSERT INTO %s (partitionKey, clustering, value) VALUES (0, 1, 1)");
+         flush(forceFlush);
+
+         assertRows(execute("SELECT * FROM %s"),
+                    row(0, 0, 0),
+                    row(0, 1, 1));
+
+         // Invalid Null values for the clustering key or the regular column
+         assertInvalidMessage("Some clustering keys are missing: clustering",
+                              "INSERT INTO %s (partitionKey, value) VALUES (0, 0)");
+         assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                              "INSERT INTO %s (partitionKey, clustering) VALUES (0, 0)");
+
+         // Missing primary key columns
+         assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                              "INSERT INTO %s (clustering, value) VALUES (0, 1)");
+
+         // multiple time the same value
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering, value, value) VALUES (0, 0, 2, 2)");
+
+         // multiple time same primary key element in WHERE clause
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering, clustering, value) VALUES (0, 0, 0, 2)");
+
+         // unknown identifiers
+         assertInvalidMessage("Unknown identifier clusteringx",
+                              "INSERT INTO %s (partitionKey, clusteringx, value) VALUES (0, 0, 2)");
+
+         assertInvalidMessage("Unknown identifier valuex",
+                              "INSERT INTO %s (partitionKey, clustering, valuex) VALUES (0, 0, 2)");
+     }
+
+     @Test
+     public void testInsertWithTwoClusteringColumns() throws Throwable
+     {
+         testInsertWithTwoClusteringColumns(false);
+         testInsertWithTwoClusteringColumns(true);
+     }
+
+     private void testInsertWithTwoClusteringColumns(boolean forceFlush) throws Throwable
+     {
+         createTable("CREATE TABLE %s (partitionKey int," +
-                     "clustering_1 int," +
-                     "clustering_2 int," +
-                     "value int," +
-                     " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
++                    "clustering_1 int," +
++                    "clustering_2 int," +
++                    "value int," +
++                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+         flush(forceFlush);
+
+         assertRows(execute("SELECT * FROM %s"),
+                    row(0, 0, 0, null),
+                    row(0, 0, 1, 1));
+
+         // Missing primary key columns
+         assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                              "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
+         assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                              "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
+
+         // multiple time the same value
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
+
+         // multiple time same primary key element in WHERE clause
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
+
+         // unknown identifiers
+         assertInvalidMessage("Unknown identifier clustering_1x",
+                              "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
+
+         assertInvalidMessage("Unknown identifier valuex",
+                              "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
+     }
+
+     @Test
+     public void testInsertWithCompactStorageAndTwoClusteringColumns() throws Throwable
+     {
+         testInsertWithCompactStorageAndTwoClusteringColumns(false);
+         testInsertWithCompactStorageAndTwoClusteringColumns(true);
+     }
+
+     private void testInsertWithCompactStorageAndTwoClusteringColumns(boolean forceFlush) throws Throwable
+     {
+         createTable("CREATE TABLE %s (partitionKey int," +
+                     "clustering_1 int," +
+                     "clustering_2 int," +
+                     "value int," +
+                     " PRIMARY KEY (partitionKey, clustering_1, clustering_2)) WITH COMPACT STORAGE");
+
+         execute("INSERT INTO %s (partitionKey, clustering_1, value) VALUES (0, 0, 0)");
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0)");
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (0, 0, 1, 1)");
+         flush(forceFlush);
+
+         assertRows(execute("SELECT * FROM %s"),
+                    row(0, 0, null, 0),
+                    row(0, 0, 0, 0),
+                    row(0, 0, 1, 1));
+
+         // Invalid Null values for the clustering key or the regular column
+         assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                              "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 0)");
+         assertInvalidMessage("Column value is mandatory for this COMPACT STORAGE table",
+                              "INSERT INTO %s (partitionKey, clustering_1, clustering_2) VALUES (0, 0, 0)");
+
+         // Missing primary key columns
+         assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                              "INSERT INTO %s (clustering_1, clustering_2, value) VALUES (0, 0, 1)");
+         assertInvalidMessage("PRIMARY KEY column \"clustering_2\" cannot be restricted as preceding column \"clustering_1\" is not restricted",
+                              "INSERT INTO %s (partitionKey, clustering_2, value) VALUES (0, 0, 2)");
+
+         // multiple time the same value
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering_1, value, clustering_2, value) VALUES (0, 0, 2, 0, 2)");
+
+         // multiple time same primary key element in WHERE clause
+         assertInvalidMessage("The column names contains duplicates",
+                              "INSERT INTO %s (partitionKey, clustering_1, clustering_1, clustering_2, value) VALUES (0, 0, 0, 0, 2)");
+
+         // unknown identifiers
+         assertInvalidMessage("Unknown identifier clustering_1x",
+                              "INSERT INTO %s (partitionKey, clustering_1x, clustering_2, value) VALUES (0, 0, 0, 2)");
+
+         assertInvalidMessage("Unknown identifier valuex",
+                              "INSERT INTO %s (partitionKey, clustering_1, clustering_2, valuex) VALUES (0, 0, 0, 2)");
+     }
+
+     @Test
+     public void testInsertWithAStaticColumn() throws Throwable
+     {
+         testInsertWithAStaticColumn(false);
+         testInsertWithAStaticColumn(true);
+     }
+
+     private void testInsertWithAStaticColumn(boolean forceFlush) throws Throwable
+     {
+         createTable("CREATE TABLE %s (partitionKey int," +
-                     "clustering_1 int," +
-                     "clustering_2 int," +
-                     "value int," +
-                     "staticValue text static," +
-                     " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
++                    "clustering_1 int," +
++                    "clustering_2 int," +
++                    "value int," +
++                    "staticValue text static," +
++                    " PRIMARY KEY (partitionKey, clustering_1, clustering_2))");
+
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, staticValue) VALUES (0, 0, 0, 'A')");
+         execute("INSERT INTO %s (partitionKey, staticValue) VALUES (1, 'B')");
+         flush(forceFlush);
+
+         assertRows(execute("SELECT * FROM %s"),
+                    row(1, null, null, "B", null),
+                    row(0, 0, 0, "A", null));
+
+         execute("INSERT INTO %s (partitionKey, clustering_1, clustering_2, value) VALUES (1, 0, 0, 0)");
+         flush(forceFlush);
+         assertRows(execute("SELECT * FROM %s"),
+                    row(1, 0, 0, "B", 0),
+                    row(0, 0, 0, "A", null));
+
+         // Missing primary key columns
+         assertInvalidMessage("Some partition key parts are missing: partitionkey",
+                              "INSERT INTO %s (clustering_1, clustering_2, staticValue) VALUES (0, 0, 'A')");
+         assertInvalidMessage("Some clustering keys are missing: clustering_1",
+                              "INSERT INTO %s (partitionKey, clustering_2, staticValue) VALUES (0, 0, 'A')");
+     }
+
+     private void flush(boolean forceFlush)
+     {
+         if (forceFlush)
+             flush();
+     }
++
++    @Test
++    public void testPKInsertWithValueOver64K() throws Throwable
++    {
++        createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY (a, b))");
+
+         assertInvalidThrow(InvalidRequestException.class,
+                            "INSERT INTO %s (a, b) VALUES (?, 'foo')", new String(TOO_BIG.array()));
+     }
+
+     @Test
-     public void testOverlyLargeInsertCK() throws Throwable
++    public void testCKInsertWithValueOver64K() throws Throwable
+     {
-         createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY ((a), b))");
++        createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY (a, b))");
+
+         assertInvalidThrow(InvalidRequestException.class,
+                            "INSERT INTO %s (a, b) VALUES ('foo', ?)", new String(TOO_BIG.array()));
+     }
  }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/40ab6312/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
----------------------------------------------------------------------
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
index b49bd87,9b10d0e..65bfb32
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
@@@ -2250,12 -2032,9 +2250,29 @@@ public class SelectTest extends CQLTest
          assertInvalidMessage("Index expression values may not be larger than 64K",
                               "SELECT * FROM %s WHERE c = ? ALLOW FILTERING", TOO_BIG);
+
+         dropIndex("DROP INDEX %s.test");
+         assertEmpty(execute("SELECT * FROM %s WHERE c = ? ALLOW FILTERING", TOO_BIG));
+     }
+
+     @Test
++    public void testPKQueryWithValueOver64K() throws Throwable
++    {
++        createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY (a, b))");
++
++        assertInvalidThrow(InvalidRequestException.class,
++                           "SELECT * FROM %s WHERE a = ?", new String(TOO_BIG.array()));
++    }
++
++    @Test
++    public void testCKQueryWithValueOver64K() throws Throwable
++    {
++        createTable("CREATE TABLE %s (a text, b text, PRIMARY KEY (a, b))");
++
++        execute("SELECT * FROM %s WHERE a = 'foo' AND b = ?", new String(TOO_BIG.array()));
+     }
+
+     @Test
      public void testFilteringOnCompactTablesWithoutIndicesAndWithMaps() throws Throwable
      {

----------------------------------------------