This is an automated email from the ASF dual-hosted git repository.

adelapena pushed a commit to branch cassandra-3.11
in repository https://gitbox.apache.org/repos/asf/cassandra.git

commit 377ceb675c9c64afcfb3decbd4659f02b6f584a5
Merge: d833df8 6cd2d07
Author: Andrés de la Peña <a.penya.gar...@gmail.com>
AuthorDate: Mon Apr 27 16:21:45 2020 +0100

    Merge branch 'cassandra-3.0' into cassandra-3.11
    
    # Conflicts:
    #   src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
    #   src/java/org/apache/cassandra/service/pager/MultiPartitionPager.java
    #   test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java

 CHANGES.txt                                        |   3 +-
 .../cql3/restrictions/StatementRestrictions.java   |   9 +-
 .../index/internal/CassandraIndexSearcher.java     |   7 +-
 .../internal/composites/CompositesSearcher.java    |   2 +-
 .../service/pager/AbstractQueryPager.java          |  28 ++-
 .../service/pager/PartitionRangeQueryPager.java    |   5 +
 .../cassandra/cql3/DistinctQueryPagingTest.java    | 262 +++++++++++++++++++++
 .../cassandra/cql3/IndexQueryPagingTest.java       |  54 +++++
 .../validation/entities/StaticColumnsTest.java     |   5 +-
 .../validation/operations/SelectLimitTest.java     | 156 ++++++++++++
 .../cql3/validation/operations/SelectTest.java     | 161 +------------
 .../index/internal/CassandraIndexTest.java         |  26 +-
 12 files changed, 536 insertions(+), 182 deletions(-)
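
For context, the behavioural change merged from 3.0 for CASSANDRA-14242 is that selecting only static columns is now allowed when the query is served through a secondary index; StatementRestrictions previously rejected such queries with "Queries using 2ndary indexes don't support selecting only static columns" (see the StatementRestrictions and StaticColumnsTest hunks below). The following is a minimal CQLTester-style sketch of the newly supported shape, not code from the patch; the table, index and values are illustrative:

    import org.junit.Test;

    import org.apache.cassandra.cql3.CQLTester;

    public class StaticColumnIndexSelectionSketch extends CQLTester
    {
        @Test
        public void selectOnlyStaticColumnThroughIndex() throws Throwable
        {
            // 's' is a static column carrying the indexed value, 'c' is a clustering column.
            createTable("CREATE TABLE %s (k int, c int, s int static, v int, PRIMARY KEY (k, c))");
            createIndex("CREATE INDEX ON %s (s)");

            execute("INSERT INTO %s (k, c, s, v) VALUES (0, 0, 42, 1)");
            execute("INSERT INTO %s (k, c, s, v) VALUES (0, 1, 42, 1)");

            // Previously rejected; after this merge the index query returns one result
            // per live row of the matching partition.
            assertRows(execute("SELECT s FROM %s WHERE s = 42"), row(42), row(42));
        }
    }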

diff --cc CHANGES.txt
index cd69117,efe35a7..fd7ad6a
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@@ -1,8 -1,5 +1,7 @@@
 -3.0.21
 - * Fix infinite loop on index query paging in tables with clustering (CASSANDRA-14242)
 +3.11.7
 + * Allow sstableloader to use SSL on the native port (CASSANDRA-14904)
 +Merged from 3.0:
- =======
- 3.0.21
++ * Allow selecting static column only when querying static index (CASSANDRA-14242)
   * cqlsh return non-zero status when STDIN CQL fails (CASSANDRA-15623)
   * Don't skip sstables in slice queries based only on local min/max/deletion timestamp (CASSANDRA-15690)
   * Memtable memory allocations may deadlock (CASSANDRA-15367)
diff --cc src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
index 3b7504f,84c6958..7636ecc
--- a/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
+++ b/src/java/org/apache/cassandra/cql3/restrictions/StatementRestrictions.java
@@@ -268,7 -252,7 +268,7 @@@ public final class StatementRestriction
          }
  
          if (usesSecondaryIndexing)
--            validateSecondaryIndexSelections(selectsOnlyStaticColumns);
++            validateSecondaryIndexSelections();
      }
  
      private void addRestriction(Restriction restriction)
@@@ -814,15 -832,15 +814,10 @@@
                          && nonPrimaryKeyRestrictions.hasMultipleContains());
      }
  
--    private void validateSecondaryIndexSelections(boolean selectsOnlyStaticColumns)
++    private void validateSecondaryIndexSelections()
      {
          checkFalse(keyIsInRelation(),
                     "Select on indexed columns and with IN clause for the PRIMARY KEY are not supported");
--        // When the user only select static columns, the intent is that we don't query the whole partition but just
--        // the static parts. But 1) we don't have an easy way to do that with 2i and 2) since we don't support index on
--        // static columns
--        // so far, 2i means that you've restricted a non static column, so the query is somewhat non-sensical.
--        checkFalse(selectsOnlyStaticColumns, "Queries using 2ndary indexes don't support selecting only static columns");
      }
  
      /**
diff --cc src/java/org/apache/cassandra/index/internal/CassandraIndexSearcher.java
index 7b622e3,d6b39e6..7c23345
--- a/src/java/org/apache/cassandra/index/internal/CassandraIndexSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/CassandraIndexSearcher.java
@@@ -132,14 -132,14 +132,15 @@@ public abstract class CassandraIndexSea
                      DecoratedKey startKey = (DecoratedKey) range.left;
                      DecoratedKey endKey = (DecoratedKey) range.right;
  
 -                    Slice.Bound start = Slice.Bound.BOTTOM;
 -                    Slice.Bound end = Slice.Bound.TOP;
 +                    ClusteringBound start = ClusteringBound.BOTTOM;
 +                    ClusteringBound end = ClusteringBound.TOP;
  
                      /*
--                     * For index queries over a range, we can't do a whole lot better than querying everything for the key range, though for
--                     * slice queries where we can slightly restrict the beginning and end.
++                     * For index queries over a range, we can't do a whole lot better than querying everything for the
++                     * key range, though for slice queries where we can slightly restrict the beginning and end. We can
++                     * not do this optimisation for static column indexes.
                       */
--                    if (!dataRange.isNamesQuery())
++                    if (!dataRange.isNamesQuery() && !index.indexedColumn.isStatic())
                      {
                          ClusteringIndexSliceFilter startSliceFilter = 
((ClusteringIndexSliceFilter) dataRange.clusteringIndexFilter(
                                                                                
                                                     startKey));
diff --cc src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
index 2007800,f8a7c66..65caec3
--- a/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
+++ b/src/java/org/apache/cassandra/index/internal/composites/CompositesSearcher.java
@@@ -109,67 -108,43 +109,67 @@@ public class CompositesSearcher extend
                         nextEntry = index.decodeEntry(indexKey, indexHits.next());
                      }
  
 -                    // Gather all index hits belonging to the same partition and query the data for those hits.
 -                    // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
 -                    // 1 read per index hit. However, this basically mean materializing all hits for a partition
 -                    // in memory so we should consider adding some paging mechanism. However, index hits should
 -                    // be relatively small so it's much better than the previous code that was materializing all
 -                    // *data* for a given partition.
 -                    BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
 -                    List<IndexEntry> entries = new ArrayList<>();
 +                    SinglePartitionReadCommand dataCmd;
                     DecoratedKey partitionKey = index.baseCfs.decorateKey(nextEntry.indexedKey);
 -
 -                    while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
 +                    List<IndexEntry> entries = new ArrayList<>();
 +                    if (isStaticColumn())
                      {
 -                        // We're queried a slice of the index, but some hits may not match some of the clustering column constraints
 -                        if (isMatchingEntry(partitionKey, nextEntry, command))
 -                        {
 -                            clusterings.add(nextEntry.indexedEntryClustering);
 -                            entries.add(nextEntry);
 +                        // The index hit may not match the command key constraint
 +                        if (!isMatchingEntry(partitionKey, nextEntry, command)) {
 +                            nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
 +                            continue;
                          }
  
 +                        // If the index is on a static column, we just need to do a full read on the partition.
 +                        // Note that we want to re-use the command.columnFilter() in case of future change.
 +                        dataCmd = SinglePartitionReadCommand.create(index.baseCfs.metadata,
 +                                                                    command.nowInSec(),
 +                                                                    command.columnFilter(),
 +                                                                    RowFilter.NONE,
 +                                                                    DataLimits.NONE,
 +                                                                    partitionKey,
-                                                                     new ClusteringIndexSliceFilter(Slices.ALL, false));
++                                                                    command.clusteringIndexFilter(partitionKey));
 +                        entries.add(nextEntry);
 +                        nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
                      }
 +                    else
 +                    {
 +                        // Gather all index hits belonging to the same partition and query the data for those hits.
 +                        // TODO: it's much more efficient to do 1 read for all hits to the same partition than doing
 +                        // 1 read per index hit. However, this basically mean materializing all hits for a partition
 +                        // in memory so we should consider adding some paging mechanism. However, index hits should
 +                        // be relatively small so it's much better than the previous code that was materializing all
 +                        // *data* for a given partition.
 +                        BTreeSet.Builder<Clustering> clusterings = BTreeSet.builder(index.baseCfs.getComparator());
 +                        while (nextEntry != null && partitionKey.getKey().equals(nextEntry.indexedKey))
 +                        {
 +                            // We're queried a slice of the index, but some hits may not match some of the clustering column constraints
 +                            if (isMatchingEntry(partitionKey, nextEntry, command))
 +                            {
 +                                clusterings.add(nextEntry.indexedEntryClustering);
 +                                entries.add(nextEntry);
 +                            }
 +
 +                            nextEntry = indexHits.hasNext() ? index.decodeEntry(indexKey, indexHits.next()) : null;
 +                        }
  
 -                    // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
 -                    if (clusterings.isEmpty())
 -                        continue;
 +                        // Because we've eliminated entries that don't match the clustering columns, it's possible we added nothing
 +                        if (clusterings.isEmpty())
 +                            continue;
 +
 +                        // Query the gathered index hits. We still need to filter stale hits from the resulting query.
 +                        ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
 +                        dataCmd = SinglePartitionReadCommand.create(isForThrift(),
 +                                                                    index.baseCfs.metadata,
 +                                                                    command.nowInSec(),
 +                                                                    command.columnFilter(),
 +                                                                    command.rowFilter(),
 +                                                                    DataLimits.NONE,
 +                                                                    partitionKey,
 +                                                                    filter,
 +                                                                    null);
 +                    }
  
 -                    // Query the gathered index hits. We still need to filter stale hits from the resulting query.
 -                    ClusteringIndexNamesFilter filter = new ClusteringIndexNamesFilter(clusterings.build(), false);
 -                    SinglePartitionReadCommand dataCmd = SinglePartitionReadCommand.create(isForThrift(),
 -                                                                                           index.baseCfs.metadata,
 -                                                                                           command.nowInSec(),
 -                                                                                           command.columnFilter(),
 -                                                                                           command.rowFilter(),
 -                                                                                           DataLimits.NONE,
 -                                                                                           partitionKey,
 -                                                                                           filter,
 -                                                                                           null);
                     @SuppressWarnings("resource") // We close right away if empty, and if it's assign to next it will be called either
                     // by the next caller of next, or through closing this iterator is this come before.
                      UnfilteredRowIterator dataIter =
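
One note before the pager diff that follows: all three fetch methods in AbstractQueryPager gain the same guard, in which nextPageReadCommand(pageSize) may now return null to signal that there is no further page worth reading, and the pager then marks itself exhausted and hands back an empty iterator instead of executing a command. Below is a deliberately simplified sketch of that control flow; the types are hypothetical stand-ins, while the real code works with ReadCommand, PartitionIterator and Transformation.apply.

    import java.util.Collections;
    import java.util.Iterator;
    import java.util.function.Supplier;

    // Hypothetical, stripped-down illustration of the guard added in AbstractQueryPager.
    abstract class PagerSketch
    {
        protected boolean exhausted;

        // Stand-in for nextPageReadCommand(pageSize): null now means "nothing left to read".
        protected abstract Supplier<Iterator<String>> nextPageReadCommand(int pageSize);

        public Iterator<String> fetchPage(int pageSize)
        {
            Supplier<Iterator<String>> readCommand = nextPageReadCommand(pageSize);
            if (readCommand == null)
            {
                exhausted = true;                    // remember that paging is finished
                return Collections.emptyIterator();  // empty page instead of executing a command
            }
            return readCommand.get();                // real code: Transformation.apply(readCommand.execute(...), pager)
        }
    }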
diff --cc src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
index f5134fa,2eecfee..fa3f262
--- a/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
+++ b/src/java/org/apache/cassandra/service/pager/AbstractQueryPager.java
@@@ -66,28 -67,46 +66,45 @@@ abstract class AbstractQueryPager imple
  
          pageSize = Math.min(pageSize, remaining);
          Pager pager = new RowPager(limits.forPaging(pageSize), command.nowInSec());
-         return Transformation.apply(nextPageReadCommand(pageSize).execute(consistency, clientState, queryStartNanoTime), pager);
+         ReadCommand readCommand = nextPageReadCommand(pageSize);
+         if (readCommand == null)
+         {
+             exhausted = true;
+             return EmptyIterators.partition();
+         }
 -        return Transformation.apply(readCommand.execute(consistency, clientState), pager);
++        return Transformation.apply(readCommand.execute(consistency, clientState, queryStartNanoTime), pager);
      }
  
 -    public PartitionIterator fetchPageInternal(int pageSize, ReadOrderGroup orderGroup) throws RequestValidationException, RequestExecutionException
 +    public PartitionIterator fetchPageInternal(int pageSize, ReadExecutionController executionController)
      {
          if (isExhausted())
              return EmptyIterators.partition();
  
          pageSize = Math.min(pageSize, remaining);
          RowPager pager = new RowPager(limits.forPaging(pageSize), command.nowInSec());
-         return Transformation.apply(nextPageReadCommand(pageSize).executeInternal(executionController), pager);
+         ReadCommand readCommand = nextPageReadCommand(pageSize);
+         if (readCommand == null)
+         {
+             exhausted = true;
+             return EmptyIterators.partition();
+         }
 -        return Transformation.apply(readCommand.executeInternal(orderGroup), pager);
++        return Transformation.apply(readCommand.executeInternal(executionController), pager);
      }
  
 -    public UnfilteredPartitionIterator fetchPageUnfiltered(CFMetaData cfm, int pageSize, ReadOrderGroup orderGroup)
 +    public UnfilteredPartitionIterator fetchPageUnfiltered(CFMetaData cfm, int pageSize, ReadExecutionController executionController)
      {
          if (isExhausted())
              return EmptyIterators.unfilteredPartition(cfm, false);
  
          pageSize = Math.min(pageSize, remaining);
 -
 +        UnfilteredPager pager = new UnfilteredPager(limits.forPaging(pageSize), command.nowInSec());
- 
-         return Transformation.apply(nextPageReadCommand(pageSize).executeLocally(executionController), pager);
+         ReadCommand readCommand = nextPageReadCommand(pageSize);
+         if (readCommand == null)
+         {
+             exhausted = true;
+             return EmptyIterators.unfilteredPartition(cfm, false);
+         }
 -        UnfilteredPager pager = new UnfilteredPager(limits.forPaging(pageSize), command.nowInSec());
 -        return Transformation.apply(readCommand.executeLocally(orderGroup), pager);
++        return Transformation.apply(readCommand.executeLocally(executionController), pager);
      }
  
      private class UnfilteredPager extends Pager<Unfiltered>
@@@ -185,11 -204,7 +202,12 @@@
          public Row applyToStatic(Row row)
          {
              if (!row.isEmpty())
 +            {
-                 remainingInPartition = limits.perPartitionCount();
++                if (!currentKey.equals(lastKey))
++                    remainingInPartition = limits.perPartitionCount();
 +                lastKey = currentKey;
                  lastRow = row;
 +            }
              return row;
          }
  
diff --cc test/unit/org/apache/cassandra/cql3/DistinctQueryPagingTest.java
index 0000000,f433179..6f0477d
mode 000000,100644..100644
--- a/test/unit/org/apache/cassandra/cql3/DistinctQueryPagingTest.java
+++ b/test/unit/org/apache/cassandra/cql3/DistinctQueryPagingTest.java
@@@ -1,0 -1,260 +1,262 @@@
+ /*
+  * Licensed to the Apache Software Foundation (ASF) under one
+  * or more contributor license agreements.  See the NOTICE file
+  * distributed with this work for additional information
+  * regarding copyright ownership.  The ASF licenses this file
+  * to you under the Apache License, Version 2.0 (the
+  * "License"); you may not use this file except in compliance
+  * with the License.  You may obtain a copy of the License at
+  *
+  *     http://www.apache.org/licenses/LICENSE-2.0
+  *
+  * Unless required by applicable law or agreed to in writing, software
+  * distributed under the License is distributed on an "AS IS" BASIS,
+  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+  * See the License for the specific language governing permissions and
+  * limitations under the License.
+  */
+ package org.apache.cassandra.cql3;
+ 
+ import org.junit.Assert;
+ import org.junit.Test;
+ 
+ public class DistinctQueryPagingTest extends CQLTester
+ {
+     /**
+      * Migrated from cql_tests.py:TestCQL.test_select_distinct()
+      */
+     @Test
+     public void testSelectDistinct() throws Throwable
+     {
 -        // Test a regular (CQL3) table.
++        // Test a regular(CQL3) table.
+         createTable("CREATE TABLE %s (pk0 int, pk1 int, ck0 int, val int, 
PRIMARY KEY((pk0, pk1), ck0))");
+ 
+         for (int i = 0; i < 3; i++)
+         {
+             execute("INSERT INTO %s (pk0, pk1, ck0, val) VALUES (?, ?, 0, 
0)", i, i);
+             execute("INSERT INTO %s (pk0, pk1, ck0, val) VALUES (?, ?, 1, 
1)", i, i);
+         }
+ 
+         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
+                    row(0, 0));
+ 
+         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
+                    row(0, 0),
+                    row(2, 2),
+                    row(1, 1));
+ 
+         // Test selection validation.
+         assertInvalidMessage("queries must request all the partition key 
columns", "SELECT DISTINCT pk0 FROM %s");
+         assertInvalidMessage("queries must only request partition key 
columns", "SELECT DISTINCT pk0, pk1, ck0 FROM %s");
+ 
 -        // Test a 'compact storage' table.
++        //Test a 'compact storage' table.
+         createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY 
KEY((pk0, pk1))) WITH COMPACT STORAGE");
+ 
+         for (int i = 0; i < 3; i++)
+             execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, 
i);
+ 
+         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
+                    row(0, 0));
+ 
+         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
+                    row(0, 0),
+                    row(2, 2),
+                    row(1, 1));
+ 
+         // Test a 'wide row' thrift table.
+         createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY 
KEY(pk, name)) WITH COMPACT STORAGE");
+ 
+         for (int i = 0; i < 3; i++)
+         {
+             execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", 
i);
+             execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", 
i);
+         }
+ 
+         assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 1"),
+                    row(1));
+ 
+         assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 3"),
+                    row(1),
+                    row(0),
+                    row(2));
+     }
+ 
+     /**
+      * Migrated from 
cql_tests.py:TestCQL.test_select_distinct_with_deletions()
+      */
+     @Test
+     public void testSelectDistinctWithDeletions() throws Throwable
+     {
+         createTable("CREATE TABLE %s (k int PRIMARY KEY, c int, v int)");
+ 
+         for (int i = 0; i < 10; i++)
+             execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", i, i, i);
+ 
+         Object[][] rows = getRows(execute("SELECT DISTINCT k FROM %s"));
+         Assert.assertEquals(10, rows.length);
+         Object key_to_delete = rows[3][0];
+ 
+         execute("DELETE FROM %s WHERE k=?", key_to_delete);
+ 
+         rows = getRows(execute("SELECT DISTINCT k FROM %s"));
+         Assert.assertEquals(9, rows.length);
+ 
+         rows = getRows(execute("SELECT DISTINCT k FROM %s LIMIT 5"));
+         Assert.assertEquals(5, rows.length);
+ 
+         rows = getRows(execute("SELECT DISTINCT k FROM %s"));
+         Assert.assertEquals(9, rows.length);
+     }
+ 
+     @Test
+     public void testSelectDistinctWithWhereClause() throws Throwable {
+         createTable("CREATE TABLE %s (k int, a int, b int, PRIMARY KEY (k, 
a))");
+         createIndex("CREATE INDEX ON %s (b)");
+ 
+         for (int i = 0; i < 10; i++)
+         {
+             execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?)", i, i, i);
+             execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?)", i, i * 10, i 
* 10);
+         }
+ 
+         String distinctQueryErrorMsg = "SELECT DISTINCT with WHERE clause 
only supports restriction by partition key and/or static columns.";
+         assertInvalidMessage(distinctQueryErrorMsg,
+                              "SELECT DISTINCT k FROM %s WHERE a >= 80 ALLOW 
FILTERING");
+ 
+         assertInvalidMessage(distinctQueryErrorMsg,
+                              "SELECT DISTINCT k FROM %s WHERE k IN (1, 2, 3) 
AND a = 10");
+ 
+         assertInvalidMessage(distinctQueryErrorMsg,
+                              "SELECT DISTINCT k FROM %s WHERE b = 5");
+ 
+         assertRows(execute("SELECT DISTINCT k FROM %s WHERE k = 1"),
+                    row(1));
+         assertRows(execute("SELECT DISTINCT k FROM %s WHERE k IN (5, 6, 7)"),
+                    row(5),
+                    row(6),
+                    row(7));
+ 
+         // With static columns
+         createTable("CREATE TABLE %s (k int, a int, s int static, b int, 
PRIMARY KEY (k, a))");
+         createIndex("CREATE INDEX ON %s (b)");
+         for (int i = 0; i < 10; i++)
+         {
+             execute("INSERT INTO %s (k, a, b, s) VALUES (?, ?, ?, ?)", i, i, 
i, i);
+             execute("INSERT INTO %s (k, a, b, s) VALUES (?, ?, ?, ?)", i, i * 
10, i * 10, i * 10);
+         }
+ 
+         assertRows(execute("SELECT DISTINCT s FROM %s WHERE k = 5"),
+                    row(50));
+         assertRows(execute("SELECT DISTINCT s FROM %s WHERE k IN (5, 6, 7)"),
+                    row(50),
+                    row(60),
+                    row(70));
+     }
+ 
+     @Test
+     public void testSelectDistinctWithWhereClauseOnStaticColumn() throws 
Throwable
+     {
+         createTable("CREATE TABLE %s (k int, a int, s int static, s1 int 
static, b int, PRIMARY KEY (k, a))");
+ 
+         for (int i = 0; i < 10; i++)
+         {
+             execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 
i, i, i, i, i);
+             execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 
i, i * 10, i * 10, i * 10, i * 10);
+         }
+ 
+         execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 2, 
10, 10, 10, 10);
+ 
 -        assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 AND 
s1 = 90 ALLOW FILTERING"),
 -                   row(9, 90, 90));
++        beforeAndAfterFlush(() -> {
++            assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 
AND s1 = 90 ALLOW FILTERING"),
++                       row(9, 90, 90));
+ 
 -        assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 AND 
s1 = 90 ALLOW FILTERING"),
 -                   row(9, 90, 90));
++            assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 
AND s1 = 90 ALLOW FILTERING"),
++                       row(9, 90, 90));
+ 
 -        assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 10 AND 
s1 = 10 ALLOW FILTERING"),
 -                   row(1, 10, 10),
 -                   row(2, 10, 10));
++            assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 10 
AND s1 = 10 ALLOW FILTERING"),
++                       row(1, 10, 10),
++                       row(2, 10, 10));
+ 
 -        assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE k = 1 AND 
s = 10 AND s1 = 10 ALLOW FILTERING"),
 -                   row(1, 10, 10));
++            assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE k = 1 
AND s = 10 AND s1 = 10 ALLOW FILTERING"),
++                       row(1, 10, 10));
++        });
+     }
+ 
+     @Test
+     public void testSelectDistinctWithStaticColumnsAndPaging() throws 
Throwable
+     {
+         createTable("CREATE TABLE %s (a int, b int, s int static, c int, d 
int, primary key (a, b));");
+ 
+         // Test with only static data
+         for (int i = 0; i < 5; i++)
+             execute("INSERT INTO %s (a, s) VALUES (?, ?)", i, i);
+ 
+         testSelectDistinctWithPaging();
+ 
+         // Test with a mix of partition with rows and partitions without rows
+         for (int i = 0; i < 5; i++)
+         {
+             if (i % 2 == 0)
+             {
+                 for (int j = 1; j < 4; j++)
+                 {
+                     execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, 
?)", i, j, j, i + j);
+                 }
+             }
+         }
+ 
+         testSelectDistinctWithPaging();
+ 
+         // Test with all partition with rows
+         for (int i = 0; i < 5; i++)
+         {
+             for (int j = 1; j < 4; j++)
+             {
+                 execute("INSERT INTO %s (a, b, c, d) VALUES (?, ?, ?, ?)", i, 
j, j, i + j);
+             }
+         }
+ 
+         testSelectDistinctWithPaging();
+     }
+ 
+     private void testSelectDistinctWithPaging() throws Throwable
+     {
+         for (int pageSize = 1; pageSize < 7; pageSize++)
+         {
+             // Range query
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM 
%s", pageSize),
+                           row(1, 1),
+                           row(0, 0),
+                           row(2, 2),
+                           row(4, 4),
+                           row(3, 3));
+ 
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM %s 
LIMIT 3", pageSize),
+                           row(1, 1),
+                           row(0, 0),
+                           row(2, 2));
+ 
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM %s 
WHERE s >= 2 ALLOW FILTERING", pageSize),
+                           row(2, 2),
+                           row(4, 4),
+                           row(3, 3));
+ 
+             // Multi partition query
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM %s 
WHERE a IN (1, 2, 3, 4);", pageSize),
+                           row(1, 1),
+                           row(2, 2),
+                           row(3, 3),
+                           row(4, 4));
+ 
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM %s 
WHERE a IN (1, 2, 3, 4) LIMIT 3;", pageSize),
+                           row(1, 1),
+                           row(2, 2),
+                           row(3, 3));
+ 
+             assertRowsNet(executeNetWithPaging("SELECT DISTINCT a, s FROM %s 
WHERE a IN (1, 2, 3, 4) AND s >= 2 ALLOW FILTERING;", pageSize),
+                           row(2, 2),
+                           row(3, 3),
+                           row(4, 4));
+         }
+     }
+ }
diff --cc test/unit/org/apache/cassandra/cql3/IndexQueryPagingTest.java
index 238a58d,fd1e661..e2220ac
--- a/test/unit/org/apache/cassandra/cql3/IndexQueryPagingTest.java
+++ b/test/unit/org/apache/cassandra/cql3/IndexQueryPagingTest.java
@@@ -91,6 -91,32 +91,60 @@@ public class IndexQueryPagingTest exten
          executePagingQuery("SELECT * FROM %s WHERE k1=0 AND c1>=0 AND c1<=3 
AND v1=0", rowCount);
      }
  
+     @Test
++    public void testPagingOnPartitionsWithoutRows() throws Throwable
++    {
++        requireNetwork();
++
++        createTable("CREATE TABLE %s (pk int, ck int, s int static, v int, 
PRIMARY KEY (pk, ck))");
++        createIndex("CREATE INDEX on %s(s)");
++
++        execute("INSERT INTO %s (pk, s) VALUES (201, 200);");
++        execute("INSERT INTO %s (pk, s) VALUES (202, 200);");
++        execute("INSERT INTO %s (pk, s) VALUES (203, 200);");
++        execute("INSERT INTO %s (pk, s) VALUES (100, 100);");
++
++        for (int pageSize = 1; pageSize < 10; pageSize++)
++        {
++            assertRowsNet(executeNetWithPaging("select * from %s where s = 
200 and pk = 201;", pageSize),
++                          row(201, null, 200, null));
++
++            assertRowsNet(executeNetWithPaging("select * from %s where s = 
200;", pageSize),
++                          row(201, null, 200, null),
++                          row(203, null, 200, null),
++                          row(202, null, 200, null));
++
++            assertRowsNet(executeNetWithPaging("select * from %s where s = 
100;", pageSize),
++                          row(100, null, 100, null));
++        }
++    }
++
++    @Test
+     public void testPagingOnPartitionsWithoutClusteringColumns() throws 
Throwable
+     {
+         createTable("CREATE TABLE %s (pk int PRIMARY KEY, v int)");
+         createIndex("CREATE INDEX on %s(v)");
+ 
+         execute("INSERT INTO %s (pk, v) VALUES (201, 200);");
+         execute("INSERT INTO %s (pk, v) VALUES (202, 200);");
+         execute("INSERT INTO %s (pk, v) VALUES (203, 200);");
+         execute("INSERT INTO %s (pk, v) VALUES (100, 100);");
+ 
+         for (int pageSize = 1; pageSize < 10; pageSize++)
+         {
+             assertRowsNet(executeNetWithPaging("select * from %s where v = 
200 and pk = 201;", pageSize),
+                           row(201, 200));
+ 
+             assertRowsNet(executeNetWithPaging("select * from %s where v = 
200;", pageSize),
+                           row(201, 200),
+                           row(203, 200),
+                           row(202, 200));
+ 
+             assertRowsNet(executeNetWithPaging("select * from %s where v = 
100;", pageSize),
+                           row(100, 100));
+         }
+     }
+ 
      private void executePagingQuery(String cql, int rowCount)
      {
          // Execute an index query which should return all rows,
diff --cc test/unit/org/apache/cassandra/cql3/validation/entities/StaticColumnsTest.java
index 74fed69,efa48ae..08e021c
--- a/test/unit/org/apache/cassandra/cql3/validation/entities/StaticColumnsTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/entities/StaticColumnsTest.java
@@@ -24,7 -24,7 +24,7 @@@ import org.junit.Test
  
  import org.apache.cassandra.cql3.CQLTester;
  
--import static junit.framework.Assert.assertNull;
++import static org.junit.Assert.assertNull;
  import static org.junit.Assert.assertEquals;
  import static org.junit.Assert.assertTrue;
  
@@@ -124,8 -114,8 +124,7 @@@ public class StaticColumnsTest extends 
          assertRows(execute("SELECT * FROM %s WHERE v = 1"), row(0, 0, 42, 1), row(0, 1, 42, 1));
          assertRows(execute("SELECT p, s FROM %s WHERE v = 1"), row(0, 42), row(1, 42));
          assertRows(execute("SELECT p FROM %s WHERE v = 1"), row(0), row(1));
--        // We don't support that
--        assertInvalid("SELECT s FROM %s WHERE v = 1");
++        assertRows(execute("SELECT s FROM %s WHERE v = 1"), row(42), row(42));
      }
  
      /**
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/SelectLimitTest.java
index 68b2e93,8ef4b58..5c45451
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectLimitTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectLimitTest.java
@@@ -173,118 -173,6 +173,274 @@@ public class SelectLimitTest extends CQ
      }
  
      @Test
 +    public void testPerPartitionLimit() throws Throwable
 +    {
 +        perPartitionLimitTest(false);
 +    }
 +
 +    @Test
 +    public void testPerPartitionLimitWithCompactStorage() throws Throwable
 +    {
 +        perPartitionLimitTest(true);
 +    }
 +
 +    private void perPartitionLimitTest(boolean withCompactStorage) throws 
Throwable
 +    {
 +        String query = "CREATE TABLE %s (a int, b int, c int, PRIMARY KEY (a, 
b))";
 +
 +        if (withCompactStorage)
 +            createTable(query + " WITH COMPACT STORAGE");
 +        else
 +            createTable(query);
 +
 +        for (int i = 0; i < 5; i++)
 +        {
 +            for (int j = 0; j < 5; j++)
 +            {
 +                execute("INSERT INTO %s (a, b, c) VALUES (?, ?, ?)", i, j, j);
 +            }
 +        }
 +
 +        assertInvalidMessage("LIMIT must be strictly positive",
 +                             "SELECT * FROM %s PER PARTITION LIMIT ?", 0);
 +        assertInvalidMessage("LIMIT must be strictly positive",
 +                             "SELECT * FROM %s PER PARTITION LIMIT ?", -1);
 +
 +        assertRowsIgnoringOrder(execute("SELECT * FROM %s PER PARTITION LIMIT 
?", 2),
 +                                row(0, 0, 0),
 +                                row(0, 1, 1),
 +                                row(1, 0, 0),
 +                                row(1, 1, 1),
 +                                row(2, 0, 0),
 +                                row(2, 1, 1),
 +                                row(3, 0, 0),
 +                                row(3, 1, 1),
 +                                row(4, 0, 0),
 +                                row(4, 1, 1));
 +
 +        // Combined Per Partition and "global" limit
 +        assertRowCount(execute("SELECT * FROM %s PER PARTITION LIMIT ? LIMIT 
?", 2, 6),
 +                       6);
 +
 +        // odd amount of results
 +        assertRowCount(execute("SELECT * FROM %s PER PARTITION LIMIT ? LIMIT 
?", 2, 5),
 +                       5);
 +
 +        // IN query
 +        assertRows(execute("SELECT * FROM %s WHERE a IN (2,3) PER PARTITION 
LIMIT ?", 2),
 +                   row(2, 0, 0),
 +                   row(2, 1, 1),
 +                   row(3, 0, 0),
 +                   row(3, 1, 1));
 +
 +        assertRows(execute("SELECT * FROM %s WHERE a IN (2,3) PER PARTITION 
LIMIT ? LIMIT 3", 2),
 +                   row(2, 0, 0),
 +                   row(2, 1, 1),
 +                   row(3, 0, 0));
 +
 +        assertRows(execute("SELECT * FROM %s WHERE a IN (1,2,3) PER PARTITION 
LIMIT ? LIMIT 3", 2),
 +                   row(1, 0, 0),
 +                   row(1, 1, 1),
 +                   row(2, 0, 0));
 +
 +        // with restricted partition key
 +        assertRows(execute("SELECT * FROM %s WHERE a = ? PER PARTITION LIMIT 
?", 2, 3),
 +                   row(2, 0, 0),
 +                   row(2, 1, 1),
 +                   row(2, 2, 2));
 +
 +        // with ordering
 +        assertRows(execute("SELECT * FROM %s WHERE a IN (3, 2) ORDER BY b 
DESC PER PARTITION LIMIT ?", 2),
 +                   row(2, 4, 4),
 +                   row(3, 4, 4),
 +                   row(2, 3, 3),
 +                   row(3, 3, 3));
 +
 +        assertRows(execute("SELECT * FROM %s WHERE a IN (3, 2) ORDER BY b 
DESC PER PARTITION LIMIT ? LIMIT ?", 3, 4),
 +                   row(2, 4, 4),
 +                   row(3, 4, 4),
 +                   row(2, 3, 3),
 +                   row(3, 3, 3));
 +
 +        assertRows(execute("SELECT * FROM %s WHERE a = ? ORDER BY b DESC PER 
PARTITION LIMIT ?", 2, 3),
 +                   row(2, 4, 4),
 +                   row(2, 3, 3),
 +                   row(2, 2, 2));
 +
 +        // with filtering
 +        assertRows(execute("SELECT * FROM %s WHERE a = ? AND b > ? PER 
PARTITION LIMIT ? ALLOW FILTERING", 2, 0, 2),
 +                   row(2, 1, 1),
 +                   row(2, 2, 2));
 +
 +        assertRows(execute("SELECT * FROM %s WHERE a = ? AND b > ? ORDER BY b 
DESC PER PARTITION LIMIT ? ALLOW FILTERING", 2, 2, 2),
 +                   row(2, 4, 4),
 +                   row(2, 3, 3));
 +
 +        assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT 
DISTINCT queries",
 +                             "SELECT DISTINCT a FROM %s PER PARTITION LIMIT 
?", 3);
 +        assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT 
DISTINCT queries",
 +                             "SELECT DISTINCT a FROM %s PER PARTITION LIMIT ? 
LIMIT ?", 3, 4);
 +        assertInvalidMessage("PER PARTITION LIMIT is not allowed with 
aggregate queries.",
 +                             "SELECT COUNT(*) FROM %s PER PARTITION LIMIT ?", 
3);
 +    }
 +
 +    @Test
++    public void testPerPartitionLimitWithStaticDataAndPaging() throws 
Throwable
++    {
++        String query = "CREATE TABLE %s (a int, b int, s int static, c int, 
PRIMARY KEY (a, b))";
++
++        createTable(query);
++
++        for (int i = 0; i < 5; i++)
++        {
++            execute("INSERT INTO %s (a, s) VALUES (?, ?)", i, i);
++        }
++
++        for (int pageSize = 1; pageSize < 8; pageSize++)
++        {
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2", pageSize),
++                          row(0, null, 0, null),
++                          row(1, null, 1, null),
++                          row(2, null, 2, null),
++                          row(3, null, 3, null),
++                          row(4, null, 4, null));
++
++            // Combined Per Partition and "global" limit
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2 LIMIT 4", pageSize),
++                          row(0, null, 0, null),
++                          row(1, null, 1, null),
++                          row(2, null, 2, null),
++                          row(3, null, 3, null));
++
++            // odd amount of results
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2 LIMIT 3", pageSize),
++                          row(0, null, 0, null),
++                          row(1, null, 1, null),
++                          row(2, null, 2, null));
++
++            // IN query
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN 
(1,3,4) PER PARTITION LIMIT 2", pageSize),
++                          row(1, null, 1, null),
++                          row(3, null, 3, null),
++                          row(4, null, 4, null));
++
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN 
(1,3,4) PER PARTITION LIMIT 2 LIMIT 2",
++                                               pageSize),
++                          row(1, null, 1, null),
++                          row(3, null, 3, null));
++
++            // with restricted partition key
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
PER PARTITION LIMIT 3", pageSize),
++                          row(2, null, 2, null));
++
++            // with ordering
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
ORDER BY b DESC PER PARTITION LIMIT 3",
++                                               pageSize),
++                          row(2, null, 2, null));
++
++            // with filtering
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
AND s > 0 PER PARTITION LIMIT 2 ALLOW FILTERING",
++                                               pageSize),
++                          row(2, null, 2, null));
++        }
++
++        for (int i = 0; i < 5; i++)
++        {
++            if (i != 1)
++            {
++                for (int j = 0; j < 5; j++)
++                {
++                    execute("INSERT INTO %s (a, b, s, c) VALUES (?, ?, ?, 
?)", i, j, i, j);
++                }
++            }
++        }
++
++        assertInvalidMessage("LIMIT must be strictly positive",
++                             "SELECT * FROM %s PER PARTITION LIMIT ?", 0);
++        assertInvalidMessage("LIMIT must be strictly positive",
++                             "SELECT * FROM %s PER PARTITION LIMIT ?", -1);
++
++        for (int pageSize = 1; pageSize < 8; pageSize++)
++        {
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2", pageSize),
++                          row(0, 0, 0, 0),
++                          row(0, 1, 0, 1),
++                          row(1, null, 1, null),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1),
++                          row(3, 0, 3, 0),
++                          row(3, 1, 3, 1),
++                          row(4, 0, 4, 0),
++                          row(4, 1, 4, 1));
++
++            // Combined Per Partition and "global" limit
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2 LIMIT 4", pageSize),
++                          row(0, 0, 0, 0),
++                          row(0, 1, 0, 1),
++                          row(1, null, 1, null),
++                          row(2, 0, 2, 0));
++
++            // odd amount of results
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s PER 
PARTITION LIMIT 2 LIMIT 5", pageSize),
++                          row(0, 0, 0, 0),
++                          row(0, 1, 0, 1),
++                          row(1, null, 1, null),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1));
++
++            // IN query
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN 
(2,3) PER PARTITION LIMIT 2", pageSize),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1),
++                          row(3, 0, 3, 0),
++                          row(3, 1, 3, 1));
++
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN 
(2,3) PER PARTITION LIMIT 2 LIMIT 3",
++                                               pageSize),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1),
++                          row(3, 0, 3, 0));
++
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a IN 
(1,2,3) PER PARTITION LIMIT 2 LIMIT 3",
++                                               pageSize),
++                          row(1, null, 1, null),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1));
++
++            // with restricted partition key
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
PER PARTITION LIMIT 3", pageSize),
++                          row(2, 0, 2, 0),
++                          row(2, 1, 2, 1),
++                          row(2, 2, 2, 2));
++
++            // with ordering
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
ORDER BY b DESC PER PARTITION LIMIT 3",
++                                               pageSize),
++                          row(2, 4, 2, 4),
++                          row(2, 3, 2, 3),
++                          row(2, 2, 2, 2));
++
++            // with filtering
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
AND b > 0 PER PARTITION LIMIT 2 ALLOW FILTERING",
++                                               pageSize),
++                          row(2, 1, 2, 1),
++                          row(2, 2, 2, 2));
++
++            assertRowsNet(executeNetWithPaging("SELECT * FROM %s WHERE a = 2 
AND b > 2 ORDER BY b DESC PER PARTITION LIMIT 2 ALLOW FILTERING",
++                                               pageSize),
++                          row(2, 4, 2, 4),
++                          row(2, 3, 2, 3));
++        }
++
++        assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT 
DISTINCT queries",
++                             "SELECT DISTINCT a FROM %s PER PARTITION LIMIT 
?", 3);
++        assertInvalidMessage("PER PARTITION LIMIT is not allowed with SELECT 
DISTINCT queries",
++                             "SELECT DISTINCT a FROM %s PER PARTITION LIMIT ? 
LIMIT ?", 3, 4);
++        assertInvalidMessage("PER PARTITION LIMIT is not allowed with 
aggregate queries.",
++                             "SELECT COUNT(*) FROM %s PER PARTITION LIMIT ?", 
3);
++    }
++
++    @Test
      public void testLimitWithDeletedRowsAndStaticColumns() throws Throwable
      {
          createTable("CREATE TABLE %s (pk int, c int, v int, s int static, 
PRIMARY KEY (pk, c))");
diff --cc test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
index ee9cd7f,469e8ca..1d45448
--- a/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
+++ b/test/unit/org/apache/cassandra/cql3/validation/operations/SelectTest.java
@@@ -1190,166 -1170,6 +1190,7 @@@ public class SelectTest extends CQLTest
      }
  
      /**
-      * Migrated from cql_tests.py:TestCQL.select_distinct_test()
-      */
-     @Test
-     public void testSelectDistinct() throws Throwable
-     {
-         // Test a regular(CQL3) table.
-         createTable("CREATE TABLE %s (pk0 int, pk1 int, ck0 int, val int, 
PRIMARY KEY((pk0, pk1), ck0))");
- 
-         for (int i = 0; i < 3; i++)
-         {
-             execute("INSERT INTO %s (pk0, pk1, ck0, val) VALUES (?, ?, 0, 
0)", i, i);
-             execute("INSERT INTO %s (pk0, pk1, ck0, val) VALUES (?, ?, 1, 
1)", i, i);
-         }
- 
-         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
-                    row(0, 0));
- 
-         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
-                    row(0, 0),
-                    row(2, 2),
-                    row(1, 1));
- 
-         // Test selection validation.
-         assertInvalidMessage("queries must request all the partition key 
columns", "SELECT DISTINCT pk0 FROM %s");
-         assertInvalidMessage("queries must only request partition key 
columns", "SELECT DISTINCT pk0, pk1, ck0 FROM %s");
- 
-         //Test a 'compact storage' table.
-         createTable("CREATE TABLE %s (pk0 int, pk1 int, val int, PRIMARY 
KEY((pk0, pk1))) WITH COMPACT STORAGE");
- 
-         for (int i = 0; i < 3; i++)
-             execute("INSERT INTO %s (pk0, pk1, val) VALUES (?, ?, ?)", i, i, 
i);
- 
-         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 1"),
-                    row(0, 0));
- 
-         assertRows(execute("SELECT DISTINCT pk0, pk1 FROM %s LIMIT 3"),
-                    row(0, 0),
-                    row(2, 2),
-                    row(1, 1));
- 
-         // Test a 'wide row' thrift table.
-         createTable("CREATE TABLE %s (pk int, name text, val int, PRIMARY 
KEY(pk, name)) WITH COMPACT STORAGE");
- 
-         for (int i = 0; i < 3; i++)
-         {
-             execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name0', 0)", 
i);
-             execute("INSERT INTO %s (pk, name, val) VALUES (?, 'name1', 1)", 
i);
-         }
- 
-         assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 1"),
-                    row(1));
- 
-         assertRows(execute("SELECT DISTINCT pk FROM %s LIMIT 3"),
-                    row(1),
-                    row(0),
-                    row(2));
-     }
- 
-     /**
-      * Migrated from 
cql_tests.py:TestCQL.select_distinct_with_deletions_test()
-      */
-     @Test
-     public void testSelectDistinctWithDeletions() throws Throwable
-     {
-         createTable("CREATE TABLE %s (k int PRIMARY KEY, c int, v int)");
- 
-         for (int i = 0; i < 10; i++)
-             execute("INSERT INTO %s (k, c, v) VALUES (?, ?, ?)", i, i, i);
- 
-         Object[][] rows = getRows(execute("SELECT DISTINCT k FROM %s"));
-         Assert.assertEquals(10, rows.length);
-         Object key_to_delete = rows[3][0];
- 
-         execute("DELETE FROM %s WHERE k=?", key_to_delete);
- 
-         rows = getRows(execute("SELECT DISTINCT k FROM %s"));
-         Assert.assertEquals(9, rows.length);
- 
-         rows = getRows(execute("SELECT DISTINCT k FROM %s LIMIT 5"));
-         Assert.assertEquals(5, rows.length);
- 
-         rows = getRows(execute("SELECT DISTINCT k FROM %s"));
-         Assert.assertEquals(9, rows.length);
-     }
- 
-     @Test
-     public void testSelectDistinctWithWhereClause() throws Throwable {
-         createTable("CREATE TABLE %s (k int, a int, b int, PRIMARY KEY (k, 
a))");
-         createIndex("CREATE INDEX ON %s (b)");
- 
-         for (int i = 0; i < 10; i++)
-         {
-             execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?)", i, i, i);
-             execute("INSERT INTO %s (k, a, b) VALUES (?, ?, ?)", i, i * 10, i 
* 10);
-         }
- 
-         String distinctQueryErrorMsg = "SELECT DISTINCT with WHERE clause 
only supports restriction by partition key and/or static columns.";
-         assertInvalidMessage(distinctQueryErrorMsg,
-                              "SELECT DISTINCT k FROM %s WHERE a >= 80 ALLOW 
FILTERING");
- 
-         assertInvalidMessage(distinctQueryErrorMsg,
-                              "SELECT DISTINCT k FROM %s WHERE k IN (1, 2, 3) 
AND a = 10");
- 
-         assertInvalidMessage(distinctQueryErrorMsg,
-                              "SELECT DISTINCT k FROM %s WHERE b = 5");
- 
-         assertRows(execute("SELECT DISTINCT k FROM %s WHERE k = 1"),
-                    row(1));
-         assertRows(execute("SELECT DISTINCT k FROM %s WHERE k IN (5, 6, 7)"),
-                    row(5),
-                    row(6),
-                    row(7));
- 
-         // With static columns
-         createTable("CREATE TABLE %s (k int, a int, s int static, b int, 
PRIMARY KEY (k, a))");
-         createIndex("CREATE INDEX ON %s (b)");
-         for (int i = 0; i < 10; i++)
-         {
-             execute("INSERT INTO %s (k, a, b, s) VALUES (?, ?, ?, ?)", i, i, 
i, i);
-             execute("INSERT INTO %s (k, a, b, s) VALUES (?, ?, ?, ?)", i, i * 
10, i * 10, i * 10);
-         }
- 
-         assertRows(execute("SELECT DISTINCT s FROM %s WHERE k = 5"),
-                    row(50));
-         assertRows(execute("SELECT DISTINCT s FROM %s WHERE k IN (5, 6, 7)"),
-                    row(50),
-                    row(60),
-                    row(70));
-     }
- 
-     @Test
-     public void testSelectDistinctWithWhereClauseOnStaticColumn() throws 
Throwable
-     {
-         createTable("CREATE TABLE %s (k int, a int, s int static, s1 int 
static, b int, PRIMARY KEY (k, a))");
- 
-         for (int i = 0; i < 10; i++)
-         {
-             execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 
i, i, i, i, i);
-             execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 
i, i * 10, i * 10, i * 10, i * 10);
-         }
- 
-         execute("INSERT INTO %s (k, a, b, s, s1) VALUES (?, ?, ?, ?, ?)", 2, 
10, 10, 10, 10);
- 
-         beforeAndAfterFlush(() -> {
-             assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 
AND s1 = 90 ALLOW FILTERING"),
-                        row(9, 90, 90));
- 
-             assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 90 
AND s1 = 90 ALLOW FILTERING"),
-                        row(9, 90, 90));
- 
-             assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE s = 10 
AND s1 = 10 ALLOW FILTERING"),
-                        row(1, 10, 10),
-                        row(2, 10, 10));
- 
-             assertRows(execute("SELECT DISTINCT k, s, s1 FROM %s WHERE k = 1 
AND s = 10 AND s1 = 10 ALLOW FILTERING"),
-                        row(1, 10, 10));
-         });
-     }
- 
-     /**
++>>>>>>> cassandra-3.0
       * Migrated from cql_tests.py:TestCQL.bug_6327_test()
       */
      @Test
diff --cc test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
index b573e6f,7cea9da..da83601
--- a/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
+++ b/test/unit/org/apache/cassandra/index/internal/CassandraIndexTest.java
@@@ -43,7 -42,7 +43,6 @@@ import org.apache.cassandra.exceptions.
  import org.apache.cassandra.utils.ByteBufferUtil;
  import org.apache.cassandra.utils.FBUtilities;
  
--import static org.apache.cassandra.Util.executeLocally;
  import static org.apache.cassandra.Util.throwAssert;
  import static org.junit.Assert.assertArrayEquals;
  import static org.junit.Assert.assertEquals;
@@@ -341,32 -340,6 +340,57 @@@ public class CassandraIndexTest extend
      }
  
      @Test
 +    public void indexOnStaticColumn() throws Throwable
 +    {
 +        Object[] row1 = row("k0", "c0", "s0");
 +        Object[] row2 = row("k0", "c1", "s0");
 +        Object[] row3 = row("k1", "c0", "s1");
 +        Object[] row4 = row("k1", "c1", "s1");
 +
 +        createTable("CREATE TABLE %s (k text, c text, s text static, PRIMARY 
KEY (k, c));");
 +        createIndex("CREATE INDEX sc_index on %s(s)");
 +
 +        execute("INSERT INTO %s (k, c, s) VALUES (?, ?, ?)", row1);
 +        execute("INSERT INTO %s (k, c, s) VALUES (?, ?, ?)", row2);
 +        execute("INSERT INTO %s (k, c, s) VALUES (?, ?, ?)", row3);
 +        execute("INSERT INTO %s (k, c, s) VALUES (?, ?, ?)", row4);
 +
 +        assertRows(execute("SELECT * FROM %s WHERE s = ?", "s0"), row1, row2);
 +        assertRows(execute("SELECT * FROM %s WHERE s = ?", "s1"), row3, row4);
 +
 +        assertRows(execute("SELECT * FROM %s WHERE s = ? AND token(k) >= 
token(?)", "s0", "k0"), row1, row2);
 +        assertRows(execute("SELECT * FROM %s WHERE s = ? AND token(k) >= 
token(?)", "s1", "k1"), row3, row4);
 +
 +        assertEmpty(execute("SELECT * FROM %s WHERE s = ? AND token(k) < 
token(?)", "s0", "k0"));
 +        assertEmpty(execute("SELECT * FROM %s WHERE s = ? AND token(k) < 
token(?)", "s1", "k1"));
++
++        row1 = row("s0");
++        row2 = row("s0");
++        row3 = row("s1");
++        row4 = row("s1");
++
++        assertRows(execute("SELECT s FROM %s WHERE s = ?", "s0"), row1, row2);
++        assertRows(execute("SELECT s FROM %s WHERE s = ?", "s1"), row3, row4);
++
++        assertRows(execute("SELECT s FROM %s WHERE s = ? AND token(k) >= 
token(?)", "s0", "k0"), row1, row2);
++        assertRows(execute("SELECT s FROM %s WHERE s = ? AND token(k) >= 
token(?)", "s1", "k1"), row3, row4);
++
++        assertEmpty(execute("SELECT s FROM %s WHERE s = ? AND token(k) < 
token(?)", "s0", "k0"));
++        assertEmpty(execute("SELECT s FROM %s WHERE s = ? AND token(k) < 
token(?)", "s1", "k1"));
++
++        dropIndex(String.format("DROP INDEX %s.sc_index", keyspace()));
++
++        assertRows(execute("SELECT s FROM %s WHERE s = ? ALLOW FILTERING", 
"s0"), row1, row2);
++        assertRows(execute("SELECT s FROM %s WHERE s = ? ALLOW FILTERING", 
"s1"), row3, row4);
++
++        assertRows(execute("SELECT s FROM %s WHERE s = ? AND token(k) >= 
token(?) ALLOW FILTERING", "s0", "k0"), row1, row2);
++        assertRows(execute("SELECT s FROM %s WHERE s = ? AND token(k) >= 
token(?) ALLOW FILTERING", "s1", "k1"), row3, row4);
++
++        assertEmpty(execute("SELECT s FROM %s WHERE s = ? AND token(k) < 
token(?) ALLOW FILTERING", "s0", "k0"));
++        assertEmpty(execute("SELECT s FROM %s WHERE s = ? AND token(k) < 
token(?) ALLOW FILTERING", "s1", "k1"));
 +    }
 +
 +    @Test
      public void testIndexOnCompactTable() throws Throwable
      {
          createTable("CREATE TABLE %s (k int, v int, PRIMARY KEY (k)) WITH 
COMPACT STORAGE;");

