Merge branch 'cassandra-3.11' into trunk

Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/e6fb8302
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/e6fb8302
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/e6fb8302

Branch: refs/heads/trunk
Commit: e6fb8302848bc43888b0a742a9b0abce09872c45
Parents: 4603600 e624c66
Author: Paulo Motta <pa...@apache.org>
Authored: Tue Sep 5 01:05:32 2017 -0500
Committer: Paulo Motta <pa...@apache.org>
Committed: Tue Sep 5 01:06:24 2017 -0500

----------------------------------------------------------------------
 NEWS.txt                                        |   19 +
 doc/source/cql/mvs.rst                          |    8 +
 .../apache/cassandra/cql3/UpdateParameters.java |    2 +-
 .../cql3/statements/AlterTableStatement.java    |   19 +-
 .../org/apache/cassandra/db/LivenessInfo.java   |   17 +-
 .../org/apache/cassandra/db/ReadCommand.java    |    4 +-
 .../db/compaction/CompactionIterator.java       |    4 +-
 .../apache/cassandra/db/filter/RowFilter.java   |    4 +-
 .../cassandra/db/partitions/PurgeFunction.java  |   10 +-
 .../org/apache/cassandra/db/rows/BTreeRow.java  |    6 +-
 src/java/org/apache/cassandra/db/rows/Row.java  |   15 +-
 .../cassandra/db/rows/UnfilteredSerializer.java |    5 +
 .../apache/cassandra/db/transform/Filter.java   |    8 +-
 .../db/transform/FilteredPartitions.java        |    2 +-
 .../cassandra/db/transform/FilteredRows.java    |    2 +-
 .../apache/cassandra/db/view/TableViews.java    |   14 +-
 src/java/org/apache/cassandra/db/view/View.java |   45 +-
 .../apache/cassandra/db/view/ViewManager.java   |    5 +
 .../cassandra/db/view/ViewUpdateGenerator.java  |  161 ++-
 .../apache/cassandra/schema/TableMetadata.java  |   13 +
 .../apache/cassandra/service/DataResolver.java  |    2 +-
 .../org/apache/cassandra/cql3/CQLTester.java    |    2 +-
 .../apache/cassandra/cql3/ViewComplexTest.java  | 1344 ++++++++++++++++++
 .../cassandra/cql3/ViewFilteringTest.java       | 1106 ++++++++------
 .../org/apache/cassandra/cql3/ViewTest.java     |   35 +-
 25 files changed, 2319 insertions(+), 533 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/NEWS.txt
----------------------------------------------------------------------
diff --cc NEWS.txt
index c9963c3,0682ae9..7d0fe9a
--- a/NEWS.txt
+++ b/NEWS.txt
@@@ -33,41 -18,33 +33,60 @@@ New feature
  
  Upgrading
  ---------
 -    - Nothing specific to this version but please see previous upgrading 
sections,
 -      especially if you are upgrading from 2.2.
 +    - Support for legacy auth tables in the system_auth keyspace (users,
 +      permissions, credentials) and the migration code has been removed. 
Migration
 +      of these legacy auth tables must have been completed before the upgrade 
to
 +      4.0 and the legacy tables must have been removed. See the 'Upgrading' 
section
 +      for version 2.2 for migration instructions.
 +    - Cassandra 4.0 removed support for the deprecated Thrift interface. 
Amongst
 +      other things, this implies the removal of all yaml options related to 
thrift
 +      ('start_rpc', rpc_port, ...).
 +    - Cassandra 4.0 removed support for any pre-3.0 format. This means you
 +      cannot upgrade from a 2.x version to 4.0 directly, you have to upgrade 
to
 +      a 3.0.x/3.x version first (and run upgradesstables). In particular, this
 +      means Cassandra 4.0 cannot load or read pre-3.0 sstables in any way: you
 +      will need to upgrade those sstables in 3.0.x/3.x first.
 +    - Upgrades from 3.0.x or 3.x are supported since 3.0.13 or 3.11.0, 
previous
 +      versions will cause issues during rolling upgrades (CASSANDRA-13274).
 +    - Cassandra will no longer allow invalid keyspace replication options, 
such
 +      as invalid datacenter names for NetworkTopologyStrategy. Operators MUST
 +      add new nodes to a datacenter before they can set ALTER or CREATE
 +      keyspace replication policies using that datacenter. Existing keyspaces
 +      will continue to operate, but CREATE and ALTER will validate that all
 +      datacenters specified exist in the cluster.
 +    - Cassandra 4.0 fixes a problem with incremental repair which caused 
repaired
 +      data to be inconsistent between nodes. The fix changes the behavior of 
both
 +      full and incremental repairs. For full repairs, data is no longer marked
 +      repaired. For incremental repairs, anticompaction is run at the 
beginning
 +      of the repair, instead of at the end. If incremental repair was being 
used
 +      prior to upgrading, a full repair should be run after upgrading to 
resolve
 +      any inconsistencies.
 +    - Config option index_interval has been removed (it was deprecated since 
2.0)
 +    - Deprecated repair JMX APIs are removed.
 +    - The version of snappy-java has been upgraded to 1.1.2.6
 +      - the minimum value for internode message timeouts is 10ms. 
Previously, any
 +        positive value was allowed. See cassandra.yaml entries like
 +        read_request_timeout_in_ms for more details.
 +
+ 
+ Materialized Views
+ -------------------
+     - Cassandra will no longer allow dropping columns on tables with 
Materialized Views.
+     - A change was made in the way the Materialized View timestamp is 
computed, which
+       may cause an old deletion to a base column which is view primary key 
(PK) column
+       to not be reflected in the view when repairing the base table 
post-upgrade. This
+       condition is only possible when a column deletion to an MV primary key 
(PK) column
+       not present in the base table PK (via UPDATE base SET view_pk_col = 
null or DELETE
+       view_pk_col FROM base) is missed before the upgrade and received by 
repair after the upgrade.
+       If such column deletions are done on a view PK column which is not a 
base PK, it's advisable
+       to run repair on the base table of all nodes prior to the upgrade. 
Alternatively it's possible
+       to fix potential inconsistencies by running repair on the views after 
upgrade or drop and
+       re-create the views. See CASSANDRA-11500 for more details.
+     - Removal of columns not selected in the Materialized View (via UPDATE 
base SET unselected_column
+       = null or DELETE unselected_column FROM base) may not be properly 
reflected in the view in some
+       situations so we advise against doing deletions on base columns not 
selected in views
+       until this is fixed on CASSANDRA-13826.
 -    - Creating Materialized View with filtering on non-primary-key base column
 -      (added in CASSANDRA-10368) is disabled, because the liveness of view row
 -      is depending on multiple filtered base non-key columns and base non-key
 -      column used in view primary-key. This semantic cannot be supported 
without
 -      storage format change, see CASSANDRA-13826. For append-only use case, 
you
 -      may still use this feature with a startup flag: 
"-Dcassandra.mv.allow_filtering_nonkey_columns_unsafe=true"
+ 
  3.11.0
  ======
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/doc/source/cql/mvs.rst
----------------------------------------------------------------------
diff --cc doc/source/cql/mvs.rst
index aabea10,aabea10..55ede22
--- a/doc/source/cql/mvs.rst
+++ b/doc/source/cql/mvs.rst
@@@ -164,3 -164,3 +164,11 @@@ Dropping a materialized view users the 
  
  If the materialized view does not exists, the statement will return an error, 
unless ``IF EXISTS`` is used in which case
  the operation is a no-op.
++
++MV Limitations
++```````````````
++
++.. Note:: Removal of columns not selected in the Materialized View (via 
``UPDATE base SET unselected_column = null`` or
++          ``DELETE unselected_column FROM base``) may shadow missed updates 
to other columns received by hints or repair.
++          For this reason, we advise against doing deletions on base columns 
not selected in views until this is
++          fixed on CASSANDRA-13826.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/cql3/UpdateParameters.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
index 0c45647,fdbcf7a..e6d997e
--- a/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
+++ b/src/java/org/apache/cassandra/cql3/statements/AlterTableStatement.java
@@@ -216,25 -234,12 +216,14 @@@ public class AlterTableStatement extend
                                                                              
dependentIndexes.stream()
                                                                                
              .map(i -> i.name)
                                                                                
              .collect(Collectors.joining(","))));
 +                        }
                      }
  
-                     // If a column is dropped which is included in a view, we 
don't allow the drop to take place.
-                     boolean rejectAlter = false;
-                     StringBuilder viewNames = new StringBuilder();
-                     for (ViewMetadata view : views)
-                     {
-                         if (!view.includes(columnName)) continue;
-                         if (rejectAlter)
-                             viewNames.append(',');
-                         rejectAlter = true;
-                         viewNames.append(view.name);
-                     }
-                     if (rejectAlter)
-                         throw new 
InvalidRequestException(String.format("Cannot drop column %s, depended on by 
materialized views (%s.{%s})",
++
+                     if (!Iterables.isEmpty(views))
+                         throw new 
InvalidRequestException(String.format("Cannot drop column %s on base table with 
materialized views.",
                                                                          
columnName.toString(),
-                                                                         
keyspace(),
-                                                                         
viewNames.toString()));
+                                                                         
keyspace()));
                  }
                  break;
              case OPTS:

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/ReadCommand.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/ReadCommand.java
index 35a3a32,bb4d5e8..08ebc86
--- a/src/java/org/apache/cassandra/db/ReadCommand.java
+++ b/src/java/org/apache/cassandra/db/ReadCommand.java
@@@ -553,7 -610,12 +553,9 @@@ public abstract class ReadCommand exten
          {
              public WithoutPurgeableTombstones()
              {
-                 super(nowInSec(), cfs.gcBefore(nowInSec()), 
oldestUnrepairedTombstone(), 
cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
 -                super(isForThrift,
 -                      nowInSec(),
 -                      cfs.gcBefore(nowInSec()),
 -                      oldestUnrepairedTombstone(),
++                super(nowInSec(), cfs.gcBefore(nowInSec()), 
oldestUnrepairedTombstone(),
+                       
cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones(),
 -                      cfs.metadata.enforceStrictLiveness());
++                      iterator.metadata().enforceStrictLiveness());
              }
  
              protected Predicate<Long> getPurgeEvaluator()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
index 8e94fd9,f2da989..a6161f2
--- a/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
+++ b/src/java/org/apache/cassandra/db/compaction/CompactionIterator.java
@@@ -264,9 -270,14 +264,11 @@@ public class CompactionIterator extend
  
          private long compactedUnfiltered;
  
 -        private Purger(boolean isForThrift, CompactionController controller, 
int nowInSec)
 +        private Purger(CompactionController controller, int nowInSec)
          {
-             super(nowInSec, controller.gcBefore, 
controller.compactingRepaired() ? Integer.MAX_VALUE : Integer.MIN_VALUE, 
controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones());
 -            super(isForThrift,
 -                  nowInSec,
 -                  controller.gcBefore,
 -                  controller.compactingRepaired() ? Integer.MAX_VALUE : 
Integer.MIN_VALUE,
++            super(nowInSec, controller.gcBefore, 
controller.compactingRepaired() ? Integer.MAX_VALUE : Integer.MIN_VALUE,
+                   
controller.cfs.getCompactionStrategyManager().onlyPurgeRepairedTombstones(),
 -                  controller.cfs.metadata.enforceStrictLiveness());
++                  controller.cfs.metadata.get().enforceStrictLiveness());
              this.controller = controller;
          }
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/filter/RowFilter.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/filter/RowFilter.java
index cecfede,84a93ab..baafe8b
--- a/src/java/org/apache/cassandra/db/filter/RowFilter.java
+++ b/src/java/org/apache/cassandra/db/filter/RowFilter.java
@@@ -147,10 -161,10 +147,10 @@@ public abstract class RowFilter impleme
       * @param nowInSec the current time in seconds (to know what is live and 
what isn't).
       * @return {@code true} if {@code row} in partition {@code partitionKey} 
satisfies this row filter.
       */
 -    public boolean isSatisfiedBy(CFMetaData metadata, DecoratedKey 
partitionKey, Row row, int nowInSec)
 +    public boolean isSatisfiedBy(TableMetadata metadata, DecoratedKey 
partitionKey, Row row, int nowInSec)
      {
          // We purge all tombstones as the expressions isSatisfiedBy methods 
expects it
-         Row purged = row.purge(DeletionPurger.PURGE_ALL, nowInSec);
+         Row purged = row.purge(DeletionPurger.PURGE_ALL, nowInSec, 
metadata.enforceStrictLiveness());
          if (purged == null)
              return expressions.isEmpty();
  

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
index b5580c6,5cc9145..83d4d38
--- a/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
+++ b/src/java/org/apache/cassandra/db/partitions/PurgeFunction.java
@@@ -25,12 -25,21 +25,15 @@@ import org.apache.cassandra.db.transfor
  
  public abstract class PurgeFunction extends 
Transformation<UnfilteredRowIterator>
  {
 -    private final boolean isForThrift;
      private final DeletionPurger purger;
      private final int nowInSec;
+ 
+     private final boolean enforceStrictLiveness;
      private boolean isReverseOrder;
  
-     public PurgeFunction(int nowInSec, int gcBefore, int 
oldestUnrepairedTombstone, boolean onlyPurgeRepairedTombstones)
 -    public PurgeFunction(boolean isForThrift,
 -                         int nowInSec,
 -                         int gcBefore,
 -                         int oldestUnrepairedTombstone,
 -                         boolean onlyPurgeRepairedTombstones,
++    public PurgeFunction(int nowInSec, int gcBefore, int 
oldestUnrepairedTombstone, boolean onlyPurgeRepairedTombstones,
+                          boolean enforceStrictLiveness)
      {
 -        this.isForThrift = isForThrift;
          this.nowInSec = nowInSec;
          this.purger = (timestamp, localDeletionTime) ->
                        !(onlyPurgeRepairedTombstones && localDeletionTime >= 
oldestUnrepairedTombstone)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/rows/BTreeRow.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/rows/Row.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/rows/Row.java
index 5999864,9a8508b..13ef86d
--- a/src/java/org/apache/cassandra/db/rows/Row.java
+++ b/src/java/org/apache/cassandra/db/rows/Row.java
@@@ -203,10 -203,20 +203,20 @@@ public interface Row extends Unfiltered
       *
       * @param purger the {@code DeletionPurger} to use to decide what can be 
purged.
       * @param nowInSec the current time to decide what is deleted and what 
isn't (in the case of expired cells).
+      * @param enforceStrictLiveness whether the row should be purged if there 
is no PK liveness info,
 -     *                              normally retrieved from {@link 
CFMetaData#enforceStrictLiveness()}
++     *                              normally retrieved from {@link 
TableMetadata#enforceStrictLiveness()}
+      *
+      *        When enforceStrictLiveness is set, rows with empty PK liveness 
info
+      *        and no row deletion are purged.
+      *
+      *        Currently this is only used by views with normal base column as 
PK column
+      *        so updates to other base columns do not make the row live when 
the PK column
+      *        is not live. See CASSANDRA-11500.
+      *
       * @return this row but without any deletion info purged by {@code 
purger}. If the purged row is empty, returns
--     * {@code null}.
++     *         {@code null}.
       */
-     public Row purge(DeletionPurger purger, int nowInSec);
+     public Row purge(DeletionPurger purger, int nowInSec, boolean 
enforceStrictLiveness);
  
      /**
       * Returns a copy of this row which only include the data queried by 
{@code filter}, excluding anything _fetched_ for

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/rows/UnfilteredSerializer.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/transform/FilteredPartitions.java
index fa12c9c,b835a6b..6a19f76
--- a/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java
+++ b/src/java/org/apache/cassandra/db/transform/FilteredPartitions.java
@@@ -52,8 -52,13 +52,8 @@@ public final class FilteredPartitions e
       */
      public static FilteredPartitions filter(UnfilteredPartitionIterator 
iterator, int nowInSecs)
      {
-         FilteredPartitions filtered = filter(iterator, new Filter(nowInSecs));
 -        FilteredPartitions filtered = filter(iterator,
 -                                             new Filter(nowInSecs,
 -                                                        
iterator.metadata().enforceStrictLiveness()));
 -
 -        return iterator.isForThrift()
 -             ? filtered
 -             : (FilteredPartitions) Transformation.apply(filtered, new 
EmptyPartitionsDiscarder());
++        FilteredPartitions filtered = filter(iterator, new Filter(nowInSecs, 
iterator.metadata().enforceStrictLiveness()));
 +        return (FilteredPartitions) Transformation.apply(filtered, new 
EmptyPartitionsDiscarder());
      }
  
      public static FilteredPartitions filter(UnfilteredPartitionIterator 
iterator, Filter filter)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/view/TableViews.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/view/TableViews.java
index 0579429,f1f48f6..7a1373c
--- a/src/java/org/apache/cassandra/db/view/TableViews.java
+++ b/src/java/org/apache/cassandra/db/view/TableViews.java
@@@ -325,10 -326,13 +329,13 @@@ public class TableViews extends Abstrac
                      continue;
  
                  Row updateRow = (Row) update;
-                 addToViewUpdateGenerators(emptyRow(updateRow.clustering(), 
DeletionTime.LIVE), updateRow, generators, nowInSec);
+                 addToViewUpdateGenerators(emptyRow(updateRow.clustering(), 
existingsDeletion.currentDeletion()),
+                                           updateRow,
+                                           generators,
+                                           nowInSec);
              }
  
 -            return 
Iterators.singletonIterator(buildMutations(baseTableMetadata, generators));
 +            return 
Iterators.singletonIterator(buildMutations(baseTableMetadata.get(), 
generators));
          }
      }
  
@@@ -423,11 -427,12 +430,12 @@@
          ClusteringIndexFilter clusteringFilter = names == null
                                                 ? new 
ClusteringIndexSliceFilter(sliceBuilder.build(), false)
                                                 : new 
ClusteringIndexNamesFilter(names, false);
+         // since unselected columns also affect view liveness, we need to 
query all base columns if base and view have same key columns.
          // If we have more than one view, we should merge the queried columns 
by each views but to keep it simple we just
          // include everything. We could change that in the future.
-         ColumnFilter queriedColumns = views.size() == 1
+         ColumnFilter queriedColumns = views.size() == 1 && 
metadata.enforceStrictLiveness()
 -                                   ? 
Iterables.getOnlyElement(views).getSelectStatement().queriedColumns()
 -                                   : ColumnFilter.all(metadata);
 +                                    ? 
Iterables.getOnlyElement(views).getSelectStatement().queriedColumns()
 +                                    : ColumnFilter.all(metadata);
          // Note that the views could have restrictions on regular columns, 
but even if that's the case we shouldn't apply those
          // when we read, because even if an existing row doesn't match the 
view filter, the update can change that in which
          // case we'll need to know the existing content. There is also no 
easy way to merge those RowFilter when we have multiple views.

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/view/View.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/view/View.java
index 4bffe16,0baadc3..f601673
--- a/src/java/org/apache/cassandra/db/view/View.java
+++ b/src/java/org/apache/cassandra/db/view/View.java
@@@ -54,9 -51,8 +54,7 @@@ public class Vie
  
      private final ColumnFamilyStore baseCfs;
  
 -    public volatile List<ColumnDefinition> baseNonPKColumnsInViewPK;
 -
 +    public volatile List<ColumnMetadata> baseNonPKColumnsInViewPK;
- 
-     private final boolean includeAllColumns;
      private ViewBuilder builder;
  
      // Only the raw statement can be final, because the statement cannot 
always be prepared when the MV is initialized.
@@@ -70,8 -66,7 +68,7 @@@
                  ColumnFamilyStore baseCfs)
      {
          this.baseCfs = baseCfs;
 -        this.name = definition.viewName;
 +        this.name = definition.name;
-         this.includeAllColumns = definition.includeAllColumns;
          this.rawSelect = definition.select;
  
          updateDefinition(definition);
@@@ -83,16 -78,16 +80,15 @@@
      }
  
      /**
 -     * This updates the columns stored which are dependent on the base 
CFMetaData.
 +     * This updates the columns stored which are dependent on the base 
TableMetadata.
       */
 -    public void updateDefinition(ViewDefinition definition)
 +    public void updateDefinition(ViewMetadata definition)
      {
          this.definition = definition;
--
 -        List<ColumnDefinition> nonPKDefPartOfViewPK = new ArrayList<>();
 -        for (ColumnDefinition baseColumn : baseCfs.metadata.allColumns())
 +        List<ColumnMetadata> nonPKDefPartOfViewPK = new ArrayList<>();
-         for (ColumnMetadata baseColumn : baseCfs.metadata().columns())
++        for (ColumnMetadata baseColumn : baseCfs.metadata.get().columns())
          {
 -            ColumnDefinition viewColumn = getViewColumn(baseColumn);
 +            ColumnMetadata viewColumn = getViewColumn(baseColumn);
              if (viewColumn != null && !baseColumn.isPrimaryKeyColumn() && 
viewColumn.isPrimaryKeyColumn())
                  nonPKDefPartOfViewPK.add(baseColumn);
          }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/view/ViewManager.java
----------------------------------------------------------------------

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/db/view/ViewUpdateGenerator.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/db/view/ViewUpdateGenerator.java
index e276f62,74758e7..815a566
--- a/src/java/org/apache/cassandra/db/view/ViewUpdateGenerator.java
+++ b/src/java/org/apache/cassandra/db/view/ViewUpdateGenerator.java
@@@ -377,14 -383,34 +384,34 @@@ public class ViewUpdateGenerato
          if (!matchesViewFilter(existingBaseRow))
              return;
  
-         deleteOldEntryInternal(existingBaseRow);
+         deleteOldEntryInternal(existingBaseRow, mergedBaseRow);
      }
  
-     private void deleteOldEntryInternal(Row existingBaseRow)
+     private void deleteOldEntryInternal(Row existingBaseRow, Row 
mergedBaseRow)
      {
          startNewUpdate(existingBaseRow);
-         DeletionTime dt = new 
DeletionTime(computeTimestampForEntryDeletion(existingBaseRow), nowInSec);
-         currentViewEntryBuilder.addRowDeletion(Row.Deletion.shadowable(dt));
+         long timestamp = computeTimestampForEntryDeletion(existingBaseRow, 
mergedBaseRow);
+         long rowDeletion = 
mergedBaseRow.deletion().time().markedForDeleteAt();
+         assert timestamp >= rowDeletion;
 -
 -        // If computed deletion timestamp greater than row deletion, it must 
be coming from
++        
++        // If computed deletion timestamp greater than row deletion, it must 
be coming from 
+         //  1. non-pk base column used in view pk, or
+         //  2. unselected base column
+         //  any case, we need to use it as expired livenessInfo
+         // If computed deletion timestamp is from row deletion, we only need 
row deletion itself
+         if (timestamp > rowDeletion)
+         {
+             /**
+               * TODO: This is a hack and overload of LivenessInfo and we 
should probably modify
+               * the storage engine to properly support this, but on the 
meantime this
+               * should be fine because it only happens in some specific 
scenarios explained above.
+               */
+             LivenessInfo info = LivenessInfo.withExpirationTime(timestamp, 
Integer.MAX_VALUE, nowInSec);
+             currentViewEntryBuilder.addPrimaryKeyLivenessInfo(info);
+         }
+         currentViewEntryBuilder.addRowDeletion(mergedBaseRow.deletion());
+ 
+         addDifferentCells(existingBaseRow, mergedBaseRow);
          submitUpdate();
      }
  
@@@ -441,53 -457,77 +458,77 @@@
  
          LivenessInfo baseLiveness = baseRow.primaryKeyLivenessInfo();
  
 -        if (view.hasSamePrimaryKeyColumnsAsBaseTable())
 +        if (view.baseNonPKColumnsInViewPK.isEmpty())
          {
-             int ttl = baseLiveness.ttl();
-             int expirationTime = baseLiveness.localExpirationTime();
+             if (view.getDefinition().includeAllColumns)
+                 return baseLiveness;
+ 
+             long timestamp = baseLiveness.timestamp();
+             boolean hasNonExpiringLiveCell = false;
+             Cell biggestExpirationCell = null;
              for (Cell cell : baseRow.cells())
              {
-                 if (cell.ttl() > ttl)
+                 if (view.getViewColumn(cell.column()) != null)
+                     continue;
+                 if (!isLive(cell))
+                     continue;
+                 timestamp = Math.max(timestamp, cell.maxTimestamp());
+                 if (!cell.isExpiring())
+                     hasNonExpiringLiveCell = true;
+                 else
                  {
-                     ttl = cell.ttl();
-                     expirationTime = cell.localDeletionTime();
+                     if (biggestExpirationCell == null)
+                         biggestExpirationCell = cell;
+                     else if (cell.localDeletionTime() > 
biggestExpirationCell.localDeletionTime())
+                         biggestExpirationCell = cell;
                  }
              }
-             return ttl == baseLiveness.ttl()
-                  ? baseLiveness
-                  : LivenessInfo.withExpirationTime(baseLiveness.timestamp(), 
ttl, expirationTime);
+             if (baseLiveness.isLive(nowInSec) && !baseLiveness.isExpiring())
+                 return LivenessInfo.create(timestamp, nowInSec);
+             if (hasNonExpiringLiveCell)
+                 return LivenessInfo.create(timestamp, nowInSec);
+             if (biggestExpirationCell == null)
+                 return baseLiveness;
+             if (biggestExpirationCell.localDeletionTime() > 
baseLiveness.localExpirationTime()
+                     || !baseLiveness.isLive(nowInSec))
+                 return LivenessInfo.withExpirationTime(timestamp,
+                                                        
biggestExpirationCell.ttl(),
+                                                        
biggestExpirationCell.localDeletionTime());
+             return baseLiveness;
          }
  
-         ColumnMetadata baseColumn = view.baseNonPKColumnsInViewPK.get(0);
-         Cell cell = baseRow.getCell(baseColumn);
+         Cell cell = baseRow.getCell(view.baseNonPKColumnsInViewPK.get(0));
          assert isLive(cell) : "We shouldn't have got there if the base row 
had no associated entry";
  
-         long timestamp = Math.max(baseLiveness.timestamp(), cell.timestamp());
-         return LivenessInfo.withExpirationTime(timestamp, cell.ttl(), 
cell.localDeletionTime());
+         return LivenessInfo.withExpirationTime(cell.timestamp(), cell.ttl(), 
cell.localDeletionTime());
      }
  
-     private long computeTimestampForEntryDeletion(Row baseRow)
+     private long computeTimestampForEntryDeletion(Row existingBaseRow, Row 
mergedBaseRow)
      {
-         // We delete the old row with it's row entry timestamp using a 
shadowable deletion.
-         // We must make sure that the deletion deletes everything in the 
entry (or the entry will
-         // still show up), so we must use the bigger timestamp found in the 
existing row (for any
-         // column included in the view at least).
-         // TODO: We have a problem though: if the entry is "resurected" by a 
later update, we would
-         // need to ensure that the timestamp for then entry then is bigger 
than the tombstone
-         // we're just inserting, which is not currently guaranteed.
-         // This is a bug for a separate ticket though.
-         long timestamp = baseRow.primaryKeyLivenessInfo().timestamp();
-         for (ColumnData data : baseRow)
+         DeletionTime deletion = mergedBaseRow.deletion().time();
+         if (view.hasSamePrimaryKeyColumnsAsBaseTable())
          {
-             if (!view.getDefinition().includes(data.column().name))
-                 continue;
+             long timestamp = Math.max(deletion.markedForDeleteAt(), 
existingBaseRow.primaryKeyLivenessInfo().timestamp());
+             if (view.getDefinition().includeAllColumns)
+                 return timestamp;
  
-             timestamp = Math.max(timestamp, data.maxTimestamp());
+             for (Cell cell : existingBaseRow.cells())
+             {
+                 // selected column should not contribute to view deletion, 
itself is already included in view row
+                 if (view.getViewColumn(cell.column()) != null)
+                     continue;
+                 // unselected column is used regardless live or dead, because 
we don't know if it was used for liveness.
+                 timestamp = Math.max(timestamp, cell.maxTimestamp());
+             }
+             return timestamp;
          }
-         return timestamp;
+         // has base non-pk column in view pk
+         Cell before = 
existingBaseRow.getCell(view.baseNonPKColumnsInViewPK.get(0));
+         assert isLive(before) : "We shouldn't have got there if the base row 
had no associated entry";
+         return deletion.deletes(before) ? deletion.markedForDeleteAt() : 
before.timestamp();
      }
  
 -    private void addColumnData(ColumnDefinition viewColumn, ColumnData 
baseTableData)
 +    private void addColumnData(ColumnMetadata viewColumn, ColumnData 
baseTableData)
      {
          assert viewColumn.isComplex() == baseTableData.column().isComplex();
          if (!viewColumn.isComplex())

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/schema/TableMetadata.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/schema/TableMetadata.java
index 44b1f8a,0000000..86964cc
mode 100644,000000..100644
--- a/src/java/org/apache/cassandra/schema/TableMetadata.java
+++ b/src/java/org/apache/cassandra/schema/TableMetadata.java
@@@ -1,956 -1,0 +1,969 @@@
 +/*
 + * Licensed to the Apache Software Foundation (ASF) under one
 + * or more contributor license agreements.  See the NOTICE file
 + * distributed with this work for additional information
 + * regarding copyright ownership.  The ASF licenses this file
 + * to you under the Apache License, Version 2.0 (the
 + * "License"); you may not use this file except in compliance
 + * with the License.  You may obtain a copy of the License at
 + *
 + *     http://www.apache.org/licenses/LICENSE-2.0
 + *
 + * Unless required by applicable law or agreed to in writing, software
 + * distributed under the License is distributed on an "AS IS" BASIS,
 + * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 + * See the License for the specific language governing permissions and
 + * limitations under the License.
 + */
 +package org.apache.cassandra.schema;
 +
 +import java.nio.ByteBuffer;
 +import java.util.*;
 +import java.util.Objects;
 +
 +import com.google.common.base.MoreObjects;
 +import com.google.common.collect.*;
 +
 +import org.apache.cassandra.auth.DataResource;
 +import org.apache.cassandra.config.DatabaseDescriptor;
 +import org.apache.cassandra.cql3.ColumnIdentifier;
 +import org.apache.cassandra.db.*;
 +import org.apache.cassandra.db.marshal.*;
 +import org.apache.cassandra.dht.IPartitioner;
 +import org.apache.cassandra.exceptions.ConfigurationException;
 +import org.apache.cassandra.utils.AbstractIterator;
 +import org.github.jamm.Unmetered;
 +
 +import static java.lang.String.format;
 +import static java.util.stream.Collectors.toList;
 +import static java.util.stream.Collectors.toSet;
 +
 +import static com.google.common.collect.Iterables.transform;
 +import static org.apache.cassandra.schema.IndexMetadata.isNameValid;
 +
 +@Unmetered
 +public final class TableMetadata
 +{
 +    public enum Flag
 +    {
 +        SUPER, COUNTER, DENSE, COMPOUND;
 +
 +        public static Set<Flag> fromStringSet(Set<String> strings)
 +        {
 +            return 
strings.stream().map(String::toUpperCase).map(Flag::valueOf).collect(toSet());
 +        }
 +
 +        public static Set<String> toStringSet(Set<Flag> flags)
 +        {
 +            return 
flags.stream().map(Flag::toString).map(String::toLowerCase).collect(toSet());
 +        }
 +    }
 +
 +    public final String keyspace;
 +    public final String name;
 +    public final TableId id;
 +
 +    public final IPartitioner partitioner;
 +    public final TableParams params;
 +    public final ImmutableSet<Flag> flags;
 +
 +    private final boolean isView;
 +    private final String indexName; // derived from table name
 +
 +    /*
 +     * All CQL3 columns definition are stored in the columns map.
 +     * On top of that, we keep separated collection of each kind of 
definition, to
 +     * 1) allow easy access to each kind and
 +     * 2) for the partition key and clustering key ones, those list are 
ordered by the "component index" of the elements.
 +     */
 +    public final ImmutableMap<ByteBuffer, DroppedColumn> droppedColumns;
 +    final ImmutableMap<ByteBuffer, ColumnMetadata> columns;
 +
 +    private final ImmutableList<ColumnMetadata> partitionKeyColumns;
 +    private final ImmutableList<ColumnMetadata> clusteringColumns;
 +    private final RegularAndStaticColumns regularAndStaticColumns;
 +
 +    public final Indexes indexes;
 +    public final Triggers triggers;
 +
 +    // derived automatically from flags and columns
 +    public final AbstractType<?> partitionKeyType;
 +    public final ClusteringComparator comparator;
 +
 +    /*
 +     * For dense tables, this alias the single non-PK column the table 
contains (since it can only have one). We keep
 +     * that as convenience to access that column more easily (but we could 
replace calls by regularAndStaticColumns().iterator().next()
 +     * for those tables in practice).
 +     */
 +    public final ColumnMetadata compactValueColumn;
 +
 +    // performance hacks; TODO see if all are really necessary
 +    public final DataResource resource;
 +
 +    private TableMetadata(Builder builder)
 +    {
 +        keyspace = builder.keyspace;
 +        name = builder.name;
 +        id = builder.id;
 +
 +        partitioner = builder.partitioner;
 +        params = builder.params.build();
 +        flags = Sets.immutableEnumSet(builder.flags);
 +        isView = builder.isView;
 +
 +        indexName = name.contains(".")
 +                  ? name.substring(name.indexOf('.') + 1)
 +                  : null;
 +
 +        droppedColumns = ImmutableMap.copyOf(builder.droppedColumns);
 +        Collections.sort(builder.partitionKeyColumns);
 +        partitionKeyColumns = 
ImmutableList.copyOf(builder.partitionKeyColumns);
 +        Collections.sort(builder.clusteringColumns);
 +        clusteringColumns = ImmutableList.copyOf(builder.clusteringColumns);
 +        regularAndStaticColumns = 
RegularAndStaticColumns.builder().addAll(builder.regularAndStaticColumns).build();
 +        columns = ImmutableMap.copyOf(builder.columns);
 +
 +        indexes = builder.indexes;
 +        triggers = builder.triggers;
 +
 +        partitionKeyType = partitionKeyColumns.size() == 1
 +                         ? partitionKeyColumns.get(0).type
 +                         : 
CompositeType.getInstance(transform(partitionKeyColumns, t -> t.type));
 +
 +        comparator = new ClusteringComparator(transform(clusteringColumns, c 
-> c.type));
 +
 +        compactValueColumn = isCompactTable()
 +                           ? 
CompactTables.getCompactValueColumn(regularAndStaticColumns, isSuper())
 +                           : null;
 +
 +        resource = DataResource.table(keyspace, name);
 +    }
 +
 +    public static Builder builder(String keyspace, String table)
 +    {
 +        return new Builder(keyspace, table);
 +    }
 +
 +    public static Builder builder(String keyspace, String table, TableId id)
 +    {
 +        return new Builder(keyspace, table, id);
 +    }
 +
 +    public Builder unbuild()
 +    {
 +        return builder(keyspace, name, id)
 +               .partitioner(partitioner)
 +               .params(params)
 +               .flags(flags)
 +               .isView(isView)
 +               .addColumns(columns())
 +               .droppedColumns(droppedColumns)
 +               .indexes(indexes)
 +               .triggers(triggers);
 +    }
 +
 +    public boolean isView()
 +    {
 +        return isView;
 +    }
 +
 +    public boolean isIndex()
 +    {
 +        return indexName != null;
 +    }
 +
 +    public Optional<String> indexName()
 +    {
 +        return Optional.ofNullable(indexName);
 +    }
 +
 +    /*
 +     *  We call dense a CF for which each component of the comparator is a 
clustering column, i.e. no
 +     * component is used to store regular column names. In other words, 
non-composite static "thrift"
 +     * and CQL3 CF are *not* dense.
 +     */
 +    public boolean isDense()
 +    {
 +        return flags.contains(Flag.DENSE);
 +    }
 +
 +    public boolean isCompound()
 +    {
 +        return flags.contains(Flag.COMPOUND);
 +    }
 +
 +    public boolean isSuper()
 +    {
 +        return flags.contains(Flag.SUPER);
 +    }
 +
 +    public boolean isCounter()
 +    {
 +        return flags.contains(Flag.COUNTER);
 +    }
 +
 +    public boolean isCQLTable()
 +    {
 +        return !isSuper() && !isDense() && isCompound();
 +    }
 +
 +    public boolean isCompactTable()
 +    {
 +        return !isCQLTable();
 +    }
 +
 +    public boolean isStaticCompactTable()
 +    {
 +        return !isSuper() && !isDense() && !isCompound();
 +    }
 +
 +    public ImmutableCollection<ColumnMetadata> columns()
 +    {
 +        return columns.values();
 +    }
 +
 +    public Iterable<ColumnMetadata> primaryKeyColumns()
 +    {
 +        return Iterables.concat(partitionKeyColumns, clusteringColumns);
 +    }
 +
 +    public ImmutableList<ColumnMetadata> partitionKeyColumns()
 +    {
 +        return partitionKeyColumns;
 +    }
 +
 +    public ImmutableList<ColumnMetadata> clusteringColumns()
 +    {
 +        return clusteringColumns;
 +    }
 +
 +    public RegularAndStaticColumns regularAndStaticColumns()
 +    {
 +        return regularAndStaticColumns;
 +    }
 +
 +    public Columns regularColumns()
 +    {
 +        return regularAndStaticColumns.regulars;
 +    }
 +
 +    public Columns staticColumns()
 +    {
 +        return regularAndStaticColumns.statics;
 +    }
 +
 +    /*
 +     * An iterator over all column definitions but that respects the order of 
a SELECT *.
 +     * This also "hides" the clustering/regular columns for a non-CQL3 
non-dense table for backward compatibility
 +     * sake.
 +     */
 +    public Iterator<ColumnMetadata> allColumnsInSelectOrder()
 +    {
 +        final boolean isStaticCompactTable = isStaticCompactTable();
 +        final boolean noNonPkColumns = isCompactTable() && 
CompactTables.hasEmptyCompactValue(this);
 +
 +        return new AbstractIterator<ColumnMetadata>()
 +        {
 +            private final Iterator<ColumnMetadata> partitionKeyIter = 
partitionKeyColumns.iterator();
 +            private final Iterator<ColumnMetadata> clusteringIter =
 +                isStaticCompactTable ? Collections.emptyIterator() : 
clusteringColumns.iterator();
 +            private final Iterator<ColumnMetadata> otherColumns =
 +                noNonPkColumns
 +              ? Collections.emptyIterator()
 +              : (isStaticCompactTable ? staticColumns().selectOrderIterator()
 +                                      : 
regularAndStaticColumns.selectOrderIterator());
 +
 +            protected ColumnMetadata computeNext()
 +            {
 +                if (partitionKeyIter.hasNext())
 +                    return partitionKeyIter.next();
 +
 +                if (clusteringIter.hasNext())
 +                    return clusteringIter.next();
 +
 +                return otherColumns.hasNext() ? otherColumns.next() : 
endOfData();
 +            }
 +        };
 +    }
 +
 +    /**
 +     * Returns the ColumnMetadata for {@code name}.
 +     */
 +    public ColumnMetadata getColumn(ColumnIdentifier name)
 +    {
 +        return columns.get(name.bytes);
 +    }
 +
 +    /*
 +     * In general it is preferable to work with ColumnIdentifier to make it
 +     * clear that we are talking about a CQL column, not a cell name, but 
there
 +     * are a few cases where all we have is a ByteBuffer (when dealing with 
IndexExpression
 +     * for instance) so...
 +     */
 +    public ColumnMetadata getColumn(ByteBuffer name)
 +    {
 +        return columns.get(name);
 +    }
 +
 +    public ColumnMetadata getDroppedColumn(ByteBuffer name)
 +    {
 +        DroppedColumn dropped = droppedColumns.get(name);
 +        return dropped == null ? null : dropped.column;
 +    }
 +
 +    /**
 +     * Returns a "fake" ColumnMetadata corresponding to the dropped column 
{@code name}
 +     * or {@code null} if there is no such dropped column.
 +     *
 +     * @param name - the column name
 +     * @param isStatic - whether the column was a static column, if known
 +     */
 +    public ColumnMetadata getDroppedColumn(ByteBuffer name, boolean isStatic)
 +    {
 +        DroppedColumn dropped = droppedColumns.get(name);
 +        if (dropped == null)
 +            return null;
 +
 +        if (isStatic && !dropped.column.isStatic())
 +            return ColumnMetadata.staticColumn(this, name, 
dropped.column.type);
 +
 +        return dropped.column;
 +    }
 +
 +    public boolean hasStaticColumns()
 +    {
 +        return !staticColumns().isEmpty();
 +    }
 +
 +    public void validate()
 +    {
 +        if (!isNameValid(keyspace))
 +            except("Keyspace name must not be empty, more than %s characters 
long, or contain non-alphanumeric-underscore characters (got \"%s\")", 
SchemaConstants.NAME_LENGTH, keyspace);
 +
 +        if (!isNameValid(name))
 +            except("Table name must not be empty, more than %s characters 
long, or contain non-alphanumeric-underscore characters (got \"%s\")", 
SchemaConstants.NAME_LENGTH, name);
 +
 +        params.validate();
 +
 +        if (partitionKeyColumns.stream().anyMatch(c -> c.type.isCounter()))
 +            except("PRIMARY KEY columns cannot contain counters");
 +
 +        // Mixing counter with non counter columns is not supported (#2614)
 +        if (isCounter())
 +        {
 +            for (ColumnMetadata column : regularAndStaticColumns)
 +                if (!(column.type.isCounter()) && 
!CompactTables.isSuperColumnMapColumn(column))
 +                    except("Cannot have a non counter column (\"%s\") in a 
counter table", column.name);
 +        }
 +        else
 +        {
 +            for (ColumnMetadata column : regularAndStaticColumns)
 +                if (column.type.isCounter())
 +                    except("Cannot have a counter column (\"%s\") in a non 
counter column table", column.name);
 +        }
 +
 +        // All tables should have a partition key
 +        if (partitionKeyColumns.isEmpty())
 +            except("Missing partition keys for table %s", toString());
 +
 +        // A compact table should always have a clustering
 +        if (isCompactTable() && clusteringColumns.isEmpty())
 +            except("For table %s, isDense=%b, isCompound=%b, clustering=%s", 
toString(), isDense(), isCompound(), clusteringColumns);
 +
 +        if (!indexes.isEmpty() && isSuper())
 +            except("Secondary indexes are not supported on super column 
families");
 +
 +        indexes.validate(this);
 +    }
 +
 +    void validateCompatibility(TableMetadata other)
 +    {
 +        if (isIndex())
 +            return;
 +
 +        if (!other.keyspace.equals(keyspace))
 +            except("Keyspace mismatch (found %s; expected %s)", 
other.keyspace, keyspace);
 +
 +        if (!other.name.equals(name))
 +            except("Table mismatch (found %s; expected %s)", other.name, 
name);
 +
 +        if (!other.id.equals(id))
 +            except("Table ID mismatch (found %s; expected %s)", other.id, id);
 +
 +        if (!other.flags.equals(flags))
 +            except("Table type mismatch (found %s; expected %s)", 
other.flags, flags);
 +
 +        if (other.partitionKeyColumns.size() != partitionKeyColumns.size())
 +            except("Partition keys of different length (found %s; expected 
%s)", other.partitionKeyColumns.size(), partitionKeyColumns.size());
 +
 +        for (int i = 0; i < partitionKeyColumns.size(); i++)
 +            if 
(!other.partitionKeyColumns.get(i).type.isCompatibleWith(partitionKeyColumns.get(i).type))
 +                except("Partition key column mismatch (found %s; expected 
%s)", other.partitionKeyColumns.get(i).type, partitionKeyColumns.get(i).type);
 +
 +        if (other.clusteringColumns.size() != clusteringColumns.size())
 +            except("Clustering columns of different length (found %s; 
expected %s)", other.clusteringColumns.size(), clusteringColumns.size());
 +
 +        for (int i = 0; i < clusteringColumns.size(); i++)
 +            if 
(!other.clusteringColumns.get(i).type.isCompatibleWith(clusteringColumns.get(i).type))
 +                except("Clustering column mismatch (found %s; expected %s)", 
other.clusteringColumns.get(i).type, clusteringColumns.get(i).type);
 +
 +        for (ColumnMetadata otherColumn : other.regularAndStaticColumns)
 +        {
 +            ColumnMetadata column = getColumn(otherColumn.name);
 +            if (column != null && 
!otherColumn.type.isCompatibleWith(column.type))
 +                except("Column mismatch (found %s; expected %s", otherColumn, 
column);
 +        }
 +    }
 +
 +    public ClusteringComparator partitionKeyAsClusteringComparator()
 +    {
 +        return new ClusteringComparator(partitionKeyColumns.stream().map(c -> 
c.type).collect(toList()));
 +    }
 +
 +    /**
 +     * The type to use to compare column names in "static compact"
 +     * tables or superColumn ones.
 +     * <p>
 +     * This exists because for historical reasons, "static compact" tables as
 +     * well as super column ones can have non-UTF8 column names.
 +     * <p>
 +     * This method should only be called for superColumn tables and "static
 +     * compact" ones. For any other table, all column names are UTF8.
 +     */
 +    public AbstractType<?> staticCompactOrSuperTableColumnNameType()
 +    {
 +        if (isSuper())
 +        {
 +            assert compactValueColumn != null && compactValueColumn.type 
instanceof MapType;
 +            return ((MapType) compactValueColumn.type).nameComparator();
 +        }
 +
 +        assert isStaticCompactTable();
 +        return clusteringColumns.get(0).type;
 +    }
 +
 +    public AbstractType<?> columnDefinitionNameComparator(ColumnMetadata.Kind 
kind)
 +    {
 +        return (isSuper() && kind == ColumnMetadata.Kind.REGULAR) || 
(isStaticCompactTable() && kind == ColumnMetadata.Kind.STATIC)
 +             ? staticCompactOrSuperTableColumnNameType()
 +             : UTF8Type.instance;
 +    }
 +
 +    /**
 +     * Generate a table name for an index corresponding to the given column.
 +     * This is NOT the same as the index's name! This is only used in sstable 
filenames and is not exposed to users.
 +     *
 +     * @param info A definition of the column with index
 +     *
 +     * @return name of the index table
 +     */
 +    public String indexTableName(IndexMetadata info)
 +    {
 +        // TODO simplify this when info.index_name is guaranteed to be set
 +        return name + Directories.SECONDARY_INDEX_NAME_SEPARATOR + info.name;
 +    }
 +
 +    /**
 +     * @return true if the change as made impacts queries/updates on the 
table,
 +     *         e.g. any columns or indexes were added, removed, or altered; 
otherwise, false is returned.
 +     *         Used to determine whether prepared statements against this 
table need to be re-prepared.
 +     */
 +    boolean changeAffectsPreparedStatements(TableMetadata updated)
 +    {
 +        return !partitionKeyColumns.equals(updated.partitionKeyColumns)
 +            || !clusteringColumns.equals(updated.clusteringColumns)
 +            || 
!regularAndStaticColumns.equals(updated.regularAndStaticColumns)
 +            || !indexes.equals(updated.indexes)
 +            || params.defaultTimeToLive != updated.params.defaultTimeToLive
 +            || params.gcGraceSeconds != updated.params.gcGraceSeconds;
 +    }
 +
 +    /**
 +     * There are a couple of places in the code where we need a TableMetadata 
object and don't have one readily available
 +     * and know that only the keyspace and name matter. This creates such 
"fake" metadata. Use only if you know what
 +     * you're doing.
 +     */
 +    public static TableMetadata minimal(String keyspace, String name)
 +    {
 +        return TableMetadata.builder(keyspace, name)
 +                            .addPartitionKeyColumn("key", BytesType.instance)
 +                            .build();
 +    }
 +
 +    public TableMetadata updateIndexTableMetadata(TableParams baseTableParams)
 +    {
 +        TableParams.Builder builder =
 +            baseTableParams.unbuild()
 +                           .readRepairChance(0.0)
 +                           .dcLocalReadRepairChance(0.0)
 +                           .gcGraceSeconds(0);
 +
 +        // Depends on parent's cache setting, turn on its index table's cache.
 +        // Row caching is never enabled; see CASSANDRA-5732
 +        builder.caching(baseTableParams.caching.cacheKeys() ? 
CachingParams.CACHE_KEYS : CachingParams.CACHE_NOTHING);
 +
 +        return unbuild().params(builder.build()).build();
 +    }
 +
 +    private void except(String format, Object... args)
 +    {
 +        throw new ConfigurationException(format(format, args));
 +    }
 +
 +    @Override
 +    public boolean equals(Object o)
 +    {
 +        if (this == o)
 +            return true;
 +
 +        if (!(o instanceof TableMetadata))
 +            return false;
 +
 +        TableMetadata tm = (TableMetadata) o;
 +
 +        return keyspace.equals(tm.keyspace)
 +            && name.equals(tm.name)
 +            && id.equals(tm.id)
 +            && partitioner.equals(tm.partitioner)
 +            && params.equals(tm.params)
 +            && flags.equals(tm.flags)
 +            && isView == tm.isView
 +            && columns.equals(tm.columns)
 +            && droppedColumns.equals(tm.droppedColumns)
 +            && indexes.equals(tm.indexes)
 +            && triggers.equals(tm.triggers);
 +    }
 +
 +    @Override
 +    public int hashCode()
 +    {
 +        return Objects.hash(keyspace, name, id, partitioner, params, flags, 
isView, columns, droppedColumns, indexes, triggers);
 +    }
 +
 +    @Override
 +    public String toString()
 +    {
 +        return String.format("%s.%s", ColumnIdentifier.maybeQuote(keyspace), 
ColumnIdentifier.maybeQuote(name));
 +    }
 +
 +    public String toDebugString()
 +    {
 +        return MoreObjects.toStringHelper(this)
 +                          .add("keyspace", keyspace)
 +                          .add("table", name)
 +                          .add("id", id)
 +                          .add("partitioner", partitioner)
 +                          .add("params", params)
 +                          .add("flags", flags)
 +                          .add("isView", isView)
 +                          .add("columns", columns())
 +                          .add("droppedColumns", droppedColumns.values())
 +                          .add("indexes", indexes)
 +                          .add("triggers", triggers)
 +                          .toString();
 +    }
 +
 +    public static final class Builder
 +    {
 +        final String keyspace;
 +        final String name;
 +
 +        private TableId id;
 +
 +        private IPartitioner partitioner;
 +        private TableParams.Builder params = TableParams.builder();
 +
 +        // Setting compound as default as "normal" CQL tables are compound 
and that's what we want by default
 +        private Set<Flag> flags = EnumSet.of(Flag.COMPOUND);
 +        private Triggers triggers = Triggers.none();
 +        private Indexes indexes = Indexes.none();
 +
 +        private final Map<ByteBuffer, DroppedColumn> droppedColumns = new 
HashMap<>();
 +        private final Map<ByteBuffer, ColumnMetadata> columns = new 
HashMap<>();
 +        private final List<ColumnMetadata> partitionKeyColumns = new 
ArrayList<>();
 +        private final List<ColumnMetadata> clusteringColumns = new 
ArrayList<>();
 +        private final List<ColumnMetadata> regularAndStaticColumns = new 
ArrayList<>();
 +
 +        private boolean isView;
 +
 +        private Builder(String keyspace, String name, TableId id)
 +        {
 +            this.keyspace = keyspace;
 +            this.name = name;
 +            this.id = id;
 +        }
 +
 +        private Builder(String keyspace, String name)
 +        {
 +            this.keyspace = keyspace;
 +            this.name = name;
 +        }
 +
 +        public TableMetadata build()
 +        {
 +            if (partitioner == null)
 +                partitioner = DatabaseDescriptor.getPartitioner();
 +
 +            if (id == null)
 +                id = TableId.generate();
 +
 +            return new TableMetadata(this);
 +        }
 +
 +        public Builder id(TableId val)
 +        {
 +            id = val;
 +            return this;
 +        }
 +
 +        public Builder partitioner(IPartitioner val)
 +        {
 +            partitioner = val;
 +            return this;
 +        }
 +
 +        public Builder params(TableParams val)
 +        {
 +            params = val.unbuild();
 +            return this;
 +        }
 +
 +        public Builder bloomFilterFpChance(double val)
 +        {
 +            params.bloomFilterFpChance(val);
 +            return this;
 +        }
 +
 +        public Builder caching(CachingParams val)
 +        {
 +            params.caching(val);
 +            return this;
 +        }
 +
 +        public Builder comment(String val)
 +        {
 +            params.comment(val);
 +            return this;
 +        }
 +
 +        public Builder compaction(CompactionParams val)
 +        {
 +            params.compaction(val);
 +            return this;
 +        }
 +
 +        public Builder compression(CompressionParams val)
 +        {
 +            params.compression(val);
 +            return this;
 +        }
 +
 +        public Builder dcLocalReadRepairChance(double val)
 +        {
 +            params.dcLocalReadRepairChance(val);
 +            return this;
 +        }
 +
 +        public Builder defaultTimeToLive(int val)
 +        {
 +            params.defaultTimeToLive(val);
 +            return this;
 +        }
 +
 +        public Builder gcGraceSeconds(int val)
 +        {
 +            params.gcGraceSeconds(val);
 +            return this;
 +        }
 +
 +        public Builder maxIndexInterval(int val)
 +        {
 +            params.maxIndexInterval(val);
 +            return this;
 +        }
 +
 +        public Builder memtableFlushPeriod(int val)
 +        {
 +            params.memtableFlushPeriodInMs(val);
 +            return this;
 +        }
 +
 +        public Builder minIndexInterval(int val)
 +        {
 +            params.minIndexInterval(val);
 +            return this;
 +        }
 +
 +        public Builder readRepairChance(double val)
 +        {
 +            params.readRepairChance(val);
 +            return this;
 +        }
 +
 +        public Builder crcCheckChance(double val)
 +        {
 +            params.crcCheckChance(val);
 +            return this;
 +        }
 +
 +        public Builder speculativeRetry(SpeculativeRetryParam val)
 +        {
 +            params.speculativeRetry(val);
 +            return this;
 +        }
 +
 +        public Builder extensions(Map<String, ByteBuffer> val)
 +        {
 +            params.extensions(val);
 +            return this;
 +        }
 +
 +        public Builder isView(boolean val)
 +        {
 +            isView = val;
 +            return this;
 +        }
 +
 +        public Builder flags(Set<Flag> val)
 +        {
 +            flags = val;
 +            return this;
 +        }
 +
 +        public Builder isSuper(boolean val)
 +        {
 +            return flag(Flag.SUPER, val);
 +        }
 +
 +        public Builder isCounter(boolean val)
 +        {
 +            return flag(Flag.COUNTER, val);
 +        }
 +
 +        public Builder isDense(boolean val)
 +        {
 +            return flag(Flag.DENSE, val);
 +        }
 +
 +        public Builder isCompound(boolean val)
 +        {
 +            return flag(Flag.COMPOUND, val);
 +        }
 +
 +        private Builder flag(Flag flag, boolean set)
 +        {
 +            if (set) flags.add(flag); else flags.remove(flag);
 +            return this;
 +        }
 +
 +        public Builder triggers(Triggers val)
 +        {
 +            triggers = val;
 +            return this;
 +        }
 +
 +        public Builder indexes(Indexes val)
 +        {
 +            indexes = val;
 +            return this;
 +        }
 +
 +        public Builder addPartitionKeyColumn(String name, AbstractType type)
 +        {
 +            return addPartitionKeyColumn(ColumnIdentifier.getInterned(name, 
false), type);
 +        }
 +
 +        public Builder addPartitionKeyColumn(ColumnIdentifier name, 
AbstractType type)
 +        {
 +            return addColumn(new ColumnMetadata(keyspace, this.name, name, 
type, partitionKeyColumns.size(), ColumnMetadata.Kind.PARTITION_KEY));
 +        }
 +
 +        public Builder addClusteringColumn(String name, AbstractType type)
 +        {
 +            return addClusteringColumn(ColumnIdentifier.getInterned(name, 
false), type);
 +        }
 +
 +        public Builder addClusteringColumn(ColumnIdentifier name, 
AbstractType type)
 +        {
 +            return addColumn(new ColumnMetadata(keyspace, this.name, name, 
type, clusteringColumns.size(), ColumnMetadata.Kind.CLUSTERING));
 +        }
 +
 +        public Builder addRegularColumn(String name, AbstractType type)
 +        {
 +            return addRegularColumn(ColumnIdentifier.getInterned(name, 
false), type);
 +        }
 +
 +        public Builder addRegularColumn(ColumnIdentifier name, AbstractType 
type)
 +        {
 +            return addColumn(new ColumnMetadata(keyspace, this.name, name, 
type, ColumnMetadata.NO_POSITION, ColumnMetadata.Kind.REGULAR));
 +        }
 +
 +        public Builder addStaticColumn(String name, AbstractType type)
 +        {
 +            return addStaticColumn(ColumnIdentifier.getInterned(name, false), 
type);
 +        }
 +
 +        public Builder addStaticColumn(ColumnIdentifier name, AbstractType 
type)
 +        {
 +            return addColumn(new ColumnMetadata(keyspace, this.name, name, 
type, ColumnMetadata.NO_POSITION, ColumnMetadata.Kind.STATIC));
 +        }
 +
 +        public Builder addColumn(ColumnMetadata column)
 +        {
 +            if (columns.containsKey(column.name.bytes))
 +                throw new IllegalArgumentException();
 +
 +            switch (column.kind)
 +            {
 +                case PARTITION_KEY:
 +                    partitionKeyColumns.add(column);
 +                    Collections.sort(partitionKeyColumns);
 +                    break;
 +                case CLUSTERING:
 +                    column.type.checkComparable();
 +                    clusteringColumns.add(column);
 +                    Collections.sort(clusteringColumns);
 +                    break;
 +                default:
 +                    regularAndStaticColumns.add(column);
 +            }
 +
 +            columns.put(column.name.bytes, column);
 +
 +            return this;
 +        }
 +
 +        public Builder addColumns(Iterable<ColumnMetadata> columns)
 +        {
 +            columns.forEach(this::addColumn);
 +            return this;
 +        }
 +
 +        public Builder droppedColumns(Map<ByteBuffer, DroppedColumn> 
droppedColumns)
 +        {
 +            this.droppedColumns.clear();
 +            this.droppedColumns.putAll(droppedColumns);
 +            return this;
 +        }
 +
 +        /**
 +         * Records a deprecated column for a system table.
 +         */
 +        public Builder recordDeprecatedSystemColumn(String name, 
AbstractType<?> type)
 +        {
 +            // As we play fast and loose with the removal timestamp, make 
sure this is not misused for a non system table.
 +            assert SchemaConstants.isSystemKeyspace(keyspace);
 +            recordColumnDrop(ColumnMetadata.regularColumn(keyspace, 
this.name, name, type), Long.MAX_VALUE);
 +            return this;
 +        }
 +
 +        public Builder recordColumnDrop(ColumnMetadata column, long 
timeMicros)
 +        {
 +            droppedColumns.put(column.name.bytes, new DroppedColumn(column, 
timeMicros));
 +            return this;
 +        }
 +
 +        public Iterable<ColumnMetadata> columns()
 +        {
 +            return columns.values();
 +        }
 +
 +        public Set<String> columnNames()
 +        {
 +            return columns.values().stream().map(c -> 
c.name.toString()).collect(toSet());
 +        }
 +
 +        public ColumnMetadata getColumn(ColumnIdentifier identifier)
 +        {
 +            return columns.get(identifier.bytes);
 +        }
 +
 +        public ColumnMetadata getColumn(ByteBuffer name)
 +        {
 +            return columns.get(name);
 +        }
 +
 +        public boolean hasRegularColumns()
 +        {
 +            return 
regularAndStaticColumns.stream().anyMatch(ColumnMetadata::isRegular);
 +        }
 +
 +        /*
 +         * The following methods all assume a Builder with valid set of 
partition key, clustering, regular and static columns.
 +         */
 +
 +        public Builder removeRegularOrStaticColumn(ColumnIdentifier 
identifier)
 +        {
 +            ColumnMetadata column = columns.get(identifier.bytes);
 +            if (column == null || column.isPrimaryKeyColumn())
 +                throw new IllegalArgumentException();
 +
 +            columns.remove(identifier.bytes);
 +            regularAndStaticColumns.remove(column);
 +
 +            return this;
 +        }
 +
 +        public Builder renamePrimaryKeyColumn(ColumnIdentifier from, 
ColumnIdentifier to)
 +        {
 +            if (columns.containsKey(to.bytes))
 +                throw new IllegalArgumentException();
 +
 +            ColumnMetadata column = columns.get(from.bytes);
 +            if (column == null || !column.isPrimaryKeyColumn())
 +                throw new IllegalArgumentException();
 +
 +            ColumnMetadata newColumn = column.withNewName(to);
 +            if (column.isPartitionKey())
 +                partitionKeyColumns.set(column.position(), newColumn);
 +            else
 +                clusteringColumns.set(column.position(), newColumn);
 +
 +            columns.remove(from.bytes);
 +            columns.put(to.bytes, newColumn);
 +
 +            return this;
 +        }
 +
 +        public Builder alterColumnType(ColumnIdentifier name, AbstractType<?> 
type)
 +        {
 +            ColumnMetadata column = columns.get(name.bytes);
 +            if (column == null)
 +                throw new IllegalArgumentException();
 +
 +            ColumnMetadata newColumn = column.withNewType(type);
 +
 +            switch (column.kind)
 +            {
 +                case PARTITION_KEY:
 +                    partitionKeyColumns.set(column.position(), newColumn);
 +                    break;
 +                case CLUSTERING:
 +                    clusteringColumns.set(column.position(), newColumn);
 +                    break;
 +                case REGULAR:
 +                case STATIC:
 +                    regularAndStaticColumns.remove(column);
 +                    regularAndStaticColumns.add(newColumn);
 +                    break;
 +            }
 +
 +            columns.put(column.name.bytes, newColumn);
 +
 +            return this;
 +        }
 +    }
++    
++    /**
++     * A table with strict liveness filters/ignores rows without PK liveness 
info,
++     * effectively tying the row liveness to its primary key liveness.
++     *
 ++     * Currently this is only used by views with a normal base column as a PK 
column,
 ++     * so updates to other columns do not make the row live when the base 
column
++     * is not live. See CASSANDRA-11500.
++     */
++    public boolean enforceStrictLiveness()
++    {
++        return isView && 
Keyspace.open(keyspace).viewManager.getByName(name).enforceStrictLiveness();
++    }
 +}

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/src/java/org/apache/cassandra/service/DataResolver.java
----------------------------------------------------------------------
diff --cc src/java/org/apache/cassandra/service/DataResolver.java
index f4a472d,aeb7c29..eb2553b
--- a/src/java/org/apache/cassandra/service/DataResolver.java
+++ b/src/java/org/apache/cassandra/service/DataResolver.java
@@@ -91,9 -88,14 +91,9 @@@ public class DataResolver extends Respo
          DataLimits.Counter counter = 
command.limits().newCounter(command.nowInSec(), true, 
command.selectsFullPartition());
  
          UnfilteredPartitionIterator merged = 
mergeWithShortReadProtection(iters, sources, counter);
-         FilteredPartitions filtered = FilteredPartitions.filter(merged, new 
Filter(command.nowInSec()));
 -        FilteredPartitions filtered = FilteredPartitions.filter(merged,
 -                                                                new 
Filter(command.nowInSec(),
 -                                                                           
command.metadata().enforceStrictLiveness()));
++        FilteredPartitions filtered = FilteredPartitions.filter(merged, new 
Filter(command.nowInSec(), command.metadata().enforceStrictLiveness()));
          PartitionIterator counted = counter.applyTo(filtered);
 -
 -        return command.isForThrift()
 -             ? counted
 -             : Transformation.apply(counted, new EmptyPartitionsDiscarder());
 +        return Transformation.apply(counted, new EmptyPartitionsDiscarder());
      }
  
      public void compareResponses()

http://git-wip-us.apache.org/repos/asf/cassandra/blob/e6fb8302/test/unit/org/apache/cassandra/cql3/CQLTester.java
----------------------------------------------------------------------


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@cassandra.apache.org
For additional commands, e-mail: commits-h...@cassandra.apache.org

Reply via email to