Repository: cassandra
Updated Branches:
  refs/heads/trunk 5786b3204 -> eae3b0264


Reverted partitionCache metric names to rowCache, CASSANDRA-9448


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/eae3b026
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/eae3b026
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/eae3b026

Branch: refs/heads/trunk
Commit: eae3b02649789f1993147d5580a7b20794212319
Parents: 5786b32
Author: Stefania Alborghetti <stefania.alborghe...@datastax.com>
Authored: Thu Jul 23 17:34:35 2015 -0500
Committer: Yuki Morishita <yu...@apache.org>
Committed: Thu Jul 23 17:34:35 2015 -0500

----------------------------------------------------------------------
 .../db/SinglePartitionReadCommand.java          |  8 +++---
 .../apache/cassandra/metrics/TableMetrics.java  | 18 ++++++------
 .../org/apache/cassandra/db/RowCacheTest.java   | 30 ++++++++++----------
 3 files changed, 28 insertions(+), 28 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/eae3b026/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java 
b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
index 80711d6..3d4e42e 100644
--- a/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
+++ b/src/java/org/apache/cassandra/db/SinglePartitionReadCommand.java
@@ -256,24 +256,24 @@ public abstract class SinglePartitionReadCommand<F 
extends ClusteringIndexFilter
             {
                 // Some other read is trying to cache the value, just do a 
normal non-caching read
                 Tracing.trace("Row cache miss (race)");
-                cfs.metric.partitionCacheMiss.inc();
+                cfs.metric.rowCacheMiss.inc();
                 return queryMemtableAndDisk(cfs, readOp);
             }
 
             CachedPartition cachedPartition = (CachedPartition)cached;
             if (cfs.isFilterFullyCoveredBy(clusteringIndexFilter(), limits(), 
cachedPartition, nowInSec()))
             {
-                cfs.metric.partitionCacheHit.inc();
+                cfs.metric.rowCacheHit.inc();
                 Tracing.trace("Row cache hit");
                 return 
clusteringIndexFilter().getUnfilteredRowIterator(columnFilter(), 
cachedPartition);
             }
 
-            cfs.metric.partitionCacheHitOutOfRange.inc();
+            cfs.metric.rowCacheHitOutOfRange.inc();
             Tracing.trace("Ignoring row cache as cached value could not 
satisfy query");
             return queryMemtableAndDisk(cfs, readOp);
         }
 
-        cfs.metric.partitionCacheMiss.inc();
+        cfs.metric.rowCacheMiss.inc();
         Tracing.trace("Row cache miss");
 
         boolean cacheFullPartitions = 
metadata().getCaching().rowCache.cacheFullPartitions();

http://git-wip-us.apache.org/repos/asf/cassandra/blob/eae3b026/src/java/org/apache/cassandra/metrics/TableMetrics.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/metrics/TableMetrics.java 
b/src/java/org/apache/cassandra/metrics/TableMetrics.java
index d708ac4..1b4293f 100644
--- a/src/java/org/apache/cassandra/metrics/TableMetrics.java
+++ b/src/java/org/apache/cassandra/metrics/TableMetrics.java
@@ -116,12 +116,12 @@ public class TableMetrics
     public final TableHistogram colUpdateTimeDeltaHistogram;
     /** Disk space used by snapshot files */
     public final Gauge<Long> trueSnapshotsSize;
-    /** Partition cache hits, but result out of range */
-    public final Counter partitionCacheHitOutOfRange;
-    /** Number of partition cache hits */
-    public final Counter partitionCacheHit;
-    /** Number of partition cache misses */
-    public final Counter partitionCacheMiss;
+    /** Row cache hits, but result out of range */
+    public final Counter rowCacheHitOutOfRange;
+    /** Number of row cache hits */
+    public final Counter rowCacheHit;
+    /** Number of row cache misses */
+    public final Counter rowCacheMiss;
     /** CAS Prepare metrics */
     public final LatencyMetrics casPrepare;
     /** CAS Propose metrics */
@@ -620,9 +620,9 @@ public class TableMetrics
                 return cfs.trueSnapshotsSize();
             }
         });
-        partitionCacheHitOutOfRange = 
createTableCounter("PartitionCacheHitOutOfRange", "RowCacheHitOutOfRange");
-        partitionCacheHit = createTableCounter("PartitionCacheHit", 
"RowCacheHit");
-        partitionCacheMiss = createTableCounter("PartitionCacheMiss", 
"RowCacheMiss");
+        rowCacheHitOutOfRange = createTableCounter("RowCacheHitOutOfRange");
+        rowCacheHit = createTableCounter("RowCacheHit");
+        rowCacheMiss = createTableCounter("RowCacheMiss");
 
         casPrepare = new LatencyMetrics(factory, "CasPrepare", 
cfs.keyspace.metric.casPrepare);
         casPropose = new LatencyMetrics(factory, "CasPropose", 
cfs.keyspace.metric.casPropose);

http://git-wip-us.apache.org/repos/asf/cassandra/blob/eae3b026/test/unit/org/apache/cassandra/db/RowCacheTest.java
----------------------------------------------------------------------
diff --git a/test/unit/org/apache/cassandra/db/RowCacheTest.java 
b/test/unit/org/apache/cassandra/db/RowCacheTest.java
index 883149f..b53f62c 100644
--- a/test/unit/org/apache/cassandra/db/RowCacheTest.java
+++ b/test/unit/org/apache/cassandra/db/RowCacheTest.java
@@ -79,8 +79,8 @@ public class RowCacheTest
         Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
         String cf = "CachedIntCF";
         ColumnFamilyStore cachedStore  = keyspace.getColumnFamilyStore(cf);
-        long startRowCacheHits = 
cachedStore.metric.partitionCacheHit.getCount();
-        long startRowCacheOutOfRange = 
cachedStore.metric.partitionCacheHitOutOfRange.getCount();
+        long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
+        long startRowCacheOutOfRange = 
cachedStore.metric.rowCacheHitOutOfRange.getCount();
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
@@ -98,12 +98,12 @@ public class RowCacheTest
 
         // populate row cache, we should not get a row cache hit;
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
-        assertEquals(startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
+        assertEquals(startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
 
         // do another query, limit is 1, which is < 100 that we cache, we 
should get a hit and it should be in range
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(1).build());
-        assertEquals(++startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
-        assertEquals(startRowCacheOutOfRange, 
cachedStore.metric.partitionCacheHitOutOfRange.getCount());
+        assertEquals(++startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
+        assertEquals(startRowCacheOutOfRange, 
cachedStore.metric.rowCacheHitOutOfRange.getCount());
 
         CachedPartition cachedCf = 
(CachedPartition)CacheService.instance.rowCache.get(rck);
         assertEquals(1, cachedCf.rowCount());
@@ -246,8 +246,8 @@ public class RowCacheTest
         Keyspace keyspace = Keyspace.open(KEYSPACE_CACHED);
         String cf = "CachedIntCF";
         ColumnFamilyStore cachedStore  = keyspace.getColumnFamilyStore(cf);
-        long startRowCacheHits = 
cachedStore.metric.partitionCacheHit.getCount();
-        long startRowCacheOutOfRange = 
cachedStore.metric.partitionCacheHitOutOfRange.getCount();
+        long startRowCacheHits = cachedStore.metric.rowCacheHit.getCount();
+        long startRowCacheOutOfRange = 
cachedStore.metric.rowCacheHitOutOfRange.getCount();
         // empty the row cache
         CacheService.instance.invalidateRowCache();
 
@@ -270,29 +270,29 @@ public class RowCacheTest
 
         // populate row cache, we should not get a row cache hit;
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
-        assertEquals(startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
+        assertEquals(startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
 
         // do another query, limit is 10, which is < 100 that we cache, we 
should get a hit and it should be in range
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(10).build());
-        assertEquals(++startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
-        assertEquals(startRowCacheOutOfRange, 
cachedStore.metric.partitionCacheHitOutOfRange.getCount());
+        assertEquals(++startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
+        assertEquals(startRowCacheOutOfRange, 
cachedStore.metric.rowCacheHitOutOfRange.getCount());
 
         // get a slice from 210 to 215, which is beyond the 100 rows we 
cache, so we should not get a hit and the row cache is out of range
         Util.getAll(Util.cmd(cachedStore, 
dk).fromIncl(String.valueOf(210)).toExcl(String.valueOf(215)).build());
-        assertEquals(startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
-        assertEquals(++startRowCacheOutOfRange, 
cachedStore.metric.partitionCacheHitOutOfRange.getCount());
+        assertEquals(startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
+        assertEquals(++startRowCacheOutOfRange, 
cachedStore.metric.rowCacheHitOutOfRange.getCount());
 
         // get a slice with limit > 100, we should get a hit out of range.
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(101).build());
-        assertEquals(startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
-        assertEquals(++startRowCacheOutOfRange, 
cachedStore.metric.partitionCacheHitOutOfRange.getCount());
+        assertEquals(startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
+        assertEquals(++startRowCacheOutOfRange, 
cachedStore.metric.rowCacheHitOutOfRange.getCount());
 
 
         CacheService.instance.invalidateRowCache();
 
         // try to populate row cache with a limit > rows to cache, we should 
still populate row cache;
         Util.getAll(Util.cmd(cachedStore, dk).withLimit(105).build());
-        assertEquals(startRowCacheHits, 
cachedStore.metric.partitionCacheHit.getCount());
+        assertEquals(startRowCacheHits, 
cachedStore.metric.rowCacheHit.getCount());
 
         // validate the stuff in cache;
         CachedPartition cachedCf = 
(CachedPartition)CacheService.instance.rowCache.get(rck);

Reply via email to