Repository: cassandra
Updated Branches:
  refs/heads/cassandra-3.0 05cb556f9 -> 10ca7e47c
  refs/heads/cassandra-3.11 c3a1a4fa8 -> ae782319b
  refs/heads/trunk d8f036122 -> ed0ded123


Reduce garbage created by DynamicSnitch

Patch by Blake Eggleston; Reviewed by Jason Brown for CASSANDRA-14091


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/10ca7e47
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/10ca7e47
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/10ca7e47

Branch: refs/heads/cassandra-3.0
Commit: 10ca7e47ca63c43b4e0ba593fb4c736130764af9
Parents: 05cb556
Author: Blake Eggleston <bdeggles...@gmail.com>
Authored: Fri Dec 1 15:04:12 2017 -0800
Committer: Blake Eggleston <bdeggles...@gmail.com>
Committed: Tue Dec 5 16:05:07 2017 -0800

----------------------------------------------------------------------
 CHANGES.txt                                         |  1 +
 .../cassandra/locator/DynamicEndpointSnitch.java    | 16 ++++++++++++----
 2 files changed, 13 insertions(+), 4 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/10ca7e47/CHANGES.txt
----------------------------------------------------------------------
diff --git a/CHANGES.txt b/CHANGES.txt
index 2683dc2..cf8883a 100644
--- a/CHANGES.txt
+++ b/CHANGES.txt
@@ -1,4 +1,5 @@
 3.0.16
+ * Reduce garbage created by DynamicSnitch (CASSANDRA-14091)
  * More frequent commitlog chained markers (CASSANDRA-13987)
  * Fix serialized size of DataLimits (CASSANDRA-14057)
  * Add flag to allow dropping oversized read repair mutations (CASSANDRA-13975)

http://git-wip-us.apache.org/repos/asf/cassandra/blob/10ca7e47/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
index 9c0c57e..8c255f5 100644
--- a/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
+++ b/src/java/org/apache/cassandra/locator/DynamicEndpointSnitch.java
@@ -29,6 +29,7 @@ import com.codahale.metrics.ExponentiallyDecayingReservoir;
 import javax.management.MBeanServer;
 import javax.management.ObjectName;
 
+import com.codahale.metrics.Snapshot;
 import org.apache.cassandra.concurrent.ScheduledExecutors;
 import org.apache.cassandra.config.DatabaseDescriptor;
 import org.apache.cassandra.net.MessagingService;
@@ -265,19 +266,26 @@ public class DynamicEndpointSnitch extends AbstractEndpointSnitch implements ILa
 
         }
         double maxLatency = 1;
+
+        Map<InetAddress, Snapshot> snapshots = new HashMap<>(samples.size());
+        for (Map.Entry<InetAddress, ExponentiallyDecayingReservoir> entry : samples.entrySet())
+        {
+            snapshots.put(entry.getKey(), entry.getValue().getSnapshot());
+        }
+
         // We're going to weight the latency for each host against the worst one we see, to
         // arrive at sort of a 'badness percentage' for them. First, find the worst for each:
         HashMap<InetAddress, Double> newScores = new HashMap<>();
-        for (Map.Entry<InetAddress, ExponentiallyDecayingReservoir> entry : samples.entrySet())
+        for (Map.Entry<InetAddress, Snapshot> entry : snapshots.entrySet())
         {
-            double mean = entry.getValue().getSnapshot().getMedian();
+            double mean = entry.getValue().getMedian();
             if (mean > maxLatency)
                 maxLatency = mean;
         }
         // now make another pass to do the weighting based on the maximums we found before
-        for (Map.Entry<InetAddress, ExponentiallyDecayingReservoir> entry: samples.entrySet())
+        for (Map.Entry<InetAddress, Snapshot> entry : snapshots.entrySet())
         {
-            double score = entry.getValue().getSnapshot().getMedian() / maxLatency;
+            double score = entry.getValue().getMedian() / maxLatency;
             // finally, add the severity without any weighting, since hosts scale this relative to their own load and the size of the task causing the severity.
             // "Severity" is basically a measure of compaction activity (CASSANDRA-3722).
             if (USE_SEVERITY)


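For readers following the list: the allocation win in this patch comes from taking each reservoir's Snapshot exactly once per scoring round and reusing it in both passes, rather than calling getSnapshot() (which copies and sorts the reservoir's samples) every time a median is read. The sketch below is a simplified, standalone illustration of that pattern; only the Dropwizard Metrics calls are real, and the class and helper names are invented for the example rather than taken from DynamicEndpointSnitch.

// Simplified sketch (not the real DynamicEndpointSnitch): snapshot each
// reservoir once, then reuse the cached Snapshot in both scoring passes.
import java.net.InetAddress;
import java.util.HashMap;
import java.util.Map;

import com.codahale.metrics.ExponentiallyDecayingReservoir;
import com.codahale.metrics.Snapshot;

public class SnapshotOncePerRound
{
    // Hypothetical helper: returns a latency-based score per endpoint.
    static Map<InetAddress, Double> score(Map<InetAddress, ExponentiallyDecayingReservoir> samples)
    {
        // One getSnapshot() call per endpoint; each call copies and sorts the
        // reservoir's samples, so calling it twice per endpoint doubles that garbage.
        Map<InetAddress, Snapshot> snapshots = new HashMap<>(samples.size());
        for (Map.Entry<InetAddress, ExponentiallyDecayingReservoir> entry : samples.entrySet())
            snapshots.put(entry.getKey(), entry.getValue().getSnapshot());

        // Pass 1: find the worst median latency, reading the cached snapshots.
        double maxLatency = 1;
        for (Snapshot snapshot : snapshots.values())
            maxLatency = Math.max(maxLatency, snapshot.getMedian());

        // Pass 2: score each endpoint relative to the worst, reusing the same snapshots.
        Map<InetAddress, Double> scores = new HashMap<>(snapshots.size());
        for (Map.Entry<InetAddress, Snapshot> entry : snapshots.entrySet())
            scores.put(entry.getKey(), entry.getValue().getMedian() / maxLatency);
        return scores;
    }
}

The real method also folds in severity and the badness threshold; the point of the sketch is only the single-snapshot-per-endpoint pattern the diff introduces.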