Repository: cassandra
Updated Branches:
  refs/heads/trunk 93b365cdc -> dd825a5f0


use long math for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9499f7cb
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9499f7cb
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9499f7cb

Branch: refs/heads/trunk
Commit: 9499f7cb98f678b6dde0c24ce87c39bc13b24ac5
Parents: 2acd05d
Author: Dave Brosius <dbros...@mebigfatguy.com>
Authored: Tue Mar 3 21:50:23 2015 -0500
Committer: Dave Brosius <dbros...@mebigfatguy.com>
Committed: Tue Mar 3 21:50:23 2015 -0500

----------------------------------------------------------------------
 .../cassandra/io/compress/CompressionMetadata.java  | 16 +++++++---------
 .../apache/cassandra/io/sstable/SSTableReader.java  |  2 +-
 .../cassandra/streaming/StreamReceiveTask.java      |  2 +-
 .../cassandra/stress/settings/SettingsSchema.java   |  2 +-
 4 files changed, 10 insertions(+), 12 deletions(-)
----------------------------------------------------------------------


http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java 
b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index b29e259..59c5da5 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -38,7 +38,6 @@ import java.util.TreeSet;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Longs;
 
-import org.apache.cassandra.cache.RefCountedMemory;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSReadError;
@@ -47,7 +46,6 @@ import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.SSTableWriter;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
@@ -181,7 +179,7 @@ public class CompressionMetadata
             if (chunkCount <= 0)
                 throw new IOException("Compressed file with 0 chunks 
encountered: " + input);
 
-            Memory offsets = Memory.allocate(chunkCount * 8);
+            Memory offsets = Memory.allocate(chunkCount * 8L);
 
             for (int i = 0; i < chunkCount; i++)
             {
@@ -248,7 +246,7 @@ public class CompressionMetadata
             endIndex = section.right % parameters.chunkLength() == 0 ? 
endIndex - 1 : endIndex;
             for (int i = startIndex; i <= endIndex; i++)
             {
-                long offset = i * 8;
+                long offset = i * 8L;
                 long chunkOffset = chunkOffsets.getLong(offset);
                 long nextChunkOffset = offset + 8 == chunkOffsetsSize
                                      ? compressedFileLength
@@ -270,7 +268,7 @@ public class CompressionMetadata
         private final CompressionParameters parameters;
         private final String filePath;
         private int maxCount = 100;
-        private SafeMemory offsets = new SafeMemory(maxCount * 8);
+        private SafeMemory offsets = new SafeMemory(maxCount * 8L);
         private int count = 0;
 
         private Writer(CompressionParameters parameters, String path)
@@ -288,11 +286,11 @@ public class CompressionMetadata
         {
             if (count == maxCount)
             {
-                SafeMemory newOffsets = offsets.copy((maxCount *= 2) * 8);
+                SafeMemory newOffsets = offsets.copy((maxCount *= 2) * 8L);
                 offsets.close();
                 offsets = newOffsets;
             }
-            offsets.setLong(8 * count++, offset);
+            offsets.setLong(8L * count++, offset);
         }
 
         private void writeHeader(DataOutput out, long dataLength, int chunks)
@@ -362,7 +360,7 @@ public class CompressionMetadata
                     count = (int) (dataLength / parameters.chunkLength());
                     // grab our actual compressed length from the next offset 
from our the position we're opened to
                     if (count < this.count)
-                        compressedLength = offsets.getLong(count * 8);
+                        compressedLength = offsets.getLong(count * 8L);
                     break;
 
                 default:
@@ -401,7 +399,7 @@ public class CompressionMetadata
                    assert chunks == count;
                    writeHeader(out, dataLength, chunks);
                 for (int i = 0 ; i < count ; i++)
-                    out.writeLong(offsets.getLong(i * 8));
+                    out.writeLong(offsets.getLong(i * 8L));
             }
             finally
             {

http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java 
b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
index 202bc4d..13abc04 100644
--- a/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
+++ b/src/java/org/apache/cassandra/io/sstable/SSTableReader.java
@@ -1174,7 +1174,7 @@ public class SSTableReader extends SSTable implements 
RefCounted<SSTableReader>
             sampleKeyCount += (sampleIndexRange.right - sampleIndexRange.left 
+ 1);
 
         // adjust for the current sampling level: (BSL / SL) * 
index_interval_at_full_sampling
-        long estimatedKeys = sampleKeyCount * 
(Downsampling.BASE_SAMPLING_LEVEL * indexSummary.getMinIndexInterval()) / 
indexSummary.getSamplingLevel();
+        long estimatedKeys = sampleKeyCount * ((long) 
Downsampling.BASE_SAMPLING_LEVEL * indexSummary.getMinIndexInterval()) / 
indexSummary.getSamplingLevel();
         return Math.max(1, estimatedKeys);
     }
 

http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
----------------------------------------------------------------------
diff --git a/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java 
b/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
index 44b83f9..da2d7d6 100644
--- a/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
+++ b/src/java/org/apache/cassandra/streaming/StreamReceiveTask.java
@@ -118,7 +118,7 @@ public class StreamReceiveTask extends StreamTask
             }
             ColumnFamilyStore cfs = 
Keyspace.open(kscf.left).getColumnFamilyStore(kscf.right);
 
-            File lockfiledir = 
cfs.directories.getWriteableLocationAsFile(task.sstables.size() * 256);
+            File lockfiledir = 
cfs.directories.getWriteableLocationAsFile(task.sstables.size() * 256L);
             if (lockfiledir == null)
                 throw new IOError(new IOException("All disks full"));
             StreamLockfile lockfile = new StreamLockfile(lockfiledir, 
UUID.randomUUID());

http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java
----------------------------------------------------------------------
diff --git 
a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java 
b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java
index 93a9bd7..54f27bc 100644
--- a/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java
+++ b/tools/stress/src/org/apache/cassandra/stress/settings/SettingsSchema.java
@@ -125,7 +125,7 @@ public class SettingsSchema implements Serializable
             /* end */
 
             System.out.println(String.format("Created keyspaces. Sleeping %ss 
for propagation.", settings.node.nodes.size()));
-            Thread.sleep(settings.node.nodes.size() * 1000); // seconds
+            Thread.sleep(settings.node.nodes.size() * 1000L); // seconds
         }
         catch (InvalidRequestException e)
         {

Reply via email to