cassandra git commit: use long math, for long results

2016-06-16 Thread dbrosius
Repository: cassandra
Updated Branches:
  refs/heads/trunk 27395e78b -> 057c32997


use long math, for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/057c3299
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/057c3299
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/057c3299

Branch: refs/heads/trunk
Commit: 057c32997442b5df8842fe46aa2ebe9b178d8647
Parents: 27395e7
Author: Dave Brosius 
Authored: Thu Jun 16 22:32:00 2016 -0400
Committer: Dave Brosius 
Committed: Thu Jun 16 22:32:00 2016 -0400

--
 .../cassandra/db/compaction/TimeWindowCompactionStrategy.java  | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/057c3299/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
--
diff --git a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
index df688c5..70f29e9 100644
--- a/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
+++ b/src/java/org/apache/cassandra/db/compaction/TimeWindowCompactionStrategy.java
@@ -189,16 +189,16 @@ public class TimeWindowCompactionStrategy extends AbstractCompactionStrategy
 switch(windowTimeUnit)
 {
 case MINUTES:
-lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (60 * windowTimeSize));
+lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (60L * windowTimeSize));
 upperTimestamp = (lowerTimestamp + (60L * (windowTimeSize - 1L))) + 59L;
 break;
 case HOURS:
-lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (3600 * windowTimeSize));
+lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (3600L * windowTimeSize));
 upperTimestamp = (lowerTimestamp + (3600L * (windowTimeSize - 1L))) + 3599L;
 break;
 case DAYS:
 default:
-lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (86400 * windowTimeSize));
+lowerTimestamp = timestampInSeconds - ((timestampInSeconds) % (86400L * windowTimeSize));
 upperTimestamp = (lowerTimestamp + (86400L * (windowTimeSize - 1L))) + 86399L;
 break;
 }
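The hazard these three hunks address, in isolation: timestampInSeconds is a long, but with an int windowTimeSize the products 60 * windowTimeSize, 3600 * windowTimeSize, and 86400 * windowTimeSize are evaluated in 32-bit int arithmetic and can wrap before the modulus is taken. A minimal standalone sketch of that behaviour, with made-up values and assuming windowTimeSize is an int (which is what makes the fix necessary):

    public class WindowSizeOverflowDemo
    {
        public static void main(String[] args)
        {
            long timestampInSeconds = 1_466_000_000L; // arbitrary epoch seconds
            int windowTimeSize = 60_000_000;          // deliberately oversized window

            // int math: 60 * 60_000_000 = 3_600_000_000 wraps before widening,
            // so the modulus is taken against a garbage (negative) divisor.
            long intMath = timestampInSeconds - (timestampInSeconds % (60 * windowTimeSize));

            // long math: the 60L literal promotes the whole product to long.
            long longMath = timestampInSeconds - (timestampInSeconds % (60L * windowTimeSize));

            System.out.println(intMath == longMath); // false
        }
    }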



cassandra git commit: use long math for long results

2015-04-02 Thread dbrosius
Repository: cassandra
Updated Branches:
  refs/heads/trunk 910170c9d -> 2bf60356c


use long math for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/2bf60356
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/2bf60356
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/2bf60356

Branch: refs/heads/trunk
Commit: 2bf60356c61f87891b021b2f6ba3f8e46f135f13
Parents: 910170c
Author: Dave Brosius 
Authored: Fri Apr 3 00:25:06 2015 -0400
Committer: Dave Brosius 
Committed: Fri Apr 3 00:25:06 2015 -0400

--
 src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/2bf60356/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
--
diff --git a/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java b/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
index 4b48462..8d7b0e9 100644
--- a/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
+++ b/src/java/org/apache/cassandra/db/compaction/SSTableSplitter.java
@@ -75,7 +75,7 @@ public class SSTableSplitter {
 @Override
 public CompactionAwareWriter getCompactionAwareWriter(ColumnFamilyStore cfs, Set<SSTableReader> allSSTables, Set<SSTableReader> nonExpiredSSTables)
 {
-return new MaxSSTableSizeWriter(cfs, sstables, nonExpiredSSTables, sstableSizeInMB * 1024 * 1024, 0, true, compactionType);
+return new MaxSSTableSizeWriter(cfs, sstables, nonExpiredSSTables, sstableSizeInMB * 1024L * 1024L, 0, true, compactionType);
 }
 
 @Override
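Same pattern in miniature: assuming sstableSizeInMB is an int (which is what makes the fix necessary), the product sstableSizeInMB * 1024 * 1024 is computed in int and wraps once the target size reaches 2048 MB, even though the size it feeds is ultimately used as a long. A hypothetical sketch:

    public class SSTableSizeOverflowDemo
    {
        public static void main(String[] args)
        {
            int sstableSizeInMB = 4096; // hypothetical 4 GiB target

            long intMath  = sstableSizeInMB * 1024 * 1024;    // int product, wraps to 0
            long longMath = sstableSizeInMB * 1024L * 1024L;  // promoted to long

            System.out.println(intMath);  // 0
            System.out.println(longMath); // 4294967296
        }
    }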



cassandra git commit: use long math for long results

2015-03-17 Thread dbrosius
Repository: cassandra
Updated Branches:
  refs/heads/trunk 4a4f83334 -> 5a09483df


use long math for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/5a09483d
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/5a09483d
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/5a09483d

Branch: refs/heads/trunk
Commit: 5a09483df399b9e498e30624d0e3227fb60d7fa1
Parents: 4a4f833
Author: Dave Brosius 
Authored: Tue Mar 17 21:16:49 2015 -0400
Committer: Dave Brosius 
Committed: Tue Mar 17 21:16:49 2015 -0400

--
 .../org/apache/cassandra/io/compress/CompressionMetadata.java| 4 ++--
 src/java/org/apache/cassandra/utils/StreamingHistogram.java  | 2 +-
 2 files changed, 3 insertions(+), 3 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/5a09483d/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
--
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index 182cdd2..928541a 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -188,7 +188,7 @@ public class CompressionMetadata
 {
 try
 {
-offsets.setLong(i * 8, input.readLong());
+offsets.setLong(i * 8L, input.readLong());
 }
 catch (EOFException e)
 {
@@ -290,7 +290,7 @@ public class CompressionMetadata
 {
 if (count == maxCount)
 {
-SafeMemory newOffsets = offsets.copy((maxCount *= 2L) * 8);
+SafeMemory newOffsets = offsets.copy((maxCount *= 2L) * 8L);
 offsets.close();
 offsets = newOffsets;
 }

http://git-wip-us.apache.org/repos/asf/cassandra/blob/5a09483d/src/java/org/apache/cassandra/utils/StreamingHistogram.java
--
diff --git a/src/java/org/apache/cassandra/utils/StreamingHistogram.java b/src/java/org/apache/cassandra/utils/StreamingHistogram.java
index 3f5a715..eb884be 100644
--- a/src/java/org/apache/cassandra/utils/StreamingHistogram.java
+++ b/src/java/org/apache/cassandra/utils/StreamingHistogram.java
@@ -201,7 +201,7 @@ public class StreamingHistogram
 Map<Double, Long> entries = histogram.getAsMap();
 size += typeSizes.sizeof(entries.size());
 // size of entries = size * (8(double) + 8(long))
-size += entries.size() * (8 + 8);
+size += entries.size() * (8L + 8L);
 return size;
 }
 }
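Both files here fix the same shape of bug applied to byte offsets and sizes: i * 8 and entries.size() * (8 + 8) are products of int operands, so they are computed in 32-bit arithmetic even when the result is stored into or passed as a long, and a large enough index wraps negative. An illustrative sketch with a made-up chunk index:

    public class ByteOffsetOverflowDemo
    {
        public static void main(String[] args)
        {
            int i = 300_000_000; // hypothetical chunk index

            long intOffset  = i * 8;  // 2_400_000_000 wraps to a negative int first
            long longOffset = i * 8L; // one long operand widens the whole product

            System.out.println(intOffset);  // -1894967296
            System.out.println(longOffset); // 2400000000
        }
    }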



[1/2] cassandra git commit: use long math for long results

2015-03-03 Thread dbrosius
Repository: cassandra
Updated Branches:
  refs/heads/trunk 93b365cdc -> dd825a5f0


use long math for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9499f7cb
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9499f7cb
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9499f7cb

Branch: refs/heads/trunk
Commit: 9499f7cb98f678b6dde0c24ce87c39bc13b24ac5
Parents: 2acd05d
Author: Dave Brosius 
Authored: Tue Mar 3 21:50:23 2015 -0500
Committer: Dave Brosius 
Committed: Tue Mar 3 21:50:23 2015 -0500

--
 .../cassandra/io/compress/CompressionMetadata.java  | 16 +++-
 .../apache/cassandra/io/sstable/SSTableReader.java  |  2 +-
 .../cassandra/streaming/StreamReceiveTask.java  |  2 +-
 .../cassandra/stress/settings/SettingsSchema.java   |  2 +-
 4 files changed, 10 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
--
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index b29e259..59c5da5 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -38,7 +38,6 @@ import java.util.TreeSet;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Longs;
 
-import org.apache.cassandra.cache.RefCountedMemory;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSReadError;
@@ -47,7 +46,6 @@ import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.SSTableWriter;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
@@ -181,7 +179,7 @@ public class CompressionMetadata
 if (chunkCount <= 0)
 throw new IOException("Compressed file with 0 chunks encountered: " + input);
 
-Memory offsets = Memory.allocate(chunkCount * 8);
+Memory offsets = Memory.allocate(chunkCount * 8L);
 
 for (int i = 0; i < chunkCount; i++)
 {
@@ -248,7 +246,7 @@ public class CompressionMetadata
 endIndex = section.right % parameters.chunkLength() == 0 ? endIndex - 1 : endIndex;
 for (int i = startIndex; i <= endIndex; i++)
 {
-long offset = i * 8;
+long offset = i * 8L;
 long chunkOffset = chunkOffsets.getLong(offset);
 long nextChunkOffset = offset + 8 == chunkOffsetsSize
  ? compressedFileLength
@@ -270,7 +268,7 @@ public class CompressionMetadata
 private final CompressionParameters parameters;
 private final String filePath;
 private int maxCount = 100;
-private SafeMemory offsets = new SafeMemory(maxCount * 8);
+private SafeMemory offsets = new SafeMemory(maxCount * 8L);
 private int count = 0;
 
 private Writer(CompressionParameters parameters, String path)
@@ -288,11 +286,11 @@ public class CompressionMetadata
 {
 if (count == maxCount)
 {
-SafeMemory newOffsets = offsets.copy((maxCount *= 2) * 8);
+SafeMemory newOffsets = offsets.copy((maxCount *= 2L) * 8);
 offsets.close();
 offsets = newOffsets;
 }
-offsets.setLong(8 * count++, offset);
+offsets.setLong(8L * count++, offset);
 }
 
 private void writeHeader(DataOutput out, long dataLength, int chunks)
@@ -362,7 +360,7 @@ public class CompressionMetadata
 count = (int) (dataLength / parameters.chunkLength());
 // grab our actual compressed length from the next offset from our the position we're opened to
 if (count < this.count)
-compressedLength = offsets.getLong(count * 8);
+compressedLength = offsets.getLong(count * 8L);
 break;
 
 default:
@@ -401,7 +399,7 @@ public class CompressionMetadata
assert chunks == count;
writeHeader(out, dataLength, chunks);
 for (int i = 0 ; i < count ; i++)
-out.writeLong(offsets.getLong(i * 

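One subtlety in this commit's Writer hunk: (maxCount *= 2L) * 8 is still 32-bit math, because a compound assignment narrows its result back to the variable's type (int here), so the 2L on its own does not widen the outer product. The 2015-03-17 commit above (5a09483d) finishes the job by writing * 8L. A small sketch of the narrowing, with made-up values:

    public class CompoundAssignmentDemo
    {
        public static void main(String[] args)
        {
            int maxCount = 200_000_000;

            // The compound assignment evaluates maxCount * 2L as a long, then
            // narrows it back to int, so the expression's type is int and the
            // trailing * 8 is still a 32-bit product.
            long stillIntMath = (maxCount *= 2L) * 8;

            maxCount = 200_000_000; // reset for comparison
            long longMath = (maxCount *= 2L) * 8L; // 8L forces a long product

            System.out.println(stillIntMath); // -1094967296 (wrapped)
            System.out.println(longMath);     // 3200000000
        }
    }
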
cassandra git commit: use long math for long results

2015-03-03 Thread dbrosius
Repository: cassandra
Updated Branches:
  refs/heads/cassandra-2.1 2acd05d96 -> 9499f7cb9


use long math for long results


Project: http://git-wip-us.apache.org/repos/asf/cassandra/repo
Commit: http://git-wip-us.apache.org/repos/asf/cassandra/commit/9499f7cb
Tree: http://git-wip-us.apache.org/repos/asf/cassandra/tree/9499f7cb
Diff: http://git-wip-us.apache.org/repos/asf/cassandra/diff/9499f7cb

Branch: refs/heads/cassandra-2.1
Commit: 9499f7cb98f678b6dde0c24ce87c39bc13b24ac5
Parents: 2acd05d
Author: Dave Brosius 
Authored: Tue Mar 3 21:50:23 2015 -0500
Committer: Dave Brosius 
Committed: Tue Mar 3 21:50:23 2015 -0500

--
 .../cassandra/io/compress/CompressionMetadata.java  | 16 +++-
 .../apache/cassandra/io/sstable/SSTableReader.java  |  2 +-
 .../cassandra/streaming/StreamReceiveTask.java  |  2 +-
 .../cassandra/stress/settings/SettingsSchema.java   |  2 +-
 4 files changed, 10 insertions(+), 12 deletions(-)
--


http://git-wip-us.apache.org/repos/asf/cassandra/blob/9499f7cb/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
--
diff --git a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
index b29e259..59c5da5 100644
--- a/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
+++ b/src/java/org/apache/cassandra/io/compress/CompressionMetadata.java
@@ -38,7 +38,6 @@ import java.util.TreeSet;
 import com.google.common.annotations.VisibleForTesting;
 import com.google.common.primitives.Longs;
 
-import org.apache.cassandra.cache.RefCountedMemory;
 import org.apache.cassandra.db.TypeSizes;
 import org.apache.cassandra.exceptions.ConfigurationException;
 import org.apache.cassandra.io.FSReadError;
@@ -47,7 +46,6 @@ import org.apache.cassandra.io.IVersionedSerializer;
 import org.apache.cassandra.io.sstable.Component;
 import org.apache.cassandra.io.sstable.CorruptSSTableException;
 import org.apache.cassandra.io.sstable.Descriptor;
-import org.apache.cassandra.io.sstable.SSTableWriter;
 import org.apache.cassandra.io.util.DataOutputPlus;
 import org.apache.cassandra.io.util.FileUtils;
 import org.apache.cassandra.io.util.Memory;
@@ -181,7 +179,7 @@ public class CompressionMetadata
 if (chunkCount <= 0)
 throw new IOException("Compressed file with 0 chunks encountered: " + input);
 
-Memory offsets = Memory.allocate(chunkCount * 8);
+Memory offsets = Memory.allocate(chunkCount * 8L);
 
 for (int i = 0; i < chunkCount; i++)
 {
@@ -248,7 +246,7 @@ public class CompressionMetadata
 endIndex = section.right % parameters.chunkLength() == 0 ? endIndex - 1 : endIndex;
 for (int i = startIndex; i <= endIndex; i++)
 {
-long offset = i * 8;
+long offset = i * 8L;
 long chunkOffset = chunkOffsets.getLong(offset);
 long nextChunkOffset = offset + 8 == chunkOffsetsSize
  ? compressedFileLength
@@ -270,7 +268,7 @@ public class CompressionMetadata
 private final CompressionParameters parameters;
 private final String filePath;
 private int maxCount = 100;
-private SafeMemory offsets = new SafeMemory(maxCount * 8);
+private SafeMemory offsets = new SafeMemory(maxCount * 8L);
 private int count = 0;
 
 private Writer(CompressionParameters parameters, String path)
@@ -288,11 +286,11 @@ public class CompressionMetadata
 {
 if (count == maxCount)
 {
-SafeMemory newOffsets = offsets.copy((maxCount *= 2) * 8);
+SafeMemory newOffsets = offsets.copy((maxCount *= 2L) * 8);
 offsets.close();
 offsets = newOffsets;
 }
-offsets.setLong(8 * count++, offset);
+offsets.setLong(8L * count++, offset);
 }
 
 private void writeHeader(DataOutput out, long dataLength, int chunks)
@@ -362,7 +360,7 @@ public class CompressionMetadata
 count = (int) (dataLength / parameters.chunkLength());
 // grab our actual compressed length from the next offset from our the position we're opened to
 if (count < this.count)
-compressedLength = offsets.getLong(count * 8);
+compressedLength = offsets.getLong(count * 8L);
 break;
 
 default:
@@ -401,7 +399,7 @@ public class CompressionMetadata
assert chunks == count;
writeHeader(out, dataLength, chunks);
 for (int i = 0 ; i < count ; i++)
-out.writeLong(offs
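
This entry is the cassandra-2.1 original of the [1/2] trunk commit above (same commit 9499f7cb, identical diff body). As a general aside rather than anything these patches do: besides widening one operand with a long literal, code that wants to fail fast instead of silently wrapping can lean on Java 8's Math.multiplyExact. A hedged sketch with made-up values:

    public class OverflowGuardDemo
    {
        public static void main(String[] args)
        {
            int chunkCount = 300_000_000; // hypothetical

            // Widening one operand, as these commits do, keeps the product in long:
            long offsetsSize = chunkCount * 8L;

            // Math.multiplyExact throws instead of wrapping when the int product overflows.
            try
            {
                int product = Math.multiplyExact(chunkCount, 8);
                System.out.println(product);
            }
            catch (ArithmeticException e)
            {
                System.out.println("overflow detected: " + e.getMessage());
            }

            System.out.println(offsetsSize); // 2400000000
        }
    }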