Repository: systemml
Updated Branches:
  refs/heads/master 25a10f412 -> c3fdbb4da


http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java
index c8a0d3e..f775e92 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtils.java
@@ -71,7 +71,6 @@ import org.apache.sysml.runtime.util.DataConverter;
 import org.apache.sysml.runtime.util.FastStringTokenizer;
 import org.apache.sysml.runtime.util.MapReduceTool;
 import org.apache.sysml.runtime.util.UtilFunctions;
-import org.apache.sysml.utils.IntUtils;
 
 import scala.Tuple2;
 
@@ -272,7 +271,7 @@ public class RDDConverterUtils
                //slice blocks into rows, align and convert into data frame rows
                JavaRDD<Row> rowsRDD = in
                        .flatMapToPair(new SliceBinaryBlockToRowsFunction(mc.getRowsPerBlock()))
-                       .groupByKey().map(new ConvertRowBlocksToRows(IntUtils.toInt(mc.getCols()), mc.getColsPerBlock(), toVector));
+                       .groupByKey().map(new ConvertRowBlocksToRows((int)mc.getCols(), mc.getColsPerBlock(), toVector));
                
                //create data frame schema
                List<StructField> fields = new ArrayList<>();
@@ -323,7 +322,7 @@ public class RDDConverterUtils
                        MapReduceTool.deleteFileIfExistOnHDFS(pathY);
                        
                        //convert libsvm to labeled points
-                       int numFeatures = IntUtils.toInt( mcOutX.getCols() );
+                       int numFeatures = (int) mcOutX.getCols();
                        int numPartitions = SparkUtils.getNumPreferredPartitions(mcOutX, null);
                        JavaRDD<org.apache.spark.mllib.regression.LabeledPoint> lpoints = 
                                        MLUtils.loadLibSVMFile(sc.sc(), pathIn, numFeatures, numPartitions).toJavaRDD();
@@ -486,7 +485,7 @@ public class RDDConverterUtils
                        _bclen = mc.getColsPerBlock();
                        
                        //determine upper bounded buffer len
-                       _bufflen = IntUtils.toInt( Math.min(_rlen*_clen, BUFFER_SIZE) );
+                       _bufflen = (int) Math.min(_rlen*_clen, BUFFER_SIZE);
                }
 
                protected void flushBufferToList( ReblockBuffer rbuff,  ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret ) 
@@ -703,7 +702,7 @@ public class RDDConverterUtils
                {
                        ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret = new ArrayList<>();
 
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        MatrixIndexes[] ix = new MatrixIndexes[ncblks];
                        MatrixBlock[] mb = new MatrixBlock[ncblks];
                        
@@ -725,7 +724,7 @@ public class RDDConverterUtils
                                        if( ix[0] !=null )
                                                flushBlocksToList(ix, mb, ret);
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
-                                       createBlocks(rowix, IntUtils.toInt(len), ix, mb);
+                                       createBlocks(rowix, (int)len, ix, mb);
                                }
                                
                                //process row data
@@ -733,7 +732,7 @@ public class RDDConverterUtils
                                boolean emptyFound = false;
                                for( int cix=1, pix=0; cix<=ncblks; cix++ ) 
                                {
-                                       int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
+                                       int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
                                        if( mb[cix-1].isInSparseFormat() ) {
                                                //allocate row once (avoid re-allocations)
                                                int lnnz = IOUtilFunctions.countNnz(parts, pix, lclen);
@@ -763,13 +762,13 @@ public class RDDConverterUtils
                {
                        //compute row block index and number of column blocks
                        long rix = UtilFunctions.computeBlockIndex(rowix, _brlen);
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        
                        //create all column blocks (assume dense since csv is dense text format)
                        for( int cix=1; cix<=ncblks; cix++ ) {
-                               int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
+                               int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
                                ix[cix-1] = new MatrixIndexes(rix, cix);
-                               mb[cix-1] = new MatrixBlock(lrlen, lclen, _sparse, IntUtils.toInt(lrlen*lclen*_sparsity));
+                               mb[cix-1] = new MatrixBlock(lrlen, lclen, _sparse, (int)(lrlen*lclen*_sparsity));
                                mb[cix-1].allocateBlock();
                        }
                }
@@ -814,7 +813,7 @@ public class RDDConverterUtils
                {
                        ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret = new ArrayList<>();
 
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        MatrixIndexes[] ix = new MatrixIndexes[ncblks];
                        MatrixBlock[] mb = new MatrixBlock[ncblks];
                        
@@ -834,7 +833,7 @@ public class RDDConverterUtils
                                        if( ix[0] !=null )
                                                flushBlocksToList(ix, mb, ret);
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
-                                       createBlocks(rowix, IntUtils.toInt(len), ix, mb, lsparse);
+                                       createBlocks(rowix, (int)len, ix, mb, lsparse);
                                }
                                
                                //process row data
@@ -851,14 +850,14 @@ public class RDDConverterUtils
                                                                (org.apache.spark.mllib.linalg.SparseVector) row.features();
                                                for( int k=0; k<lnnz; k++ ) {
                                                        int gix = srow.indices()[k]+1;
-                                                       int cix = IntUtils.toInt(UtilFunctions.computeBlockIndex(gix, _bclen));
+                                                       int cix = (int)UtilFunctions.computeBlockIndex(gix, _bclen);
                                                        int j = UtilFunctions.computeCellInBlock(gix, _bclen);
                                                        mb[cix-1].appendValue(pos, j, srow.values()[k]);
                                                }
                                        }
                                        else { //dense
                                                for( int cix=1, pix=0; cix<=ncblks; cix++ ) {
-                                                       int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
+                                                       int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
                                                        for( int j=0; j<lclen; j++ )
                                                                mb[cix-1].appendValue(pos, j, row.features().apply(pix++));
                                                }
@@ -878,11 +877,11 @@ public class RDDConverterUtils
                {
                        //compute row block index and number of column blocks
                        long rix = UtilFunctions.computeBlockIndex(rowix, _brlen);
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        
                        //create all column blocks (assume dense since csv is dense text format)
                        for( int cix=1; cix<=ncblks; cix++ ) {
-                               int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
+                               int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
                                ix[cix-1] = new MatrixIndexes(rix, cix);
                                mb[cix-1] = new MatrixBlock(lrlen, lclen, 
lsparse);
                                mb[cix-1].allocateBlock();
@@ -986,7 +985,7 @@ public class RDDConverterUtils
                public ConcatenateBlocksFunction(long clen, int bclen) {
                        _clen = clen;
                        _bclen = bclen;
-                       _ncblks = IntUtils.toInt(Math.ceil((double)clen/bclen));
+                       _ncblks = (int)Math.ceil((double)clen/bclen);
                }
                
                @Override
@@ -1002,9 +1001,9 @@ public class RDDConverterUtils
                                tmpBlks[entry._1().intValue()-1] = entry._2();
                        }
                        //concatenate blocks
-                       MatrixBlock out = new MatrixBlock(1,IntUtils.toInt(_clen), tmpBlks[0].isInSparseFormat());
+                       MatrixBlock out = new MatrixBlock(1,(int)_clen, tmpBlks[0].isInSparseFormat());
                        for( int i=0; i<_ncblks; i++ )
-                               out.copy(0, 0, i*_bclen, IntUtils.toInt(Math.min((i+1)*_bclen, _clen)-1), tmpBlks[i], false);
+                               out.copy(0, 0, i*_bclen, (int)Math.min((i+1)*_bclen, _clen)-1, tmpBlks[i], false);
                        out.recomputeNonZeros();
                        //output row block
                        return new Tuple2<>(new MatrixIndexes(rowIndex, 1),out);
@@ -1044,7 +1043,7 @@ public class RDDConverterUtils
                {
                        ArrayList<Tuple2<MatrixIndexes,MatrixBlock>> ret = new ArrayList<>();
                        
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        MatrixIndexes[] ix = new MatrixIndexes[ncblks];
                        MatrixBlock[] mb = new MatrixBlock[ncblks];
                        
@@ -1061,15 +1060,15 @@ public class RDDConverterUtils
                                        if( ix[0] !=null )
                                                flushBlocksToList(ix, mb, ret);
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
-                                       createBlocks(rowix, IntUtils.toInt(len), ix, mb);
+                                       createBlocks(rowix, (int)len, ix, mb);
                                }
 
                                //process row data
                                int off = _containsID ? 1 : 0;
                                Object obj = _isVector ? tmp._1().get(off) : tmp._1();
                                for( int cix=1, pix=_isVector?0:off; cix<=ncblks; cix++ ) {
-                                       int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
-                                       int cu = IntUtils.toInt( Math.min(_clen, cix*_bclen) + (_isVector?0:off) );
+                                       int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
+                                       int cu = (int) Math.min(_clen, cix*_bclen) + (_isVector?0:off);
                                        //allocate sparse row once (avoid re-allocations)
                                        if( mb[cix-1].isInSparseFormat() ) {
                                                int lnnz = countNnz(obj, _isVector, pix, cu);
@@ -1110,13 +1109,13 @@ public class RDDConverterUtils
                {
                        //compute row block index and number of column blocks
                        long rix = UtilFunctions.computeBlockIndex(rowix, _brlen);
-                       int ncblks = IntUtils.toInt(Math.ceil((double)_clen/_bclen));
+                       int ncblks = (int)Math.ceil((double)_clen/_bclen);
                        
                        //create all column blocks (assume dense since csv is dense text format)
                        for( int cix=1; cix<=ncblks; cix++ ) {
-                               int lclen = IntUtils.toInt(UtilFunctions.computeBlockSize(_clen, cix, _bclen));
+                               int lclen = (int)UtilFunctions.computeBlockSize(_clen, cix, _bclen);
                                ix[cix-1] = new MatrixIndexes(rix, cix);
-                               mb[cix-1] = new MatrixBlock(lrlen, lclen, _sparse,IntUtils.toInt(lrlen*lclen*_sparsity));
+                               mb[cix-1] = new MatrixBlock(lrlen, lclen, _sparse,(int)(lrlen*lclen*_sparsity));
                                mb[cix-1].allocateBlock();
                        }
                }

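The converter hunks above repeatedly compute the number of column blocks as (int)Math.ceil((double)_clen/_bclen) and then a per-block column count; the casts are safe because the block count and block sizes are bounded by the block layout. Below is a minimal, self-contained Java sketch of that pattern with hypothetical dimensions; the lclen line is only a stand-in for UtilFunctions.computeBlockSize, which is not reproduced here.

public class BlockCountSketch {
    public static void main(String[] args) {
        long clen = 10_500;  // hypothetical number of matrix columns
        int bclen = 1_000;   // hypothetical columns per block
        // number of column blocks; small enough to narrow to int safely
        int ncblks = (int) Math.ceil((double) clen / bclen);
        for (int cix = 1; cix <= ncblks; cix++) {
            // columns in the cix-th block: full blocks except possibly the last one
            int lclen = (int) Math.min(bclen, clen - (long) (cix - 1) * bclen);
            System.out.println("column block " + cix + " holds " + lclen + " columns");
        }
    }
}
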
http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java
index e350e77..4871aee 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDConverterUtilsExt.java
@@ -54,7 +54,6 @@ import org.apache.sysml.runtime.matrix.data.MatrixIndexes;
 import org.apache.sysml.runtime.matrix.mapred.IndexedMatrixValue;
 import org.apache.sysml.runtime.matrix.mapred.ReblockBuffer;
 import org.apache.sysml.runtime.util.FastStringTokenizer;
-import org.apache.sysml.utils.IntUtils;
 
 import scala.Tuple2;
 
@@ -128,7 +127,7 @@ public class RDDConverterUtilsExt
        }
 
        public static MatrixBlock convertPy4JArrayToMB(byte [] data, long rlen, long clen) {
-               return convertPy4JArrayToMB(data, IntUtils.toInt(rlen), IntUtils.toInt(clen), false);
+               return convertPy4JArrayToMB(data, (int)rlen, (int)clen, false);
        }
 
        public static MatrixBlock convertPy4JArrayToMB(byte [] data, int rlen, int clen) {
@@ -136,7 +135,7 @@ public class RDDConverterUtilsExt
        }
 
        public static MatrixBlock convertSciPyCOOToMB(byte [] data, byte [] row, byte [] col, long rlen, long clen, long nnz) {
-               return convertSciPyCOOToMB(data, row, col, IntUtils.toInt(rlen), IntUtils.toInt(clen), IntUtils.toInt(nnz));
+               return convertSciPyCOOToMB(data, row, col, (int)rlen, (int)clen, (int)nnz);
        }
 
        public static MatrixBlock convertSciPyCOOToMB(byte [] data, byte [] row, byte [] col, int rlen, int clen, int nnz) {
@@ -160,7 +159,7 @@ public class RDDConverterUtilsExt
        }
 
        public static MatrixBlock convertPy4JArrayToMB(byte [] data, long rlen, long clen, boolean isSparse) {
-               return convertPy4JArrayToMB(data, IntUtils.toInt( rlen), IntUtils.toInt(clen), isSparse);
+               return convertPy4JArrayToMB(data, (int) rlen, (int) clen, isSparse);
        }
 
        public static MatrixBlock allocateDenseOrSparse(int rlen, int clen, boolean isSparse) {
@@ -187,7 +186,7 @@ public class RDDConverterUtilsExt
        public static void copyRowBlocks(MatrixBlock mb, long rowIndex, MatrixBlock ret, long numRowsPerBlock, long rlen, long clen) {
                // TODO: Double-check if synchronization is required here.
                // synchronized (RDDConverterUtilsExt.class) {
-                       ret.copy(IntUtils.toInt(rowIndex*numRowsPerBlock), IntUtils.toInt(Math.min((rowIndex+1)*numRowsPerBlock-1, rlen-1)), 0, IntUtils.toInt(clen-1), mb, false);
+                       ret.copy((int)(rowIndex*numRowsPerBlock), (int)Math.min((rowIndex+1)*numRowsPerBlock-1, rlen-1), 0, (int)(clen-1), mb, false);
                // }
        }
 
@@ -205,7 +204,7 @@ public class RDDConverterUtilsExt
                        long limit = rlen*clen;
                        if( limit > Integer.MAX_VALUE )
                                throw new DMLRuntimeException("Dense NumPy array of size " + limit + " cannot be converted to MatrixBlock");
-                       double [] denseBlock = new double[IntUtils.toInt(limit)];
+                       double [] denseBlock = new double[(int) limit];
                        ByteBuffer buf = ByteBuffer.wrap(data);
                        buf.order(ByteOrder.nativeOrder());
                        for(int i = 0; i < rlen*clen; i++) {
@@ -228,7 +227,7 @@ public class RDDConverterUtilsExt
                int times = Double.SIZE / Byte.SIZE;
                if( limit > Integer.MAX_VALUE / times )
                        throw new DMLRuntimeException("MatrixBlock of size " + limit + " cannot be converted to dense numpy array");
-               ret = new byte[IntUtils.toInt(limit * times)];
+               ret = new byte[(int) (limit * times)];
 
                double [] denseBlock = mb.getDenseBlockValues();
                if(mb.isEmptyBlock()) {
@@ -319,7 +318,7 @@ public class RDDConverterUtilsExt
                        _brlen = mc.getRowsPerBlock();
                        _bclen = mc.getColsPerBlock();
                        //determine upper bounded buffer len
-                       _bufflen = IntUtils.toInt( Math.min(_rlen*_clen, BUFFER_SIZE));
+                       _bufflen = (int) Math.min(_rlen*_clen, BUFFER_SIZE);
                }
 
                // ----------------------------------------------------

http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java
index d5abefc..01bdef8 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/RDDSortUtils.java
@@ -48,7 +48,6 @@ import org.apache.sysml.runtime.matrix.operators.ReorgOperator;
 import org.apache.sysml.runtime.util.DataConverter;
 import org.apache.sysml.runtime.util.SortUtils;
 import org.apache.sysml.runtime.util.UtilFunctions;
-import org.apache.sysml.utils.IntUtils;
 
 public class RDDSortUtils 
 {
@@ -61,7 +60,7 @@ public class RDDSortUtils
        
                //sort (creates sorted range per partition)
                long hdfsBlocksize = InfrastructureAnalyzer.getHDFSBlockSize();
-               int numPartitions = IntUtils.toInt(Math.ceil(((double)rlen*8)/hdfsBlocksize));
+               int numPartitions = (int)Math.ceil(((double)rlen*8)/hdfsBlocksize);
                JavaRDD<Double> sdvals = dvals
                                .sortBy(new CreateDoubleKeyFunction(), true, numPartitions);
                
@@ -83,7 +82,7 @@ public class RDDSortUtils
        
                //sort (creates sorted range per partition)
                long hdfsBlocksize = InfrastructureAnalyzer.getHDFSBlockSize();
-               int numPartitions = IntUtils.toInt(Math.ceil(((double)rlen*8)/hdfsBlocksize));
+               int numPartitions = (int)Math.ceil(((double)rlen*8)/hdfsBlocksize);
                JavaRDD<DoublePair> sdvals = dvals
                        .sortBy(new CreateDoubleKeyFunction2(), true, numPartitions);
 
@@ -127,7 +126,7 @@ public class RDDSortUtils
                
                //sort (creates sorted range per partition)
                long hdfsBlocksize = InfrastructureAnalyzer.getHDFSBlockSize();
-               int numPartitions = IntUtils.toInt(Math.ceil(((double)rlen*16)/hdfsBlocksize));
+               int numPartitions = (int)Math.ceil(((double)rlen*16)/hdfsBlocksize);
                JavaRDD<ValueIndexPair> sdvals = dvals
                        .sortByKey(new IndexComparator(asc), true, numPartitions)
                        .keys(); //workaround for index comparator
@@ -173,7 +172,7 @@ public class RDDSortUtils
                
                //sort (creates sorted range per partition)
                long hdfsBlocksize = InfrastructureAnalyzer.getHDFSBlockSize();
-               int numPartitions = IntUtils.toInt(Math.ceil(((double)rlen*16)/hdfsBlocksize));
+               int numPartitions = (int)Math.ceil(((double)rlen*16)/hdfsBlocksize);
                JavaRDD<ValueIndexPair> sdvals = dvals
                        .sortByKey(new IndexComparator(asc), true, numPartitions)
                        .keys(); //workaround for index comparator
@@ -250,7 +249,7 @@ public class RDDSortUtils
        {
                //collect orderby column for in-memory sorting
                MatrixBlock inMatBlock = SparkExecutionContext
-                               .toMatrixBlock(val, IntUtils.toInt(rlen), 1, brlen, bclen, -1);
+                               .toMatrixBlock(val, (int)rlen, 1, brlen, bclen, -1);
 
                //in-memory sort operation (w/ index return: source index in target position)
                ReorgOperator lrop = new ReorgOperator(new SortIndex(1, !asc, true));
@@ -260,7 +259,7 @@ public class RDDSortUtils
                //flip sort indices from <source ix in target pos> to <target ix in source pos>
                MatrixBlock sortedIxSrc = new MatrixBlock(sortedIx.getNumRows(), 1, false); 
                for (int i=0; i < sortedIx.getNumRows(); i++) 
-                       sortedIxSrc.quickSetValue(IntUtils.toInt(sortedIx.quickGetValue(i,0)-1), 0, i+1);
+                       sortedIxSrc.quickSetValue((int)sortedIx.quickGetValue(i,0)-1, 0, i+1);
 
                //broadcast index vector
                PartitionedBlock<MatrixBlock> pmb = new PartitionedBlock<>(sortedIxSrc, brlen, bclen);
@@ -462,7 +461,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), 1, false);
+                                       mb = new MatrixBlock((int)len, 1, false);
                                }
                                
                                mb.quickSetValue(pos, 0, val._1);
@@ -509,7 +508,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), 2, false);
+                                       mb = new MatrixBlock((int)len, 2, false);
                                }
                                
                                mb.quickSetValue(pos, 0, val._1.val1);
@@ -558,7 +557,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), 1, false);
+                                       mb = new MatrixBlock((int)len, 1, false);
                                }
                                
                                mb.quickSetValue(pos, 0, val._1.ix);
@@ -606,7 +605,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), 1, false);
+                                       mb = new MatrixBlock((int)len, 1, false);
                                }
                                
                                mb.quickSetValue(pos, 0, val._2+1);
@@ -652,7 +651,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), val._1.getNumColumns(), false);
+                                       mb = new MatrixBlock((int)len, val._1.getNumColumns(), false);
                                }
 
                                mb.leftIndexingOperations(val._1, pos, pos, 0, val._1.getNumColumns()-1, mb, UpdateType.INPLACE);
@@ -698,7 +697,7 @@ public class RDDSortUtils
                                                ret.add(new Tuple2<>(ix,mb));
                                        long len = UtilFunctions.computeBlockSize(_rlen, rix, _brlen);
                                        ix = new MatrixIndexes(rix,1);
-                                       mb = new MatrixBlock(IntUtils.toInt(len), 1, false);
+                                       mb = new MatrixBlock((int)len, 1, false);
                                }
                                
                                mb.quickSetValue(pos, 0, val._1.ix);
@@ -858,7 +857,7 @@ public class RDDSortUtils
                                        //produce next output tuple
                                        MatrixIndexes ixmap = _currBlk._1();
                                        MatrixBlock data = _currBlk._2();
-                                       MatrixBlock mbTargetIndex = _pmb.value().getBlock(IntUtils.toInt(ixmap.getRowIndex()), 1);
+                                       MatrixBlock mbTargetIndex = _pmb.value().getBlock((int)ixmap.getRowIndex(), 1);
                                        
                                        long valix = (long) mbTargetIndex.getValue(_currPos, 0);
                                        long rix = UtilFunctions.computeBlockIndex(valix, _brlen);

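The RDDSortUtils hunks above size the sort's partition count from the data volume: roughly one partition per HDFS block of serialized values, assuming 8 bytes per double (16 in the value-index variants). A small, self-contained Java sketch of that heuristic with hypothetical inputs follows; the real code obtains the block size from InfrastructureAnalyzer.getHDFSBlockSize().

public class SortPartitionSketch {
    public static void main(String[] args) {
        long rlen = 200_000_000L;            // hypothetical number of rows to sort
        long hdfsBlocksize = 128L << 20;     // hypothetical 128 MB HDFS block size
        // one partition per ~HDFS block of 8-byte double values
        int numPartitions = (int) Math.ceil(((double) rlen * 8) / hdfsBlocksize);
        System.out.println("sorting " + rlen + " values in " + numPartitions + " partitions");
    }
}
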
http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java
index 42eca7b..952135e 100644
--- a/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java
+++ b/src/main/java/org/apache/sysml/runtime/instructions/spark/utils/SparkUtils.java
@@ -49,7 +49,6 @@ import org.apache.sysml.runtime.matrix.data.MatrixIndexes;
 import org.apache.sysml.runtime.matrix.data.Pair;
 import org.apache.sysml.runtime.matrix.mapred.IndexedMatrixValue;
 import org.apache.sysml.runtime.util.UtilFunctions;
-import org.apache.sysml.utils.IntUtils;
 
 import scala.Tuple2;
 
@@ -121,7 +120,7 @@ public class SparkUtils
                        return SparkExecutionContext.getDefaultParallelism(true);
                double hdfsBlockSize = InfrastructureAnalyzer.getHDFSBlockSize();
                double matrixPSize = OptimizerUtils.estimatePartitionedSizeExactSparsity(mc);
-               return IntUtils.toInt( Math.max(Math.ceil(matrixPSize/hdfsBlockSize), 1));
+               return (int) Math.max(Math.ceil(matrixPSize/hdfsBlockSize), 1);
        }
        
        /**
@@ -188,8 +187,8 @@ public class SparkUtils
                //compute degree of parallelism and block ranges
                long size = mc.getNumBlocks() * OptimizerUtils.estimateSizeEmptyBlock(Math.min(
                                Math.max(mc.getRows(),1), mc.getRowsPerBlock()), Math.min(Math.max(mc.getCols(),1), mc.getColsPerBlock()));
-               int par = IntUtils.toInt(Math.min(4*Math.max(SparkExecutionContext.getDefaultParallelism(true),
-                               Math.ceil(size/InfrastructureAnalyzer.getHDFSBlockSize())), mc.getNumBlocks()));
+               int par = (int) Math.min(4*Math.max(SparkExecutionContext.getDefaultParallelism(true),
+                               Math.ceil(size/InfrastructureAnalyzer.getHDFSBlockSize())), mc.getNumBlocks());
                long pNumBlocks = (long)Math.ceil((double)mc.getNumBlocks()/par);
                
                //generate block offsets per partition

http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
index 848aaa0..fd06578 100644
--- a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
+++ b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
@@ -79,7 +79,6 @@ import org.apache.sysml.runtime.matrix.operators.RightScalarOperator;
 import org.apache.sysml.runtime.matrix.operators.ScalarOperator;
 import org.apache.sysml.runtime.util.IndexRange;
 import org.apache.sysml.utils.GPUStatistics;
-import org.apache.sysml.utils.IntUtils;
 import org.apache.sysml.utils.Statistics;
 
 import jcuda.Pointer;
@@ -1878,7 +1877,10 @@ public class LibMatrixCUDA {
        //********************************************************************/
 
        public static int toInt(long num) {
-               return IntUtils.toInt(num);
+               if(num >= Integer.MAX_VALUE || num <= Integer.MIN_VALUE) {
+                       throw new DMLRuntimeException("GPU : Exceeded supported size " + num);
+               }
+               return (int)num;
        }
 
        //********************************************************************/

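The hunk above keeps an explicit range check in LibMatrixCUDA.toInt, while the remaining call sites in this commit rely on a plain (int) cast. The practical difference: Math.toIntExact (used by the removed IntUtils helper, deleted at the end of this diff) and an explicit check fail fast on overflow, whereas a raw cast silently truncates. Below is a self-contained Java sketch of that difference, illustrative only and not code from this commit.

public class NarrowingSketch {
    // checked narrowing in the spirit of the retained LibMatrixCUDA.toInt above
    static int toIntChecked(long num) {
        if (num >= Integer.MAX_VALUE || num <= Integer.MIN_VALUE)
            throw new IllegalArgumentException("Exceeded supported size " + num);
        return (int) num;
    }

    public static void main(String[] args) {
        long huge = 5_000_000_000L;                   // larger than Integer.MAX_VALUE
        System.out.println((int) huge);               // prints 705032704: the cast wraps silently
        System.out.println(Math.toIntExact(1024L));   // 1024; would throw ArithmeticException for huge
        System.out.println(toIntChecked(huge));       // throws instead of truncating
    }
}
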
http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java b/src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java
index 1ff2f43..7e93598 100644
--- a/src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java
+++ b/src/main/java/org/apache/sysml/runtime/matrix/data/MatrixBlock.java
@@ -97,7 +97,6 @@ import org.apache.sysml.runtime.util.FastBufferedDataInputStream;
 import org.apache.sysml.runtime.util.FastBufferedDataOutputStream;
 import org.apache.sysml.runtime.util.IndexRange;
 import org.apache.sysml.runtime.util.UtilFunctions;
-import org.apache.sysml.utils.IntUtils;
 import org.apache.sysml.utils.NativeHelper;
 
 
@@ -256,7 +255,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                sparse = (val == 0) ? sp : false;
                nonZeros = (val == 0) ? 0 : (long)rl*cl;
                estimatedNNzsPerRow = (estnnz < 0 || !sparse) ? -1 :
-                       IntUtils.toInt(Math.ceil((double)estnnz/(double)rlen));
+                       (int)Math.ceil((double)estnnz/(double)rlen);
                
                //reset sparse/dense blocks
                if( sparse )
@@ -1091,7 +1090,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                
                if( allowCSR && nonZeros <= Integer.MAX_VALUE ) {
                        //allocate target in memory-efficient CSR format
-                       int lnnz = IntUtils.toInt(nonZeros);
+                       int lnnz = (int) nonZeros;
                        int[] rptr = new int[m+1];
                        int[] indexes = new int[lnnz];
                        double[] values = new double[lnnz];
@@ -1292,7 +1291,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                this.rlen=that.rlen;
                this.clen=that.clen;
                this.sparse=sp;
-               estimatedNNzsPerRow=IntUtils.toInt(Math.ceil((double)thatValue.getNonZeros()/(double)rlen));
+               estimatedNNzsPerRow=(int)Math.ceil((double)thatValue.getNonZeros()/(double)rlen);
                if(this.sparse && that.sparse)
                        copySparseToSparse(that);
                else if(this.sparse && !that.sparse)
@@ -1937,7 +1936,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                        //block: read ijv-triples (ordered by row and column) via custom 
                        //init to avoid repeated updates of row pointers per append
                        SparseBlockCSR sblockCSR = (SparseBlockCSR) sparseBlock;
-                       sblockCSR.initUltraSparse(IntUtils.toInt(nonZeros), in);
+                       sblockCSR.initUltraSparse((int)nonZeros, in);
                }
                else { //ULTRA-SPARSE COL
                        //col: read iv-pairs (should never happen since always dense)
@@ -2265,7 +2264,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                        out.writeLong( nonZeros ); 
                }
                else {
-                       out.writeInt( IntUtils.toInt(nonZeros) );
+                       out.writeInt( (int)nonZeros );
                }
        }
        
@@ -3476,7 +3475,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                SparseBlock sblock = result.getSparseBlock();
                                for( int i=0; i<result.rlen; i++ ) {
                                        final int row = i; //workaround for lambda compile issue
-                                       int lnnz = IntUtils.toInt(this.recomputeNonZeros(i, i, 0, this.clen-1) + Arrays.stream(that)
+                                       int lnnz = (int) (this.recomputeNonZeros(i, i, 0, this.clen-1) + Arrays.stream(that)
                                                .mapToLong(mb -> mb.recomputeNonZeros(row, row, 0, mb.clen-1)).sum());
                                        sblock.allocate(i, lnnz);
                                }
@@ -3691,8 +3690,8 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
        
        public final MatrixBlock leftIndexingOperations(MatrixBlock rhsMatrix,
                        IndexRange ixrange, MatrixBlock ret, UpdateType update) {
-               return leftIndexingOperations(rhsMatrix, IntUtils.toInt(ixrange.rowStart),
-                               IntUtils.toInt(ixrange.rowEnd), IntUtils.toInt(ixrange.colStart), IntUtils.toInt(ixrange.colEnd), ret, update);
+               return leftIndexingOperations(rhsMatrix, (int)ixrange.rowStart,
+                               (int)ixrange.rowEnd, (int)ixrange.colStart, (int)ixrange.colEnd, ret, update);
        }
 
        public MatrixBlock leftIndexingOperations(MatrixBlock rhsMatrix,
@@ -3818,8 +3817,8 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
 
        public MatrixBlock slice(IndexRange ixrange, MatrixBlock ret) {
                return slice(
-                               IntUtils.toInt(ixrange.rowStart), IntUtils.toInt(ixrange.rowEnd), 
-                               IntUtils.toInt(ixrange.colStart), IntUtils.toInt(ixrange.colEnd), true, ret);
+                               (int)ixrange.rowStart, (int)ixrange.rowEnd, 
+                               (int)ixrange.colStart, (int)ixrange.colEnd, true, ret);
        }
        
        public MatrixBlock slice(int rl, int ru) {
@@ -3970,44 +3969,44 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                if(colCut>range.colEnd)
                        blockColFactor=boundaryClen;
                
-               int minrowcut=IntUtils.toInt(Math.min(rowCut,range.rowEnd));
-               int mincolcut=IntUtils.toInt(Math.min(colCut, range.colEnd));
-               int maxrowcut=IntUtils.toInt(Math.max(rowCut, range.rowStart));
-               int maxcolcut=IntUtils.toInt(Math.max(colCut, range.colStart));
+               int minrowcut=(int)Math.min(rowCut,range.rowEnd);
+               int mincolcut=(int)Math.min(colCut, range.colEnd);
+               int maxrowcut=(int)Math.max(rowCut, range.rowStart);
+               int maxcolcut=(int)Math.max(colCut, range.colStart);
                
                if(range.rowStart<rowCut && range.colStart<colCut)
                {
                        topleft=(MatrixBlock) p.next().getValue();
                        //topleft.reset(blockRowFactor, blockColFactor, 
-                       //              checkSparcityOnSlide(rowCut-IntUtils.toInt(range.rowStart), colCut-IntUtils.toInt(range.colStart), blockRowFactor, blockColFactor));
+                       //              checkSparcityOnSlide(rowCut-(int)range.rowStart, colCut-(int)range.colStart, blockRowFactor, blockColFactor));
                        
                        topleft.reset(blockRowFactor, blockColFactor, 
-                                       estimateSparsityOnSlice(minrowcut-IntUtils.toInt(range.rowStart), mincolcut-IntUtils.toInt(range.colStart), blockRowFactor, blockColFactor));
+                                       estimateSparsityOnSlice(minrowcut-(int)range.rowStart, mincolcut-(int)range.colStart, blockRowFactor, blockColFactor));
                }
                if(range.rowStart<rowCut && range.colEnd>=colCut)
                {
                        topright=(MatrixBlock) p.next().getValue();
                        topright.reset(blockRowFactor, boundaryClen, 
-                                       estimateSparsityOnSlice(minrowcut-IntUtils.toInt(range.rowStart), IntUtils.toInt(range.colEnd)-maxcolcut+1, blockRowFactor, boundaryClen));
+                                       estimateSparsityOnSlice(minrowcut-(int)range.rowStart, (int)range.colEnd-maxcolcut+1, blockRowFactor, boundaryClen));
                }
                if(range.rowEnd>=rowCut && range.colStart<colCut)
                {
                        bottomleft=(MatrixBlock) p.next().getValue();
                        bottomleft.reset(boundaryRlen, blockColFactor, 
-                                       estimateSparsityOnSlice(IntUtils.toInt(range.rowEnd)-maxrowcut+1, mincolcut-IntUtils.toInt(range.colStart), boundaryRlen, blockColFactor));
+                                       estimateSparsityOnSlice((int)range.rowEnd-maxrowcut+1, mincolcut-(int)range.colStart, boundaryRlen, blockColFactor));
                }
                if(range.rowEnd>=rowCut && range.colEnd>=colCut)
                {
                        bottomright=(MatrixBlock) p.next().getValue();
                        bottomright.reset(boundaryRlen, boundaryClen, 
-                                       estimateSparsityOnSlice(IntUtils.toInt(range.rowEnd)-maxrowcut+1, IntUtils.toInt(range.colEnd)-maxcolcut+1, boundaryRlen, boundaryClen));
+                                       estimateSparsityOnSlice((int)range.rowEnd-maxrowcut+1, (int)range.colEnd-maxcolcut+1, boundaryRlen, boundaryClen));
                }
                
                if(sparse)
                {
                        if(sparseBlock!=null)
                        {
-                               int r=IntUtils.toInt(range.rowStart);
+                               int r=(int)range.rowStart;
                                for(; r<Math.min(Math.min(rowCut, sparseBlock.numRows()), range.rowEnd+1); r++)
                                        sliceHelp(r, range, colCut, topleft, topright, normalBlockRowFactor-rowCut, normalBlockRowFactor, normalBlockColFactor);
                                
@@ -4019,11 +4018,11 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                        if(denseBlock!=null)
                        {
                                double[] a = getDenseBlockValues();
-                               int i=(IntUtils.toInt(range.rowStart))*clen;
-                               int r=IntUtils.toInt(range.rowStart);
+                               int i=((int)range.rowStart)*clen;
+                               int r=(int) range.rowStart;
                                for(; r<Math.min(rowCut, range.rowEnd+1); r++)
                                {
-                                       int c=IntUtils.toInt(range.colStart);
+                                       int c=(int) range.colStart;
                                        for(; c<Math.min(colCut, range.colEnd+1); c++)
                                                topleft.appendValue(r+normalBlockRowFactor-rowCut, c+normalBlockColFactor-colCut, a[i+c]);
                                        for(; c<=range.colEnd; c++)
@@ -4033,7 +4032,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                
                                for(; r<=range.rowEnd; r++)
                                {
-                                       int c=IntUtils.toInt(range.colStart);
+                                       int c=(int) range.colStart;
                                        for(; c<Math.min(colCut, range.colEnd+1); c++)
                                                bottomleft.appendValue(r-rowCut, c+normalBlockColFactor-colCut, a[i+c]);
                                        for(; c<=range.colEnd; c++)
@@ -4051,10 +4050,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                
                int[] cols=sparseBlock.indexes(r);
                double[] values=sparseBlock.values(r);
-               int start=sparseBlock.posFIndexGTE(r, IntUtils.toInt(range.colStart));
+               int start=sparseBlock.posFIndexGTE(r, (int)range.colStart);
                if(start<0) 
                        return;
-               int end=sparseBlock.posFIndexLTE(r, IntUtils.toInt(range.colEnd));
+               int end=sparseBlock.posFIndexLTE(r, (int)range.colEnd);
+               int end=sparseBlock.posFIndexLTE(r, (int)range.colEnd);
                if(end<0 || start>end) 
                        return;
                
@@ -4124,9 +4123,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                boolean lsparse = evalSparseFormatInMemory(rlen, clen, (long)(estimatedSps*rlen*clen));
                
                if(result==null)
-                       result=new MatrixBlock(rlen, clen, lsparse, IntUtils.toInt(estimatedSps*rlen*clen));
+                       result=new MatrixBlock(rlen, clen, lsparse, (int)(estimatedSps*rlen*clen));
                else
-                       result.reset(rlen, clen, lsparse, IntUtils.toInt(estimatedSps*rlen*clen));
+                       result.reset(rlen, clen, lsparse, (int)(estimatedSps*rlen*clen));
                
                
                if(sparse)
@@ -4135,12 +4134,12 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                        {
                                if(!complementary)//if zero out
                                {
-                                       for(int r=0; r<Math.min(IntUtils.toInt(range.rowStart), sparseBlock.numRows()); r++)
+                                       for(int r=0; r<Math.min((int)range.rowStart, sparseBlock.numRows()); r++)
                                                ((MatrixBlock) result).appendRow(r, sparseBlock.get(r));
-                                       for(int r=Math.min(IntUtils.toInt(range.rowEnd+1), sparseBlock.numRows()); r<Math.min(rlen, sparseBlock.numRows()); r++)
+                                       for(int r=Math.min((int)range.rowEnd+1, sparseBlock.numRows()); r<Math.min(rlen, sparseBlock.numRows()); r++)
                                                ((MatrixBlock) result).appendRow(r, sparseBlock.get(r));
                                }
-                               for(int r=IntUtils.toInt(range.rowStart); r<=Math.min(range.rowEnd, sparseBlock.numRows()-1); r++)
+                               for(int r=(int)range.rowStart; r<=Math.min(range.rowEnd, sparseBlock.numRows()-1); r++)
                                {
                                        if(sparseBlock.isEmpty(r)) 
                                                continue;
@@ -4149,9 +4148,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                        
                                        if(complementary)//if selection
                                        {
-                                               int start=sparseBlock.posFIndexGTE(r,IntUtils.toInt(range.colStart));
+                                               int start=sparseBlock.posFIndexGTE(r,(int)range.colStart);
                                                if(start<0) continue;
-                                               int end=sparseBlock.posFIndexGT(r,IntUtils.toInt(range.colEnd));
+                                               int end=sparseBlock.posFIndexGT(r,(int)range.colEnd);
                                                if(end<0 || start>end) 
                                                        continue;
                                                
@@ -4163,9 +4162,9 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                        {
                                                int pos = sparseBlock.pos(r);
                                                int len = sparseBlock.size(r);
-                                               int start=sparseBlock.posFIndexGTE(r,IntUtils.toInt(range.colStart));
+                                               int start=sparseBlock.posFIndexGTE(r,(int)range.colStart);
                                                if(start<0) start=pos+len;
-                                               int end=sparseBlock.posFIndexGT(r,IntUtils.toInt(range.colEnd));
+                                               int end=sparseBlock.posFIndexGT(r,(int)range.colEnd);
                                                if(end<0) end=pos+len;
                                                
                                                for(int i=pos; i<start; i++)
@@ -4186,10 +4185,10 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                double[] a = getDenseBlockValues();
                                if(complementary)//if selection
                                {
-                                       int offset=(IntUtils.toInt(range.rowStart))*clen;
-                                       for(int r=IntUtils.toInt(range.rowStart); r<=range.rowEnd; r++)
+                                       int offset=((int)range.rowStart)*clen;
+                                       for(int r=(int) range.rowStart; r<=range.rowEnd; r++)
                                        {
-                                               for(int c=IntUtils.toInt(range.colStart); c<=range.colEnd; c++)
+                                               for(int c=(int) range.colStart; c<=range.colEnd; c++)
                                                        ((MatrixBlock) result).appendValue(r, c, a[offset+c]);
                                                offset+=clen;
                                        }
@@ -4197,15 +4196,15 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                {
                                        int offset=0;
                                        int r=0;
-                                       for(; r<IntUtils.toInt(range.rowStart); r++)
+                                       for(; r<(int)range.rowStart; r++)
                                                for(int c=0; c<clen; c++, offset++)
                                                        ((MatrixBlock) result).appendValue(r, c, a[offset]);
                                        
-                                       for(; r<=IntUtils.toInt(range.rowEnd); r++)
+                                       for(; r<=(int)range.rowEnd; r++)
                                        {
-                                               for(int c=0; c<IntUtils.toInt(range.colStart); c++)
+                                               for(int c=0; c<(int)range.colStart; c++)
                                                        ((MatrixBlock) result).appendValue(r, c, a[offset+c]);
-                                               for(int c=IntUtils.toInt(range.colEnd)+1; c<clen; c++)
+                                               for(int c=(int)range.colEnd+1; c<clen; c++)
                                                        ((MatrixBlock) result).appendValue(r, c, a[offset+c]);
                                                offset+=clen;
                                        }
@@ -4628,7 +4627,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                
                // prepare result, currently always dense
                // #rows in temp matrix = 1 + #nnz in the input ( 1 is for the "zero" value)
-               int dim1 = IntUtils.toInt(1+this.getNonZeros());
+               int dim1 = (int) (1+this.getNonZeros());
                if(result==null)
                        result=new MatrixBlock(dim1, 2, false);
                else
@@ -4944,7 +4943,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                                throw new DMLRuntimeException("Invalid value (" + min + ") encountered in 'groups' while computing groupedAggregate");
                        if ( max <= 0 )
                                throw new DMLRuntimeException("Invalid value (" + max + ") encountered in 'groups' while computing groupedAggregate.");
-                       ngroups = IntUtils.toInt( max );
+                       ngroups = (int) max;
                }
        
                // Allocate result matrix
@@ -5192,7 +5191,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
        {       
                CTable ctable = CTable.getCTableFnObject();
                double w = scalarThat;
-               int offset = IntUtils.toInt((ix1.getRowIndex()-1)*brlen); 
+               int offset = (int) ((ix1.getRowIndex()-1)*brlen);  
                
                //sparse-unsafe ctable execution
                //(because input values of 0 are invalid and have to result in errors) 
@@ -5377,7 +5376,7 @@ public class MatrixBlock extends MatrixValue implements CacheBlock, Externalizab
                        R.reset(rlen, clen, sparse);
                else if( qop.wtype3 != null ) {
                        MatrixCharacteristics mc = qop.wtype3.computeOutputCharacteristics(X.rlen, X.clen, U.clen);
-                       R.reset( IntUtils.toInt(mc.getRows()), IntUtils.toInt(mc.getCols()), qop.wtype3.isBasic()?X.isInSparseFormat():false);
+                       R.reset( (int)mc.getRows(), (int)mc.getCols(), qop.wtype3.isBasic()?X.isInSparseFormat():false);
                }
                
                //core block operation

http://git-wip-us.apache.org/repos/asf/systemml/blob/c3fdbb4d/src/main/java/org/apache/sysml/utils/IntUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/utils/IntUtils.java b/src/main/java/org/apache/sysml/utils/IntUtils.java
deleted file mode 100644
index aa1f963..0000000
--- a/src/main/java/org/apache/sysml/utils/IntUtils.java
+++ /dev/null
@@ -1,34 +0,0 @@
-/*
- * Licensed to the Apache Software Foundation (ASF) under one
- * or more contributor license agreements.  See the NOTICE file
- * distributed with this work for additional information
- * regarding copyright ownership.  The ASF licenses this file
- * to you under the Apache License, Version 2.0 (the
- * "License"); you may not use this file except in compliance
- * with the License.  You may obtain a copy of the License at
- * 
- *   http://www.apache.org/licenses/LICENSE-2.0
- * 
- * Unless required by applicable law or agreed to in writing,
- * software distributed under the License is distributed on an
- * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
- * KIND, either express or implied.  See the License for the
- * specific language governing permissions and limitations
- * under the License.
- */
-package org.apache.sysml.utils;
-import static java.lang.Math.toIntExact;
-
-
-public class IntUtils {
-
-       public static int toInt(long val) {
-               return toIntExact(val);
-       }
-       
-       public static int toInt(double val) {
-               long val1 = (long) val;
-               return toIntExact(val1);
-       }
-       
-}
