Repository: systemml
Updated Branches:
  refs/heads/master c14682b9c -> 34bb3ca82


http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/api/DMLScript.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/api/DMLScript.java 
b/src/main/java/org/apache/sysml/api/DMLScript.java
index 767fead..fa4b4a1 100644
--- a/src/main/java/org/apache/sysml/api/DMLScript.java
+++ b/src/main/java/org/apache/sysml/api/DMLScript.java
@@ -173,7 +173,10 @@ public class DMLScript
        
        public static boolean           USE_ACCELERATOR     = 
DMLOptions.defaultOptions.gpu;
        public static boolean           FORCE_ACCELERATOR   = 
DMLOptions.defaultOptions.forceGPU;
-       public static boolean           SYNCHRONIZE_GPU         = true;         
                                                                // whether to 
synchronize GPU after every instruction
+       // whether to synchronize GPU after every instruction
+       public static boolean           SYNCHRONIZE_GPU         = true;
+       // whether to perform eager CUDA free on rmvar
+       public static boolean           EAGER_CUDA_FREE         = false;
 
 
        public static boolean _suppressPrint2Stdout = false;  // flag that 
indicates whether or not to suppress any prints to stdout

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java 
b/src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java
index 60c84aa..a49ffda 100644
--- a/src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java
+++ b/src/main/java/org/apache/sysml/api/ScriptExecutorUtils.java
@@ -79,6 +79,7 @@ public class ScriptExecutorUtils {
                LibMatrixDNN.DISPLAY_STATISTICS = 
dmlconf.getBooleanValue(DMLConfig.EXTRA_DNN_STATS);
                DMLScript.FINEGRAINED_STATISTICS = 
dmlconf.getBooleanValue(DMLConfig.EXTRA_FINEGRAINED_STATS);
                DMLScript.SYNCHRONIZE_GPU = 
dmlconf.getBooleanValue(DMLConfig.SYNCHRONIZE_GPU);
+               DMLScript.EAGER_CUDA_FREE = 
dmlconf.getBooleanValue(DMLConfig.EAGER_CUDA_FREE);
                DMLScript.STATISTICS_MAX_WRAP_LEN = 
dmlconf.getIntValue(DMLConfig.STATS_MAX_WRAP_LEN);
 
                boolean exceptionThrown = false;

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/conf/DMLConfig.java
----------------------------------------------------------------------
diff --git a/src/main/java/org/apache/sysml/conf/DMLConfig.java 
b/src/main/java/org/apache/sysml/conf/DMLConfig.java
index b1d0a2e..857071d 100644
--- a/src/main/java/org/apache/sysml/conf/DMLConfig.java
+++ b/src/main/java/org/apache/sysml/conf/DMLConfig.java
@@ -85,7 +85,8 @@ public class DMLConfig
        public static final String EXTRA_GPU_STATS      = 
"systemml.stats.extraGPU"; //boolean
        public static final String EXTRA_DNN_STATS      = 
"systemml.stats.extraDNN"; //boolean
        public static final String AVAILABLE_GPUS       = 
"systemml.gpu.availableGPUs"; // String to specify which GPUs to use (a range, 
all GPUs, comma separated list or a specific GPU)
-       public static final String SYNCHRONIZE_GPU       = 
"systemml.gpu.sync.postProcess"; // boolean: whether to synchronize GPUs after 
every instruction 
+       public static final String SYNCHRONIZE_GPU      = 
"systemml.gpu.sync.postProcess"; // boolean: whether to synchronize GPUs after 
every instruction 
+       public static final String EAGER_CUDA_FREE              = 
"systemml.gpu.eager.cudaFree"; // boolean: whether to perform eager CUDA free 
on rmvar
        
        // Fraction of available memory to use. The available memory is 
computer when the GPUContext is created
        // to handle the tradeoff on calling cudaMemGetInfo too often.
@@ -134,7 +135,8 @@ public class DMLConfig
                _defaultVals.put(EXTRA_DNN_STATS,        "false" );
                _defaultVals.put(GPU_MEMORY_UTILIZATION_FACTOR,      "0.9" );
                _defaultVals.put(AVAILABLE_GPUS,         "-1");
-               _defaultVals.put(SYNCHRONIZE_GPU,        "false" );
+               _defaultVals.put(SYNCHRONIZE_GPU,        "true" );
+               _defaultVals.put(EAGER_CUDA_FREE,        "false" );
        }
        
        public DMLConfig()
@@ -417,7 +419,7 @@ public class DMLConfig
                                COMPRESSED_LINALG, 
                                CODEGEN, CODEGEN_COMPILER, CODEGEN_PLANCACHE, 
CODEGEN_LITERALS,
                                EXTRA_GPU_STATS, EXTRA_DNN_STATS, 
EXTRA_FINEGRAINED_STATS, STATS_MAX_WRAP_LEN,
-                               AVAILABLE_GPUS, SYNCHRONIZE_GPU
+                               AVAILABLE_GPUS, SYNCHRONIZE_GPU, EAGER_CUDA_FREE
                }; 
                
                StringBuilder sb = new StringBuilder();

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java
index 2aa73b4..149de80 100644
--- 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java
+++ 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/GPUInstruction.java
@@ -120,7 +120,8 @@ public abstract class GPUInstruction extends Instruction {
        public final static String MISC_TIMER_REDUCE_COL_KERNEL =               
 "rcolk";  // time spent in reduce column kernel
        
        public final static String MISC_TIMER_RIX_DENSE_OP =                    
 "drix";    // time spent in the right indexing dense kernel
-       public final static String MISC_TIMER_RIX_SPARSE_DENSE_OP =             
 "sdrix";   // time spent in the right indexing sparse dense kernel
+       public final static String MISC_TIMER_RIX_SPARSE_DENSE_OP_ROWWISE =     
 "sdrixr";   // time spent in the right indexing sparse dense kernel (row-wise 
parallelism)
+       public final static String MISC_TIMER_RIX_SPARSE_DENSE_OP_NNZ =         
 "sdrixn";   // time spent in the right indexing sparse dense kernel (nnz 
parallelism)
 
        // Deep learning operators
        public final static String MISC_TIMER_ACTIVATION_FORWARD_LIB =         
"nnaf";  // time spent in cudnnActivationForward

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java
 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java
index 7bb8b07..5a6e21c 100644
--- 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java
+++ 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/CSRPointer.java
@@ -521,7 +521,7 @@ public class CSRPointer {
         * @throws DMLRuntimeException ?
         */
        public void deallocate() throws DMLRuntimeException {
-               deallocate(false);
+               deallocate(DMLScript.EAGER_CUDA_FREE);
        }
 
        /**

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java
 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java
index 8a823cc..118602b 100644
--- 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java
+++ 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUContext.java
@@ -369,7 +369,7 @@ public class GPUContext {
         * @param toFree {@link Pointer} instance to be freed
         */
        public void cudaFreeHelper(final Pointer toFree) {
-               cudaFreeHelper(null, toFree, false);
+               cudaFreeHelper(null, toFree, DMLScript.EAGER_CUDA_FREE);
        }
 
        /**
@@ -389,7 +389,7 @@ public class GPUContext {
         * @param toFree          {@link Pointer} instance to be freed
         */
        public void cudaFreeHelper(String instructionName, final Pointer 
toFree) {
-               cudaFreeHelper(instructionName, toFree, false);
+               cudaFreeHelper(instructionName, toFree, 
DMLScript.EAGER_CUDA_FREE);
        }
 
        /**

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java
 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java
index 4bc983e..31bf151 100644
--- 
a/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java
+++ 
b/src/main/java/org/apache/sysml/runtime/instructions/gpu/context/GPUObject.java
@@ -1050,7 +1050,7 @@ public class GPUObject {
         * @throws CacheException ?
         */
        public void clearData() throws DMLRuntimeException {
-               clearData(false);
+               clearData(DMLScript.EAGER_CUDA_FREE);
        }
 
        /**

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java 
b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
index 5e2136f..5f31f28 100644
--- a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
+++ b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCUDA.java
@@ -2249,14 +2249,13 @@ public class LibMatrixCUDA {
                                        + (cl + 1) + ":" + (cu + 1) + "] " + 
"must be within matrix dimensions [" + in1.getNumRows() + ","
                                        + in1.getNumColumns() + "]");
                }
-
-               
+               int len1 = toInt(in1.getNumColumns());
                if(isInSparseFormat(gCtx, in1)) {
                        // Input in1 is in sparse format and output is in dense 
format
                        MatrixObject out = 
getDenseMatrixOutputForGPUInstruction(ec, instName, outputName, ru - rl + 1, cu 
- cl + 1);
                        CSRPointer inPointer = getSparsePointer(gCtx, in1, 
instName);
                        Pointer outPointer = getDensePointer(gCtx, out, 
instName);
-                       sliceSparseDense(gCtx, instName, inPointer, outPointer, 
rl, ru, cl, cu);
+                       sliceSparseDense(gCtx, instName, inPointer, outPointer, 
rl, ru, cl, cu, len1);
                }
                else {
                        // Input in1 is in dense format (see inPointer)
@@ -2264,9 +2263,7 @@ public class LibMatrixCUDA {
 
                        Pointer inPointer = getDensePointer(gCtx, in1, 
instName);
                        Pointer outPointer = getDensePointer(gCtx, out, 
instName);
-                       int len1 = toInt(in1.getNumColumns());
-                       int len2 = 
toInt(ec.getMatrixObject(outputName).getNumColumns());
-                       sliceDenseDense(gCtx, instName, inPointer, outPointer, 
rl, ru, cl, cu, len1, len2);
+                       sliceDenseDense(gCtx, instName, inPointer, outPointer, 
rl, ru, cl, cu, len1);
                }
        }
        
@@ -2282,19 +2279,19 @@ public class LibMatrixCUDA {
         * @param cl column lower
         * @param cu column upper
         * @param inClen input number of columns
-        * @param retClen output number of columns
         * @throws DMLRuntimeException if error occurs
         */
        protected static void sliceDenseDense(GPUContext gCtx, String instName, 
Pointer inPointer, Pointer outPointer, 
-                       int rl, int ru, int cl, int cu, int inClen, int 
retClen) throws DMLRuntimeException {
+                       int rl, int ru, int cl, int cu, int inClen) throws 
DMLRuntimeException {
                long t0 = GPUStatistics.DISPLAY_STATISTICS ? System.nanoTime() 
: 0;
+               long retClen = cu - cl + 1;
                if (inClen == retClen) {
                        cudaMemcpy(outPointer, inPointer.withByteOffset(rl * 
inClen * Sizeof.DOUBLE), (ru - rl + 1) * inClen
                                        * Sizeof.DOUBLE, 
cudaMemcpyDeviceToDevice);
                } else {
-                       int size = ru - rl + 1;
-                       getCudaKernels(gCtx).launchKernel("slice_dense_dense", 
ExecutionConfig.getConfigForSimpleVectorOperations(size),
-                                       inPointer, outPointer, rl, ru, cl, cu, 
inClen, retClen);
+                       long retRlen = ru - rl + 1;
+                       getCudaKernels(gCtx).launchKernel("slice_dense_dense", 
ExecutionConfig.getConfigForSimpleVectorOperations(toInt(retRlen*retClen)),
+                                       inPointer, outPointer, rl, ru, cl, cu, 
inClen,  retRlen, retClen);
                }
                if (GPUStatistics.DISPLAY_STATISTICS) 
GPUStatistics.maintainCPMiscTimes(instName, 
GPUInstruction.MISC_TIMER_RIX_DENSE_OP, System.nanoTime() - t0);
        }
@@ -2310,18 +2307,52 @@ public class LibMatrixCUDA {
         * @param ru row upper
         * @param cl column lower
         * @param cu column upper
+        * @param inClen number of columns of input matrix
         * @throws DMLRuntimeException if error
         */
-       protected static void sliceSparseDense(GPUContext gCtx, String 
instName, CSRPointer inPointer, Pointer outPointer, int rl, int ru, int cl, int 
cu) throws DMLRuntimeException {
-               int size = ru - rl + 1;
+       protected static void sliceSparseDense(GPUContext gCtx, String 
instName, CSRPointer inPointer, Pointer outPointer, 
+                       int rl, int ru, int cl, int cu, int inClen) throws 
DMLRuntimeException {
+               int retRlen = ru - rl + 1;
                long t0 = GPUStatistics.DISPLAY_STATISTICS ? System.nanoTime() 
: 0;
                int retClen = cu - cl + 1;
+               
+               int size = -1; String kernel = null; String timer = null;
+               
+               // Note: row-wise parallelization scheme iterates over input 
rows in single thread 
+               // whereas nnz parallelization scheme iterates over number of 
output rows in single thread.
+               if(inClen > 10 && retClen > 2*retRlen) {
+                       // Perform nnz parallelization for wide and short 
matrices
+                       size = getNnz(inPointer, rl, ru);
+                       timer = 
GPUInstruction.MISC_TIMER_RIX_SPARSE_DENSE_OP_NNZ;
+                       kernel = "slice_sparse_dense_nnz";
+               }
+               else {
+                       size = retRlen;
+                       timer = 
GPUInstruction.MISC_TIMER_RIX_SPARSE_DENSE_OP_ROWWISE;
+                       kernel = "slice_sparse_dense_row";
+               }
+               
                // Performs a slice operation where the input matrix is sparse 
and the output matrix is dense.
                // This function avoids unnecessary sparse to dense conversion 
of the input matrix.
                // We can generalize this later to output sparse matrix.
-               getCudaKernels(gCtx).launchKernel("slice_sparse_dense", 
ExecutionConfig.getConfigForSimpleVectorOperations(size),
+               getCudaKernels(gCtx).launchKernel(kernel, 
ExecutionConfig.getConfigForSimpleVectorOperations(size),
                                inPointer.val, inPointer.rowPtr, 
inPointer.colInd, outPointer, rl, ru, cl, cu, retClen);
-               if (GPUStatistics.DISPLAY_STATISTICS) 
GPUStatistics.maintainCPMiscTimes(instName, 
GPUInstruction.MISC_TIMER_RIX_SPARSE_DENSE_OP, System.nanoTime() - t0);
+               if (GPUStatistics.DISPLAY_STATISTICS) 
GPUStatistics.maintainCPMiscTimes(instName, timer, System.nanoTime() - t0);
+       }
+       
+       /**
+        * Returns the number of non-zeroes in the given range of rows
+        * 
+        * @param inPointer input CSR pointer
+        * @param rl lower row index (inclusive and zero-based)
+        * @param ru upper row index (inclusive and zero-based)
+        * @return number of non-zeroes
+        */
+       private static int getNnz(CSRPointer inPointer, int rl, int ru) {
+               int[] rlPtr = { -1 }; int[] ruPtr = { -1 };
+               cudaMemcpy(Pointer.to(rlPtr), 
inPointer.rowPtr.withByteOffset(rl*Sizeof.INT), Sizeof.INT, 
cudaMemcpyDeviceToHost);
+               cudaMemcpy(Pointer.to(ruPtr), 
inPointer.rowPtr.withByteOffset((ru+1)*Sizeof.INT), Sizeof.INT, 
cudaMemcpyDeviceToHost);
+               return ruPtr[0] - rlPtr[0];
        }
 
        public static void cbind(ExecutionContext ec, GPUContext gCtx, String 
instName, MatrixObject in1, MatrixObject in2, String outputName) throws 
DMLRuntimeException {

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java
----------------------------------------------------------------------
diff --git 
a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java 
b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java
index 9a39b1c..602edce 100644
--- a/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java
+++ b/src/main/java/org/apache/sysml/runtime/matrix/data/LibMatrixCuDNN.java
@@ -692,13 +692,15 @@ public class LibMatrixCuDNN extends LibMatrixCUDA {
                 */
                public Pointer getNthRow(int n) throws DMLRuntimeException {
                        if(isInputInSparseFormat) {
+                               jcuda.runtime.JCuda.cudaDeviceSynchronize();
                                long t0 = GPUStatistics.DISPLAY_STATISTICS ? 
System.nanoTime() : 0;
                                cudaMemset(outPointer, 0, 
numColumns*Sizeof.DOUBLE);
+                               jcuda.runtime.JCuda.cudaDeviceSynchronize();
                                if(GPUStatistics.DISPLAY_STATISTICS) 
GPUStatistics.maintainCPMiscTimes(instName, GPUInstruction.MISC_TIMER_SET_ZERO, 
System.nanoTime() - t0);
-                               sliceSparseDense(gCtx, instName, 
(CSRPointer)inPointer, outPointer, n, n, 0, toInt(numColumns-1));
+                               sliceSparseDense(gCtx, instName, 
(CSRPointer)inPointer, outPointer, n, n, 0, toInt(numColumns-1), numColumns);
                        }
                        else {
-                               sliceDenseDense(gCtx, instName, 
(Pointer)inPointer, outPointer, n, n, 0, toInt(numColumns-1), numColumns, 
numColumns);
+                               sliceDenseDense(gCtx, instName, 
(Pointer)inPointer, outPointer, n, n, 0, toInt(numColumns-1), numColumns);
                        }
                        return outPointer;
                }

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
----------------------------------------------------------------------
diff --git a/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala 
b/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
index 6e3e1dc..91373bc 100644
--- a/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
+++ b/src/main/scala/org/apache/sysml/api/dl/Caffe2DML.scala
@@ -206,6 +206,76 @@ class Caffe2DML(val sc: SparkContext,
     mloutput = baseFit(df, sc)
     new Caffe2DMLModel(this)
   }
+  /**
+   * Returns maximum dimensions of convolution and max pooling layer for 
either DIRECT_CONV2D or IM2COL
+   */
+  def getMaxDimensionOfConvLayers(approach:String, batchSize:Int):Int = {
+    val convOrPoolLayers = net.getLayers.map(l => 
net.getCaffeLayer(l)).filter(l => l.isInstanceOf[Convolution] || 
l.isInstanceOf[MaxPooling])
+    if(convOrPoolLayers.length == 0) {
+      return -1
+    }
+    else if(approach.equalsIgnoreCase("DIRECT_CONV2D") || 
approach.equalsIgnoreCase("IM2COL")) {
+      convOrPoolLayers
+        .map(l => {
+          if(l.isInstanceOf[Convolution]) {
+            val convLayer = l.asInstanceOf[Convolution]
+            val CHW = 
convLayer.numChannels.toInt*convLayer.Hin.toInt*convLayer.Win.toInt 
+            val KPQ = 
convLayer.numKernels.toInt*convLayer.Hout.toInt*convLayer.Wout.toInt
+            val inputOutputMaxCol = Math.max(CHW, KPQ)
+            if(approach.equalsIgnoreCase("DIRECT_CONV2D"))
+              inputOutputMaxCol
+            else {
+              val CRS = 
convLayer.numChannels.toInt*convLayer.kernel_h.toInt*convLayer.kernel_w.toInt
+              val NPQ = batchSize*convLayer.Hout.toInt*convLayer.Wout.toInt
+              return Math.max(Math.max(inputOutputMaxCol, CRS), NPQ)
+            }
+          }
+          else if(l.isInstanceOf[MaxPooling]) {
+            val maxpoolLayer = l.asInstanceOf[MaxPooling]
+            val CHW = 
maxpoolLayer.numChannels.toInt*maxpoolLayer.Hin.toInt*maxpoolLayer.Win.toInt 
+            val CPQ = 
maxpoolLayer.numChannels.toInt*maxpoolLayer.Hout.toInt*maxpoolLayer.Wout.toInt
+            Math.max(CHW, CPQ)
+          }
+          else {
+            throw new RuntimeException("Unexpected error: Incorrect layer type 
for " + l.param.getName)
+          }
+        }).max
+    }
+    else {
+      throw new RuntimeException("Unsupported approach:" + approach)
+    }
+  }
+  /**
+   * Returns maximum size of matrix blocks for either DIRECT_CONV2D or IM2COL
+   */
+  def getMaxMatrixBlockSize(approach:String, batchSize:Int):Long = {
+    if(approach.equalsIgnoreCase("DIRECT_CONV2D") || 
approach.equalsIgnoreCase("IM2COL")) {
+      net.getLayers
+        .map(l => net.getCaffeLayer(l))
+        .map(l => {
+          if(l.isInstanceOf[Convolution]) {
+            val convLayer = l.asInstanceOf[Convolution]
+            val CHW = 
convLayer.numChannels.toLong*convLayer.Hin.toLong*convLayer.Win.toLong 
+            val KPQ = 
convLayer.numKernels.toLong*convLayer.Hout.toLong*convLayer.Wout.toLong
+            val inputOutputMaxCol = Math.max(CHW, KPQ)
+            if(approach.equalsIgnoreCase("DIRECT_CONV2D"))
+              batchSize*inputOutputMaxCol
+            else {
+              val CRS = 
convLayer.numChannels.toLong*convLayer.kernel_h.toLong*convLayer.kernel_w.toLong
+              val NPQ = batchSize*convLayer.Hout.toLong*convLayer.Wout.toLong
+              return Math.max(Math.max(batchSize*inputOutputMaxCol, 
batchSize*CRS), batchSize*NPQ)
+            }
+          }
+          else {
+            val outputShape = l.outputShape
+            
batchSize*outputShape._1.toLong*outputShape._2.toLong*outputShape._3.toLong
+          }
+        }).max
+    }
+    else {
+      throw new RuntimeException("Unsupported approach:" + approach)
+    }
+  }
   // --------------------------------------------------------------
   // Returns true if last 2 of 4 dimensions are 1.
   // The first dimension refers to number of input datapoints.

http://git-wip-us.apache.org/repos/asf/systemml/blob/34bb3ca8/src/test/java/org/apache/sysml/test/gpu/GPUTests.java
----------------------------------------------------------------------
diff --git a/src/test/java/org/apache/sysml/test/gpu/GPUTests.java 
b/src/test/java/org/apache/sysml/test/gpu/GPUTests.java
index 56e0e92..b4e4b62 100644
--- a/src/test/java/org/apache/sysml/test/gpu/GPUTests.java
+++ b/src/test/java/org/apache/sysml/test/gpu/GPUTests.java
@@ -46,6 +46,9 @@ import org.junit.BeforeClass;
  */
 public abstract class GPUTests extends AutomatedTestBase {
 
+       // To run the test until this issue is resolved
+       protected final static boolean IGNORE_CLEAR_MEMORY_BUG = true;
+       
        protected final static String TEST_DIR = 
"org/apache/sysml/api/mlcontext";
        protected static SparkSession spark;
        protected final double THRESHOLD = 1e-9;    // for relative error
@@ -79,7 +82,7 @@ public abstract class GPUTests extends AutomatedTestBase {
        /**
         * Clear out the memory on all GPUs
         */
-       protected void clearGPUMemory() {
+       protected synchronized void clearGPUMemory() {
                try {
                        int count = GPUContextPool.getDeviceCount();
                        int freeCount = GPUContextPool.getAvailableCount();
@@ -88,7 +91,12 @@ public abstract class GPUTests extends AutomatedTestBase {
                        List<GPUContext> gCtxs = 
GPUContextPool.reserveAllGPUContexts();
                        for (GPUContext gCtx : gCtxs) {
                                gCtx.initializeThread();
-                               gCtx.clearMemory();
+                               try {
+                                       gCtx.clearMemory();
+                               } catch(RuntimeException e) {
+                                       if(!IGNORE_CLEAR_MEMORY_BUG)
+                                               throw e;
+                               }
                        }
                        GPUContextPool.freeAllGPUContexts();
 

Reply via email to