Dear all,

Today I am trying to run a CUDA program on a two-node Hadoop cluster with GPUs enabled, but I am facing some issues while running the CUDA code through Hadoop Pipes.

I read the entire Hadoop wiki page, but it does not explain how to run third-party programs, whereas JCuda provides a way to include all the CUDA jars and binaries needed to run on a Hadoop cluster.

I have attached the program's code; please find it attached.

I am able to run it through Java JNI, but I want to achieve the same through Hadoop Pipes.
Is this possible? Any guidance or configuration parameters for achieving it would definitely help me.


Thanks & best regards,

Adarsh Sharma
#include <algorithm>
#include <limits>
#include <string>

#include <cuda.h>
 
#include  "stdint.h"  // <--- to prevent uint64_t errors! 
 
#include "hadoop/Pipes.hh"
#include "hadoop/TemplateFactory.hh"
#include "hadoop/StringUtils.hh"

#include "5.cu"
 
using namespace std;
 
class WordCountMapper : public HadoopPipes::Mapper 
{
	private:
		// Number of elements in the work array (static so it can be a
		// compile-time constant; the original non-static const member was
		// also assigned in the constructor, which is ill-formed).
		static const int N = 10;

		float *a_h;       // host buffer, N floats
		float *a_d;       // device buffer, N floats
		int block_size;   // threads per block for the kernel launch
		int n_blocks;     // blocks in the launch grid (ceil(N / block_size))
		size_t size;      // buffer size in BYTES: N * sizeof(float)

	public:
		// Constructor: allocate and initialize the host and device buffers
		// once per task. The buffers must remain alive for the whole task,
		// because map() launches the kernel against a_d and copies the
		// result back into a_h for every input record.
		WordCountMapper( HadoopPipes::TaskContext& context ) 
		{
	  		size = N * sizeof(float);
	  		a_h = (float *)malloc(size);        
	  		cudaMalloc((void **) &a_d, size);   
	  		// Seed the host buffer with 0..N-1 and mirror it on the device.
	  		// (Original used an undeclared loop variable `i`.)
	  		for (int i = 0; i < N; i++) a_h[i] = (float)i;
	  		cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
	  		block_size = 4;
	  		// Ceiling division: add one partial block when N is not an
	  		// exact multiple of block_size.
	  		n_blocks = N/block_size + (N%block_size == 0 ? 0:1);
	  		// BUG FIX: the original constructor called free(a_h) and
	  		// cudaFree(a_d) here, so every map() call ran the kernel on
	  		// freed memory. The buffers are now released in the destructor.
  		}

  		// Destructor: release the buffers allocated in the constructor.
  		~WordCountMapper()
  		{
	  		free(a_h);
	  		cudaFree(a_d);
  		}
 
  	// map function: squares the device array with the square_array kernel
  	// (defined in 5.cu) and emits each resulting value paired with "1".
  	void map( HadoopPipes::MapContext& context ) 
	{
    		//--- get line of text (currently unused by the CUDA path) ---
    		string line = context.getInputValue();

		/* Adarsh's CUDA-specific code */
		square_array <<< n_blocks, block_size >>> (a_d, N);
		// Surface launch-configuration errors; the blocking cudaMemcpy
		// below also synchronizes with (and surfaces faults from) the
		// kernel itself.
		cudaGetLastError();
		cudaMemcpy(a_h, a_d, size, cudaMemcpyDeviceToHost);

    		//--- emit each tuple (value, "1") ---
    		// BUG FIX: the original looped while i < size (the BYTE count,
    		// 40), reading far past the 10-element host array. Loop over
    		// the element count N instead.
    		for ( int i = 0; i < N; i++ ) 
		{
      			context.emit( HadoopUtils::toString(a_h[i]), HadoopUtils::toString( 1 ) );
    		}
  	}
};
 
class WordCountReducer : public HadoopPipes::Reducer {
public:
  // Constructor: no per-task state to set up.
  WordCountReducer(HadoopPipes::TaskContext& context) {}

  // reduce function: sums every "1" emitted for the current key and
  // emits a single (key, total) pair.
  void reduce( HadoopPipes::ReduceContext& context ) {
    int total = 0;

    //--- accumulate all values that share the current key ---
    while ( context.nextValue() )
      total += HadoopUtils::toInt( context.getInputValue() );

    //--- emit (word, count) ---
    context.emit( context.getInputKey(), HadoopUtils::toString( total ) );
  }
};
 
// Entry point: hand control to the Hadoop Pipes runtime, supplying a
// factory that builds our mapper and reducer for each task.
int main(int argc, char *argv[]) {
  HadoopPipes::TemplateFactory<WordCountMapper, WordCountReducer> factory;
  return HadoopPipes::runTask(factory);
}

Reply via email to