Hi Michael,
I am studying a solution to avoid repeating the same code for different types.
Please have a look at this:
#include "simple.h"
#include <ov-cell.h> // avoid errmsg "cell -- incomplete datatype"
#include <oct-map.h> // avoid errmsg "Oct.map -- invalid use undef type"
enum ov_t_id
{
.....
};
int send_class (MPI_Comm comm, octave_value ov, ColumnVector rankrec, int mytag); /* along the datatype */
template <class Any>
int send_scalar(int t_id, MPI_Comm comm, Any d, ColumnVector rankrec, int
mytag);
template <class Any>
int send_scalar(int t_id, MPI_Comm comm, Any d, ColumnVector rankrec, int
mytag){
int info;
OCTAVE_LOCAL_BUFFER(int,tanktag,2);
tanktag[0] = mytag;
tanktag[1] = mytag+1;
MPI_Datatype TSnd;
  /* map the Octave type id onto the matching MPI datatype; each case
     needs its own break so the assignments do not fall through */
  switch (t_id) {
    case ov_scalar:        TSnd = MPI_DOUBLE;             break;
    case ov_bool:          TSnd = MPI_INT;                break;
    case ov_float_scalar:  TSnd = MPI_FLOAT;              break;
    case ov_int8_scalar:   TSnd = MPI_BYTE;               break;
    case ov_int16_scalar:  TSnd = MPI_SHORT;              break;
    case ov_int32_scalar:  TSnd = MPI_INT;                break;
    case ov_int64_scalar:  TSnd = MPI_LONG_LONG;          break;
    case ov_uint8_scalar:  TSnd = MPI_UNSIGNED_CHAR;      break;
    case ov_uint16_scalar: TSnd = MPI_UNSIGNED_SHORT;     break;
    case ov_uint32_scalar: TSnd = MPI_UNSIGNED;           break;
    case ov_uint64_scalar: TSnd = MPI_UNSIGNED_LONG_LONG; break;
  }
  for (octave_idx_type i = 0; i < rankrec.nelem (); i++)
    {
      /* first message: the type id, so the receiver knows what follows */
      info = MPI_Send (&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
      if (info != MPI_SUCCESS)
        return info;
      /* second message: the scalar payload itself */
      info = MPI_Send (&d, 1, TSnd, rankrec(i), tanktag[1], comm);
      if (info != MPI_SUCCESS)
        return info;
    }
  return MPI_SUCCESS;
}
template <class Any>
int send_scalar(int t_id, MPI_Comm comm, Any rpart, Any impart, ColumnVector
rankrec, int mytag);
template <class Any>
int send_scalar(int t_id, MPI_Comm comm, Any rpart, Any impart, ColumnVector
rankrec, int mytag){
int info;
OCTAVE_LOCAL_BUFFER(int,tanktag,2);
tanktag[0] = mytag;
tanktag[1] = mytag+1;
  /* pack the real and imaginary parts together; the buffer must be of
     type Any (double or float), not int, or both parts get truncated */
  OCTAVE_LOCAL_BUFFER (Any, Comp, 2);
  Comp[0] = rpart;
  Comp[1] = impart;
MPI_Datatype TSnd;
  switch (t_id) {
    case ov_complex_scalar:       TSnd = MPI_DOUBLE; break;
    case ov_float_complex_scalar: TSnd = MPI_FLOAT;  break;
  }
  for (octave_idx_type i = 0; i < rankrec.nelem (); i++)
    {
      info = MPI_Send (&t_id, 1, MPI_INT, rankrec(i), tanktag[0], comm);
      if (info != MPI_SUCCESS)
        return info;
      /* Comp is already a pointer, so it is passed directly, without & */
      info = MPI_Send (Comp, 2, TSnd, rankrec(i), tanktag[1], comm);
      if (info != MPI_SUCCESS)
        return info;
    }
  return MPI_SUCCESS;
}
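On the receiving side I would mirror the same two-message protocol: the
type id first, then the typed payload. A minimal sketch of what I have in
mind; recv_scalar and its TRcv parameter are only illustrative, and the
t_id-to-datatype mapping would reuse the same switch as send_scalar:

/* hypothetical receive counterpart -- illustrative only */
template <class Any>
int recv_scalar (MPI_Comm comm, MPI_Datatype TRcv, Any &d, int source, int mytag)
{
  int t_id;
  MPI_Status stat;
  /* first pick up the type id sent with tag mytag */
  int info = MPI_Recv (&t_id, 1, MPI_INT, source, mytag, comm, &stat);
  if (info != MPI_SUCCESS)
    return info;
  /* then receive the payload sent with tag mytag + 1 */
  return MPI_Recv (&d, 1, TRcv, source, mytag + 1, comm, &stat);
}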
int send_class (MPI_Comm comm, octave_value ov, ColumnVector rankrec, int mytag)
{
  /* general wire format: varname strlength first, dims[ndim], and then
     the appropriate class-specific info */
  int t_id = ov.type_id ();
  /* dispatch on the runtime type id to the right template instantiation */
  switch (t_id) {
    case ov_scalar:        return send_scalar (t_id, comm, ov.scalar_value (), rankrec, mytag);
    case ov_int8_scalar:   return send_scalar (t_id, comm, ov.int8_scalar_value (), rankrec, mytag);
    case ov_int16_scalar:  return send_scalar (t_id, comm, ov.int16_scalar_value (), rankrec, mytag);
    case ov_int32_scalar:  return send_scalar (t_id, comm, ov.int32_scalar_value (), rankrec, mytag);
    case ov_int64_scalar:  return send_scalar (t_id, comm, ov.int64_scalar_value (), rankrec, mytag);
    case ov_uint8_scalar:  return send_scalar (t_id, comm, ov.uint8_scalar_value (), rankrec, mytag);
    case ov_uint16_scalar: return send_scalar (t_id, comm, ov.uint16_scalar_value (), rankrec, mytag);
    case ov_uint32_scalar: return send_scalar (t_id, comm, ov.uint32_scalar_value (), rankrec, mytag);
    case ov_uint64_scalar: return send_scalar (t_id, comm, ov.uint64_scalar_value (), rankrec, mytag);
    case ov_bool:          return send_scalar (t_id, comm, ov.int_value (), rankrec, mytag);
    case ov_float_scalar:  return send_scalar (t_id, comm, ov.float_value (), rankrec, mytag);
    case ov_complex_scalar:
      {
        double rpart = real (ov.complex_value ());
        double impart = imag (ov.complex_value ());
        return send_scalar (t_id, comm, rpart, impart, rankrec, mytag);
      }
    case ov_float_complex_scalar:
      {
        std::complex<float> c = ov.float_complex_value ();
        float frpart = real (c);
        float fimag = imag (c);
        return send_scalar (t_id, comm, frpart, fimag, rankrec, mytag);
      }
  }
  return MPI_ERR_TYPE; /* unsupported type id */
}
DEFUN_DLD(MPI_Snd_Scalar, args, nargout, "MPI_Snd_Scalar sends any scalar (int, double, float, etc.) as contiguous memory using the Open MPI library, even over a heterogeneous cluster, e.g. one mixing 32-bit and 64-bit CPUs.\n")
{
octave_value retval;
int nargin = args.length ();
if (nargin != 4)
{
error ("expecting 4 input arguments");
return retval;
}
if (error_state)
return retval;
ColumnVector tankrank = args(1).column_vector_value();
if (error_state)
{
error ("expecting second argument to be a column vector");
return retval;
}
int mytag = args(2).int_value();
if (error_state)
{
error ("expecting third vector argument to be an integer value");
return retval;
}
if (!simple_type_loaded)
{
simple::register_type ();
simple_type_loaded = true;
mlock ();
}
if (args(3).type_id () != simple::static_type_id ())
{
error ("Please enter an Octave communicator object!");
return octave_value (-1);
}
const octave_base_value& rep = args(3).get_rep ();
const simple& B = (const simple&) rep;
MPI_Comm comm = B.comunicator_value ();
int info = send_class (comm, args(0), tankrank, mytag);
retval = info;
return retval;
}
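For completeness, this is roughly how send_class could be exercised from
C++ test code; a sketch only, assuming MPI_Init has already run and rank 1
is a valid destination in the communicator:

/* illustrative driver, not part of the code above */
octave_value ov (3.14);               /* type id ov_scalar */
ColumnVector rankrec (1);
rankrec(0) = 1;                       /* destination rank */
int info = send_class (MPI_COMM_WORLD, ov, rankrec, 100);

From the Octave prompt the equivalent call would be something like
info = MPI_Snd_Scalar (3.14, 1, 100, comm_obj), where comm_obj is the
'simple' communicator object (the names here are illustrative).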
Are there any comments on this?
It seems like a good strategy, but I would like to hear your opinions before going further.
Thanks a lot to all of you.
Respectfully,
Riccardo