Dear Altro,
I would simply use a wrapper around a C++ std::vector who holds as many
C++11 random number generators (engines) as you use threads in your code.
By calling a random number, the engine in the ith index of the vector is
used, where i is the thread id.
Please see the appended minimal example mb.cpp with the wrapper
rng-omp.{h,cpp}.
Best wishes and keep us informed,
-- Klaus.
> Dear All
>
> I wish to effectively use the GSL random number generator jointly
> with OpenMP on an HPC cluster.
>
> The simulations I have to do are quite simple: just let an
> "ensemble" of systems with the same dynamics evolve. Parallelizing such
> a code with OpenMP "seems" straightforward.
> As the GSL does not support parallel processing, I suppose that we must
> use a different random number generator for each element of the ensemble;
> Indeed I don't know if this is a good procedure.
>
> The ensemble could be very big and it would be simply impossible to
> allocate memory for all generators associated with the Systems of the
> ensemble.
> Then my question is:
> Is this still a correct way to use the GSL random generator on OpenMP?
>
> An idea of how the allocation could be done is given in the code below.
> Does it make any sense? If not, which is the correct way?
>
> Best wishes,
>
> Al.
>
>
>
> #include <stdlib.h>
> #include <stdio.h>
> #include <gsl/gsl_rng.h>
> #include <gsl/gsl_randist.h>
> #include <omp.h>
>
> int main (){
> int i;
>
> int DIM_ENSEMBLE = 10000000000;
>
> double *d;
> gsl_rng **r ;
>
> d = malloc(DIM_ENSEMBLE*sizeof(double));
> r = malloc(DIM_ENSEMBLE*sizeof(gsl_rng*));
> for (i=0;i<DIM_ENSEMBLE;i++){
> r[i] = gsl_rng_alloc (gsl_rng_mt19937);
> // r[i] = gsl_rng_alloc (gsl_rng_taus);
> gsl_rng_set(r[i],i);
> }
>
>
> size_t n = gsl_rng_size (r[0]); //size in byte (the size of
> tausworthe is 24 ; the size of mersenne twister is 5000)
>
> #pragma omp parallel for default(none) shared(r,d) shared(DIM_ENSEMBLE)
> for (i =0; i< DIM_ENSEMBLE; i++) {
> d[i] = gsl_rng_uniform(r[i]);
> }
>
> #pragma omp parallel for default(none) shared(d) shared(DIM_ENSEMBLE)
> for (i=0;i<DIM_ENSEMBLE;i++){
> printf("%d %f\n",i,d[i]);
> }
>
> printf("The size in Mb of the vector r is %lu Mb\n", n*DIM_ENSEMBLE/1000000);
>
> free(d);
>
> for (i=0;i<DIM_ENSEMBLE;i++){
> gsl_rng_free (r[i]);
> }
> free(r);
>
> return 0;
> }
>
>
#include <iostream>
#include "rng-omp.h"
/* Demo: one RNG engine per OpenMP thread; each loop iteration prints the
 * thread id and a uniform random number drawn from that thread's engine. */
int main()
{
    int const n_threads = 4;
    omp_set_num_threads( n_threads );
    RNG rng( n_threads );                 // one independently seeded engine per thread
    int const DIM_ENSEMBLE = 10;
#pragma omp parallel for
    for(int i = 0; i < DIM_ENSEMBLE; ++i)
    {
        // Draw OUTSIDE the critical section: rng() uses the calling thread's
        // own engine and is safe to call concurrently, so serializing the
        // draw would defeat the parallelism this example demonstrates.
        // Only the (unsynchronized) std::cout access needs the critical.
        double const x = rng();
#pragma omp critical
        std::cout << "hello from thread " << omp_get_thread_num()
                  << " with random number " << x << '\n';  // '\n' avoids a flush per line
    }
}/* rng-omp.h */
#ifndef RNG_H
#define RNG_H
#include <iostream>
#include <random>
#include <vector>
#include "omp.h"
/* Thread-aware random number source: holds one std::mt19937 engine per
 * OpenMP thread so concurrent calls never touch the same engine.
 * Copying and moving are disabled, since the engine vector is tied to the
 * thread count fixed at construction time. */
class RNG
{
public:
    /* Build one engine per thread; the argument is the number of threads. */
    RNG(unsigned int);
    /* Copy/move are deliberately forbidden. */
    RNG(const RNG&) = delete;
    RNG& operator=(const RNG&) = delete;
    RNG(RNG&&) = delete;
    RNG& operator=(RNG&&) = delete;
    /* Draw a uniform double from the calling thread's engine. */
    auto operator()() -> double;
private:
    std::vector<std::mt19937> _engines;                    // one engine per thread
    std::uniform_real_distribution<double> _distribution;  // shared distribution object
};
#endif
/* End Of File *//* rng-omp.cpp */
#include "rng-omp.h"
/* constructor */
/* Constructor: create one independently seeded mt19937 engine per thread.
 * Engine i is seeded with i+1 so all seeds differ and none is 0; the
 * distribution yields uniform doubles in [0,1). */
RNG::RNG(unsigned int threads): _distribution( 0.0 , 1.0 )
{
    _engines.reserve( threads );        // one allocation instead of repeated regrowth
    for(unsigned int i = 0; i < threads; ++i)
        _engines.emplace_back( i + 1 ); // construct the engine in place, seed = i+1
}
/* get uniform distributed random number in (0,1) */
/* Return a uniformly distributed double in [0,1), produced by the engine
 * belonging to the calling OpenMP thread (thread id == vector index).
 * NOTE(review): assumes omp_get_thread_num() is always smaller than the
 * thread count passed to the constructor — confirm at the call sites. */
auto RNG::operator()() -> double
{
    int const tid = omp_get_thread_num();   // 0-based id of the calling thread
    return _distribution( _engines[ tid ] );
}
/* End of File */