Dear all,

I want to compute the scalar product between rows of a
TrilinosWrappers::SparseMatrix and a TrilinosWrappers::MPI::Vector.
I implemented this by looping over the entries of each row via iterators
(it = systemMatrix.begin(row), itend = systemMatrix.end(row),
then while (it != itend) { ... ++it; }).
Inside the while loop, I multiply it->value() (the matrix entry) by
vec(it->column()) (the corresponding vector entry) and sum up the products.
I compute this scalar product for a set of rows, store the results in a
FullMatrix, and finally write that matrix to a file from the master process
after gathering the local FullMatrix objects from all processors.
Attached is a small example which demonstrates this in practice.
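
In condensed form, the loop is (a sketch only, reusing the names
systemMatrix and vec from the attached example; the helper function is just
for illustration):

  double rowTimesVector(const dealii::TrilinosWrappers::SparseMatrix &systemMatrix,
                        const dealii::TrilinosWrappers::MPI::Vector  &vec,
                        const unsigned int                            row)
  {
    double sum   = 0.0;
    auto   it    = systemMatrix.begin(row);
    auto   itend = systemMatrix.end(row);
    while (it != itend)
    {
      // matrix entry times the corresponding vector entry
      // (in the attached example this read access is guarded, see below)
      sum += it->value() * vec(it->column());
      ++it;
    }
    return sum;
  }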

In a parallel program, vec(it->column()) only works if it->column() is a
locally owned element of the vector, so I wrapped this read access in
if (vec.in_local_range(it->column())).
Another problem is the following:
Say row 5 lives on processor 0 and has non-zero entries at columns
10, 15, 20, 25, 30. Columns 10, 15, 20 live on processor 0 as well,
but columns 25 and 30 live on processor 1.
Then I will never add the contributions of columns 25 and 30 to the scalar
product: on processor 0 the in_local_range check skips them, and on
processor 1, which owns those vector entries,
systemMatrix.begin(5) == systemMatrix.end(5),
which is also stated in the documentation.
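
Put into code, with the numbers from this example, the situation is
(again a sketch only, not a solution):

  double sum = 0.0;
  // row 5 is stored on processor 0 only; on processor 1,
  // systemMatrix.begin(5) == systemMatrix.end(5), so the loop never runs there
  for (auto it = systemMatrix.begin(5); it != systemMatrix.end(5); ++it)
  {
    // on processor 0 this is true for columns 10, 15, 20 but false for
    // columns 25 and 30, so their contributions are dropped; processor 1,
    // which owns those vector entries, never visits row 5 at all
    if (vec.in_local_range(it->column()))
      sum += it->value() * vec(it->column());
  }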

How can I deal with this?


Best,
Simon

#include <deal.II/lac/trilinos_vector.h>
#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/utilities.h>

#include <deal.II/dofs/dof_handler.h>

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/grid/grid_generator.h>

#include <deal.II/fe/fe_q.h>

#include <iostream>
#include <stdlib.h>
#include <fstream>
#include <numeric>

namespace dealiiLA
{
  using namespace dealii::TrilinosWrappers;
} // namespace dealiiLA

int main(int argc, char *argv[])
{
    
    const unsigned int                    masterProcessRank = 0;
    dealii::Utilities::MPI::MPI_InitFinalize mpiInitialization(argc, argv, 1);
    const MPI_Comm mpiCommunicator = MPI_COMM_WORLD;
    const unsigned int processorRank =
      dealii::Utilities::MPI::this_mpi_process(mpiCommunicator);
    dealii::ConditionalOStream pCout(std::cout,
                                     processorRank == masterProcessRank);
    
    // make grid
    dealii::parallel::distributed::Triangulation<3> tria(mpiCommunicator);
    dealii::GridGenerator::hyper_cube(tria);
    tria.refine_global(2);
    
    // distribute dofs
    dealii::DoFHandler<3> dofHandler(tria);
    dofHandler.distribute_dofs(dealii::FE_Q<3>(1));

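    // index sets of the DoFs owned by / relevant to this processor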
    dealii::IndexSet locallyOwnedDofs = dofHandler.locally_owned_dofs();
    dealii::IndexSet locallyRelevantDofs;
    dealii::DoFTools::extract_locally_relevant_dofs(dofHandler,
                                                    locallyRelevantDofs);

    dealii::AffineConstraints<double> constraintMatrix;
    constraintMatrix.clear();
    constraintMatrix.reinit(locallyRelevantDofs);
    dealii::DoFTools::make_hanging_node_constraints(dofHandler,
                                                    constraintMatrix);
    constraintMatrix.close();

    // sparse matrix and sparsity pattern
    dealiiLA::SparsityPattern sparsityPattern;
    sparsityPattern.reinit(locallyOwnedDofs, mpiCommunicator);
    dealii::DoFTools::make_sparsity_pattern(dofHandler,
                                            sparsityPattern,
                                            constraintMatrix,
                                            false,
                                            processorRank);
    sparsityPattern.compress();

    dealiiLA::SparseMatrix systemMatrix;
    systemMatrix.reinit(sparsityPattern);
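    // note: the matrix is never assembled in this example, so all stored
    // entries are zero (this is presumably why the loop below adds 1 to
    // each matrix entry before multiplying)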
    
    // create dummy vector
    dealii::TrilinosWrappers::MPI::Vector vec;
    vec.reinit(locallyOwnedDofs, mpiCommunicator);
    // fill each locally owned entry with its global index
    for (const auto i : locallyOwnedDofs)
      vec(i) = i;

    vec.compress(dealii::VectorOperation::insert);

    std::vector<unsigned int> rows{10, 40, 70, 100, 120}; //nDoFs = 125
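    // one row per selected matrix row, one column holding the scalar product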
    dealii::FullMatrix<double> matrix(rows.size(), 1);
    // populate fullmatrix
    for(unsigned int i = 0; i < rows.size(); ++i)
    {
      unsigned int idx = rows[i];
      auto it = systemMatrix.begin(idx);
      auto itend = systemMatrix.end(idx);
      // loop over row idx (first contribution)
      while (it != itend)
      {
        // only columns locally owned by vec can be read here; contributions
        // from off-processor columns are silently skipped
        matrix(i, 0) += vec.in_local_range(it->column()) ?
                          (it->value() + 1) * vec(it->column()) :
                          0.0;
        ++it;
      }
      // second contribution
      if(vec.in_local_range(idx))
        matrix(i, 0) += vec(idx);
    }

    // send local matrices to processor 0
    std::vector<dealii::FullMatrix<double>> gathered = 
      dealii::Utilities::MPI::gather(mpiCommunicator, matrix, 0);

    // write the matrix from processor 0
    if(processorRank == 0)
    {
      dealii::FullMatrix<double> sum(gathered[0].m(), gathered[0].n());
      for(const auto & matrix : gathered)
      {
        sum.add(1.0, matrix);
      }

      std::ofstream myMatrix("./myMatrix.txt");
      sum.print_formatted(myMatrix, 1, false);
    }

  return 0;
}
