I'm extending step-81, the time-harmonic Maxwell problem, to 3D, but the result is not correct. I have tested both the Nedelec and NedelecSZ elements. All physical formulas are kept the same in 2D and 3D, with an absorbing boundary condition instead of a PML; yet the 2D test is correct even with little distance between the source and the boundary (1.5 wavelengths) and low resolution (6 cells per wavelength). Is there a tutorial on electromagnetics in three dimensions?
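For reference, the variational problem both programs assemble (following step-81, but with a first-order absorbing boundary condition in place of the PML) is: find E such that, for all test functions \varphi,

  \int_\Omega (\mu^{-1}\,\nabla\times E)\cdot(\nabla\times\bar\varphi)
    - k_0^2\,\varepsilon\, E\cdot\bar\varphi \,dx
  \;+\; i k_0 \oint_{\partial\Omega} (n\times E)\cdot(n\times\bar\varphi)\,ds
  \;=\; -\, i k_0 Z_0 \int_\Omega J_a\cdot\bar\varphi \,dx,

with k_0 = \omega/c = 2\pi f/c and Z_0 the free-space impedance. (This is my reading of the assembled terms below; the 2d code writes the boundary integrand with the scalar 2d cross product, the 3d code with the tangential traces, which should be equivalent.)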
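The 2D version (FE_NedelecSZ on a ball), which gives the expected result: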
/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2021 - 2022 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE.md at
 * the top level directory of deal.II.
 *
 * ---------------------------------------------------------------------
 *
 * Author: Marc Fehling, Colorado State University, 2021
 *         Peter Munch, Technical University of Munich and Helmholtz-Zentrum
 *         hereon, 2021
 *         Wolfgang Bangerth, Colorado State University, 2021
 */

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_out.h>

#include <deal.II/fe/fe_nedelec_sz.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_series.h>
#include <deal.II/fe/fe_system.h>

#include <deal.II/hp/fe_collection.h>
#include <deal.II/hp/refinement.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/sparse_direct.h>

#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>
#include <deal.II/numerics/smoothness_estimator.h>
#include <deal.II/numerics/vector_tools.h>

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>

// For load balancing we will assign individual weights on cells, and for that
// we will use the class parallel::CellWeights.
#include <deal.II/distributed/cell_weights.h>

// The solution function requires a transformation from Cartesian to polar
// coordinates. The GeometricUtilities::Coordinates namespace provides the
// necessary tools.
#include <deal.II/base/function.h>
#include <deal.II/base/geometric_utilities.h>

// The following include files will enable the MatrixFree functionality.
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/matrix_free/tools.h>

// We will use LinearAlgebra::distributed::Vector for linear algebra operations.
#include <deal.II/lac/la_parallel_vector.h>

// We are left to include the files needed by the multigrid solver.
#include <deal.II/multigrid/mg_coarse.h>
#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/multigrid/mg_matrix.h>
#include <deal.II/multigrid/mg_smoother.h>
#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>
#include <deal.II/multigrid/multigrid.h>

// Parenthesized so the macro expands safely; currently unused below.
#define OMEGA (1.0 / 4 * M_PI * 1e9)
#define C_CONST 3e8

namespace Nodal_EM
{
  using namespace dealii;

  const unsigned int fe_degree = 0;

  // Current source: a smooth dipole of radius 0.2 centered at the origin.
  template <int dim>
  Tensor<1, dim, std::complex<double>>
  J_a_func(const Point<dim> &point, types::material_id /*id*/)
  {
    Tensor<1, dim, std::complex<double>> J_a;

    double dipole_radius = 0.2;
    // Point<dim>    dipole_position(0., 0., 0.);
    // Tensor<1,dim> dipole_orientation{{0., 0., 1.}};
    Point<dim>           dipole_position(0., 0.);
    Tensor<1, dim>       dipole_orientation{{0., 1.}};
    std::complex<double> dipole_strength = 1;

    const auto distance = (dipole_position - point).norm() / dipole_radius;
    if (distance > 1.)
      return J_a;

    double scale = std::cos(distance * M_PI / 2.) *
                   std::cos(distance * M_PI / 2.) /
                   (M_PI / 2. - 2. / M_PI) / dipole_radius / dipole_radius;
    J_a = dipole_strength * dipole_orientation * scale;
    return J_a;
  }

  template <int dim>
  DEAL_II_ALWAYS_INLINE inline Tensor<1, dim, std::complex<double>>
  tangential_part(const Tensor<1, dim, std::complex<double>> &tensor,
                  const Tensor<1, dim> &                      normal)
  {
    auto result = tensor;
    result[0] = normal[1] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    result[1] = -normal[0] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // The result in 2d is -n x (n x tensor); the z component is zero.
    return result;
    // result[0] = normal[1]*tensor[2] - normal[2]*tensor[1];
    // result[1] = -(normal[0]*tensor[2] - normal[2]*tensor[0]);
    // result[2] = normal[0]*tensor[1] - normal[1]*tensor[0];
    // return result;
  }

  // n x tensor. Note: the dim == 2 branch indexes tensor[2]/normal[2], which
  // is out of range for a Tensor<1, 2>; this function is unused in the 2d
  // assembly below (cross_2d is used instead).
  template <int dim>
  DEAL_II_ALWAYS_INLINE inline Tensor<1, dim, std::complex<double>>
  cross(const Tensor<1, dim> &                      normal,
        const Tensor<1, dim, std::complex<double>> &tensor)
  {
    auto result = tensor;
    // result[0] = normal[1] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // result[1] = -normal[0] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // return result;
    if (dim == 2)
      {
        result[0] = normal[1] * tensor[2] - normal[2] * tensor[1];
        result[1] = -(normal[0] * tensor[2] - normal[2] * tensor[0]);
      }
    else if (dim == 3)
      {
        result[0] = normal[1] * tensor[2] - normal[2] * tensor[1];
        result[1] = -(normal[0] * tensor[2] - normal[2] * tensor[0]);
        result[2] = normal[0] * tensor[1] - normal[1] * tensor[0];
      }
    return result;
  }

  // The 2d scalar cross product n x tensor (z component only).
  template <int dim>
  DEAL_II_ALWAYS_INLINE inline Tensor<1, 1, std::complex<double>>
  cross_2d(const Tensor<1, dim> &                      normal,
           const Tensor<1, dim, std::complex<double>> &tensor)
  {
    auto result = Tensor<1, 1, std::complex<double>>(); // z direction
    result[0] = normal[0] * tensor[1] - normal[1] * tensor[0];
    return result;
  }

  template <int dim>
  class LaplaceProblem
  {
  public:
    LaplaceProblem();
    void run();

  private:
    void make_grid();
    void setup_system();
    void assemble_system();
    void solve_system();
    void output_results();

    MPI_Comm mpi_communicator;

    // parallel::distributed::Triangulation<dim> triangulation;
    Triangulation<dim> triangulation;
    DoFHandler<dim>    dof_handler;
    FESystem<dim>      fe;

    AffineConstraints<double> constraints;

    // SparsityPattern      sparsity_pattern;
    // SparseMatrix<double> system_matrix;
    // Vector<double>       system_rhs;
    // Vector<double>       solution;
    LinearAlgebraPETSc::MPI::SparseMatrix system_matrix;
    LinearAlgebraPETSc::MPI::Vector       locally_relevant_solution;
    LinearAlgebraPETSc::MPI::Vector       system_rhs;
  };

  template <int dim>
  LaplaceProblem<dim>::LaplaceProblem()
    : mpi_communicator(MPI_COMM_WORLD)
    , dof_handler(triangulation)
    // , fe(FE_Q<dim>(/*degree*/ fe_degree), 2 * dim)
    , fe(FE_NedelecSZ<dim>(fe_degree), 2)
  {}

  template <int dim>
  void LaplaceProblem<dim>::make_grid()
  {
    const double length          = 3;
    const double wave_per_lambda = 15.;
    // GridGenerator::subdivided_hyper_cube(triangulation,
    //                                      2 * length * wave_per_lambda,
    //                                      -length,
    //                                      length);
    GridGenerator::hyper_ball_balanced(triangulation, Point<dim>(), length);
    for (auto &cell : triangulation.cell_iterators())
      for (const auto &face : cell->face_iterators())
        {
          // face->set_boundary_id(2);
          if (face->at_boundary())
            face->set_boundary_id(1);
        }
    triangulation.refine_global(5);

    std::ofstream out_mesh("grid.vtk");
    GridOut       grid_out;
    grid_out.write_vtk(triangulation, out_mesh);
  }

  template <int dim>
  void LaplaceProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
    std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl;
    std::cout << "Number of edges : " << triangulation.n_lines() << std::endl;

    IndexSet locally_owned_dofs;
    IndexSet locally_relevant_dofs;
    locally_owned_dofs    = dof_handler.locally_owned_dofs();
    locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);

    locally_relevant_solution.reinit(locally_owned_dofs,
                                     locally_relevant_dofs,
                                     mpi_communicator);
    system_rhs.reinit(locally_owned_dofs, mpi_communicator);
    // solution.reinit(dof_handler.n_locally_owned_dofs());
    // system_rhs.reinit(dof_handler.n_locally_owned_dofs());

    // const IndexSet locally_relevant_dofs =
    //   DoFTools::extract_locally_relevant_dofs(dof_handler);
    constraints.reinit(locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    // Note: make_grid() sets boundary id 1 on all boundary faces, so these
    // projections with boundary id 0 constrain nothing here; the boundary is
    // handled by the ABC face integral in assemble_system().
    VectorTools::project_boundary_values_curl_conforming_l2(
      dof_handler,
      0, /*real part*/
      Functions::ZeroFunction<dim>(2 * dim),
      0, /*boundary id*/
      constraints,
      MappingQ1<dim>());
    VectorTools::project_boundary_values_curl_conforming_l2(
      dof_handler,
      dim, /*imaginary part*/
      Functions::ZeroFunction<dim>(2 * dim),
      0, /*boundary id*/
      constraints,
      MappingQ1<dim>());
    constraints.close();

    {
      // DynamicSparsityPattern dsp(locally_owned_dofs);
      // DoFTools::make_sparsity_pattern(dof_handler,
      //                                 dsp,
      //                                 constraints,
      //                                 /* keep_constrained_dofs = */ true);
      // sparsity_pattern.copy_from(dsp);
      // system_matrix.reinit(sparsity_pattern);
    }
    {
      DynamicSparsityPattern dsp(locally_owned_dofs);
      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, true);
      SparsityTools::distribute_sparsity_pattern(dsp,
                                                 locally_owned_dofs,
                                                 mpi_communicator,
                                                 locally_relevant_dofs);
      system_matrix.reinit(locally_owned_dofs,
                           locally_owned_dofs,
                           dsp,
                           mpi_communicator);
    }
  }

  template <int dim>
  void LaplaceProblem<dim>::assemble_system()
  {
    QGauss<dim>     quadrature_formula(fe_degree + 1);
    QGauss<dim - 1> face_quadrature_formula(fe_degree + 1);

    FEValues<dim, dim>     fe_values(fe,
                                 quadrature_formula,
                                 update_values | update_gradients |
                                   update_quadrature_points |
                                   update_JxW_values);
    FEFaceValues<dim, dim> fe_face_values(fe,
                                          face_quadrature_formula,
                                          update_values | update_gradients |
                                            update_quadrature_points |
                                            update_normal_vectors |
                                            update_JxW_values);

    const unsigned int dofs_per_cell   = fe.dofs_per_cell;
    const unsigned int n_q_points      = quadrature_formula.size();
    const unsigned int n_face_q_points = face_quadrature_formula.size();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);
    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    double frequency = 0.3e9;
    double c_const   = 3e8;
    double k0        =
      2 * M_PI * frequency / c_const;
    double Z0      = 376.73031;
    double mu_inv  = 1.;
    double epsilon = 1.;

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        FEValuesViews::Vector<dim> real_part(fe_values, 0);
        FEValuesViews::Vector<dim> imag_part(fe_values, dim);

        cell_matrix = 0.;
        cell_rhs    = 0.;
        cell->get_dof_indices(local_dof_indices);
        const auto  id                = cell->material_id();
        const auto &quadrature_points = fe_values.get_quadrature_points();

        for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
          {
            const Point<dim> &position = quadrature_points[q_point];
            const auto        J_a      = J_a_func(position, id);

            for (const auto i : fe_values.dof_indices())
              {
                constexpr std::complex<double> imag{0., 1.};
                const auto phi_i = real_part.value(i, q_point) -
                                   imag * imag_part.value(i, q_point);
                const auto curl_phi_i = real_part.curl(i, q_point) -
                                        imag * imag_part.curl(i, q_point);
                const auto rhs_value =
                  -k0 * Z0 * (imag * scalar_product(J_a, phi_i)) *
                  fe_values.JxW(q_point);
                cell_rhs(i) += rhs_value.real();

                for (const auto j : fe_values.dof_indices())
                  {
                    const auto phi_j = real_part.value(j, q_point) +
                                       imag * imag_part.value(j, q_point);
                    const auto curl_phi_j = real_part.curl(j, q_point) +
                                            imag * imag_part.curl(j, q_point);
                    const auto temp =
                      (scalar_product(mu_inv * curl_phi_j, curl_phi_i) -
                       k0 * k0 * scalar_product(epsilon * phi_j, phi_i)) *
                      fe_values.JxW(q_point);
                    cell_matrix(i, j) += temp.real();
                  }
              }
          }

        // Face terms: first-order absorbing boundary condition.
        for (const auto &face : cell->face_iterators())
          {
            if (face->at_boundary())
              {
                // const auto id = face->boundary_id();
                fe_face_values.reinit(cell, face);
                FEValuesViews::Vector<dim> real_part(fe_face_values, 0);
                FEValuesViews::Vector<dim> imag_part(fe_face_values, dim);

                for (unsigned int q_point = 0; q_point < n_face_q_points;
                     ++q_point)
                  {
                    // const auto &position = quadrature_points[q_point];
                    const auto normal = fe_face_values.normal_vector(q_point);

                    for (const auto i : fe_face_values.dof_indices())
                      {
                        constexpr std::complex<double> imag{0., 1.};
                        const auto phi_i = real_part.value(i, q_point) -
                                           imag * imag_part.value(i, q_point);
                        const auto nxphi_i = cross_2d(normal, phi_i);
                        // const auto nxnxphi_i = cross_2d(normal, phi_i);
                        // const auto phi_i_T = tangential_part(phi_i, normal);
                        // const auto phi_i_T =
                        //   cross(normal, cross(normal, phi_i));

                        for (const auto j : fe_face_values.dof_indices())
                          {
                            const auto phi_j =
                              real_part.value(j, q_point) +
                              imag * imag_part.value(j, q_point);
                            const auto nxphi_j = cross_2d(normal, phi_j);
                            // const auto phi_j_T =
                            //   tangential_part(phi_j, normal);
                            // const auto prod      = mu_inv * epsilon;
                            // const auto sqrt_prod = prod;
                            const auto temp = imag * k0 * (nxphi_j * nxphi_i) *
                                              fe_face_values.JxW(q_point);
                            // The sign here only determines the outgoing
                            // direction of the waves: only the ABC face
                            // integral and the current integral have
                            // imaginary parts, so a wrong ABC sign is
                            // equivalent to an inverted current direction.
                            // const auto temp =
                            //   imag * k0 * scalar_product(phi_j_T, phi_i_T) *
                            //   fe_face_values.JxW(q_point);
                            cell_matrix(i, j) += temp.real();
                          } /* j */
                      }     /* i */
                  }
              }
          }

        cell->get_dof_indices(local_dof_indices);
        // for (const unsigned int i : fe_values.dof_indices())
        //   for (const unsigned int j : fe_values.dof_indices())
        //     system_matrix.add(local_dof_indices[i],
        //                       local_dof_indices[j],
        //                       cell_matrix(i, j));
        // for (const unsigned int i : fe_values.dof_indices())
        //   system_rhs(local_dof_indices[i]) += cell_rhs(i);
        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }
    system_matrix.compress(VectorOperation::add);
    system_rhs.compress(VectorOperation::add);
  }

  template <int dim>
  void LaplaceProblem<dim>::solve_system()
  {
    LinearAlgebraPETSc::MPI::Vector completely_distributed_solution(
      dof_handler.locally_owned_dofs(), mpi_communicator);

    SolverControl                    solver_control;
    PETScWrappers::SparseDirectMUMPS solver(solver_control, mpi_communicator);
    solver.solve(system_matrix, completely_distributed_solution, system_rhs);
    std::cout << " Solved in " << solver_control.last_step() << " iterations."
              << std::endl;

    constraints.distribute(completely_distributed_solution);
    locally_relevant_solution = completely_distributed_solution;
    locally_relevant_solution.update_ghost_values();

    // SparseDirectUMFPACK A_direct;
    // A_direct.initialize(system_matrix);
    // A_direct.vmult(solution, system_rhs);
  }

  template <int dim>
  void LaplaceProblem<dim>::output_results()
  {
    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    // data_out.add_data_vector(solution,
    //                          {"real_Ex", "real_Ey",
    //                           "imag_Ex", "imag_Ey"});
    data_out.add_data_vector(locally_relevant_solution,
                             {"real_Ex", "real_Ey", "imag_Ex", "imag_Ey"});
    data_out.add_data_vector(system_rhs,
                             {"real_Jx", "real_Jy", "imag_Jx", "imag_Jy"});
    data_out.build_patches();

    std::ofstream output("Nedelec_DIM2_solution.vtk");
    data_out.write_vtk(output);
  }

  template <int dim>
  void LaplaceProblem<dim>::run()
  {
    make_grid();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "make grid end" << std::endl;

    setup_system();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "setup system end" << std::endl;

    Timer time_assemble;
    assemble_system();
    time_assemble.stop();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "Assemble system end : " << time_assemble.cpu_time() << "s"
                << std::endl;

    Timer time_solve;
    solve_system();
    time_solve.stop();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "Solve system end : " << time_solve.cpu_time() << "s"
                << std::endl;

    output_results();
  }
} // namespace Nodal_EM

int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Nodal_EM;

      std::ofstream out("output");
      deallog.attach(out);
      deallog.depth_console(false ? 10 : 0);

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
      LaplaceProblem<2> laplace_problem{};
      laplace_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  return 0;
}
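(Side note on the hand-rolled helpers: in 3D the tangential trace can also be written with deal.II's built-in cross_product_3d, since (n x E) x n = E - (E.n) n for a unit normal n. A minimal standalone check of that identity, with purely illustrative numbers:)

#include <deal.II/base/tensor.h>
#include <iostream>

int main()
{
  using namespace dealii;

  Tensor<1, 3> E, n;
  E[0] = 1.; E[1] = 2.; E[2] = 3.; // some field value
  n[2] = 1.;                       // unit outward normal

  // Tangential part via the double cross product, as in the 3d program below:
  const Tensor<1, 3> E_T = cross_product_3d(cross_product_3d(n, E), n);
  // Equivalent form: subtract the normal component.
  const Tensor<1, 3> E_T2 = E - (E * n) * n;

  std::cout << (E_T - E_T2).norm() << std::endl; // 0 up to round-off
}

And the 3D version (FE_Nedelec on a cube), which does not give the correct result: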
/* ---------------------------------------------------------------------
 *
 * Copyright (C) 2021 - 2022 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE.md at
 * the top level directory of deal.II.
 *
 * ---------------------------------------------------------------------
 *
 * Author: Marc Fehling, Colorado State University, 2021
 *         Peter Munch, Technical University of Munich and Helmholtz-Zentrum
 *         hereon, 2021
 *         Wolfgang Bangerth, Colorado State University, 2021
 */

#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/mpi.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/timer.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>

#include <deal.II/distributed/grid_refinement.h>
#include <deal.II/distributed/tria.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/grid_in.h>
#include <deal.II/grid/manifold_lib.h>
#include <deal.II/grid/grid_out.h>

#include <deal.II/fe/fe_nedelec_sz.h>
#include <deal.II/fe/fe_nedelec.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_series.h>
#include <deal.II/fe/fe_system.h>

#include <deal.II/hp/fe_collection.h>
#include <deal.II/hp/refinement.h>

#include <deal.II/lac/affine_constraints.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>
#include <deal.II/lac/precondition.h>
#include <deal.II/lac/solver_cg.h>
#include <deal.II/lac/solver_gmres.h>
#include <deal.II/lac/trilinos_precondition.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_sparsity_pattern.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/generic_linear_algebra.h>
#include <deal.II/lac/vector.h>
#include <deal.II/lac/trilinos_solver.h>
#include <deal.II/lac/sparse_direct.h>

#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/error_estimator.h>
#include <deal.II/numerics/smoothness_estimator.h>
#include <deal.II/numerics/vector_tools.h>

#include <algorithm>
#include <cmath>
#include <fstream>
#include <iostream>

// For load balancing we will assign individual weights on cells, and for that
// we will use the class parallel::CellWeights.
#include <deal.II/distributed/cell_weights.h>

// The solution function requires a transformation from Cartesian to polar
// coordinates. The GeometricUtilities::Coordinates namespace provides the
// necessary tools.
#include <deal.II/base/function.h>
#include <deal.II/base/geometric_utilities.h>

// The following include files will enable the MatrixFree functionality.
#include <deal.II/matrix_free/matrix_free.h>
#include <deal.II/matrix_free/fe_evaluation.h>
#include <deal.II/matrix_free/tools.h>

// We will use LinearAlgebra::distributed::Vector for linear algebra operations.
#include <deal.II/lac/la_parallel_vector.h>

// We are left to include the files needed by the multigrid solver.
#include <deal.II/multigrid/mg_coarse.h>
#include <deal.II/multigrid/mg_constrained_dofs.h>
#include <deal.II/multigrid/mg_matrix.h>
#include <deal.II/multigrid/mg_smoother.h>
#include <deal.II/multigrid/mg_tools.h>
#include <deal.II/multigrid/mg_transfer_global_coarsening.h>
#include <deal.II/multigrid/multigrid.h>

namespace Nodal_EM
{
  using namespace dealii;

  const unsigned int fe_degree = 0;

  // Current source: a smooth dipole of radius 0.2 centered at the origin.
  template <int dim>
  Tensor<1, dim, std::complex<double>>
  J_a_func(const Point<dim> &point, types::material_id /*id*/)
  {
    Tensor<1, dim, std::complex<double>> J_a;

    double               dipole_radius = 0.2;
    Point<dim>           dipole_position(0., 0., 0.);
    Tensor<1, dim>       dipole_orientation{{0., 0., 1.}};
    std::complex<double> dipole_strength = 1;

    const auto distance = (dipole_position - point).norm() / dipole_radius;
    if (distance > 1.)
      return J_a;

    double scale = std::cos(distance * M_PI / 2.) *
                   std::cos(distance * M_PI / 2.) /
                   (M_PI / 2. - 2. / M_PI) / dipole_radius / dipole_radius;
    J_a = dipole_strength * dipole_orientation * scale;
    return J_a;
  }

  // Computes n x tensor in 3d (unused below; the assembly uses the built-in
  // cross_product_3d instead).
  template <int dim>
  DEAL_II_ALWAYS_INLINE inline Tensor<1, dim, std::complex<double>>
  tangential_part(const Tensor<1, dim, std::complex<double>> &tensor,
                  const Tensor<1, dim> &                      normal)
  {
    auto result = tensor;
    // result[0] = normal[1] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // result[1] = -normal[0] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // return result;
    result[0] = normal[1] * tensor[2] - normal[2] * tensor[1];
    result[1] = -(normal[0] * tensor[2] - normal[2] * tensor[0]);
    result[2] = normal[0] * tensor[1] - normal[1] * tensor[0];
    return result;
  }

  template <int dim>
  DEAL_II_ALWAYS_INLINE inline Tensor<1, dim, std::complex<double>>
  cross(const Tensor<1, dim> &                      normal,
        const Tensor<1, dim, std::complex<double>> &tensor)
  {
    auto result = tensor;
    // result[0] = normal[1] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // result[1] = -normal[0] * (tensor[0] * normal[1] - tensor[1] * normal[0]);
    // return result;
    result[0] = normal[1] * tensor[2] - normal[2] * tensor[1];
    result[1] = -(normal[0] * tensor[2] - normal[2] * tensor[0]);
    result[2] = normal[0] * tensor[1] - normal[1] * tensor[0];
    return result;
  }

  template <int dim>
  class LaplaceProblem
  {
  public:
    LaplaceProblem();
    void run();

  private:
    void make_grid();
    void setup_system();
    void assemble_system();
    void solve_system();
    void output_results();

    MPI_Comm mpi_communicator;

    // parallel::distributed::Triangulation<dim> triangulation;
    Triangulation<dim> triangulation;
    DoFHandler<dim>    dof_handler;
    FESystem<dim>      fe;

    AffineConstraints<double> constraints;

    // SparsityPattern      sparsity_pattern;
    // SparseMatrix<double> system_matrix;
    // Vector<double>       system_rhs;
    // Vector<double>       solution;
    LinearAlgebraPETSc::MPI::SparseMatrix system_matrix;
    LinearAlgebraPETSc::MPI::Vector       locally_relevant_solution;
    LinearAlgebraPETSc::MPI::Vector       system_rhs;
  };

  template <int dim>
  LaplaceProblem<dim>::LaplaceProblem()
    : mpi_communicator(MPI_COMM_WORLD)
    , dof_handler(triangulation)
    // , fe(FE_Q<dim>(/*degree*/ fe_degree), 2 * dim)
    , fe(FE_Nedelec<dim>(fe_degree), 2)
  {}

  template <int dim>
  void LaplaceProblem<dim>::make_grid()
  {
    const double length          = 2;
    const double wave_per_lambda = 10.;
    GridGenerator::subdivided_hyper_cube(triangulation,
                                         2 * length * wave_per_lambda,
                                         -length,
                                         length);
    for (auto &cell : triangulation.cell_iterators())
      {
        cell->set_material_id(0);
        for (const auto &face : cell->face_iterators())
          {
            // face->set_boundary_id(2);
            if (face->at_boundary())
              face->set_boundary_id(1);
          }
      }

    std::ofstream out_mesh("grid.vtk");
    GridOut       grid_out;
    grid_out.write_vtk(triangulation, out_mesh);
  }

  template <int dim>
  void LaplaceProblem<dim>::setup_system()
  {
    dof_handler.distribute_dofs(fe);
    std::cout << "Number of degrees of freedom: " << dof_handler.n_dofs()
              << std::endl;
    std::cout << "Number of edges : " << triangulation.n_lines() << std::endl;

    IndexSet locally_owned_dofs;
    IndexSet locally_relevant_dofs;
    locally_owned_dofs    = dof_handler.locally_owned_dofs();
    locally_relevant_dofs = DoFTools::extract_locally_relevant_dofs(dof_handler);

    {
      // solution.reinit(dof_handler.n_locally_owned_dofs());
      // system_rhs.reinit(dof_handler.n_locally_owned_dofs());
    }
    {
      locally_relevant_solution.reinit(locally_owned_dofs,
                                       locally_relevant_dofs,
                                       mpi_communicator);
      system_rhs.reinit(locally_owned_dofs, mpi_communicator);
    }

    constraints.reinit(locally_relevant_dofs);
    DoFTools::make_hanging_node_constraints(dof_handler, constraints);
    // Note: make_grid() sets boundary id 1 on all boundary faces, so unlike
    // in the 2d program these projections constrain the tangential field on
    // the entire boundary.
    VectorTools::project_boundary_values_curl_conforming_l2(
      dof_handler,
      0, /*real part*/
      Functions::ZeroFunction<dim>(2 * dim),
      1, /*boundary id*/
      constraints,
      MappingQ1<dim>());
    VectorTools::project_boundary_values_curl_conforming_l2(
      dof_handler,
      dim, /*imaginary part*/
      Functions::ZeroFunction<dim>(2 * dim),
      1, /*boundary id*/
      constraints,
      MappingQ1<dim>());
    constraints.close();

    {
      // DynamicSparsityPattern dsp(locally_owned_dofs);
      // DoFTools::make_sparsity_pattern(dof_handler,
      //                                 dsp,
      //                                 constraints,
      //                                 /* keep_constrained_dofs = */ true);
      // sparsity_pattern.copy_from(dsp);
      // system_matrix.reinit(sparsity_pattern);
    }
    {
      DynamicSparsityPattern dsp(locally_owned_dofs);
      DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, true);
      SparsityTools::distribute_sparsity_pattern(dsp,
                                                 locally_owned_dofs,
                                                 mpi_communicator,
                                                 locally_relevant_dofs);
      system_matrix.reinit(locally_owned_dofs,
                           locally_owned_dofs,
                           dsp,
                           mpi_communicator);
    }
  }

  template <int dim>
  void LaplaceProblem<dim>::assemble_system()
  {
    QGauss<dim>     quadrature_formula(fe_degree + 1);
    QGauss<dim - 1> face_quadrature_formula(fe_degree + 1);

    FEValues<dim, dim>     fe_values(fe,
                                 quadrature_formula,
                                 update_values | update_gradients |
                                   update_quadrature_points |
                                   update_JxW_values);
    FEFaceValues<dim, dim> fe_face_values(fe,
                                          face_quadrature_formula,
                                          update_values | update_gradients |
                                            update_quadrature_points |
                                            update_normal_vectors |
                                            update_JxW_values);

    const unsigned int dofs_per_cell   = fe.dofs_per_cell;
    const unsigned int n_q_points      = quadrature_formula.size();
    const unsigned int n_face_q_points = face_quadrature_formula.size();

    FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
    Vector<double>     cell_rhs(dofs_per_cell);
    std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);

    double frequency = 0.3e9;
    double c_const   = 3e8;
    double k0        = 2 * M_PI * frequency / c_const;
    double Z0        = 376.73031;
    double mu_inv    = 1.;
    double epsilon   = 1.;

    for (const auto &cell : dof_handler.active_cell_iterators())
      {
        fe_values.reinit(cell);
        FEValuesViews::Vector<dim> real_part(fe_values, 0);
        FEValuesViews::Vector<dim> imag_part(fe_values, dim);

        cell_matrix = 0.;
        cell_rhs    = 0.;
        cell->get_dof_indices(local_dof_indices);
        const auto  id                = cell->material_id();
        const auto &quadrature_points = fe_values.get_quadrature_points();

        for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
          {
            const Point<dim> &position = quadrature_points[q_point];
            const auto        J_a      = J_a_func(position, id);

            for (const auto i : fe_values.dof_indices())
              {
                constexpr std::complex<double> imag{0., 1.};
                const auto phi_i = real_part.value(i, q_point) -
                                   imag * imag_part.value(i, q_point);
                const auto curl_phi_i = real_part.curl(i, q_point)
                                        - imag * imag_part.curl(i, q_point);
                const auto rhs_value =
                  -k0 * Z0 * (imag * scalar_product(J_a, phi_i)) *
                  fe_values.JxW(q_point);
                cell_rhs(i) += rhs_value.real();

                for (const auto j : fe_values.dof_indices())
                  {
                    const auto phi_j = real_part.value(j, q_point) +
                                       imag * imag_part.value(j, q_point);
                    const auto curl_phi_j = real_part.curl(j, q_point) +
                                            imag * imag_part.curl(j, q_point);
                    const auto temp =
                      (scalar_product(mu_inv * curl_phi_j, curl_phi_i) -
                       k0 * k0 * scalar_product(epsilon * phi_j, phi_i)) *
                      fe_values.JxW(q_point);
                    cell_matrix(i, j) += temp.real();
                  }
              }
          }

        // Face terms: first-order absorbing boundary condition.
        for (const auto &face : cell->face_iterators())
          {
            if (face->at_boundary())
              {
                // const auto id = face->boundary_id();
                fe_face_values.reinit(cell, face);
                FEValuesViews::Vector<dim> real_part(fe_face_values, 0);
                FEValuesViews::Vector<dim> imag_part(fe_face_values, dim);

                for (unsigned int q_point = 0; q_point < n_face_q_points;
                     ++q_point)
                  {
                    // const auto &position = quadrature_points[q_point];
                    const auto normal = fe_face_values.normal_vector(q_point);

                    for (const auto i : fe_face_values.dof_indices())
                      {
                        constexpr std::complex<double> imag{0., 1.};
                        const auto phi_i = real_part.value(i, q_point) -
                                           imag * imag_part.value(i, q_point);
                        // const auto nxphi_i = cross(normal, phi_i);
                        // const auto nxphi_i = cross_product_3d(normal, phi_i);
                        const auto phi_i_T = cross_product_3d(
                          cross_product_3d(normal, phi_i), normal);

                        for (const auto j : fe_face_values.dof_indices())
                          {
                            const auto phi_j =
                              real_part.value(j, q_point) +
                              imag * imag_part.value(j, q_point);
                            // const auto nxphi_j = cross(normal, phi_j);
                            // const auto nxphi_j =
                            //   cross_product_3d(normal, phi_j);
                            const auto phi_j_T = cross_product_3d(
                              cross_product_3d(normal, phi_j), normal);
                            // const auto phi_j_T =
                            //   tangential_part(phi_j, normal) *
                            //   fe_face_values.JxW(q_point);
                            // const auto prod      = mu_inv * epsilon;
                            // const auto sqrt_prod = prod;
                            const auto temp =
                              imag * k0 * scalar_product(phi_j_T, phi_i_T) *
                              fe_face_values.JxW(q_point);
                            cell_matrix(i, j) += temp.real();
                          } /* j */
                      }     /* i */
                  }
              }
          }

        cell->get_dof_indices(local_dof_indices);
        constraints.distribute_local_to_global(
          cell_matrix, cell_rhs, local_dof_indices, system_matrix, system_rhs);
      }
    system_matrix.compress(VectorOperation::add);
    system_rhs.compress(VectorOperation::add);
  }

  template <int dim>
  void LaplaceProblem<dim>::solve_system()
  {
    LinearAlgebraPETSc::MPI::Vector completely_distributed_solution(
      dof_handler.locally_owned_dofs(), mpi_communicator);

    SolverControl                    solver_control;
    PETScWrappers::SparseDirectMUMPS solver(solver_control, mpi_communicator);
    solver.solve(system_matrix, completely_distributed_solution, system_rhs);
    std::cout << " Solved in " << solver_control.last_step() << " iterations."
              << std::endl;

    constraints.distribute(completely_distributed_solution);
    locally_relevant_solution = completely_distributed_solution;
    locally_relevant_solution.update_ghost_values();
  }

  template <int dim>
  void LaplaceProblem<dim>::output_results()
  {
    DataOut<dim> data_out;
    data_out.attach_dof_handler(dof_handler);
    // data_out.add_data_vector(solution,
    //                          {"real_Ex", "real_Ey", "real_Ez",
    //                           "imag_Ex", "imag_Ey", "imag_Ez"});
    data_out.add_data_vector(locally_relevant_solution,
                             {"real_Ex",
                              "real_Ey",
                              "real_Ez",
                              "imag_Ex",
                              "imag_Ey",
                              "imag_Ez"});
    data_out.add_data_vector(system_rhs,
                             {"real_Jx",
                              "real_Jy",
                              "real_Jz",
                              "imag_Jx",
                              "imag_Jy",
                              "imag_Jz"});
    data_out.build_patches();

    std::ofstream output("Nedelec_DIM3_solution.vtk");
    data_out.write_vtk(output);
  }

  template <int dim>
  void LaplaceProblem<dim>::run()
  {
    make_grid();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "make grid end" << std::endl;

    setup_system();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "setup system end" << std::endl;

    Timer time_assemble;
    assemble_system();
    time_assemble.stop();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "Assemble system end : " << time_assemble.cpu_time() << "s"
                << std::endl;

    Timer time_solve;
    solve_system();
    time_solve.stop();
    if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)
      std::cout << "Solve system end : " << time_solve.cpu_time() << "s"
                << std::endl;

    output_results();
  }
} // namespace Nodal_EM

int main(int argc, char *argv[])
{
  try
    {
      using namespace dealii;
      using namespace Nodal_EM;

      std::ofstream out("output");
      deallog.attach(out);
      deallog.depth_console(false ? 10 : 0);

      Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
      LaplaceProblem<3> laplace_problem{};
      laplace_problem.run();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl
                << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  return 0;
}