Hi everyone, I've been trying to write a parallel, distributed solver for a nonlinear PDE using Newton's method. It runs fine on one rank, but with two ranks the solver fails to converge on the second iteration. I've tracked the difference (between one rank and two) down to `get_function_values` evaluating the previous solution differently at quadrature points. This seems to happen only near the boundaries between regions owned by different ranks. I figure this is some sort of synchronization error, but I cannot work out how to fix it. I believe that I am calling `compress` whenever it is necessary, and that I call `update_ghost_values` before calling `get_function_values`. However, I still see the difference.
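In condensed form, the sequence I am using for the previous Newton iterate looks like this (the lines are taken from the reproducer attached further down, so the vectors are ghosted LA::MPI::Vector objects and fe_values/old_solution_values are set up as shown there; treat it as a sketch of the pattern, not the full solver):

    // Apply the Newton update, then bring ghost entries up to date before reading.
    locally_relevant_solution.add(1.0, locally_relevant_update);
    locally_relevant_solution.compress(dealii::VectorOperation::add);
    locally_relevant_solution.update_ghost_values();

    // Evaluate the previous iterate at the quadrature points of locally owned cells.
    fe_values.reinit(cell);
    fe_values.get_function_values(locally_relevant_solution, old_solution_values);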
To help with debugging, I've tried to write a program which mimics the behavior in as few lines as possible (~150 lines with comments and whitespace). I attach it here in case it helps identify my (hopefully silly) error. Any help is much appreciated.

Kind regards,
Lucas
#include <deal.II/base/utilities.h>
#include <deal.II/distributed/tria.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/base/index_set.h>
#include <deal.II/lac/generic_linear_algebra.h>

namespace LA = dealii::LinearAlgebraPETSc;

#include <deal.II/grid/grid_generator.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/base/quadrature_lib.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/base/function.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/lac/vector_operation.h>

#include <iostream>
#include <vector>

const int    dim         = 2;
const int    vec_dim     = 2;
const double left        = -1.0;
const double right       = 1.0;
const int    num_refines = 2;

// Vector-valued step function: component i is +i above the x-axis and -i below it.
template <int dim>
class StepFunction : public dealii::Function<dim>
{
public:
  // Tell the base class how many components this function has, so that it
  // matches the vector-valued finite element used below.
  StepFunction()
    : dealii::Function<dim>(vec_dim)
  {}

  virtual void vector_value(const dealii::Point<dim> &p,
                            dealii::Vector<double>   &values) const override
  {
    if (p[1] > 0)
      {
        for (unsigned int i = 0; i < values.size(); i++)
          values[i] = static_cast<double>(i);
      }
    else
      {
        for (unsigned int i = 0; i < values.size(); i++)
          values[i] = -static_cast<double>(i);
      }
  }
};

int main(int ac, char *av[])
{
  dealii::Utilities::MPI::MPI_InitFinalize mpi_initialization(ac, av, 1);

  // Set up mpi objects, output rank and number of ranks for process
  MPI_Comm mpi_communicator(MPI_COMM_WORLD);
  int      rank      = 0;
  int      num_ranks = 0;
  MPI_Comm_rank(mpi_communicator, &rank);
  MPI_Comm_size(mpi_communicator, &num_ranks);
  std::cout << "rank: " << std::to_string(rank) << "\n";
  std::cout << "num_ranks: " << std::to_string(num_ranks) << "\n\n";

  // Declare other necessary finite element objects
  dealii::parallel::distributed::Triangulation<dim> triangulation(
    mpi_communicator,
    typename dealii::Triangulation<dim>::MeshSmoothing(
      dealii::Triangulation<dim>::smoothing_on_refinement |
      dealii::Triangulation<dim>::smoothing_on_coarsening));
  dealii::DoFHandler<dim> dof_handler(triangulation);
  dealii::FESystem<dim>   fe(dealii::FE_Q<dim>(1), vec_dim);

  dealii::IndexSet locally_owned_dofs;
  dealii::IndexSet locally_relevant_dofs;
  LA::MPI::Vector  locally_relevant_solution;
  LA::MPI::Vector  locally_relevant_update;
  LA::MPI::Vector  locally_owned_solution;

  // Generate grid
  dealii::GridGenerator::hyper_cube(triangulation, left, right);
  triangulation.refine_global(num_refines);

  // Initialize dofs and vectors
  dof_handler.distribute_dofs(fe);
  locally_owned_dofs = dof_handler.locally_owned_dofs();
  dealii::DoFTools::extract_locally_relevant_dofs(dof_handler,
                                                  locally_relevant_dofs);

  locally_relevant_solution.reinit(locally_owned_dofs,
                                   locally_relevant_dofs,
                                   mpi_communicator);
  locally_relevant_update.reinit(locally_owned_dofs,
                                 locally_relevant_dofs,
                                 mpi_communicator);
  locally_owned_solution.reinit(locally_owned_dofs, mpi_communicator);

  // Interpolate everything with StepFunction function
  dealii::VectorTools::interpolate(dof_handler,
                                   StepFunction<dim>(),
                                   locally_owned_solution);
  // compress here because interpolating inserts elements into the vector
  locally_owned_solution.compress(dealii::VectorOperation::insert);

  locally_relevant_solution = locally_owned_solution;
  locally_relevant_update   = locally_owned_solution;
  // compress here because elements have been assigned to solution & update
  locally_relevant_solution.compress(dealii::VectorOperation::insert);
  locally_relevant_update.compress(dealii::VectorOperation::insert);

  locally_relevant_solution.add(1.0, locally_relevant_update);
  // compress here because we have added to locally_relevant_solution
  locally_relevant_solution.compress(dealii::VectorOperation::add);
  locally_relevant_solution.update_ghost_values();

  // Get ready to read locally_relevant_solution at quadrature points
  const dealii::QGauss<dim> quadrature_formula(fe.degree + 1);
  dealii::FEValues<dim>     fe_values(fe,
                                      quadrature_formula,
                                      dealii::update_values |
                                        dealii::update_gradients |
                                        dealii::update_JxW_values |
                                        dealii::update_quadrature_points);

  const unsigned int n_q_points = quadrature_formula.size();

  std::vector<dealii::Vector<double>> old_solution_values(
    n_q_points, dealii::Vector<double>(fe.components));

  for (const auto &cell : dof_handler.active_cell_iterators())
    {
      if (cell->is_locally_owned())
        {
          fe_values.reinit(cell);
          fe_values.get_function_values(locally_relevant_solution,
                                        old_solution_values);

          for (const auto &old_solution_value : old_solution_values)
            std::cout << old_solution_value << "\n";
        }
    }

  return 0;
}
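For reference, I build this against deal.II with PETSc enabled (hence the LinearAlgebraPETSc namespace) and run it once with a single MPI rank and once with two (e.g. mpirun -np 1 vs. mpirun -np 2), then compare the printed quadrature-point values. The entries that differ between the two runs are the ones on cells near the boundary between the two locally owned subdomains.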