Hello Bruno,

Sorry for the vagueness; I wasn't sure whether this was an actual bug in deal.II (I've never encountered one before!). I've attached a bare-bones version of my code that reproduces the problem. It looks at four cases:

1) deal.II's TrilinosWrappers::SparseMatrix::mmult in serial
2) multiplying the serial matrices outside of deal.II
3) deal.II's TrilinosWrappers::SparseMatrix::mmult in parallel
4) multiplying the parallel matrices outside of deal.II
Only case (3) returns something different; the other cases all agree with each other. Interestingly, if I leave the ConstraintMatrix empty, then all cases (1)-(4) match exactly; it is only once I introduce constraints that (3) fails.
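For reference, one additional way to cross-check case (3) entirely inside deal.II would be to compare the product matrix against applying the two factors in sequence to a test vector, since (B*G)*x must equal B*(G*x) for any x. This is only a rough sketch, not part of the attached test: the function name and the IndexSet parameters are made up, and in the attached code they would correspond to partitioning[0] (velocity) and partitioning[1] (pressure) from setup_dofs().

#include <deal.II/base/index_set.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_vector.h>
#include <iostream>

// Hypothetical helper: B = block(1,0), G = block(0,1), BG = block(1,1) after mmult.
void check_mmult_consistency (const dealii::TrilinosWrappers::SparseMatrix &B,
                              const dealii::TrilinosWrappers::SparseMatrix &G,
                              const dealii::TrilinosWrappers::SparseMatrix &BG,
                              const dealii::IndexSet &u_partitioning,
                              const dealii::IndexSet &p_partitioning)
{
  dealii::TrilinosWrappers::MPI::Vector x   (p_partitioning, MPI_COMM_WORLD); // pressure-sized
  dealii::TrilinosWrappers::MPI::Vector tmp (u_partitioning, MPI_COMM_WORLD); // velocity-sized
  dealii::TrilinosWrappers::MPI::Vector y1  (p_partitioning, MPI_COMM_WORLD);
  dealii::TrilinosWrappers::MPI::Vector y2  (p_partitioning, MPI_COMM_WORLD);

  x = 1.;             // any nonzero test vector will do

  BG.vmult (y1, x);   // y1 = (B*G) x, using the mmult result
  G.vmult  (tmp, x);  // tmp = G x
  B.vmult  (y2, tmp); // y2 = B (G x)

  y1 -= y2;           // should be ~0 if mmult is consistent with its factors
  std::cout << "||BG*x - B*(G*x)||_inf = " << y1.linfty_norm() << std::endl;
}

The attached bare-bones test follows.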
/* ---------------------------------------------------------------------
 * $Id: step-32.cc 31452 2013-10-28 02:23:20Z bangerth $
 *
 * Copyright (C) 2008 - 2013 by the deal.II authors
 *
 * This file is part of the deal.II library.
 *
 * The deal.II library is free software; you can use it, redistribute
 * it, and/or modify it under the terms of the GNU Lesser General
 * Public License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 * The full text of the license can be found in the file LICENSE at
 * the top level of the deal.II distribution.
 *
 * ---------------------------------------------------------------------
 *
 * Authors: Martin Kronbichler, Uppsala University,
 *          Wolfgang Bangerth, Texas A&M University,
 *          Timo Heister, University of Goettingen, 2008-2011
 *          Ben Shields, University of Illinois at Urbana-Champaign, 2014-2016
 */

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/utilities.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/lac/constraint_matrix.h>
#include <deal.II/lac/block_sparsity_pattern.h>
#include <deal.II/lac/trilinos_sparse_matrix.h>
#include <deal.II/lac/trilinos_block_sparse_matrix.h>
#include <deal.II/grid/tria.h>
#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/filtered_iterator.h>
#include <deal.II/grid/tria_boundary_lib.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_refinement.h>
#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>
#include <deal.II/fe/fe_q.h>
#include <deal.II/fe/fe_dgp.h>
#include <deal.II/fe/fe_system.h>
#include <deal.II/fe/fe_values.h>
#include <deal.II/fe/mapping_q.h>
#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>

#include <fstream>
#include <iostream>
#include <sstream>
#include <limits>
#include <locale>
#include <string>

#include <deal.II/base/index_set.h>
#include <deal.II/distributed/tria.h>

namespace EdgeFlameProblem
{
  using namespace dealii;

  namespace Assembly
  {
    namespace Scratch
    {
      template <int dim>
      struct MatrixSystem
      {
        MatrixSystem (const FiniteElement<dim> &fe_system,
                      const Mapping<dim>       &mapping,
                      const Quadrature<dim>    &quadrature,
                      const UpdateFlags         update_flags);

        MatrixSystem (const MatrixSystem<dim> &data);

        FEValues<dim>       fe_values;
        std::vector<double> div_phi_u;
        std::vector<double> phi_p;
      };

      template <int dim>
      MatrixSystem<dim>::
      MatrixSystem (const FiniteElement<dim> &fe_system,
                    const Mapping<dim>       &mapping,
                    const Quadrature<dim>    &quadrature,
                    const UpdateFlags         update_flags)
        :
        fe_values (mapping, fe_system, quadrature, update_flags),
        div_phi_u (fe_system.dofs_per_cell),
        phi_p (fe_system.dofs_per_cell)
      {}

      template <int dim>
      MatrixSystem<dim>::
      MatrixSystem (const MatrixSystem<dim> &scratch)
        :
        fe_values (scratch.fe_values.get_mapping(),
                   scratch.fe_values.get_fe(),
                   scratch.fe_values.get_quadrature(),
                   scratch.fe_values.get_update_flags()),
        div_phi_u (scratch.div_phi_u),
        phi_p (scratch.phi_p)
      {}
    }

    namespace CopyData
    {
      template <int dim>
      struct MatrixSystem
      {
        MatrixSystem (const FiniteElement<dim> &fe_system);
        MatrixSystem (const MatrixSystem<dim> &data);

        FullMatrix<double>                   local_block_matrix;
        std::vector<types::global_dof_index> local_dof_indices;
      };

      template <int dim>
      MatrixSystem<dim>::
      MatrixSystem (const FiniteElement<dim> &fe_system)
        :
        local_block_matrix (fe_system.dofs_per_cell, fe_system.dofs_per_cell),
        local_dof_indices (fe_system.dofs_per_cell)
      {}

      template <int dim>
      MatrixSystem<dim>::
      MatrixSystem (const MatrixSystem<dim> &data)
        :
        local_block_matrix (data.local_block_matrix),
        local_dof_indices (data.local_dof_indices)
      {}
    }
  }

  template <int dim>
  class mmultTest
  {
  public:
    mmultTest ();
    void run ();

  private:
    void setup_dofs ();
    void assemble_system ();
    void sparse_mmult ();

    ConditionalOStream                        pcout;
    const MappingQ<dim>                       mapping;
    const FESystem<dim>                       fe_system;
    parallel::distributed::Triangulation<dim> triangulation;
    DoFHandler<dim>                           dof_handler;
    TrilinosWrappers::BlockSparseMatrix       block_matrix;
    ConstraintMatrix                          constraints;

    void setup_matrix (const std::vector<IndexSet> &partitioning,
                       const std::vector<IndexSet> &relevant_partitioning);

    void local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
                                Assembly::Scratch::MatrixSystem<dim>                  &scratch,
                                Assembly::CopyData::MatrixSystem<dim>                 &data);

    void copy_local_to_global_system (const Assembly::CopyData::MatrixSystem<dim> &data);
  };

  template <int dim>
  mmultTest<dim>::mmultTest ()
    :
    pcout (std::cout,
           (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == 0)),
    mapping (4),
    fe_system (FE_Q<dim>(2), dim,
               static_cast<const FiniteElement<dim> &> (FE_DGP<dim>(1)), 1),
    triangulation (MPI_COMM_WORLD),
    dof_handler (triangulation)
  {}

  template <int dim>
  void mmultTest<dim>::
  setup_matrix (const std::vector<IndexSet> &partitioning,
                const std::vector<IndexSet> &relevant_partitioning)
  {
    block_matrix.clear();

    TrilinosWrappers::BlockSparsityPattern sp1 (partitioning, partitioning,
                                                relevant_partitioning,
                                                MPI_COMM_WORLD);

    Table<2,DoFTools::Coupling> coupling (dim+1, dim+1);
    for (unsigned int c=0; c<dim+1; ++c)
      for (unsigned int d=0; d<dim+1; ++d)
        coupling[c][d] = DoFTools::always;

    DoFTools::make_sparsity_pattern (dof_handler,
                                     coupling, sp1,
                                     constraints, false,
                                     Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
    sp1.compress();

    block_matrix.reinit(sp1);
  }

  template <int dim>
  void mmultTest<dim>::setup_dofs ()
  {
    std::vector<unsigned int> sub_blocks (dim+1,0);
    sub_blocks[dim] = 1;
    dof_handler.distribute_dofs (fe_system);
    DoFRenumbering::component_wise (dof_handler, sub_blocks);

    std::vector<types::global_dof_index> dofs_per_block (2);
    DoFTools::count_dofs_per_block (dof_handler, dofs_per_block, sub_blocks);

    const unsigned int n_u = dofs_per_block[0],
                       n_p = dofs_per_block[1];

    std::locale s = pcout.get_stream().getloc();
    pcout.get_stream().imbue(std::locale(""));
    pcout << "Number of active cells: "
          << triangulation.n_global_active_cells()
          << " (on "
          << triangulation.n_levels()
          << " levels)"
          << std::endl
          << "Number of degrees of freedom: "
          << n_u + n_p
          << " (" << n_u << '+' << n_p << ')'
          << std::endl;
    pcout.get_stream().imbue(s);

    std::vector<IndexSet> partitioning, relevant_partitioning;
    {
      IndexSet index_set = dof_handler.locally_owned_dofs();
      partitioning.push_back(index_set.get_view(0,n_u));
      partitioning.push_back(index_set.get_view(n_u,n_u+n_p));

      IndexSet relevant_set;
      DoFTools::extract_locally_relevant_dofs (dof_handler, relevant_set);
      relevant_partitioning.push_back(relevant_set.get_view(0,n_u));
      relevant_partitioning.push_back(relevant_set.get_view(n_u,n_u+n_p));

      constraints.clear ();
      constraints.reinit (relevant_set);
      VectorTools::interpolate_boundary_values (dof_handler,
                                                0,
                                                ZeroFunction<dim>(dim+1),
                                                constraints);
      constraints.close ();
    }

    setup_matrix (partitioning, relevant_partitioning);
  }

  template <int dim>
  void mmultTest<dim>::sparse_mmult ()
  {
    block_matrix.block(1,1) = 0;
    block_matrix.block(1,0).mmult(block_matrix.block(1,1),
                                  block_matrix.block(0,1));

    for (unsigned int n=0; n<Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD); ++n)
      if (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD) == n)
        {
          {
            const std::string filename = ("BG_matrix_" +
                                          Utilities::int_to_string (n, 2) +
                                          ".dat");
            std::ofstream matrix_file;
            matrix_file.open (filename.c_str());
            block_matrix.block(1,1).print(matrix_file, false);
            matrix_file.close ();
          }
          {
            const std::string filename = ("B_matrix_" +
                                          Utilities::int_to_string (n, 2) +
                                          ".dat");
            std::ofstream matrix_file;
            matrix_file.open (filename.c_str());
            block_matrix.block(1,0).print(matrix_file, false);
            matrix_file.close ();
          }
          {
            const std::string filename = ("G_matrix_" +
                                          Utilities::int_to_string (n, 2) +
                                          ".dat");
            std::ofstream matrix_file;
            matrix_file.open (filename.c_str());
            block_matrix.block(0,1).print(matrix_file, false);
            matrix_file.close ();
          }
        }
  }

  template <int dim>
  void mmultTest<dim>::
  local_assemble_system (const typename DoFHandler<dim>::active_cell_iterator &cell,
                         Assembly::Scratch::MatrixSystem<dim>                  &scratch,
                         Assembly::CopyData::MatrixSystem<dim>                 &data)
  {
    const unsigned int dofs_per_cell = scratch.fe_values.get_fe().dofs_per_cell;
    const unsigned int n_q_points    = scratch.fe_values.n_quadrature_points;

    const FEValuesExtractors::Vector velocities (0);
    const FEValuesExtractors::Scalar pressure (dim);

    scratch.fe_values.reinit (cell);

    data.local_block_matrix = 0;

    for (unsigned int q=0; q<n_q_points; ++q)
      {
        for (unsigned int k=0; k<dofs_per_cell; ++k)
          {
            scratch.div_phi_u[k] = scratch.fe_values[velocities].divergence (k,q);
            scratch.phi_p[k]     = scratch.fe_values[pressure].value (k,q);
          }

        for (unsigned int i=0; i<dofs_per_cell; ++i)
          for (unsigned int j=0; j<dofs_per_cell; ++j)
            {
              data.local_block_matrix(i,j) += ( scratch.div_phi_u[i] * scratch.phi_p[j]
                                                + 2 * scratch.phi_p[i] * scratch.div_phi_u[j] )
                                              * scratch.fe_values.JxW(q);
            }
      }

    cell->get_dof_indices (data.local_dof_indices);
  }

  template <int dim>
  void mmultTest<dim>::
  copy_local_to_global_system (const Assembly::CopyData::MatrixSystem<dim> &data)
  {
    constraints.distribute_local_to_global (data.local_block_matrix,
                                            data.local_dof_indices,
                                            block_matrix);
  }

  template <int dim>
  void mmultTest<dim>::assemble_system ()
  {
    block_matrix = 0;

    const QGauss<dim> quadrature_formula(3);

    typedef FilteredIterator<typename DoFHandler<dim>::active_cell_iterator>
      CellFilter;

    WorkStream::
    run (CellFilter (IteratorFilters::LocallyOwnedCell(),
                     dof_handler.begin_active()),
         CellFilter (IteratorFilters::LocallyOwnedCell(),
                     dof_handler.end()),
         std_cxx1x::bind (&mmultTest<dim>::
                          local_assemble_system,
                          this,
                          std_cxx1x::_1,
                          std_cxx1x::_2,
                          std_cxx1x::_3),
         std_cxx1x::bind (&mmultTest<dim>::
                          copy_local_to_global_system,
                          this,
                          std_cxx1x::_1),
         Assembly::Scratch::
         MatrixSystem<dim> (fe_system, mapping, quadrature_formula,
                            update_values |
                            update_gradients |
                            update_JxW_values),
         Assembly::CopyData::
         MatrixSystem<dim> (fe_system));

    block_matrix.compress(VectorOperation::add);
  }

  template <int dim>
  void mmultTest<dim>::run ()
  {
    GridGenerator::hyper_rectangle (triangulation,
                                    Point<2>(-1,-1),
                                    Point<2>(1,1));
    triangulation.refine_global(4);

    setup_dofs ();
    assemble_system ();
    sparse_mmult ();
  }
}

int main (int argc, char *argv[])
{
  using namespace EdgeFlameProblem;
  using namespace dealii;

  Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv);

  try
    {
      deallog.depth_console (0);

      const int dim = 2;
      mmultTest<dim> test;
      test.run ();
    }
  catch (std::exception &exc)
    {
      std::cerr << std::endl << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Exception on processing: " << std::endl
                << exc.what() << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }
  catch (...)
    {
      std::cerr << std::endl << std::endl
                << "----------------------------------------------------"
                << std::endl;
      std::cerr << "Unknown exception!" << std::endl
                << "Aborting!" << std::endl
                << "----------------------------------------------------"
                << std::endl;
      return 1;
    }

  return 0;
}