Dear deal.ii developers,

This problem pertains to using the routine project_boundary_values 
with a parallel::distributed::Triangulation with a mesh imported
from gmsh. In the guidelines it is advised to set boundary_ids on all
boundary cells (whether locally owned / ghosted or not). I believe that 
part is being
done properly. The program runs with 1 proc but for > 1 procs, the 
project_boundary_values 
routine throws an error :

An error occurred in line <3585> of file 
</home/user/dealii/include/deal.II/dofs/dof_accessor.templates.h> in 
function
    void dealii::DoFCellAccessor<DoFHandlerType, 
lda>::get_dof_indices(std::vector<unsigned int>&) const [with 
DoFHandlerType = dealii::DoFHandler<2, 2>; bool level_dof_access = false]
The violated condition was: 
    this->is_artificial() == false
Additional information: 
    Can't ask for DoF indices on artificial cells.

The above error also appears if the routine is called only by the 
processor that "owns" the boundary. The problem does not occur if 
parallel::shared::Triangulation is used instead. What extra steps are 
needed to use this routine with the distributed triangulation type?

I have attached a "minimal working example" along with the mesh and cmake 
file that
recreates the problem.

Thanks,

-- 
The deal.II project is located at http://www.dealii.org/
For mailing list/forum options, see 
https://groups.google.com/d/forum/dealii?hl=en
--- 
You received this message because you are subscribed to the Google Groups 
"deal.II User Group" group.
To unsubscribe from this group and stop receiving emails from it, send an email 
to [email protected].
For more options, visit https://groups.google.com/d/optout.

Attachment: box.msh
Description: Mesh model

##
#  CMake script for the step-1 tutorial program:
##

# Set the name of the project and target:
SET(TARGET "project_bc_face")

# Declare all source files the target consists of. Here, this is only
# the one step-X.cc file, but as you expand your project you may wish
# to add other source files as well. If your project becomes much larger,
# you may want to either replace the following statement by something like
#    FILE(GLOB_RECURSE TARGET_SRC  "source/*.cc")
#    FILE(GLOB_RECURSE TARGET_INC  "include/*.h")
#    SET(TARGET_SRC ${TARGET_SRC}  ${TARGET_INC}) 
# or switch altogether to the large project CMakeLists.txt file discussed
# in the "CMake in user projects" page accessible from the "User info"
# page of the documentation.
SET(TARGET_SRC
  ${TARGET}.cc
  )

# Usually, you will not need to modify anything beyond this point...

CMAKE_MINIMUM_REQUIRED(VERSION 2.8.12)

FIND_PACKAGE(deal.II 9.1.0 QUIET
  HINTS ${deal.II_DIR} ${DEAL_II_DIR} ../ ../../ $ENV{DEAL_II_DIR}
  )
IF(NOT ${deal.II_FOUND})
  MESSAGE(FATAL_ERROR "\n"
    "*** Could not locate a (sufficiently recent) version of deal.II. ***\n\n"
    # NOTE: the following line was previously split by e-mail wrapping,
    # which would embed a literal newline in the middle of the sentence;
    # it is rejoined here so the error message prints on one line.
    "You may want to either pass a flag -DDEAL_II_DIR=/path/to/deal.II to cmake\n"
    "or set an environment variable \"DEAL_II_DIR\" that contains this path."
    )
ENDIF()

DEAL_II_INITIALIZE_CACHED_VARIABLES()
PROJECT(${TARGET})
DEAL_II_INVOKE_AUTOPILOT()
#include <deal.II/base/logstream.h>
#include <deal.II/base/function.h>
#include <deal.II/base/function_parser.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/multithread_info.h>

#include <deal.II/lac/petsc_vector.h>

#include <deal.II/distributed/tria.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/grid/grid_in.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_face.h>
#include <deal.II/fe/fe_values.h>

#include <deal.II/numerics/vector_tools.h>
#include <deal.II/numerics/matrix_tools.h>
#include <deal.II/numerics/data_out.h>
#include <deal.II/numerics/data_out_faces.h>

#include <deal.II/base/quadrature_lib.h>
#include <deal.II/base/logstream.h>
#include <deal.II/base/work_stream.h>
#include <deal.II/base/function.h>
#include <deal.II/base/function_parser.h>
#include <deal.II/base/conditional_ostream.h>
#include <deal.II/base/index_set.h>
#include <deal.II/base/timer.h>

#include <deal.II/lac/vector.h>
#include <deal.II/lac/full_matrix.h>
#include <deal.II/lac/dynamic_sparsity_pattern.h>
#include <deal.II/lac/sparsity_tools.h>

#include <deal.II/lac/petsc_vector.h>
#include <deal.II/lac/petsc_solver.h>
#include <deal.II/lac/petsc_precondition.h>

#include <deal.II/distributed/tria.h>

#include <deal.II/grid/grid_generator.h>
#include <deal.II/grid/tria_accessor.h>
#include <deal.II/grid/tria_iterator.h>
#include <deal.II/grid/grid_tools.h>
#include <deal.II/grid/grid_out.h>
#include <deal.II/grid/grid_in.h>

#include <deal.II/dofs/dof_handler.h>
#include <deal.II/dofs/dof_renumbering.h>
#include <deal.II/dofs/dof_accessor.h>
#include <deal.II/dofs/dof_tools.h>

#include <deal.II/fe/fe_raviart_thomas.h>
#include <deal.II/fe/fe_values.h>

#include <iostream>
#include <fstream>

// Minimal working example: read a gmsh mesh into a
// parallel::distributed::Triangulation, distribute FE_FaceQ(0) dofs and
// call VectorTools::project_boundary_values().  With more than one MPI
// rank this triggers the "Can't ask for DoF indices on artificial cells"
// assertion reported above; with one rank (or with
// parallel::shared::Triangulation) it runs through.
void test()
{
	using namespace dealii;

	// MPI bookkeeping: total number of ranks and this process' rank.
	unsigned int n_mpi_processes = Utilities::MPI::n_mpi_processes(MPI_COMM_WORLD);
	unsigned int this_mpi_process (Utilities::MPI::this_mpi_process(MPI_COMM_WORLD));
	// Fully distributed mesh: each rank stores only its own cells plus a
	// layer of ghosts; all remaining cells are "artificial" on that rank.
	parallel::distributed::Triangulation<2> triangulation(MPI_COMM_WORLD);
	// Lowest-order face element: one dof per face, none in cell interiors.
	FE_FaceQ<2> fe(0);
	DoFHandler<2> dof_handler(triangulation);
	PETScWrappers::MPI::Vector solution;        // ghosted vector (read access)
	PETScWrappers::MPI::Vector solution_local;  // owned-dofs-only vector
	AffineConstraints<PetscScalar> constraints;
	// Read in gmsh mesh (boundary ids are presumably taken from the .msh
	// file's physical groups -- TODO confirm against box.msh).
	GridIn<2> gridin;
	gridin.attach_triangulation(triangulation);
	std::ifstream f("box.msh");
	gridin.read_msh(f);
	//
	dof_handler.distribute_dofs(fe);
	// Build the owned / locally-relevant index sets that size the vectors.
	const unsigned int n = dof_handler.n_dofs();
	IndexSet local_partitioning(n), local_relevant_partitioning(n);
	local_partitioning = dof_handler.locally_owned_dofs();
	DoFTools::extract_locally_relevant_dofs(dof_handler, local_relevant_partitioning);
	solution.reinit(local_partitioning, local_relevant_partitioning, MPI_COMM_WORLD);
	solution_local.reinit(local_partitioning, MPI_COMM_WORLD);
    // Constraints are stored for the locally relevant dofs only.
    constraints.reinit(local_relevant_partitioning);
    // Runtime-parsed boundary value g(x,y,t) = pi*x*y*t; the trailing
    // 'true' in initialize() marks the expression as time dependent.
    FunctionParser<2> species_boundary_value(1);
	std::string variables = "x,y,t";
	std::map<std::string,double> constants;
	constants["pi"] = numbers::PI;
	const std::string bdy_val_str = "pi*x*y*t";
	species_boundary_value.initialize(variables,bdy_val_str,constants,true);
	// Project the function onto the dofs of boundary faces with id 2.
	std::map<types::boundary_id, const Function<2> *> boundary_functions;
	boundary_functions[2] = &species_boundary_value;
	// This is the call that aborts for np > 1: judging by the assertion in
	// dof_accessor.templates.h quoted above, it appears to ask artificial
	// cells for their dof indices -- TODO confirm whether the routine
	// supports parallel::distributed::Triangulation at all.
	VectorTools::project_boundary_values(dof_handler,
	                                     boundary_functions,
	                                     QGauss<1>(fe.degree+1),
	                                     constraints);
	constraints.close();
	// Now print the solution.  NOTE(review): 'solution' is never written
	// to before being output; presumably intentional for this MWE, which
	// only needs to reach the project_boundary_values() call.
	DataOutFaces<2> data_out_face(false);

	std::vector<DataComponentInterpretation::DataComponentInterpretation>
		component_interpretation(1, DataComponentInterpretation::component_is_scalar);

	std::vector<std::string> solution_names;
	solution_names.emplace_back("Pressure");
	//
	data_out_face.add_data_vector (dof_handler, solution, solution_names, component_interpretation);
	// Also output which subdomain (rank) every active cell belongs to.
	std::vector<types::subdomain_id> partition_int(triangulation.n_active_cells());
	GridTools::get_subdomain_association(triangulation, partition_int);
	const Vector<double> partitioning(partition_int.begin(),
	                                  partition_int.end());
	data_out_face.add_data_vector(partitioning, "Partitioning");
	
	data_out_face.build_patches();
		
	// One .vtu file per rank: solution.00.vtu, solution.01.vtu, ...
	const std::string filename = "solution."+ Utilities::int_to_string (this_mpi_process,2) + ".vtu";
	std::ofstream output(filename);
	data_out_face.write_vtu(output);
		
	// Rank 0 writes the .pvtu master record referencing all per-rank files.
	if (this_mpi_process == 0)
	{
		std::vector<std::string> filenames;
		for (unsigned int i=0; i<n_mpi_processes; i++)
			filenames.push_back ("solution." +
			                     Utilities::int_to_string (i, 2) +
			                     ".vtu");
		const std::string pvtu_master_filename =
			("solution.pvtu");
		std::ofstream pvtu_master(pvtu_master_filename);
		data_out_face.write_pvtu_record (pvtu_master, filenames);
	}
}

int main(int argc, char **argv)
{
	using namespace dealii;
	Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
    test();
}

Reply via email to