The finite element method using deal.II - 2021/2022
Step40::LaplaceProblem< dim > Class Template Reference

Public Member Functions

 LaplaceProblem ()
 
void run ()
 

Private Member Functions

void setup_system ()
 
void assemble_system ()
 
void solve ()
 
void refine_grid ()
 
void output_results (const unsigned int cycle) const
 

Private Attributes

MPI_Comm mpi_communicator
 
parallel::distributed::Triangulation< dim > triangulation
 
FE_Q< dim > fe
 
DoFHandler< dim > dof_handler
 
IndexSet locally_owned_dofs
 
IndexSet locally_relevant_dofs
 
AffineConstraints< double > constraints
 
LA::MPI::SparseMatrix system_matrix
 
LA::MPI::Vector locally_relevant_solution
 
LA::MPI::Vector system_rhs
 
ConditionalOStream pcout
 
TimerOutput computing_timer
 

Detailed Description

template<int dim>
class Step40::LaplaceProblem< dim >

Definition at line 77 of file step-40.cc.

Constructor & Destructor Documentation

◆ LaplaceProblem()

Member Function Documentation

◆ run()

template<int dim>
void Step40::LaplaceProblem< dim >::run

Definition at line 329 of file step-40.cc.

330  {
331  pcout << "Running with "
332 #ifdef USE_PETSC_LA
333  << "PETSc"
334 #else
335  << "Trilinos"
336 #endif
338  << " MPI rank(s)..." << std::endl;
339 
340  const unsigned int n_cycles = 8;
341  for (unsigned int cycle = 0; cycle < n_cycles; ++cycle)
342  {
343  pcout << "Cycle " << cycle << ':' << std::endl;
344 
345  if (cycle == 0)
346  {
349  }
350  else
351  refine_grid();
352 
353  setup_system();
354 
355  pcout << " Number of active cells: "
356  << triangulation.n_global_active_cells() << std::endl
357  << " Number of degrees of freedom: " << dof_handler.n_dofs()
358  << std::endl;
359 
360  assemble_system();
361  solve();
362 
364  {
365  TimerOutput::Scope t(computing_timer, "output");
367  }
368 
371 
372  pcout << std::endl;
373  }
374  }

◆ setup_system()

◆ assemble_system()

template<int dim>
void Step40::LaplaceProblem< dim >::assemble_system
private

Definition at line 181 of file step-40.cc.

182  {
183  TimerOutput::Scope t(computing_timer, "assembly");
184 
185  const QGauss<dim> quadrature_formula(fe.degree + 1);
186 
187  FEValues<dim> fe_values(fe,
188  quadrature_formula,
189  update_values | update_gradients |
190  update_quadrature_points | update_JxW_values);
191 
192  const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
193  const unsigned int n_q_points = quadrature_formula.size();
194 
195  FullMatrix<double> cell_matrix(dofs_per_cell, dofs_per_cell);
196  Vector<double> cell_rhs(dofs_per_cell);
197 
198  std::vector<types::global_dof_index> local_dof_indices(dofs_per_cell);
199 
200  for (const auto &cell : dof_handler.active_cell_iterators())
201  if (cell->is_locally_owned())
202  {
203  cell_matrix = 0.;
204  cell_rhs = 0.;
205 
206  fe_values.reinit(cell);
207 
208  for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
209  {
210  const double rhs_value =
211  (fe_values.quadrature_point(q_point)[1] >
212  0.5 +
213  0.25 * std::sin(4.0 * numbers::PI *
214  fe_values.quadrature_point(q_point)[0]) ?
215  1. :
216  -1.);
217 
218  for (unsigned int i = 0; i < dofs_per_cell; ++i)
219  {
220  for (unsigned int j = 0; j < dofs_per_cell; ++j)
221  cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
222  fe_values.shape_grad(j, q_point) *
223  fe_values.JxW(q_point);
224 
225  cell_rhs(i) += rhs_value * fe_values.shape_value(i, q_point) *
226  fe_values.JxW(q_point);
227  }
228  }
229 
230  cell->get_dof_indices(local_dof_indices);
232  cell_rhs,
233  local_dof_indices,
235  system_rhs);
236  }
237 
240  }

◆ solve()

template<int dim>
void Step40::LaplaceProblem< dim >::solve
private

Definition at line 246 of file step-40.cc.

247  {
249  LA::MPI::Vector completely_distributed_solution(locally_owned_dofs,
251 
252  SolverControl solver_control(dof_handler.n_dofs(), 1e-12);
253 
254 #ifdef USE_PETSC_LA
255  LA::SolverCG solver(solver_control, mpi_communicator);
256 #else
257  LA::SolverCG solver(solver_control);
258 #endif
259 
260  LA::MPI::PreconditionAMG preconditioner;
261 
263 
264 #ifdef USE_PETSC_LA
265  data.symmetric_operator = true;
266 #else
267  /* Trilinos defaults are good */
268 #endif
269  preconditioner.initialize(system_matrix, data);
270 
271  solver.solve(system_matrix,
272  completely_distributed_solution,
273  system_rhs,
275 
276  pcout << " Solved in " << solver_control.last_step() << " iterations."
277  << std::endl;
278 
279  constraints.distribute(completely_distributed_solution);
280 
281  locally_relevant_solution = completely_distributed_solution;
282  }

◆ refine_grid()

template<int dim>
void Step40::LaplaceProblem< dim >::refine_grid
private

Definition at line 288 of file step-40.cc.

289  {
290  TimerOutput::Scope t(computing_timer, "refine");
291 
292  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
294  dof_handler,
296  std::map<types::boundary_id, const Function<dim> *>(),
298  estimated_error_per_cell);
300  triangulation, estimated_error_per_cell, 0.3, 0.03);
302  }

◆ output_results()

template<int dim>
void Step40::LaplaceProblem< dim >::output_results ( const unsigned int  cycle) const
private

Definition at line 308 of file step-40.cc.

309  {
310  DataOut<dim> data_out;
313 
315  for (unsigned int i = 0; i < subdomain.size(); ++i)
316  subdomain(i) = triangulation.locally_owned_subdomain();
317  data_out.add_data_vector(subdomain, "subdomain");
318 
319  data_out.build_patches();
320 
322  "./", "solution", cycle, mpi_communicator, 2, 8);
323  }

Member Data Documentation

◆ mpi_communicator

template<int dim>
MPI_Comm Step40::LaplaceProblem< dim >::mpi_communicator
private

Definition at line 97 of file step-40.cc.

◆ triangulation

template<int dim>
parallel::distributed::Triangulation<dim> Step40::LaplaceProblem< dim >::triangulation
private

Definition at line 99 of file step-40.cc.

◆ fe

template<int dim>
FE_Q<dim> Step40::LaplaceProblem< dim >::fe
private

Definition at line 101 of file step-40.cc.

◆ dof_handler

template<int dim>
DoFHandler<dim> Step40::LaplaceProblem< dim >::dof_handler
private

Definition at line 102 of file step-40.cc.

◆ locally_owned_dofs

template<int dim>
IndexSet Step40::LaplaceProblem< dim >::locally_owned_dofs
private

Definition at line 104 of file step-40.cc.

◆ locally_relevant_dofs

template<int dim>
IndexSet Step40::LaplaceProblem< dim >::locally_relevant_dofs
private

Definition at line 105 of file step-40.cc.

◆ constraints

template<int dim>
AffineConstraints<double> Step40::LaplaceProblem< dim >::constraints
private

Definition at line 107 of file step-40.cc.

◆ system_matrix

template<int dim>
LA::MPI::SparseMatrix Step40::LaplaceProblem< dim >::system_matrix
private

Definition at line 109 of file step-40.cc.

◆ locally_relevant_solution

template<int dim>
LA::MPI::Vector Step40::LaplaceProblem< dim >::locally_relevant_solution
private

Definition at line 110 of file step-40.cc.

◆ system_rhs

template<int dim>
LA::MPI::Vector Step40::LaplaceProblem< dim >::system_rhs
private

Definition at line 111 of file step-40.cc.

◆ pcout

template<int dim>
ConditionalOStream Step40::LaplaceProblem< dim >::pcout
private

Definition at line 113 of file step-40.cc.

◆ computing_timer

template<int dim>
TimerOutput Step40::LaplaceProblem< dim >::computing_timer
private

Definition at line 114 of file step-40.cc.


The documentation for this class was generated from the following file: step-40.cc
Step40::LaplaceProblem::locally_relevant_solution
LA::MPI::Vector locally_relevant_solution
Definition: step-40.cc:110
Step40::LaplaceProblem::refine_grid
void refine_grid()
Definition: step-40.cc:288
local_dof_indices
std::vector< types::global_dof_index > local_dof_indices
cycle
unsigned int cycle
TimerOutput::print_summary
void print_summary() const
Vector
Vector< double > Vector
parallel::distributed::Triangulation::execute_coarsening_and_refinement
virtual void execute_coarsening_and_refinement() override
Step40::LaplaceProblem::constraints
AffineConstraints< double > constraints
Definition: step-40.cc:107
TimerOutput::Scope
SolverControl
Step40::LaplaceProblem::assemble_system
void assemble_system()
Definition: step-40.cc:181
data
Table< 2, float > data
Triangulation
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number
void refine_and_coarsen_fixed_number(parallel::distributed::Triangulation< dim, spacedim > &tria, const ::Vector< Number > &criteria, const double top_fraction_of_cells, const double bottom_fraction_of_cells, const types::global_cell_index max_n_cells=std::numeric_limits< types::global_cell_index >::max())
Step40::LaplaceProblem::fe
FE_Q< dim > fe
Definition: step-40.cc:101
Step40::LaplaceProblem::dof_handler
DoFHandler< dim > dof_handler
Definition: step-40.cc:102
TimerOutput::reset
void reset()
parallel::distributed::Triangulation::refine_global
void refine_global(const unsigned int times=1)
TimerOutput::summary
summary
VectorOperation::add
add
TimerOutput::wall_times
wall_times
DataOut::write_vtu_with_pvtu_record
std::string write_vtu_with_pvtu_record(const std::string &directory, const std::string &filename_without_extension, const unsigned int counter, const MPI_Comm &mpi_communicator, const unsigned int n_digits_for_counter=numbers::invalid_unsigned_int, const unsigned int n_groups=0) const
types::boundary_id
unsigned int boundary_id
Step40::LaplaceProblem::solve
void solve()
Definition: step-40.cc:246
cell_matrix
void cell_matrix(FullMatrix< double > &M, const FEValuesBase< dim > &fe, const FEValuesBase< dim > &fetest, const ArrayView< const std::vector< double >> &velocity, const double factor=1.)
DataOut::attach_dof_handler
void attach_dof_handler(const DoFHandler< dim, spacedim > &)
Step40::LaplaceProblem::system_rhs
LA::MPI::Vector system_rhs
Definition: step-40.cc:111
Step40::LaplaceProblem::setup_system
void setup_system()
Definition: step-40.cc:140
DoFHandler::distribute_dofs
void distribute_dofs(const FiniteElement< dim, spacedim > &fe)
DoFTools::make_hanging_node_constraints
void make_hanging_node_constraints(const DoFHandler< dim, spacedim > &dof_handler, AffineConstraints< number > &constraints)
FEValues< dim >
FE_Q::degree
const unsigned int degree
DoFHandler::locally_owned_dofs
const IndexSet & locally_owned_dofs() const
n_cycles
unsigned int n_cycles
AffineConstraints< double >::distribute_local_to_global
void distribute_local_to_global(const InVector &local_vector, const std::vector< size_type > &local_dof_indices, OutVector &global_vector) const
KellyErrorEstimator::estimate
static void estimate(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const Quadrature< dim - 1 > &quadrature, const std::map< types::boundary_id, const Function< spacedim, typename InputVector::value_type > * > &neumann_bc, const InputVector &solution, Vector< float > &error, const ComponentMask &component_mask=ComponentMask(), const Function< spacedim > *coefficients=nullptr, const unsigned int n_threads=numbers::invalid_unsigned_int, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id, const types::material_id material_id=numbers::invalid_material_id, const Strategy strategy=cell_diameter_over_24)
DoFTools::make_sparsity_pattern
void make_sparsity_pattern(const DoFHandler< dim, spacedim > &dof_handler, SparsityPatternType &sparsity_pattern, const AffineConstraints< number > &constraints=AffineConstraints< number >(), const bool keep_constrained_dofs=true, const types::subdomain_id subdomain_id=numbers::invalid_subdomain_id)
AdditionalData
typename BaseClass::AdditionalData AdditionalData
Step40::LaplaceProblem::locally_relevant_dofs
IndexSet locally_relevant_dofs
Definition: step-40.cc:105
DataOut::build_patches
virtual void build_patches(const unsigned int n_subdivisions=0)
DoFTools::extract_locally_relevant_dofs
IndexSet extract_locally_relevant_dofs(const DoFHandler< dim, spacedim > &dof_handler)
Step40::LaplaceProblem::locally_owned_dofs
IndexSet locally_owned_dofs
Definition: step-40.cc:104
DynamicSparsityPattern
VectorTools::interpolate_boundary_values
void interpolate_boundary_values(const Mapping< dim, spacedim > &mapping, const DoFHandler< dim, spacedim > &dof, const std::map< types::boundary_id, const Function< spacedim, number > * > &function_map, std::map< types::global_dof_index, number > &boundary_values, const ComponentMask &component_mask=ComponentMask())
AffineConstraints< double >::reinit
void reinit(const IndexSet &local_constraints=IndexSet())
parallel::distributed::Triangulation::locally_owned_subdomain
types::subdomain_id locally_owned_subdomain() const override
QGauss
AffineConstraints< double >::distribute
void distribute(VectorType &vec) const
Step40::LaplaceProblem::output_results
void output_results(const unsigned int cycle) const
Definition: step-40.cc:308
AffineConstraints< double >::clear
void clear()
Step40::LaplaceProblem::computing_timer
TimerOutput computing_timer
Definition: step-40.cc:114
Utilities::MPI::this_mpi_process
unsigned int this_mpi_process(const MPI_Comm &mpi_communicator)
GridGenerator::hyper_cube
void hyper_cube(Triangulation< dim, spacedim > &tria, const double left=0., const double right=1., const bool colorize=false)
DataOut::add_data_vector
void add_data_vector(const VectorType &data, const std::vector< std::string > &names, const DataVectorType type=type_automatic, const std::vector< DataComponentInterpretation::DataComponentInterpretation > &data_component_interpretation={})
Utilities::MPI::n_mpi_processes
unsigned int n_mpi_processes(const MPI_Comm &mpi_communicator)
SparsityTools::distribute_sparsity_pattern
void distribute_sparsity_pattern(DynamicSparsityPattern &dsp, const IndexSet &locally_owned_rows, const MPI_Comm &mpi_comm, const IndexSet &locally_relevant_rows)
parallel::distributed::Triangulation::n_active_cells
unsigned int n_active_cells() const
Functions::ZeroFunction
FullMatrix< double >
Function< dim >
DoFHandler::active_cell_iterators
IteratorRange< active_cell_iterator > active_cell_iterators() const
Step40::LaplaceProblem::pcout
ConditionalOStream pcout
Definition: step-40.cc:113
FE_Q::n_dofs_per_cell
unsigned int n_dofs_per_cell() const
DataOut< dim >
numbers::PI
static constexpr double PI
Vector< double >
Step40::LaplaceProblem::triangulation
parallel::distributed::Triangulation< dim > triangulation
Definition: step-40.cc:99
AffineConstraints< double >::close
void close()
preconditioner
std::shared_ptr< PreconditionerType > preconditioner
Step40::LaplaceProblem::mpi_communicator
MPI_Comm mpi_communicator
Definition: step-40.cc:97
parallel::distributed::Triangulation::n_global_active_cells
virtual types::global_cell_index n_global_active_cells() const override
DoFHandler::n_dofs
types::global_dof_index n_dofs() const
Step40::LaplaceProblem::system_matrix
LA::MPI::SparseMatrix system_matrix
Definition: step-40.cc:109