The finite element method using deal.II - 2021/2022
namespace LA
{
#if defined(DEAL_II_WITH_PETSC) && !defined(DEAL_II_PETSC_WITH_COMPLEX) && \
  !(defined(DEAL_II_WITH_TRILINOS) && defined(FORCE_USE_OF_TRILINOS))
  using namespace dealii::LinearAlgebraPETSc;
#  define USE_PETSC_LA
#elif defined(DEAL_II_WITH_TRILINOS)
  using namespace dealii::LinearAlgebraTrilinos;
#else
#  error DEAL_II_WITH_PETSC or DEAL_II_WITH_TRILINOS required
#endif
} // namespace LA
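// The LA alias keeps the rest of the program backend-agnostic: the same
// declarations compile against either PETSc or Trilinos wrappers. A minimal
// sketch of how the member variables of this program use it (assuming the
// declarations match the step-40 tutorial this listing appears to follow):
//
//   LA::MPI::SparseMatrix system_matrix;
//   LA::MPI::Vector       locally_relevant_solution;
//   LA::MPI::Vector       system_rhs;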
void output_results(const unsigned int cycle) const;
  : mpi_communicator(MPI_COMM_WORLD)
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::never,
                    TimerOutput::wall_times)
dof_handler.distribute_dofs(fe);

locally_owned_dofs = dof_handler.locally_owned_dofs();
DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);
locally_relevant_solution.reinit(locally_owned_dofs,
                                 locally_relevant_dofs,
                                 mpi_communicator);
system_rhs.reinit(locally_owned_dofs, mpi_communicator);
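// The constraints setup that follows at this point is elided in this
// listing; a minimal sketch, assuming hanging-node constraints plus
// homogeneous Dirichlet data on boundary id 0 as in the original tutorial:
constraints.clear();
constraints.reinit(locally_relevant_dofs);
DoFTools::make_hanging_node_constraints(dof_handler, constraints);
VectorTools::interpolate_boundary_values(dof_handler,
                                         0,
                                         Functions::ZeroFunction<dim>(),
                                         constraints);
constraints.close();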
DynamicSparsityPattern dsp(locally_relevant_dofs);
DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
SparsityTools::distribute_sparsity_pattern(dsp,
                                           dof_handler.locally_owned_dofs(),
                                           mpi_communicator,
                                           locally_relevant_dofs);

system_matrix.reinit(locally_owned_dofs,
                     locally_owned_dofs,
                     dsp,
                     mpi_communicator);
const QGauss<dim> quadrature_formula(fe.degree + 1);

const unsigned int dofs_per_cell = fe.n_dofs_per_cell();
const unsigned int n_q_points    = quadrature_formula.size();
for (const auto &cell : dof_handler.active_cell_iterators())
  if (cell->is_locally_owned())
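    // Inside this branch, the original program first calls
    // fe_values.reinit(cell) and zeroes cell_matrix and cell_rhs (those
    // lines are elided in this listing) before the quadrature loop below.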
    for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
      {
        const double rhs_value =
          (fe_values.quadrature_point(q_point)[1] >
               0.5 +
                 0.25 * std::sin(4.0 * numbers::PI *
                                 fe_values.quadrature_point(q_point)[0]) ?
             1. :
             -1.);
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
                                   fe_values.shape_grad(j, q_point) *
                                   fe_values.JxW(q_point);

            cell_rhs(i) += rhs_value *
                           fe_values.shape_value(i, q_point) *
                           fe_values.JxW(q_point);
          }
      }
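    // Copying the cell contributions into the global objects is elided
    // above; a sketch following the original program, assuming
    // local_dof_indices was sized to dofs_per_cell beforehand:
    cell->get_dof_indices(local_dof_indices);
    constraints.distribute_local_to_global(cell_matrix,
                                           cell_rhs,
                                           local_dof_indices,
                                           system_matrix,
                                           system_rhs);

// After the cell loop, the parallel matrix and vector need a compress()
// step to exchange contributions destined for other processors:
system_matrix.compress(VectorOperation::add);
system_rhs.compress(VectorOperation::add);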
#ifdef USE_PETSC_LA
    LA::SolverCG solver(solver_control, mpi_communicator);
#else
    LA::SolverCG solver(solver_control);
#endif
data.symmetric_operator = true;
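// "data" here is an LA::MPI::PreconditionAMG::AdditionalData object declared
// just above (elided in this listing); setting symmetric_operator lets the
// PETSc AMG setup exploit the symmetry of the Laplace matrix. The
// preconditioner passed to solve() below is then built as in the original
// program:
LA::MPI::PreconditionAMG preconditioner;
preconditioner.initialize(system_matrix, data);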
solver.solve(system_matrix,
             completely_distributed_solution,
             system_rhs,
             preconditioner);
pcout << "   Solved in " << solver_control.last_step() << " iterations."
      << std::endl;
locally_relevant_solution = completely_distributed_solution;
Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
KellyErrorEstimator<dim>::estimate(
  dof_handler,
  QGauss<dim - 1>(fe.degree + 1),
  std::map<types::boundary_id, const Function<dim> *>(),
  locally_relevant_solution,
  estimated_error_per_cell);
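// The mesh adaptation call itself is elided in this listing; the original
// program refines the 30% of cells with the largest error indicators and
// coarsens the bottom 3%:
parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
  triangulation, estimated_error_per_cell, 0.3, 0.03);
triangulation.execute_coarsening_and_refinement();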
Vector<float> subdomain(triangulation.n_active_cells());
for (unsigned int i = 0; i < subdomain.size(); ++i)
  subdomain(i) = triangulation.locally_owned_subdomain();
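// A DataOut<dim> object is created earlier in this function (elided in this
// listing) and attached to dof_handler, with locally_relevant_solution
// already added as a data vector. What remains before writing, following
// the original program:
data_out.add_data_vector(subdomain, "subdomain");
data_out.build_patches();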
322 "./",
"solution",
cycle, mpi_communicator, 2, 8);
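// Here "2" is the number of digits used for the cycle counter in the file
// names and "8" caps the number of .vtu files written per record, so that
// many MPI ranks share a small number of output files instead of writing
// one file each.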
pcout << "Running with "
#ifdef USE_PETSC_LA
      << "PETSc"
#else
      << "Trilinos"
#endif
      << " on " << Utilities::MPI::n_mpi_processes(mpi_communicator)
      << " MPI rank(s)..." << std::endl;
pcout << "Cycle " << cycle << ':' << std::endl;
pcout << "   Number of active cells:       "
      << triangulation.n_global_active_cells() << std::endl
      << "   Number of degrees of freedom: " << dof_handler.n_dofs()
      << std::endl;
output_results(cycle);
computing_timer.print_summary();
computing_timer.reset();
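// Printing and then resetting the timer at the end of each cycle produces
// per-cycle wall-time summaries rather than cumulative totals.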
laplace_problem_2d.run();
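// For context: in the original program the line above sits inside main()'s
// try block, after MPI has been initialized; a sketch of that setup:
//
//   Utilities::MPI::MPI_InitFinalize mpi_initialization(argc, argv, 1);
//   LaplaceProblem<2>                laplace_problem_2d;
//   laplace_problem_2d.run();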
catch (std::exception &exc)
  {
    std::cerr << std::endl
              << std::endl
              << "----------------------------------------------------"
              << std::endl;
    std::cerr << "Exception on processing: " << std::endl
              << exc.what() << std::endl
              << "Aborting!" << std::endl
              << "----------------------------------------------------"
              << std::endl;

    return 1;
  }
catch (...)
  {
    std::cerr << std::endl
              << std::endl
              << "----------------------------------------------------"
              << std::endl;
    std::cerr << "Unknown exception!" << std::endl
              << "Aborting!" << std::endl
              << "----------------------------------------------------"
              << std::endl;

    return 1;
  }