The finite element method using deal.II - 2021/2022
template<int dim>
class Step40::LaplaceProblem< dim >
Definition at line 77 of file step-40.cc.
◆ LaplaceProblem()
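The page gives no snippet for the constructor. As a rough sketch (not copied from this page), a step-40-style constructor wires up the parallel members in its initializer list; the polynomial degree and the timer settings below are illustrative assumptions:

template <int dim>
LaplaceProblem<dim>::LaplaceProblem()
  : mpi_communicator(MPI_COMM_WORLD)
  , triangulation(mpi_communicator,
                  typename Triangulation<dim>::MeshSmoothing(
                    Triangulation<dim>::smoothing_on_refinement |
                    Triangulation<dim>::smoothing_on_coarsening))
  , fe(2)                      // illustrative polynomial degree
  , dof_handler(triangulation)
  , pcout(std::cout,
          Utilities::MPI::this_mpi_process(mpi_communicator) == 0)
  , computing_timer(mpi_communicator,
                    pcout,
                    TimerOutput::never,
                    TimerOutput::wall_times)
{}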
◆ run()
Definition at line 329 of file step-40.cc.
pcout << "Running with "
      << ... << " MPI rank(s)..." << std::endl;
...
pcout << "Cycle " << cycle << ':' << std::endl;
...
pcout << " Number of active cells: "
◆ setup_system()
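No code snippet for setup_system() survives on this page. A condensed sketch of the usual step-40-style setup is shown below; the zero Dirichlet boundary values on boundary id 0 are an assumption made for illustration:

template <int dim>
void LaplaceProblem<dim>::setup_system()
{
  TimerOutput::Scope t(computing_timer, "setup");

  dof_handler.distribute_dofs(fe);

  locally_owned_dofs = dof_handler.locally_owned_dofs();
  DoFTools::extract_locally_relevant_dofs(dof_handler, locally_relevant_dofs);

  // Ghosted solution vector, non-ghosted right-hand side.
  locally_relevant_solution.reinit(locally_owned_dofs,
                                   locally_relevant_dofs,
                                   mpi_communicator);
  system_rhs.reinit(locally_owned_dofs, mpi_communicator);

  constraints.clear();
  constraints.reinit(locally_relevant_dofs);
  DoFTools::make_hanging_node_constraints(dof_handler, constraints);
  VectorTools::interpolate_boundary_values(dof_handler,
                                           0,                              // assumed boundary id
                                           Functions::ZeroFunction<dim>(), // assumed boundary values
                                           constraints);
  constraints.close();

  DynamicSparsityPattern dsp(locally_relevant_dofs);
  DoFTools::make_sparsity_pattern(dof_handler, dsp, constraints, false);
  SparsityTools::distribute_sparsity_pattern(dsp,
                                             dof_handler.locally_owned_dofs(),
                                             mpi_communicator,
                                             locally_relevant_dofs);
  system_matrix.reinit(locally_owned_dofs,
                       locally_owned_dofs,
                       dsp,
                       mpi_communicator);
}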
◆ assemble_system()
Definition at line 181 of file step-40.cc.
  ... update_values | update_gradients |
      update_quadrature_points | update_JxW_values);
...
const unsigned int n_q_points = quadrature_formula.size();
...
if (cell->is_locally_owned())
  {
    ...
    fe_values.reinit(cell);
    ...
    for (unsigned int q_point = 0; q_point < n_q_points; ++q_point)
      {
        const double rhs_value =
          (fe_values.quadrature_point(q_point)[1] >
           ... fe_values.quadrature_point(q_point)[0]) ? ... ;
        for (unsigned int i = 0; i < dofs_per_cell; ++i)
          {
            for (unsigned int j = 0; j < dofs_per_cell; ++j)
              cell_matrix(i, j) += fe_values.shape_grad(i, q_point) *
                                   fe_values.shape_grad(j, q_point) *
                                   fe_values.JxW(q_point);
            cell_rhs(i) += rhs_value * fe_values.shape_value(i, q_point) *
                           fe_values.JxW(q_point);
          }
      }
    ...
    cell->get_dof_indices(local_dof_indices);
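The fragment stops short of the copy into the global system. In the usual step-40-style pattern (not reproduced on this page), the local matrix and right-hand side are distributed through the constraints, and the parallel objects are compressed once all locally owned cells have been visited; a sketch:

    // inside the cell loop, after get_dof_indices():
    constraints.distribute_local_to_global(cell_matrix,
                                           cell_rhs,
                                           local_dof_indices,
                                           system_matrix,
                                           system_rhs);

// after the loop over all locally owned cells:
system_matrix.compress(VectorOperation::add);
system_rhs.compress(VectorOperation::add);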
◆ solve()
Definition at line 246 of file step-40.cc.
LA::SolverCG solver(solver_control);
...
data.symmetric_operator = true;
...
    completely_distributed_solution,
...
pcout << " Solved in " << solver_control.last_step() << " iterations."
◆ refine_grid()
Definition at line 288 of file step-40.cc.
... estimated_error_per_cell);
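Only the tail of one call survives above. A condensed sketch of the usual step-40-style refinement step, using the Kelly error estimator and fixed-fraction refinement on the distributed triangulation; the quadrature degree and the refine/coarsen fractions (0.3 / 0.03) are assumptions for illustration:

template <int dim>
void LaplaceProblem<dim>::refine_grid()
{
  TimerOutput::Scope t(computing_timer, "refine");

  Vector<float> estimated_error_per_cell(triangulation.n_active_cells());
  KellyErrorEstimator<dim>::estimate(
    dof_handler,
    QGauss<dim - 1>(fe.degree + 1),
    std::map<types::boundary_id, const Function<dim> *>(), // no Neumann boundaries
    locally_relevant_solution,
    estimated_error_per_cell);

  parallel::distributed::GridRefinement::refine_and_coarsen_fixed_number(
    triangulation, estimated_error_per_cell, 0.3, 0.03);
  triangulation.execute_coarsening_and_refinement();
}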
◆ output_results()
Definition at line 308 of file step-40.cc.
for (unsigned int i = 0; i < subdomain.size(); ++i)
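The single loop line above fills a per-cell subdomain vector. For context, a condensed sketch of the usual step-40-style output routine; the field name "u", the output directory, and the file grouping parameters are illustrative assumptions:

template <int dim>
void LaplaceProblem<dim>::output_results(const unsigned int cycle) const
{
  DataOut<dim> data_out;
  data_out.attach_dof_handler(dof_handler);
  data_out.add_data_vector(locally_relevant_solution, "u"); // assumed field name

  // Record which MPI rank owns each cell so the partition can be visualized.
  Vector<float> subdomain(triangulation.n_active_cells());
  for (unsigned int i = 0; i < subdomain.size(); ++i)
    subdomain(i) = triangulation.locally_owned_subdomain();
  data_out.add_data_vector(subdomain, "subdomain");

  data_out.build_patches();

  // Each rank writes a .vtu piece; rank 0 also writes the .pvtu record.
  data_out.write_vtu_with_pvtu_record(
    "./", "solution", cycle, mpi_communicator, 2, 0); // assumed path and grouping
}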
◆ mpi_communicator
MPI_Comm mpi_communicator
◆ triangulation
parallel::distributed::Triangulation< dim > triangulation
◆ fe
FE_Q< dim > fe
◆ dof_handler
DoFHandler< dim > dof_handler
◆ locally_owned_dofs
IndexSet locally_owned_dofs
◆ locally_relevant_dofs
IndexSet locally_relevant_dofs
◆ constraints
AffineConstraints< double > constraints
◆ system_matrix
LA::MPI::SparseMatrix system_matrix
◆ locally_relevant_solution
LA::MPI::Vector locally_relevant_solution
◆ system_rhs
LA::MPI::Vector system_rhs
◆ pcout
ConditionalOStream pcout
◆ computing_timer
TimerOutput computing_timer
The documentation for this class was generated from the following file: step-40.cc