Chaste Commit::1fd4e48e3990e67db148bc1bc4cf6991a0049d0c
LinearParabolicPdeSystemWithCoupledOdeSystemSolver.hpp
/*

Copyright (c) 2005-2024, University of Oxford.
All rights reserved.

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the University of Oxford nor the names of its
   contributors may be used to endorse or promote products derived from this
   software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

*/
#ifndef LINEARPARABOLICPDESYSTEMWITHCOUPLEDODESYSTEMSOLVER_HPP_
#define LINEARPARABOLICPDESYSTEMWITHCOUPLEDODESYSTEMSOLVER_HPP_

#include "AbstractAssemblerSolverHybrid.hpp"
#include "AbstractDynamicLinearPdeSolver.hpp"
#include "AbstractLinearParabolicPdeSystemForCoupledOdeSystem.hpp"
#include "TetrahedralMesh.hpp"
#include "BoundaryConditionsContainer.hpp"
#include "AbstractOdeSystemForCoupledPdeSystem.hpp"
#include "CvodeAdaptor.hpp"
#include "BackwardEulerIvpOdeSolver.hpp"
#include "Warnings.hpp"
#include "VtkMeshWriter.hpp"

#include <boost/shared_ptr.hpp>

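/*
 * Solver for a coupled system of linear parabolic PDEs and, optionally, ODE systems
 * defined at each node of a tetrahedral mesh. At each PDE timestep the nodal ODE
 * systems are advanced first (with the PDE solution held fixed), and the updated ODE
 * state variables are then interpolated onto quadrature points and fed into the PDE
 * source terms. Results can be written to VTK at a user-specified sampling interval.
 */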
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM=ELEMENT_DIM, unsigned PROBLEM_DIM=1>
class LinearParabolicPdeSystemWithCoupledOdeSystemSolver
    : public AbstractAssemblerSolverHybrid<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM, NORMAL>,
      public AbstractDynamicLinearPdeSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>
{
private:

    /** Pointer to the mesh. */
    TetrahedralMesh<ELEMENT_DIM, SPACE_DIM>* mpMesh;

    /** The PDE system to be solved. */
    AbstractLinearParabolicPdeSystemForCoupledOdeSystem<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>* mpPdeSystem;

    /** The ODE systems defined at each node of the mesh (possibly empty). */
    std::vector<AbstractOdeSystemForCoupledPdeSystem*> mOdeSystemsAtNodes;

    /** The ODE state variables, interpolated at the current quadrature point. */
    std::vector<double> mInterpolatedOdeStateVariables;

    /** The solver used to advance the nodal ODE systems. */
    boost::shared_ptr<AbstractIvpOdeSolver> mpOdeSolver;

    /** The interval at which results are written to file. */
    double mSamplingTimeStep;

    /** Whether ODE systems are present (i.e. whether mOdeSystemsAtNodes is non-empty). */
    bool mOdeSystemsPresent;

    /** Meta results file for VTK. */
    out_stream mpVtkMetaFile;

    /** Whether to clear the output directory before writing results. */
    bool mClearOutputDirectory;

    /** Compute the contribution to the element stiffness matrix from a single quadrature point. */
    c_matrix<double, PROBLEM_DIM*(ELEMENT_DIM+1), PROBLEM_DIM*(ELEMENT_DIM+1)> ComputeMatrixTerm(
        c_vector<double, ELEMENT_DIM+1>& rPhi,
        c_matrix<double, SPACE_DIM, ELEMENT_DIM+1>& rGradPhi,
        ChastePoint<SPACE_DIM>& rX,
        c_vector<double, PROBLEM_DIM>& rU,
        c_matrix<double, PROBLEM_DIM, SPACE_DIM>& rGradU,
        Element<ELEMENT_DIM, SPACE_DIM>* pElement);

    /** Compute the contribution to the element right-hand-side vector from a single quadrature point. */
    c_vector<double, PROBLEM_DIM*(ELEMENT_DIM+1)> ComputeVectorTerm(
        c_vector<double, ELEMENT_DIM+1>& rPhi,
        c_matrix<double, SPACE_DIM, ELEMENT_DIM+1>& rGradPhi,
        ChastePoint<SPACE_DIM>& rX,
        c_vector<double, PROBLEM_DIM>& rU,
        c_matrix<double, PROBLEM_DIM, SPACE_DIM>& rGradU,
        Element<ELEMENT_DIM, SPACE_DIM>* pElement);

    /** Reset the interpolated ODE state variables prior to each quadrature-point loop. */
    void ResetInterpolatedQuantities();

    /** Accumulate the contribution of one node to the interpolated ODE state variables. */
    void IncrementInterpolatedQuantities(double phiI, const Node<SPACE_DIM>* pNode);

    /** Create the linear system object (with appropriate matrix preallocation) if it does not yet exist, and set its solver options. */
    void InitialiseForSolve(Vec initialSolution=NULL);

    /** Assemble the matrix and/or vector of the linear system for the current timestep. */
    void SetupLinearSystem(Vec currentSolution, bool computeMatrix);

public:

    /**
     * Constructor.
     *
     * @param pMesh pointer to the mesh
     * @param pPdeSystem pointer to the PDE system
     * @param pBoundaryConditions pointer to the boundary conditions container
     * @param odeSystemsAtNodes optional vector of pointers to ODE systems, one per mesh node
     * @param pOdeSolver optional ODE solver (defaults to CVODE if available, otherwise backward Euler)
     */
    LinearParabolicPdeSystemWithCoupledOdeSystemSolver(
        TetrahedralMesh<ELEMENT_DIM, SPACE_DIM>* pMesh,
        AbstractLinearParabolicPdeSystemForCoupledOdeSystem<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>* pPdeSystem,
        BoundaryConditionsContainer<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>* pBoundaryConditions,
        std::vector<AbstractOdeSystemForCoupledPdeSystem*> odeSystemsAtNodes=std::vector<AbstractOdeSystemForCoupledPdeSystem*>(),
        boost::shared_ptr<AbstractIvpOdeSolver> pOdeSolver=boost::shared_ptr<AbstractIvpOdeSolver>());

    /** Destructor. Deletes any ODE systems passed in to the constructor. */
    ~LinearParabolicPdeSystemWithCoupledOdeSystemSolver();

    /** Advance the nodal ODE systems over one PDE timestep, using the current PDE solution. */
    void PrepareForSetupLinearSystem(Vec currentPdeSolution);

    /** Set the output directory for VTK results (optionally clearing it first). */
    void SetOutputDirectory(std::string outputDirectory, bool clearDirectory=false);

    /** Set the interval at which results are written to file; must be at least the PDE timestep. */
    void SetSamplingTimeStep(double samplingTimeStep);

    /** Solve the coupled system over the time interval set via SetTimes(), writing VTK output at each sampling time. */
    void SolveAndWriteResultsToFile();

    /** Write the given solution (and any nodal ODE state variables) to a VTK file. */
    void WriteVtkResultsToFile(Vec solution, unsigned numTimeStepsElapsed);

    /** @return a pointer to the ODE system defined at a given node of the mesh. */
    AbstractOdeSystemForCoupledPdeSystem* GetOdeSystemAtNode(unsigned index);
};

///////////////////////////////////////////////////////////////////////////////////////////////
// Implementation
///////////////////////////////////////////////////////////////////////////////////////////////

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
c_matrix<double, PROBLEM_DIM*(ELEMENT_DIM+1), PROBLEM_DIM*(ELEMENT_DIM+1)> LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::ComputeMatrixTerm(
    c_vector<double, ELEMENT_DIM+1>& rPhi,
    c_matrix<double, SPACE_DIM, ELEMENT_DIM+1>& rGradPhi,
    ChastePoint<SPACE_DIM>& rX,
    c_vector<double, PROBLEM_DIM>& rU,
    c_matrix<double, PROBLEM_DIM, SPACE_DIM>& rGradU,
    Element<ELEMENT_DIM, SPACE_DIM>* pElement)
{
    double timestep_inverse = PdeSimulationTime::GetPdeTimeStepInverse();
    c_matrix<double, PROBLEM_DIM*(ELEMENT_DIM+1), PROBLEM_DIM*(ELEMENT_DIM+1)> matrix_term = zero_matrix<double>(PROBLEM_DIM*(ELEMENT_DIM+1), PROBLEM_DIM*(ELEMENT_DIM+1));

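    /*
     * Each PDE contributes an (ELEMENT_DIM+1) x (ELEMENT_DIM+1) block of the form
     *
     *     grad(phi_i) . D grad(phi_j) + (c/dt) phi_i phi_j,
     *
     * i.e. stiffness plus scaled mass, which is the backward Euler discretisation of
     * c du/dt = div(D grad u) + f. The blocks sit on the diagonal with the unknowns
     * striped node-by-node (row index i*PROBLEM_DIM + pde_index), so the PDEs couple
     * only through the source terms on the right-hand side.
     */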
    // Loop over PDEs and populate matrix_term
    for (unsigned pde_index=0; pde_index<PROBLEM_DIM; pde_index++)
    {
        double this_dudt_coefficient = mpPdeSystem->ComputeDuDtCoefficientFunction(rX, pde_index);
        c_matrix<double, SPACE_DIM, SPACE_DIM> this_pde_diffusion_term = mpPdeSystem->ComputeDiffusionTerm(rX, pde_index, pElement);
        c_matrix<double, 1*(ELEMENT_DIM+1), 1*(ELEMENT_DIM+1)> this_stiffness_matrix =
            prod(trans(rGradPhi), c_matrix<double, SPACE_DIM, ELEMENT_DIM+1>(prod(this_pde_diffusion_term, rGradPhi)))
            + timestep_inverse * this_dudt_coefficient * outer_prod(rPhi, rPhi);

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            for (unsigned j=0; j<ELEMENT_DIM+1; j++)
            {
                matrix_term(i*PROBLEM_DIM + pde_index, j*PROBLEM_DIM + pde_index) = this_stiffness_matrix(i,j);
            }
        }
    }
    return matrix_term;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
c_vector<double, PROBLEM_DIM*(ELEMENT_DIM+1)> LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::ComputeVectorTerm(
    c_vector<double, ELEMENT_DIM+1>& rPhi,
    c_matrix<double, SPACE_DIM, ELEMENT_DIM+1>& rGradPhi,
    ChastePoint<SPACE_DIM>& rX,
    c_vector<double, PROBLEM_DIM>& rU,
    c_matrix<double, PROBLEM_DIM, SPACE_DIM>& rGradU,
    Element<ELEMENT_DIM, SPACE_DIM>* pElement)
{
    double timestep_inverse = PdeSimulationTime::GetPdeTimeStepInverse();
    c_vector<double, PROBLEM_DIM*(ELEMENT_DIM+1)> vector_term;
    vector_term = zero_vector<double>(PROBLEM_DIM*(ELEMENT_DIM+1));

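    /*
     * Each PDE contributes (f + (c/dt) u_old) phi_i for every local basis function, where
     * f is the source term evaluated with the PDE solution rU and the ODE state variables
     * interpolated at this quadrature point, and u_old is the solution from the previous
     * timestep. This is the right-hand side of the backward Euler scheme whose left-hand
     * side is assembled in ComputeMatrixTerm().
     */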
    // Loop over PDEs and populate vector_term
    for (unsigned pde_index=0; pde_index<PROBLEM_DIM; pde_index++)
    {
        double this_dudt_coefficient = mpPdeSystem->ComputeDuDtCoefficientFunction(rX, pde_index);
        double this_source_term = mpPdeSystem->ComputeSourceTerm(rX, rU, mInterpolatedOdeStateVariables, pde_index);
        c_vector<double, ELEMENT_DIM+1> this_vector_term;
        this_vector_term = (this_source_term + timestep_inverse*this_dudt_coefficient*rU(pde_index)) * rPhi;

        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
            vector_term(i*PROBLEM_DIM + pde_index) = this_vector_term(i);
        }
    }

    return vector_term;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::ResetInterpolatedQuantities()
{
    mInterpolatedOdeStateVariables.clear();

    if (mOdeSystemsPresent)
    {
        unsigned num_state_variables = mOdeSystemsAtNodes[0]->GetNumberOfStateVariables();
        mInterpolatedOdeStateVariables.resize(num_state_variables, 0.0);
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::IncrementInterpolatedQuantities(double phiI, const Node<SPACE_DIM>* pNode)
{
    if (mOdeSystemsPresent)
    {
        unsigned num_state_variables = mOdeSystemsAtNodes[0]->GetNumberOfStateVariables();

        for (unsigned i=0; i<num_state_variables; i++)
        {
            mInterpolatedOdeStateVariables[i] += phiI * mOdeSystemsAtNodes[pNode->GetIndex()]->rGetStateVariables()[i];
        }
    }
}
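
/*
 * Together, ResetInterpolatedQuantities() and IncrementInterpolatedQuantities() evaluate the
 * ODE state variables at each quadrature point by standard finite element interpolation:
 * the assembler calls them so that mInterpolatedOdeStateVariables ends up holding
 * sum_over_element_nodes( phi_node(x_quad) * ode_state_at_node ), which ComputeVectorTerm()
 * then passes to the PDE source terms. Note that this assumes every nodal ODE system has
 * the same number of state variables.
 */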

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::InitialiseForSolve(Vec initialSolution)
{
    if (this->mpLinearSystem == NULL)
    {
        unsigned preallocation = mpMesh->CalculateMaximumContainingElementsPerProcess() + ELEMENT_DIM;
        if (ELEMENT_DIM > 1)
        {
            // Highest connectivity is closed
            preallocation--;
        }
        preallocation *= PROBLEM_DIM;

        /*
         * Use the current solution (ie the initial solution) as the
         * template in the alternative constructor of LinearSystem.
         * This is to avoid problems with VecScatter.
         */
        this->mpLinearSystem = new LinearSystem(initialSolution, preallocation);
    }

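    /*
     * The assembled matrix is a sum of (scaled) mass and stiffness blocks, so it is
     * symmetric, and positive definite for positive du/dt coefficients and diffusion
     * tensors; flagging it as symmetric and using the conjugate gradient Krylov solver
     * exploits this.
     */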
    assert(this->mpLinearSystem);
    this->mpLinearSystem->SetMatrixIsSymmetric(true);
    this->mpLinearSystem->SetKspType("cg");
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::SetupLinearSystem(Vec currentSolution, bool computeMatrix)
{
    this->SetupGivenLinearSystem(currentSolution, computeMatrix, this->mpLinearSystem);
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::LinearParabolicPdeSystemWithCoupledOdeSystemSolver(
        TetrahedralMesh<ELEMENT_DIM, SPACE_DIM>* pMesh,
        AbstractLinearParabolicPdeSystemForCoupledOdeSystem<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>* pPdeSystem,
        BoundaryConditionsContainer<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>* pBoundaryConditions,
        std::vector<AbstractOdeSystemForCoupledPdeSystem*> odeSystemsAtNodes,
        boost::shared_ptr<AbstractIvpOdeSolver> pOdeSolver)
    : AbstractAssemblerSolverHybrid<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM, NORMAL>(pMesh, pBoundaryConditions),
      AbstractDynamicLinearPdeSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>(pMesh),
      mpMesh(pMesh),
      mpPdeSystem(pPdeSystem),
      mOdeSystemsAtNodes(odeSystemsAtNodes),
      mpOdeSolver(pOdeSolver),
      mSamplingTimeStep(DOUBLE_UNSET),
      mOdeSystemsPresent(false),
      mClearOutputDirectory(false)
{
    this->mpBoundaryConditions = pBoundaryConditions;

    /*
     * If any ODE systems are passed in to the constructor, then we aren't just
     * solving a coupled PDE system, in which case the number of ODE system objects
     * must match the number of nodes in the finite element mesh.
     */
    if (!mOdeSystemsAtNodes.empty())
    {
        mOdeSystemsPresent = true;
        assert(mOdeSystemsAtNodes.size() == mpMesh->GetNumNodes());

        /*
         * In this case, if an ODE solver is not explicitly passed into the
         * constructor, then we create a default solver.
         */
        if (!mpOdeSolver)
        {
#ifdef CHASTE_CVODE
            mpOdeSolver.reset(new CvodeAdaptor);
#else
            mpOdeSolver.reset(new BackwardEulerIvpOdeSolver(mOdeSystemsAtNodes[0]->GetNumberOfStateVariables()));
#endif //CHASTE_CVODE
        }
    }
}

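/*
 * Example construction (a minimal sketch; MyPdeSystem and MyOdeSystem are hypothetical
 * concrete subclasses of AbstractLinearParabolicPdeSystemForCoupledOdeSystem and
 * AbstractOdeSystemForCoupledPdeSystem, assumed to be defined elsewhere):
 *
 *     TetrahedralMesh<2,2> mesh;
 *     mesh.ConstructRegularSlabMesh(0.1, 1.0, 1.0);
 *
 *     MyPdeSystem pde_system;
 *
 *     BoundaryConditionsContainer<2,2,1> bcc;
 *     bcc.DefineConstantDirichletOnMeshBoundary(&mesh, 1.0);
 *
 *     std::vector<AbstractOdeSystemForCoupledPdeSystem*> ode_systems;
 *     for (unsigned i=0; i<mesh.GetNumNodes(); i++)
 *     {
 *         ode_systems.push_back(new MyOdeSystem);
 *     }
 *
 *     // The solver takes ownership of the ODE systems and deletes them in its destructor
 *     LinearParabolicPdeSystemWithCoupledOdeSystemSolver<2,2,1> solver(&mesh, &pde_system, &bcc, ode_systems);
 */
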
template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::~LinearParabolicPdeSystemWithCoupledOdeSystemSolver()
{
    if (mOdeSystemsPresent)
    {
        for (unsigned i=0; i<mOdeSystemsAtNodes.size(); i++)
        {
            delete mOdeSystemsAtNodes[i];
        }
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::PrepareForSetupLinearSystem(Vec currentPdeSolution)
{
    if (mOdeSystemsPresent)
    {
        double time = PdeSimulationTime::GetTime();
        double next_time = PdeSimulationTime::GetNextTime();
        double dt = PdeSimulationTime::GetPdeTimeStep();

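        /*
         * This implements a simple (first-order) operator splitting: before each PDE step
         * the ODE system at every node is advanced over the whole PDE timestep with the
         * PDE solution held fixed at its current value; the PDE step is then assembled
         * using the updated ODE state variables (interpolated at quadrature points).
         */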
        ReplicatableVector soln_repl(currentPdeSolution);
        std::vector<double> current_soln_this_node(PROBLEM_DIM);

        // Loop over nodes
        for (unsigned node_index=0; node_index<mpMesh->GetNumNodes(); node_index++)
        {
            // Store the current solution to the PDE system at this node
            for (unsigned pde_index=0; pde_index<PROBLEM_DIM; pde_index++)
            {
                double current_soln_this_pde_this_node = soln_repl[PROBLEM_DIM*node_index + pde_index];
                current_soln_this_node[pde_index] = current_soln_this_pde_this_node;
            }

            // Pass it into the ODE system at this node
            mOdeSystemsAtNodes[node_index]->SetPdeSolution(current_soln_this_node);

            // Solve ODE system at this node
            mpOdeSolver->SolveAndUpdateStateVariable(mOdeSystemsAtNodes[node_index], time, next_time, dt);
        }
    }
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::SetOutputDirectory(std::string outputDirectory, bool clearDirectory)
{
    mClearOutputDirectory = clearDirectory;
    this->mOutputDirectory = outputDirectory;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::SetSamplingTimeStep(double samplingTimeStep)
{
    assert(samplingTimeStep >= this->mIdealTimeStep);
    mSamplingTimeStep = samplingTimeStep;
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::SolveAndWriteResultsToFile()
{
    // A number of methods must have been called prior to this method
    if (this->mOutputDirectory == "")
    {
        EXCEPTION("SetOutputDirectory() must be called prior to SolveAndWriteResultsToFile()");
    }
    if (this->mTimesSet == false)
    {
        EXCEPTION("SetTimes() must be called prior to SolveAndWriteResultsToFile()");
    }
    if (this->mIdealTimeStep <= 0.0)
    {
        EXCEPTION("SetTimeStep() must be called prior to SolveAndWriteResultsToFile()");
    }
    if (mSamplingTimeStep == DOUBLE_UNSET)
    {
        EXCEPTION("SetSamplingTimeStep() must be called prior to SolveAndWriteResultsToFile()");
    }
    if (!this->mInitialCondition)
    {
        EXCEPTION("SetInitialCondition() must be called prior to SolveAndWriteResultsToFile()");
    }

#ifdef CHASTE_VTK
    // Create a .pvd output file
    OutputFileHandler output_file_handler(this->mOutputDirectory, mClearOutputDirectory);
    mpVtkMetaFile = output_file_handler.OpenOutputFile("results.pvd");
    *mpVtkMetaFile << "<?xml version=\"1.0\"?>\n";
    *mpVtkMetaFile << "<VTKFile type=\"Collection\" version=\"0.1\" byte_order=\"LittleEndian\" compressor=\"vtkZLibDataCompressor\">\n";
    *mpVtkMetaFile << "    <Collection>\n";
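
    /*
     * The .pvd file is a ParaView collection: for each sampling time it records one entry
     * pointing at the corresponding results_<n>.vtu mesh file, which WriteVtkResultsToFile()
     * creates below. Opening results.pvd in ParaView therefore loads the whole time series.
     */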

    // Write initial condition to VTK
    Vec initial_condition = this->mInitialCondition;
    WriteVtkResultsToFile(initial_condition, 0);

    // The helper class TimeStepper deals with issues such as small final timesteps so we don't have to
    TimeStepper stepper(this->mTstart, this->mTend, mSamplingTimeStep);

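    /*
     * The main loop below advances in chunks of mSamplingTimeStep. Each call to Solve()
     * integrates the coupled system from stepper.GetTime() to stepper.GetNextTime(),
     * internally subdividing that interval into PDE timesteps of size mIdealTimeStep
     * (hence the requirement that the sampling step be no smaller than the PDE timestep),
     * and a VTK snapshot is written once per sample.
     */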
    // Main time loop
    while (!stepper.IsTimeAtEnd())
    {
        // Reset start and end times
        this->SetTimes(stepper.GetTime(), stepper.GetNextTime());

        // Solve the system up to the new end time
        Vec soln = this->Solve();

        // Reset the initial condition for the next timestep
        if (this->mInitialCondition != initial_condition)
        {
            PetscTools::Destroy(this->mInitialCondition);
        }
        this->mInitialCondition = soln;

        // Move forward in time
        stepper.AdvanceOneTimeStep();

        // Write solution to VTK
        WriteVtkResultsToFile(soln, stepper.GetTotalTimeStepsTaken());
    }

    // Restore saved initial condition to avoid user confusion!
    if (this->mInitialCondition != initial_condition)
    {
        PetscTools::Destroy(this->mInitialCondition);
    }
    this->mInitialCondition = initial_condition;

    // Close .pvd output file
    *mpVtkMetaFile << "    </Collection>\n";
    *mpVtkMetaFile << "</VTKFile>\n";
    mpVtkMetaFile->close();
#else //CHASTE_VTK
// LCOV_EXCL_START // We only test this in weekly builds
    WARNING("VTK is not installed and is required for this functionality");
// LCOV_EXCL_STOP
#endif //CHASTE_VTK
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
void LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::WriteVtkResultsToFile(Vec solution, unsigned numTimeStepsElapsed)
{
#ifdef CHASTE_VTK

    // Create a new VTK file for this time step
    std::stringstream time;
    time << numTimeStepsElapsed;
    VtkMeshWriter<ELEMENT_DIM, SPACE_DIM> mesh_writer(this->mOutputDirectory, "results_"+time.str(), false);

    /*
     * We first loop over PDEs. For each PDE we store the solution
     * at each node in a vector, then pass this vector to the mesh
     * writer.
     */
    ReplicatableVector solution_repl(solution);
    unsigned num_nodes = mpMesh->GetNumNodes();
    for (unsigned pde_index=0; pde_index<PROBLEM_DIM; pde_index++)
    {
        // Store the solution of this PDE at each node
        std::vector<double> pde_index_data;
        pde_index_data.resize(num_nodes, 0.0);
        for (unsigned node_index=0; node_index<num_nodes; node_index++)
        {
            pde_index_data[node_index] = solution_repl[PROBLEM_DIM*node_index + pde_index];
        }

        // Add this data to the mesh writer
        std::stringstream data_name;
        data_name << "PDE variable " << pde_index;
        mesh_writer.AddPointData(data_name.str(), pde_index_data);
    }

    if (mOdeSystemsPresent)
    {
        /*
         * We cannot loop over ODEs like PDEs, since the solutions are not
         * stored in one place. Therefore we build up a large 'vector of
         * vectors', then pass each component of this vector to the mesh
         * writer.
         */
        std::vector<std::vector<double> > ode_data;
        unsigned num_odes = mOdeSystemsAtNodes[0]->rGetStateVariables().size();
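        /*
         * Note: as in the interpolation code above, the number of ODE state variables is
         * taken from the ODE system at node 0, so every nodal ODE system is assumed to
         * expose the same set of state variables.
         */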
        ode_data.resize(num_odes);
        for (unsigned ode_index=0; ode_index<num_odes; ode_index++)
        {
            ode_data[ode_index].resize(num_nodes, 0.0);
        }

        for (unsigned node_index=0; node_index<num_nodes; node_index++)
        {
            std::vector<double> all_odes_this_node = mOdeSystemsAtNodes[node_index]->rGetStateVariables();
            for (unsigned i=0; i<num_odes; i++)
            {
                ode_data[i][node_index] = all_odes_this_node[i];
            }
        }

        for (unsigned ode_index=0; ode_index<num_odes; ode_index++)
        {
            std::vector<double> ode_index_data = ode_data[ode_index];

            // Add this data to the mesh writer
            std::stringstream data_name;
            data_name << "ODE variable " << ode_index;
            mesh_writer.AddPointData(data_name.str(), ode_index_data);
        }
    }

    mesh_writer.WriteFilesUsingMesh(*mpMesh);
    *mpVtkMetaFile << "        <DataSet timestep=\"";
    *mpVtkMetaFile << numTimeStepsElapsed;
    *mpVtkMetaFile << "\" group=\"\" part=\"0\" file=\"results_";
    *mpVtkMetaFile << numTimeStepsElapsed;
    *mpVtkMetaFile << ".vtu\"/>\n";
#endif // CHASTE_VTK
}

template<unsigned ELEMENT_DIM, unsigned SPACE_DIM, unsigned PROBLEM_DIM>
AbstractOdeSystemForCoupledPdeSystem* LinearParabolicPdeSystemWithCoupledOdeSystemSolver<ELEMENT_DIM, SPACE_DIM, PROBLEM_DIM>::GetOdeSystemAtNode(unsigned index)
{
    return mOdeSystemsAtNodes[index];
}

#endif /*LINEARPARABOLICPDESYSTEMWITHCOUPLEDODESYSTEMSOLVER_HPP_*/