Chaste Commit::675f9facbe008c5eacb9006feaeb6423206579ea
DistributedTetrahedralMesh.cpp
1/*
2
3Copyright (c) 2005-2025, University of Oxford.
4All rights reserved.
5
6University of Oxford means the Chancellor, Masters and Scholars of the
7University of Oxford, having an administrative office at Wellington
8Square, Oxford OX1 2JD, UK.
9
10This file is part of Chaste.
11
12Redistribution and use in source and binary forms, with or without
13modification, are permitted provided that the following conditions are met:
14 * Redistributions of source code must retain the above copyright notice,
15 this list of conditions and the following disclaimer.
16 * Redistributions in binary form must reproduce the above copyright notice,
17 this list of conditions and the following disclaimer in the documentation
18 and/or other materials provided with the distribution.
19 * Neither the name of the University of Oxford nor the names of its
20 contributors may be used to endorse or promote products derived from this
21 software without specific prior written permission.
22
23THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33
34*/
35
36#include "DistributedTetrahedralMesh.hpp"
37
38#include <cassert>
39#include <sstream>
40#include <string>
41#include <iterator>
42#include <algorithm>
43#include <boost/scoped_array.hpp>
44
45#include "Exception.hpp"
46#include "Element.hpp"
47#include "BoundaryElement.hpp"
48
49#include "PetscTools.hpp"
50#include "PetscMatTools.hpp"
51#include "DistributedVectorFactory.hpp"
52#include "OutputFileHandler.hpp"
53#include "NodePartitioner.hpp"
54
55#include "RandomNumberGenerator.hpp"
56
57#include "Timer.hpp"
58#include "TetrahedralMesh.hpp"
59#include "Warnings.hpp"
60
61#include "petscao.h"
62
64// IMPLEMENTATION
66
67 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructor: records the requested partitioning strategy and zero-initialises the
// global element/boundary-element/node counters.
// NOTE(review): original line 68 (the constructor signature, presumably
// DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod))
// is missing from this listing — restore from upstream before compiling.
 69     :
 70     mTotalNumElements(0u),
 71     mTotalNumBoundaryElements(0u),
 72     mTotalNumNodes(0u),
 73     mpSpaceRegion(nullptr),
 74     mPartitioning(partitioningMethod)
 75 {
    // In 1D the graph-based partitioners cannot be used: anything other than
    // GEOMETRIC silently falls back to a DUMB (contiguous-index) partition.
 76     if (ELEMENT_DIM == 1 && (partitioningMethod != DistributedTetrahedralMeshPartitionType::GEOMETRIC))
 77     {
 78         //No partition is possible - revert to DUMB
 79         mPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
 80     }
 81 }
82
 83 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Destructor: frees the halo nodes, which are owned by this class (the locally-owned
// nodes in mNodes are deleted by the base class destructor, not here).
// NOTE(review): original line 84 (the destructor signature) is missing from this listing.
 85 {
 86     for (unsigned i=0; i<this->mHaloNodes.size(); i++)
 87     {
 88         delete this->mHaloNodes[i];
 89     }
 90 }
91
92template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
98
 99 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Computes which nodes, halo nodes and elements this process owns, filling the
// output sets according to the partitioning strategy selected in mPartitioning.
// rProcessorsOffset is filled with the first global node index owned by each rank.
// NOTE(review): original lines 100-101 (the function signature and its
// AbstractMeshReader& rMeshReader parameter) are missing from this listing.
102     std::set<unsigned>& rNodesOwned,
103     std::set<unsigned>& rHaloNodesOwned,
104     std::set<unsigned>& rElementsOwned,
105     std::vector<unsigned>& rProcessorsOffset)
106 {
    // METIS is no longer supported as a direct library call.
107     if (mPartitioning == DistributedTetrahedralMeshPartitionType::METIS_LIBRARY)
108     {
109         EXCEPTION("METIS partitioning is deprecated. Please use PARMETIS_LIBRARY for parMETIS (or the parMETIS interface to PT-Scotch).");
110     }
    // PETSC_MAT_PARTITION needs the PETSc/parMETIS interface; degrade gracefully if absent.
111     if (mPartitioning == DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && !PetscTools::HasParMetis())
112     {
113         // The following warning can only be reproduced on machines which do not have the PETSc/parMETIS interface.
114// LCOV_EXCL_START
115         WARNING("PETSc/parMETIS partitioning requires PETSc to be configured with parMETIS as an option. Current install has PETSc and parMETIS installed independently. Switching to parMETIS");
116         mPartitioning = DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY;
117// LCOV_EXCL_STOP
118     }
120     if (mPartitioning==DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY && PetscTools::IsParallel())
121     {
122         /*
123          * With ParMetisLibraryNodeAndElementPartitioning we compute the element partition first
124          * and then we work out the node ownership.
125          */
126         ParMetisLibraryNodeAndElementPartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
127     }
128     else
129     {
130         /*
131          * Otherwise we compute the node partition and then we work out element distribution
132          */
133         if (mPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
134         {
135             NodePartitioner<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset);
136         }
137         else if (mPartitioning==DistributedTetrahedralMeshPartitionType::GEOMETRIC && PetscTools::IsParallel())
138         {
139             if (!mpSpaceRegion)
140             {
141                 EXCEPTION("Using GEOMETRIC partition for DistributedTetrahedralMesh with local regions not set. Call SetProcessRegion(ChasteCuboid)");
142             }
143             NodePartitioner<ELEMENT_DIM, SPACE_DIM>::GeometricPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset, mpSpaceRegion);
144         }
145         else
146         {
            // NOTE(review): original line 147 is missing from this listing — presumably the
            // default (dumb) node partitioning call; confirm against upstream.
148         }
149
        // With an NCL (node-containing-elements) file we can read ownership directly;
        // otherwise we must scan every element to decide ownership and halo nodes.
150         if (rMeshReader.HasNclFile())
151         {
152             // Form a set of all the element indices we are going to own
153             // (union of the sets from the lines in the NCL file)
154             for (std::set<unsigned>::iterator iter = rNodesOwned.begin();
155                  iter != rNodesOwned.end();
156                  ++iter)
157             {
158                 std::vector<unsigned> containing_elements = rMeshReader.GetContainingElementIndices( *iter );
159                 rElementsOwned.insert( containing_elements.begin(), containing_elements.end() );
160             }
161
162             // Iterate through that set rather than mTotalNumElements (knowing that we own a least one node in each line)
163             // Then read all the data into a node_index set
164             std::set<unsigned> node_index_set;
165
166             for (std::set<unsigned>::iterator iter = rElementsOwned.begin();
167                  iter != rElementsOwned.end();
168                  ++iter)
169             {
170                 ElementData element_data = rMeshReader.GetElementData(*iter);
171                 node_index_set.insert( element_data.NodeIndices.begin(), element_data.NodeIndices.end() );
            // NOTE(review): original line 172 (closing brace of the loop above) is missing
            // from this listing.
173
174             // Subtract off the rNodesOwned set to produce rHaloNodesOwned.
175             // Note that rNodesOwned is a subset of node_index_set.
176             std::set_difference(node_index_set.begin(), node_index_set.end(),
177                                 rNodesOwned.begin(), rNodesOwned.end(),
178                                 std::inserter(rHaloNodesOwned, rHaloNodesOwned.begin()));
179         }
180         else
181         {
182             for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
            // NOTE(review): original line 183 (opening brace of this loop) is missing.
184                 ElementData element_data = rMeshReader.GetNextElementData();
185
186                 bool element_owned = false;
187                 std::set<unsigned> temp_halo_nodes;
188
189                 for (std::vector<unsigned>::const_iterator it = element_data.NodeIndices.begin();
190                      it != element_data.NodeIndices.end();
191                      ++it)
192                 {
                    // An element is owned as soon as any of its nodes is owned;
                    // nodes of owned elements that we do NOT own become halo candidates.
193                     if (rNodesOwned.find(*it) != rNodesOwned.end())
                    // NOTE(review): original line 194 (opening brace) is missing.
195                         element_owned = true;
196                         rElementsOwned.insert(element_number);
197                     }
198                     else
                    // NOTE(review): original line 199 (opening brace) is missing.
200                         temp_halo_nodes.insert(*it);
201                     }
202                 }
203
204                 if (element_owned)
205                 {
206                     rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
207                 }
208             }
209         }
210
211         if (mPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
212         {
            // NOTE(review): original lines 213-214 are missing — presumably a master-rank
            // guard around the timing print below; confirm against upstream.
215             {
216                 Timer::PrintAndReset("Element and halo node assignation");
217             }
218         }
219     }
    // Leave the reader ready for the subsequent sequential read in ConstructFromMeshReader.
220     rMeshReader.Reset();
221 }
223 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Builds the distributed mesh from a mesh reader: partitions the mesh, then loads
// only the locally-owned nodes/halo nodes/elements/boundary elements on each process,
// and finally sets up the DistributedVectorFactory (reordering node indices when a
// permuting partition was used).
// NOTE(review): original lines 224-225 (the function signature taking
// AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>& rMeshReader) are missing from this listing.
226 {
227     std::set<unsigned> nodes_owned;
228     std::set<unsigned> halo_nodes_owned;
229     std::set<unsigned> elements_owned;
230     std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());
231
232     this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
233     mTotalNumElements = rMeshReader.GetNumElements();
234     mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
235     mTotalNumNodes = rMeshReader.GetNumNodes();
    // NOTE(review): original lines 236, 238 and 241 are missing from this listing.
237
239     Timer::Reset();
240     ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
242     //Timer::Print("partitioning");
243
244     // Reserve memory
245     this->mElements.reserve(elements_owned.size());
246     this->mNodes.reserve(nodes_owned.size());
247
248     if (rMeshReader.IsFileFormatBinary())
249     {
        // NOTE(review): original lines 250-251 are missing from this listing.
252         std::vector<double> coords;
253         // Binary : load only the nodes which are needed
254         for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(nodes_owned);
255              node_it != rMeshReader.GetNodeIteratorEnd();
256              ++node_it)
257         {
258             // Loop over wholly-owned nodes
259             unsigned global_node_index = node_it.GetIndex();
260             coords = *node_it;
261             RegisterNode(global_node_index);
262             Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, coords, false);
263
264//             Node attributes in binary format are not yet supported, see #1730
265//            for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
266//            {
267//                double attribute = rMeshReader.GetNodeAttributes()[i];
268//                p_node->AddNodeAttribute(attribute);
269//            }
270
271             this->mNodes.push_back(p_node);
272         }
273         for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(halo_nodes_owned);
274              node_it != rMeshReader.GetNodeIteratorEnd();
275              ++node_it)
276         {
277             // Loop over halo-owned nodes
278             unsigned global_node_index = node_it.GetIndex();
279             coords = *node_it;
280             RegisterHaloNode(global_node_index);
281             mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
282         }
283     }
284     else
285     {
286         // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
        // NOTE(review): original lines 287, 289 and 291 are missing from this listing
        // (comment, opening brace of the loop, and possibly a comment line).
288         for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
290             std::vector<double> coords;
292             coords = rMeshReader.GetNextNode();
293
294             // The node is owned by the processor
295             if (nodes_owned.find(node_index) != nodes_owned.end())
296             {
297                 RegisterNode(node_index);
298                 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, coords, false);
299
300                 for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
301                 {
302                     double attribute = rMeshReader.GetNodeAttributes()[i];
303                     p_node->AddNodeAttribute(attribute);
304                 }
305
306                 this->mNodes.push_back(p_node);
307             }
308
309             // The node is a halo node in this processor
310             if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
311             {
312                 RegisterHaloNode(node_index);
313                 mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
314             }
315         }
316     }
317
    // Load the owned elements; nodes (owned or halo) are all present by now.
    // NOTE(review): original line 318 is missing — presumably the for-statement header
    // declaring the ElementIterator elem_it; confirm against upstream.
319          = rMeshReader.GetElementIteratorBegin(elements_owned);
320          elem_it != rMeshReader.GetElementIteratorEnd();
321          ++elem_it)
322     {
323         ElementData element_data = *elem_it;
324         unsigned global_element_index = elem_it.GetIndex();
325
326         std::vector<Node<SPACE_DIM>*> nodes;
327         for (unsigned j=0; j<ELEMENT_DIM+1; j++)
        // NOTE(review): original line 328 (opening brace) is missing.
329             // Because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
330             nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
331         }
332
333         RegisterElement(global_element_index);
334         Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
335         this->mElements.push_back(p_element);
336
337         if (rMeshReader.GetNumElementAttributes() > 0)
        // NOTE(review): original line 338 (opening brace) is missing.
339             assert(rMeshReader.GetNumElementAttributes() == 1);
340             double attribute_value = element_data.AttributeValue;
341             p_element->SetAttribute(attribute_value);
342         }
343     }
344
345     // Boundary nodes and elements
346     try
    // NOTE(review): original line 347 (opening brace of the try block) is missing.
348         for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
349         {
350             ElementData face_data = rMeshReader.GetNextFaceData();
351             std::vector<unsigned> node_indices = face_data.NodeIndices;
352
353             bool own = false;
354
            // Keep a face only if at least one of its nodes is wholly owned here.
355             for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
            // NOTE(review): original line 356 (opening brace) is missing.
357                 // if I own this node
358                 if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
359                 {
360                     own = true;
361                     break;
362                 }
363             }
364
365             if (!own)
            // NOTE(review): original line 366 (opening brace) is missing.
367                 continue;
368             }
369
370             // Determine if this is a boundary face
371             //std::set<unsigned> containing_element_indices; // Elements that contain this face
372             std::vector<Node<SPACE_DIM>*> nodes;
373
374             for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
375             {
376                 //because we have populated mNodes and mHaloNodes above, we can now use this method,
377                 //which SHOULD never throw (but it does).
378                 try
379                 {
380                     nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
381                 }
382                 catch (Exception &)
383                 {
384                     EXCEPTION("Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")");
385                 }
386             }
387
388             // This is a boundary face
389             // Ensure all its nodes are marked as boundary nodes
390             for (unsigned j=0; j<nodes.size(); j++)
391             {
392                 if (!nodes[j]->IsBoundaryNode())
393                 {
394                     nodes[j]->SetAsBoundaryNode();
395                     this->mBoundaryNodes.push_back(nodes[j]);
396                 }
397                 // Register the index that this boundary element will have with the node
398                 nodes[j]->AddBoundaryElement(face_index);
399             }
400
401             RegisterBoundaryElement(face_index);
402             BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
403             this->mBoundaryElements.push_back(p_boundary_element);
        // NOTE(review): original line 404 is missing from this listing.
405             if (rMeshReader.GetNumFaceAttributes() > 0)
406             {
407                 assert(rMeshReader.GetNumFaceAttributes() == 1);
408                 p_boundary_element->SetAttribute(face_data.AttributeValue);
409             }
410         }
    // NOTE(review): original line 411 (closing brace of the try block) is missing.
412     catch (Exception &e)
413     {
        // Make sure every process raises before re-throwing, so the collective does not deadlock.
414         PetscTools::ReplicateException(true); //Bad face exception
415         throw e;
416     }
    // NOTE(review): original line 417 is missing — presumably
    // PetscTools::ReplicateException(false); confirm against upstream.
418
419     if (mPartitioning != DistributedTetrahedralMeshPartitionType::DUMB && PetscTools::IsParallel())
420     {
421         assert(this->mNodePermutation.size() != 0);
422         // If we are partitioning (and permuting) a mesh, we need to be certain that we aren't doing it twice
423         assert(rMeshReader.HasNodePermutation() == false);
424
425         // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
426         ReorderNodes();
427
428         unsigned num_owned;
429         unsigned rank = PetscTools::GetMyRank();
        // NOTE(review): original line 430 is missing — presumably the condition guarding
        // the proc_offsets[rank+1] access (e.g. a not-top-most-rank test); confirm upstream.
431         {
432             num_owned =  proc_offsets[rank+1]-proc_offsets[rank];
433         }
434         else
435         {
436             num_owned = mTotalNumNodes - proc_offsets[rank];
437         }
438
439         assert(!this->mpDistributedVectorFactory);
440         this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
441     }
442     else
443     {
444         // Dumb or sequential partition
445         assert(this->mpDistributedVectorFactory);
446
447         if (rMeshReader.HasNodePermutation())
448         {
449             // This is probably an unarchiving operation where the original run applied a permutation to the mesh
450             // We need to re-record that the permutation has happened (so that we can archive it correctly later).
451             this->mNodePermutation = rMeshReader.rGetNodePermutation();
452         }
453     }
454     rMeshReader.Reset();
455 }
456
457 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the number of nodes wholly owned by this process (halo nodes excluded).
// NOTE(review): original line 458 (the function signature) is missing from this listing.
459 {
460     return this->mNodes.size();
461 }
462
463 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the number of halo nodes held by this process.
// NOTE(review): original line 464 (the function signature) is missing from this listing.
465 {
466     return this->mHaloNodes.size();
467 }
468
469 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the number of elements owned by this process.
// NOTE(review): original line 470 (the function signature) is missing from this listing.
471 {
472     return this->mElements.size();
473 }
474
475 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the number of boundary elements owned by this process.
// NOTE(review): original line 476 (the function signature) is missing from this listing.
477 {
478     return this->mBoundaryElements.size();
479 }
480
481 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the total (global, across all processes) number of nodes in the mesh.
// NOTE(review): original line 482 (the function signature) is missing from this listing.
483 {
484     return mTotalNumNodes;
485 }
486
487 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the global node count; identical to GetNumNodes() here since a distributed
// mesh keeps no deleted-node bookkeeping.
// NOTE(review): original line 488 (the function signature) is missing from this listing.
489 {
490     return mTotalNumNodes;
491 }
492
493 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the total (global) number of elements in the mesh.
// NOTE(review): original line 494 (the function signature) is missing from this listing.
495 {
496     return mTotalNumElements;
497 }
498
499template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
504
505 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the total (global) number of boundary elements (faces) in the mesh.
// NOTE(review): original line 506 (the function signature) is missing from this listing.
507 {
508     return mTotalNumBoundaryElements;
509 }
510
511 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Fills rHaloIndices with the global indices of this process's halo nodes,
// clearing any previous contents first.
// NOTE(review): original line 512 (the function signature) is missing from this listing.
513 {
514     //Make sure the output vector is empty
515     rHaloIndices.clear();
516     for (unsigned i=0; i<mHaloNodes.size(); i++)
517     {
518         rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
519     }
520 }
521
522 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the spatial region assigned to this process (used by GEOMETRIC
// partitioning); throws if no region has been set via SetProcessRegion.
// NOTE(review): original line 523 (the function signature) is missing from this listing.
524 {
525     if (mpSpaceRegion == nullptr)
526     {
527         EXCEPTION("Trying to get unset mpSpaceRegion");
528     }
529     return mpSpaceRegion;
530 }
531
532 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Intentionally a no-op: every element stored locally is owned by this process,
// so no per-element ownership flags need setting (unlike the sequential mesh).
// NOTE(review): original line 533 (the function signature) is missing from this listing.
534 {
535     // All the local elements are owned by the processor (obviously...)
536     //Does nothing - unlike the non-distributed version
537 }
538
539template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
544
545 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns whether this process is the designated owner of the given element,
// using exception-on-miss from the lookup to answer "false" for non-local elements.
// NOTE(review): original lines 546 (signature) and 550 (the return statement inside
// the try block) are missing from this listing — restore from upstream.
547 {
548     try
549     {
551     }
552     catch(Exception&)      // we don't own the element
553     {
554         return false;
555     }
556 }
557
558 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns whether this process is the designated owner of the given boundary
// element (face); a failed lookup is converted into "false" via the catch.
// NOTE(review): original lines 559 (signature) and 563 (the return statement inside
// the try block) are missing from this listing — restore from upstream.
560 {
561     try
562     {
564     }
565     catch(Exception&)      //  we don't own the face
566     {
567         return false;
568     }
569 }
570
571 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local mapping for a node about to be appended to mNodes
// (called before the push_back, so the current size is the node's local index).
// NOTE(review): original line 572 (the function signature) is missing from this listing.
573 {
574     mNodesMapping[index] = this->mNodes.size();
575 }
576
577 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local mapping for a halo node about to be appended to mHaloNodes.
// NOTE(review): original line 578 (the function signature) is missing from this listing.
579 {
580     mHaloNodesMapping[index] = mHaloNodes.size();
581 }
582
583 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local mapping for an element about to be appended to mElements.
// NOTE(review): original line 584 (the function signature) is missing from this listing.
585 {
586     mElementsMapping[index] = this->mElements.size();
587 }
588
589 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local mapping for a boundary element about to be appended
// to mBoundaryElements.
// NOTE(review): original line 590 (the function signature) is missing from this listing.
591 {
592     mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
593 }
594
595 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global node index into this process's local index in mNodes;
// throws if the node is not wholly owned here (halo nodes are NOT found by this).
// NOTE(review): original line 596 (the function signature) is missing from this listing.
597 {
598     std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
599
600     if (node_position == mNodesMapping.end())
601     {
602         EXCEPTION("Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank());
603     }
604     return node_position->second;
605 }
606
607//template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
608//unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
609//{
610// assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
611// return mHaloNodesMapping[index];
612//}
613
614 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global element index into this process's local index in mElements;
// throws if the element is not owned by this process.
// NOTE(review): original line 615 (the function signature) is missing from this listing.
616 {
617     std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
618
619     if (element_position == mElementsMapping.end())
620     {
621         EXCEPTION("Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank());
622     }
623
624     return element_position->second;
625 }
626
627 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global boundary-element index into this process's local index in
// mBoundaryElements; throws if the boundary element is not owned by this process.
// NOTE(review): original line 628 (the function signature) is missing from this listing.
629 {
630     std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
631
632     if (boundary_element_position == mBoundaryElementsMapping.end())
633     {
634         EXCEPTION("Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank());
635     }
636
637     return boundary_element_position->second;
638 }
639
640 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns a pointer to the node with the given global index, whether it is a halo
// node or a wholly-owned node; throws if it is neither. The halo map is searched
// first because it is expected to be smaller.
// NOTE(review): original line 641 (the function signature) is missing from this listing.
642 {
643     std::map<unsigned, unsigned>::const_iterator node_position;
644     // First search the halo (expected to be a smaller map so quicker)
645     if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
646     {
647         return mHaloNodes[node_position->second];
648     }
649     // Next search the owned node
650     if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
651     {
652         //Found an owned node
653         return this->mNodes[node_position->second];
654     }
655     // Not here
656     EXCEPTION("Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank());
657 }
658
659 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Applies mNodePermutation to both owned and halo nodes: each node's global index
// is replaced by its permuted value, and the global->local maps are rebuilt so each
// process ends up owning a contiguous range of global indices. Only meaningful (and
// only called) in parallel runs.
// NOTE(review): original line 660 (the function signature) is missing from this listing.
661 {
662     assert(PetscTools::IsParallel());
663
664     // Need to rebuild global-local maps
665     mNodesMapping.clear();
666     mHaloNodesMapping.clear();
667
668     // Update indices
669     for (unsigned index=0; index<this->mNodes.size(); index++)
670     {
671         unsigned old_index = this->mNodes[index]->GetIndex();
672         unsigned new_index = this->mNodePermutation[old_index];
673
674         this->mNodes[index]->SetIndex(new_index);
675         mNodesMapping[new_index] = index;
676     }
677
678     for (unsigned index=0; index<mHaloNodes.size(); index++)
679     {
680         unsigned old_index = mHaloNodes[index]->GetIndex();
681         unsigned new_index = this->mNodePermutation[old_index];
682
683         mHaloNodes[index]->SetIndex(new_index);
684         mHaloNodesMapping[new_index] = index;
685     }
686 }
687
688 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructs a 1D line mesh of `width` elements (width+1 nodes) directly in
// distributed form: either via a GEOMETRIC partition (write a serial mesh to disk,
// then re-read it with the geometric partitioner) or via a DUMB slice partition
// built from a DistributedVectorFactory, with one halo node on each interior
// process boundary.
// NOTE(review): original line 689 (the function signature, taking unsigned width)
// is missing from this listing.
690 {
691     assert(ELEMENT_DIM == 1);     // LCOV_EXCL_LINE
692
693     //Check that there are enough nodes to make the parallelisation worthwhile
694     if (width==0)
695     {
696         EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
697     }
698
699     // Hook to pick up when we are using a geometric partition.
700     if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
701     {
702         if (!mpSpaceRegion)
703         {
704             EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
705         }
706
707         // Write a serial file, the load on distributed processors.
        // NOTE(review): original lines 708 (presumably a master-rank guard), 711
        // (presumably the declaration of base_mesh) and 715 (presumably a barrier)
        // are missing from this listing — confirm against upstream.
709         {
710             TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_linear_mesh");
712             base_mesh.ConstructLinearMesh(width);
713             mesh_writer.WriteFilesUsingMesh(base_mesh);
714         }
716
717         OutputFileHandler output_handler("", false);
718
719         std::string output_dir = output_handler.GetOutputDirectoryFullPath();
720         TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_linear_mesh");
721
722         this->ConstructFromMeshReader(mesh_reader);
723     }
724     else    // use a default partition.
725     {
726         //Use dumb partition so that archiving doesn't permute anything
727         mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
728         mTotalNumNodes=width+1;
729         mTotalNumBoundaryElements=2u;
730         mTotalNumElements=width;
731
732         //Use DistributedVectorFactory to make a dumb partition of the nodes
733         assert(!this->mpDistributedVectorFactory);
734         this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
735         if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
736         {
737             // It's a short mesh and this process owns no nodes.
738             // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
739             return;  //LCOV_EXCL_LINE
740         }
741
742         /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
743          * higher numbered process may have dropped out of this construction altogether
744          * (because is has no local ownership)
745          */
746         bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
747
748         unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
749         unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
        // NOTE(review): original line 750 is missing — presumably the guard
        // (not the lowest-numbered process) for extending the range downwards.
751         {
752             //Allow for a halo node
753             lo_node--;
754         }
755         if (!am_top_most)
756         {
757             //Allow for a halo node
758             hi_node++;
759         }
760         Node<SPACE_DIM>* p_old_node=nullptr;
761         for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
762         {
763             // create node or halo-node
764             Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
765             if (node_index<this->mpDistributedVectorFactory->GetLow() ||
766                 node_index==this->mpDistributedVectorFactory->GetHigh() )
767             {
768                 //Beyond left or right it's a halo node
769                 RegisterHaloNode(node_index);
770                 mHaloNodes.push_back(p_node);
771             }
772             else
773             {
774                 RegisterNode(node_index);
775                 this->mNodes.push_back(p_node); // create node
776
777                 //A boundary face has to be wholely owned by the process
778                 //Since, when ELEMENT_DIM>1 we have *at least* boundary node as a non-halo
779                 if (node_index==0) // create left boundary node and boundary element
780                 {
781                     this->mBoundaryNodes.push_back(p_node);
782                     RegisterBoundaryElement(0);
783                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
784                 }
785                 if (node_index==width) // create right boundary node and boundary element
786                 {
787                     this->mBoundaryNodes.push_back(p_node);
788                     RegisterBoundaryElement(1);
789                     this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
790                 }
791             }
792             if (node_index>lo_node) // create element
793             {
794                 std::vector<Node<SPACE_DIM>*> nodes;
795                 nodes.push_back(p_old_node);
796                 nodes.push_back(p_node);
797                 RegisterElement(node_index-1);
798                 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
799             }
800             //Keep track of the node which we've just created
801             p_old_node=p_node;
802         }
803     }
804 }
805
806 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructs a 2D rectangular mesh of width x height squares, each split into two
// triangles, directly in distributed form. GEOMETRIC partitioning round-trips a
// serial mesh through disk; otherwise a DUMB partition hands each process complete
// horizontal slices (with one halo row on each interior boundary), then builds
// nodes, the four boundary edges, and the triangulated elements.
// NOTE(review): original line 807 (the function signature, presumably taking
// unsigned width, unsigned height, bool stagger) is missing from this listing.
808 {
809     assert(SPACE_DIM == 2);     // LCOV_EXCL_LINE
810     assert(ELEMENT_DIM == 2);     // LCOV_EXCL_LINE
811     //Check that there are enough nodes to make the parallelisation worthwhile
812     if (height==0)
813     {
814         EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
815     }
816
817     // Hook to pick up when we are using a geometric partition.
818     if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
819     {
820         if (!mpSpaceRegion)
821         {
822             EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
823         }
824
825         // Write a serial file, the load on distributed processors.
        // NOTE(review): original lines 826 (presumably a master-rank guard), 829
        // (presumably the declaration of base_mesh) and 833 (presumably a barrier)
        // are missing from this listing — confirm against upstream.
827         {
828             TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_rectangular_mesh");
830             base_mesh.ConstructRectangularMesh(width, height);
831             mesh_writer.WriteFilesUsingMesh(base_mesh);
832         }
834
835         OutputFileHandler output_handler("", false);
836
837         std::string output_dir = output_handler.GetOutputDirectoryFullPath();
838         TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_rectangular_mesh");
839
840         this->ConstructFromMeshReader(mesh_reader);
841     }
842     else
843     {
844         //Use dumb partition so that archiving doesn't permute anything
845         mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
846
847         mTotalNumNodes=(width+1)*(height+1);
848         mTotalNumBoundaryElements=(width+height)*2;
849         mTotalNumElements=width*height*2;
850
851         //Use DistributedVectorFactory to make a dumb partition of space
852         DistributedVectorFactory y_partition(height+1);
853         unsigned lo_y = y_partition.GetLow();
854         unsigned hi_y = y_partition.GetHigh();
855         //Dumb partition of nodes has to be such that each process gets complete slices
856         assert(!this->mpDistributedVectorFactory);
857         this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
858         if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
859         {
860             // It's a short mesh and this process owns no nodes.
861             // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
862             return;  //LCOV_EXCL_LINE
863         }
864
865         /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
866          * higher numbered process may have dropped out of this construction altogether
867          * (because is has no local ownership)
868          */
869         bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
870
871
        // NOTE(review): original line 872 is missing — presumably the guard
        // (not the lowest-numbered process) for extending lo_y downwards.
873         {
874             //Allow for a halo node
875             lo_y--;
876         }
877         if (!am_top_most)
878         {
879             //Allow for a halo node
880             hi_y++;
881         }
882
883         //Construct the nodes
884         for (unsigned j=lo_y; j<hi_y; j++)
885         {
886             for (unsigned i=0; i<width+1; i++)
887             {
888                 bool is_boundary=false;
889                 if (i==0 || j==0 || i==width || j==height)
890                 {
891                     is_boundary=true;
892                 }
893                 unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
894                 Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
895                 if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
896                 {
897                     //Beyond left or right it's a halo node
898                     RegisterHaloNode(global_node_index);
899                     mHaloNodes.push_back(p_node);
900                 }
901                 else
902                 {
903                     RegisterNode(global_node_index);
904                     this->mNodes.push_back(p_node);
905                 }
906                 if (is_boundary)
907                 {
908                     this->mBoundaryNodes.push_back(p_node);
909                 }
910             }
911         }
912
913         //Construct the boundary elements
914         unsigned belem_index;
915         //Top
916         if (am_top_most)
917         {
918            for (unsigned i=0; i<width; i++)
919            {
920                 std::vector<Node<SPACE_DIM>*> nodes;
921                 nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
922                 nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
923                 belem_index=i;
924                 RegisterBoundaryElement(belem_index);
925                 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
926            }
927         }
928
929         //Right
930         for (unsigned j=lo_y+1; j<hi_y; j++)
931         {
932             std::vector<Node<SPACE_DIM>*> nodes;
933             nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
934             nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
935             belem_index=width+j-1;
936             RegisterBoundaryElement(belem_index);
937             this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
938         }
939
940         //Bottom
        // NOTE(review): original line 941 is missing — presumably the guard restricting
        // the bottom edge to the lowest-numbered process; confirm against upstream.
942         {
943             for (unsigned i=0; i<width; i++)
944             {
945                 std::vector<Node<SPACE_DIM>*> nodes;
946                 nodes.push_back(GetNodeOrHaloNode( i ));
947                 nodes.push_back(GetNodeOrHaloNode( i+1 ));
948                 belem_index=width+height+i;
949                 RegisterBoundaryElement(belem_index);
950                 this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
951             }
952         }
953
954         //Left
955         for (unsigned j=lo_y; j<hi_y-1; j++)
956         {
957             std::vector<Node<SPACE_DIM>*> nodes;
958             nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
959             nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
960             belem_index=2*width+height+j;
961             RegisterBoundaryElement(belem_index);
962             this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
963         }
964
965
966         //Construct the elements
967         unsigned elem_index;
968         for (unsigned j=lo_y; j<hi_y-1; j++)
969         {
970             for (unsigned i=0; i<width; i++)
971             {
972                 unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
973                 unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
974                 unsigned sw=(j)*(width+1)+i;   //se=sw+1
975                 std::vector<Node<SPACE_DIM>*> upper_nodes;
976                 upper_nodes.push_back(GetNodeOrHaloNode( nw ));
977                 upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
978                 if (stagger==false  || parity == 1)
979                 {
980                     upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
981                 }
982                 else
983                 {
984                     upper_nodes.push_back(GetNodeOrHaloNode( sw ));
985                 }
986                 elem_index=2*(j*width+i);
987                 RegisterElement(elem_index);
988                 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
989                 std::vector<Node<SPACE_DIM>*> lower_nodes;
990                 lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
991                 lower_nodes.push_back(GetNodeOrHaloNode( sw ));
992                 if (stagger==false ||parity == 1)
993                 {
994                     lower_nodes.push_back(GetNodeOrHaloNode( nw ));
995                 }
996                 else
997                 {
998                     lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
999                 }
1000                 elem_index++;
1001                 RegisterElement(elem_index);
1002                 this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
1003             }
1004         }
1005     }
1006 }
1007
1008
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
                                                                     unsigned height,
                                                                     unsigned depth)
{
    assert(SPACE_DIM == 3); // LCOV_EXCL_LINE
    assert(ELEMENT_DIM == 3); // LCOV_EXCL_LINE
    //Check that there are enough nodes to make the parallelisation worthwhile
    if (depth==0)
    {
        EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
    }

    // Hook to pick up when we are using a geometric partition.
    if (mPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
    {
        if (!mpSpaceRegion)
        {
            EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
        }

        // Write a serial file, then load on distributed processors.
        {
            TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_cuboid_mesh");
            base_mesh.ConstructCuboid(width, height, depth);
            mesh_writer.WriteFilesUsingMesh(base_mesh);
        }

        OutputFileHandler output_handler("", false);

        std::string output_dir = output_handler.GetOutputDirectoryFullPath();
        TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_cuboid_mesh");

        // Re-read the serial mesh; ConstructFromMeshReader applies the geometric partition
        this->ConstructFromMeshReader(mesh_reader);
    }
    else
    {
        //Use dumb partition so that archiving doesn't permute anything
        mPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;

        mTotalNumNodes=(width+1)*(height+1)*(depth+1);
        mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
        mTotalNumElements=width*height*depth*6;

        //Use DistributedVectorFactory to make a dumb partition of space
        DistributedVectorFactory z_partition(depth+1);
        unsigned lo_z = z_partition.GetLow();
        unsigned hi_z = z_partition.GetHigh();

        //Dumb partition of nodes has to be such that each process gets complete slices
        assert(!this->mpDistributedVectorFactory);
        this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
        if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
        {
            // It's a short mesh and this process owns no nodes.
            // This return cannot be covered by regular testing, but is covered by the Nightly -np 3 builder
            return; //LCOV_EXCL_LINE
        }

        /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
         * higher numbered process may have dropped out of this construction altogether
         * (because it has no local ownership)
         */
        bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);

        if (!PetscTools::AmMaster())
        {
            //Allow for a halo node
            lo_z--;
        }
        if (!am_top_most)
        {
            //Allow for a halo node
            hi_z++;
        }

        // Construct the nodes: one z-slab per process, plus halo slabs either side
        unsigned global_node_index;
        for (unsigned k=lo_z; k<hi_z; k++)
        {
            for (unsigned j=0; j<height+1; j++)
            {
                for (unsigned i=0; i<width+1; i++)
                {
                    // A node is on the boundary if it lies on any face of the cuboid
                    bool is_boundary = false;
                    if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
                    {
                        is_boundary = true;
                    }
                    global_node_index = (k*(height+1)+j)*(width+1)+i;

                    Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);

                    if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
                    {
                        //Beyond left or right it's a halo node
                        RegisterHaloNode(global_node_index);
                        mHaloNodes.push_back(p_node);
                    }
                    else
                    {
                        RegisterNode(global_node_index);
                        this->mNodes.push_back(p_node);
                    }

                    if (is_boundary)
                    {
                        this->mBoundaryNodes.push_back(p_node);
                    }
                }
            }
        }

        // Construct the elements
        // Each unit cube is split into 6 tetrahedra; the rows below give, for each
        // tetrahedron, the local (0-7) corner indices of the cube it uses.
        unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
                                        {0, 2, 3, 7}, {0, 2, 6, 7},
                                        {0, 4, 6, 7}, {0, 4, 5, 7}};
        std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;

        for (unsigned k=lo_z; k<hi_z-1; k++)
        {
            // belem_index counts boundary (triangular) faces created so far, globally
            unsigned belem_index = 0;
            if (k != 0)
            {
                // height*width squares on upper face, k layers of 2*height+2*width square around
                belem_index = 2*(height*width+k*2*(height+width));
            }

            for (unsigned j=0; j<height; j++)
            {
                for (unsigned i=0; i<width; i++)
                {
                    // Compute the nodes' index
                    unsigned global_node_indices[8];
                    unsigned local_node_index = 0;

                    // Enumerate the 8 corners of the unit cube at (i,j,k),
                    // in x-fastest order so local index = x + 2*y + 4*z
                    for (unsigned z = 0; z < 2; z++)
                    {
                        for (unsigned y = 0; y < 2; y++)
                        {
                            for (unsigned x = 0; x < 2; x++)
                            {
                                global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));

                                local_node_index++;
                            }
                        }
                    }

                    for (unsigned m = 0; m < 6; m++)
                    {
                        // Tetrahedra #m

                        tetrahedra_nodes.clear();

                        for (unsigned n = 0; n < 4; n++)
                        {
                            tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
                        }
                        unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
                        RegisterElement(elem_index);
                        this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
                    }

                    //Are we at a boundary?  Each exposed cube face contributes two triangles.
                    std::vector<Node<SPACE_DIM>*> triangle_nodes;

                    if (i == 0) //low face at x==0
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                    if (i == width-1) //high face at x=width
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                    if (j == 0) //low face at y==0
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                    if (j == height-1) //high face at y=height
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                    if (k == 0) //low face at z==0
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                    if (k == depth-1) //high face at z=depth
                    {
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                        triangle_nodes.clear();
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
                        triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
                        RegisterBoundaryElement(belem_index);
                        this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
                    }
                }//i
            }//j
        }//k
    }
}
1275
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
{
    //Base class scale (scales node positions)
    //Scales halos: halo nodes are not in this->mNodes, so the base class does not touch them
    for (unsigned i=0; i<mHaloNodes.size(); i++)
    {
        c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
        // Guard each coordinate so indexing stays valid for 1D/2D meshes
        if (SPACE_DIM>=3)
        {
            r_location[2] *= zFactor;
        }
        if (SPACE_DIM>=2)
        {
            r_location[1] *= yFactor;
        }
        r_location[0] *= xFactor;
    }

}
1297
1298
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
                                                                                 std::set<unsigned>& rElementsOwned,
                                                                                 std::set<unsigned>& rNodesOwned,
                                                                                 std::set<unsigned>& rHaloNodesOwned,
                                                                                 std::vector<unsigned>& rProcessorsOffset)
{
    // Partition the elements with ParMETIS, then derive a node partition, halo
    // node sets and a node permutation from the resulting element partition.
    assert(PetscTools::IsParallel());
    assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // LCOV_EXCL_LINE // Partitioning works with triangles and tetras

    const unsigned num_elements = rMeshReader.GetNumElements();
    const unsigned num_nodes = rMeshReader.GetNumNodes();
    const unsigned num_procs = PetscTools::GetNumProcs();
    const unsigned local_proc_index = PetscTools::GetMyRank();

    /*
     * Work out initial element distribution: near-equal contiguous ranges of
     * elements, with the remainder going to the last process.
     */
    boost::scoped_array<idx_t> element_distribution(new idx_t[num_procs+1]);
    boost::scoped_array<int> element_counts(new int[num_procs]);

    element_distribution[0] = 0;

    for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
    {
        element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
        element_counts[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
    }

    element_distribution[num_procs] = num_elements;
    element_counts[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];

    /*
     * Create distributed mesh data structure (CSR-style eptr/eind arrays as
     * expected by ParMETIS_V3_Mesh2Dual)
     */
    idx_t first_local_element = element_distribution[local_proc_index];
    idx_t last_plus_one_element = element_distribution[local_proc_index+1];
    idx_t num_local_elements = last_plus_one_element - first_local_element;

    boost::scoped_array<idx_t> eind(new idx_t[num_local_elements*(ELEMENT_DIM+1)]);
    boost::scoped_array<idx_t> eptr(new idx_t[num_local_elements+1]);

    if (rMeshReader.IsFileFormatBinary() && first_local_element > 0)
    {
        // Advance the file pointer to the first element before the ones I own.
        rMeshReader.GetElementData(first_local_element - 1);
    }
    else
    {
        // Advance the file pointer to the first element before the ones I own.
        for (idx_t element_index = 0; element_index < first_local_element; element_index++)
        {
            rMeshReader.GetNextElementData();
        }
    }

#ifdef CHASTE_HOMEMADE_MESH_TO_DUAL
    // element_node_matrix is an encoding of the .ele file. Each row is an element with the
    // 3 or 4 adjacent nodes indicated by a 1 in the appropriate column
    Mat element_node_matrix;
    PetscTools::SetupMat(element_node_matrix, num_elements, num_nodes, ELEMENT_DIM+1, num_local_elements);
#endif

    // Read this process's contiguous range of elements into eptr/eind
    unsigned counter = 0;
    for (idx_t element_index = 0; element_index < num_local_elements; element_index++)
    {
        ElementData element_data;

        element_data = rMeshReader.GetNextElementData();

        eptr[element_index] = counter;
        for (unsigned i=0; i<ELEMENT_DIM+1; i++)
        {
#ifdef CHASTE_HOMEMADE_MESH_TO_DUAL
            PetscMatTools::SetElement(element_node_matrix, element_index+first_local_element, element_data.NodeIndices[i], 1.0);
#else
            eind[counter++] = element_data.NodeIndices[i];
#endif
        }
    }
    eptr[num_local_elements] = counter;

    rMeshReader.Reset();
    idx_t numflag = 0; // ParMETIS speak for C-style numbering

    MPI_Comm communicator = PETSC_COMM_WORLD;

    // Dual graph of the mesh (elements are vertices; shared faces/edges are graph edges)
    idx_t* xadj;
    idx_t* adjncy;

    Timer::Reset();
#ifdef CHASTE_HOMEMADE_MESH_TO_DUAL
    PetscMatTools::Finalise(element_node_matrix);
    std::vector<idx_t> my_xadj;
    std::vector<idx_t> my_adjncy;
    /* The goal is for my_adjncy to contain, for each local element, a list of the
     * elements that it is adjacent to. These are contiguous but my_xadj will contain the
     * start index for the data of each local element.
     * The element_node_matrix contains data on which nodes support each element.
     * The dot-product of two rows of this matrix is an indication of how many nodes two elements share:
     * ELEMENT_DIM+1 if they are the same row and ELEMENT_DIM if they are neighbours.
     * These dot-products are achieved (slowly) with
     * element_element_matrix = element_node_matrix * transpose(element_node_matrix)
     *
     * Each entry of element_element_matrix shows how many nodes a pair of elements share.
     */

    Mat node_element_matrix;
    MatTranspose(element_node_matrix, MAT_INITIAL_MATRIX, &node_element_matrix);
    Mat element_element_matrix;
    MatMatMult(element_node_matrix, node_element_matrix, MAT_INITIAL_MATRIX, PETSC_DETERMINE, &element_element_matrix);
    my_xadj.push_back(0);
    PetscInt ncols;
    const PetscInt *cols;
    const PetscScalar *vals;
    for (PetscInt el_index=first_local_element; el_index<last_plus_one_element; el_index++)
    {
        MatGetRow(element_element_matrix, el_index, &ncols, &cols, &vals);
        for (PetscInt i=0;i<ncols;i++)
        {
            if (std::lround(vals[i])==ELEMENT_DIM)
            {
                // Shared edge/face between two elements
                my_adjncy.push_back(cols[i]);
            }
        }
        MatRestoreRow(element_element_matrix, el_index, &ncols, &cols, NULL);
        // Mark where next local element starts
        my_xadj.push_back(my_adjncy.size());
    }
    MatDestroy(&element_node_matrix);
    MatDestroy(&node_element_matrix);
    MatDestroy(&element_element_matrix);
    xadj = &my_xadj[0];
    adjncy = &my_adjncy[0];

#else
    // The default behaviour is to use a ParMETIS (or possibly Scotch) function to get the dual
    /* Connectivity degree.
     * GRAPH EDGE is placed between any two elements if and only if they share at least this many nodes.
     */
    idx_t ncommonnodes = 3; //Linear tetrahedra
    if (ELEMENT_DIM == 2)
    {
        ncommonnodes = 2;
    }
    ParMETIS_V3_Mesh2Dual(element_distribution.get(), eptr.get(), eind.get(),
                          &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
#endif
    //Timer::Print("ParMETIS Mesh2Dual");
    // Be more memory efficient, and get rid of (maybe large) arrays as soon as they're no longer needed, rather than at end of scope
    eind.reset();
    eptr.reset();

    idx_t weight_flag = 0; // unweighted graph
    idx_t n_constraints = 1; // number of weights that each vertex has (number of balance constraints)
    idx_t n_subdomains = PetscTools::GetNumProcs();
    idx_t options[3]; // extra options
    options[0] = 0; // ignore extra options
    idx_t edgecut;
    // Equal target weight per subdomain; 1.05 is the imbalance tolerance per constraint
    boost::scoped_array<real_t> tpwgts(new real_t[n_subdomains]);
    real_t ubvec_value = (real_t)1.05;
    for (unsigned proc=0; proc<PetscTools::GetNumProcs(); proc++)
    {
        tpwgts[proc] = ((real_t)1.0)/n_subdomains;
    }
    boost::scoped_array<idx_t> local_partition(new idx_t[num_local_elements]);

/*
 * In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
 * centers of each element efficiently.
 *
 * In the meantime use ParMETIS_V3_PartKway.
 */
//    int n_dimensions = ELEMENT_DIM;
//    float node_coordinates[num_local_elements*SPACE_DIM];
//
//    ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
//                             &n_dimensions, node_coordinates, &n_constraints, &n_subdomains, NULL, NULL,
//                             options, &edgecut, local_partition, &communicator);

    Timer::Reset();
    ParMETIS_V3_PartKway(element_distribution.get(), xadj, adjncy, nullptr, nullptr, &weight_flag, &numflag,
                         &n_constraints, &n_subdomains, tpwgts.get(), &ubvec_value,
                         options, &edgecut, local_partition.get(), &communicator);
    //Timer::Print("ParMETIS PartKway");
    tpwgts.reset();

    // Gather the per-process partition fragments into a global element->process map
    boost::scoped_array<idx_t> global_element_partition(new idx_t[num_elements]);

    //idx_t is normally int (see metis-4.0/Lib/struct.h 17-22) but is 64bit on Windows
    MPI_Datatype mpi_idx_t = MPI_LONG_LONG_INT;
    if (sizeof(idx_t) == sizeof(int))
    {
        mpi_idx_t = MPI_INT;
    }
    // MPI_Allgatherv displacements must be int, so copy the idx_t distribution
    boost::scoped_array<int> int_element_distribution(new int[num_procs+1]);
    for (unsigned i=0; i<num_procs+1; ++i)
    {
        int_element_distribution[i] = element_distribution[i];
    }
    MPI_Allgatherv(local_partition.get(), num_local_elements, mpi_idx_t,
                   global_element_partition.get(), element_counts.get(), int_element_distribution.get(), mpi_idx_t, PETSC_COMM_WORLD);

    local_partition.reset();

    for (unsigned elem_index=0; elem_index<num_elements; elem_index++)
    {
        if ((unsigned) global_element_partition[elem_index] == local_proc_index)
        {
            rElementsOwned.insert(elem_index);
        }
    }

    rMeshReader.Reset();
#ifdef CHASTE_HOMEMADE_MESH_TO_DUAL
    // These are contained in std::vectors that are automatically freed
    xadj = NULL;
    adjncy = NULL;
#endif
    // ParMETIS allocated these with malloc; free(NULL) is a safe no-op in the homemade case
    free(xadj);
    free(adjncy);
    //unsigned num_nodes = rMeshReader.GetNumNodes();

    // Initialise with no nodes known
    std::vector<unsigned> global_node_partition(num_nodes, UNASSIGNED_NODE);

    assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
    rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);

    /*
     * Work out node distribution based on initial element distribution returned by ParMETIS
     *
     * In this loop we handle 4 different data structures:
     * global_node_partition and rProcessorsOffset are global,
     * rNodesOwned and rHaloNodesOwned are local.
     */

    /*
     * Note that at this point each process has to read the entire element file in order to compute
     * the node partition from the initial element distribution.
     * * Previously we randomly permuted the BIN file element access order on each process so that the processes
     *   weren't reading the same file sectors at the same time
     * * We noted that with large files (above about 0.5 GigaByte) on HECToR the random access file reader
     *   was spending all its time in fseekg. This is presumably because each fseekg from the start of the file
     *   involves multiple levels of indirect file block pointers.
     * * Hence the use of random element reading is only useful for the niche of moderately large meshes with
     *   process counts in the thousands.
     * Hence BIN file element permuting is deprecated - we just read the file in order.
     * See
     * https://github.com/Chaste/Old-Chaste-svn-mirror/blob/554dbbf5cb7e95105aa8a6f48ee57551edea2a8a/mesh/src/common/DistributedTetrahedralMesh.cpp#L1459
     */

    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        unsigned element_owner = global_element_partition[element_number];

        ElementData element_data;

        element_data = rMeshReader.GetNextElementData();

        for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
             node_it != element_data.NodeIndices.end();
             ++node_it)
        {
            /*
             * For each node in this element, check whether it hasn't been assigned to another processor yet.
             * If so, assign it to the owner of the element. Otherwise, consider it halo.
             */
            if (global_node_partition[*node_it] == UNASSIGNED_NODE)
            {
                if (element_owner == local_proc_index)
                {
                    rNodesOwned.insert(*node_it);
                }

                global_node_partition[*node_it] = element_owner;

                // Offset is defined as the first node owned by a processor. We compute it incrementally.
                // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
                // offset a position.
                for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
                {
                    rProcessorsOffset[proc]++;
                }
            }
            else
            {
                if (element_owner == local_proc_index)
                {
                    //if (rNodesOwned.find(*node_it) == rNodesOwned.end())
                    if (global_node_partition[*node_it] != local_proc_index)
                    {
                        rHaloNodesOwned.insert(*node_it);
                    }
                }
            }
        }
    }


    /*
     * Refine element distribution. Add extra elements that parMETIS didn't consider initially but
     * include any node owned by the processor. This ensures that all the system matrix rows are
     * assembled locally.
     * It may be that some of these elements are in the set of owned nodes erroneously.
     * The original set of owned elements (from the k-way partition) informed a
     * node partition. It may be that an element near the edge of this new node
     * partition may no longer be needed.
     *
     * Note that rather than clearing the set we could remove elements to the original element partition set
     * with erase(), if (!element_owned) below.
     */
    rElementsOwned.clear();
    rMeshReader.Reset();
    for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
    {
        ElementData element_data = rMeshReader.GetNextElementData();

        bool element_owned = false;
        std::set<unsigned> temp_halo_nodes;

        for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
             node_it != element_data.NodeIndices.end();
             ++node_it)
        {
            if (rNodesOwned.find(*node_it) != rNodesOwned.end())
            {
                element_owned = true;
                rElementsOwned.insert(element_number);
            }
            else
            {
                temp_halo_nodes.insert(*node_it);
            }
        }

        // Only elements we actually own contribute their remaining nodes as halos
        if (element_owned)
        {
            rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
        }
    }

    rMeshReader.Reset();

    /*
     * Once we know the offsets we can compute the permutation vector:
     * new index = owner's offset + position among that owner's nodes
     */
    std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);

    this->mNodePermutation.resize(this->GetNumNodes());

    for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
    {
        unsigned partition = global_node_partition[node_index];
        assert(partition != UNASSIGNED_NODE);

        this->mNodePermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];

        local_index[partition]++;
    }
}
1662
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
{
    // Compute the local bounding box, then reduce across all processes so every
    // process returns the same global cuboid.
    ChastePoint<SPACE_DIM> my_minimum_point {};
    ChastePoint<SPACE_DIM> my_maximum_point {};

    try
    {
        my_minimum_point=my_box.rGetLowerCorner();
        my_maximum_point=my_box.rGetUpperCorner();
    }
    // LCOV_EXCL_START
    catch (Exception& e)
    {
        throw e;

    }
    // LCOV_EXCL_STOP


    // Element-wise min/max reductions across all processes
    c_vector<double, SPACE_DIM> global_minimum_point;
    c_vector<double, SPACE_DIM> global_maximum_point;
    MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
    MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);

    ChastePoint<SPACE_DIM> min(global_minimum_point);
    ChastePoint<SPACE_DIM> max(global_maximum_point);

    return ChasteCuboid<SPACE_DIM>(min, max);
}
1696
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
{
    // Call base method to find closest on local processor
    unsigned best_node_index = AbstractMesh<ELEMENT_DIM, SPACE_DIM>::GetNearestNodeIndex(rTestPoint);

    // Recalculate the distance to the best node (if this process has one).
    // DBL_MAX ensures a process with no candidate loses the MINLOC reduction below.
    double best_node_point_distance = DBL_MAX;
    if (best_node_index != UINT_MAX)
    {
        best_node_point_distance = norm_2(this->GetNode(best_node_index)->rGetLocation() - rTestPoint.rGetLocation());
    }


    // This is a handy data structure that will work with MPI_DOUBLE_INT data type.
    // There is no MPI_DOUBLE_UNSIGNED
    struct
    {
        double distance;
        int node_index;
    } value, minval;

    value.node_index = best_node_index;
    value.distance = best_node_point_distance;

    // Global reduction: pick the (distance, index) pair with the smallest distance
    MPI_Allreduce( &value, &minval, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );

    return minval.node_index;
}
1726
template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
{
    // Reduce the local [min, max] edge lengths to global values across all processes
    c_vector<double, 2> global_min_max;

    MPI_Allreduce(&local_min_max[0], &global_min_max[0], 1, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
    MPI_Allreduce(&local_min_max[1], &global_min_max[1], 1, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);

    return global_min_max;
}
1738
1739template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1744
1745template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1746void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Rotate(c_matrix<double, SPACE_DIM, SPACE_DIM> rotationMatrix)
1747{
1748 // First do the extras
1749 for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1750 {
1751 c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1752 r_location = prod(rotationMatrix, r_location);
1753 }
1754 // Now a copy of the base class implementation
1755 for (unsigned i=0; i<this->mNodes.size(); i++)
1756 {
1757 c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1758 r_location = prod(rotationMatrix, r_location);
1759 }
1760}
1761
1762template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1763void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Translate(const c_vector<double, SPACE_DIM>& rDisplacement)
1764{
1765 // First do the extras
1766 for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1767 {
1768 c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1769 r_location += rDisplacement;
1770 }
1771 // Now a copy of the base class implementation
1772 for (unsigned i=0; i<this->mNodes.size(); i++)
1773 {
1774 c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1775 r_location += rDisplacement;
1776 }
1777}
1778
1779template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1784
// Explicit instantiation of the template for every element/space dimension pair used in Chaste
template class DistributedTetrahedralMesh<1,1>;
template class DistributedTetrahedralMesh<1,2>;
template class DistributedTetrahedralMesh<1,3>;
template class DistributedTetrahedralMesh<2,2>;
template class DistributedTetrahedralMesh<2,3>;
template class DistributedTetrahedralMesh<3,3>;
1792
1793// Serialization for Boost >= 1.36
#define EXCEPTION(message)
#define EXPORT_TEMPLATE_CLASS_ALL_DIMS(CLASS)
void SetAttribute(double attribute)
ElementIterator GetElementIteratorBegin()
virtual void Reset()=0
virtual unsigned GetNumElements() const =0
virtual std::vector< unsigned > GetContainingElementIndices(unsigned index)
virtual ElementData GetNextElementData()=0
virtual std::string GetMeshFileBaseName()
virtual unsigned GetNumFaces() const =0
NodeIterator GetNodeIteratorEnd()
virtual unsigned GetNumElementAttributes() const
virtual std::vector< double > GetNextNode()=0
virtual const std::vector< unsigned > & rGetNodePermutation()
virtual std::vector< double > GetNodeAttributes()
virtual unsigned GetNumFaceAttributes() const
virtual bool HasNodePermutation()
virtual bool IsFileFormatBinary()
virtual unsigned GetNumNodes() const =0
virtual ElementData GetNextFaceData()=0
NodeIterator GetNodeIteratorBegin()
virtual ElementData GetElementData(unsigned index)
ElementIterator GetElementIteratorEnd()
virtual void SetDistributedVectorFactory(DistributedVectorFactory *pFactory)
virtual ChasteCuboid< SPACE_DIM > CalculateBoundingBox() const
virtual unsigned GetNearestNodeIndex(const ChastePoint< SPACE_DIM > &rTestPoint)
virtual void Scale(const double xFactor=1.0, const double yFactor=1.0, const double zFactor=1.0)
virtual void WriteFilesUsingMesh(AbstractTetrahedralMesh< ELEMENT_DIM, SPACE_DIM > &rMesh, bool keepOriginalElementIndexing=true)
virtual c_vector< double, 2 > CalculateMinMaxEdgeLengths()
virtual void ConstructRectangularMesh(unsigned width, unsigned height, bool stagger=true)
virtual void ConstructCuboid(unsigned width, unsigned height, unsigned depth)
virtual void ConstructLinearMesh(unsigned width)
const ChastePoint< SPACE_DIM > & rGetUpperCorner() const
const ChastePoint< SPACE_DIM > & rGetLowerCorner() const
c_vector< double, DIM > & rGetLocation()
bool CalculateDesignatedOwnershipOfElement(unsigned elementIndex)
virtual void ConstructFromMeshReader(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader)
ChasteCuboid< SPACE_DIM > * GetProcessRegion()
virtual c_vector< double, 2 > CalculateMinMaxEdgeLengths()
Node< SPACE_DIM > * GetNodeOrHaloNode(unsigned index) const
void ComputeMeshPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::set< unsigned > &rNodesOwned, std::set< unsigned > &rHaloNodesOwned, std::set< unsigned > &rElementsOwned, std::vector< unsigned > &rProcessorsOffset)
virtual unsigned GetNearestNodeIndex(const ChastePoint< SPACE_DIM > &rTestPoint)
HaloNodeIterator GetHaloNodeIteratorEnd() const
unsigned SolveElementMapping(unsigned index) const
void ConstructRectangularMesh(unsigned width, unsigned height, bool stagger=true)
void Translate(const c_vector< double, SPACE_DIM > &rDisplacement)
DistributedTetrahedralMeshPartitionType::type GetPartitionType() const
unsigned SolveBoundaryElementMapping(unsigned index) const
void SetDistributedVectorFactory(DistributedVectorFactory *pFactory)
bool CalculateDesignatedOwnershipOfBoundaryElement(unsigned faceIndex)
HaloNodeIterator GetHaloNodeIteratorBegin() const
DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod=DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY)
unsigned SolveNodeMapping(unsigned index) const
void ParMetisLibraryNodeAndElementPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::set< unsigned > &rElementsOwned, std::set< unsigned > &rNodesOwned, std::set< unsigned > &rHaloNodesOwned, std::vector< unsigned > &rProcessorsOffset)
virtual void Scale(const double xFactor=1.0, const double yFactor=1.0, const double zFactor=1.0)
void ConstructCuboid(unsigned width, unsigned height, unsigned depth)
void Rotate(c_matrix< double, SPACE_DIM, SPACE_DIM > rotationMatrix)
std::vector< Node< SPACE_DIM > * >::const_iterator HaloNodeIterator
void SetProcessRegion(ChasteCuboid< SPACE_DIM > *pRegion)
void GetHaloNodeIndices(std::vector< unsigned > &rHaloIndices) const
virtual ChasteCuboid< SPACE_DIM > CalculateBoundingBox() const
DistributedTetrahedralMeshPartitionType::type mPartitioning
static void DumbPartitioning(AbstractMesh< ELEMENT_DIM, SPACE_DIM > &rMesh, std::set< unsigned > &rNodesOwned)
static void PetscMatrixPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::vector< unsigned > &rNodePermutation, std::set< unsigned > &rNodesOwned, std::vector< unsigned > &rProcessorsOffset)
static void GeometricPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::vector< unsigned > &rNodePermutation, std::set< unsigned > &rNodesOwned, std::vector< unsigned > &rProcessorsOffset, ChasteCuboid< SPACE_DIM > *pRegion)
Definition Node.hpp:59
void AddNodeAttribute(double attribute)
Definition Node.cpp:170
std::string GetOutputDirectoryFullPath() const
static void Finalise(Mat matrix)
static void SetElement(Mat matrix, PetscInt row, PetscInt col, double value)
static bool AmMaster()
static bool AmTopMost()
static void Barrier(const std::string callerId="")
static bool IsParallel()
static unsigned GetMyRank()
static void ReplicateException(bool flag)
static void SetupMat(Mat &rMat, int numRows, int numColumns, unsigned rowPreallocation, int numLocalRows=PETSC_DECIDE, int numLocalColumns=PETSC_DECIDE, bool ignoreOffProcEntries=true, bool newAllocationError=true)
static unsigned GetNumProcs()
static bool HasParMetis()
static void PrintAndReset(std::string message)
Definition Timer.cpp:70
static void Reset()
Definition Timer.cpp:44
std::vector< unsigned > NodeIndices