Chaste  Release::3.4
DistributedTetrahedralMesh.cpp
1 /*
2 
3 Copyright (c) 2005-2016, University of Oxford.
4 All rights reserved.
5 
6 University of Oxford means the Chancellor, Masters and Scholars of the
7 University of Oxford, having an administrative office at Wellington
8 Square, Oxford OX1 2JD, UK.
9 
10 This file is part of Chaste.
11 
12 Redistribution and use in source and binary forms, with or without
13 modification, are permitted provided that the following conditions are met:
14  * Redistributions of source code must retain the above copyright notice,
15  this list of conditions and the following disclaimer.
16  * Redistributions in binary form must reproduce the above copyright notice,
17  this list of conditions and the following disclaimer in the documentation
18  and/or other materials provided with the distribution.
19  * Neither the name of the University of Oxford nor the names of its
20  contributors may be used to endorse or promote products derived from this
21  software without specific prior written permission.
22 
23 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
24 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
27 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
28 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
29 GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30 HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31 LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
32 OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
33 
34 */
35 
36 #include "DistributedTetrahedralMesh.hpp"
37 
38 #include <cassert>
39 #include <sstream>
40 #include <string>
41 #include <iterator>
42 #include <algorithm>
43 #include <boost/scoped_array.hpp>
44 
45 #include "Exception.hpp"
46 #include "Element.hpp"
47 #include "BoundaryElement.hpp"
48 
49 #include "PetscTools.hpp"
50 #include "DistributedVectorFactory.hpp"
51 #include "OutputFileHandler.hpp"
52 #include "NodePartitioner.hpp"
53 
54 #include "RandomNumberGenerator.hpp"
55 
56 #include "Timer.hpp"
57 #include "TetrahedralMesh.hpp"
58 
59 #include "petscao.h"
60 #include <parmetis.h>
61 #if (PARMETIS_MAJOR_VERSION >= 4) //ParMETIS 4.x and above
62 //Redefine the index type so that we can still use the old name "idxtype"
63 #define idxtype idx_t
64 #else
65 //Old version of ParMETIS used "float" which may appear elsewhere in, for example, tetgen
66 #define real_t float
67 #endif
68 
70 // IMPLEMENTATION
72 
73 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructor: zeroes the global element/boundary-element/node counters, leaves the
// process space-region unset, and records the requested partitioning method.
// NOTE(review): the signature line (original line 74) is missing from this dump;
// it presumably takes a DistributedTetrahedralMeshPartitionType::type
// 'partitioningMethod' argument (it is read in the initialiser list below) — confirm
// against the class header.
75  :
76  mTotalNumElements(0u),
77  mTotalNumBoundaryElements(0u),
78  mTotalNumNodes(0u),
79  mpSpaceRegion(NULL),
80  mMetisPartitioning(partitioningMethod)
81 {
// In 1D, METIS-style graph partitioning cannot be used, so silently fall back to
// DUMB partitioning (GEOMETRIC is still allowed since it does not need METIS).
82  if (ELEMENT_DIM == 1 && (partitioningMethod != DistributedTetrahedralMeshPartitionType::GEOMETRIC))
83  {
84  //No METIS partition is possible - revert to DUMB
85  mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
86  }
87 }
88 
89 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Destructor: this class owns the halo Node objects it created, so delete each one.
// (Locally-owned nodes/elements are cleaned up by the base class destructor.)
// NOTE(review): the destructor's signature line (original line 90) is missing from
// this dump — confirm against the repository.
91 {
92  for (unsigned i=0; i<this->mHaloNodes.size(); i++)
93  {
94  delete this->mHaloNodes[i];
95  }
96 }
97 
98 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Forces DUMB partitioning when an external vector factory dictates the node
// distribution, so the mesh does not try to re-partition/permute nodes itself.
// NOTE(review): the signature line (original 99) and any preceding statement
// (original 101, presumably a call to the base-class implementation) are missing
// from this dump — this looks like SetDistributedVectorFactory(...); confirm.
100 {
102  mMetisPartitioning = DistributedTetrahedralMeshPartitionType::DUMB;
103 }
104 
105 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Computes which nodes, halo nodes and elements this process owns, plus the
// per-process node-count offsets, dispatching on mMetisPartitioning:
//  - PARMETIS_LIBRARY: element partition first, node ownership derived from it;
//  - METIS_LIBRARY / PETSC_MAT_PARTITION / GEOMETRIC: node partition first, then
//    element/halo ownership derived by scanning the elements.
// Outputs are returned through the four reference parameters; the reader is Reset()
// before returning so subsequent passes re-read from the start.
// NOTE(review): the return-type/name line and the rMeshReader parameter line
// (original 106-107) are missing from this dump — the method is evidently
// void ComputeMeshPartitioning(AbstractMeshReader&, ...); confirm against the header.
108  std::set<unsigned>& rNodesOwned,
109  std::set<unsigned>& rHaloNodesOwned,
110  std::set<unsigned>& rElementsOwned,
111  std::vector<unsigned>& rProcessorsOffset)
112 {
114  if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY && PetscTools::IsParallel())
115  {
116  /*
117  * With ParMetisLibraryNodeAndElementPartitioning we compute the element partition first
118  * and then we work out the node ownership.
119  */
120  ParMetisLibraryNodeAndElementPartitioning(rMeshReader, rElementsOwned, rNodesOwned, rHaloNodesOwned, rProcessorsOffset);
121  }
122  else
123  {
124  /*
125  * Otherwise we compute the node partition and then we work out element distribution
126  */
127  if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::METIS_LIBRARY && PetscTools::IsParallel())
128  {
129  NodePartitioner<ELEMENT_DIM, SPACE_DIM>::MetisLibraryPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset);
130  }
131  else if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
132  {
133  NodePartitioner<ELEMENT_DIM, SPACE_DIM>::PetscMatrixPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset);
134  }
135  else if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::GEOMETRIC && PetscTools::IsParallel())
136  {
137  if (!mpSpaceRegion)
138  {
139  EXCEPTION("Using GEOMETRIC partition for DistributedTetrahedralMesh with local regions not set. Call SetProcessRegion(ChasteCuboid)");
140  }
141  NodePartitioner<ELEMENT_DIM, SPACE_DIM>::GeometricPartitioning(rMeshReader, this->mNodePermutation, rNodesOwned, rProcessorsOffset, mpSpaceRegion);
142  }
143  else
144  {
// NOTE(review): original line 145 is missing from this dump — in this fall-through
// branch it presumably invokes the dumb/sequential partitioner
// (NodePartitioner::DumbPartitioning or similar); confirm against the repository.
146  }
147
// Derive element and halo-node ownership from the node partition.  If an NCL
// (node-containing-elements) file exists we can jump straight to the elements
// touching our nodes; otherwise we must stream every element once.
148  if ( rMeshReader.HasNclFile() )
149  {
150  // Form a set of all the element indices we are going to own
151  // (union of the sets from the lines in the NCL file)
152  for ( std::set<unsigned>::iterator iter=rNodesOwned.begin();
153  iter!=rNodesOwned.end();
154  ++iter )
155  {
156  std::vector<unsigned> containing_elements = rMeshReader.GetContainingElementIndices( *iter );
157  rElementsOwned.insert( containing_elements.begin(), containing_elements.end() );
158  }
159
160  // Iterate through that set rather than mTotalNumElements (knowing that we own a least one node in each line)
161  // Then read all the data into a node_index set
162  std::set<unsigned> node_index_set;
163
164  for ( std::set<unsigned>::iterator iter=rElementsOwned.begin();
165  iter!=rElementsOwned.end();
166  ++iter )
167  {
168  ElementData element_data = rMeshReader.GetElementData( *iter );
169  node_index_set.insert( element_data.NodeIndices.begin(), element_data.NodeIndices.end() );
170  }
171
172  // Subtract off the rNodesOwned set to produce rHaloNodesOwned.
173  // Note that rNodesOwned is a subset of node_index_set.
174  std::set_difference(node_index_set.begin(), node_index_set.end(),
175  rNodesOwned.begin(), rNodesOwned.end(),
176  std::inserter(rHaloNodesOwned, rHaloNodesOwned.begin()));
177  }
178  else
179  {
// No NCL file: stream every element; an element is owned if any of its nodes is
// owned, and its non-owned nodes then become halo nodes.
180  for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
181  {
182  ElementData element_data = rMeshReader.GetNextElementData();
183
184  bool element_owned = false;
185  std::set<unsigned> temp_halo_nodes;
186
187  for (std::vector<unsigned>::const_iterator it = element_data.NodeIndices.begin();
188  it != element_data.NodeIndices.end();
189  ++it)
190  {
191  if (rNodesOwned.find(*it) != rNodesOwned.end())
192  {
193  element_owned = true;
194  rElementsOwned.insert(element_number);
195  }
196  else
197  {
198  temp_halo_nodes.insert(*it);
199  }
200  }
201
202  if (element_owned)
203  {
204  rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
205  }
206  }
207  }
208
209  if (mMetisPartitioning==DistributedTetrahedralMeshPartitionType::PETSC_MAT_PARTITION && PetscTools::IsParallel())
210  {
// NOTE(review): original line 211 is missing here (likely a PetscTools::Barrier()
// before the master prints the timing) — confirm against the repository.
212  if (PetscTools::AmMaster())
213  {
214  Timer::PrintAndReset("Element and halo node assignation");
215  }
216  }
217  }
218  rMeshReader.Reset();
219 }
220 
221 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Builds the distributed mesh from a mesh reader:
//  1. partition nodes/elements across processes (ComputeMeshPartitioning);
//  2. load owned + halo nodes (random-access for binary files, streaming for ASCII);
//  3. construct owned elements, then boundary elements wholly resolvable locally;
//  4. for a permuting partition, renumber nodes so each process owns a contiguous
//     index range and build the DistributedVectorFactory.
// Throws (and replicates the exception to all processes) if a face references a
// node that appears in no locally-known element.
// NOTE(review): the signature lines (original 222-223) are missing from this dump —
// evidently void ConstructFromMeshReader(AbstractMeshReader<ELEMENT_DIM,SPACE_DIM>&
// rMeshReader); confirm against the class header.
224 {
225  std::set<unsigned> nodes_owned;
226  std::set<unsigned> halo_nodes_owned;
227  std::set<unsigned> elements_owned;
228  std::vector<unsigned> proc_offsets;//(PetscTools::GetNumProcs());
229
230  this->mMeshFileBaseName = rMeshReader.GetMeshFileBaseName();
231  mTotalNumElements = rMeshReader.GetNumElements();
232  mTotalNumBoundaryElements = rMeshReader.GetNumFaces();
233  mTotalNumNodes = rMeshReader.GetNumNodes();
234
235
237  Timer::Reset();
238  ComputeMeshPartitioning(rMeshReader, nodes_owned, halo_nodes_owned, elements_owned, proc_offsets);
240  //Timer::Print("partitioning");
241
242  // Reserve memory
243  this->mElements.reserve(elements_owned.size());
244  this->mNodes.reserve(nodes_owned.size());
245
246  if ( rMeshReader.IsFileFormatBinary() )
247  {
250  std::vector<double> coords;
251  // Binary : load only the nodes which are needed
252  for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(nodes_owned);
253  node_it != rMeshReader.GetNodeIteratorEnd();
254  ++node_it)
255  {
256  //Loop over wholly-owned nodes
257  unsigned global_node_index = node_it.GetIndex();
258  coords = *node_it;
259  RegisterNode(global_node_index);
260  Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, coords, false);
261
262 //Node attributes in binary format are not yet supported, see #1730
263 // for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
264 // {
265 // double attribute = rMeshReader.GetNodeAttributes()[i];
266 // p_node->AddNodeAttribute(attribute);
267 // }
268
269  this->mNodes.push_back(p_node);
270  }
271  for (typename AbstractMeshReader<ELEMENT_DIM, SPACE_DIM>::NodeIterator node_it = rMeshReader.GetNodeIteratorBegin(halo_nodes_owned);
272  node_it != rMeshReader.GetNodeIteratorEnd();
273  ++node_it)
274  {
275  //Loop over halo-owned nodes
276  unsigned global_node_index = node_it.GetIndex();
277  coords = *node_it;
278  RegisterHaloNode(global_node_index);
279  mHaloNodes.push_back(new Node<SPACE_DIM>(global_node_index, coords, false));
280  }
281  }
282  else
283  {
284  // Ascii : Sequentially load all the nodes and store those owned (or halo-owned) by the process
286  for (unsigned node_index=0; node_index < mTotalNumNodes; node_index++)
287  {
288  std::vector<double> coords;
290  coords = rMeshReader.GetNextNode();
291
292  // The node is owned by the processor
293  if (nodes_owned.find(node_index) != nodes_owned.end())
294  {
295  RegisterNode(node_index);
296  Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, coords, false);
297
298  for (unsigned i = 0; i < rMeshReader.GetNodeAttributes().size(); i++)
299  {
300  double attribute = rMeshReader.GetNodeAttributes()[i];
301  p_node->AddNodeAttribute(attribute);
302  }
303
304  this->mNodes.push_back(p_node);
305  }
306
307  // The node is a halo node in this processor
308  if (halo_nodes_owned.find(node_index) != halo_nodes_owned.end())
309  {
310  RegisterHaloNode(node_index);
311  mHaloNodes.push_back(new Node<SPACE_DIM>(node_index, coords, false));
312  }
313  }
314  }
315
// NOTE(review): original line 316, the opening of this for-loop, is missing from this
// dump — it presumably reads
// for (typename AbstractMeshReader<ELEMENT_DIM,SPACE_DIM>::ElementIterator elem_it
// (matching the iterator usage below); confirm against the repository.
317  = rMeshReader.GetElementIteratorBegin(elements_owned);
318  elem_it != rMeshReader.GetElementIteratorEnd();
319  ++elem_it)
320  {
321  ElementData element_data = *elem_it;
322  unsigned global_element_index = elem_it.GetIndex();
323
324  std::vector<Node<SPACE_DIM>*> nodes;
325  for (unsigned j=0; j<ELEMENT_DIM+1; j++)
326  {
327  // Because we have populated mNodes and mHaloNodes above, we can now use this method, which should never throw
328  nodes.push_back(this->GetNodeOrHaloNode(element_data.NodeIndices[j]));
329  }
330
331  RegisterElement(global_element_index);
332  Element<ELEMENT_DIM,SPACE_DIM>* p_element = new Element<ELEMENT_DIM,SPACE_DIM>(global_element_index, nodes);
333  this->mElements.push_back(p_element);
334
335  if (rMeshReader.GetNumElementAttributes() > 0)
336  {
337  assert(rMeshReader.GetNumElementAttributes() == 1);
338  double attribute_value = element_data.AttributeValue;
339  p_element->SetAttribute(attribute_value);
340  }
341  }
342
343  // Boundary nodes and elements
344  try
345  {
346  for (unsigned face_index=0; face_index<mTotalNumBoundaryElements; face_index++)
347  {
348  ElementData face_data = rMeshReader.GetNextFaceData();
349  std::vector<unsigned> node_indices = face_data.NodeIndices;
350
// A face is processed by this process only if it contains at least one node we
// fully own (halo-only faces belong to a neighbouring process).
351  bool own = false;
352
353  for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
354  {
355  // if I own this node
356  if (mNodesMapping.find(node_indices[node_index]) != mNodesMapping.end())
357  {
358  own = true;
359  break;
360  }
361  }
362
363  if (!own)
364  {
365  continue;
366  }
367
368  // Determine if this is a boundary face
369  //std::set<unsigned> containing_element_indices; // Elements that contain this face
370  std::vector<Node<SPACE_DIM>*> nodes;
371
372  for (unsigned node_index=0; node_index<node_indices.size(); node_index++)
373  {
374  //because we have populated mNodes and mHaloNodes above, we can now use this method,
375  //which SHOULD never throw (but it does).
376  try
377  {
378  nodes.push_back(this->GetNodeOrHaloNode(node_indices[node_index]));
379  }
380  catch (Exception &)
381  {
382  EXCEPTION("Face does not appear in element file (Face " << face_index << " in "<<this->mMeshFileBaseName<< ")");
383  }
384  }
385
386  // This is a boundary face
387  // Ensure all its nodes are marked as boundary nodes
388  for (unsigned j=0; j<nodes.size(); j++)
389  {
390  if (!nodes[j]->IsBoundaryNode())
391  {
392  nodes[j]->SetAsBoundaryNode();
393  this->mBoundaryNodes.push_back(nodes[j]);
394  }
395  // Register the index that this boundary element will have with the node
396  nodes[j]->AddBoundaryElement(face_index);
397  }
398
399  RegisterBoundaryElement(face_index);
400  BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>* p_boundary_element = new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(face_index, nodes);
401  this->mBoundaryElements.push_back(p_boundary_element);
402
403  if (rMeshReader.GetNumFaceAttributes() > 0)
404  {
405  assert(rMeshReader.GetNumFaceAttributes() == 1);
406  p_boundary_element->SetAttribute(face_data.AttributeValue);
407  }
408  }
409  }
410  catch (Exception &e)
411  {
412  PetscTools::ReplicateException(true); //Bad face exception
413  throw e;
414  }
// NOTE(review): original line 415 is missing from this dump — it presumably is the
// success-path PetscTools::ReplicateException(false) that pairs with the call above
// so all processes agree whether a bad-face exception occurred; confirm.
416
417  if (mMetisPartitioning != DistributedTetrahedralMeshPartitionType::DUMB && PetscTools::IsParallel())
418  {
419  assert(this->mNodePermutation.size() != 0);
420  // If we are partitioning (and permuting) a mesh, we need to be certain that we aren't doing it twice
421  assert(rMeshReader.HasNodePermutation() == false);
422
423  // We reorder so that each process owns a contiguous set of the indices and we can then build a distributed vector factory.
424  ReorderNodes();
425
426  unsigned num_owned;
427  unsigned rank = PetscTools::GetMyRank();
428  if ( !PetscTools::AmTopMost() )
429  {
430  num_owned = proc_offsets[rank+1]-proc_offsets[rank];
431  }
432  else
433  {
434  num_owned = mTotalNumNodes - proc_offsets[rank];
435  }
436
437  assert(!this->mpDistributedVectorFactory);
438  this->mpDistributedVectorFactory = new DistributedVectorFactory(this->GetNumNodes(), num_owned);
439  }
440  else
441  {
442  // Dumb or sequential partition
443  assert(this->mpDistributedVectorFactory);
444
445  if (rMeshReader.HasNodePermutation())
446  {
447  // This is probably an unarchiving operation where the original run applied a permutation to the mesh
448  // We need to re-record that the permutation has happened (so that we can archive it correctly later).
449  this->mNodePermutation = rMeshReader.rGetNodePermutation();
450  }
451  }
452  rMeshReader.Reset();
453 }
454 
455 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the count of nodes owned by this process (halo nodes excluded).
// NOTE(review): signature line (original 456) missing from this dump — presumably
// unsigned ...::GetNumLocalNodes() const; confirm against the class header.
457 {
458  return this->mNodes.size();
459 }
460 
461 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the count of halo nodes held (but not owned) by this process.
// NOTE(review): signature line (original 462) missing from this dump — presumably
// unsigned ...::GetNumHaloNodes() const; confirm against the class header.
463 {
464  return this->mHaloNodes.size();
465 }
466 
467 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the count of elements owned by this process.
// NOTE(review): signature line (original 468) missing from this dump — presumably
// unsigned ...::GetNumLocalElements() const; confirm against the class header.
469 {
470  return this->mElements.size();
471 }
472 
473 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the count of boundary elements owned by this process.
// NOTE(review): signature line (original 474) missing from this dump — presumably
// unsigned ...::GetNumLocalBoundaryElements() const; confirm against the class header.
475 {
476  return this->mBoundaryElements.size();
477 }
478 
479 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the global (whole-mesh) node count, identical on every process.
// NOTE(review): signature line (original 480) missing from this dump — presumably
// unsigned ...::GetNumNodes() const; confirm against the class header.
481 {
482  return mTotalNumNodes;
483 }
484 
485 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the global node count; same value as GetNumNodes() here since a
// distributed mesh never marks nodes as deleted.
// NOTE(review): signature line (original 486) missing from this dump — presumably
// unsigned ...::GetNumAllNodes() const; confirm against the class header.
487 {
488  return mTotalNumNodes;
489 }
490 
491 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the global (whole-mesh) element count, identical on every process.
// NOTE(review): signature line (original 492) missing from this dump — presumably
// unsigned ...::GetNumElements() const; confirm against the class header.
493 {
494  return mTotalNumElements;
495 }
496 
497 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the partitioning method actually in use (which may have been downgraded
// to DUMB by the constructor or by SetDistributedVectorFactory).
// NOTE(review): signature line (original 498) missing from this dump — presumably
// DistributedTetrahedralMeshPartitionType::type ...::GetPartitionType() const; confirm.
499 {
500  return mMetisPartitioning;
501 }
502 
503 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns the global (whole-mesh) boundary element count, identical on every process.
// NOTE(review): signature line (original 504) missing from this dump — presumably
// unsigned ...::GetNumBoundaryElements() const; confirm against the class header.
505 {
506  return mTotalNumBoundaryElements;
507 }
508 
509 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
510 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::GetHaloNodeIndices(std::vector<unsigned>& rHaloIndices) const
511 {
512  //Make sure the output vector is empty
513  rHaloIndices.clear();
514  for (unsigned i=0; i<mHaloNodes.size(); i++)
515  {
516  rHaloIndices.push_back(mHaloNodes[i]->GetIndex());
517  }
518 }
519 
520 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Accessor for the spatial region assigned to this process (used by GEOMETRIC
// partitioning).  Throws if SetProcessRegion() has not been called.
// NOTE(review): signature line (original 521) missing from this dump — presumably
// ChasteCuboid<SPACE_DIM>* ...::GetProcessRegion(); confirm against the class header.
522 {
523  if (mpSpaceRegion == NULL)
524  {
525  EXCEPTION("Trying to get unset mpSpaceRegion");
526  }
527  return mpSpaceRegion;
528 }
529 
530 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Intentionally a no-op: every element stored locally is owned by this process,
// so there is no ownership flag to set (unlike the non-distributed mesh).
// NOTE(review): signature line (original 531) missing from this dump — presumably
// void ...::SetElementOwnerships(); confirm against the class header.
532 {
533  // All the local elements are owned by the processor (obviously...)
534  //Does nothing - unlike the non-distributed version
535 }
536 
537 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the spatial region this process is responsible for; required before a
// GEOMETRIC partition can be computed.  Takes ownership semantics of a raw pointer —
// the caller keeps responsibility for the region's lifetime (no delete here).
// NOTE(review): signature line (original 538) missing from this dump — presumably
// void ...::SetProcessRegion(ChasteCuboid<SPACE_DIM>* pRegion); confirm.
539 {
540  mpSpaceRegion = pRegion;
541 }
542 
543 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns whether this process is the designated owner of the given element,
// returning false (instead of propagating) when the element is not local.
// NOTE(review): the signature line (original 544) and the try-body (original 548)
// are missing from this dump; the try presumably delegates to the base-class
// ownership test and returns its result — confirm against the repository.
545 {
546  try
547  {
549  }
550  catch(Exception&) // we don't own the element
551  {
552  return false;
553  }
554 }
555 
556 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Returns whether this process is the designated owner of the given boundary
// element (face), returning false when the face is not local.
// NOTE(review): the signature line (original 557) and the try-body (original 561)
// are missing from this dump; the try presumably delegates to the base-class
// ownership test and returns its result — confirm against the repository.
558 {
559  try
560  {
562  }
563  catch(Exception&) // we don't own the face
564  {
565  return false;
566  }
567 }
568 
569 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local index mapping for an owned node about to be appended
// to mNodes (the local index is the current container size).
// NOTE(review): signature line (original 570) missing from this dump — presumably
// void ...::RegisterNode(unsigned index); confirm against the class header.
571 {
572  mNodesMapping[index] = this->mNodes.size();
573 }
574 
575 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local index mapping for a halo node about to be appended
// to mHaloNodes.
// NOTE(review): signature line (original 576) missing from this dump — presumably
// void ...::RegisterHaloNode(unsigned index); confirm against the class header.
577 {
578  mHaloNodesMapping[index] = mHaloNodes.size();
579 }
580 
581 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local index mapping for an owned element about to be
// appended to mElements.
// NOTE(review): signature line (original 582) missing from this dump — presumably
// void ...::RegisterElement(unsigned index); confirm against the class header.
583 {
584  mElementsMapping[index] = this->mElements.size();
585 }
586 
587 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Records the global->local index mapping for an owned boundary element about to
// be appended to mBoundaryElements.
// NOTE(review): signature line (original 588) missing from this dump — presumably
// void ...::RegisterBoundaryElement(unsigned index); confirm against the class header.
589 {
590  mBoundaryElementsMapping[index] = this->mBoundaryElements.size();
591 }
592 
593 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global node index to this process's local index in mNodes,
// throwing if the node is not owned here (halo nodes are NOT found by this method).
// NOTE(review): signature line (original 594) missing from this dump — presumably
// unsigned ...::SolveNodeMapping(unsigned index) const; confirm against the header.
595 {
596  std::map<unsigned, unsigned>::const_iterator node_position = mNodesMapping.find(index);
597
598  if (node_position == mNodesMapping.end())
599  {
600  EXCEPTION("Requested node " << index << " does not belong to processor " << PetscTools::GetMyRank());
601  }
602  return node_position->second;
603 }
604 
605 //template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
606 //unsigned DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::SolveHaloNodeMapping(unsigned index)
607 //{
608 // assert(mHaloNodesMapping.find(index) != mHaloNodesMapping.end());
609 // return mHaloNodesMapping[index];
610 //}
611 
612 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global element index to this process's local index in mElements,
// throwing if the element is not owned here.
// NOTE(review): signature line (original 613) missing from this dump — presumably
// unsigned ...::SolveElementMapping(unsigned index) const; confirm against the header.
614 {
615  std::map<unsigned, unsigned>::const_iterator element_position = mElementsMapping.find(index);
616
617  if (element_position == mElementsMapping.end())
618  {
619  EXCEPTION("Requested element " << index << " does not belong to processor " << PetscTools::GetMyRank());
620  }
621
622  return element_position->second;
623 }
624 
625 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Translates a global boundary-element index to this process's local index in
// mBoundaryElements, throwing if the boundary element is not owned here.
// NOTE(review): signature line (original 626) missing from this dump — presumably
// unsigned ...::SolveBoundaryElementMapping(unsigned index) const; confirm.
627 {
628  std::map<unsigned, unsigned>::const_iterator boundary_element_position = mBoundaryElementsMapping.find(index);
629
630  if (boundary_element_position == mBoundaryElementsMapping.end())
631  {
632  EXCEPTION("Requested boundary element " << index << " does not belong to processor " << PetscTools::GetMyRank());
633  }
634
635  return boundary_element_position->second;
636 }
637 
638 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Looks up a node by global index, searching halo nodes first and then owned
// nodes; throws if the index is known to neither container on this process.
// NOTE(review): signature line (original 639) missing from this dump — presumably
// Node<SPACE_DIM>* ...::GetNodeOrHaloNode(unsigned index) const; confirm.
640 {
641  std::map<unsigned, unsigned>::const_iterator node_position;
642  // First search the halo (expected to be a smaller map so quicker)
643  if ((node_position=mHaloNodesMapping.find(index)) != mHaloNodesMapping.end())
644  {
645  return mHaloNodes[node_position->second];
646  }
647  // Next search the owned node
648  if ((node_position=mNodesMapping.find(index)) != mNodesMapping.end())
649  {
650  //Found an owned node
651  return this->mNodes[node_position->second];
652  }
653  // Not here
654  EXCEPTION("Requested node/halo " << index << " does not belong to processor " << PetscTools::GetMyRank());
655 }
656 
657 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Applies mNodePermutation to every owned and halo node: each node's global index
// is replaced by its permuted value and the global->local maps are rebuilt to
// match.  Only meaningful after a parallel (permuting) partition.
// NOTE(review): signature line (original 658) missing from this dump — presumably
// void ...::ReorderNodes(); confirm against the class header.
659 {
660  assert(PetscTools::IsParallel());
661
662  // Need to rebuild global-local maps
663  mNodesMapping.clear();
664  mHaloNodesMapping.clear();
665
666  // Update indices
667  for (unsigned index=0; index<this->mNodes.size(); index++)
668  {
669  unsigned old_index = this->mNodes[index]->GetIndex();
670  unsigned new_index = this->mNodePermutation[old_index];
671
672  this->mNodes[index]->SetIndex(new_index);
673  mNodesMapping[new_index] = index;
674  }
675
676  for (unsigned index=0; index<mHaloNodes.size(); index++)
677  {
678  unsigned old_index = mHaloNodes[index]->GetIndex();
679  unsigned new_index = this->mNodePermutation[old_index];
680
681  mHaloNodes[index]->SetIndex(new_index);
682  mHaloNodesMapping[new_index] = index;
683  }
684 }
685 
686 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructs a distributed 1D mesh of 'width' unit-length elements (width+1 nodes).
// GEOMETRIC partition: a serial mesh is written to disk and re-read via
// ConstructFromMeshReader; otherwise a DUMB slice partition is built in place with
// one halo node at each interior process boundary.
// NOTE(review): the signature line (original 687) is missing from this dump —
// presumably void ...::ConstructLinearMesh(unsigned width); confirm.
688 {
689  assert(ELEMENT_DIM == 1);
690
691  //Check that there are enough nodes to make the parallelisation worthwhile
692  if (width==0)
693  {
694  EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
695  }
696
697  // Hook to pick up when we are using a geometric partition.
698  if(mMetisPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
699  {
700  if (!mpSpaceRegion)
701  {
702  EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
703  }
704
705  // Write a serial file, the load on distributed processors.
// NOTE(review): original lines 706, 709 and 713 are missing from this dump — they
// presumably guard the write with PetscTools::AmMaster(), declare the serial
// TetrahedralMesh 'base_mesh', and barrier after the scoped block; confirm.
707  {
708  TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_linear_mesh");
710  base_mesh.ConstructLinearMesh(width);
711  mesh_writer.WriteFilesUsingMesh(base_mesh);
712  }
714
715  OutputFileHandler output_handler("", false);
716
717  std::string output_dir = output_handler.GetOutputDirectoryFullPath();
718  TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_linear_mesh");
719
720  this->ConstructFromMeshReader(mesh_reader);
721  }
722  else // use a default partition.
723  {
724  //Use dumb partition so that archiving doesn't permute anything
725  mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
726  mTotalNumNodes=width+1;
727  mTotalNumBoundaryElements=2u;
728  mTotalNumElements=width;
729
730  //Use DistributedVectorFactory to make a dumb partition of the nodes
731  assert(!this->mpDistributedVectorFactory);
732  this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes);
733  if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
734  {
735  //It's a short mesh and this process owns no nodes
736  return;
737  }
738
739  /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
740  * higher numbered process may have dropped out of this construction altogether
741  * (because it has no local ownership)
742  */
743  bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
744
745  unsigned lo_node=this->mpDistributedVectorFactory->GetLow();
746  unsigned hi_node=this->mpDistributedVectorFactory->GetHigh();
747  if (!PetscTools::AmMaster())
748  {
749  //Allow for a halo node
750  lo_node--;
751  }
752  if (!am_top_most)
753  {
754  //Allow for a halo node
755  hi_node++;
756  }
757  Node<SPACE_DIM>* p_old_node=NULL;
758  for (unsigned node_index=lo_node; node_index<hi_node; node_index++)
759  {
760  // create node or halo-node
761  Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(node_index, node_index==0 || node_index==width, node_index);
762  if (node_index<this->mpDistributedVectorFactory->GetLow() ||
763  node_index==this->mpDistributedVectorFactory->GetHigh() )
764  {
765  //Beyond left or right it's a halo node
766  RegisterHaloNode(node_index);
767  mHaloNodes.push_back(p_node);
768  }
769  else
770  {
771  RegisterNode(node_index);
772  this->mNodes.push_back(p_node); // create node
773
774  //A boundary face has to be wholly owned by the process
775  //Since, when ELEMENT_DIM>1 we have *at least* one boundary node as a non-halo
776  if (node_index==0) // create left boundary node and boundary element
777  {
778  this->mBoundaryNodes.push_back(p_node);
779  RegisterBoundaryElement(0);
780  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(0, p_node) );
781  }
782  if (node_index==width) // create right boundary node and boundary element
783  {
784  this->mBoundaryNodes.push_back(p_node);
785  RegisterBoundaryElement(1);
786  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(1, p_node) );
787  }
788  }
789  if (node_index>lo_node) // create element
790  {
791  std::vector<Node<SPACE_DIM>*> nodes;
792  nodes.push_back(p_old_node);
793  nodes.push_back(p_node);
794  RegisterElement(node_index-1);
795  this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(node_index-1, nodes) );
796  }
797  //Keep track of the node which we've just created
798  p_old_node=p_node;
799  }
800  }
801 }
802 
803 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
// Constructs a distributed 2D rectangular mesh of width*height unit squares, each
// split into two triangles ('stagger' alternates the diagonal direction).
// GEOMETRIC partition: a serial mesh is written to disk and re-read via
// ConstructFromMeshReader.  Otherwise a DUMB partition in horizontal slices is
// built in place, with one halo row at each interior process boundary.
804 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::ConstructRectangularMesh(unsigned width, unsigned height, bool stagger)
805 {
806  assert(SPACE_DIM == 2);
807  assert(ELEMENT_DIM == 2);
808  //Check that there are enough nodes to make the parallelisation worthwhile
809  if (height==0)
810  {
811  EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
812  }
813
814  // Hook to pick up when we are using a geometric partition.
815  if(mMetisPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
816  {
817  if (!mpSpaceRegion)
818  {
819  EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
820  }
821
822  // Write a serial file, the load on distributed processors.
// NOTE(review): original lines 823, 826 and 830 are missing from this dump — they
// presumably guard the write with PetscTools::AmMaster(), declare the serial
// TetrahedralMesh 'base_mesh', and barrier after the scoped block; confirm.
824  {
825  TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_rectangular_mesh");
827  base_mesh.ConstructRectangularMesh(width, height);
828  mesh_writer.WriteFilesUsingMesh(base_mesh);
829  }
831
832  OutputFileHandler output_handler("", false);
833
834  std::string output_dir = output_handler.GetOutputDirectoryFullPath();
835  TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_rectangular_mesh");
836
837  this->ConstructFromMeshReader(mesh_reader);
838  }
839  else
840  {
841  //Use dumb partition so that archiving doesn't permute anything
842  mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
843
844  mTotalNumNodes=(width+1)*(height+1);
845  mTotalNumBoundaryElements=(width+height)*2;
846  mTotalNumElements=width*height*2;
847
848  //Use DistributedVectorFactory to make a dumb partition of space
849  DistributedVectorFactory y_partition(height+1);
850  unsigned lo_y = y_partition.GetLow();
851  unsigned hi_y = y_partition.GetHigh();
852  //Dumb partition of nodes has to be such that each process gets complete slices
853  assert(!this->mpDistributedVectorFactory);
854  this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*y_partition.GetLocalOwnership());
855  if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
856  {
857  //It's a short mesh and this process owns no nodes
858  return;
859  }
860  /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
861  * higher numbered process may have dropped out of this construction altogether
862  * (because it has no local ownership)
863  */
864  bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
865
866
867  if (!PetscTools::AmMaster())
868  {
869  //Allow for a halo node
870  lo_y--;
871  }
872  if (!am_top_most)
873  {
874  //Allow for a halo node
875  hi_y++;
876  }
877
878  //Construct the nodes
879  for (unsigned j=lo_y; j<hi_y; j++)
880  {
881  for (unsigned i=0; i<width+1; i++)
882  {
883  bool is_boundary=false;
884  if (i==0 || j==0 || i==width || j==height)
885  {
886  is_boundary=true;
887  }
888  unsigned global_node_index=((width+1)*(j) + i); //Verified from sequential
889  Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j);
890  if (j<y_partition.GetLow() || j==y_partition.GetHigh() )
891  {
892  //Beyond left or right it's a halo node
893  RegisterHaloNode(global_node_index);
894  mHaloNodes.push_back(p_node);
895  }
896  else
897  {
898  RegisterNode(global_node_index);
899  this->mNodes.push_back(p_node);
900  }
901  if (is_boundary)
902  {
903  this->mBoundaryNodes.push_back(p_node);
904  }
905  }
906  }
907
908  //Construct the boundary elements
909  unsigned belem_index;
910  //Top
911  if (am_top_most)
912  {
913  for (unsigned i=0; i<width; i++)
914  {
915  std::vector<Node<SPACE_DIM>*> nodes;
916  nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i+1 ));
917  nodes.push_back(GetNodeOrHaloNode( height*(width+1)+i ));
918  belem_index=i;
919  RegisterBoundaryElement(belem_index);
920  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
921  }
922  }
923
924  //Right
925  for (unsigned j=lo_y+1; j<hi_y; j++)
926  {
927  std::vector<Node<SPACE_DIM>*> nodes;
928  nodes.push_back(GetNodeOrHaloNode( (width+1)*j-1 ));
929  nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1)-1 ));
930  belem_index=width+j-1;
931  RegisterBoundaryElement(belem_index);
932  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
933  }
934
935  //Bottom
936  if (PetscTools::AmMaster())
937  {
938  for (unsigned i=0; i<width; i++)
939  {
940  std::vector<Node<SPACE_DIM>*> nodes;
941  nodes.push_back(GetNodeOrHaloNode( i ));
942  nodes.push_back(GetNodeOrHaloNode( i+1 ));
943  belem_index=width+height+i;
944  RegisterBoundaryElement(belem_index);
945  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
946  }
947  }
948
949  //Left
950  for (unsigned j=lo_y; j<hi_y-1; j++)
951  {
952  std::vector<Node<SPACE_DIM>*> nodes;
953  nodes.push_back(GetNodeOrHaloNode( (width+1)*(j+1) ));
954  nodes.push_back(GetNodeOrHaloNode( (width+1)*(j) ));
955  belem_index=2*width+height+j;
956  RegisterBoundaryElement(belem_index);
957  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index,nodes));
958  }
959
960
961  //Construct the elements
962  unsigned elem_index;
963  for (unsigned j=lo_y; j<hi_y-1; j++)
964  {
965  for (unsigned i=0; i<width; i++)
966  {
967  unsigned parity=(i+(height-j))%2;//Note that parity is measured from the top-left (not bottom left) for historical reasons
968  unsigned nw=(j+1)*(width+1)+i; //ne=nw+1
969  unsigned sw=(j)*(width+1)+i; //se=sw+1
970  std::vector<Node<SPACE_DIM>*> upper_nodes;
971  upper_nodes.push_back(GetNodeOrHaloNode( nw ));
972  upper_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
973  if (stagger==false || parity == 1)
974  {
975  upper_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
976  }
977  else
978  {
979  upper_nodes.push_back(GetNodeOrHaloNode( sw ));
980  }
981  elem_index=2*(j*width+i);
982  RegisterElement(elem_index);
983  this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,upper_nodes));
984  std::vector<Node<SPACE_DIM>*> lower_nodes;
985  lower_nodes.push_back(GetNodeOrHaloNode( sw+1 ));
986  lower_nodes.push_back(GetNodeOrHaloNode( sw ));
987  if (stagger==false ||parity == 1)
988  {
989  lower_nodes.push_back(GetNodeOrHaloNode( nw ));
990  }
991  else
992  {
993  lower_nodes.push_back(GetNodeOrHaloNode( nw+1 ));
994  }
995  elem_index++;
996  RegisterElement(elem_index);
997  this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index,lower_nodes));
998  }
999  }
1000  }
1001 }
1002 
1003 
1004 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1006  unsigned height,
1007  unsigned depth)
1008 {
1009  assert(SPACE_DIM == 3);
1010  assert(ELEMENT_DIM == 3);
1011  //Check that there are enough nodes to make the parallelisation worthwhile
1012  if (depth==0)
1013  {
1014  EXCEPTION("There aren't enough nodes to make parallelisation worthwhile");
1015  }
1016 
1017  // Hook to pick up when we are using a geometric partition.
1018  if(mMetisPartitioning == DistributedTetrahedralMeshPartitionType::GEOMETRIC)
1019  {
1020  if (!mpSpaceRegion)
1021  {
1022  EXCEPTION("Space region not set for GEOMETRIC partition of DistributedTetrahedralMesh");
1023  }
1024 
1025  // Write a serial file, the load on distributed processors.
1027  {
1028  TrianglesMeshWriter<ELEMENT_DIM,SPACE_DIM> mesh_writer("", "temp_cuboid_mesh");
1030  base_mesh.ConstructCuboid(width, height, depth);
1031  mesh_writer.WriteFilesUsingMesh(base_mesh);
1032  }
1034 
1035  OutputFileHandler output_handler("", false);
1036 
1037  std::string output_dir = output_handler.GetOutputDirectoryFullPath();
1038  TrianglesMeshReader<ELEMENT_DIM,SPACE_DIM> mesh_reader(output_dir+"temp_cuboid_mesh");
1039 
1040  this->ConstructFromMeshReader(mesh_reader);
1041  }
1042  else
1043  {
1044  //Use dumb partition so that archiving doesn't permute anything
1045  mMetisPartitioning=DistributedTetrahedralMeshPartitionType::DUMB;
1046 
1047  mTotalNumNodes=(width+1)*(height+1)*(depth+1);
1048  mTotalNumBoundaryElements=((width*height)+(width*depth)+(height*depth))*4;//*2 for top-bottom, *2 for tessellating each unit square
1049  mTotalNumElements=width*height*depth*6;
1050 
1051  //Use DistributedVectorFactory to make a dumb partition of space
1052  DistributedVectorFactory z_partition(depth+1);
1053  unsigned lo_z = z_partition.GetLow();
1054  unsigned hi_z = z_partition.GetHigh();
1055 
1056  //Dumb partition of nodes has to be such that each process gets complete slices
1057  assert(!this->mpDistributedVectorFactory);
1058  this->mpDistributedVectorFactory = new DistributedVectorFactory(mTotalNumNodes, (width+1)*(height+1)*z_partition.GetLocalOwnership());
1059  if (this->mpDistributedVectorFactory->GetLocalOwnership() == 0)
1060  {
1061  return;
1062  }
1063  /* am_top_most is like PetscTools::AmTopMost() but accounts for the fact that a
1064  * higher numbered process may have dropped out of this construction altogether
1065  * (because is has no local ownership)
1066  */
1067  bool am_top_most = (this->mpDistributedVectorFactory->GetHigh() == mTotalNumNodes);
1068 
1069  if (!PetscTools::AmMaster())
1070  {
1071  //Allow for a halo node
1072  lo_z--;
1073  }
1074  if (!am_top_most)
1075  {
1076  //Allow for a halo node
1077  hi_z++;
1078  }
1079 
1080  //Construct the nodes
1081  unsigned global_node_index;
1082  for (unsigned k=lo_z; k<hi_z; k++)
1083  {
1084  for (unsigned j=0; j<height+1; j++)
1085  {
1086  for (unsigned i=0; i<width+1; i++)
1087  {
1088  bool is_boundary = false;
1089  if (i==0 || j==0 || k==0 || i==width || j==height || k==depth)
1090  {
1091  is_boundary = true;
1092  }
1093  global_node_index = (k*(height+1)+j)*(width+1)+i;
1094 
1095  Node<SPACE_DIM>* p_node = new Node<SPACE_DIM>(global_node_index, is_boundary, i, j, k);
1096 
1097  if (k<z_partition.GetLow() || k==z_partition.GetHigh() )
1098  {
1099  //Beyond left or right it's a halo node
1100  RegisterHaloNode(global_node_index);
1101  mHaloNodes.push_back(p_node);
1102  }
1103  else
1104  {
1105  RegisterNode(global_node_index);
1106  this->mNodes.push_back(p_node);
1107  }
1108 
1109  if (is_boundary)
1110  {
1111  this->mBoundaryNodes.push_back(p_node);
1112  }
1113  }
1114  }
1115  }
1116 
1117  // Construct the elements
1118 
1119  unsigned element_nodes[6][4] = {{0, 1, 5, 7}, {0, 1, 3, 7},
1120  {0, 2, 3, 7}, {0, 2, 6, 7},
1121  {0, 4, 6, 7}, {0, 4, 5, 7}};
1122  std::vector<Node<SPACE_DIM>*> tetrahedra_nodes;
1123 
1124  for (unsigned k=lo_z; k<hi_z-1; k++)
1125  {
1126  unsigned belem_index = 0;
1127  if (k != 0)
1128  {
1129  // height*width squares on upper face, k layers of 2*height+2*width square aroun
1130  belem_index = 2*(height*width+k*2*(height+width));
1131  }
1132 
1133  for (unsigned j=0; j<height; j++)
1134  {
1135  for (unsigned i=0; i<width; i++)
1136  {
1137  // Compute the nodes' index
1138  unsigned global_node_indices[8];
1139  unsigned local_node_index = 0;
1140 
1141  for (unsigned z = 0; z < 2; z++)
1142  {
1143  for (unsigned y = 0; y < 2; y++)
1144  {
1145  for (unsigned x = 0; x < 2; x++)
1146  {
1147  global_node_indices[local_node_index] = i+x+(width+1)*(j+y+(height+1)*(k+z));
1148 
1149  local_node_index++;
1150  }
1151  }
1152  }
1153 
1154  for (unsigned m = 0; m < 6; m++)
1155  {
1156  // Tetrahedra #m
1157 
1158  tetrahedra_nodes.clear();
1159 
1160  for (unsigned n = 0; n < 4; n++)
1161  {
1162  tetrahedra_nodes.push_back(GetNodeOrHaloNode( global_node_indices[element_nodes[m][n]] ));
1163  }
1164  unsigned elem_index = 6 * ((k*height+j)*width+i)+m;
1165  RegisterElement(elem_index);
1166  this->mElements.push_back(new Element<ELEMENT_DIM,SPACE_DIM>(elem_index, tetrahedra_nodes));
1167  }
1168 
1169  //Are we at a boundary?
1170  std::vector<Node<SPACE_DIM>*> triangle_nodes;
1171 
1172  if (i == 0) //low face at x==0
1173  {
1174  triangle_nodes.clear();
1175  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1176  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1177  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1178  RegisterBoundaryElement(belem_index);
1179  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1180  triangle_nodes.clear();
1181  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1182  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1183  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1184  RegisterBoundaryElement(belem_index);
1185  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1186  }
1187  if (i == width-1) //high face at x=width
1188  {
1189  triangle_nodes.clear();
1190  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1191  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1192  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1193  RegisterBoundaryElement(belem_index);
1194  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1195  triangle_nodes.clear();
1196  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1197  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1198  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1199  RegisterBoundaryElement(belem_index);
1200  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1201  }
1202  if (j == 0) //low face at y==0
1203  {
1204  triangle_nodes.clear();
1205  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1206  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1207  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1208  RegisterBoundaryElement(belem_index);
1209  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1210  triangle_nodes.clear();
1211  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1212  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1213  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1214  RegisterBoundaryElement(belem_index);
1215  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1216  }
1217  if (j == height-1) //high face at y=height
1218  {
1219  triangle_nodes.clear();
1220  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1221  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1222  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1223  RegisterBoundaryElement(belem_index);
1224  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1225  triangle_nodes.clear();
1226  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1227  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1228  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1229  RegisterBoundaryElement(belem_index);
1230  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1231  }
1232  if (k == 0) //low face at z==0
1233  {
1234  triangle_nodes.clear();
1235  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1236  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1237  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[2] ));
1238  RegisterBoundaryElement(belem_index);
1239  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1240  triangle_nodes.clear();
1241  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[0] ));
1242  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[1] ));
1243  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[3] ));
1244  RegisterBoundaryElement(belem_index);
1245  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1246  }
1247  if (k == depth-1) //high face at z=depth
1248  {
1249  triangle_nodes.clear();
1250  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1251  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1252  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[5] ));
1253  RegisterBoundaryElement(belem_index);
1254  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1255  triangle_nodes.clear();
1256  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[4] ));
1257  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[6] ));
1258  triangle_nodes.push_back(GetNodeOrHaloNode( global_node_indices[7] ));
1259  RegisterBoundaryElement(belem_index);
1260  this->mBoundaryElements.push_back(new BoundaryElement<ELEMENT_DIM-1,SPACE_DIM>(belem_index++,triangle_nodes));
1261  }
1262  }//i
1263  }//j
1264  }//k
1265  }
1266 }
1267 
1268 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1269 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(const double xFactor, const double yFactor, const double zFactor)
1270 {
1271  //Base class scale (scales node positions)
1272  AbstractTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Scale(xFactor, yFactor, zFactor);
1273  //Scales halos
1274  for (unsigned i=0; i<mHaloNodes.size(); i++)
1275  {
1276  c_vector<double, SPACE_DIM>& r_location = mHaloNodes[i]->rGetModifiableLocation();
1277  if (SPACE_DIM>=3)
1278  {
1279  r_location[2] *= zFactor;
1280  }
1281  if (SPACE_DIM>=2)
1282  {
1283  r_location[1] *= yFactor;
1284  }
1285  r_location[0] *= xFactor;
1286  }
1287 
1288 }
1289 
1290 
1291 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1294  std::set<unsigned>& rElementsOwned,
1295  std::set<unsigned>& rNodesOwned,
1296  std::set<unsigned>& rHaloNodesOwned,
1297  std::vector<unsigned>& rProcessorsOffset)
1298 {
1299  assert(PetscTools::IsParallel());
1300  assert(ELEMENT_DIM==2 || ELEMENT_DIM==3); // Metis works with triangles and tetras
1301 
1302  const unsigned num_elements = rMeshReader.GetNumElements();
1303  const unsigned num_procs = PetscTools::GetNumProcs();
1304  const unsigned local_proc_index = PetscTools::GetMyRank();
1305 
1306  /*
1307  * Work out initial element distribution
1308  */
1309  boost::scoped_array<idxtype> element_distribution(new idxtype[num_procs+1]);
1310  boost::scoped_array<int> element_counts(new int[num_procs]);
1311 
1312  element_distribution[0] = 0;
1313 
1314  for (unsigned proc_index=1; proc_index<num_procs; proc_index++)
1315  {
1316  element_distribution[proc_index] = element_distribution[proc_index-1] + num_elements/num_procs;
1317  element_counts[proc_index-1] = element_distribution[proc_index] - element_distribution[proc_index-1];
1318  }
1319 
1320  element_distribution[num_procs] = num_elements;
1321  element_counts[num_procs-1] = element_distribution[num_procs] - element_distribution[num_procs-1];
1322 
1323  /*
1324  * Create distributed mesh data structure
1325  */
1326  idxtype first_local_element = element_distribution[local_proc_index];
1327  idxtype last_plus_one_element = element_distribution[local_proc_index+1];
1328  idxtype num_local_elements = last_plus_one_element - first_local_element;
1329 
1330  boost::scoped_array<idxtype> eind(new idxtype[num_local_elements*(ELEMENT_DIM+1)]);
1331  boost::scoped_array<idxtype> eptr(new idxtype[num_local_elements+1]);
1332 
1333  if ( rMeshReader.IsFileFormatBinary() && first_local_element > 0)
1334  {
1335  // Advance the file pointer to the first element before the ones I own.
1336  rMeshReader.GetElementData(first_local_element - 1);
1337  }
1338  else
1339  {
1340  // Advance the file pointer to the first element before the ones I own.
1341  for (idxtype element_index = 0; element_index < first_local_element; element_index++)
1342  {
1343  rMeshReader.GetNextElementData();
1344  }
1345  }
1346 
1347  unsigned counter = 0;
1348  for (idxtype element_index = 0; element_index < num_local_elements; element_index++)
1349  {
1350  ElementData element_data;
1351 
1352  element_data = rMeshReader.GetNextElementData();
1353 
1354  eptr[element_index] = counter;
1355  for (unsigned i=0; i<ELEMENT_DIM+1; i++)
1356  {
1357  eind[counter++] = element_data.NodeIndices[i];
1358  }
1359  }
1360  eptr[num_local_elements] = counter;
1361 
1362  rMeshReader.Reset();
1363 
1364  idxtype numflag = 0; // METIS speak for C-style numbering
1365  /* Connectivity degree.
1366  * Specifically, an GRAPH EDGE is placed between any two elements if and only if they share
1367  * at least this many nodes.
1368  *
1369  * Manual recommends "for meshes containing only triangular, tetrahedral,
1370  * hexahedral, or rectangular elements, this parameter can be set to two, three, four, or two, respectively.
1371  */
1372  idxtype ncommonnodes = 3; //Linear tetrahedra
1373  if (ELEMENT_DIM == 2)
1374  {
1375  ncommonnodes = 2;
1376  }
1377 
1378  MPI_Comm communicator = PETSC_COMM_WORLD;
1379 
1380  idxtype* xadj;
1381  idxtype* adjncy;
1382 
1383  Timer::Reset();
1384  ParMETIS_V3_Mesh2Dual(element_distribution.get(), eptr.get(), eind.get(),
1385  &numflag, &ncommonnodes, &xadj, &adjncy, &communicator);
1386  //Timer::Print("ParMETIS Mesh2Dual");
1387 
1388  // Be more memory efficient, and get rid of (maybe large) arrays as soon as they're no longer needed, rather than at end of scope
1389  eind.reset();
1390  eptr.reset();
1391 
1392  idxtype weight_flag = 0; // unweighted graph
1393  idxtype n_constraints = 1; // number of weights that each vertex has (number of balance constraints)
1394  idxtype n_subdomains = PetscTools::GetNumProcs();
1395  idxtype options[3]; // extra options
1396  options[0] = 0; // ignore extra options
1397  idxtype edgecut;
1398  boost::scoped_array<real_t> tpwgts(new real_t[n_subdomains]);
1399  real_t ubvec_value = (real_t)1.05;
1400  for (unsigned proc=0; proc<PetscTools::GetNumProcs(); proc++)
1401  {
1402  tpwgts[proc] = ((real_t)1.0)/n_subdomains;
1403  }
1404  boost::scoped_array<idxtype> local_partition(new idxtype[num_local_elements]);
1405 
1406 /*
1407  * In order to use ParMETIS_V3_PartGeomKway, we need to sort out how to compute the coordinates of the
1408  * centers of each element efficiently.
1409  *
1410  * In the meantime use ParMETIS_V3_PartKway.
1411  */
1412 // int n_dimensions = ELEMENT_DIM;
1413 // float node_coordinates[num_local_elements*SPACE_DIM];
1414 //
1415 // ParMETIS_V3_PartGeomKway(element_distribution, xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
1416 // &n_dimensions, node_coordinates, &n_constraints, &n_subdomains, NULL, NULL,
1417 // options, &edgecut, local_partition, &communicator);
1418 
1419  Timer::Reset();
1420  ParMETIS_V3_PartKway(element_distribution.get(), xadj, adjncy, NULL, NULL, &weight_flag, &numflag,
1421  &n_constraints, &n_subdomains, tpwgts.get(), &ubvec_value,
1422  options, &edgecut, local_partition.get(), &communicator);
1423  //Timer::Print("ParMETIS PartKway");
1424  tpwgts.reset();
1425 
1426  boost::scoped_array<idxtype> global_element_partition(new idxtype[num_elements]);
1427 
1428  //idxtype is normally int (see metis-4.0/Lib/struct.h 17-22) but is 64bit on Windows
1429  MPI_Datatype mpi_idxtype = MPI_LONG_LONG_INT;
1430  if (sizeof(idxtype) == sizeof(int))
1431  {
1432  mpi_idxtype = MPI_INT;
1433  }
1434  boost::scoped_array<int> int_element_distribution(new int[num_procs+1]);
1435  for (unsigned i=0; i<num_procs+1; ++i)
1436  {
1437  int_element_distribution[i] = element_distribution[i];
1438  }
1439  MPI_Allgatherv(local_partition.get(), num_local_elements, mpi_idxtype,
1440  global_element_partition.get(), element_counts.get(), int_element_distribution.get(), mpi_idxtype, PETSC_COMM_WORLD);
1441 
1442  local_partition.reset();
1443 
1444  for (unsigned elem_index=0; elem_index<num_elements; elem_index++)
1445  {
1446  if ((unsigned) global_element_partition[elem_index] == local_proc_index)
1447  {
1448  rElementsOwned.insert(elem_index);
1449  }
1450  }
1451 
1452  rMeshReader.Reset();
1453  free(xadj);
1454  free(adjncy);
1455 
1456  unsigned num_nodes = rMeshReader.GetNumNodes();
1457 
1458  // Initialise with no nodes known
1459  std::vector<unsigned> global_node_partition(num_nodes, UNASSIGNED_NODE);
1460 
1461  assert(rProcessorsOffset.size() == 0); // Making sure the vector is empty. After calling resize() only newly created memory will be initialised to 0.
1462  rProcessorsOffset.resize(PetscTools::GetNumProcs(), 0);
1463 
1464  /*
1465  * Work out node distribution based on initial element distribution returned by ParMETIS
1466  *
1467  * In this loop we handle 4 different data structures:
1468  * global_node_partition and rProcessorsOffset are global,
1469  * rNodesOwned and rHaloNodesOwned are local.
1470  */
1471 
1472  /*
1473  * Note that at this point each process has to read the entire element file in order to compute
1474  * the node partition form the initial element distribution.
1475  * * Previously we randomly permuted the BIN file element access order on each process so that the processes
1476  * weren't reading the same file sectors at the same time
1477  * * We noted that with large files (above about 0.5 GigaByte) on HECToR the random access file reader
1478  * was spending all its time in fseekg. This is presumably because each fseekg from the start of the file
1479  * involves multiple levels of indirect file block pointers.
1480  * * Hence the use of random element reading is only useful for the niche of moderately large meshes with
1481  * process counts in the thousands.
1482  * Hence BIN file element permuting is deprecated - we just read the file in order.
1483  * See
1484  * https://chaste.cs.ox.ac.uk/trac/browser/trunk/mesh/src/common/DistributedTetrahedralMesh.cpp?rev=19291#L1459
1485  */
1486 
1487  for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
1488  {
1489  unsigned element_owner = global_element_partition[element_number];
1490 
1491  ElementData element_data;
1492 
1493  element_data = rMeshReader.GetNextElementData();
1494 
1495  for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
1496  node_it != element_data.NodeIndices.end();
1497  ++node_it)
1498  {
1499  /*
1500  * For each node in this element, check whether it hasn't been assigned to another processor yet.
1501  * If so, assign it to the owner of the element. Otherwise, consider it halo.
1502  */
1503  if ( global_node_partition[*node_it] == UNASSIGNED_NODE )
1504  {
1505  if (element_owner == local_proc_index)
1506  {
1507  rNodesOwned.insert(*node_it);
1508  }
1509 
1510  global_node_partition[*node_it] = element_owner;
1511 
1512  // Offset is defined as the first node owned by a processor. We compute it incrementally.
1513  // i.e. if node_index belongs to proc 3 (of 6) we have to shift the processors 4, 5, and 6
1514  // offset a position.
1515  for (unsigned proc=element_owner+1; proc<PetscTools::GetNumProcs(); proc++)
1516  {
1517  rProcessorsOffset[proc]++;
1518  }
1519  }
1520  else
1521  {
1522  if (element_owner == local_proc_index)
1523  {
1524  //if (rNodesOwned.find(*node_it) == rNodesOwned.end())
1525  if (global_node_partition[*node_it] != local_proc_index)
1526  {
1527  rHaloNodesOwned.insert(*node_it);
1528  }
1529  }
1530  }
1531  }
1532  }
1533 
1534 
1535  /*
1536  * Refine element distribution. Add extra elements that parMETIS didn't consider initially but
1537  * include any node owned by the processor. This ensures that all the system matrix rows are
1538  * assembled locally.
1539  * It may be that some of these elements are in the set of owned nodes erroneously.
1540  * The original set of owned elements (from the k-way partition) informed a
1541  * node partition. It may be that an element near the edge of this new node
1542  * partition may no longer be needed.
1543  *
1544  * Note that rather than clearing the set we could remove elements to the original element partition set
1545  * with erase(), if (!element_owned) below.
1546  */
1547  rElementsOwned.clear();
1548  rMeshReader.Reset();
1549  for (unsigned element_number = 0; element_number < mTotalNumElements; element_number++)
1550  {
1551  ElementData element_data = rMeshReader.GetNextElementData();
1552 
1553  bool element_owned = false;
1554  std::set<unsigned> temp_halo_nodes;
1555 
1556  for (std::vector<unsigned>::const_iterator node_it = element_data.NodeIndices.begin();
1557  node_it != element_data.NodeIndices.end();
1558  ++node_it)
1559  {
1560  if (rNodesOwned.find(*node_it) != rNodesOwned.end())
1561  {
1562  element_owned = true;
1563  rElementsOwned.insert(element_number);
1564  }
1565  else
1566  {
1567  temp_halo_nodes.insert(*node_it);
1568  }
1569  }
1570 
1571  if (element_owned)
1572  {
1573  rHaloNodesOwned.insert(temp_halo_nodes.begin(), temp_halo_nodes.end());
1574  }
1575  }
1576 
1577  rMeshReader.Reset();
1578 
1579  /*
1580  * Once we know the offsets we can compute the permutation vector
1581  */
1582  std::vector<unsigned> local_index(PetscTools::GetNumProcs(), 0);
1583 
1584  this->mNodePermutation.resize(this->GetNumNodes());
1585 
1586  for (unsigned node_index=0; node_index<this->GetNumNodes(); node_index++)
1587  {
1588  unsigned partition = global_node_partition[node_index];
1589  assert(partition != UNASSIGNED_NODE);
1590 
1591  this->mNodePermutation[node_index] = rProcessorsOffset[partition] + local_index[partition];
1592 
1593  local_index[partition]++;
1594  }
1595 }
1596 
1597 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1599 {
1600  ChastePoint<SPACE_DIM> my_minimum_point;
1601  ChastePoint<SPACE_DIM> my_maximum_point;
1602 
1603  try
1604  {
1606  my_minimum_point=my_box.rGetLowerCorner();
1607  my_maximum_point=my_box.rGetUpperCorner();
1608  }
1609  catch (Exception& e)
1610  {
1611 #define COVERAGE_IGNORE
1613  throw e;
1614 #undef COVERAGE_IGNORE
1615  }
1616 
1618 
1619  c_vector<double, SPACE_DIM> global_minimum_point;
1620  c_vector<double, SPACE_DIM> global_maximum_point;
1621  MPI_Allreduce(&my_minimum_point.rGetLocation()[0], &global_minimum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
1622  MPI_Allreduce(&my_maximum_point.rGetLocation()[0], &global_maximum_point[0], SPACE_DIM, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
1623 
1624  ChastePoint<SPACE_DIM> min(global_minimum_point);
1625  ChastePoint<SPACE_DIM> max(global_maximum_point);
1626 
1627  return ChasteCuboid<SPACE_DIM>(min, max);
1628 }
1629 
1630 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1632 {
1633  // Call base method to find closest on local processor
1634  unsigned best_node_index = AbstractMesh<ELEMENT_DIM, SPACE_DIM>::GetNearestNodeIndex(rTestPoint);
1635 
1636  // Recalculate the distance to the best node (if this process has one)
1637  double best_node_point_distance = DBL_MAX;
1638  if (best_node_index != UINT_MAX)
1639  {
1640  best_node_point_distance = norm_2(this->GetNode(best_node_index)->rGetLocation() - rTestPoint.rGetLocation());
1641  }
1642 
1643 
1644  // This is a handy data structure that will work with MPI_DOUBLE_INT data type.
1645  // There is no MPI_DOUBLE_UNSIGNED
1646  struct
1647  {
1648  double distance;
1649  int node_index;
1650  } value, minval;
1651 
1652  value.node_index = best_node_index;
1653  value.distance = best_node_point_distance;
1654 
1655  MPI_Allreduce( &value, &minval, 1, MPI_DOUBLE_INT, MPI_MINLOC, MPI_COMM_WORLD );
1656 
1657  return minval.node_index;
1658 }
1659 
1660 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1662 {
1664  c_vector<double, 2> global_min_max;
1665 
1666  MPI_Allreduce(&local_min_max[0], &global_min_max[0], 1, MPI_DOUBLE, MPI_MIN, PETSC_COMM_WORLD);
1667  MPI_Allreduce(&local_min_max[1], &global_min_max[1], 1, MPI_DOUBLE, MPI_MAX, PETSC_COMM_WORLD);
1668 
1669  return global_min_max;
1670 }
1671 
1672 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1674 {
1675  return mHaloNodes.begin();
1676 }
1677 
1678 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1679 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Rotate(c_matrix<double, SPACE_DIM, SPACE_DIM> rotationMatrix)
1680 {
1681  // First do the extras
1682  for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1683  {
1684  c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1685  r_location = prod(rotationMatrix, r_location);
1686  }
1687  // Now a copy of the base class implementation
1688  for (unsigned i=0; i<this->mNodes.size(); i++)
1689  {
1690  c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1691  r_location = prod(rotationMatrix, r_location);
1692  }
1693 }
1694 
1695 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1696 void DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::Translate(const c_vector<double, SPACE_DIM>& rDisplacement)
1697 {
1698  // First do the extras
1699  for (unsigned i=0; i<this->mHaloNodes.size(); i++)
1700  {
1701  c_vector<double, SPACE_DIM>& r_location = this->mHaloNodes[i]->rGetModifiableLocation();
1702  r_location += rDisplacement;
1703  }
1704  // Now a copy of the base class implementation
1705  for (unsigned i=0; i<this->mNodes.size(); i++)
1706  {
1707  c_vector<double, SPACE_DIM>& r_location = this->mNodes[i]->rGetModifiableLocation();
1708  r_location += rDisplacement;
1709  }
1710 }
1711 
1712 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM>
1714 {
1715  return mHaloNodes.end();
1716 }
1717 
// Explicit instantiation

// Instantiate the template for every element/space dimension pair used by Chaste
template class DistributedTetrahedralMesh<1,1>;
template class DistributedTetrahedralMesh<1,2>;
template class DistributedTetrahedralMesh<1,3>;
template class DistributedTetrahedralMesh<2,2>;
template class DistributedTetrahedralMesh<2,3>;
template class DistributedTetrahedralMesh<3,3>;
1728 
1729 
1730 // Serialization for Boost >= 1.36
ElementIterator GetElementIteratorBegin()
void SetAttribute(double attribute)
virtual unsigned GetNearestNodeIndex(const ChastePoint< SPACE_DIM > &rTestPoint)
virtual ElementData GetNextElementData()=0
Definition: Node.hpp:58
virtual ElementData GetElementData(unsigned index)
c_vector< double, DIM > & rGetLocation()
Definition: ChastePoint.cpp:76
static void Barrier(const std::string callerId="")
Definition: PetscTools.cpp:134
DistributedTetrahedralMeshPartitionType::type GetPartitionType() const
void Rotate(c_matrix< double, SPACE_DIM, SPACE_DIM > rotationMatrix)
void AddNodeAttribute(double attribute)
Definition: Node.cpp:171
NodeIterator GetNodeIteratorEnd()
void SetProcessRegion(ChasteCuboid< SPACE_DIM > *pRegion)
NodeIterator GetNodeIteratorBegin()
#define EXCEPTION(message)
Definition: Exception.hpp:143
DistributedTetrahedralMeshPartitionType::type mMetisPartitioning
std::vector< Node< SPACE_DIM > * >::const_iterator HaloNodeIterator
virtual void ConstructFromMeshReader(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader)
DistributedTetrahedralMesh(DistributedTetrahedralMeshPartitionType::type partitioningMethod=DistributedTetrahedralMeshPartitionType::PARMETIS_LIBRARY)
static bool AmMaster()
Definition: PetscTools.cpp:120
void GetHaloNodeIndices(std::vector< unsigned > &rHaloIndices) const
static void DumbPartitioning(AbstractMesh< ELEMENT_DIM, SPACE_DIM > &rMesh, std::set< unsigned > &rNodesOwned)
static void GeometricPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::vector< unsigned > &rNodePermutation, std::set< unsigned > &rNodesOwned, std::vector< unsigned > &rProcessorsOffset, ChasteCuboid< SPACE_DIM > *pRegion)
virtual ChasteCuboid< SPACE_DIM > CalculateBoundingBox() const
void SetDistributedVectorFactory(DistributedVectorFactory *pFactory)
bool CalculateDesignatedOwnershipOfBoundaryElement(unsigned faceIndex)
static void PrintAndReset(std::string message)
Definition: Timer.cpp:70
void ComputeMeshPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::set< unsigned > &rNodesOwned, std::set< unsigned > &rHaloNodesOwned, std::set< unsigned > &rElementsOwned, std::vector< unsigned > &rProcessorsOffset)
std::string GetOutputDirectoryFullPath() const
HaloNodeIterator GetHaloNodeIteratorBegin() const
virtual ElementData GetNextFaceData()=0
std::vector< unsigned > NodeIndices
virtual bool HasNodePermutation()
HaloNodeIterator GetHaloNodeIteratorEnd() const
static void MetisLibraryPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::vector< unsigned > &rNodePermutation, std::set< unsigned > &rNodesOwned, std::vector< unsigned > &rProcessorsOffset)
const ChastePoint< SPACE_DIM > & rGetUpperCorner() const
ElementIterator GetElementIteratorEnd()
static void PetscMatrixPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::vector< unsigned > &rNodePermutation, std::set< unsigned > &rNodesOwned, std::vector< unsigned > &rProcessorsOffset)
virtual unsigned GetNumElements() const =0
unsigned SolveBoundaryElementMapping(unsigned index) const
unsigned SolveNodeMapping(unsigned index) const
virtual void Reset()=0
static void ReplicateException(bool flag)
Definition: PetscTools.cpp:198
void Translate(const c_vector< double, SPACE_DIM > &rDisplacement)
virtual void ConstructRectangularMesh(unsigned width, unsigned height, bool stagger=true)
unsigned SolveElementMapping(unsigned index) const
#define EXPORT_TEMPLATE_CLASS_ALL_DIMS(CLASS)
virtual unsigned GetNumFaceAttributes() const
virtual unsigned GetNumElementAttributes() const
virtual c_vector< double, 2 > CalculateMinMaxEdgeLengths()
Node< SPACE_DIM > * GetNodeOrHaloNode(unsigned index) const
virtual ChasteCuboid< SPACE_DIM > CalculateBoundingBox() const
virtual void ConstructCuboid(unsigned width, unsigned height, unsigned depth)
virtual void Scale(const double xFactor=1.0, const double yFactor=1.0, const double zFactor=1.0)
static bool IsParallel()
Definition: PetscTools.cpp:97
virtual bool IsFileFormatBinary()
void ConstructCuboid(unsigned width, unsigned height, unsigned depth)
ChasteCuboid< SPACE_DIM > * GetProcessRegion()
virtual unsigned GetNearestNodeIndex(const ChastePoint< SPACE_DIM > &rTestPoint)
virtual std::vector< unsigned > GetContainingElementIndices(unsigned index)
static bool AmTopMost()
Definition: PetscTools.cpp:126
virtual void Scale(const double xFactor=1.0, const double yFactor=1.0, const double zFactor=1.0)
void ParMetisLibraryNodeAndElementPartitioning(AbstractMeshReader< ELEMENT_DIM, SPACE_DIM > &rMeshReader, std::set< unsigned > &rElementsOwned, std::set< unsigned > &rNodesOwned, std::set< unsigned > &rHaloNodesOwned, std::vector< unsigned > &rProcessorsOffset)
virtual std::string GetMeshFileBaseName()
static void Reset()
Definition: Timer.cpp:44
virtual void SetDistributedVectorFactory(DistributedVectorFactory *pFactory)
virtual void ConstructLinearMesh(unsigned width)
static unsigned GetNumProcs()
Definition: PetscTools.cpp:108
virtual std::vector< double > GetNextNode()=0
unsigned GetIndex() const
Definition: Node.cpp:159
bool CalculateDesignatedOwnershipOfElement(unsigned elementIndex)
static unsigned GetMyRank()
Definition: PetscTools.cpp:114
virtual void WriteFilesUsingMesh(AbstractTetrahedralMesh< ELEMENT_DIM, SPACE_DIM > &rMesh, bool keepOriginalElementIndexing=true)
void ConstructRectangularMesh(unsigned width, unsigned height, bool stagger=true)
virtual c_vector< double, 2 > CalculateMinMaxEdgeLengths()
virtual unsigned GetNumNodes() const =0
virtual std::vector< double > GetNodeAttributes()
virtual unsigned GetNumFaces() const =0
virtual const std::vector< unsigned > & rGetNodePermutation()
const ChastePoint< SPACE_DIM > & rGetLowerCorner() const