Chaste Release::3.1
|
/*

Copyright (c) 2005-2012, University of Oxford.
All rights reserved.

University of Oxford means the Chancellor, Masters and Scholars of the
University of Oxford, having an administrative office at Wellington
Square, Oxford OX1 2JD, UK.

This file is part of Chaste.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
 * Redistributions of source code must retain the above copyright notice,
   this list of conditions and the following disclaimer.
 * Redistributions in binary form must reproduce the above copyright notice,
   this list of conditions and the following disclaimer in the documentation
   and/or other materials provided with the distribution.
 * Neither the name of the University of Oxford nor the names of its
   contributors may be used to endorse or promote products derived from this
   software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00033 00034 */ 00035 00036 #include "VtkMeshWriter.hpp" 00037 #include "DistributedTetrahedralMesh.hpp" 00038 #include "MixedDimensionMesh.hpp" 00039 00040 #ifdef CHASTE_VTK 00041 #include "vtkQuadraticTetra.h" 00042 #include "vtkQuadraticTriangle.h" 00043 00044 00046 // Implementation 00048 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00049 VtkMeshWriter<ELEMENT_DIM, SPACE_DIM>::VtkMeshWriter(const std::string& rDirectory, 00050 const std::string& rBaseName, 00051 const bool& rCleanDirectory) 00052 : AbstractTetrahedralMeshWriter<ELEMENT_DIM, SPACE_DIM>(rDirectory, rBaseName, rCleanDirectory), 00053 mWriteParallelFiles(false) 00054 { 00055 this->mIndexFromZero = true; 00056 00057 // Dubious, since we shouldn't yet know what any details of the mesh are. 00058 mpVtkUnstructedMesh = vtkUnstructuredGrid::New(); 00059 } 00060 00061 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00062 VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::~VtkMeshWriter() 00063 { 00064 mpVtkUnstructedMesh->Delete(); // Reference counted 00065 } 00066 00067 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00068 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::MakeVtkMesh() 00069 { 00070 assert(SPACE_DIM==3 || SPACE_DIM == 2); 00071 assert(SPACE_DIM==ELEMENT_DIM); 00072 00073 00074 //Construct nodes aka as Points 00075 vtkPoints* p_pts = vtkPoints::New(VTK_DOUBLE); 00076 p_pts->GetData()->SetName("Vertex positions"); 00077 for (unsigned item_num=0; item_num<this->GetNumNodes(); item_num++) 00078 { 00079 std::vector<double> current_item = this->GetNextNode(); //this->mNodeData[item_num]; 00080 if (SPACE_DIM==2) 00081 { 00082 current_item.push_back(0.0);//For z-coordinate 00083 } 00084 assert(current_item.size() == 3); 00085 p_pts->InsertPoint(item_num, current_item[0], current_item[1], current_item[2]); 00086 } 00087 mpVtkUnstructedMesh->SetPoints(p_pts); 00088 p_pts->Delete(); //Reference counted 00089 00090 //Construct elements aka Cells 00091 for (unsigned item_num=0; 
item_num<this->GetNumElements(); item_num++) 00092 { 00093 std::vector<unsigned> current_element = this->GetNextElement().NodeIndices; // this->mElementData[item_num]; 00094 00095 assert((current_element.size() == ELEMENT_DIM + 1) || (current_element.size() == (ELEMENT_DIM+1)*(ELEMENT_DIM+2)/2)); 00096 00097 vtkCell* p_cell=NULL; 00098 if (SPACE_DIM == 3 && current_element.size() == 4) 00099 { 00100 p_cell = vtkTetra::New(); 00101 } 00102 else if(SPACE_DIM == 3 && current_element.size() == 10) 00103 { 00104 p_cell = vtkQuadraticTetra::New(); 00105 } 00106 else if (SPACE_DIM == 2 && current_element.size() == 3) 00107 { 00108 p_cell = vtkTriangle::New(); 00109 } 00110 else if (SPACE_DIM == 2 && current_element.size() == 6) 00111 { 00112 p_cell = vtkQuadraticTriangle::New(); 00113 } 00114 00115 //Set the linear nodes 00116 vtkIdList* p_cell_id_list = p_cell->GetPointIds(); 00117 for (unsigned j = 0; j < current_element.size(); ++j) 00118 { 00119 p_cell_id_list->SetId(j, current_element[j]); 00120 } 00121 00122 //VTK defines the node ordering in quadratic triangles differently to Chaste, so they must be treated as a special case 00123 if (SPACE_DIM == 2 && current_element.size() == 6) 00124 { 00125 p_cell_id_list->SetId(3, current_element[5]); 00126 p_cell_id_list->SetId(4, current_element[3]); 00127 p_cell_id_list->SetId(5, current_element[4]); 00128 } 00129 00130 mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list); 00131 p_cell->Delete(); //Reference counted 00132 } 00133 00134 //If necessary, construct cables 00135 if (this->GetNumCableElements() > 0) 00136 { 00137 AugmentCellData(); 00138 //Make a blank cell radius data for the regular elements 00139 std::vector<double> radii(this->GetNumElements(), 0.0); 00140 for (unsigned item_num=0; item_num<this->GetNumCableElements(); item_num++) 00141 { 00142 ElementData cable_element_data = this->GetNextCableElement(); 00143 std::vector<unsigned> current_element = cable_element_data.NodeIndices; 00144 
radii.push_back(cable_element_data.AttributeValue); 00145 assert(current_element.size() == 2); 00146 vtkCell* p_cell=vtkLine::New(); 00147 vtkIdList* p_cell_id_list = p_cell->GetPointIds(); 00148 for (unsigned j = 0; j < 2; ++j) 00149 { 00150 p_cell_id_list->SetId(j, current_element[j]); 00151 } 00152 mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list); 00153 p_cell->Delete(); //Reference counted 00154 } 00155 AddCellData("Cable radius", radii); 00156 00157 } 00158 } 00159 00160 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00161 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddProvenance(std::string fileName) 00162 { 00163 std::string comment = "<!-- " + ChasteBuildInfo::GetProvenanceString() + "-->"; 00164 00165 out_stream p_vtu_file = this->mpOutputFileHandler->OpenOutputFile(fileName, std::ios::out | std::ios::app); 00166 00167 *p_vtu_file << "\n" << comment << "\n"; 00168 p_vtu_file->close(); 00169 } 00170 00171 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00172 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::WriteFiles() 00173 { 00174 // Using separate scope here to make sure file is properly closed before re-opening it to add provenance info. 
00175 { 00176 MakeVtkMesh(); 00177 assert(mpVtkUnstructedMesh->CheckAttributes() == 0); 00178 vtkXMLUnstructuredGridWriter* p_writer = vtkXMLUnstructuredGridWriter::New(); 00179 p_writer->SetInput(mpVtkUnstructedMesh); 00180 //Uninitialised stuff arises (see #1079), but you can remove 00181 //valgrind problems by removing compression: 00182 // **** REMOVE WITH CAUTION ***** 00183 p_writer->SetCompressor(NULL); 00184 // **** REMOVE WITH CAUTION ***** 00185 std::string vtk_file_name = this->mpOutputFileHandler->GetOutputDirectoryFullPath() + this->mBaseName+".vtu"; 00186 p_writer->SetFileName(vtk_file_name.c_str()); 00187 //p_writer->PrintSelf(std::cout, vtkIndent()); 00188 p_writer->Write(); 00189 p_writer->Delete(); //Reference counted 00190 } 00191 00192 AddProvenance(this->mBaseName + ".vtu"); 00193 } 00194 00195 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00196 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddCellData(std::string dataName, std::vector<double> dataPayload) 00197 { 00198 vtkDoubleArray* p_scalars = vtkDoubleArray::New(); 00199 p_scalars->SetName(dataName.c_str()); 00200 for (unsigned i=0; i<dataPayload.size(); i++) 00201 { 00202 p_scalars->InsertNextValue(dataPayload[i]); 00203 } 00204 00205 vtkCellData* p_cell_data = mpVtkUnstructedMesh->GetCellData(); 00206 p_cell_data->AddArray(p_scalars); 00207 p_scalars->Delete(); //Reference counted 00208 } 00209 00210 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00211 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AugmentCellData() 00212 { 00213 unsigned num_cell_arrays = mpVtkUnstructedMesh->GetCellData()->GetNumberOfArrays(); 00214 for (unsigned i = 0; i < num_cell_arrays; i++) 00215 { 00216 vtkDataArray* array = mpVtkUnstructedMesh->GetCellData()->GetArray(i); 00217 00218 //Check data was the correct size before the cables were added 00219 unsigned num_cable_pads = this->GetNumCableElements(); 00220 if (mWriteParallelFiles) 00221 { 00222 assert((unsigned)array->GetNumberOfTuples() == 
this->mpDistributedMesh->GetNumLocalElements()); 00223 num_cable_pads = this->mpMixedMesh->GetNumLocalCableElements(); 00224 } 00225 else 00226 { 00227 assert((unsigned)array->GetNumberOfTuples() == this->GetNumElements()); 00228 } 00229 00230 //Check that tuples of size 3 will be big enough for padding the rest of the data 00231 assert(array->GetNumberOfComponents() <= 3); 00232 double null_data[3] = {0.0, 0.0, 0.0}; 00233 00234 //Pad data 00235 for (unsigned new_index = 0; new_index < num_cable_pads; new_index++) 00236 { 00237 array->InsertNextTuple(null_data); 00238 } 00239 } 00240 } 00241 00242 00243 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00244 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddCellData(std::string dataName, std::vector<c_vector<double, SPACE_DIM> > dataPayload) 00245 { 00246 vtkDoubleArray* p_vectors = vtkDoubleArray::New(); 00247 p_vectors->SetName(dataName.c_str()); 00248 p_vectors->SetNumberOfComponents(3); 00249 for (unsigned i=0; i<dataPayload.size(); i++) 00250 { 00251 for (unsigned j=0; j<SPACE_DIM; j++) 00252 { 00253 p_vectors->InsertNextValue(dataPayload[i][j]); 00254 } 00255 //When SPACE_DIM<3, then pad 00256 for (unsigned j=SPACE_DIM; j<3; j++) 00257 { 00258 p_vectors->InsertNextValue(0.0); 00259 } 00260 } 00261 00262 vtkCellData* p_cell_data = mpVtkUnstructedMesh->GetCellData(); 00263 p_cell_data->AddArray(p_vectors); 00264 p_vectors->Delete(); //Reference counted 00265 } 00266 00267 00268 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00269 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddPointData(std::string dataName, std::vector<double> dataPayload) 00270 { 00271 vtkDoubleArray* p_scalars = vtkDoubleArray::New(); 00272 p_scalars->SetName(dataName.c_str()); 00273 00274 if (mWriteParallelFiles) 00275 { 00276 // In parallel, the vector we pass will only contain the values from the privately owned nodes. 
00277 // To get the values from the halo nodes (which will be inserted at the end of the vector we need to 00278 // communicate with the equivalent vectors on other processes. 00279 00280 // resize the payload data to include halos 00281 assert( dataPayload.size() == this->mpDistributedMesh->GetNumLocalNodes() ); 00282 dataPayload.resize( this->mpDistributedMesh->GetNumLocalNodes() + this->mpDistributedMesh->GetNumHaloNodes() ); 00283 00284 00285 // then do the communication 00286 for ( unsigned rank_offset = 1; rank_offset < PetscTools::GetNumProcs(); rank_offset++ ) 00287 { 00288 unsigned send_to = (PetscTools::GetMyRank() + rank_offset) % (PetscTools::GetNumProcs()); 00289 unsigned receive_from = (PetscTools::GetMyRank() + PetscTools::GetNumProcs()- rank_offset ) % (PetscTools::GetNumProcs()); 00290 00291 unsigned number_of_nodes_to_send = mNodesToSendPerProcess[send_to].size(); 00292 unsigned number_of_nodes_to_receive = mNodesToReceivePerProcess[receive_from].size(); 00293 00294 // Pack 00295 if ( number_of_nodes_to_send > 0 ) 00296 { 00297 double send_data[number_of_nodes_to_send]; 00298 00299 for (unsigned node = 0; node < number_of_nodes_to_send; node++) 00300 { 00301 unsigned global_node_index = mNodesToSendPerProcess[send_to][node]; 00302 unsigned local_node_index = global_node_index 00303 - this->mpDistributedMesh->GetDistributedVectorFactory()->GetLow(); 00304 send_data[node] = dataPayload[local_node_index]; 00305 } 00306 00307 // Send 00308 int ret; 00309 ret = MPI_Send( send_data, 00310 number_of_nodes_to_send, 00311 MPI_DOUBLE, 00312 send_to, 00313 0, 00314 PETSC_COMM_WORLD ); 00315 assert ( ret == MPI_SUCCESS ); 00316 } 00317 00318 if ( number_of_nodes_to_receive > 0 ) 00319 { 00320 // Receive 00321 double receive_data[number_of_nodes_to_receive]; 00322 MPI_Status status; 00323 00324 int ret; 00325 ret = MPI_Recv( receive_data, 00326 number_of_nodes_to_receive, 00327 MPI_DOUBLE, 00328 receive_from, 00329 0, 00330 PETSC_COMM_WORLD, 00331 &status ); 
00332 assert ( ret == MPI_SUCCESS); 00333 00334 // Unpack 00335 for ( unsigned node = 0; node < number_of_nodes_to_receive; node++ ) 00336 { 00337 unsigned global_node_index = mNodesToReceivePerProcess[receive_from][node]; 00338 unsigned halo_index = mGlobalToNodeIndexMap[global_node_index]; 00339 assert( halo_index >= this->mpDistributedMesh->GetNumLocalNodes() ); 00340 dataPayload[halo_index] = receive_data[node]; 00341 } 00342 } 00343 } 00344 } 00345 00346 for (unsigned i=0; i<dataPayload.size(); i++) 00347 { 00348 p_scalars->InsertNextValue(dataPayload[i]); 00349 } 00350 00351 vtkPointData* p_point_data = mpVtkUnstructedMesh->GetPointData(); 00352 p_point_data->AddArray(p_scalars); 00353 p_scalars->Delete(); //Reference counted 00354 00355 } 00356 00357 00358 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00359 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::AddPointData(std::string dataName, std::vector<c_vector<double, SPACE_DIM> > dataPayload) 00360 { 00361 vtkDoubleArray* p_vectors = vtkDoubleArray::New(); 00362 p_vectors->SetName(dataName.c_str()); 00363 00364 if (mWriteParallelFiles) 00365 { 00366 // In parallel, the vector we pass will only contain the values from the privately owned nodes. 00367 // To get the values from the halo nodes (which will be inserted at the end of the vector we need to 00368 // communicate with the equivalent vectors on other processes. 
00369 00370 // resize the payload data to include halos 00371 assert( dataPayload.size() == this->mpDistributedMesh->GetNumLocalNodes() ); 00372 dataPayload.resize( this->mpDistributedMesh->GetNumLocalNodes() + this->mpDistributedMesh->GetNumHaloNodes() ); 00373 00374 // then do the communication 00375 for ( unsigned rank_offset = 1; rank_offset < PetscTools::GetNumProcs(); rank_offset++ ) 00376 { 00377 unsigned send_to = (PetscTools::GetMyRank() + rank_offset) % (PetscTools::GetNumProcs()); 00378 unsigned receive_from = (PetscTools::GetMyRank() + PetscTools::GetNumProcs()- rank_offset ) % (PetscTools::GetNumProcs()); 00379 00380 unsigned number_of_nodes_to_send = mNodesToSendPerProcess[send_to].size(); 00381 unsigned number_of_nodes_to_receive = mNodesToReceivePerProcess[receive_from].size(); 00382 00383 // Pack 00384 if ( number_of_nodes_to_send > 0 ) 00385 { 00386 double send_data[number_of_nodes_to_send * SPACE_DIM]; 00387 00388 for (unsigned node = 0; node < number_of_nodes_to_send; node++) 00389 { 00390 unsigned global_node_index = mNodesToSendPerProcess[send_to][node]; 00391 unsigned local_node_index = global_node_index 00392 - this->mpDistributedMesh->GetDistributedVectorFactory()->GetLow(); 00393 for (unsigned j=0; j<SPACE_DIM; j++) 00394 { 00395 send_data[ node*SPACE_DIM + j ] = dataPayload[local_node_index][j]; 00396 } 00397 } 00398 00399 // Send 00400 int ret; 00401 ret = MPI_Send( send_data, 00402 number_of_nodes_to_send * SPACE_DIM, 00403 MPI_DOUBLE, 00404 send_to, 00405 0, 00406 PETSC_COMM_WORLD ); 00407 assert ( ret == MPI_SUCCESS ); 00408 } 00409 00410 if ( number_of_nodes_to_receive > 0 ) 00411 { 00412 // Receive 00413 double receive_data[number_of_nodes_to_receive * SPACE_DIM]; 00414 MPI_Status status; 00415 00416 int ret; 00417 ret = MPI_Recv( receive_data, 00418 number_of_nodes_to_receive * SPACE_DIM, 00419 MPI_DOUBLE, 00420 receive_from, 00421 0, 00422 PETSC_COMM_WORLD, 00423 &status ); 00424 assert ( ret == MPI_SUCCESS); 00425 00426 // Unpack 
00427 for ( unsigned node = 0; node < number_of_nodes_to_receive; node++ ) 00428 { 00429 unsigned global_node_index = mNodesToReceivePerProcess[receive_from][node]; 00430 unsigned halo_index = mGlobalToNodeIndexMap[global_node_index]; 00431 assert( halo_index >= this->mpDistributedMesh->GetNumLocalNodes() ); 00432 for (unsigned j=0; j<SPACE_DIM; j++) 00433 { 00434 dataPayload[halo_index][j] = receive_data[ node*SPACE_DIM + j ]; 00435 } 00436 } 00437 } 00438 } 00439 } 00440 00441 p_vectors->SetNumberOfComponents(3); 00442 for (unsigned i=0; i<dataPayload.size(); i++) 00443 { 00444 for (unsigned j=0; j<SPACE_DIM; j++) 00445 { 00446 p_vectors->InsertNextValue(dataPayload[i][j]); 00447 } 00448 //When SPACE_DIM<3, then pad 00449 for (unsigned j=SPACE_DIM; j<3; j++) 00450 { 00451 p_vectors->InsertNextValue(0.0); 00452 } 00453 } 00454 00455 vtkPointData* p_point_data = mpVtkUnstructedMesh->GetPointData(); 00456 p_point_data->AddArray(p_vectors); 00457 p_vectors->Delete(); //Reference counted 00458 } 00459 00460 template <unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00461 void VtkMeshWriter<ELEMENT_DIM,SPACE_DIM>::SetParallelFiles( AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh ) 00462 { 00463 //Have we got a parallel mesh? 
00464 this->mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh); 00465 00466 if (this->mpDistributedMesh == NULL) 00467 { 00468 EXCEPTION("Cannot write parallel files using a sequential mesh"); 00469 } 00470 00471 if ( PetscTools::IsSequential()) 00472 { 00473 return; // mWriteParallelFiles is not set sequentially (so we don't set up data exchange machinery) 00474 } 00475 00476 mWriteParallelFiles = true; 00477 00478 // Populate the global to node index map (as this will be required to add point data) 00479 00480 //Node index that we are writing to VTK (index into mNodes and mHaloNodes as if they were concatenated) 00481 unsigned index = 0; 00482 00483 // Owned nodes 00484 for (typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator node_iter = rMesh.GetNodeIteratorBegin(); 00485 node_iter != rMesh.GetNodeIteratorEnd(); 00486 ++node_iter) 00487 { 00488 mGlobalToNodeIndexMap[node_iter->GetIndex()] = index; 00489 index++; 00490 } 00491 00492 // Halo nodes 00493 for (typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator halo_iter=this->mpDistributedMesh->GetHaloNodeIteratorBegin(); 00494 halo_iter != this->mpDistributedMesh->GetHaloNodeIteratorEnd(); 00495 ++halo_iter) 00496 { 00497 mGlobalToNodeIndexMap[(*halo_iter)->GetIndex()] = index; 00498 index++; 00499 } 00500 00501 //Calculate the halo exchange so that node-wise payloads can be communicated 00502 this->mpDistributedMesh->CalculateNodeExchange( mNodesToSendPerProcess, mNodesToReceivePerProcess ); 00503 00504 } 00505 00507 template<unsigned ELEMENT_DIM, unsigned SPACE_DIM> 00508 void VtkMeshWriter<ELEMENT_DIM, SPACE_DIM>::WriteFilesUsingMesh( 00509 AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>& rMesh, 00510 bool keepOriginalElementIndexing) 00511 { 00512 //Have we got a parallel mesh? 
00513 this->mpDistributedMesh = dynamic_cast<DistributedTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh); 00514 this->mpMixedMesh = dynamic_cast<MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>* >(&rMesh); 00515 00516 if ( PetscTools::IsSequential() || !mWriteParallelFiles || this->mpDistributedMesh == NULL ) 00517 { 00518 AbstractTetrahedralMeshWriter<ELEMENT_DIM,SPACE_DIM>::WriteFilesUsingMesh( rMesh,keepOriginalElementIndexing ); 00519 } 00520 else 00521 { 00522 //Make the local mesh into a VtkMesh 00523 assert(SPACE_DIM==3 || SPACE_DIM == 2); 00524 assert(SPACE_DIM==ELEMENT_DIM); 00525 vtkPoints* p_pts = vtkPoints::New(VTK_DOUBLE); 00526 p_pts->GetData()->SetName("Vertex positions"); 00527 00528 // Owned nodes 00529 for (typename AbstractMesh<ELEMENT_DIM,SPACE_DIM>::NodeIterator node_iter = rMesh.GetNodeIteratorBegin(); 00530 node_iter != rMesh.GetNodeIteratorEnd(); 00531 ++node_iter) 00532 { 00533 c_vector<double, SPACE_DIM> current_item = node_iter->rGetLocation(); 00534 p_pts->InsertNextPoint(current_item[0], current_item[1], (SPACE_DIM==3)?current_item[2]:0.0); 00535 } 00536 00537 // Halo nodes 00538 for (typename DistributedTetrahedralMesh<ELEMENT_DIM, SPACE_DIM>::HaloNodeIterator halo_iter=this->mpDistributedMesh->GetHaloNodeIteratorBegin(); 00539 halo_iter != this->mpDistributedMesh->GetHaloNodeIteratorEnd(); 00540 ++halo_iter) 00541 { 00542 c_vector<double, SPACE_DIM> current_item = (*halo_iter)->rGetLocation(); 00543 p_pts->InsertNextPoint(current_item[0], current_item[1], (SPACE_DIM==3)?current_item[2]:0.0); 00544 } 00545 00546 mpVtkUnstructedMesh->SetPoints(p_pts); 00547 p_pts->Delete(); //Reference counted 00548 00549 for (typename AbstractTetrahedralMesh<ELEMENT_DIM,SPACE_DIM>::ElementIterator elem_iter = rMesh.GetElementIteratorBegin(); 00550 elem_iter != rMesh.GetElementIteratorEnd(); 00551 ++elem_iter) 00552 { 00553 00554 vtkCell* p_cell=NULL; 00555 if (SPACE_DIM == 3) 00556 { 00557 p_cell = vtkTetra::New(); 00558 } 00559 if (SPACE_DIM == 2) 00560 { 
00561 p_cell = vtkTriangle::New(); 00562 } 00563 vtkIdList* p_cell_id_list = p_cell->GetPointIds(); 00564 for (unsigned j = 0; j < ELEMENT_DIM+1; ++j) 00565 { 00566 unsigned global_node_index = elem_iter->GetNodeGlobalIndex(j); 00567 p_cell_id_list->SetId(j, mGlobalToNodeIndexMap[global_node_index]); 00568 } 00569 mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list); 00570 p_cell->Delete(); //Reference counted 00571 } 00572 //If necessary, construct cables 00573 if (this->mpMixedMesh ) 00574 { 00575 AugmentCellData(); 00576 //Make a blank cell radius data for the regular elements 00577 std::vector<double> radii(this->mpMixedMesh->GetNumLocalElements(), 0.0); 00578 for (typename MixedDimensionMesh<ELEMENT_DIM,SPACE_DIM>::CableElementIterator elem_iter = this->mpMixedMesh->GetCableElementIteratorBegin(); 00579 elem_iter != this->mpMixedMesh->GetCableElementIteratorEnd(); 00580 ++elem_iter) 00581 { 00582 radii.push_back((*elem_iter)->GetAttribute()); 00583 vtkCell* p_cell=vtkLine::New(); 00584 vtkIdList* p_cell_id_list = p_cell->GetPointIds(); 00585 for (unsigned j = 0; j < 2; ++j) 00586 { 00587 unsigned global_node_index = (*elem_iter)->GetNodeGlobalIndex(j); 00588 p_cell_id_list->SetId(j, mGlobalToNodeIndexMap[global_node_index]); 00589 } 00590 mpVtkUnstructedMesh->InsertNextCell(p_cell->GetCellType(), p_cell_id_list); 00591 p_cell->Delete(); //Reference counted 00592 } 00593 AddCellData("Cable radius", radii); 00594 } 00595 00596 00597 //This block is to guard the mesh writers (vtkXMLPUnstructuredGridWriter) so that they 00598 //go out of scope, flush buffers and close files 00599 { 00600 assert(mpVtkUnstructedMesh->CheckAttributes() == 0); 00601 vtkXMLPUnstructuredGridWriter* p_writer = vtkXMLPUnstructuredGridWriter::New(); 00602 00603 p_writer->SetDataModeToBinary(); 00604 00605 p_writer->SetNumberOfPieces(PetscTools::GetNumProcs()); 00606 //p_writer->SetGhostLevel(-1); 00607 p_writer->SetStartPiece(PetscTools::GetMyRank()); 00608 
p_writer->SetEndPiece(PetscTools::GetMyRank()); 00609 00610 00611 p_writer->SetInput(mpVtkUnstructedMesh); 00612 //Uninitialised stuff arises (see #1079), but you can remove 00613 //valgrind problems by removing compression: 00614 // **** REMOVE WITH CAUTION ***** 00615 p_writer->SetCompressor(NULL); 00616 // **** REMOVE WITH CAUTION ***** 00617 std::string pvtk_file_name = this->mpOutputFileHandler->GetOutputDirectoryFullPath() + this->mBaseName+ ".pvtu"; 00618 p_writer->SetFileName(pvtk_file_name.c_str()); 00619 //p_writer->PrintSelf(std::cout, vtkIndent()); 00620 p_writer->Write(); 00621 p_writer->Delete(); //Reference counted 00622 } 00623 00624 // Add provenance to the individual files 00625 std::stringstream filepath; 00626 filepath << this->mBaseName << "_" << PetscTools::GetMyRank() << ".vtu"; 00627 AddProvenance(filepath.str()); 00629 if (PetscTools::AmMaster()) 00630 { 00631 AddProvenance(this->mBaseName+ ".pvtu"); 00632 } 00633 } 00634 } 00635 00637 // Explicit instantiation 00639 00640 template class VtkMeshWriter<1,1>; 00641 template class VtkMeshWriter<1,2>; 00642 template class VtkMeshWriter<1,3>; 00643 template class VtkMeshWriter<2,2>; // Actually used 00644 template class VtkMeshWriter<2,3>; 00645 template class VtkMeshWriter<3,3>; // Actually used 00646 00647 #endif //CHASTE_VTK