/*
   Actual source code: ex14.cxx
   petsc-master 2018-06-20
   Report typos and errors to the PETSc developers.
*/

  2: static char help[] = "Demonstrates calling Trilinos and then PETSc in the same program.\n\n";

  4: /*T
  5:    Concepts: introduction to PETSc^Trilinos
  6:    Processors: n

  8:    Example obtained from: http://trilinos.org/docs/dev/packages/tpetra/doc/html/Tpetra_Lesson01.html
  9: T*/



 13:  #include <petscsys.h>
 14: #include <Tpetra_DefaultPlatform.hpp>
 15: #include <Tpetra_Version.hpp>
 16: #include <Teuchos_GlobalMPISession.hpp>    // used if Trilinos is the one that starts up MPI

 18: // Do something with the given communicator.  In this case, we just
 19: // print Tpetra's version to stdout on Process 0 in the given
 20: // communicator.
 21: void
 22: exampleRoutine (const Teuchos::RCP<const Teuchos::Comm<int> >& comm)
 23: {
 24:   if (comm->getRank () == 0) {
 25:     // On (MPI) Process 0, print out the Tpetra software version.
 26:     std::cout << Tpetra::version () << std::endl << std::endl;
 27:   }
 28: }

 30: int main(int argc,char **argv)
 31: {
 33:   // These "using" declarations make the code more concise, in that
 34:   // you don't have to write the namespace along with the class or
 35:   // object name.  This is especially helpful with commonly used
 36:   // things like std::endl.
 37:   using std::cout;
 38:   using std::endl;
 39:   // Start up MPI, if using MPI.  Trilinos doesn't have to be built
 40:   // with MPI; it's called a "serial" build if you build without MPI.
 41:   // GlobalMPISession hides this implementation detail.
 42:   //
 43:   // Note the third argument.  If you pass GlobalMPISession the
 44:   // address of an std::ostream, it will print a one-line status
 45:   // message with the rank on each MPI process.  This may be
 46:   // undesirable if running with a large number of MPI processes.
 47:   // You can avoid printing anything here by passing in either
 48:   // NULL or the address of a Teuchos::oblackholestream.
 49:   Teuchos::GlobalMPISession mpiSession (&argc, &argv, NULL);
 50:   // Get a pointer to the communicator object representing
 51:   // MPI_COMM_WORLD.  getDefaultPlatform.getComm() doesn't create a
 52:   // new object every time you call it; it just returns the same
 53:   // communicator each time.  Thus, you can call it anywhere and get
 54:   // the same communicator.  (This is handy if you don't want to pass
 55:   // a communicator around everywhere, though it's always better to
 56:   // parameterize your algorithms on the communicator.)
 57:   //
 58:   // "Tpetra::DefaultPlatform" knows whether or not we built with MPI
 59:   // support.  If we didn't build with MPI, we'll get a "communicator"
 60:   // with size 1, whose only process has rank 0.
 61:   Teuchos::RCP<const Teuchos::Comm<int> > comm = Tpetra::DefaultPlatform::getDefaultPlatform ().getComm ();

 63:   PetscInitialize(&argc,&argv,(char*)0,help);if (ierr) return ierr;

 65:   // Get my process' rank, and the total number of processes.
 66:   // Equivalent to MPI_Comm_rank resp. MPI_Comm_size.
 67:   const int myRank = comm->getRank ();
 68:   const int size = comm->getSize ();
 69:   if (myRank == 0) {
 70:     cout << "Total number of processes: " << size << endl;
 71:   }
 72:   // Do something with the new communicator.
 73:   exampleRoutine (comm);
 74:   // This tells the Trilinos test framework that the test passed.
 75:   if (myRank == 0) {
 76:     cout << "End Result: TEST PASSED" << endl;
 77:   }
 78:   // GlobalMPISession calls MPI_Finalize() in its destructor, if
 79:   // appropriate.  You don't have to do anything here!  Just return
 80:   // from main().  Isn't that helpful?
 81:   PetscFinalize();
 82:   return ierr;
 83: }




 88: /*TEST

 90:    build:
 91:      requires: trilinos

 93:    test:
 94:       nsize: 3
 95:       filter: grep -v "Tpetra in Trilinos"

 97: TEST*/