Actual source code: ex2.c

  1: static char help[] = "Builds a parallel vector with 1 component on the first processor, 2 on the second, etc.\n\
  2:   Then each processor adds one to all elements except the last rank.\n\n";

  4: /*
  5:   Include "petscvec.h" so that we can use vectors.  Note that this file
  6:   automatically includes:
  7:      petscsys.h       - base PETSc routines   petscis.h     - index sets
  8:      petscviewer.h - viewers
  9: */
 10: #include <petscvec.h>

 12: int main(int argc, char **argv)
 13: {
 14:   PetscMPIInt rank;
 15:   PetscInt    i, N;
 16:   PetscScalar one = 1.0;
 17:   Vec         x;

 19:   PetscFunctionBeginUser;
 20:   PetscCall(PetscInitialize(&argc, &argv, (char *)0, help));
 21:   PetscCallMPI(MPI_Comm_rank(PETSC_COMM_WORLD, &rank));

 23:   /*
 24:      Create a parallel vector.
 25:       - In this case, we specify the size of each processor's local
 26:         portion, and PETSc computes the global size.  Alternatively,
 27:         if we pass the global size and use PETSC_DECIDE for the
 28:         local size PETSc will choose a reasonable partition trying
 29:         to put nearly an equal number of elements on each processor.
 30:   */
 31:   PetscCall(VecCreate(PETSC_COMM_WORLD, &x));
 32:   PetscCall(VecSetSizes(x, rank + 1, PETSC_DECIDE));
 33:   PetscCall(VecSetFromOptions(x));
 34:   PetscCall(VecGetSize(x, &N));
 35:   PetscCall(VecSet(x, one));

 37:   /*
 38:      Set the vector elements.
 39:       - Always specify global locations of vector entries.
 40:       - Each processor can contribute any vector entries,
 41:         regardless of which processor "owns" them; any nonlocal
 42:         contributions will be transferred to the appropriate processor
 43:         during the assembly process.
 44:       - In this example, the flag ADD_VALUES indicates that all
 45:         contributions will be added together.
 46:   */
 47:   for (i = 0; i < N - rank; i++) PetscCall(VecSetValues(x, 1, &i, &one, ADD_VALUES));

 49:   /*
 50:      Assemble vector, using the 2-step process:
 51:        VecAssemblyBegin(), VecAssemblyEnd()
 52:      Computations can be done while messages are in transition
 53:      by placing code between these two statements.
 54:   */
 55:   PetscCall(VecAssemblyBegin(x));
 56:   PetscCall(VecAssemblyEnd(x));

 58:   /*
 59:       View the vector; then destroy it.
 60:   */
 61:   PetscCall(VecView(x, PETSC_VIEWER_STDOUT_WORLD));
 62:   PetscCall(VecDestroy(&x));

 64:   PetscCall(PetscFinalize());
 65:   return 0;
 66: }

 68: /*TEST

 70:      test:
 71:        nsize: 2

 73: TEST*/