static char help[] = "Demonstrates using a local ordering to set values into a parallel vector.\n\n";

/*T
   Concepts: vectors^assembling vectors with local ordering;
   Processors: n
T*/

/*
  Include "petscvec.h" so that we can use vectors.  Note that this file
  automatically includes:
     petsc.h       - base PETSc routines   petscis.h     - index sets
     petscsys.h    - system routines       petscviewer.h - viewers
*/
#include "petscvec.h"

#undef __FUNCT__
#define __FUNCT__ "main"
int main(int argc,char **argv)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscInt       i,N,ng,*gindices,rstart,rend,M;
  PetscScalar    one = 1.0;
  Vec            x;

  PetscInitialize(&argc,&argv,(char *)0,help);
  ierr = MPI_Comm_rank(PETSC_COMM_WORLD,&rank);CHKERRQ(ierr);

  /*
     Create a parallel vector.
      - In this case, we specify the size of each processor's local
        portion, and PETSc computes the global size.  Alternatively,
        PETSc could determine the vector's distribution if we specified
        just the global size.
  */
  ierr = VecCreate(PETSC_COMM_WORLD,&x);CHKERRQ(ierr);
  ierr = VecSetSizes(x,rank+1,PETSC_DECIDE);CHKERRQ(ierr);
  ierr = VecSetFromOptions(x);CHKERRQ(ierr);
  ierr = VecGetSize(x,&N);CHKERRQ(ierr);
  ierr = VecSet(x,one);CHKERRQ(ierr);

  /*
     Set the local-to-global ordering for the vector.  Each processor
     generates a list of the global indices for each local index.  Note that
     the local indices are just whatever is convenient for a particular
     application.

     In this case we treat the vector as lying on a one-dimensional grid and
     have one ghost point on each end of the block owned by each processor.
  */
  ierr = VecGetSize(x,&M);CHKERRQ(ierr);
  ierr = VecGetOwnershipRange(x,&rstart,&rend);CHKERRQ(ierr);
  ng   = rend - rstart + 2;
  ierr = PetscMalloc(ng*sizeof(PetscInt),&gindices);CHKERRQ(ierr);

  /* Local index 0 is the ghost point to the left of this processor's block;
     the remaining local indices cover rstart..rend (rend is the right ghost). */
  gindices[0] = rstart - 1;
  for (i=0; i<ng-1; i++) {
    gindices[i+1] = gindices[i] + 1;
  }

  /* Map the first and last points as periodic */
  if (gindices[0] == -1)   gindices[0]    = M - 1;
  if (gindices[ng-1] == M) gindices[ng-1] = 0;

  {
    ISLocalToGlobalMapping ltog;
    ierr = ISLocalToGlobalMappingCreate(PETSC_COMM_SELF,ng,gindices,&ltog);CHKERRQ(ierr);
    ierr = VecSetLocalToGlobalMapping(x,ltog);CHKERRQ(ierr);
    ierr = ISLocalToGlobalMappingDestroy(ltog);CHKERRQ(ierr);
  }
  ierr = PetscFree(gindices);CHKERRQ(ierr);

  /*
     Set the vector elements.
      - In this case set the values using the local ordering.
      - Each processor can contribute any vector entries,
        regardless of which processor "owns" them; any nonlocal
        contributions will be transferred to the appropriate processor
        during the assembly process.
      - In this example, the flag ADD_VALUES indicates that all
        contributions will be added together.
  */
  for (i=0; i<ng; i++) {
    ierr = VecSetValuesLocal(x,1,&i,&one,ADD_VALUES);CHKERRQ(ierr);
  }

  /*
     Assemble the vector, using the 2-step process:
       VecAssemblyBegin(), VecAssemblyEnd()
     Computations can be done while messages are in transition
     by placing code between these two statements.
  */
  ierr = VecAssemblyBegin(x);CHKERRQ(ierr);
  ierr = VecAssemblyEnd(x);CHKERRQ(ierr);

  /*
     View the vector; then destroy it.
  */
  ierr = VecView(x,PETSC_VIEWER_STDOUT_WORLD);CHKERRQ(ierr);
  ierr = VecDestroy(x);CHKERRQ(ierr);

  ierr = PetscFinalize();CHKERRQ(ierr);
  return 0;
}
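/*
   Usage sketch (not taken from this file -- the executable name "ex8" and the
   process count are assumptions).  Run the example in parallel with, e.g.:

       mpiexec -n 4 ./ex8

   Because each process asks for rank+1 local entries, 4 processes give local
   sizes 1, 2, 3, 4, so the assembled global vector has 10 entries, which
   VecView prints on PETSC_VIEWER_STDOUT_WORLD.
*/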