Actual source code: adda.c
1: /*
3: Contributed by Arvid Bessen, Columbia University, June 2007
5: Extension of DA object to any number of dimensions.
7: */
8: #include "../src/dm/adda/addaimpl.h"
12: /*@C
13: ADDACreate - Creates an ADDA object that translates between coordinates
14: in a geometric grid of arbitrary dimension and data in a PETSc vector
15: distributed on several processors.
17: Collective on MPI_Comm
19: Input Parameters:
20: + comm - MPI communicator
21: . dim - the dimension of the grid
22: . nodes - array with dim entries giving the number of nodes in each dimension
23: . procs - array with dim entries giving the number of processors in each dimension
24:          (or PETSC_NULL if they are to be determined automatically)
25: . dof - number of degrees of freedom per node
26: - periodic - array with dim entries; the i-th entry is PETSC_TRUE iff dimension i is periodic
28: Output Parameters:
29: . adda - pointer to ADDA data structure that is created
31: Level: intermediate
33: @*/
34: PetscErrorCode ADDACreate(MPI_Comm comm, PetscInt dim, PetscInt *nodes,PetscInt *procs,
35: PetscInt dof, PetscTruth *periodic,ADDA *adda_p)
36: {
38: ADDA adda;
39: PetscInt s=1; /* stencil width, fixed to 1 at the moment */
40: PetscMPIInt rank,size;
41: PetscInt i;
42: PetscInt nodes_total;
43: PetscInt nodesleft;
44: PetscInt procsleft;
45: PetscInt procsdimi;
46: PetscInt ranki;
47: PetscInt rpq;
52: #ifndef PETSC_USE_DYNAMIC_LIBRARIES
53: DMInitializePackage(PETSC_NULL);
54: #endif
56: PetscHeaderCreate(*adda_p,_p_ADDA,struct _ADDAOps,ADDA_COOKIE,0,"ADDA",comm,ADDADestroy,0);
57: adda = *adda_p;
58: adda->ops->view = ADDAView;
59: adda->ops->createglobalvector = ADDACreateGlobalVector;
60: adda->ops->getcoloring = ADDAGetColoring;
61: adda->ops->getmatrix = ADDAGetMatrix;
62: adda->ops->getinterpolation = ADDAGetInterpolation;
63: adda->ops->refine = ADDARefine;
64: adda->ops->coarsen = ADDACoarsen;
65: adda->ops->getinjection = ADDAGetInjection;
66: adda->ops->getaggregates = ADDAGetAggregates;
67:
68: MPI_Comm_size(comm,&size);
69: MPI_Comm_rank(comm,&rank);
70:
71: adda->dim = dim;
72: adda->dof = dof;
74: /* nodes */
75: PetscMalloc(dim*sizeof(PetscInt), &(adda->nodes));
76: PetscMemcpy(adda->nodes, nodes, dim*sizeof(PetscInt));
77: /* total number of nodes */
78: nodes_total = 1;
79: for(i=0; i<dim; i++) nodes_total *= nodes[i];
81: /* procs */
82: PetscMalloc(dim*sizeof(PetscInt), &(adda->procs));
83: /* create distribution of nodes to processors */
84: if(procs == PETSC_NULL) {
85: procs = adda->procs;
86: nodesleft = nodes_total;
87: procsleft = size;
88: /* figure out a good way to split the array to several processors */
89: for(i=0; i<dim; i++) {
90: if(i==dim-1) {
91: procs[i] = procsleft;
92: } else {
93: /* calculate best partition */
94: procs[i] = (PetscInt)(((double) nodes[i])*pow(((double) procsleft)/((double) nodesleft),1./((double)(dim-i)))+0.5);
95: if(procs[i]<1) procs[i]=1;
96: while( procs[i] > 0 ) {
97: if( procsleft % procs[i] )
98: procs[i]--;
99: else
100: break;
101: }
102: nodesleft /= nodes[i];
103: procsleft /= procs[i];
104: }
105: }
106: } else {
107: /* user provided the number of processors */
108: PetscMemcpy(adda->procs, procs, dim*sizeof(PetscInt));
109: }
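/* Editor's note (not part of adda.c): a worked example of the partition heuristic above.
   With dim=2, nodes={100,100} (nodes_total=10000) and size=4 processes:
     i=0: procs[0] = round(100*(4/10000)^(1/2)) = 2, which divides procsleft=4;
          nodesleft becomes 100 and procsleft becomes 2.
     i=1: the last dimension receives the remaining processes, so procs[1] = 2.
   The resulting process grid is 2 x 2. */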
110: /* check for validity */
111: procsleft = 1;
112: for(i=0; i<dim; i++) {
113: if (nodes[i] < procs[i]) {
114: SETERRQ3(PETSC_ERR_ARG_OUTOFRANGE,"Partition in direction %D is too fine! %D nodes, %D processors", i, nodes[i], procs[i]);
115: }
116: procsleft *= procs[i];
117: }
118: if(procsleft != size) {
119: SETERRQ(1, "Created or was provided with inconsistent distribution of processors");
120: }
122: /* periodicity */
123: adda->periodic = periodic;
124:
125: /* find out local region */
126: PetscMalloc(dim*sizeof(PetscInt), &(adda->lcs));
127: PetscMalloc(dim*sizeof(PetscInt), &(adda->lce));
128: procsdimi=size;
129: ranki=rank;
130: for(i=0; i<dim; i++) {
131: /* the number of processes for dimensions i+1, ..., dim-1 */
132: procsdimi /= procs[i];
133: /* rpq is this process's coordinate in dimension i */
134: rpq = ranki / procsdimi;
135: adda->lcs[i] = rpq * (nodes[i]/procs[i]);
136: if( rpq + 1 < procs[i] ) {
137: adda->lce[i] = (rpq + 1) * (nodes[i]/procs[i]);
138: } else {
139: /* last one gets all the rest */
140: adda->lce[i] = nodes[i];
141: }
142: ranki = ranki - rpq*procsdimi;
143: }
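/* Editor's note (not part of adda.c): a worked example of the decomposition above.
   With dim=2, nodes={10,9}, procs={2,3} (size=6) and rank=4:
     i=0: procsdimi=6/2=3, rpq=4/3=1 -> lcs[0]=5, lce[0]=10 (last block in this dimension); ranki=4-1*3=1.
     i=1: procsdimi=3/3=1, rpq=1/1=1 -> lcs[1]=3, lce[1]=6.
   Hence rank 4 owns the node range [5,10) x [3,6). */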
144:
145: /* compute local size */
146: adda->lsize=1;
147: for(i=0; i<dim; i++) {
148: adda->lsize *= (adda->lce[i]-adda->lcs[i]);
149: }
150: adda->lsize *= dof;
152: /* find out ghost points */
153: PetscMalloc(dim*sizeof(PetscInt), &(adda->lgs));
154: PetscMalloc(dim*sizeof(PetscInt), &(adda->lge));
155: for(i=0; i<dim; i++) {
156: if( periodic[i] ) {
157: adda->lgs[i] = adda->lcs[i] - s;
158: adda->lge[i] = adda->lce[i] + s;
159: } else {
160: adda->lgs[i] = PetscMax(adda->lcs[i] - s, 0);
161: adda->lge[i] = PetscMin(adda->lce[i] + s, nodes[i]);
162: }
163: }
164:
165: /* compute local size with ghost points */
166: adda->lgsize=1;
167: for(i=0; i<dim; i++) {
168: adda->lgsize *= (adda->lge[i]-adda->lgs[i]);
169: }
170: adda->lgsize *= dof;
172: /* create global and local prototype vector */
173: VecCreateMPIWithArray(comm,adda->lsize,PETSC_DECIDE,0,&(adda->global));
174: VecSetBlockSize(adda->global,adda->dof);
175: /* local includes ghost points */
176: VecCreateSeqWithArray(PETSC_COMM_SELF,adda->lgsize,0,&(adda->local));
177: VecSetBlockSize(adda->local,dof);
179: PetscMalloc(dim*sizeof(PetscInt), &(adda->refine));
180: for(i=0; i<dim; i++) adda->refine[i] = 3;
181: adda->dofrefine = 1;
183: return(0);
184: }
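/* Editor's note: a minimal usage sketch, not part of adda.c. It assumes a PETSc program
   that has already called PetscInitialize() and uses only routines defined in this file
   plus standard Vec calls of that PETSc generation; error checking is abbreviated.

     PetscErrorCode ierr;
     PetscInt       nodes[2]    = {64, 64};
     PetscTruth     periodic[2] = {PETSC_FALSE, PETSC_FALSE};
     ADDA           adda;
     Vec            g;

     ierr = ADDACreate(PETSC_COMM_WORLD, 2, nodes, PETSC_NULL, 1, periodic, &adda);CHKERRQ(ierr);
     ierr = ADDACreateGlobalVector(adda, &g);CHKERRQ(ierr);
     ierr = VecSet(g, 1.0);CHKERRQ(ierr);
     ierr = VecDestroy(g);CHKERRQ(ierr);
     ierr = ADDADestroy(adda);CHKERRQ(ierr);
*/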
188: /*@
189: ADDADestroy - Destroys a distributed array.
191: Collective on ADDA
193: Input Parameter:
194: . adda - the distributed array to destroy
196: Level: beginner
198: .keywords: distributed array, destroy
200: .seealso: ADDACreate()
201: @*/
202: PetscErrorCode ADDADestroy(ADDA adda)
203: {
208: /* check reference count */
209: if(--((PetscObject)adda)->refct > 0) return(0);
211: /* destroy the allocated data */
212: PetscFree(adda->nodes);
213: PetscFree(adda->procs);
214: PetscFree(adda->lcs);
215: PetscFree(adda->lce);
216: PetscFree(adda->lgs);
217: PetscFree(adda->lge);
219: PetscHeaderDestroy(adda);
220: return(0);
221: }
225: /*@
226: ADDAView - Views a distributed array.
228: Collective on ADDA
230: Input Parameters:
231: + adda - the ADDA object to view
232: - v - the viewer
234: Level: developer
236: .keywords: distributed array, view
238: .seealso: DMView()
239: @*/
240: PetscErrorCode ADDAView(ADDA adda, PetscViewer v) {
242: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
243: return(0);
244: }
248: /*@
249: ADDACreateGlobalVector - Creates global vector for distributed array.
251: Collective on ADDA
253: Input Parameter:
254: . adda - the distributed array for which we create a global vector
256: Output Parameter:
257: . vec - the global vector
259: Level: beginner
261: .keywords: distributed array, vector
263: .seealso: DMCreateGlobalVector()
264: @*/
265: PetscErrorCode ADDACreateGlobalVector(ADDA adda, Vec *vec) {
270: VecDuplicate(adda->global, vec);
271: return(0);
272: }
276: /*@
277: ADDAGetColoring - Creates coloring for distributed array.
279: Collective on ADDA
281: Input Parameters:
282: + adda - the distributed array for which we create the coloring
283: - ctype - IS_COLORING_GHOSTED or IS_COLORING_LOCAL
285: Output Parameter:
286: . coloring - the coloring
288: Level: developer
290: .keywords: distributed array, coloring
292: .seealso: DMGetColoring()
293: @*/
294: PetscErrorCode ADDAGetColoring(ADDA adda, ISColoringType ctype,ISColoring *coloring) {
296: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
297: return(0);
298: }
302: /*@
303: ADDAGetMatrix - Creates matrix compatible with distributed array.
305: Collective on ADDA
307: Input Parameters:
308: + adda - the distributed array for which we create the matrix
309: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
310: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
312: Output Parameter:
313: . mat - the empty Jacobian
315: Level: beginner
317: .keywords: distributed array, matrix
319: .seealso: DMGetMatrix()
320: @*/
321: PetscErrorCode ADDAGetMatrix(ADDA adda, const MatType mtype, Mat *mat) {
325: MatCreate(((PetscObject)adda)->comm, mat);
326: MatSetSizes(*mat, adda->lsize, adda->lsize, PETSC_DECIDE, PETSC_DECIDE);
327: MatSetType(*mat, mtype);
328: return(0);
329: }
333: /*@
334: ADDAGetMatrixNS - Creates matrix compatible with two distributed arrays
336: Collective on ADDA
338: Input Parameters:
339: + addar - the distributed array that indexes the rows of the matrix
340: . addac - the distributed array that indexes the columns of the matrix
341: - mtype - Supported types are MATSEQAIJ, MATMPIAIJ, MATSEQBAIJ, MATMPIBAIJ, or
342: any type which inherits from one of these (such as MATAIJ, MATLUSOL, etc.).
344: Output Parameter:
345: . mat - the empty Jacobian
347: Level: beginner
349: .keywords: distributed array, matrix
351: .seealso: DMGetMatrix()
352: @*/
353: PetscErrorCode ADDAGetMatrixNS(ADDA addar, ADDA addac, const MatType mtype, Mat *mat) {
359: MatCreate(((PetscObject)addar)->comm, mat);
360: MatSetSizes(*mat, addar->lsize, addac->lsize, PETSC_DECIDE, PETSC_DECIDE);
361: MatSetType(*mat, mtype);
362: return(0);
363: }
367: /*@
368: ADDAGetInterpolation - Gets interpolation matrix between two ADDA objects
370: Collective on ADDA
372: Input Parameters:
373: + adda1 - the fine ADDA object
374: - adda2 - the second, coarser ADDA object
376: Output Parameters:
377: + mat - the interpolation matrix
378: - vec - the scaling (optional)
380: Level: developer
382: .keywords: distributed array, interpolation
384: .seealso: DMGetInterpolation()
385: @*/
386: PetscErrorCode ADDAGetInterpolation(ADDA adda1,ADDA adda2,Mat *mat,Vec *vec) {
388: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
389: return(0);
390: }
394: /*@
395: ADDARefine - Refines a distributed array.
397: Collective on ADDA
399: Input Parameters:
400: + adda - the distributed array to refine
401: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
403: Output Parameter:
404: . addaf - the refined ADDA
406: Level: developer
408: .keywords: distributed array, refine
410: .seealso: DMRefine()
411: @*/
412: PetscErrorCode ADDARefine(ADDA adda, MPI_Comm comm, ADDA *addaf) {
414: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
415: return(0);
416: }
420: /*@
421: ADDACoarsen - Coarsens a distributed array.
423: Collective on ADDA
425: Input Parameters:
426: + adda - the distributed array to coarsen
427: - comm - the communicator to contain the new ADDA object (or PETSC_NULL)
429: Output Parameter:
430: . addac - the coarsened ADDA
432: Level: developer
434: .keywords: distributed array, coarsen
436: .seealso: DMCoarsen()
437: @*/
438: PetscErrorCode ADDACoarsen(ADDA adda, MPI_Comm comm,ADDA *addac) {
440: PetscInt *nodesc;
441: PetscInt *procsc;
442: PetscInt dofc;
443: PetscInt i;
447: PetscMalloc(adda->dim*sizeof(PetscInt), &nodesc);
448: for(i=0; i<adda->dim; i++) {
449: nodesc[i] = (adda->nodes[i] % adda->refine[i]) ? adda->nodes[i] / adda->refine[i] + 1 : adda->nodes[i] / adda->refine[i];
450: }
451: dofc = (adda->dof % adda->dofrefine) ? adda->dof / adda->dofrefine + 1 : adda->dof / adda->dofrefine;
452: PetscMalloc(adda->dim*sizeof(PetscInt), &procsc);
453: PetscMemcpy(procsc, adda->procs, adda->dim*sizeof(PetscInt));
454: ADDACreate(((PetscObject)adda)->comm, adda->dim, nodesc, procsc, dofc, adda->periodic, addac);
455: /* copy refinement factors */
456: ADDASetRefinement(*addac, adda->refine, adda->dofrefine);
457: return(0);
458: }
462: /*@
463: ADDAGetInjection - Gets injection between distributed arrays.
465: Collective on ADDA
467: Input Parameters:
468: + adda1 - the fine ADDA object
469: - adda2 - the second, coarser ADDA object
471: Output Parameter:
472: . ctx - the injection
474: Level: developer
476: .keywords: distributed array, injection
478: .seealso: DMGetInjection()
479: @*/
480: PetscErrorCode ADDAGetInjection(ADDA adda1, ADDA adda2, VecScatter *ctx) {
482: SETERRQ(PETSC_ERR_SUP, "Not implemented yet");
483: return(0);
484: }
486: /*@C
487: ADDAHCiterStartup - performs the first check for an iteration through a hypercube
488: lc, uc, idx all have to be valid arrays of size dim
489: This function sets idx to lc and then checks whether the lower corner (lc) is less
490: than or equal to the upper corner (uc). If lc "<=" uc in all coordinates, it returns PETSC_TRUE,
491: and PETSC_FALSE otherwise.
492:
493: Input Parameters:
494: + dim - the number of dimensions
495: . lc - the "lower" corner
496: - uc - the "upper" corner
498: Output Parameters:
499: . idx - the index, which this function initializes to lc
501: Level: developer
502: @*/
503: PetscTruth ADDAHCiterStartup(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
504: PetscErrorCode ierr;
505: PetscInt i;
507: ierr = PetscMemcpy(idx, lc, sizeof(PetscInt)*dim);
508: if(ierr) {
509: PetscError(__LINE__,__FUNCT__,__FILE__,__SDIR__,ierr,0," ");
510: return PETSC_FALSE;
511: }
512: for(i=0; i<dim; i++) {
513: if( lc[i] > uc[i] ) {
514: return PETSC_FALSE;
515: }
516: }
517: return PETSC_TRUE;
518: }
520: /*@C
521: ADDAHCiter - iterates through a hypercube
522: lc, uc, idx all have to be valid arrays of size dim
523: This function increments idx and returns PETSC_FALSE if idx exceeds uc, and PETSC_TRUE otherwise.
524: There are no guarantees on what happens if idx is not in the hypercube
525: spanned by lc and uc; this should be checked with ADDAHCiterStartup().
526:
527: Use this code as follows:
528: if( ADDAHCiterStartup(dim, lc, uc, idx) ) {
529: do {
530: ...
531: } while( ADDAHCiter(dim, lc, uc, idx) );
532: }
533:
534: Input Parameters:
535: + dim - the number of dimensions
536: . lc - the "lower" corner
537: - uc - the "upper" corner
539: Output Parameters:
540: . idx - the index that this function increases
542: Level: developer
543: @*/
544: PetscTruth ADDAHCiter(const PetscInt dim, const PetscInt *const lc, const PetscInt *const uc, PetscInt *const idx) {
545: PetscInt i;
546: for(i=dim-1; i>=0; i--) {
547: idx[i] += 1;
548: if( uc[i] > idx[i] ) {
549: return PETSC_TRUE;
550: } else {
551: idx[i] -= uc[i] - lc[i];
552: }
553: }
554: return PETSC_FALSE;
555: }
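/* Editor's note: a sketch of the iteration pattern documented above, not part of adda.c.
   It visits every locally owned node of a hypothetical 2-dimensional ADDA named adda,
   using only routines defined in this file; error checking is omitted.

     ADDA      adda;        // hypothetical: a previously created 2-dimensional ADDA
     PetscInt  dim = 2;
     PetscInt *lc, *uc, *idx;

     ADDAGetCorners(adda, &lc, &uc);
     PetscMalloc(dim*sizeof(PetscInt), &idx);
     if( ADDAHCiterStartup(dim, lc, uc, idx) ) {
       do {
         // idx[0..dim-1] now holds the coordinates of one locally owned node
       } while( ADDAHCiter(dim, lc, uc, idx) );
     }
     PetscFree(idx); PetscFree(lc); PetscFree(uc);
*/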
559: /*@C
560: ADDAGetAggregates - Gets the aggregates that map between
561: grids associated with two ADDAs.
563: Collective on ADDA
565: Input Parameters:
566: + addac - the coarse grid ADDA
567: - addaf - the fine grid ADDA
569: Output Parameters:
570: . rest - the restriction matrix (transpose of the projection matrix)
572: Level: intermediate
574: .keywords: interpolation, restriction, multigrid
576: .seealso: ADDARefine(), ADDAGetInjection(), ADDAGetInterpolation()
577: @*/
578: PetscErrorCode ADDAGetAggregates(ADDA addac,ADDA addaf,Mat *rest)
579: {
580: PetscErrorCode ierr=0;
581: PetscInt i;
582: PetscInt dim;
583: PetscInt dofc, doff;
584: PetscInt *lcs_c, *lce_c;
585: PetscInt *lcs_f, *lce_f;
586: PetscInt *fgs, *fge;
587: PetscInt fgdofs, fgdofe;
588: ADDAIdx iter_c, iter_f;
589: PetscInt max_agg_size;
590: PetscMPIInt comm_size;
591: ADDAIdx *fine_nodes;
592: PetscInt fn_idx;
593: PetscScalar *one_vec;
599: if (addac->dim != addaf->dim) SETERRQ2(PETSC_ERR_ARG_INCOMP,"Dimensions of ADDA do not match %D %D", addac->dim, addaf->dim);
600: /* if (addac->dof != addaf->dof) SETERRQ2(PETSC_ERR_ARG_INCOMP,"DOF of ADDA do not match %D %D", addac->dof, addaf->dof); */
601: dim = addac->dim;
602: dofc = addac->dof;
603: doff = addaf->dof;
605: ADDAGetCorners(addac, &lcs_c, &lce_c);
606: ADDAGetCorners(addaf, &lcs_f, &lce_f);
607:
608: /* compute maximum size of aggregate */
609: max_agg_size = 1;
610: for(i=0; i<dim; i++) {
611: max_agg_size *= addaf->nodes[i] / addac->nodes[i] + 1;
612: }
613: max_agg_size *= doff / dofc + 1;
615: /* create the matrix that will contain the restriction operator */
616: MPI_Comm_size(PETSC_COMM_WORLD,&comm_size);
618: /* construct matrix */
619: if( comm_size == 1 ) {
620: ADDAGetMatrixNS(addac, addaf, MATSEQAIJ, rest);
621: MatSeqAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL);
622: } else {
623: ADDAGetMatrixNS(addac, addaf, MATMPIAIJ, rest);
624: MatMPIAIJSetPreallocation(*rest, max_agg_size, PETSC_NULL, max_agg_size, PETSC_NULL);
625: }
626: /* store nodes in the fine grid here */
627: PetscMalloc(sizeof(ADDAIdx)*max_agg_size, &fine_nodes);
628: /* these are the values to set to, a collection of 1's */
629: PetscMalloc(sizeof(PetscScalar)*max_agg_size, &one_vec);
630: /* initialize */
631: for(i=0; i<max_agg_size; i++) {
632: PetscMalloc(sizeof(PetscInt)*dim, &(fine_nodes[i].x));
633: one_vec[i] = 1.0;
634: }
636: /* get iterators */
637: PetscMalloc(sizeof(PetscInt)*dim, &(iter_c.x));
638: PetscMalloc(sizeof(PetscInt)*dim, &(iter_f.x));
640: /* the fine grid node corner for each coarse grid node */
641: PetscMalloc(sizeof(PetscInt)*dim, &fgs);
642: PetscMalloc(sizeof(PetscInt)*dim, &fge);
644: /* loop over all coarse nodes */
645: PetscMemcpy(iter_c.x, lcs_c, sizeof(PetscInt)*dim);
646: if( ADDAHCiterStartup(dim, lcs_c, lce_c, iter_c.x) ) {
647: do {
648: /* find corresponding fine grid nodes */
649: for(i=0; i<dim; i++) {
650: fgs[i] = iter_c.x[i]*addaf->nodes[i]/addac->nodes[i];
651: fge[i] = PetscMin((iter_c.x[i]+1)*addaf->nodes[i]/addac->nodes[i], addaf->nodes[i]);
652: }
653: /* treat all dof of the coarse grid */
654: for(iter_c.d=0; iter_c.d<dofc; iter_c.d++) {
655: /* find corresponding fine grid dof's */
656: fgdofs = iter_c.d*doff/dofc;
657: fgdofe = PetscMin((iter_c.d+1)*doff/dofc, doff);
658: /* we now know the "box" of all the fine grid nodes that are mapped to one coarse grid node */
659: fn_idx = 0;
660: /* loop over those corresponding fine grid nodes */
661: if( ADDAHCiterStartup(dim, fgs, fge, iter_f.x) ) {
662: do {
663: /* loop over all corresponding fine grid dof */
664: for(iter_f.d=fgdofs; iter_f.d<fgdofe; iter_f.d++) {
665: PetscMemcpy(fine_nodes[fn_idx].x, iter_f.x, sizeof(PetscInt)*dim);
666: fine_nodes[fn_idx].d = iter_f.d;
667: fn_idx++;
668: }
669: } while( ADDAHCiter(dim, fgs, fge, iter_f.x) );
670: }
671: /* add all these points to one aggregate */
672: ADDAMatSetValues(*rest, addac, 1, &iter_c, addaf, fn_idx, fine_nodes, one_vec, INSERT_VALUES);
673: }
674: } while( ADDAHCiter(dim, lcs_c, lce_c, iter_c.x) );
675: }
677: /* free memory */
678: PetscFree(fgs);
679: PetscFree(fge);
680: PetscFree(iter_c.x);
681: PetscFree(iter_f.x);
682: PetscFree(lcs_c);
683: PetscFree(lce_c);
684: PetscFree(lcs_f);
685: PetscFree(lce_f);
686: PetscFree(one_vec);
687: for(i=0; i<max_agg_size; i++) {
688: PetscFree(fine_nodes[i].x);
689: }
690: PetscFree(fine_nodes);
692: MatAssemblyBegin(*rest, MAT_FINAL_ASSEMBLY);
693: MatAssemblyEnd(*rest, MAT_FINAL_ASSEMBLY);
694: return(0);
695: }
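/* Editor's note: a sketch of how ADDACoarsen() and ADDAGetAggregates() might be combined,
   not part of adda.c. It assumes a previously created fine-grid ADDA named addaf with the
   default refinement factors set in ADDACreate(); error checking is abbreviated.

     ADDA addac;
     Mat  rest;

     ADDACoarsen(addaf, PETSC_NULL, &addac);   // coarsen by the refinement factor (3 per dimension)
     ADDAGetAggregates(addac, addaf, &rest);   // rows are coarse-grid nodes, columns are fine-grid nodes
     ...                                       // use the aggregation/restriction matrix
     MatDestroy(rest);
     ADDADestroy(addac);
*/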
699: /*@
700: ADDASetRefinement - Sets the refinement factors of the distributed arrays.
702: Collective on ADDA
704: Input Parameters:
705: + adda - the ADDA object
706: . refine - array of refinement factors
707: - dofrefine - the refinement factor for the dof, usually just 1
709: Level: developer
711: .keywords: distributed array, refinement
712: @*/
713: PetscErrorCode ADDASetRefinement(ADDA adda, PetscInt *refine, PetscInt dofrefine) {
718: PetscMemcpy(adda->refine, refine, adda->dim*sizeof(PetscInt));
719: adda->dofrefine = dofrefine;
720: return(0);
721: }
725: /*@
726: ADDAGetCorners - Gets the corners of the local area
728: Collective on ADDA
730: Input Parameter:
731: . adda - the ADDA object
733: Output Parameters:
734: + lcorner - the "lower" corner
735: - ucorner - the "upper" corner
737: Both lcorner and ucorner are allocated by this procedure and will point to an
738: array of size adda->dim. The caller is responsible for freeing them with PetscFree().
740: Level: beginner
742: .keywords: distributed array, corners
743: @*/
744: PetscErrorCode ADDAGetCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
750: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
751: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
752: PetscMemcpy(*lcorner, adda->lcs, adda->dim*sizeof(PetscInt));
753: PetscMemcpy(*ucorner, adda->lce, adda->dim*sizeof(PetscInt));
754: return(0);
755: }
759: /*@
760: ADDAGetGhostCorners - Gets the ghost corners of the local area
762: Collective on ADDA
764: Input Parameter:
765: . adda - the ADDA object
767: Output Parameters:
768: + lcorner - the "lower" corner of the ghosted area
769: - ucorner - the "upper" corner of the ghosted area
771: Both lcorner and ucorner are allocated by this procedure and will point to an
772: array of size adda->dim. The caller is responsible for freeing them with PetscFree().
774: Level: beginner
776: .keywords: distributed array, ghost corners
777: @*/
778: PetscErrorCode ADDAGetGhostCorners(ADDA adda, PetscInt **lcorner, PetscInt **ucorner) {
784: PetscMalloc(adda->dim*sizeof(PetscInt), lcorner);
785: PetscMalloc(adda->dim*sizeof(PetscInt), ucorner);
786: PetscMemcpy(*lcorner, adda->lgs, adda->dim*sizeof(PetscInt));
787: PetscMemcpy(*ucorner, adda->lge, adda->dim*sizeof(PetscInt));
788: return(0);
789: }
795: /*@C
796: ADDAMatSetValues - Inserts or adds a block of values into a matrix. The values
797: are indexed geometrically with the help of the ADDA data structure.
798: These values may be cached, so MatAssemblyBegin() and MatAssemblyEnd()
799: MUST be called after all calls to ADDAMatSetValues() have been completed.
801: Not Collective
803: Input Parameters:
804: + mat - the matrix
805: . addam - the ADDA geometry information for the rows
806: . m - the number of rows
807: . idxm - the row indices, each of them a proper ADDAIdx
808: . addan - the ADDA geometry information for the columns
809: . n - the number of columns
810: . idxn - the column indices, each of them a proper ADDAIdx
811: . v - a logically two-dimensional array of values of size m*n
812: - addv - either ADD_VALUES or INSERT_VALUES, where
813: ADD_VALUES adds values to any existing entries, and
814: INSERT_VALUES replaces existing entries with new values
816: Notes:
817: By default the values, v, are row-oriented and unsorted.
818: See MatSetOption() for other options.
820: Calls to ADDAMatSetValues() (and MatSetValues()) with the INSERT_VALUES and ADD_VALUES
821: options cannot be mixed without intervening calls to the assembly
822: routines.
824: Efficiency Alert:
825: The routine ADDAMatSetValuesBlocked() may offer much better efficiency
826: for users of block sparse formats (MATSEQBAIJ and MATMPIBAIJ).
828: Level: beginner
830: Concepts: matrices^putting entries in
832: .seealso: MatSetOption(), MatAssemblyBegin(), MatAssemblyEnd(), MatSetValues(), ADDAMatSetValuesBlocked(),
833: InsertMode, INSERT_VALUES, ADD_VALUES
834: @*/
835: PetscErrorCode ADDAMatSetValues(Mat mat, ADDA addam, PetscInt m, const ADDAIdx idxm[],
836: ADDA addan, PetscInt n, const ADDAIdx idxn[],
837: const PetscScalar v[], InsertMode addv) {
839: PetscInt *nodemult;
840: PetscInt i, j;
841: PetscInt *matidxm, *matidxn;
842: PetscInt *x, d;
843: PetscInt idx;
846: /* find correct multiplying factors */
847: PetscMalloc(addam->dim*sizeof(PetscInt), &nodemult);
848: nodemult[addam->dim-1] = 1;
849: for(j=addam->dim-2; j>=0; j--) {
850: nodemult[j] = nodemult[j+1]*(addam->nodes[j+1]);
851: }
852: /* convert each coordinate in idxm to the matrix row index */
853: PetscMalloc(m*sizeof(PetscInt), &matidxm);
854: for(i=0; i<m; i++) {
855: x = idxm[i].x; d = idxm[i].d;
856: idx = 0;
857: for(j=addam->dim-1; j>=0; j--) {
858: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
859: if( addam->periodic[j] ) { /* periodic wraps around */
860: x[j] += addam->nodes[j];
861: } else { /* non-periodic get discarded */
862: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
863: goto endofloop_m;
864: }
865: }
866: if( x[j] >= addam->nodes[j] ) { /* "right", "above", etc. of boundary */
867: if( addam->periodic[j] ) { /* periodic wraps around */
868: x[j] -= addam->nodes[j];
869: } else { /* non-periodic get discarded */
870: matidxm[i] = -1; /* entries with -1 are ignored by MatSetValues() */
871: goto endofloop_m;
872: }
873: }
874: idx += x[j]*nodemult[j];
875: }
876: matidxm[i] = idx*(addam->dof) + d;
877: endofloop_m:
878: ;
879: }
880: PetscFree(nodemult);
882: /* find correct multiplying factors */
883: PetscMalloc(addan->dim*sizeof(PetscInt), &nodemult);
884: nodemult[addan->dim-1] = 1;
885: for(j=addan->dim-2; j>=0; j--) {
886: nodemult[j] = nodemult[j+1]*(addan->nodes[j+1]);
887: }
888: /* convert each coordinate in idxn to the matrix column index */
889: PetscMalloc(n*sizeof(PetscInt), &matidxn);
890: for(i=0; i<n; i++) {
891: x = idxn[i].x; d = idxn[i].d;
892: idx = 0;
893: for(j=addan->dim-1; j>=0; j--) {
894: if( x[j] < 0 ) { /* "left", "below", etc. of boundary */
895: if( addan->periodic[j] ) { /* periodic wraps around */
896: x[j] += addan->nodes[j];
897: } else { /* non-periodic get discarded */
898: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
899: goto endofloop_n;
900: }
901: }
902: if( x[j] >= addan->nodes[j] ) { /* "right", "above", etc. of boundary */
903: if( addan->periodic[j] ) { /* periodic wraps around */
904: x[j] -= addan->nodes[j];
905: } else { /* non-periodic get discarded */
906: matidxn[i] = -1; /* entries with -1 are ignored by MatSetValues() */
907: goto endofloop_n;
908: }
909: }
910: idx += x[j]*nodemult[j];
911: }
912: matidxn[i] = idx*(addan->dof) + d;
913: endofloop_n:
914: ;
915: }
916: /* call original MatSetValues() */
917: MatSetValues(mat, m, matidxm, n, matidxn, v, addv);
918: /* clean up */
919: PetscFree(nodemult);
920: PetscFree(matidxm);
921: PetscFree(matidxn);
922: return(0);
923: }
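/* Editor's note: a sketch of assembling a matrix with ADDAMatSetValues(), not part of adda.c.
   It sets the diagonal entry of every locally owned node of a hypothetical 2-dimensional,
   1-dof ADDA named adda, using only routines defined in this file; error checking is omitted.

     ADDA         adda;       // hypothetical: a previously created 2-dimensional, 1-dof ADDA
     PetscInt     dim = 2;
     Mat          A;
     ADDAIdx      row;
     PetscInt    *lc, *uc;
     PetscScalar  one = 1.0;

     ADDAGetMatrix(adda, MATMPIAIJ, &A);
     ADDAGetCorners(adda, &lc, &uc);
     PetscMalloc(dim*sizeof(PetscInt), &(row.x));
     row.d = 0;
     if( ADDAHCiterStartup(dim, lc, uc, row.x) ) {
       do {
         ADDAMatSetValues(A, adda, 1, &row, adda, 1, &row, &one, INSERT_VALUES);
       } while( ADDAHCiter(dim, lc, uc, row.x) );
     }
     MatAssemblyBegin(A, MAT_FINAL_ASSEMBLY);
     MatAssemblyEnd(A, MAT_FINAL_ASSEMBLY);
     PetscFree(row.x); PetscFree(lc); PetscFree(uc);
*/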