Actual source code: mpicsrperm.c

#define PETSCMAT_DLL

#include "src/mat/impls/aij/mpi/mpiaij.h"

/* Conversion routine for the local sequential blocks, defined with the
 * SEQCSRPERM implementation. */
extern PetscErrorCode MatConvert_SeqAIJ_SeqCSRPERM(Mat,MatType,MatReuse,Mat*);
/*@C
   MatCreateMPICSRPERM - Creates a sparse parallel matrix whose local
   portions are stored as SEQCSRPERM matrices (a matrix class that inherits
   from SEQAIJ but includes some optimizations to allow more effective
   vectorization).  The same guidelines that apply to MPIAIJ matrices for
   preallocating the matrix storage apply here as well.

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI communicator
.  m - number of local rows (or PETSC_DECIDE to have it calculated if M is given)
           This value should be the same as the local size used in creating the
           y vector for the matrix-vector product y = Ax.
.  n - number of local columns (or PETSC_DECIDE to have it calculated if N is given)
           This value should be the same as the local size used in creating the
           x vector for the matrix-vector product y = Ax.  For square matrices
           n is almost always m.
.  M - number of global rows (or PETSC_DETERMINE to have it calculated if m is given)
.  N - number of global columns (or PETSC_DETERMINE to have it calculated if n is given)
.  d_nz  - number of nonzeros per row in the DIAGONAL portion of the local submatrix
           (same value is used for all local rows)
.  d_nnz - array containing the number of nonzeros in the various rows of the
           DIAGONAL portion of the local submatrix (possibly different for each row)
           or PETSC_NULL, if d_nz is used to specify the nonzero structure.
           The size of this array is equal to the number of local rows, i.e. 'm'.
           You must leave room for the diagonal entry even if it is zero.
.  o_nz  - number of nonzeros per row in the OFF-DIAGONAL portion of the local
           submatrix (same value is used for all local rows).
-  o_nnz - array containing the number of nonzeros in the various rows of the
           OFF-DIAGONAL portion of the local submatrix (possibly different for
           each row) or PETSC_NULL, if o_nz is used to specify the nonzero
           structure. The size of this array is equal to the number
           of local rows, i.e. 'm'.

   Output Parameter:
.  A - the matrix

   Notes:
   If the *_nnz parameter is given then the *_nz parameter is ignored

   m,n,M,N parameters specify the size of the matrix, and its partitioning across
   processors, while d_nz,d_nnz,o_nz,o_nnz parameters specify the approximate
   storage requirements for this matrix.

   If PETSC_DECIDE or PETSC_DETERMINE is used for a particular argument on one
   processor then it must be used on all processors that share the object for
   that argument.

   The user MUST specify either the local or global matrix dimensions
   (possibly both).

   The parallel matrix is partitioned such that the first m0 rows belong to
   process 0, the next m1 rows belong to process 1, the next m2 rows belong
   to process 2, etc., where m0,m1,m2,... are the values of the input parameter 'm'
   on each process.

   The DIAGONAL portion of the local submatrix of a processor can be defined
   as the submatrix which is obtained by extracting the part corresponding
   to the rows r1-r2 and columns r1-r2 of the global matrix, where r1 is the
   first row that belongs to the processor, and r2 is the last row belonging
   to this processor. This is a square mxm matrix. The remaining portion
   of the local submatrix (m x (N-m)) constitutes the OFF-DIAGONAL portion.

   If o_nnz and d_nnz are specified, then o_nz and d_nz are ignored.

   When calling this routine with a single process communicator, a matrix of
   type SEQCSRPERM is returned.  If a matrix of type MPICSRPERM is desired
   for this type of communicator, use the construction mechanism:
     MatCreate(...,&A); MatSetType(A,MATMPICSRPERM); MatMPIAIJSetPreallocation(A,...);

   By default, this format uses inodes (identical nodes) when possible.
   We search for consecutive rows with the same nonzero structure, thereby
   reusing matrix information to achieve increased efficiency.

   Options Database Keys:
+  -mat_no_inode  - Do not use inodes
.  -mat_inode_limit <limit> - Sets inode limit (max limit=5)
-  -mat_aij_oneindex - Internally use indexing starting at 1
        rather than 0.  Note that when calling MatSetValues(),
        the user still MUST index entries starting at 0!

   Level: intermediate

.keywords: matrix, cray, sparse, parallel

.seealso: MatCreate(), MatCreateSeqCSRPERM(), MatSetValues()
@*/
PetscErrorCode  MatCreateMPICSRPERM(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,PetscInt d_nz,const PetscInt d_nnz[],PetscInt o_nz,const PetscInt o_nnz[],Mat *A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size > 1) {
    /* Multiple processes: use the parallel CSRPERM format with MPIAIJ-style
     * preallocation for the diagonal and off-diagonal blocks. */
    ierr = MatSetType(*A,MATMPICSRPERM);CHKERRQ(ierr);
    ierr = MatMPIAIJSetPreallocation(*A,d_nz,d_nnz,o_nz,o_nnz);CHKERRQ(ierr);
  } else {
    /* Single process: fall back to the sequential CSRPERM format. */
    ierr = MatSetType(*A,MATSEQCSRPERM);CHKERRQ(ierr);
    ierr = MatSeqAIJSetPreallocation(*A,d_nz,d_nnz);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
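/* Usage sketch (not part of the original source): one plausible way to call
 * MatCreateMPICSRPERM() following the guidelines in the man page above, here
 * for a 1-D tridiagonal operator distributed by rows.  The routine name
 * ExampleCreateTridiagonalCSRPERM and the preallocation counts are
 * hypothetical; a real application would derive d_nz/o_nz (or the d_nnz/o_nnz
 * arrays) from its actual sparsity pattern. */
PetscErrorCode ExampleCreateTridiagonalCSRPERM(MPI_Comm comm,PetscInt mlocal,PetscInt N,Mat *A)
{
  PetscErrorCode ierr;
  PetscInt       rstart,rend,i,ncols,cols[3];
  PetscScalar    vals[3];

  PetscFunctionBegin;
  /* At most 3 nonzeros per row fall in the DIAGONAL block; at most 2 neighbors
   * of a boundary row can lie off-process, so o_nz = 2 is a safe overestimate. */
  ierr = MatCreateMPICSRPERM(comm,mlocal,mlocal,N,N,3,PETSC_NULL,2,PETSC_NULL,A);CHKERRQ(ierr);
  ierr = MatGetOwnershipRange(*A,&rstart,&rend);CHKERRQ(ierr);
  for (i=rstart; i<rend; i++) {
    ncols = 0;
    if (i > 0)   {cols[ncols] = i-1; vals[ncols] = -1.0; ncols++;}
    cols[ncols] = i; vals[ncols] = 2.0; ncols++;
    if (i < N-1) {cols[ncols] = i+1; vals[ncols] = -1.0; ncols++;}
    ierr = MatSetValues(*A,1,&i,ncols,cols,vals,INSERT_VALUES);CHKERRQ(ierr);
  }
  ierr = MatAssemblyBegin(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  ierr = MatAssemblyEnd(*A,MAT_FINAL_ASSEMBLY);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}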


/* MatConvert_MPIAIJ_MPICSRPERM() converts an MPIAIJ matrix into an
 * MPICSRPERM matrix.  This routine is called by the MatCreate_MPICSRPERM()
 * routine, but can also be used to convert an assembled MPIAIJ matrix
 * into an MPICSRPERM one. */

PetscErrorCode  MatConvert_MPIAIJ_MPICSRPERM(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  PetscErrorCode ierr;
  Mat            B = *newmat;
  Mat_MPIAIJ     *mpimat;
  Mat            localmat_A, localmat_B;

  PetscFunctionBegin;
  if (reuse == MAT_INITIAL_MATRIX) {
    ierr = MatDuplicate(A,MAT_COPY_VALUES,&B);CHKERRQ(ierr);
  }
  /* Look up the MPIAIJ data only once B is known to be valid (in the
   * MAT_INITIAL_MATRIX case B is the freshly duplicated copy). */
  mpimat = (Mat_MPIAIJ *) B->data;

  /* Convert from MPIAIJ to MPICSRPERM by simply changing the typename to MPICSRPERM
   * and converting the local submatrices from SEQAIJ to SEQCSRPERM. */
  ierr = PetscObjectChangeTypeName((PetscObject)B,MATMPICSRPERM);CHKERRQ(ierr);
  localmat_A = mpimat->A;
  localmat_B = mpimat->B;
  ierr = MatConvert_SeqAIJ_SeqCSRPERM(localmat_A,MATSEQCSRPERM,MAT_REUSE_MATRIX,&localmat_A);CHKERRQ(ierr);
  ierr = MatConvert_SeqAIJ_SeqCSRPERM(localmat_B,MATSEQCSRPERM,MAT_REUSE_MATRIX,&localmat_B);CHKERRQ(ierr);
  *newmat = B;
  PetscFunctionReturn(0);
}
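/* Usage sketch (not part of the original source): as the comment above notes,
 * an already assembled MPIAIJ matrix can also be turned into an MPICSRPERM
 * matrix.  From user code this would normally go through the generic
 * MatConvert() interface rather than calling the conversion routine directly;
 * the function name ExampleConvertToCSRPERM is hypothetical. */
PetscErrorCode ExampleConvertToCSRPERM(Mat aijmat,Mat *permmat)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* MAT_INITIAL_MATRIX leaves aijmat untouched and creates a new matrix;
   * converting in place would instead use MAT_REUSE_MATRIX with &aijmat. */
  ierr = MatConvert(aijmat,MATMPICSRPERM,MAT_INITIAL_MATRIX,permmat);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}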


PetscErrorCode  MatCreate_MPICSRPERM(Mat A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  /* Build the matrix as MPIAIJ first, then convert it in place to MPICSRPERM. */
  ierr = MatSetType(A,MATMPIAIJ);CHKERRQ(ierr);
  ierr = MatConvert_MPIAIJ_MPICSRPERM(A,MATMPICSRPERM,MAT_REUSE_MATRIX,&A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}

/*MC
   MATCSRPERM - MATCSRPERM = "csrperm" - A matrix type to be used for sparse matrices.

   This matrix type is identical to MATSEQCSRPERM when constructed with a single process communicator,
   and MATMPICSRPERM otherwise.  As a result, for single process communicators,
   MatSeqCSRPERMSetPreallocation is supported, and similarly MatMPICSRPERMSetPreallocation is supported
   for communicators controlling multiple processes.  It is recommended that you call both of
   the above preallocation routines for simplicity.

   Options Database Keys:
. -mat_type csrperm - sets the matrix type to "csrperm" during a call to MatSetFromOptions()

  Level: beginner

.seealso: MatCreateMPICSRPERM(), MATSEQCSRPERM, MATMPICSRPERM
M*/

PetscErrorCode  MatCreate_CSRPERM(Mat A)
{
  PetscErrorCode ierr;
  PetscMPIInt    size;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(A->comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    ierr = MatSetType(A,MATSEQCSRPERM);CHKERRQ(ierr);
  } else {
    ierr = MatSetType(A,MATMPICSRPERM);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
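
/* Usage sketch (not part of the original source): per the MATCSRPERM man page
 * above, the type can also be selected at runtime with -mat_type csrperm.
 * The routine name ExampleCreateFromOptions and its arguments are hypothetical;
 * MATCSRPERM is assumed to be the registered type name from petscmat.h. */
PetscErrorCode ExampleCreateFromOptions(MPI_Comm comm,PetscInt m,PetscInt n,PetscInt M,PetscInt N,Mat *A)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = MatCreate(comm,A);CHKERRQ(ierr);
  ierr = MatSetSizes(*A,m,n,M,N);CHKERRQ(ierr);
  /* Default to CSRPERM, which dispatches to SEQCSRPERM or MPICSRPERM based on
   * the communicator size; -mat_type on the command line can still override it. */
  ierr = MatSetType(*A,MATCSRPERM);CHKERRQ(ierr);
  ierr = MatSetFromOptions(*A);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}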