Actual source code: mpisbaijspooles.c
#define PETSCMAT_DLL

/*
   Provides an interface to the Spooles parallel sparse solver (MPI SPOOLES)
*/
#include "src/mat/impls/aij/seq/spooles/spooles.h"
#include "src/mat/impls/sbaij/mpi/mpisbaij.h"
PetscErrorCode MatDestroy_MPISBAIJSpooles(Mat A)
{
  /* MPISBAIJ_Spooles isn't really the matrix that USES Spooles; rather, it is a */
  /* factory class for creating a symmetric matrix that can invoke Spooles'      */
  /* parallel Cholesky solver. As a result, we don't have to clean up the data   */
  /* set up for Spooles, as is done in MatDestroy_MPIAIJ_Spooles.                */
  MatConvert_Spooles_Base(A,MATMPISBAIJ,MAT_REUSE_MATRIX,&A);
  (*A->ops->destroy)(A);
  return(0);
}
PetscErrorCode MatAssemblyEnd_MPISBAIJSpooles(Mat A,MatAssemblyType mode)
{
  PetscInt    bs;
  Mat_Spooles *lu = (Mat_Spooles*)(A->spptr);

  (*lu->MatAssemblyEnd)(A,mode);
  MatGetBlockSize(A,&bs);
  if (bs > 1) SETERRQ1(PETSC_ERR_SUP,"Block size %D not supported by Spooles",bs);
  lu->MatCholeskyFactorSymbolic  = A->ops->choleskyfactorsymbolic;
  A->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPISBAIJSpooles;
  return(0);
}
/*
   input:
     F: the numeric factor
   output:
     nneg, nzero, npos: the global matrix inertia, returned on all processes
*/
PetscErrorCode MatGetInertia_MPISBAIJSpooles(Mat F,int *nneg,int *nzero,int *npos)
{
  Mat_Spooles *lu = (Mat_Spooles*)F->spptr;
  int         neg,zero,pos,sbuf[3],rbuf[3];

  FrontMtx_inertia(lu->frontmtx,&neg,&zero,&pos);
  sbuf[0] = neg; sbuf[1] = zero; sbuf[2] = pos;
  MPI_Allreduce(sbuf,rbuf,3,MPI_INT,MPI_SUM,F->comm);
  *nneg = rbuf[0]; *nzero = rbuf[1]; *npos = rbuf[2];
  return(0);
}
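/*
   A minimal usage sketch of the routine above: once a Cholesky factor of a
   MATMPISBAIJSPOOLES matrix has been computed (e.g. through PCCHOLESKY), its
   inertia can be queried with MatGetInertia(). The factored matrix F below is
   an illustrative name; how it is obtained from the preconditioner is elided.

     Mat      F;                  // the factored matrix
     PetscInt nneg,nzero,npos;

     MatGetInertia(F,&nneg,&nzero,&npos);
     // identical global counts are returned on every process, because the
     // implementation above sums the per-process inertia with MPI_Allreduce
*/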
/* Note: the PETSc ordering r is ignored */
PetscErrorCode MatCholeskyFactorSymbolic_MPISBAIJSpooles(Mat A,IS r,MatFactorInfo *info,Mat *F)
{
  Mat         B;
  Mat_Spooles *lu;

  /* Create the factorization matrix */
  MatCreate(A->comm,&B);
  MatSetSizes(B,A->rmap.n,A->cmap.n,A->rmap.N,A->cmap.N);
  MatSetType(B,A->type_name);
  MatMPIAIJSetPreallocation(B,0,PETSC_NULL,0,PETSC_NULL);

  B->ops->choleskyfactornumeric = MatFactorNumeric_MPIAIJSpooles;
  B->ops->getinertia            = MatGetInertia_MPISBAIJSpooles;
  B->factor                     = FACTOR_CHOLESKY;

  lu                       = (Mat_Spooles*)(B->spptr);
  lu->options.pivotingflag = SPOOLES_NO_PIVOTING;
  lu->flg                  = DIFFERENT_NONZERO_PATTERN;
  lu->options.useQR        = PETSC_FALSE;
  lu->options.symflag      = SPOOLES_SYMMETRIC;   /* default */

  MPI_Comm_dup(A->comm,&(lu->comm_spooles));
  *F = B;
  return(0);
}
PetscErrorCode MatMPISBAIJSetPreallocation_MPISBAIJSpooles(Mat B,int bs,int d_nz,int *d_nnz,int o_nz,int *o_nnz)
{
  Mat         A;
  Mat_Spooles *lu = (Mat_Spooles*)B->spptr;

  /*
    After performing the MPISBAIJ preallocation, we need to convert the local diagonal block matrix
    into the Spooles type so that the block Jacobi preconditioner (for example) can use Spooles. I would
    like this to be done in the MatCreate routine, but the creation of this inner matrix requires
    block size information so that PETSc can determine the local sizes properly. The block size
    is set in this preallocation routine.
  */
  (*lu->MatPreallocate)(B,bs,d_nz,d_nnz,o_nz,o_nnz);
  A = ((Mat_MPISBAIJ*)B->data)->A;
  MatConvert_SeqSBAIJ_SeqSBAIJSpooles(A,MATSEQSBAIJSPOOLES,MAT_REUSE_MATRIX,&A);
  return(0);
}
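/*
   A minimal usage sketch of the creation/preallocation sequence described in the
   comment above. The block size must be supplied through the preallocation call,
   which is also where the local diagonal block is switched to the sequential
   Spooles type. The sizes and nonzero counts (n, 5, 2) are illustrative.

     Mat A;
     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
     MatSetType(A,MATMPISBAIJSPOOLES);
     MatMPISBAIJSetPreallocation(A,1,5,PETSC_NULL,2,PETSC_NULL);  // bs = 1; Spooles supports only block size 1
*/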
/* make Sun CC happy */
static void (*f)(void);
PetscErrorCode MatConvert_MPISBAIJ_MPISBAIJSpooles(Mat A,MatType type,MatReuse reuse,Mat *newmat)
{
  Mat         B = *newmat;
  Mat_Spooles *lu;

  if (reuse == MAT_INITIAL_MATRIX) {
    /* This routine is inherited, so we know the type is correct. */
    MatDuplicate(A,MAT_COPY_VALUES,&B);
  }

  PetscNew(Mat_Spooles,&lu);
  B->spptr = (void*)lu;

  lu->basetype                  = MATMPISBAIJ;
  lu->MatDuplicate              = A->ops->duplicate;
  lu->MatCholeskyFactorSymbolic = A->ops->choleskyfactorsymbolic;
  lu->MatLUFactorSymbolic       = A->ops->lufactorsymbolic;
  lu->MatView                   = A->ops->view;
  lu->MatAssemblyEnd            = A->ops->assemblyend;
  lu->MatDestroy                = A->ops->destroy;

  B->ops->duplicate              = MatDuplicate_Spooles;
  B->ops->choleskyfactorsymbolic = MatCholeskyFactorSymbolic_MPISBAIJSpooles;
  B->ops->assemblyend            = MatAssemblyEnd_MPISBAIJSpooles;
  B->ops->destroy                = MatDestroy_MPISBAIJSpooles;

  /* I really don't like needing to know the tag: MatMPISBAIJSetPreallocation_C */
  PetscObjectQueryFunction((PetscObject)B,"MatMPISBAIJSetPreallocation_C",&f);
  if (f) {
    lu->MatPreallocate = (PetscErrorCode (*)(Mat,int,int,int*,int,int*))f;
    PetscObjectComposeFunctionDynamic((PetscObject)B,"MatMPISBAIJSetPreallocation_C",
                                      "MatMPISBAIJSetPreallocation_MPISBAIJSpooles",
                                      MatMPISBAIJSetPreallocation_MPISBAIJSpooles);
  }
  PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpisbaijspooles_mpisbaij_C",
                                    "MatConvert_Spooles_Base",MatConvert_Spooles_Base);
  PetscObjectComposeFunctionDynamic((PetscObject)B,"MatConvert_mpisbaij_mpisbaijspooles_C",
                                    "MatConvert_MPISBAIJ_MPISBAIJSpooles",
                                    MatConvert_MPISBAIJ_MPISBAIJSpooles);

  PetscObjectChangeTypeName((PetscObject)B,MATMPISBAIJSPOOLES);
  *newmat = B;
  return(0);
}
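/*
   A minimal usage sketch of the conversion registered above: an assembled
   MATMPISBAIJ matrix can be switched to MATMPISBAIJSPOOLES in place (no data
   is copied) and converted back when Spooles is no longer wanted. The matrix
   A below is an illustrative name.

     Mat A;   // an assembled MATMPISBAIJ matrix
     MatConvert(A,MATMPISBAIJSPOOLES,MAT_REUSE_MATRIX,&A);   // enable the Spooles Cholesky solver
     ...
     MatConvert(A,MATMPISBAIJ,MAT_REUSE_MATRIX,&A);          // revert to the base type
*/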
/*MC
  MATMPISBAIJSPOOLES - MATMPISBAIJSPOOLES = "mpisbaijspooles" - a matrix type providing direct solvers (Cholesky)
  for distributed symmetric matrices via the external package Spooles.

  If Spooles is installed (see the manual for instructions on how to declare the existence of
  external packages), a matrix type can be constructed which invokes Spooles solvers.
  After calling MatCreate(...,A), simply call MatSetType(A,MATMPISBAIJSPOOLES), as sketched
  in the example following this manual page.

  This matrix type inherits from MATMPISBAIJ. As a result, MatMPISBAIJSetPreallocation() is
  supported for this matrix type. One can also call MatConvert() for an in-place conversion to or
  from the MATMPISBAIJ type without copying data.

  Options Database Keys:
+ -mat_type mpisbaijspooles - sets the matrix type to mpisbaijspooles during a call to MatSetFromOptions()
. -mat_spooles_tau <tau> - upper bound on the magnitude of the largest element in L or U
. -mat_spooles_seed <seed> - random number seed used for ordering
. -mat_spooles_msglvl <msglvl> - message output level
. -mat_spooles_ordering <BestOfNDandMS,MMD,MS,ND> - ordering used
. -mat_spooles_maxdomainsize <n> - maximum subgraph size used by Spooles orderings
. -mat_spooles_maxzeros <n> - maximum number of zeros inside a supernode
. -mat_spooles_maxsize <n> - maximum size of a supernode
. -mat_spooles_FrontMtxInfo <true,false> - print Spooles information about the computed factorization
. -mat_spooles_symmetryflag <0,1,2> - 0: SPOOLES_SYMMETRIC, 1: SPOOLES_HERMITIAN, 2: SPOOLES_NONSYMMETRIC
. -mat_spooles_patchAndGoFlag <0,1,2> - 0: no patch, 1: use PatchAndGo strategy 1, 2: use PatchAndGo strategy 2
. -mat_spooles_toosmall <dt> - drop tolerance for PatchAndGo strategy 1
. -mat_spooles_storeids <bool integer> - if nonzero, stores the row and column numbers where patches were applied in an IV object
. -mat_spooles_fudge <delta> - fudge factor for rescaling diagonals with PatchAndGo strategy 2
- -mat_spooles_storevalues <bool integer> - if nonzero and PatchAndGo strategy 2 is used, store the change in diagonal value in a DV object

  Level: beginner

.seealso: MATSEQSBAIJSPOOLES, MATSEQAIJSPOOLES, MATMPIAIJSPOOLES, PCCHOLESKY
M*/
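/*
   A minimal usage sketch for the manual page above: solving A x = b with the
   Spooles parallel Cholesky solver through KSP and PCCHOLESKY. Matrix assembly
   and vector setup are elided; n and the preallocation counts are illustrative.

     Mat A;
     Vec x,b;
     KSP ksp;
     PC  pc;

     MatCreate(PETSC_COMM_WORLD,&A);
     MatSetSizes(A,PETSC_DECIDE,PETSC_DECIDE,n,n);
     MatSetType(A,MATMPISBAIJSPOOLES);                 // inherits from MATMPISBAIJ
     MatMPISBAIJSetPreallocation(A,1,5,PETSC_NULL,2,PETSC_NULL);
     ... set the upper triangular entries, then MatAssemblyBegin/End ...

     KSPCreate(PETSC_COMM_WORLD,&ksp);
     KSPSetOperators(ksp,A,A,DIFFERENT_NONZERO_PATTERN);
     KSPGetPC(ksp,&pc);
     PCSetType(pc,PCCHOLESKY);                         // dispatches to the Spooles Cholesky factorization
     KSPSolve(ksp,b,x);
*/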
PetscErrorCode MatCreate_MPISBAIJSpooles(Mat A)
{
  MatSetType(A,MATMPISBAIJ);
  MatConvert_MPISBAIJ_MPISBAIJSpooles(A,MATMPISBAIJSPOOLES,MAT_REUSE_MATRIX,&A);
  return(0);
}