Actual source code: tsfd.c
#define PETSCTS_DLL

#include "include/private/matimpl.h"
#include "include/private/tsimpl.h"
/*@C
   TSDefaultComputeJacobianColor - Computes the Jacobian using
   finite differences and coloring to exploit matrix sparsity.

   Collective on TS, Vec and Mat

   Input Parameters:
+  ts - the TS (timestepping) context
.  t - current time
.  x1 - location at which to evaluate the Jacobian
-  ctx - coloring context, where ctx must have type MatFDColoring,
         as created via MatFDColoringCreate()

   Output Parameters:
+  J - Jacobian matrix (not altered in this routine)
.  B - newly computed Jacobian matrix to use with the preconditioner (generally the same as J)
-  flag - flag indicating whether the matrix sparsity structure has changed

   Options Database Keys:
$  -mat_fd_coloring_freq <freq> - compute a new Jacobian only every <freq> nonlinear iterations

   Level: intermediate

.keywords: TS, finite differences, Jacobian, coloring, sparse

.seealso: TSSetJacobian(), MatFDColoringCreate(), MatFDColoringSetFunction()
@*/
PetscErrorCode TSDefaultComputeJacobianColor(TS ts,PetscReal t,Vec x1,Mat *J,Mat *B,MatStructure *flag,void *ctx)
{
  MatFDColoring color = (MatFDColoring) ctx;
  SNES          snes;
  PetscInt      freq,it;

  /*
     If we are not using SNES we have no way to know the current iteration.
  */
  TSGetSNES(ts,&snes);
  if (snes) {
    MatFDColoringGetFrequency(color,&freq);
    SNESGetIterationNumber(snes,&it);
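    /* With freq > 1 the Jacobian is rebuilt only on iterations where
       it % freq == 1; the existing preconditioner is reused otherwise */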
    if ((freq > 1) && ((it % freq) != 1)) {
      PetscInfo2(color,"Skipping Jacobian, it %D, freq %D\n",it,freq);
      *flag = SAME_PRECONDITIONER;
      goto end;
    } else {
      PetscInfo2(color,"Computing Jacobian, it %D, freq %D\n",it,freq);
      *flag = SAME_NONZERO_PATTERN;
    }
  }
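  /* Evaluate the finite-difference Jacobian into B; the coloring lets all
     columns of one color be perturbed with a single function evaluation */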
  MatFDColoringApplyTS(*B,color,t,x1,flag,ts);
  end:
  if (*J != *B) {
    MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);
    MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);
  }
  return(0);
}
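/*
   Illustrative sketch (not part of tsfd.c): one common way to wire
   TSDefaultComputeJacobianColor into a TS.  The matrix J, the RHS function
   rhsfunc, and the application context appctx are assumptions supplied by
   the caller; the coloring calls follow the MatGetColoring()/
   MatFDColoringCreate() API of this PETSc generation.
*/
#include "petscts.h"

PetscErrorCode ExampleSetupFDColoring(TS ts,Mat J,
                                      PetscErrorCode (*rhsfunc)(TS,PetscReal,Vec,Vec,void*),
                                      void *appctx,MatFDColoring *fdcoloring)
{
  ISColoring iscoloring;

  /* Color the sparsity pattern of J so that structurally independent
     columns can share one function evaluation */
  MatGetColoring(J,MATCOLORING_SL,&iscoloring);
  MatFDColoringCreate(J,iscoloring,fdcoloring);
  ISColoringDestroy(iscoloring);

  /* The coloring object differentiates the same RHS function the TS uses */
  MatFDColoringSetFunction(*fdcoloring,(PetscErrorCode (*)(void))rhsfunc,appctx);
  MatFDColoringSetFromOptions(*fdcoloring);

  /* Pass the coloring as the ctx argument expected by the routine above */
  TSSetRHSJacobian(ts,J,J,TSDefaultComputeJacobianColor,*fdcoloring);
  return(0);
}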
/*@C
   TSDefaultComputeJacobian - Computes the Jacobian using finite differences.

   Input Parameters:
+  ts - TS context
.  t - current time
.  xx1 - compute Jacobian at this point
-  ctx - application's function context, as passed to TSSetRHSJacobian()

   Output Parameters:
+  J - Jacobian
.  B - preconditioner matrix (generally the same as J)
-  flag - flag indicating whether the matrix nonzero structure has changed

   Notes:
   This routine is slow and expensive; it requires one right-hand-side
   function evaluation per column of the Jacobian and is not optimized.

   Sparse approximations using colorings are also available and
   would be a much better alternative!

   Level: intermediate

.seealso: TSDefaultComputeJacobianColor()
@*/
PetscErrorCode TSDefaultComputeJacobian(TS ts,PetscReal t,Vec xx1,Mat *J,Mat *B,MatStructure *flag,void *ctx)
{
  Vec         jj1,jj2,xx2;
  PetscInt    i,N,start,end,j;
  PetscScalar dx,*y,scale,*xx,wscale;
  PetscReal   amax,epsilon = PETSC_SQRT_MACHINE_EPSILON;
  PetscReal   dx_min = 1.e-16,dx_par = 1.e-1;
  MPI_Comm    comm;
  PetscTruth  assembled;
  VecDuplicate(xx1,&jj1);
  VecDuplicate(xx1,&jj2);
  VecDuplicate(xx1,&xx2);

  PetscObjectGetComm((PetscObject)xx1,&comm);
  MatAssembled(*J,&assembled);
  if (assembled) {
    MatZeroEntries(*J);
  }

  VecGetSize(xx1,&N);
  VecGetOwnershipRange(xx1,&start,&end);
  TSComputeRHSFunction(ts,ts->ptime,xx1,jj1);
  /* Compute Jacobian approximation, 1 column at a time.
     xx1 = current iterate, jj1 = F(xx1)
     xx2 = perturbed iterate, jj2 = F(xx2)
  */
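  /* Each column is the forward difference
        J(:,i) ~ (F(xx1 + dx*e_i) - F(xx1)) / dx,
     where e_i is the i-th unit vector and dx is chosen below */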
  for (i=0; i<N; i++) {
    VecCopy(xx1,xx2);
    if (i >= start && i < end) {
      VecGetArray(xx1,&xx);
      dx = xx[i-start];
      VecRestoreArray(xx1,&xx);
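      /* Guard against a zero (or tiny) entry: fall back to dx_par so the
         perturbation never divides by zero */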
#if !defined(PETSC_USE_COMPLEX)
      if (dx < dx_min && dx >= 0.0) dx = dx_par;
      else if (dx < 0.0 && dx > -dx_min) dx = -dx_par;
#else
      if (PetscAbsScalar(dx) < dx_min && PetscRealPart(dx) >= 0.0) dx = dx_par;
      else if (PetscRealPart(dx) < 0.0 && PetscAbsScalar(dx) < dx_min) dx = -dx_par;
#endif
      dx *= epsilon;
      wscale = 1.0/dx;
      VecSetValues(xx2,1,&i,&dx,ADD_VALUES);
    } else {
      wscale = 0.0;
    }
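    /* Only the owner of column i set a nonzero wscale; the reduction below
       therefore hands every process the single value 1/dx */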
    TSComputeRHSFunction(ts,t,xx2,jj2);
    VecAXPY(jj2,-1.0,jj1);
    /* Communicate scale to all processors */
    MPI_Allreduce(&wscale,&scale,1,MPIU_SCALAR,PetscSum_Op,comm);
    VecScale(jj2,scale);
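    /* Entries below 1.e-14 of the column's largest magnitude are treated
       as structural zeros and never inserted */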
    VecNorm(jj2,NORM_INFINITY,&amax); amax *= 1.e-14;
    VecGetArray(jj2,&y);
    for (j=start; j<end; j++) {
      if (PetscAbsScalar(y[j-start]) > amax) {
        MatSetValues(*J,1,&j,1,&i,y+j-start,INSERT_VALUES);
      }
    }
    VecRestoreArray(jj2,&y);
  }
  MatAssemblyBegin(*J,MAT_FINAL_ASSEMBLY);
  MatAssemblyEnd(*J,MAT_FINAL_ASSEMBLY);
  *flag = DIFFERENT_NONZERO_PATTERN;

  VecDestroy(jj1);
  VecDestroy(jj2);
  VecDestroy(xx2);

  return(0);
}
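/*
   Illustrative sketch (not part of tsfd.c): registering the dense
   finite-difference Jacobian with a TS.  The matrix J and the application
   context appctx are assumptions supplied by the caller; since the routine
   inserts any entry that survives the drop tolerance, J should be a type
   that tolerates an unknown sparsity pattern (e.g. AIJ or dense).
*/
#include "petscts.h"

PetscErrorCode ExampleSetupFDJacobian(TS ts,Mat J,void *appctx)
{
  /* appctx is forwarded to TSDefaultComputeJacobian as its ctx argument */
  TSSetRHSJacobian(ts,J,J,TSDefaultComputeJacobian,appctx);
  return(0);
}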