Actual source code: mpiu.c
#define PETSC_DLL

#include "petsc.h"

/*
      Note that a tag of 0 is OK because comm is a private communicator
   generated below just for these routines.
*/
PetscErrorCode PetscSequentialPhaseBegin_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);
  MPI_Comm_rank(comm,&rank);
  /* Wait for a zero-length token from the previous process before proceeding */
  if (rank) {
    MPI_Recv(0,0,MPI_INT,rank-1,tag,comm,&status);
  }
  /* Send to the next process in the group unless we are the last process */
  if ((rank % ng) < ng - 1 && rank != size - 1) {
    MPI_Send(0,0,MPI_INT,rank + 1,tag,comm);
  }
  return(0);
}
PetscErrorCode PetscSequentialPhaseEnd_Private(MPI_Comm comm,int ng)
{
  PetscMPIInt rank,size,tag = 0;
  MPI_Status  status;

  MPI_Comm_rank(comm,&rank);
  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);
  /* Send to the first process in the next group */
  if ((rank % ng) == ng - 1 || rank == size - 1) {
    MPI_Send(0,0,MPI_INT,(rank + 1) % size,tag,comm);
  }
  /* The first process absorbs the token sent by the last one, completing the
     cycle so the sequential phase can be used again */
  if (!rank) {
    MPI_Recv(0,0,MPI_INT,size-1,tag,comm,&status);
  }
  return(0);
}
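
For readers following the relay protocol implemented by the two private routines above, here is a minimal, self-contained sketch (not part of mpiu.c) of the same token-passing pattern written directly against MPI with a group size of 1: each rank waits for a zero-length message from its predecessor, does its work, passes the token on, and rank 0 absorbs the token returned by the last rank.

#include <mpi.h>
#include <stdio.h>

int main(int argc,char **argv)
{
  int        rank,size,tag = 0;
  MPI_Status status;

  MPI_Init(&argc,&argv);
  MPI_Comm_rank(MPI_COMM_WORLD,&rank);
  MPI_Comm_size(MPI_COMM_WORLD,&size);

  /* "Begin": wait for the token from the previous rank */
  if (size > 1 && rank) MPI_Recv(0,0,MPI_INT,rank-1,tag,MPI_COMM_WORLD,&status);

  printf("rank %d in the sequential section\n",rank);
  fflush(stdout);

  if (size > 1) {
    /* "End": pass the token on; the last rank sends it back to rank 0 */
    MPI_Send(0,0,MPI_INT,(rank+1)%size,tag,MPI_COMM_WORLD);
    if (!rank) MPI_Recv(0,0,MPI_INT,size-1,tag,MPI_COMM_WORLD,&status);
  }

  MPI_Finalize();
  return 0;
}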
/* ---------------------------------------------------------------------*/
/*
  The variable Petsc_Seq_keyval is used to indicate an MPI attribute that
  is attached to a communicator that manages the sequential phase code below.
*/
static int Petsc_Seq_keyval = MPI_KEYVAL_INVALID;
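
The comment above refers to MPI's communicator attribute caching. The following standalone sketch (not part of mpiu.c; the keyval name Example_keyval is made up for illustration) shows the same create/put/get/delete pattern, using the same MPI-1 calls that PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() use below.

#include <mpi.h>
#include <stdio.h>
#include <stdlib.h>

static int Example_keyval = MPI_KEYVAL_INVALID;  /* analogous to Petsc_Seq_keyval */

int main(int argc,char **argv)
{
  int *stored,*fetched,flag;

  MPI_Init(&argc,&argv);

  /* Create the keyval lazily, once per process, as the code below does */
  if (Example_keyval == MPI_KEYVAL_INVALID) {
    MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Example_keyval,0);
  }

  /* Attach a heap-allocated value to the communicator ... */
  stored  = (int*)malloc(sizeof(int));
  *stored = 42;
  MPI_Attr_put(MPI_COMM_WORLD,Example_keyval,(void*)stored);

  /* ... and retrieve it later from the same communicator */
  MPI_Attr_get(MPI_COMM_WORLD,Example_keyval,(void**)&fetched,&flag);
  if (flag) printf("cached value: %d\n",*fetched);

  /* Remove the attribute and free the value, as PetscSequentialPhaseEnd() does */
  MPI_Attr_delete(MPI_COMM_WORLD,Example_keyval);
  free(stored);

  MPI_Finalize();
  return 0;
}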
/*@
   PetscSequentialPhaseBegin - Begins a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group. This many processes are allowed to execute
   at the same time (usually 1)

   Level: intermediate

   Notes:
   PetscSequentialPhaseBegin() and PetscSequentialPhaseEnd() provide a
   way to force a section of code to be executed by the processes in
   rank order.  Typically, this is done with
.vb
      PetscSequentialPhaseBegin(comm, 1);
      <code to be executed sequentially>
      PetscSequentialPhaseEnd(comm, 1);
.ve

   Often, the sequential code contains output statements (e.g., printf) to
   be executed.  Note that you may need to flush the I/O buffers before
   calling PetscSequentialPhaseEnd().  Also, note that some systems do
   not propagate I/O in any order to the controlling terminal (in other words,
   even if you flush the output, you may not get the data in the order
   that you want).

.seealso: PetscSequentialPhaseEnd()

   Concepts: sequential stage

@*/
PetscErrorCode PetscSequentialPhaseBegin(MPI_Comm comm,int ng)
{
  PetscMPIInt size;
  MPI_Comm    local_comm,*addr_local_comm;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Get the private communicator for the sequential operations */
  if (Petsc_Seq_keyval == MPI_KEYVAL_INVALID) {
    MPI_Keyval_create(MPI_NULL_COPY_FN,MPI_NULL_DELETE_FN,&Petsc_Seq_keyval,0);
  }
  /* Duplicate the communicator and cache it as an attribute on the original,
     so PetscSequentialPhaseEnd() can find it later */
  MPI_Comm_dup(comm,&local_comm);
  PetscMalloc(sizeof(MPI_Comm),&addr_local_comm);
  *addr_local_comm = local_comm;
  MPI_Attr_put(comm,Petsc_Seq_keyval,(void*)addr_local_comm);
  PetscSequentialPhaseBegin_Private(local_comm,ng);
  return(0);
}
/*@
   PetscSequentialPhaseEnd - Ends a sequential section of code.

   Collective on MPI_Comm

   Input Parameters:
+  comm - Communicator to sequentialize.
-  ng   - Number in processor group. This many processes are allowed to execute
   at the same time (usually 1)

   Level: intermediate

   Notes:
   See PetscSequentialPhaseBegin() for more details.

.seealso: PetscSequentialPhaseBegin()

   Concepts: sequential stage

@*/
PetscErrorCode PetscSequentialPhaseEnd(MPI_Comm comm,int ng)
{
  PetscMPIInt size,flag;
  MPI_Comm    local_comm,*addr_local_comm;

  MPI_Comm_size(comm,&size);
  if (size == 1) return(0);

  /* Retrieve the private communicator cached by PetscSequentialPhaseBegin() */
  MPI_Attr_get(comm,Petsc_Seq_keyval,(void **)&addr_local_comm,&flag);
  if (!flag) {
    SETERRQ(PETSC_ERR_ARG_INCOMP,"Wrong MPI communicator; must pass in one used with PetscSequentialPhaseBegin()");
  }
  local_comm = *addr_local_comm;

  PetscSequentialPhaseEnd_Private(local_comm,ng);

  /* Release the private communicator and the attribute that stored it */
  PetscFree(addr_local_comm);
  MPI_Comm_free(&local_comm);
  MPI_Attr_delete(comm,Petsc_Seq_keyval);
  return(0);
}
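
To make the manual-page example above concrete, here is a minimal, self-contained sketch of how an application might call these routines (this program is not part of mpiu.c and assumes a standard PETSc installation of this vintage): each process prints its message in rank order.

#include "petsc.h"
#include <stdio.h>

int main(int argc,char **argv)
{
  PetscMPIInt rank;

  PetscInitialize(&argc,&argv,(char*)0,(char*)0);
  MPI_Comm_rank(PETSC_COMM_WORLD,&rank);

  PetscSequentialPhaseBegin(PETSC_COMM_WORLD,1);
  printf("Hello from rank %d\n",rank);
  fflush(stdout);   /* flush before ending the phase, as the Notes above advise */
  PetscSequentialPhaseEnd(PETSC_COMM_WORLD,1);

  PetscFinalize();
  return 0;
}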