/* mpi.c */

  1: /*
  2:       This provides a few of the MPI-uni functions that cannot be implemented
  3:     with C macros
  4: */
#include "include/mpiuni/mpi.h"
#include "petsc.h"

  8: #if defined(PETSC_HAVE_STDLIB_H)
  9: #include <stdlib.h>
 10: #endif

/* Return codes for the stub routines (mirror the standard MPI values) */
#define MPI_SUCCESS 0
#define MPI_FAILURE 1
/* Scratch pointer used by the macro expansions in mpi.h to swallow arguments */
void    *MPIUNI_TMP        = 0;
/* Size in bytes of each supported datatype, indexed by the datatype codes
   used below (int, float, double, double-complex, char) */
int     MPIUNI_DATASIZE[5] = { sizeof(int),sizeof(float),sizeof(double),2*sizeof(double),sizeof(char)};
 16: /*
 17:        With MPI Uni there is only one communicator, which is called 1.
 18: */
/* Maximum number of communicator attributes that can be created */
#define MAX_ATTR 128

/* One entry in the attribute table kept for the single communicator */
typedef struct {
  void                *extra_state;   /* user state passed at keyval creation */
  void                *attribute_val; /* value stored by MPI_Attr_put */
  int                 active;         /* nonzero while the attribute is set */
  MPI_Delete_function *del;           /* callback invoked when attribute is deleted */
} MPI_Attr;

static MPI_Attr attr[MAX_ATTR];
/* Slot 0 is reserved for the built-in MPI_TAG_UB attribute, hence num_attr
   starts at 1; mpi_tag_ub is the value that attribute points at. */
static int      num_attr = 1,mpi_tag_ub = 100000000;

 31: #if defined(__cplusplus)
 33: #endif

 35: /* 
 36:    To avoid problems with prototypes to the system memcpy() it is duplicated here
 37: */
 38: int MPIUNI_Memcpy(void *a,const void* b,int n) {
 39:   int  i;
 40:   char *aa= (char*)a;
 41:   char *bb= (char*)b;

 43:   for (i=0; i<n; i++) aa[i] = bb[i];
 44:   return 0;
 45: }

 47: /*
 48:    Used to set the built-in MPI_TAG_UB attribute
 49: */
 50: static int Keyval_setup(void)
 51: {
 52:   attr[0].active        = 1;
 53:   attr[0].attribute_val = &mpi_tag_ub;
 54:   return 0;
 55: }

 57: /*
 58:          These functions are mapped to the Petsc_ name by ./mpi.h
 59: */
 60: int Petsc_MPI_Keyval_create(MPI_Copy_function *copy_fn,MPI_Delete_function *delete_fn,int *keyval,void *extra_state)
 61: {
 62:   if (num_attr >= MAX_ATTR) MPI_Abort(MPI_COMM_WORLD,1);

 64:   attr[num_attr].extra_state = extra_state;
 65:   attr[num_attr].del         = delete_fn;
 66:   *keyval                    = num_attr++;
 67:   return 0;
 68: }

 70: int Petsc_MPI_Keyval_free(int *keyval)
 71: {
 72:   attr[*keyval].active = 0;
 73:   return MPI_SUCCESS;
 74: }

 76: int Petsc_MPI_Attr_put(MPI_Comm comm,int keyval,void *attribute_val)
 77: {
 78:   attr[keyval].active        = 1;
 79:   attr[keyval].attribute_val = attribute_val;
 80:   return MPI_SUCCESS;
 81: }
 82: 
 83: int Petsc_MPI_Attr_delete(MPI_Comm comm,int keyval)
 84: {
 85:   if (attr[keyval].active && attr[keyval].del) {
 86:     (*(attr[keyval].del))(comm,keyval,attr[keyval].attribute_val,attr[keyval].extra_state);
 87:   }
 88:   attr[keyval].active        = 0;
 89:   attr[keyval].attribute_val = 0;
 90:   return MPI_SUCCESS;
 91: }

 93: int Petsc_MPI_Attr_get(MPI_Comm comm,int keyval,void *attribute_val,int *flag)
 94: {
 95:   if (!keyval) Keyval_setup();
 96:   *flag                   = attr[keyval].active;
 97:   *(void **)attribute_val = attr[keyval].attribute_val;
 98:   return MPI_SUCCESS;
 99: }

101: static int dups = 0;
102: int Petsc_MPI_Comm_dup(MPI_Comm comm,MPI_Comm *out)
103: {
104:   *out = comm;
105:   dups++;
106:   return 0;
107: }

109: int Petsc_MPI_Comm_free(MPI_Comm *comm)
110: {
111:   int i;

113:   if (--dups) return MPI_SUCCESS;
114:   for (i=0; i<num_attr; i++) {
115:     if (attr[i].active && attr[i].del) {
116:       (*attr[i].del)(*comm,i,attr[i].attribute_val,attr[i].extra_state);
117:     }
118:     attr[i].active = 0;
119:   }
120:   return MPI_SUCCESS;
121: }

123: int Petsc_MPI_Abort(MPI_Comm comm,int errorcode)
124: {
125:   abort();
126:   return MPI_SUCCESS;
127: }

129: /* --------------------------------------------------------------------------*/
130: 
/* State of the init/finalize life cycle: each transition may happen once. */
static int MPI_was_initialized = 0;
static int MPI_was_finalized   = 0;

/* Succeeds (returns 0) exactly once, and never after finalization.
   argc/argv are accepted for interface compatibility and ignored. */
int Petsc_MPI_Init(int *argc, char ***argv)
{
  if (MPI_was_initialized || MPI_was_finalized) return 1;
  MPI_was_initialized = 1;
  return 0;
}

/* Succeeds (returns 0) exactly once, and only after initialization. */
int Petsc_MPI_Finalize(void)
{
  if (MPI_was_finalized || !MPI_was_initialized) return 1;
  MPI_was_finalized = 1;
  return 0;
}

/* Reports whether Petsc_MPI_Init() has completed successfully. */
int Petsc_MPI_Initialized(int *flag)
{
  *flag = MPI_was_initialized;
  return 0;
}

/* Reports whether Petsc_MPI_Finalize() has completed successfully. */
int Petsc_MPI_Finalized(int *flag)
{
  *flag = MPI_was_finalized;
  return 0;
}

162: /* -------------------     Fortran versions of several routines ------------------ */

/* Map the C stub names below onto the linker names the Fortran compiler
   generates: either ALL-CAPS names or names without a trailing underscore. */
#if defined(PETSC_HAVE_FORTRAN_CAPS)
#define mpi_init_             MPI_INIT
#define mpi_finalize_         MPI_FINALIZE
#define mpi_comm_size_        MPI_COMM_SIZE
#define mpi_comm_rank_        MPI_COMM_RANK
#define mpi_abort_            MPI_ABORT
#define mpi_allreduce_        MPI_ALLREDUCE
#define mpi_barrier_          MPI_BARRIER
#define mpi_bcast_            MPI_BCAST
#define mpi_gather_           MPI_GATHER
#define mpi_allgather_        MPI_ALLGATHER
#define mpi_comm_split_       MPI_COMM_SPLIT
#elif !defined(PETSC_HAVE_FORTRAN_UNDERSCORE)
#define mpi_init_             mpi_init
#define mpi_finalize_         mpi_finalize
#define mpi_comm_size_        mpi_comm_size
#define mpi_comm_rank_        mpi_comm_rank
#define mpi_abort_            mpi_abort
#define mpi_allreduce_        mpi_allreduce
#define mpi_barrier_          mpi_barrier
#define mpi_bcast_            mpi_bcast
#define mpi_gather_           mpi_gather
#define mpi_allgather_        mpi_allgather
#define mpi_comm_split_       mpi_comm_split
#endif

/* Some Fortran compilers append a second underscore to names that already
   contain an underscore; this case overrides the mappings above. */
#if defined(PETSC_HAVE_FORTRAN_UNDERSCORE_UNDERSCORE)
#define mpi_init_             mpi_init__
#define mpi_finalize_         mpi_finalize__
#define mpi_comm_size_        mpi_comm_size__
#define mpi_comm_rank_        mpi_comm_rank__
#define mpi_abort_            mpi_abort__
#define mpi_allreduce_        mpi_allreduce__
#define mpi_barrier_          mpi_barrier__
#define mpi_bcast_            mpi_bcast__
#define mpi_gather_           mpi_gather__
#define mpi_allgather_        mpi_allgather__
#define mpi_comm_split_       mpi_comm_split__
#endif

204: void PETSC_STDCALL  mpi_init_(int *ierr)
205: {
206:   *Petsc_MPI_Init((int*)0, (char***)0);
207: }

209: void PETSC_STDCALL  mpi_finalize_(int *ierr)
210: {
211:   *Petsc_MPI_Finalize();
212: }

214: void PETSC_STDCALL mpi_comm_size_(MPI_Comm *comm,int *size,int *ierr)
215: {
216:   *size = 1;
217:   *0;
218: }

220: void PETSC_STDCALL mpi_comm_rank_(MPI_Comm *comm,int *rank,int *ierr)
221: {
222:   *rank=0;
223:   *ierr=MPI_SUCCESS;
224: }

226: void PETSC_STDCALL mpi_comm_split_(MPI_Comm *comm,int *color,int *key, MPI_Comm *newcomm, int *ierr)
227: {
228:   *newcomm = *comm;
229:   *ierr=MPI_SUCCESS;
230: }

232: void PETSC_STDCALL mpi_abort_(MPI_Comm *comm,int *errorcode,int *ierr)
233: {
234:   abort();
235:   *MPI_SUCCESS;
236: }

238: void PETSC_STDCALL mpi_allreduce_(void *sendbuf,void *recvbuf,int *count,int *datatype,int *op,int *comm,int *ierr)
239: {
240:   MPIUNI_Memcpy(recvbuf,sendbuf,(*count)*MPIUNI_DATASIZE[*datatype]);
241:   *MPI_SUCCESS;
242: }

244: void PETSC_STDCALL mpi_barrier_(MPI_Comm *comm,int *ierr)
245: {
246:   *MPI_SUCCESS;
247: }

249: void PETSC_STDCALL mpi_bcast_(void *buf,int *count,int *datatype,int *root,int *comm,int *ierr)
250: {
251:   *MPI_SUCCESS;
252: }


255: void PETSC_STDCALL mpi_gather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype, int *root,int *comm,int *ierr)
256: {
257:   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
258:   *MPI_SUCCESS;
259: }


262: void PETSC_STDCALL mpi_allgather_(void *sendbuf,int *scount,int *sdatatype, void* recvbuf, int* rcount, int* rdatatype,int *comm,int *ierr)
263: {
264:   MPIUNI_Memcpy(recvbuf,sendbuf,(*scount)*MPIUNI_DATASIZE[*sdatatype]);
265:   *MPI_SUCCESS;
266: }

#if defined(__cplusplus)
/* NOTE(review): this closes an extern "C" block for C++ builds, but the
   matching `extern "C" {` appears to be missing inside the
   #if defined(__cplusplus) near the top of the file — confirm. */
}
#endif