Actual source code: fretrieve.c

petsc-3.3-p7 2013-05-11
/*
      Code for opening and closing files.
*/
#include <petscsys.h>
#if defined(PETSC_HAVE_PWD_H)
#include <pwd.h>
#endif
#include <ctype.h>
#include <sys/types.h>
#include <sys/stat.h>
#if defined(PETSC_HAVE_UNISTD_H)
#include <unistd.h>
#endif
#if defined(PETSC_HAVE_STDLIB_H)
#include <stdlib.h>
#endif
#if defined(PETSC_HAVE_SYS_UTSNAME_H)
#include <sys/utsname.h>
#endif
#include <fcntl.h>
#include <time.h>
#if defined(PETSC_HAVE_SYS_SYSTEMINFO_H)
#include <sys/systeminfo.h>
#endif

EXTERN_C_BEGIN
/*
   Private routine to delete tmp/shared storage

   This is called by MPI, not by users.

   Note: this is declared extern "C" because it is passed to MPI_Keyval_create()
*/
PetscMPIInt  MPIAPI Petsc_DelTmpShared(MPI_Comm comm,PetscMPIInt keyval,void *count_val,void *extra_state)
{
  PetscErrorCode ierr;

  PetscFunctionBegin;
  ierr = PetscInfo1(0,"Deleting tmp/shared data in an MPI_Comm %ld\n",(long)comm);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
  ierr = PetscFree(count_val);if (ierr) PetscFunctionReturn((PetscMPIInt)ierr);
  PetscFunctionReturn(MPI_SUCCESS);
}
EXTERN_C_END
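
/*
   Illustrative sketch (added; not part of the original fretrieve.c): this is roughly
   how the delete callback above is used further down in this file.  A keyval is
   created once with MPI_Keyval_create(), a malloc'ed value is attached to the
   communicator with MPI_Attr_put(), and MPI invokes Petsc_DelTmpShared() to free it
   when the communicator is destroyed.  The names Example_keyval and
   ExampleAttachCachedValue are hypothetical.
*/
#if 0
static PetscMPIInt Example_keyval = MPI_KEYVAL_INVALID;

static PetscErrorCode ExampleAttachCachedValue(MPI_Comm comm,PetscMPIInt value)
{
  PetscErrorCode ierr;
  PetscMPIInt    *valp;

  PetscFunctionBegin;
  if (Example_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Example_keyval,0);CHKERRQ(ierr);
  }
  ierr = PetscMalloc(sizeof(PetscMPIInt),&valp);CHKERRQ(ierr);  /* freed later by the delete callback */
  *valp = value;
  ierr = MPI_Attr_put(comm,Example_keyval,valp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif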

/*@C
   PetscGetTmp - Gets the name of the tmp directory

   Collective on MPI_Comm

   Input Parameters:
+  comm - MPI_Communicator that may share /tmp
-  len - length of string to hold name

   Output Parameters:
.  dir - directory name

   Options Database Keys:
+    -shared_tmp
.    -not_shared_tmp
-    -tmp tmpdir

   Environmental Variables:
+     PETSC_SHARED_TMP
.     PETSC_NOT_SHARED_TMP
-     PETSC_TMP

   Level: developer

   If the environmental variable PETSC_TMP is set it will use this directory
   as the "/tmp" directory.

@*/
PetscErrorCode  PetscGetTmp(MPI_Comm comm,char dir[],size_t len)
{
  PetscErrorCode ierr;
  PetscBool      flg;

  PetscFunctionBegin;
  ierr = PetscOptionsGetenv(comm,"PETSC_TMP",dir,len,&flg);CHKERRQ(ierr);
  if (!flg) {
    ierr = PetscStrncpy(dir,"/tmp",len);CHKERRQ(ierr);
  }
  PetscFunctionReturn(0);
}
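
/*
   Usage sketch (added for illustration, not part of the original source): query the
   effective tmp directory on a communicator.  Assumes a standard PETSc program
   between PetscInitialize() and PetscFinalize(); the function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleShowTmp(MPI_Comm comm)
{
  PetscErrorCode ierr;
  char           tmp[PETSC_MAX_PATH_LEN];

  PetscFunctionBegin;
  ierr = PetscGetTmp(comm,tmp,PETSC_MAX_PATH_LEN);CHKERRQ(ierr);  /* "/tmp" unless PETSC_TMP is set */
  ierr = PetscInfo1(0,"Using tmp directory %s\n",tmp);CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
#endif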

/*@C
   PetscSharedTmp - Determines if all processors in a communicator share a
         /tmp or have different ones.

   Collective on MPI_Comm

   Input Parameters:
.  comm - MPI_Communicator that may share /tmp

   Output Parameters:
.  shared - PETSC_TRUE or PETSC_FALSE

   Options Database Keys:
+    -shared_tmp
.    -not_shared_tmp
-    -tmp tmpdir

   Environmental Variables:
+     PETSC_SHARED_TMP
.     PETSC_NOT_SHARED_TMP
-     PETSC_TMP

   Level: developer

   Notes:
   Stores the status as an MPI attribute so it does not have
   to be redetermined each time.

   Assumes that all processors in a communicator either
     1) have a common /tmp or
     2) each has a separate /tmp
   Eventually we can write a fancier one that determines which processors
   share a common /tmp.

   This will be very slow on runs with a large number of processors since
   it requires O(p*p) file opens.

   If the environmental variable PETSC_TMP is set it will use this directory
   as the "/tmp" directory.

@*/
PetscErrorCode  PetscSharedTmp(MPI_Comm comm,PetscBool  *shared)
{
  PetscErrorCode     ierr;
  PetscMPIInt        size,rank,*tagvalp,sum,cnt,i;
  PetscBool          flg,iflg;
  FILE               *fd;
  static PetscMPIInt Petsc_Tmp_keyval = MPI_KEYVAL_INVALID;
  int                err;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetenv(comm,"PETSC_SHARED_TMP",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_TMP",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_FALSE;
    PetscFunctionReturn(0);
  }

  if (Petsc_Tmp_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_Tmp_keyval,0);CHKERRQ(ierr);
  }

  ierr = MPI_Attr_get(comm,Petsc_Tmp_keyval,(void**)&tagvalp,(int*)&iflg);CHKERRQ(ierr);
  if (!iflg) {
    char filename[PETSC_MAX_PATH_LEN],tmpname[PETSC_MAX_PATH_LEN];

    /* This communicator does not yet have a shared tmp attribute */
    ierr = PetscMalloc(sizeof(PetscMPIInt),&tagvalp);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_Tmp_keyval,tagvalp);CHKERRQ(ierr);

    ierr = PetscOptionsGetenv(comm,"PETSC_TMP",tmpname,238,&iflg);CHKERRQ(ierr);
    if (!iflg) {
      ierr = PetscStrcpy(filename,"/tmp");CHKERRQ(ierr);
    } else {
      ierr = PetscStrcpy(filename,tmpname);CHKERRQ(ierr);
    }

    ierr = PetscStrcat(filename,"/petsctestshared");CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

    /* each processor creates a /tmp file and all the later ones check */
    /* this makes sure no subset of processors is shared */
    *shared = PETSC_FALSE;
    for (i=0; i<size-1; i++) {
      if (rank == i) {
        fd = fopen(filename,"w");
        if (!fd) {
          SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
        }
        err = fclose(fd);
        if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
      }
      ierr = MPI_Barrier(comm);CHKERRQ(ierr);
      if (rank >= i) {
        fd = fopen(filename,"r");
        if (fd) cnt = 1; else cnt = 0;
        if (fd) {
          err = fclose(fd);
          if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
        }
      } else {
        cnt = 0;
      }
      ierr = MPI_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
      if (rank == i) {
        unlink(filename);
      }

      if (sum == size) {
        *shared = PETSC_TRUE;
        break;
      } else if (sum != 1) {
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Subset of processes share /tmp ");
      }
    }
    *tagvalp = (int)*shared;
    ierr = PetscInfo2(0,"processors %s %s\n",(*shared) ? "share":"do NOT share",(iflg ? tmpname : "/tmp"));CHKERRQ(ierr);
  } else {
    *shared = (PetscBool) *tagvalp;
  }
  PetscFunctionReturn(0);
}
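
/*
   Usage sketch (added for illustration, not part of the original file): decide whether
   only one rank needs to create a scratch file in /tmp or every rank does.  The
   function name is hypothetical.
*/
#if 0
static PetscErrorCode ExampleScratchFileDecision(MPI_Comm comm)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscBool      shared;

  PetscFunctionBegin;
  ierr = PetscSharedTmp(comm,&shared);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!shared || !rank) {
    /* either every rank has a private /tmp, or this is rank 0 of a shared /tmp;
       in both cases this process is responsible for creating the scratch file */
  }
  PetscFunctionReturn(0);
}
#endif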

/*@C
   PetscSharedWorkingDirectory - Determines if all processors in a communicator share a
         working directory or have different ones.

   Collective on MPI_Comm

   Input Parameters:
.  comm - MPI_Communicator that may share working directory

   Output Parameters:
.  shared - PETSC_TRUE or PETSC_FALSE

   Options Database Keys:
+    -shared_working_directory
-    -not_shared_working_directory

   Environmental Variables:
+     PETSC_SHARED_WORKING_DIRECTORY
-     PETSC_NOT_SHARED_WORKING_DIRECTORY

   Level: developer

   Notes:
   Stores the status as an MPI attribute so it does not have
   to be redetermined each time.

   Assumes that all processors in a communicator either
     1) have a common working directory or
     2) each has a separate working directory
   Eventually we can write a fancier one that determines which processors
   share a common working directory.

   This will be very slow on runs with a large number of processors since
   it requires O(p*p) file opens.

@*/
PetscErrorCode  PetscSharedWorkingDirectory(MPI_Comm comm,PetscBool  *shared)
{
  PetscErrorCode     ierr;
  PetscMPIInt        size,rank,*tagvalp,sum,cnt,i;
  PetscBool          flg,iflg;
  FILE               *fd;
  static PetscMPIInt Petsc_WD_keyval = MPI_KEYVAL_INVALID;
  int                err;

  PetscFunctionBegin;
  ierr = MPI_Comm_size(comm,&size);CHKERRQ(ierr);
  if (size == 1) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetenv(comm,"PETSC_SHARED_WORKING_DIRECTORY",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_TRUE;
    PetscFunctionReturn(0);
  }

  ierr = PetscOptionsGetenv(comm,"PETSC_NOT_SHARED_WORKING_DIRECTORY",PETSC_NULL,0,&flg);CHKERRQ(ierr);
  if (flg) {
    *shared = PETSC_FALSE;
    PetscFunctionReturn(0);
  }

  if (Petsc_WD_keyval == MPI_KEYVAL_INVALID) {
    ierr = MPI_Keyval_create(MPI_NULL_COPY_FN,Petsc_DelTmpShared,&Petsc_WD_keyval,0);CHKERRQ(ierr);
  }

  ierr = MPI_Attr_get(comm,Petsc_WD_keyval,(void**)&tagvalp,(int*)&iflg);CHKERRQ(ierr);
  if (!iflg) {
    char filename[PETSC_MAX_PATH_LEN];

    /* This communicator does not yet have a shared attribute */
    ierr = PetscMalloc(sizeof(PetscMPIInt),&tagvalp);CHKERRQ(ierr);
    ierr = MPI_Attr_put(comm,Petsc_WD_keyval,tagvalp);CHKERRQ(ierr);

    ierr = PetscGetWorkingDirectory(filename,240);CHKERRQ(ierr);
    ierr = PetscStrcat(filename,"/petsctestshared");CHKERRQ(ierr);
    ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);

    /* each processor creates a file and all the later ones check */
    /* this makes sure no subset of processors is shared */
    *shared = PETSC_FALSE;
    for (i=0; i<size-1; i++) {
      if (rank == i) {
        fd = fopen(filename,"w");
        if (!fd) SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_FILE_OPEN,"Unable to open test file %s",filename);
        err = fclose(fd);
        if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
      }
      ierr = MPI_Barrier(comm);CHKERRQ(ierr);
      if (rank >= i) {
        fd = fopen(filename,"r");
        if (fd) cnt = 1; else cnt = 0;
        if (fd) {
          err = fclose(fd);
          if (err) SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SYS,"fclose() failed on file");
        }
      } else {
        cnt = 0;
      }
      ierr = MPI_Allreduce(&cnt,&sum,1,MPI_INT,MPI_SUM,comm);CHKERRQ(ierr);
      if (rank == i) {
        unlink(filename);
      }

      if (sum == size) {
        *shared = PETSC_TRUE;
        break;
      } else if (sum != 1) {
        SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Subset of processes share working directory");
      }
    }
    *tagvalp = (int)*shared;
  } else {
    *shared = (PetscBool) *tagvalp;
  }
  ierr = PetscInfo1(0,"processors %s working directory\n",(*shared) ? "shared" : "do NOT share");CHKERRQ(ierr);
  PetscFunctionReturn(0);
}
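
/*
   Usage sketch (added for illustration, not part of the original file): when the
   working directory is shared, ranks writing local output need distinct file names;
   when it is not shared, a fixed name is already private to each rank.  The function
   name and the file name "output.log" are hypothetical.
*/
#if 0
static PetscErrorCode ExampleLocalOutputName(MPI_Comm comm,char name[],size_t len)
{
  PetscErrorCode ierr;
  PetscMPIInt    rank;
  PetscBool      shared;

  PetscFunctionBegin;
  ierr = PetscSharedWorkingDirectory(comm,&shared);CHKERRQ(ierr);
  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (shared) {
    ierr = PetscSNPrintf(name,len,"output.%d.log",(int)rank);CHKERRQ(ierr);  /* per-rank name */
  } else {
    ierr = PetscStrncpy(name,"output.log",len);CHKERRQ(ierr);                /* directory is private */
  }
  PetscFunctionReturn(0);
}
#endif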


/*@C
    PetscFileRetrieve - Obtains a library from a URL or a compressed file
        and copies it into local disk space, uncompressed.

    Collective on MPI_Comm

    Input Parameters:
+   comm     - processors accessing the library
.   libname  - name of library, including entire URL (with or without .gz)
-   llen     - length of llibname

    Output Parameters:
+   llibname - name of local copy of library
-   found    - PETSC_TRUE if the file was found and retrieved

    Level: developer

@*/
PetscErrorCode  PetscFileRetrieve(MPI_Comm comm,const char libname[],char llibname[],size_t llen,PetscBool  *found)
{
  char              buf[1024],tmpdir[PETSC_MAX_PATH_LEN],urlget[PETSC_MAX_PATH_LEN],*par;
  const char        *pdir;
  FILE              *fp;
  PetscErrorCode    ierr;
  int               i;
  PetscMPIInt       rank;
  size_t            len = 0;
  PetscBool         flg1,flg2,flg3,sharedtmp,exists;

  PetscFunctionBegin;
  *found = PETSC_FALSE;

  /* if file does not have an ftp:// or http:// or .gz then need not process file */
  ierr = PetscStrstr(libname,".gz",&par);CHKERRQ(ierr);
  if (par) {ierr = PetscStrlen(par,&len);CHKERRQ(ierr);}

  ierr = PetscStrncmp(libname,"ftp://",6,&flg1);CHKERRQ(ierr);
  ierr = PetscStrncmp(libname,"http://",7,&flg2);CHKERRQ(ierr);
  ierr = PetscStrncmp(libname,"file://",7,&flg3);CHKERRQ(ierr);
  if (!flg1 && !flg2 && !flg3 && (!par || len != 3)) {
    ierr = PetscStrncpy(llibname,libname,llen);CHKERRQ(ierr);
    ierr = PetscTestFile(libname,'r',found);CHKERRQ(ierr);
    if (*found) {
      ierr = PetscInfo1(PETSC_NULL,"Found file %s\n",libname);CHKERRQ(ierr);
    } else {
      ierr = PetscInfo1(PETSC_NULL,"Did not find file %s\n",libname);CHKERRQ(ierr);
    }
    PetscFunctionReturn(0);
  }

  /* Determine if all processors share a common /tmp */
  ierr = PetscSharedTmp(comm,&sharedtmp);CHKERRQ(ierr);
  ierr = PetscOptionsGetenv(comm,"PETSC_TMP",tmpdir,PETSC_MAX_PATH_LEN,&flg1);CHKERRQ(ierr);

  ierr = MPI_Comm_rank(comm,&rank);CHKERRQ(ierr);
  if (!rank || !sharedtmp) {

    /* Construct the script to get URL file */
    ierr = PetscGetPetscDir(&pdir);CHKERRQ(ierr);
    ierr = PetscStrcpy(urlget,pdir);CHKERRQ(ierr);
    ierr = PetscStrcat(urlget,"/bin/urlget");CHKERRQ(ierr);
    ierr = PetscTestFile(urlget,'r',&exists);CHKERRQ(ierr);
    if (!exists) {
      ierr = PetscTestFile("urlget",'r',&exists);CHKERRQ(ierr);
      if (!exists) {
        SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"Cannot locate PETSc script urlget in %s or current directory",urlget);
      }
      ierr = PetscStrcpy(urlget,"urlget");CHKERRQ(ierr);
    }
    ierr = PetscStrcat(urlget," ");CHKERRQ(ierr);

    /* are we using an alternative /tmp? */
    if (flg1) {
      ierr = PetscStrcat(urlget,"-tmp ");CHKERRQ(ierr);
      ierr = PetscStrcat(urlget,tmpdir);CHKERRQ(ierr);
      ierr = PetscStrcat(urlget," ");CHKERRQ(ierr);
    }

    ierr = PetscStrcat(urlget,libname);CHKERRQ(ierr);
    ierr = PetscStrcat(urlget," 2>&1 ");CHKERRQ(ierr);

#if defined(PETSC_HAVE_POPEN)
    ierr = PetscPOpen(PETSC_COMM_SELF,PETSC_NULL,urlget,"r",&fp);CHKERRQ(ierr);
#else
    SETERRQ(PETSC_COMM_SELF,PETSC_ERR_SUP_SYS,"Cannot run external programs on this machine");
#endif
    if (!fgets(buf,1024,fp)) {
      SETERRQ1(PETSC_COMM_SELF,PETSC_ERR_PLIB,"No output from ${PETSC_DIR}/bin/urlget in getting file %s",libname);
    }
    ierr = PetscInfo1(0,"Message back from urlget: %s\n",buf);CHKERRQ(ierr);

    ierr = PetscStrncmp(buf,"Error",5,&flg1);CHKERRQ(ierr);
    ierr = PetscStrncmp(buf,"Traceback",9,&flg2);CHKERRQ(ierr);
#if defined(PETSC_HAVE_POPEN)
    ierr = PetscPClose(PETSC_COMM_SELF,fp);CHKERRQ(ierr);
#endif
    if (flg1 || flg2) {
      *found = PETSC_FALSE;
    } else {
      *found = PETSC_TRUE;

      /* Check for \n and make it 0 */
      for (i=0; i<1024; i++) {
        if (buf[i] == '\n') {
          buf[i] = 0;
          break;
        }
      }
      ierr = PetscStrncpy(llibname,buf,llen);CHKERRQ(ierr);
    }
  }
  if (sharedtmp) { /* send library name to all processors */
    ierr = MPI_Bcast(found,1,MPI_INT,0,comm);CHKERRQ(ierr);
    if (*found) {
      ierr = MPI_Bcast(llibname,llen,MPI_CHAR,0,comm);CHKERRQ(ierr);
      ierr = MPI_Bcast(found,1,MPI_INT,0,comm);CHKERRQ(ierr);
    }
  }
  PetscFunctionReturn(0);
}
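
/*
   Usage sketch (added for illustration, not part of the original file): retrieve a
   possibly remote or gzipped file into local storage before opening it.  The URL
   and the function name are hypothetical placeholders.
*/
#if 0
static PetscErrorCode ExampleRetrieve(MPI_Comm comm)
{
  PetscErrorCode ierr;
  char           localname[PETSC_MAX_PATH_LEN];
  PetscBool      found;

  PetscFunctionBegin;
  ierr = PetscFileRetrieve(comm,"http://example.org/data/matrix.dat.gz",localname,PETSC_MAX_PATH_LEN,&found);CHKERRQ(ierr);
  if (!found) SETERRQ(comm,PETSC_ERR_FILE_OPEN,"Could not retrieve file");
  /* localname now holds the name of an uncompressed local copy of the file */
  PetscFunctionReturn(0);
}
#endif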