
4.4.1 Test program

The following is the test program, pthread_sendrecv.c, used in the previous experiment.
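Each process starts num_threads threads, and each thread receives its own duplicate of MPI_COMM_WORLD. Within every thread, a short message is circulated around the ring of processes NTIMES times; sends to and receives from MPI_PROC_NULL are included to exercise that path as well.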

/*
   (C) 2007 by Argonne National Laboratory.
       See COPYRIGHT in top-level directory.
*/
#include "mpi.h"
#include <stdio.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>

#define BUFLEN 512
#define NTIMES 100
#define MAX_THREADS 10

/*
    Concurrent send and recv by multiple threads on each process. 
*/
void *thd_sendrecv( void * );
void *thd_sendrecv( void *comm_ptr )
{
    MPI_Comm    comm;
    int         my_rank, num_procs, next, buffer_size, namelen, idx;
    char        buffer[BUFLEN], processor_name[MPI_MAX_PROCESSOR_NAME];
    MPI_Status  status;

    comm = *(MPI_Comm *) comm_ptr;

    MPI_Comm_size( comm, &num_procs );
    MPI_Comm_rank( comm, &my_rank );
    MPI_Get_processor_name( processor_name, &namelen );

    fprintf( stderr, "Process %d on %s\n", my_rank, processor_name );
    strcpy( buffer, "hello there" );
    buffer_size = strlen(buffer)+1;

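    /* arrange the processes in a ring: the last rank sends back to rank 0 */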
    if ( my_rank == num_procs-1 )
        next = 0;
    else
        next = my_rank+1;

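    /* circulate the message around the ring NTIMES times; rank 0 starts
       each round and receives the message after it has gone all the way
       around */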
    for ( idx = 0; idx < NTIMES; idx++ ) {
        if (my_rank == 0) {
            MPI_Send(buffer, buffer_size, MPI_CHAR, next, 99, comm);
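            /* a send to MPI_PROC_NULL succeeds immediately and transfers
               no data */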
            MPI_Send(buffer, buffer_size, MPI_CHAR, MPI_PROC_NULL, 299, comm);
            MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99,
                     comm, &status);
        }
        else {
            MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_ANY_SOURCE, 99,
                     comm, &status);
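            /* a receive from MPI_PROC_NULL also completes immediately */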
            MPI_Recv(buffer, BUFLEN, MPI_CHAR, MPI_PROC_NULL, 299,
                     comm, &status);
            MPI_Send(buffer, buffer_size, MPI_CHAR, next, 99, comm);
        }
        /* MPI_Barrier(comm); */
    }

    pthread_exit( NULL );
    return NULL;    /* not reached: pthread_exit() does not return */
}



int main( int argc, char *argv[] )
{
    MPI_Comm   comm[ MAX_THREADS ];
    pthread_t  thd_id[ MAX_THREADS ];
    int        my_rank, ii, provided;
    int        num_threads;

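    /* request full thread support; this program cannot run without it */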
    MPI_Init_thread( &argc, &argv, MPI_THREAD_MULTIPLE, &provided );
    if ( provided != MPI_THREAD_MULTIPLE ) {
        printf( "Aborting, MPI_THREAD_MULTIPLE is needed...\n" );
        MPI_Abort( MPI_COMM_WORLD, 1 );
    }

    MPI_Comm_rank( MPI_COMM_WORLD, &my_rank );

    if ( my_rank == 0 ) {
        if ( argc != 2 ) {
            printf( "Usage: %s num_threads\n", argv[0] );
            MPI_Abort( MPI_COMM_WORLD, 1 );
        }
        num_threads = atoi( argv[1] );
        if ( num_threads < 1 || num_threads > MAX_THREADS ) {
            printf( "num_threads must be between 1 and %d\n", MAX_THREADS );
            MPI_Abort( MPI_COMM_WORLD, 1 );
        }
    }
    /* make the thread count known on every process */
    MPI_Bcast( &num_threads, 1, MPI_INT, 0, MPI_COMM_WORLD );

    MPI_Barrier( MPI_COMM_WORLD );

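    /* give each thread its own duplicate of MPI_COMM_WORLD so that
       messages belonging to different threads cannot be matched
       across threads */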
    for ( ii=0; ii < num_threads; ii++ ) {
        MPI_Comm_dup( MPI_COMM_WORLD, &comm[ii] );
        pthread_create( &thd_id[ii], NULL, thd_sendrecv, (void *) &comm[ii] );
    }
        
    for ( ii=0; ii < num_threads; ii++ )
        pthread_join( thd_id[ii], NULL );

    MPI_Finalize();
    return 0;
}
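To build and run the program with an MPICH-style installation, something like the following should work (a sketch; the compiler wrapper and launcher names vary between MPI implementations):

mpicc -o pthread_sendrecv pthread_sendrecv.c -lpthread
mpiexec -n 4 ./pthread_sendrecv 4

The single command-line argument gives the number of threads started by each process and must not exceed MAX_THREADS.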