Arman Akbarian
UNIVERSITY OF BRITISH COLUMBIA
PHYSICS & ASTRONOMY DEPT.

#include <stdio.h>
#include <stdlib.h>   /* for exit() */
#include <unistd.h>   /* for sleep() */
#include <mpi.h>


#define SIZE 1000
int main(int argc, char *argv[]) {
   int  numprocs, //to store the number of processors used
        rank,     //to store the rank of the processor that runs the code
        namelen;  //to store the length of the name of the processor
   char processor_name[MPI_MAX_PROCESSOR_NAME]; //to store the name of the processor
   int source, dest, tag, count; //variables used in point-to-point communications
   int rc;  //to store the return code at various points
   char inmsg, outmsg; //variables used for point-to-point communication
   MPI_Status Stat;    //MPI type that stores the status of a received message
   int ary[SIZE];      //full array, initialized by the master task
   int tary[SIZE];     //local buffer for each task's portion of the array
   int offset, chunksize;
   int sum = 0, tsum;  //sum must start at 0, it is accumulated below
   int i;

/* MPI_Init:
 * must be called before any other MPI routine,
 * and only once; it initializes MPI
 * and makes the command-line arguments available to all the processes
 * (the program is typically launched with something like 'mpirun -np 4'
 * to run it on 4 processors)
 */
   MPI_Init(&argc, &argv);

/* MPI_COMM_WORLD:
 * is the handle (communicator) used to communicate between the processors;
 * it is required by most communication routines, see below:
 */


/* MPI_Comm_size:
 * determines the number of processors in the "MPI_COMM_WORLD"
 */
   MPI_Comm_size(MPI_COMM_WORLD, &numprocs);

   if (numprocs < 2) {
      printf("Quitting... Number of MPI tasks must be bigger than 1.\n");
      MPI_Abort(MPI_COMM_WORLD, 1); //abort all tasks with error code 1
      exit(1);
   }

/* MPI_Comm_rank:
 * determines the rank of the processor in the MPI_COMM_WORLD
 * each CPU has a "rank", an integer number starting from 0
 */
   MPI_Comm_rank(MPI_COMM_WORLD, &rank);
  
   if (rank == 0) {
      printf("Hello World! (from the master task)\n");
      printf("Total number of processors: %d\n",numprocs);
   }
  
/* MPI_Get_processor_name:
 * determines the name of the processor; the buffer must be
 * at least MPI_MAX_PROCESSOR_NAME characters long (the value is
 * implementation dependent, e.g. 256 in Open MPI);
 * the second argument returns the length of processor_name
 */
   MPI_Get_processor_name(processor_name, &namelen);
  
   if (rank != 0) {
      printf("Process %d on %s says Hello World!\n", rank, processor_name);
   }


/* MPI_Send:
 * sends point-to-point data (between two processors)
 * Format:
 * MPI_Send(
 *          &data,             (buffer holding the data to be sent)
 *          int count,         (number of elements in the buffer)
 *          MPI_Datatype type, (predefined MPI types, e.g. MPI_INT, MPI_DOUBLE, MPI_CHAR, ...)
 *          int dest,          (rank of the destination task)
 *          int tag,           (unique id for the msg)
 *          MPI_Comm comm      (the communicator, usually just MPI_COMM_WORLD)
 *         )
 */
   if (rank == 0) {
      dest = 1;
      outmsg = 'X';
      count = 1;
      tag = 100;
      printf("Sending Msg X from Proc 0 to 1...\n");
      rc = MPI_Send(&outmsg, count, MPI_CHAR, dest, tag, MPI_COMM_WORLD);
      /* rc is the return code, meant to capture possible errors;
       * however, by default the MPI library aborts the program when
       * a routine fails, so in practice rc is rarely useful
       */
      printf("Msg: %c is sent to processor %d\n",outmsg,dest);
      outmsg = 'Y';
      tag = 101;
      printf("Sending Msg Y from Proc 0 to 1 with Sync Send...\n");

/* MPI_Ssend:
 * is similar to MPI_Send, but the send does not complete until the
 * matching receive has started on the destination task
 */
      rc = MPI_Ssend(&outmsg, count, MPI_CHAR, dest, tag, MPI_COMM_WORLD);

      /* the next command is reached only after about 2 seconds: see the
       * sleep() call in the receiving part */
      printf("Msg: %c is sent to processor %d\n",outmsg,dest);
   }  
/* MPI_Recv:
 * the counterpart of MPI_Send, receives the point-to-point data;
 * the format mirrors MPI_Send, with an extra MPI_Status argument
 */
   else if (rank == 1) {
      source = 0;
      count = 1;
      tag = 100; //note that if you use a different tag, task 1 will not receive the msg
      rc = MPI_Recv(&inmsg, count, MPI_CHAR, source, tag, MPI_COMM_WORLD, &Stat);
      printf("Msg: %c is received from processor %d \n",inmsg, source);
      sleep(2); //this delay is why the MPI_Ssend on task 0 has to wait
      tag = 101;
      rc = MPI_Recv(&inmsg, count, MPI_CHAR, source, tag, MPI_COMM_WORLD, &Stat);
      printf("Msg: %c is received from processor %d \n",inmsg,source);
   }

/* Example: dividing an array into smaller portions, summing each
 * portion on a different processor, and sending the results back to
 * the master task:
 */

   if (SIZE % numprocs != 0) {
      printf("Quitting. %d is not divisible by %d\n",SIZE,numprocs);
      MPI_Abort(MPI_COMM_WORLD, 1);
   } else {
      chunksize = SIZE / numprocs;
   }

   if (rank == 0) {
      /* Initializing the array */
      for ( i = 0; i<SIZE; i++) {
         ary[i] = i+1;
      }
      printf("Array initialized...\n");
      /* Sending the portions of the array to each processor */
      printf("chunksize = %d \n",chunksize);
      for (i=1; i<numprocs; i++){
         tag = i;
         dest=i;
         offset = i*chunksize;
         /* Sending the offset first, so each proc knows which part
          * of the array its chunk corresponds to */
         MPI_Send(&offset,1,MPI_INT,dest,tag,MPI_COMM_WORLD);
         MPI_Send(&ary[offset],chunksize,MPI_INT,dest,tag,MPI_COMM_WORLD);
         printf("Data starting with value %d is sent to proc %d \n"
               ,ary[offset],dest);
      }
      /* The first chunk of array is summed by the master task */
      for (i=0; i<chunksize; i++){
         sum+= ary[i];
      }
      /* Receiving the partial sum from each processor */
      for (i=1; i<numprocs; i++){
         source = i;
         tag = i;
         MPI_Recv(&tsum,1,MPI_INT,source,tag,MPI_COMM_WORLD,&Stat);
         printf("Sum %d is received from proc %d\n",tsum,source);
         sum+=tsum;
      }
      /* And printing the final result */
      printf("Total sum is: %d\n",sum);
      /* End of the master task's work */
   } else {
      source = 0;
      tag = rank;
      /* Receiving the data from the master task */
      MPI_Recv(&offset,1,MPI_INT,source,tag,MPI_COMM_WORLD,&Stat);
      MPI_Recv(&tary[offset],chunksize,MPI_INT,source,tag,MPI_COMM_WORLD,&Stat);
      printf("Data starting with value %d is received by proc %d\n",tary[offset],rank);
      /* Doing the calculation on data */
      tsum = 0;
      for (i=offset; i<chunksize+offset; i++){
          tsum += tary[i];
      }
      dest = 0;
      /* Sending the partial sum back to the master task: */
      MPI_Send(&tsum,1,MPI_INT,dest,tag,MPI_COMM_WORLD);
   }
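
/* Aside: the manual send/receive loop above could also be written with MPI
 * collective routines; a minimal sketch, assuming the same chunksize and
 * the local buffer tary:
 *
 *    MPI_Scatter(ary, chunksize, MPI_INT, tary, chunksize, MPI_INT, 0, MPI_COMM_WORLD);
 *    for (tsum = 0, i = 0; i < chunksize; i++) tsum += tary[i];
 *    MPI_Reduce(&tsum, &sum, 1, MPI_INT, MPI_SUM, 0, MPI_COMM_WORLD);
 */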


/* MPI_Finalize:
 * finalizes the MPI environment; no MPI routine may be called after this:
 */

   MPI_Finalize();
   return 0;
}
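
To compile and run this example, use the compiler wrapper and launcher that come
with your MPI installation (e.g. Open MPI or MPICH); the file name below is just
a placeholder for whatever you saved the listing as:

   mpicc mpi_basics.c -o mpi_basics
   mpirun -np 4 ./mpi_basics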


last update: Wed May 11, 2016