3.12.7. Examples



The following examples illustrate the use of derived datatypes.


Example: Send and receive a section of a 3D array.


      REAL a(100,100,100), e(9,9,9) 
      INTEGER oneslice, twoslice, threeslice, sizeofreal, myrank, ierr 
      INTEGER status(MPI_STATUS_SIZE) 
 
C      extract the section a(1:17:2, 3:11, 2:10) 
C      and store it in e(:,:,:). 
 
      CALL MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierr) 
 
      CALL MPI_TYPE_EXTENT( MPI_REAL, sizeofreal, ierr) 
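 
C     note: the section has 9 elements along each dimension; the 
C     1D piece takes every second real within a column, the 2D 
C     piece strides by one column (100 reals), and the 3D piece 
C     by one plane (100*100 reals) 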
 
C     create datatype for a 1D section 
      CALL MPI_TYPE_VECTOR( 9, 1, 2, MPI_REAL, oneslice, ierr) 
 
C     create datatype for a 2D section 
      CALL MPI_TYPE_HVECTOR(9, 1, 100*sizeofreal, oneslice, twoslice, ierr) 
 
C     create datatype for the entire section 
      CALL MPI_TYPE_HVECTOR( 9, 1, 100*100*sizeofreal, twoslice, 
                             threeslice, ierr) 
 
      CALL MPI_TYPE_COMMIT( threeslice, ierr) 
      CALL MPI_SENDRECV(a(1,3,2), 1, threeslice, myrank, 0, e, 9*9*9, 
                        MPI_REAL, myrank, 0, MPI_COMM_WORLD, status, ierr) 


Example: Copy the (strictly) lower triangular part of a matrix.


      REAL a(100,100), b(100,100) 
      INTEGER  disp(100), blocklen(100), ltype, myrank, ierr 
      INTEGER status(MPI_STATUS_SIZE) 
 
C     copy lower triangular part of array a 
C     onto lower triangular part of array b 
 
      CALL MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierr) 
 
C     compute start and size of each column 
      DO i=1, 100 
        disp(i) = 100*(i-1) + i 
        blocklen(i) = 100-i 
      END DO 
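C     (disp(i) is the offset, in reals, of a(i+1,i), the first 
C     element below the diagonal of column i; that column then 
C     contributes blocklen(i) = 100-i elements) 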
 
C     create datatype for lower triangular part 
      CALL MPI_TYPE_INDEXED( 100, blocklen, disp, MPI_REAL, ltype, ierr) 
 
      CALL MPI_TYPE_COMMIT(ltype, ierr) 
      CALL MPI_SENDRECV( a, 1, ltype, myrank, 0, b, 1, 
                    ltype, myrank, 0, MPI_COMM_WORLD, status, ierr) 


Example: Transpose a matrix.


      REAL a(100,100), b(100,100) 
      INTEGER row, xpose, sizeofreal, myrank, ierr 
      INTEGER status(MPI_STATUS_SIZE) 
 
C     transpose matrix a onto b 
 
      CALL MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierr) 
 
      CALL MPI_TYPE_EXTENT( MPI_REAL, sizeofreal, ierr) 
 
C     create datatype for one row 
      CALL MPI_TYPE_VECTOR( 100, 1, 100, MPI_REAL, row, ierr) 
 
C     create datatype for matrix in row-major order 
      CALL MPI_TYPE_HVECTOR( 100, 1, sizeofreal, row, xpose, ierr) 
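C     (successive rows start one real apart in xpose, so sending 
C     one xpose reads matrix a row by row) 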
 
      CALL MPI_TYPE_COMMIT( xpose, ierr) 
 
C     send matrix in row-major order and receive in column major order 
      CALL MPI_SENDRECV( a, 1, xpose, myrank, 0, b, 100*100, 
                MPI_REAL, myrank, 0, MPI_COMM_WORLD, status, ierr) 


Example: Another approach to the transpose problem:

      REAL a(100,100), b(100,100) 
      INTEGER  disp(2), blocklen(2), type(2), row, row1, sizeofreal 
      INTEGER  myrank, ierr 
      INTEGER status(MPI_STATUS_SIZE) 
 
      CALL MPI_COMM_RANK(MPI_COMM_WORLD, myrank, ierr) 
 
C     transpose matrix a onto b 
 
      CALL MPI_TYPE_EXTENT( MPI_REAL, sizeofreal, ierr) 
 
C     create datatype for one row 
      CALL MPI_TYPE_VECTOR( 100, 1, 100, MPI_REAL, row, ierr) 
 
C     create datatype for one row, with the extent of one real number 
      disp(1) = 0 
      disp(2) = sizeofreal 
      type(1)  = row 
      type(2)  = MPI_UB 
      blocklen(1)  = 1 
      blocklen(2)  = 1 
      CALL MPI_TYPE_STRUCT( 2, blocklen, disp, type, row1, ierr) 
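C     (the MPI_UB marker gives row1 an extent of one real, so the 
C     100 copies sent below start at a(1,1), a(2,1), ..., a(100,1), 
C     one per row of a) 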
 
      CALL MPI_TYPE_COMMIT( row1, ierr) 
 
C     send 100 rows and receive in column major order 
      CALL MPI_SENDRECV( a, 100, row1, myrank, 0, b, 100*100, 
                MPI_REAL, myrank, 0, MPI_COMM_WORLD, status, ierr) 


Example: We manipulate an array of structures.


struct Partstruct 
   { 
   int    class;  /* particle class */ 
   double d[6];   /* particle coordinates */ 
   char   b[7];   /* some additional information */ 
   }; 
 
struct Partstruct    particle[1000]; 
 
int                  i, dest, tag; 
MPI_Comm     comm; 
 
 
/* build datatype describing structure */ 
 
MPI_Datatype Particletype; 
MPI_Datatype type[3] = {MPI_INT, MPI_DOUBLE, MPI_CHAR}; 
int          blocklen[3] = {1, 6, 7}; 
MPI_Aint     disp[3]; 
MPI_Aint     base; 
 
 
/* compute displacements of structure components */ 
 
MPI_Address( particle, disp); 
MPI_Address( particle[0].d, disp+1); 
MPI_Address( particle[0].b, disp+2); 
base = disp[0]; 
for (i=0; i <3; i++) disp[i] -= base; 
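/* displacements are now relative to the start of particle[0] */ 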
 
MPI_Type_struct( 3, blocklen, disp, type, &Particletype); 
 
   /* If compiler does padding in mysterious ways, 
   the following may be safer */ 
 
MPI_Datatype type1[4] = {MPI_INT, MPI_DOUBLE, MPI_CHAR, MPI_UB}; 
int          blocklen1[4] = {1, 6, 7, 1}; 
MPI_Aint     disp1[4]; 
 
/* compute displacements of structure components */ 
 
MPI_Address( particle, disp1); 
MPI_Address( particle[0].d, disp1+1); 
MPI_Address( particle[0].b, disp1+2); 
MPI_Address( particle+1, disp1+3); 
base = disp1[0]; 
for (i=0; i <4; i++) disp1[i] -= base; 
 
/* build datatype describing structure */ 
 
MPI_Type_struct( 4, blocklen1, disp1, type1, &Particletype); 
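 
/* as a consistency check one might verify that the extent of 
   Particletype now matches the C struct size; extent is a local 
   variable introduced only for this check */ 
 
MPI_Aint extent; 
MPI_Type_extent( Particletype, &extent); 
/* extent should equal sizeof(struct Partstruct) */ 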
 
 
              /* 4.1: 
        send the entire array */ 
 
MPI_Type_commit( &Particletype); 
MPI_Send( particle, 1000, Particletype, dest, tag, comm); 
 
 
              /* 4.2: 
        send only the entries of class zero particles, 
        preceded by the number of such entries */ 
 
MPI_Datatype Zparticles;   /* datatype describing all particles 
                              with class zero (needs to be recomputed 
                              if classes change) */ 
MPI_Datatype Ztype; 
 
MPI_Aint     zdisp[1000]; 
int zblock[1000], j, k; 
int zzblock[2] = {1,1}; 
MPI_Aint     zzdisp[2]; 
MPI_Datatype zztype[2]; 
 
/* compute displacements of class zero particles */ 
j = 0; 
for(i=0; i < 1000; i++) 
  if (particle[i].class==0) 
     { 
     zdisp[j] = i; 
     zblock[j] = 1; 
     j++; 
     } 
 
/* create datatype for class zero particles  */ 
MPI_Type_indexed( j, zblock, zdisp, Particletype, &Zparticles); 
 
/* prepend particle count */ 
MPI_Address(&j, zzdisp); 
MPI_Address(particle, zzdisp+1); 
zztype[0] = MPI_INT; 
zztype[1] = Zparticles; 
MPI_Type_struct(2, zzblock, zzdisp, zztype, &Ztype); 
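/* zzdisp holds absolute addresses, so the message below is sent 
   starting from MPI_BOTTOM */ 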
 
MPI_Type_commit( &Ztype); 
MPI_Send( MPI_BOTTOM, 1, Ztype, dest, tag, comm); 
 
 
       /* Probably a more efficient way of defining Zparticles */ 
 
/* consecutive particles with class zero are handled as one block */ 
j=0; 
for (i=0; i < 1000; i++) 
  if (particle[i].class==0) 
    { 
    for (k=i+1; (k < 1000)&&(particle[k].class == 0) ; k++); 
    zdisp[j] = i; 
    zblock[j] = k-i; 
    j++; 
    i = k; 
    } 
MPI_Type_indexed( j, zblock, zdisp, Particletype, &Zparticles); 
 
 
                /* 4.3: 
          send the first two coordinates of all entries */ 
 
MPI_Datatype Allpairs;      /* datatype for all pairs of coordinates */ 
 
MPI_Aint sizeofentry; 
 
MPI_Type_extent( Particletype, &sizeofentry); 
 
     /* sizeofentry can also be computed by subtracting the address 
        of particle[0] from the address of particle[1] */ 
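 
     /* for instance (a sketch of that alternative): 
           MPI_Aint addr0, addr1; 
           MPI_Address( particle, &addr0); 
           MPI_Address( particle+1, &addr1); 
           sizeofentry = addr1 - addr0;                           */ 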
 
MPI_Type_hvector( 1000, 2, sizeofentry, MPI_DOUBLE, &Allpairs); 
MPI_Type_commit( &Allpairs); 
MPI_Send( particle[0].d, 1, Allpairs, dest, tag, comm); 
 
      /* an alternative solution to 4.3 */ 
 
MPI_Datatype Onepair;   /* datatype for one pair of coordinates, with 
                          the extent of one particle entry */ 
MPI_Aint disp2[3]; 
MPI_Datatype type2[3] = {MPI_LB, MPI_DOUBLE, MPI_UB}; 
int blocklen2[3] = {1, 2, 1}; 
 
MPI_Address( particle, disp2); 
MPI_Address( particle[0].d, disp2+1); 
MPI_Address( particle+1, disp2+2); 
base = disp2[0]; 
for (i=0; i<3; i++) disp2[i] -= base; 
 
MPI_Type_struct( 3, blocklen2, disp2, type2, &Onepair); 
MPI_Type_commit( &Onepair); 
MPI_Send( particle[0].d, 1000, Onepair, dest, tag, comm); 
 


Example: The same manipulations as in the previous example, but using absolute addresses in the datatypes.


struct Partstruct 
   { 
   int class; 
   double d[6]; 
   char b[7]; 
   }; 
 
struct Partstruct particle[1000]; 
 
           /* build datatype describing first array entry */ 
 
MPI_Datatype Particletype; 
MPI_Datatype type[3] = {MPI_INT, MPI_DOUBLE, MPI_CHAR}; 
int          block[3] = {1, 6, 7}; 
MPI_Aint     disp[3]; 
 
MPI_Address( particle, disp); 
MPI_Address( particle[0].d, disp+1); 
MPI_Address( particle[0].b, disp+2); 
MPI_Type_struct( 3, block, disp, type, &Particletype); 
 
/* Particletype describes first array entry -- using absolute 
   addresses */ 
 
                  /* 5.1: 
            send the entire array */ 
 
MPI_Type_commit( &Particletype); 
MPI_Send( MPI_BOTTOM, 1000, Particletype, dest, tag, comm); 
 
 
                 /* 5.2: 
         send the entries of class zero, 
         preceded by the number of such entries */ 
 
MPI_Datatype Zparticles, Ztype; 
 
MPI_Aint zdisp[1000]; 
int zblock[1000], i, j, k; 
int zzblock[2] = {1,1}; 
MPI_Datatype zztype[2]; 
MPI_Aint     zzdisp[2]; 
 
j=0; 
for (i=0; i < 1000; i++) 
  if (particle[i].class==0) 
    { 
    for (k=i+1; (k < 1000)&&(particle[k].class == 0) ; k++); 
    zdisp[j] = i; 
    zblock[j] = k-i; 
    j++; 
    i = k; 
    } 
MPI_Type_indexed( j, zblock, zdisp, Particletype, &Zparticles); 
/* Zparticles describe particles with class zero, using 
   their absolute addresses*/ 
 
/* prepend particle count */ 
MPI_Address(&j, zzdisp); 
zzdisp[1] = (MPI_Aint)MPI_BOTTOM; 
zztype[0] = MPI_INT; 
zztype[1] = Zparticles; 
MPI_Type_struct(2, zzblock, zzdisp, zztype, &Ztype); 
 
MPI_Type_commit( &Ztype); 
MPI_Send( MPI_BOTTOM, 1, Ztype, dest, tag, comm); 
 


Example: Handling of unions.


union { 
   int     ival; 
   float   fval; 
      } u[1000]; 
 
int     utype; 
 
/* All entries of u have identical type; variable 
   utype keeps track of their current type */ 
 
MPI_Datatype   type[2]; 
int            blocklen[2] = {1,1}; 
MPI_Aint       disp[2]; 
MPI_Datatype   mpi_utype[2]; 
MPI_Aint       i,j; 
 
/* compute an MPI datatype for each possible union type; 
   assume values are left-aligned in union storage. */ 
 
MPI_Address( u, &i); 
MPI_Address( u+1, &j); 
disp[0] = 0; disp[1] = j-i; 
type[1] = MPI_UB; 
 
type[0] = MPI_INT; 
MPI_Type_struct(2, blocklen, disp, type, &mpi_utype[0]); 
 
type[0] = MPI_FLOAT; 
MPI_Type_struct(2, blocklen, disp, type, &mpi_utype[1]); 
 
for(i=0; i<2; i++) MPI_Type_commit(&mpi_utype[i]); 
 
/* actual communication */ 
 
MPI_Send(u, 1000, mpi_utype[utype], dest, tag, comm); 
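 
A matching receive would use the same component datatype; a minimal 
sketch, assuming the receiver tracks utype the same way and that src, 
tag and comm are defined as on the sending side: 
 
MPI_Status status; 
 
MPI_Recv(u, 1000, mpi_utype[utype], src, tag, comm, &status); 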


