This post contains the MyMPI.h and MyMPI.c files used in Michael J. Quinn's book Parallel Programming in C with MPI and OpenMP (Chinese edition: 《MPI与OpenMP并行程序设计:C语言版》, translated by 陈文光, 武永卫, et al.). A few small details have been adjusted, and the code has been tested and works.
/* MyMPI.h
*
* Header file for a library of matrix/vector
* input/output/redistribution functions.
*
* Programmed by Michael J. Quinn
* Last modification: 4 September 2002
* "Parallel Programming in C with MPI and OpenMP"
* Typed and modified by xiu_dim, 17 Nov. 2015
*/
/******************** MACROS *********************/
#define DATA_MSG 0
#define PROMPT_MSG 1
#define RESPONSE_MSG 2
#define OPEN_FILE_ERROR -1
#define MALLOC_ERROR -2
#define TYPE_ERROR -3
#define PTR_SIZE (sizeof(void *))
#define MIN(a,b) ((a)<(b)?(a):(b))
#define CEILING(i,j) (((i)+(j)-1)/(j))
#define BLOCK_LOW(id,p,n) ((id)*(n)/(p))
#define BLOCK_HIGH(id,p,n) (BLOCK_LOW((id)+1,p,n)-1)
#define BLOCK_SIZE(id,p,n) \
(BLOCK_HIGH(id,p,n)-BLOCK_LOW(id,p,n)+1)
#define BLOCK_OWNER(j,p,n) (((p)*((j)+1)-1)/(n))
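/*
* A worked illustration (not in Quinn's original header): with
* n = 10 elements and p = 3 processes, the block macros give
*
*    id   BLOCK_LOW   BLOCK_HIGH   BLOCK_SIZE
*     0       0            2            3
*     1       3            5            3
*     2       6            9            4
*
* and BLOCK_OWNER(7,3,10) = (3*8-1)/10 = 2, i.e. element 7
* is owned by process 2.
*/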
/************ MISCELLANEOUS FUNCTIONS ************/
int get_size(MPI_Datatype);
void* my_malloc(int, int);
void terminate(int, char*);
/********** DATA DISTRIBUTION FUNCTIONS **********/
void create_mixed_xfer_arrays(int, int, int,
int**, int**);
void create_uniform_xfer_arrays(int, int, int,
int**, int**);
void replicate_block_vector(void*, int, void*,
MPI_Datatype, MPI_Comm);
/**************** INPUT FUNCTIONS ****************/
void read_row_striped_matrix(char*, void***, void**,
MPI_Datatype, int*, int*, MPI_Comm);
void read_col_striped_matrix(char*, void***, void**,
MPI_Datatype, int*, int*, MPI_Comm);
void read_checkerboard_matrix(char*, void***, void**,
MPI_Datatype, int*, int*, MPI_Comm);
void read_block_vector(char*, void**, MPI_Datatype,
int*, MPI_Comm);
void read_replicated_vector(char*, void**, MPI_Datatype,
int*, MPI_Comm);
/**************** OUTPUT FUNCTIONS ***************/
void print_submatrix(void**, MPI_Datatype, int, int);
void print_subvector(void*, MPI_Datatype, int);
void print_row_striped_matrix(void**, MPI_Datatype,
int, int, MPI_Comm);
void print_col_striped_matrix(void**, MPI_Datatype,
int, int, MPI_Comm);
void print_checkerboard_matrix(void**, MPI_Datatype,
int, int, MPI_Comm);
void print_block_vector(void*, MPI_Datatype, int, MPI_Comm);
void print_replicated_vector(void*, MPI_Datatype, int, MPI_Comm);
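The matching implementation, MyMPI.c, follows. To use the library, compile it together with your own program; a command along the lines of mpicc -o prog prog.c MyMPI.c should work, though the exact invocation depends on your MPI installation. Note that MyMPI.h assumes mpi.h has already been included.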
/* MyMPI.c -- A library of matrix/vector
* input/output/redistribution functions.
*
* Programmed by Michael J. Quinn
* Last modification: 4 September 2002
* "Parallel Programming in C with MPI and OpenMP"
* Typed and modified by xiu_dim, 17 Nov. 2015
*/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include "MyMPI.h"
/************ MISCELLANEOUS FUNCTIONS ************/
/*
* Given MPI_Datatype 't', function 'get_size' returns the
* size of a single datum of that data type.
*/
int get_size(MPI_Datatype t)
{
if (t == MPI_BYTE) return sizeof(char);
if (t == MPI_DOUBLE) return sizeof(double);
if (t == MPI_FLOAT) return sizeof(float);
if (t == MPI_INT) return sizeof(int);
printf("Error: Unrecognized argument to 'get_size'\n");
fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, TYPE_ERROR);
return 0; /* not reached; silences compiler warning */
}
/*
* Function 'my_malloc' is called when a process wants
* to allocate some space from the heap. If the memory
* allocation fails, the process prints an error message
* and then aborts execution of the program.
*/
void *my_malloc(
int id, /* IN - Process rank */
int bytes) /* IN - Bytes to allocate */
{
void *buffer;
if ((buffer = malloc((size_t)bytes)) == NULL) {
printf("Error:Malloc failed for process %d\n", id);
fflush(stdout);
MPI_Abort(MPI_COMM_WORLD, MALLOC_ERROR);
}
return buffer;
}
/*
* Function 'terminate' is called when the program should
* not continue execution, due to an error condition that
* all of the processes are aware of. Process 0 prints the
* error message passed as an argument to the function.
*
* All processes must invoke this function together!
*/
void terminate(
int id, /* IN - Process rank */
char *error_message) /* IN - Message to print */
{
if (!id) {
printf("Error: %s\n", error_message);
fflush(stdout);
}
MPI_Finalize();
exit(-1);
}
/********** DATA DISTRIBUTION FUNCTIONS **********/
/*
* This function creates the count and displacement arrays
* needed by scatter and gather functions, when the number
* of elements sent to or received from each process varies.
*/
void create_mixed_xfer_arrays(
int id, /* IN - Process rank */
int p, /* IN - Number of processes */
int n, /* IN - Total number of elements */
int **count, /* OUT - Array of counts */
int **disp) /* OUT - Array of displacements */
{
int i;
*count = my_malloc(id, p*sizeof(int));
*disp = my_malloc(id, p*sizeof(int));
(*count)[0] = BLOCK_SIZE(0,p,n);
(*disp)[0] = 0;
for (i=1; i<p; ++i) {
(*disp)[i] = (*disp)[i-1] + (*count)[i-1];
(*count)[i] = BLOCK_SIZE(i,p,n);
}
return;
}
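/*
* A worked illustration (not in Quinn's original source): for
* p = 3 and n = 10 this produces count = {3, 3, 4} and
* disp = {0, 3, 6}, matching the BLOCK_* macros above.
*/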
/*
* This function creates the count and displacement arrays
* needed in an all-to-all exchange, when a process gets
* the same number of elements from every other process.
*/
void create_uniform_xfer_arrays(
int id, /* IN - Process rank */
int p, /* IN - Number of processes */
int n, /* IN - Total number of elements */
int **count, /* OUT - Array of counts */
int **disp) /* OUT - Array of displacements */
{
int i;
*count = my_malloc(id, p*sizeof(int));
*disp = my_malloc(id, p*sizeof(int));
(*count)[0] = BLOCK_SIZE(id,p,n);
(*disp)[0] = 0;
for (i=1; i<p; ++i) {
(*disp)[i] = (*disp)[i-1] + (*count)[i-1];
(*count)[i] = BLOCK_SIZE(id,p,n);
}
return;
}
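/*
* A worked illustration (not in Quinn's original source): on
* process id = 2 with p = 3 and n = 10, BLOCK_SIZE(2,3,10) = 4,
* so count = {4, 4, 4} and disp = {0, 4, 8}; this process
* exchanges the same number of elements with every peer.
*/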
/*
* This function is used to transform a vector from a
* block distribution to a replicated distribution within
* a communicator.
*/
void replicate_block_vector(
void *ablock, /* IN - Block-distributed vector */
int n, /* IN - Number of elements in vector */
void *arep, /* OUT - Replicated vector */
MPI_Datatype dtype, /* IN - Vector element type */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int *cnt; /* Number of elements contributed by each process */
int *disp; /* Displacement in concatenated array */
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
create_mixed_xfer_arrays(id, p, n, &cnt, &disp);
MPI_Allgatherv(ablock, cnt[id], dtype, arep,
cnt, disp, dtype, comm);
free(cnt);
free(disp);
return;
}
/**************** INPUT FUNCTIONS ****************/
/*
* In the matrix file, the first two elements are
* integers whose values are the dimensions of the
* matrix ('m' rows and 'n' columns). What follows
* are 'm'*'n' values representing the matrix elements
* stored in row-major order.
*/
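/*
* A worked illustration (not in Quinn's original source): a
* 2 x 3 matrix of doubles is stored in the file as
*
*    int 2, int 3,
*    double a[0][0], a[0][1], a[0][2], a[1][0], a[1][1], a[1][2]
*
* i.e. two ints followed by m*n doubles in row-major order.
*/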
/*
* Process p-1 opens a file and inputs a two-dimensional
* matrix, reading and distributing blocks of rows to the
* other MPI processes.
*/
void read_row_striped_matrix(
char *s, /* IN - File name */
void ***subs, /* OUT - Submatrix row indices */
void **storage, /* OUT - Submatrix stored here */
MPI_Datatype dtype, /* IN - Matrix element type */
int *m, /* OUT - Number of rows in matrix */
int *n, /* OUT - Number of cols in matrix */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of matrix element */
int local_rows; /* Number of rows in this process */
FILE *infileptr; /* Input file pointer */
void **lptr; /* Pointer into 'subs' */
void *rptr; /* Pointer into 'storage' */
MPI_Status status; /* Result of receive */
int x; /* Result of read */
int i;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
/* Process p-1 opens the file 's' */
if (id == p-1) {
infileptr = fopen(s, "r");
if (infileptr == NULL) *m = 0;
else {
x = fread(m, sizeof(int), 1, infileptr);
x = fread(n, sizeof(int), 1, infileptr);
}
}
/* Process p-1 broadcasts 'm' in communicator 'comm' */
MPI_Bcast(m, 1, MPI_INT, p-1, comm);
if (!(*m)) MPI_Abort(MPI_COMM_WORLD, OPEN_FILE_ERROR);
/* Process p-1 broadcasts 'n' in communicator 'comm' */
MPI_Bcast(n, 1, MPI_INT, p-1, comm);
/* Number of rows in this process */
local_rows = BLOCK_SIZE(id,p,*m);
/* Allocate array for submatrix in this process */
*storage = (void *)my_malloc(id,
local_rows*(*n)*datum_size);
/* Allocate array of pointers to the submatrix rows */
*subs = (void **)my_malloc(id, local_rows*PTR_SIZE);
/* Connect 'subs' with 'storage' */
lptr = (void *)&(*subs[0]);
rptr = (void *)*storage;
for (i=0; i<local_rows; ++i) {
*(lptr++) = (void *)rptr;
rptr += (*n) * datum_size;
}
if (id == (p-1)) {
for (i=0; i<p-1; ++i) {
/* Process p-1 reads a block of rows from 'infileptr'
and sends it with tag 'DATA_MSG'
to process i in communicator 'comm' */
x = fread(*storage, datum_size,
BLOCK_SIZE(i,p,*m)*(*n), infileptr);
MPI_Send(*storage, BLOCK_SIZE(i,p,*m)*(*n),
dtype, i, DATA_MSG, comm);
}
/* Process p-1 reads its own block from 'infileptr' */
x = fread(*storage, datum_size,
BLOCK_SIZE(i,p,*m)*(*n), infileptr);
fclose(infileptr);
} else
/* This process receives its block from process p-1 */
MPI_Recv(*storage, local_rows*(*n), dtype, p-1,
DATA_MSG, comm, &status);
return;
}
/*
* Process p-1 opens a file and inputs a two-dimensional
* matrix, reading and distributing blocks of columns to
* the other MPI processes.
*/
void read_col_striped_matrix(
char *s, /* IN - File name */
void ***subs, /* OUT - 2-D array */
void **storage, /* OUT - Array elements */
MPI_Datatype dtype, /* IN - Matrix element type */
int *m, /* OUT - Number of rows in matrix */
int *n, /* OUT - Number of cols in matrix */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of matrix element */
int local_cols; /* Number of cols in this process */
FILE *infileptr; /* Input file pointer */
void *buffer; /* File buffer */
void **lptr; /* Pointer into 'subs' */
void *rptr; /* Pointer into 'storage' */
int *send_count; /* Each process's count */
int *send_disp; /* Each process's displacement */
int i,j;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
/* Process p-1 opens the file 's' */
if (id == (p-1)) {
infileptr = fopen(s, "r");
if (infileptr == NULL) *m = 0;
else {
fread(m, sizeof(int), 1, infileptr);
fread(n, sizeof(int), 1, infileptr);
}
}
/* Process p-1 broadcasts 'm' in communicator 'comm' */
MPI_Bcast(m, 1, MPI_INT, p-1, comm);
if (!(*m)) MPI_Abort(comm, OPEN_FILE_ERROR);
/* Process p-1 broadcasts 'n' in communicator 'comm' */
MPI_Bcast(n, 1, MPI_INT, p-1, comm);
/* Number of cols in this process */
local_cols = BLOCK_SIZE(id,p,*n);
/* Dynamically allocate two-dimensional matrix 'subs' */
/* Allocate array for submatrix in this process */
*storage = my_malloc(id, (*m)*local_cols*datum_size);
/* Allocate array of pointers to the matrix rows */
*subs = (void **)my_malloc(id, (*m)*PTR_SIZE);
lptr = (void *)*subs;
rptr = (void *)*storage;
for (i=0; i<(*m); ++i) {
*(lptr++) = (void *)rptr;
rptr += local_cols * datum_size;
}
if (id == (p-1))
buffer = my_malloc(id, (*n)*datum_size);
create_mixed_xfer_arrays(id, p, *n, &send_count, &send_disp);
for (i=0; i<(*m); ++i) {
if (id == (p-1))
fread(buffer, datum_size, *n, infileptr);
MPI_Scatterv(buffer, send_count, send_disp, dtype,
(*storage)+i*local_cols*datum_size,
local_cols, dtype, p-1, comm);
}
free(send_count);
free(send_disp);
if (id == (p-1)) free(buffer);
return;
}
/*
* This function allocates blocks of the matrix to
* the MPI processes.
*
* 'grid_comm' MUST be a two-dimensional Cartesian
* communicator (e.g. one built with MPI_Cart_create).
*/
void read_checkerboard_matrix(
char *s, /* IN - File name */
void ***subs, /* OUT - 2-D array */
void **storage, /* OUT - Array elements */
MPI_Datatype dtype, /* IN - Matrix element type */
int *m, /* OUT - Number of rows in matrix */
int *n, /* OUT - Number of cols in matrix */
MPI_Comm grid_comm) /* IN - Communicator */
{
int p; /* Number of processes */
int grid_id; /* Process rank in grid */
int datum_size; /* Size of matrix element */
int coords[2]; /* Coords of process receiving next row of matrix */
int grid_coords[2]; /* Coords of this process */
int grid_period[2]; /* Wraparound */
int grid_size[2]; /* Dimensions of process grid */
int local_rows; /* Number of matrix rows in this process */
int local_cols; /* Number of matrix cols in this process */
FILE *infileptr; /* Input file pointer */
void *buffer; /* File buffer */
void **lptr; /* Pointer into 'subs' */
void *rptr; /* Pointer into 'storage' */
void *laddr; /* Used when process 0 gets row */
void *raddr; /* Address of first element to send */
int dest_id; /* Rank of receiving process */
MPI_Status status; /* Result of receive */
int i,j,k;
MPI_Comm_size(grid_comm, &p);
MPI_Comm_rank(grid_comm, &grid_id);
datum_size = get_size(dtype);
/* Process 0 opens the file, gets the numbers of rows and cols,
and broadcasts this information to the other processes */
if (grid_id == 0) {
infileptr = fopen(s, "r");
if (infileptr == NULL) *m = 0;
else {
fread(m, sizeof(int), 1, infileptr);
fread(n, sizeof(int), 1, infileptr);
}
}
MPI_Bcast(m, 1, MPI_INT, 0, grid_comm);
if (!(*m)) MPI_Abort(MPI_COMM_WORLD, OPEN_FILE_ERROR);
MPI_Bcast(n, 1, MPI_INT, 0, grid_comm);
/* Each process determines the size of the submatrix
it is responsible for. */
MPI_Cart_get(grid_comm, 2, grid_size,
grid_period, grid_coords);
local_rows = BLOCK_SIZE(grid_coords[0],grid_size[0],*m);
local_cols = BLOCK_SIZE(grid_coords[1],grid_size[1],*n);
/* Dynamically allocate two-dimensional matrix 'subs' */
*storage = my_malloc(grid_id,
local_rows*local_cols*datum_size);
*subs = (void **)my_malloc(grid_id, local_rows*PTR_SIZE);
lptr = (void *)*subs;
rptr = (void *)*storage;
for (i=0; i<local_rows; ++i) {
*(lptr++) = (void *)rptr;
rptr += local_cols * datum_size;
}
/* Grid process 0 reads in the matrix one row at a time
and distributes each row among the MPI processes */
if (grid_id == 0)
buffer = my_malloc(grid_id, (*n)*datum_size);
/* For each row of processes in the process grid */
for (i=0; i<grid_size[0]; ++i) {
coords[0] = i;
/* For each matrix row controlled by this process row */
for (j=0; j<BLOCK_SIZE(i,grid_size[0],*m); ++j) {
/* Read in a row of the matrix */
if (grid_id == 0) {
fread(buffer, datum_size, *n, infileptr);
}
/* Distribute it among processes in the grid row */
for (k=0; k<grid_size[1]; ++k) {
coords[1] = k;
/* Find address of first element to send */
raddr = buffer +
BLOCK_LOW(k,grid_size[1],*n) * datum_size;
/* Determine the grid ID of the process
getting the subrow */
MPI_Cart_rank(grid_comm, coords, &dest_id);
if (grid_id == 0) {
/* Process 0 is responsible for sending */
if (dest_id == 0) {
/* It is sending(copying) to itself */
laddr = (*subs)[j];
memcpy(laddr, raddr,
local_cols*datum_size);
} else {
/* It is sending to another process */
MPI_Send(raddr,
BLOCK_SIZE(k,grid_size[1],*n),
dtype, dest_id, 0, grid_comm);
}
} else if (grid_id == dest_id) {
/* Process 'dest_id' is responsible for receiving */
MPI_Recv((*subs)[j], local_cols, dtype,
0, 0, grid_comm, &status);
}
}
}
}
if (grid_id == 0) free(buffer);
return;
}
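/*
* A minimal caller-side sketch (not in Quinn's original source),
* assuming 'p' processes and MPI already initialized: the 2-D
* Cartesian communicator this function expects can be built with
*
*    int dims[2] = {0, 0}, periods[2] = {0, 0};
*    MPI_Comm grid_comm;
*    MPI_Dims_create(p, 2, dims);
*    MPI_Cart_create(MPI_COMM_WORLD, 2, dims, periods, 1,
*       &grid_comm);
*
* before calling read_checkerboard_matrix.
*/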
/*
* Process p-1 opens a file containing a vector, reads
* its contents, and distributes the elements by block
* among the processes in a communicator.
*/
void read_block_vector(
char *s, /* IN - File name */
void **v, /* OUT - Subvector */
MPI_Datatype dtype, /* IN - Vector element type */
int *n, /* OUT - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of vector element */
int local_els; /* Number of elements in this process */
FILE *infileptr; /* Input file pointer */
MPI_Status status; /* Result of receive */
int x; /* Result of read */
int i;
MPI_Comm_size(comm,&p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
/* Process p-1 opens file, determines number of vector
elements, and broadcasts this value to the other
processes */
if (id == (p-1)) {
infileptr = fopen(s, "r");
if (infileptr == NULL) *n = 0;
else fread(n, sizeof(int), 1, infileptr);
}
MPI_Bcast(n, 1, MPI_INT, p-1, comm);
if (!(*n)) {
if (!id) {
printf("Input file '%s' cannot be opened\n", s);
fflush(stdout);
}
MPI_Abort(MPI_COMM_WORLD, OPEN_FILE_ERROR);
}
/* Block mapping of vector elements to processes */
local_els = BLOCK_SIZE(id,p,*n);
*v = my_malloc(id, local_els*datum_size);
if (id == (p-1)) {
for (i=0; i<p-1; ++i) {
x = fread(*v, datum_size, BLOCK_SIZE(i,p,*n),
infileptr);
MPI_Send(*v, BLOCK_SIZE(i,p,*n), dtype, i,
DATA_MSG, comm);
}
x = fread(*v, datum_size, BLOCK_SIZE(id,p,*n),
infileptr);
fclose(infileptr);
} else {
MPI_Recv(*v, BLOCK_SIZE(id,p,*n), dtype, p-1,
DATA_MSG, comm, &status);
}
return;
}
/*
* Process p-1 opens a file containing a vector, reads its
* contents, and replicates the vector among all processes
* in a communicator.
*/
void read_replicated_vector(
char *s, /* IN - File name */
void **v, /* OUT - Subvector */
MPI_Datatype dtype, /* IN - Vector element type */
int *n, /* OUT - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of vector element */
FILE *infileptr; /* Input file pointer */
int i;
/* Process p-1 opens file, determines number of vector
elements, and broadcasts this value to the other
processes */
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
if (id == (p-1)) {
infileptr = fopen(s, "r");
if (infileptr == NULL) *n = 0;
else fread(n, sizeof(int), 1, infileptr);
}
MPI_Bcast(n, 1, MPI_INT, p-1, comm);
if (!(*n)) terminate(id, "Cannot open vector file!");
*v = my_malloc(id, (*n)*datum_size);
if (id == (p-1)) {
fread(*v, datum_size, *n, infileptr);
fclose(infileptr);
}
MPI_Bcast(*v, *n, dtype, p-1, comm);
return;
}
/**************** OUTPUT FUNCTIONS ***************/
/*
* Print elements of a doubly subscripted array.
*/
void print_submatrix(
void **a, /* IN - Doubly subscripted array */
MPI_Datatype dtype, /* IN - Type of element */
int rows, /* IN - Number of rows */
int cols) /* IN - Number of columns */
{
int i,j;
for (i=0; i<rows; ++i) {
for (j=0; j<cols; ++j) {
if (dtype == MPI_DOUBLE)
printf("%6.3f",((double **)a)[i][j]);
else {
if (dtype == MPI_FLOAT)
printf("%6.3f",((float **)a)[i][j]);
else
printf("%6d",((int **)a)[i][j]);
}
}
putchar('\n');
}
return;
}
/*
* Print elements of a singly subscripted array.
*/
void print_subvector(
void *a, /* IN - Singly subscripted array */
MPI_Datatype dtype, /* IN - Type of elements */
int n) /* IN - Size of array */
{
int i;
for (i=0; i<n; ++i) {
if (dtype == MPI_DOUBLE)
printf("%6.3f ",((double *)a)[i]);
else {
if (dtype == MPI_FLOAT)
printf("%6.3f ",((float *)a)[i]);
else if (dtype == MPI_INT)
printf("%6d ",((int *)a)[i]);
}
}
return;
}
/*
* Process 0 prints a matrix that is distributed in row-
* striped fashion among the processes in a communicator.
*/
void print_row_striped_matrix(
void **a, /* IN - 2D array */
MPI_Datatype dtype, /* IN - Matrix element type */
int m, /* IN - Number of matrix rows */
int n, /* IN - Number of matrix columns */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of matrix element */
int local_rows; /* Rows in this process */
int max_block_size; /* Most matrix rows held by any process */
void *bstorage; /* Elements received from another process */
void **b; /* 2D array indexing into 'bstorage' */
MPI_Status status; /* Result of receive */
int prompt; /* Dummy variable */
int i;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
local_rows = BLOCK_SIZE(id,p,m);
if (!id) {
print_submatrix(a, dtype, local_rows, n);
if (p >1) {
max_block_size = BLOCK_SIZE(p-1,p,m);
bstorage = (void *)my_malloc(id,
max_block_size*n*datum_size);
b = (void **)my_malloc(id,
max_block_size*PTR_SIZE);
b[0] = bstorage;
for (i=1; i<max_block_size; ++i) {
b[i] = b[i-1] + n * datum_size;
}
for (i=1; i<p; ++i) {
/* Send a prompt message to process i
in the default communicator */
MPI_Send(&prompt, 1, MPI_INT, i,
PROMPT_MSG, MPI_COMM_WORLD);
/* Receive datum with tag 'RESPONSE_MSG' from
process i in the default communicator */
MPI_Recv(bstorage, BLOCK_SIZE(i,p,m)*n, dtype,
i, RESPONSE_MSG, MPI_COMM_WORLD, &status);
print_submatrix(b, dtype, BLOCK_SIZE(i,p,m), n);
}
free(b);
free(bstorage);
}
putchar('\n');
} else {
/* Receive a prompt message from process 0
in the default communicator */
MPI_Recv(&prompt, 1, MPI_INT, 0, PROMPT_MSG,
MPI_COMM_WORLD,&status);
/* Send datum with tag 'RESPONSE_MSG' to process 0
in the default communicator */
MPI_Send(*a, local_rows*n, dtype, 0, RESPONSE_MSG,
MPI_COMM_WORLD);
}
return;
}
/*
* Process 0 prints a matrix that has a
* columnwise-block-striped data decomposition
* among the processes in a communicator.
*/
void print_col_striped_matrix(
void **a, /* IN - 2D array */
MPI_Datatype dtype, /* IN - Matrix element type */
int m, /* IN - Number of matrix rows */
int n, /* IN - Number of matrix columns */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of matrix element */
void *buffer; /* Buffer to hold one row */
int *rec_count; /* Number of elements received per process */
int *rec_disp; /* Offset of each process's block */
MPI_Status status; /* Result of receive */
int i,j;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
create_mixed_xfer_arrays(id, p, n, &rec_count, &rec_disp);
if (!id) buffer = my_malloc(id, n*datum_size);
for (i=0; i<m; ++i) {
MPI_Gatherv(a[i], BLOCK_SIZE(id,p,n), dtype, buffer,
rec_count, rec_disp, dtype, 0, MPI_COMM_WORLD);
if (!id) {
print_subvector(buffer, dtype, n);
putchar('\n');
}
}
free(rec_count);
free(rec_disp);
if (!id) {
free(buffer);
putchar('\n');
}
return;
}
/*
* This function prints a matrix distributed in checkerboard
* fashion among the processes in a communicator.
*/
void print_checkerboard_matrix(
void **a, /* IN - 2D array */
MPI_Datatype dtype, /* IN - Matrix element type */
int m, /* IN - Number of matrix rows */
int n, /* IN - Number of matrix columns */
MPI_Comm grid_comm) /* IN - Communicator */
{
int p; /* Number of processes */
int grid_id; /* Process rank in grid */
int datum_size; /* Size of matrix element */
int coords[2]; /* Grid coords of process sending elements */
int grid_coords[2]; /* Coords of this process */
int grid_period[2]; /* Wraparound */
int grid_size[2]; /* Dimensions of process grid */
int local_cols; /* Number of matrix cols in this process */
int els; /* Number of elements received */
int src; /* ID of process with subrow */
void *buffer; /* Buffer to hold one matrix row */
void *laddr; /* Address to put subrow */
MPI_Status status; /* Result of receive */
int i,j,k;
MPI_Comm_size(grid_comm, &p);
MPI_Comm_rank(grid_comm, &grid_id);
datum_size = get_size(dtype);
MPI_Cart_get(grid_comm, 2, grid_size,
grid_period, grid_coords);
local_cols = BLOCK_SIZE(grid_coords[1],grid_size[1],n);
if (!grid_id)
buffer = my_malloc(grid_id, n*datum_size);
/* For each row of the process grid */
for (i=0; i<grid_size[0]; ++i) {
coords[0] = i;
/* For each matrix row controlled by the process row */
for (j=0; j<BLOCK_SIZE(i,grid_size[0],m); ++j) {
/* Collect the matrix row on grid process 0 and
print it */
if (!grid_id) {
for (k=0; k<grid_size[1]; ++k) {
coords[1] = k;
MPI_Cart_rank(grid_comm, coords, &src);
els = BLOCK_SIZE(k,grid_size[1],n);
laddr = buffer +
BLOCK_LOW(k,grid_size[1],n) * datum_size;
if (src == 0) {
memcpy(laddr, a[j], els*datum_size);
} else {
MPI_Recv(laddr, els, dtype, src, 0,
grid_comm, &status);
}
}
print_subvector(buffer, dtype, n);
putchar('\n');
} else if (grid_coords[0] == i) {
MPI_Send(a[j], local_cols, dtype, 0, 0, grid_comm);
}
}
}
if (!grid_id) {
free(buffer);
putchar('\n');
}
return;
}
/*
* Process 0 prints a vector that is block distributed
* among the processes in a communicator.
*/
void print_block_vector(
void *v, /* IN - Address of vector */
MPI_Datatype dtype, /* IN - Vector element type */
int n, /* IN - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int p; /* Number of processes */
int id; /* Process rank */
int datum_size; /* Size of vector element */
void *tmp; /* Other process's subvector */
MPI_Status status; /* Result of receive */
int prompt; /* Dummy variable */
int i;
MPI_Comm_size(comm, &p);
MPI_Comm_rank(comm, &id);
datum_size = get_size(dtype);
if (!id) {
print_subvector(v, dtype, BLOCK_SIZE(id,p,n));
if (p>1) {
tmp = my_malloc(id,
BLOCK_SIZE(p-1,p,n)*datum_size);
for (i=1; i<p; ++i) {
MPI_Send(&prompt, 1, MPI_INT, i,
PROMPT_MSG, comm);
MPI_Recv(tmp, BLOCK_SIZE(i,p,n), dtype, i,
RESPONSE_MSG, comm, &status);
print_subvector(tmp, dtype,
BLOCK_SIZE(i,p,n));
}
free(tmp);
}
printf("\n\n");
} else {
MPI_Recv(&prompt, 1, MPI_INT, 0, PROMPT_MSG,
comm, &status);
MPI_Send(v, BLOCK_SIZE(id,p,n), dtype, 0,
RESPONSE_MSG, comm);
}
return;
}
/*
* Process 0 prints a vector that is replicated among
* the processes in a communicator.
*/
void print_replicated_vector(
void *v, /* IN - Address of vector */
MPI_Datatype dtype, /* IN - Vector element type */
int n, /* IN - Vector length */
MPI_Comm comm) /* IN - Communicator */
{
int id; /* Process rank */
MPI_Comm_rank(comm, &id);
if (!id) {
print_subvector(v, dtype, n);
printf("\n\n");
}
return;
}
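To finish, here is a minimal usage sketch (my own illustration, not from Quinn's book). It reads a row-striped matrix of doubles from a binary file named on the command line, in the layout described above, and prints it from process 0.

/* example.c -- illustrative driver for the MyMPI library
* (assumption: the file named by argv[1] follows the binary
* layout described in the input-function comments above) */
#include <mpi.h>
#include "MyMPI.h"

int main(int argc, char *argv[])
{
int id; /* Process rank */
int m, n; /* Matrix dimensions */
double **a; /* Row pointers into 'storage' */
double *storage; /* Matrix elements */
MPI_Init(&argc, &argv);
MPI_Comm_rank(MPI_COMM_WORLD, &id);
if (argc < 2) terminate(id, "Usage: example <matrix-file>");
/* Read and distribute the matrix by blocks of rows */
read_row_striped_matrix(argv[1], (void ***)&a, (void **)&storage,
MPI_DOUBLE, &m, &n, MPI_COMM_WORLD);
/* Process 0 prints the whole matrix */
print_row_striped_matrix((void **)a, MPI_DOUBLE, m, n,
MPI_COMM_WORLD);
MPI_Finalize();
return 0;
}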