Category: C/C++

2007-06-07 14:33:41

The problem: compute the matrix product A*B = C, where A is an a-row, b-column matrix, B is b rows by c columns, and C is a rows by c columns.
Let the number of processes be rank, with the master process numbered 0; the processes are thus numbered 0, 1, ..., rank - 1.
 
Method 1: master-worker mode
The master process partitions matrix A into row blocks and sends them to the worker processes. If a % (rank - 1) != 0, the leftover rows are computed by the master itself after it has handed out all the blocks; the master then collects the partial results in turn. A sample implementation follows (it relies on a local header matrix.h, a guessed version of which is sketched first).
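
The program below includes a local header "matrix.h" that the original post does not show. A minimal sketch of what it presumably defines, inferred only from the identifiers M_OK, M_ERR, and MAX_MATRIX_SIZE used in the code (the concrete values here are assumptions):

/* matrix.h -- hypothetical reconstruction; only the macro names are known from the code below */
#ifndef MATRIX_H
#define MATRIX_H

#define M_OK            0       /* success return code */
#define M_ERR           1       /* error return code */
#define MAX_MATRIX_SIZE 1000    /* assumed upper bound on any matrix dimension */

#endif /* MATRIX_H */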
 

#include <stdlib.h>
#include <stdio.h>
#include <time.h>
#include "mpi.h"
#include "matrix.h"

/*Show the usage*/
void usage(char * program)
{
        printf("Usage %s a b c\n", program);
        printf("The size of the first matrix is a rows, b columns\n");
        printf("The size of the second matrix is b rows, c columns\n");
}

/*Create a matrix with $row rows and $col columns; fill it with random values, or with zeros when flag == 'n'*/
double ** create_matrix(int row, int col, char flag)
{
        int i,j;
        double ** new_matrix = (double **)malloc(row * sizeof(double*));
        srandom((unsigned int)time(NULL));
        for(i = 0; i < row; i ++)
        {
                new_matrix[i] = (double * )malloc(col * sizeof(double));
                if(flag != 'n')
                {
                        for(j = 0; j < col; j ++)
                                new_matrix[i][j] = (double)(random() % 10);
                }
                else
                {
                        for(j = 0; j < col; j ++)
                                new_matrix[i][j] = 0.00;
                }
        }
        return new_matrix;
}

/*Free the matrix*/
int free_matrix(double ** matrix, int row, int col)
{
        int i;
        for(i = 0; i < row; i ++)
                free(matrix[i]);
        free(matrix);
        return M_OK;
}

/*Print a matrix with $row rows, $col columns*/
void show_matrix(double ** matrix, int row, int col)
{
        int i,j;

        for(i = 0; i < col; i ++)
                printf("=======");
        printf("\n");

        for(i = 0; i < row; i ++)
        {
                for(j = 0; j < col; j ++)
                {
                        printf("%2.2f ", matrix[i][j]);
                }
                printf("\n");
        }
        for(i = 0; i < col; i ++)
                printf("=======");
        printf("\n");
}

double ** matrix_multi(double ** first_m, double ** second_m, int a, int b, int c)
{
        int i, j, k;
        double res_tmp;
        double ** matrix_res_part = create_matrix(a, c, 'n');
        for(i = 0; i < a; i ++)
        {
                for(j = 0; j < c; j ++)
                {
                        res_tmp = 0.00;
                        for(k = 0; k < b; k ++)
                                res_tmp += first_m[i][k] * second_m[k][j];

                        matrix_res_part[i][j] = res_tmp;
                }
        }
        return matrix_res_part;
}

int main(int argc, char * argv[])
{
        /*[axb]*[bxc]=[axc] matrix_size[0] = a, matrix_size[1] = b, matrix_size[2] = c*/
        long matrix_size[3];
        double ** matrix_1st = NULL;
        double ** matrix_2nd = NULL;
        double ** matrix_res = NULL;
        int i, j;

        /*MPICH related*/
        int myid, rank, numprocs;
        char processor_name[MPI_MAX_PROCESSOR_NAME];
        double * tmp_buf_dis = NULL;
        double * tmp_buf_gath = NULL;
        int msize_dis, psize_dis, msize_gath, psize_gath;
        int partition_size;
        int master = 0;
        MPI_Status status;

        /*read arguments*/
        if(argc != 4)
        {
                usage(argv[0]);
                exit(99);
        }

        for(i = 0; i < 3; i ++)
        {
                matrix_size[i] = atol(argv[i+1]);
                if(matrix_size[i] > MAX_MATRIX_SIZE)
                {
                        printf("The MAX matrix size is %d\n", MAX_MATRIX_SIZE);
                        exit(M_ERR);
                }
        }

        /*Every process builds its own copy of the second matrix. This only yields a consistent
          B across processes when they all seed random() with the same time(NULL) value; a more
          robust approach would be to create B on the master and broadcast it with MPI_Bcast.*/
        matrix_2nd = create_matrix(matrix_size[1], matrix_size[2], 'y');

        /*Initialize the MPI environment.
          Note the variable naming: "myid" holds this process's rank,
          "rank" holds the total number of processes, and "numprocs"
          receives the length of the processor name.*/
        MPI_Init(&argc,&argv);
        MPI_Comm_rank(MPI_COMM_WORLD, &myid);
        MPI_Comm_size(MPI_COMM_WORLD, &rank);
        MPI_Get_processor_name(processor_name, &numprocs);

        if(rank == 1) /*only one process: fall back to serial multiplication*/
        {
                matrix_1st = create_matrix(matrix_size[0], matrix_size[1], 'y');
                matrix_res = matrix_multi(matrix_1st, matrix_2nd, matrix_size[0], matrix_size[1], matrix_size[2]);
                show_matrix(matrix_res, matrix_size[0], matrix_size[2]);
                free_matrix(matrix_res, matrix_size[0], matrix_size[2]);
                free_matrix(matrix_1st, matrix_size[0], matrix_size[1]);
                free_matrix(matrix_2nd, matrix_size[1], matrix_size[2]);
                MPI_Finalize();
                return M_OK;
        }

        /*divide matrix A into row blocks: each of the (rank - 1) worker processes gets partition_size rows*/
        partition_size = matrix_size[0]/(rank - 1);
        if(partition_size == 0)
        {
                partition_size ++;
        }
        msize_dis = matrix_size[1];
        MPI_Pack_size(msize_dis, MPI_DOUBLE, MPI_COMM_WORLD, &psize_dis);
        tmp_buf_dis= (double*) malloc((psize_dis + 2 * MPI_BSEND_OVERHEAD) * sizeof(double));

        msize_gath = matrix_size[2];
        MPI_Pack_size(msize_gath, MPI_DOUBLE, MPI_COMM_WORLD, &psize_gath);
        tmp_buf_gath = (double*) malloc((psize_gath+2 * MPI_BSEND_OVERHEAD) * sizeof(double));

        if(!tmp_buf_dis || !tmp_buf_gath)
        {
                printf("Malloc MPI msg buffer failed!\n");
                MPI_Finalize();
                return M_ERR;
        }

        if(myid == master) /*master process*/
        {
                /*Create the matrices and show them*/
                matrix_1st = create_matrix(matrix_size[0], matrix_size[1], 'y');
                printf("First Matirx:\n");
                show_matrix(matrix_1st, matrix_size[0], matrix_size[1]);
                printf("Second Matirx:\n");
                show_matrix(matrix_2nd, matrix_size[1], matrix_size[2]);

                matrix_res = create_matrix(matrix_size[0], matrix_size[2], 'n');
                /*Distribute the jobs*/
                for(i = 1; (i < rank)&&(i <= matrix_size[0]); i ++)
                {
                        for(j = 0; j < partition_size; j ++)
                        {
                                MPI_Buffer_attach(tmp_buf_dis, psize_dis + MPI_BSEND_OVERHEAD);
                                MPI_Bsend(&matrix_1st[(i-1)* partition_size + j][0], msize_dis, MPI_DOUBLE, i, 0, MPI_COMM_WORLD);
                                MPI_Buffer_detach(&tmp_buf_dis, &psize_dis);
                        }
                }

                /*leftover rows (when a is not evenly divisible by rank - 1) are computed by the master itself*/
                if((rank - 1)*partition_size < matrix_size[0])
                {
                        int left_rows = matrix_size[0] - (rank - 1)*partition_size;
                        double ** left_matrix = matrix_multi(&matrix_1st[(rank-1)*partition_size], matrix_2nd, left_rows, matrix_size[1], matrix_size[2]);
                        for(i = (rank-1)*partition_size; i < matrix_size[0]; i ++)
                        {
                                for(j = 0 ; j < matrix_size[2]; j ++)
                                {
                                        matrix_res[i][j] = left_matrix[i- (rank-1)*partition_size][j];
                                }
                        }
                        free_matrix(left_matrix, left_rows, matrix_size[2]);
                }

                /*Gather the results from the workers (note: MPI_Buffer_attach only matters for
                  MPI_Bsend; attaching a buffer around MPI_Recv has no effect)*/
                for(i = 1; (i < rank)&&(i <= matrix_size[0]); i ++)
                {
                        for(j = 0; j < partition_size; j ++)
                        {
                                MPI_Buffer_attach(tmp_buf_gath, psize_gath+ MPI_BSEND_OVERHEAD);
                                MPI_Recv(&matrix_res[(i-1)*partition_size + j][0], msize_gath, MPI_DOUBLE, i, 0, MPI_COMM_WORLD, &status);
                                MPI_Buffer_detach(&tmp_buf_gath, &psize_gath);
                        }
                }

                /*Show results*/
                printf("The Result:\n");
                show_matrix(matrix_res, matrix_size[0], matrix_size[2]);

                free_matrix(matrix_res, matrix_size[0], matrix_size[2]);
                free_matrix(matrix_1st, matrix_size[0], matrix_size[1]);
        }
        else /*non-master process*/
        {
                double ** tmp_matrix;
                double ** part_res_matrix;
                if(myid > matrix_size[0])
                {
                        printf("proc %d no job at all\n", myid);
                }
                else
                {
                        tmp_matrix = create_matrix(partition_size, matrix_size[1], 'n');
                        for(i = 0; i < partition_size; i ++)
                        {
                                MPI_Buffer_attach(tmp_buf_dis, psize_dis + MPI_BSEND_OVERHEAD);
                                MPI_Recv(&tmp_matrix[i][0], msize_dis, MPI_DOUBLE, master, 0, MPI_COMM_WORLD, &status);
                                MPI_Buffer_detach(&tmp_buf_dis, &psize_dis);
                        }
                        part_res_matrix = matrix_multi(tmp_matrix, matrix_2nd, partition_size, matrix_size[1], matrix_size[2]);
                        /*Send The Result*/
                        for(i = 0; i < partition_size; i ++)
                        {
                                MPI_Buffer_attach(tmp_buf_gath, psize_gath+ MPI_BSEND_OVERHEAD);
                                MPI_Bsend(&part_res_matrix[i][0], msize_gath, MPI_DOUBLE, master, 0, MPI_COMM_WORLD);
                                MPI_Buffer_detach(&tmp_buf_gath, &psize_gath);
                        }

                        free_matrix(part_res_matrix, partition_size, matrix_size[2]);
                        free_matrix(tmp_matrix, partition_size, matrix_size[1]);
                }
        }
        free(tmp_buf_gath);
        free(tmp_buf_dis);
        free_matrix(matrix_2nd, matrix_size[1], matrix_size[2]);
        MPI_Finalize();
        return M_OK;
}
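
To build and run this, something along the following lines should work with MPICH or Open MPI (the source file name matrix_mm.c and the argument values are just examples, not from the original post): compile with "mpicc -o matrix_mm matrix_mm.c" and launch with "mpirun -np 4 ./matrix_mm 4 3 5". With 4 processes and a = 4, partition_size is 4/(4-1) = 1, so each of the 3 worker processes multiplies one row of A and the master computes the one leftover row itself before gathering the results.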
