Problem integrating sequential code into parallel MPI code

I am trying to integrate a Laplacian edge-detection operator into my earlier MPI code. My problem now is how to do the edge detection on the one-dimensional array after the data has been scattered. I do get output, but the colours are the inverse of the image I expected. Can anyone help me fix this? Here is the parallel code:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <mpi.h>
#include <math.h>

#define SIZE_X 640
#define SIZE_Y 480
#define   smooth  3

int mod(int z, int l);


/****************** Main Program ***********************/

int main(int argc, char **argv)
{
FILE *FR,*FW;
int ierr;
int rank, size, a[100000], sum, m, n;
int ncells;
int greys[SIZE_X][SIZE_Y];
int rows,cols, maxval;
int mystart, myend, myncells;
const int IONODE=0;
int *disps, *counts, *mydata;
int *data;
int i,j,temp1;
char dummy[50]="";
int csx,sum1,sum2,k,l,x;//w1[3][3],w2[3][3]; 

  //Laplacian Operator
static int w1[3][3]={
{0,-1,0},
{-1,4,-1},
{0,-1,0}
};

static int w2[3][3]={
{0,-1,0},
{-1,4,-1},
{0,-1,0}
};

/****************** Initialize MPI ***********************/



ierr = MPI_Init(&argc, &argv);
if (argc != 3) {
    fprintf(stderr,"Usage: %s infile outfile\n",argv[0]);
    fprintf(stderr,"outputs the negative of the input file.\n");
    return -1;
}            

ierr  = MPI_Comm_rank(MPI_COMM_WORLD, &rank);
ierr = MPI_Comm_size(MPI_COMM_WORLD, &size);
if (ierr) {
    fprintf(stderr,"Catastrophic MPI problem;  exiting\n");
    MPI_Abort(MPI_COMM_WORLD,1);
}




  /****************** Master open image file for read  ***********************/


if (rank == IONODE) {
    rows=SIZE_X;
    cols=SIZE_Y;
    maxval=255;
    FR=fopen(argv[1], "r+");


  /****************** Read the header part of the image ***********************/

    fgets(dummy,50,FR);
    do{  fgets(dummy,50,FR); } while(dummy[0]=='#');
    fgets(dummy,50,FR);

    for (j = 0; j <cols; j++)
        for (i = 0; i <rows; i++)
   {
       fscanf(FR,"%d",&temp1);
     greys[i][j] = temp1;
   }


 /****************** Read pixel values ***********************/

ncells = rows*cols;
disps = (int *)malloc(size * sizeof(int));
counts= (int *)malloc(size * sizeof(int));
data = &(greys[0][0]); /* we know all the data is contiguous */
}

// Time every processor 
//MPI_Barrier(MPI_COMM_WORLD);
//p_time = MPI_Wtime();


 /****************** Everyone calculate their number of cells ***********************/

ierr = MPI_Bcast(&ncells, 1, MPI_INT, IONODE, MPI_COMM_WORLD);
myncells = ncells/size;
mystart = rank*myncells;
myend   = mystart + myncells - 1;
if (rank == size-1) myend = ncells-1;
myncells = (myend-mystart)+1;
mydata = (int *)malloc(myncells * sizeof(int));


 /****************** Assemble the list of counts. Might not be equal if don't divide evenly.***********************/

ierr = MPI_Gather(&myncells, 1, MPI_INT, counts, 1, MPI_INT, IONODE, MPI_COMM_WORLD);
if (rank == IONODE) {
    disps[0] = 0;
    for (i=1; i<size; i++) {
        disps[i] = disps[i-1] + counts[i-1];
    }
}


/****************** Scatter the data to all processor ***********************/

ierr = MPI_Scatterv(data, counts, disps, MPI_INT, mydata, myncells, MPI_INT, IONODE, MPI_COMM_WORLD);


/****************** All processor do AVERAGE FILTERING ***********************/

csx=smooth/2; 
for (i=0; i<myncells; i++)
{
 sum1=0;
 sum2=0;
 for(k=0;k<smooth;k++)
 {
   for(l=0;l<smooth;l++) 
   {
     x=i+k-csx; 
     sum1+=w1[k][l]* mydata[mod(x,myncells)];
     sum2+=w2[k][l]* mydata[mod(x,myncells)];    
   }
 }

 if((abs(sum1)+abs(sum2))>125)
   mydata[i]=255;
 else
   mydata[i]=0;
}


/******************  Gather the data from all processor ***********************/

ierr = MPI_Gatherv(mydata, myncells, MPI_INT, data, counts, disps, MPI_INT, IONODE, MPI_COMM_WORLD);

//MPI_Barrier(MPI_COMM_WORLD);  
//p2_time = MPI_Wtime();
//printf("\nProcessor %d = %g microseconds\n", rank, (p2_time*1000000)-(p_time*1000000));       


/******************  Master open image file for write ***********************/  

if (rank == IONODE){
    FW=fopen(argv[2], "w");
    fprintf(FW,"P2\n%d %d\n255\n",rows,cols);    
    for(j=0;j<cols;j++)
        for(i=0;i<rows;i++)
            fprintf(FW,"%d ", greys[i][j]);

    }

free(mydata);
if (rank == IONODE) {
free(counts);
free(disps);

}

if (rank == IONODE) {
    fclose(FR);
    fclose(FW);

    }



MPI_Finalize();

return 0;

 }


//Sub routine

 /* periodic extension (outside of the  image frame) */
  int mod(int z, int l)
  {
  if( z >= 0 && z < l ) return z;
  else
  if( z < 0) return (z+l);
  else
    if( z > (l-1)) return (z-l);
   return 0;
  }
Here is the sequential code I need to integrate into the program above:

#include <stdio.h>
#include <stdlib.h>
#include <time.h>
#include <math.h>

#define size_x 203
#define size_y 152

typedef struct
{
    int imagesize_x, imagesize_y;
    int **pixel;
} image_t;

image_t allocate_image(const int imagesize_x, const int imagesize_y);
int mod(int z, int l);

int main(void)
{
    image_t image_in, image_out;

    int m, n, temp;
    int smooth, csx, csy;
    int k, l, x, y, sum1, sum2;

    FILE *cpp1, *cpp2;
    char dummy[50] = "";

    /* Laplacian operator */
    static int w1[3][3] = {
        { 0, -1,  0},
        {-1,  4, -1},
        { 0, -1,  0}
    };

    static int w2[3][3] = {
        { 0, -1,  0},
        {-1,  4, -1},
        { 0, -1,  0}
    };

    cpp1 = fopen("e:\\input_image\\A.pgm", "r+");
    cpp2 = fopen("e:\\output_image\\edge_lap.pgm", "w+");

    /* Skip the PGM header: magic number, comment lines, dimensions/maxval */
    fgets(dummy, 50, cpp1);
    do {
        fgets(dummy, 50, cpp1);
    } while (dummy[0] == '#');
    fgets(dummy, 50, cpp1);

    fprintf(cpp2, "P2\n%d %d\n255\n", (size_x), (size_y));

    image_in  = allocate_image(size_x, size_y);
    image_out = allocate_image(size_x, size_y);

    /* Reading input image */
    for (n = 0; n < size_y; n++)
        for (m = 0; m < size_x; m++)
        {
            fscanf(cpp1, "%d", &temp);
            image_in.pixel[m][n] = temp;
        }

    /* Edge detection */
    smooth = 3;
    csx = smooth/2;  csy = smooth/2;

    for (n = 0; n < size_y; n++) {
        for (m = 0; m < size_x; m++) {

            sum1 = 0; sum2 = 0;

            for (k = 0; k < smooth; k++) {
                for (l = 0; l < smooth; l++) {
                    x = m + k - csx;  y = n + l - csy;
                    sum1 += w1[k][l] * image_in.pixel[mod(x, size_x)][mod(y, size_y)];
                    sum2 += w2[k][l] * image_in.pixel[mod(x, size_x)][mod(y, size_y)];
                }
            }

            if ((fabs(sum1) + fabs(sum2)) > 125)
                image_out.pixel[m][n] = 255;
            else
                image_out.pixel[m][n] = 0;
        }
    }

    /* Writing edge-detected image */
    for (n = 0; n < size_y; n++)
        for (m = 0; m < size_x; m++)
            fprintf(cpp2, "%d ", image_out.pixel[m][n]);

    fclose(cpp1);
    fclose(cpp2);

    return 0;
}

image_t allocate_image(const int imagesize_x, const int imagesize_y)
{
    image_t result;
    int x = 0, y = 0;

    result.imagesize_x = imagesize_x;
    result.imagesize_y = imagesize_y;

    result.pixel = (int **) calloc(imagesize_x, sizeof(int *));

    for (x = 0; x < imagesize_x; x++)
    {
        result.pixel[x] = (int *) calloc(imagesize_y, sizeof(int));

        for (y = 0; y < imagesize_y; y++)
        {
            result.pixel[x][y] = 0;
        }
    }

    return result;
}

/* periodic extension (outside of the image frame) */
int mod(int z, int l)
{
    if (z >= 0 && z < l) return z;
    else if (z < 0)      return (z + l);
    else if (z > (l-1))  return (z - l);
    return 0;
}
This is the input image.

I run it with: mpirun -np 10 ./mysource ballogs.pgm output.pgm

So in the case where you only had to invert the image, the 2D structure of the image didn't matter; you could break the image up into (number of pixels)/(number of processes) chunks, and each process could just invert its own pixels.

Here, however, the 2D structure really does matter: to apply the stencil you need all of a pixel's neighbours. At the edges of the image you need data from the opposite physical edge; these are called periodic boundary conditions, and the image wraps around like a torus.

So we need to decompose the data in a way that keeps its 2D structure. We could do a full 2D decomposition; with 6 processes the image would look like:

+---+---+---+
| 0 | 1 | 2 |
+---+---+---+
| 3 | 4 | 5 |
+---+---+---+
It is 2011 and I am still drawing ASCII art to communicate through a computer. How did that happen?

For now, though, it is simpler to do a 1D decomposition; that is, we keep the 2D structure of the data but split it along only one dimension. In C it is much easier to do this along rows, so that each chunk of data is contiguous in memory:

-----1------
-----2------
-----3------
-----4------
-----5------
-----6------
By far the easiest approach is to pad the number of rows in the original image so that it divides evenly among the number of tasks.

Now, again, if a task is responsible for applying the stencil to row 0, it needs data from row nrows-1, and vice versa; the same goes for column 0 and column ncols-1. So we pad the array with two extra rows and two extra columns, and when reading the data in we copy column ncols-1 into column 0, and so on.

In that case, reading in the image becomes something like this:

fgets(dummy,50,FR);
do{  fgets(dummy,50,FR); } while(dummy[0]=='#');
sscanf(dummy,"%d %d",&cols, &rows);
fgets(dummy,50,FR);


nrowspertask = (rows/size);
if (nrowspertask*size < rows) nrowspertask++;
int totrows = nrowspertask*size;

/* pad the number of rows so it divides evenly by # of tasks */
/* and then add 2 rows, 2 cols, for "wraparound" at edges */

image_t image;
image = allocate_image( cols+2, totrows+2 );

/****************** Read pixel values ***********************/

for (j = 0; j <cols; j++)
    for (i = 0; i <rows; i++)
    {
        fscanf(FR,"%d",&temp1);
        image.pixel[j+1][i+1] = temp1;
    }

/* copy bottom row to top, top row to bottom */
for (j=1; j<cols+1; j++) {
    image.pixel[j][0]      = image.pixel[j][rows];
    image.pixel[j][rows+1] = image.pixel[j][1];
}

/* copy leftmost col to right, rightmost col to left */
for (i=1; i<rows+1; i++) {
    image.pixel[0][i]      = image.pixel[cols][i];
    image.pixel[cols+1][i] = image.pixel[1][i];
}
Then everyone gets their piece of the data:

locimage = allocate_image(cols+2, nrowspertask+2);
ierr = MPI_Scatterv(data, counts, disps, MPI_INT,&(locimage.pixel[0][0]),
                         (nrowspertask+2)*(cols+2), MPI_INT, IONODE, MPI_COMM_WORLD);
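
Note that MPI_Scatterv writes into the receive buffer as one contiguous run of (nrowspertask+2)*(cols+2) ints starting at &(locimage.pixel[0][0]), so this only works if the pixel storage is a single block; the allocate_image() from the sequential code callocs each column separately, which is not contiguous. A minimal sketch of a contiguous allocator (allocate_image_contig is a hypothetical name, reusing the same image_t struct):

/* Hypothetical contiguous allocator: one calloc for all pixels plus an
 * array of column pointers into it, so that &(image.pixel[0][0]) really
 * is the start of one contiguous (imagesize_x*imagesize_y)-int block. */
image_t allocate_image_contig(const int imagesize_x, const int imagesize_y)
{
    image_t result;
    int x;

    result.imagesize_x = imagesize_x;
    result.imagesize_y = imagesize_y;

    int *block = (int *) calloc(imagesize_x * imagesize_y, sizeof(int));
    result.pixel = (int **) malloc(imagesize_x * sizeof(int *));
    for (x = 0; x < imagesize_x; x++)
        result.pixel[x] = &block[x * imagesize_y];

    return result;
}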
Now you do your filtering very much as in the serial case, except that you don't have to worry about the mod() wraparound, because you have already padded the data with the information you need.
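
A minimal sketch of what that local filtering loop could look like, assuming locimage holds the scattered rows plus the one-pixel halo on every side, locout is a hypothetical second local image of the same size for the result, and smooth, w1 and w2 are the same as in your code:

image_t locout = allocate_image(cols+2, nrowspertask+2);  /* hypothetical output buffer */
int m, n, k, l, x, y, sum1, sum2;
int csx = smooth/2, csy = smooth/2;

/* only the real (interior) pixels are computed; the halo supplies
 * the neighbours, so no mod() wraparound is needed */
for (n = 1; n <= nrowspertask; n++) {
    for (m = 1; m <= cols; m++) {
        sum1 = 0; sum2 = 0;
        for (k = 0; k < smooth; k++)
            for (l = 0; l < smooth; l++) {
                x = m + k - csx;    /* neighbouring column */
                y = n + l - csy;    /* neighbouring row    */
                sum1 += w1[k][l] * locimage.pixel[x][y];
                sum2 += w2[k][l] * locimage.pixel[x][y];
            }
        locout.pixel[m][n] = (abs(sum1) + abs(sum2) > 125) ? 255 : 0;
    }
}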


Gathering the data back into the array works very similarly, but I'll leave that part to you; just make sure you send back only the real rows, not the padded ones, or you will overwrite your data.
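
Purely to illustrate "only the real rows go back", here is one possible shape of that gather, assuming (hypothetically) that each task keeps its filtered chunk in a flat row-major buffer localdata of (nrowspertask+2)*(cols+2) ints, with row 0 and row nrowspertask+1 being the halo rows, and that fulldata, gcounts and gdisps are buffers built on IONODE much like counts and disps above:

/* gather only the nrowspertask real rows from each task; the halo rows
 * are skipped so neighbouring chunks are not overwritten */
int rowlen = cols + 2;
int *fulldata = NULL, *gcounts = NULL, *gdisps = NULL;

if (rank == IONODE) {
    fulldata = (int *) malloc(totrows * rowlen * sizeof(int));
    gcounts  = (int *) malloc(size * sizeof(int));
    gdisps   = (int *) malloc(size * sizeof(int));
    for (i = 0; i < size; i++) {
        gcounts[i] = nrowspertask * rowlen;          /* real rows only  */
        gdisps[i]  = i * nrowspertask * rowlen;      /* where they land */
    }
}

ierr = MPI_Gatherv(&localdata[rowlen], nrowspertask * rowlen, MPI_INT,
                   fulldata, gcounts, gdisps, MPI_INT,
                   IONODE, MPI_COMM_WORLD);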

No one but you knows exactly what your data format is and everything else you are dealing with; integrating the code is a job for your editor and debugger. The master task is the one that reads the data, but in the program above the master task is also one of the workers; everyone works on its own piece.

I tried your code with the solution Jonathan Durso suggested, but the result is a completely black PGM image. Do you know how you solved it? Could you post the correct code?