c domain-dcmp/heat/heat-2d_x
c 1-D domain decomposition in x-direction with 2 grids
c
      include "mpif.h"
      parameter (MAX_X=200,MAX_Y=400)
      integer my_rank,nproc,ierr,stride1,stride2,k,nx1,nx2
      real*8 h1,h2,r1,r2,ua(MAX_X,MAX_Y),ub(MAX_X,MAX_Y),dt,t
      real*8 va(MAX_X,MAX_Y),vb(MAX_X,MAX_Y)
c
      call init(nproc,my_rank)
c
c fine grid: nx2=8 local points per process, r2=dt/h**2=0.25
      r2=0.25
      nx2=8
      ntotx2=nx2*nproc+2
      ntoty2=ntotx2
      xl=1.0
      h2=xl/(ntotx2-1)
      dt=r2*h2*h2
c coarse grid: nx1=4 local points per process
      nx1=4
      ntotx1=nx1*nproc+2
      ntoty1=ntotx1
      xl=1.0
      h1=xl/(ntotx1-1)
      r1=dt/(h1*h1)
c
c initial conditions on the coarse and the fine grids
      call init_cn(nproc,my_rank,nx1,MAX_X,ntoty1,h1,ua)
      call init_cn(nproc,my_rank,nx2,MAX_X,ntoty2,h2,va)
c
c define new type, stride1, for message passing on the coarse grid
c   ntoty1: number of blocks
c   1:      number of elements in each block
c   MAX_X:  number of elements between the start of each block
c   MPI_DOUBLE_PRECISION: element type
      call MPI_Type_vector(ntoty1,1,MAX_X,
     *     MPI_DOUBLE_PRECISION,stride1,ierr)
      call MPI_Type_commit(stride1,ierr)
c
c define new type, stride2, for message passing on the fine grid
      call MPI_Type_vector(ntoty2,1,MAX_X,
     *     MPI_DOUBLE_PRECISION,stride2,ierr)
      call MPI_Type_commit(stride2,ierr)
c
c 4 time levels
      do k=1,4
         t=dt*dble(k-1)
c coarse grid
         call bndry_cn(nproc,my_rank,nx1,MAX_X,ntoty1,ua,t)
         call comput(nproc,my_rank,nx1,MAX_X,ntoty1,r1,ua,ub)
         call exchange(nproc,my_rank,nx1,MAX_X,ntoty1,stride1,ub)
         call gather(k,nproc,my_rank,nx1,MAX_X,ntoty1,ub)
         do i=1,nx1+2
            do j=1,ntoty1
               ua(i,j)=ub(i,j)
            enddo
         enddo
c
c fine grid
         call bndry_cn(nproc,my_rank,nx2,MAX_X,ntoty2,va,t)
         call comput(nproc,my_rank,nx2,MAX_X,ntoty2,r2,va,vb)
         call exchange(nproc,my_rank,nx2,MAX_X,ntoty2,stride2,vb)
         call gather(k,nproc,my_rank,nx2,MAX_X,ntoty2,vb)
         do i=1,nx2+2
            do j=1,ntoty2
               va(i,j)=vb(i,j)
            enddo
         enddo
      enddo
      call MPI_Finalize(ierr)
      stop
      end
c
c--------------------------------------------------------
c Initial Condition
c--------------------------------------------------------
      subroutine init_cn(nproc,my_rank,nx,MAX_X,ntoty,h,u1)
      real*8 u1(MAX_X,1),h
      real*8 x(100),y(100),pi
      integer nproc,my_rank,nx
      pi=dacos(-1.d0)
c local x coordinates: each process owns nx interior points
      do i=1,nx+2
         x(i)=my_rank*dble(nx)*h+dble(i-1)*h
      enddo
      do j=1,ntoty
         y(j)=h*(j-1)
      enddo
c
c initial condition u(x,y,0)=sin(pi*x)*sin(pi*y)
      do i=1,nx+2
         do j=1,ntoty
            u1(i,j)=dsin(pi*x(i))*dsin(pi*y(j))
         enddo
      enddo
      return
      end
c---------------------------------------------------------
c Boundary Condition
c---------------------------------------------------------
      subroutine bndry_cn(nproc,my_rank,nx,MAX_X,ntoty,u,t)
      integer nproc,my_rank,nx,ntoty
      real*8 u(MAX_X,1),t
c
c bndry u(0,y)=sin(t) (left physical boundary, rank 0 only)
      if(my_rank.eq.0) then
         do j=1,ntoty
            u(1,j)=dsin(t)
         enddo
      endif
c bndry u(1,y)=sin(t) (right physical boundary, last rank only)
      if(my_rank.eq.nproc-1) then
         do j=1,ntoty
            u(nx+2,j)=dsin(t)
         enddo
      endif
c bndry u(x,0)=sin(t)
      do i=1,nx+2
         u(i,1)=dsin(t)
      enddo
c bndry u(x,1)=sin(t)
      do i=1,nx+2
         u(i,ntoty)=dsin(t)
      enddo
      return
      end
c
c-----------------------------------------------------------
c Exchanging the first and the last u with the neighbors
c-----------------------------------------------------------
      subroutine exchange(nproc,my_rank,nx,MAX_X,ntoty,
     *     stride,u)
      include "mpif.h"
      real*8 u(MAX_X,ntoty)
      integer status(MPI_STATUS_SIZE),stride
      integer nproc,my_rank,itag,nproc1,l_nbr,r_nbr
      nproc1=nproc-1
c
c determine the left and right neighbors; boundary ranks get
c MPI_PROC_NULL so no special case is needed at the ends
c
      if((my_rank-1).lt.0) then
         l_nbr=MPI_PROC_NULL
      else
         l_nbr=my_rank-1
      endif
      if((my_rank+1).gt.nproc1) then
         r_nbr=MPI_PROC_NULL
      else
         r_nbr=my_rank+1
      endif
c     write(*,*) 'my_rank=',my_rank,' l_nbr=',l_nbr,' r_nbr=',r_nbr
      itag=20
c
c send u(2,j) to the left neighbor and receive u(nx+2,j) from the
c right neighbor
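c (the derived vector type stride addresses one x-index of u across
c  all ntoty y-values, which are MAX_X elements apart in memory; a
c  Sendrecv with an MPI_PROC_NULL partner completes without moving
c  data, so the boundary ranks need no special handling)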
c
      call MPI_Sendrecv(u(2,1),1,stride,l_nbr,itag,
     *     u(nx+2,1),1,stride,r_nbr,itag,
     *     MPI_COMM_WORLD,status,ierr)
c
c sending the last u to my_rank+1
c
      itag=10
c
c send u(nx+1,j) to the right neighbor and receive u(1,j) from the
c left neighbor
c
      call MPI_Sendrecv(u(nx+1,1),1,stride,r_nbr,itag,
     *     u(1,1),1,stride,l_nbr,itag,
     *     MPI_COMM_WORLD,status,ierr)
      return
      end
c
c--------------------------------------------------------------
c Computing u for the next time step
c--------------------------------------------------------------
      subroutine comput(nproc,my_rank,nx,MAX_X,ntoty,r,u1,u2)
      real*8 u1(MAX_X,1),u2(MAX_X,1),r
      integer nproc,my_rank
c     do i=1,nx+2
c        do j=1,ntoty
c           u2(i,j)=u1(i,j)
c        enddo
c     enddo
c
c explicit scheme: u2 = u1 + r*(5-point Laplacian of u1)
      do i=2,nx+1
         do j=2,ntoty-1
            u2(i,j)=r*(u1(i-1,j)-2.*u1(i,j)+u1(i+1,j)+
     *           u1(i,j-1)-2.*u1(i,j)+u1(i,j+1))+u1(i,j)
         enddo
      enddo
      return
      end
c--------------------------------------------------------------
c Gather the solution from each subdomain
c--------------------------------------------------------------
      subroutine gather(k,nproc,my_rank,nx,MAX_X,ntoty,u)
      integer k,iroot,nproc,my_rank,nx,ntoty
      real*8 u(MAX_X,1),uu(200),array(400)
      include 'mpif.h'
c collecting the interior points of u from each process into a
c global array on the root process
      iroot=0
      do j=2,ntoty-1
         do i=1,nx+2
            uu(i)=u(i,j)
         enddo
         call MPI_Gather(uu(2),nx,MPI_DOUBLE_PRECISION,array,nx,
     *        MPI_DOUBLE_PRECISION,iroot,MPI_COMM_WORLD,ierr)
         if(my_rank.eq.0) then
            write(*,*) 'k=',k,' j=',j,' nx=',nx,' ',
     *           (array(i),i=1,nx*nproc)
         endif
      enddo
      return
      end
c-----------------------------------------------------------
c Initializing MPI
c-----------------------------------------------------------
      subroutine init(nproc,my_rank)
      include "mpif.h"
      integer my_rank,nproc
      character*30 nodename
      integer nchar
      call MPI_Init(ierr)
      call MPI_Comm_rank(MPI_COMM_WORLD,my_rank,ierr)
      call MPI_Comm_size(MPI_COMM_WORLD,nproc,ierr)
      call MPI_Get_processor_name(nodename,nchar,ierr)
      write(*,*) 'my_rank=',my_rank,' processor=',nodename
      return
      end
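c
c-----------------------------------------------------------
c Example build and run (assumed source file name and MPI
c wrapper/launcher; adjust to the local installation):
c   mpif77 -o heat-2d_x heat-2d_x.f
c   mpirun -np 4 ./heat-2d_x
c-----------------------------------------------------------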