!******** FDTD Computation with 3-D PML ********
!******** Using MPI Library             ********
      call run_mpi_init
      call set_cell
      call inival
      call init_pml
      call fdmain
      call run_mpi_fin
      stop
      end

!******** MPI_INIT ********
! Every routine that calls MPI subroutines must contain
! "include 'mpif.h'".
! The error-code argument ierr is hardly ever examined, but it
! must always be supplied.
      subroutine run_mpi_init
      include 'mpif.h'
      include 'header.f'
      logical ipd(3),ir
      dimension idm(3)
! Initialize the MPI execution environment.
      call mpi_init( ierr )
! Obtain the number of processes (isize) and the rank of this
! process (irank).
      call mpi_comm_size( mpi_comm_world, isize, ierr )
      call mpi_comm_rank( mpi_comm_world, irank, ierr )
      if( irank .eq. 0 ) then
        write(*,'(3(a,i3),a)') "id: ", irank,
     &    ", process ", irank, " of ", isize, " is alive"
        t_start = mpi_wtime()
        tick = mpi_wtick()
        write(*,'(a,i3,a,f16.8)') "id: ", irank,
     &    ", clock resolution in seconds: ", tick
      endif
      myprev = mod( (irank + isize - 1), isize )
      mynext = mod( (irank + 1), isize )
! Synchronize: every process waits here until all processes have
! called MPI_Barrier.
      call mpi_barrier(mpi_comm_world,ierr)
      write(*,'(4(a,i3))') "id: ", irank,
     &  ", my id is ", irank,
     &  ", prev is ", myprev,
     &  ", next is ", mynext
! -1 marks "no neighbour in this direction"; it is overwritten by
! mpi_cart_shift where a neighbour exists.
      npdx(1) = -1
      npdy(1) = -1
      npdz(1) = -1
      npdx(2) = -1
      npdy(2) = -1
      npdz(2) = -1
      idm(1) = npx
      idm(2) = npy
      idm(3) = npz
      ipd(1) = .false.
      ipd(2) = .false.
      ipd(3) = .false.
      ir     = .false.
! Cartesian constructor: create the Cartesian topology communicator.
      call mpi_cart_create(mpi_comm_world,
     &                     ndims,
     &                     idm,
     &                     ipd,
     &                     ir,
     &                     mpi_comm_cart,
     &                     ierr)
! Shift along the Cartesian coordinates (x, y, z directions):
! compute the source and destination ranks of each shift.
      if(npx .gt. 1) then
        call mpi_cart_shift(mpi_comm_cart,
     &                      0,
     &                      1,
     &                      npdx(1),
     &                      npdx(2),
     &                      ierr)
      end if
      if(npy .gt. 1) then
        call mpi_cart_shift(mpi_comm_cart,
     &                      1,
     &                      1,
     &                      npdy(1),
     &                      npdy(2),
     &                      ierr)
      end if
      if(npz .gt. 1) then
        call mpi_cart_shift(mpi_comm_cart,
     &                      2,
     &                      1,
     &                      npdz(1),
     &                      npdz(2),
     &                      ierr)
      end if
! Derive this process's position (ixrank, iyrank, izrank) in the
! process grid from its rank and its shift neighbours.
      ixrank = 0
      iyrank = 0
      izrank = 0
      if(npx .gt. 1) then
        if(npdx(1) .eq. -1) then
          if((npdx(2)-irank) .eq. 1) then
            ixrank = mod(irank,npx)
          else
            ixrank=(irank-mod(irank,(npdx(2)-irank)))/(npdx(2)-irank)
          endif
        else
          if((irank-npdx(1)) .eq. 1) then
            ixrank = mod(irank,npx)
          else
            ixrank=(irank-mod(irank,(irank-npdx(1))))/(irank-npdx(1))
          endif
        endif
      endif
      if(npy .gt. 1) then
        if(npdy(1) .eq. -1) then
          if((npdy(2)-irank) .eq. 1) then
            iyrank = mod(irank,npy)
          else
            iyrank=(irank-mod(irank,(npdy(2)-irank)))/(npdy(2)-irank)
          endif
        else
          if((irank-npdy(1)) .eq. 1) then
            iyrank = mod(irank,npy)
          else
            iyrank=(irank-mod(irank,(irank-npdy(1))))/(irank-npdy(1))
          endif
        endif
      endif
      if(npz .gt. 1) then
        if(npdz(1) .eq. -1) then
          if((npdz(2)-irank) .eq. 1) then
            izrank = mod(irank,npz)
          else
            izrank=(irank-mod(irank,(npdz(2)-irank)))/(npdz(2)-irank)
          endif
        else
          if((irank-npdz(1)) .eq. 1) then
            izrank = mod(irank,npz)
          else
            izrank=(irank-mod(irank,(irank-npdz(1))))/(irank-npdz(1))
          endif
        endif
      endif
      call mpi_barrier(mpi_comm_world,ierr)
      write(6,*) '[x] id: ',irank,', prev is ',npdx(1),
     &           ', next is ',npdx(2)
      write(6,*) '[y] id: ',irank,', prev is ',npdy(1),
     &           ', next is ',npdy(2)
      write(6,*) '[z] id: ',irank,', prev is ',npdz(1),
     &           ', next is ',npdz(2)
      return
      end

!******** MPI_FIN ********
      subroutine run_mpi_fin
      include 'mpif.h'
      include 'header.f'
      write(*,'(4(a,i3))') "id: ", irank,
     &  ", my id is ", irank,
     &  ", prev is ", myprev,
     &  ", next is ", mynext
      if( irank .eq. 0 ) then
        write(*,'(a,i3,a,f16.8)') "id: ", irank,
     &    ", clock resolution in seconds: ", tick
      endif
      if( irank .eq. 0 ) then
        t_end = mpi_wtime()
        write(*,'(a,i3,a,f16.8)') "id: ", irank,
     &    ", elapsed time in seconds: ", t_end-t_start
        open(unit=11,file='time')
        write(11,'(a,i3,a,f16.8)') "id: ", irank,
     &    ", elapsed time in seconds: ", t_end-t_start
        close(unit=11)
      endif
! Finalize MPI.
! This must be called after every other MPI subroutine.
      call mpi_finalize( ireturncode )
      write(*,'(a,i3,a,i3)')
     &  "id: ",irank,", return code is ", ireturncode
      return
      end
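!******** (sketch) MPI_CART_COORDS ********
! Illustrative sketch, not called by the program: the position of a
! rank in the process grid, derived above from its shift neighbours,
! can also be queried directly with MPI_CART_COORDS.  The sketch
! assumes that mpi_comm_cart and irank are available through
! header.f, as in run_mpi_init; the returned coordinates start at 0
! in each direction and correspond to ixrank, iyrank and izrank.
      subroutine cart_coords_sketch
      include 'mpif.h'
      include 'header.f'
      integer icoord(3)
! query the (x,y,z) indices of this rank in the 3-D process grid
      call mpi_cart_coords(mpi_comm_cart, irank, 3, icoord, ierr)
      write(6,*) 'id: ', irank, ', cart coords: ',
     &           icoord(1), icoord(2), icoord(3)
      return
      end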
!******** subroutine fdmain ********
      subroutine fdmain
      include 'mpif.h'
      include 'header.f'
      character*30 filnam
      ii = 0
! With the x, y, z directions indexed by i, j, k, create and commit
! the derived datatypes needed to exchange the field components in
! each direction.
      if(npx .gt. 1) then
        call mpi_type_vector(
     &       mdy_mpi*mdz_mpi,
     &       1,
     &       mdx_mpi+1,
     &       mpi_real,
     &       jkvec_j,
     &       ierr)
        call mpi_type_commit(jkvec_j,ierr)
        call mpi_type_vector(
     &       (mdy_mpi+1)*mdz_mpi,
     &       1,
     &       mdx_mpi+1,
     &       mpi_real,
     &       jkvec_k,
     &       ierr)
        call mpi_type_commit(jkvec_k,ierr)
      endif
      if(npy .gt. 1) then
        call mpi_type_vector(
     &       mdz_mpi,
     &       mdx_mpi,
     &       mdx_mpi*(mdy_mpi+1),
     &       mpi_real,
     &       ikvec_i,
     &       ierr)
        call mpi_type_commit(ikvec_i,ierr)
        call mpi_type_vector(
     &       mdz_mpi,
     &       mdx_mpi,
     &       (mdx_mpi+1)*(mdy_mpi+1),
     &       mpi_real,
     &       ikvec_k,
     &       ierr)
        call mpi_type_commit(ikvec_k,ierr)
      endif
      if(npz .gt. 1) then
        call mpi_type_vector(
     &       mdy_mpi,
     &       mdx_mpi,
     &       mdx_mpi,
     &       mpi_real,
     &       ijvec_i,
     &       ierr)
        call mpi_type_commit(ijvec_i,ierr)
        call mpi_type_vector(
     &       mdy_mpi,
     &       mdx_mpi,
     &       mdx_mpi+1,
     &       mpi_real,
     &       ijvec_j,
     &       ierr)
        call mpi_type_commit(ijvec_j,ierr)
      endif
! When the absorbing boundary condition is applied, the absorbing
! layers in the x direction span kx1-kx2 and kx3-kx4.  Only on the
! nodes that actually contain the outer boundary is the layer
! thickness assigned to kx2_mpi or kx3_mpi.
! (In Fortran 77 this case analysis is required.)
      kx1_mpi = kx1
      kx4_mpi = kx4
      if (npdx(1) .eq. -1) then
        kx2_mpi = kx2
      else
        kx2_mpi = kx1
      endif
      if (npdx(2) .eq. -1) then
        kx3_mpi = kx3
      else
        kx3_mpi = kx4
      endif
      ky1_mpi = ky1
      ky4_mpi = ky4
      if (npdy(1) .eq. -1) then
        ky2_mpi = ky2
      else
        ky2_mpi = ky1
      endif
      if (npdy(2) .eq. -1) then
        ky3_mpi = ky3
      else
        ky3_mpi = ky4
      endif
      kz1_mpi = kz1
      kz4_mpi = kz4
      if (npdz(1) .eq. -1) then
        kz2_mpi = kz2
      else
        kz2_mpi = kz1
      endif
      if (npdz(2) .eq. -1) then
        kz3_mpi = kz3
      else
        kz3_mpi = kz4
      endif
      kt = -1
! Move the entries of xx, yy, zz that belong to this subdomain to
! local indices 1..mdx_mpi, 1..mdy_mpi, 1..mdz_mpi.
      if (npx .gt. 1) then
        do jx=1,mdx_mpi
          xx(jx)=xx(ixrank*mdx_mpi+jx)
        enddo
      endif
      if (npy .gt. 1) then
        do jy=1,mdy_mpi
          yy(jy)=yy(iyrank*mdy_mpi+jy)
        enddo
      endif
      if (npz .gt. 1) then
        do jz=1,mdz_mpi
          zz(jz)=zz(izrank*mdz_mpi+jz)
        enddo
      endif
! set pec
      include 'set_pec.f'
! exchange the edge entries of xx, yy, zz with the neighbours
      if (npx .gt. 1) then
        call sendxx
      endif
      if (npy .gt. 1) then
        call sendyy
      endif
      if (npz .gt. 1) then
        call sendzz
      endif
! time-stepping loop
   50 kt = kt + 1
      if (irank .eq. 0) then
        if(mod(kt,10).eq.0) then
          write(6,*) kt,'/',it
        endif
      endif
! source x
      include 'input.f'
! first calculate the electric field
      include 'calc_e_mpi.f'
      include 'calc_pml_e_mpi.f'
! send the electric field
      if(npx .gt. 1) then
        call sende1
      endif
      if(npy .gt. 1) then
        call sende2
      endif
      if(npz .gt. 1) then
        call sende3
      endif
! second calculate the magnetic field
      include 'calc_h_mpi.f'
      include 'calc_pml_h_mpi.f'
! send the magnetic field
      if(npx .gt. 1) then
        call sendh1
      endif
      if(npy .gt. 1) then
        call sendh2
      endif
      if(npz .gt. 1) then
        call sendh3
      endif
! output
      include 'output.f'
      if (kt.gt.it) goto 900
      call mpi_barrier(mpi_comm_world,ierr)
      goto 50
  900 continue
      return
      end

!******** subroutine mpi sendrecv ********
      subroutine sende1
      include 'mpif.h'
      include 'header.f'
! Exchange ey across the x-direction subdomain boundary.  Even and
! odd ixrank issue the blocking calls in opposite order, so every
! blocking send finds a matching receive and no deadlock occurs.
      if (mod(ixrank,2) .eq. 0 ) then
        call mpi_recv(ey(mdx_mpi+1,1,1),1,jkvec_j,npdx(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ey(1,1,1),1,jkvec_j,npdx(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ey(1,1,1),1,jkvec_j,npdx(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ey(mdx_mpi+1,1,1),1,jkvec_j,npdx(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
! Exchange ez in the same way.
      if (mod(ixrank,2) .eq. 0 ) then
        call mpi_recv(ez(mdx_mpi+1,1,1),1,jkvec_k,npdx(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ez(1,1,1),1,jkvec_k,npdx(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ez(1,1,1),1,jkvec_k,npdx(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ez(mdx_mpi+1,1,1),1,jkvec_k,npdx(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
      return
      end
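!******** (sketch) MPI_SENDRECV ********
! Illustrative sketch, not called by the program: the even/odd
! ordering used in sende1 (and in the send/recv routines below)
! avoids deadlock by hand.  MPI_SENDRECV pairs the blocking send and
! receive in one call and achieves the same thing without the parity
! test.  The sketch exchanges ey across the x boundaries and assumes
! the same arrays, datatypes and neighbour ranks (header.f) as
! sende1; it is an alternative pattern, not the method used here.
      subroutine sende1_sendrecv_sketch
      include 'mpif.h'
      include 'header.f'
! send our first ey plane to the previous x rank while receiving
! the halo plane from the next x rank
      call mpi_sendrecv(ey(1,1,1),1,jkvec_j,npdx(1),1,
     &                  ey(mdx_mpi+1,1,1),1,jkvec_j,npdx(2),1,
     &                  mpi_comm_world,mstatus,ierr)
      return
      end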
      subroutine sendh1
      include 'mpif.h'
      include 'header.f'
! Exchange hy and hz across the x-direction subdomain boundary.
      if (mod(ixrank,2) .eq. 0 ) then
        call mpi_send(hy(mdx_mpi,1,0),1,jkvec_j,npdx(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hy(0,1,0),1,jkvec_j,npdx(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hy(0,1,0),1,jkvec_j,npdx(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hy(mdx_mpi,1,0),1,jkvec_j,npdx(2),2,
     &                mpi_comm_world,ierr)
      endif
      if (mod(ixrank,2) .eq. 0 ) then
        call mpi_send(hz(mdx_mpi,0,1),1,jkvec_k,npdx(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hz(0,0,1),1,jkvec_k,npdx(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hz(0,0,1),1,jkvec_k,npdx(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hz(mdx_mpi,0,1),1,jkvec_k,npdx(2),2,
     &                mpi_comm_world,ierr)
      endif
      return
      end

      subroutine sende2
      include 'mpif.h'
      include 'header.f'
! Exchange ex and ez across the y-direction subdomain boundary.
      if (mod(iyrank,2) .eq. 0 ) then
        call mpi_recv(ex(1,mdy_mpi+1,1),1,ikvec_i,npdy(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ex(1,1,1),1,ikvec_i,npdy(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ex(1,1,1),1,ikvec_i,npdy(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ex(1,mdy_mpi+1,1),1,ikvec_i,npdy(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
      if (mod(iyrank,2) .eq. 0 ) then
        call mpi_recv(ez(1,mdy_mpi+1,1),1,ikvec_k,npdy(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ez(1,1,1),1,ikvec_k,npdy(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ez(1,1,1),1,ikvec_k,npdy(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ez(1,mdy_mpi+1,1),1,ikvec_k,npdy(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
      return
      end

      subroutine sendh2
      include 'mpif.h'
      include 'header.f'
! Exchange hx and hz across the y-direction subdomain boundary.
      if (mod(iyrank,2) .eq. 0 ) then
        call mpi_send(hx(1,mdy_mpi,1),1,ikvec_i,npdy(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hx(1,0,1),1,ikvec_i,npdy(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hx(1,0,1),1,ikvec_i,npdy(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hx(1,mdy_mpi,1),1,ikvec_i,npdy(2),2,
     &                mpi_comm_world,ierr)
      endif
      if (mod(iyrank,2) .eq. 0 ) then
        call mpi_send(hz(1,mdy_mpi,1),1,ikvec_k,npdy(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hz(1,0,1),1,ikvec_k,npdy(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hz(1,0,1),1,ikvec_k,npdy(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hz(1,mdy_mpi,1),1,ikvec_k,npdy(2),2,
     &                mpi_comm_world,ierr)
      endif
      return
      end

      subroutine sende3
      include 'mpif.h'
      include 'header.f'
! Exchange ex and ey across the z-direction subdomain boundary.
      if (mod(izrank,2) .eq. 0 ) then
        call mpi_recv(ex(1,1,mdz_mpi+1),1,ijvec_i,npdz(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ex(1,1,1),1,ijvec_i,npdz(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ex(1,1,1),1,ijvec_i,npdz(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ex(1,1,mdz_mpi+1),1,ijvec_i,npdz(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
      if (mod(izrank,2) .eq. 0 ) then
        call mpi_recv(ey(1,1,mdz_mpi+1),1,ijvec_j,npdz(2),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(ey(1,1,1),1,ijvec_j,npdz(1),2,
     &                mpi_comm_world,ierr)
      else
        call mpi_send(ey(1,1,1),1,ijvec_j,npdz(1),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(ey(1,1,mdz_mpi+1),1,ijvec_j,npdz(2),2,
     &                mpi_comm_world,mstatus,ierr)
      endif
      return
      end

      subroutine sendh3
      include 'mpif.h'
      include 'header.f'
! Exchange hx and hy across the z-direction subdomain boundary.
      if (mod(izrank,2) .eq. 0 ) then
        call mpi_send(hx(1,1,mdz_mpi),1,ijvec_i,npdz(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hx(1,1,0),1,ijvec_i,npdz(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hx(1,1,0),1,ijvec_i,npdz(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hx(1,1,mdz_mpi),1,ijvec_i,npdz(2),2,
     &                mpi_comm_world,ierr)
      endif
      if (mod(izrank,2) .eq. 0 ) then
        call mpi_send(hy(1,1,mdz_mpi),1,ijvec_j,npdz(2),1,
     &                mpi_comm_world,ierr)
        call mpi_recv(hy(1,1,0),1,ijvec_j,npdz(1),2,
     &                mpi_comm_world,mstatus,ierr)
      else
        call mpi_recv(hy(1,1,0),1,ijvec_j,npdz(1),1,
     &                mpi_comm_world,mstatus,ierr)
        call mpi_send(hy(1,1,mdz_mpi),1,ijvec_j,npdz(2),2,
     &                mpi_comm_world,ierr)
      endif
      return
      end
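!******** (sketch) MPI_TYPE_VECTOR ********
! Illustrative sketch, not called by the program: how the strided
! datatypes committed in fdmain describe one plane of a 3-D array.
! For a real array a(0:nx,ny,nz), the plane i = const holds ny*nz
! elements and consecutive elements are separated by nx+1 reals in
! memory; that is the count / blocklength / stride triple given to
! MPI_TYPE_VECTOR, just as jkvec_j above uses mdy_mpi*mdz_mpi, 1 and
! mdx_mpi+1.  The names nx, ny, nz, iplane are local to this sketch.
      subroutine plane_type_sketch
      include 'mpif.h'
      integer nx, ny, nz, iplane, nbytes, ierr
      parameter (nx = 4, ny = 3, nz = 2)
! one element per (j,k) pair: count = ny*nz, blocklength = 1,
! stride = nx+1 (distance between a(i,j,k) and a(i,j+1,k))
      call mpi_type_vector(ny*nz, 1, nx+1, mpi_real, iplane, ierr)
      call mpi_type_commit(iplane, ierr)
! the committed type carries ny*nz elements of mpi_real
      call mpi_type_size(iplane, nbytes, ierr)
      write(6,*) 'plane datatype carries ', nbytes, ' bytes'
      call mpi_type_free(iplane, ierr)
      return
      end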
      subroutine sendxx
      include 'mpif.h'
      include 'header.f'
      integer ireq3, ireq4
! Exchange the edge entries of xx with the x-direction neighbours
! using nonblocking communication.
      if (ixrank .eq. 0 ) then
        call mpi_irecv(xx(mdx_mpi+1),1,mpi_real,npdx(2),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_isend(xx(mdx_mpi),1,mpi_real,npdx(2),1,
     &                 mpi_comm_world,ireq2,ierr)
      elseif (ixrank .eq. (npx-1)) then
        call mpi_isend(xx(1),1,mpi_real,npdx(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(xx(0),1,mpi_real,npdx(1),1,
     &                 mpi_comm_world,ireq2,ierr)
      else
! interior ranks talk to both neighbours, so each of the four
! nonblocking calls needs its own request handle
        call mpi_isend(xx(1),1,mpi_real,npdx(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(xx(0),1,mpi_real,npdx(1),1,
     &                 mpi_comm_world,ireq2,ierr)
        call mpi_irecv(xx(mdx_mpi+1),1,mpi_real,npdx(2),1,
     &                 mpi_comm_world,ireq3,ierr)
        call mpi_isend(xx(mdx_mpi),1,mpi_real,npdx(2),1,
     &                 mpi_comm_world,ireq4,ierr)
        call mpi_wait(ireq3,mstatus,ierr)
        call mpi_wait(ireq4,mstatus,ierr)
      endif
! When MPI_ISEND / MPI_IRECV are used, MPI_WAIT must always be
! called.  Forgetting MPI_WAIT, or misspelling the request variable,
! leads to timing-dependent errors.
      call mpi_wait(ireq1,mstatus,ierr)
      call mpi_wait(ireq2,mstatus,ierr)
      return
      end

      subroutine sendyy
      include 'mpif.h'
      include 'header.f'
      integer ireq3, ireq4
! Exchange the edge entries of yy with the y-direction neighbours.
      if (iyrank .eq. 0 ) then
        call mpi_irecv(yy(mdy_mpi+1),1,mpi_real,npdy(2),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_isend(yy(mdy_mpi),1,mpi_real,npdy(2),1,
     &                 mpi_comm_world,ireq2,ierr)
      elseif (iyrank .eq. (npy-1)) then
        call mpi_isend(yy(1),1,mpi_real,npdy(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(yy(0),1,mpi_real,npdy(1),1,
     &                 mpi_comm_world,ireq2,ierr)
      else
        call mpi_isend(yy(1),1,mpi_real,npdy(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(yy(0),1,mpi_real,npdy(1),1,
     &                 mpi_comm_world,ireq2,ierr)
        call mpi_irecv(yy(mdy_mpi+1),1,mpi_real,npdy(2),1,
     &                 mpi_comm_world,ireq3,ierr)
        call mpi_isend(yy(mdy_mpi),1,mpi_real,npdy(2),1,
     &                 mpi_comm_world,ireq4,ierr)
        call mpi_wait(ireq3,mstatus,ierr)
        call mpi_wait(ireq4,mstatus,ierr)
      endif
      call mpi_wait(ireq1,mstatus,ierr)
      call mpi_wait(ireq2,mstatus,ierr)
      return
      end

      subroutine sendzz
      include 'mpif.h'
      include 'header.f'
      integer ireq3, ireq4
! Exchange the edge entries of zz with the z-direction neighbours.
      if (izrank .eq. 0 ) then
        call mpi_irecv(zz(mdz_mpi+1),1,mpi_real,npdz(2),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_isend(zz(mdz_mpi),1,mpi_real,npdz(2),1,
     &                 mpi_comm_world,ireq2,ierr)
      elseif (izrank .eq. (npz-1)) then
        call mpi_isend(zz(1),1,mpi_real,npdz(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(zz(0),1,mpi_real,npdz(1),1,
     &                 mpi_comm_world,ireq2,ierr)
      else
        call mpi_isend(zz(1),1,mpi_real,npdz(1),1,
     &                 mpi_comm_world,ireq1,ierr)
        call mpi_irecv(zz(0),1,mpi_real,npdz(1),1,
     &                 mpi_comm_world,ireq2,ierr)
        call mpi_irecv(zz(mdz_mpi+1),1,mpi_real,npdz(2),1,
     &                 mpi_comm_world,ireq3,ierr)
        call mpi_isend(zz(mdz_mpi),1,mpi_real,npdz(2),1,
     &                 mpi_comm_world,ireq4,ierr)
        call mpi_wait(ireq3,mstatus,ierr)
        call mpi_wait(ireq4,mstatus,ierr)
      endif
      call mpi_wait(ireq1,mstatus,ierr)
      call mpi_wait(ireq2,mstatus,ierr)
      return
      end
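!******** (sketch) MPI_WAITALL ********
! Illustrative sketch, not called by the program: the four
! nonblocking calls issued by an interior rank in sendxx can also be
! completed with a single MPI_WAITALL over an array of request
! handles, instead of individual MPI_WAIT calls.  The sketch assumes
! the same xx array, mdx_mpi and x-direction neighbours (header.f)
! as sendxx; it is a compact alternative, not the routine above.
      subroutine sendxx_waitall_sketch
      include 'mpif.h'
      include 'header.f'
      integer ireqs(4), istats(mpi_status_size,4)
! exchange the edge entries of xx with both x neighbours
      call mpi_isend(xx(1),1,mpi_real,npdx(1),1,
     &               mpi_comm_world,ireqs(1),ierr)
      call mpi_irecv(xx(0),1,mpi_real,npdx(1),1,
     &               mpi_comm_world,ireqs(2),ierr)
      call mpi_irecv(xx(mdx_mpi+1),1,mpi_real,npdx(2),1,
     &               mpi_comm_world,ireqs(3),ierr)
      call mpi_isend(xx(mdx_mpi),1,mpi_real,npdx(2),1,
     &               mpi_comm_world,ireqs(4),ierr)
! complete all four operations before the buffers are reused
      call mpi_waitall(4, ireqs, istats, ierr)
      return
      end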