From 407792c7eb3a4273bfd4a54e7248d8b9b56a56bd Mon Sep 17 00:00:00 2001
From: Dan Kokron
Date: Fri, 9 Jun 2023 15:08:44 +0000
Subject: [PATCH] Get rid of mpp pelist scoping calls in fill_nested_grid_cpl().

This depends on https://github.com/NOAA-GFDL/FMS/pull/1246 to be functional.

More efficient 'if' test in fill_nested_grid_cpl()
---
 driver/fvGFS/atmosphere.F90 | 13 ++++---------
 model/fv_arrays.F90         |  1 +
 model/fv_control.F90        | 23 ++++++++++++-----------
 3 files changed, 17 insertions(+), 20 deletions(-)

diff --git a/driver/fvGFS/atmosphere.F90 b/driver/fvGFS/atmosphere.F90
index bfde61d0d..a968928b8 100644
--- a/driver/fvGFS/atmosphere.F90
+++ b/driver/fvGFS/atmosphere.F90
@@ -168,7 +168,7 @@ module atmosphere_mod
                              mpp_sync, mpp_sync_self, mpp_send, mpp_recv, mpp_broadcast
 use mpp_parameter_mod, only: EUPDATE, WUPDATE, SUPDATE, NUPDATE
 use mpp_domains_mod,   only: CENTER, CORNER, NORTH, EAST, WEST, SOUTH
-use mpp_domains_mod,   only: domain2d, mpp_update_domains, mpp_global_field
+use mpp_domains_mod,   only: domain2d, mpp_update_domains, mpp_global_field, mpp_domain_is_tile_root_pe
 use mpp_domains_mod,   only: mpp_get_data_domain, mpp_get_compute_domain, mpp_get_global_domain
 use xgrid_mod,         only: grid_box_type
 use field_manager_mod, only: MODEL_ATMOS
@@ -2433,7 +2433,6 @@ subroutine fill_nested_grid_cpl(this_grid, proc_in)
   logical, intent(in), optional :: proc_in
 
   real, allocatable :: g_dat(:,:,:)
-  integer, allocatable :: global_pelist(:)
   integer :: p
   integer :: isd_p, ied_p, jsd_p, jed_p
   integer :: isg, ieg, jsg, jeg
@@ -2460,13 +2459,9 @@ subroutine fill_nested_grid_cpl(this_grid, proc_in)
          g_dat(isg:,jsg:,1), position=CENTER)
   endif
 
-  allocate(global_pelist(mpp_npes()))
-  call mpp_get_current_pelist(global_pelist)
-  if(any(mpp_pe() == Atm(this_grid)%Bcast_ranks)) then
-     call mpp_set_current_pelist(Atm(this_grid)%Bcast_ranks)
-     call mpp_broadcast(g_dat, size(g_dat),Atm(this_grid)%sending_proc, Atm(this_grid)%Bcast_ranks)
+  if(Atm(this_grid)%is_fine_pe .or. mpp_domain_is_tile_root_pe(Atm(this_grid)%parent_grid%domain)) then
+     call mpp_broadcast(g_dat, size(g_dat),Atm(this_grid)%Bcast_ranks(1), Atm(this_grid)%Bcast_ranks)
   endif
-  call mpp_set_current_pelist(global_pelist)
   call timing_off('COMM_TOTAL')
 
   if (process) then
@@ -2475,7 +2470,7 @@ subroutine fill_nested_grid_cpl(this_grid, proc_in)
          0, 0, isg, ieg, jsg, jeg, Atm(this_grid)%bd)
   endif
   deallocate(g_dat)
-  deallocate(global_pelist)
+  !deallocate(global_pelist)
 
 end subroutine fill_nested_grid_cpl
 
diff --git a/model/fv_arrays.F90 b/model/fv_arrays.F90
index f11e00113..a017586fc 100644
--- a/model/fv_arrays.F90
+++ b/model/fv_arrays.F90
@@ -1306,6 +1306,7 @@ module fv_arrays_mod
 ! to replace numerous p2p MPI transfers with a single mpp_broadcast()
      integer, allocatable :: Bcast_ranks(:)
      integer :: Bcast_comm, sending_proc
+     logical :: is_fine_pe
 
      type(fv_grid_bounds_type) :: bd
 
diff --git a/model/fv_control.F90 b/model/fv_control.F90
index 09686e236..b379b8435 100644
--- a/model/fv_control.F90
+++ b/model/fv_control.F90
@@ -144,7 +144,7 @@ module fv_control_mod
   use test_cases_mod,    only: read_namelist_test_case_nml
   use fv_timing_mod,     only: timing_on, timing_off, timing_init, timing_prt
   use mpp_domains_mod,   only: domain2D
-  use mpp_domains_mod,   only: mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain
+  use mpp_domains_mod,   only: mpp_define_nest_domains, nest_domain_type, mpp_get_global_domain, mpp_is_nest_fine
   use mpp_domains_mod,   only: mpp_get_C2F_index, mpp_get_F2C_index
   use mpp_domains_mod,   only: CENTER, CORNER, NORTH, EAST, WEST, SOUTH
   use mpp_mod,           only: mpp_send, mpp_sync, mpp_transmit, mpp_set_current_pelist, &
@@ -733,21 +733,22 @@ subroutine fv_control_init(Atm, dt_atmos, this_grid, grids_on_this_pe, p_split,
     Atm(this_grid)%neststruct%parent_proc = ANY(Atm(this_grid)%neststruct%child_grids) !ANY(tile_coarse == Atm(this_grid)%global_tile)
     Atm(this_grid)%neststruct%child_proc = ASSOCIATED(Atm(this_grid)%parent_grid) !this means a nested grid
 
-    call mpp_set_current_pelist( global_pelist )
-    do n=1,ngrids
+    if(n>1) then
+    call mpp_set_current_pelist( global_pelist )
+    do n=2,ngrids
 ! Construct the MPI communicators that are used in fill_nested_grid_cpl()
-       if(n>1) then
        Atm(n)%sending_proc = Atm(n)%parent_grid%pelist(1) + &
            ( Atm(n)%neststruct%parent_tile-tile_fine(Atm(n)%parent_grid%grid_number)+ &
            Atm(n)%parent_grid%flagstruct%ntiles-1 )*Atm(n)%parent_grid%npes_per_tile
-       allocate(Atm(n)%Bcast_ranks(0:size(Atm(n)%pelist)))
+       allocate(Atm(n)%Bcast_ranks(1+size(Atm(n)%pelist)))
 
-       Atm(n)%Bcast_ranks(0)=Atm(n)%sending_proc ! parent grid sending rank within the soon to be created Bcast_comm
-       Atm(n)%Bcast_ranks(1:size(Atm(n)%pelist))=Atm(n)%pelist ! Receivers
-       call mpp_declare_pelist(Atm(n)%Bcast_ranks)
-       endif
-    enddo
-    call mpp_set_current_pelist(Atm(this_grid)%pelist)
+       Atm(n)%Bcast_ranks(1)=Atm(n)%sending_proc ! parent grid sending rank within the soon to be created Bcast_comm
+       Atm(n)%Bcast_ranks(2:(1+size(Atm(n)%pelist)))=Atm(n)%pelist ! Receivers
+       call mpp_declare_pelist(Atm(n)%Bcast_ranks(:))
+       Atm(n)%is_fine_pe = mpp_is_nest_fine(global_nest_domain, 1)
+    enddo
+    call mpp_set_current_pelist(Atm(this_grid)%pelist)
+    endif
 
     if (ngrids > 1) call setup_update_regions
     if (Atm(this_grid)%neststruct%nestbctype > 1) then
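
Note (not part of the patch): a minimal standalone sketch of the broadcast-group
pattern this change adopts. The group pelist is declared once, with the sending
parent-grid rank first and the nest ranks after it, and is then passed directly
to mpp_broadcast with no mpp_set_current_pelist()/restore pair around the call.
The PE layout below (sending_pe, nest_pelist, buf) is invented for illustration,
and like the patch it assumes an FMS build that accepts a non-current pelist in
mpp_broadcast (https://github.com/NOAA-GFDL/FMS/pull/1246). In the patch itself
the membership test is cheaper still: Atm(this_grid)%is_fine_pe .or.
mpp_domain_is_tile_root_pe(...) derived from the nest metadata, rather than the
any() scan of the pelist used in this generic sketch.

program bcast_group_sketch
  ! Sketch only: run on at least 2 MPI ranks so the nest pelist is non-empty.
  use mpp_mod, only: mpp_init, mpp_exit, mpp_pe, mpp_npes, &
                     mpp_declare_pelist, mpp_broadcast
  implicit none

  integer :: npes, p
  integer :: sending_pe                    ! stands in for Atm(n)%sending_proc
  integer, allocatable :: nest_pelist(:)   ! stands in for Atm(n)%pelist
  integer, allocatable :: bcast_ranks(:)   ! stands in for Atm(n)%Bcast_ranks
  real :: buf(8)                           ! stands in for g_dat

  call mpp_init()
  npes = mpp_npes()

  ! Pretend rank 0 owns the coarse-grid data and ranks 1..npes-1 run the nest.
  sending_pe = 0
  allocate(nest_pelist(npes-1))
  nest_pelist = (/ (p, p = 1, npes-1) /)

  ! Build the group once at init time: sender first, then the receivers
  ! (mirrors the Bcast_ranks construction in fv_control_init).
  allocate(bcast_ranks(1+size(nest_pelist)))
  bcast_ranks(1) = sending_pe
  bcast_ranks(2:) = nest_pelist
  call mpp_declare_pelist(bcast_ranks)

  buf = 0.
  if (mpp_pe() == sending_pe) buf = 42.

  ! Only members of the group call the broadcast; everyone else skips it,
  ! so the current pelist never has to be switched and restored.
  if (any(mpp_pe() == bcast_ranks)) then
     call mpp_broadcast(buf, size(buf), bcast_ranks(1), bcast_ranks)
  endif

  call mpp_exit()
end program bcast_group_sketch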