cpp to MPI type mapping improvements #3495

Merged: 41 commits, Nov 29, 2024

Changes from 30 commits

Commits (41)
55df724
Switch to gtype trait based mpi type dispatching
schnellerhase Nov 2, 2024
db6ba65
Add mpi type mapping for std::int64_t
schnellerhase Nov 2, 2024
b926fbf
Add mpi type mapping for std::int32_t
schnellerhase Nov 2, 2024
2ba98d8
Missing double cases
schnellerhase Nov 2, 2024
8d64f01
Add documentation
schnellerhase Nov 2, 2024
d492e11
Tidy and fix
schnellerhase Nov 2, 2024
4183a15
Some more
schnellerhase Nov 2, 2024
ec943ae
More fixes
schnellerhase Nov 2, 2024
0cbc972
Fix order
schnellerhase Nov 2, 2024
7094daa
Simplify
schnellerhase Nov 2, 2024
061caeb
Revert
schnellerhase Nov 2, 2024
a36535e
Add default NULL type
schnellerhase Nov 3, 2024
d56f991
Sanity check static assert
schnellerhase Nov 3, 2024
6a3e2d7
Fancy void_t fix?
schnellerhase Nov 3, 2024
b58e41a
Wrong position
schnellerhase Nov 3, 2024
831a3dc
Switch to non width types
schnellerhase Nov 3, 2024
07488b8
Revert type trait trickery and document odd behavior
schnellerhase Nov 3, 2024
da98370
Add char types
schnellerhase Nov 3, 2024
1cf84d3
Doc
schnellerhase Nov 3, 2024
e838edd
Doc for macros
schnellerhase Nov 3, 2024
d0888ab
Enable preprocessing for doxygen
schnellerhase Nov 3, 2024
32e475c
Merge branch 'main' into mpi_types
schnellerhase Nov 4, 2024
1452cf7
Reactivate fixed width types
schnellerhase Nov 5, 2024
d3260e4
one more
schnellerhase Nov 5, 2024
5057409
Type size dependent overloading
schnellerhase Nov 6, 2024
858f934
Another
schnellerhase Nov 6, 2024
59b6de0
Try wordsize check
schnellerhase Nov 10, 2024
609e0ce
combine checks
schnellerhase Nov 10, 2024
1c1f6eb
Give up, make mpi type explicit for Kahip and remove general support …
schnellerhase Nov 10, 2024
aad8099
typo
schnellerhase Nov 10, 2024
15c61f4
Add KaHIP type comment
schnellerhase Nov 11, 2024
f3390d4
typos
schnellerhase Nov 11, 2024
133620c
Remove maps for char and bool
schnellerhase Nov 11, 2024
cc44381
Merge branch 'main' into mpi_types
schnellerhase Nov 11, 2024
b59c449
Merge branch 'main' into mpi_types
schnellerhase Nov 13, 2024
2096c33
Merge branch 'main' into mpi_types
schnellerhase Nov 16, 2024
c3fa9a9
Merge branch 'main' into mpi_types
schnellerhase Nov 19, 2024
370960a
Merge branch 'main' into mpi_types
schnellerhase Nov 23, 2024
4263bb4
Revert to non type trait usage, when not templated type
schnellerhase Nov 23, 2024
a193821
Merge branch 'main' into mpi_types
schnellerhase Nov 25, 2024
385463a
Merge branch 'main' into mpi_types
schnellerhase Nov 28, 2024
4 changes: 2 additions & 2 deletions cpp/doc/Doxyfile
@@ -2272,7 +2272,7 @@ PERLMOD_MAKEVAR_PREFIX =
# C-preprocessor directives found in the sources and include files.
# The default value is: YES.

ENABLE_PREPROCESSING = NO
ENABLE_PREPROCESSING = YES

# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
# in the source code. If set to NO, only conditional compilation will be
@@ -2289,7 +2289,7 @@ MACRO_EXPANSION = YES
# The default value is: NO.
# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.

EXPAND_ONLY_PREDEF = YES
EXPAND_ONLY_PREDEF = NO

# If the SEARCH_INCLUDES tag is set to YES, the include files in the
# INCLUDE_PATH will be searched if a #include is found.
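The Doxyfile change is what makes the new MPI.h documentable: the MPI type table introduced later in this diff is generated by a macro, and with ENABLE_PREPROCESSING = YES and EXPAND_ONLY_PREDEF = NO Doxygen expands that macro and documents the resulting specializations. As a hand-expanded sketch, one invocation of the MAP_TO_MPI_TYPE macro shown below produces roughly:

// What MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T) expands to after
// preprocessing -- this is the form Doxygen can now see and document.
template <>
struct mpi_type_mapping<std::int32_t>
{
  static inline MPI_Datatype type = MPI_INT32_T;
};
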
111 changes: 62 additions & 49 deletions cpp/dolfinx/common/IndexMap.cpp
@@ -116,8 +116,10 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span<const int> src,
send_sizes.reserve(1);
recv_sizes.reserve(1);
MPI_Request sizes_request;
MPI_Ineighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T, recv_sizes.data(),
1, MPI_INT32_T, comm0, &sizes_request);
MPI_Ineighbor_alltoall(send_sizes.data(), 1,
dolfinx::MPI::mpi_t<std::int32_t>, recv_sizes.data(),
1, dolfinx::MPI::mpi_t<std::int32_t>, comm0,
&sizes_request);

// Build send buffer and ghost position to send buffer position
for (auto& d : send_data)
@@ -136,10 +138,11 @@ communicate_ghosts_to_owners(MPI_Comm comm, std::span<const int> src,

// Send ghost indices to owner, and receive indices
recv_indices.resize(recv_disp.back());
ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_indices.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm0);
ierr = MPI_Neighbor_alltoallv(
send_indices.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_indices.data(),
recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
comm0);
dolfinx::MPI::check_error(comm, ierr);

ierr = MPI_Comm_free(&comm0);
@@ -511,10 +514,10 @@ compute_submap_ghost_indices(std::span<const int> submap_src,
dolfinx::MPI::check_error(imap.comm(), ierr);

// Send indices to ghosting ranks
ierr = MPI_Neighbor_alltoallv(send_gidx.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T,
recv_gidx.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T, comm1);
ierr = MPI_Neighbor_alltoallv(
send_gidx.data(), recv_sizes.data(), recv_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_gidx.data(), send_sizes.data(),
send_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>, comm1);
dolfinx::MPI::check_error(imap.comm(), ierr);

ierr = MPI_Comm_free(&comm1);
@@ -607,10 +610,10 @@ common::compute_owned_indices(std::span<const std::int32_t> indices,
// Send ghost indices to owner, and receive owned indices
std::vector<std::int64_t> recv_buffer(recv_disp.back());
std::vector<std::int64_t>& send_buffer = global_indices;
ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_buffer.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm);
ierr = MPI_Neighbor_alltoallv(
send_buffer.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_buffer.data(), recv_sizes.data(),
recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>, comm);
dolfinx::MPI::check_error(comm, ierr);
ierr = MPI_Comm_free(&comm);
dolfinx::MPI::check_error(map.comm(), ierr);
@@ -738,8 +741,9 @@ common::stack_index_maps(
std::vector<std::int32_t> recv_sizes(dest.size(), 0);
send_sizes.reserve(1);
recv_sizes.reserve(1);
ierr = MPI_Neighbor_alltoall(send_sizes.data(), 1, MPI_INT32_T,
recv_sizes.data(), 1, MPI_INT32_T, comm0);
ierr = MPI_Neighbor_alltoall(
send_sizes.data(), 1, dolfinx::MPI::mpi_t<std::int32_t>,
recv_sizes.data(), 1, dolfinx::MPI::mpi_t<std::int32_t>, comm0);
dolfinx::MPI::check_error(comm0, ierr);

// Prepare displacement vectors
@@ -752,10 +756,11 @@

// Send ghost indices to owner, and receive indices
std::vector<std::int64_t> recv_indices(recv_disp.back());
ierr = MPI_Neighbor_alltoallv(send_indices.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_indices.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm0);
ierr = MPI_Neighbor_alltoallv(
send_indices.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_indices.data(),
recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
comm0);
dolfinx::MPI::check_error(comm0, ierr);

// For each received index (which I should own), compute its new
@@ -773,10 +778,11 @@

// Send back/receive new indices
std::vector<std::int64_t> ghosts_new_idx(send_disp.back());
ierr = MPI_Neighbor_alltoallv(ghost_old_to_new.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T,
ghosts_new_idx.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T, comm1);
ierr = MPI_Neighbor_alltoallv(
ghost_old_to_new.data(), recv_sizes.data(), recv_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, ghosts_new_idx.data(),
send_sizes.data(), send_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
comm1);
dolfinx::MPI::check_error(comm1, ierr);

// Unpack new indices and store owner
@@ -825,8 +831,9 @@ common::create_sub_index_map(const IndexMap& imap,
// Compute submap offset for this rank
std::int64_t submap_local_size = submap_owned.size();
std::int64_t submap_offset = 0;
int ierr = MPI_Exscan(&submap_local_size, &submap_offset, 1, MPI_INT64_T,
MPI_SUM, imap.comm());
int ierr
= MPI_Exscan(&submap_local_size, &submap_offset, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, imap.comm());
dolfinx::MPI::check_error(imap.comm(), ierr);

// Compute the global indices (w.r.t. the submap) of the submap ghosts
@@ -858,14 +865,16 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size) : _comm(comm, true)
std::int64_t offset = 0;
const std::int64_t local_size_tmp = local_size;
MPI_Request request_scan;
int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM,
int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM,
_comm.comm(), &request_scan);
dolfinx::MPI::check_error(_comm.comm(), ierr);

// Send local size to sum reduction to get global size
MPI_Request request;
ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM,
comm, &request);
ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm,
&request);
dolfinx::MPI::check_error(_comm.comm(), ierr);

ierr = MPI_Wait(&request_scan, MPI_STATUS_IGNORE);
@@ -901,14 +910,16 @@ IndexMap::IndexMap(MPI_Comm comm, std::int32_t local_size,
std::int64_t offset = 0;
const std::int64_t local_size_tmp = local_size;
MPI_Request request_scan;
int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1, MPI_INT64_T, MPI_SUM,
comm, &request_scan);
int ierr = MPI_Iexscan(&local_size_tmp, &offset, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm,
&request_scan);
dolfinx::MPI::check_error(_comm.comm(), ierr);

// Send local size to sum reduction to get global size
MPI_Request request;
ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1, MPI_INT64_T, MPI_SUM,
comm, &request);
ierr = MPI_Iallreduce(&local_size_tmp, &_size_global, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm,
&request);
dolfinx::MPI::check_error(_comm.comm(), ierr);

// Wait for MPI_Iexscan to complete (get offset)
@@ -1070,10 +1081,11 @@ graph::AdjacencyList<int> IndexMap::index_to_dest_ranks() const

// Send ghost indices to owner, and receive owned indices
std::vector<std::int64_t> recv_buffer(recv_disp.back());
ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_buffer.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm0);
ierr = MPI_Neighbor_alltoallv(
send_buffer.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_buffer.data(),
recv_sizes.data(), recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
comm0);
dolfinx::MPI::check_error(_comm.comm(), ierr);
ierr = MPI_Comm_free(&comm0);
dolfinx::MPI::check_error(_comm.comm(), ierr);
@@ -1169,10 +1181,11 @@ graph::AdjacencyList<int> IndexMap::index_to_dest_ranks() const
std::next(recv_disp.begin()));

std::vector<std::int64_t> recv_indices(recv_disp.back());
ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_indices.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm);
ierr = MPI_Neighbor_alltoallv(
send_buffer.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_indices.data(),
recv_sizes.data(), recv_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, comm);
dolfinx::MPI::check_error(_comm.comm(), ierr);
ierr = MPI_Comm_free(&comm);
dolfinx::MPI::check_error(_comm.comm(), ierr);
@@ -1267,10 +1280,10 @@ std::vector<std::int32_t> IndexMap::shared_indices() const

// Send ghost indices to owner, and receive owned indices
std::vector<std::int64_t> recv_buffer(recv_disp.back());
ierr = MPI_Neighbor_alltoallv(send_buffer.data(), send_sizes.data(),
send_disp.data(), MPI_INT64_T,
recv_buffer.data(), recv_sizes.data(),
recv_disp.data(), MPI_INT64_T, comm);
ierr = MPI_Neighbor_alltoallv(
send_buffer.data(), send_sizes.data(), send_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, recv_buffer.data(), recv_sizes.data(),
recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>, comm);
dolfinx::MPI::check_error(_comm.comm(), ierr);

ierr = MPI_Comm_free(&comm);
@@ -1308,12 +1321,12 @@ std::array<double, 2> IndexMap::imbalance() const

// Find the maximum number of owned indices and the maximum number of ghost
// indices across all processes.
MPI_Allreduce(local_sizes.data(), max_count.data(), 2, MPI_INT32_T, MPI_MAX,
_comm.comm());
MPI_Allreduce(local_sizes.data(), max_count.data(), 2,
dolfinx::MPI::mpi_t<std::int32_t>, MPI_MAX, _comm.comm());

std::int32_t total_num_ghosts = 0;
MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1, MPI_INT32_T, MPI_SUM,
_comm.comm());
MPI_Allreduce(&local_sizes[1], &total_num_ghosts, 1,
dolfinx::MPI::mpi_t<std::int32_t>, MPI_SUM, _comm.comm());

// Compute the average number of owned and ghost indices per process.
int comm_size = dolfinx::MPI::size(_comm.comm());
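Every hunk in IndexMap.cpp above follows the same pattern: a hard-coded MPI datatype constant (MPI_INT32_T, MPI_INT64_T) is replaced by dolfinx::MPI::mpi_t<T> for the matching C++ type, so the datatype argument can no longer drift out of sync with the element type of the buffer being communicated. A minimal sketch of the pattern, using a hypothetical helper that is not part of DOLFINx:

#include <cstdint>
#include <mpi.h>

#include <dolfinx/common/MPI.h>

// Illustrative helper (not in DOLFINx): the MPI datatype is derived from
// the buffer's element type instead of being spelled out at the call site.
template <typename T>
T global_sum(MPI_Comm comm, T local)
{
  T global = 0;
  MPI_Allreduce(&local, &global, 1, dolfinx::MPI::mpi_t<T>, MPI_SUM, comm);
  return global;
}

// global_sum<std::int64_t>(comm, n) picks MPI_INT64_T automatically;
// global_sum<std::int32_t>(comm, n) picks MPI_INT32_T.
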
92 changes: 50 additions & 42 deletions cpp/dolfinx/common/MPI.h
@@ -1,4 +1,4 @@
// Copyright (C) 2007-2023 Magnus Vikstrøm and Garth N. Wells
// Copyright (C) 2007-2023 Magnus Vikstrøm, Garth N. Wells and Paul T. Kühner
//
// This file is part of DOLFINx (https://www.fenicsproject.org)
//
@@ -269,39 +269,44 @@ struct dependent_false : std::false_type
};

/// MPI Type

/// @brief Type trait for MPI type conversions.
template <typename T>
constexpr MPI_Datatype mpi_type()
{
if constexpr (std::is_same_v<T, float>)
return MPI_FLOAT;
else if constexpr (std::is_same_v<T, double>)
return MPI_DOUBLE;
else if constexpr (std::is_same_v<T, std::complex<double>>)
return MPI_C_DOUBLE_COMPLEX;
else if constexpr (std::is_same_v<T, std::complex<float>>)
return MPI_C_FLOAT_COMPLEX;
else if constexpr (std::is_same_v<T, short int>)
return MPI_SHORT;
else if constexpr (std::is_same_v<T, int>)
return MPI_INT;
else if constexpr (std::is_same_v<T, unsigned int>)
return MPI_UNSIGNED;
else if constexpr (std::is_same_v<T, long int>)
return MPI_LONG;
else if constexpr (std::is_same_v<T, unsigned long>)
return MPI_UNSIGNED_LONG;
else if constexpr (std::is_same_v<T, long long>)
return MPI_LONG_LONG;
else if constexpr (std::is_same_v<T, unsigned long long>)
return MPI_UNSIGNED_LONG_LONG;
else if constexpr (std::is_same_v<T, bool>)
return MPI_C_BOOL;
else if constexpr (std::is_same_v<T, std::int8_t>)
return MPI_INT8_T;
else
// Issue compile time error
static_assert(!std::is_same_v<T, T>);
}
struct mpi_type_mapping;

/// @brief Retrieves the MPI data type associated with the provided type.
/// @tparam T cpp type to map
template <typename T>
MPI_Datatype mpi_t = mpi_type_mapping<T>::type;

/// @brief Registers for cpp_t the corresponding mpi_t, which can then be
/// retrieved with mpi_t<cpp_t> from here on.
#define MAP_TO_MPI_TYPE(cpp_t, mpi_t) \
template <> \
struct mpi_type_mapping<cpp_t> \
{ \
static inline MPI_Datatype type = mpi_t; \
};

/// @defgroup MPI type mappings
/// @{
/// @cond
MAP_TO_MPI_TYPE(char, MPI_CHAR)
MAP_TO_MPI_TYPE(bool, MPI_C_BOOL)
MAP_TO_MPI_TYPE(float, MPI_FLOAT)
MAP_TO_MPI_TYPE(double, MPI_DOUBLE)
MAP_TO_MPI_TYPE(std::complex<float>, MPI_C_FLOAT_COMPLEX)
MAP_TO_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX)
MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T)
MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T)
MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T)
MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T)
MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T)
MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T)
MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T)
MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T)
/// @endcond
/// @}

//---------------------------------------------------------------------------
template <typename U>
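The hunk above replaces the old if-constexpr chain in mpi_type<T>() with a trait that can be specialized per type: mpi_t<T> reads mpi_type_mapping<T>::type, and each MAP_TO_MPI_TYPE invocation stamps out one specialization. Should another type ever need a mapping, a hypothetical extension (not part of this PR, and assuming the trait lives in namespace dolfinx::MPI, as the qualified mpi_t calls in this diff suggest) would look roughly like:

namespace dolfinx::MPI
{
// Hypothetical extra mapping -- not added by this PR.
MAP_TO_MPI_TYPE(long double, MPI_LONG_DOUBLE)
} // namespace dolfinx::MPI

// After that, generic code resolves the datatype at the call site:
//   MPI_Allreduce(&local, &sum, 1, dolfinx::MPI::mpi_t<long double>,
//                 MPI_SUM, comm);
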
@@ -426,13 +431,14 @@ distribute_to_postoffice(MPI_Comm comm, const U& x,
std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
err = MPI_Neighbor_alltoallv(
send_buffer_index.data(), num_items_per_dest.data(), send_disp.data(),
MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
recv_disp.data(), MPI_INT64_T, neigh_comm);
dolfinx::MPI::mpi_t<std::int64_t>, recv_buffer_index.data(),
num_items_recv.data(), recv_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, neigh_comm);
dolfinx::MPI::check_error(comm, err);

// Send/receive data (x)
MPI_Datatype compound_type;
MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type);
MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type);
MPI_Type_commit(&compound_type);
std::vector<T> recv_buffer_data(shape[1] * recv_disp.back());
err = MPI_Neighbor_alltoallv(
@@ -556,8 +562,9 @@ distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
std::vector<std::int64_t> recv_buffer_index(recv_disp.back());
err = MPI_Neighbor_alltoallv(
send_buffer_index.data(), num_items_per_src.data(), send_disp.data(),
MPI_INT64_T, recv_buffer_index.data(), num_items_recv.data(),
recv_disp.data(), MPI_INT64_T, neigh_comm0);
dolfinx::MPI::mpi_t<std::int64_t>, recv_buffer_index.data(),
num_items_recv.data(), recv_disp.data(),
dolfinx::MPI::mpi_t<std::int64_t>, neigh_comm0);
dolfinx::MPI::check_error(comm, err);

err = MPI_Comm_free(&neigh_comm0);
@@ -614,7 +621,7 @@ distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
dolfinx::MPI::check_error(comm, err);

MPI_Datatype compound_type0;
MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type0);
MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type0);
MPI_Type_commit(&compound_type0);

std::vector<T> recv_buffer_data(shape[1] * send_disp.back());
@@ -682,15 +689,16 @@ distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,

int err;
std::int64_t shape0 = 0;
err = MPI_Allreduce(&shape0_local, &shape0, 1, MPI_INT64_T, MPI_SUM, comm0);
err = MPI_Allreduce(&shape0_local, &shape0, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm0);
dolfinx::MPI::check_error(comm0, err);

std::int64_t rank_offset = -1;
if (comm1 != MPI_COMM_NULL)
{
rank_offset = 0;
err = MPI_Exscan(&shape0_local, &rank_offset, 1, MPI_INT64_T, MPI_SUM,
comm1);
err = MPI_Exscan(&shape0_local, &rank_offset, 1,
dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm1);
dolfinx::MPI::check_error(comm1, err);
}
else
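The distribute_to_postoffice / distribute_from_postoffice hunks also cover derived datatypes: a row of shape[1] values of type T is shipped as one element of a contiguous MPI datatype built from mpi_t<T>, so the same template body serves float, double, complex and integer payloads. A sketch of that pattern, with a hypothetical helper name:

// Illustrative helper (not in DOLFINx): build the row datatype used by the
// neighborhood all-to-all calls from the element type's MPI mapping.
template <typename T>
MPI_Datatype make_row_type(int row_width)
{
  MPI_Datatype row_type;
  MPI_Type_contiguous(row_width, dolfinx::MPI::mpi_t<T>, &row_type);
  MPI_Type_commit(&row_type);
  return row_type; // caller releases it later with MPI_Type_free(&row_type)
}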