diff --git a/cpp/doc/Doxyfile b/cpp/doc/Doxyfile
index f94948804e7..ed98958c440 100644
--- a/cpp/doc/Doxyfile
+++ b/cpp/doc/Doxyfile
@@ -2268,7 +2268,7 @@ PERLMOD_MAKEVAR_PREFIX =
 # C-preprocessor directives found in the sources and include files.
 # The default value is: YES.
 
-ENABLE_PREPROCESSING   = NO
+ENABLE_PREPROCESSING   = YES
 
 # If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
 # in the source code. If set to NO, only conditional compilation will be
@@ -2285,7 +2285,7 @@ MACRO_EXPANSION        = YES
 # The default value is: NO.
 # This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
 
-EXPAND_ONLY_PREDEF     = YES
+EXPAND_ONLY_PREDEF     = NO
 
 # If the SEARCH_INCLUDES tag is set to YES, the include files in the
 # INCLUDE_PATH will be searched if a #include is found.
diff --git a/cpp/dolfinx/common/CMakeLists.txt b/cpp/dolfinx/common/CMakeLists.txt
index 5f46222cee7..3a5c77044e0 100644
--- a/cpp/dolfinx/common/CMakeLists.txt
+++ b/cpp/dolfinx/common/CMakeLists.txt
@@ -12,7 +12,6 @@ set(HEADERS_common
   ${CMAKE_CURRENT_SOURCE_DIR}/Table.h
   ${CMAKE_CURRENT_SOURCE_DIR}/Timer.h
   ${CMAKE_CURRENT_SOURCE_DIR}/TimeLogger.h
-  ${CMAKE_CURRENT_SOURCE_DIR}/TimeLogManager.h
   ${CMAKE_CURRENT_SOURCE_DIR}/timing.h
   ${CMAKE_CURRENT_SOURCE_DIR}/utils.h
   PARENT_SCOPE
@@ -26,6 +25,5 @@ target_sources(
   ${CMAKE_CURRENT_SOURCE_DIR}/MPI.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/Table.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/TimeLogger.cpp
-  ${CMAKE_CURRENT_SOURCE_DIR}/TimeLogManager.cpp
   ${CMAKE_CURRENT_SOURCE_DIR}/timing.cpp
 )
diff --git a/cpp/dolfinx/common/MPI.h b/cpp/dolfinx/common/MPI.h
index ab04d93cc64..7cfc77e8866 100644
--- a/cpp/dolfinx/common/MPI.h
+++ b/cpp/dolfinx/common/MPI.h
@@ -1,4 +1,4 @@
-// Copyright (C) 2007-2023 Magnus Vikstrøm and Garth N. Wells
+// Copyright (C) 2007-2023 Magnus Vikstrøm, Garth N. Wells and Paul T. Kühner
 //
 // This file is part of DOLFINx (https://www.fenicsproject.org)
 //
@@ -271,39 +271,42 @@ struct dependent_false : std::false_type
 };
 
 /// MPI Type
+
+/// @brief Type trait for MPI type conversions.
 template <typename T>
-constexpr MPI_Datatype mpi_type()
-{
-  if constexpr (std::is_same_v<T, float>)
-    return MPI_FLOAT;
-  else if constexpr (std::is_same_v<T, double>)
-    return MPI_DOUBLE;
-  else if constexpr (std::is_same_v<T, std::complex<double>>)
-    return MPI_C_DOUBLE_COMPLEX;
-  else if constexpr (std::is_same_v<T, std::complex<float>>)
-    return MPI_C_FLOAT_COMPLEX;
-  else if constexpr (std::is_same_v<T, short int>)
-    return MPI_SHORT;
-  else if constexpr (std::is_same_v<T, int>)
-    return MPI_INT;
-  else if constexpr (std::is_same_v<T, unsigned int>)
-    return MPI_UNSIGNED;
-  else if constexpr (std::is_same_v<T, long int>)
-    return MPI_LONG;
-  else if constexpr (std::is_same_v<T, unsigned long>)
-    return MPI_UNSIGNED_LONG;
-  else if constexpr (std::is_same_v<T, long long>)
-    return MPI_LONG_LONG;
-  else if constexpr (std::is_same_v<T, unsigned long long>)
-    return MPI_UNSIGNED_LONG_LONG;
-  else if constexpr (std::is_same_v<T, bool>)
-    return MPI_C_BOOL;
-  else if constexpr (std::is_same_v<T, std::int8_t>)
-    return MPI_INT8_T;
-  else
-    // Issue compile time error
-    static_assert(!std::is_same_v<T, T>);
-}
+struct mpi_type_mapping;
+
+/// @brief Retrieves the MPI data type associated with the provided type.
+/// @tparam T cpp type to map
+template <typename T>
+MPI_Datatype mpi_t = mpi_type_mapping<T>::type;
+
+/// @brief Registers for cpp_t the corresponding mpi_t, which can then be
+/// retrieved with mpi_t<cpp_t> from here on.
+#define MAP_TO_MPI_TYPE(cpp_t, mpi_t)                                         \
+  template <>                                                                 \
+  struct mpi_type_mapping<cpp_t>                                              \
+  {                                                                           \
+    static inline MPI_Datatype type = mpi_t;                                  \
+  };
+
+/// @defgroup MPI type mappings
+/// @{
+/// @cond
+MAP_TO_MPI_TYPE(float, MPI_FLOAT)
+MAP_TO_MPI_TYPE(double, MPI_DOUBLE)
+MAP_TO_MPI_TYPE(std::complex<float>, MPI_C_FLOAT_COMPLEX)
+MAP_TO_MPI_TYPE(std::complex<double>, MPI_C_DOUBLE_COMPLEX)
+MAP_TO_MPI_TYPE(std::int8_t, MPI_INT8_T)
+MAP_TO_MPI_TYPE(std::int16_t, MPI_INT16_T)
+MAP_TO_MPI_TYPE(std::int32_t, MPI_INT32_T)
+MAP_TO_MPI_TYPE(std::int64_t, MPI_INT64_T)
+MAP_TO_MPI_TYPE(std::uint8_t, MPI_UINT8_T)
+MAP_TO_MPI_TYPE(std::uint16_t, MPI_UINT16_T)
+MAP_TO_MPI_TYPE(std::uint32_t, MPI_UINT32_T)
+MAP_TO_MPI_TYPE(std::uint64_t, MPI_UINT64_T)
+/// @endcond
+/// @}
 //---------------------------------------------------------------------------
 template <typename U>
@@ -434,7 +437,7 @@ distribute_to_postoffice(MPI_Comm comm, const U& x,
 
   // Send/receive data (x)
   MPI_Datatype compound_type;
-  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type);
+  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type);
   MPI_Type_commit(&compound_type);
   std::vector<T> recv_buffer_data(shape[1] * recv_disp.back());
   err = MPI_Neighbor_alltoallv(
@@ -616,7 +619,7 @@ distribute_from_postoffice(MPI_Comm comm, std::span<const std::int64_t> indices,
   dolfinx::MPI::check_error(comm, err);
 
   MPI_Datatype compound_type0;
-  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_type<T>(), &compound_type0);
+  MPI_Type_contiguous(shape[1], dolfinx::MPI::mpi_t<T>, &compound_type0);
   MPI_Type_commit(&compound_type0);
 
   std::vector<T> recv_buffer_data(shape[1] * send_disp.back());
@@ -691,8 +694,8 @@ distribute_data(MPI_Comm comm0, std::span<const std::int64_t> indices,
   if (comm1 != MPI_COMM_NULL)
   {
     rank_offset = 0;
-    err = MPI_Exscan(&shape0_local, &rank_offset, 1, MPI_INT64_T, MPI_SUM,
-                     comm1);
+    err = MPI_Exscan(&shape0_local, &rank_offset, 1,
+                     dolfinx::MPI::mpi_t<std::int64_t>, MPI_SUM, comm1);
     dolfinx::MPI::check_error(comm1, err);
   }
   else
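[Reviewer note] The hunks above replace the `mpi_type<T>()` function with the `mpi_t<T>` variable template. A minimal usage sketch, not part of the patch (the helper `global_sum` is illustrative):

```cpp
// Sketch: mpi_t<T> replaces the old mpi_type<T>() call at every MPI call
// site -- it is a variable template, so there are no call parentheses.
#include <mpi.h>
#include <dolfinx/common/MPI.h>

template <typename T>
T global_sum(MPI_Comm comm, T local)
{
  T global{};
  MPI_Allreduce(&local, &global, 1, dolfinx::MPI::mpi_t<T>, MPI_SUM, comm);
  return global;
}
```

A type without a registered `MAP_TO_MPI_TYPE` mapping leaves `mpi_type_mapping<T>` an incomplete type, so misuse still fails at compile time, much like the old `static_assert` branch did.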
diff --git a/cpp/dolfinx/common/Scatterer.h b/cpp/dolfinx/common/Scatterer.h
index b38793dcd2c..72afe81ad11 100644
--- a/cpp/dolfinx/common/Scatterer.h
+++ b/cpp/dolfinx/common/Scatterer.h
@@ -145,8 +145,7 @@ class Scatterer
 
   // Scale sizes and displacements by block size
   {
-    auto rescale = [](auto& x, int bs)
-    {
+    auto rescale = [](auto& x, int bs) {
      std::ranges::transform(x, x.begin(), [bs](auto e) { return e *= bs; });
     };
     rescale(_sizes_local, bs);
@@ -207,11 +206,11 @@ class Scatterer
     case type::neighbor:
     {
       assert(requests.size() == std::size_t(1));
-      MPI_Ineighbor_alltoallv(
-          send_buffer.data(), _sizes_local.data(), _displs_local.data(),
-          dolfinx::MPI::mpi_type<T>(), recv_buffer.data(), _sizes_remote.data(),
-          _displs_remote.data(), dolfinx::MPI::mpi_type<T>(), _comm0.comm(),
-          requests.data());
+      MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_local.data(),
+                              _displs_local.data(), dolfinx::MPI::mpi_t<T>,
+                              recv_buffer.data(), _sizes_remote.data(),
+                              _displs_remote.data(), dolfinx::MPI::mpi_t<T>,
+                              _comm0.comm(), requests.data());
       break;
     }
     case type::p2p:
@@ -220,14 +219,14 @@ class Scatterer
       for (std::size_t i = 0; i < _src.size(); i++)
       {
         MPI_Irecv(recv_buffer.data() + _displs_remote[i], _sizes_remote[i],
-                  dolfinx::MPI::mpi_type<T>(), _src[i], MPI_ANY_TAG,
-                  _comm0.comm(), &requests[i]);
+                  dolfinx::MPI::mpi_t<T>, _src[i], MPI_ANY_TAG, _comm0.comm(),
+                  &requests[i]);
       }
 
       for (std::size_t i = 0; i < _dest.size(); i++)
       {
         MPI_Isend(send_buffer.data() + _displs_local[i], _sizes_local[i],
-                  dolfinx::MPI::mpi_type<T>(), _dest[i], 0, _comm0.comm(),
+                  dolfinx::MPI::mpi_t<T>, _dest[i], 0, _comm0.comm(),
                   &requests[i + _src.size()]);
       }
       break;
@@ -404,11 +403,10 @@ class Scatterer
     case type::neighbor:
     {
       assert(requests.size() == 1);
-      MPI_Ineighbor_alltoallv(send_buffer.data(), _sizes_remote.data(),
-                              _displs_remote.data(), MPI::mpi_type<T>(),
-                              recv_buffer.data(), _sizes_local.data(),
-                              _displs_local.data(), MPI::mpi_type<T>(),
-                              _comm1.comm(), &requests[0]);
+      MPI_Ineighbor_alltoallv(
+          send_buffer.data(), _sizes_remote.data(), _displs_remote.data(),
+          MPI::mpi_t<T>, recv_buffer.data(), _sizes_local.data(),
+          _displs_local.data(), MPI::mpi_t<T>, _comm1.comm(), &requests[0]);
       break;
     }
     case type::p2p:
@@ -418,8 +416,8 @@ class Scatterer
       for (std::size_t i = 0; i < _dest.size(); i++)
       {
         MPI_Irecv(recv_buffer.data() + _displs_local[i], _sizes_local[i],
-                  dolfinx::MPI::mpi_type<T>(), _dest[i], MPI_ANY_TAG,
-                  _comm0.comm(), &requests[i]);
+                  dolfinx::MPI::mpi_t<T>, _dest[i], MPI_ANY_TAG, _comm0.comm(),
+                  &requests[i]);
       }
 
       // Start non-blocking receive from neighbor process for which an owned
@@ -427,7 +425,7 @@ class Scatterer
       for (std::size_t i = 0; i < _src.size(); i++)
       {
         MPI_Isend(send_buffer.data() + _displs_remote[i], _sizes_remote[i],
-                  dolfinx::MPI::mpi_type<T>(), _src[i], 0, _comm0.comm(),
+                  dolfinx::MPI::mpi_t<T>, _src[i], 0, _comm0.comm(),
                   &requests[i + _dest.size()]);
       }
       break;
diff --git a/cpp/dolfinx/common/Table.cpp b/cpp/dolfinx/common/Table.cpp
index 814cc43d14e..241cebd8532 100644
--- a/cpp/dolfinx/common/Table.cpp
+++ b/cpp/dolfinx/common/Table.cpp
@@ -144,8 +144,9 @@ Table Table::reduce(MPI_Comm comm, Table::Reduction reduction) const
   std::partial_sum(pcounts.begin(), pcounts.end(), offsets.begin() + 1);
 
   std::vector<double> values_all(offsets.back());
-  err = MPI_Gatherv(values.data(), values.size(), MPI_DOUBLE, values_all.data(),
-                    pcounts.data(), offsets.data(), MPI_DOUBLE, 0, comm);
+  err = MPI_Gatherv(values.data(), values.size(), dolfinx::MPI::mpi_t<double>,
+                    values_all.data(), pcounts.data(), offsets.data(),
+                    dolfinx::MPI::mpi_t<double>, 0, comm);
   dolfinx::MPI::check_error(comm, err);
 
   // Return empty table on rank > 0
diff --git a/cpp/dolfinx/common/TimeLogManager.cpp b/cpp/dolfinx/common/TimeLogManager.cpp
deleted file mode 100644
index 94474d5d191..00000000000
--- a/cpp/dolfinx/common/TimeLogManager.cpp
+++ /dev/null
@@ -1,18 +0,0 @@
-// Copyright (C) 2003-2005 Anders Logg
-//
-// This file is part of DOLFINx (https://www.fenicsproject.org)
-//
-// SPDX-License-Identifier: LGPL-3.0-or-later
-
-#include "TimeLogManager.h"
-#include "TimeLogger.h"
-
-// Initialise static data to avoid "static initialisation order fiasco".
-// See also Meyers' singleton.
-
-dolfinx::common::TimeLogger& dolfinx::common::TimeLogManager::logger()
-{
-  // NB static - this only allocates a new Logger on the first call to logger()
-  static TimeLogger lg{};
-  return lg;
-}
diff --git a/cpp/dolfinx/common/TimeLogManager.h b/cpp/dolfinx/common/TimeLogManager.h
deleted file mode 100644
index 05d048f96fb..00000000000
--- a/cpp/dolfinx/common/TimeLogManager.h
+++ /dev/null
@@ -1,23 +0,0 @@
-// Copyright (C) 2003-2016 Anders Logg
-//
-// This file is part of DOLFINx (https://www.fenicsproject.org)
-//
-// SPDX-License-Identifier: LGPL-3.0-or-later
-
-#pragma once
-
-#include "TimeLogger.h"
-
-namespace dolfinx::common
-{
-
-class TimeLogger;
-
-/// Logger initialisation
-class TimeLogManager
-{
-public:
-  /// Singleton instance of logger
-  static TimeLogger& logger();
-};
-} // namespace dolfinx::common
diff --git a/cpp/dolfinx/common/TimeLogger.cpp b/cpp/dolfinx/common/TimeLogger.cpp
index bbe3a43229f..eb356f9b3a1 100644
--- a/cpp/dolfinx/common/TimeLogger.cpp
+++ b/cpp/dolfinx/common/TimeLogger.cpp
@@ -12,6 +12,13 @@
 using namespace dolfinx;
 using namespace dolfinx::common;
 
+//-----------------------------------------------------------------------------
+TimeLogger& TimeLogger::instance()
+{
+  static TimeLogger _instance{};
+  return _instance;
+}
+
 //-----------------------------------------------------------------------------
 void TimeLogger::register_timing(
     std::string task, std::chrono::duration<double, std::ratio<1>> time)
diff --git a/cpp/dolfinx/common/TimeLogger.h b/cpp/dolfinx/common/TimeLogger.h
index e1b852da375..cad8de5f2ad 100644
--- a/cpp/dolfinx/common/TimeLogger.h
+++ b/cpp/dolfinx/common/TimeLogger.h
@@ -16,21 +16,16 @@ namespace dolfinx::common
 {
 
-/// Timer logging
+/// @brief Time logger maintaining data collected by Timer, if registered.
+///
+/// @note This is a monostate, i.e. the data members are static and thus
+/// timings are aggregated into a single map.
 class TimeLogger
 {
 public:
-  /// Constructor
-  TimeLogger() = default;
-
-  // This class is used as a singleton and thus should not allow copies.
-  TimeLogger(const TimeLogger&) = delete;
-
-  // This class is used as a singleton and thus should not allow copies.
-  TimeLogger& operator=(const TimeLogger&) = delete;
-
-  /// Destructor
-  ~TimeLogger() = default;
+  /// @brief Singleton access.
+  /// @return Unique time logger object.
+  static TimeLogger& instance();
 
   /// Register timing (for later summary)
   void register_timing(std::string task,
@@ -58,6 +53,18 @@ class TimeLogger
   timings() const;
 
 private:
+  /// Constructor
+  TimeLogger() = default;
+
+  // This class is used as a singleton and thus should not allow copies.
+  TimeLogger(const TimeLogger&) = delete;
+
+  // This class is used as a singleton and thus should not allow copies.
+  TimeLogger& operator=(const TimeLogger&) = delete;
+
+  /// Destructor
+  ~TimeLogger() = default;
+
   // List of timings for tasks, map from string to (num_timings,
   // total_wall_time)
   std::map<std::string,
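[Reviewer note] With `TimeLogManager` gone, the Meyers singleton lives directly on `TimeLogger`: the function-local static in `instance()` is constructed on first use, which sidesteps the static initialisation order fiasco without a separate manager class. A sketch of how timings now reach the logger, not part of the patch (task name and reduction choice are illustrative):

```cpp
// Sketch: Timer is the public entry point; only the registration target
// changed, from TimeLogManager::logger() to TimeLogger::instance().
#include <mpi.h>
#include <dolfinx/common/Timer.h>
#include <dolfinx/common/timing.h>

void profiled_work(MPI_Comm comm)
{
  {
    dolfinx::common::Timer timer("Assemble system");
    // ... work to be timed ...
  } // Timer destructor calls TimeLogger::instance().register_timing(...)
  dolfinx::list_timings(comm, dolfinx::Table::Reduction::max);
}
```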
diff --git a/cpp/dolfinx/common/Timer.h b/cpp/dolfinx/common/Timer.h
--- a/cpp/dolfinx/common/Timer.h
+++ b/cpp/dolfinx/common/Timer.h
@@ -9,10 +9,11 @@
 #pragma once
 
-#include "TimeLogManager.h"
 #include <chrono>
 #include <optional>
 #include <string>
 
+#include "TimeLogger.h"
+
 namespace dolfinx::common
 {
 /// @brief Timer for measuring and logging elapsed time durations.
@@ -56,7 +57,7 @@ class Timer
     if (_start_time.has_value() and _task.has_value())
     {
       _acc += T::now() - *_start_time;
-      TimeLogManager::logger().register_timing(*_task, _acc);
+      TimeLogger::instance().register_timing(*_task, _acc);
     }
   }
@@ -121,7 +122,7 @@ class Timer
 
     if (_task.has_value())
     {
-      TimeLogManager::logger().register_timing(*_task, _acc);
+      TimeLogger::instance().register_timing(*_task, _acc);
       _task = std::nullopt;
     }
   }
diff --git a/cpp/dolfinx/common/timing.cpp b/cpp/dolfinx/common/timing.cpp
index cd965e0c342..f48ac44c030 100644
--- a/cpp/dolfinx/common/timing.cpp
+++ b/cpp/dolfinx/common/timing.cpp
@@ -6,31 +6,30 @@
 
 #include "timing.h"
 #include "Table.h"
-#include "TimeLogManager.h"
 #include "TimeLogger.h"
 #include "Timer.h"
 
 //-----------------------------------------------------------------------
 dolfinx::Table dolfinx::timing_table()
 {
-  return dolfinx::common::TimeLogManager::logger().timing_table();
+  return dolfinx::common::TimeLogger::instance().timing_table();
 }
 //-----------------------------------------------------------------------------
 void dolfinx::list_timings(MPI_Comm comm, Table::Reduction reduction)
 {
-  dolfinx::common::TimeLogManager::logger().list_timings(comm, reduction);
+  dolfinx::common::TimeLogger::instance().list_timings(comm, reduction);
 }
 //-----------------------------------------------------------------------------
 std::pair<int, std::chrono::duration<double, std::ratio<1>>>
 dolfinx::timing(std::string task)
 {
-  return dolfinx::common::TimeLogManager::logger().timing(task);
+  return dolfinx::common::TimeLogger::instance().timing(task);
 }
 //-----------------------------------------------------------------------------
 std::map<std::string,
          std::pair<int, std::chrono::duration<double, std::ratio<1>>>>
 dolfinx::timings()
 {
-  return dolfinx::common::TimeLogManager::logger().timings();
+  return dolfinx::common::TimeLogger::instance().timings();
 }
 //-----------------------------------------------------------------------------
diff --git a/cpp/dolfinx/common/utils.h b/cpp/dolfinx/common/utils.h
index 945e5ad44d7..bbe7ec08e6d 100644
--- a/cpp/dolfinx/common/utils.h
+++ b/cpp/dolfinx/common/utils.h
@@ -88,9 +88,9 @@ std::size_t hash_global(MPI_Comm comm, const T& x)
 
   // Gather hash keys on root process
   std::vector<std::size_t> all_hashes(dolfinx::MPI::size(comm));
-  int err = MPI_Gather(&local_hash, 1, dolfinx::MPI::mpi_type<std::size_t>(),
-                       all_hashes.data(), 1,
-                       dolfinx::MPI::mpi_type<std::size_t>(), 0, comm);
+  int err = MPI_Gather(&local_hash, 1, dolfinx::MPI::mpi_t<std::size_t>,
+                       all_hashes.data(), 1, dolfinx::MPI::mpi_t<std::size_t>,
+                       0, comm);
   dolfinx::MPI::check_error(comm, err);
 
   // Hash the received hash keys
@@ -98,8 +98,7 @@ std::size_t hash_global(MPI_Comm comm, const T& x)
   std::size_t global_hash = hash(all_hashes);
 
   // Broadcast hash key to all processes
-  err = MPI_Bcast(&global_hash, 1, dolfinx::MPI::mpi_type<std::size_t>(), 0,
-                  comm);
+  err = MPI_Bcast(&global_hash, 1, dolfinx::MPI::mpi_t<std::size_t>, 0, comm);
   dolfinx::MPI::check_error(comm, err);
 
   return global_hash;
diff --git a/cpp/dolfinx/fem/interpolate.h b/cpp/dolfinx/fem/interpolate.h
index d2a098d8c8c..b44596e3abb 100644
--- a/cpp/dolfinx/fem/interpolate.h
+++ b/cpp/dolfinx/fem/interpolate.h
@@ -274,9 +274,9 @@ void scatter_values(MPI_Comm comm, std::span<const std::int32_t> src_ranks,
   std::vector<T> values(recv_offsets.back());
   values.reserve(1);
   MPI_Neighbor_alltoallv(send_values.data_handle(), send_sizes.data(),
-                         send_offsets.data(), dolfinx::MPI::mpi_type<T>(),
+                         send_offsets.data(), dolfinx::MPI::mpi_t<T>,
                          values.data(), recv_sizes.data(), recv_offsets.data(),
-                         dolfinx::MPI::mpi_type<T>(), reverse_comm);
+                         dolfinx::MPI::mpi_t<T>, reverse_comm);
   MPI_Comm_free(&reverse_comm);
 
   // Insert values received from neighborhood communicator in output
diff --git a/cpp/dolfinx/geometry/BoundingBoxTree.h b/cpp/dolfinx/geometry/BoundingBoxTree.h
index 64ede45a057..d9b91c4573b 100644
--- a/cpp/dolfinx/geometry/BoundingBoxTree.h
+++ b/cpp/dolfinx/geometry/BoundingBoxTree.h
@@ -335,8 +335,8 @@ class BoundingBoxTree
     if (num_bboxes() > 0)
       std::copy_n(std::prev(_bbox_coordinates.end(), 6), 6, send_bbox.begin());
     std::vector<T> recv_bbox(mpi_size * 6);
-    MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_type<T>(),
-                  recv_bbox.data(), 6, dolfinx::MPI::mpi_type<T>(), comm);
+    MPI_Allgather(send_bbox.data(), 6, dolfinx::MPI::mpi_t<T>, recv_bbox.data(),
+                  6, dolfinx::MPI::mpi_t<T>, comm);
 
     std::vector<std::pair<std::array<T, 6>, std::int32_t>> _recv_bbox(mpi_size);
     for (std::size_t i = 0; i < _recv_bbox.size(); ++i)
diff --git a/cpp/dolfinx/geometry/utils.h b/cpp/dolfinx/geometry/utils.h
index 0f643e62643..4e287fcb4cb 100644
--- a/cpp/dolfinx/geometry/utils.h
+++ b/cpp/dolfinx/geometry/utils.h
@@ -771,8 +771,8 @@ PointOwnershipData<T> determine_point_ownership(const mesh::Mesh<T>& mesh,
   std::vector<T> received_points((std::size_t)recv_offsets.back());
   MPI_Neighbor_alltoallv(
       send_data.data(), send_sizes.data(), send_offsets.data(),
-      dolfinx::MPI::mpi_type<T>(), received_points.data(), recv_sizes.data(),
-      recv_offsets.data(), dolfinx::MPI::mpi_type<T>(), forward_comm);
+      dolfinx::MPI::mpi_t<T>, received_points.data(), recv_sizes.data(),
+      recv_offsets.data(), dolfinx::MPI::mpi_t<T>, forward_comm);
 
   // Get mesh geometry for closest entity
   const mesh::Geometry<T>& geometry = mesh.geometry();
@@ -905,8 +905,8 @@ PointOwnershipData<T> determine_point_ownership(const mesh::Mesh<T>& mesh,
   std::vector<T> recv_distances(recv_offsets.back());
   MPI_Neighbor_alltoallv(
       squared_distances.data(), send_sizes.data(), send_offsets.data(),
-      dolfinx::MPI::mpi_type<T>(), recv_distances.data(), recv_sizes.data(),
-      recv_offsets.data(), dolfinx::MPI::mpi_type<T>(), reverse_comm);
+      dolfinx::MPI::mpi_t<T>, recv_distances.data(), recv_sizes.data(),
+      recv_offsets.data(), dolfinx::MPI::mpi_t<T>, reverse_comm);
 
   // Update point ownership with extrapolation information
   std::vector<T> closest_distance(point_owners.size(),
diff --git a/cpp/dolfinx/graph/partitioners.cpp b/cpp/dolfinx/graph/partitioners.cpp
index 68bc0a57847..6efd9d1a80a 100644
--- a/cpp/dolfinx/graph/partitioners.cpp
+++ b/cpp/dolfinx/graph/partitioners.cpp
@@ -444,7 +444,7 @@ graph::partition_fn graph::scotch::partitioner(graph::scotch::strategy strategy,
     // Exchange halo with node_partition data for ghosts
     common::Timer timer3("SCOTCH: call SCOTCH_dgraphHalo");
     err = SCOTCH_dgraphHalo(&dgrafdat, node_partition.data(),
-                            dolfinx::MPI::mpi_type<SCOTCH_Num>());
+                            dolfinx::MPI::mpi_t<SCOTCH_Num>);
     if (err != 0)
       throw std::runtime_error("Error during SCOTCH halo exchange");
     timer3.stop();
@@ -554,9 +554,8 @@ graph::partition_fn graph::parmetis::partitioner(double imbalance,
     const int psize = dolfinx::MPI::size(pcomm);
     const idx_t num_local_nodes = graph.num_nodes();
     node_disp = std::vector<idx_t>(psize + 1, 0);
-    MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_type<idx_t>(),
-                  node_disp.data() + 1, 1, dolfinx::MPI::mpi_type<idx_t>(),
-                  pcomm);
+    MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_t<idx_t>,
+                  node_disp.data() + 1, 1, dolfinx::MPI::mpi_t<idx_t>, pcomm);
     std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin());
     std::vector<idx_t> array(graph.array().begin(), graph.array().end());
     std::vector<idx_t> offsets(graph.offsets().begin(),
@@ -631,8 +630,13 @@ graph::partition_fn graph::kahip::partitioner(int mode, int seed,
     common::Timer timer1("KaHIP: build adjacency data");
     std::vector<T> node_disp(dolfinx::MPI::size(comm) + 1, 0);
     const T num_local_nodes = graph.num_nodes();
-    MPI_Allgather(&num_local_nodes, 1, dolfinx::MPI::mpi_type<T>(),
-                  node_disp.data() + 1, 1, dolfinx::MPI::mpi_type<T>(), comm);
+
+    // KaHIP internally relies on an unsigned long long int type, which is not
+    // easily convertible to a general MPI type due to platform-specific
+    // differences. So we cannot rely on the general mpi_t<> mapping and do it
+    // by hand in this sole occurrence.
+    MPI_Allgather(&num_local_nodes, 1, MPI_UNSIGNED_LONG_LONG,
+                  node_disp.data() + 1, 1, MPI_UNSIGNED_LONG_LONG, comm);
     std::partial_sum(node_disp.begin(), node_disp.end(), node_disp.begin());
     std::vector<T> array(graph.array().begin(), graph.array().end());
     std::vector<T> offsets(graph.offsets().begin(), graph.offsets().end());
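[Reviewer note] The KaHIP special case exists because the new table maps fixed-width types only, and whether `unsigned long long` coincides with `std::uint64_t` is platform-dependent. A compile-time illustration, not part of the patch:

```cpp
// On common LP64 Linux systems std::uint64_t is `unsigned long`, so
// `unsigned long long` is a distinct type even though both are 64 bits
// wide -- hence it has no guaranteed entry in the mpi_t<> table.
#include <cstdint>
#include <type_traits>

static_assert(sizeof(unsigned long long) == sizeof(std::uint64_t));
// Would fail on LP64 platforms if uncommented:
// static_assert(std::is_same_v<unsigned long long, std::uint64_t>);
```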
diff --git a/cpp/dolfinx/io/xdmf_utils.cpp b/cpp/dolfinx/io/xdmf_utils.cpp
index d52f5fd23f7..444d3f71b3a 100644
--- a/cpp/dolfinx/io/xdmf_utils.cpp
+++ b/cpp/dolfinx/io/xdmf_utils.cpp
@@ -378,9 +378,8 @@ xdmf_utils::distribute_entity_data(
   std::vector<T> recv_values_buffer(recv_disp.back());
   err = MPI_Neighbor_alltoallv(
       send_values_buffer.data(), num_items_send.data(), send_disp.data(),
-      dolfinx::MPI::mpi_type<T>(), recv_values_buffer.data(),
-      num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_type<T>(),
-      comm0);
+      dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
+      num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
   dolfinx::MPI::check_error(comm, err);
   err = MPI_Comm_free(&comm0);
   dolfinx::MPI::check_error(comm, err);
@@ -403,8 +402,7 @@ xdmf_utils::distribute_entity_data(
     std::vector<std::pair<int, std::int64_t>> dest_to_index;
     std::ranges::transform(
         indices, std::back_inserter(dest_to_index),
-        [size, num_nodes](auto n)
-        {
+        [size, num_nodes](auto n) {
           return std::pair(dolfinx::MPI::index_owner(size, n, num_nodes), n);
         });
     std::ranges::sort(dest_to_index);
@@ -552,9 +550,8 @@ xdmf_utils::distribute_entity_data(
   std::vector<T> recv_values_buffer(recv_disp.back());
   err = MPI_Neighbor_alltoallv(
       send_values_buffer.data(), num_items_send.data(), send_disp.data(),
-      dolfinx::MPI::mpi_type<T>(), recv_values_buffer.data(),
-      num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_type<T>(),
-      comm0);
+      dolfinx::MPI::mpi_t<T>, recv_values_buffer.data(),
+      num_items_recv.data(), recv_disp.data(), dolfinx::MPI::mpi_t<T>, comm0);
   dolfinx::MPI::check_error(comm, err);
diff --git a/cpp/dolfinx/la/MatrixCSR.h b/cpp/dolfinx/la/MatrixCSR.h
index 36d3038b155..1a6738d676a 100644
--- a/cpp/dolfinx/la/MatrixCSR.h
+++ b/cpp/dolfinx/la/MatrixCSR.h
@@ -688,9 +688,9 @@ void MatrixCSR<U, V, W, X>::scatter_rev_begin()
 
   int status = MPI_Ineighbor_alltoallv(
       _ghost_value_data.data(), val_send_count.data(), _val_send_disp.data(),
-      dolfinx::MPI::mpi_type<value_type>(), _ghost_value_data_in.data(),
+      dolfinx::MPI::mpi_t<value_type>, _ghost_value_data_in.data(),
       val_recv_count.data(), _val_recv_disp.data(),
-      dolfinx::MPI::mpi_type<value_type>(), _comm.comm(), &_request);
+      dolfinx::MPI::mpi_t<value_type>, _comm.comm(), &_request);
   assert(status == MPI_SUCCESS);
 }
 //-----------------------------------------------------------------------------
diff --git a/cpp/dolfinx/la/Vector.h b/cpp/dolfinx/la/Vector.h
index 9b69e4670fb..306205338b9 100644
--- a/cpp/dolfinx/la/Vector.h
+++ b/cpp/dolfinx/la/Vector.h
@@ -245,7 +245,7 @@ auto inner_product(const V& a, const V& b)
       });
 
   T result;
-  MPI_Allreduce(&local, &result, 1, dolfinx::MPI::mpi_type<T>(), MPI_SUM,
+  MPI_Allreduce(&local, &result, 1, dolfinx::MPI::mpi_t<T>, MPI_SUM,
                 a.index_map()->comm());
   return result;
 }
@@ -279,7 +279,7 @@ auto norm(const V& x, Norm type = Norm::l2)
         = std::accumulate(data.begin(), data.end(), U(0),
                           [](auto norm, auto x) { return norm + std::abs(x); });
     U l1(0);
-    MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_type<U>(), MPI_SUM,
+    MPI_Allreduce(&local_l1, &l1, 1, MPI::mpi_t<U>, MPI_SUM,
                   x.index_map()->comm());
     return l1;
   }
@@ -293,7 +293,7 @@ auto norm(const V& x, Norm type = Norm::l2)
         data, [](T a, T b) { return std::norm(a) < std::norm(b); });
     auto local_linf = std::abs(*max_pos);
     decltype(local_linf) linf = 0;
-    MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_type<decltype(local_linf)>(),
+    MPI_Allreduce(&local_linf, &linf, 1, MPI::mpi_t<decltype(local_linf)>,
                   MPI_MAX, x.index_map()->comm());
     return linf;
   }
diff --git a/cpp/dolfinx/mesh/graphbuild.cpp b/cpp/dolfinx/mesh/graphbuild.cpp
index 9229c686eb5..a78e93f4821 100644
--- a/cpp/dolfinx/mesh/graphbuild.cpp
+++ b/cpp/dolfinx/mesh/graphbuild.cpp
@@ -292,9 +292,10 @@ graph::AdjacencyList<std::int64_t> compute_nonlocal_dual_graph(
   // Send back data
   std::vector<std::int64_t> recv_buffer1(send_disp.back());
   MPI_Neighbor_alltoallv(send_buffer1.data(), num_items_recv.data(),
-                         recv_disp.data(), MPI_INT64_T, recv_buffer1.data(),
-                         num_items_per_dest.data(), send_disp.data(),
-                         MPI_INT64_T, neigh_comm1);
+                         recv_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
+                         recv_buffer1.data(), num_items_per_dest.data(),
+                         send_disp.data(), dolfinx::MPI::mpi_t<std::int64_t>,
+                         neigh_comm1);
   MPI_Comm_free(&neigh_comm1);
 
   // --- Build new graph
diff --git a/cpp/dolfinx/mesh/topologycomputation.cpp b/cpp/dolfinx/mesh/topologycomputation.cpp
index 006435cc42a..edefdbd1e26 100644
--- a/cpp/dolfinx/mesh/topologycomputation.cpp
+++ b/cpp/dolfinx/mesh/topologycomputation.cpp
@@ -538,9 +538,10 @@ compute_entities_by_key_matching(
       std::vector<std::size_t> perm(global_vertices.size());
       std::iota(perm.begin(), perm.end(), 0);
-      std::ranges::sort(
-          perm, [&global_vertices](std::size_t i0, std::size_t i1)
-          { return global_vertices[i0] < global_vertices[i1]; });
+      std::ranges::sort(perm,
+                        [&global_vertices](std::size_t i0, std::size_t i1) {
+                          return global_vertices[i0] < global_vertices[i1];
+                        });
       // For quadrilaterals, the vertex opposite the lowest vertex should
       // be last
       if (entity_type == mesh::CellType::quadrilateral)
diff --git a/python/dolfinx/geometry.py b/python/dolfinx/geometry.py
index 97b44095293..135d763729a 100644
--- a/python/dolfinx/geometry.py
+++ b/python/dolfinx/geometry.py
@@ -13,11 +13,10 @@
 import numpy.typing as npt
 
 if typing.TYPE_CHECKING:
-    from dolfinx.cpp.graph import AdjacencyList_int32
     from dolfinx.mesh import Mesh
 
-
 from dolfinx import cpp as _cpp
+from dolfinx.graph import AdjacencyList
 
 __all__ = [
     "BoundingBoxTree",
@@ -155,9 +154,7 @@ def compute_collisions_trees(
     return _cpp.geometry.compute_collisions_trees(tree0._cpp_object, tree1._cpp_object)
 
 
-def compute_collisions_points(
-    tree: BoundingBoxTree, x: npt.NDArray[np.floating]
-) -> _cpp.graph.AdjacencyList_int32:
+def compute_collisions_points(tree: BoundingBoxTree, x: npt.NDArray[np.floating]) -> AdjacencyList:
     """Compute collisions between points and leaf bounding boxes.
 
     Bounding boxes can overlap, therefore points can collide with more
@@ -172,7 +169,7 @@ def compute_collisions_points(
         point.
     """
-    return _cpp.geometry.compute_collisions_points(tree._cpp_object, x)
+    return AdjacencyList(_cpp.geometry.compute_collisions_points(tree._cpp_object, x))
 
 
 def compute_closest_entity(
@@ -216,8 +213,8 @@ def create_midpoint_tree(mesh: Mesh, dim: int, entities: npt.NDArray[np.int32])
 
 
 def compute_colliding_cells(
-    mesh: Mesh, candidates: AdjacencyList_int32, x: npt.NDArray[np.floating]
-):
+    mesh: Mesh, candidates: AdjacencyList, x: npt.NDArray[np.floating]
+) -> AdjacencyList:
     """From a mesh, find which cells collide with a set of points.
 
     Args:
@@ -231,10 +228,14 @@ def compute_colliding_cells(
         collide with the ith point.
     """
-    return _cpp.geometry.compute_colliding_cells(mesh._cpp_object, candidates, x)
+    return AdjacencyList(
+        _cpp.geometry.compute_colliding_cells(mesh._cpp_object, candidates._cpp_object, x)
+    )
 
 
-def squared_distance(mesh: Mesh, dim: int, entities: list[int], points: npt.NDArray[np.floating]):
+def squared_distance(
+    mesh: Mesh, dim: int, entities: npt.NDArray[np.int32], points: npt.NDArray[np.floating]
+) -> npt.NDArray[np.floating]:
     """Compute the squared distance between a point and a mesh entity.
 
     The distance is computed between the ith input points and the ith
""" - return _cpp.geometry.compute_collisions_points(tree._cpp_object, x) + return AdjacencyList(_cpp.geometry.compute_collisions_points(tree._cpp_object, x)) def compute_closest_entity( @@ -216,8 +213,8 @@ def create_midpoint_tree(mesh: Mesh, dim: int, entities: npt.NDArray[np.int32]) def compute_colliding_cells( - mesh: Mesh, candidates: AdjacencyList_int32, x: npt.NDArray[np.floating] -): + mesh: Mesh, candidates: AdjacencyList, x: npt.NDArray[np.floating] +) -> AdjacencyList: """From a mesh, find which cells collide with a set of points. Args: @@ -231,10 +228,14 @@ def compute_colliding_cells( collide with the ith point. """ - return _cpp.geometry.compute_colliding_cells(mesh._cpp_object, candidates, x) + return AdjacencyList( + _cpp.geometry.compute_colliding_cells(mesh._cpp_object, candidates._cpp_object, x) + ) -def squared_distance(mesh: Mesh, dim: int, entities: list[int], points: npt.NDArray[np.floating]): +def squared_distance( + mesh: Mesh, dim: int, entities: npt.NDArray[np.int32], points: npt.NDArray[np.floating] +) -> npt.NDArray[np.floating]: """Compute the squared distance between a point and a mesh entity. The distance is computed between the ith input points and the ith diff --git a/python/dolfinx/graph.py b/python/dolfinx/graph.py index dd16514a79a..ebc297ba000 100644 --- a/python/dolfinx/graph.py +++ b/python/dolfinx/graph.py @@ -1,4 +1,4 @@ -# Copyright (C) 2021 Garth N. Wells +# Copyright (C) 2021-2024 Garth N. Wells and Paul T. Kühner # # This file is part of DOLFINx (https://www.fenicsproject.org) # @@ -7,7 +7,10 @@ from __future__ import annotations +from typing import Optional, Union + import numpy as np +import numpy.typing as npt from dolfinx import cpp as _cpp from dolfinx.cpp.graph import partitioner @@ -28,10 +31,65 @@ pass -__all__ = ["adjacencylist", "partitioner"] +__all__ = ["AdjacencyList", "adjacencylist", "partitioner"] + + +class AdjacencyList: + _cpp_object: Union[_cpp.la.AdjacencyList_int32, _cpp.la.AdjacencyList_int64] + + def __init__(self, cpp_object: Union[_cpp.la.AdjacencyList_int32, _cpp.la.AdjacencyList_int64]): + """Creates a Python wrapper for the exported adjacency list class. + + Note: + Do not use this constructor directly. Instead use :func:`adjacencylist`. + + Args: + The underlying cpp instance that this object will wrap. + """ + self._cpp_object = cpp_object + + def links(self, node: Union[np.int32, np.int64]) -> npt.NDArray[Union[np.int32, np.int64]]: + """Retrieve the links of a node. + + Args: + Node to retrieve the connectitivty of. + + Returns: + Neighbors of the node. + """ + return self._cpp_object.links(node) + @property + def array(self) -> npt.NDArray[Union[np.int32, np.int64]]: + """Array representation of the adjacency list. -def adjacencylist(data: np.ndarray, offsets=None): + Returns: + Flattened array representation of the adjacency list. + """ + return self._cpp_object.array + + @property + def offsets(self) -> npt.NDArray[np.int32]: + """Offsets for each node in the :func:`array`. + + Returns: + Array of indices with shape `(num_nodes+1)`. + """ + return self._cpp_object.offsets + + @property + def num_nodes(self) -> np.int32: + """Number of nodes in the adjacency list. + + Returns: + Number of nodes. + """ + return self._cpp_object.num_nodes + + +def adjacencylist( + data: npt.NDArray[Union[np.int32, np.int64]], offsets: Optional[npt.NDArray[np.int32]] = None +) -> AdjacencyList: """Create an AdjacencyList for int32 or int64 datasets. 
@@ -42,15 +100,14 @@ def adjacencylist(data: np.ndarray, offsets=None):
     Returns:
         An adjacency list.
-
     """
-    if offsets is None:
-        try:
-            return _cpp.graph.AdjacencyList_int32(data)
-        except TypeError:
-            return _cpp.graph.AdjacencyList_int64(data)
+    # TODO: Switch to np.isdtype(data.dtype, np.int32) once numpy >= 2.0 is enforced
+    if data.dtype == np.int32:
+        cpp_t = _cpp.graph.AdjacencyList_int32
+    elif data.dtype == np.int64:
+        cpp_t = _cpp.graph.AdjacencyList_int64
     else:
-        try:
-            return _cpp.graph.AdjacencyList_int32(data, offsets)
-        except TypeError:
-            return _cpp.graph.AdjacencyList_int64(data, offsets)
+        raise TypeError("Data type for adjacency list not supported.")
+
+    cpp_object = cpp_t(data, offsets) if offsets is not None else cpp_t(data)
+    return AdjacencyList(cpp_object)
diff --git a/python/dolfinx/io/gmshio.py b/python/dolfinx/io/gmshio.py
index 7d27e7fde4b..327b3c63f76 100644
--- a/python/dolfinx/io/gmshio.py
+++ b/python/dolfinx/io/gmshio.py
@@ -19,6 +19,7 @@
 from dolfinx import cpp as _cpp
 from dolfinx import default_real_type
 from dolfinx.cpp.graph import AdjacencyList_int32
+from dolfinx.graph import AdjacencyList, adjacencylist
 from dolfinx.io.utils import distribute_entity_data
 from dolfinx.mesh import CellType, Mesh, create_mesh, meshtags, meshtags_from_entities
 
@@ -298,7 +299,7 @@ def model_to_mesh(
             mesh, mesh.topology.dim, cells, cell_values
         )
         mesh.topology.create_connectivity(mesh.topology.dim, 0)
-        adj = _cpp.graph.AdjacencyList_int32(local_entities)
+        adj = adjacencylist(local_entities)
         ct = meshtags_from_entities(
             mesh, mesh.topology.dim, adj, local_values.astype(np.int32, copy=False)
         )
@@ -323,7 +324,7 @@ def model_to_mesh(
             mesh, tdim - 1, marked_facets, facet_values
         )
         mesh.topology.create_connectivity(topology.dim - 1, tdim)
-        adj = _cpp.graph.AdjacencyList_int32(local_entities)
+        adj = adjacencylist(local_entities)
         ft = meshtags_from_entities(mesh, tdim - 1, adj, local_values.astype(np.int32, copy=False))
         ft.name = "Facet tags"
     else:
@@ -338,7 +339,7 @@ def read_from_msh(
     rank: int = 0,
     gdim: int = 3,
     partitioner: typing.Optional[
-        typing.Callable[[_MPI.Comm, int, int, AdjacencyList_int32], AdjacencyList_int32]
+        typing.Callable[[_MPI.Comm, int, int, AdjacencyList], AdjacencyList_int32]
    ] = None,
 ) -> tuple[Mesh, _cpp.mesh.MeshTags_int32, _cpp.mesh.MeshTags_int32]:
     """Read a Gmsh .msh file and return a :class:`dolfinx.mesh.Mesh` and cell facet markers.
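[Reviewer note] On the Python side, `adjacencylist` now dispatches on the NumPy dtype and returns the new `AdjacencyList` wrapper instead of a raw nanobind object. A usage sketch, not part of the patch (data values are illustrative):

```python
import numpy as np
from dolfinx.graph import adjacencylist

# Two nodes: node 0 -> {1, 2}, node 1 -> {0}
data = np.array([1, 2, 0], dtype=np.int32)
offsets = np.array([0, 2, 3], dtype=np.int32)
adj = adjacencylist(data, offsets)
assert adj.num_nodes == 2
print(adj.links(0))  # [1 2]
# C++-facing APIs still take the underlying object via adj._cpp_object
```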
diff --git a/python/dolfinx/mesh.py b/python/dolfinx/mesh.py
index 48787c8c24e..d6f9f50c23a 100644
--- a/python/dolfinx/mesh.py
+++ b/python/dolfinx/mesh.py
@@ -33,6 +33,7 @@
 from dolfinx.cpp.refinement import RefinementOption
 from dolfinx.fem import CoordinateElement as _CoordinateElement
 from dolfinx.fem import coordinate_element as _coordinate_element
+from dolfinx.graph import AdjacencyList
 
 __all__ = [
     "CellType",
@@ -735,7 +736,7 @@ def meshtags(
 
 
 def meshtags_from_entities(
-    msh: Mesh, dim: int, entities: _cpp.graph.AdjacencyList_int32, values: npt.NDArray[typing.Any]
+    msh: Mesh, dim: int, entities: AdjacencyList, values: npt.NDArray[typing.Any]
 ):
     """Create a :class:`dolfinx.mesh.MeshTags` object that associates data
     with a subset of mesh entities, where the entities are defined
@@ -762,7 +763,9 @@ def meshtags_from_entities(
     elif isinstance(values, float):
         values = np.full(entities.num_nodes, values, dtype=np.double)
     values = np.asarray(values)
-    return MeshTags(_cpp.mesh.create_meshtags(msh.topology._cpp_object, dim, entities, values))
+    return MeshTags(
+        _cpp.mesh.create_meshtags(msh.topology._cpp_object, dim, entities._cpp_object, values)
+    )
 
 
 def create_interval(
diff --git a/python/test/unit/fem/test_assemble_domains.py b/python/test/unit/fem/test_assemble_domains.py
index 04a2191d9fd..8a7e6992391 100644
--- a/python/test/unit/fem/test_assemble_domains.py
+++ b/python/test/unit/fem/test_assemble_domains.py
@@ -11,9 +11,9 @@
 import pytest
 
 import ufl
-from dolfinx import cpp as _cpp
 from dolfinx import default_scalar_type, fem, la
 from dolfinx.fem import Constant, Function, assemble_scalar, dirichletbc, form, functionspace
+from dolfinx.graph import adjacencylist
 from dolfinx.mesh import (
     GhostMode,
     Mesh,
@@ -33,9 +33,7 @@ def mesh():
 def create_cell_meshtags_from_entities(mesh: Mesh, dim: int, cells: np.ndarray, values: np.ndarray):
     mesh.topology.create_connectivity(mesh.topology.dim, 0)
     cell_to_vertices = mesh.topology.connectivity(mesh.topology.dim, 0)
-    entities = _cpp.graph.AdjacencyList_int32(
-        np.array([cell_to_vertices.links(cell) for cell in cells])
-    )
+    entities = adjacencylist(np.array([cell_to_vertices.links(cell) for cell in cells]))
     return meshtags_from_entities(mesh, dim, entities, values)
diff --git a/python/test/unit/fem/test_assembler.py b/python/test/unit/fem/test_assembler.py
index 452d90eb077..f5507147c2f 100644
--- a/python/test/unit/fem/test_assembler.py
+++ b/python/test/unit/fem/test_assembler.py
@@ -1059,7 +1059,7 @@ def test_assemble_empty_rank_mesh(self):
         def partitioner(comm, nparts, local_graph, num_ghost_nodes):
             """Leave cells on the curent rank"""
             dest = np.full(len(cells), comm.rank, dtype=np.int32)
-            return graph.adjacencylist(dest)
+            return graph.adjacencylist(dest)._cpp_object
 
         if comm.rank == 0:
             # Put cells on rank 0
diff --git a/python/test/unit/fem/test_dofmap.py b/python/test/unit/fem/test_dofmap.py
index 0115893407f..4275fc37a04 100644
--- a/python/test/unit/fem/test_dofmap.py
+++ b/python/test/unit/fem/test_dofmap.py
@@ -436,7 +436,8 @@ def test_empty_rank_collapse():
     def self_partitioner(comm: MPI.Intracomm, n, m, topo):
         dests = np.full(len(topo[0]) // 2, comm.rank, dtype=np.int32)
         offsets = np.arange(len(topo[0]) // 2 + 1, dtype=np.int32)
-        return dolfinx.graph.adjacencylist(dests, offsets)
+        # TODO: can we improve on this interface? I.e. wrap to do cpp type conversion automatically
+        return dolfinx.graph.adjacencylist(dests, offsets)._cpp_object
 
     mesh = create_mesh(MPI.COMM_WORLD, cells, nodes, c_el, partitioner=self_partitioner)
diff --git a/python/test/unit/io/test_adios2.py b/python/test/unit/io/test_adios2.py
index 0b97b0f98a3..692d0aa126f 100644
--- a/python/test/unit/io/test_adios2.py
+++ b/python/test/unit/io/test_adios2.py
@@ -184,7 +184,7 @@ def test_empty_rank_mesh(self, tempdir):
         def partitioner(comm, nparts, local_graph, num_ghost_nodes):
             """Leave cells on the current rank"""
             dest = np.full(len(cells), comm.rank, dtype=np.int32)
-            return adjacencylist(dest)
+            return adjacencylist(dest)._cpp_object
 
         if comm.rank == 0:
             cells = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int64)
diff --git a/python/test/unit/mesh/test_dual_graph.py b/python/test/unit/mesh/test_dual_graph.py
index e71c0ef2ff4..e1b43b63fdc 100644
--- a/python/test/unit/mesh/test_dual_graph.py
+++ b/python/test/unit/mesh/test_dual_graph.py
@@ -25,7 +25,9 @@ def test_dgrsph_1d():
     x = 0
     # Circular chain of interval cells
     cells = [[n0, n0 + 1], [n0 + 1, n0 + 2], [n0 + 2, x]]
-    w = mesh.build_dual_graph(MPI.COMM_WORLD, mesh.CellType.interval, to_adj(cells, np.int64))
+    w = mesh.build_dual_graph(
+        MPI.COMM_WORLD, mesh.CellType.interval, to_adj(cells, np.int64)._cpp_object
+    )
     assert w.num_nodes == 3
     for i in range(w.num_nodes):
         assert len(w.links(i)) == 2
diff --git a/python/test/unit/mesh/test_mesh.py b/python/test/unit/mesh/test_mesh.py
index 7d721f1d4f7..22762107ec4 100644
--- a/python/test/unit/mesh/test_mesh.py
+++ b/python/test/unit/mesh/test_mesh.py
@@ -556,7 +556,7 @@ def test_empty_rank_mesh(dtype):
     def partitioner(comm, nparts, local_graph, num_ghost_nodes):
         """Leave cells on the curent rank"""
         dest = np.full(len(cells), comm.rank, dtype=np.int32)
-        return graph.adjacencylist(dest)
+        return graph.adjacencylist(dest)._cpp_object
 
     if comm.rank == 0:
         cells = np.array([[0, 1, 2], [0, 2, 3]], dtype=np.int64)