Commit f5d2cbd

Deploying to pdoc from @ 3f07a87 🚀

github-merge-queue[bot] committed Mar 22, 2024
1 parent 50de307 commit f5d2cbd
Showing 6 changed files with 112 additions and 79 deletions.
41 changes: 24 additions & 17 deletions domain_decomposition.html
@@ -5,7 +5,7 @@
<meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1" />
<meta name="generator" content="pdoc 0.10.0" />
<title>PyMPDATA_MPI.domain_decomposition API documentation</title>
<meta name="description" content="" />
<meta name="description" content="MPI-aware domain decomposition utilities" />
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/sanitize.min.css" integrity="sha256-PK9q560IAAa6WVRRh76LtCaI8pjTJ2z11v0miyNNjrs=" crossorigin>
<link rel="preload stylesheet" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/10up-sanitize.css/11.0.1/typography.min.css" integrity="sha256-7l/o7C8jubJiy74VsKTidCy1yBkRtiUGbVkYBylBqUg=" crossorigin>
<link rel="stylesheet preload" as="style" href="https://cdnjs.cloudflare.com/ajax/libs/highlight.js/10.1.1/styles/github.min.css" crossorigin>
@@ -22,26 +22,28 @@
<h1 class="title">Module <code>PyMPDATA_MPI.domain_decomposition</code></h1>
</header>
<section id="section-intro">
<p>MPI-aware domain decomposition utilities</p>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python"># pylint: disable=missing-module-docstring,missing-function-docstring,missing-class-docstring,invalid-name
<pre><code class="python">&#34;&#34;&#34; MPI-aware domain decomposition utilities &#34;&#34;&#34;

import numpy as np
from PyMPDATA.impl.domain_decomposition import make_subdomain
from PyMPDATA.impl.enumerations import OUTER

MPI_DIM = OUTER

subdomain = make_subdomain(jit_flags={})


def mpi_indices(grid, rank, size):
start, stop = subdomain(grid[MPI_DIM], rank, size)
xi, yi = np.indices((stop - start, grid[MPI_DIM - 1]), dtype=float)
xi += start
return xi, yi</code></pre>
def mpi_indices(*, grid, rank, size, mpi_dim):
&#34;&#34;&#34;returns a mapping from rank-local indices to domain-wide indices,
(subdomain-aware equivalent of np.indices)&#34;&#34;&#34;
start, stop = subdomain(grid[mpi_dim], rank, size)
indices_arg = list(grid)
indices_arg[mpi_dim] = stop - start
xyi = np.indices(tuple(indices_arg), dtype=float)
xyi[mpi_dim] += start
return xyi</code></pre>
</details>
</section>
<section>
@@ -52,19 +54,24 @@ <h1 class="title">Module <code>PyMPDATA_MPI.domain_decomposition</code></h1>
<h2 class="section-title" id="header-functions">Functions</h2>
<dl>
<dt id="PyMPDATA_MPI.domain_decomposition.mpi_indices"><code class="name flex">
<span>def <span class="ident">mpi_indices</span></span>(<span>grid, rank, size)</span>
<span>def <span class="ident">mpi_indices</span></span>(<span>*, grid, rank, size, mpi_dim)</span>
</code></dt>
<dd>
<div class="desc"></div>
<div class="desc"><p>returns a mapping from rank-local indices to domain-wide indices,
(subdomain-aware equivalent of np.indices)</p></div>
<details class="source">
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">def mpi_indices(grid, rank, size):
start, stop = subdomain(grid[MPI_DIM], rank, size)
xi, yi = np.indices((stop - start, grid[MPI_DIM - 1]), dtype=float)
xi += start
return xi, yi</code></pre>
<pre><code class="python">def mpi_indices(*, grid, rank, size, mpi_dim):
&#34;&#34;&#34;returns a mapping from rank-local indices to domain-wide indices,
(subdomain-aware equivalent of np.indices)&#34;&#34;&#34;
start, stop = subdomain(grid[mpi_dim], rank, size)
indices_arg = list(grid)
indices_arg[mpi_dim] = stop - start
xyi = np.indices(tuple(indices_arg), dtype=float)
xyi[mpi_dim] += start
return xyi</code></pre>
</details>
</dd>
</dl>
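For orientation, a minimal sketch of how the reworked keyword-only `mpi_indices()` could be exercised outside of an MPI launch; the grid shape, worker-pool size and `mpi_dim=0` below are illustrative assumptions, not values taken from the repository.

```python
# Illustrative sketch only: calls mpi_indices() without an mpirun launch,
# assuming PyMPDATA_MPI is installed; the grid and size values are made up.
from PyMPDATA_MPI.domain_decomposition import mpi_indices

grid = (4, 3)  # hypothetical global grid; axis 0 is decomposed across ranks
size = 2       # pretend worker-pool size (normally the MPI communicator size)

for rank in range(size):
    xyi = mpi_indices(grid=grid, rank=rank, size=size, mpi_dim=0)
    # xyi holds one index array per grid axis; along mpi_dim the rank-local
    # indices are shifted by the subdomain start, giving domain-wide indices
    print(rank, xyi.shape, xyi[0].min(), xyi[0].max())
```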
57 changes: 37 additions & 20 deletions impl/boundary_condition_commons.html
@@ -27,20 +27,21 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.boundary_condition_commons</cod
<summary>
<span>Expand source code</span>
</summary>
<pre><code class="python">&#34;&#34;&#34; boundary_condition common functions &#34;&#34;&#34;
<pre><code class="python"># pylint: disable=too-many-arguments
&#34;&#34;&#34; boundary_condition common functions &#34;&#34;&#34;

from functools import lru_cache

import numba
import numba_mpi as mpi
from PyMPDATA.impl.enumerations import INVALID_INDEX
from PyMPDATA.impl.enumerations import INVALID_INDEX, OUTER

IRRELEVANT = 666


@lru_cache()
def make_scalar_boundary_condition(
indexers, jit_flags, dimension_index, dtype, get_peer
*, indexers, jit_flags, dimension_index, dtype, get_peer, mpi_dim
):
&#34;&#34;&#34;returns fill_halos() function for scalar boundary conditions.
Provides default logic for scalar buffer filling. Notable arguments:
@@ -55,20 +56,20 @@
(i, INVALID_INDEX, k), psi, sign
)

send_recv = _make_send_recv(indexers.set, jit_flags, fill_buf, dtype, get_peer)
send_recv = _make_send_recv(
indexers.set, jit_flags, fill_buf, dtype, get_peer, mpi_dim
)

# pylint: disable=too-many-arguments
@numba.njit(**jit_flags)
def fill_halos(buffer, i_rng, j_rng, k_rng, psi, _, sign):
send_recv(buffer, psi, i_rng, j_rng, k_rng, sign, IRRELEVANT, psi)

return fill_halos


# pylint: disable=too-many-arguments
@lru_cache()
def make_vector_boundary_condition( # pylint: disable=too-many-arguments
indexers, halo, jit_flags, dimension_index, dtype, get_peer
def make_vector_boundary_condition(
indexers, halo, jit_flags, dimension_index, dtype, get_peer, mpi_dim
):
&#34;&#34;&#34;returns fill_halos() function for vector boundary conditions.
Provides default logic for vector buffer filling. Notable arguments:
@@ -92,7 +93,9 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.boundary_condition_commons</cod

buf[i - i_rng.start, k - k_rng.start] = value

send_recv = _make_send_recv(indexers.set, jit_flags, fill_buf, dtype, get_peer)
send_recv = _make_send_recv(
indexers.set, jit_flags, fill_buf, dtype, get_peer, mpi_dim
)

@numba.njit(**jit_flags)
def fill_halos_loop_vector(buffer, i_rng, j_rng, k_rng, components, dim, _, sign):
@@ -103,10 +106,17 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.boundary_condition_commons</cod
return fill_halos_loop_vector


def _make_send_recv(set_value, jit_flags, fill_buf, dtype, get_peer):
def _make_send_recv(set_value, jit_flags, fill_buf, dtype, get_peer, mpi_dim):

@numba.njit(**jit_flags)
def get_buffer_chunk(buffer, i_rng, k_rng, chunk_index):
chunk_size = len(i_rng) * len(k_rng)
if mpi_dim != OUTER:
n_chunks = len(buffer) // (chunk_size * numba.get_num_threads())
chunk_index += numba.get_thread_id() * n_chunks
else:
n_chunks = len(buffer) // (chunk_size * 2)
chunk_index += int(numba.get_thread_id() != 0) * n_chunks
return buffer.view(dtype)[
chunk_index * chunk_size : (chunk_index + 1) * chunk_size
].reshape((len(i_rng), len(k_rng)))
@@ -126,12 +136,16 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.boundary_condition_commons</cod

@numba.njit(**jit_flags)
def _send(buf, peer, fill_buf_args):
tag = numba.get_thread_id()
fill_buf(buf, *fill_buf_args)
mpi.send(buf, dest=peer)
mpi.send(buf, dest=peer, tag=tag)

@numba.njit(**jit_flags)
def _recv(buf, peer):
mpi.recv(buf, source=peer)
th_id = numba.get_thread_id()
n_th = numba.get_num_threads()
tag = th_id if mpi_dim != OUTER else {0: n_th - 1, n_th - 1: 0}[th_id]
mpi.recv(buf, source=peer, tag=tag)

@numba.njit(**jit_flags)
def _send_recv(buffer, psi, i_rng, j_rng, k_rng, sign, dim, output):
@@ -160,7 +174,7 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.boundary_condition_commons</cod
<h2 class="section-title" id="header-functions">Functions</h2>
<dl>
<dt id="PyMPDATA_MPI.impl.boundary_condition_commons.make_scalar_boundary_condition"><code class="name flex">
<span>def <span class="ident">make_scalar_boundary_condition</span></span>(<span>indexers, jit_flags, dimension_index, dtype, get_peer)</span>
<span>def <span class="ident">make_scalar_boundary_condition</span></span>(<span>*, indexers, jit_flags, dimension_index, dtype, get_peer, mpi_dim)</span>
</code></dt>
<dd>
<div class="desc"><p>returns fill_halos() function for scalar boundary conditions.
@@ -173,7 +187,7 @@ <h2 class="section-title" id="header-functions">Functions</h2>
</summary>
<pre><code class="python">@lru_cache()
def make_scalar_boundary_condition(
indexers, jit_flags, dimension_index, dtype, get_peer
*, indexers, jit_flags, dimension_index, dtype, get_peer, mpi_dim
):
&#34;&#34;&#34;returns fill_halos() function for scalar boundary conditions.
Provides default logic for scalar buffer filling. Notable arguments:
@@ -188,9 +202,10 @@ <h2 class="section-title" id="header-functions">Functions</h2>
(i, INVALID_INDEX, k), psi, sign
)

send_recv = _make_send_recv(indexers.set, jit_flags, fill_buf, dtype, get_peer)
send_recv = _make_send_recv(
indexers.set, jit_flags, fill_buf, dtype, get_peer, mpi_dim
)

# pylint: disable=too-many-arguments
@numba.njit(**jit_flags)
def fill_halos(buffer, i_rng, j_rng, k_rng, psi, _, sign):
send_recv(buffer, psi, i_rng, j_rng, k_rng, sign, IRRELEVANT, psi)
@@ -199,7 +214,7 @@ <h2 class="section-title" id="header-functions">Functions</h2>
</details>
</dd>
<dt id="PyMPDATA_MPI.impl.boundary_condition_commons.make_vector_boundary_condition"><code class="name flex">
<span>def <span class="ident">make_vector_boundary_condition</span></span>(<span>indexers, halo, jit_flags, dimension_index, dtype, get_peer)</span>
<span>def <span class="ident">make_vector_boundary_condition</span></span>(<span>indexers, halo, jit_flags, dimension_index, dtype, get_peer, mpi_dim)</span>
</code></dt>
<dd>
<div class="desc"><p>returns fill_halos() function for vector boundary conditions.
@@ -211,8 +226,8 @@ <h2 class="section-title" id="header-functions">Functions</h2>
<span>Expand source code</span>
</summary>
<pre><code class="python">@lru_cache()
def make_vector_boundary_condition( # pylint: disable=too-many-arguments
indexers, halo, jit_flags, dimension_index, dtype, get_peer
def make_vector_boundary_condition(
indexers, halo, jit_flags, dimension_index, dtype, get_peer, mpi_dim
):
&#34;&#34;&#34;returns fill_halos() function for vector boundary conditions.
Provides default logic for vector buffer filling. Notable arguments:
@@ -236,7 +251,9 @@ <h2 class="section-title" id="header-functions">Functions</h2>

buf[i - i_rng.start, k - k_rng.start] = value

send_recv = _make_send_recv(indexers.set, jit_flags, fill_buf, dtype, get_peer)
send_recv = _make_send_recv(
indexers.set, jit_flags, fill_buf, dtype, get_peer, mpi_dim
)

@numba.njit(**jit_flags)
def fill_halos_loop_vector(buffer, i_rng, j_rng, k_rng, components, dim, _, sign):
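To make the thread-aware tagging and buffer chunking introduced above easier to follow, here is a stand-alone sketch of the arithmetic with plain Python standing in for the `numba`/`numba_mpi` calls; the thread count and buffer sizes are arbitrary assumptions.

```python
# Stand-alone illustration (no MPI, no Numba) of the tag pairing and buffer
# chunking logic added in this commit; the values below are arbitrary.
OUTER = 0      # stands in for PyMPDATA.impl.enumerations.OUTER
N_THREADS = 4  # assumed thread-pool size

def recv_tag(thread_id, mpi_dim):
    """mirrors _recv(): inner-dimension exchanges tag messages with the
    thread's own id; outer-dimension exchanges pair only the first and
    last threads and swap their ids"""
    if mpi_dim != OUTER:
        return thread_id
    return {0: N_THREADS - 1, N_THREADS - 1: 0}[thread_id]

def chunk_slice(buffer_len, chunk_size, chunk_index, thread_id, mpi_dim):
    """mirrors get_buffer_chunk(): the shared buffer is split per thread
    for inner-dimension exchanges, but only into two halves (first and
    last thread) for the outer dimension"""
    if mpi_dim != OUTER:
        n_chunks = buffer_len // (chunk_size * N_THREADS)
        chunk_index += thread_id * n_chunks
    else:
        n_chunks = buffer_len // (chunk_size * 2)
        chunk_index += int(thread_id != 0) * n_chunks
    return slice(chunk_index * chunk_size, (chunk_index + 1) * chunk_size)

# with 4 threads, only threads 0 and 3 take part in the outer-dim halo
# exchange and their receive tags are swapped:
print([recv_tag(t, OUTER) for t in (0, N_THREADS - 1)])  # [3, 0]
print(chunk_slice(buffer_len=48, chunk_size=6, chunk_index=0,
                  thread_id=3, mpi_dim=OUTER))           # slice(24, 30, None)
```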
65 changes: 35 additions & 30 deletions impl/mpi_boundary_condition.html
@@ -35,10 +35,16 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.mpi_boundary_condition</code></
class MPIBoundaryCondition:
&#34;&#34;&#34;common base class for MPI boundary conditions&#34;&#34;&#34;

def __init__(self, base, size):
def __init__(self, base, size, mpi_dim):
self.__mpi_size_one = size == 1
self.worker_pool_size = size
self.base = base
self.mpi_dim = mpi_dim

@staticmethod
def make_get_peer(_, __):
&#34;&#34;&#34;returns (lru-cached) numba-compiled callable.&#34;&#34;&#34;
raise NotImplementedError()

# pylint: disable=too-many-arguments
def make_scalar(self, indexers, halo, dtype, jit_flags, dimension_index):
@@ -48,17 +54,13 @@ <h1 class="title">Module <code>PyMPDATA_MPI.impl.mpi_boundary_condition</code></
indexers, halo, dtype, jit_flags, dimension_index
)
return make_scalar_boundary_condition(
indexers,
jit_flags,
dimension_index,
dtype,
self.make_get_peer(jit_flags, self.worker_pool_size),
)

@staticmethod
def make_get_peer(_, __):
&#34;&#34;&#34;returns (lru-cached) numba-compiled callable.&#34;&#34;&#34;
raise NotImplementedError()</code></pre>
indexers=indexers,
jit_flags=jit_flags,
dimension_index=dimension_index,
dtype=dtype,
get_peer=self.make_get_peer(jit_flags, self.worker_pool_size),
mpi_dim=self.mpi_dim,
)</code></pre>
</details>
</section>
<section>
@@ -72,7 +74,7 @@
<dl>
<dt id="PyMPDATA_MPI.impl.mpi_boundary_condition.MPIBoundaryCondition"><code class="flex name class">
<span>class <span class="ident">MPIBoundaryCondition</span></span>
<span>(</span><span>base, size)</span>
<span>(</span><span>base, size, mpi_dim)</span>
</code></dt>
<dd>
<div class="desc"><p>common base class for MPI boundary conditions</p></div>
@@ -83,10 +85,16 @@ <h2 class="section-title" id="header-classes">Classes</h2>
<pre><code class="python">class MPIBoundaryCondition:
&#34;&#34;&#34;common base class for MPI boundary conditions&#34;&#34;&#34;

def __init__(self, base, size):
def __init__(self, base, size, mpi_dim):
self.__mpi_size_one = size == 1
self.worker_pool_size = size
self.base = base
self.mpi_dim = mpi_dim

@staticmethod
def make_get_peer(_, __):
&#34;&#34;&#34;returns (lru-cached) numba-compiled callable.&#34;&#34;&#34;
raise NotImplementedError()

# pylint: disable=too-many-arguments
def make_scalar(self, indexers, halo, dtype, jit_flags, dimension_index):
@@ -96,17 +104,13 @@ <h2 class="section-title" id="header-classes">Classes</h2>
indexers, halo, dtype, jit_flags, dimension_index
)
return make_scalar_boundary_condition(
indexers,
jit_flags,
dimension_index,
dtype,
self.make_get_peer(jit_flags, self.worker_pool_size),
)

@staticmethod
def make_get_peer(_, __):
&#34;&#34;&#34;returns (lru-cached) numba-compiled callable.&#34;&#34;&#34;
raise NotImplementedError()</code></pre>
indexers=indexers,
jit_flags=jit_flags,
dimension_index=dimension_index,
dtype=dtype,
get_peer=self.make_get_peer(jit_flags, self.worker_pool_size),
mpi_dim=self.mpi_dim,
)</code></pre>
</details>
<h3>Subclasses</h3>
<ul class="hlist">
@@ -149,11 +153,12 @@ <h3>Methods</h3>
indexers, halo, dtype, jit_flags, dimension_index
)
return make_scalar_boundary_condition(
indexers,
jit_flags,
dimension_index,
dtype,
self.make_get_peer(jit_flags, self.worker_pool_size),
indexers=indexers,
jit_flags=jit_flags,
dimension_index=dimension_index,
dtype=dtype,
get_peer=self.make_get_peer(jit_flags, self.worker_pool_size),
mpi_dim=self.mpi_dim,
)</code></pre>
</details>
</dd>
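The base class above is meant to be subclassed; the sketch below only illustrates the shape such a subclass could take after this commit. The class name, the peer arithmetic and the `get_peer()` call signature are assumptions made purely for illustration, since only the `__init__(base, size, mpi_dim)` contract and the `NotImplementedError` in `make_get_peer()` are visible in this diff.

```python
# Hypothetical subclass sketch; the class name, the peer arithmetic and the
# get_peer() call signature are assumptions made for illustration only.
# Only the __init__(base, size, mpi_dim) contract and the NotImplementedError
# in make_get_peer() are visible in this commit's diff.
import numba

from PyMPDATA_MPI.impl.mpi_boundary_condition import MPIBoundaryCondition


class MyPeriodicBoundary(MPIBoundaryCondition):  # assumed name
    @staticmethod
    def make_get_peer(jit_flags, size):
        """returns a numba-compiled callable, as the base class requires;
        the callable's signature below is a placeholder assumption"""

        @numba.njit(**jit_flags)
        def get_peer(rank):  # assumed signature, illustration only
            return (rank + 1) % size

        return get_peer
```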
2 changes: 1 addition & 1 deletion index.html
@@ -46,7 +46,7 @@ <h2 class="section-title" id="header-submodules">Sub-modules</h2>
<dl>
<dt><code class="name"><a title="PyMPDATA_MPI.domain_decomposition" href="domain_decomposition.html">PyMPDATA_MPI.domain_decomposition</a></code></dt>
<dd>
<div class="desc"></div>
<div class="desc"><p>MPI-aware domain decomposition utilities</p></div>
</dd>
<dt><code class="name"><a title="PyMPDATA_MPI.hdf_storage" href="hdf_storage.html">PyMPDATA_MPI.hdf_storage</a></code></dt>
<dd>