diff --git a/.github/workflows/tests.yml b/.github/workflows/tests.yml index abd3397e92..e3cd8195d3 100644 --- a/.github/workflows/tests.yml +++ b/.github/workflows/tests.yml @@ -10,7 +10,7 @@ jobs: name: ${{ matrix.config.name }} runs-on: ${{ matrix.config.os }} env: - BOUT_TEST_TIMEOUT: "5m" + BOUT_TEST_TIMEOUT: "6m" PETSC_DIR: /usr/lib/petscdir/3.7.7/x86_64-linux-gnu-real PETSC_ARCH: "" SLEPC_DIR: /usr/lib/slepcdir/3.7.4/x86_64-linux-gnu-real diff --git a/.gitmodules b/.gitmodules index 897ed08f82..436e03dd4d 100644 --- a/.gitmodules +++ b/.gitmodules @@ -10,3 +10,9 @@ [submodule "externalpackages/fmt"] path = externalpackages/fmt url = https://github.com/fmtlib/fmt.git +[submodule "externalpackages/boutdata"] + path = externalpackages/boutdata + url = https://github.com/boutproject/boutdata.git +[submodule "externalpackages/boututils"] + path = externalpackages/boututils + url = https://github.com/boutproject/boututils.git diff --git a/CHANGELOG.md b/CHANGELOG.md index 21671f2e9d..dba003c9d0 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -43,6 +43,9 @@ - The `LaplaceShoot` Laplacian implementation was removed. There are very few cases, if any, where this implementation actually works. [\#2177](https://github.com/boutproject/BOUT-dev/pull/2177) +- `PhysicsModel` expects the options `datadir` and `dump_format` to + have been set; this is only a problem if you don't call + `BoutInitialise`. [\#2062](https://github.com/boutproject/BOUT-dev/pull/2062) ## [v4.3.2](https://github.com/boutproject/BOUT-dev/tree/v4.3.2) (2020-10-19) diff --git a/README.md b/README.md index 6a3f6118bd..b136fa8f79 100644 --- a/README.md +++ b/README.md @@ -25,10 +25,10 @@ equations appearing in a readable form. For example, the following set of equations for magnetohydrodynamics (MHD): -![ddt_rho](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20%5Crho%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%5Crho%20-%20%5Crho%5Cnabla%5Ccdot%5Cmathbf%7Bv%7D) -![ddt_p](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20p%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%20p%20-%20%5Cgamma%20p%5Cnabla%5Ccdot%5Cmathbf%7Bv%7D) -![ddt_v](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20%5Cmathbf%7Bv%7D%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%5Cmathbf%7Bv%7D%20+%20%5Cfrac%7B1%7D%7B%5Crho%7D%28-%5Cnabla%20p%20+%20%28%5Cnabla%5Ctimes%5Cmathbf%7BB%7D%29%5Ctimes%5Cmathbf%7BB%7D%29) -![ddt_B](http://latex.codecogs.com/png.latex?%7B%7B%5Cfrac%7B%5Cpartial%20%5Cmathbf%7BB%7D%7D%7B%5Cpartial%20t%7D%7D%7D%20%3D%20%5Cnabla%5Ctimes%28%5Cmathbf%7Bv%7D%5Ctimes%5Cmathbf%7BB%7D%29) +![ddt_rho](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20%5Crho%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%5Crho%20-%20%5Crho%5Cnabla%5Ccdot%5Cmathbf%7Bv%7D) +![ddt_p](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20p%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%20p%20-%20%5Cgamma%20p%5Cnabla%5Ccdot%5Cmathbf%7Bv%7D) +![ddt_v](http://latex.codecogs.com/png.latex?%5Cfrac%7B%5Cpartial%20%5Cmathbf%7Bv%7D%7D%7B%5Cpartial%20t%7D%20%3D%20-%5Cmathbf%7Bv%7D%5Ccdot%5Cnabla%5Cmathbf%7Bv%7D%20+%20%5Cfrac%7B1%7D%7B%5Crho%7D%28-%5Cnabla%20p%20+%20%28%5Cnabla%5Ctimes%5Cmathbf%7BB%7D%29%5Ctimes%5Cmathbf%7BB%7D%29) +![ddt_B](http://latex.codecogs.com/png.latex?%7B%7B%5Cfrac%7B%5Cpartial%20%5Cmathbf%7BB%7D%7D%7B%5Cpartial%20t%7D%7D%7D%20%3D%20%5Cnabla%5Ctimes%28%5Cmathbf%7Bv%7D%5Ctimes%5Cmathbf%7BB%7D%29) can be written simply as: @@ 
-138,7 +138,6 @@ This directory contains * **boutdata** Routines to simplify accessing BOUT++ output * **boututils** Some useful routines for accessing and plotting data - * **bout_runners** A python wrapper to submit several runs at once (either on a normal computer, or through a PBS system) * **post_bout** Routines for post processing in BOUT++ * **slab** IDL routine for grid generation of a slab @@ -185,3 +184,4 @@ BOUT++ links by default with some GPL licensed libraries. Thus if you compile BOUT++ with any of them, BOUT++ will automatically be licensed as GPL. Thus if you want to use BOUT++ with GPL non-compatible code, make sure to compile without GPLed code. + diff --git a/autoconf_build_defines.hxx.in b/autoconf_build_defines.hxx.in index 9116b37c7f..925fe967ec 100644 --- a/autoconf_build_defines.hxx.in +++ b/autoconf_build_defines.hxx.in @@ -24,6 +24,9 @@ /* LAPACK support */ #undef BOUT_HAS_LAPACK +/* NETCDF support */ +#undef BOUT_HAS_LEGACY_NETCDF + /* NETCDF support */ #undef BOUT_HAS_NETCDF diff --git a/bin/bout-config.in b/bin/bout-config.in index dcef809fdc..bfcea24c04 100755 --- a/bin/bout-config.in +++ b/bin/bout-config.in @@ -29,6 +29,7 @@ idlpath="@IDLCONFIGPATH@" pythonpath="@PYTHONCONFIGPATH@" has_netcdf="@BOUT_HAS_NETCDF@" +has_legacy_netcdf="@BOUT_HAS_LEGACY_NETCDF@" has_pnetcdf="@BOUT_HAS_PNETCDF@" has_hdf5="@BOUT_HAS_HDF5@" has_pvode="@BOUT_HAS_PVODE@" @@ -68,6 +69,7 @@ Available values for OPTION include: --python Python path --has-netcdf NetCDF file support + --has-legacy-netcdf Legacy NetCDF file support --has-pnetcdf Parallel NetCDF file support --has-hdf5 HDF5 file support --has-pvode PVODE solver support @@ -103,6 +105,7 @@ all() echo " --python -> $pythonpath" echo echo " --has-netcdf -> $has_netcdf" + echo " --has-legacy-netcdf -> $has_legacy_netcdf" echo " --has-pnetcdf -> $has_pnetcdf" echo " --has-hdf5 -> $has_hdf5" echo " --has-pvode -> $has_pvode" @@ -189,6 +192,10 @@ while test $# -gt 0; do echo $has_netcdf ;; + --has-legacy-netcdf) + echo $has_legacy_netcdf + ;; + --has-pnetcdf) echo $has_pnetcdf ;; diff --git a/bin/bout-pylib-cmd-to-bin b/bin/bout-pylib-cmd-to-bin index 850aa6daf8..16217544df 100755 --- a/bin/bout-pylib-cmd-to-bin +++ b/bin/bout-pylib-cmd-to-bin @@ -250,7 +250,6 @@ if __name__ == "__main__": print("Please wait, scanning modules ...") x=boutmodules(["boutcore", "boutdata", - "bout_runners", "boututils", "post_bout", "zoidberg"]) @@ -292,3 +291,4 @@ if __name__ == "__main__": print("Creating failed. 
To rerun and overwrite the file without asking run:") print("%s %s %s %s -f"%(sys.argv[0],mod,fun,name)) raise + diff --git a/bin/bout-v5-macro-upgrader.py b/bin/bout-v5-macro-upgrader.py index 7f79d49ab6..16d3b3ed5b 100755 --- a/bin/bout-v5-macro-upgrader.py +++ b/bin/bout-v5-macro-upgrader.py @@ -131,6 +131,13 @@ "macro": True, "always_defined": True, }, + { + "old": "NCDF", + "new": "BOUT_HAS_LEGACY_NETCDF", + "headers": "bout/build_config.hxx", + "macro": True, + "always_defined": True, + }, { "old": "HDF5", "new": "BOUT_HAS_HDF5", @@ -274,14 +281,14 @@ def fix_ifdefs(old, source): def fix_always_defined_macros(old, new, source): """Fix '#ifdef's that should become plain '#if' """ - new_source = re.sub(r"#ifdef\s+{}".format(old), r"#if {}".format(new), source) - return re.sub(r"#ifndef\s+{}".format(old), r"#if !{}".format(new), new_source) + new_source = re.sub(r"#ifdef\s+{}\b".format(old), r"#if {}".format(new), source) + return re.sub(r"#ifndef\s+{}\b".format(old), r"#if !{}".format(new), new_source) def fix_replacement(old, new, source): """Straight replacements """ - return re.sub(r'([^"])\b{}\b([^"])'.format(old), r"\1{}\2".format(new), source) + return re.sub(r'([^"_])\b{}\b([^"_])'.format(old), r"\1{}\2".format(new), source) def apply_fixes(replacements, source): diff --git a/cmake_build_defines.hxx.in b/cmake_build_defines.hxx.in index fb245cc39f..d3edbb752a 100644 --- a/cmake_build_defines.hxx.in +++ b/cmake_build_defines.hxx.in @@ -27,4 +27,7 @@ #cmakedefine01 BOUT_USE_SIGNAL #cmakedefine01 BOUT_USE_TRACK +// CMake build does not support legacy interface +#define BOUT_HAS_LEGACY_NETCDF 0 + #endif // BOUT_BUILD_CONFIG_HXX diff --git a/configure b/configure index c0dadb222e..cb5593809b 100755 --- a/configure +++ b/configure @@ -643,6 +643,7 @@ BOUT_HAS_PVODE BOUT_HAS_PRETTY_FUNCTION BOUT_HAS_PNETCDF BOUT_HAS_PETSC +BOUT_HAS_LEGACY_NETCDF BOUT_HAS_NETCDF BOUT_HAS_LAPACK BOUT_HAS_IDA @@ -6631,6 +6632,8 @@ fi NCCONF="" # Configuration script +BOUT_HAS_NETCDF=no +BOUT_HAS_LEGACY_NETCDF=no if test "x$with_netcdf" != "xno"; then : ########################################## @@ -6784,7 +6787,9 @@ ac_compiler_gnu=$ac_cv_cxx_compiler_gnu LIBS=$save_LIBS LDFLAGS=$save_LDFLAGS - CXXFLAGS="$save_CXXFLAGS -DNCDF" + CXXFLAGS="$save_CXXFLAGS" + BOUT_HAS_NETCDF=yes + BOUT_HAS_LEGACY_NETCDF=yes fi EXTRA_LIBS="$EXTRA_LIBS $NCLIB" @@ -7197,7 +7202,8 @@ fi { $as_echo "$as_me:${as_lineno-$LINENO}: -> Legacy NetCDF support enabled" >&5 $as_echo "$as_me: -> Legacy NetCDF support enabled" >&6;} NCPATH="found" - CXXFLAGS="$CXXFLAGS -DNCDF" + BOUT_HAS_NETCDF=yes + BOUT_HAS_LEGACY_NETCDF=yes fi @@ -16368,6 +16374,18 @@ fi +if test "x$BOUT_HAS_LEGACY_NETCDF" = "xyes"; then : + +$as_echo "#define BOUT_HAS_LEGACY_NETCDF 1" >>confdefs.h + +else + +$as_echo "#define BOUT_HAS_LEGACY_NETCDF 0" >>confdefs.h + +fi + + + if test "x$BOUT_HAS_PETSC" = "xyes"; then : $as_echo "#define BOUT_HAS_PETSC 1" >>confdefs.h @@ -18047,8 +18065,8 @@ $as_echo "$as_me: CVODE support : $BOUT_HAS_CVODE" >&6;} $as_echo "$as_me: ARKODE support : $BOUT_HAS_ARKODE" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: FFTW support : $BOUT_HAS_FFTW" >&5 $as_echo "$as_me: FFTW support : $BOUT_HAS_FFTW" >&6;} -{ $as_echo "$as_me:${as_lineno-$LINENO}: NetCDF support : $BOUT_HAS_NETCDF" >&5 -$as_echo "$as_me: NetCDF support : $BOUT_HAS_NETCDF" >&6;} +{ $as_echo "$as_me:${as_lineno-$LINENO}: NetCDF support : $BOUT_HAS_NETCDF (legacy: $BOUT_HAS_LEGACY_NETCDF)" >&5 +$as_echo "$as_me: NetCDF support : $BOUT_HAS_NETCDF (legacy: $BOUT_HAS_LEGACY_NETCDF)" 
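To make the regex changes in bin/bout-v5-macro-upgrader.py above concrete, here is a small self-contained check of why the added `\b` is needed; the macro name `NCDF4` is invented for illustration, standing in for any identifier that merely starts with `NCDF`:

```python
import re

source = "#ifdef NCDF4\n#ifdef NCDF\n"

# Without the trailing \b, the NCDF pattern also consumes the prefix of NCDF4
loose = re.sub(r"#ifdef\s+NCDF", "#if BOUT_HAS_LEGACY_NETCDF", source)
assert loose.splitlines()[0] == "#if BOUT_HAS_LEGACY_NETCDF4"  # mangled!

# With the word boundary, only the exact macro name is rewritten
strict = re.sub(r"#ifdef\s+NCDF\b", "#if BOUT_HAS_LEGACY_NETCDF", source)
assert strict.splitlines() == ["#ifdef NCDF4", "#if BOUT_HAS_LEGACY_NETCDF"]
```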
>&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: Parallel-NetCDF support : $BOUT_HAS_PNETCDF" >&5 $as_echo "$as_me: Parallel-NetCDF support : $BOUT_HAS_PNETCDF" >&6;} { $as_echo "$as_me:${as_lineno-$LINENO}: HDF5 support : $BOUT_HAS_HDF5 (parallel: $BOUT_HAS_PHDF5)" >&5 diff --git a/configure.ac b/configure.ac index 72afedd925..1f6614f9c3 100644 --- a/configure.ac +++ b/configure.ac @@ -488,6 +488,8 @@ BOUT_HAS_FFTW="no" NCCONF="" # Configuration script +BOUT_HAS_NETCDF=no +BOUT_HAS_LEGACY_NETCDF=no AS_IF([test "x$with_netcdf" != "xno"], [ ########################################## @@ -553,7 +555,9 @@ AS_IF([test "x$with_netcdf" != "xno"], AC_LANG_POP([C++]) LIBS=$save_LIBS LDFLAGS=$save_LDFLAGS - CXXFLAGS="$save_CXXFLAGS -DNCDF" + CXXFLAGS="$save_CXXFLAGS" + BOUT_HAS_NETCDF=yes + BOUT_HAS_LEGACY_NETCDF=yes ]) EXTRA_LIBS="$EXTRA_LIBS $NCLIB" @@ -574,7 +578,8 @@ AS_IF([test "x$with_netcdf" != "xno"], file_formats="$file_formats netCDF" AC_MSG_NOTICE([ -> Legacy NetCDF support enabled]) NCPATH="found" - CXXFLAGS="$CXXFLAGS -DNCDF" + BOUT_HAS_NETCDF=yes + BOUT_HAS_LEGACY_NETCDF=yes ], []) ]) @@ -1342,6 +1347,7 @@ BOUT_DEFINE_SUBST(BOUT_HAS_HDF5, [HDF5 support]) BOUT_DEFINE_SUBST(BOUT_HAS_IDA, [IDA support]) BOUT_DEFINE_SUBST(BOUT_HAS_LAPACK, [LAPACK support]) BOUT_DEFINE_SUBST(BOUT_HAS_NETCDF, [NETCDF support]) +BOUT_DEFINE_SUBST(BOUT_HAS_LEGACY_NETCDF, [NETCDF support]) BOUT_DEFINE_SUBST(BOUT_HAS_PETSC, [PETSc support]) BOUT_DEFINE_SUBST(BOUT_HAS_PNETCDF, [PNETCDF support]) BOUT_DEFINE_SUBST(BOUT_HAS_PRETTY_FUNCTION, [Compiler PRETTYFUNCTION support]) @@ -1389,7 +1395,7 @@ AC_MSG_NOTICE([ IDA support : $BOUT_HAS_IDA]) AC_MSG_NOTICE([ CVODE support : $BOUT_HAS_CVODE]) AC_MSG_NOTICE([ ARKODE support : $BOUT_HAS_ARKODE]) AC_MSG_NOTICE([ FFTW support : $BOUT_HAS_FFTW]) -AC_MSG_NOTICE([ NetCDF support : $BOUT_HAS_NETCDF]) +AC_MSG_NOTICE([ NetCDF support : $BOUT_HAS_NETCDF (legacy: $BOUT_HAS_LEGACY_NETCDF)]) AC_MSG_NOTICE([ Parallel-NetCDF support : $BOUT_HAS_PNETCDF]) AC_MSG_NOTICE([ HDF5 support : $BOUT_HAS_HDF5 (parallel: $BOUT_HAS_PHDF5)]) AC_MSG_NOTICE([ Lapack support : $BOUT_HAS_LAPACK]) diff --git a/examples/bout_runners_example/.gitignore b/examples/bout_runners_example/.gitignore deleted file mode 100644 index f06dccb407..0000000000 --- a/examples/bout_runners_example/.gitignore +++ /dev/null @@ -1,12 +0,0 @@ -data/n* -data/c* -data/t* -data/d* -MMS/MMS-mms_True* -MMS/mms_True* -*run_log* -*tmp* -*.log -*.err -diffusion_3D -grid_files/ diff --git a/examples/bout_runners_example/1-basic_driver.py b/examples/bout_runners_example/1-basic_driver.py deleted file mode 100755 index 4e963c2a73..0000000000 --- a/examples/bout_runners_example/1-basic_driver.py +++ /dev/null @@ -1,11 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3d_diffusion with the options given in BOUT.inp""" - -from bout_runners import basic_runner - -# Create the instance -my_runs = basic_runner() - -# Do the run -my_runs.execute_runs() diff --git a/examples/bout_runners_example/10-restart_with_resize.py b/examples/bout_runners_example/10-restart_with_resize.py deleted file mode 100644 index 8329c0239e..0000000000 --- a/examples/bout_runners_example/10-restart_with_resize.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python - -"""Driver which resizes the grid after restart""" - -from pre_and_post_processing.post_processing_show_the_data import show_the_data -from bout_runners import basic_runner - -# Initial run -# =========================================================================== -init_run = 
basic_runner(nz = 8) - -dmp_folder, _ =\ - init_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) -# =========================================================================== - - -# Restart the run after resizing the grid -# =========================================================================== -restart_run = basic_runner(restart = "overwrite" ,\ - restart_from = dmp_folder[0],\ - nx = 22 ,\ - ny = 22 ,\ - nz = 16 ,\ - ) - -restart_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) -# =========================================================================== diff --git a/examples/bout_runners_example/11-restart_with_scan.py b/examples/bout_runners_example/11-restart_with_scan.py deleted file mode 100755 index 4fad630453..0000000000 --- a/examples/bout_runners_example/11-restart_with_scan.py +++ /dev/null @@ -1,68 +0,0 @@ -#!/usr/bin/env python - -"""Driver which restarts a scan, given a restart function""" - -from pre_and_post_processing.post_processing_show_the_data import show_the_data -from pre_and_post_processing.restart_from_func import restart_from_func -from bout_runners import basic_runner - -scan = (("cst", "D_perp", (1.0,5.5)),\ - ("cst", "D_par", (1.5,2.5)) - ) - -# Set to True if the runs have already been performed -only_post_process = False - -# Initial runs -# =========================================================================== -init_run = basic_runner(additional = scan) - -dmp_folder, _ =\ - init_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) - -one_of_the_restart_paths_in_scan = dmp_folder[0] -# =========================================================================== - - -# Restart the scan -# =========================================================================== -if only_post_process: - restart = None -else: - restart = "overwrite" - -restart_run = basic_runner(nout = 5 ,\ - restart = restart ,\ - restart_from = restart_from_func,\ - additional = scan ,\ - ) - -restart_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None),\ - # Below are the kwargs given to the - # restart_from_func - one_of_the_restart_paths_in_scan =\ - one_of_the_restart_paths_in_scan,\ - scan_parameters = ["D_perp", "D_par"],\ - ) -# =========================================================================== diff --git a/examples/bout_runners_example/12-PBS_restart_with_waiting.py b/examples/bout_runners_example/12-PBS_restart_with_waiting.py deleted file mode 100755 index 86a21836b2..0000000000 ---
a/examples/bout_runners_example/12-PBS_restart_with_waiting.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python - -"""Driver which restarts a scan, given a restart function""" - -from bout_runners import PBS_runner -from pre_and_post_processing.restart_from_func import restart_from_func - -scan = (("cst", "D_perp", (1.0,5.5)),\ - ("cst", "D_par", (1.5,2.5)) - ) - -# Initial runs -# =========================================================================== -init_run = PBS_runner(additional = scan) - -dmp_folder, PBS_ids =\ - init_run.execute_runs() - -one_of_the_restart_paths_in_scan = dmp_folder[0] -# =========================================================================== - - -# Restart the scan -# =========================================================================== -restart_run = PBS_runner(nout = 5 ,\ - restart = "overwrite" ,\ - restart_from = restart_from_func,\ - additional = scan ,\ - ) - -restart_run.execute_runs(\ - # Declare dependencies - job_dependencies = PBS_ids,\ - # Below are the kwargs given to the - # restart_from_func - one_of_the_restart_paths_in_scan =\ - one_of_the_restart_paths_in_scan,\ - scan_parameters = ("D_perp", "D_par"),\ - ) -# =========================================================================== diff --git a/examples/bout_runners_example/13-restart_w_add_noise.py b/examples/bout_runners_example/13-restart_w_add_noise.py deleted file mode 100755 index 9d68430b74..0000000000 --- a/examples/bout_runners_example/13-restart_w_add_noise.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python - -"""Driver which resizes the grid and adds noise after restart""" - -from pre_and_post_processing.post_processing_show_the_data import show_the_data -from bout_runners import basic_runner - -# Initial run -# =========================================================================== -init_run = basic_runner(nz = 16) - -dmp_folder, _ =\ - init_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) -# =========================================================================== - - -# Restart the run after resizing the grid -# =========================================================================== -restart_run = basic_runner(restart = "overwrite" ,\ - restart_from = dmp_folder[0],\ - nx = 22 ,\ - ny = 22 ,\ - nz = 16 ,\ - # NOTE: This amount of noise is large - # relative to the background, and is - # just added for illustrative purposes - add_noise = {"n": 5e-4} ,\ - ) - -restart_run.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) -# =========================================================================== diff --git a/examples/bout_runners_example/2-run_with_simple_post_processing.py b/examples/bout_runners_example/2-run_with_simple_post_processing.py deleted file mode 100644 index aca001b972..0000000000 --- a/examples/bout_runners_example/2-run_with_simple_post_processing.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3d_diffusion, and calls the function show_the_data when done""" - -from
pre_and_post_processing.post_processing_show_the_data import show_the_data -from bout_runners import basic_runner - - -my_runs = basic_runner() - -# Put this in the post-processing function -my_runs.execute_runs(\ - post_processing_function = show_the_data,\ - # This function will be called every time after - # performing a run - post_process_after_every_run = True,\ - # Below are the kwargs arguments being passed to - # show_the_data - t = slice(0,None),\ - x = 1,\ - y = slice(0,None),\ - z = slice(0,None)\ - ) diff --git a/examples/bout_runners_example/3-override_BOUTinp.py b/examples/bout_runners_example/3-override_BOUTinp.py deleted file mode 100644 index a1fccfd4df..0000000000 --- a/examples/bout_runners_example/3-override_BOUTinp.py +++ /dev/null @@ -1,80 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3d_diffusion with other options than given in BOUT.inp""" - -from bout_runners import basic_runner - -my_runs = basic_runner(\ - # Number of processors - nproc = 2,\ - # Directory of the inp file - directory = 'data',\ - # Set the solver option - solver = 'rk4',\ - mms = False,\ - atol = 1.0e-8,\ - rtol = 1.0e-8,\ - mxstep = 10000000,\ - # Spatial domain option - nx = 19,\ - ny = 17,\ - nz = 16,\ - # These can be set if needed - zperiod = None,\ - zmin = None,\ - zmax = None,\ - # These are not set here, but the code handles them - # internally - dx = None,\ - dy = None,\ - dz = None,\ - # The same as in BOUT.inp - # (Setting them to a different value doesn't make much sense) - MXG = 1,\ - MYG = 1,\ - # These can also be set - ixseps1 = None,\ - ixseps2 = None,\ - jyseps1_1 = None,\ - jyseps1_2 = None,\ - jyseps2_1 = None,\ - jyseps2_2 = None,\ - symGlobX = None,\ - symGlobY = None,\ - # The differencing option - ddx_first = 'C2',\ - ddx_second = 'C2',\ - ddx_upwind = 'U1',\ - ddx_flux = 'SPLIT',\ - ddy_first = 'C2',\ - ddy_second = 'C2',\ - ddy_upwind = 'U1',\ - ddy_flux = 'SPLIT',\ - ddz_first = 'FFT',\ - ddz_second = 'FFT',\ - ddz_upwind = 'U4',\ - ddz_flux = 'SPLIT',\ - # Temporal domain option - nout = 11,\ - timestep = 0.02,\ - # Additional options - # (An example of additional options run in series is found in - # 6a-run_with_MMS_post_processing_specify_numbers.py) - # tuple[0] - section name - # tuple[1] - variable name for the section - # tuple[2] - value of the variable name in the section - additional = (('cst','D_perp',5), ('cst', 'D_par', 0.5)),\ - # Can set this to overwrite or append - restart = None,\ - # Will copy the source file - cpy_source = True,\ - # Will remake the file - make = True,\ - # Code will return an error if False, due to the mismatch - # between nx, ny and nproc - allow_size_modification = True) - -my_runs.execute_runs(\ - # Remove any old data - remove_old = True\ - ) diff --git a/examples/bout_runners_example/4-run_with_combinations.py b/examples/bout_runners_example/4-run_with_combinations.py deleted file mode 100644 index 15d4943f55..0000000000 --- a/examples/bout_runners_example/4-run_with_combinations.py +++ /dev/null @@ -1,40 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3d_diffusion for several different combinations of -the input. """ - -from bout_runners import basic_runner - -# With a few exceptions: All variables in the constructor can be given -# as an iterable. -# When execute_runs is called, bout_runners will run all combinations of -# the member data -my_runs = basic_runner(\ - # nx, ny and nz must be of the same size as they constitute - # one "part" of the combination (i.e.
there will be no - # internal combination between the elements in nx, ny and - # nz) - nx = (9, 18),\ - ny = (6, 12),\ - nz = (8, 16),\ - # nout and timestep must be of the same dimension for the - # same reason as mentioned above - nout = (10, 11, 12),\ - timestep = (0.01, 0.01, 0.01),\ - # The differencing option - ddz_second = ('FFT','C2'),\ - # Additional options - additional = (('cst','D_perp',(1, 2)))\ - ) - -# Execute all the runs -# 2 runs for each combination of nx, ny, nz -# 3 runs for each combination of nout and timestep -# 2 runs for each combination in ddz_second -# 2 runs for each combination of cst:const:value -# In total: 24 runs (see the counting sketch below) -my_runs.execute_runs() - -# NOTE: If you feel that the explanation of the combinations was bad, -# have a look at the last lines of data/run_log.txt to see what -# runs have been performed after this run diff --git a/examples/bout_runners_example/5-run_with_grid_files.py b/examples/bout_runners_example/5-run_with_grid_files.py deleted file mode 100644 index 82365a759a..0000000000 --- a/examples/bout_runners_example/5-run_with_grid_files.py +++ /dev/null @@ -1,23 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3D_diffusion using grid files.""" - -from bout_runners import basic_runner -from pre_and_post_processing.grid_generator import generate_grid -import os - -# Generate a grid -file_name = os.path.join("grid_files","3D_diffusion_grid.nc") -generate_grid(file_name = file_name,\ - inp_path = "data") - -my_runs = basic_runner(\ - grid_file = file_name,\ - # Copy the grid file - cpy_grid = True,\ - # Set the flag in 3D_diffusion that a grid file will be - # used - additional = ('flags', 'use_grid', 'true')\ - ) - -my_runs.execute_runs() diff --git a/examples/bout_runners_example/6a-run_with_MMS_post_processing_specify_numbers.py b/examples/bout_runners_example/6a-run_with_MMS_post_processing_specify_numbers.py deleted file mode 100755 index b50c60cb28..0000000000 --- a/examples/bout_runners_example/6a-run_with_MMS_post_processing_specify_numbers.py +++ /dev/null @@ -1,58 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3D_diffusion and performs a MMS test by specifying -the grids by hand (see 6b-run_with_MMS_post_processing_grid_file.py to -see how the same is done by using grid files)""" - -from bout_runners import basic_runner -from pre_and_post_processing.post_processing_MMS import perform_MMS_test - -my_runs = basic_runner(\ - nproc = 1,\ - # Set the directory - directory = 'MMS',\ - # Set the time domain - nout = 1,\ - timestep = 1,\ - # Set mms to true - mms = True,\ - # Set the spatial domain - nx = (5, 8, 16),\ - ny = (5, 8, 16),\ - nz = (4, 8, 16),\ - # Additional (put here to illustrate the sorting) - series_add = (('cst','D_par',(1,2)), ('cst','D_perp',(0.5,1))),\ - # Since we would like to do a MMS test, we would like to run - # the runs in a particular order. In this example, we would - # like to run all the possible spatial variables before - # doing the test. Hence we would like the spatial domain - # option to be the fastest varying. - # Since we have put post_process_after_every_run = False in - # the run function below, the processing function is - # called when all possibilities of the fastest variable have - # been run.
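The run count quoted in the comments of 4-run_with_combinations.py above follows from treating each grouped option as one axis of a Cartesian product. A minimal counting sketch, independent of bout_runners internals:

```python
from itertools import product

# Grouped options vary together, so each group contributes a single axis
spatial = list(zip((9, 18), (6, 12), (8, 16)))          # (nx, ny, nz): 2 choices
temporal = list(zip((10, 11, 12), (0.01, 0.01, 0.01)))  # (nout, timestep): 3
ddz_second = ("FFT", "C2")                              # 2 choices
d_perp = (1, 2)                                         # cst:D_perp: 2 choices

runs = list(product(spatial, temporal, ddz_second, d_perp))
assert len(runs) == 24  # 2 * 3 * 2 * 2, matching the comments above
```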
- sort_by = 'spatial_domain'\ - # Some additional sorting examples: - # - # This returns an error, stating the sorting possibilities - # (which will depend on the member data of this object) - # sort_by = 'uncomment_me'\ - # - # In this example cst:D_par will be the fastest varying - # variable, followed by the spatial_domain. The post - # processing function will be called when all possibilities - # of these variables have been run - # sort_by = ('cst:D_par', 'spatial_domain')\ - ) - -# Put this in the post-processing function -my_runs.execute_runs(\ - post_processing_function = perform_MMS_test,\ - # As we need several runs in order to perform the - # MMS test, this needs to be false - post_process_after_every_run = False,\ - # Below are the kwargs arguments being passed to - # perform_MMS_test - extension = 'png',\ - show_plot = True - ) diff --git a/examples/bout_runners_example/6b-run_with_MMS_post_processing_grid_file.py b/examples/bout_runners_example/6b-run_with_MMS_post_processing_grid_file.py deleted file mode 100644 index 3da96197db..0000000000 --- a/examples/bout_runners_example/6b-run_with_MMS_post_processing_grid_file.py +++ /dev/null @@ -1,59 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3D_diffusion and performs a MMS test by specifying -the grids by using grid_files (see -6a-run_with_MMS_post_processing_specify_numbers.py to see how the -same is done by specifying the grid manually)""" - -from bout_runners import basic_runner -from pre_and_post_processing.post_processing_MMS import perform_MMS_test -from pre_and_post_processing.grid_generator import generate_grid -import os - -# Generate the grids -# Specify the grid dimensions -grid_numbers = (5, 8, 16) -# Make an appendable list -grid_files = [] -for grid_number in grid_numbers: - file_name = os.path.join("grid_files","grid_file_{}.nc".format(grid_number)) - # Generate the grids - generate_grid(nx = grid_number,\ - ny = grid_number,\ - nz = grid_number,\ - inp_path = 'MMS' ,\ - file_name = file_name) - # Append the grid_files list - grid_files.append(file_name) - -my_runs = basic_runner(\ - nproc = 1,\ - # Set the directory - directory = 'MMS',\ - # Set the time domain - nout = 1,\ - timestep = 1,\ - # Set mms to true - mms = True,\ - # Set the spatial domain - grid_file = grid_files,\ - # Set the flag in 3D_diffusion that a grid file will be - # used - additional = ('flags','use_grid','true'),\ - # Copy the grid file - cpy_grid = True,\ - # Sort the runs by the spatial domain - sort_by = 'grid_file' - ) - -# Put this in the post-processing function -my_runs.execute_runs(\ - post_processing_function = perform_MMS_test,\ - # As we need several runs in order to perform the - # MMS test, this needs to be false - post_process_after_every_run = False,\ - # Below are the kwargs arguments being passed to - # perform_MMS_test - extension = 'png',\ - show_plot = True\ - ) diff --git a/examples/bout_runners_example/7-basic_PBS_run.py b/examples/bout_runners_example/7-basic_PBS_run.py deleted file mode 100644 index 8afd4bb247..0000000000 --- a/examples/bout_runners_example/7-basic_PBS_run.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -""" -Driver which runs 3D_diffusion by submitting a job to a Portable Batch System -(PBS) -""" - -from bout_runners import PBS_runner - -my_runs = PBS_runner() - -my_runs.execute_runs() diff --git a/examples/bout_runners_example/8-PBS_run_extra_option.py b/examples/bout_runners_example/8-PBS_run_extra_option.py deleted file mode 100644 index a447e09117..0000000000 ---
a/examples/bout_runners_example/8-PBS_run_extra_option.py +++ /dev/null @@ -1,25 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3D_diffusion by submitting a job to a PBS using -additional options (a sketch of the resulting job script header is -shown further below).""" - -from bout_runners import PBS_runner - -my_runs = PBS_runner(\ - # Although nproc is a member of basic_runner, it is used - # together with BOUT_nodes and BOUT_ppn - nproc = 4,\ - # Number of nodes to be used on the cluster - BOUT_nodes = 1,\ - # Specifying processors per node - BOUT_ppn = 4,\ - # The maximum walltime of the run - BOUT_walltime = '0:15:00',\ - # Specify the queue to submit to (if any) - BOUT_queue = None,\ - # Specify an email address to be notified when the run has finished - BOUT_mail = None\ - ) - -# Put this in the post-processing function -my_runs.execute_runs(remove_old = True) diff --git a/examples/bout_runners_example/9-PBS_with_MMS_post_processing_grid_file.py b/examples/bout_runners_example/9-PBS_with_MMS_post_processing_grid_file.py deleted file mode 100644 index 2b49971005..0000000000 --- a/examples/bout_runners_example/9-PBS_with_MMS_post_processing_grid_file.py +++ /dev/null @@ -1,73 +0,0 @@ -#!/usr/bin/env python - -"""Driver which runs 3D_diffusion by submitting a job to a PBS and -performs a MMS test by specifying the grids by using grid_files.""" - -from bout_runners import PBS_runner -from pre_and_post_processing.post_processing_MMS import perform_MMS_test -from pre_and_post_processing.grid_generator import generate_grid -import os - -# Generate the grids -# Specify the grid dimensions -grid_numbers = (8, 16, 32) -# Make an appendable list -grid_files = [] -for grid_number in grid_numbers: - file_name = os.path.join("grid_files","grid_file_{}.nc".format(grid_number)) - # Generate the grids - generate_grid(nx = grid_number,\ - ny = grid_number,\ - nz = grid_number,\ - inp_path = 'MMS' ,\ - file_name = file_name) - grid_files.append(file_name) - -my_runs = PBS_runner(\ - # Specify the numbers used for the BOUT runs - nproc = 4,\ - BOUT_nodes = 1,\ - BOUT_ppn = 4,\ - BOUT_walltime = '0:15:00',\ - BOUT_queue = None,\ - BOUT_mail = None,\ - # Specify the numbers used for the post processing - post_process_nproc = 1,\ - post_process_nodes = 1,\ - post_process_ppn = 1,\ - post_process_walltime = '0:05:00',\ - post_process_queue = None,\ - post_process_mail = None,\ - # Set the directory - directory = 'MMS',\ - # Set the time domain - nout = 1,\ - timestep = 1,\ - # Set mms to true - mms = True,\ - # Set the spatial domain - grid_file = grid_files,\ - # Set the flag in 3D_diffusion that a grid file will be - # used - additional = ('flags','use_grid','true'),\ - # Add some additional options - series_add = (('cst','D_par' ,(1,2)),\ - ('cst','D_perp',(0.5,1))),\ - # Copy the grid file - cpy_grid = True,\ - # Sort the runs by the spatial domain - sort_by = 'grid_file' - ) - -# Put this in the post-processing function -my_runs.execute_runs(\ - remove_old = True,\ - post_processing_function = perform_MMS_test,\ - # As we need several runs in order to perform the - # MMS test, this needs to be false - post_process_after_every_run = False,\ - # Below are the kwargs arguments being passed to - # perform_MMS_test - extension = 'png',\ - show_plot = False\ - ) diff --git a/examples/bout_runners_example/MMS/BOUT.inp b/examples/bout_runners_example/MMS/BOUT.inp deleted file mode 100644 index 8dd5d141cc..0000000000 --- a/examples/bout_runners_example/MMS/BOUT.inp +++ /dev/null @@ -1,119 +0,0 @@ -# -# Input file for "bout_runners_example/MMS" -# - -# Root option
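For readers unfamiliar with PBS, the BOUT_nodes/BOUT_ppn/BOUT_walltime options above roughly correspond to directives in the generated job script. The following is a hypothetical sketch (`pbs_header` is not part of bout_runners; the directives themselves are standard PBS/Torque syntax):

```python
# Hypothetical sketch of mapping the options above onto a PBS script header;
# bout_runners' real implementation differs.
def pbs_header(name, nodes=1, ppn=4, walltime="0:15:00", queue=None, mail=None):
    lines = ["#!/bin/bash",
             "#PBS -N {}".format(name),
             "#PBS -l nodes={}:ppn={}".format(nodes, ppn),
             "#PBS -l walltime={}".format(walltime)]
    if queue is not None:
        lines.append("#PBS -q {}".format(queue))
    if mail is not None:
        lines.append("#PBS -M {}".format(mail))  # address to notify
    return "\n".join(lines)

print(pbs_header("3D_diffusion", nodes=1, ppn=4, walltime="0:15:00"))
```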
-############################################################################### -nout = 10 # Number of output timesteps -timestep = 0.01 # Time between outputs - -dump_format="nc" # Write NetCDF format files - -# Setting the z coordinate -ZMIN = 0.0 -ZMAX = 1.0 # dz = 2*pi(ZMAX - ZMIN)/(MZ - 1) - -# Number of guard cells -MXG = 1 -MYG = 1 -############################################################################### - - -# Mesh option -############################################################################### -[mesh] -# Puts the boundaries half a step outside the last grid points -symmetricGlobalY=true -symmetricGlobalX=true - -# The spatial dimension -nx = 18 -ny = 16 -nz = 16 - -# Position of the separatrix (-1 is non periodic, >ny is periodic) -# --------Non-periodic---------- -ixseps1 = -1 -ixseps2 = -1 -# ------------------------------ -############################################################################### - - -# Methods option -############################################################################### -# Methods used for the radial (x) derivative terms -[mesh:ddx] -second = C2 # d^2/dx^2 (f) - -# Methods used for parallel (y) derivative terms -[mesh:ddy] -second = C2 # d^2/dy^2 (f) - -#Methods used for the azimuthal (z) derivative terms -[mesh:ddz] -second = FFT # d^2/dz^2 (f) -############################################################################### - - -# Solver settings -############################################################################### -[solver] -type = pvode # Which solver to use (cvode should be same as pvode) -mms = false # false by default - -atol = 1.0e-7 # absolute tolerance -rtol = 1.0e-7 # relative tolerance - -# Max allowed iterations in one step -mxstep = 100000000 -############################################################################### - - -# Specifying the output -############################################################################### -[output] -floats = false # floats = false => output in double -############################################################################### - - -# Additional options -############################################################################### -# Geometry -# ----------------------------------------------------------------------------- -[geom] -Lx = 1.2 # The length of x from boundary to boundary -Ly = 2.0 # The length of y from boundary to boundary -# Setting the spatial variables -yl = y * geom:Ly / (2.0*pi) #y in range [0,Ly] -xl = x * geom:Lx #x in range [0,Lx] -# ----------------------------------------------------------------------------- - -# Constants -# ----------------------------------------------------------------------------- -[cst] -D_par = 1.0 # Parallel diffusion constant -D_perp = 2.0 # Perpendicular diffusion constant -# ----------------------------------------------------------------------------- - -# Flags -# ----------------------------------------------------------------------------- -[flags] -use_grid = false # Whether or not to read from a grid file -# ----------------------------------------------------------------------------- - -# The particle density -# ----------------------------------------------------------------------------- -[n] -# Scaling -scale = 1.0 - -# Source and solution for MMS -solution = t^(2.5) + sin(3+geom:xl^2) + exp(0.5*geom:yl) + 4*cos(z) -source = 2.5*t^(1.5) - cst:D_perp*(2*(cos(geom:xl^2+3)-2*geom:xl^2*sin(geom:xl^2+3)) - 4*cos(z)) - cst:D_par*(0.25*exp(0.5*geom:yl)) - - -# Boundary conditions -# Set the boundary to the 
initial condition -bndry_all = dirichlet_o4(n:solution) -# ----------------------------------------------------------------------------- -############################################################################## diff --git a/examples/bout_runners_example/README.md b/examples/bout_runners_example/README.md deleted file mode 100644 index bdab947004..0000000000 --- a/examples/bout_runners_example/README.md +++ /dev/null @@ -1,48 +0,0 @@ -# bout_runners_example - -A tutorial on how to use `bout_runners`. - -Extra documentation of `bout_runners` can be found in the -docstring of `bout_runners`. - -## Contents -### The program: - -* `diffusion_3D.cxx` - Simulates 3D diffusion -* `make` - The corresponding makefile (notice that `bout_runners` - calls this, so no manual make is necessary for `bout_runners` to work). - -### Folders: - -* `data` - Contains a `BOUT.inp` file -* `MMS` - Contains the `BOUT.inp` file for the MMS runs -* `pre_and_post_processing` - Contains the grid generator and the - post processing functions - -### Examples: - -* `1-basic_driver.py` - How to use `bout_runners` for a basic run -* `2-run_with_simple_post_processing.py` - How to couple `bout_runners` - to a post processing routine -* `3-override_BOUTinp.py` - Use `bout_runners` to override settings - in `BOUT.inp` -* `4-run_with_combinations.py` - Use `bout_runners` to change - several settings at once -* `5-run_with_grid_files.py` - Run `bout_runners` with a grid file -* `6a-run_with_MMS_post_processing_specify_numbers.py` - Use - `bout_runners` to run an MMS test on the program -* `6b-run_with_MMS_post_processing_grid_file.py` - The same as `6a`, - but using a grid file -* `7-basic_PBS_run.py` - Submit jobs to a cluster using - `bout_runners` -* `8-PBS_run_extra_option.py` - Set the `PBS` option using - `bout_runners` -* `9-PBS_with_MMS_post_processing_grid_file.py` - As 6b, but on a - cluster -* `10-restart_with_resize.py` - Restart a run and resize the grid - using `bout_runners` -* `11-restart_with_scan.py` - Use `bout_runners` to restart runs - belonging to a parameter scan -* `12-PBS_restart_with_waiting.py` - Runs where the restart waits for jobs to - finish -* `13-restart_w_add_noise.py` - Adds noise to a restart run diff --git a/examples/bout_runners_example/data/BOUT.inp b/examples/bout_runners_example/data/BOUT.inp deleted file mode 100644 index dd530806d6..0000000000 --- a/examples/bout_runners_example/data/BOUT.inp +++ /dev/null @@ -1,122 +0,0 @@ -# -# Input file for "bout_runners_example/data" -# - -# Root option -############################################################################### -nout = 10 # Number of output timesteps -timestep = 0.01 # Time between outputs - -dump_format="nc" # Write NetCDF format files - -# Setting the z coordinate -ZMIN = 0.0 -ZMAX = 1.0 # dz = 2*pi(ZMAX - ZMIN)/(MZ - 1) - -# Number of guard cells -MXG = 1 -MYG = 1 -############################################################################### - - -# Mesh option -############################################################################### -[mesh] -# Puts the boundaries half a step outside the last grid point -symmetricGlobalY=true -symmetricGlobalX=true - -# The spatial dimension -nx = 18 -ny = 16 -nz = 16 - -# Position of the separatrix (-1 is non periodic, >ny is periodic) -# --------Non-periodic---------- -ixseps1 = -1 -ixseps2 = -1 -# ------------------------------ -############################################################################### - - -# Methods option
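The `BOUT.inp` files quoted above are plain INI-style files. The `boututils.options.BOUTOptions` helper used by `grid_generator.py` further down exposes each section as a dict of strings; a short usage sketch (values come back as strings and must be cast or eval'd by the caller, as the grid generator itself does):

```python
from boututils.options import BOUTOptions

opts = BOUTOptions("data")      # directory containing the BOUT.inp above
nout = int(opts.root["nout"])   # options appearing before any [section]
MXG = int(opts.root["MXG"])
Lx = eval(opts.geom["Lx"])      # [geom] section; eval handles expressions
```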
-############################################################################### -# Methods used for the radial (x) derivative terms -[mesh:ddx] -second = C2 # d^2/dx^2 (f) - -# Methods used for parallel (y) derivative terms -[mesh:ddy] -second = C2 # d^2/dy^2 (f) - -#Methods used for the azimuthal (z) derivative terms -[mesh:ddz] -second = FFT # d^2/dz^2 (f) -############################################################################### - - -# Solver settings -############################################################################### -[solver] -type = pvode # Which solver to use (cvode should be same as pvode) -mms = false # false by default - -atol = 1.0e-7 # absolute tolerance -rtol = 1.0e-7 # relative tolerance - -# Max allowed iterations in one step -mxstep = 100000000 -############################################################################### - - -# Specifying the output -############################################################################### -[output] -floats = false # floats = false => output in double -############################################################################### - - -# Additional options -############################################################################### -# Geometry -# ----------------------------------------------------------------------------- -[geom] -Lx = 1.2 # The length of x from boundary to boundary -Ly = 2.0 # The length of y from boundary to boundary -# Setting the spatial variables -yl = y * geom:Ly / (2.0*pi) #y in range [0,Ly] -xl = x * geom:Lx #x in range [0,Lx] -# ----------------------------------------------------------------------------- - -# Constants -# ----------------------------------------------------------------------------- -[cst] -D_par = 1.0 # Parallel diffusion constant -D_perp = 2.0 # Perpendicular diffusion constant -# Options for the Gaussian -x0 = 0.5 * geom:Lx # The x centering of the Gaussian -y0 = 0.5 * geom:Ly # The y centering of the Gaussian -z0 = pi # The z centering of the Gaussian -w = 2 # Width of the Gaussian -# ----------------------------------------------------------------------------- - -# Flags -# ----------------------------------------------------------------------------- -[flags] -use_grid = false # Whether or not to read from a grid file -# ----------------------------------------------------------------------------- - -# The particle density -# ----------------------------------------------------------------------------- -[n] -# Scaling -scale = 1.0 - -# Initial condition (a "spherical" Gaussian centered in the middle of the domain) -function = gauss(geom:xl - cst:x0, cst:w)*gauss(geom:yl - cst:y0, cst:w)*gauss(z - cst:z0, cst:w) - -# Boundary conditions -# Set the boundary to the initial condition -bndry_all = dirichlet_o4(n:function) -# ----------------------------------------------------------------------------- -############################################################################## diff --git a/examples/bout_runners_example/diffusion_3D.cxx b/examples/bout_runners_example/diffusion_3D.cxx deleted file mode 100644 index 27c02a29a8..0000000000 --- a/examples/bout_runners_example/diffusion_3D.cxx +++ /dev/null @@ -1,80 +0,0 @@ -// ******* Simulates 3D diffusion ******* - -#include -#include -// This gives the Laplace(f) options -#include -// Gives PI and TWOPI -#include - -class Diffusion_3d : public PhysicsModel { - Field3D n; // Evolved variable - BoutReal D_par, D_perp; // The diffusion constants - BoutReal Lx, Ly; // The spatial domain size - bool use_grid; // If 
the spatial size should be loaded from the grid - -protected: - int init(bool UNUSED(restarting)) override { - - // Get the option (before any sections) in the BOUT.inp file - Options* options = Options::getRoot(); - - // Get the diffusion constants - // ************************************************************************ - // Get the section of the variables from [cst] specified in BOUT.inp - // or in the command-line arguments - Options* constants = options->getSection("cst"); - // Storing the variables with the following syntax - // section_name->get("variable_name_in_input", variable_name_in_cxx, - // default_value) - constants->get("D_par", D_par, 1.0); - constants->get("D_perp", D_perp, 1.0); - - // Get domain dimensions - // ************************************************************************ - Options* flags = options->getSection("flags"); - // Get the option - flags->get("use_grid", use_grid, false); - if (use_grid) { - // Loading variables from the grid file so that they can be saved into the - // .dmp file (other variables such as dx, ny etc. are stored - // automatically) - GRID_LOAD2(Lx, Ly); - } else { - // Load from BOUT.inp - Options* geometry = options->getSection("geom"); - geometry->get("Lx", Lx, 1.0); - geometry->get("Ly", Ly, 1.0); - // Calculate the internal number of points - const int internal_x_points = mesh->GlobalNx - 2 * mesh->xstart; - const int internal_y_points = mesh->GlobalNy - 2 * mesh->ystart; - // Calculate dx and dy - // dx = Lx/line_segments_in_x - // On a line with equidistant points there is one less line - // segment than points from the first to the last point. - // The boundary lies (1/2)*dx away from the last point. As there - // are 2 boundaries, this effectively adds one more line - // segment in the domain. Hence - mesh->getCoordinates()->dx = Lx / (internal_x_points); - mesh->getCoordinates()->dy = Ly / (internal_y_points); - } - - // Specify what values should be stored in the .dmp file - SAVE_ONCE4(Lx, Ly, D_par, D_perp); - - // Tell BOUT++ to solve for n - SOLVE_FOR(n); - - return 0; - } - - int rhs(BoutReal UNUSED(t)) override { - mesh->communicate(n); // Communicate guard cells - - // Density diffusion - ddt(n) = D_par * Laplace_par(n) + D_perp * Laplace_perp(n); - return 0; - } -}; - -BOUTMAIN(Diffusion_3d) diff --git a/examples/bout_runners_example/makefile b/examples/bout_runners_example/makefile deleted file mode 100644 index a3cdcc1c3f..0000000000 --- a/examples/bout_runners_example/makefile +++ /dev/null @@ -1,6 +0,0 @@ - -BOUT_TOP ?= ../..
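The `dx` comment in `init` above can be checked directly: with N internal points and the boundary half a cell outside the first and last point, the segments sum to exactly N*dx. A quick numerical check of that reasoning:

```python
# (N-1) full segments between points plus two half segments at the
# boundaries give (N-1)*dx + dx = N*dx, hence dx = Lx / N
Lx, N = 1.2, 16              # e.g. nx = 18 with MXG = 1 -> 16 internal points
dx = Lx / N
points = [dx / 2 + i * dx for i in range(N)]    # first point half a cell in
assert abs((points[-1] + dx / 2) - Lx) < 1e-12  # boundary lands exactly on Lx
```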
- -SOURCEC = diffusion_3D.cxx - -include $(BOUT_TOP)/make.config diff --git a/examples/bout_runners_example/pre_and_post_processing/__init__.py b/examples/bout_runners_example/pre_and_post_processing/__init__.py deleted file mode 100755 index dab6b68866..0000000000 --- a/examples/bout_runners_example/pre_and_post_processing/__init__.py +++ /dev/null @@ -1,12 +0,0 @@ -#!/usr/bin/env python - -"""Init file for pre and post processing""" - -import os -import matplotlib.pylab as plt - -# Set proper backend for the display -try: - os.environ["DISPLAY"] -except KeyError: - plt.switch_backend("Agg") diff --git a/examples/bout_runners_example/pre_and_post_processing/grid_generator.py b/examples/bout_runners_example/pre_and_post_processing/grid_generator.py deleted file mode 100755 index 1cfeeeb4da..0000000000 --- a/examples/bout_runners_example/pre_and_post_processing/grid_generator.py +++ /dev/null @@ -1,99 +0,0 @@ -#!/usr/bin/env python - -"""Generate an input mesh""" - -from boututils.datafile import DataFile -from boututils.options import BOUTOptions -import numpy as np -import os - -# Define pi, in case it is found in the BOUT.inp file -pi = np.pi - -def generate_grid(nx = 20 ,\ - ny = 10 ,\ - nz = 8 ,\ - Lx = None,\ - Ly = None,\ - MXG = None,\ - inp_path = None,\ - file_name = None): - """Generates a grid file based on the input, and on what is being - found in the BOUT.inp file""" - - # Calculate dx and dy - #{{{ Use BOUT.inp if Lx, Ly or MXG is not found - if (Lx == None) or (Ly == None) or (MXG == None): - # Make a BOUTOption object (see documentation of BOUTOption.py) - myOpts = BOUTOptions(inp_path) - - # Appendable list - warnings = [] - - # If Lx is not given - if Lx == None: - # Read 'Lx' from the 'geom' section - Lx = myOpts.geom['Lx'] - # Lx is now a string, we let python evaluate the string - Lx = eval(Lx) - # Append a warning - warnings.append('Lx') - # If Ly is not given - if Ly == None: - Ly = myOpts.geom['Ly'] - Ly = eval(Ly) - warnings.append('Ly') - # If MXG is not given - if MXG == None: - MXG = myOpts.root['MXG'] - MXG = eval(MXG) - warnings.append('MXG') - - # Print warnings - for warning in warnings: - print("\n"*2 + "!"*137) - print("WARNING!!! 
" + warning + " not given in generate_grid") - print("Will use value from BOUT.inp to calculate gridspacing, but"+\ - " note that this would be inconsistent if '" + warning +\ - "' is given in a bout_runner object") - print("!"*137 + "\n"*2) - #}}} - - # Calculate dx and dy - internal_x_points = nx - 2*MXG - internal_y_points = ny - # Since the grid points lay half between the grid - # (There is one less line segment than points, and there is one more - # "internal" in the grid due to 2 half grid points out to the - # boundary) - dx = Lx / (internal_x_points) - dy = Ly / (internal_y_points) - - # Set ixseps - ixseps1 = -1 - ixseps2 = -2 - - # Check that folder exists - grid_folder = os.path.split(file_name)[0] - if grid_folder != "": - if not os.path.exists(grid_folder): - os.makedirs(grid_folder) - - # Write the grid file - with DataFile(file_name, write=True, create=True) as grid: - # Write the dimensions to the grid file - grid.write("nx", nx) - grid.write("ny", ny) - grid.write("nz", nz) - - # Write the grid sizes to the grid file - grid.write("dx", dx) - grid.write("dy", dy) - - # Write the lengths to the grid file - grid.write("Lx", Lx) - grid.write("Ly", Ly) - - # Write the ixseps - grid.write("ixseps1", ixseps1) - grid.write("ixseps2", ixseps2) diff --git a/examples/bout_runners_example/pre_and_post_processing/post_processing_MMS.py b/examples/bout_runners_example/pre_and_post_processing/post_processing_MMS.py deleted file mode 100755 index f734b088fa..0000000000 --- a/examples/bout_runners_example/pre_and_post_processing/post_processing_MMS.py +++ /dev/null @@ -1,258 +0,0 @@ -#!/usr/bin/env python - -"""Post processing which performs MMS""" - -from boutdata.collect import collect -import matplotlib.pyplot as plt -import numpy as np - -#{{{perform_MMS_test -def perform_MMS_test(paths, extension='.pdf', show_plot=False): - """Collects the data members belonging to a convergence plot""" - - # Make a variable to store the errors and the spacing - data = {'error_2':[], 'error_inf':[], 'spacing':[]} - - # Loop over the runs in order to collect - for path in paths: - # Collect n_solution - n_numerical - error_array = collect('E_n', path=path, info=False,\ - xguards = False, yguards = False) - # Pick the last time point - error_array = error_array[-1] - - # The error in the 2-norm and infintiy-norm - data['error_2'] .append( np.sqrt(np.mean( error_array**2.0 )) ) - data['error_inf'].append( np.max(np.abs( error_array )) ) - - # Collect the spacings - dx_spacing = collect("dx", path=path, info=False,\ - xguards = False, yguards = False) - dy_spacing = collect("dy", path=path, info=False,\ - xguards = False, yguards = False) - dz_spacing = collect("dz", path=path, info=False,\ - xguards = False, yguards = False) - # We are interested in the max of the spacing - dx_spacing = np.max(dx_spacing) - dy_spacing = np.max(dy_spacing) - dz_spacing = np.max(dz_spacing) - - # Store the spacing in the data - data['spacing'].append(np.max([dx_spacing, dy_spacing, dz_spacing])) - - # Sort the data - data = sort_data(data) - - # Find the order of convergence in the 2 norm and infinity norm - order_2, order_inf = get_order(data) - - # Get the root name of the path (used for saving files) - root_folder = paths[0].split('/')[0] + '/' - - # Get the name of the plot based on the first folder name - name = paths[0].split('/') - # Remove the root folder and put 'MMS-' in front - name = 'MMS-' + '_'.join(name[1:]) - - # Print the convergence rate - print_convergence_rate(data, order_2, order_inf, root_folder, 
name) - - # Plot - # We want to show the lines of the last orders, so we send in - # order_2[-1] and order_inf[-1] - do_plot(data, order_2[-1], order_inf[-1],\ - root_folder, name, extension, show_plot) -#}}} - -# Help functions -#{{{sort_data -def sort_data(data): - """Sorts the data by highest grid spacing""" - - # Sort the data in case it is unsorted - list_of_tuples_to_be_sorted =\ - list(zip(data['spacing'], data['error_inf'], data['error_2'])) - - # Sort the list - # Note that we are sorting in reverse order, as we want the - # highest grid spacing first - sorted_list = sorted(list_of_tuples_to_be_sorted, reverse = True) - # Unzip the sorted list - data['spacing'], data['error_inf'], data['error_2'] =\ - list(zip(*sorted_list)) - - return data -#}}} - -#{{{get_order -def get_order(data): - # TODO: Check this - # Initialize the orders - order_2 = [np.nan] - order_inf = [np.nan] - - # The order will be found by finding a linear fit between two - # nearby points in the error-spacing plot. Hence, we must let - # the index in the for loop run to the length minus one - for index in range(len(data['spacing']) - 1): - # p = polyfit(x,y,n) finds the coefficients of a polynomial p(x) - # of degree n that fits the data, p(x(i)) to y(i), in a least squares - # sense. - # The result p is a row vector of length n+1 containing the - # polynomial coefficients in descending powers - spacing_start = np.log(data['spacing'][index]) - spacing_end = np.log(data['spacing'][index + 1]) - error_start_2 = np.log(data['error_2'][index]) - error_end_2 = np.log(data['error_2'][index + 1]) - error_start_inf = np.log(data['error_inf'][index]) - error_end_inf = np.log(data['error_inf'][index + 1]) - # Finding the order in the two norm - order = np.polyfit([spacing_start, spacing_end],\ - [error_start_2, error_end_2], 1) - # Append it to the order_2 - order_2.append(order[0]) - - # Finding the infinity order - order = np.polyfit([spacing_start, spacing_end],\ - [error_start_inf, error_end_inf], 1) - # Append it to the order_inf - order_inf.append(order[0]) - - return order_2, order_inf -#}}} - -#{{{print_convergence_rate -def print_convergence_rate(data, order_2, order_inf, root_folder, name): - "Prints the convergence rates to the screen and to a file" - outstring = list(zip(data['spacing'],\ - data['error_2'],\ - order_2,\ - data['error_inf'],\ - order_inf)) - header = ['#spacing', 'error_2 ', 'order_2 ', 'error_inf ', 'order_inf'] - # Format on the rows (: accepts the argument, < flushes left, - # 20 denotes character width, .10e denotes scientific notation with - # 10 in precision) - header_format = "{:<20}" * (len(header)) - number_format = "{:<20.10e}" * (len(header)) - # * in front of header unpacks - header_string = header_format.format(*header) - text = header_string - for string in outstring: - text += '\n' + number_format.format(*string) - print('\nNow printing the results of the convergence test:') - print(text) - # Write the found orders - with open(root_folder + name + '.txt', 'w' ) as f: - f.write(text) - print('\n') -#}}} - -#{{{do_plot -def do_plot(data, order_2, order_inf, root_folder, name, extension, show_plot): - """Function which handles the actual plotting""" - - # Plot errors - # Set the plotting style - title_size = 30 - plt.rc("font", size = 30) - plt.rc("axes", labelsize = 25, titlesize = title_size) - plt.rc("xtick", labelsize = 25) - plt.rc("ytick", labelsize = 25) - plt.rc("legend", fontsize = 30) - plt.rc("lines", linewidth = 3.0) - plt.rc("lines", markersize = 20.0) - plt_size = (10, 7)
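The pairwise `polyfit` in `get_order` above recovers the convergence order as the local slope in log-log space. A small self-check with a manufactured second-order error series:

```python
import numpy as np

spacing = np.array([0.4, 0.2, 0.1, 0.05])
error = 3.0 * spacing**2   # error scaling as spacing**2, i.e. order 2

orders = [np.polyfit(np.log(spacing[i:i + 2]), np.log(error[i:i + 2]), 1)[0]
          for i in range(len(spacing) - 1)]
assert np.allclose(orders, 2.0)  # each pairwise fit recovers the order
```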
- fig_no = 1 - # Try to make a figure with the current backend - try: - fig = plt.figure(fig_no, figsize = plt_size) - except: - # Switch if a backend needs the display - plt.switch_backend('Agg') - fig = plt.figure(fig_no, figsize = plt_size) - - ax = fig.add_subplot(111) - - # Plot errors - # Plot the error-space plot for the 2-norm - ax.plot(data['spacing'], data['error_2'], 'b-o', label=r'$L_2$') - # Plot the error-space plot for the inf-norm - ax.plot(data['spacing'], data['error_inf'], 'r-^', label=r'$L_\infty$') - - # Plot the order - #{{{ Explanation of the calculation - # In the log-log plot, we have - # ln(y) = a*ln(x) + ln(b) - # y = error - # x = spacing - # a = order (found from linear regression) - # b = where the straight line intersects with the ordinate - # - # Using the logarithmic identities - # ln(x^a) = a*ln(x) - # ln(x*y) = ln(x) + ln(y) - # ln(x/y) = ln(x) - ln(y) - # - # We usually find b for x = 0. Here, on the other hand, we find it - # by solving the equation for the smallest grid point: - # ln[y(x[-1])] = a*ln(x[-1]) + ln(b), - # so - # ln(b) = ln[y(x[-1])] - a*ln(x[-1]) - # => - # ln(y) = a*ln(x) - a*ln(x[-1]) + ln[y(x[-1])] - # = a*[ln(x)-ln(x[-1])] + ln[y(x[-1])] - # = a*[ln(x/x[-1])] + ln[y(x[-1])] - # = ln[(x/x[-1])^a*y(x[-1])] - #}}} - # Order in the inf norm - ax.plot(\ - (data['spacing'][-1],\ - data['spacing'][0]),\ - (\ - ((data['spacing'][-1] / data['spacing'][-1])**order_inf)*\ - data['error_inf'][-1],\ - ((data['spacing'][0] / data['spacing'][-1])**order_inf)*\ - data['error_inf'][-1]\ - ),\ - 'm--',\ - label=r"$\mathcal{O}_{L_\infty}="+"%.2f"%(order_inf)+r"$") - # Order in the 2 norm - ax.plot(\ - (data['spacing'][-1],\ - data['spacing'][0]),\ - (\ - ((data['spacing'][-1] / data['spacing'][-1])**order_2)*\ - data['error_2'][-1],\ - ((data['spacing'][0] / data['spacing'][-1])**order_2)*\ - data['error_2'][-1]\ - ),\ - 'c--',\ - label=r"$\mathcal{O}_{L_2}="+"%.2f"%(order_2)+r"$") - - # Set logarithmic scale - ax.set_yscale('log') - ax.set_xscale('log') - - # Set axis label - ax.set_xlabel("Mesh spacing") - ax.set_ylabel("Error norm") - - # Make the plot look nice - # Plot the legend - leg = ax.legend(loc="best", fancybox = True, numpoints=1) - leg.get_frame().set_alpha(0.5) - # Plot the grid - ax.grid() - # Includes the xlabel if outside - plt.tight_layout() - - # Save the plot - plt.savefig(root_folder + name + '.' + extension) - print('\nPlot saved to ' + name + '.' + extension + '\n'*2) - - if show_plot: - plt.show() - - plt.close() -#}}} diff --git a/examples/bout_runners_example/pre_and_post_processing/post_processing_show_the_data.py b/examples/bout_runners_example/pre_and_post_processing/post_processing_show_the_data.py deleted file mode 100644 index ffa1853561..0000000000 --- a/examples/bout_runners_example/pre_and_post_processing/post_processing_show_the_data.py +++ /dev/null @@ -1,36 +0,0 @@ -#!/usr/bin/env python - -"""Post processing routine which shows the data""" - -from boutdata.collect import collect -from boututils.showdata import showdata - -# All post processing functions called by bout_runners must accept the -# first argument from bout_runners (called 'folder' in -# __call_post_processing_function) -def show_the_data(paths, t=None, x=None, y=None, z=None, **kwargs): - """Function which plots the data.
-
-    Parameters
-    ----------
-    paths : tuple
-        The paths of the runs
-    t : slice
-        The desired t slice of showdata
-    x : slice
-        The desired x slice of showdata
-    y : slice
-        The desired y slice of showdata
-    z : slice
-        The desired z slice of showdata
-    **kwargs : keyword arguments
-        Not used here, but acts like a "dumpster" for additional keyword
-        arguments
-    """
-
-    for path in paths:
-        print("Showing data from {}".format(path))
-        n = collect('n', xguards=False, yguards=False, path=path, info=False)
-
-        # Show the data
-        showdata(n[t,x,y,z])
diff --git a/examples/bout_runners_example/pre_and_post_processing/restart_from_func.py b/examples/bout_runners_example/pre_and_post_processing/restart_from_func.py
deleted file mode 100644
index 7438b530d5..0000000000
--- a/examples/bout_runners_example/pre_and_post_processing/restart_from_func.py
+++ /dev/null
@@ -1,72 +0,0 @@
-#!/usr/bin/env python
-
-"""Contains restart_from_func"""
-
-import re
-
-def restart_from_func(dmp_folder,\
-                      one_of_the_restart_paths_in_scan = None,\
-                      scan_parameters = None,\
-                      **kwargs):
-    """
-    Function which returns the path to restart from, built from dmp_folder
-    and one_of_the_restart_paths_in_scan
-    NOTE: This will not work if the value of one of the scan parameters
-          contains an underscore, or if the initial hit is in the root
-          folder.
-    Parameters
-    ----------
-    dmp_folder : str
-        Given by the bout_runners. Used to find the current scan
-        values.
-    one_of_the_restart_paths_in_scan : str
-        One of the restart paths from a previously run scan.
-    scan_parameters : list
-        List of strings of the names of the scan parameters.
-    kwargs : dict
-        Dictionary with additional keyword arguments, given by
-        bout_runners.
-    Returns
-    -------
-    restart_from : str
-        String which gives the path to restart from
-    """
-
-    # Make a template string of one_of_the_restart_paths_in_scan
-    restart_template = one_of_the_restart_paths_in_scan
-    for scan_parameter in scan_parameters:
-        hits = [m.start() for m in \
-                re.finditer(scan_parameter, restart_template)]
-        while(len(hits) > 0):
-            # Replace the value with {}
-            # The value is separated from the parameter name by 1 character
-            value_start = hits[0] + len(scan_parameter) + 1
-            # Here we assume that the value is not separated by an
-            # underscore
-            value_len = len(restart_template[value_start:].split("_")[0])
-            value_end = value_start + value_len
-            # Replace the value with a format placeholder
-            restart_template =\
-                "{}{{0[{}]}}{}".format(\
-                    restart_template[:value_start],\
-                    scan_parameter,\
-                    restart_template[value_end:])
-            # Update hits
-            hits.remove(hits[0])
-
-    # Get the values from the current dmp_folder
-    values = {}
-    for scan_parameter in scan_parameters:
-        hits = [m.start() for m in \
-                re.finditer(scan_parameter, dmp_folder)]
-        # Choose the first hit to get the value from (again we assume
-        # that the value does not contain a _)
-        value_start = hits[0] + len(scan_parameter) + 1
-        # Here we assume that the value is not separated by an
-        # underscore
-        values[scan_parameter] = dmp_folder[value_start:].split("_")[0]
-
-    # Insert the values
-    restart_from = restart_template.format(values)
-
-    return restart_from
diff --git a/externalpackages/boutdata b/externalpackages/boutdata
new file mode 160000
index 0000000000..211434161d
--- /dev/null
+++ b/externalpackages/boutdata
@@ -0,0 +1 @@
+Subproject commit 211434161df05a85af4d152df44ed9a8225f170a
diff --git a/externalpackages/boututils b/externalpackages/boututils
new file mode 160000
index 0000000000..08b572d20a
--- /dev/null
+++ b/externalpackages/boututils
@@ -0,0 +1 @@
+Subproject commit 08b572d20a6c693b051f6504c599c539f5a68e82 diff --git a/include/bout/build_config.hxx b/include/bout/build_config.hxx index 138fbdd8e2..acd8a6b33b 100644 --- a/include/bout/build_config.hxx +++ b/include/bout/build_config.hxx @@ -16,6 +16,7 @@ constexpr auto has_fftw = static_cast(BOUT_HAS_FFTW); constexpr auto has_gettext = static_cast(BOUT_HAS_GETTEXT); constexpr auto has_hdf5 = static_cast(BOUT_HAS_HDF5); constexpr auto has_lapack = static_cast(BOUT_HAS_LAPACK); +constexpr auto has_legacy_netcdf = static_cast(BOUT_HAS_LEGACY_NETCDF); constexpr auto has_netcdf = static_cast(BOUT_HAS_NETCDF); constexpr auto has_petsc = static_cast(BOUT_HAS_PETSC); constexpr auto has_pretty_function = static_cast(BOUT_HAS_PRETTY_FUNCTION); diff --git a/include/options_netcdf.hxx b/include/options_netcdf.hxx index 59a599d3f2..8b0885fc67 100644 --- a/include/options_netcdf.hxx +++ b/include/options_netcdf.hxx @@ -6,7 +6,7 @@ #include "bout/build_config.hxx" -#if !BOUT_HAS_NETCDF +#if !BOUT_HAS_NETCDF || BOUT_HAS_LEGACY_NETCDF #include diff --git a/manual/sphinx/figs/folder_tree.pdf b/manual/sphinx/figs/folder_tree.pdf deleted file mode 100644 index 8915cc79f7..0000000000 Binary files a/manual/sphinx/figs/folder_tree.pdf and /dev/null differ diff --git a/manual/sphinx/figs/folder_tree.png b/manual/sphinx/figs/folder_tree.png deleted file mode 100644 index 90afe90b75..0000000000 Binary files a/manual/sphinx/figs/folder_tree.png and /dev/null differ diff --git a/manual/sphinx/user_docs/installing.rst b/manual/sphinx/user_docs/installing.rst index aa2634e186..8f95d79831 100644 --- a/manual/sphinx/user_docs/installing.rst +++ b/manual/sphinx/user_docs/installing.rst @@ -439,22 +439,34 @@ make a note of what configure printed out. Python configuration ~~~~~~~~~~~~~~~~~~~~ -To use Python, you will need the NumPy and SciPy libraries. On Debian or -Ubuntu these can be installed with:: +To use Python, you will need the dependencies of the `boututils +`__ and `boutdata +`__ libraries. The simplest way to get these is +to install the packages with pip:: - $ sudo apt-get install python-scipy + $ pip install --user boutdata -which should then add all the other dependencies like NumPy. To test if -everything is installed, run:: +or conda:: - $ python -c "import scipy" + $ conda install boutdata -If not, see the SciPy website https://www.scipy.org for instructions on -installing. +You can also install all the packages directly (see the documentation in the `boututils +`__ and `boutdata +`__ repos for the most up to date list) +using pip:: -To do this, the path to ``tools/pylib`` should be added to the -``PYTHONPATH`` environment variable. Instructions for doing this are -printed at the end of the configure script, for example:: + $ pip install --user numpy scipy matplotlib sympy netCDF4 h5py future importlib-metadata + +or conda:: + + $ conda install numpy scipy matplotlib sympy netcdf4 h5py future importlib-metadata + +They may also be available from your Linux system's package manager. + +To use the versions of ``boututils`` and ``boutdata`` provided by BOUT++, the path to +``tools/pylib`` should be added to the ``PYTHONPATH`` environment variable. This is not +necessary if you have installed the ``boututils`` and ``boutdata`` packages. Instructions +for doing this are printed at the end of the configure script, for example:: Make sure that the tools/pylib directory is in your PYTHONPATH e.g. 
by adding to your ~/.bashrc file @@ -465,8 +477,13 @@ To test if this command has worked, try running:: $ python -c "import boutdata" -If this doesn’t produce any error messages then Python is configured -correctly. +If this doesn’t produce any error messages then Python is configured correctly. + +Note that ``boututils`` and ``boutdata`` are provided by BOUT++ as submodules, so versions +compatible with the checked out version of BOUT++ are downloaded into the +``externalpackages`` directory. These are the versions used by the tests run by ``make +check`` even if you have installed ``boututils`` and ``boutdata`` on your system. + .. _sec-config-idl: diff --git a/manual/sphinx/user_docs/python.rst b/manual/sphinx/user_docs/python.rst index 7c64df457d..7141d76861 100644 --- a/manual/sphinx/user_docs/python.rst +++ b/manual/sphinx/user_docs/python.rst @@ -24,7 +24,7 @@ boututils - ``linear_regression()`` -- ``showdata()`` visualises and animates 2D data (time + 1 spatial dimension) or 3D data (time + 2 spatial dimensions). The animation object can be returned, or the animation can be saved to a file or displayed on screen. +- ``showdata()`` visualises and animates 2D data (time + 1 spatial dimension) or 3D data (time + 2 spatial dimensions). The animation object can be returned, or the animation can be saved to a file or displayed on screen. - ``boutwarnings`` contains functions to raise warning messages. ``alwayswarn()`` by default prints the warning every time it is called. @@ -60,50 +60,3 @@ boutdata :members: :undoc-members: -.. _sec-bout_runners: - -bout_runners ------------- - -``bout_runners`` contains classes which gives an alternative way of -running BOUT++ simulations either normally using the class -``basic_runner`` , or on a cluster through a generated Portable Batch -System (PBS) script using the child class ``PBS_runner`` . Examples can -be found in ``examples/bout_runners_example/``. - -``bout_runners`` is especially useful if one needs to make several runs -with only small changes in the options (which is normally written in -``BOUT.inp`` or in the command-line), as is the case when performing a -parameter scan, or when performing a MMS test. - -Instead of making several runs with several different input files with -only small changes in the option, one can with ``bout_runners`` specify -the changes as member data of an instance of the appropriate -``bout_runners`` class. One way to do this is to write a *driver* in the -same directory as the executable. The *driver* is just a python script -which imports ``bout_runners`` , creates an instance, specifies the -running option as member data of that instance and finally calls the -member function ``self.execute_runs()`` . - -In addition, the ``bout_runners`` provides a way to run any python -post-processing script after finished simulations (as long as it accept -at least one parameter containing the folder name(s) of the run(s)). If -the simulations have been performed using the ``PBS_runner`` , the -post-processing function will be submitted to the cluster (although it -is possible to submit it to a different queue, using a different amount -of nodes etc.). - -When the function ``self.execute_runs()`` is executed, a folder -structure like the one presented in :numref:`fig-folder-tree` is -created. ``BOUT.inp`` is copied to the folder of execution, where the -``BOUT.*.dmp`` files are stored. Secondly a list of combination of the -options specified in the driver is made. 
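The driver pattern described in the section being removed above can be made concrete with a minimal sketch. It uses the `basic_runner` constructor arguments documented in the class deleted later in this patch; the option values (`nproc`, `nout`, `timestep`) are purely illustrative:

```python
#!/usr/bin/env python3
"""Minimal driver sketch for the (removed) in-tree bout_runners."""
from bout_runners.bout_runners import basic_runner

my_runs = basic_runner(
    nproc=4,                # processors passed to mpirun
    directory="data",       # directory containing BOUT.inp
    nout=100,               # number of outputs in the *.dmp.* files
    timestep=(1e-3, 1e-2),  # an iterable gives one run per value
)

# One run per combination of the options set above; per the docstring,
# returns the dump folders and (for PBS_runner) the PBS job ids
dmp_folders, PBS_ids = my_runs.execute_runs()
```

Passing an iterable (here for `timestep`) is what generates the combination list mentioned above.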
Eventually unset options are -obtained from ``BOUT.inp`` or given a default value if the option is -nowhere to be found. - -.. _fig-folder-tree: -.. figure:: ../figs/folder_tree.* - :alt: Longest possible folder tree - - Longest possible folder tree made by the ``self.execute_runs()`` - function. diff --git a/manual/sphinx/user_docs/running_bout.rst b/manual/sphinx/user_docs/running_bout.rst index 14f29ad456..6493817c75 100644 --- a/manual/sphinx/user_docs/running_bout.rst +++ b/manual/sphinx/user_docs/running_bout.rst @@ -82,7 +82,7 @@ run, and produce a bunch of files in the ``data/`` subdirectory. if needed. In some cases the options used have documentation, with a brief explanation of how they are used. In most cases the type the option is used as (e.g. ``int``, ``BoutReal`` or ``bool``) is given. - + - ``BOUT.restart.*.nc`` are the restart files for the last time point. Currently each processor saves its own state in a separate file, but there is experimental support for parallel I/O. For the settings, see @@ -110,9 +110,21 @@ To see some of the other command-line options try "-h":: and see the section on options (:ref:`sec-options`). +There is also a python tool called |bout_runners|_ which can be used for executing ``BOUT++`` runs. +In addition, this tool can be used to + +- programmatically change parameters of a project in python + +- keep track of all the metadata of the runs of the project + +- automate the orchestration (including pre- and post-processing routines) of chains of runs locally or on a cluster + To analyse the output of the simulation, cd into the ``data`` subdirectory and start python or IDL (skip to :ref:`Using IDL ` for IDL). +.. |bout_runners| replace:: ``bout_runners`` +.. _bout_runners: https://pypi.org/project/bout-runners/ + Analysing the output using Python --------------------------------- @@ -129,7 +141,7 @@ will first need to have set up python to use the BOUT++ libraries ``boutdata`` and ``boututils``; see section :ref:`sec-config-python` for how to do this. The analysis routines have some requirements such as SciPy; see section -:ref:`sec-python-requirements` for details. +:ref:`sec-python-requirements` for details. To print a list of variables in the output files, one way is to use the ``DataFile`` class. This is a wrapper around the various NetCDF and HDF5 libraries for python: @@ -221,7 +233,7 @@ and to make this a coloured contour plot IDL> showdata, T[*,*,0,*], /cont -The equivalent commands in Python are as follows. +The equivalent commands in Python are as follows. .. _sec-run-nls: @@ -744,3 +756,4 @@ then the BOUT++ restart will fail. **Note** It is a good idea to set ``nxpe`` in the ``BOUT.inp`` file to be consistent with what you set here. If it is inconsistent then the restart will fail, but the error message may not be particularly enlightening. 
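As a companion to the documentation hunk above, here is a short sketch of the `DataFile`/`collect` session it refers to. The path `data/BOUT.dmp.0.nc` and the variable name `n` are assumptions about a particular run, not fixed names:

```python
from boututils.datafile import DataFile
from boutdata.collect import collect

# List the variables stored in one processor's dump file
f = DataFile("data/BOUT.dmp.0.nc")
print(f.keys())

# Gather an evolved variable across all processors' dump files;
# the returned array is indexed [t, x, y, z]
n = collect("n", path="data", xguards=False, info=False)
print(n.shape)
```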
+ diff --git a/src/bout++.cxx b/src/bout++.cxx index 2d1b8a7d3d..a35433c73a 100644 --- a/src/bout++.cxx +++ b/src/bout++.cxx @@ -474,16 +474,6 @@ void printCompileTimeOptions() { } output_info.write("\n"); -#ifdef NCDF - output_info.write(_("\tnetCDF support enabled\n")); -#else -#if BOUT_HAS_NETCDF - output_info.write(_("\tnetCDF4 support enabled\n")); -#else - output_info.write(_("\tnetCDF support disabled\n")); -#endif -#endif - #ifdef PNCDF output_info.write(_("\tParallel NetCDF support enabled\n")); #else @@ -498,7 +488,10 @@ void printCompileTimeOptions() { output_info.write(_("\tNatural language support {}\n"), is_enabled(has_gettext)); output_info.write(_("\tHDF5 support {}\n"), is_enabled(has_hdf5)); output_info.write(_("\tLAPACK support {}\n"), is_enabled(has_lapack)); - output_info.write(_("\tNetCDF support {}\n"), is_enabled(has_netcdf)); + // Horrible nested ternary to set this at compile time + constexpr auto netcdf_flavour = + has_netcdf ? (has_legacy_netcdf ? " (Legacy)" : " (NetCDF4)") : ""; + output_info.write(_("\tNetCDF support {}{}\n"), is_enabled(has_netcdf), netcdf_flavour); output_info.write(_("\tPETSc support {}\n"), is_enabled(has_petsc)); output_info.write(_("\tPretty function name support {}\n"), is_enabled(has_pretty_function)); @@ -623,8 +616,10 @@ Datafile setupDumpFile(Options& options, Mesh& mesh, const std::string& data_dir .withDefault(false); // Get file extensions - const auto default_dump_format = bout::build::has_netcdf ? "nc" : "h5"; - const auto dump_ext = options["dump_format"].withDefault(default_dump_format); + constexpr auto default_dump_format = bout::build::has_netcdf ? "nc" : "h5"; + const auto dump_ext = options["dump_format"] + .doc("File extension for output files") + .withDefault(default_dump_format); output_progress << "Setting up output (dump) file\n"; auto dump_file = Datafile(&(options["output"]), &mesh); @@ -650,6 +645,8 @@ Datafile setupDumpFile(Options& options, Mesh& mesh, const std::string& data_dir dump_file.addOnce(const_cast(bout::build::has_hdf5), "has_hdf5"); dump_file.addOnce(const_cast(bout::build::has_lapack), "has_lapack"); dump_file.addOnce(const_cast(bout::build::has_netcdf), "has_netcdf"); + dump_file.addOnce(const_cast(bout::build::has_legacy_netcdf), + "has_legacy_netcdf"); dump_file.addOnce(const_cast(bout::build::has_petsc), "has_petsc"); dump_file.addOnce(const_cast(bout::build::has_pretty_function), "has_pretty_function"); diff --git a/src/fileio/formatfactory.cxx b/src/fileio/formatfactory.cxx index 8e6c840254..0586bfb1b1 100644 --- a/src/fileio/formatfactory.cxx +++ b/src/fileio/formatfactory.cxx @@ -40,11 +40,11 @@ std::unique_ptr FormatFactory::createDataFormat(const char *filename #else } -#if BOUT_HAS_NETCDF +#if BOUT_HAS_NETCDF && !BOUT_HAS_LEGACY_NETCDF return bout::utils::make_unique(mesh_in); #else -#ifdef NCDF +#if BOUT_HAS_LEGACY_NETCDF return bout::utils::make_unique(mesh_in); #else @@ -54,8 +54,8 @@ std::unique_ptr FormatFactory::createDataFormat(const char *filename #error No file format available; aborting. 
-#endif // HDF5 -#endif // NCDF +#endif // BOUT_HAS_HDF5 +#endif // BOUT_HAS_LEGACY_NETCDF #endif // BOUT_HAS_NETCDF #endif // PNCDF throw BoutException("Parallel I/O disabled, no serial library found"); @@ -88,15 +88,11 @@ std::unique_ptr FormatFactory::createDataFormat(const char *filename const char *ncdf_match[] = {"cdl", "nc", "ncdf"}; if(matchString(s, 3, ncdf_match) != -1) { output.write("\tUsing NetCDF4 format for file '{:s}'\n", filename); +#if BOUT_HAS_LEGACY_NETCDF + return bout::utils::make_unique(); +#else return bout::utils::make_unique(); - } #endif - -#ifdef NCDF - const char *ncdf_match[] = {"cdl", "nc", "ncdf"}; - if(matchString(s, 3, ncdf_match) != -1) { - output.write("\tUsing NetCDF format for file '{:s}'\n", filename); - return bout::utils::make_unique(); } #endif diff --git a/src/fileio/impls/hdf5/h5_format.cxx b/src/fileio/impls/hdf5/h5_format.cxx index 940c773263..002b83bf62 100644 --- a/src/fileio/impls/hdf5/h5_format.cxx +++ b/src/fileio/impls/hdf5/h5_format.cxx @@ -260,13 +260,17 @@ bool H5Format::addVar(const std::string &name, bool repeat, hid_t write_hdf5_typ } int nd = 0; - if (datatype == "scalar") nd = 0; - else if (datatype == "vector") nd = 1; - else if (datatype == "FieldX") nd = 1; - else if (datatype == "Field2D") nd = 2; - else if (datatype == "FieldPerp") nd = 2; - else if (datatype == "Field3D") nd = 3; - else throw BoutException("Unrecognized datatype '"+datatype+"'"); + if (datatype == "scalar") { + nd = 0; + } else if (datatype == "vector" or datatype == "string" or datatype == "FieldX") { + nd = 1; + } else if (datatype == "Field2D" or datatype == "FieldPerp") { + nd = 2; + } else if (datatype == "Field3D") { + nd = 3; + } else { + throw BoutException("Unrecognized datatype '"+datatype+"'"); + } if (repeat) { // add time dimension @@ -400,7 +404,7 @@ bool H5Format::addVarIntVec(const std::string &name, bool repeat, size_t size) { } bool H5Format::addVarString(const std::string &name, bool repeat, size_t size) { - return addVar(name, repeat, H5T_C_S1, "vector", size); + return addVar(name, repeat, H5T_C_S1, "string", size); } bool H5Format::addVarBoutReal(const std::string &name, bool repeat) { diff --git a/src/fileio/impls/netcdf/nc_format.cxx b/src/fileio/impls/netcdf/nc_format.cxx index 38f9719baf..a9ebb520a0 100644 --- a/src/fileio/impls/netcdf/nc_format.cxx +++ b/src/fileio/impls/netcdf/nc_format.cxx @@ -23,12 +23,13 @@ #include #include "nc_format.hxx" -#ifdef NCDF +#if BOUT_HAS_LEGACY_NETCDF #include #include #include +#include "bout/build_config.hxx" #include #include @@ -1658,5 +1659,4 @@ void NcFormat::checkName(const char* name) { } } -#endif // NCDF - +#endif // BOUT_HAS_LEGACY_NETCDF diff --git a/src/fileio/impls/netcdf/nc_format.hxx b/src/fileio/impls/netcdf/nc_format.hxx index 4571f73d82..76ab3ac177 100644 --- a/src/fileio/impls/netcdf/nc_format.hxx +++ b/src/fileio/impls/netcdf/nc_format.hxx @@ -1,3 +1,4 @@ +#include "bout/build_config.hxx" /*! 
* \file nc_format.hxx * @@ -33,7 +34,7 @@ * */ -#ifndef NCDF +#if !BOUT_HAS_LEGACY_NETCDF #include "../emptyformat.hxx" using NcFormat = EmptyFormat; @@ -169,4 +170,4 @@ class NcFormat : public DataFormat { #endif // __NCFORMAT_H__ -#endif // NCDF +#endif // BOUT_HAS_LEGACY_NETCDF diff --git a/src/fileio/impls/netcdf4/ncxx4.cxx b/src/fileio/impls/netcdf4/ncxx4.cxx index eb39d57186..dc085c4040 100644 --- a/src/fileio/impls/netcdf4/ncxx4.cxx +++ b/src/fileio/impls/netcdf4/ncxx4.cxx @@ -24,7 +24,7 @@ #include "ncxx4.hxx" -#if BOUT_HAS_NETCDF +#if BOUT_HAS_NETCDF && !BOUT_HAS_LEGACY_NETCDF #include #include @@ -1416,5 +1416,5 @@ std::vector Ncxx4::getRecDimVec(int nd) { return vec; } -#endif // NCDF +#endif // BOUT_HAS_NETCDF diff --git a/src/fileio/impls/netcdf4/ncxx4.hxx b/src/fileio/impls/netcdf4/ncxx4.hxx index 57dc2a52b9..fc78923c0a 100644 --- a/src/fileio/impls/netcdf4/ncxx4.hxx +++ b/src/fileio/impls/netcdf4/ncxx4.hxx @@ -35,7 +35,7 @@ #include "bout/build_config.hxx" -#if !BOUT_HAS_NETCDF +#if !BOUT_HAS_NETCDF || BOUT_HAS_LEGACY_NETCDF #include "../emptyformat.hxx" using Ncxx4 = EmptyFormat; diff --git a/src/mesh/coordinates.cxx b/src/mesh/coordinates.cxx index 9e95af5f03..0dc9eae62e 100644 --- a/src/mesh/coordinates.cxx +++ b/src/mesh/coordinates.cxx @@ -1224,8 +1224,8 @@ void Coordinates::setParallelTransform(Options* options) { // Flux Coordinate Independent method const bool fci_zperiodic = (*ptoptions)["z_periodic"].withDefault(true); - transform = bout::utils::make_unique(*localmesh, fci_zperiodic, - ptoptions); + transform = + bout::utils::make_unique(*localmesh, dy, fci_zperiodic, ptoptions); } else { throw BoutException(_("Unrecognised paralleltransform option.\n" diff --git a/src/mesh/parallel/fci.cxx b/src/mesh/parallel/fci.cxx index bf37c88256..7c16dd27fb 100644 --- a/src/mesh/parallel/fci.cxx +++ b/src/mesh/parallel/fci.cxx @@ -47,30 +47,27 @@ #include -FCIMap::FCIMap(Mesh& mesh, Options& options, int offset_, BoundaryRegionPar* boundary, - bool zperiodic) +FCIMap::FCIMap(Mesh& mesh, const Field2D& dy, Options& options, int offset_, + BoundaryRegionPar* boundary, bool zperiodic) : map_mesh(mesh), offset(offset_), boundary_mask(map_mesh), corner_boundary_mask(map_mesh) { TRACE("Creating FCIMAP for direction {:d}", offset); if (offset == 0) { - throw BoutException("FCIMap called with offset = 0; You probably didn't mean to do that"); + throw BoutException( + "FCIMap called with offset = 0; You probably didn't mean to do that"); } auto& interpolation_options = options["xzinterpolation"]; - interp = XZInterpolationFactory::getInstance().create(&interpolation_options, &map_mesh); + interp = + XZInterpolationFactory::getInstance().create(&interpolation_options, &map_mesh); interp->setYOffset(offset); - interp_corner = XZInterpolationFactory::getInstance().create(&interpolation_options, &map_mesh); + interp_corner = + XZInterpolationFactory::getInstance().create(&interpolation_options, &map_mesh); interp_corner->setYOffset(offset); - // Index arrays contain guard cells in order to get subscripts right - // x-index of bottom-left grid point - auto i_corner = Tensor(map_mesh.LocalNx, map_mesh.LocalNy, map_mesh.LocalNz); - // z-index of bottom-left grid point - auto k_corner = Tensor(map_mesh.LocalNx, map_mesh.LocalNy, map_mesh.LocalNz); - // Index-space coordinates of forward/backward points Field3D xt_prime{&map_mesh}, zt_prime{&map_mesh}; @@ -128,8 +125,8 @@ FCIMap::FCIMap(Mesh& mesh, Options& options, int offset_, BoundaryRegionPar* bou auto i_zplus = i.zp(); auto i_xzplus = 
i_zplus.xp(); - if ((xt_prime[i] < 0.0) || (xt_prime[i_xplus] < 0.0) || (xt_prime[i_xzplus] < 0.0) || - (xt_prime[i_zplus] < 0.0)) { + if ((xt_prime[i] < 0.0) || (xt_prime[i_xplus] < 0.0) || (xt_prime[i_xzplus] < 0.0) + || (xt_prime[i_zplus] < 0.0)) { // Hit a boundary corner_boundary_mask(i.x(), i.y(), i.z()) = true; @@ -157,108 +154,88 @@ FCIMap::FCIMap(Mesh& mesh, Options& options, int offset_, BoundaryRegionPar* bou interp->calcWeights(xt_prime, zt_prime); } - int ncz = map_mesh.LocalNz; - - BoutReal t_x, t_z; - - Coordinates &coord = *(map_mesh.getCoordinates()); - - for (int x = map_mesh.xstart; x <= map_mesh.xend; x++) { - for (int y = map_mesh.ystart; y <= map_mesh.yend; y++) { - for (int z = 0; z < ncz; z++) { + const int ncz = map_mesh.LocalNz; - // The integer part of xt_prime, zt_prime are the indices of the cell - // containing the field line end-point - i_corner(x, y, z) = static_cast(floor(xt_prime(x, y, z))); + // Serial loop because call to BoundaryRegionPar::addPoint + // (probably?) can't be done in parallel + BOUT_FOR_SERIAL(i, xt_prime.getRegion("RGN_NOBNDRY")) { + // z is periodic, so make sure the z-index wraps around + if (zperiodic) { + zt_prime[i] = zt_prime[i] + - ncz * (static_cast(zt_prime[i] / static_cast(ncz))); - // z is periodic, so make sure the z-index wraps around - if (zperiodic) { - zt_prime(x, y, z) = - zt_prime(x, y, z) - - ncz * (static_cast(zt_prime(x, y, z) / static_cast(ncz))); + if (zt_prime[i] < 0.0) { + zt_prime[i] += ncz; + } + } - if (zt_prime(x, y, z) < 0.0) - zt_prime(x, y, z) += ncz; - } + if (xt_prime[i] >= 0.0) { + // Not a boundary + continue; + } - k_corner(x, y, z) = static_cast(floor(zt_prime(x, y, z))); + const auto x = i.x(); + const auto y = i.y(); + const auto z = i.z(); + + //---------------------------------------- + // Boundary stuff + // + // If a field line leaves the domain, then the forward or backward + // indices (forward/backward_xt_prime and forward/backward_zt_prime) + // are set to -1 + + boundary_mask(x, y, z) = true; + + // Need to specify the index of the boundary intersection, but + // this may not be defined in general. + // We do however have the real-space (R,Z) coordinates. Here we extrapolate, + // using the change in R and Z to calculate the change in (x,z) indices + // + // ( dR ) = ( dR/dx dR/dz ) ( dx ) + // ( dZ ) ( dZ/dx dZ/dz ) ( dz ) + // + // where (dR,dZ) is the change in (R,Z) along the field, + // (dx,dz) is the change in (x,z) index along the field, + // and the gradients dR/dx etc. 
are evaluated at (x,y,z) - // t_x, t_z are the normalised coordinates \in [0,1) within the cell - // calculated by taking the remainder of the floating point index - t_x = xt_prime(x, y, z) - static_cast(i_corner(x, y, z)); - t_z = zt_prime(x, y, z) - static_cast(k_corner(x, y, z)); + // Cache the offsets + const auto i_xp = i.xp(); + const auto i_xm = i.xm(); + const auto i_zp = i.zp(); + const auto i_zm = i.zm(); + + const BoutReal dR_dx = 0.5 * (R[i_xp] - R[i_xm]); + const BoutReal dZ_dx = 0.5 * (Z[i_xp] - Z[i_xm]); + + BoutReal dR_dz, dZ_dz; + // Handle the edge cases in Z + if (z == 0) { + dR_dz = R[i_zp] - R[i]; + dZ_dz = Z[i_zp] - Z[i]; + + } else if (z == map_mesh.LocalNz - 1) { + dR_dz = R[i] - R[i_zm]; + dZ_dz = Z[i] - Z[i_zm]; + + } else { + dR_dz = 0.5 * (R[i_zp] - R[i_zm]); + dZ_dz = 0.5 * (Z[i_zp] - Z[i_zm]); + } - // Check that t_x and t_z are in range - if ((t_x < 0.0) || (t_x > 1.0)) { - throw BoutException( - "t_x={:e} out of range at ({:d},{:d},{:d}) (xt_prime={:e}, i_corner={:d})", - t_x, x, y, z, xt_prime(x, y, z), i_corner(x, y, z)); - } + const BoutReal det = dR_dx * dZ_dz - dR_dz * dZ_dx; // Determinant of 2x2 matrix - if ((t_z < 0.0) || (t_z > 1.0)) { - throw BoutException( - "t_z={:e} out of range at ({:d},{:d},{:d}) (zt_prime={:e}, k_corner={:d})", - t_z, x, y, z, zt_prime(x, y, z), k_corner(x, y, z)); - } + const BoutReal dR = R_prime[i] - R[i]; + const BoutReal dZ = Z_prime[i] - Z[i]; - //---------------------------------------- - // Boundary stuff - // - // If a field line leaves the domain, then the forward or backward - // indices (forward/backward_xt_prime and forward/backward_zt_prime) - // are set to -1 - - if (xt_prime(x, y, z) < 0.0) { - // Hit a boundary - - boundary_mask(x, y, z) = true; - - // Need to specify the index of the boundary intersection, but - // this may not be defined in general. - // We do however have the real-space (R,Z) coordinates. Here we extrapolate, - // using the change in R and Z to calculate the change in (x,z) indices - // - // ( dR ) = ( dR/dx dR/dz ) ( dx ) - // ( dZ ) ( dZ/dx dZ/dz ) ( dz ) - // - // where (dR,dZ) is the change in (R,Z) along the field, - // (dx,dz) is the change in (x,z) index along the field, - // and the gradients dR/dx etc. 
are evaluated at (x,y,z) - - BoutReal dR_dx = 0.5 * (R(x + 1, y, z) - R(x - 1, y, z)); - BoutReal dZ_dx = 0.5 * (Z(x + 1, y, z) - Z(x - 1, y, z)); - - BoutReal dR_dz, dZ_dz; - // Handle the edge cases in Z - if (z == 0) { - dR_dz = R(x, y, z + 1) - R(x, y, z); - dZ_dz = Z(x, y, z + 1) - Z(x, y, z); - - } else if (z == map_mesh.LocalNz - 1) { - dR_dz = R(x, y, z) - R(x, y, z - 1); - dZ_dz = Z(x, y, z) - Z(x, y, z - 1); - - } else { - dR_dz = 0.5 * (R(x, y, z + 1) - R(x, y, z - 1)); - dZ_dz = 0.5 * (Z(x, y, z + 1) - Z(x, y, z - 1)); - } - - BoutReal det = dR_dx * dZ_dz - dR_dz * dZ_dx; // Determinant of 2x2 matrix - - BoutReal dR = R_prime(x, y, z) - R(x, y, z); - BoutReal dZ = Z_prime(x, y, z) - Z(x, y, z); - - // Invert 2x2 matrix to get change in index - BoutReal dx = (dZ_dz * dR - dR_dz * dZ) / det; - BoutReal dz = (dR_dx * dZ - dZ_dx * dR) / det; - boundary->add_point(x, y, z, - x + dx, y + 0.5*offset, z + dz, // Intersection point in local index space - 0.5*coord.dy(x,y), //sqrt( SQ(dR) + SQ(dZ) ), // Distance to intersection - PI // Right-angle intersection - ); - } - } - } + // Invert 2x2 matrix to get change in index + const BoutReal dx = (dZ_dz * dR - dR_dz * dZ) / det; + const BoutReal dz = (dR_dx * dZ - dZ_dx * dR) / det; + boundary->add_point(x, y, z, x + dx, y + 0.5 * offset, + z + dz, // Intersection point in local index space + 0.5 * dy[i], // Distance to intersection + PI // Right-angle intersection + ); } interp->setMask(boundary_mask); diff --git a/src/mesh/parallel/fci.hxx b/src/mesh/parallel/fci.hxx index 6f8c5ed916..3560d7fec9 100644 --- a/src/mesh/parallel/fci.hxx +++ b/src/mesh/parallel/fci.hxx @@ -44,8 +44,8 @@ class FCIMap { public: FCIMap() = delete; - FCIMap(Mesh& mesh, Options& options, int offset, BoundaryRegionPar* boundary, bool - zperiodic); + FCIMap(Mesh& mesh, const Field2D& dy, Options& options, int offset, + BoundaryRegionPar* boundary, bool zperiodic); // The mesh this map was created on Mesh& map_mesh; @@ -71,7 +71,7 @@ public: class FCITransform : public ParallelTransform { public: FCITransform() = delete; - FCITransform(Mesh& mesh, bool zperiodic = true, Options* opt = nullptr) + FCITransform(Mesh& mesh, const Field2D& dy, bool zperiodic = true, Options* opt = nullptr) : ParallelTransform(mesh, opt) { // check the coordinate system used for the grid data source @@ -86,8 +86,8 @@ public: field_line_maps.reserve(mesh.ystart * 2); for (int offset = 1; offset < mesh.ystart + 1; ++offset) { - field_line_maps.emplace_back(mesh, options, offset, forward_boundary, zperiodic); - field_line_maps.emplace_back(mesh, options, -offset, backward_boundary, zperiodic); + field_line_maps.emplace_back(mesh, dy, options, offset, forward_boundary, zperiodic); + field_line_maps.emplace_back(mesh, dy, options, -offset, backward_boundary, zperiodic); } } diff --git a/src/physics/physicsmodel.cxx b/src/physics/physicsmodel.cxx index 6f33524b4f..de107700d6 100644 --- a/src/physics/physicsmodel.cxx +++ b/src/physics/physicsmodel.cxx @@ -101,23 +101,17 @@ int PhysicsModel::postInit(bool restarting) { // Second argument specifies no time history solver->outputVars(restart, false); - std::string restart_dir; ///< Directory for restart files - std::string dump_ext, restart_ext; ///< Dump, Restart file extension - - Options *options = Options::getRoot(); - if (options->isSet("restartdir")) { - // Solver-specific restart directory - options->get("restartdir", restart_dir, "data"); - } else { - // Use the root data directory - options->get("datadir", restart_dir, "data"); - } - /// 
Get restart file extension - const auto default_dump_format = bout::build::has_netcdf ? "nc" : "h5"; - options->get("dump_format", dump_ext, default_dump_format); - options->get("restart_format", restart_ext, dump_ext); + auto& options = Options::root(); + + const std::string restart_dir = options["restartdir"] + .doc("Directory for restart files") + .withDefault(options["datadir"]); + + const std::string restart_ext = options["restart_format"] + .doc("Restart file extension") + .withDefault(options["dump_format"]); - std::string filename = restart_dir + "/BOUT.restart."+restart_ext; + const std::string filename = restart_dir + "/BOUT.restart." + restart_ext; if (restarting) { output.write("Loading restart file: {:s}\n", filename); diff --git a/src/sys/options/options_netcdf.cxx b/src/sys/options/options_netcdf.cxx index f4f5f1610c..7a05d8e48f 100644 --- a/src/sys/options/options_netcdf.cxx +++ b/src/sys/options/options_netcdf.cxx @@ -1,6 +1,6 @@ #include "bout/build_config.hxx" -#if BOUT_HAS_NETCDF +#if BOUT_HAS_NETCDF && !BOUT_HAS_LEGACY_NETCDF #include "options_netcdf.hxx" diff --git a/tests/integrated/test-io/runtest b/tests/integrated/test-io/runtest index 33c347bc48..0482acb5a8 100755 --- a/tests/integrated/test-io/runtest +++ b/tests/integrated/test-io/runtest @@ -4,6 +4,7 @@ # Run the test, compare results against the benchmark # # requires: netcdf +# requires: not legacy_netcdf # cores: 4 from boututils.run_wrapper import build_and_log, shell, launch_safe diff --git a/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 b/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 index 80c74f7ad0..2bbc88cbd6 100644 Binary files a/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 and b/tests/integrated/test-io_hdf5/data/benchmark.out.0.hdf5 differ diff --git a/tests/integrated/test-options-netcdf/CMakeLists.txt b/tests/integrated/test-options-netcdf/CMakeLists.txt index cfb7a3c016..f2d115d768 100644 --- a/tests/integrated/test-options-netcdf/CMakeLists.txt +++ b/tests/integrated/test-options-netcdf/CMakeLists.txt @@ -3,4 +3,5 @@ bout_add_integrated_test(test-options-netcdf USE_RUNTEST USE_DATA_BOUT_INP REQUIRES BOUT_HAS_NETCDF + CONFLICTS BOUT_HAS_LEGACY_NETCDF ) diff --git a/tests/integrated/test-options-netcdf/runtest b/tests/integrated/test-options-netcdf/runtest index 18d542f6a9..b02f80ddaf 100755 --- a/tests/integrated/test-options-netcdf/runtest +++ b/tests/integrated/test-options-netcdf/runtest @@ -2,7 +2,7 @@ # Note: This test requires NCDF4, whereas on Travis NCDF is used #requires: netcdf -#requires: not travis or fedora +#requires: not legacy_netcdf from boututils.datafile import DataFile from boututils.run_wrapper import build_and_log, shell, launch diff --git a/tests/integrated/test-solver/test_solver.cxx b/tests/integrated/test-solver/test_solver.cxx index 1f56354fce..8ad79add17 100644 --- a/tests/integrated/test-solver/test_solver.cxx +++ b/tests/integrated/test-solver/test_solver.cxx @@ -62,6 +62,8 @@ int main(int argc, char** argv) { root["output"]["enabled"] = false; root["restart"]["enabled"] = false; + root["datadir"] = "data"; + root["dump_format"] = "nc"; // Set the command-line arguments SlepcLib::setArgs(argc, argv); diff --git a/tests/integrated/test-squash/runtest b/tests/integrated/test-squash/runtest index 35a2f674ad..97362117fa 100755 --- a/tests/integrated/test-squash/runtest +++ b/tests/integrated/test-squash/runtest @@ -6,15 +6,19 @@ import time import numpy as np from boututils.run_wrapper import launch_safe, shell_safe, build_and_log import 
argparse +import re + + +# requires: all_tests +# requires: netcdf +# cores: 4 + +IGNORED_VARS_PATTERN = re.compile("(wtime|ncalls|arkode|cvode).*") -#requires: all_tests -#requires: netcdf -#cores: 4 class timer(object): - """Context manager for printing how long a command took + """Context manager for printing how long a command took""" - """ def __init__(self, msg): self.msg = msg @@ -27,34 +31,28 @@ class timer(object): def timed_shell_safe(cmd, *args, **kwargs): - """Wraps shell_safe in a timer - - """ + """Wraps shell_safe in a timer""" with timer(cmd): shell_safe(cmd, *args, **kwargs) def timed_launch_safe(cmd, *args, **kwargs): - """Wraps launch_safe in a timer - - """ + """Wraps launch_safe in a timer""" with timer(cmd): launch_safe(cmd, *args, **kwargs) def verify(f1, f2): - """Verifies that two BOUT++ files are identical - - """ + """Verifies that two BOUT++ files are identical""" with timer("verify %s %s" % (f1, f2)): d1 = DataFile(f1) d2 = DataFile(f2) for v in d1.keys(): if d1[v].shape != d2[v].shape: raise RuntimeError("shape mismatch in ", v, d1[v], d2[v]) - if v in ["MXSUB", "MYSUB", "NXPE", "NYPE", "iteration","wall_time"]: + if v in ["MXSUB", "MYSUB", "NXPE", "NYPE", "iteration", "wall_time"]: continue - if v.startswith("wtime") or v.startswith("ncalls"): + if IGNORED_VARS_PATTERN.match(v): continue if not np.allclose(d1[v], d2[v], equal_nan=True): err = "" @@ -66,8 +64,9 @@ def verify(f1, f2): parser = argparse.ArgumentParser(description="Test the bout-squashoutput wrapper") -parser.add_argument("executable", help="Path to bout-squashoutput", - default="../../../bin") +parser.add_argument( + "executable", help="Path to bout-squashoutput", default="../../../bin" +) args = parser.parse_args() build_and_log("Squash test") diff --git a/tests/unit/src/test_bout++.cxx b/tests/unit/src/test_bout++.cxx index 83be339061..a33f20be88 100644 --- a/tests/unit/src/test_bout++.cxx +++ b/tests/unit/src/test_bout++.cxx @@ -302,7 +302,7 @@ TEST_F(PrintStartupTest, CompileTimeOptions) { EXPECT_TRUE(IsSubString(buffer.str(), _("Compile-time options:\n"))); EXPECT_TRUE(IsSubString(buffer.str(), _("Signal"))); - EXPECT_TRUE(IsSubString(buffer.str(), "netCDF")); + EXPECT_TRUE(IsSubString(buffer.str(), "NetCDF")); EXPECT_TRUE(IsSubString(buffer.str(), "OpenMP")); EXPECT_TRUE(IsSubString(buffer.str(), _("Compiled with flags"))); } diff --git a/tests/unit/sys/test_options_netcdf.cxx b/tests/unit/sys/test_options_netcdf.cxx index faedc4422b..27a7eef565 100644 --- a/tests/unit/sys/test_options_netcdf.cxx +++ b/tests/unit/sys/test_options_netcdf.cxx @@ -2,7 +2,7 @@ #include "bout/build_config.hxx" -#if BOUT_HAS_NETCDF +#if BOUT_HAS_NETCDF && !BOUT_HAS_LEGACY_NETCDF #include "gtest/gtest.h" diff --git a/tools/pylib/bout_runners/README.md b/tools/pylib/bout_runners/README.md deleted file mode 100644 index da164a57d8..0000000000 --- a/tools/pylib/bout_runners/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# bout-runners - -Python-wrapper for doing simulation runs with BOUT++. 
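One detail of the rewritten `verify` helper above is worth noting: the compiled `IGNORED_VARS_PATTERN` now also skips `arkode*` and `cvode*` solver diagnostics, not just the `wtime*` and `ncalls*` timers that the old `startswith` checks caught. A standalone sketch of the filter (the variable names below are made up for illustration):

```python
import re

# Same pattern as in the updated runtest above
IGNORED_VARS_PATTERN = re.compile("(wtime|ncalls|arkode|cvode).*")

for name in ("wtime_comms", "ncalls", "cvode_last_order", "arkode_nsteps", "n"):
    skipped = IGNORED_VARS_PATTERN.match(name) is not None
    print(name, "skipped" if skipped else "compared")
```

Only names matching at the start are skipped, so physical variables such as `n` are still compared.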
-See [bout_runners_example](../../../examples/bout_runners_example) for examples
diff --git a/tools/pylib/bout_runners/__init__.py b/tools/pylib/bout_runners/__init__.py
deleted file mode 100644
index 9bcbc7acef..0000000000
--- a/tools/pylib/bout_runners/__init__.py
+++ /dev/null
@@ -1,4 +0,0 @@
-# Packages to be imported when writing from boutdata import *
-__all__ = ["bout_runners"]
-
-from .bout_runners import basic_runner, PBS_runner
diff --git a/tools/pylib/bout_runners/bout_runners.py b/tools/pylib/bout_runners/bout_runners.py
deleted file mode 100755
index 76e2a3a546..0000000000
--- a/tools/pylib/bout_runners/bout_runners.py
+++ /dev/null
@@ -1,4418 +0,0 @@
-#!/usr/bin/env python3
-
-"""
-Classes for running one or several MPI runs with BOUT++ at once.
-Read the docstring of "basic_runner", or refer to the user manual of
-BOUT++ for more info. Examples can be found in
-BOUT/examples/bout_runners_example.
-"""
-
-# NOTE: This document uses folding. A hash-symbol followed by three {'s
-#       denotes the start of a fold, and a hash-symbol followed by three
-#       }'s denotes the end of a fold
-# NOTE: Improvement suggestions:
-#       It would be beneficial to refactor bout_runners
-#       1. Better design: Shorter functions
-#       2. Better input parsing: The input for the constructors is rather
-#          long. One alternative could be to have setters for a grouping
-#          of parameters
-__authors__ = "Michael Loeiten"
-__version__ = "1.08"
-__date__ = "2018.01.07"
-
-import os
-import sys
-import re
-import itertools
-import glob
-import timeit
-import datetime
-import time
-import shutil
-from numbers import Number
-import numpy as np
-from boututils.run_wrapper import shell, launch, getmpirun
-from boututils.options import BOUTOptions
-from boututils.datafile import DataFile
-from boutdata.restart import redistribute, addnoise, resizeZ, resize
-
-#{{{class basic_runner
-# As a child class uses the super function, the class must allow an
-# object as input
-
-
-class basic_runner(object):
-    #{{{docstring
-    """
-    basic_runner
-    ------------
-
-    Class for running one or several MPI runs with BOUT++.
-    Calling self.execute_runs() will run your BOUT++ program with the possible
-    combinations given in the member data using the MPI runner.
-
-    Before each run basic_runner will:
-        * Create a folder system, based on the member data, rooted in
-          self._directory.
-        * Copy BOUT.inp of self._directory to the execution folder.
-        * Check that the domain split is sane (suggest a split if a bad
-          domain split is given)
-
-    If the restart option is checked, bout_runners will
-        * Put old data into a restart folder (so that nothing is lost
-          upon restart)
-        * Resize the mesh if new sizes are detected
-
-    A log-file for the run is stored in self._directory
-
-    By default self._directory = "data", self._nproc = 1 and
-    self._allow_size_modification = False
-
-    self._program_name is by default set to the same name as any .o files in
-    the folder where an instance of the object is created. If none is found
-    the constructor tries to run make.
-
-    All other data members are set to None by default.
-
-    The data members will override the corresponding options given in
-    self._directory/BOUT.inp.
-
-    See the docstring of the constructor (__init__) for options.
-    See BOUT/examples/bout_runners_example for examples.
-    """
-#}}}
-
-#{{{__init__
-    def __init__(self,
-                 nproc=1,
-                 directory="data",
-                 prog_name=None,
-                 solver=None,
-                 mms=None,
-                 atol=None,
-                 rtol=None,
-                 mxstep=None,
-                 grid_file=None,
-                 nx=None,
-                 ny=None,
-                 nz=None,
-                 zperiod=None,
-                 zmin=None,
-                 zmax=None,
-                 dx=None,
-                 dy=None,
-                 dz=None,
-                 MXG=None,
-                 MYG=None,
-                 NXPE=None,
-                 ixseps1=None,
-                 ixseps2=None,
-                 jyseps1_1=None,
-                 jyseps1_2=None,
-                 jyseps2_1=None,
-                 jyseps2_2=None,
-                 symGlobX=None,
-                 symGlobY=None,
-                 ddx_first=None,
-                 ddx_second=None,
-                 ddx_upwind=None,
-                 ddx_flux=None,
-                 ddy_first=None,
-                 ddy_second=None,
-                 ddy_upwind=None,
-                 ddy_flux=None,
-                 ddz_first=None,
-                 ddz_second=None,
-                 ddz_upwind=None,
-                 ddz_flux=None,
-                 nout=None,
-                 timestep=None,
-                 additional=None,
-                 series_add=None,
-                 restart=None,
-                 restart_from=None,
-                 redistribute=None,
-                 use_expand=False,
-                 max_proc=None,
-                 intrp_method=None,
-                 add_noise=None,
-                 cpy_source=None,
-                 cpy_grid=None,
-                 sort_by=None,
-                 make=None,
-                 allow_size_modification=False):
-        #{{{docstring
-        """
-        basic_runner constructor
-        ------------------------
-
-        All the member data is set to None by default. If the
-        data members are not set, the values from BOUT.inp will be used.
-        The exception is nproc (default = 1), directory (default =
-        "data"), use_expand (default = False) and
-        allow_size_modification (default = False), which always need to
-        be set.
-
-        Parameters
-        ----------
-        nproc : int
-            Number of processors to use in the mpirun
-        directory : str
-            The directory of the BOUT.inp file
-        prog_name : str or iterable
-            Name of the executable. If none is set the name will be set from
-            the *.o file.
-        solver : str or iterable
-            The solver to be used in the runs
-        mms : bool
-            Whether or not MMS should be run
-        atol : number or iterable
-            Absolute tolerance
-        rtol : number or iterable
-            Relative tolerance
-        mxstep : int or iterable
-            Max internal steps per output step
-        grid_file : str or iterable
-            The grid file
-        nx : int or iterable
-            Number of nx in the run
-        ny : int or iterable
-            Number of ny in the run
-        nz : int or iterable
-            Number of nz in the run
-        zperiod : int or iterable
-            Domain size as a fraction of 2*pi
-        zmin : number
-            Minimum range of the z domain
-        zmax : number
-            Maximum range of the z domain
-        dx : number or iterable
-            Grid size in the x direction
-        dy : number or iterable
-            Grid size in the y direction
-        dz : number or iterable
-            Grid size in the z direction
-        MXG : int or iterable
-            The number of guard cells in the x direction
-        MYG : int or iterable
-            The number of guard cells in the y direction
-        NXPE : int or iterable
-            Number of processors in the x direction
-        ixseps1 : int or iterable
-            Separatrix location for "upper" divertor
-        ixseps2 : int or iterable
-            Separatrix location for "lower" divertor
-        jyseps1_1 : int or iterable
-            Branch cut location 1_1 (see user's manual for details)
-        jyseps1_2 : int or iterable
-            Branch cut location 1_2 (see user's manual for details)
-        jyseps2_1 : int or iterable
-            Branch cut location 2_1 (see user's manual for details)
-        jyseps2_2 : int or iterable
-            Branch cut location 2_2 (see user's manual for details)
-        symGlobX : bool
-            Whether or not to use symmetricGlobalX (x defined
-            symmetrically between 0 and 1)
-        symGlobY : bool
-            Whether or not to use symmetricGlobalY (y defined
-            symmetrically)
-        ddx_first : str or iterable
-            Method used for first ddx terms
-        ddx_second : str or iterable
-            Method used for second ddx terms
-        ddx_upwind : str or iterable
-            Method used for upwind ddx terms
-        ddx_flux : str or iterable
-            Method used for flux ddx terms
-        ddy_first : str or iterable
-            Method used for first ddy terms
-        ddy_second : str or iterable
-            Method used for second ddy terms
-        ddy_upwind : str or iterable
-            Method used for upwind ddy terms
-        ddy_flux : str or iterable
-            Method used for flux ddy terms
-        ddz_first : str or iterable
-            Method used for first ddz terms
-        ddz_second : str or iterable
-            Method used for second ddz terms
-        ddz_upwind : str or iterable
-            Method used for upwind ddz terms
-        ddz_flux : str or iterable
-            Method used for flux ddz terms
-        nout : int or iterable
-            Number of outputs stored in the *.dmp.* files
-        timestep : int or iterable
-            The time between each output stored in the *.dmp.* files
-        additional : tuple or iterable
-            Additional option for the run given in the form
-
-            >>> ("section_name","variable name", values)
-
-            or as an iterable of the same form, where values can be any
-            value or string or an iterable of those
-        series_add : tuple or iterable
-            The same as above, with the exception that no combination
-            will be performed between the elements during a run
-        restart : str
-            Whether or not to use the restart files. Must be either
-            "overwrite" or "append" if set
-        restart_from : [str | function]
-            Path to restart from if a string. If a function: a function
-            which takes the current dmp_folder and kwargs (given to
-            execute_runs) as input and returns the restart path. The
-            function is handy when restarting from jobs while doing a
-            parameter scan.
-        redistribute : int
-            The number of processors to redistribute the restart files
-            to. Calls the redistribute function in boutdata.restart.
-            Will only be effective if "restart" is not None
-        use_expand : bool
-            Only used when restarting.
-            If there is a mismatch between the requested nz and
-            the nz found in the restart file, boutdata.restart.resizeZ
-            will be used if use_expand = True, otherwise
-            boutdata.restart.resize will be used
-        max_proc : int
-            Only used when restarting.
-            Max processors used when calling boutdata.restart.resize
-        intrp_method : str
-            Only used when restarting, and when the mesh is resized.
-            Sets the method used in the interpolation.
-        add_noise : dict
-            Adds noise to the restart files by calling the addnoise
-            function in boutdata.restart. Will only be effective if
-            "restart" is not None. Must be given as a dict if used, where
-            each key is the name of a variable (or None, in which case
-            noise is added to all the evolved variables) and each value
-            is the scale of the noise (if None, the default scale is
-            used).
-            Example:
-
-            >>> add_noise = {"n":1e-4, "Te":1e-5}
-
-        cpy_source : bool
-            Whether or not to copy the source files to the folder of the
-            *.dmp.* files
-        cpy_grid : bool
-            Whether or not to copy the grid files to the folder of the
-            *.dmp.* files
-        sort_by : str
-            Defines what will be the fastest running variable in the
-            run, which can be useful if one for example would like to
-            "group" the runs before sending them to a post-processing
-            function (see the docstring of the run function for more
-            info).
The possibilities are - - * "spatial_domain" - * "temporal_domain" - * "solver" - * "ddx_first" - * "ddx_second" - * "ddx_upwind" - * "ddx_flux" - * "ddy_first" - * "ddy_second" - * "ddy_upwind" - * "ddy_flux" - * "ddz_first" - * "ddz_second" - * "ddz_upwind" - * "ddz_flux" - * Any "variable_name" from additional or series_add - * An iterable consisting of several of these. - - If an iterable is given, then the first element is going to - be the fastest varying variable, the second element is going - to be the second fastest varying variable and so on. - make : bool - Whether or not to make the program - allow_size_modification : bool - Whether or not to allow bout_runners modify nx and ny in - order to find a valid split of the domain - """ - #}}} - - # Setting the member data - self._nproc = nproc - self._directory = directory - self._solver = self._set_member_data(solver) - self._mms = mms - self._atol = self._set_member_data(atol) - self._rtol = self._set_member_data(rtol) - self._mxstep = self._set_member_data(mxstep) - self._grid_file = self._set_member_data(grid_file) - self._nx = self._set_member_data(nx) - self._ny = self._set_member_data(ny) - self._nz = self._set_member_data(nz) - self._zperiod = self._set_member_data(zperiod) - self._zmin = self._set_member_data(zmin) - self._zmax = self._set_member_data(zmax) - self._dx = self._set_member_data(dx) - self._dy = self._set_member_data(dy) - self._dz = self._set_member_data(dz) - self._MXG = MXG - self._MYG = MYG - self._NXPE = self._set_member_data(NXPE) - self._ixseps1 = self._set_member_data(ixseps1) - self._ixseps2 = self._set_member_data(ixseps2) - self._jyseps1_1 = self._set_member_data(jyseps1_1) - self._jyseps1_2 = self._set_member_data(jyseps1_2) - self._jyseps2_1 = self._set_member_data(jyseps2_1) - self._jyseps2_2 = self._set_member_data(jyseps2_2) - self._symGlobX = symGlobX - self._symGlobY = symGlobY - self._ddx_first = self._set_member_data(ddx_first) - self._ddx_second = self._set_member_data(ddx_second) - self._ddx_upwind = self._set_member_data(ddx_upwind) - self._ddx_flux = self._set_member_data(ddx_flux) - self._ddy_first = self._set_member_data(ddy_first) - self._ddy_second = self._set_member_data(ddy_second) - self._ddy_upwind = self._set_member_data(ddy_upwind) - self._ddy_flux = self._set_member_data(ddy_flux) - self._ddz_first = self._set_member_data(ddz_first) - self._ddz_second = self._set_member_data(ddz_second) - self._ddz_upwind = self._set_member_data(ddz_upwind) - self._ddz_flux = self._set_member_data(ddz_flux) - self._nout = self._set_member_data(nout) - self._timestep = self._set_member_data(timestep) - self._additional = additional - self._series_add = series_add - self._restart = restart - self._restart_from = restart_from - self._redistribute = redistribute - self._use_expand = use_expand - self._max_proc = max_proc - self._intrp_method = intrp_method - self._add_noise = add_noise - self._cpy_source = cpy_source - self._cpy_grid = cpy_grid - self._sort_by = self._set_member_data(sort_by) - self._make = make - self._allow_size_modification = allow_size_modification - - # Make some space to distinguish from the rest of the terminal - print("\n") - - # Initializing self._warnings and self._error - # self._warnings will be filled with warnings - # self._errors will be filled with errors - # The warnings and errors will be printed when the destructor is called - self._warnings = [] - self._errors = [] - - # Check if make is a boolean - if self._make is not None: - if not isinstance(self._make, 
bool): - self._errors.append("TypeError") - raise TypeError("make must be boolean if set") - - # Set self._program_name - self._set_program_name(prog_name) - - # Make the file if make is True - if self._make: - self._run_make() - - # Obtain the MPIRUN - self._MPIRUN = getmpirun() - - # The run type is going to be written in the run.log file - self._run_type = "basic" - - #{{{ Set self._additional and self._series_add correctly - # self._additional must be on a special form (see - # basic_error_checker). - if self._additional is not None: - if not(hasattr(self._additional, "__iter__")) or\ - (isinstance(self._additional, str)) or\ - (isinstance(self._additional, dict)): - # Put additional as a double iterable - self._additional = ((self._additional),) - else: - if not(hasattr(self._additional[0], "__iter__")) or\ - (isinstance(self._additional[0], str)) or\ - (isinstance(self._additional, dict)): - # Put self._additional as an iterable - self._additional = (self._additional,) - # Do the same for series_add - if self._series_add is not None: - if not(hasattr(self._series_add, "__iter__")) or\ - (isinstance(self._series_add, str)) or\ - (isinstance(self._series_add, dict)): - # Put series_add as a double iterable - self._series_add = ((self._series_add),) - else: - if not(hasattr(self._series_add[0], "__iter__")) or\ - (isinstance(self._series_add[0], str)) or\ - (isinstance(self._series_add, dict)): - # Put self._series_add as an iterable - self._series_add = (self._series_add,) - #}}} - - # Check that nproc is given correctly - if not isinstance(self._nproc, int): - message = ("nproc is of wrong type\n" - "nproc must be given as an int") - self._errors.append("TypeError") - raise TypeError(message) - - #{{{ Set NYPE from NXPE and nproc - if self._NXPE is not None: - # Make self._NYPE as an appendable list - self._NYPE = [] - - # Check that NXPE is of correct type - check_if_int = ( - (self._NXPE, "NXPE"), - ) - self._check_for_correct_type(var=check_if_int, - the_type=int, - allow_iterable=True) - - # Check that NXPE and nproc is consistent - for cur_NXPE in self._NXPE: - if (self._nproc % cur_NXPE) != 0: - self._errors.append("RuntimeError") - message = "nproc =" + str(self._nproc) +\ - " not divisible by" +\ - " NXPE = " + str(cur_NXPE) +\ - " (the number of processors in the x direction)" - raise RuntimeError(message) - - # Append NYPE - self._NYPE.append(int(self._nproc / cur_NXPE)) - else: - self._NYPE = None - #}}} - - # Check if the instance is set correctly - self._check_for_basic_instance_error() - - # We need to find options in BOUT.inp. We use BOUTOption for this - # Object initialization - self._inputFileOpts = BOUTOptions(self._directory) - # Convert indices to lowercase - self._inputFileOpts.root = dict( - (key.lower(), value) for key, value in self._inputFileOpts.root.items()) - self._inputFileOpts.mesh = dict( - (key.lower(), value) for key, value in self._inputFileOpts.mesh.items()) - - # Initialize outputs from execute runs - self._PBS_id = [] - self._dmp_folders = [] -#}}} - -#{{{__del__ - def __del__(self): - """The destructor will print all the warning and error messages""" - - # Switch to see if error occured - error_occured = False - - # If errors occured - if len(self._errors) > 0: - message = "! A {} occurred. !".format(self._errors[0]) - # Find the boarder length - len_boarder = len(message) - # Print the message - print("{0}{1}\n{2}\n{1}{0}". - format("\n" * 2, "!" 
* len_boarder, message)) - error_occured = True - if len(self._warnings) > 0: - print("{}The following WARNINGS were detected:\n{}". - format("\n" * 3, "-" * 80)) - for warning in self._warnings: - print(warning + "\n") - print("{}{}".format("-" * 80, "\n" * 3)) - elif len(self._warnings) > 0 and not(error_occured): - print("{} {}".format("\n" * 3, "~" * 69)) - print(("| No WARNINGS detected before instance destruction in " - "'bout_runners'. |")) -#}}} - -#{{{execute_runs - def execute_runs(self, - job_dependencies=None, - remove_old=False, - post_processing_function=None, - post_process_after_every_run=False, - **kwargs): - #{{{docstring - """ - Makes a run for each of the combination given by the member data. - - Parameters - ---------- - job_dependencies : [None | sequence (not str)], default: None - If the jobs should be run after other jobs. This input is - only effective if the object calling the function is a - PBS_runner. - remove_old : bool, default : False - Whether old run files should be deleted or not - post_processing_function : callable - A function to be called after one or several run. This - function must accept the string of self._dmp_folder if - post_process_after_each_run is True, and a tuple of dmp - folders if post_process_after_each_run is False - post_process_after_each_run : bool, default: False - Boolean telling whether post_processing_function should be - called after each run (if True), or after the number of runs - decided by self._sort_by (see the constructor of - basic_runner for more info) - **kwargs : any - Parameters to be passed to the post_processing_function and - self._restart_from function (if any) - - Returns - ------- - self._dmp_folders : sequence (not str) - A sequence of the folder locations made from the runner - self._PBS_id : sequence (not str) - A sequence of the PBS ids is returned. - """ - #}}} - - if self.__class__.__name__ == "PBS_runner": - # Wait for jobs to finish - if job_dependencies is not None: - # Ensure that job_dependencies are just numbers - job_dependencies = [int(re.match('\d+', j).group(0)) - for j in job_dependencies - if re.match('\d+', j) is not None] - if len(job_dependencies) != 0: - print("\nWill now wait for these jobs to finish\n{}\n". 
- format("\n".join([str(j) for j in job_dependencies]))) - while len(job_dependencies) > 0: - # Get current jobs - status, output = shell("qstat", pipe=True) - job_queue = output.split("\n") - # Find the jobIds - job_queue = [int(re.match('\d+', j).group(0)) - for j in job_queue - if re.match('\d+', j) is not None] - # These jobs will be removed from job_dependencies - pop_jobs = [] - for job in job_dependencies: - if job not in job_queue: - pop_jobs.append(job) - - for job in pop_jobs: - job_dependencies.remove(job) - - time.sleep(60) - - # Check for errors in the run function - self._error_check_for_run_input(remove_old, - post_processing_function, - post_process_after_every_run) - - # Create the run log - self._create_run_log() - - # We check that the given combination of nx and ny is - # possible to perform with the given nproc - self._get_correct_domain_split() - - # Get the combinations of the member functions - possibilities = self._get_possibilities() - combinations = self._get_combinations(possibilities) - - # If we are not running the post processing function after every - # run, make an appendable list over all the runs which will be - # passed as an input parameter to the post processing function - if not(post_process_after_every_run): - seq_of_dmp_folders = [] - - # Print either "now running" or "now submitting" - self._print_run_or_submit() - - # Set self._len_group if post_processing_function is set, but - # self._sort_by is None - if (post_processing_function is not None) and\ - (not(post_process_after_every_run)) and\ - (self._len_group is None): - # self._len_group is to a number by _get_swapped_input_list - # (which is called if self._sort_by is not None) - # If there are no sorting, self._len_group will be None - # We will make self._len_group the length of the - # number of runs here - self._len_group = len(combinations) - - # The run - for run_no, combination in enumerate(combinations): - - # Get the folder to store the data - do_run = self._prepare_dmp_folder(combination, **kwargs) - if not(do_run): - # Skip this run - continue - - if remove_old: - # Remove old data - self._remove_data() - - # Copy the grid (if any) if cpy_grid files is True - if (self._cpy_grid) and (self._grid_file is not None): - combination_list = combination.split() - # Loop through all the combinations - for elem in combination_list: - # Find the grid - if elem[0:4] == "grid": - # Remove grid=, so that only the path remains - cur_grid = elem.replace("grid=", "") - # Copy the grid file - shutil.copy2(cur_grid, self._dmp_folder) - - # Check if the run has been performed previously - do_run = self._check_if_run_already_performed() - # Do the actual runs - if do_run: - # Call the driver for a run - self._run_driver(combination, run_no) - - # If we would like to call a post_processing function - if post_processing_function is not None: - if post_process_after_every_run: - # Call the post processing function - self._call_post_processing_function( - function=post_processing_function, - folders=(self._dmp_folder,), - **kwargs) - else: - # Append the dmp folder to the list of dmp folders - seq_of_dmp_folders.append(self._dmp_folder) - # If the run_no+1 is divisible by self._len_group - if ((run_no + 1) % self._len_group == 0): - # Call the post processing function - self._call_post_processing_function( - function=post_processing_function, - folders=tuple(seq_of_dmp_folders), - **kwargs) - # Reset the seq_of_dmp_folders - seq_of_dmp_folders = [] - - # Cast to tuple - self._PBS_id = tuple(self._PBS_id) - 
if hasattr(self._dmp_folders, "__iter__")\ - and not isinstance(self._dmp_folders, str): - self._dmp_folders = tuple(el for el in self._dmp_folders) - else: - self._dmp_folders = (self._dmp_folders,) - - return self._dmp_folders, self._PBS_id -#}}} - -#{{{_run_driver - def _run_driver(self, combination, run_no): - """ - The machinery which actually performs the runs. - """ - - # Get the time when the run starts - start = datetime.datetime.now() - # Do the run - output, run_time = self._single_run(combination) - # Print info to the log file for the runs - self._append_run_log(start, run_no, run_time) - print("\n") -#}}} - -#{{{ Functions called by the constructor -#{{{_set_member_data - def _set_member_data(self, input_parameter): - """ - Returns the input_parameter as a tuple if it is different than None, - and if it is not iterable - """ - - # If the input_data is not set, the value in BOUT.inp will - # be used - if input_parameter is not None: - # If the input_data is not an iterable, or if it is a - # string: Put it to a tuple - if not(hasattr(input_parameter, "__iter__")) or\ - (type(input_parameter)) == str: - input_parameter = (input_parameter,) - - return input_parameter -#}}} - -#{{{_set_program_name - def _set_program_name(self, prog_name=None): - """ - Will set self._program_name and make the program if the - prog_name.o file is not found. - - Parameters - ---------- - prog_name : str - Name of the exceutable. If None, the name will be set from - the *.o file. - """ - - if prog_name is not(None): - # Check that a string is given - if not isinstance(prog_name, str): - message = "prog_name must be given as a string" - self._errors.append("TypeError") - raise TypeError(message) - # Search for file - if os.path.isfile(prog_name): - self._program_name = prog_name - else: - print("{} not found, now making:".format(prog_name)) - # File not found, make - self._run_make() - # Set the make flag to False, so it is not made again - self._make = False - # Search for file - if not(os.path.isfile(prog_name)): - message = ("{} could not be found after make. " - "Please check for spelling mistakes").\ - format(prog_name) - self._errors.append("RuntimeError") - raise RuntimeError(message) - else: - self._program_name = prog_name - else: - # Find the *.o file - o_files = glob.glob("*.o") - if len(o_files) > 1: - message = ("More than one *.o file found. " - "The first *.o file is chosen. 
" - "Consider setting 'prog_name'.") - self._warning_printer(message) - self._warnings.append(message) - self._program_name = o_files[0].replace(".o", "") - elif len(o_files) == 1: - # Pick the first instance as the name - self._program_name = o_files[0].replace(".o", "") - else: - # Check if there exists a make - make_file = glob.glob("*make*") - if len(make_file) > 0: - # Run make - self._run_make() - # Set the make flag to False, so it is not made again - self._make = False - # Search for the .o file again - o_files = glob.glob("*.o") - if len(o_files) > 0: - self._program_name = o_files[0].replace(".o", "") - else: - self._program_name = False - message = ("The constructor could not make your" - " program") - self._errors.append("RuntimeError") - raise RuntimeError(message) - else: - self._errors.append("RuntimeError") - raise RuntimeError( - "No make file found in current directory") -#}}} - -#{{{_check_for_basic_instance_error - def _check_for_basic_instance_error(self): - """Check if there are any type errors when creating the object""" - - #{{{Check if nproc has the correct type - if not isinstance(self._nproc, int): - message = ("nproc is of wrong type\n" - "nproc must be given as an int") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if directory has the correct type - if not isinstance(self._directory, str): - message = ("directory is of wrong type\n" - "directory must be given as a str") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if MXG and MYG has the correct type - # Check if MXG and MYG is given as a single int - # One should not be allowed to specify MXG and MYG as an - # iterable, as MXG is used to find the correct split, and - # because it in principle could be incompatible with the method - # (first, second etc.) used - check_if_int = ( - (self._MXG, "MXG"), - (self._MYG, "MYG"), - ) - self._check_for_correct_type(var=check_if_int, - the_type=int, - allow_iterable=False) - #}}} - - #{{{Check if BOUT.inp exsists in the self._directory - # Check if there are any BOUT.inp files in the self._directory - inp_file = glob.glob(os.path.join(self._directory, "BOUT.inp")) - if len(inp_file) == 0: - self._errors.append("RuntimeError") - raise RuntimeError("No BOUT.inp files found in '{}'". 
- format(self._directory)) - #}}} - - #{{{Check grid_file are strings, that they exsist, and one can sort - if self._grid_file is not None: - # Set a variable which is has length over one if the test fails - not_found = [] - if isinstance(self._grid_file, str): - # See if the grid_file can be found - grid_file = glob.glob(self._grid_file) - # The grid_file cannot be found - if len(grid_file) == 0: - not_found.append(self._grid_file) - # If several grid files are given - elif hasattr(self._grid_file, "__iter__"): - for elem in self._grid_file: - # See if the grid_file can be found - grid_file = glob.glob(elem) - # The grid_file cannot be found - if len(grid_file) == 0: - not_found.append(elem) - if len(not_found) > 0: - message = ("The following grid files were not found\n" - "{}".format("\n".join(not_found))) - self._errors.append("RuntimeError") - raise RuntimeError(message) - if (self._sort_by is not None) and ("grid_file" in self._sort_by): - # Set a success flag - success = True - # The start name of the files - start_name = "grid_file" - # Check if grid file is iterable - if hasattr(self._grid_file, "__iter__"): - for grid in grid_file: - if grid[0:len(start_name)] != start_name: - success = False - else: - # Only one grid file - if self._grid_file[0:len(start_name)] != start_name: - success = False - if not(success): - message = ("The name of the grid file must start with" - " 'grid_file' in order to sort by them.") - self._errors.append("RuntimeError") - raise RuntimeError(message) - - #}}} - - #{{{Check nx, ny, nz, zperiod, nout, mxstep, separatrix are int/iterable - check_if_int = ( - (self._nx, "nx"), - (self._ny, "ny"), - (self._nz, "nz"), - (self._zperiod, "zperiod"), - (self._nout, "nout"), - (self._mxstep, "mxstep"), - (self._ixseps1, "ixseps1"), - (self._ixseps2, "ixseps2"), - (self._jyseps1_1, "jyseps1_1"), - (self._jyseps1_2, "jyseps1_2"), - (self._jyseps2_1, "jyseps2_1"), - (self._jyseps2_2, "jyseps2_2"), - ) - - self._check_for_correct_type(var=check_if_int, - the_type=int, - allow_iterable=True) - #}}} - - #{{{Check timestep, atol, rtol, zmin/max, dx, dy, dz is Number/iterable - # Check if the following is a number - check_if_number = ( - (self._timestep, "timestep"), - (self._zmin, "zmin"), - (self._zmax, "zmax"), - (self._dx, "dx"), - (self._dy, "dy"), - (self._dz, "dz"), - (self._atol, "atol"), - (self._rtol, "rtol") - ) - - self._check_for_correct_type(var=check_if_number, - the_type=Number, - allow_iterable=True) - #}}} - - #{{{Check if solver, grid_file, methods and sort_by is str/tuple of str - # Check if instance is string, or an iterable containing strings - check_if_string = ( - (self._solver, "solver"), - (self._grid_file, "grid_file"), - (self._ddx_first, "ddx_first"), - (self._ddx_second, "ddx_second"), - (self._ddx_upwind, "ddx_upwind"), - (self._ddx_flux, "ddx_flux"), - (self._ddy_first, "ddy_first"), - (self._ddy_second, "ddy_second"), - (self._ddy_upwind, "ddy_upwind"), - (self._ddy_flux, "ddy_flux"), - (self._ddz_first, "ddz_first"), - (self._ddz_second, "ddz_second"), - (self._ddz_upwind, "ddz_upwind"), - (self._ddz_flux, "ddz_flux"), - (self._sort_by, "sort_by") - ) - - self._check_for_correct_type(var=check_if_string, - the_type=str, - allow_iterable=True) - #}}} - - #{{{Check if solver is set to the correct possibility - # Check if the solver is possible - # From /include/bout/solver.hxx - possible_solvers = ( - "cvode", - "pvode", - "ida", - "petsc", - "slepc", - "karniadakis", - "rk4", - "euler", - "rk3ssp", - "power", - "arkode", - "imexbdf2", - 
"snes", - "rkgeneric", - ) - - # Do the check if the solver is set - if self._solver is not None: - self._check_if_set_correctly(var=(self._solver, "solver"), - possibilities=possible_solvers) - #}}} - - #{{{Check if the methods is set to the correct possibility - # Check if ddx or ddy is possible - possible_method = [ - "C2", - "C4", - ] - - # Make a tuple of the variables - the_vars = ( - (self._ddx_first, "ddx_first"), - (self._ddx_second, "ddx_second"), - (self._ddy_first, "ddy_first"), - (self._ddy_second, "ddy_second") - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_method) - - # Check if ddz is possible - possible_method.append("FFT") - - # Make a tuple of the variables - the_vars = ( - (self._ddz_first, "ddz_first"), - (self._ddz_second, "ddz_second") - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_method) - - # Check for upwind terms - possible_method = ( - "U1", - "U2", - "U4", - "W2", - "W3", - ) - - # Make a tuple of the variables - the_vars = ( - (self._ddx_upwind, "ddx_upwind"), - (self._ddy_upwind, "ddy_upwind"), - (self._ddz_upwind, "ddz_upwind") - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_method) - - # Check for flux terms - possible_method = ( - "SPLIT", - "NND" - ) - - # Make a tuple of the variables - the_vars = ( - (self._ddx_flux, "ddx_flux"), - (self._ddy_flux, "ddy_flux"), - (self._ddz_flux, "ddz_flux") - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_method) - #}}} - - #{{{Check if sort_by is set to the correct possibility - # Appendable list - possible_sort_by = [] - - # Append the 1st element of sort_checks if the 0th elements of - # sort_checks is not None - sort_checks = ( - (self._nx, "spatial_domain"), - (self._ny, "spatial_domain"), - (self._nz, "spatial_domain"), - (self._dx, "spatial_domain"), - (self._dy, "spatial_domain"), - (self._dz, "spatial_domain"), - (self._ixseps1, "spatial_domain"), - (self._ixseps2, "spatial_domain"), - (self._jyseps1_1, "spatial_domain"), - (self._jyseps1_2, "spatial_domain"), - (self._jyseps2_1, "spatial_domain"), - (self._jyseps2_2, "spatial_domain"), - (self._symGlobX, "spatial_domain"), - (self._symGlobY, "spatial_domain"), - (self._timestep, "temporal_domain"), - (self._nout, "temporal_domain"), - (self._solver, "solver"), - (self._mms, "solver"), - (self._atol, "solver"), - (self._rtol, "solver"), - (self._mxstep, "solver"), - (self._ddx_first, "ddx_first"), - (self._ddx_second, "ddx_second"), - (self._ddx_upwind, "ddx_upwind"), - (self._ddx_flux, "ddx_flux"), - (self._ddy_first, "ddy_first"), - (self._ddy_second, "ddy_second"), - (self._ddy_upwind, "ddy_upwind"), - (self._ddy_flux, "ddy_flux"), - (self._ddz_first, "ddz_first"), - (self._ddz_second, "ddz_second"), - (self._ddz_upwind, "ddz_upwind"), - (self._ddz_flux, "ddz_flux"), - (self._grid_file, "grid_file") - ) - - for sort_check in sort_checks: - if sort_check[0] is not None: - if not(sort_check[1] in possible_sort_by): - possible_sort_by.append(sort_check[1]) - - # Append the additionals and series_add - # If additional is set - if self._additional is not None: - for additional in self._additional: - # The additional now contains a 
tuple of three elements - # We would like to extract the section (if any) and variable - # and append them to the possibilities list - # If the section is empty - if additional[0] == "": - section = "" - else: - section = additional[0] + ":" - possible_sort_by.append(section + additional[1]) - # Do the same for series_add - if self._series_add is not None: - for series_add in self._series_add: - if series_add[0] == "": - section = "" - else: - section = series_add[0] + ":" - possible_sort_by.append(section + series_add[1]) - - # Make a tuple of the variables - the_vars = ( - (self._sort_by, "sort_by"), - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_sort_by) - #}}} - - #{{{Check if restart is set correctly - if self._restart is not None: - if not isinstance(self._restart, str): - self._errors.append("TypeError") - raise TypeError("restart must be set as a string when set") - - possible_method = ( - "overwrite", - "append" - ) - - # Make a tuple of the variables - the_vars = ( - (self._restart, "restart"), - ) - - for var in the_vars: - # Do the check if the method is set - if var[0] is not None: - self._check_if_set_correctly(var=var, - possibilities=possible_method) - #}}} - - #{{{Check if restart_from is set correctly - if self._restart_from is not None: - # Throw warning if restart is None - if self._restart is None: - message = "restart_from will be ignored as restart = None" - self._warning_printer(message) - self._warnings.append(message) - - if not isinstance(self._restart_from, str)\ - and not(hasattr(self._restart_from, "__call__")): - self._errors.append("TypeError") - message = ("restart_from must be set as a string or a " - "function returning the restart path when set") - raise TypeError(message) - #}}} - - #{{{Check if redistribute is set correctly - if self._redistribute is not None: - # Throw warning if restart is None - if self._restart is None: - message = "redistribute will be ignored as restart = None" - self._warning_printer(message) - self._warnings.append(message) - # Throw a warning if restart is append - elif self._restart == "append": - message = ("redistribute is not None and restart = 'append' is" - " currently incompatible, setting restart to" - " 'overwrite'") - if not(self._restart_from): - message += " (previous files will be saved)" - self._warning_printer(message) - self._warnings.append(message) - self._restart = "overwrite" - if not isinstance(self._redistribute, int): - self._errors.append("TypeError") - message = "redistribute must be set as an integer when set" - raise TypeError(message) - # If nproc is set, and this is incompatible with NPES - if self._nproc != self._redistribute: - raise RuntimeError("nproc and redistribute must be equal") - #}}} - - #{{{Check if max_proc has the correct type - if self._restart is not None and self._max_proc is not None: - if not isinstance(self._max_proc, int): - message = ("max_proc is of wrong type\n" - "max_proc must be given as an int") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if intrp_method has the correct type - if self._restart is not None and self._intrp_method is not None: - if not isinstance(self._intrp_method, str): - message = ("intrp_method is of wrong type\n" - "intrp_method must be given as a string") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if add_noise is set correctly - if self._add_noise is not None: - # 
Throw warning if restart is None - if self._restart is None: - message = "add_noise will be ignored as restart = None" - self._warning_printer(message) - self._warnings.append(message) - - raise_error = False - is_key_none = False - if isinstance(self._add_noise, dict): - for var, scale in self._add_noise.items(): - if not isinstance(var, str): - if var is not(None): - raise_error = True - break - else: - is_key_none = True - if not(isinstance(scale, Number) or (scale is None)): - raise_error = True - break - if is_key_none and len(self._add_noise.keys()) > 1: - raise_error = True - else: - raise_error = True - - if raise_error: - self._errors.append("TypeError") - message = ("add_noise must be on the form " - "{'var1': number_or_none," - " 'var2': number_or_none, ...}'\n" - "or\n" - "{None: number_or_none}" - ) - raise TypeError(message) - #}}} - - #{{{Check for options set in both member data and in the grid file - if self._grid_file is not None: - # Check if the following variables are found in the grid - # file - check_if_in_grid = ( - (self._nx, "nx"), - (self._ny, "ny"), - (self._nz, "nz"), - (self._dx, "dx"), - (self._dy, "dy"), - (self._dz, "dz"), - (self._MXG, "MXG"), - (self._MYG, "MYG"), - (self._NXPE, "NXPE"), - (self._NYPE, "NYPE"), - (self._ixseps1, "ixseps1"), - (self._ixseps2, "ixseps2"), - (self._jyseps1_1, "jyseps1_1"), - (self._jyseps1_2, "jyseps1_2"), - (self._jyseps2_1, "jyseps2_1"), - (self._jyseps2_2, "jyseps2_2"), - (self._symGlobX, "symmmetricGlobalX"), - (self._symGlobY, "symmmetricGlobalY") - ) - for var in check_if_in_grid: - # If the variable is set - if var[0] is not None: - # Loop through the grid files - for grid_file in self._grid_file: - # Open (and automatically close) the grid files - f = DataFile(grid_file) - # Search for mesh data in the grid file - grid_variable = f.read(var[1]) - # If the variable is found - if grid_variable is not None: - self._errors.append("TypeError") - message = ("{0} was specified both in the " - "driver and in the grid file.\n" - "Please remove {}" - " from the driver if you would " - "like to run with a grid file.") - raise TypeError(message.format(var[1])) - #}}} - - #{{{If grid files are set: Use nx, ny and nz values in the grid file - if self._grid_file is not None: - # Make a dict of appendable lists - spatial_domain = {"nx": [], "ny": [], "nz": []} - for grid_file in self._grid_file: - # Open (and automatically close) the grid files - f = DataFile(grid_file) - # Search for nx, ny and nz in the grid file - mesh_types = ("nx", "ny", "nz") - for mesh_type in mesh_types: - grid_variable = f.read(mesh_type) - # If the variable is found - if grid_variable is not None: - spatial_domain[mesh_type].append(grid_variable) - # Check that the lengths of nx, ny and nz are the same - # unless they are not found - len_nx = len(spatial_domain["nx"]) - len_ny = len(spatial_domain["ny"]) - len_nz = len(spatial_domain["nz"]) - if len_nx != 0: - self._nx = spatial_domain["nx"] - if len_ny != 0: - self._ny = spatial_domain["ny"] - if len_nz != 0: - self._nz = spatial_domain["nz"] - #}}} - - #{{{Check that nx, ny and nz are of the same length - if self._nx is not None and self._ny is not None: - self._check_if_same_len((self._nx, "nx"), (self._ny, "ny")) - if self._nx is not None and self._nz is not None: - self._check_if_same_len((self._nx, "nx"), (self._nz, "nz")) - if self._ny is not None and self._nz is not None: - self._check_if_same_len((self._ny, "ny"), (self._nz, "nz")) - #}}} - - #{{{Check that NXPE and NYPE are of the same length as 
nx, ny, nz - if self._nx is not None and self._NXPE is not None: - self._check_if_same_len((self._nx, "nx"), (self._NXPE, "NXPE")) - if self._ny is not None and self._NXPE is not None: - self._check_if_same_len((self._ny, "ny"), (self._NXPE, "NXPE")) - if self._nz is not None and self._NXPE is not None: - self._check_if_same_len((self._nz, "nz"), (self._NXPE, "NXPE")) - - if self._nx is not None and self._NYPE is not None: - self._check_if_same_len((self._nx, "nx"), (self._NYPE, "NYPE")) - if self._ny is not None and self._NYPE is not None: - self._check_if_same_len((self._ny, "ny"), (self._NYPE, "NYPE")) - if self._nz is not None and self._NYPE is not None: - self._check_if_same_len((self._nz, "nz"), (self._NYPE, "NYPE")) - #}}} - - #{{{Check (zperiod), (zmin, zmax) and (dz) is not set simultaneously - if (self._zperiod is not None and - (self._zmin is not None or self._zmax is not None)): - self._errors.append("TypeError") - message = "zperiod and zmin or zmax cannot be set simultaneously." - raise TypeError(message) - elif (self._dz is not None and - (self._zmin is not None or self._zmax is not None)): - self._errors.append("TypeError") - message = "dz and zmin or zmax cannot be set simultaneously." - raise TypeError(message) - elif (self._zperiod is not None and self._dz): - self._errors.append("TypeError") - message = "dz and zperiod cannot be set simultaneously." - raise TypeError(message) - #}}} - - #{{{Check that dz is not set - # dz is currently set throught zmin and zmax - if self._dz is not None: - self._errors.append("TypeError") - message = ("dz can currently just be set through zmin and zmax\n" - "dz = 2*pi*(zmax-zmin)/(MZ)") - raise TypeError(message) - #}}} - - #{{{Check that dx, dy and dz are of the same length - if self._dx is not None and self._dy is not None: - self._check_if_same_len((self._dx, "dx"), (self._dy, "dy")) - if self._dx is not None and self._dz is not None: - self._check_if_same_len((self._dx, "dx"), (self._dz, "dz")) - if self._dy is not None and self._dz is not None: - self._check_if_same_len((self._dy, "dy"), (self._dz, "dz")) - #}}} - - #{{{Check that (dx, nx), (dy, ny) and (dz,nz) are of the same length - if self._dx is not None and self._nx is not None: - self._check_if_same_len((self._dx, "dx"), (self._nx, "nx")) - if self._dy is not None and self._ny is not None: - self._check_if_same_len((self._dy, "dy"), (self._ny, "ny")) - if self._nz is not None and self._dz is not None: - self._check_if_same_len((self._dz, "dz"), (self._nz, "nz")) - #}}} - - #{{{ Check that timestep and nout have the same len - if self._timestep is not None and self._nout is not None: - self._check_if_same_len((self._timestep, "timestep"), - (self._nout, "nout")) - #}}} - - #{{{Check that additional and series_add are on the correct form - self._error_check_additional((self._additional, "additional")) - self._error_check_additional((self._series_add, "series_add")) - #}}} - - #{{{Check that self._series_add[:][2] have the same length - if self._series_add is not None: - # Make the second indices iterable if they are not already - # Start by converting to list, so that self._series becomes - # modifyable - self._series_add = list(list(el) for el in self._series_add) - for index in range(len(self._series_add)): - if not(hasattr(self._series_add[index][2], "__iter__")): - self._series_add[index][2] = (self._series_add[index][2],) - # Conver to tuple - self._series_add = tuple(tuple(el) for el in self._series_add) - - # Collect all second indices - third_indicies = tuple(elems[2] 
for elems in self._series_add)
-            # Find the lengths of the third indices
-            lengths = tuple(
-                len(elem) for elem in third_indicies if (
-                    not isinstance(
-                        elem, str) and not isinstance(
-                        elem, dict)))
-
-            # Check that the lengths of the third indices are the same
-            # L.count(value) -> integer -- return number of occurrences
-            # of value
-            # stackoverflow.com/questions/3844801/check-if-all-elements-in-a-list-are-identical
-            if lengths.count(lengths[0]) != len(lengths):
-                message = ("The length of the third index of the elements"
-                           " of series_add must be the same")
-                self._errors.append("TypeError")
-                raise TypeError(message)
-        #}}}
-
-        #{{{Check mms, symGlobX, symGlobY, cpy_src/grid, use_expand and
-        #   allow_size_mod are bool
-        check_if_bool = (
-            (self._mms, "mms"),
-            (self._symGlobX, "symGlobX"),
-            (self._symGlobY, "symGlobY"),
-            (self._cpy_source, "cpy_source"),
-            (self._cpy_grid, "cpy_grid"),
-            (self._use_expand, "use_expand"),
-            (self._allow_size_modification, "allow_size_modification")
-        )
-
-        self._check_for_correct_type(var=check_if_bool,
-                                     the_type=bool)
-        #}}}
-
-        #{{{Check grid_file is not None if cpy_grid == True
-        if (self._grid_file is None) and (self._cpy_grid):
-            # Raise error
-            self._errors.append("TypeError")
-            message = ("Cannot copy the grid files if none is given"
-                       " in 'grid_file'")
-            raise TypeError(message)
-        #}}}
-
-        #{{{Check that zmin and zmax have the same length
-        if (self._zmin is not None) and (self._zmax is not None):
-            self._check_if_same_len((self._zmin, "zmin"),
-                                    (self._zmax, "zmax"))
-
-        #}}}
-#}}}
-#}}}
-
-#{{{Functions called by _check_for_basic_instance_error
-    #{{{_error_check_additional
-    def _error_check_additional(self, input_member):
-        #{{{docstring
-        """
-        Checks that the input_member is on the following form:
-
-        >>> input_member = ((section1, name1, (value1-1, value1-2, ...)),
-                            (section2, name2, (value2-1, value2-2, ...)),
-                            ...)
- - Parameters - ---------- - input member: [self._additional | self._series_add] - input_member[0] is the input data and - input_member[1] is the name of the input data - """ - #}}} - - # If input_member is set - if input_member[0] is not None: - # Set a success variable that will fail if anything goes - # wrong - success = True - - # Loop through all elements in input_member - for elem in input_member[0]: - # Check if self._addition is iterable, but not a string - # or dict - if (hasattr(elem, "__iter__")) and\ - (not isinstance(elem, str)) and\ - (not isinstance(elem, dict)): - if isinstance(elem[0], str): - # Check that the second element (the name) is a - # string - if not isinstance(elem[1], str): - success = False - # If more than three elements are given - if len(elem) != 3: - success = False - # elem[0] is not a string - else: - success = False - # elem is not iterable or is a dict or a string - else: - success = False - if not(success): - message =\ - ("{0} is on the wrong form.\n" - "{0} should be on the form\n" - "{0}=\ \n" - " ((section1, name1, (value1-1, value1-2,...)),\ \n" - " (section2, name2, (value2-1, value2-2,...)),\ \n" - " ...))\n").format(input_member[1]) - self._errors.append("TypeError") - raise TypeError(message) - #}}} -#}}} - -#{{{ Functions called by the execute_runs function -#{{{_error_check_for_run_input - def _error_check_for_run_input(self, - remove_old, - post_processing_function, - post_process_after_every_run - ): - """ - Check if there are any type errors in input for the run function - """ - - #{{{Check if remove_old is of the correct type - check_if_bool = ( - (remove_old, "remove_old"), - ) - - self._check_for_correct_type(var=check_if_bool, - the_type=bool) - #}}} - - #{{{Check if remove_old and restart is set on the same time - if remove_old and self._restart is not None: - self._errors.append("RuntimeError") - raise RuntimeError("You should not remove old data if you" - " want a restart run") - #}}} - - #{{{Check that the post_processing_function is a fuction - if (post_processing_function is not None) and\ - (not(hasattr(post_processing_function, "__call__"))): - self._errors.append("RuntimeError") - message = ("post_process_after_every_run must be a" - " function") - raise RuntimeError(message) - #}}} - - #{{{Check that the post_process_after_every_run is not set alone - if (post_process_after_every_run and - post_processing_function is None): - self._errors.append("RuntimeError") - message = ("post_process_after_every_run can only be set if" - " post_processing_function is given") - raise RuntimeError(message) - #}}} - - #{{{Check that the post_process_after_every_run is a boolean - if (post_process_after_every_run is not None) and\ - (not isinstance(post_process_after_every_run, bool)): - self._errors.append("RuntimeError") - message = ("post_process_after_every_run must be set to" - " a boolean when set") - raise RuntimeError(message) - #}}} - - # Check for errors in a child class - self._check_for_child_class_errors( - remove_old, - post_processing_function, - post_process_after_every_run - ) -#}}} - -#{{{_create_run_log - def _create_run_log(self): - """Makes a run_log file if it doesn't exists""" - - # Checks if run_log exists - self._run_log = os.path.join(self._directory, "run_log.txt") - if os.path.isfile(self._run_log) == False: - # The header - header = ("start_time", "run_type", "run_no", - "run_time_H:M:S", "dump_folder") - header_format = "{:<19} {:<9} {:<6} {:<17} {:<}" - # Create the log file, and print the header - with 
open(self._run_log, "w") as f: - f.write(header_format.format(*header) + "\n") - - # Preparation of the run - print("\nRunning with inputs from '{}'".format(self._directory)) -#}}} - -#{{{_get_correct_domain_split - def _get_correct_domain_split(self): - """ - Checks that the grid can be split in the correct number of - processors. - - If not, vary the number of points until value is found. - """ - - if (self._nx is None) and (self._ny is None): - #{{{ Set local_nx and local_ny from input - # Set the local nx value - local_nx = [self._get_dim_from_input("nx")] - - # Set the local ny value - local_ny = [self._get_dim_from_input("ny")] - #}}} - elif (self._nx is None): - #{{{ Set local_nx from input - # ny is given, so we only need to find nx - local_ny = list(self._ny) - - # Set the local nx value - local_nx = [self._get_dim_from_input("nx")] - - # Get the same length on nx and ny - local_nx = local_nx * len(local_ny) - #}}} - elif (self._ny is None): - #{{{ Set local_ny from input - # nx is given, so we only need to find ny - local_nx = list(self._nx) - - # Set the local ny value - local_ny = [self._get_dim_from_input("ny")] - - # Get the same length on nx and ny - local_ny = local_ny * len(local_nx) - #}}} - else: - local_nx = list(self._nx) - local_ny = list(self._ny) - - # If NXPE is not set, we will try to find a optimal grid size - # Flag to determine if a warning should be printed - produce_warning = False - print("\nChecking the grid split for the meshes\n") - # Obtain MXG - MXG, _MYG = self._get_MXG_MYG() - if self._NXPE is None: - #{{{ If NXPE is not set - for size_nr in range(len(local_nx)): - print("Checking nx={} and ny={}". - format(local_nx[size_nr], local_ny[size_nr])) - # Check to see if succeeded - init_split_found = False - cur_split_found = False - add_number = 1 - # Counter to see how many times the while loop has been - # called - count = 0 - - #{{{While cur_split_found == False - while cur_split_found == False: - # The same check as below is performed internally in - # BOUT++ (see boutmesh.cxx under - # if(options->isSet("NXPE"))) - for i in range(1, self._nproc + 1, 1): - MX = local_nx[size_nr] - 2 * MXG - # self._nproc is called NPES in boutmesh - if (self._nproc % i == 0) and \ - (MX % i == 0) and \ - (local_ny[size_nr] % (self._nproc / i) == 0): - # If the test passes - cur_split_found = True - - # Check if cur_split_found is true, eventually - # update the add_number - local_nx, local_ny, add_number, produce_warning\ - = self._check_cur_split_found(cur_split_found, - produce_warning, - add_number, - size_nr, - local_nx, - local_ny, - using_nx=True, - using_ny=True) - - #{{{ Check if the split was found the first go. - # This will be used if self_allow_size_modification is - # off, or if we are using a grid file - if count == 0 and cur_split_found: - init_split_found = True - #}}} - - # Add one to the counter - count += 1 - #}}} - - # Check if initial split succeeded - self._check_init_split_found(init_split_found, - size_nr, - local_nx, - local_ny, - test_nx=True, - test_ny=True, - produce_warning=produce_warning) - #}}} - else: - #{{{ If NXPE is set - # Check if NXPE and NYPE is set consistently with nproc - self._check_NXPE_or_NYPE(local_nx, - local_ny, - type_str="NXPE", - MXG=MXG) - self._check_NXPE_or_NYPE(local_nx, - local_ny, - type_str="NYPE") - #}}} -#}}} - -#{{{_get_possibilities - def _get_possibilities(self): - """ - Returns the list of the possibilities. 
In get_combinations - the elements of this list is going to be put together to a list - of strings which will be used when making a run. - """ - - #{{{Set combination of nx, ny and nz (if not set in grid_file) - # Appendable list - spatial_grid_possibilities = [] - if (self._grid_file is None): - # Dictionary where - # - the first element is the variable itself - # - the second element is the section of the variable - # - the third element is an appendable list - spatial_grid_str = { - "nx": (self._nx, "mesh:", []), - "ny": (self._ny, "mesh:", []), - "nz": (self._nz, "mesh:", []), - "dx": (self._dx, "mesh:", []), - "dy": (self._dy, "mesh:", []), - "dz": (self._dz, "mesh:", []), - "zperiod": (self._zperiod, "", []), - "zmin": (self._zmin, "", []), - "zmax": (self._zmax, "", []), - } - # Store the keys as an own variable - keys = tuple(spatial_grid_str.keys(), ) - # Append the different dimension to the list of strings - for key in keys: - # If the variable is not empty - if spatial_grid_str[key][0] is not None: - # Fill the appendable list with the elements from - # the variable - for elem in spatial_grid_str[key][0]: - spatial_grid_str[key][2].append( - "{}{}={}". - format(spatial_grid_str[key][1], key, elem) - ) - - # The goal is to combine the these strings to one string - # Find the largest length - lengths = tuple(len(spatial_grid_str[key][2]) for key in keys) - max_len = np.max(lengths) - # Make the strings the same length - for key in keys: - # We do this by filling it with empty strings - while len(spatial_grid_str[key][2]) <= max_len: - spatial_grid_str[key][2].append("") - - # Append this to the spatial grid possibilities as a string - for number in range(max_len): - # Make a tuple - current_grid = tuple(spatial_grid_str[key][2][number] - for key in keys) - # Join the strings in the list and append - spatial_grid_possibilities.append(" ".join(current_grid)) - #}}} - - #{{{Set the combination of timestep and nout if is not None - # Appendable lists - temporal_grid_possibilities = [] - timestep_str = [] - nout_str = [] - # Append the different time options to the list of strings - if self._timestep is not None: - for timestep in self._timestep: - timestep_str.append("timestep={}".format(timestep)) - if self._nout is not None: - for nout in self._nout: - nout_str.append("nout={}".format(nout)) - # Combine the strings to one string - # Find the largest length - max_len = np.max([len(timestep_str), len(nout_str)]) - # Make the strings the same length - if len(timestep_str) < max_len: - timestep_str.append("") - if len(nout_str) < max_len: - nout_str.append("") - # Append the temporal grid possibilities as a string - for number in range(max_len): - # Make a tuple - current_times = (timestep_str[number], - nout_str[number] - ) - # Join the strings in the list and append - temporal_grid_possibilities.append(" ".join(current_times)) - #}}} - - #{{{Set the combination of the series_add option if is not None - # Appendable list - series_add_possibilities = [] - if self._series_add is not None: - # Dictionary to handle the data, where the key is going to - # be the element number in self._series_add, and the values - # are going to be the sub dictionary defined below - all_info = {} - # Loop through all elements and fill the dictionary - for nr, elem in enumerate(self._series_add): - # Put in the sub dictionary - all_info[nr] = {"values": None, - "section_and_var": None, - "sec_var_vals": []} - # Fill the values - all_info[nr]["values"] = elem[2] - # Fill the section and variable key - 
all_info[nr]["section_and_var"] = "{}:{}=".\ - format(elem[0], elem[1]) - # Fill in the combinations - for val in all_info[nr]["values"]: - all_info[nr]["sec_var_vals"].append( - all_info[nr]["section_and_var"] + str(val) - ) - - # Make an appendable list - all_sec_var_vals = [] - for key in all_info.keys(): - all_sec_var_vals.append(all_info[key]["sec_var_vals"]) - - # Zip the sec_var_vals together (* unpacks), join them with - # a space, and append them to series_add_possibilities - for one_possibility in zip(*all_sec_var_vals): - series_add_possibilities.append(" ".join(one_possibility)) - #}}} - - #{{{Put non-iterable variables into a list if they are not set to None - # This makes the member data iterable, and usable in - # generate_possibilities - if self._MXG is not None: - self._MXG = (self._MXG,) - if self._MYG is not None: - self._MYG = (self._MYG,) - if self._mms is not None: - self._mms = (self._mms,) - if self._symGlobX is not None: - self._symGlobX = (self._symGlobX,) - if self._symGlobY is not None: - self._symGlobY = (self._symGlobY,) - #}}} - - #{{{tuple of tuple of variables to generate possibilities from - tuple_of_variables = [ - (self._solver, "solver", "type"), - (self._mms, "solver", "mms"), - (self._atol, "solver", "atol"), - (self._rtol, "solver", "rtol"), - (self._mxstep, "solver", "mxstep"), - (self._MXG, "", "MXG"), - (self._MYG, "", "MYG"), - (self._NXPE, "", "NXPE"), - (self._NYPE, "", "NYPE"), - (self._grid_file, "", "grid"), - (self._ddx_first, "ddx", "first"), - (self._ddx_second, "ddx", "second"), - (self._ddx_upwind, "ddx", "upwind"), - (self._ddx_flux, "ddx", "flux"), - (self._ddy_first, "ddy", "first"), - (self._ddy_second, "ddy", "second"), - (self._ddy_upwind, "ddy", "upwind"), - (self._ddy_flux, "ddy", "flux"), - (self._ddz_first, "ddz", "first"), - (self._ddz_second, "ddz", "second"), - (self._ddz_upwind, "ddz", "upwind"), - (self._ddz_flux, "ddz", "flux"), - (self._ixseps1, "mesh", "ixseps1"), - (self._ixseps2, "mesh", "ixseps2"), - (self._jyseps1_1, "mesh", "jyseps1_1"), - (self._jyseps1_2, "mesh", "jyseps1_2"), - (self._jyseps2_1, "mesh", "jyseps2_1"), - (self._jyseps2_2, "mesh", "jyseps2_2"), - (self._symGlobX, "mesh", "symmetricGlobalX"), - (self._symGlobY, "mesh", "symmetricGlobalY") - ] - #}}} - - #{{{Append the additional option to tuple of variables if set - if self._additional is not None: - for additional in self._additional: - # If the last element of additional is not iterable we need - # put them into a tuple to make them iterable (in order to - # use them in generate_possibilities) - if (not(hasattr(additional[2], "__iter__"))) or\ - (isinstance(additional[2], str)): - # We have to specify the whole additional, as this can - # be given as a tuple, and tuples does not support item - # assignments - additional = (additional[0], - additional[1], - (additional[2],)) - # Append the additional to tuple of variables - tuple_of_variables.append( - (additional[2], - additional[0], - additional[1]) - ) - #}}} - - #{{{List of the possibilities of the variables - # Start out with the already generated - # spatial_grid_possibilities and temporal_grid_possibilities - list_of_possibilities = [spatial_grid_possibilities, - temporal_grid_possibilities, - series_add_possibilities] - - # Append the possibilities to the list of possibilities - for var in tuple_of_variables: - list_of_possibilities.append( - self._generate_possibilities(var[0], var[1], var[2]) - ) - #}}} - - # Return the list_of possibilities - return list_of_possibilities -#}}} - 
-#{{{_get_combinations
-    def _get_combinations(self, input_list):
-        """
-        The input_list is a list with lists as elements.
-        Returns a list of all combinations between the elements of the
-        input_list.
-        """
-
-        # Remove empty elements in input_list in order for
-        # itertools.product to work
-        input_list = [elem for elem in input_list if elem != []]
-
-        # If we would like to sort the input list (choosing which
-        # variable is to be the fastest varying)
-        if self._sort_by is not None:
-            # Swap the list corresponding to the sort_by statement so
-            # that that list will be the last. itertools.product will
-            # then make that list the fastest varying one
-            input_list = self._get_swapped_input_list(input_list)
-        else:
-            # Initialize this member data to None
-            self._len_group = None
-
-        # The last element in the input_list will be the fastest varying
-        # element
-        all_combinations_as_tuple = list(itertools.product(*input_list))
-
-        # all_combinations_as_tuple is a list with tuples as elements
-        # We would like to combine the elements in these tuples to one
-        # string
-        # Make an appendable list
-        all_combinations_as_strings = []
-
-        # Loop over the elements in the list containing tuples
-        for a_tuple in all_combinations_as_tuple:
-            # Join the elements in a tuple and store it
-            all_combinations_as_strings.append(" ".join(a_tuple))
-
-        return all_combinations_as_strings
-#}}}
-
-#{{{_print_run_or_submit
-    def _print_run_or_submit(self):
-        """Prints "Now running" """
-        print("\nNow running:")
-#}}}
-
-#{{{_prepare_dmp_folder
-    def _prepare_dmp_folder(self, combination, **kwargs):
-        """
-        Prepare the dump folder for runs
-
-        - Obtain the folder name to restart from
-        - Obtain folder name and copy the input file to the final folder.
-        - Check if restart files are present if restart is set (set
-          restart to None if not found).
-        - Find appropriate mxg and myg if redistribute is set.
-        - Copy restart files if restart_from and/or redistribute is set
-        - Redistribute restart files if redistribute and restart is set
-        - Resize the runs (change nx, ny and/or nz) if the dimension is
-          changed.
-        - resizeZ if nz is set and it deviates from what is found in the
-          restart files.
-        - Add noise to the restart files if add_noise and restart is set
-        - Copy files if restart is set to overwrite
-        - Copy the source files to the final folder if cpy_source is True.
-
-        Parameters
-        ----------
-        combination : sequence (not str)
-            The current combination to be run
-        **kwargs : any
-            Extra parameters given to the self._restart_from function
-            (if any)
-
-        Returns
-        -------
-        do_run : bool
-            False if there was any trouble with the copying
-        """
-
-        # do_run is set to True by default
-        do_run = True
-
-        #{{{ Obtain folder name and copy the input file
-        folder_name = self._get_folder_name(combination)
-        self._dmp_folder = os.path.join(self._directory, folder_name)
-        # If the last character is "/", then remove it
-        if self._dmp_folder[-1] == "/":
-            self._dmp_folder = self._dmp_folder[:-1]
-
-        # Create the folder if it doesn't exist
-        self._create_folder(self._dmp_folder)
-
-        if not isinstance(self._dmp_folders, tuple):
-            # If self._dmp_folders is a tuple, it means that execute_runs
-            # has been called more than once.
- # self._dmp_folders should then not be appended - self._dmp_folders.append(self._dmp_folder) - - # If self._dmp_folder contains anything other than - # self._directory - if self._dmp_folder != self._directory: - # Copy the input file into this folder - src = os.path.join(self._directory, "BOUT.inp") - shutil.copy2(src, self._dmp_folder) - #}}} - - #{{{ Obtain the folder name to restart from - if self._restart_from is not None: - - if isinstance(self._restart_from, str): - self._cur_restart_from = self._restart_from - elif hasattr(self._restart_from, "__call__"): - self._cur_restart_from =\ - self._restart_from(self._dmp_folder, **kwargs) - if not isinstance(self._cur_restart_from, str): - message = ("The restart_from from function must " - "return a string") - raise ValueError(message) - - # Check if any restart files are present - # This check is performed after waiting for other runs to finish - if len(glob.glob( - os.path.join(self._cur_restart_from, "*restart*"))) == 0: - self._errors.append("FileNotFoundError") - raise FileNotFoundError("No restart files found in " + - self._cur_restart_from) - - else: - self._cur_restart_from = None - #}}} - - #{{{ Toggle restart - dmp_files = glob.glob(os.path.join(self._dmp_folder, "*.restart.*")) - # If no dump files are found, set restart to "None" - if len(dmp_files) == 0 and\ - self._restart is not None and\ - self._cur_restart_from is None: - message = ("'restart' was set to {}" - ", but no restart files found." - " Setting 'restart' to None").format(self._restart) - self._restart = None - self._warning_printer(message) - self._warnings.append(message) - #}}} - - #{{{ Find the appropriate mxg and myg if redistribute is set - if self._redistribute: - redistribute_MXG, redistribute_MYG = self._get_MXG_MYG() - #}}} - - #{{{ Copy restart files if restart_from and/or redistribute is set - if self._restart and self._cur_restart_from: - if self._redistribute: - # Use the redistribute function to copy the restart file - do_run = self._check_if_run_already_performed( - restart_file_search_reason="redistribute") - - if do_run: - print("\nCopying files from {0} to {1}\n". - format(self._cur_restart_from, self._dmp_folder)) - do_run = redistribute(self._redistribute, - path=self._cur_restart_from, - output=self._dmp_folder, - mxg=redistribute_MXG, - myg=redistribute_MYG, - ) - if not do_run: - message = "Redistribute failed, run skipped" - self._warning_printer(message) - self._warnings.append(message) - else: - # Copy the files to restart - do_run = self._copy_run_files() - - elif self._restart and self._redistribute: - # Save the files from previous runs - dst = self._move_old_runs(folder_name="redistribute", - include_restart=True) - - do_run = redistribute(self._redistribute, - path=dst, - output=self._dmp_folder, - mxg=redistribute_MXG, - myg=redistribute_MYG, - ) - #}}} - - #{{{ Save files if restart is set to "overwrite" - # NOTE: This is already done if self._redistribute is set - if self._restart == "overwrite" and not(self._redistribute) and do_run: - self._move_old_runs(folder_name="restart", - include_restart=False) - #}}} - - #{{{Finding cur_nz - if self._restart and do_run: - if self._nz: - # The current nz should be in the second index as any - # eventual other names would come from additional or - # series_add - cur_nz = int(self._dmp_folder. - split("nz")[1]. - split("/")[0]. 
- replace("_", "")) - else: - # The nz size is not changed, will use the one from - # the input file - try: - cur_nz = self._get_dim_from_input("nz") - except KeyError: - cur_nz = self._get_dim_from_input("mz") - - # Make sure cur_nz is divisible by 2 if cur_nz != 1 - if cur_nz != 1: - if cur_nz % 2 != 0: - old_cur_nz = cur_nz - cur_nz += 1 - if cur_nz % 2 != 0: - cur_nz = old_cur_nz - 1 - else: - message = "nz = {} not a power of 2".format(cur_nz) - raise RuntimeError(message) - #}}} - - # Flag to check if the mesh has been resized - resized = False - - #{{{ Resize nx, ny and nz of the evolved fields - if self._restart and do_run and (self._nx or self._ny or self._nz): - # Obtain MXG and MYG - MXG, MYG = self._get_MXG_MYG() - - # Checking if the sizes are changed - # Finding the current sizes - # The current sizes should be in the second index as any - # eventual other names would come from additional or - # series_add - # Finding nx - if self._nx: - cur_nx = int(self._dmp_folder. - split("nx")[1]. - split("/")[0]. - split("_")[1]) - else: - # The nx size is not changed, will use the one from - # the input file - cur_nx = self._get_dim_from_input("nx") - # Finding ny - if self._ny: - cur_ny = int(self._dmp_folder. - split("ny")[1]. - split("/")[0]. - split("_")[1]) - else: - # The ny size is not changed, will use the one from - # the input file - cur_ny = self._get_dim_from_input("ny") - - # Finding the sizes in the restart files - file_name = glob.glob( - os.path.join(self._dmp_folder, "BOUT.restart.0.*"))[0] - - with DataFile(file_name) as f: - # Loop over the variables in the file - NYPE = f.read("NYPE") - NXPE = f.read("NXPE") - for var in f.list(): - # Read the data - data = f.read(var) - - # Find 3D variables - if f.ndims(var) == 3: - local_nx, local_ny, nz = data.shape - MXSUB = local_nx - 2 * MXG - MYSUB = local_ny - 2 * MYG - nx = NXPE * MXSUB + 2 * MXG - ny = NYPE * MYSUB - - if nx == cur_nx and ny == cur_ny and nz == cur_nz: - call_resize = False - break - elif nx == cur_nx and ny == cur_ny and nz != cur_nz: - if nz == 1: - # Override user specification to save time - self._use_expand = True - if self._use_expand: - call_resize = False - else: - call_resize = True - break - else: - call_resize = True - if self._restart == "append": - message = ("Cannot change nx, ny and/or nz " - "when appending\n") - # Extra plane in nz - message += ( - "Requested nx = {}, nx in restart file = {}\n" - "Requested ny = {}, ny in restart file = {}\n" - "Requested nz = {}, nz in restart file = {}\n" - "Resizing:\n"). 
format( - cur_nx, nx, cur_ny, ny, cur_nz, nz) - raise IOError(message) - else: - break - - if call_resize: - # Move runs - dst = self._move_old_runs(folder_name="beforeResize", - include_restart=True) - - # Redistributing the data to one file - # Redistribute - success = redistribute(1, - path=dst, - output=self._dmp_folder, - mxg=MXG, - myg=MYG, - ) - if not success: - message = ("Failed to redistribute to one file when " - "resizing evolved variables") - raise RuntimeError(message) - - # Move the redistributed to the resize folder - file_name = glob.glob(os.path.join(self._dmp_folder, - "BOUT.restart.0.*"))[0] - path, name = os.path.split(file_name) - before_resize_dir = os.path.join(path, "beforeResizingOneFile") - self._create_folder(before_resize_dir) - shutil.move(file_name, before_resize_dir) - - if self._use_expand: - print("\nDimension change found:\n" - "Requested nx = {}, nx in restart file = {}\n" - "Requested ny = {}, ny in restart file = {}\n" - "Resizing:\n" - .format(cur_nx, nx, cur_ny, ny)) - the_nz = nz - else: - print("\nDimension change found:\n" - "Requested nx = {}, nx in restart file = {}\n" - "Requested ny = {}, ny in restart file = {}\n" - "Requested nz = {}, nz in restart file = {}\n" - "Resizing:\n" - .format(cur_nx, nx, cur_ny, ny, cur_nz, nz)) - the_nz = cur_nz - - # NOTE: Different definition for nx and ny - success = resize(cur_nx, cur_ny + 2 * MYG, the_nz, - mxg=MXG, - myg=MYG, - path=before_resize_dir, - output=self._dmp_folder, - method=self._intrp_method, - maxProc=self._max_proc) - print("\n") - - if not success: - do_run = False - if self._cur_restart_from: - print("Something went wrong: Reomving {}\n". - format(os.path.split(dst)[0], "\n")) - shutil.rmtree(os.path.split(dst)[0]) - message = "Resize failed, skipping run." - self._warnings.append(message) - self._warning_printer(message) - - # Move the resized restart file - path, name = os.path.split(file_name) - # Create a temporary file which "redistribute" can read - # from - after_resize_dir = os.path.join(path, "afterResizingOneFile") - self._create_folder(after_resize_dir) - shutil.move(file_name, after_resize_dir) - - # Redistribute to original split - if self._redistribute: - nproc = self._redistribute - else: - nproc = self._nproc - - success = redistribute(nproc, - path=after_resize_dir, - output=self._dmp_folder, - mxg=MXG, - myg=MYG, - ) - - if not success: - message = ("Failed to redistribute after " - "resizing evolved variables") - if self._cur_restart_from: - print("Something went wrong: Reomving {}\n". - format(os.path.split(dst)[0], "\n")) - shutil.rmtree(os.path.split(dst)[0]) - raise RuntimeError(message) - - resized = True - #}}} - - #{{{ Resize nz only - if self._restart and do_run\ - and self._nz and not resized and self._use_expand: - # The current nz should be in the second index as any - # eventual other names would come from additional or - # series_add - cur_nz = int(self._dmp_folder. - split("nz")[1]. - split("/")[0]. 
- split("_")[1]) - if self._restart == "append": - # Check if nz is the same as in the restart files - # Start by opening the 0th restart file - file_name = glob.glob(os.path.join(self._dmp_folder, - "BOUT.restart.0.*"))[0] - with DataFile(file_name) as f: - # Loop over the variables in the file - for var in f.list(): - # Read the data - data = f.read(var) - - # Find 3D variables - if f.ndims(var) == 3: - _nx, _ny, nz = data.shape - - if nz != cur_nz: - message = ("Cannot change nz when appending\n" - "nz in restart file = {}\n" - "current run nz = {}").\ - format(nz, cur_nz) - raise IOError(message) - else: - break - - # Get the folder of the restart files - elif resized: - # Copy the files to afterResizeRedistr - after_resize_dir = os.path.join(path, "afterResizeRedistr") - self._create_folder(after_resize_dir) - file_names = glob.glob( - os.path.join(self._dmp_folder, "BOUT.restart.*")) - for file_name in file_names: - shutil.copy2(file_name, after_resize_dir) - # The restart files are stored in the resize folder - folder = "afterResizeRedistr*" - elif self._restart == "overwrite" and not(self._redistribute): - # The restart files are stored in the restart folder - folder = "restart*" - elif self._restart == "overwrite" and self._redistribute: - if self._cur_restart_from: - _ = self._move_old_runs(folder_name="redistribute", - include_restart=True) - - # The restart files are stored in the restart folder - folder = "redistribute*" - - if self._restart == "overwrite": - # Find the restart files - location = sorted( - glob.glob( - os.path.join( - self._dmp_folder, - folder))) - location = location[-1] - - # Check whether nz is changing or not - file_name = glob.glob( - os.path.join(location, "BOUT.restart.0.*"))[0] - - with DataFile(file_name) as f: - # Loop over the variables in the file - for var in f.list(): - # Read the data - data = f.read(var) - - # Find 3D variables - if f.ndims(var) == 3: - nx, ny, nz = data.shape - - if nz == cur_nz: - call_expand = False - else: - if nz < cur_nz: - call_expand = True - else: - if self._cur_restart_from: - print(("Something went wrong: " - "Reomving {}\n"). - format(os.path.split(location)[0])) - shutil.rmtree( - os.path.split(location)[0]) - message = ("Cannot decrease nz from {} to" - " {} in a restart").\ - format(nz, cur_nz) - raise IOError(message) - - if call_expand: - print("\nnz is bigger than in restart file, expanding:\n") - success = resizeZ(cur_nz, - path=location, - output=self._dmp_folder) - print("\n") - - if not success: - do_run = False - if self._cur_restart_from: - print("Something went wrong: Reomving {}\n". - format(os.path.split(location)[0])) - shutil.rmtree(os.path.split(location)[0]) - message = "resizeZ failed, skipping run." - self._warnings.append(message) - self._warning_printer(message) - #}}} - - #{{{ Add noise - if self._restart and self._add_noise and do_run: - print("Now adding noise\n") - for var, scale in self._add_noise.items(): - if scale is None: - scale = 1e-5 - print("No scale set for '{}', setting to {}\n". - format(var, scale)) - try: - addnoise(path=self._dmp_folder, - var=var, - scale=scale) - except Exception as ex: - print("{0}{1}addnoise failed with the following error:{0}". - format("\n" * 4, "!" 
* 3)) - raise ex - print("\n") - #}}} - - #{{{ Copy the source files if cpy_source is True - if self._cpy_source and do_run: - # This will copy all C++ files to the dmp_folder - cpp_extension = (".cc", ".cpp", ".cxx", ".C", ".c++", - ".h", ".hpp", ".hxx", ".h++") - # Copy for all files in the extension - for extension in cpp_extension: - file_names = glob.glob("*" + extension) - for a_file in file_names: - shutil.copy2(a_file, self._dmp_folder) - #}}} - - return do_run -#}}} - -#{{{_remove_data - def _remove_data(self): - """ - Removes dmp.*, fail.*, restart.*, log.* and *.cpy files from the - dump directory - """ - - print("Removing old data") - remove_extensions = ("dmp.*", "fail.*", "restart.*", "log.*", "cpy") - files_to_rm = [] - for extension in remove_extensions: - files_to_rm.extend( - glob.glob( - os.path.join(self._dmp_folder, "*." + extension))) - - # Cast to set (unique values) - files_to_rm = set(files_to_rm) - for f in files_to_rm: - os.remove(f) - - # Remove dirs - folder_to_rm = glob.glob( - os.path.join(self._dmp_folder, "before_redistribution_*")) - folder_to_rm.extend(glob.glob(os.path.join(self._dmp_folder, "run_*"))) - # Filter to only inlcude folders - folder_to_rm = tuple(f for f in folder_to_rm if os.path.isdir(f)) - for f in folder_to_rm: - shutil.rmtree(f) -#}}} - -#{{{_check_if_run_already_performed - def _check_if_run_already_performed(self, - restart_file_search_reason=None): - """ - Checks if the run has been run previously. - - Parameters - ---------- - restart_file_search_reason : ["restart_from" | "redistribute" | None ] - Reason to check for restart files if not None. - - Returns - ------- - bool : [True|False] - If true is returned, the run will be performed, if not the - run will not be performed - """ - - dmp_files = glob.glob(os.path.join(self._dmp_folder, "*.dmp.*")) - - if restart_file_search_reason: - restart_files =\ - glob.glob(os.path.join(self._dmp_folder, "*.restart.*")) - # Check if dmp or restart files are found - if len(dmp_files) != 0 or len(restart_files) != 0: - message = ("Restart or dmp files was found in {}" - " when {}" - " was set. Run skipped.").\ - format(self._dmp_folder, restart_file_search_reason) - self._warning_printer(message) - self._warnings.append(message) - return False - else: - return True - # Check if dmp files are found if restart is None - elif len(dmp_files) != 0 and self._restart is None: - print("Skipping the run as *.dmp.* files was found in " - + self._dmp_folder) - print(("To overwrite old files, run with" - " self.execute_runs(remove_old=True)\n")) - return False - else: - return True -#}}} - -#{{{_call_post_processing_function - def _call_post_processing_function( - self, - function=None, - folders=None, - **kwargs): - """Function which calls the post_processing_function""" - - function(folders, **kwargs) - -#}}} -#}}} - -#{{{Functions called by _error_check_for_run_input - #{{{_check_for_child_class_errors - def _check_for_child_class_errors( - self, - remove_old, - post_processing_function, - post_process_after_every_run - ): - """ - Function which check for errors in a child class. 
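`_remove_data` above clears a run directory purely by file extension, deduplicating the glob matches before deleting. A compact equivalent, with the directory name hypothetical.

```python
# Remove old BOUT++ output by extension, as _remove_data does above.
import glob
import os
import shutil

dmp_folder = "data"  # hypothetical run directory

for ext in ("dmp.*", "fail.*", "restart.*", "log.*", "cpy"):
    # set() guards against deleting the same match twice
    for f in set(glob.glob(os.path.join(dmp_folder, "*." + ext))):
        os.remove(f)

# Old run folders are removed recursively
for d in glob.glob(os.path.join(dmp_folder, "run_*")):
    if os.path.isdir(d):
        shutil.rmtree(d)
```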
- - Here a virtual function - """ - pass - #}}} -#}}} - -#{{{Function called by _set_program_name -#{{{_run_make - def _run_make(self): - """Make cleans and makes the .cxx program""" - - print("Make clean eventually previously compiled\n") - command = "make clean" - status, output = shell(command, pipe=True) - print("Making the .cxx program\n") - command = "make" - status, output = shell(command, pipe=True) - print(output) - # Check if any errors occurred - if status != 0: - self._errors.append("RuntimeError") - raise RuntimeError("Error encountered during make.") -#}}} -#}}} - -#{{{ Functions called by the basic_error_checker -#{{{_check_for_correct_type - def _check_for_correct_type(self, - var=None, - the_type=None, - allow_iterable=None): - """ - Checks if a variable has the correct type - - Parameters - ---------- - var : tuple - var[0] - the variable (a data member) - - var[1] - the name of the variable given as a string - the_type : type - The data type to be checked - allow_iterable : bool - If an iterable with the element as type is allowed - """ - - # Set a variable which is False if the test fails - success = True - for cur_var in var: - # There is an option that the variable could be set to None, - # and that the default value from BOUT.inp will be used - if cur_var[0] is not None: - # Check for the correct type - if isinstance(cur_var[0], the_type) == False: - # Check if it is an iterable if iterables are - # allowed - if allow_iterable and\ - hasattr(cur_var[0], "__iter__") and\ - not isinstance(cur_var[0], dict): - for elem in cur_var[0]: - # Check for the correct type - if isinstance(elem, the_type) == False: - success = False - else: - # Neither correct type, nor iterable - success = False - if not(success): - message = ("{} is of wrong type\n" - "{} must be {}").\ - format(cur_var[1], the_type.__name__) - if allow_iterable: - # If iterable is allowed, then add this - message += (" or an iterable with {}" - " as elements.").format(the_type.__name__) - self._errors.append("TypeError") - raise TypeError(message) -#}}} - -#{{{_check_if_set_correctly - def _check_if_set_correctly(self, - var=None, - possibilities=None): - """ - Check if a variable is set to a possible variable. 
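`_check_for_correct_type` above accepts either a value of the expected type or, when `allow_iterable` is set, an iterable of such values (dicts are iterable but rejected). The core test, stripped of the error bookkeeping; the helper name is hypothetical.

```python
# Core of the type check above: accept the_type itself, or optionally
# an iterable of the_type (dicts are explicitly excluded).
def has_correct_type(value, the_type, allow_iterable=False):
    if value is None:  # None means "use the BOUT.inp default"
        return True
    if isinstance(value, the_type):
        return True
    if (allow_iterable and hasattr(value, "__iter__")
            and not isinstance(value, dict)):
        return all(isinstance(elem, the_type) for elem in value)
    return False

assert has_correct_type(2, int)
assert has_correct_type((2, 4), int, allow_iterable=True)
assert not has_correct_type({"a": 1}, int, allow_iterable=True)
```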
- Called by the error checkers - """ - - # Set a variable which is False if the test fails - success = True - - # Due to the check done in check_for_correct_type: If the - # variable is not a string it will be an iterable - if not isinstance(var[0], str): - for elem in var[0]: - # Check if the element is contained in the possibilities - if not(elem in possibilities): - success = False - else: - # The variable was a string - if not(var[0] in possibilities): - success = False - - if not(success): - message = ("{} was not set to a possible option.\n" - "The possibilities are \n{}").\ - format(var[1], "\n".join(possibilities)) - self._errors.append("TypeError") - raise TypeError(message) -#}}} - -#{{{_check_if_same_len - def _check_if_same_len(self, object1=None, object2=None): - """Checks if object1 and object2 has the same length - - Input: - object1 - a tuple of the object [0] and its name [1] - object2 - a tuple an object [0] different than object1 together with - its name [1] - """ - - try: - len_dim1 = len(object1[0]) - # If object1 does not have length - except TypeError: - len_dim1 = 1 - try: - len_dim2 = len(object2[0]) - # If object2 does not have length - except TypeError: - len_dim2 = 1 - - if len_dim1 != len_dim2: - message = ("{} and {} must have the same" - " length when specified").format(object1[1], object2[1]) - self._errors.append("RuntimeError") - raise RuntimeError(message) -#}}} -#}}} - -#{{{ Functions called by _get_correct_domain_split - #{{{_check_cur_split_found - def _check_cur_split_found(self, - cur_split_found, - produce_warning, - add_number, - size_nr, - local_nx, - local_ny, - using_nx=None, - using_ny=None): - #{{{docstring - """ - Checks if the current split is found. - - Will add a number if not found. - - Parameters - ---------- - cur_split_found : bool - Whether or not the current split was found - produce_warning : bool - If a warning should be produced - add_number : int - The number added to nx and/or ny - local_nx : [int|sequence of int] - Sequence of values of nx (a local value is used in order not to - alter self._nx) - local_ny : [int|sequence of int] - Sequence of values of ny (a local value is used in order not to - alter self._ny) - size_nr : int - Index of the current nx and/or ny - using_nx : bool - If add_number should be added to nx - using_ny : bool - if add_number should be added to ny - - Returns - ------- - local_nx : [int|sequence of int] - Sequence of values of nx - local_ny : [int|sequence of int] - Sequence of values of ny - add_number : int - The number to eventually be added the next time - produce_warning : bool - Whether or not a warning should be produced - """ - #}}} - - # If the value tried is not a good value - if not cur_split_found: - # Produce a warning - produce_warning = True - if using_nx: - local_nx[size_nr] += add_number - if using_ny: - local_ny[size_nr] += add_number - - print("Mismatch, trying {}*{}". - format(local_nx[size_nr], local_ny[size_nr])) - - # FIXME: This is a crude approach as we are adding one to - # both nx and ny - # Consider: Something like this - # nx+1 ny - # nx ny+1 - # nx-1 ny - # nx ny-1 - # nx+2 ny - # nx ny+2 - # nx-2 ny - # nx ny-2 - # ... 
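The `add_number` update at the top of the next block implements the pattern sketched in the FIXME above: the increment alternates in sign and grows by one on every miss, so the cumulative sum walks the grid size outwards from its starting value. A few iterations make the sequence concrete (the starting nx is hypothetical).

```python
# The add_number recurrence used below gives 1, -2, 3, -4, ..., so the
# running sum tries nx+1, nx-1, nx+2, nx-2, ... in turn.
nx = 68  # hypothetical starting point
add_number = 1
tried = []
for _ in range(6):
    nx += add_number
    tried.append(nx)
    add_number = (-1) ** abs(add_number) * (abs(add_number) + 1)

print(tried)  # [69, 67, 70, 66, 71, 65]
```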
- add_number = (-1)**(abs(add_number))\ - * (abs(add_number) + 1) - else: - # If no warnings has been produced so far - if not(produce_warning): - produce_warning = False - - return local_nx, local_ny, add_number, produce_warning - #}}} - - #{{{_check_init_split_found - def _check_init_split_found(self, - init_split_found, - size_nr, - local_nx, - local_ny, - test_nx=None, - test_ny=None, - produce_warning=None): - #{{{docstring - """ - Check if the initial split was a good choice when checking the grids. - - Will raise eventual errors. - - Parameters - ---------- - init_split_found : bool - Whether or not a good split was found on the first trial - size_nr : int - The index of the current nx, ny or NXPE under consideration - local_nx : [int|sequence of int] - Sequence of values of nx (a local value is used in order not to - alter self._nx) - local_ny : [int|sequence of int] - Sequence of values of ny (a local value is used in order not to - alter self._ny) - test_nx : bool - whether or not the test was run on nx - test_ny : bool - whether or not the test was run on ny - produce_warning : bool - whether or not a warning should be produced - """ - #}}} - - #{{{ If the initial split did not succeed - if not(init_split_found): - # If modification is allowed - if not(self._allow_size_modification) or\ - (self._grid_file is not None): - # If the split fails and the a grid file is given - if self._grid_file is not None: - self._errors.append("RuntimeError") - message = ("The grid can not be split using the" - " current number of nproc.\n" - "Suggest using ") - if test_nx: - message += "nx = {} ".format(self._nx[size_nr]) - if test_ny: - message += "ny = {} ".format(self._ny[size_nr]) - message += " with the current nproc" - raise RuntimeError(message) - # If the split fails and no grid file is given - else: - self._errors.append("RuntimeError") - message = ("The grid can not be split using the" - " current number of nproc.\n" - "Setting allow_size_modification = True" - " will allow modification of the grid" - " so that it can be split with the" - " current number of nproc") - raise RuntimeError(message) - else: - # Set nx and ny - self._nx = local_nx - self._ny = local_ny - #}}} - - #{{{ When the good value is found - print("Successfully found the following good values for the mesh:") - message = "" - if test_nx: - message += "nx = {} ".format(local_nx[size_nr]) - if test_ny: - message += "ny = {} ".format(local_ny[size_nr]) - - print(message + "\n") - #}}} - - #{{{ Make the warning if produced - if produce_warning: - message = "The mesh was changed to allow the split given by nproc" - self._warning_printer(message) - self._warnings.append(message) - #}}} - #}}} - - #{{{_check_NXPE_or_NYPE - def _check_NXPE_or_NYPE(self, - local_nx, - local_ny, - type_str=None, - MXG=None, - produce_warning=None, - ): - #{{{docstring - """ - Check if NXPE or NYPE is consistent with nproc - - Parameters - ---------- - - local_nx : [int|sequence of int] - Sequence of values of nx (a local value is used in order not to - alter self._nx) - local_ny : [int|sequence of int] - Sequence of values of ny (a local value is used in order not to - alter self._ny) - type_str : ["NXPE" | "NYPE"] - Can be either "NXPE" or "NYPE" and is specifying whether - NXPE or NYPE should be checked - MXG : int - The current MXG - produce_warning : bool - Whether or not a warning should be produced - """ - #}}} - - for size_nr in range(len(local_nx)): - # Check the type - if type_str == "NXPE": - print("Checking nx = {} with NXPE = {}". 
- format(local_nx[size_nr], self._NXPE[size_nr])) - elif type_str == "NYPE": - print("Checking ny = {} with NYPE = {}". - format(local_ny[size_nr], self._NYPE[size_nr])) - # Check to see if succeeded - init_split_found = False - cur_split_found = False - add_number = 1 - # Counter to see how many times the while loop has been - # called - count = 0 - - #{{{While cur_split_found == False - while cur_split_found == False: - # The same check as below is performed internally in - # BOUT++ (see boutmesh.cxx under - # if((MX % NXPE) != 0) - # and - # if((MY % NYPE) != 0) - if type_str == "NXPE": - MX = local_nx[size_nr] - 2 * MXG - # self._nproc is called NPES in boutmesh - if (MX % self._NXPE[size_nr]) == 0: - # If the test passes - cur_split_found = True - # Check if cur_split_found is true, eventually - # update the add_number - local_nx, local_ny, add_number, produce_warning\ - = self._check_cur_split_found(cur_split_found, - produce_warning, - add_number, - size_nr, - local_nx, - local_ny, - using_nx=True, - using_ny=False) - elif type_str == "NYPE": - MY = local_ny[size_nr] - # self._nproc is called NPES in boutmesh - if (MY % self._NYPE[size_nr]) == 0: - # If the test passes - cur_split_found = True - # Check if cur_split_found is true, eventually - # update the add_number - local_nx, local_ny, add_number, produce_warning\ - = self._check_cur_split_found(cur_split_found, - produce_warning, - add_number, - size_nr, - local_nx, - local_ny, - using_nx=False, - using_ny=True) - - #{{{ Check if the split was found the first go. - # This will be used if self_allow_size_modification is - # off, or if we are using a grid file - if count == 0 and cur_split_found: - init_split_found = True - #}}} - - # Add one to the counter - count += 1 - #}}} - - # Check if initial split succeeded - if type_str == "NXPE": - self._check_init_split_found(init_split_found, - size_nr, - local_nx, - local_ny, - test_nx=True, - test_ny=False, - produce_warning=produce_warning) - elif type_str == "NYPE": - self._check_init_split_found(init_split_found, - size_nr, - local_nx, - local_ny, - test_nx=False, - test_ny=True, - produce_warning=produce_warning) - #}}} -#}}} - -#{{{Function called by _prepare_dmp_folder -#{{{_get_folder_name - def _get_folder_name(self, combination): - """ - Returning the folder name where the data will be stored. 
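As the comments above note, the divisibility test matches the one BOUT++ performs internally in boutmesh.cxx: a split is valid when the processor count divides the interior grid. Combined with the outward walk, the search reduces to a few lines; the sizes here are hypothetical.

```python
# Walk nx outwards (+1, -1, +2, -2, ...) until the interior grid
# divides evenly over NXPE, as the loop above does. Hypothetical sizes.
def find_valid_nx(nx, NXPE, MXG=2):
    add_number = 1
    while (nx - 2 * MXG) % NXPE != 0:
        nx += add_number
        add_number = (-1) ** abs(add_number) * (abs(add_number) + 1)
    return nx

print(find_valid_nx(67, NXPE=8))  # -> 68, since 68 - 2*2 = 64 = 8 * 8
```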
- - If all options are given the folder structure should be on the - form solver/method/nout_timestep/mesh/additional/grid - """ - - # Combination is one of the combination of the data members - # which is used as the command line arguments in the run - combination = combination.split() - - #{{{Append from eventual grid file - # FIXME: The grid-file names can become long if adding these, - # consider using just path name to gridfile - # If there is a grid file, we will extract the values from the - # file, and put it into this local combination variable, so that - # a proper dmp folder can be made on basis on the variables - # A flag to see whether or not the grid file was found - grid_file_found = False - # Check if grid is in element, and extract its path - for elem in combination: - if elem[0:5] == "grid=": - cur_grid = elem.replace("grid=", "") - grid_file_found = True - - # If the grid file is found, open it - if grid_file_found: - # Open (and automatically close) the grid files - f = DataFile(cur_grid) - # Search for mesh types in the grid file - mesh_types = ( - ("mesh:", "nx"), - ("mesh:", "ny"), - ("mesh:", "nz"), - ("mesh:", "zperiod"), - ("mesh:", "zmin"), - ("mesh:", "zmax"), - ("mesh:", "dx"), - ("mesh:", "dy"), - ("mesh:", "dz"), - ("mesh:", "ixseps1"), - ("mesh:", "ixseps2"), - ("mesh:", "jyseps1_1"), - ("mesh:", "jyseps1_2"), - ("mesh:", "jyseps2_1"), - ("mesh:", "jyseps2_2"), - ("", "MXG"), - ("", "MYG"), - ) - for mesh_type in mesh_types: - grid_variable = f.read(mesh_type[1]) - # If the variable is found - if grid_variable is not None: - if len(grid_variable.shape) > 0: - # Chosing the first - grid_variable =\ - "{:.2e}".format(grid_variable.flatten()[0]) - # Append it to the combinations list - combination.append("{}{}={}".format(mesh_type[0], - mesh_type[1], - grid_variable)) - #}}} - - # Make lists for the folder-type, so that we can append the - # elements in the combination folders if it is found - solver = [] - method = [] - nout_timestep = [] - mesh = [] - additional = [] - grid_file = [] - - # We will loop over the names describing the methods used - # Possible directional derivatives - dir_derivatives = ("ddx", "ddy", "ddz") - - # Check trough all the elements of combination - for elem in combination: - - # If "solver" is in the element - if "solver" in elem: - # Remove 'solver:' and append it to the mesh folder - cur_solver = elem.replace("solver:", "") - cur_solver = cur_solver.replace("=", "_") - # Append it to the solver folder - solver.append(cur_solver) - - # If nout or timestep is in the element - elif ("nout" in elem) or\ - ("timestep" in elem): - # Remove "=", and append it to the - # nout_timestep folder - nout_timestep.append(elem.replace("=", "_")) - - # If any quantity related to mesh is in the combination - elif ("mesh" in elem) or\ - ("MXG" in elem) or\ - ("MYG" in elem) or\ - ("NXPE" in elem) or\ - ("NYPE" in elem) or\ - ("zperiod" in elem) or\ - ("zmin" in elem) or\ - ("zmax" in elem) or\ - (("dx" in elem) and not("ddx" in elem)) or\ - (("dy" in elem) and not("ddy" in elem)) or\ - (("dz" in elem) and not("ddz" in elem)): - # Remove "mesh:", and append it to the mesh folder - cur_mesh = elem.replace("mesh:", "") - cur_mesh = cur_mesh.replace("=", "_") - # Simplify the mesh spacing - if ("dx" in elem) or ("dy" in elem) or ("dz" in elem): - cur_mesh = cur_mesh.split("_") - cur_mesh = "{}_{:.2e}".format( - cur_mesh[0], float(cur_mesh[1])) - mesh.append(cur_mesh) - - # If a grid file is in the combination - elif (elem[0:4] == "grid"): - # Remove .grd 
.nc and = - cur_grid = elem.replace(".grd", "") - cur_grid = cur_grid.replace(".nc", "") - cur_grid = cur_grid.replace("=", "_") - grid_file.append(cur_grid) - - # If the element is none of the above - else: - # It could either be a dir derivative - # Set a flag to state if any of the dir derivative was - # found in the combination - dir_derivative_set = False - # If any of the methods are in combination - for dir_derivative in dir_derivatives: - if dir_derivative in elem: - # Remove ":", and append it to the - # method folder - cur_method = elem.replace(":", "_") - cur_method = cur_method.replace("=", "_") - method.append(cur_method) - dir_derivative_set = True - - # If the dir_derivative_set was not set, the only - # possibility left is that the element is an - # "additional" option - if not(dir_derivative_set): - # Replace ":" and "=" and append it to the - # additional folder - cur_additional = elem.replace(":", "_") - cur_additional = cur_additional.replace("=", "_") - cur_additional = cur_additional.replace('"', "-") - cur_additional = cur_additional.replace("'", "-") - cur_additional = cur_additional.replace("(", ",") - cur_additional = cur_additional.replace(")", ",") - additional.append(cur_additional) - - # We sort the elements in the various folders alphabetically, - # to ensure that the naming convention is always the same, no - # matter how the full combination string looks like - # Sort alphabetically - solver.sort() - #{{{ Manual sort solver - # We want "type" to be first, and "atol" and "rtol" to be last - sort_these = ( - ("type", 0), - ("atol", -1), - ("rtol", -1) - ) - # Loop through everything we want to sort - for sort_this in sort_these: - # Flag to check if found - found_string = False - for elem_nr, elem in enumerate(solver): - if sort_this[0] in elem: - swap_nr = elem_nr - # Set the flag that the string is found - found_string = True - # If type was found - if found_string: - # Swap the elements in the solver - solver[sort_this[1]], solver[swap_nr] =\ - solver[swap_nr], solver[sort_this[1]] - #}}} - method.sort() - nout_timestep.sort() - mesh.sort() - additional.sort() - grid_file.sort() - - # Combine the elements in the various folders - solver = ("_".join(solver),) - method = ("_".join(method),) - nout_timestep = ("_".join(nout_timestep),) - mesh = ("_".join(mesh),) - additional = ("_".join(additional),) - grid_file = ("_".join(grid_file),) - - # Put all the folders into the combination_folder - combination_folder = ( - solver, - method, - nout_timestep, - mesh, - additional, - grid_file - ) - # We access the zeroth element (if given) as the folders are - # given as a sequence - combination_folder = tuple(folder[0] for folder in combination_folder - if (len(folder) != 0) and not("" in folder)) - - # Make the combination folder as a string - combination_folder = "/".join(combination_folder) - - return combination_folder -#}}} - -#{{{_create_folder - def _create_folder(self, folder): - """Creates a folder if it doesn't exists""" - - if not os.path.exists(folder): - os.makedirs(folder) - print(folder + " created\n") -#}}} - -#{{{_copy_run_files - def _copy_run_files(self): - """ - Function which copies run files from self._cur_restart_from - """ - - do_run =\ - self._check_if_run_already_performed( - restart_file_search_reason="restart_from") - - if do_run: - print("\nCopying files from {0} to {1}\n". 
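Each command-line option above is turned into a filesystem-safe folder token through a fixed series of character replacements. The "additional" branch reduces to this; the option string and helper name are hypothetical.

```python
# Sanitize one "additional" option into a folder-name token, using the
# replacements above. The option string is hypothetical.
def option_to_token(elem):
    for old, new in ((":", "_"), ("=", "_"), ('"', "-"),
                     ("'", "-"), ("(", ","), (")", ",")):
        elem = elem.replace(old, new)
    return elem

print(option_to_token("myOpts:const=2.0"))  # myOpts_const_2.0
```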
- format(self._cur_restart_from, self._dmp_folder)) - - # Files with these extension will be given the - # additional extension .cpy when copied to the destination - # folder - extensions_w_cpy = ["inp"] - # When the extension is not a real extension - has_extensions_w_cpy = ["log.*"] - - if self._cpy_source: - extensions_w_cpy.extend(["cc", "cpp", "cxx", "C", "c++", - "h", "hpp", "hxx", "h++"]) - - # Python 3 syntax (not python 2 friendly) - # extensions =\ - # (*extensions_w_cpy, *has_extensions_w_cpy, "restart.*") - extensions = extensions_w_cpy - for item in has_extensions_w_cpy: - extensions.append(item) - extensions.append("restart.*") - - if self._restart == "append": - extensions.append("dmp.*") - - # Copy for all files in the extension - for extension in extensions: - file_names = glob.glob( - os.path.join( - self._cur_restart_from, - "*." + extension)) - for cur_file in file_names: - # Check if any of the extensions matches the current - # string - if any([cur_file.endswith(ewc) - for ewc in extensions_w_cpy]): - # Add ".cpy" to the file name (without the path) - name = os.path.split(cur_file)[-1] + ".cpy" - shutil.copy2(cur_file, - os.path.join(self._dmp_folder, name)) - # When the extension is not a real extension we must - # remove "*" in the string as shutil doesn't accept - # wildcards - elif any([hewc.replace("*", "") in cur_file - for hewc in has_extensions_w_cpy]): - # Add ".cpy" to the file name (without the path) - name = os.path.split(cur_file)[-1] + ".cpy" - shutil.copy2(cur_file, - os.path.join(self._dmp_folder, name)) - else: - shutil.copy2(cur_file, self._dmp_folder) - - return do_run -#}}} - -#{{{_move_old_runs - def _move_old_runs(self, folder_name="restart", include_restart=False): - """Move old runs, return the destination path""" - - # Check for folders in the dmp directory - directories = tuple( - name for name in - os.listdir(self._dmp_folder) if - os.path.isdir(os.path.join( - self._dmp_folder, name)) - ) - # Find occurrences of "folder_name", split, and cast result to number - restart_nr = tuple(int(name.split("_")[-1]) for name in directories - if folder_name in name) - # Check that the sequence is not empty - if len(restart_nr) != 0: - # Sort the folders in ascending order - restart_nr = sorted(restart_nr) - # Pick the last index - restart_nr = restart_nr[-1] - # Add one to the restart_nr, as we want to create - # a new directory - restart_nr += 1 - else: - # Set the restart_nr - restart_nr = 0 - # Create the folder for the previous runs - self._create_folder(os.path.join( - self._dmp_folder, - "{}_{}".format(folder_name, restart_nr))) - - extensions_to_move = ["cpy", "log.*", "dmp.*", - "cc", "cpp", "cxx", "C", "c++", - "h", "hpp", "hxx", "h++"] - - if include_restart: - extensions_to_move.append("restart.*") - - dst = os.path.join(self._dmp_folder, - "{}_{}".format(folder_name, restart_nr)) - - print("Moving old runs to {}\n".format(dst)) - - for extension in extensions_to_move: - file_names =\ - glob.glob(os.path.join(self._dmp_folder, "*." 
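`_move_old_runs` above chooses the next free `folder_name_N` directory by parsing the trailing number of the existing folders. The numbering step in isolation; the layout and helper name are hypothetical.

```python
# Compute the next free "restart_N" folder, as _move_old_runs does
# above. The directory layout is hypothetical.
import os

def next_run_folder(dmp_folder, folder_name="restart"):
    dirs = (d for d in os.listdir(dmp_folder)
            if os.path.isdir(os.path.join(dmp_folder, d)))
    numbers = [int(d.split("_")[-1]) for d in dirs if folder_name in d]
    nr = max(numbers) + 1 if numbers else 0
    return os.path.join(dmp_folder, "{}_{}".format(folder_name, nr))
```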
+ extension)) - - # Cast to unique file_names - file_names = set(file_names) - - # Move the files - for cur_file in file_names: - shutil.move(cur_file, dst) - - if not(include_restart): - # We would like to save the restart files as well - print("Copying restart files to {}\n".format(dst)) - file_names =\ - glob.glob(os.path.join(self._dmp_folder, "*.restart.*")) - - # Cast to unique file_names - file_names = set(file_names) - - # Copy the files - for cur_file in file_names: - shutil.copy2(cur_file, dst) - - return dst -#}}} -#}}} - -#{{{Function called by _run_driver -#{{{_single_run - def _single_run(self, combination): - """Makes a single MPIRUN of the program""" - - # Get the command to be used - command = self._get_command_to_run(combination) - - # Time how long the time took - tic = timeit.default_timer() - - # Launch the command - status, out = launch(command, - runcmd=self._MPIRUN, - nproc=self._nproc, - pipe=True, - verbose=True) - - # If the run returns an exit code other than 0 - if status != 0: - message = "! An error occurred. Printing the output to stdout !" - print("{0}{1}{2}{1}{0}{3}". - format("\n", "!" * len(message), message, out)) - self._errors.append("RuntimeError") - message = ("An error occurred the run." - " Please see the output above for details.") - # Search if parantheses are present, but without ' or " - if ("(" in combination and - not(re.search(r'\"(.*)\(', combination) - or re.search(r"\'(.*)\(", combination)))\ - or (")" in combination and - not(re.search(r'\)(.*)\"', combination) - or re.search(r"\)(.*)\'", combination))): - message = ( - "A '(' and/or ')' symbol seem to have appeared in the" - " command line.\nIf this true, you can avoid" - " this problem by adding an extra set of" - " quotation marks. For example\n\n" - "additional=('variable', 'bndry_xin'," - " '\"dirichlet_o4(0.0)\")'\n" - "rather than\n" - "additional=('variable', 'bndry_xin'," - " 'dirichlet_o4(0.0))'") - else: - message = ("An error occurred the run." 
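The heuristic above flags a "(" or ")" that is not protected by quotes anywhere in the command line, since the shell would otherwise consume it. Reduced to a predicate with the same four regexes:

```python
# Heuristic from _single_run above: a parenthesis outside quotes in the
# command line is probably a shell-quoting mistake.
import re

def unquoted_parens(combination):
    open_ok = (re.search(r'\"(.*)\(', combination)
               or re.search(r"\'(.*)\(", combination))
    close_ok = (re.search(r'\)(.*)\"', combination)
                or re.search(r"\)(.*)\'", combination))
    return (("(" in combination and not open_ok)
            or (")" in combination and not close_ok))

print(unquoted_parens("bndry_xin=dirichlet_o4(0.0)"))    # True
print(unquoted_parens('bndry_xin="dirichlet_o4(0.0)"'))  # False
```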
- " Please see the output above for details.") - raise RuntimeError(message) - - # Estimate elapsed time - toc = timeit.default_timer() - elapsed_time = toc - tic - - return out, elapsed_time -#}}} - -#{{{_append_run_log - def _append_run_log(self, start, run_no, run_time): - """Appends the run_log""" - - # Convert seconds to H:M:S - run_time = str(datetime.timedelta(seconds=run_time)) - - start_time = "{}-{}-{}-{}:{}:{}".\ - format(start.year, start.month, start.day, - start.hour, start.minute, start.second) - - # If the run is restarted with initial values from the last run - if self._restart: - dmp_line = "{}-restart-{}".format(self._dmp_folder, self._restart) - if self._cur_restart_from: - dmp_line += " from " + self._cur_restart_from - else: - dmp_line = self._dmp_folder - - # Line to write - line = (start_time, self._run_type, run_no, run_time, dmp_line) - # Opens for appending - log_format = "{:<19} {:^9} {:^6} {:<17} {:<}" - with open(self._run_log, "a") as f: - f.write(log_format.format(*line) + "\n") -#}}} -#}}} - -#{{{Function called by _get_possibilities -#{{{_generate_possibilities - def _generate_possibilities(self, variables=None, section=None, name=None): - """Generate the list of strings of possibilities""" - - if variables is not None: - # Set the section name correctly - if section != "": - section = section + ":" - else: - section = "" - # Set the combination of the variable - var_possibilities = [] - # Find the number of different dimensions - - for var in variables: - var_possibilities.append("{}{}={}".format(section, name, var)) - else: - var_possibilities = [] - - return var_possibilities -#}}} -#}}} - -#{{{Functions called by _get_combinations -#{{{_get_swapped_input_list - def _get_swapped_input_list(self, input_list): - """ - Finds the element in the input list, which corresponds to the - self._sort_by criterion. 
The element is swapped with the last - index, so that itertools.product will make this the fastest - varying variable - """ - - # We make a sort list containing the string to find in the - # input_list - sort_list = [] - - # We loop over the elements in self._sort_by to find what - # string we need to be looking for in the elements of the lists - # in input_list - for sort_by in self._sort_by: - # Find what list in the input_list which contains what we - # would sort by - - #{{{ If we would like to sort by the spatial domain - if sort_by == "spatial_domain": - # nx, ny and nz are all under the section "mesh" - find_in_list = "mesh" - #}}} - - #{{{ If we would like to sort by the temporal domain - elif sort_by == "temporal_domain": - # If we are sorting by the temporal domain, we can either - # search for timestep or nout - if self._timestep is not None: - find_in_list = "timestep" - elif self._nout is not None: - find_in_list = "nout" - #}}} - - #{{{ If we would like to sort by the method - elif (sort_by == "ddx_first") or\ - (sort_by == "ddx_second") or\ - (sort_by == "ddx_upwind") or\ - (sort_by == "ddx_flux") or\ - (sort_by == "ddy_first") or\ - (sort_by == "ddy_second") or\ - (sort_by == "ddy_upwind") or\ - (sort_by == "ddy_flux") or\ - (sort_by == "ddz_first") or\ - (sort_by == "ddz_second") or\ - (sort_by == "ddz_upwind") or\ - (sort_by == "ddz_flux"): - find_in_list = sort_by.replace("_", ":") - #}}} - - #{{{ If we would like to sort by the solver - elif sort_by == "solver": - find_in_list = sort_by - #}}} - - #{{{ If we would like to sort by anything else - else: - find_in_list = sort_by - #}}} - - # Append what to be found in the input_list - sort_list.append(find_in_list) - - # For all the sort_list, we would like check if the match - # can be found in any of the elements in input_list - # Appendable list - lengths = [] - for sort_nr, sort_by_txt in enumerate(sort_list): - # Make a flag to break the outermost loop if find_in_list is - # found - break_outer = False - # Loop over the lists in the input_list to find the match - for elem_nr, elem in enumerate(input_list): - # Each of the elements in this list is a string - for string in elem: - # Check if fins_in_list is in the string - if sort_by_txt in string: - # If there is a match, store the element number - swap_from_index = elem_nr - # Check the length of the element (as this is - # the number of times the run is repeated, only - # changing the values of sort_by [defining a - # group]) - lengths.append(len(elem)) - # Break the loop to save time - break_outer = True - break - # Break the outer loop if find_in_list_is_found - if break_outer: - break - - # As it is the last index which changes the fastest, we swap the - # element where the find_in_list was found with the last element - input_list[swap_from_index], input_list[-(sort_nr + 1)] =\ - input_list[-(sort_nr + 1)], input_list[swap_from_index] - - # The number of runs in one "group" - # Initialize self._len_group with one as we are going to - # multiply it with all the elements in lengths - self._len_group = 1 - for elem in lengths: - self._len_group *= elem - - return input_list -#}}} -#}}} - -#{{{Function called by _single_run -#{{{_get_command_to_run - def _get_command_to_run(self, combination): - """ - Returns a string of the command which will run the BOUT++ - program - """ - - # Creating the arguments - arg = " -d {} {}".format(self._dmp_folder, combination) - - # If the run is set to overwrite - if self._restart == "overwrite": - arg += " restart" - elif 
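The swap above works because `itertools.product` varies its last argument fastest: moving the sort-by options to the end of the input list keeps their runs adjacent, forming the "groups" counted by `_len_group`. For example:

```python
# itertools.product varies the *last* iterable fastest, which is why
# the sort-by options are swapped to the end above.
from itertools import product

solvers = ("solver:type=rk4", "solver:type=cvode")
timesteps = ("timestep=0.1", "timestep=0.01")

for combo in product(solvers, timesteps):
    print(" ".join(combo))
# solver:type=rk4 timestep=0.1
# solver:type=rk4 timestep=0.01
# solver:type=cvode timestep=0.1
# solver:type=cvode timestep=0.01
```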
self._restart == "append": - arg += " restart append" - - # Replace excessive spaces with a single space - arg = " ".join(arg.split()) - command = "./{} {}".format(self._program_name, arg) - - return command -#}}} -#}}} - -#{{{Functions called from several places in the code -#{{{_get_MXG_MYG - def _get_MXG_MYG(self): - """Function which returns the MXG and MYG""" - - if self._MXG is None: - try: - MXG = eval(self._inputFileOpts.root["mxg"]) - except KeyError: - message = ("Could not find 'MXG' or 'mxg' " - "in the input file. " - "Setting MXG = 2") - self._warning_printer(message) - self._warnings.append(message) - MXG = 2 - else: - MXG = self._MXG - if self._MYG is None: - try: - MYG = eval(self._inputFileOpts.root["myg"]) - except KeyError: - message = ("Could not find 'MYG' or 'myg' " - "in the input file. " - "Setting MYG = 2") - self._warning_printer(message) - self._warnings.append(message) - MYG = 2 - else: - MYG = self._MYG - - return MXG, MYG -#}}} - -#{{{_get_dim_from_input - def _get_dim_from_input(self, direction): - """ - Get the dimension from the input - - Parameters - ---------- - direction : ["nx"|"ny"|"nz"|"mz"] - The direction to read - - Returns - ------- - Number of points in the given direction - """ - - # If nx and ny is a function of MXG and MYG - MXG, MYG = self._get_MXG_MYG() - # NOTE: MXG may seem unused, but it needs to be in the current - # namespace if eval(self._inputFileOpts.mesh["nx"]) depends on - # MXG - - if self._grid_file: - # Open the grid file and read it - with DataFile(self._grid_file) as f: - # Loop over the variables in the file - n_points = f.read(direction) - else: - try: - n_points = eval(self._inputFileOpts.mesh[direction]) - except NameError: - message = "Could not evaluate\n" - message += self._inputFileOpts.mesh[direction] - message += "\nfound in {} in [mesh] in the input file.".\ - format(direction) - raise RuntimeError(message) - - return n_points -#}}} - - #{{{_warning_printer - def _warning_printer(self, message): - """Function for printing warnings""" - - print("{}{}WARNING{}".format("\n" * 3, "*" * 37, "*" * 36)) - # Makes sure that no more than 80 characters are printed out at - # the same time - for chunk in self._message_chunker(message): - rigth_padding = " " * (76 - len(chunk)) - print("* {}{} *".format(chunk, rigth_padding)) - print("*" * 80 + "\n" * 3) - #}}} - - #{{{_message_chunker - def _message_chunker(self, message, chunk=76): - """Generator used to chop a message so it doesn't exceed some - width""" - - for start in range(0, len(message), chunk): - yield message[start:start + chunk] - #}}} -#}}} -#}}} - -#{{{class PBS_runner - - -class PBS_runner(basic_runner): - #{{{docstring - """ - pbs_runner - ---------- - - Class for mpi running one or several runs with BOUT++. - Works like the basic_runner, but submits the jobs to a Portable - Batch System (PBS). - - For the additional member data, see the docstring of __init__. - - For more info check the docstring of bout_runners. 
- """ -#}}} - -# The constructor -#{{{__init__ - def __init__(self, - BOUT_nodes=1, - BOUT_ppn=1, - BOUT_walltime=None, - BOUT_queue=None, - BOUT_mail=None, - BOUT_run_name=None, - BOUT_account=None, - post_process_nproc=None, - post_process_nodes=None, - post_process_ppn=None, - post_process_walltime=None, - post_process_queue=None, - post_process_mail=None, - post_process_run_name=None, - post_process_account=None, - **kwargs): - #{{{docstring - """ - PBS_runner constructor - ---------------------- - - All the member data is set to None by default, with the - exception of BOUT_nodes (default=1) and BOUT_ppn (default = 4). - - Parameters - ---------- - - BOUT_nodes : int - Number of nodes for one submitted BOUT job - BOUT_ppn : int - Processors per node for one submitted BOUT job - BOUT_walltime : str - Maximum wall time for one submitted BOUT job - BOUT_queue : str - The queue to submit the BOUT jobs - BOUT_mail : str - Mail address to notify when a BOUT job has finished - BOUT_run_name : str - Name of the BOUT run on the cluster (optional) - BOUT_account : str - Account number to use for the run (optional) - post_process_nproc : int - Total number of processors for one submitted post processing - job - post_process_nodes : int - Number of nodes for one submitted post processing job - post_process_ppn : int - Processors per node for one submitted BOUT job - post_process_walltime : str - Maximum wall time for one submitting post processing job - post_process_queue : str - The queue to submit the post processing jobs - post_process_mail : str - Mail address to notify when a post processing job has - finished - post_process_run_name : str - Name of the post processing run on the cluster (optional) - post_process_account : str - Account number to use for the post processing (optional) - **kwargs : any - As the constructor of bout_runners is called, this - additional keyword makes it possible to specify the member - data of bout_runners in the constructor of PBS_runner (i.e. - nprocs = 1 is an allowed keyword argument in the constructor - of PBS_runner). - - For a full sequence of possible keywords, see the docstring of - the bout_runners constructor. - """ - #}}} - - # Note that the constructor accepts additional keyword - # arguments (**kwargs). 
These must match the keywords of the - # parent class "basic_runner", which is called by the "super" - # function below - - # Call the constructor of the superclass - super(PBS_runner, self).__init__(**kwargs) - - # Options set for the BOUT runs - self._BOUT_nodes = BOUT_nodes - self._BOUT_ppn = BOUT_ppn - self._BOUT_walltime = BOUT_walltime - self._BOUT_mail = BOUT_mail - self._BOUT_queue = BOUT_queue - self._BOUT_run_name = BOUT_run_name - self._BOUT_account = BOUT_account - # Options set for the post_processing runs - self._post_process_nproc = post_process_nproc - self._post_process_nodes = post_process_nodes - self._post_process_ppn = post_process_ppn - self._post_process_walltime = post_process_walltime - self._post_process_mail = post_process_mail - self._post_process_queue = post_process_queue - self._post_process_run_name = post_process_run_name - self._post_process_account = post_process_account - - # Options set for all runs - self._run_type = "basic_PBS" - - # Error check the input data - self._check_for_PBS_instance_error() - - # Initialize the jobid returned from the PBS - self._PBS_id = [] -#}}} - -# The run_driver -#{{{_run_driver - def _run_driver(self, combination, run_no): - """The machinery which actually performs the run""" - - # Submit the job to the queue - self._single_submit(combination, run_no, append_to_run_log=True) -#}}} - -#{{{Functions called by the constructor - #{{{_check_for_PBS_instance_error - def _check_for_PBS_instance_error(self): - """Check if there are any type errors when creating the object""" - - #{{{Check if BOUT_ppn and BOUT_nodes have the correct type - # BOUT_ppn and BOUT_nodes are set by default, however, we must check - # that the user has not given them as wrong input - if not isinstance(self._BOUT_ppn, int): - message = ("BOUT_ppn is of wrong type\n" - "BOUT_ppn must be given as a int") - self._errors.append("TypeError") - raise TypeError(message) - if not isinstance(self._BOUT_nodes, int): - message = ("BOUT_nodes is of wrong type\n" - "BOUT_nodes must be given as a int") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check that nprocs, BOUT_nodes and BOUT_ppn is consistent - if self._nproc > (self._BOUT_nodes * self._BOUT_ppn): - message = "Must have nproc <= BOUT_nodes * BOUT_ppn" - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check all the proper post_process data is set if any is set - check_if_set = ( - self._post_process_nproc, - self._post_process_nodes, - self._post_process_ppn, - ) - # All elements of check_if_set must be set if any is set - not_None = 0 - for check in check_if_set: - if check is not None: - not_None += 1 - - if (not_None != 0) and (not_None != len(check_if_set)): - message = ("If any of post_process_nproc, post_process_nodes," - " post_process_ppn and post_process_walltime is" - " set, all others must be set as well.") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if post_process_ppn and post_process_nodes is int if set - check_if_int = ( - (self._post_process_nodes, "post_process_nodes"), - (self._post_process_ppn, "post_process_ppn") - ) - self._check_for_correct_type(var=check_if_int, - the_type=int, - allow_iterable=False) - #}}} - - #{{{Check that post_process_nprocs,nodes,ppn is consistent if set - if self._post_process_nproc is not None: - if self._post_process_nproc > \ - (self._post_process_nodes * self._post_process_ppn): - message = ("Must have post_process_nproc <= " - "post_process_nodes * 
post_process_ppn") - self._errors.append("TypeError") - raise TypeError(message) - #}}} - - #{{{Check if walltime, mail and queue is a string if set - check_if_str = ( - (self._BOUT_walltime, "BOUT_walltime"), - (self._BOUT_mail, "BOUT_mail"), - (self._BOUT_queue, "BOUT_queue"), - (self._BOUT_run_name, "BOUT_run_name"), - (self._BOUT_account, "BOUT_account"), - (self._post_process_walltime, "BOUT_walltime"), - (self._post_process_mail, "post_process_mail"), - (self._post_process_queue, "post_process_queue"), - (self._post_process_run_name, "post_process_run_name"), - (self._post_process_account, "post_process_account"), - ) - self._check_for_correct_type(var=check_if_str, - the_type=str, - allow_iterable=False) - #}}} - - #{{{Check that walltime is on correct format - # A list to loop over - walltimes = [] - # Append the walltimes if set - if self._BOUT_walltime is not None: - walltimes.append((self._BOUT_walltime, - "BOUT_walltime")) - if self._post_process_walltime is not None: - walltimes.append((self._post_process_walltime, - "post_process_walltime")) - - # Loop over the walltimes - for walltime in walltimes: - # Set a flag which states whether or not the check was - # successful - success = True - # Split the walltime string - walltime_list = walltime[0].split(":") - # Check that the list has three elements - if len(walltime_list) == 3: - - # Check that seconds is on the format SS - if len(walltime_list[2]) == 2: - # Check that the last element (seconds) is a digit (int) - if walltime_list[2].isdigit(): - # Check that the element is less than 59 - if int(walltime_list[2]) > 59: - success = False - # Seconds is not a digit - else: - success = False - # Seconds is not on the format SS - else: - success = False - - # Do the same for the second last element (minutes) - if len(walltime_list[1]) == 2: - # Check that the last element (seconds) is a digit (int) - if walltime_list[1].isdigit(): - if int(walltime_list[1]) > 59: - success = False - # Minutes is not a digit - else: - success = False - # Seconds is not on the format SS - else: - success = False - - # Check that the first element (hours) is a digit - if not(walltime_list[0].isdigit()): - success = False - - # walltime_list does not have three elements - else: - success = False - - if not(success): - message = walltime[1] + " must be on the form H...H:MM:SS" - self._errors.append("TypeError") - raise TypeError(message) - #}}} - #}}} -#}}} - -#{{{Functions called by _error_check_for_run_input - #{{{_check_for_child_class_errors - def _check_for_child_class_errors( - self, - remove_old, - post_processing_function, - post_process_after_every_run - ): - """Function which check for errors in a child class.""" - - # Check member data is set if post_processing_function is not None - if post_processing_function is not None: - check_if_set = ( - self._post_process_nproc, - self._post_process_nodes, - self._post_process_ppn, - ) - # All elements of check_if_set must be set if any is set - not_None = 0 - for check in check_if_set: - if check is not None: - not_None += 1 - - if (not_None != 0) and (not_None != len(check_if_set)): - message = ("post_process_nproc, post_process_nodes," - " and post_process_ppn and must" - " be set if post_processing_function is set.") - self._errors.append("TypeError") - raise TypeError(message) - #}}} -#}}} - -#{{{Functions called by the execute_runs - #{{{ _print_run_or_submit - def _print_run_or_submit(self): - """Prints "submitting" """ - print("\nSubmitting:") - #}}} -#}}} - -#{{{Functions called by 
_run_driver - #{{{_single_submit - def _single_submit(self, combination, run_no, append_to_run_log=None): - """Submit a single BOUT job and submit the jobid to self._PBS_id""" - - # Get the script (as a string) which is going to be - # submitted - job_string = self._get_job_string(run_no, - combination, - append_to_run_log) - - # The submission - PBS_id = self._submit_to_PBS(job_string) - self._PBS_id.append(PBS_id) - #}}} - - #{{{_call_post_processing_function - def _call_post_processing_function( - self, - function=None, - folders=None, - **kwargs - ): - """ - Function which submits the post processing to the PBS - - This is done by making a self deleting temporary python file - that will be called by a PBS script. - """ - - #{{{ Create a python script, calling the post-processing function - # Get the start_time (to be used in the name of the file) - start_time = self._get_start_time() - - # The name of the file - python_name = "tmp_{}_{}.py".format(function.__name__, start_time) - - # Make the script - python_tmp = "#!/usr/bin/env python3\n" - python_tmp += "import os, sys\n" - # Set the python path - python_tmp += "sys.path = {}\n".format(sys.path) - # Import the post processing function - python_tmp += "from {} import {}\n".\ - format(function.__module__, function.__name__) - # Convert the keyword args to proper arguments - # Appendable list - arguments = [] - for key in kwargs.keys(): - if not isinstance(kwargs[key], str): - # If the value is not a string, we can append it directly - arguments.append("{}={}".format(key, kwargs[key])) - else: - # If the value is a string, we need to put quotes around - arguments.append("{}='{}'".format(key, kwargs[key])) - - # Put a comma in between the arguments - arguments = ", ".join(arguments) - # Call the post processing function - if hasattr(folders, "__iter__") and not isinstance(folders, str): - python_tmp += "{}({},{})\n".\ - format(function.__name__, tuple(folders), arguments) - elif isinstance(folders, str): - python_tmp += "{}(('{}',),{})\n".\ - format(function.__name__, folders, arguments) - # When the script has run, it will delete itself - python_tmp += "os.remove('{}')\n".format(python_name) - - # Write the python script - with open(python_name, "w") as f: - f.write(python_tmp) - #}}} - - #{{{Create and submit the shell script - # Creating the job string - if self._post_process_run_name is None: - job_name = "post_process_{}_".format(function.__name__, start_time) - else: - job_name = self._post_process_run_name - - # Get core of the job string - job_string = self._create_PBS_core_string( - job_name=job_name, - nodes=self._post_process_nodes, - ppn=self._post_process_ppn, - walltime=self._post_process_walltime, - mail=self._post_process_mail, - queue=self._post_process_queue, - account=self._post_process_account, - ) - # Call the python script in the submission - - job_string += "python {}\n".format(python_name) - job_string += "exit" - - # Create the dependencies - dependencies = ":".join(self._PBS_id) - # Submit the job - print("\nSubmitting the post processing function '{}'\n". - format(function.__name__)) - self._submit_to_PBS(job_string, dependent_job=dependencies) - #}}} - #}}} -#}}} - -#{{{ Functions called by _single_submit - #{{{_get_job_string - def _get_job_string(self, run_no, combination, append_to_run_log): - """ - Make a string which will saved as a shell script before being - sent to the PBS queue. 
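The generated post-processing job above is plain Python: it inherits the caller's `sys.path`, imports and calls the user's function, then removes itself so no temporary scripts accumulate. A trimmed version of the string being assembled; the module, function, and folder names are hypothetical.

```python
# Skeleton of the self-deleting script assembled above; the module,
# function, and folder names are hypothetical.
import sys

python_name = "tmp_post_process.py"
script = "#!/usr/bin/env python3\n"
script += "import os, sys\n"
script += "sys.path = {}\n".format(sys.path)          # inherit caller's path
script += "from my_post_module import my_function\n"  # the user's function
script += "my_function(('data/run1',), show=False)\n"
script += "os.remove('{}')\n".format(python_name)     # script deletes itself

with open(python_name, "w") as f:
    f.write(script)
```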
- """ - - #{{{Make the job name based on the combination - # Split the name to a list - combination_name = combination.split(" ") - # Remove whitespace - combination_name = tuple(element for element in combination_name - if element != "") - # Collect the elements - combination_name = "_".join(combination_name) - # Replace bad characters - combination_name = combination_name.replace(":", "") - combination_name = combination_name.replace("=", "-") - - # Name of job - if self._BOUT_run_name is None: - job_name = "{}_{}_{}".\ - format(combination_name, self._directory, run_no) - else: - job_name = self._BOUT_run_name - #}}} - - #{{{Make the main command that will be used in the PBS script - command = self._get_command_to_run(combination) - command = "mpirun -np {} {}".format(self._nproc, command) - - # Print the command - print(command + "\n") - #}}} - - #{{{ Creating the core job string - job_string = self._create_PBS_core_string( - job_name=job_name, - nodes=self._BOUT_nodes, - ppn=self._BOUT_ppn, - walltime=self._BOUT_walltime, - mail=self._BOUT_mail, - queue=self._BOUT_queue, - account=self._BOUT_account, - ) - #}}} - - if append_to_run_log: - #{{{ Get the time for start of the submission - start = datetime.datetime.now() - start_time = "{}-{}-{}-{}:{}:{}".\ - format(start.year, start.month, start.day, - start.hour, start.minute, start.second) - #}}} - - #{{{ Start the timer - job_string += "start=`date +%s`\n" - # Run the bout program - job_string += command + "\n" - # end the timer - job_string += "end=`date +%s`\n" - # Find the elapsed time - job_string += "time=$((end-start))\n" - # The string is now in seconds - # The following procedure will convert it to H:M:S - job_string += "h=$((time/3600))\n" - job_string += "m=$((($time%3600)/60))\n" - job_string += "s=$((time%60))\n" - #}}} - - #{{{ Append to the run log - # Ideally we would check if any process were writing to - # run_log.txt - # This could be done with lsof command as described in - # http://askubuntu.com/questions/14252/how-in-a-script-can-i-determine-if-a-file-is-currently-being-written-to-by-ano - # However, lsof is not available on all clusters - - # Using the same formatting as in _append_run_log, we are going - # to echo the following to the run_log when the run is finished - job_string += "echo '" +\ - "{:<19}".format(start_time) + " " * 3 +\ - "{:^9}".format(self._run_type) + " " * 3 +\ - "{:^6}".format(str(run_no)) + " " * 3 +\ - "'$h':'$m':'$s" + " " * 10 +\ - "{:<}".format(self._dmp_folder) + " " * 3 +\ - " >> $PBS_O_WORKDIR/" + self._directory +\ - "/run_log.txt\n" - #}}} - - # Exit the qsub - job_string += "exit" - - return job_string - #}}} -#}}} - -#{{{Functions called by _submit_to_PBS -#{{{_get_start_time - def _get_start_time(self): - """ - Returns a string of the current time down to micro precision - """ - - # The time is going to be appended to the job name and python name - # In case the process is really fast, so that more than one job - # is submitted per second, we add a microsecond in the - # names for safety - time_now = datetime.datetime.now() - start_time = "{}-{}-{}-{}".format(time_now.hour, - time_now.minute, - time_now.second, - time_now.microsecond, - ) - return start_time -#}}} -#}}} - -#{{{Functions called by several functions -#{{{_create_PBS_core_string - def _create_PBS_core_string( - self, - job_name=None, - nodes=None, - ppn=None, - walltime=None, - mail=None, - queue=None, - account=None, - ): - """ - Creates the core of a PBS script as a string - """ - - # Shebang line - job_string = 
"#!/bin/bash\n" - # The job name - job_string += "#PBS -N {}\n".format(job_name) - job_string += "#PBS -l nodes={}:ppn={}\n".format(nodes, ppn) - # If walltime is set - if walltime is not None: - # Wall time, must be in format HOURS:MINUTES:SECONDS - job_string += "#PBS -l walltime={}\n".format(walltime) - # If submitting to a specific queue - if queue is not None: - job_string += "#PBS -q {}\n".format(queue) - job_string += "#PBS -o {}.log\n".\ - format(os.path.join(self._dmp_folder, job_name)) - job_string += "#PBS -e {}.err\n".\ - format(os.path.join(self._dmp_folder, job_name)) - if account is not None: - job_string += "#PBS -A {}\n".format(account) - # If we want to be notified by mail - if mail is not None: - job_string += "#PBS -M {}\n".format(mail) - # #PBS -m abe - # a=aborted b=begin e=ended - job_string += "#PBS -m e\n" - # cd to the folder you are sending the qsub from - job_string += "cd $PBS_O_WORKDIR\n" - - return job_string -#}}} - -#{{{_submit_to_PBS - def _submit_to_PBS(self, job_string, dependent_job=None): - """ - Saves the job_string as a shell script, submits it and deletes - it. Returns the output from PBS as a string - """ - - # Create the name of the temporary shell script - # Get the start_time used for the name of the script - start_time = self._get_start_time() - script_name = "tmp_{}.sh".format(start_time) - - # Save the string as a script - with open(script_name, "w") as shell_script: - shell_script.write(job_string) - - # Submit the jobs - if dependent_job is None: - # Without dependencies - command = "qsub ./" + script_name - status, output = shell(command, pipe=True) - else: - # If the length of the depend job is 0, then all the jobs - # have completed, and we can carry on as usual without - # dependencies - if len(dependent_job) == 0: - command = "qsub ./" + script_name - status, output = shell(command, pipe=True) - else: - # With dependencies - command = "qsub -W depend=afterok:{} ./{}".\ - format(dependent_job, script_name) - status, output = shell(command, pipe=True) - - # Check for success - if status != 0: - if status == 208: - message = ("Runs finished before submission of the post" - " processing function. 
When the runs are done:" - " Run again with 'remove_old = False' to submit" - " the function.") - self._warnings.append(message) - else: - print("\nSubmission failed, printing output\n") - print(output) - self._errors.append("RuntimeError") - message = ("The submission failed with exit code {}" - ", see the output above").format(status) - raise RuntimeError(message) - - # Trims the end of the output string - output = output.strip(" \t\n\r") - - # Delete the shell script - try: - os.remove(script_name) - except FileNotFoundError: - # Do not raise an error - pass - - return output -#}}} -#}}} -#}}} - - -#{{{if __name__ == "__main__": -if __name__ == "__main__": - - print(("\n\nTo find out about the bout_runners, please read the user's " - "manual, or have a look at 'BOUT/examples/bout_runners_example', " - "or have a look at the documentation")) -#}}} diff --git a/tools/pylib/boutconfig/__init__.py.cin b/tools/pylib/boutconfig/__init__.py.cin index 138e1c6004..1ec4263b9e 100644 --- a/tools/pylib/boutconfig/__init__.py.cin +++ b/tools/pylib/boutconfig/__init__.py.cin @@ -17,6 +17,7 @@ config = { "idlpath": "@IDLCONFIGPATH@", "pythonpath": "@BOUT_PYTHONPATH@", "has_netcdf": "@BOUT_HAS_NETCDF@", + "has_legacy_netcdf": "@BOUT_HAS_LEGACY_NETCDF@", "has_pnetcdf": "OFF", "has_hdf5": "@BOUT_HAS_HDF5@", "has_pvode": "@BOUT_HAS_PVODE@", diff --git a/tools/pylib/boutconfig/__init__.py.in b/tools/pylib/boutconfig/__init__.py.in index 8d615f0389..f4d41658de 100644 --- a/tools/pylib/boutconfig/__init__.py.in +++ b/tools/pylib/boutconfig/__init__.py.in @@ -16,6 +16,7 @@ config = { "idlpath": "@IDLCONFIGPATH@", "pythonpath": "@PYTHONCONFIGPATH@", "has_netcdf": "@BOUT_HAS_NETCDF@", + "has_legacy_netcdf": "@BOUT_HAS_LEGACY_NETCDF@", "has_pnetcdf": "@BOUT_HAS_PNETCDF@", "has_hdf5": "@BOUT_HAS_HDF5@", "has_pvode": "@BOUT_HAS_PVODE@", diff --git a/tools/pylib/boutdata b/tools/pylib/boutdata new file mode 120000 index 0000000000..1aa0a53311 --- /dev/null +++ b/tools/pylib/boutdata @@ -0,0 +1 @@ +../../externalpackages/boutdata/boutdata/ \ No newline at end of file diff --git a/tools/pylib/boutdata/__init__.py b/tools/pylib/boutdata/__init__.py deleted file mode 100644 index 20b7d0101b..0000000000 --- a/tools/pylib/boutdata/__init__.py +++ /dev/null @@ -1,14 +0,0 @@ -""" Routines for exchanging data to/from BOUT++ """ - -try: - from builtins import str -except ImportError: - raise ImportError("Please install the future module to use Python 2") - -# Import this, as this almost always used when calling this package -from boutdata.collect import collect, attributes - -__all__ = ["attributes", "collect", "gen_surface", "pol_slice"] - -__version__ = '0.1.2' -__name__ = 'boutdata' diff --git a/tools/pylib/boutdata/cbdtoeqdsk.py b/tools/pylib/boutdata/cbdtoeqdsk.py deleted file mode 100644 index ea4c1c4689..0000000000 --- a/tools/pylib/boutdata/cbdtoeqdsk.py +++ /dev/null @@ -1,26 +0,0 @@ -from __future__ import print_function -from boututils.file_import import file_import -import numpy as np - -class Bunch: - pass - -def cbmtogeqdsk(g): - gg=Bunch() - gg.r=g['Rxy'] - gg.z=g['Zxy'] - gg.psi=g['psi'] - gg.pres=g['mu0p'] - gg.qpsi=g['qsafe'] - gg.fpol=g['f'] - gg.nx=g['nx'] - gg.ny=g['ny'] - i=np.argwhere(g['mu0p']==0) - - gg.simagx=gg.psi.min() - gg.sibdry=gg.psi[i[0]] - gg.xlim=0 - gg.ylim=0 - gg.nlim=0 - - return gg diff --git a/tools/pylib/boutdata/collect.py b/tools/pylib/boutdata/collect.py deleted file mode 100644 index 42a7591329..0000000000 --- a/tools/pylib/boutdata/collect.py +++ /dev/null @@ -1,817 +0,0 @@ 
-from __future__ import print_function -from __future__ import division - -from builtins import str, range - -import os -import sys -import glob - -import numpy as np - -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray - - -def findVar(varname, varlist): - """Find variable name in a list - - First does case insensitive comparison, then - checks for abbreviations. - - Returns the matched string, or raises a ValueError - - Parameters - ---------- - varname : str - Variable name to look for - varlist : list of str - List of possible variable names - - Returns - ------- - str - The closest match to varname in varlist - - """ - # Try a variation on the case - v = [name for name in varlist if name.lower() == varname.lower()] - if len(v) == 1: - # Found case match - print("Variable '%s' not found. Using '%s' instead" % (varname, v[0])) - return v[0] - elif len(v) > 1: - print("Variable '"+varname + - "' not found, and is ambiguous. Could be one of: "+str(v)) - raise ValueError("Variable '"+varname+"' not found") - - # None found. Check if it's an abbreviation - v = [name for name in varlist - if name[:len(varname)].lower() == varname.lower()] - if len(v) == 1: - print("Variable '%s' not found. Using '%s' instead" % (varname, v[0])) - return v[0] - - if len(v) > 1: - print("Variable '"+varname + - "' not found, and is ambiguous. Could be one of: "+str(v)) - raise ValueError("Variable '"+varname+"' not found") - - -def _convert_to_nice_slice(r, N, name="range"): - """Convert r to a "sensible" slice in range [0, N] - - If r is None, the slice corresponds to the full range. - - Lists or tuples of one or two ints are converted to slices. - - Slices with None for one or more arguments have them replaced with - sensible values. - - Private helper function for collect - - Parameters - ---------- - r : None, int, slice or list of int - Range-like to check/convert to slice - N : int - Size of range - name : str, optional - Name of range for error message - - Returns - ------- - slice - "Sensible" slice with no Nones for start, stop or step - """ - - if N == 0: - raise ValueError("No data available in %s"%name) - if r is None: - temp_slice = slice(N) - elif isinstance(r, slice): - temp_slice = r - elif isinstance(r, (int, np.integer)): - if r >= N or r <-N: - # raise out of bounds error as if we'd tried to index the array with r - # without this, would return an empty array instead - raise IndexError(name+" index out of range, value was "+str(r)) - elif r == -1: - temp_slice = slice(r, None) - else: - temp_slice = slice(r, r + 1) - elif len(r) == 0: - return _convert_to_nice_slice(None, N, name) - elif len(r) == 1: - return _convert_to_nice_slice(r[0], N, name) - elif len(r) == 2: - r2 = list(r) - if r2[0] < 0: - r2[0] = r2[0] + N - if r2[1] < 0: - r2[1] = r2[1] + N - if r2[0] > r2[1]: - raise ValueError("{} start ({}) is larger than end ({})" - .format(name, *r2)) - # Lists uses inclusive end, we need exclusive end - temp_slice = slice(r2[0], r2[1] + 1) - elif len(r) == 3: - # Convert 3 element list to slice object - temp_slice = slice(r[0],r[1],r[2]) - else: - raise ValueError("Couldn't convert {} ('{}') to slice".format(name, r)) - - # slice.indices converts None to actual values - return slice(*temp_slice.indices(N)) - - -def collect(varname, xind=None, yind=None, zind=None, tind=None, path=".", - yguards=False, xguards=True, info=True, prefix="BOUT.dmp", - strict=False, tind_auto=False, datafile_cache=None): - """Collect a variable from a set of BOUT++ outputs. 
- - Parameters - ---------- - varname : str - Name of the variable - xind, yind, zind, tind : int, slice or list of int, optional - Range of X, Y, Z or time indices to collect. Either a single - index to collect, a list containing [start, end] (inclusive - end), or a slice object (usual python indexing). Default is to - fetch all indices - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - yguards : bool or "include_upper", optional - Collect Y boundary guard cells? (default: False) - If yguards=="include_upper" the y-boundary cells from the upper (second) target - are also included. - xguards : bool, optional - Collect X boundary guard cells? (default: True) - (Set to True to be consistent with the definition of nx) - info : bool, optional - Print information about collect? (default: True) - strict : bool, optional - Fail if the exact variable name is not found? (default: False) - tind_auto : bool, optional - Read all files, to get the shortest length of time_indices. - Useful if writing got interrupted (default: False) - datafile_cache : datafile_cache_tuple, optional - Optional cache of open DataFile instances: namedtuple as returned - by create_cache. Used by BoutOutputs to pass in a cache so that we - do not have to re-open the dump files to read another variable - (default: None) - - Examples - -------- - - >>> collect(name) - BoutArray([[[[...]]]]) - - """ - - if datafile_cache is None: - # Search for BOUT++ dump files - file_list, parallel, _ = findFiles(path, prefix) - else: - parallel = datafile_cache.parallel - file_list = datafile_cache.file_list - - def getDataFile(i): - """Get the DataFile from the cache, if present, otherwise open the - DataFile - - """ - if datafile_cache is not None: - return datafile_cache.datafile_list[i] - else: - return DataFile(file_list[i]) - - if parallel: - if info: - print("Single (parallel) data file") - - f = getDataFile(0) - - if varname not in f.keys(): - if strict: - raise ValueError("Variable '{}' not found".format(varname)) - else: - varname = findVar(varname, f.list()) - - dimensions = f.dimensions(varname) - - try: - mxg = f["MXG"] - except KeyError: - mxg = 0 - print("MXG not found, setting to {}".format(mxg)) - try: - myg = f["MYG"] - except KeyError: - myg = 0 - print("MYG not found, setting to {}".format(myg)) - - if xguards: - nx = f["nx"] - else: - nx = f["nx"] - 2*mxg - if yguards: - ny = f["ny"] + 2*myg - if yguards == "include_upper" and f["jyseps2_1"] != f["jyseps1_2"]: - # Simulation has a second (upper) target, with a second set of y-boundary - # points - ny = ny + 2*myg - else: - ny = f["ny"] - nz = f["MZ"] - t_array = f.read("t_array") - if t_array is None: - nt = 1 - t_array = np.zeros(1) - else: - try: - nt = len(t_array) - except TypeError: - # t_array is not an array here, which probably means it was a - # one-element array and has been read as a scalar. 
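The `_convert_to_nice_slice` helper deleted above defines the contract for `collect()`'s `tind`/`xind`/`yind`/`zind` arguments, which is the part of the interface users trip over most often: `None` means everything, an int selects a single index, and a two-element list is an *inclusive* range. A condensed restatement of that rule (omitting the deleted helper's 0-, 1- and 3-element list cases):

```python
# Condensed restatement of the deleted helper's contract, not a drop-in copy.
import numpy as np

def to_slice(r, n, name="range"):
    """Normalise r to a slice over [0, n) with concrete start/stop/step."""
    if r is None:
        s = slice(n)                       # full range
    elif isinstance(r, slice):
        s = r
    elif isinstance(r, (int, np.integer)):
        if r >= n or r < -n:
            raise IndexError("{} index out of range: {}".format(name, r))
        s = slice(r, None) if r == -1 else slice(r, r + 1)
    elif len(r) == 2:
        start, stop = (x + n if x < 0 else x for x in r)
        s = slice(start, stop + 1)         # lists use an inclusive end
    else:
        raise ValueError("Cannot convert {} to a slice".format(r))
    return slice(*s.indices(n))            # replace any remaining Nones

assert to_slice(None, 10) == slice(0, 10, 1)
assert to_slice(3, 10) == slice(3, 4, 1)
assert to_slice([2, 5], 10) == slice(2, 6, 1)   # inclusive end
```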
- nt = 1 - - xind = _convert_to_nice_slice(xind, nx, "xind") - yind = _convert_to_nice_slice(yind, ny, "yind") - zind = _convert_to_nice_slice(zind, nz, "zind") - tind = _convert_to_nice_slice(tind, nt, "tind") - - if not xguards: - xind = slice(xind.start+mxg, xind.stop+mxg, xind.step) - if not yguards: - yind = slice(yind.start+myg, yind.stop+myg, yind.step) - - if dimensions == (): - ranges = [] - elif dimensions == ('t',): - ranges = [tind] - elif dimensions == ('x', 'y'): - # Field2D - ranges = [xind, yind] - elif dimensions == ('x', 'z'): - # FieldPerp - ranges = [xind, zind] - elif dimensions == ('t', 'x', 'y'): - # evolving Field2D - ranges = [tind, xind, yind] - elif dimensions == ('t', 'x', 'z'): - # evolving FieldPerp - ranges = [tind, xind, zind] - elif dimensions == ('x', 'y', 'z'): - # Field3D - ranges = [xind, yind, zind] - elif dimensions == ('t', 'x', 'y', 'z'): - # evolving Field3D - ranges = [tind, xind, yind, zind] - else: - raise ValueError("Variable has incorrect dimensions ({})" - .format(dimensions)) - - data = f.read(varname, ranges) - var_attributes = f.attributes(varname) - return BoutArray(data, attributes=var_attributes) - - nfiles = len(file_list) - - # Read data from the first file - f = getDataFile(0) - - if varname not in f.keys(): - if strict: - raise ValueError("Variable '{}' not found".format(varname)) - else: - varname = findVar(varname, f.list()) - - dimensions = f.dimensions(varname) - - var_attributes = f.attributes(varname) - ndims = len(dimensions) - - # ndims is 0 for reals, and 1 for f.ex. t_array - if ndims == 0: - # Just read from file - data = f.read(varname) - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - return BoutArray(data, attributes=var_attributes) - - if ndims > 4: - raise ValueError("ERROR: Too many dimensions") - - def load_and_check(varname): - var = f.read(varname) - if var is None: - raise ValueError("Missing " + varname + " variable") - return var - - mxsub = load_and_check("MXSUB") - mysub = load_and_check("MYSUB") - mz = load_and_check("MZ") - mxg = load_and_check("MXG") - myg = load_and_check("MYG") - t_array = f.read("t_array") - if t_array is None: - nt = 1 - t_array = np.zeros(1) - else: - try: - nt = len(t_array) - except TypeError: - # t_array is not an array here, which probably means it was a - # one-element array and has been read as a scalar. - nt = 1 - if tind_auto: - for i in range(nfiles): - t_array_ = getDataFile(i).read("t_array") - nt = min(len(t_array_), nt) - - if info: - print("mxsub = %d mysub = %d mz = %d\n" % (mxsub, mysub, mz)) - - # Get the version of BOUT++ (should be > 0.6 for NetCDF anyway) - try: - version = f["BOUT_VERSION"] - except KeyError: - print("BOUT++ version : Pre-0.2") - version = 0 - if version < 3.5: - # Remove extra point - nz = mz-1 - else: - nz = mz - - # Fallback to sensible (?) defaults - try: - nxpe = f["NXPE"] - except KeyError: - nxpe = 1 - print("NXPE not found, setting to {}".format(nxpe)) - try: - nype = f["NYPE"] - except KeyError: - nype = nfiles - print("NYPE not found, setting to {}".format(nype)) - - npe = nxpe * nype - if info: - print("nxpe = %d, nype = %d, npe = %d\n" % (nxpe, nype, npe)) - if npe < nfiles: - print("WARNING: More files than expected (" + str(npe) + ")") - elif npe > nfiles: - print("WARNING: Some files missing. 
Expected " + str(npe)) - - if xguards: - nx = nxpe * mxsub + 2*mxg - else: - nx = nxpe * mxsub - - if yguards: - ny = mysub * nype + 2*myg - if yguards == "include_upper" and f["jyseps2_1"] != f["jyseps1_2"]: - # Simulation has a second (upper) target, with a second set of y-boundary - # points - ny = ny + 2*myg - ny_inner = f["ny_inner"] - yproc_upper_target = ny_inner // mysub - 1 - if f["ny_inner"] % mysub != 0: - raise ValueError("Trying to keep upper boundary cells but " - "mysub={} does not divide ny_inner={}" - .format(mysub, ny_inner)) - else: - yproc_upper_target = None - else: - ny = mysub * nype - - xind = _convert_to_nice_slice(xind, nx, "xind") - yind = _convert_to_nice_slice(yind, ny, "yind") - zind = _convert_to_nice_slice(zind, nz, "zind") - tind = _convert_to_nice_slice(tind, nt, "tind") - - xsize = xind.stop - xind.start - ysize = yind.stop - yind.start - zsize = int(np.ceil(float(zind.stop - zind.start)/zind.step)) - tsize = int(np.ceil(float(tind.stop - tind.start)/tind.step)) - - if ndims == 1: - if tind is None: - data = f.read(varname) - else: - data = f.read(varname, ranges=[tind]) - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - return BoutArray(data, attributes=var_attributes) - - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - - # Map between dimension names and output size - sizes = {'x': xsize, 'y': ysize, 'z': zsize, 't': tsize} - - # Create a list with size of each dimension - ddims = [sizes[d] for d in dimensions] - - # Create the data array - data = np.zeros(ddims) - - if dimensions == ('t', 'x', 'z') or dimensions == ('x', 'z'): - yindex_global = None - # The pe_yind that this FieldPerp is going to be read from - fieldperp_yproc = None - - for i in range(npe): - # Get X and Y processor indices - pe_yind = int(i/nxpe) - pe_xind = i % nxpe - - inrange = True - - if yguards: - # Get local ranges - ystart = yind.start - pe_yind*mysub - ystop = yind.stop - pe_yind*mysub - - # Check lower y boundary - if pe_yind == 0: - # Keeping inner boundary - if ystop <= 0: - inrange = False - if ystart < 0: - ystart = 0 - else: - if ystop < myg-1: - inrange = False - if ystart < myg: - ystart = myg - # and lower y boundary at upper target - if yproc_upper_target is not None and pe_yind - 1 == yproc_upper_target: - ystart = ystart - myg - - # Upper y boundary - if pe_yind == (nype - 1): - # Keeping outer boundary - if ystart >= (mysub + 2*myg): - inrange = False - if ystop > (mysub + 2*myg): - ystop = (mysub + 2*myg) - else: - if ystart >= (mysub + myg): - inrange = False - if ystop > (mysub + myg): - ystop = (mysub + myg) - # upper y boundary at upper target - if yproc_upper_target is not None and pe_yind == yproc_upper_target: - ystop = ystop + myg - - # Calculate global indices - ygstart = ystart + pe_yind * mysub - ygstop = ystop + pe_yind * mysub - - if yproc_upper_target is not None and pe_yind > yproc_upper_target: - ygstart = ygstart + 2*myg - ygstop = ygstop + 2*myg - - else: - # Get local ranges - ystart = yind.start - pe_yind*mysub + myg - ystop = yind.stop - pe_yind*mysub + myg - - if (ystart >= (mysub + myg)) or (ystop <= myg): - inrange = False # Y out of range - - if ystart < myg: - ystart = myg - if ystop > mysub + myg: - ystop = myg + mysub - - # Calculate global indices - ygstart = ystart + pe_yind * mysub - myg - ygstop = ystop + pe_yind * mysub - myg - - if xguards: - # Get local ranges - xstart = xind.start - pe_xind*mxsub - xstop = xind.stop - 
pe_xind*mxsub - - # Check lower x boundary - if pe_xind == 0: - # Keeping inner boundary - if xstop <= 0: - inrange = False - if xstart < 0: - xstart = 0 - else: - if xstop <= mxg: - inrange = False - if xstart < mxg: - xstart = mxg - - # Upper x boundary - if pe_xind == (nxpe - 1): - # Keeping outer boundary - if xstart >= (mxsub + 2*mxg): - inrange = False - if xstop > (mxsub + 2*mxg): - xstop = (mxsub + 2*mxg) - else: - if xstart >= (mxsub + mxg): - inrange = False - if xstop > (mxsub + mxg): - xstop = (mxsub+mxg) - - # Calculate global indices - xgstart = xstart + pe_xind * mxsub - xgstop = xstop + pe_xind * mxsub - - else: - # Get local ranges - xstart = xind.start - pe_xind*mxsub + mxg - xstop = xind.stop - pe_xind*mxsub + mxg - - if (xstart >= (mxsub + mxg)) or (xstop <= mxg): - inrange = False # X out of range - - if xstart < mxg: - xstart = mxg - if xstop > mxsub + mxg: - xstop = mxg + mxsub - - # Calculate global indices - xgstart = xstart + pe_xind * mxsub - mxg - xgstop = xstop + pe_xind * mxsub - mxg - - # Number of local values - nx_loc = xstop - xstart - ny_loc = ystop - ystart - - if not inrange: - continue # Don't need this file - - if info: - sys.stdout.write("\rReading from " + file_list[i] + ": [" + - str(xstart) + "-" + str(xstop-1) + "][" + - str(ystart) + "-" + str(ystop-1) + "] -> [" + - str(xgstart) + "-" + str(xgstop-1) + "][" + - str(ygstart) + "-" + str(ygstop-1) + "]") - - f = getDataFile(i) - - if dimensions == ('t', 'x', 'y', 'z'): - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - slice(ystart, ystop), - zind]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc), :] = d - elif dimensions == ('x', 'y', 'z'): - d = f.read(varname, ranges=[slice(xstart, xstop), - slice(ystart, ystop), - zind]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc), :] = d - elif dimensions == ('t', 'x', 'y'): - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - slice(ystart, ystop)]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc)] = d - elif dimensions == ('t', 'x', 'z'): - # FieldPerp should only be defined on processors which contain its yindex_global - f_attributes = f.attributes(varname) - temp_yindex = f_attributes["yindex_global"] - - if temp_yindex >= 0: - if yindex_global is None: - yindex_global = temp_yindex - - # we have found a file with containing the FieldPerp, get the attributes from here - var_attributes = f_attributes - assert temp_yindex == yindex_global - - if temp_yindex >= 0: - # Check we only read from one pe_yind - assert fieldperp_yproc is None or fieldperp_yproc == pe_yind - - fieldperp_yproc = pe_yind - - d = f.read(varname, ranges=[tind, - slice(xstart, xstop), - zind]) - data[:, (xgstart-xind.start):(xgstart-xind.start+nx_loc), :] = d - elif dimensions == ('x', 'y'): - d = f.read(varname, ranges=[slice(xstart, xstop), - slice(ystart, ystop)]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), - (ygstart-yind.start):(ygstart-yind.start+ny_loc)] = d - elif dimensions == ('x', 'z'): - # FieldPerp should only be defined on processors which contain its yindex_global - f_attributes = f.attributes(varname) - temp_yindex = f_attributes["yindex_global"] - - if temp_yindex >= 0: - if yindex_global is None: - yindex_global = temp_yindex - - # we have found a file with containing the FieldPerp, get the attributes from here - var_attributes = f_attributes - assert 
temp_yindex == yindex_global - - if temp_yindex >= 0: - # Check we only read from one pe_yind - assert fieldperp_yproc is None or fieldperp_yproc == pe_yind - - fieldperp_yproc = pe_yind - - d = f.read(varname, ranges=[slice(xstart, xstop), zind]) - data[(xgstart-xind.start):(xgstart-xind.start+nx_loc), :] = d - else: - raise ValueError('Incorrect dimensions '+str(dimensions)+' in collect') - - if datafile_cache is None: - # close the DataFile if we are not keeping it in a cache - f.close() - - # if a step was requested in x or y, need to apply it here - if xind.step is not None or yind.step is not None: - if dimensions == ('t', 'x', 'y', 'z'): - data = data[:, ::xind.step, ::yind.step] - elif dimensions == ('x', 'y', 'z'): - data = data[::xind.step, ::yind.step, :] - elif dimensions == ('t', 'x', 'y'): - data = data[:, ::xind.step, ::yind.step] - elif dimensions == ('t', 'x', 'z'): - data = data[:, ::xind.step, :] - elif dimensions == ('x', 'y'): - data = data[::xind.step, ::yind.step] - elif dimensions == ('x', 'z'): - data = data[::xind.step, :] - else: - raise ValueError('Incorrect dimensions '+str(dimensions)+' applying steps in collect') - - # Force the precision of arrays of dimension>1 - if ndims > 1: - try: - data = data.astype(t_array.dtype, copy=False) - except TypeError: - data = data.astype(t_array.dtype) - - # Finished looping over all files - if info: - sys.stdout.write("\n") - return BoutArray(data, attributes=var_attributes) - - -def attributes(varname, path=".", prefix="BOUT.dmp"): - """Return a dictionary of variable attributes in an output file - - Parameters - ---------- - varname : str - Name of the variable - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - - Returns - ------- - dict - A dictionary of attributes of varname - """ - # Search for BOUT++ dump files in NetCDF format - file_list, _, _ = findFiles(path, prefix) - - # Read data from the first file - f = DataFile(file_list[0]) - - return f.attributes(varname) - - -def dimensions(varname, path=".", prefix="BOUT.dmp"): - """Return the names of dimensions of a variable in an output file - - Parameters - ---------- - varname : str - Name of the variable - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - - Returns - ------- - tuple of strs - The elements of the tuple give the names of corresponding variable - dimensions - - """ - file_list, _, _ = findFiles(path, prefix) - return DataFile(file_list[0]).dimensions(varname) - - -def findFiles(path, prefix): - """Find files matching prefix in path. - - Netcdf (".nc", ".ncdf", ".cdl") and HDF5 (".h5", ".hdf5", ".hdf") - files are searched. - - Parameters - ---------- - path : str - Path to data files - prefix : str - File prefix - - Returns - ------- - tuple : (list of str, bool, str) - The first element of the tuple is the list of files, the second is - whether the files are a parallel dump file and the last element is - the file suffix. - - """ - - # Make sure prefix does not have a trailing . - if prefix[-1] == ".": - prefix = prefix[:-1] - - # Look for parallel dump files - suffixes = [".nc", ".ncdf", ".cdl", ".h5", ".hdf5", ".hdf"] - file_list_parallel = None - suffix_parallel = "" - for test_suffix in suffixes: - files = glob.glob(os.path.join(path, prefix+test_suffix)) - if files: - if file_list_parallel: # Already had a list of files - raise IOError("Parallel dump files with both {0} and {1} extensions are present. 
Do not know which to read.".format( - suffix, test_suffix)) - suffix_parallel = test_suffix - file_list_parallel = files - - file_list = None - suffix = "" - for test_suffix in suffixes: - files = glob.glob(os.path.join(path, prefix+".*"+test_suffix)) - if files: - if file_list: # Already had a list of files - raise IOError("Dump files with both {0} and {1} extensions are present. Do not know which to read.".format( - suffix, test_suffix)) - suffix = test_suffix - file_list = files - - if file_list_parallel and file_list: - raise IOError("Both regular (with suffix {0}) and parallel (with suffix {1}) dump files are present. Do not know which to read.".format( - suffix_parallel, suffix)) - elif file_list_parallel: - return file_list_parallel, True, suffix_parallel - elif file_list: - # make sure files are in the right order - nfiles = len(file_list) - file_list = [os.path.join(path, prefix+"."+str(i)+suffix) - for i in range(nfiles)] - return file_list, False, suffix - else: - raise IOError("ERROR: No data files found in path {0}".format(path)) - - -def create_cache(path, prefix): - """Create a list of DataFile objects to be passed repeatedly to - collect. - - Parameters - ---------- - path : str - Path to data files - prefix : str - File prefix - - Returns - ------- - namedtuple : (list of str, bool, str, list of :py:obj:`~boututils.datafile.DataFile`) - The cache of DataFiles in a namedtuple along with the file_list, - and parallel and suffix attributes - - """ - - # define namedtuple to return as the result - from collections import namedtuple - datafile_cache_tuple = namedtuple( - "datafile_cache", ["file_list", "parallel", "suffix", "datafile_list"]) - - file_list, parallel, suffix = findFiles(path, prefix) - - cache = [] - for f in file_list: - cache.append(DataFile(f)) - - return datafile_cache_tuple(file_list=file_list, parallel=parallel, suffix=suffix, datafile_list=cache) diff --git a/tools/pylib/boutdata/data.py b/tools/pylib/boutdata/data.py deleted file mode 100644 index ddbc543199..0000000000 --- a/tools/pylib/boutdata/data.py +++ /dev/null @@ -1,1276 +0,0 @@ -"""Provides a class BoutData which makes access to code inputs and -outputs easier. Creates a tree of maps, inspired by approach used in -OMFIT - -""" - -import copy -import glob -import io -import numpy -import os -import re - -from boutdata.collect import collect, create_cache -from boututils.boutwarnings import alwayswarn -from boututils.datafile import DataFile - -# These are imported to be used by 'eval' in -# BoutOptions.evaluate_scalar() and BoutOptionsFile.evaluate(). 
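`data.py` below imports `create_cache()` from the `collect` module whose deletion ends just above; the API is unchanged in the boutdata submodule. A usage sketch of that cache (the variable names and path are illustrative): opening every dump file once and reusing the handles avoids re-opening all files on each `collect()` call.

```python
# Usage sketch: reuse open DataFile handles across several collect() calls.
from boutdata.collect import collect, create_cache

cache = create_cache(path="data", prefix="BOUT.dmp")  # opens every dump file once

# Each call reuses cache.datafile_list instead of re-opening all the files.
n = collect("n", path="data", datafile_cache=cache)      # "n" is an example name
phi = collect("phi", path="data", datafile_cache=cache)  # so is "phi"

for f in cache.datafile_list:  # the caller owns the handles: close when done
    f.close()
```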
-# Change the names to match those used by C++/BOUT++ -from numpy import ( - pi, - sin, - cos, - tan, - arccos as acos, - arcsin as asin, - arctan as atan, - arctan2 as atan2, - sinh, - cosh, - tanh, - arcsinh as asinh, - arccosh as acosh, - arctanh as atanh, - exp, - log, - log10, - power as pow, - sqrt, - ceil, - floor, - round, - abs, -) - - -from collections import UserDict - - -class CaseInsensitiveDict(UserDict): - def __missing__(self, key): - return CaseInsensitiveDict({key: CaseInsensitiveDict()}) - - def __getitem__(self, key): - return self.data[key.lower()][1] - - def __setitem__(self, key, value): - self.data[key.lower()] = (key, value) - - def __delitem__(self, key): - del self.data[key.lower()] - - def __iter__(self): - return (key for key, _ in self.data.values()) - - def __contains__(self, key): - return key.lower() in self.data - - def __repr__(self): - return repr({key: value for key, value in self.data.values()}) - - -class BoutOptions(object): - """This class represents a tree structure. Each node (BoutOptions - object) can have several sub-nodes (sections), and several - key-value pairs. - - Parameters - ---------- - name : str, optional - Name of the root section (default: "root") - parent : BoutOptions, optional - A parent BoutOptions object (default: None) - - Examples - -------- - - >>> optRoot = BoutOptions() # Create a root - - Specify value of a key in a section "test" - If the section does not exist then it is created - - >>> optRoot.getSection("test")["key"] = 4 - - Get the value of a key in a section "test" - If the section does not exist then a KeyError is raised - - >>> print(optRoot["test"]["key"]) - 4 - - To pretty print the options - - >>> print(optRoot) - [test] - key = 4 - - """ - - def __init__(self, name="root", parent=None): - self._sections = CaseInsensitiveDict() - self._keys = CaseInsensitiveDict() - self._name = name - self._parent = parent - self.comments = CaseInsensitiveDict() - self.inline_comments = CaseInsensitiveDict() - self._comment_whitespace = CaseInsensitiveDict() - - def getSection(self, name): - """Return a section object. 
If the section does not exist then it is - created - - Parameters - ---------- - name : str - Name of the section to get/create - - Returns - ------- - BoutOptions - A new section with the original object as the parent - - """ - - if name in self._sections: - return self._sections[name] - else: - newsection = BoutOptions(name=name, parent=self) - self._sections[name] = newsection - return newsection - - def __getitem__(self, key): - """ - First check if it's a section, then a value - """ - - key_parts = key.split(":", maxsplit=1) - - if len(key_parts) > 1: - section = self[key_parts[0]] - return section[key_parts[1]] - - if key in self._sections: - return self._sections[key] - - if key not in self._keys: - raise KeyError("Key '%s' not in section '%s'" % (key, self.path())) - return self._keys[key] - - def __setitem__(self, key, value): - """ - Set a key - """ - if len(key) == 0: - return - - key_parts = key.split(":", maxsplit=1) - - if len(key_parts) > 1: - try: - section = self[key_parts[0]] - except KeyError: - section = self.getSection(key_parts[0]) - section[key_parts[1]] = value - else: - self._keys[key] = value - - def __delitem__(self, key): - key_parts = key.split(":", maxsplit=1) - - if len(key_parts) > 1: - section = self[key_parts[0]] - del section[key_parts[1]] - return - - if key in self._sections: - del self._sections[key] - elif key in self._keys: - del self._keys[key] - else: - raise KeyError(key) - - def __contains__(self, key): - key_parts = key.split(":", maxsplit=1) - - if len(key_parts) > 1: - if key_parts[0] in self: - return key_parts[1] in self[key_parts[0]] - return False - - return key in self._keys or key in self._sections - - __marker = object() - - def pop(self, key, default=__marker): - """options.pop(k[,d]) -> v, remove specified key and return the - corresponding value. If key is not found, d is returned if - given, otherwise KeyError is raised. - - """ - return self._pop_impl(key, default)[0] - - def _pop_impl(self, key, default=__marker): - """Private implementation of pop; also pops metadata - - """ - key_parts = key.split(":", maxsplit=1) - - if len(key_parts) > 1: - return self[key_parts[0]]._pop_impl(key_parts[1], default) - - if key in self._sections: - value = self._sections.pop(key) - name = self._name - parent = self._parent - elif key in self._keys: - value = self._keys.pop(key) - name = None - parent = None - elif default is self.__marker: - raise KeyError(key) - else: - return default - - comment = self.comments.pop(key, None) - inline_comment = self.inline_comments.pop(key, None) - comment_whitespace = self._comment_whitespace.pop(key, None) - - return (value, name, parent, comment, inline_comment, comment_whitespace) - - def rename(self, old_name, new_name): - """Rename old_name to new_name - """ - - def setattr_nested(parent, key, attr, value): - """Set one of the comment types on some nested section. 
Slightly - complicated because the comment attributes are dicts, but - we need to get the (possibly) nested parent section - - """ - # Don't set comment if it's None - if value is None: - return - - key_parts = key.split(":", maxsplit=1) - if len(key_parts) > 1: - setattr_nested(parent[key_parts[0]], key_parts[1], attr, value) - else: - getattr(parent, attr)[key] = value - - def ensure_sections(parent, path): - """Make sure all the components of path in parent are sections - """ - path_parts = path.split(":", maxsplit=1) - - def check_is_section(parent, path): - if path in parent and not isinstance(parent[path], BoutOptions): - raise TypeError( - "'{}:{}' already exists and is not a section!".format( - parent._name, path - ) - ) - - if len(path_parts) > 1: - new_parent_name, child_name = path_parts - check_is_section(parent, new_parent_name) - parent.getSection(new_parent_name) - ensure_sections(parent[new_parent_name], child_name) - else: - check_is_section(parent, path) - parent.getSection(path) - - value = self[old_name] - - if isinstance(value, BoutOptions): - # We're moving a section: make sure we don't clobber existing values - ensure_sections(self, new_name) - # Now we're definitely moving into an existing section, so - # update values and comments - for key in value: - self[new_name][key] = value[key] - setattr_nested(self[new_name], key, "comments", value.comments.get(key)) - setattr_nested( - self[new_name], - key, - "inline_comments", - value.inline_comments.get(key), - ) - setattr_nested( - self[new_name], - key, - "_comment_whitespace", - value._comment_whitespace.get(key), - ) - _, _, _, comment, inline_comment, comment_whitespace = self._pop_impl( - old_name - ) - else: - _, _, _, comment, inline_comment, comment_whitespace = self._pop_impl( - old_name - ) - self[new_name] = value - - # Update comments on new parent section - setattr_nested(self, new_name, "comments", comment) - setattr_nested(self, new_name, "inline_comments", inline_comment) - setattr_nested(self, new_name, "_comment_whitespace", comment_whitespace) - - def path(self): - """Returns the path of this section, joining together names of - parents - - """ - - if self._parent: - return self._parent.path() + ":" + self._name - return self._name - - def keys(self): - """Returns all keys, including sections and values - - """ - return list(self._sections) + list(self._keys) - - def sections(self): - """Return a list of sub-sections - - """ - return self._sections.keys() - - def values(self): - """Return a list of values - - """ - return self._keys.keys() - - def as_dict(self): - """Return a nested dictionary of all the options. - - """ - dicttree = {name: self[name] for name in self.values()} - dicttree.update({name: self[name].as_dict() for name in self.sections()}) - return dicttree - - def __len__(self): - return len(self._sections) + len(self._keys) - - def __eq__(self, other): - """Test if this BoutOptions is the same as another one.""" - if not isinstance(other, BoutOptions): - return False - if self is other: - # other is a reference to the same object - return True - if len(self._sections) != len(other._sections): - return False - if len(self._keys) != len(other._keys): - return False - for secname, section in self._sections.items(): - if secname not in other or section != other[secname]: - return False - for key, value in self._keys.items(): - if key not in other or value != other[key]: - return False - return True - - def __iter__(self): - """Iterates over all keys. 
First values, then sections - - """ - for k in self._keys: - yield k - for s in self._sections: - yield s - - def as_tree(self, indent=""): - """Return a string formatted as a pretty version of the options tree - - """ - text = self._name + "\n" - - for k in self._keys: - text += indent + " |- " + k + " = " + str(self._keys[k]) + "\n" - - for s in self._sections: - text += indent + " |- " + self._sections[s].as_tree(indent + " | ") - return text - - def __str__(self, basename=None, opts=None, f=None): - if f is None: - f = io.StringIO() - if opts is None: - opts = self - - def format_inline_comment(name, options): - if name in options.inline_comments: - f.write( - "{}{}".format( - options._comment_whitespace[name], options.inline_comments[name] - ) - ) - - for key, value in opts._keys.items(): - if key in opts.comments: - f.write("\n".join(opts.comments[key]) + "\n") - f.write("{} = {}".format(key, value)) - format_inline_comment(key, opts) - f.write("\n") - - for section in opts._sections.keys(): - section_name = basename + ":" + section if basename else section - if section in opts.comments: - f.write("\n".join(opts.comments[section])) - if opts[section]._keys: - f.write("\n[{}]".format(section_name)) - format_inline_comment(section, opts) - f.write("\n") - self.__str__(section_name, opts[section], f) - - return f.getvalue() - - def evaluate_scalar(self, name): - """ - Evaluate (recursively) scalar expressions - """ - expression = self._substitute_expressions(name) - - # replace ^ with ** so that Python evaluates exponentiation - expression = expression.replace("^", "**") - - return eval(expression) - - def _substitute_expressions(self, name): - expression = str(self[name]).lower() - expression = self._evaluate_section(expression, "") - parent = self._parent - while parent is not None: - sectionname = parent._name - if sectionname == "root": - sectionname = "" - expression = parent._evaluate_section(expression, sectionname) - parent = parent._parent - - return expression - - def _evaluate_section(self, expression, nested_sectionname): - # pass a nested section name so that we can traverse the options tree - # rooted at our own level and each level above us so that we can use - # relatively qualified variable names, e.g. if we are in section - # 'foo:bar:baz' then a variable 'x' from section 'bar' could be called - # 'bar:x' (found traversing the tree starting from 'bar') or - # 'foo:bar:x' (found when traversing tree starting from 'foo'). - for var in self.values(): - if nested_sectionname != "": - nested_name = nested_sectionname + ":" + var - else: - nested_name = var - if re.search( - r"(?>> opts = BoutOptionsFile("BOUT.inp") - >>> print(opts) # Print all options in a tree - root - |- nout = 100 - |- timestep = 2 - ... 
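A behaviour sketch for the `BoutOptions` tree defined above (the class moves unchanged into the boutdata submodule): colon-separated keys create and traverse nested sections, and all lookups go through `CaseInsensitiveDict`, so case does not matter.

```python
from boutdata.data import BoutOptions

opts = BoutOptions()
opts["mesh:ddx:first"] = "C4"        # colons create nested sections on the fly
opts.getSection("solver")["type"] = "rk4"

print(opts["mesh"]["ddx"]["first"])  # -> C4
print(opts["Mesh:DDX:FIRST"])        # -> C4, lookups are case-insensitive
print("solver:type" in opts)         # -> True
print(opts.as_dict())                # nested plain-dict view of the whole tree
```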
- - >>> opts["All"]["scale"] # Value "scale" in section "All" - 1.0 - - """ - - # Characters that start a comment - VALID_COMMENTS = ("#", ";") - # Get not just the comment, but also the preceeding whitespace - COMMENT_REGEX = re.compile(r"(.*?)(\s*)([{}].*)".format("".join(VALID_COMMENTS))) - - def __init__( - self, - filename="BOUT.inp", - name="root", - gridfilename=None, - nx=None, - ny=None, - nz=None, - ): - BoutOptions.__init__(self, name) - self.filename = filename - self.gridfilename = gridfilename - # Open the file - with open(filename, "r") as f: - # Go through each line in the file - section = self # Start with root section - comments = [] - for linenr, line in enumerate(f.readlines()): - # First remove comments, either # or ; - if line.lstrip().startswith(self.VALID_COMMENTS): - comments.append('#' + line.strip()[1:]) - continue - if line.strip() == "": - comments.append(line.strip()) - continue - - comment_match = self.COMMENT_REGEX.search(line) - if comment_match is not None: - line, comment_whitespace, inline_comment = comment_match.groups() - inline_comment = '#' + inline_comment.strip()[1:] - else: - inline_comment = None - comment_whitespace = None - - # Check section headers - startpos = line.find("[") - endpos = line.find("]") - if startpos != -1: - # A section heading - if endpos == -1: - raise SyntaxError("Missing ']' on line %d" % (linenr,)) - line = line[(startpos + 1) : endpos].strip() - - parent_section = self - while True: - scorepos = line.find(":") - if scorepos == -1: - sectionname = line - break - sectionname = line[0:scorepos] - line = line[(scorepos + 1) :] - parent_section = parent_section.getSection(sectionname) - section = parent_section.getSection(line) - if comments: - parent_section.comments[sectionname] = copy.deepcopy(comments) - comments = [] - if inline_comment is not None: - parent_section.inline_comments[sectionname] = inline_comment - parent_section._comment_whitespace[ - sectionname - ] = comment_whitespace - else: - # A key=value pair - - eqpos = line.find("=") - if eqpos == -1: - # No '=', so just set to true - section[line.strip()] = True - value_name = line.strip() - else: - value = line[(eqpos + 1) :].strip() - try: - # Try to convert to an integer - value = int(value) - except ValueError: - try: - # Try to convert to float - value = float(value) - except ValueError: - # Leave as a string - pass - - value_name = line[:eqpos].strip() - section[value_name] = value - if comments: - section.comments[value_name] = copy.deepcopy(comments) - comments = [] - if inline_comment is not None: - section.inline_comments[value_name] = inline_comment - section._comment_whitespace[value_name] = comment_whitespace - - try: - self.recalculate_xyz(nx=nx, ny=ny, nz=nz) - except Exception as e: - alwayswarn( - "While building x, y, z coordinate arrays, an " - "exception occured: " - + str(e) - + "\nEvaluating non-scalar options not available" - ) - - def recalculate_xyz(self, *, nx=None, ny=None, nz=None): - """ - Recalculate the x, y avd z arrays used to evaluate expressions - """ - # define arrays of x, y, z to be used for substitutions - gridfile = None - nzfromfile = None - if self.gridfilename: - if nx is not None or ny is not None: - raise ValueError( - "nx or ny given as inputs even though " - "gridfilename was given explicitly, " - "don't know which parameters to choose" - ) - with DataFile(self.gridfilename) as gridfile: - self.nx = float(gridfile["nx"]) - self.ny = float(gridfile["ny"]) - try: - nzfromfile = gridfile["MZ"] - except KeyError: - pass 
- elif nx or ny: - if nx is None: - raise ValueError( - "nx not specified. If either nx or ny are given, then both must be." - ) - if ny is None: - raise ValueError( - "ny not specified. If either nx or ny are given, then both must be." - ) - self.nx = nx - self.ny = ny - else: - try: - self.nx = self["mesh"].evaluate_scalar("nx") - self.ny = self["mesh"].evaluate_scalar("ny") - except KeyError: - try: - # get nx, ny, nz from output files - from boutdata.collect import findFiles - - file_list = findFiles( - path=os.path.dirname("."), prefix="BOUT.dmp" - ) - with DataFile(file_list[0]) as f: - self.nx = f["nx"] - self.ny = f["ny"] - nzfromfile = f["MZ"] - except (IOError, KeyError): - try: - gridfilename = self["mesh"]["file"] - except KeyError: - gridfilename = self["grid"] - with DataFile(gridfilename) as gridfile: - self.nx = float(gridfile["nx"]) - self.ny = float(gridfile["ny"]) - try: - nzfromfile = float(gridfile["MZ"]) - except KeyError: - pass - if nz is not None: - self.nz = nz - else: - try: - self.nz = self["mesh"].evaluate_scalar("nz") - except KeyError: - try: - self.nz = self.evaluate_scalar("mz") - except KeyError: - if nzfromfile is not None: - self.nz = nzfromfile - mxg = self._keys.get("MXG", 2) - myg = self._keys.get("MYG", 2) - - # make self.x, self.y, self.z three dimensional now so - # that expressions broadcast together properly. - self.x = numpy.linspace( - (0.5 - mxg) / (self.nx - 2 * mxg), - 1.0 - (0.5 - mxg) / (self.nx - 2 * mxg), - self.nx, - )[:, numpy.newaxis, numpy.newaxis] - self.y = ( - 2.0 - * numpy.pi - * numpy.linspace( - (0.5 - myg) / self.ny, - 1.0 - (0.5 - myg) / self.ny, - self.ny + 2 * myg, - )[numpy.newaxis, :, numpy.newaxis] - ) - self.z = ( - 2.0 - * numpy.pi - * numpy.linspace(0.5 / self.nz, 1.0 - 0.5 / self.nz, self.nz)[ - numpy.newaxis, numpy.newaxis, : - ] - ) - - def evaluate(self, name): - """Evaluate (recursively) expressions - - Sections and subsections must be given as part of 'name', - separated by colons - - Parameters - ---------- - name : str - Name of variable to evaluate, including sections and - subsections - - """ - section = self - split_name = name.split(":") - for subsection in split_name[:-1]: - section = section.getSection(subsection) - expression = section._substitute_expressions(split_name[-1]) - - # replace ^ with ** so that Python evaluates exponentiation - expression = expression.replace("^", "**") - - # substitute for x, y and z coordinates - for coord in ["x", "y", "z"]: - expression = re.sub( - r"\b" + coord.lower() + r"\b", "self." + coord, expression - ) - - return eval(expression) - - def write(self, filename=None, overwrite=False): - """ Write to BOUT++ options file - - This method will throw an error rather than overwriting an existing - file unless the overwrite argument is set to true. - Note, no comments from the original input file are transferred to the - new one. - - Parameters - ---------- - filename : str - Path of the file to write - (defaults to path of the file that was read in) - overwrite : bool - If False then throw an exception if 'filename' already exists. - Otherwise, just overwrite without asking. - (default False) - """ - if filename is None: - filename = self.filename - - if not overwrite and os.path.exists(filename): - raise ValueError( - "Not overwriting existing file, cannot write output to " + filename - ) - - with open(filename, "w") as f: - f.write(str(self)) - - -class BoutOutputs(object): - """Emulates a map class, represents the contents of a BOUT++ dmp - files. 
Does not allow writing, only reading of data. By default - there is no cache, so each time a variable is read it is - collected; if caching is set to True variables are stored once - they are read. Extra keyword arguments are passed through to - collect. - - Parameters - ---------- - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - suffix : str, optional - File suffix (default: None, searches all file extensions) - caching : bool, float, optional - Switches on caching of data, so it is only read into memory - when first accessed (default False) If caching is set to a - number, it gives the maximum size of the cache in GB, after - which entries will be discarded in first-in-first-out order to - prevent the cache getting too big. If the variable being - returned is bigger than the maximum cache size, then the - variable will be returned without being added to the cache, - and the rest of the cache will be left (default: False) - DataFileCaching : bool, optional - Switch for creation of a cache of DataFile objects to be - passed to collect so that DataFiles do not need to be - re-opened to read each variable (default: True) - - **kwargs - keyword arguments that are passed through to _caching_collect() - - Examples - -------- - - >>> d = BoutOutputs(".") # Current directory - >> d.keys() # List all valid keys - ['iteration', - 'zperiod', - 'MYSUB', - ... - ] - - >>> d.dimensions["ne"] # Get the dimensions of the field ne - ('t', 'x', 'y', 'z') - - >>> d["ne"] # Read "ne" from data files - BoutArray([[[[...]]]]) - - >>> d = BoutOutputs(".", prefix="BOUT.dmp", caching=True) # Turn on caching - - """ - - def __init__( - self, - path=".", - prefix="BOUT.dmp", - suffix=None, - caching=False, - DataFileCaching=True, - **kwargs - ): - """ - Initialise BoutOutputs object - """ - self._path = path - # normalize prefix by removing trailing '.' if present - self._prefix = prefix.rstrip(".") - if suffix is None: - temp_file_list = glob.glob(os.path.join(self._path, self._prefix + "*")) - latest_file = max(temp_file_list, key=os.path.getctime) - self._suffix = latest_file.split(".")[-1] - else: - # normalize suffix by removing leading '.' if present - self._suffix = suffix.lstrip(".") - self._caching = caching - self._DataFileCaching = DataFileCaching - self._kwargs = kwargs - - # Label for this data - self.label = path - - self._file_list = glob.glob( - os.path.join(path, self._prefix + "*" + self._suffix) - ) - if suffix is not None: - latest_file = max(self._file_list, key=os.path.getctime) - # if suffix==None we already found latest_file - - # Check that the path contains some data - if len(self._file_list) == 0: - raise ValueError("ERROR: No data files found") - - # Available variables - self.varNames = [] - self.dimensions = {} - self.evolvingVariableNames = [] - - with DataFile(latest_file) as f: - npes = f.read("NXPE") * f.read("NYPE") - if len(self._file_list) != npes: - alwayswarn("Too many data files, reading most recent ones") - if npes == 1: - # single output file - # do like this to catch, e.g. either 'BOUT.dmp.nc' or 'BOUT.dmp.0.nc' - self._file_list = [latest_file] - else: - self._file_list = [ - os.path.join( - path, self._prefix + "." + str(i) + "." 
+ self._suffix - ) - for i in range(npes) - ] - - # Get variable names - self.varNames = f.keys() - for name in f.keys(): - dimensions = f.dimensions(name) - self.dimensions[name] = dimensions - if name != "t_array" and "t" in dimensions: - self.evolvingVariableNames.append(name) - - # Private variables - if self._caching: - from collections import OrderedDict - - self._datacache = OrderedDict() - if self._caching is not True: - # Track the size of _datacache and limit it to a maximum of _caching - try: - # Check that _caching is a number of some sort - float(self._caching) - except ValueError: - raise ValueError( - "BoutOutputs: Invalid value for caching argument. Caching should be either a number (giving the maximum size of the cache in GB), True for unlimited size or False for no caching." - ) - self._datacachesize = 0 - self._datacachemaxsize = self._caching * 1.0e9 - - self._DataFileCache = None - - def keys(self): - """Return a list of available variable names - - """ - return self.varNames - - def evolvingVariables(self): - """Return a list of names of time-evolving variables - - """ - return self.evolvingVariableNames - - def redistribute(self, npes, nxpe=None, mxg=2, myg=2, include_restarts=True): - """Create a new set of dump files for npes processors. - - Useful for restarting simulations using more or fewer processors. - - Existing data and restart files are kept in the directory - "redistribution_backups". redistribute() will fail if this - directory already exists, to avoid overwriting anything - - Parameters - ---------- - npes : int - Number of new files to create - nxpe : int, optional - If nxpe is None (the default), then an 'optimal' number will be - selected automatically - mxg, myg : int, optional - Number of guard cells in x, y (default: 2) - include_restarts : bool, optional - If True, then restart.redistribute will be used to - redistribute the restart files also (default: True) - - """ - from boutdata.processor_rearrange import ( - get_processor_layout, - create_processor_layout, - ) - from os import rename, path, mkdir - - # use get_processor_layout to get nx, ny - old_processor_layout = get_processor_layout( - DataFile(self._file_list[0]), has_t_dimension=True, mxg=mxg, myg=myg - ) - old_nxpe = old_processor_layout.nxpe - old_nype = old_processor_layout.nype - old_npes = old_processor_layout.npes - old_mxsub = old_processor_layout.mxsub - old_mysub = old_processor_layout.mysub - nx = old_processor_layout.nx - ny = old_processor_layout.ny - mz = old_processor_layout.mz - mxg = old_processor_layout.mxg - myg = old_processor_layout.myg - - # calculate new processor layout - new_processor_layout = create_processor_layout( - old_processor_layout, npes, nxpe=nxpe - ) - nxpe = new_processor_layout.nxpe - nype = new_processor_layout.nype - mxsub = new_processor_layout.mxsub - mysub = new_processor_layout.mysub - - # move existing files to backup directory - # don't overwrite backup: os.mkdir will raise exception if directory already exists - backupdir = path.join(self._path, "redistribution_backups") - mkdir(backupdir) - for f in self._file_list: - rename(f, path.join(backupdir, path.basename(f))) - - # create new output files - outfile_list = [] - this_prefix = self._prefix - if not this_prefix[-1] == ".": - # ensure prefix ends with a '.' - this_prefix = this_prefix + "." - for i in range(npes): - outpath = os.path.join( - self._path, this_prefix + str(i) + "." 
+ self._suffix - ) - if self._suffix.split(".")[-1] in ["nc", "ncdf", "cdl"]: - # set format option to DataFile explicitly to avoid creating netCDF3 files, which can only contain up to 2GB of data - outfile_list.append( - DataFile(outpath, write=True, create=True, format="NETCDF4") - ) - else: - outfile_list.append(DataFile(outpath, write=True, create=True)) - - # Create a DataFileCache, if needed - if self._DataFileCaching: - DataFileCache = create_cache(backupdir, self._prefix) - else: - DataFileCache = None - # read and write the data - for v in self.varNames: - print("processing " + v) - data = collect( - v, - path=backupdir, - prefix=self._prefix, - xguards=True, - yguards=True, - info=False, - datafile_cache=DataFileCache, - ) - ndims = len(data.shape) - - # write data - for i in range(npes): - ix = i % nxpe - iy = int(i / nxpe) - outfile = outfile_list[i] - if v == "NPES": - outfile.write(v, npes) - elif v == "NXPE": - outfile.write(v, nxpe) - elif v == "NYPE": - outfile.write(v, nype) - elif v == "MXSUB": - outfile.write(v, mxsub) - elif v == "MYSUB": - outfile.write(v, mysub) - elif ndims == 0: - # scalar - outfile.write(v, data) - elif ndims == 1: - # time evolving scalar - outfile.write(v, data) - elif ndims == 2: - # Field2D - if data.shape != (nx + 2 * mxg, ny + 2 * myg): - # FieldPerp? - # check is not perfect, fails if ny=nz - raise ValueError( - "Error: Found FieldPerp '" - + v - + "'. This case is not currently handled by BoutOutputs.redistribute()." - ) - outfile.write( - v, - data[ - ix * mxsub : (ix + 1) * mxsub + 2 * mxg, - iy * mysub : (iy + 1) * mysub + 2 * myg, - ], - ) - elif ndims == 3: - # Field3D - if data.shape[:2] != (nx + 2 * mxg, ny + 2 * myg): - # evolving Field2D, but this case is not handled - # check is not perfect, fails if ny=nx and nx=nt - raise ValueError( - "Error: Found evolving Field2D '" - + v - + "'. This case is not currently handled by BoutOutputs.redistribute()." - ) - outfile.write( - v, - data[ - ix * mxsub : (ix + 1) * mxsub + 2 * mxg, - iy * mysub : (iy + 1) * mysub + 2 * myg, - :, - ], - ) - elif ndims == 4: - outfile.write( - v, - data[ - :, - ix * mxsub : (ix + 1) * mxsub + 2 * mxg, - iy * mysub : (iy + 1) * mysub + 2 * myg, - :, - ], - ) - else: - print( - "ERROR: variable found with unexpected number of dimensions,", - ndims, - ) - - for outfile in outfile_list: - outfile.close() - - if include_restarts: - print("processing restarts") - from boutdata import restart - from glob import glob - - restart_prefix = "BOUT.restart" - restarts_list = glob(path.join(self._path, restart_prefix + "*")) - - # Move existing restart files to backup directory - for f in restarts_list: - rename(f, path.join(backupdir, path.basename(f))) - - # Redistribute restarts - restart.redistribute( - npes, path=backupdir, nxpe=nxpe, output=self._path, mxg=mxg, myg=myg - ) - - def _collect(self, *args, **kwargs): - """Wrapper for collect to pass self._DataFileCache if necessary. 
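The `caching` machinery referenced throughout `BoutOutputs` (and implemented in `__getitem__` below) is a byte-limited first-in-first-out cache. A distilled sketch of that policy, independent of BOUT++ (class and method names here are illustrative): items larger than the limit are returned without being cached, and the oldest entries are evicted until the total size fits.

```python
# Sketch of the FIFO, byte-limited policy used by BoutOutputs (caching=<GB>).
from collections import OrderedDict
import numpy as np

class SizeLimitedCache:
    def __init__(self, max_gb):
        self._data = OrderedDict()
        self._size = 0
        self._max = max_gb * 1.0e9

    def get(self, name, loader):
        if name in self._data:
            return self._data[name]
        item = loader(name)             # e.g. a collect() call
        if item.nbytes > self._max:     # too big: return without caching
            return item
        self._data[name] = item
        self._size += item.nbytes
        while self._size > self._max:   # evict oldest entries first
            _, old = self._data.popitem(last=False)
            self._size -= old.nbytes
        return item

cache = SizeLimitedCache(max_gb=0.5)
field = cache.get("demo", lambda _: np.zeros((128, 128, 64)))
```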
- - """ - if self._DataFileCaching and self._DataFileCache is None: - # Need to create the cache - self._DataFileCache = create_cache(self._path, self._prefix) - return collect(*args, datafile_cache=self._DataFileCache, **kwargs) - - def __len__(self): - return len(self.varNames) - - def __getitem__(self, name): - """Reads a variable - - Caches result and returns later if called again, if caching is - turned on for this instance - - """ - - if self._caching: - if name not in self._datacache.keys(): - item = self._collect( - name, path=self._path, prefix=self._prefix, **self._kwargs - ) - if self._caching is not True: - itemsize = item.nbytes - if itemsize > self._datacachemaxsize: - return item - self._datacache[name] = item - self._datacachesize += itemsize - while self._datacachesize > self._datacachemaxsize: - self._removeFirstFromCache() - else: - self._datacache[name] = item - return item - else: - return self._datacache[name] - else: - # Collect the data from the repository - data = self._collect( - name, path=self._path, prefix=self._prefix, **self._kwargs - ) - return data - - def _removeFirstFromCache(self): - """Pop the first item from the OrderedDict _datacache - - """ - item = self._datacache.popitem(last=False) - self._datacachesize -= item[1].nbytes - - def __iter__(self): - """Iterate through all keys, starting with "options" then going - through all variables for _caching_collect - - """ - for k in self.varNames: - yield k - - def __str__(self, indent=""): - """Print a pretty version of the tree - - """ - text = "" - for k in self.varNames: - text += indent + k + "\n" - - return text - - -def BoutData(path=".", prefix="BOUT.dmp", caching=False, **kwargs): - """Returns a dictionary, containing the contents of a BOUT++ output - directory. - - Does not allow writing, only reading of data. By default there is - no cache, so each time a variable is read it is collected; if - caching is set to True variables are stored once they are read. - - Parameters - ---------- - path : str, optional - Path to data files (default: ".") - prefix : str, optional - File prefix (default: "BOUT.dmp") - caching : bool, float, optional - Switches on caching of data, so it is only read into memory - when first accessed (default False) If caching is set to a - number, it gives the maximum size of the cache in GB, after - which entries will be discarded in first-in-first-out order to - prevent the cache getting too big. 
If the variable being - returned is bigger than the maximum cache size, then the - variable will be returned without being added to the cache, - and the rest of the cache will be left (default: False) - DataFileCaching : bool, optional - Switch for creation of a cache of DataFile objects to be - passed to collect so that DataFiles do not need to be - re-opened to read each variable (default: True) - **kwargs - Keyword arguments that are passed through to collect() - - Returns - ------- - dict - Contents of a BOUT++ output directory, including options and - output files - - Examples - -------- - - >>> d = BoutData(".") # Current directory - - >>> d.keys() # List all valid keys - - >>> print(d["options"]) # Prints tree of options - - >>> d["options"]["nout"] # Value of nout in BOUT.inp file - - >>> print(d["outputs"]) # Print available outputs - - >>> d["outputs"]["ne"] # Read "ne" from data files - - >>> d = BoutData(".", prefix="BOUT.dmp", caching=True) # Turn on caching - - """ - - data = {} # Map for the result - - data["path"] = path - - # Options from BOUT.inp file - data["options"] = BoutOptionsFile(os.path.join(path, "BOUT.inp"), name="options") - - # Output from .dmp.* files - data["outputs"] = BoutOutputs(path, prefix=prefix, caching=caching, **kwargs) - - return data diff --git a/tools/pylib/boutdata/gen_surface.py b/tools/pylib/boutdata/gen_surface.py deleted file mode 100644 index 9b1caf81a0..0000000000 --- a/tools/pylib/boutdata/gen_surface.py +++ /dev/null @@ -1,154 +0,0 @@ -"""Flux surface generator for tokamak grid files - -""" -from __future__ import print_function - -import numpy as np - - -def gen_surface(grid): - """Generator for iterating over flux surfaces - - Parameters - ---------- - grid : DataFile - An input grid file to read to find flux surfaces - - Yields - ------ - tuple : (int, list of int, bool) - A tuple containing the x index, list of y indices and whether - this flux surface is periodic - - """ - # Read the grid data - nx = grid.read("nx") - ny = grid.read("ny") - - npol = grid.read("npol") - if npol is None: - # Domains not stored in file (BOUT style input) - ixseps1 = grid.read("ixseps1") - ixseps2 = grid.read("ixseps2") - jyseps1_1 = grid.read("jyseps1_1") - jyseps1_2 = grid.read("jyseps1_2") - jyseps2_1 = grid.read("jyseps2_1") - jyseps2_2 = grid.read("jyseps2_2") - - if ixseps1 == ixseps2: - # Single null - ndomains = 3 - else: - # Double null - ndomains = 6 - - yup_xsplit = np.zeros(ndomains) - ydown_xsplit = np.zeros(ndomains) - yup_xin = np.zeros(ndomains) - yup_xout = np.zeros(ndomains) - ydown_xin = np.zeros(ndomains) - ydown_xout = np.zeros(ndomains) - - ystart = np.zeros(ndomains+1) - ystart[ndomains] = ny - - # Inner lower leg - ydown_xsplit[0] = -1 - ydown_xout[0] = -1 - yup_xsplit[0] = ixseps1 - yup_xin[0] = ndomains-1 # Outer lower leg - yup_xout[0] = 1 - - # Outer lower leg - ydown_xsplit[ndomains-1] = ixseps1 - ydown_xin[ndomains-1] = 0 - ydown_xout[ndomains-1] = ndomains-2 - yup_xsplit[ndomains-1] = -1 - yup_xout[ndomains-1] = -1 - ystart[ndomains-1] = jyseps2_2+1 - - if ixseps1 == ixseps2: - # Single null - - ydown_xsplit[1] = ixseps1 - ydown_xin[1] = 1 - ydown_xout[1] = 0 - yup_xsplit[1] = ixseps1 - yup_xin[1] = 1 - yup_xout[1] = 2 - ystart[1] = jyseps1_1+1 - else: - # Double null - raise RuntimeError("SORRY - NO DOUBLE NULL YET") - else: - # Use domains stored in the file - ndomains = npol.size # Number of domains - yup_xsplit = grid.read("yup_xsplit") - ydown_xsplit = grid.read("ydown_xsplit") - yup_xin = grid.read("yup_xin") - 
yup_xout = grid.read("yup_xout") - ydown_xin = grid.read("ydown_xin") - ydown_xout = grid.read("ydown_xout") - - # Calculate starting positions - ystart = np.zeros(ndomains+1) - for i in np.arange(1,ndomains): - ystart[i] = ystart[i-1] + npol[i-1] - ystart[ndomains] = ny - - # Record whether a domain has been visited - visited = np.zeros(ndomains) - - x = 0 # X index - while True: - yinds = None # Y indices result - - # Find a domain which hasn't been visited - domain = None - for i in np.arange(ndomains): - if visited[i] == 0: - domain = i - break - - if domain is None: - # All domains visited - x = x + 1 # Go to next x surface - visited = np.zeros(ndomains) # Clear the visited array - if x == nx: - break # Finished - continue - - # Follow surface back until it hits a boundary - while True: - if x < ydown_xsplit[domain]: - d = ydown_xin[domain] - else: - d = ydown_xout[domain] - if d < 0: - break # Hit boundary - domain = d # Keep going - - # Starting from domain, follow surface - - periodic = False - while domain >= 0: - if visited[domain] == 1: - # Already visited domain -> periodic - periodic = True - break; - # Get range of y indices in this domain - yi = np.arange(ystart[domain], ystart[domain+1]) - if yinds is None: - yinds = yi - else: - yinds = np.concatenate((yinds, yi)) - # mark this domain as visited - visited[domain] = 1 - # Get next domain - if x < yup_xsplit[domain]: - domain = yup_xin[domain] - else: - domain = yup_xout[domain] - - # Finished this surface - yield x, yinds, periodic diff --git a/tools/pylib/boutdata/griddata.py b/tools/pylib/boutdata/griddata.py deleted file mode 100644 index 8246ddd8ab..0000000000 --- a/tools/pylib/boutdata/griddata.py +++ /dev/null @@ -1,493 +0,0 @@ -"""Routines for manipulating grid files - -""" -from __future__ import print_function - -from numpy import ndarray, zeros, concatenate, linspace, amin, amax -import matplotlib.pyplot as plt - -from boututils.datafile import DataFile - - -def slice(infile, outfile, region=None, xind=None, yind=None): - """Copy an X-Y slice from one DataFile to another - - Parameters - ---------- - infile : str - Name of DataFile to read slice from - outfile : str - Name of DataFile to write slice to. File will be created, and - will be overwritten if it already exists - region : {0, 1, 2, 3, 4, 5, None}, optional - Copy a whole region. The available regions are: - - 0: Lower inner leg - - 1: Inner core - - 2: Upper inner leg - - 3: Upper outer leg - - 4: Outer core - - 5: Lower outer leg - xind, yind : (int, int), optional - Index ranges for x and y. Range includes first point, but not - last point - - TODO - ---- - - Rename to not clobber builtin `slice` - - Better regions? 
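For illustration, the `gen_surface` generator removed above might be driven like this; the grid filename is a placeholder for any single-null BOUT++ grid file:

```python
from boututils.datafile import DataFile
from boutdata.gen_surface import gen_surface

with DataFile("grid.nc") as grid:
    for x, yinds, periodic in gen_surface(grid):
        kind = "closed (periodic)" if periodic else "open"
        npts = 0 if yinds is None else len(yinds)
        print("x = {}: {} surface, {} y-points".format(x, kind, npts))
```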
- - """ - - # Open input and output files - indf = DataFile(infile) - outdf = DataFile(outfile, create=True) - - nx = indf["nx"][0] - ny = indf["ny"][0] - - if region: - # Select a region of the mesh - - xind = [0, nx] - if region == 0: - # Lower inner leg - yind = [0, indf["jyseps1_1"][0]+1] - elif region == 1: - # Inner core - yind = [indf["jyseps1_1"][0]+1, indf["jyseps2_1"][0]+1] - elif region == 2: - # Upper inner leg - yind = [indf["jyseps2_1"][0]+1, indf["ny_inner"][0]] - elif region == 3: - # Upper outer leg - yind = [indf["ny_inner"][0], indf["jyseps1_2"][0]+1] - elif region == 4: - # Outer core - yind = [indf["jyseps1_2"][0]+1, indf["jyseps2_2"][0]+1] - else: - # Lower outer leg - yind = [indf["jyseps2_2"][0]+1, ny] - else: - # Use indices - if not xind: - xind = [0, nx] - if not yind: - yind = [0, ny] - - print("Indices: [%d:%d, %d:%d]" % (xind[0], xind[1], yind[0], yind[1])) - # List of variables requiring special handling - special = ["nx", "ny", "ny_inner", - "ixseps1", "ixseps2", - "jyseps1_1", "jyseps1_2", "jyseps2_1", "jyseps2_2", - "ShiftAngle"] - - outdf["nx"] = xind[1] - xind[0] - outdf["ny"] = yind[1] - yind[0] - outdf["ny_inner"] = indf["ny_inner"][0] - yind[0] - - outdf["ixseps1"] = indf["ixseps1"][0] - outdf["ixseps2"] = indf["ixseps2"][0] - - outdf["jyseps1_1"] = indf["jyseps1_1"][0] - yind[0] - outdf["jyseps2_1"] = indf["jyseps2_1"][0] - yind[0] - outdf["jyseps1_2"] = indf["jyseps1_2"][0] - yind[0] - outdf["jyseps2_2"] = indf["jyseps2_2"][0] - yind[0] - - outdf["ShiftAngle"] = indf["ShiftAngle"][xind[0]:xind[1]] - - # Loop over all variables - for v in list(indf.keys()): - if v in special: - continue # Skip these variables - - ndims = indf.ndims(v) - if ndims == 0: - # Copy scalars - print("Copying variable: " + v) - outdf[v] = indf[v][0] - elif ndims == 2: - # Assume [x,y] - print("Slicing variable: " + v); - outdf[v] = indf[v][xind[0]:xind[1], yind[0]:yind[1]] - else: - # Skip - print("Skipping variable: " + v) - - indf.close() - outdf.close() - - -def rotate(gridfile, yshift, output=None): - """Shifts a grid file by the specified number of points in y - - This moves the branch cut around, and can be used to change the - limiter location - - Parameters - ---------- - gridfile : str - Name of DataFile to rotate - yshift : int - Number of points in y to shift by - output : str, optional - Name of DataFile to write to. 
If None, will write to a new - file with the same name as `gridfile` + '_rot' - - """ - - if output is None: - output = gridfile + "_rot" - - print("Rotating grid file '%s' -> '%s'" % (gridfile, output)) - - # Open input grid file - with DataFile(gridfile) as d: - # Open output file - with DataFile(output, write=True, create=True) as out: - # Loop over variables - for varname in d.list(): - # Number of dimensions - ndims = d.ndims(varname) - - if ndims == 2: - print("Shifting '%s' (x,y)" % (varname,)) - # 2D, assume X-Y - - var = d[varname] # Read - ny = var.shape[1] - - # Make sure yshift is positive and in range - yshift = ((yshift % ny) + ny) % ny - - newvar = ndarray(var.shape) - - # Rotate - newvar[:,0:(ny-yshift)] = var[:,yshift:ny] - newvar[:,(ny-yshift):] = var[:,:yshift] - - # Write to output - #out[varname] = newvar # Write - out.write(varname, newvar) - elif ndims == 3: - print("Shifting '%s' (x,y,z)" % (varname,)) - # 3D, assume X-Y-Z - - var = d[varname] # Read - ny = var.shape[1] - - # Make sure yshift is positive and in range - yshift = ((yshift % ny) + ny) % ny - - newvar = ndarray(var.shape) - - newvar[:,0:(ny-yshift),:] = var[:,yshift:ny,:] - newvar[:,(ny-yshift):,:] = var[:,:yshift,:] - - # Write to output - out.write(varname, newvar) - else: - # Just copy - print("Copying '%s' (%d dimensions)" % (varname, ndims)) - out.write(varname, d[varname]) - - - -def gridcontourf(grid, data2d, nlevel=31, show=True, - mind=None, maxd=None, symmetric=False, - cmap=None, ax=None, - xlabel="Major radius [m]", ylabel="Height [m]", - separatrix=False): - """Plots a 2D contour plot, taking into account branch cuts - (X-points). - - Parameters - ---------- - grid : DataFile - A DataFile object - data2d : array_like - A 2D (x,y) NumPy array of data to plot - nlevel : int, optional - Number of levels in the contour plot - show : bool, optional - If True, will immediately show the plot - mind : float, optional - Minimum data level - maxd : float, optional - Maximum data level - symmetric : bool, optional - Make mind, maxd symmetric about zero - cmap : Colormap, optional - A matplotlib colormap to use. If None, use the current default - ax : Axes, optional - A matplotlib axes instance to plot to. 
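Correspondingly, a minimal sketch of driving `rotate` (placeholder filename); the double modulo in its body maps any shift, including negative ones, into `[0, ny)`:

```python
from boutdata.griddata import rotate

# Move the branch cut of a grid by 16 poloidal points;
# with no output name this writes "grid.nc_rot"
rotate("grid.nc", 16)

# The wrap used internally: ((yshift % ny) + ny) % ny
ny = 8
assert ((-3 % ny) + ny) % ny == 5
```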
If None, create a new - figure and axes, and plot to that - xlabel, ylabel : str, optional - Labels for the x/y axes - separatrix : bool, optional - Add separatrix - - Returns - ------- - con - The contourf instance - - Examples - -------- - - To put a plot into an axis with a color bar: - - >>> fig, axis = plt.subplots() - >>> c = gridcontourf(grid, data, show=False, ax=axis) - >>> fig.colorbar(c, ax=axis) - >>> plt.show() - - TODO - ---- - - Move into a plotting module - - """ - - if cmap is None: - cmap = plt.cm.get_cmap("YlOrRd") - - if len(data2d.shape) != 2: - raise ValueError("data2d must be 2D (x,y)") - - j11 = grid["jyseps1_1"] - j12 = grid["jyseps1_2"] - j21 = grid["jyseps2_1"] - j22 = grid["jyseps2_2"] - ix1 = grid["ixseps1"] - ix2 = grid["ixseps2"] - try: - nin = grid["ny_inner"] - except: - nin = j12 - - nx = grid["nx"] - ny = grid["ny"] - - if (data2d.shape[0] != nx) or (data2d.shape[1] != ny): - raise ValueError("data2d has wrong size: (%d,%d), expected (%d,%d)" % (data2d.shape[0], data2d.shape[1], nx, ny)) - - if hasattr(j11, "__len__"): - # Arrays rather than scalars - try: - j11 = j11[0] - j12 = j12[0] - j21 = j21[0] - j22 = j22[0] - ix1 = ix1[0] - ix2 = ix2[0] - nin = nin[0] - nx = nx[0] - ny = ny[0] - except: - pass - - R = grid["Rxy"] - Z = grid["Zxy"] - - if data2d.shape != (nx, ny): - raise ValueError("Dimensions do not match") - - add_colorbar = False - if ax is None: - fig = plt.figure() - ax = fig.add_subplot(111) - add_colorbar = True - - if mind is None: - mind = amin(data2d) - if maxd is None: - maxd = amax(data2d) - - if symmetric: - # Make mind, maxd symmetric about zero - maxd = max([maxd, abs(mind)]) - mind = -maxd - - levels = linspace(mind, maxd, nlevel, endpoint=True) - - ystart = 0 # Y index to start the next section - if j11 >= 0: - # plot lower inner leg - ax.contourf(R[:,ystart:(j11+1)], Z[:,ystart:(j11+1)], data2d[:,ystart:(j11+1)], levels,cmap=cmap) - - yind = [j11, j22+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels,cmap=cmap) - - ax.contourf(R[ix1:,j11:(j11+2)], Z[ix1:,j11:(j11+2)], data2d[ix1:,j11:(j11+2)], levels,cmap=cmap) - ystart = j11+1 - - yind = [j22, j11+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - - # Inner SOL - con = ax.contourf(R[:,ystart:(j21+1)], Z[:,ystart:(j21+1)], data2d[:,ystart:(j21+1)], levels, cmap=cmap) - ystart = j21+1 - - if j12 > j21: - # Contains upper PF region - - # Inner leg - ax.contourf(R[ix1:,j21:(j21+2)], Z[ix1:,j21:(j21+2)], data2d[ix1:,j21:(j21+2)], levels, cmap=cmap) - ax.contourf(R[:,ystart:nin], Z[:,ystart:nin], data2d[:,ystart:nin], levels, cmap=cmap) - - # Outer leg - ax.contourf(R[:,nin:(j12+1)], Z[:,nin:(j12+1)], data2d[:,nin:(j12+1)], levels, cmap=cmap) - ax.contourf(R[ix1:,j12:(j12+2)], Z[ix1:,j12:(j12+2)], data2d[ix1:,j12:(j12+2)], levels, cmap=cmap) - ystart = j12+1 - - yind = [j21, j12+1] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - - yind = [j21+1, j12] - ax.contourf(R[:ix1, yind].transpose(), Z[:ix1, yind].transpose(), data2d[:ix1, yind].transpose(), levels, cmap=cmap) - else: - ystart -= 1 - # Outer SOL - ax.contourf(R[:,ystart:(j22+1)], Z[:,ystart:(j22+1)], data2d[:,ystart:(j22+1)], levels, cmap=cmap) - - ystart = j22+1 - - if j22+1 < ny: - # Outer leg - ax.contourf(R[ix1:,j22:(j22+2)], Z[ix1:,j22:(j22+2)], data2d[ix1:,j22:(j22+2)], levels, cmap=cmap) - ax.contourf(R[:,ystart:ny], 
Z[:,ystart:ny], data2d[:,ystart:ny], levels, cmap=cmap) - - # X-point - Rx = [ [R[ix1-1,j11], R[ix1,j11], R[ix1,j11+1], R[ix1-1,j11+1]], - [R[ix1-1,j22+1], R[ix1,j22+1], R[ix1,j22], R[ix1-1,j22]] ] - - - Zx = [ [Z[ix1-1,j11], Z[ix1,j11], Z[ix1,j11+1], Z[ix1-1,j11+1]], - [Z[ix1-1,j22+1], Z[ix1,j22+1], Z[ix1,j22], Z[ix1-1,j22]] ] - Dx = [ [data2d[ix1-1,j11], data2d[ix1,j11], data2d[ix1,j11+1], data2d[ix1-1,j11+1]], - [data2d[ix1-1,j22+1], data2d[ix1,j22+1], data2d[ix1,j22], data2d[ix1-1,j22]] ] - ax.contourf(Rx, Zx, Dx, levels, cmap=cmap) - - if add_colorbar: - fig.colorbar(con) - - ax.set_aspect("equal") - if xlabel is not None: - ax.set_xlabel(xlabel) - if ylabel is not None: - ax.set_ylabel(ylabel) - - if separatrix: - # Plot separatrix - - # Lower X-point location - Rx = 0.125*(R[ix1-1,j11] + R[ix1,j11] + R[ix1,j11+1] + R[ix1-1,j11+1] - + R[ix1-1,j22+1] + R[ix1,j22+1] + R[ix1,j22] + R[ix1-1,j22]) - Zx = 0.125*(Z[ix1-1,j11] + Z[ix1,j11] + Z[ix1,j11+1] + Z[ix1-1,j11+1] - + Z[ix1-1,j22+1] + Z[ix1,j22+1] + Z[ix1,j22] + Z[ix1-1,j22]) - # Lower inner leg - ax.plot( concatenate( (0.5*(R[ix1-1,0:(j11+1)] + R[ix1,0:(j11+1)]), [Rx]) ), concatenate( (0.5*(Z[ix1-1,0:(j11+1)] + Z[ix1,0:(j11+1)]), [Zx]) ), 'k-') - # Lower outer leg - ax.plot( concatenate( ([Rx],0.5*(R[ix1-1,(j22+1):] + R[ix1,(j22+1):])) ), concatenate( ([Zx], 0.5*(Z[ix1-1,(j22+1):] + Z[ix1,(j22+1):])) ), 'k-') - # Core - - ax.plot( concatenate( ([Rx], 0.5*(R[ix1-1,(j11+1):(j21+1)] + R[ix1,(j11+1):(j21+1)]), 0.5*(R[ix1-1,(j12+1):(j22+1)] + R[ix1,(j12+1):(j22+1)]), [Rx]) ), - concatenate( ([Zx], 0.5*(Z[ix1-1,(j11+1):(j21+1)] + Z[ix1,(j11+1):(j21+1)]), 0.5*(Z[ix1-1,(j12+1):(j22+1)] + Z[ix1,(j12+1):(j22+1)]), [Zx]) ), 'k-') - if show: - plt.show() - - return con - - -def bout2sonnet(grdname, outf): - """Creates a Sonnet format grid from a BOUT++ grid. - - NOTE: Branch cuts are not yet supported - - Parameters - ---------- - grdname : str - Filename of BOUT++ grid file - outf : File - The file-like object to write to - - Examples - -------- - - >>> with open("output.sonnet", "w") as f: - ... bout2sonnet("BOUT.grd.nc", f) - - """ - - with DataFile(grdname) as g: - Rxy = g["Rxy"] - Zxy = g["Zxy"] - Bpxy = g["Bpxy"] - Btxy = g["Btxy"] - Bxy = g["Bxy"] - - # Now iterate over cells in the order Eirene expects - - nx, ny = Rxy.shape - - # Extrapolate values in Y - R = zeros([nx,ny+2]) - Z = zeros([nx,ny+2]) - - R[:,1:-1] = Rxy - Z[:,1:-1] = Zxy - - R[:,0] = 2.*R[:,1] - R[:,2] - Z[:,0] = 2.*Z[:,1] - Z[:,2] - - R[:,-1] = 2.*R[:,-2] - R[:,-3] - Z[:,-1] = 2.*Z[:,-2] - Z[:,-3] - - element = 1 # Element number - - outf.write("BOUT++: "+grdname+"\n\n") - - outf.write("=====================================\n") - - for i in range(2, nx-2): - # Loop in X, excluding guard cells - for j in range(1,ny+1): - # Loop in Y. 
Guard cells not in grid file - - # Lower left (low Y, low X) - ll = ( 0.25*(R[i-1,j-1] + R[i-1,j] + R[i,j-1] + R[i,j]), - 0.25*(Z[i-1,j-1] + Z[i-1,j] + Z[i,j-1] + Z[i,j]) ) - - # Lower right (low Y, upper X) - lr = ( 0.25*(R[i+1,j-1] + R[i+1,j] + R[i,j-1] + R[i,j]), - 0.25*(Z[i+1,j-1] + Z[i+1,j] + Z[i,j-1] + Z[i,j]) ) - - # Upper left (upper Y, lower X) - ul = ( 0.25*(R[i-1,j+1] + R[i-1,j] + R[i,j+1] + R[i,j]), - 0.25*(Z[i-1,j+1] + Z[i-1,j] + Z[i,j+1] + Z[i,j]) ) - - # Upper right (upper Y, upper X) - ur = ( 0.25*(R[i+1,j+1] + R[i+1,j] + R[i,j+1] + R[i,j]), - 0.25*(Z[i+1,j+1] + Z[i+1,j] + Z[i,j+1] + Z[i,j]) ) - - # Element number - outf.write(" ELEMENT %d = ( %d, %d): (%e, %e) (%e, %e)\n" % ( - element, - j-1, i-2, - ll[0], ll[1], - ul[0], ul[1])) - - # Ratio Bt / Bp at cell centre. Note j-1 because - # Bpxy and Btxy have not had extra points added - outf.write(" FIELD RATIO = %e (%e, %e)\n" % (Bpxy[i,j-1] / Btxy[i,j-1], R[i,j], Z[i,j]) ) - - outf.write(" (%e, %e) (%e, %e)\n" % ( - lr[0], lr[1], - ur[0], ur[1])) - - if (i == nx-3) and (j == ny+1): - # Last element - outf.write("=====================================\n") - else: - outf.write("-------------------------------------\n") - - element += 1 diff --git a/tools/pylib/boutdata/input.py b/tools/pylib/boutdata/input.py deleted file mode 100644 index 46759cfaa5..0000000000 --- a/tools/pylib/boutdata/input.py +++ /dev/null @@ -1,58 +0,0 @@ -"""Fourier transform data for input to BOUT++ - -""" -from builtins import range - -from numpy.fft import rfft -from numpy import ndarray - - -def transform3D(arr): - """Fourier transforms a 3D array in the Z dimension - - BOUT++ can take 3D inputs to be Fourier transformed in the Z - direction. - - Parameters - ---------- - arr : array_like - Input 3-D array - - Returns - ------- - array_like - A 3D array [x,y,kz] where kz is organised in the standard FFT - order, with constant (DC, kz=0) component first, followed by - real/imaginary pairs. - - kz = [0, (real, imag), (real, imag), ...] 
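As a quick check on the packed layout described above, a small helper (ours, not part of boutdata) that inverts it:

```python
import numpy as np


def unpack_kz(row):
    """Rebuild complex FFT coefficients from the packed
    [DC, re1, im1, re2, im2, ...] layout along the last axis."""
    nmodes = (len(row) - 1) // 2 + 1
    coeffs = np.zeros(nmodes, dtype=complex)
    coeffs[0] = row[0]
    for k in range(1, nmodes):
        coeffs[k] = row[2 * k - 1] + 1j * row[2 * k]
    return coeffs


packed = np.array([1.0, 0.5, -0.25, 0.1, 0.2])  # DC plus two modes
print(unpack_kz(packed))  # [1. +0.j  0.5-0.25j  0.1+0.2j]
```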
- - """ - - if len(arr.shape) != 3: - raise ValueError("Input array must be 3D") - - # Take FFT over z (last index), returning a complex array - fa = rfft(arr, axis=-1) - - nmodes = fa.shape[-1] - - # scipy fft normalises to N, but fftw doesn't - fa /= arr.shape[-1] - # Unpack complex array into a real array - - shape = list(arr.shape) - shape[-1] = 1 + (nmodes-1)*2 # One for DC + 2 for other modes - - result = ndarray(shape) - - # kz = 0 (DC) component only has real part - result[:,:,0] = fa[:,:,0].real - - # All other components have both real and imaginary parts - for k in range(1,nmodes): - result[:,:,2*k-1] = fa[:,:,k].real - result[:,:,2*k] = fa[:,:,k].imag - - return result - diff --git a/tools/pylib/boutdata/mayavi2.py b/tools/pylib/boutdata/mayavi2.py deleted file mode 100644 index a32b7433a1..0000000000 --- a/tools/pylib/boutdata/mayavi2.py +++ /dev/null @@ -1,118 +0,0 @@ -from __future__ import print_function -from builtins import range - -import numpy as np -from numpy import cos, sin, pi - -from enthought.tvtk.api import tvtk -from enthought.mayavi.scripts import mayavi2 - -def aligned_points(grid, nz=1, period=1.0, maxshift=0.4): - try: - nx = grid["nx"] - ny = grid["ny"] - zshift = grid["zShift"] - Rxy = grid["Rxy"] - Zxy = grid["Zxy"] - except: - print("Missing required data") - return None - - dz = 2.*pi / (period * (nz-1)) - phi0 = np.linspace(0,2.*pi / period, nz) - - # Need to insert additional points in Y so mesh looks smooth - #for y in range(1,ny): - # ms = np.max(np.abs(zshift[:,y] - zshift[:,y-1])) - # if( - - # Create array of points, structured - points = np.zeros([nx*ny*nz, 3]) - - start = 0 - for y in range(ny): - end = start + nx*nz - - phi = zshift[:,y] + phi0[:,None] - r = Rxy[:,y] + (np.zeros([nz]))[:,None] - - xz_points = points[start:end] - xz_points[:,0] = (r*cos(phi)).ravel() # X - xz_points[:,1] = (r*sin(phi)).ravel() # Y - xz_points[:,2] = (Zxy[:,y]+(np.zeros([nz]))[:,None]).ravel() # Z - - start = end - - return points - -def create_grid(grid, data, period=1): - - s = np.shape(data) - - nx = grid["nx"] - ny = grid["ny"] - nz = s[2] - - print("data: %d,%d,%d grid: %d,%d\n" % (s[0],s[1],s[2], nx,ny)) - - dims = (nx, nz, ny) - sgrid = tvtk.StructuredGrid(dimensions=dims) - pts = aligned_points(grid, nz, period) - print(np.shape(pts)) - sgrid.points = pts - - scalar = np.zeros([nx*ny*nz]) - start = 0 - for y in range(ny): - end = start + nx*nz - scalar[start:end] = (data[:,y,:]).transpose().ravel() - print(y, " = " , np.max(scalar[start:end])) - start = end - - sgrid.point_data.scalars = np.ravel(scalar.copy()) - sgrid.point_data.scalars.name = "data" - - return sgrid - -@mayavi2.standalone -def view3d(sgrid): - from enthought.mayavi.sources.vtk_data_source import VTKDataSource - from enthought.mayavi.modules.api import Outline, GridPlane - - mayavi.new_scene() - src = VTKDataSource(data=sgrid) - mayavi.add_source(src) - mayavi.add_module(Outline()) - g = GridPlane() - g.grid_plane.axis = 'x' - mayavi.add_module(g) - -if __name__ == '__main__': - from boutdata.collect import collect - from boututils.file_import import file_import - - path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/" - #path="/home/ben/run4" - - #g = file_import("../cbm18_dens8.grid_nx68ny64.nc") - g = file_import("data/cbm18_8_y064_x516_090309.nc") - #g = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect("P", tind=50, path=path) - data = data[0,:,:,:] - s = np.shape(data) - nz = s[2] - - bkgd = collect("P0", path=path) - for z in range(nz): - 
data[:,:,z] += bkgd - - # Create a structured grid - sgrid = create_grid(g, data, 10) - - - w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - w.write() - - # View the structured grid - view3d(sgrid) diff --git a/tools/pylib/boutdata/mms.py b/tools/pylib/boutdata/mms.py deleted file mode 100644 index b95ff175e1..0000000000 --- a/tools/pylib/boutdata/mms.py +++ /dev/null @@ -1,591 +0,0 @@ -""" Functions for calculating sources for the - Method of Manufactured Solutions (MMS) - -""" -from __future__ import print_function -from __future__ import division - -from sympy import symbols, cos, sin, diff, sqrt, pi, simplify, trigsimp, Wild - -from numpy import arange, zeros - -# Constants -qe = 1.602e-19 -Mp = 1.67262158e-27 -mu0 = 4.e-7*3.141592653589793 - -# Define symbols - -x = symbols('x') -y = symbols('y') -z = symbols('z') -t = symbols('t') - -class Metric(object): - def __init__(self): - # Create an identity metric - self.x = symbols('x\'') - self.y = symbols('y\'') - self.z = symbols('z\'') - - self.g11 = self.g22 = self.g33 = 1.0 - self.g12 = self.g23 = self.g13 = 0.0 - - self.g_11 = self.g_22 = self.g_33 = 1.0 - self.g_12 = self.g_23 = self.g_13 = 0.0 - - self.J = 1.0 - self.B = 1.0 - -identity = Metric() - -# Basic differencing -def ddt(f): - """Time derivative""" - return diff(f, t) - - -def DDX(f, metric = identity): - return diff(f, metric.x) - -def DDY(f, metric = identity): - return diff(f, metric.y) - -def DDZ(f, metric = identity): - return diff(f, metric.z) - - -def D2DX2(f, metric = identity): - return diff(f, metric.x, 2) - -def D2DY2(f, metric = identity): - return diff(f, metric.y, 2) - -def D2DZ2(f, metric = identity): - return diff(f, metric.z, 2) - - -def D2DXDY(f, metric = identity): - message = "* WARNING: D2DXDY is currently not set in BOUT++."+\ - " Check src/sys/derivs.cxx if situation has changed. *" - print("\n"*3) - print("*"*len(message)) - print(message) - print("*"*len(message)) - print("\n"*3) - return DDX(DDY(f, metric), metric) - -def D2DXDZ(f, metric = identity): - return DDX(DDZ(f, metric), metric) - -def D2DYDZ(f, metric = identity): - return DDY(DDZ(f, metric), metric) - -# Operators - -def bracket(f, g, metric = identity): - """ - Calculates [f,g] symbolically - """ - - dfdx = diff(f, metric.x) - dfdz = diff(f, metric.z) - - dgdx = diff(g, metric.x) - dgdz = diff(g, metric.z) - - return dfdz * dgdx - dfdx * dgdz - -def b0xGrad_dot_Grad(phi, A, metric = identity): - """ - Perpendicular advection operator, including - derivatives in y - - Note: If y derivatives are neglected, then this reduces - to bracket(f, g, metric) * metric.B - (in a Clebsch coordinate system) - - """ - dpdx = DDX(phi, metric) - dpdy = DDY(phi, metric) - dpdz = DDZ(phi, metric) - - vx = metric.g_22*dpdz - metric.g_23*dpdy; - vy = metric.g_23*dpdx - metric.g_12*dpdz; - vz = metric.g_12*dpdy - metric.g_22*dpdx; - - return (+ vx*DDX(A, metric) - + vy*DDY(A, metric) - + vz*DDZ(A, metric) ) / (metric.J*sqrt(metric.g_22)) - -def Delp2(f, metric = identity, all_terms=True): - """ Laplacian in X-Z - - If all_terms is false then first derivative terms are neglected. 
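Putting the symbolic operators above together, a manufactured-solution source for an advection equation df/dt = [phi, f] could be assembled like this; `f` and `phi` are arbitrary test expressions, and `exprToStr` is defined further down:

```python
from sympy import cos, sin

from boutdata.mms import bracket, ddt, exprToStr, t, x, z

f = sin(t) * sin(x) * cos(z)  # manufactured solution
phi = cos(x) * sin(z)         # prescribed potential

# The source to add on the right-hand side of the simulated equation
source = ddt(f) - bracket(phi, f)

print(exprToStr(source))  # paste-ready string for a BOUT.inp [f] section
```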
- By default all_terms is true, but can be disabled - in the BOUT.inp file (laplace section) - - """ - d2fdx2 = diff(f, metric.x, 2) - d2fdz2 = diff(f, metric.z, 2) - d2fdxdz = diff(f, metric.x, metric.z) - - result = metric.g11*d2fdx2 + metric.g33*d2fdz2 + 2.*metric.g13*d2fdxdz - - if all_terms: - G1 = (DDX(metric.J*metric.g11, metric) + DDY(metric.J*metric.g12, metric) + DDZ(metric.J*metric.g13, metric)) / metric.J - G3 = (DDX(metric.J*metric.g13, metric) + DDY(metric.J*metric.g23, metric) + DDZ(metric.J*metric.g33, metric)) / metric.J - result += G1 * diff(f, metric.x) + G3 * diff(f, metric.z) - - return result - -def Delp4(f, metric = identity): - d4fdx4 = diff(f, metric.x, 4) - d4fdz4 = diff(f, metric.z, 4) - - return d4fdx4 + d4fdz4 - -def Grad_par(f, metric = identity): - """The parallel gradient""" - return diff(f, metric.y) / sqrt(metric.g_22) - -def Vpar_Grad_par(v, f, metric = identity): - """Parallel advection operator $$v_\parallel \cdot \nabla_\parallel (f)$$""" - return v * Grad_par(f, metric=metric) - -def Div_par(f, metric=identity): - ''' - Divergence of magnetic field aligned vector $$v = \hat{b} f - \nabla \cdot (\hat{b} f) = 1/J \partial_y (f/B) - = B Grad_par(f/B)$$ - ''' - return metric.B*Grad_par(f/metric.B, metric) - -def Laplace(f, metric=identity): - """The full Laplace operator""" - G1 = (DDX(metric.J*metric.g11, metric) + DDY(metric.J*metric.g12, metric) + DDZ(metric.J*metric.g13, metric)) / metric.J - G2 = (DDX(metric.J*metric.g12, metric) + DDY(metric.J*metric.g22, metric) + DDZ(metric.J*metric.g23, metric)) / metric.J - G3 = (DDX(metric.J*metric.g13, metric) + DDY(metric.J*metric.g23, metric) + DDZ(metric.J*metric.g33, metric)) / metric.J - - result = G1*DDX(f, metric) + G2*DDY(f, metric) + G3*DDZ(f, metric)\ - + metric.g11*D2DX2(f, metric) + metric.g22*D2DY2(f, metric) + metric.g33*D2DZ2(f, metric)\ - + 2.0*(metric.g12*D2DXDY(f, metric) + metric.g13*D2DXDZ(f, metric) + metric.g23*D2DYDZ(f, metric)) - - return result - -def Laplace_par(f, metric=identity): - """ - Div( b (b.Grad(f) ) ) = (1/J) d/dy ( J/g_22 * df/dy ) - """ - return diff( (metric.J/metric.g_22)*diff(f, metric.y), metric.y)/ metric.J - -def Laplace_perp(f, metric=identity): - """ - The perpendicular Laplace operator - - Laplace_perp = Laplace - Laplace_par - """ - return Laplace(f, metric) - Laplace_par(f, metric) - -# Convert expression to string - -def trySimplify(expr): - """ - Tries to simplify an expression - """ - try: - return simplify(expr) - except ValueError: - return expr - -def exprToStr(expr): - """ Convert a sympy expression to a string for BOUT++ input - """ - - s = str(expr).replace("**", "^") # Replace exponent operator - - # Try to remove lots of 1.0*... - s = s.replace("(1.0*", "(") - s = s.replace(" 1.0*", " ") - - return s - -def exprMag(expr): - """ - Estimate the magnitude of an expression - - """ - - # Replace all sin, cos with 1 - any = Wild('a') # Wildcard - expr = expr.replace(sin(any), 1.0) - expr = expr.replace(cos(any), 1.0) - - # Pick maximum values of x,y,z - expr = expr.subs(x, 1.0) - expr = expr.subs(y, 2.*pi) - expr = expr.subs(z, 2.*pi) - - return expr.evalf() - -################################## - -class SimpleTokamak(object): - """ - Simple tokamak - - NOTE: This is NOT an equilibrium calculation. 
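The string and magnitude helpers defined above behave as follows; note the printed ordering may vary between sympy versions:

```python
from sympy import sin

from boutdata.mms import exprMag, exprToStr, x, z

expr = 10.0 * sin(z) * x**2

print(exprToStr(expr))  # 10.0*x^2*sin(z)  ('**' becomes '^')
print(exprMag(expr))    # 10.0: sin -> 1 and x -> 1, leaving the prefactor
```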
The input - is intended solely for testing with MMS - """ - def __init__(self, R = 2, Bt = 1.0, eps = 0.1, dr=0.02, q = lambda x:2+x**2): - """ - R - Major radius [m] - - Bt - Toroidal field [T] - - eps - Inverse aspect ratio - - dr - Width of the radial region [m] - - q(x) - A function which returns the safety factor - as a function of x in range [0,1] - - - Coordinates: - x - Radial, [0,1] - y - Poloidal, [0,2pi]. Origin is at inboard midplane. - - - """ - # X has a range [0,1], and y [0,2pi] - #x, y = symbols("x y") - - self.x = x - self.y = y - - self.R = R - - self.dr = dr - - # Minor radius - self.r = R * eps - - # Get safety factor - self.q = q(x) - - # Toroidal angle of a field-line as function - # of poloidal angle y - self.zShift = self.q*(y + eps * sin(y)) - - # Field-line pitch - self.nu = self.q*(1 + eps*cos(y)) #diff(self.zShift, y) - - # Coordinates of grid points - self.Rxy = R - self.r * cos(y) - self.Zxy = self.r * sin(y) - - # Poloidal arc length - self.hthe = self.r + 0.*x - - # Toroidal magnetic field - self.Btxy = Bt * R / self.Rxy - - # Poloidal magnetic field - self.Bpxy = self.Btxy * self.hthe / (self.nu * self.Rxy) - - # Total magnetic field - self.Bxy = sqrt(self.Btxy**2 + self.Bpxy**2) - - # Approximate poloidal field for radial width calculation - Bp0 = Bt * self.r / (q(0.5) * R) - print("Bp0 = %e" % Bp0) - - # dx = Bp * R * dr -- width of the box in psi space - self.psiwidth = Bp0 * R * dr - print("psi width = %e" % self.psiwidth) - - # Integrated shear - self.sinty = diff(self.zShift, x) / self.psiwidth - - # Extra expressions to add to grid file - self._extra = {} - - def add(self, expr, name): - """ - Add an additional expression to be written to the grid files - - """ - self._extra[name] = expr - - - def write(self, nx, ny, output, MXG=2): - """ - Outputs a tokamak shape to a grid file - - nx - Number of radial grid points, not including guard cells - ny - Number of poloidal (parallel) grid points - output - boututils.datafile object, e.g., an open netCDF file - MXG, Number of guard cells in the x-direction - """ - - ngx = nx + 2*MXG - ngy = ny - - # Create an x and y grid to evaluate expressions on - xarr = (arange(nx + 2*MXG) - MXG + 0.5) / nx - yarr = 2.*pi*arange(ny)/ny - - output.write("nx", ngx) - output.write("ny", ngy) - - dx = self.psiwidth / nx + 0.*self.x - dy = 2.*pi / ny + 0.*self.x - - for name, var in [ ("dx", dx), - ("dy", dy), - ("Rxy", self.Rxy), - ("Zxy", self.Zxy), - ("Btxy", self.Btxy), - ("Bpxy", self.Bpxy), - ("Bxy", self.Bxy), - ("hthe", self.hthe), - ("sinty", self.sinty), - ("zShift", self.zShift)]: - - # Note: This is slow, and could be improved using something like lambdify - values = zeros([ngx, ngy]) - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - values[i,j] = var.evalf(subs={self.x:x, self.y:y}) - - output.write(name, values) - - for name, var in list(self._extra.items()): - values = zeros([ngx, ngy]) - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - values[i,j] = var.evalf(subs={self.x:x, self.y:y}) - - output.write(name, values) - - shiftAngle = zeros(ngx) - for i, x in enumerate(xarr): - shiftAngle[i] = 2.*pi*self.q.evalf(subs={self.x:x}) - - output.write("ShiftAngle", shiftAngle) - - def metric(self): - """ - Returns an analytic metric tensor - """ - m = Metric() - - # Set symbols for x and y directions - m.x = self.x - m.y = self.y - - # Calculate metric tensor - - m.g11 = (self.Rxy * self.Bpxy)**2 - m.g22 = 1./self.hthe**2 - m.g33 = self.sinty**2*m.g11 + self.Bxy**2/m.g11 - m.g12 = 0.0*x - 
m.g13 = -self.sinty*m.g11 - m.g23 = -self.Btxy / (self.hthe * self.Bpxy * self.R) - - m.g_11 = 1./m.g11 + (self.sinty*self.Rxy)**2 - m.g_22 = (self.Bxy * self.hthe / self.Bpxy)**2 - m.g_33 = self.Rxy**2 - m.g_12 = self.Btxy*self.hthe*self.sinty*self.Rxy / self.Bpxy - m.g_13 = self.sinty*self.Rxy**2 - m.g_23 = self.Btxy*self.hthe*self.Rxy / self.Bpxy - - m.J = self.hthe / self.Bpxy - m.B = self.Bxy - - # Convert all "x" symbols from [0,1] into flux - m.Lx = self.psiwidth - xsub = m.x / self.psiwidth - - m.g11 = m.g11.subs(x, xsub) - m.g22 = m.g22.subs(x, xsub) - m.g33 = m.g33.subs(x, xsub) - m.g12 = m.g12.subs(x, xsub) - m.g13 = m.g13.subs(x, xsub) - m.g23 = m.g23.subs(x, xsub) - - m.g_11 = m.g_11.subs(x, xsub) - m.g_22 = m.g_22.subs(x, xsub) - m.g_33 = m.g_33.subs(x, xsub) - m.g_12 = m.g_12.subs(x, xsub) - m.g_13 = m.g_13.subs(x, xsub) - m.g_23 = m.g_23.subs(x, xsub) - - m.J = m.J.subs(x, xsub) - m.B = m.B.subs(x, xsub) - - return m - -########################## -# Shaped tokamak - -class ShapedTokamak(object): - def __init__(self, Rmaj=6.0, rmin=2.0, dr=0.1, kappa=1.0, delta=0.0, b=0.0, ss=0.0, Bt0=1.0, Bp0 = 0.2): - """ - Rmaj - Major radius [m] - rmin - Minor radius [m] - dr - Radial width of region [m] - - kappa - Ellipticity, 1 for a circle - delta - Triangularity, 0 for circle - b - Indentation ("bean" shape), 0 for circle - - ss - Shafranov shift [m] - - Bt0 - Toroidal magnetic field on axis [T]. Varies as 1/R - Bp0 - Poloidal field at outboard midplane [T] - - Outputs - ------- - - Assigns member variables - - x, y - Symbols for x and y coordinates - - R (x,y) - Z (x,y) - - """ - - # X has a range [0,1], and y [0,2pi] - x, y = symbols("x y") - - # Minor radius as function of x - rminx = rmin + (x-0.5)*dr - - # Analytical expression for R and Z coordinates as function of x and y - Rxy = Rmaj - b + (rminx + b*cos(y))*cos(y + delta*sin(y)) + ss*(0.5-x)*(dr/rmin) - Zxy = kappa * rminx * sin(y) - - # Toroidal magnetic field - Btxy = Bt0 * Rmaj / Rxy - - # Poloidal field. dx constant, so set poloidal field - # at outboard midplane (y = 0) - # NOTE: Approximate calculation - - # Distance between flux surface relative to outboard midplane. 
- expansion = (1 - (old_div(ss,rmin))*cos(y))/(1 - (ss/rmin)) - - Bpxy = Bp0 * ((Rmaj + rmin) / Rxy) / expansion - - # Calculate hthe - hthe = sqrt(diff(Rxy, y)**2 + diff(Zxy, y)**2) - try: - hthe = trigsimp(hthe) - except ValueError: - pass - - # Field-line pitch - nu = Btxy * hthe / (Bpxy * Rxy) - - # Shift angle - # NOTE: Since x has a range [0,1] this could be done better - # than ignoring convergence conditions - self.zShift = integrate(nu, y, conds='none') - - # Safety factor - self.shiftAngle = self.zShift.subs(y, 2*pi) - self.zShift.subs(y, 0) - - # Integrated shear - self.I = diff(self.zShift, x) - - self.x = x - self.y = y - - self.R = Rxy - self.Z = Zxy - - self.Bt = Btxy - self.Bp = Bpxy - self.B = sqrt(Btxy**2 + Bpxy**2) - - self.hthe = hthe - - def write(self, nx, ny, filename, MXG=2): - """ - Outputs a tokamak shape to a grid file - - nx - Number of radial grid points, not including guard cells - ny - Number of poloidal (parallel) grid points - output - boututils.datafile object, e.g., an open netCDF file - MXG, Number of guard cells in the x-direction - """ - - ngx = nx + 2*MXG - ngy = ny - - # Create an x and y grid to evaluate expressions on - xarr = (arange(nx + 2*MXG) - MXG + 0.5) / nx - yarr = 2.*pi*arange(ny)/ny - - Rxy = zeros([ngx, ngy]) - Zxy = zeros([ngx, ngy]) - - Btxy = zeros([ngx, ngy]) - Bpxy = zeros([ngx, ngy]) - - hthe = zeros([ngx, ngy]) - - - I = zeros([ngx, ngy]) - - # Note: This is slow, and could be improved using something like lambdify - for i, x in enumerate(xarr): - for j, y in enumerate(yarr): - Rxy[i,j] = self.R.evalf(subs={self.x:x, self.y:y}) - Zxy[i,j] = self.Z.evalf(subs={self.x:x, self.y:y}) - - Btxy[i,j] = self.Bt.evalf(subs={self.x:x, self.y:y}) - Bpxy[i,j] = self.Bp.evalf(subs={self.x:x, self.y:y}) - - hthe[i,j] = self.hthe.evalf(subs={self.x:x, self.y:y}) - - - plt.plot(Rxy[i,:], Zxy[i,:]) - plt.show() - - Bxy = sqrt(Btxy**2 + Bpxy**2) - - def metric(self): - """ - Returns an analytic metric tensor - """ - m = Metric() - - # Set symbols for x and y directions - m.x = self.x - m.y = self.y - - # Calculate metric tensor - - m.g11 = (self.R * self.Bp)**2 - m.g22 = 1./self.hthe**2 - m.g33 = self.I**2*m.g11 + self.B**2 / m.g11 - m.g12 = 0.0 - m.g13 = -self.I*m.g11 - m.g23 = -self.Bt / (self.hthe * self.Bp * self.R) - - m.g_11 = 1./m.g11 + (self.I*self.R)**2 - m.g_22 = (self.B * self.hthe / self.Bpxy)**2 - m.g_33 = self.R**2 - m.g_12 = self.Bt*self.hthe*self.I*self.R / self.Bp - m.g_13 = self.I*self.R**2 - m.g_23 = self.Bt*self.hthe*self.R / self.Bp - - m.J = self.hthe / self.Bp - m.B = self.B - - return m - - diff --git a/tools/pylib/boutdata/pol_slice.py b/tools/pylib/boutdata/pol_slice.py deleted file mode 100644 index 7adea90c50..0000000000 --- a/tools/pylib/boutdata/pol_slice.py +++ /dev/null @@ -1,110 +0,0 @@ -from __future__ import print_function -from __future__ import division - -from boututils.datafile import DataFile -import numpy as np -from scipy.ndimage import map_coordinates - - -def pol_slice(var3d, gridfile, n=1, zangle=0.0, nyInterp=None): - """Takes a 3D variable, and returns a 2D slice at fixed toroidal angle - - Parameters - ---------- - var3d : array_like - The input array. Should be 3D - gridfile : str - The gridfile containing the coordinate system to used - n : int, optional - The number of times the data must be repeated for a full torus, - e.g. n=2 is half a torus - zangle : float, optional - The (real) toroidal angle of the result - nyInterp : int, optional - The number of y (theta) points to use in the final result. 
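Given those parameters, a typical call chain for `pol_slice` might look like this, with placeholder paths:

```python
from boutdata.collect import collect
from boutdata.pol_slice import pol_slice

# Last time point of a 3D field: shape [x, y, z]
ne = collect("Ne", path="data")[-1]

# Interpolate onto the zangle = 0 plane, for a simulation that
# covered one fifth of the torus (n=5)
ne_pol = pol_slice(ne, "grid.nc", n=5, zangle=0.0, nyInterp=128)
print(ne_pol.shape)  # (nx, 128)
```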
- - Returns - ------- - array - A 2D-slice of var3d interpolated at a fixed toroidal angle - """ - n = int(n) - zangle = float(zangle) - - s = np.shape(var3d) - if len(s) != 3: - raise ValueError("pol_slice expects a 3D variable (got {} dimensions)" - .format(len(s))) - - nx, ny, nz = s - - # Open the grid file - with DataFile(gridfile) as gf: - # Check the grid size is correct - grid_nx = gf.read("nx") - if grid_nx != nx: - raise ValueError("Grid X size ({}) is different to the variable ({})" - .format(grid_nx, nx)) - grid_ny = gf.read("ny") - if grid_ny != ny: - raise ValueError("Grid Y size ({}) is different to the variable ({})" - .format(grid_ny, ny)) - - # Get the toroidal shift - zShift = gf.read("qinty") - - if zShift is not None: - print("Using qinty as toroidal shift angle") - else: - zShift = gf.read("zShift") - if zShift is not None: - print("Using zShift as toroidal shift angle") - else: - raise ValueError("Neither qinty nor zShift found") - - # Decide if we've asked to do interpolation - if nyInterp is not None and nyInterp != ny: - varTmp = var3d - - # Interpolate to output positions and make the correct shape - # np.mgrid gives us an array of indices - # 0:ny-1:nyInterp*1j means use nyInterp points between 0 and ny-1 inclusive - var3d = map_coordinates(varTmp, np.mgrid[0:nx, 0:ny-1:nyInterp*1j, 0:nz], - cval=-999) - zShift = map_coordinates(zShift, np.mgrid[0:nx, 0:ny-1:nyInterp*1j], - cval=-999) - - # Update shape - ny = nyInterp - - var2d = np.zeros([nx, ny]) - - ###################################### - # Perform 2D slice - dz = 2.*np.pi / float(n * nz) - zind = (zangle - zShift) / dz - z0f = np.floor(zind) - z0 = z0f.astype(int) - p = zind - z0f - - # Make z0 between 0 and (nz-2) - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # Get z+ and z- - zp = (z0 + 1) % (nz-1) - zm = (z0 - 1 + (nz-1)) % (nz-1) - - # For some reason numpy imposes a limit of 32 entries to choose - # so if nz>32 we have to use a different approach. 
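The fractional offset `p` computed above feeds a three-point (quadratic Lagrange) stencil, applied just below; its weights always sum to one:

```python
import numpy as np


def stencil_weights(p):
    """Weights for planes z-, z0, z+ at fractional offset p."""
    return np.array([0.5 * p * (p - 1.0), 1.0 - p * p, 0.5 * p * (p + 1.0)])


assert np.isclose(stencil_weights(0.3).sum(), 1.0)
print(stencil_weights(0.0))  # [0. 1. 0.]: exactly on the central plane
```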
This limit may change with numpy version - if nz >= 32: - for x in np.arange(nx): - for y in np.arange(ny): - var2d[x, y] = (0.5*p[x, y]*(p[x, y]-1.0) * var3d[x, y, zm[x, y]] + - (1.0 - p[x, y]*p[x, y]) * var3d[x, y, z0[x, y]] + - 0.5*p[x, y]*(p[x, y]+1.0) * var3d[x, y, zp[x, y]]) - else: - var2d = (0.5*p*(p-1.0) * np.choose(zm.T, var3d.T).T + - (1.0 - p*p) * np.choose(z0.T, var3d.T).T + - 0.5*p*(p+1.0) * np.choose(zp.T, var3d.T).T) - - return var2d diff --git a/tools/pylib/boutdata/processor_rearrange.py b/tools/pylib/boutdata/processor_rearrange.py deleted file mode 100644 index fb91af3763..0000000000 --- a/tools/pylib/boutdata/processor_rearrange.py +++ /dev/null @@ -1,161 +0,0 @@ -"""Routines for redistributing files over different numbers of -processors - -""" - -from math import sqrt -from collections import namedtuple - -processor_layout_ = namedtuple("BOUT_processor_layout", - ["nxpe", "nype", "npes", "mxsub", "mysub", - "nx", "ny", "mz", "mxg", "myg"]) - - -# Subclass the namedtuple above so we can add a docstring -class processor_layout(processor_layout_): - """A namedtuple describing the processor layout, including grid sizes - and guard cells - - Parameters - ---------- - - nxpe, nype : int - The number of processors in x and y - npes : int - The total number of procesors - mxsub, mysub : int - The size of the grid in x and y on a single processor - nx, ny, mz : int - The total size of the grid in x, y and z - mxg : int - The number of guard cells in x and y - - """ - pass - - -def get_processor_layout(boutfile, has_t_dimension=True, mxg=2, myg=2): - """Given a BOUT.restart.* or BOUT.dmp.* file (as a DataFile object), - return the processor layout for its data - - Parameters - ---------- - boutfile : DataFile - Restart or dump file to read - has_t_dimension : bool, optional - Does this file have a time dimension? - mxg, myg : int, optional - Number of x, y guard cells - - Returns - ------- - processor_layout - A description of the processor layout and grid sizes - - """ - - nxpe = boutfile.read('NXPE') - nype = boutfile.read("NYPE") - npes = nxpe * nype - - # Get list of variables - var_list = boutfile.list() - if len(var_list) == 0: - raise ValueError("ERROR: No data found") - - mxsub = 0 - mysub = 0 - mz = 0 - - if has_t_dimension: - maxdims = 4 - else: - maxdims = 3 - for v in var_list: - if boutfile.ndims(v) == maxdims: - s = boutfile.size(v) - mxsub = s[maxdims - 3] - 2 * mxg - if mxsub < 0: - if s[maxdims - 3] == 1: - mxsub = 1 - mxg = 0 - elif s[maxdims - 3] == 3: - mxsub = 1 - mxg = 1 - else: - print("Number of x points is wrong?") - return False - - mysub = s[maxdims - 2] - 2 * myg - if mysub < 0: - if s[maxdims - 2] == 1: - mysub = 1 - myg = 0 - elif s[maxdims - 2] == 3: - mysub = 1 - myg = 1 - else: - print("Number of y points is wrong?") - return False - - mz = s[maxdims - 1] - break - - # Calculate total size of the grid - nx = mxsub * nxpe - ny = mysub * nype - - result = processor_layout(nxpe=nxpe, nype=nype, npes=npes, mxsub=mxsub, mysub=mysub, nx=nx, ny=ny, mz=mz, mxg=mxg, myg=myg) - - return result - - -def create_processor_layout(old_processor_layout, npes, nxpe=None): - """Convert one processor layout into another one with a different - total number of processors - - If nxpe is None, use algorithm from BoutMesh to select optimal nxpe. 
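For illustration, inspecting the decomposition of an existing run with the helper above (the path is a placeholder):

```python
from boututils.datafile import DataFile
from boutdata.processor_rearrange import get_processor_layout

with DataFile("data/BOUT.dmp.0.nc") as f:
    layout = get_processor_layout(f, has_t_dimension=True)

print(layout.nxpe, layout.nype)    # processors in x and y
print(layout.mxsub, layout.mysub)  # per-processor grid size
print(layout.nx, layout.ny, layout.mz)
```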
- Otherwise, check nxpe is valid (divides npes) - - Parameters - ---------- - old_processor_layout : processor_layout - The processor layout to convert - npes : int - The new total number of procesors - nxpe : int, optional - The number of procesors in x to use - - Returns - ------- - processor_layout - A description of the processor layout and grid sizes - - """ - - if nxpe is None: # Copy algorithm from BoutMesh for selecting nxpe - ideal = sqrt(float(old_processor_layout.nx) * float(npes) / float(old_processor_layout.ny)) - # Results in square domain - - for i in range(1, npes + 1): - if npes % i == 0 and old_processor_layout.nx % i == 0 and int(old_processor_layout.nx / i) >= old_processor_layout.mxg and old_processor_layout.ny % (npes / i) == 0: - # Found an acceptable value - # Warning: does not check branch cuts! - - if nxpe is None or abs(ideal - i) < abs(ideal - nxpe): - nxpe = i # Keep value nearest to the ideal - - if nxpe is None: - raise ValueError("ERROR: could not find a valid value for nxpe") - elif npes % nxpe != 0: - raise ValueError( - "ERROR: requested nxpe is invalid, it does not divide npes") - - nype = int(npes / nxpe) - - mxsub = int(old_processor_layout.nx / nxpe) - mysub = int(old_processor_layout.ny / nype) - - result = processor_layout(nxpe=nxpe, nype=nype, npes=npes, mxsub=mxsub, mysub=mysub, nx=old_processor_layout.nx, ny=old_processor_layout.ny, mz=old_processor_layout.mz, mxg=old_processor_layout.mxg, myg=old_processor_layout.myg) - - return result diff --git a/tools/pylib/boutdata/restart.py b/tools/pylib/boutdata/restart.py deleted file mode 100644 index 092c7a98cf..0000000000 --- a/tools/pylib/boutdata/restart.py +++ /dev/null @@ -1,829 +0,0 @@ -"""Routines for manipulating restart files - -TODO ----- - -- Don't import ``numpy.random.normal`` directly, just the ``random`` - submodule, or sphinx includes the documentation for ``normal`` - -""" - -from __future__ import print_function -from __future__ import division -from builtins import str, range - -import os -import glob - -from boutdata.collect import collect, create_cache -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray -from boutdata.processor_rearrange import get_processor_layout, create_processor_layout - -import multiprocessing -import numpy as np -from numpy import mean, zeros, arange -from numpy.random import normal - -from scipy.interpolate import interp1d -try: - from scipy.interpolate import RegularGridInterpolator -except ImportError: - pass - -def resize3DField(var, data, coordsAndSizesTuple, method, mute): - """Resize 3D fields - - To be called by resize. - - Written as a function in order to call it using multiprocess. Must - be defined as a top level function in order to be pickable by the - multiprocess. 
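The nxpe-selection loop above is self-contained enough to sketch in isolation; this standalone version, with names of our choosing, picks the divisor of `npes` giving the most nearly square subdomains:

```python
from math import sqrt


def pick_nxpe(nx, ny, npes, mxg=2):
    """Divisor of npes nearest sqrt(nx*npes/ny), subject to the
    same divisibility and guard-cell constraints as above."""
    ideal = sqrt(float(nx) * float(npes) / float(ny))
    best = None
    for i in range(1, npes + 1):
        if npes % i == 0 and nx % i == 0 and nx // i >= mxg and ny % (npes // i) == 0:
            if best is None or abs(ideal - i) < abs(ideal - best):
                best = i
    return best


print(pick_nxpe(nx=68, ny=64, npes=16))  # 4: a 4x4 decomposition
```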
- - See the function resize for details - - """ - - # Unpack the tuple for better readability - xCoordOld, yCoordOld, zCoordOld,\ - xCoordNew, yCoordNew, zCoordNew,\ - newNx, newNy, newNz = coordsAndSizesTuple - - if not(mute): - print(" Resizing "+var + - ' to (nx,ny,nz) = ({},{},{})'.format(newNx, newNy, newNz)) - - # Make the regular grid function (see examples in - # https://docs.scipy.org/doc/scipy/reference/generated/scipy.interpolate.RegularGridInterpolator.html - # for details) - gridInterpolator = RegularGridInterpolator( - (xCoordOld, yCoordOld, zCoordOld), data, method) - - # Need to fill with one exrta z plane (will only contain zeros) - newData = np.zeros((newNx, newNy, newNz)) - - # Interpolate to the new values - for xInd, x in enumerate(xCoordNew): - for yInd, y in enumerate(yCoordNew): - for zInd, z in enumerate(zCoordNew): - newData[xInd, yInd, zInd] = gridInterpolator([x, y, z]) - - return var, newData - - -def resize(newNx, newNy, newNz, mxg=2, myg=2, - path="data", output="./", informat="nc", outformat=None, - method='linear', maxProc=None, mute=False): - """Increase/decrease the number of points in restart files. - - NOTE: Can't overwrite - WARNING: Currently only implemented with uniform BOUT++ grid - - Parameters - ---------- - newNx, newNy, newNz : int - nx, ny, nz for the new file (including ghost points) - mxg, myg : int, optional - Number of ghost points in x, y (default: 2) - path : str, optional - Input path to data files - output : str, optional - Path to write new files - informat : str, optional - File extension of input - outformat : {None, str}, optional - File extension of output (default: use the same as `informat`) - method : {'linear', 'nearest'}, optional - What interpolation method to be used - maxProc : {None, int}, optional - Limits maximum processors to use when interpolating if set - mute : bool, optional - Whether or not output should be printed from this function - - Returns - ------- - return : bool - True on success, else False - - TODO - ---- - - Add 2D field interpolation - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if method is None: - # Make sure the method is set - method = 'linear' - - if outformat is None: - outformat = informat - - if path == output: - print("ERROR: Can't overwrite restart files when expanding") - return False - - def is_pow2(x): - """Returns true if x is a power of 2""" - return (x > 0) and ((x & (x-1)) == 0) - - if not is_pow2(newNz): - print("ERROR: New Z size {} must be a power of 2".format(newNz)) - return False - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - file_list.sort() - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No data found in {}".format(path)) - return False - - if not(mute): - print("Number of files found: " + str(nfiles)) - - for f in file_list: - new_f = os.path.join(output, f.split('/')[-1]) - if not(mute): - print("Changing {} => {}".format(f, new_f)) - - # Open the restart file in read mode and create the new file - with DataFile(f) as old, DataFile(new_f, write=True, create=True) as new: - - # Find the dimension - for var in old.list(): - # Read the data - data = old.read(var) - # Find 3D variables - if old.ndims(var) == 3: - break - - nx, ny, nz = data.shape - # Make coordinates - # NOTE: The max min of the coordinates are irrelevant when - # interpolating (as long as old and new coordinates - # are consistent), so we just choose all variable to - # be between 0 and 1 Calculate the old 
coordinates - xCoordOld = np.linspace(0, 1, nx) - yCoordOld = np.linspace(0, 1, ny) - zCoordOld = np.linspace(0, 1, nz) - - # Calculate the new coordinates - xCoordNew = np.linspace(xCoordOld[0], xCoordOld[-1], newNx) - yCoordNew = np.linspace(yCoordOld[0], yCoordOld[-1], newNy) - zCoordNew = np.linspace(zCoordOld[0], zCoordOld[-1], newNz) - - # Make a pool of workers - pool = multiprocessing.Pool(maxProc) - # List of jobs and results - jobs = [] - # Pack input to resize3DField together - coordsAndSizesTuple = (xCoordOld, yCoordOld, zCoordOld, - xCoordNew, yCoordNew, zCoordNew, - newNx, newNy, newNz) - - # Loop over the variables in the old file - for var in old.list(): - # Read the data - data = old.read(var) - attributes = old.attributes(var) - - # Find 3D variables - if old.ndims(var) == 3: - - # Asynchronous call (locks first at .get()) - jobs.append(pool.apply_async(resize3DField, - args=(var, data, coordsAndSizesTuple, method, mute, ))) - - else: - if not(mute): - print(" Copying "+var) - newData = data.copy() - if not(mute): - print("Writing "+var) - new.write(var, newData) - - for job in jobs: - var, newData = job.get() - newData = BoutArray(newData, attributes=attributes) - if not(mute): - print("Writing "+var) - new.write(var, newData) - - # Close the pool of workers - pool.close() - # Wait for all processes to finish - pool.join() - - return True - - -def resizeZ(newNz, path="data", output="./", informat="nc", outformat=None): - """Increase the number of Z points in restart files - - NOTE: - * Can't overwrite - * Will not yield a result close to the original if there are - asymmetries in the z-direction - - Parameters - ---------- - newNz : int - nz for the new file - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - - Returns - ------- - True on success, else False - - TODO - ---- - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if outformat is None: - outformat = informat - - if path == output: - print("ERROR: Can't overwrite restart files when expanding") - return False - - def is_pow2(x): - """Returns true if x is a power of 2""" - return (x > 0) and ((x & (x-1)) == 0) - - if not is_pow2(newNz): - print("ERROR: New Z size must be a power of 2") - return False - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - file_list.sort() - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No data found") - return False - - print("Number of files found: " + str(nfiles)) - - for f in file_list: - new_f = os.path.join(output, f.split('/')[-1]) - print("Changing {} => {}".format(f, new_f)) - - # Open the restart file in read mode and create the new file - with DataFile(f) as old,\ - DataFile(new_f, write=True, create=True) as new: - # Loop over the variables in the old file - for var in old.list(): - # Read the data - data = old.read(var) - attributes = old.attributes(var) - - # Find 3D variables - if old.ndims(var) == 3: - print(" Resizing "+var) - - nx, ny, nz = data.shape - - newdata = np.zeros((nx, ny, newNz)) - for x in range(nx): - for y in range(ny): - f_old = np.fft.fft(data[x, y, :]) - - # Number of points in f is power of 2 - f_new = np.zeros(newNz) - - # Copy coefficients across (ignoring Nyquist) - 
f_new[0] = f_old[0] # DC - for m in range(1, int(nz/2)): - # + ve frequencies - f_new[m] = f_old[m] - # - ve frequencies - f_new[newNz-m] = f_old[nz-m] - - # Invert fft - newdata[x, y, :] = np.fft.ifft(f_new).real - newdata[x, y, :] = newdata[x, y, 0] - - # Multiply with the ratio of newNz/nz - # This is not needed in the IDL routine as the - # forward transfrom has the scaling factor 1/N in - # the forward transform, whereas the scaling factor - # 1/N is the inverse transform in np.fft - # Note that ifft(fft(a)) = a for the same number of - # points in both IDL and np.ftt - newdata *= (newNz/nz) - else: - print(" Copying "+var) - newdata = data.copy() - - newdata = BoutArray(newdata, attributes=attributes) - - new.write(var, newdata) - - return True - - -def addnoise(path=".", var=None, scale=1e-5): - """Add random noise to restart files - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - Parameters - ---------- - path : str, optional - Path to restart files (default: current directory) - var : str, optional - The variable to modify. By default all 3D variables are modified - scale : float - Amplitude of the noise. Gaussian noise is used, with zero mean - and this parameter as the standard deviation - - """ - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - - for file in file_list: - print(file) - with DataFile(file, write=True) as d: - if var is None: - for v in d.list(): - if d.ndims(v) == 3: - print(" -> "+v) - data = d.read(v, asBoutArray=True) - data += normal(scale=scale, size=data.shape) - d.write(v, data) - else: - # Modify a single variable - print(" -> "+var) - data = d.read(var) - data += normal(scale=scale, size=data.shape) - d.write(var, data) - - -def scalevar(var, factor, path="."): - """Scales a variable by a given factor, modifying restart files in - place - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - Parameters - ---------- - var : str - Name of the variable - factor : float - Factor to multiply - path : str, optional - Path to the restart files (default: current directory) - - """ - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - for file in file_list: - print(file) - with DataFile(file, write=True) as d: - d[var] = d[var] * factor - - -def create(averagelast=1, final=-1, path="data", output="./", informat="nc", outformat=None): - """Create restart files from data (dmp) files. - - Parameters - ---------- - averagelast : int, optional - Number of time points (counting from `final`, inclusive) to - average over (default is 1 i.e. 
just take last time-point) - final : int, optional - The last time point to use (default is last, -1) - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - - """ - - if outformat is None: - outformat = informat - - file_list = glob.glob(os.path.join(path, "BOUT.dmp.*."+informat)) - nfiles = len(file_list) - - print(("Number of data files: ", nfiles)) - - for i in range(nfiles): - # Open each data file - infname = os.path.join(path, "BOUT.dmp."+str(i)+"."+informat) - outfname = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - - print((infname, " -> ", outfname)) - - infile = DataFile(infname) - outfile = DataFile(outfname, create=True) - - # Get the data always needed in restart files - hist_hi = infile.read("iteration") - print(("hist_hi = ", hist_hi)) - outfile.write("hist_hi", hist_hi) - - t_array = infile.read("t_array") - tt = t_array[final] - print(("tt = ", tt)) - outfile.write("tt", tt) - - tind = final - if tind < 0.0: - tind = len(t_array) + final - - NXPE = infile.read("NXPE") - NYPE = infile.read("NYPE") - print(("NXPE = ", NXPE, " NYPE = ", NYPE)) - outfile.write("NXPE", NXPE) - outfile.write("NYPE", NYPE) - - # Get a list of variables - varnames = infile.list() - - for var in varnames: - if infile.ndims(var) == 4: - # Could be an evolving variable - - print((" -> ", var)) - - data = infile.read(var) - - if averagelast == 1: - slice = data[final, :, :, :] - else: - slice = mean(data[(final - averagelast) - :final, :, :, :], axis=0) - - print(slice.shape) - - outfile.write(var, slice) - - infile.close() - outfile.close() - - -def redistribute(npes, path="data", nxpe=None, output=".", informat=None, outformat=None, mxg=2, myg=2): - """Resize restart files across NPES processors. - - Does not check if new processor arrangement is compatible with the - branch cuts. In this respect :py:func:`restart.split` is - safer. However, BOUT++ checks the topology during initialisation - anyway so this is not too serious. - - Parameters - ---------- - npes : int - Number of processors for the new restart files - path : str, optional - Path to original restart files (default: "data") - nxpe : int, optional - Number of processors to use in the x-direction (determines - split: npes = nxpe * nype). Default is None which uses the - same algorithm as BoutMesh (but without topology information) - to determine a suitable value for nxpe. - output : str, optional - Location to save new restart files (default: current directory) - informat : str, optional - Specify file format of old restart files (must be a suffix - understood by DataFile, e.g. 'nc'). Default uses the format of - the first 'BOUT.restart.*' file listed by glob.glob. - outformat : str, optional - Specify file format of new restart files (must be a suffix - understood by DataFile, e.g. 'nc'). Default is to use the same - as informat. 
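A sketch of a typical `redistribute` invocation, with placeholder directory names:

```python
from boutdata.restart import redistribute

# Spread existing restart files over 64 processors
redistribute(64, path="data", output="data_64proc")

# Or force an 8x8 decomposition rather than the automatic split
redistribute(64, path="data", nxpe=8, output="data_64proc")
```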
- - Returns - ------- - True on success - - TODO - ---- - - Replace printing errors with raising `ValueError` - - """ - - if npes <= 0: - print("ERROR: Negative or zero number of processors") - return False - - if path == output: - print("ERROR: Can't overwrite restart files") - return False - - if informat is None: - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - else: - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - - nfiles = len(file_list) - - # Read old processor layout - f = DataFile(file_list[0]) - - # Get list of variables - var_list = f.list() - if len(var_list) == 0: - print("ERROR: No data found") - return False - - old_processor_layout = get_processor_layout(f, has_t_dimension=False) - print("Grid sizes: ", old_processor_layout.nx, - old_processor_layout.ny, old_processor_layout.mz) - - if nfiles != old_processor_layout.npes: - print("WARNING: Number of restart files inconsistent with NPES") - print("Setting nfiles = " + str(old_processor_layout.npes)) - nfiles = old_processor_layout.npes - - if nfiles == 0: - print("ERROR: No restart files found") - return False - - informat = file_list[0].split(".")[-1] - if outformat is None: - outformat = informat - - try: - new_processor_layout = create_processor_layout( - old_processor_layout, npes, nxpe=nxpe) - except ValueError as e: - print("Could not find valid processor split. " + e.what()) - - nx = old_processor_layout.nx - ny = old_processor_layout.ny - mz = old_processor_layout.mz - mxg = old_processor_layout.mxg - myg = old_processor_layout.myg - old_npes = old_processor_layout.npes - old_nxpe = old_processor_layout.nxpe - old_nype = old_processor_layout.nype - old_mxsub = old_processor_layout.mxsub - old_mysub = old_processor_layout.mysub - - nxpe = new_processor_layout.nxpe - nype = new_processor_layout.nype - mxsub = new_processor_layout.mxsub - mysub = new_processor_layout.mysub - mzsub = new_processor_layout.mz - - outfile_list = [] - for i in range(npes): - outpath = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - outfile_list.append(DataFile(outpath, write=True, create=True)) - - DataFileCache = create_cache(path, "BOUT.restart") - - for v in var_list: - dimensions = f.dimensions(v) - ndims = len(dimensions) - - # collect data - data = collect(v, xguards=True, yguards=True, info=False, - datafile_cache=DataFileCache) - - # write data - for i in range(npes): - ix = i % nxpe - iy = int(i/nxpe) - outfile = outfile_list[i] - if v == "NPES": - outfile.write(v, npes) - elif v == "NXPE": - outfile.write(v, nxpe) - elif v == "NYPE": - outfile.write(v, nype) - elif v == "MXSUB": - outfile.write(v, mxsub) - elif v == "MYSUB": - outfile.write(v, mysub) - elif v == "MZSUB": - outfile.write(v, mzsub) - elif dimensions == (): - # scalar - outfile.write(v, data) - elif dimensions == ('x', 'y'): - # Field2D - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg]) - elif dimensions == ('x', 'z'): - # FieldPerp - yindex_global = data.attributes['yindex_global'] - if yindex_global + myg >= iy*mysub and yindex_global + myg < (iy+1)*mysub+2*myg: - outfile.write(v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, :]) - else: - nullarray = BoutArray(np.zeros([mxsub+2*mxg, mysub+2*myg]), attributes={"bout_type":"FieldPerp", "yindex_global":-myg-1}) - outfile.write(v, nullarray) - elif dimensions == ('x', 'y', 'z'): - # Field3D - outfile.write( - v, data[ix*mxsub:(ix+1)*mxsub+2*mxg, iy*mysub:(iy+1)*mysub+2*myg, :]) - else: - print( - "ERROR: variable found with unexpected 
dimensions,", dimensions, v) - - f.close() - for outfile in outfile_list: - outfile.close() - - return True - - -def resizeY(newy, path="data", output=".", informat="nc", outformat=None, myg=2): - """Increase the number of Y points in restart files - - NOTE: - * Can't overwrite - - Parameters - ---------- - newy : int - ny for the new file - path : str, optional - Path to original restart files (default: "data") - output : str, optional - Path to write new restart files (default: current directory) - informat : str, optional - File extension of original files (default: "nc") - outformat : str, optional - File extension of new files (default: use the same as `informat`) - myg : int, optional - Number of ghost points in y (default: 2) - - Returns - ------- - True on success, else False - - TODO - ---- - - Replace printing errors with raising `ValueError` - - Make informat work like `redistribute` - - """ - - if outformat is None: - outformat = informat - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*."+informat)) - - nfiles = len(file_list) - - if nfiles == 0: - print("ERROR: No restart files found") - return False - - for i in range(nfiles): - # Open each data file - infname = os.path.join(path, "BOUT.restart."+str(i)+"."+informat) - outfname = os.path.join(output, "BOUT.restart."+str(i)+"."+outformat) - - print("Processing %s -> %s" % (infname, outfname)) - - infile = DataFile(infname) - outfile = DataFile(outfname, create=True) - - # Copy basic information - for var in ["hist_hi", "NXPE", "NYPE", "tt"]: - data = infile.read(var) - try: - # Convert to scalar if necessary - data = data[0] - except: - pass - outfile.write(var, data) - - # Get a list of variables - varnames = infile.list() - - for var in varnames: - dimensions = infile.dimensions(var) - if dimensions == ('x', 'y', 'z'): - # Could be an evolving variable [x,y,z] - - print(" -> Resizing " + var) - - # Read variable from input - indata = infile.read(var) - - nx, ny, nz = indata.shape - - # y coordinate in input and output data - iny = (arange(ny) - myg + 0.5) / (ny - 2*myg) - outy = (arange(newy) - myg + 0.5) / (newy - 2*myg) - - outdata = zeros([nx, newy, nz]) - - for x in range(nx): - for z in range(nz): - f = interp1d( - iny, indata[x, :, z], bounds_error=False, fill_value=0.0) - outdata[x, :, z] = f(outy) - - outfile.write(var, outdata) - elif dimensions == ('x', 'y'): - # Assume evolving variable [x,y] - print(" -> Resizing " + var) - - # Read variable from input - indata = infile.read(var) - - nx, ny = indata.shape - - # y coordinate in input and output data - iny = (arange(ny) - myg + 0.5) / (ny - 2*myg) - outy = (arange(newy) - myg + 0.5) / (newy - 2*myg) - - outdata = zeros([nx, newy]) - - for x in range(nx): - f = interp1d(iny, indata[x, :], - bounds_error=False, fill_value=0.0) - outdata[x, :] = f(outy) - - outfile.write(var, outdata) - else: - # Copy variable - print(" -> Copying " + var) - - # Read variable from input - data = infile.read(var) - try: - # Convert to scalar if necessary - data = data[0] - except: - pass - outfile.write(var, data) - - infile.close() - outfile.close() - - -def addvar(var, value, path="."): - """Adds a variable with constant value to all restart files. - - .. warning:: Modifies restart files in place! This is in contrast - to most of the functions in this module! - - This is useful for restarting simulations whilst turning on new - equations. By default BOUT++ throws an error if an evolving - variable is not in the restart file. 
By setting an option the - variable can be set to zero. This allows it to start with a - non-zero value. - - Parameters - ---------- - var : str - The name of the variable to add - value : float - Constant value for the variable - path : str, optional - Input path to data files (default: current directory) - - """ - - file_list = glob.glob(os.path.join(path, "BOUT.restart.*")) - nfiles = len(file_list) - - print("Number of restart files: %d" % (nfiles,)) - # Loop through all the restart files - for filename in file_list: - print(filename) - # Open the restart file for writing (modification) - with DataFile(filename, write=True) as df: - size = None - # Find a 3D variable and get its size - for varname in df.list(): - size = df.size(varname) - if len(size) == 3: - break - if size is None: - raise Exception("no 3D variables found") - - # Create a new 3D array with input value - data = np.zeros(size) + value - - # Set the variable in the NetCDF file - df.write(var, data) diff --git a/tools/pylib/boutdata/settings.py b/tools/pylib/boutdata/settings.py deleted file mode 100644 index c09b5cbd7f..0000000000 --- a/tools/pylib/boutdata/settings.py +++ /dev/null @@ -1,92 +0,0 @@ -"""Parse BOUT.inp settings file - -""" - - -def get(filename, name, section=None): - """Find and return a single value from a BOUT.inp settings file - - .. deprecated::3.0 - `settings.get` has been replaced with - `boututils.options.BoutOptions` - - Parameters - ---------- - filename : str - Name of the settings file - name : str - The name of the setting - section : str, optional - The section to look in (default: the global section) - - Note that names and sections are case insensitive - - Returns - ------- - str - Value of the setting. If not found, raises a ValueError - - Examples - -------- - - >>> settings.get("BOUT.inp", "nout") - '100' - - >>> settings.get("BOUT.inp", "compress", section="highbeta") - 'true' - - """ - with open(filename, "rt") as f: - if section is not None: - # First find the section - found = False - for line in f: - # Strip spaces from left - line = line.lstrip(' \t\n\r') - if len(line) < 1: - continue # Empty line - - # if line starts with '[' then this is a section - if line[0] == '[': - # Split on ']' - head, _ = line[1:].split(']', 1) - # head is now the section name - if head.lower() == section.lower(): - found = True - break - if not found: - raise ValueError("Section '%s' not found" % (section)) - - # Now in the correct section - - for line in f: - # Strip spaces from left - line = line.lstrip(' \t\n\r') - if len(line) < 1: - continue # Empty line - - # if line starts with '[' then this is a section - if line[0] == '[': - raise ValueError("Name '%s' not found in section '%s'" % (name,section)) - # Check if this line contains an '=' - if '=' in line: - # Check if contains comment - comment = '' - if '#' in line: - line, comment = line.split('#', 1) - # Split on '=' - key, value = line.split('=',1) - # Strip whitespace - key = key.strip(' \t\n\r') - value = value.strip(' \t\n\r') - - # Strip out quotes if present - if value[0] == '"' or value[0] == "'": - value = value[1:] - if value[-1] == '"' or value[-1] == "'": - value = value[:-1] - - #print("'%s' = '%s'" % (key, value)) - if key.lower() == name.lower(): # Case insensitive - return value - diff --git a/tools/pylib/boutdata/shiftz.py b/tools/pylib/boutdata/shiftz.py deleted file mode 100644 index 606b94f176..0000000000 --- a/tools/pylib/boutdata/shiftz.py +++ /dev/null @@ -1,91 +0,0 @@ -from numpy import ndarray, pi, cos, sin -from numpy 
import fft - - -def shiftz(var, zangle, zperiod=1.0): - """Shift a variable in Z, changing between field-aligned and - orthogonal X-Z coordinates. This mainly used for tokamak - simulations in field-aligned coordinates. - - Parameters - ---------- - var : array_like - Data to be shifted - 4D [t,x,y,z] - 3D [x,y,z] or [t,x,z] - 2D [x,z] - zangle : array_like - The shift angle - 2D [x,y] (if var is 4D or 3D [x,y,z]) - 1D [x] (if var is 3D [t,x,z] or 2D) - zperiod : float, optional - The fraction of 2pi covered by the variable in Z. This - corresponds to the ZPERIOD variable in BOUT.inp and multiplies - the kz wavenumbers by this factor. - - Returns - ------- - ndarray - A numpy array of the same size and shape as var - - Examples - -------- - - >>> from boutdata import collect - >>> from boututils.datafile import DataFile - >>> from boutdata.shiftz import shiftz - >>> n = collect("Ne") # Read 4D variable [t,x,y,z] - >>> d = DataFile("grid.nc") # Read the grid file - >>> nxz = shiftz(n, d["zShift"], zperiod=4) - - nxz is now in orthogonal X-Z coordinates (X is psi). - - Note that in older grid files "qinty" is used rather - than "zShift". - - """ - - if len(var.shape) == 4: - # 4D variable [t,x,y,z] - result = ndarray(var.shape) - for t in range(var.shape[0]): - # Shift each time slice separately - result[t,:,:,:] = shiftz(var[t,:,:,:], zangle, zperiod=zperiod) - return result - elif len(var.shape) == 3: - if len(zangle.shape) == 2: - # 3D variable [x,y,z], array [x,y] - result = ndarray(var.shape) - for y in range(var.shape[1]): - result[:,y,:] = shiftz(var[:,y,:], zangle[:,y], zperiod=zperiod) - return result - elif len(zangle.shape) == 1: - # 3D variable [t,x,z], array [x] - result = ndarray(var.shape) - for t in range(var.shape[0]): - result[t,:,:] = shiftz(var[t,:,:], zangle, zperiod=zperiod) - return result - else: - raise ValueError("Expecting zangle to be 1 or 2D") - elif len(var.shape) == 2: - if len(zangle.shape) != 1: - raise ValueError("Expecting zangle to be 1D") - - ################################ - # Main algorithm here - # var is [x,z] - # zangle is [x] - - # Take FFT in Z direction - f = fft.rfft(var, axis=1) - - zlength = 2.*pi/zperiod - - for z in range(1, f.shape[1]): - kwave=z*2.0*pi/zlength - f[:,z] *= cos(kwave * zangle) - 1j*sin(kwave*zangle) - return fft.irfft(f, var.shape[1], axis=1) - - else: - raise ValueError("Don't know how to handle 1D variable") - diff --git a/tools/pylib/boutdata/squashoutput.py b/tools/pylib/boutdata/squashoutput.py deleted file mode 100644 index 983393980c..0000000000 --- a/tools/pylib/boutdata/squashoutput.py +++ /dev/null @@ -1,160 +0,0 @@ -""" -Collect all data from BOUT.dmp.* files and create a single output file. - -Output file named BOUT.dmp.nc by default - -Useful because this discards ghost cell data (that is only useful for debugging) -and because single files are quicker to download. - -""" - -from boutdata.data import BoutOutputs -from boututils.datafile import DataFile -from boututils.boutarray import BoutArray -import numpy -import os -import gc -import tempfile -import shutil -import glob - - -def squashoutput(datadir=".", outputname="BOUT.dmp.nc", format="NETCDF4", tind=None, - xind=None, yind=None, zind=None, xguards=True, yguards="include_upper", - singleprecision=False, compress=False, least_significant_digit=None, - quiet=False, complevel=None, append=False, delete=False): - """ - Collect all data from BOUT.dmp.* files and create a single output file. 
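A minimal sketch of how this function is typically driven; the directory name and the compression settings are illustrative assumptions, not requirements:

>>> from boutdata.squashoutput import squashoutput
>>> squashoutput(datadir="data", outputname="BOUT.dmp.nc",
...              compress=True, complevel=5, singleprecision=True)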
- - Parameters - ---------- - datadir : str - Directory where dump files are and where output file will be created. - default "." - outputname : str - Name of the output file. File suffix specifies whether to use NetCDF or - HDF5 (see boututils.datafile.DataFile for suffixes). - default "BOUT.dmp.nc" - format : str - format argument passed to DataFile - default "NETCDF4" - tind : slice, int, or [int, int, int] - tind argument passed to collect - default None - xind : slice, int, or [int, int, int] - xind argument passed to collect - default None - yind : slice, int, or [int, int, int] - yind argument passed to collect - default None - zind : slice, int, or [int, int, int] - zind argument passed to collect - default None - xguards : bool - xguards argument passed to collect - default True - yguards : bool or "include_upper" - yguards argument passed to collect (note different default to collect's) - default "include_upper" - singleprecision : bool - If true convert data to single-precision floats - default False - compress : bool - If true enable compression in the output file - least_significant_digit : int or None - How many digits should be retained? Enables lossy - compression. Default is lossless compression. Needs - compression to be enabled. - complevel : int or None - Compression level, 1 should be fastest, and 9 should yield - highest compression. - quiet : bool - Be less verbose. default False - append : bool - Append to existing squashed file - delete : bool - Delete the original files after squashing. - """ - - fullpath = os.path.join(datadir, outputname) - - if append: - datadirnew = tempfile.mkdtemp(dir=datadir) - for f in glob.glob(datadir + "/BOUT.dmp.*.??"): - if not quiet: - print("moving", f, flush=True) - shutil.move(f, datadirnew) - oldfile = datadirnew + "/" + outputname - datadir = datadirnew - - if os.path.isfile(fullpath) and not append: - raise ValueError( - fullpath + " already exists. Collect may try to read from this file, which is presumably not desired behaviour.") - - # useful object from BOUT pylib to access output data - outputs = BoutOutputs(datadir, info=False, xguards=xguards, - yguards=yguards, tind=tind, xind=xind, yind=yind, zind=zind) - outputvars = outputs.keys() - # Read a value to cache the files - outputs[outputvars[0]] - - if append: - # move only after the file list is cached - shutil.move(fullpath, oldfile) - - t_array_index = outputvars.index("t_array") - outputvars.append(outputvars.pop(t_array_index)) - - kwargs = {} - if compress: - kwargs['zlib'] = True - if least_significant_digit is not None: - kwargs['least_significant_digit'] = least_significant_digit - if complevel is not None: - kwargs['complevel'] = complevel - if append: - old = DataFile(oldfile) - # Check if dump on restart was enabled - # If so, we want to drop the duplicated entry - cropnew = 0 - if old['t_array'][-1] == outputs['t_array'][0]: - cropnew = 1 - # Make sure we don't end up with duplicated data: - for ot in old['t_array']: - if ot in outputs['t_array'][cropnew:]: - raise RuntimeError( - "For some reason t_array has some duplicated entries in the new and old file.") - # Create single file for output and write data - with DataFile(fullpath, create=True, write=True, format=format, **kwargs) as f: - for varname in outputvars: - if not quiet: - print(varname, flush=True) - - var = outputs[varname] - if append: - dims = outputs.dimensions[varname] - if 't' in dims: - var = var[cropnew:, ...] 
- varold = old[varname] - var = BoutArray(numpy.append( - varold, var, axis=0), var.attributes) - - if singleprecision: - if not isinstance(var, int): - var = BoutArray(numpy.float32(var), var.attributes) - - f.write(varname, var) - # Write changes, free memory - f.sync() - var = None - gc.collect() - - if delete: - if append: - os.remove(oldfile) - for f in glob.glob(datadir + "/BOUT.dmp.*.??"): - if not quiet: - print("Deleting", f, flush=True) - os.remove(f) - if append: - os.rmdir(datadir) diff --git a/tools/pylib/boututils b/tools/pylib/boututils new file mode 120000 index 0000000000..5eaca68d8c --- /dev/null +++ b/tools/pylib/boututils @@ -0,0 +1 @@ +../../externalpackages/boututils/boututils/ \ No newline at end of file diff --git a/tools/pylib/boututils/View3D.py b/tools/pylib/boututils/View3D.py deleted file mode 100644 index f3a771dfc7..0000000000 --- a/tools/pylib/boututils/View3D.py +++ /dev/null @@ -1,390 +0,0 @@ -""" -View a 3D rendering of the magnetic field lines and the streamlines of the rational surfaces. -The quality of the later can be used as an indicator of the quality of the grid. The magnetic field -is computed from efit_analyzed.py. The script can be used as a template to show additional properties of the field - -based on enthought's example by Gael Varoquaux -https://docs.enthought.com/mayavi/mayavi/auto/example_magnetic_field.html#example-magnetic-field - -""" -from __future__ import absolute_import -from __future__ import division -from builtins import range -from past.utils import old_div - - -from boutdata.collect import collect -import numpy as np - -import sys - -if sys.version_info[0]>=3: - message = "View3D uses the VTK library through mayavi, which"+\ - " is currently only available in python 2" - raise ImportError(message) -else: - from mayavi import mlab - -from .read_geqdsk import read_geqdsk -from boututils.View2D import View2D -from scipy import interpolate -from .boutgrid import * - - -def View3D(g,path=None, gb=None): - ############################################################################## - # Resolution - - n=51 - - #compute Bxy - [Br,Bz,x,y,q]=View2D(g,option=1) - - - rd=g.r.max()+.5 - zd=g.z.max()+.5 - ############################################################################## - # The grid of points on which we want to evaluate the field - X, Y, Z = np.mgrid[-rd:rd:n*1j, -rd:rd:n*1j, -zd:zd:n*1j] - ## Avoid rounding issues : - #f = 1e4 # this gives the precision we are interested by : - #X = np.round(X * f) / f - #Y = np.round(Y * f) / f - #Z = np.round(Z * f) / f - - r = np.c_[X.ravel(), Y.ravel(), Z.ravel()] - - ############################################################################## - # Calculate field - # First initialize a container matrix for the field vector : - B = np.empty_like(r) - - - #Compute Toroidal field - # fpol is given between simagx (psi on the axis) and sibdry ( - # psi on limiter or separatrix). So the toroidal field (fpol/R) and the q profile are within these boundaries - # For each r,z we have psi thus we get fpol if (r,z) is within the boundary (limiter or separatrix) and fpol=fpol(outer_boundary) for outside - - #The range of psi is g.psi.max(), g.psi.min() but we have f(psi) up to the limit. 
Thus we use a new extended variable padded up to max psi - # set points between psi_limit and psi_max - - add_psi=np.linspace(g.sibdry,g.psi.max(),10) - - # define the x (psi) array - xf=np.arange(np.float(g.qpsi.size))*(g.sibdry-g.simagx)/np.float(g.qpsi.size-1) + g.simagx - - # pad the extra values excluding the 1st value - - xf=np.concatenate((xf, add_psi[1::]), axis=0) - - # pad fpol with corresponding points - - fp=np.lib.pad(g.fpol, (0,9), 'edge') - - # create interpolating function - - f = interpolate.interp1d(xf, fp) - - #calculate Toroidal field - - Btrz = old_div(f(g.psi), g.r) - - - rmin=g.r[:,0].min() - rmax=g.r[:,0].max() - zmin=g.z[0,:].min() - zmax=g.z[0,:].max() - - - B1p,B2p,B3p,B1t,B2t,B3t = magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax, Br,Bz,Btrz) - - bpnorm = np.sqrt(B1p**2 + B2p**2 + B3p**2) - btnorm = np.sqrt(B1t**2 + B2t**2 + B3t**2) - - BBx=B1p+B1t - BBy=B2p+B2t - BBz=B3p+B3t - btotal = np.sqrt(BBx**2 + BBy**2 + BBz**2) - - Psi = psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax) - - ############################################################################## - # Visualization - - # We threshold the data ourselves, as the threshold filter produce a - # data structure inefficient with IsoSurface - #bmax = bnorm.max() - # - #B1[B > bmax] = 0 - #B2[B > bmax] = 0 - #B3[B > bmax] = 0 - #bnorm[bnorm > bmax] = bmax - - mlab.figure(1, size=(1080,1080))#, bgcolor=(1, 1, 1), fgcolor=(0.5, 0.5, 0.5)) - - mlab.clf() - - fieldp = mlab.pipeline.vector_field(X, Y, Z, B1p, B2p, B3p, - scalars=bpnorm, name='Bp field') - - fieldt = mlab.pipeline.vector_field(X, Y, Z, B1t, B2t, B3t, - scalars=btnorm, name='Bt field') - - field = mlab.pipeline.vector_field(X, Y, Z, BBx, BBy, BBz, - scalars=btotal, name='B field') - - - - field2 = mlab.pipeline.scalar_field(X, Y, Z, Psi, name='Psi field') - - #vectors = mlab.pipeline.vectors(field, - # scale_factor=1,#(X[1, 0, 0] - X[0, 0, 0]), - # ) - - #vcp1 = mlab.pipeline.vector_cut_plane(fieldp, - # scale_factor=1, - # colormap='jet', - # plane_orientation='y_axes') - ## - #vcp2 = mlab.pipeline.vector_cut_plane(fieldt, - # scale_factor=1, - # colormap='jet', - # plane_orientation='x_axes') - - - # Mask random points, to have a lighter visualization. 
- #vectors.glyph.mask_input_points = True - #vectors.glyph.mask_points.on_ratio = 6 - - #vcp = mlab.pipeline.vector_cut_plane(field1) - #vcp.glyph.glyph.scale_factor=5*(X[1, 0, 0] - X[0, 0, 0]) - # For prettier picture: - #vcp1.implicit_plane.widget.enabled = False - #vcp2.implicit_plane.widget.enabled = False - - iso = mlab.pipeline.iso_surface(field2, - contours=[Psi.min()+.01], - opacity=0.4, - colormap='bone') - - for i in range(q.size): - iso.contour.contours[i+1:i+2]=[q[i]] - - iso.compute_normals = True - # - - #mlab.pipeline.image_plane_widget(field2, - # plane_orientation='x_axes', - # #slice_index=10, - # extent=[-rd, rd, -rd, rd, -zd,zd] - # ) - #mlab.pipeline.image_plane_widget(field2, - # plane_orientation='y_axes', - # # slice_index=10, - # extent=[-rd, rd, -rd,rd, -zd,zd] - # ) - - - - #scp = mlab.pipeline.scalar_cut_plane(field2, - # colormap='jet', - # plane_orientation='x_axes') - # For prettier picture and with 2D streamlines: - #scp.implicit_plane.widget.enabled = False - #scp.enable_contours = True - #scp.contour.number_of_contours = 20 - - # - - # Magnetic Axis - - s=mlab.pipeline.streamline(field) - s.streamline_type = 'line' - s.seed.widget = s.seed.widget_list[3] - s.seed.widget.position=[g.rmagx,0.,g.zmagx] - s.seed.widget.enabled = False - - - # q=i surfaces - - for i in range(np.shape(x)[0]): - - s=mlab.pipeline.streamline(field) - s.streamline_type = 'line' - ##s.seed.widget = s.seed.widget_list[0] - ##s.seed.widget.center = 0.0, 0.0, 0.0 - ##s.seed.widget.radius = 1.725 - ##s.seed.widget.phi_resolution = 16 - ##s.seed.widget.handle_direction =[ 1., 0., 0.] - ##s.seed.widget.enabled = False - ##s.seed.widget.enabled = True - ##s.seed.widget.enabled = False - # - if x[i].size>1 : - s.seed.widget = s.seed.widget_list[3] - s.seed.widget.position=[x[i][0],0.,y[i][0]] - s.seed.widget.enabled = False - - - # A trick to make transparency look better: cull the front face - iso.actor.property.frontface_culling = True - - #mlab.view(39, 74, 0.59, [.008, .0007, -.005]) - out=mlab.outline(extent=[-rd, rd, -rd, rd, -zd, zd], line_width=.5 ) - out.outline_mode = 'cornered' - out.outline_filter.corner_factor = 0.0897222 - - - w = mlab.gcf() - w.scene.camera.position = [13.296429046581462, 13.296429046581462, 12.979811259697154] - w.scene.camera.focal_point = [0.0, 0.0, -0.31661778688430786] - w.scene.camera.view_angle = 30.0 - w.scene.camera.view_up = [0.0, 0.0, 1.0] - w.scene.camera.clipping_range = [13.220595435695394, 35.020427055647517] - w.scene.camera.compute_view_plane_normal() - w.scene.render() - w.scene.show_axes = True - - mlab.show() - - if(path is not None): - #BOUT data - #path='../Aiba/' - # - #gb = file_import(path+'aiba.bout.grd.nc') - #gb = file_import("../cbm18_8_y064_x516_090309.nc") - #gb = file_import("cbm18_dens8.grid_nx68ny64.nc") - #gb = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect('P', path=path) - data = data[50,:,:,:] - #data0=collect("P0", path=path) - #data=data+data0[:,:,None] - - s = np.shape(data) - nz = s[2] - - - sgrid = create_grid(gb, data, 1) - - # OVERPLOT the GRID - #mlab.pipeline.add_dataset(sgrid) - #gr=mlab.pipeline.grid_plane(sgrid) - #gr.grid_plane.axis='x' - - - ## pressure scalar cut plane from bout - scpb = mlab.pipeline.scalar_cut_plane(sgrid, - colormap='jet', - plane_orientation='x_axes') - - scpb.implicit_plane.widget.enabled = False - scpb.enable_contours = True - scpb.contour.filled_contours=True - # - scpb.contour.number_of_contours = 20 - # - # - #loc=sgrid.points - #p=sgrid.point_data.scalars - 
- # compute pressure from scatter points interpolation - #pint=interpolate.griddata(loc, p, (X, Y, Z), method='linear') - #dpint=np.ma.masked_array(pint,np.isnan(pint)).filled(0.) - # - #p2 = mlab.pipeline.scalar_field(X, Y, Z, dpint, name='P field') - # - #scp2 = mlab.pipeline.scalar_cut_plane(p2, - # colormap='jet', - # plane_orientation='y_axes') - # - #scp2.implicit_plane.widget.enabled = False - #scp2.enable_contours = True - #scp2.contour.filled_contours=True - #scp2.contour.number_of_contours = 20 - #scp2.contour.minimum_contour=.001 - - - - # CHECK grid orientation - #fieldr = mlab.pipeline.vector_field(X, Y, Z, -BBx, BBy, BBz, - # scalars=btotal, name='B field') - # - #sg=mlab.pipeline.streamline(fieldr) - #sg.streamline_type = 'tube' - #sg.seed.widget = sg.seed.widget_list[3] - #sg.seed.widget.position=loc[0] - #sg.seed.widget.enabled = False - - - - #OUTPUT grid - - #ww = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - #ww.write() - - return - -def magnetic_field(g,X,Y,Z,rmin,rmax,zmin,zmax,Br,Bz,Btrz): - - rho = np.sqrt(X**2 + Y**2) - phi=np.arctan2(Y,X) - - br=np.zeros(np.shape(X)) - bz=np.zeros(np.shape(X)) - bt=np.zeros(np.shape(X)) - - nx,ny,nz=np.shape(X) - - mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax) - k=np.argwhere(mask==True) - - fr=interpolate.interp2d(g.r[:,0], g.z[0,:], Br.T) - fz=interpolate.interp2d(g.r[:,0], g.z[0,:], Bz.T) - ft=interpolate.interp2d(g.r[:,0], g.z[0,:], Btrz.T) - - for i in range(len(k)): - br[k[i,0],k[i,1],k[i,2]]=fr(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - bz[k[i,0],k[i,1],k[i,2]]=fz(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - bt[k[i,0],k[i,1],k[i,2]]=ft(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - - # Toroidal component - B1t=-bt*np.sin(phi) - B2t=bt*np.cos(phi) - B3t=0*bz - - # Poloidal component - B1p=br*np.cos(phi) - B2p=br*np.sin(phi) - B3p=bz - - - # Rotate the field back in the lab's frame - return B1p,B2p,B3p,B1t,B2t,B3t - - -def psi_field(g,X,Y,Z,rmin,rmax,zmin,zmax): - - rho = np.sqrt(X**2 + Y**2) - - psi=np.zeros(np.shape(X)) - - nx,ny,nz=np.shape(X) - - mask = (rho >= rmin) & (rho <= rmax) & (Z >= zmin) & (Z <= zmax) - k=np.argwhere(mask==True) - - f=interpolate.interp2d(g.r[:,0], g.z[0,:], g.psi.T) - - for i in range(len(k)): - psi[k[i,0],k[i,1],k[i,2]]=f(rho[k[i,0],k[i,1],k[i,2]],Z[k[i,0],k[i,1],k[i,2]]) - - # Rotate the field back in the lab's frame - return psi - - -if __name__ == '__main__': - path='../../tokamak_grids/pyGridGen/' - g=read_geqdsk(path+"g118898.03400") - View3D(g) - mlab.show() diff --git a/tools/pylib/boututils/__init__.py b/tools/pylib/boututils/__init__.py deleted file mode 100644 index 2b3a54d3ce..0000000000 --- a/tools/pylib/boututils/__init__.py +++ /dev/null @@ -1,42 +0,0 @@ -""" Generic routines, useful for all data """ - -import sys - -try: - from builtins import str -except ImportError: - raise ImportError("Please install the future module to use Python 2") - -# Modules to be imported independent of version -for_all_versions = [\ - 'calculus',\ - 'closest_line',\ - 'datafile',\ - # 'efit_analyzer',\ # bunch pkg required - 'fft_deriv',\ - 'fft_integrate',\ - 'file_import',\ - 'int_func',\ - 'linear_regression',\ - 'mode_structure',\ - # 'moment_xyzt',\ # bunch pkg requried - 'run_wrapper',\ - 'shell',\ - 'showdata',\ - # 'surface_average',\ - # 'volume_integral',\ #bunch pkg required - ] - -# Check the current python version -if sys.version_info[0]>=3: - do_import = for_all_versions - __all__ = do_import -else: - do_import = 
for_all_versions - do_import.append('anim') - do_import.append('plotpolslice') - do_import.append('View3D') - __all__ = do_import - -__version__ = '0.1.4' -__name__ = 'boututils' diff --git a/tools/pylib/boututils/analyse_equil_2.py b/tools/pylib/boututils/analyse_equil_2.py deleted file mode 100644 index d315c6abd6..0000000000 --- a/tools/pylib/boututils/analyse_equil_2.py +++ /dev/null @@ -1,270 +0,0 @@ -"""Equilibrium analysis routine - -Takes a RZ psi grid, and finds x-points and o-points -""" - -from __future__ import print_function -from __future__ import division - -from builtins import zip -from builtins import str -from builtins import range -from past.utils import old_div - -import numpy -from . import local_min_max -from scipy.interpolate import RectBivariateSpline -from matplotlib.pyplot import contour, gradient, annotate, plot, draw -from crosslines import find_inter - - -def analyse_equil(F, R, Z): - """Takes an RZ psi grid, and finds x-points and o-points - - Parameters - ---------- - F : array_like - 2-D array of psi values - R : array_like - 1-D array of major radii, its length should be the same as the - first dimension of F - Z : array_like - 1-D array of heights, its length should be the same as the - second dimension of F - - Returns - ------- - object - An object of critical points containing: - - n_opoint, n_xpoint - Number of O- and X-points - primary_opt - Index of plasma centre O-point - inner_sep - X-point index of inner separatrix - opt_ri, opt_zi - R and Z indices for each O-point - opt_f - Psi value at each O-point - xpt_ri, xpt_zi - R and Z indices for each X-point - xpt_f - Psi value of each X-point - - """ - s = numpy.shape(F) - nx = s[0] - ny = s[1] - - #;;;;;;;;;;;;;;; Find critical points ;;;;;;;;;;;;; - # - # Need to find starting locations for O-points (minima/maxima) - # and X-points (saddle points) - # - Rr=numpy.tile(R,nx).reshape(nx,ny).T - Zz=numpy.tile(Z,ny).reshape(nx,ny) - - contour1=contour(Rr,Zz,gradient(F)[0], levels=[0.0], colors='r') - contour2=contour(Rr,Zz,gradient(F)[1], levels=[0.0], colors='r') - - draw() - - -### 1st method - line crossings --------------------------- - res=find_inter( contour1, contour2) - - #rex1=numpy.interp(res[0], R, numpy.arange(R.size)).astype(int) - #zex1=numpy.interp(res[1], Z, numpy.arange(Z.size)).astype(int) - - rex1=res[0] - zex1=res[1] - - w=numpy.where((rex1 > R[2]) & (rex1 < R[nx-3]) & (zex1 > Z[2]) & (zex1 < Z[nx-3])) - nextrema = numpy.size(w) - rex1=rex1[w].flatten() - zex1=zex1[w].flatten() - - -### 2nd method - local maxima_minima ----------------------- - res1=local_min_max.detect_local_minima(F) - res2=local_min_max.detect_local_maxima(F) - res=numpy.append(res1,res2,1) - - rex2=res[0,:].flatten() - zex2=res[1,:].flatten() - - - w=numpy.where((rex2 > 2) & (rex2 < nx-3) & (zex2 >2) & (zex2 < nx-3)) - nextrema = numpy.size(w) - rex2=rex2[w].flatten() - zex2=zex2[w].flatten() - - - n_opoint=nextrema - n_xpoint=numpy.size(rex1)-n_opoint - - # Needed for interp below - - Rx=numpy.arange(numpy.size(R)) - Zx=numpy.arange(numpy.size(Z)) - - - - print("Number of O-points: "+numpy.str(n_opoint)) - print("Number of X-points: "+numpy.str(n_xpoint)) - - # Deduce the O & X points - - x=R[rex2] - y=Z[zex2] - - dr=old_div((R[numpy.size(R)-1]-R[0]),numpy.size(R)) - dz=old_div((Z[numpy.size(Z)-1]-Z[0]),numpy.size(Z)) - - - repeated=set() - for i in range(numpy.size(rex1)): - for j in range(numpy.size(x)): - if numpy.abs(rex1[i]-x[j]) < 2*dr and numpy.abs(zex1[i]-y[j]) < 2*dz : repeated.add(i) - - # o-points - - 
o_ri=numpy.take(rex1,numpy.array(list(repeated))) - opt_ri=numpy.interp(o_ri,R,Rx) - o_zi=numpy.take(zex1,numpy.array(list(repeated))) - opt_zi=numpy.interp(o_zi,Z,Zx) - opt_f=numpy.zeros(numpy.size(opt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(opt_ri)): opt_f[i]=func(opt_ri[i], opt_zi[i]) - - n_opoint=numpy.size(opt_ri) - - # x-points - - x_ri=numpy.delete(rex1, numpy.array(list(repeated))) - xpt_ri=numpy.interp(x_ri,R,Rx) - x_zi=numpy.delete(zex1, numpy.array(list(repeated))) - xpt_zi=numpy.interp(x_zi,Z,Zx) - xpt_f=numpy.zeros(numpy.size(xpt_ri)) - func = RectBivariateSpline(Rx, Zx, F) - for i in range(numpy.size(xpt_ri)): xpt_f[i]=func(xpt_ri[i], xpt_zi[i]) - - n_xpoint=numpy.size(xpt_ri) - - # plot o-points - - plot(o_ri,o_zi,'o', markersize=10) - - labels = ['{0}'.format(i) for i in range(o_ri.size)] - for label, xp, yp in zip(labels, o_ri, o_zi): - annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='b') - - draw() - - # plot x-points - - plot(x_ri,x_zi,'x', markersize=10) - - labels = ['{0}'.format(i) for i in range(x_ri.size)] - for label, xp, yp in zip(labels, x_ri, x_zi): - annotate(label, xy = (xp, yp), xytext = (10, 10), textcoords = 'offset points',size='large', color='r') - - draw() - - print("Number of O-points: "+str(n_opoint)) - - if n_opoint == 0 : - raise RuntimeError("No O-points! Giving up on this equilibrium") - - - #;;;;;;;;;;;;;; Find plasma centre ;;;;;;;;;;;;;;;;;;; - # Find the O-point closest to the middle of the grid - - mind = (opt_ri[0] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[0] - (old_div(numpy.float(ny),2.)))**2 - ind = 0 - for i in range (1, n_opoint) : - d = (opt_ri[i] - (old_div(numpy.float(nx),2.)))**2 + (opt_zi[i] - (old_div(numpy.float(ny),2.)))**2 - if d < mind : - ind = i - mind = d - - primary_opt = ind - print("Primary O-point is at "+ numpy.str(numpy.interp(opt_ri[ind],numpy.arange(numpy.size(R)),R)) + ", " + numpy.str(numpy.interp(opt_zi[ind],numpy.arange(numpy.size(Z)),Z))) - print("") - - if n_xpoint > 0 : - - # Find the primary separatrix - - # First remove non-monotonic separatrices - nkeep = 0 - for i in range (n_xpoint) : - # Draw a line between the O-point and X-point - - n = 100 # Number of points - farr = numpy.zeros(n) - dr = old_div((xpt_ri[i] - opt_ri[ind]), numpy.float(n)) - dz = old_div((xpt_zi[i] - opt_zi[ind]), numpy.float(n)) - for j in range (n) : - # interpolate f at this location - func = RectBivariateSpline(Rx, Zx, F) - - farr[j] = func(opt_ri[ind] + dr*numpy.float(j), opt_zi[ind] + dz*numpy.float(j)) - - - # farr should be monotonic, and shouldn't cross any other separatrices - - maxind = numpy.argmax(farr) - minind = numpy.argmin(farr) - if (maxind < minind) : maxind, minind = minind, maxind - - # Allow a little leeway to account for errors - # NOTE: This needs a bit of refining - if (maxind > (n-3)) and (minind < 3) : - # Monotonic, so add this to a list of x-points to keep - if nkeep == 0 : - keep = [i] - else: - keep = numpy.append(keep, i) - - - nkeep = nkeep + 1 - - - if nkeep > 0 : - print("Keeping x-points ", keep) - xpt_ri = xpt_ri[keep] - xpt_zi = xpt_zi[keep] - xpt_f = xpt_f[keep] - else: - "No x-points kept" - - n_xpoint = nkeep - - - # Now find x-point closest to primary O-point - s = numpy.argsort(numpy.abs(opt_f[ind] - xpt_f)) - xpt_ri = xpt_ri[s] - xpt_zi = xpt_zi[s] - xpt_f = xpt_f[s] - inner_sep = 0 - - else: - - # No x-points. 
Pick mid-point in f - - xpt_f = 0.5*(numpy.max(F) + numpy.min(F)) - - print("WARNING: No X-points. Setting separatrix to F = "+str(xpt_f)) - - xpt_ri = 0 - xpt_zi = 0 - inner_sep = 0 - - - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - # Put results into a structure - - result = Bunch(n_opoint=n_opoint, n_xpoint=n_xpoint, # Number of O- and X-points - primary_opt=primary_opt, # Which O-point is the plasma centre - inner_sep=inner_sep, #Innermost X-point separatrix - opt_ri=opt_ri, opt_zi=opt_zi, opt_f=opt_f, # O-point location (indices) and psi values - xpt_ri=xpt_ri, xpt_zi=xpt_zi, xpt_f=xpt_f) # X-point locations and psi values - - return result - diff --git a/tools/pylib/boututils/anim.py b/tools/pylib/boututils/anim.py deleted file mode 100755 index d2f783858c..0000000000 --- a/tools/pylib/boututils/anim.py +++ /dev/null @@ -1,115 +0,0 @@ -#!/usr/bin/env python3 -"""Animate graph with mayavi - -""" - -from __future__ import print_function -from builtins import range -from boutdata.collect import collect -import numpy as np -import os -try: - from enthought.mayavi import mlab - from enthought.mayavi.mlab import * -except ImportError: - try: - from mayavi import mlab - from mayavi.mlab import * - except ImportError: - print("No mlab available") - -from tvtk.tools import visual - - -@mlab.show -@mlab.animate(delay=250) -def anim(s, d, *args, **kwargs): - """Animate graph with mayavi - - Parameters - ---------- - s : mayavi axis object - Axis to animate data on - d : array_like - 3-D array to animate - s1 : mayavi axis object, optional - Additional bundled graph (first item in *args) - save : bool, optional - Save png files for creating movie (default: False) - - """ - - if len(args) == 1: - s1 = args[0] - else: - s1=None - - try: - save = kwargs['save'] - except: - save = False - - - nt=d.shape[0] - - print('animating for ',nt,'timesteps') - if save == True : - print('Saving pics in folder Movie') - if not os.path.exists('Movie'): - os.makedirs('Movie') - - - for i in range(nt): - s.mlab_source.scalars = d[i,:,:] - if s1 is not None : s1.mlab_source.scalars = d[i,:,:] - title="t="+np.string0(i) - mlab.title(title,height=1.1, size=0.26) - if save == True : mlab.savefig('Movie/anim%d.png'%i) - yield - -if __name__ == '__main__': - - path='../../../examples/elm-pb/data' - - data = collect("P", path=path) - - nt=data.shape[0] - - ns=data.shape[1] - ne=data.shape[2] - nz=data.shape[3] - - - f = mayavi.mlab.figure(size=(600,600)) - # Tell visual to use this as the viewer. - visual.set_viewer(f) - - #First way - - s1 = contour_surf(data[0,:,:,10]+.1, contours=30, line_width=.5, transparent=True) - s = surf(data[0,:,:,10]+.1, colormap='Spectral')#, warp_scale='.1')#, representation='wireframe') - - - # second way - - #x, y= mgrid[0:ns:1, 0:ne:1] - #s = mesh(x,y,data[0,:,:,10], colormap='Spectral')#, warp_scale='auto')#, representation='wireframe') - s.enable_contours=True - s.contour.filled_contours=True -# - - #x, y, z= mgrid[0:ns:1, 0:ne:1, 0:nz:1] - # - #p=plot3d(x,y,z,data[10,:,:,:], tube_radius=0.025, colormap='Spectral') - #p=points3d(x,y,z,data[10,:,:,:], colormap='Spectral') -# - #s=contour3d(x,y,z,data[10,:,:,:], contours=4, transparent=True) - - #mlab.view(0.,0.) - colorbar() - #axes() - #outline() - - - # Run the animation. 
- anim(s,data[:,:,:,10]+.1,s1, save=True) diff --git a/tools/pylib/boututils/ask.py b/tools/pylib/boututils/ask.py deleted file mode 100644 index 31cbef059c..0000000000 --- a/tools/pylib/boututils/ask.py +++ /dev/null @@ -1,53 +0,0 @@ -"""Ask a yes/no question and return the answer. - -""" - -from builtins import input -import sys - - -def query_yes_no(question, default="yes"): - """Ask a yes/no question via input() and return their answer. - - Answers are case-insensitive. - - Probably originally from https://code.activestate.com/recipes/577058/ - via https://stackoverflow.com/a/3041990/2043465 - - Parameters - ---------- - question : str - Question to be presented to the user - default : {"yes", "no", None} - The presumed answer if the user just hits . - It must be "yes" (the default), "no" or None (meaning - an answer is required of the user). - - Returns - ------- - bool - True if the answer was "yes" or "y", False if "no" or "n" - """ - - valid = {"yes":True, "y":True, "ye":True, - "no":False, "n":False, "No":False, "N":False } - - if default is None: - prompt = " [y/n] " - elif default == "yes": - prompt = " [Y/n] " - elif default == "no": - prompt = " [y/N] " - else: - raise ValueError("invalid default answer: '%s'" % default) - - while True: - sys.stdout.write(question + prompt) - choice = input().lower() - if default is not None and choice == '': - return valid[default] - elif choice in valid: - return valid[choice] - else: - sys.stdout.write("Please respond with 'yes' or 'no' "\ - "(or 'y' or 'n').\n") diff --git a/tools/pylib/boututils/boutarray.py b/tools/pylib/boututils/boutarray.py deleted file mode 100644 index ce38baec2c..0000000000 --- a/tools/pylib/boututils/boutarray.py +++ /dev/null @@ -1,73 +0,0 @@ -"""Wrapper for ndarray with extra attributes for BOUT++ fields. - -""" - -import numpy - - -class BoutArray(numpy.ndarray): - """Wrapper for ndarray with extra attributes for BOUT++ fields. - - Parameters - ---------- - input_array : array_like - Data to convert to BoutArray - attributes : dict - Dictionary of extra attributes for BOUT++ fields - - Notably, these attributes should contain - ``bout_type``. Possible values are: - - - scalar - - Field2D - - Field3D - - If the variable is an evolving variable (i.e. has a time - dimension), then it is appended with a "_t" - - """ - - # See https://docs.scipy.org/doc/numpy-1.13.0/user/basics.subclassing.html - # for explanation of the structure of this numpy.ndarray wrapper - - def __new__(cls, input_array, attributes={}): - # Input array is an already formed ndarray instance - # We first cast to be our class type - obj = numpy.asarray(input_array).view(cls) - # add the dict of attributes to the created instance - obj.attributes = attributes - # Finally, we must return the newly created object: - return obj - - def __array_finalize__(self, obj): - # ``self`` is a new object resulting from - # ndarray.__new__(BoutArray, ...), therefore it only has - # attributes that the ndarray.__new__ constructor gave it - - # i.e. those of a standard ndarray. - # - # We could have got to the ndarray.__new__ call in 3 ways: - # From an explicit constructor - e.g. 
BoutArray(): - # obj is None - # (we're in the middle of the BoutArray.__new__ - # constructor, and self.attributes will be set when we return to - # BoutArray.__new__) - if obj is None: - return - # From view casting - e.g arr.view(BoutArray): - # obj is arr - # (type(obj) can be BoutArray) - # From new-from-template - e.g boutarray[:3] - # type(obj) is BoutArray - # - # Note that it is here, rather than in the __new__ method, that we set - # the default value for 'attributes', because this method sees all - # creation of default objects - with the BoutArray.__new__ constructor, - # but also with arr.view(BoutArray). - self.attributes = getattr(obj, 'attributes', None) - # We do not need to return anything - - def __format__(self, str): - try: - return super().__format__(str) - except TypeError: - return float(self).__format__(str) diff --git a/tools/pylib/boututils/boutgrid.py b/tools/pylib/boututils/boutgrid.py deleted file mode 100755 index ace67663cd..0000000000 --- a/tools/pylib/boututils/boutgrid.py +++ /dev/null @@ -1,139 +0,0 @@ -#!/usr/bin/env python3 -from __future__ import print_function -from builtins import range - -import numpy as np -from numpy import cos, sin, pi - -from tvtk.api import tvtk -#from enthought.mayavi.scripts import mayavi2 - -def aligned_points(grid, nz=1, period=1.0, maxshift=0.4): - try: - nx = grid["nx"]#[0] - ny = grid["ny"]#[0] - zshift = grid["zShift"] - Rxy = grid["Rxy"] - Zxy = grid["Zxy"] - except: - print("Missing required data") - return None - - - dz = 2.*pi / (period * (nz-1)) - phi0 = np.linspace(0,2.*pi / period, nz) - - - # Need to insert additional points in Y so mesh looks smooth - #for y in range(1,ny): - # ms = np.max(np.abs(zshift[:,y] - zshift[:,y-1])) - # if( - - # Create array of points, structured - - points = np.zeros([nx*ny*nz, 3]) - - - start = 0 - for y in range(ny): - - - end = start + nx*nz - - phi = zshift[:,y] + phi0[:,None] - r = Rxy[:,y] + (np.zeros([nz]))[:,None] - - xz_points = points[start:end] - - - xz_points[:,0] = (r*cos(phi)).ravel() # X - xz_points[:,1] = (r*sin(phi)).ravel() # Y - xz_points[:,2] = (Zxy[:,y]+(np.zeros([nz]))[:,None]).ravel() # Z - - - start = end - - return points - -def create_grid(grid, data, period=1): - - s = np.shape(data) - - nx = grid["nx"]#[0] - ny = grid["ny"]#[0] - nz = s[2] - - print("data: %d,%d,%d grid: %d,%d\n" % (s[0],s[1],s[2], nx,ny)) - - dims = (nx, nz, ny) - sgrid = tvtk.StructuredGrid(dimensions=dims) - pts = aligned_points(grid, nz, period) - print(np.shape(pts)) - sgrid.points = pts - - scalar = np.zeros([nx*ny*nz]) - start = 0 - for y in range(ny): - end = start + nx*nz - - #scalar[start:end] = (data[:,y,:]).transpose().ravel() - scalar[start:end] = (data[:,y,:]).ravel() - - print(y, " = " , np.max(scalar[start:end])) - start = end - - sgrid.point_data.scalars = np.ravel(scalar.copy()) - sgrid.point_data.scalars.name = "data" - - return sgrid - -#@mayavi2.standalone -def view3d(sgrid): - from mayavi.sources.vtk_data_source import VTKDataSource - from mayavi.modules.api import Outline, GridPlane - from mayavi.api import Engine - from mayavi.core.ui.engine_view import EngineView - e=Engine() - e.start() - s = e.new_scene() - # Do this if you need to see the MayaVi tree view UI. 
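Stepping back to BoutArray above: the attributes dict survives both view casting and slicing, which the following self-contained check illustrates (the shape and bout_type are arbitrary choices for illustration):

>>> import numpy as np
>>> from boututils.boutarray import BoutArray
>>> f2d = BoutArray(np.zeros((4, 8)), attributes={"bout_type": "Field2D"})
>>> f2d.attributes["bout_type"]
'Field2D'
>>> f2d[:2, :2].attributes["bout_type"]
'Field2D'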
- ev = EngineView(engine=e) - ui = ev.edit_traits() - -# mayavi.new_scene() - src = VTKDataSource(data=sgrid) - e.add_source(src) - e.add_module(Outline()) - g = GridPlane() - g.grid_plane.axis = 'x' - e.add_module(g) - -if __name__ == '__main__': - from boutdata.collect import collect - from boututils.file_import import file_import - - #path = "/media/449db594-b2fe-4171-9e79-2d9b76ac69b6/runs/data_33/" - path="../data" - - g = file_import("../bout.grd.nc") - #g = file_import("../cbm18_8_y064_x516_090309.nc") - #g = file_import("/home/ben/run4/reduced_y064_x256.nc") - - data = collect("P", tind=10, path=path) - data = data[0,:,:,:] - s = np.shape(data) - nz = s[2] - - #bkgd = collect("P0", path=path) - #for z in range(nz): - # data[:,:,z] += bkgd - - # Create a structured grid - sgrid = create_grid(g, data, 1) - - - w = tvtk.XMLStructuredGridWriter(input=sgrid, file_name='sgrid.vts') - w.write() - - # View the structured grid - view3d(sgrid) diff --git a/tools/pylib/boututils/boutwarnings.py b/tools/pylib/boututils/boutwarnings.py deleted file mode 100644 index cdb03b0518..0000000000 --- a/tools/pylib/boututils/boutwarnings.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Wrappers for warnings functions. - -Allows raising warnings that are always printed by default. -""" - -import warnings - -class AlwaysWarning(UserWarning): - def __init__(self, *args, **kwargs): - super(AlwaysWarning, self).__init__(*args, **kwargs) - -warnings.simplefilter("always", AlwaysWarning) - -def alwayswarn(message): - warnings.warn(message, AlwaysWarning, stacklevel=2) - -def defaultwarn(message): - warnings.warn(message, stacklevel=2) diff --git a/tools/pylib/boututils/bunch.py b/tools/pylib/boututils/bunch.py deleted file mode 100644 index 2bc1ca04c0..0000000000 --- a/tools/pylib/boututils/bunch.py +++ /dev/null @@ -1,6 +0,0 @@ -# what we need from bunch - -class Bunch: - def __init__(self, **dict): - for k in dict: - setattr(self, k, dict[k]) diff --git a/tools/pylib/boututils/calculus.py b/tools/pylib/boututils/calculus.py deleted file mode 100644 index 274230906b..0000000000 --- a/tools/pylib/boututils/calculus.py +++ /dev/null @@ -1,252 +0,0 @@ -""" -Derivatives and integrals of periodic and non-periodic functions - - -B.Dudson, University of York, Nov 2009 -""" -from __future__ import print_function -from __future__ import division - -from builtins import range - -try: - from past.utils import old_div -except ImportError: - def old_div(a, b): - return a / b - -from numpy import zeros, pi, array, transpose, sum, where, arange, multiply -from numpy.fft import rfft, irfft - -def deriv(*args, **kwargs): - """Take derivative of 1D array - - result = deriv(y) - result = deriv(x, y) - - keywords - - periodic = False Domain is periodic - """ - - nargs = len(args) - if nargs == 1: - var = args[0] - x = arange(var.size) - elif nargs == 2: - x = args[0] - var = args[1] - else: - raise RuntimeError("deriv must be given 1 or 2 arguments") - - try: - periodic = kwargs['periodic'] - except: - periodic = False - - n = var.size - if periodic: - # Use FFTs to take derivatives - f = rfft(var) - f[0] = 0.0 # Zero constant term - if n % 2 == 0: - # Even n - for i in arange(1,old_div(n,2)): - f[i] *= 2.0j * pi * float(i)/float(n) - f[-1] = 0.0 # Nothing from Nyquist frequency - else: - # Odd n - for i in arange(1,old_div((n-1),2) + 1): - f[i] *= 2.0j * pi * float(i)/float(n) - return irfft(f) - else: - # Non-periodic function - result = zeros(n) # Create empty array - if n > 2: - for i in arange(1, n-1): - # 2nd-order central 
difference in the middle of the domain - result[i] = old_div((var[i+1] - var[i-1]), (x[i+1] - x[i-1])) - # Use left,right-biased stencils on edges (2nd order) - result[0] = old_div((-1.5*var[0] + 2.*var[1] - 0.5*var[2]), (x[1] - x[0])) - result[n-1] = old_div((1.5*var[n-1] - 2.*var[n-2] + 0.5*var[n-3]), (x[n-1] - x[n-2])) - elif n == 2: - # Just 1st-order difference for both points - result[0] = result[1] = old_div((var[1] - var[0]),(x[1] - x[0])) - elif n == 1: - result[0] = 0.0 - return result - -def deriv2D(data,axis=-1,dx=1.0,noise_suppression=True): - """ Takes 1D or 2D Derivative of 2D array using convolution - - result = deriv2D(data) - result = deriv2D(data, dx) - - output is 2D (if only one axis specified) - output is 3D if no axis specified [nx,ny,2] with the third dimension being [dfdx, dfdy] - - keywords: - axis = 0/1 If no axis specified 2D derivative will be returned - dx = 1.0 axis spacing, must be 2D if 2D deriv is taken - default is [1.0,1.0] - noise_suppression = True noise suppressing coefficients used to take derivative - default = True - """ - - from scipy.signal import convolve - - s = data.shape - if axis > len(s)-1: - raise RuntimeError("ERROR: axis out of bounds for derivative") - - if noise_suppression: - if s[axis] < 11: - raise RuntimeError("Data too small to use 11th order method") - tmp = array([old_div(-1.0,512.0),old_div(-8.0,512.0),old_div(-27.0,512.0),old_div(-48.0,512.0),old_div(-42.0,512.0),0.0,old_div(42.0,512.0),old_div(48.0,512.0),old_div(27.0,512.0),old_div(8.0,512.0),old_div(1.0,512.0)]) - else: - if s[axis] < 9: - raise RuntimeError("Data too small to use 9th order method") - tmp = array([old_div(1.0,280.0),old_div(-4.0,105.0),old_div(1.0,5.0),old_div(-4.0,5.0),0.0,old_div(4.0,5.0),old_div(-1.0,5.0),old_div(4.0,105.0),old_div(-1.0,280.0)]) - - N = int((tmp.size-1)/2) - if axis==1: - W = transpose(tmp[:,None]) - data_deriv = convolve(data,W,mode='same')/dx*-1.0 - for i in range(s[0]): - data_deriv[i,0:N-1] = old_div(deriv(data[i,0:N-1]),dx) - data_deriv[i,s[1]-N:] = old_div(deriv(data[i,s[1]-N:]),dx) - - elif axis==0: - W = tmp[:,None] - data_deriv = convolve(data,W,mode='same')/dx*-1.0 - for i in range(s[1]): - data_deriv[0:N-1,i] = old_div(deriv(data[0:N-1,i]),dx) - data_deriv[s[0]-N:,i] = old_div(deriv(data[s[0]-N:,i]),dx) - else: - data_deriv = zeros((s[0],s[1],2)) - if (not hasattr(dx, '__len__')) or len(dx)==1: - dx = array([dx,dx]) - - W = tmp[:,None]#transpose(multiply(tmp,ones((s[1],tmp.size)))) - data_deriv[:,:,0] = convolve(data,W,mode='same')/dx[0]*-1.0 - for i in range(s[1]): - data_deriv[0:N-1,i,0] = old_div(deriv(data[0:N-1,i]),dx[0]) - data_deriv[s[0]-N:s[0]+1,i,0] = old_div(deriv(data[s[0]-N:s[0]+1,i]),dx[0]) - - W = transpose(tmp[:,None])#multiply(tmp,ones((s[0],tmp.size))) - data_deriv[:,:,1] = convolve(data,W,mode='same')/dx[1]*-1.0 - for i in range(s[0]): - data_deriv[i,0:N-1,1] = old_div(deriv(data[i,0:N-1]),dx[1]) - data_deriv[i,s[1]-N:s[1]+1,1] = old_div(deriv(data[i,s[1]-N:s[1]+1]),dx[1]) - - return data_deriv - -def integrate(var, periodic=False): - """Integrate a 1D array - - Return array is the same size as the input - """ - if periodic: - # Use FFT - f = rfft(var) - n = var.size - # Zero frequency term - result = f[0].real*arange(n, dtype=float) - f[0] = 0. 
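For reference, the periodic branch of deriv above is spectral and ignores x, differentiating with respect to the grid index (unit spacing), so a dimensional derivative needs dividing by the grid spacing. A self-contained check on a sine wave (the grid size is chosen arbitrarily):

>>> import numpy as np
>>> from boututils.calculus import deriv
>>> x = np.linspace(0, 2*np.pi, 64, endpoint=False)
>>> dydx = deriv(np.sin(x), periodic=True) / (x[1] - x[0])
>>> np.allclose(dydx, np.cos(x))
True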
- if n % 2 == 0: - # Even n - for i in arange(1,old_div(n,2)): - f[i] /= 2.0j * pi * float(i)/float(n) - f[-1] = 0.0 # Nothing from Nyquist frequency - else: - # Odd n - for i in arange(1,old_div((n-1),2) + 1): - f[i] /= 2.0j * pi * float(i)/float(n) - return result + irfft(f) - else: - # Non-periodic function - def int_total(f): - """Integrate over a set of points""" - n = f.size - if n > 7: - # Need to split into several segments - # Use one 5-point, leaving at least 4-points - return int_total(f[0:5]) + int_total(f[4:]) - elif (n == 7) or (n == 6): - # Try to keep 4th-order - # Split into 4+4 or 4+3 - return int_total(f[0:4]) + int_total(f[3:]) - elif n == 5: - # 6th-order Bool's rule - return 4.*(7.*f[0] + 32.*f[1] + 12.*f[2] + 32.*f[3] + 7.*f[4])/90. - elif n == 4: - # 4th-order Simpson's 3/8ths rule - return 3.*(f[0] + 3.*f[1] + 3.*f[2] + f[3])/8. - elif n == 3: - # 4th-order Simpson's rule - return (f[0] + 4.*f[1] + f[2])/3. - elif n == 2: - # 2nd-order Trapezium rule - return 0.5*(f[0] + f[1]) - else: - print("WARNING: Integrating a single point") - return 0.0 - # Integrate using maximum number of grid-points - n = var.size - n2 = int(old_div(n,2)) - result = zeros(n) - for i in arange(n2, n): - result[i] = int_total(var[0:(i+1)]) - for i in arange(1, n2): - result[i] = result[-1] - int_total(var[i:]) - return result - -def simpson_integrate(data,dx,dy,kernel=0.0,weight=1.0): - """ Integrates 2D data to one value using the simpson method and matrix convolution - - result = simpson_integrate(data,dx,dy) - - keywords: - - kernel - can be supplied if the simpson matrix is calculated ahead of time - - if not supplied, is calculated within this function - - if you need to integrate the same shape data over and over, calculated - it ahead of time using: - kernel = simpson_matrix(Nx,Ny,dx,dy) - - weight - can be used to scale data if single number - - can be used to mask data if weight is array (same size as data) - """ - s = data.shape - Nx = s[0] - Ny = s[1] - - if len(kernel)==1: - kernel = simpson_matrix(Nx,Ny,dx,dy) - - return sum(multiply(multiply(weight,kernel),data))/sum(multiply(weight,kernel)) - - -def simpson_matrix(Nx,Ny,dx,dy): - """ - Creates a 2D matrix of coefficients for the simpson_integrate function - - Call ahead of time if you need to perform integration of the same size data with the - same dx and dy - - Otherwise, simpson_integrate will automatically call this - - """ - Wx = arange(Nx) + 2 - Wx[where(arange(Nx) % 2 == 1)] = 4 - Wx[0] = 1 - Wx[Nx-1] = 1 - - Wy = arange(Ny) + 2 - Wy[where(arange(Ny) % 2 == 1)] = 4 - Wy[0] = 1 - Wy[Ny-1] = 1 - - W = Wy[None,:] * Wx[:,None] - - A = dx*dy/9.0 - - return W*A diff --git a/tools/pylib/boututils/check_scaling.py b/tools/pylib/boututils/check_scaling.py deleted file mode 100644 index af59b0b786..0000000000 --- a/tools/pylib/boututils/check_scaling.py +++ /dev/null @@ -1,90 +0,0 @@ -"""Functions for checking the error scaling of MMS or MES results - -""" - -from numpy import array, isclose, log, polyfit - - -def get_order(grid_spacing, errors): - """Get the convergence order of errors over the full range of - grid_spacing, and at small spacings - - Parameters - ---------- - grid_spacing : list of float - The grid spacing or inverse of number of grid points - errors : list of float - The error at each grid spacing - - Returns - ------- - tuple of float - The first value is the error scaling over the full range of - grid spacings; the second value is the scaling over the last - two points - - """ - if len(errors) != 
len(grid_spacing): - raise ValueError("errors (len: {}) and grid_spacing (len: {}) should be the same length" - .format(len(errors), len(grid_spacing))) - - full_range = polyfit(log(grid_spacing), log(errors), 1) - - small_spacing = log(errors[-2] / errors[-1]) / log(grid_spacing[-2] / grid_spacing[-1]) - - return (full_range[0], small_spacing) - - -def check_order(error_list, expected_order, tolerance=2.e-1, spacing=None): - """Check if the actual_order is sufficiently close to the - expected_order within a given tolerance - - """ - - if len(error_list) < 2: - raise RuntimeError("Expected at least 2 data points to calculate error") - - success = True - - for i in range(len(error_list)-1): - grid_spacing = 2 if spacing is None else spacing[i] / spacing[i+1] - actual_order = log(error_list[i] / error_list[i+1]) / log(grid_spacing) - - if not isclose(actual_order, expected_order, atol=tolerance, rtol=0): - success = False - return success - - -def error_rate_table(errors, grid_sizes, label): - """Create a nicely formatted table of the error convergence rate over - the grid_sizes - - The error rate is calculated between adjacent points - - Parameters - ---------- - errors : list of float - The errors at each grid size - grid_sizes : list of int - The number of grid points - label : string - What the error is measuring - - Returns - ------- - string - - """ - if len(errors) != len(grid_sizes): - raise ValueError("errors (len: {}) and grid_sizes (len: {}) should be the same length" - .format(len(errors), len(grid_sizes))) - - dx = 1. / array(grid_sizes) - message = "{}:\nGrid points | Error | Rate\n".format(label) - for i, grid_size in enumerate(grid_sizes): - message += "{:<11} | {:f} | ".format(grid_size, errors[i]) - if i > 0: - message += "{:f} \n".format(log(errors[i] / errors[i-1]) / log(dx[i] / dx[i-1])) - else: - message += "--\n" - return message diff --git a/tools/pylib/boututils/closest_line.py b/tools/pylib/boututils/closest_line.py deleted file mode 100644 index 42dbbe047f..0000000000 --- a/tools/pylib/boututils/closest_line.py +++ /dev/null @@ -1,14 +0,0 @@ -from builtins import range -import numpy -# Find the closest contour line to a given point -def closest_line(n, x, y, ri, zi, mind=None): - - mind = numpy.min( (x[0] - ri)**2 + (y[0] - zi)**2 ) - ind = 0 - - for i in range (1, n) : - d = numpy.min( (x[i] - ri)**2 + (y[i] - zi)**2 ) - if d < mind : - mind = d - ind = i - return ind diff --git a/tools/pylib/boututils/contour.py b/tools/pylib/boututils/contour.py deleted file mode 100644 index 1d3fc97e22..0000000000 --- a/tools/pylib/boututils/contour.py +++ /dev/null @@ -1,80 +0,0 @@ -""" -Contour calculation routines - -https://web.archive.org/web/20140901225541/https://members.bellatlantic.net/~vze2vrva/thesis.html""" -from __future__ import print_function -from __future__ import division -from past.utils import old_div - -import numpy as np - - -def contour(f, level): - """Return a list of contours matching the given level""" - - if len(f.shape) != 2: - print("Contour only works on 2D data") - return None - nx,ny = f.shape - - # Go through each cell edge and mark which ones contain - # a level crossing. Approximating function as - # f = axy + bx + cy + d - # Hence linear interpolation along edges. 
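As a quick sketch of the check_scaling helpers above, fed with artificially exact second-order data (the spacings and errors are made up for illustration):

>>> from boututils.check_scaling import get_order, check_order
>>> dx = [0.1, 0.05, 0.025]
>>> errs = [1.0e-2, 2.5e-3, 6.25e-4]
>>> check_order(errs, 2)
True
>>> full, fine = get_order(dx, errs)   # both estimates come out as 2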
-
-    edgecross = {}  # Dictionary: (cell number, edge number) to crossing location
-
-    for i in np.arange(nx-1):
-        for j in np.arange(ny-1):
-            # Lower-left corner of cell is (i,j)
-            if (np.max(f[i:(i+2), j:(j+2)]) < level) or (np.min(f[i:(i+2), j:(j+2)]) > level):
-                # not in the correct range - skip
-                continue
-
-            # Check each edge
-            def location(a, b):
-                if (a > level) ^ (b > level):
-                    # One of the corners is > level, and the other is <= level
-                    # Find location
-                    return old_div((level - a), (b - a))
-                else:
-                    return None
-
-            loc = [
-                location(f[i, j], f[i+1, j]),
-                location(f[i+1, j], f[i+1, j+1]),
-                location(f[i+1, j+1], f[i, j+1]),
-                location(f[i, j+1], f[i, j])]
-
-            # Count the edges with a crossing
-            ncross = sum(1 for l in loc if l is not None)
-
-            if ncross != 0:  # Only put into dictionary if has a crossing
-                cellnr = (ny-1)*i + j  # The cell number
-                edgecross[cellnr] = [loc, ncross]  # Tack ncross onto the end
-
-    # Process crossings into contour lines
-
-    while True:
-        # Start from an arbitrary location and follow until
-        # it goes out of the domain or closes
-        try:
-            startcell, cross = edgecross.popitem()
-        except KeyError:
-            # No keys left so finished
-            break
-
-        def follow():
-            return
-
-        # Follow
-
-    return
-
-def find_opoints(var2d):
-    """Find O-points in psi i.e. local minima/maxima"""
-    return
-
-def find_xpoints(var2d):
-    """Find X-points in psi i.e. inflection points"""
-    return
-
diff --git a/tools/pylib/boututils/crosslines.py b/tools/pylib/boututils/crosslines.py
deleted file mode 120000
index fa0acafd36..0000000000
--- a/tools/pylib/boututils/crosslines.py
+++ /dev/null
@@ -1 +0,0 @@
-../../tokamak_grids/pyGridGen/crosslines.py
\ No newline at end of file
diff --git a/tools/pylib/boututils/datafile.py b/tools/pylib/boututils/datafile.py
deleted file mode 100644
index 00bb987706..0000000000
--- a/tools/pylib/boututils/datafile.py
+++ /dev/null
@@ -1,955 +0,0 @@
-"""File I/O class
-
-A wrapper around various NetCDF libraries and h5py, used by BOUT++
-routines. Creates a consistent interface across machines
-
-Supported libraries:
-
-- ``h5py`` (for HDF5 files)
-- ``netCDF4`` (preferred NetCDF library)
-
-NOTE
-----
-NetCDF and HDF5 include unlimited dimensions, but this library is just
-for very simple I/O operations. Educated guesses are made for the
-dimensions.
-
-TODO
-----
-- Don't raise ``ImportError`` if no NetCDF libraries found, use HDF5
-  instead?
-- Cleaner handling of different NetCDF libraries
-- Support for h5netcdf?
-
-"""
-
-from __future__ import print_function
-from builtins import map, zip, str, object
-
-import numpy as np
-import time
-import getpass
-from boututils.boutwarnings import alwayswarn
-from boututils.boutarray import BoutArray
-
-try:
-    from netCDF4 import Dataset
-    has_netCDF = True
-except ImportError:
-    raise ImportError(
-        "DataFile: No supported NetCDF modules available -- requires netCDF4")
-
-try:
-    import h5py
-    has_h5py = True
-except ImportError:
-    has_h5py = False
-
-
-class DataFile(object):
-    """File I/O class
-
-    A wrapper around various NetCDF libraries and h5py, used by BOUT++
-    routines. Creates a consistent interface across machines
-
-    Parameters
-    ----------
-    filename : str, optional
-        Name of file to open. If no filename supplied, you will need
-        to call :py:obj:`~DataFile.open` and supply `filename` there
-    write : bool, optional
-        If True, open the file in read-write mode (existing files will
-        be appended to). Default is read-only mode
-    create : bool, optional
-        If True, open the file in write mode (existing files will be
-        truncated).
Default is read-only mode - format : str, optional - Name of a filetype to use (e.g. ``NETCDF3_CLASSIC``, - ``NETCDF3_64BIT``, ``NETCDF4``, ``HDF5``) - - TODO - ---- - - `filename` should not be optional! - - Take a ``mode`` argument to be more in line with other file types - - `format` should be checked to be a sensible value - - Make sure ``__init__`` methods are first - - Make `impl` and `handle` private - - """ - impl = None - - def __init__(self, filename=None, write=False, create=False, format='NETCDF3_64BIT', **kwargs): - """ - - NetCDF formats are described here: https://unidata.github.io/netcdf4-python/ - - NETCDF3_CLASSIC Limited to 2.1Gb files - - NETCDF3_64BIT_OFFSET or NETCDF3_64BIT is an extension to allow larger file sizes - - NETCDF3_64BIT_DATA adds 64-bit integer data types and 64-bit dimension sizes - - NETCDF4 and NETCDF4_CLASSIC use HDF5 as the disk format - """ - if filename is not None: - if filename.split('.')[-1] in ('hdf5', 'hdf', 'h5'): - self.impl = DataFile_HDF5( - filename=filename, write=write, create=create, format=format) - else: - self.impl = DataFile_netCDF( - filename=filename, write=write, create=create, format=format, **kwargs) - elif format == 'HDF5': - self.impl = DataFile_HDF5( - filename=filename, write=write, create=create, - format=format) - else: - self.impl = DataFile_netCDF( - filename=filename, write=write, create=create, format=format, **kwargs) - - def open(self, filename, write=False, create=False, - format='NETCDF3_CLASSIC'): - """Open the file - - Parameters - ---------- - filename : str, optional - Name of file to open - write : bool, optional - If True, open the file in read-write mode (existing files will - be appended to). Default is read-only mode - create : bool, optional - If True, open the file in write mode (existing files will be - truncated). Default is read-only mode - format : str, optional - Name of a filetype to use (e.g. ``NETCDF3_CLASSIC``, - ``NETCDF4``, ``HDF5``) - - TODO - ---- - - Return the result of calling open to be more like stdlib's - open - - `keys` should be more pythonic (return generator) - - """ - self.impl.open(filename, write=write, create=create, - format=format) - - def close(self): - """Close a file and flush data to disk - - """ - self.impl.close() - - def __del__(self): - if self.impl is not None: - self.impl.__del__() - - def __enter__(self): - self.impl.__enter__() - return self - - def __exit__(self, type, value, traceback): - self.impl.__exit__(type, value, traceback) - - def read(self, name, ranges=None, asBoutArray=True): - """Read a variable from the file - - Parameters - ---------- - name : str - Name of the variable to read - ranges : list of slice objects, optional - Slices of variable to read, can also be converted from lists or - tuples of (start, stop, stride). The number of elements in `ranges` - should be equal to the number of dimensions of the variable you - wish to read. 
See :py:obj:`~DataFile.size` for how to get the - dimensions - asBoutArray : bool, optional - If True, return the variable as a - :py:obj:`~boututils.boutarray.BoutArray` (the default) - - Returns - ------- - ndarray or :py:obj:`~boututils.boutarray.BoutArray` - The variable from the file - (:py:obj:`~boututils.boutarray.BoutArray` if `asBoutArray` - is True) - - """ - if ranges is not None: - for x in ranges: - if isinstance(x, (list, tuple)): - x = slice(*x) - return self.impl.read(name, ranges=ranges, asBoutArray=asBoutArray) - - def list(self): - """List all variables in the file - - Returns - ------- - list of str - A list containing all the names of the variables - - """ - return self.impl.list() - - def keys(self): - """A synonym for :py:obj:`~DataFile.list` - - TODO - ---- - - Make a generator to be more like python3 dict keys - - """ - return self.list() - - def dimensions(self, varname): - """Return the names of all the dimensions of a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - tuple of str - The names of the variable's dimensions - - """ - return self.impl.dimensions(varname) - - def ndims(self, varname): - """Return the number of dimensions for a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - int - The number of dimensions - - """ - return self.impl.ndims(varname) - - def sync(self): - """Write pending changes to disk. - - """ - self.impl.sync() - - def size(self, varname): - """Return the size of each dimension of a variable - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - tuple of int - The size of each dimension - - """ - return self.impl.size(varname) - - def bout_type(self, varname): - """Return the name of the BOUT++ type of a variable - - Possible values are: - - - scalar - - Field2D - - Field3D - - If the variable is an evolving variable (i.e. 
has a time - dimension), then it is appended with a "_t" - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - str - The name of the BOUT++ type - - """ - return self.attributes(varname)["bout_type"] - - def write(self, name, data, info=False): - """Write a variable to file - - If the variable is not a :py:obj:`~boututils.boutarray.BoutArray` with - the ``bout_type`` attribute, a guess will be made for the - dimensions - - Parameters - ---------- - name : str - Name of the variable to use in the file - data : :py:obj:`~boututils.boutarray.BoutArray` or ndarray - An array containing the variable data - info : bool, optional - If True, print information about what is being written to - file - - Returns - ------- - None - - """ - return self.impl.write(name, data, info) - - def __getitem__(self, name): - return self.impl.__getitem__(name) - - def __setitem__(self, key, value): - self.impl.__setitem__(key, value) - - def attributes(self, varname): - """Return a dictionary of attributes - - Parameters - ---------- - varname : str - The name of the variable - - Returns - ------- - dict - The attribute names and their values - - """ - return self.impl.attributes(varname) - - -class DataFile_netCDF(DataFile): - handle = None - - def open(self, filename, write=False, create=False, - format='NETCDF3_CLASSIC'): - if (not write) and (not create): - self.handle = Dataset(filename, "r") - elif create: - self.handle = Dataset(filename, "w", format=format) - else: - self.handle = Dataset(filename, "a") - # Record if writing - self.writeable = write or create - - def close(self): - if self.handle is not None: - self.handle.close() - self.handle = None - - def __init__(self, filename=None, write=False, create=False, - format='NETCDF3_CLASSIC', **kwargs): - self._kwargs = kwargs - if not has_netCDF: - message = "DataFile: No supported NetCDF python-modules available" - raise ImportError(message) - if filename is not None: - self.open(filename, write=write, create=create, format=format) - self._attributes_cache = {} - - def __del__(self): - self.close() - - def __enter__(self): - return self - - def __exit__(self, type, value, traceback): - self.close() - - def read(self, name, ranges=None, asBoutArray=True): - """Read a variable from the file.""" - if self.handle is None: - return None - - try: - var = self.handle.variables[name] - n = name - except KeyError: - # Not found. 
Try to find using case-insensitive search - var = None - for n in list(self.handle.variables.keys()): - if n.lower() == name.lower(): - print( - "WARNING: Reading '" + n + "' instead of '" + name + "'") - var = self.handle.variables[n] - if var is None: - return None - - if asBoutArray: - attributes = self.attributes(n) - - ndims = len(var.dimensions) - if ndims == 0: - data = var.getValue() - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data # [0] - else: - if ranges: - if len(ranges) == 2 * ndims: - # Reform list of pairs of ints into slices - ranges = [slice(a, b) for a, b in - zip(ranges[::2], ranges[1::2])] - elif len(ranges) != ndims: - raise ValueError("Incorrect number of elements in ranges argument " - "(got {}, expected {} or {})" - .format(len(ranges), ndims, 2 * ndims)) - - data = var[ranges[:ndims]] - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data - else: - data = var[:] - if asBoutArray: - data = BoutArray(data, attributes=attributes) - return data - - def __getitem__(self, name): - var = self.read(name) - if var is None: - raise KeyError("No variable found: " + name) - return var - - def __setitem__(self, key, value): - self.write(key, value) - - def list(self): - if self.handle is None: - return [] - return list(self.handle.variables.keys()) - - def keys(self): - return self.list() - - def dimensions(self, varname): - if self.handle is None: - return None - try: - var = self.handle.variables[varname] - except KeyError: - raise ValueError("No such variable") - return var.dimensions - - def ndims(self, varname): - if self.handle is None: - raise ValueError("File not open") - try: - var = self.handle.variables[varname] - except KeyError: - raise ValueError("No such variable") - return len(var.dimensions) - - def sync(self): - self.handle.sync() - - def size(self, varname): - if self.handle is None: - return [] - try: - var = self.handle.variables[varname] - except KeyError: - return [] - - def dimlen(d): - dim = self.handle.dimensions[d] - if dim is not None: - t = type(dim).__name__ - if t == 'int': - return dim - return len(dim) - return 0 - return [dimlen(d) for d in var.dimensions] - - def _bout_type_from_dimensions(self, varname): - dims = self.dimensions(varname) - dims_dict = { - ('t', 'x', 'y', 'z'): "Field3D_t", - ('t', 'x', 'y'): "Field2D_t", - ('t', 'x', 'z'): "FieldPerp_t", - ('t',): "scalar_t", - ('x', 'y', 'z'): "Field3D", - ('x', 'y'): "Field2D", - ('x', 'z'): "FieldPerp", - ('x',): "ArrayX", - (): "scalar", - } - - return dims_dict.get(dims, None) - - def _bout_dimensions_from_type(self, bout_type): - dims_dict = { - "Field3D_t": ('t', 'x', 'y', 'z'), - "Field2D_t": ('t', 'x', 'y'), - "FieldPerp_t": ('t', 'x', 'z'), - "scalar_t": ('t',), - "Field3D": ('x', 'y', 'z'), - "Field2D": ('x', 'y'), - "FieldPerp": ('x', 'z'), - "ArrayX": ('x',), - "scalar": (), - } - - return dims_dict.get(bout_type, None) - - def write(self, name, data, info=False): - - if not self.writeable: - raise Exception("File not writeable. Open with write=True keyword") - - s = np.shape(data) - - # Get the variable type - t = type(data).__name__ - - if t == 'NoneType': - print("DataFile: None passed as data to write. Ignoring") - return - - if t == 'ndarray' or t == 'BoutArray': - # Numpy type or BoutArray wrapper for Numpy type. 
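# [Editor's note -- background sketch, not from the original file]
# numpy's dtype.str encodes byte order, kind and item size, e.g.
#     np.array([1.0]).dtype.str  ->  '<f8'   (little-endian 8-byte float)
#     np.array([1]).dtype.str    ->  '<i8'   (on most 64-bit platforms)
# and write() below dispatches on this string to pick the variable type.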
Get the data type - t = data.dtype.str - - if t == 'list': - # List -> convert to numpy array - data = np.array(data) - t = data.dtype.str - - if (t == 'int') or (t == ' -# -# * Modified to allow calls with only one argument -# - -def int_func( xin, fin=None, simple=None): - if fin is None : - f = copy.deepcopy(xin) - x = numpy.arange(numpy.size(f)).astype(float) - else: - f = copy.deepcopy(fin) - x = copy.deepcopy(xin) - - n = numpy.size(f) - - g = numpy.zeros(n) - - if simple is not None : - # Just use trapezium rule - - g[0] = 0.0 - for i in range (1, n) : - g[i] = g[i-1] + 0.5*(x[i] - x[i-1])*(f[i] + f[i-1]) - - else: - - n2 = numpy.int(old_div(n,2)) - - g[0] = 0.0 - for i in range (n2, n) : - g[i] = simps( f[0:i+1], x[0:i+1]) - - - - for i in range (1, n2) : - g[i] = g[n-1] - simps( f[i::], x[i::]) - - return g - - diff --git a/tools/pylib/boututils/linear_regression.py b/tools/pylib/boututils/linear_regression.py deleted file mode 100644 index c2f3f2cc39..0000000000 --- a/tools/pylib/boututils/linear_regression.py +++ /dev/null @@ -1,27 +0,0 @@ -from __future__ import division -# -# Perform a linear regression fit -# - -from numpy import mean - -def linear_regression(x, y): - """ Simple linear regression of two variables - - y = a + bx - - a, b = linear_regression(x, y) - - """ - - if x.size != y.size: - raise ValueError("x and y inputs must be the same size") - - mx = mean(x) - my = mean(y) - - b = (mean(x*y) - mx*my) / (mean(x**2) - mx**2) - a = my - b*mx - - return a, b - diff --git a/tools/pylib/boututils/local_min_max.py b/tools/pylib/boututils/local_min_max.py deleted file mode 120000 index 6ac6b0819e..0000000000 --- a/tools/pylib/boututils/local_min_max.py +++ /dev/null @@ -1 +0,0 @@ -../../tokamak_grids/pyGridGen/local_min_max.py \ No newline at end of file diff --git a/tools/pylib/boututils/mode_structure.py b/tools/pylib/boututils/mode_structure.py deleted file mode 100644 index 1196c823c6..0000000000 --- a/tools/pylib/boututils/mode_structure.py +++ /dev/null @@ -1,417 +0,0 @@ -from __future__ import print_function -from __future__ import absolute_import -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as numpy -import sys -from pylab import plot,xlabel,ylim,savefig,gca, xlim, show, clf, draw, title -from boututils.fft_integrate import fft_integrate -from .ask import query_yes_no - -#; Calculates mode structure from BOUT++ output -#; for comparison to ELITE -#; -#; April 2009 - Added ERGOS flag. 
This is intended -#; for producing plots similar to the ERGOS -#; vacuum RMP code - -# interpolates a 1D periodic function -def zinterp( v, zind): - - v = numpy.ravel(v) - - nz = numpy.size(v) - z0 = numpy.round(zind) - - p = zind - float(z0) # between -0.5 and 0.5 - - if p < 0.0 : - z0 = z0 - 1 - p = p + 1.0 - - - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # for now 3-point interpolation - - zp = (z0 + 1) % (nz - 1) - zm = (z0 - 1 + (nz-1)) % (nz - 1) - - - result = 0.5*p*(p-1.0)*v[zm.astype(int)] \ - + (1.0 - p*p)*v[z0.astype(int)] \ - + 0.5*p*(p+1.0)*v[zp.astype(int)] - - return result - - -def mode_structure( var_in, grid_in, period=1, - zangle=0.0, n=None, addq=None, output=None, - xq=None, xpsi=None, slow=None, subset=None, - filter=None, famp=None, quiet=None, - ergos=None, ftitle=None, - xrange=None, yrange=None, rational=None, pmodes=None, - _extra=None): - - - #ON_ERROR, 2 - # - # period = 1 ; default = full torus - - if n is None : - if filter is not None : - n = filter*period - else: n = period - - - # if (grid_in.JYSEPS1_1 GE 0) OR (grid_in.JYSEPS1_2 NE grid_in.JYSEPS2_1) OR (grid_in.JYSEPS2_2 NE grid_in.ny-1) THEN BEGIN - # PRINT, "Mesh contains branch-cuts. Keeping only core" - # - # grid = core_mesh(grid_in) - # var = core_mesh(var_in, grid_in) - #ENDIF ELSE BEGIN - grid = grid_in - vr = var_in - #ENDELSE - - - #IF KEYWORD_SET(filter) THEN BEGIN - # var = zfilter(var, filter) - #ENDIF - - nx = grid.get('nx') - ny = grid.get('ny') - - s = numpy.shape(vr) - if numpy.size(s) != 3 : - print("Error: Variable must be 3 dimensional") - return - - if (s[0] != nx) or (s[1] != ny) : - print("Error: Size of variable doesn't match grid") - - return - - nz = s[2] - - dz = 2.0*numpy.pi / numpy.float(period*(nz-1)) - - # GET THE TOROIDAL SHIFT - tn = list(grid.keys()) - tn = numpy.char.upper(tn) - count = numpy.where(tn == "QINTY") - if numpy.size(count) > 0 : - print("Using qinty as toroidal shift angle") - zShift = grid.get('qinty') - else: - count = numpy.where(tn == "ZSHIFT") - if numpy.size(count) > 0 : - print("Using zShift as toroidal shift angle") - zShift = grid.get('zShift') - else: - print("ERROR: Can't find qinty or zShift variable") - return - - zshift=grid.get('zShift') - - rxy=grid.get('Rxy') - zxy=grid.get('Zxy') - Btxy=grid.get('Btxy') - Bpxy=grid.get('Bpxy') - shiftangle=grid.get('ShiftAngle') - psixy=grid.get('psixy') - psi_axis=grid.get('psi_axis') - psi_bndry=grid.get('psi_bndry') - - np = 4*ny - - nf = old_div((np - 2), 2) - famp = numpy.zeros((nx, nf)) - - for x in range (nx): - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - # transform data into fixed poloidal angle - - # get number of poloidal points - nskip = numpy.zeros(ny-1) - for y in range (ny-1): - yp = y + 1 - nskip[y] = old_div(numpy.abs(zshift[x,yp] - zshift[x,y]), dz) - 1 - - - nskip =numpy.int_(numpy.round(nskip)) - nskip=numpy.where(nskip > 0, nskip, 0) - - - ny2 = numpy.int_(ny + numpy.sum(nskip)) # number of poloidal points - - # IF NOT KEYWORD_SET(quiet) THEN PRINT, x, ny2 - - f = numpy.zeros(ny2) # array for values - R = numpy.zeros(ny2) # Rxy - Z = numpy.zeros(ny2) # Zxy - BtBp = numpy.zeros(ny2) # Bt / Bp - - # interpolate values onto points - - ypos = 0 - for y in range(ny-1): - # original points - zind = old_div((zangle - zshift[x,y]),dz) - - - if numpy.size(zind) != 1 : sys.exit() - f[ypos] = zinterp(vr[x,y,:], zind) - R[ypos] = rxy[x,y] - Z[ypos] = zxy[x,y] - BtBp[ypos] = old_div(Btxy[x,y], Bpxy[x,y]) - - ypos = ypos + 1 - - # add the extra points - - zi0 = old_div((zangle - zshift[x,y]),dz) - 
zip1 = old_div((zangle - zshift[x,y+1]),dz) - - dzi = old_div((zip1 - zi0), (nskip[y] + 1)) - - for i in range (nskip[y]): - zi = zi0 + numpy.float(i+1)*dzi # zindex - w = old_div(numpy.float(i+1),numpy.float(nskip[y]+1)) # weighting - - f[ypos+i] = w*zinterp(vr[x,y+1,:], zi) + (1.0-w)*zinterp(vr[x,y,:], zi) - - R[ypos+i] = w*rxy[x,y+1] + (1.0-w)*rxy[x,y] - Z[ypos+i] = w*zxy[x,y+1] + (1.0-w)*zxy[x,y] - BtBp[ypos+i] = old_div((w*Btxy[x,y+1] + (1.0-w)*Btxy[x,y]), (w*Bpxy[x,y+1] + (1.0-w)*Bpxy[x,y])) - - ypos = ypos + nskip[y] - - # final point - - zind = old_div((zangle - zShift[x,ny-1]),dz) - - f[ypos] = zinterp(vr[x,ny-1,:], zind) - R[ypos] = rxy[x,ny-1] - Z[ypos] = zxy[x,ny-1] - BtBp[ypos] = old_div(Btxy[x,ny-1], Bpxy[x,ny-1]) - - - #STOP - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - #; calculate poloidal angle - - - drxy = numpy.gradient(R) - dzxy = numpy.gradient(Z) - dl = numpy.sqrt(drxy*drxy + dzxy*dzxy) - - nu = dl * BtBp / R # field-line pitch - theta = old_div(numpy.real(fft_integrate(nu)), shiftangle[x]) - - if numpy.max(theta) > 1.0 : - # mis-match between q and nu (integration error?) - if quiet is None : print("Mismatch ", x, numpy.max(theta)) - theta = old_div(theta, (numpy.max(theta) + numpy.abs(theta[1] - theta[0]))) - - - theta = 2.0*numpy.pi * theta - - #;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; - #; take Fourier transform in theta angle - - tarr = 2.0*numpy.pi*numpy.arange(np) / numpy.float(np) # regular array in theta - - farr = numpy.interp(tarr, theta, f) - - #STOP - - ff = old_div(numpy.fft.fft(farr),numpy.size(farr)) - - for i in range (nf): - famp[x, i] = 2.0*numpy.abs(ff[i+1]) - - - - - # sort modes by maximum size - - fmax = numpy.zeros(nf) - for i in range(nf): - fmax[i] = numpy.max(famp[:,i]) - - - inds = numpy.argsort(fmax)[::-1] - - - if pmodes is None : pmodes = 10 - - qprof = old_div(numpy.abs(shiftangle), (2.0*numpy.pi)) - - xarr = numpy.arange(nx) - xtitle="Radial index" - if xq is not None : - # show as a function of q*n - xarr = qprof*numpy.float(n) - - xtitle="q * n" - elif xpsi is not None : - # show as a function of psi. 
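# [Editor's note -- sketch of the normalisation used just below]
# "Normalised psi" is
#     psi_n = (psi - psi_axis) / (psi_bndry - psi_axis)
# i.e. 0 on the magnetic axis and 1 at the plasma boundary, which is what
# the branch below computes when the grid provides psi_axis and psi_bndry.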
Should be normalised psi - xarr = psixy[:,0] - - # Check if the grid includes psi axis and boundary - count1 = numpy.where(tn == "PSI_AXIS") - count2 = numpy.where(tn == "PSI_BNDRY") - - if (numpy.size(count1) > 0) and (numpy.size(count2) > 0) : - xarr = old_div((xarr - psi_axis), (psi_bndry - psi_axis)) - - else: - # Use hard-wired values - print("WARNING: Using hard-wired psi normalisation") - # for circular case - #xarr = (xarr + 0.1937) / (0.25044 + 0.1937) - # for ellipse case - #xarr = xarr / 0.74156 - - # cbm18_dens8 - xarr = old_div((xarr + 0.854856), (0.854856 + 0.0760856)) - - - xtitle="Psi normalised" - - - - if slow is not None : - # plot modes slowly for examination - #safe_colors, /first -# ax = fig.add_subplot(111) - # go through and plot each mode - for i in range(nf): - if numpy.max(famp[:,i]) > 0.05*numpy.max(famp): - print("Mode m = ", i+1, " of ", nf) - plot(xarr, famp[:,i], 'k') - ylim(0,numpy.max(famp)) - xlim(xrange) - xlabel(xtitle) - show(block=False) - - q = old_div(numpy.float(i+1), numpy.float(n)) - - pos = numpy.interp(q, qprof, xarr) - - plot( [pos, pos],[0, 2.*numpy.max(fmax)], 'k--') - draw() - - ans=query_yes_no('next mode') - if ans: - clf() - - - - elif ergos is not None : - # ERGOS - style output - - if output is not None and slow is None : - savefig('output.png') - - -# -# contour2, famp, xarr, indgen(nf)+1, $ -# xlabel=xtitle, xrange=xrange, yrange=yrange, _extra=_extra -# -# ; overplot the q profile -# -# oplot, xarr, qprof * n, color=1, thick=2 -# -# IF KEYWORD_SET(rational) THEN BEGIN -# maxm = FIX(MAX(qprof)) * n -# -# qreson = (FINDGEN(maxm)+1) / FLOAT(n) -# -# ; get x location for each of these resonances -# qloc = INTERPOL(xarr, qprof, qreson) -# -# oplot, qloc, findgen(maxm)+1., psym=4, color=1 -# ENDIF -# -# IF KEYWORD_SET(output) THEN BEGIN -# ; output data to save file -# SAVE, xarr, qprof, famp, file=output+".idl" -# -# DEVICE, /close -# SET_PLOT, 'X' -# ENDIF - - else: - if output is not None and slow is None : - savefig('output.png') - # savefig('output.ps') - - # - # - if subset is not None : - - # get number of modes larger than 5% of the maximum - count = numpy.size(numpy.where(fmax > 0.10*numpy.max(fmax))) - - minind = numpy.min(inds[0:count]) - maxind = numpy.max(inds[0:count]) - - print("Mode number range: ", minind, maxind) - - plot( xarr, famp[:,0], 'k', visible=False) - ylim(0,numpy.max(famp)) - xlabel(xtitle) - xlim(xrange) - title(ftitle) - - gca().set_color_cycle(['red', 'red', 'black', 'black']) - - for i in range(minind, maxind+1, subset): - plot( xarr, famp[:,i]) - - q = old_div(numpy.float(i+1), numpy.float(n)) - pos = numpy.interp(q, qprof, xarr) - - plot( [pos, pos], [0, 2.*numpy.max(fmax)], '--') - - - # - else: - # default - just plot everything - gca().set_color_cycle(['black', 'red']) - - plot(xarr, famp[:,0]) - ylim(0,numpy.max(famp)) #, color=1, - xlabel(xtitle) #, chars=1.5, xrange=xrange,title=title, _extra=_extra - xlim(xrange) - for i in range (nf): - plot( xarr, famp[:,i]) - - - # - # IF KEYWORD_SET(addq) THEN BEGIN - # - # FOR i=0, pmodes-1 DO BEGIN - # PRINT, "m = "+STRTRIM(STRING(inds[i]+1), 2)+" amp = "+STRTRIM(STRING(fmax[inds[i]]),2) - # q = FLOAT(inds[i]+1) / FLOAT(n) - # - # pos = INTERPOL(xarr, qprof, q) - # - # oplot, [pos, pos], [0, 2.*MAX(fmax)], lines=2, color=1 - # ENDFOR - # ENDIF - # - # ENDELSE - # IF KEYWORD_SET(output) THEN BEGIN - # ; output data to save file - # SAVE, xarr, qprof, famp, file=output+".idl" - # - # DEVICE, /close - # SET_PLOT, 'X' - # ENDIF - # ENDELSE - # diff --git 
a/tools/pylib/boututils/moment_xyzt.py b/tools/pylib/boututils/moment_xyzt.py deleted file mode 100644 index 675d1f135b..0000000000 --- a/tools/pylib/boututils/moment_xyzt.py +++ /dev/null @@ -1,74 +0,0 @@ -from __future__ import print_function -from __future__ import division -from builtins import range -from past.utils import old_div -import numpy as np -from boututils.bunch import Bunch - -def RMSvalue( vec1d): -#; -#; -get rms of a 1D signal -#;------------------------ - - nel=np.size(vec1d) - valav=old_div(np.sum(vec1d),nel) - valrms=np.sqrt(old_div(np.sum((vec1d-valav)**2),nel)) - acvec=vec1d-valav - - return Bunch(valrms=valrms, - valav=valav, - acvec=acvec) - - - -def moment_xyzt( sig_xyzt, *args):#rms=None, dc=None, ac=None): -#; -#; Calculate moments of a 4d signal of (x,y,z,t), i.e, -#; -RMS, i.e., a function of (x,y,t) -#; -DC (average in z), i.e., a function of (x,y,t) -#; -AC (DC subtracted out), i.e., a function of (x,y,z,t) -#;------------------------------------------------------------------- - - d = np.shape(sig_xyzt) - if np.size(d) != 4 : - print("Error: Variable must be 4D (x,y,z,t)") - return - - - siz=np.shape(sig_xyzt) - rms=np.zeros((siz[0],siz[1],siz[2])) - dc=np.zeros((siz[0],siz[1],siz[2])) - if 'AC' in args : ac=np.zeros((siz[0],siz[1],siz[2],siz[3])) - - - data = sig_xyzt - if np.modf(np.log2(siz[3]))[0] != 0.0 : - print("WARNING: Expecting a power of 2 in Z direction") - - if np.modf(np.log2(siz[3]-1))[0] and (siz[3] > 1) : - print(" -> Truncating last point to get power of 2") - data = data[:,:,0:(siz[3]-2),:] - siz[3] = siz[3] - 1 - - - for ix in range (siz[1]): - for iy in range (siz[2]): - for it in range (siz[0]): - val=RMSvalue(sig_xyzt[it,ix,iy,:]) - - rms[it,ix,iy]=val.valrms - dc[it,ix,iy]=val.valav - if 'AC' in args : ac[it,ix,iy,:]=[val.acvec,val.acvec[0]] - - res=Bunch() - - if 'RMS' in args: - res.rms = rms - if 'DC' in args: - res.dc = dc - if 'AC' in args: - res.ac = ac - - if 'RMS' not in args and 'DC' not in args and 'AC' not in args : - raise RuntimeError('Wrong argument') - return res diff --git a/tools/pylib/boututils/options.py b/tools/pylib/boututils/options.py deleted file mode 100644 index 2de3ebc3e0..0000000000 --- a/tools/pylib/boututils/options.py +++ /dev/null @@ -1,165 +0,0 @@ -"""Module to allow BOUT.inp files to be read into python and -manipulated with ease. 
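# [Editor's sketch -- hypothetical BOUT.inp fragment, not from this file]
# The input format is INI-like; settings before any [section] header go
# into the `root` attribute, and values are stored as (unconverted) strings:
#
#     nout = 100        # -> myOpts.root['nout'] == '100'
#     [mesh]
#     nx = 68           # -> myOpts.mesh['nx'] == '68'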
-
-
-Nick Walkden, June 2015
-nick.walkden@ccfe.ac.uk
-
-"""
-
-from copy import copy
-import os
-
-
-class BOUTOptions(object):
-    """Class to store and interact with options from BOUT++
-
-    Parameters
-    ----------
-    inp_path : str, optional
-        Path to BOUT++ options file
-
-    Examples
-    --------
-
-    Instantiate with
-
-    >>> myOpts = BOUTOptions()
-    >>> myOpts.read_inp('path/to/input/file')
-
-    or
-
-    >>> myOpts = BOUTOptions('path/to/input/file')
-
-    To get a list of sections use
-
-    >>> section_list = myOpts.list_sections()
-    >>> # Also print to screen:
-    >>> section_list = myOpts.list_sections(verbose=True)
-
-    Each section of the input is stored as a dictionary attribute so
-    that, if you want all the settings in the section [ddx]:
-
-    >>> ddx_opt_dict = myOpts.ddx
-
-    and access individual settings by
-
-    >>> ddx_setting = myOpts.ddx['first']
-
-    Any settings in BOUT.inp without a section are stored in
-
-    >>> root_dict = myOpts.root
-
-    TODO
-    ----
-    - Merge this and BoutOptionsFile or replace with better class
-
-    """
-
-    def __init__(self, inp_path=None):
-
-        self._sections = ['root']
-
-        for section in self._sections:
-            super(BOUTOptions, self).__setattr__(section, {})
-
-        if inp_path is not None:
-            self.read_inp(inp_path)
-
-    def read_inp(self, inp_path=''):
-        """Read a BOUT++ input file
-
-        Parameters
-        ----------
-        inp_path : str, optional
-            Path to the directory containing the input file
-            (default: current directory)
-
-        """
-
-        try:
-            inpfile = open(os.path.join(inp_path, 'BOUT.inp'), 'r')
-        except IOError:
-            raise TypeError("ERROR: Could not read file "
-                            + os.path.join(inp_path, "BOUT.inp"))
-
-        current_section = 'root'
-        inplines = inpfile.read().splitlines()
-        # Close the file after use
-        inpfile.close()
-        for line in inplines:
-            # remove white space
-            line = line.replace(" ", "")
-
-            if len(line) > 0 and line[0] != '#':
-                # Only read lines that are not comments or blank
-                if '[' in line:
-                    # Section header
-                    section = line.split('[')[1].split(']')[0]
-                    current_section = copy(section)
-                    if current_section not in self._sections:
-                        self.add_section(current_section)
-
-                elif '=' in line:
-                    # option setting
-                    attribute = line.split('=')[0]
-                    value = line.split('=')[1].split('#')[0]
-                    value = value.replace("\n", "")
-                    value = value.replace("\t", "")
-                    value = value.replace("\r", "")
-                    value = value.replace("\"", "")
-                    self.__dict__[copy(current_section)][copy(attribute)] = copy(value)
-                else:
-                    pass
-
-    def add_section(self, section):
-        """Add a section to the options
-
-        Parameters
-        ----------
-        section : str
-            The name of a new section
-
-        TODO
-        ----
-        - Guard against wrong type
-        """
-        self._sections.append(section)
-        super(BOUTOptions, self).__setattr__(section, {})
-
-    def remove_section(self, section):
-        """Remove a section from the options
-
-        Parameters
-        ----------
-        section : str
-            The name of a section to remove
-        """
-        if section in self._sections:
-            self._sections.pop(self._sections.index(section))
-            super(BOUTOptions, self).__delattr__(section)
-        else:
-            print("WARNING: Section " + section + " not found.\n")
-
-    def list_sections(self, verbose=False):
-        """Return all the sections in the options
-
-        Parameters
-        ----------
-        verbose : bool, optional
-            If True, print sections to screen
-
-        TODO
-        ----
-        - Better pretty-print
-        """
-        if verbose:
-            print("Sections Contained: \n")
-            for section in self._sections:
-                print("\t" + section + "\n")
-
-        return self._sections
diff --git a/tools/pylib/boututils/plotdata.py b/tools/pylib/boututils/plotdata.py
deleted file mode 100644
index 72627f4033..0000000000 --- a/tools/pylib/boututils/plotdata.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import print_function -# Plot a data set - -import numpy as np -import matplotlib -import matplotlib.cm as cm -import matplotlib.mlab as mlab -import matplotlib.pyplot as plt - -matplotlib.rcParams['xtick.direction'] = 'out' -matplotlib.rcParams['ytick.direction'] = 'out' - -def plotdata(data, x=None, y=None, - title=None, xtitle=None, ytitle=None, - output=None, range=None, - fill=True, mono=False, colorbar=True, - xerr=None, yerr=None): - """Plot 1D or 2D data, with a variety of options.""" - - size = data.shape - ndims = len(size) - - if ndims == 1: - if (xerr is not None) or (yerr is not None): - # Points with error bars - if x is None: - x = np.arange(size) - errorbar(x, data, xerr, yerr) - # Line plot - if x is None: - plt.plot(data) - else: - plt.plot(x, data) - - elif ndims == 2: - # A contour plot - - if x is None: - x = np.arange(size[1]) - if y is None: - y = np.arange(size[0]) - - if fill: - #plt.contourf(data, colors=colors) - cmap=None - if mono: cmap = cm.gray - plt.imshow(data, interpolation='bilinear', cmap=cmap) - else: - colors = None - if mono: colors = 'k' - - plt.contour(x, y, data, colors=colors) - - # Add a color bar - if colorbar: - CB = plt.colorbar(shrink=0.8, extend='both') - - else: - print("Sorry, can't handle %d-D variables" % ndims) - return - - if title is not None: - plt.title(title) - if xtitle is not None: - plt.xlabel(xtitle) - if ytitle is not None: - plt.ylabel(ytitle) - - if output is not None: - # Write to a file - plt.savefig(output) - else: - # Plot to screen - plt.show() - -def test(): - """Test the plotdata routine.""" - # Generate and plot test data - - delta = 0.025 - x = np.arange(-3.0, 3.0, delta) - y = np.arange(-2.0, 2.0, delta) - X, Y = np.meshgrid(x, y) - Z1 = mlab.bivariate_normal(X, Y, 1.0, 1.0, 0.0, 0.0) - Z2 = mlab.bivariate_normal(X, Y, 1.5, 0.5, 1, 1) - # difference of Gaussians - Z = 10.0 * (Z2 - Z1) - - plotdata(Z, title="test data", fill=False, mono=False) - plotdata(Z, title="Fill in mono", fill=True, mono=True) diff --git a/tools/pylib/boututils/plotpolslice.py b/tools/pylib/boututils/plotpolslice.py deleted file mode 100644 index 924272fb85..0000000000 --- a/tools/pylib/boututils/plotpolslice.py +++ /dev/null @@ -1,143 +0,0 @@ -from __future__ import print_function -from __future__ import division -from builtins import str -from builtins import range -from past.utils import old_div -import numpy as np -from boututils.file_import import file_import -import sys - -if sys.version_info[0]>=3: - message = "polplotslice uses the VTK library through mayavi, which"+\ - " is currently only available in python 2" - raise ImportError(message) -else: - from mayavi import mlab - from tvtk.tools import visual - - - -def zinterp( v, zind): - #v = REFORM(v) - nz = np.size(v) - z0 = np.round(zind) - - p = zind - float(z0) # between -0.5 and 0.5 - - if p < 0.0 : - z0 = z0 - 1 - p = p + 1.0 - - - z0 = ((z0 % (nz-1)) + (nz-1)) % (nz-1) - - # for now 3-point interpolation - - zp = (z0 + 1) % (nz - 1) - zm = (z0 - 1 + (nz-1)) % (nz - 1) - - result = 0.5*p*(p-1.0)*v[zm] \ - + (1.0 - p*p)*v[z0] \ - + 0.5*p*(p+1.0)*v[zp] - - return result - - -def plotpolslice(var3d,gridfile,period=1,zangle=0.0, rz=1, fig=0): - """ data2d = plotpolslice(data3d, 'gridfile' , period=1, zangle=0.0, rz:return (r,z) grid also=1, fig: to do the graph, set to 1 ) """ - - g=file_import(gridfile) - - nx=var3d.shape[0] - ny=var3d.shape[1] - nz=var3d.shape[2] - - 
- zShift=g.get('zShift') - rxy=g.get('Rxy') - zxy=g.get('Zxy') - - dz = 2.0*np.pi / float(period*nz) - - ny2=ny - nskip=np.zeros(ny-1) - for i in range(ny-1): - ip=(i+1)%ny - nskip[i]=0 - for x in range(nx): - ns=old_div(np.max(np.abs(zShift[x,ip]-zShift[x,i])),dz)-1 - if ns > nskip[i] : nskip[i] = ns - - nskip = np.int_(np.round(nskip)) - ny2 = np.int_(ny2 + np.sum(nskip)) - - print("Number of poloidal points in output:" + str(ny2)) - - var2d = np.zeros((nx, ny2)) - r = np.zeros((nx, ny2)) - z = np.zeros((nx, ny2)) - - ypos = 0 - for y in range (ny-1) : - # put in the original points - for x in range (nx): - zind = old_div((zangle - zShift[x,y]),dz) - var2d[x,ypos] = zinterp(var3d[x,y,:], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = var2d[x,ypos] + profile[x,y] - r[x,ypos] = rxy[x,y] - z[x,ypos] = zxy[x,y] - - ypos = ypos + 1 - - print((y, ypos)) - - # and the extra points - - for x in range (nx): - zi0 = old_div((zangle - zShift[x,y]),dz) - zip1 = old_div((zangle - zShift[x,y+1]),dz) - - dzi = old_div((zip1 - zi0), (nskip[y] + 1)) - - for i in range (nskip[y]): - zi = zi0 + float(i+1)*dzi # zindex - w = old_div(float(i+1),float(nskip[y]+1)) # weighting - - var2d[x,ypos+i] = w*zinterp(var3d[x,y+1,:], zi) + (1.0-w)*zinterp(var3d[x,y,:], zi) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos+i] = var2d[x,ypos+i] + w*profile[x,y+1] + (1.0-w)*profile[x,y] - r[x,ypos+i] = w*rxy[x,y+1] + (1.0-w)*rxy[x,y] - z[x,ypos+i] = w*zxy[x,y+1] + (1.0-w)*zxy[x,y] - - - - ypos = ypos + nskip[y] - - - # FINAL POINT - - for x in range(nx): - zind = old_div((zangle - zShift[x,ny-1]),dz) - var2d[x,ypos] = zinterp(var3d[x,ny-1,:], zind) - # IF KEYWORD_SET(profile) THEN var2d[x,ypos] = var2d[x,ypos] + profile[x,ny-1] - r[x,ypos] = rxy[x,ny-1] - z[x,ypos] = zxy[x,ny-1] - - - if(fig==1): - - f = mlab.figure(size=(600,600)) - # Tell visual to use this as the viewer. - visual.set_viewer(f) - - - s = mlab.mesh(r,z,var2d, colormap='PuOr')#, wrap_scale='true')#, representation='wireframe') - s.enable_contours=True - s.contour.filled_contours=True - mlab.view(0,0) - - else: - # return according to opt - if rz==1 : - return r,z,var2d - else: - return var2d diff --git a/tools/pylib/boututils/radial_grid.py b/tools/pylib/boututils/radial_grid.py deleted file mode 100644 index e0cf9c446c..0000000000 --- a/tools/pylib/boututils/radial_grid.py +++ /dev/null @@ -1,68 +0,0 @@ -from __future__ import division -from past.utils import old_div -import numpy -#;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;;; -# -# radial grid -# -# n - number of grid points -# pin, pout - range of psi -# seps - locations of separatrices -# sep_factor - separatrix peaking -# in_dp=in_dp - Fix the dx on the lower side -# out_dp=out_dp - Fix the dx on the upper side - -def radial_grid( n, pin, pout, include_in, include_out, seps, sep_factor, - in_dp=None, out_dp=None): - - if n == 1 : - return [0.5*(pin+pout)] - - - x = numpy.arange(0.,n) - m = numpy.float(n-1) - if include_in is None : - x = x + 0.5 - m = m + 0.5 - - - if include_out is None: - m = m + 0.5 - - x = old_div(x, m) - - - if in_dp is None and out_dp is None : - # Neither inner or outer gradients set. Just return equal spacing - return pin + (pout - pin)*x - - - norm = (x[1] - x[0])*(pout - pin) - - if in_dp is not None and out_dp is not None : - # Fit to dist = a*i^3 + b*i^2 + c*i - c = old_div(in_dp,norm) - b = 3.*(1. - c) - old_div(out_dp,norm) + c - a = 1. - c - b - elif in_dp is not None : - # Only inner set - c = old_div(in_dp,norm) - a = 0.5*(c-1.) - b = 1. 
- c - a - - #a = 0 - #c = in_dp/norm - #b = 1. - c - else: - # Only outer set. Used in PF region - # Fit to (1-b)*x^a + bx for fixed b - df = old_div(out_dp, norm) - b = 0.25 < df # Make sure a > 0 - a = old_div((df - b), (1. - b)) - vals = pin + (pout - pin)*( (1.-b)*x^a + b*x ) - return vals - - - vals = pin + (pout - pin)*(c*x + b*x^2 + a*x^3) - #STOP - return vals diff --git a/tools/pylib/boututils/read_geqdsk.py b/tools/pylib/boututils/read_geqdsk.py deleted file mode 100644 index f665f59ac4..0000000000 --- a/tools/pylib/boututils/read_geqdsk.py +++ /dev/null @@ -1,90 +0,0 @@ -from __future__ import print_function -from builtins import range -import numpy -from geqdsk import Geqdsk -from boututils.bunch import Bunch - -def read_geqdsk (file): - - data=Geqdsk() - - data.openFile(file) - - nxefit =data.get('nw') - nyefit =data.get('nh') - xdim =data.get('rdim') - zdim =data.get('zdim') - rcentr =data.get('rcentr') - rgrid1 =data.get('rleft') - zmid =data.get('zmid') - - rmagx =data.get('rmaxis') - zmagx =data.get('zmaxis') - simagx =data.get('simag') - sibdry =data.get('sibry') - bcentr =data.get('bcentr') - - cpasma =data.get('current') - #simagx =data.get('simag') - #xdum =data.get() - #rmagx =data.get('rmaxis') - #xdum =data.get() - - #zmagx =data.get('zmaxis') - #xdum =data.get() - #sibdry =data.get('sibry') - #xdum =data.get() - #xdum =data.get() - -# Read arrays - - fpol=data.get('fpol') - pres=data.get('pres') - - f=data.get('psirz') - qpsi=data.get('qpsi') - - nbdry=data.get('nbbbs') - nlim=data.get('limitr') - - if(nlim != 0) : - xlim=data.get('rlim') - ylim=data.get('zlim') - else: - xlim=[0] - ylim=[0] - - rbdry=data.get('rbbbs') - zbdry=data.get('zbbbs') - - - # Reconstruct the (R,Z) mesh - r=numpy.zeros((nxefit, nyefit), numpy.float64) - z=numpy.zeros((nxefit, nyefit), numpy.float64) - - - for i in range(0,nxefit): - for j in range(0,nyefit): - r[i,j] = rgrid1 + xdim*i/(nxefit-1) - z[i,j] = (zmid-0.5*zdim) + zdim*j/(nyefit-1) - - f=f.T - - print('nxefit = ', nxefit, ' nyefit= ', nyefit) - - return Bunch(nx=nxefit, ny=nyefit, # Number of horizontal and vertical points - r=r, z=z, # Location of the grid-points - xdim=xdim, zdim=zdim, # Size of the domain in meters - rcentr=rcentr, bcentr=bcentr, # Reference vacuum toroidal field (m, T) - rgrid1=rgrid1, # R of left side of domain - zmid=zmid, # Z at the middle of the domain - rmagx=rmagx, zmagx=zmagx, # Location of magnetic axis - simagx=simagx, # Poloidal flux at the axis (Weber / rad) - sibdry=sibdry, # Poloidal flux at plasma boundary (Weber / rad) - cpasma=cpasma, # - psi=f, # Poloidal flux in Weber/rad on grid points - fpol=fpol, # Poloidal current function on uniform flux grid - pres=pres, # Plasma pressure in nt/m^2 on uniform flux grid - qpsi=qpsi, # q values on uniform flux grid - nbdry=nbdry, rbdry=rbdry, zbdry=zbdry, # Plasma boundary - nlim=nlim, xlim=xlim, ylim=ylim) # Wall boundary diff --git a/tools/pylib/boututils/run_wrapper.py b/tools/pylib/boututils/run_wrapper.py deleted file mode 100644 index 8c0aa658fe..0000000000 --- a/tools/pylib/boututils/run_wrapper.py +++ /dev/null @@ -1,332 +0,0 @@ -"""Collection of functions which can be used to make a BOUT++ run""" - -from builtins import str -import os -import pathlib -import re -import subprocess -from subprocess import call, Popen, STDOUT, PIPE - - -if os.name == "nt": - # Default on Windows - DEFAULT_MPIRUN = "mpiexec.exe -n" -else: - DEFAULT_MPIRUN = "mpirun -np" - - -def getmpirun(default=DEFAULT_MPIRUN): - """Return environment variable named MPIRUN, if it 
exists else return - a default mpirun command - - Parameters - ---------- - default : str, optional - An mpirun command to return if ``MPIRUN`` is not set in the environment - - """ - MPIRUN = os.getenv("MPIRUN") - - if MPIRUN is None or MPIRUN == "": - MPIRUN = default - print("getmpirun: using the default " + str(default)) - - return MPIRUN - - -def shell(command, pipe=False): - """Run a shell command - - Parameters - ---------- - command : list of str - The command to run, split into (shell) words - pipe : bool, optional - Grab the output as text, else just run the command in the - background - - Returns - ------- - tuple : (int, str) - The return code, and either command output if pipe=True else None - """ - output = None - status = 0 - if pipe: - child = Popen(command, stderr=STDOUT, stdout=PIPE, shell=True) - # This returns a b'string' which is casted to string in - # python 2. However, as we want to use f.write() in our - # runtest, we cast this to utf-8 here - output = child.stdout.read().decode("utf-8", "ignore") - # Wait for the process to finish. Note that child.wait() - # would have deadlocked the system as stdout is PIPEd, we - # therefore use communicate, which in the end also waits for - # the process to finish - child.communicate() - status = child.returncode - else: - status = call(command, shell=True) - - return status, output - - -def determineNumberOfCPUs(): - """Number of virtual or physical CPUs on this system - - i.e. user/real as output by time(1) when called with an optimally - scaling userspace-only program - - Taken from a post on stackoverflow: - https://stackoverflow.com/questions/1006289/how-to-find-out-the-number-of-cpus-in-python - - Returns - ------- - int - The number of CPUs - """ - - # cpuset - # cpuset may restrict the number of *available* processors - try: - m = re.search(r'(?m)^Cpus_allowed:\s*(.*)$', - open('/proc/self/status').read()) - if m: - res = bin(int(m.group(1).replace(',', ''), 16)).count('1') - if res > 0: - return res - except IOError: - pass - - # Python 2.6+ - try: - import multiprocessing - return multiprocessing.cpu_count() - except (ImportError,NotImplementedError): - pass - - # POSIX - try: - res = int(os.sysconf('SC_NPROCESSORS_ONLN')) - - if res > 0: - return res - except (AttributeError,ValueError): - pass - - # Windows - try: - res = int(os.environ['NUMBER_OF_PROCESSORS']) - - if res > 0: - return res - except (KeyError, ValueError): - pass - - # jython - try: - from java.lang import Runtime - runtime = Runtime.getRuntime() - res = runtime.availableProcessors() - if res > 0: - return res - except ImportError: - pass - - # BSD - try: - sysctl = subprocess.Popen(['sysctl', '-n', 'hw.ncpu'], - stdout=subprocess.PIPE) - scStdout = sysctl.communicate()[0] - res = int(scStdout) - - if res > 0: - return res - except (OSError, ValueError): - pass - - # Linux - try: - res = open('/proc/cpuinfo').read().count('processor\t:') - - if res > 0: - return res - except IOError: - pass - - # Solaris - try: - pseudoDevices = os.listdir('/devices/pseudo/') - expr = re.compile('^cpuid@[0-9]+$') - - res = 0 - for pd in pseudoDevices: - if expr.match(pd) is not None: - res += 1 - - if res > 0: - return res - except OSError: - pass - - # Other UNIXes (heuristic) - try: - try: - dmesg = open('/var/run/dmesg.boot').read() - except IOError: - dmesgProcess = subprocess.Popen(['dmesg'], stdout=subprocess.PIPE) - dmesg = dmesgProcess.communicate()[0] - - res = 0 - while '\ncpu' + str(res) + ':' in dmesg: - res += 1 - - if res > 0: - return res - except 
OSError: - pass - - raise Exception('Can not determine number of CPUs on this system') - - -def launch(command, runcmd=None, nproc=None, mthread=None, - output=None, pipe=False, verbose=False): - """Launch parallel MPI jobs - - >>> status = launch(command, nproc, output=None) - - Parameters - ---------- - command : str - The command to run - runcmd : str, optional - Command for running parallel job; defaults to what getmpirun() returns" - nproc : int, optional - Number of processors (default: all available processors) - mthread : int, optional - Number of omp threads (default: the value of the - ``OMP_NUM_THREADS`` environment variable - output : str, optional - Name of file to save output to - pipe : bool, optional - If True, return the output of the command - verbose : bool, optional - Print the full command to be run before running it - - Returns - ------- - tuple : (int, str) - The return code, and either command output if pipe=True else None - - """ - - if runcmd is None: - runcmd = getmpirun() - - if nproc is None: - # Determine number of CPUs on this machine - nproc = determineNumberOfCPUs() - - cmd = runcmd + " " + str(nproc) + " " + command - - if output is not None: - cmd = cmd + " > "+output - - if mthread is not None: - if os.name == "nt": - # We're on windows, so we have to do it a little different - cmd = 'cmd /C "set OMP_NUM_THREADS={} && {}"'.format(mthread, cmd) - else: - cmd = "OMP_NUM_THREADS={} {}".format(mthread, cmd) - - if verbose == True: - print(cmd) - - return shell(cmd, pipe=pipe) - - -def shell_safe(command, *args, **kwargs): - """'Safe' version of shell. - - Raises a `RuntimeError` exception if the command is not - successful - - Parameters - ---------- - command : str - The command to run - *args, **kwargs - Optional arguments passed to `shell` - - """ - s, out = shell(command,*args,**kwargs) - if s: - raise RuntimeError("Run failed with %d.\nCommand was:\n%s\n\n" - "Output was\n\n%s"% - (s,command,out)) - return s, out - - -def launch_safe(command, *args, **kwargs): - """'Safe' version of launch. - - Raises an RuntimeError exception if the command is not successful - - Parameters - ---------- - command : str - The command to run - *args, **kwargs - Optional arguments passed to `shell` - - """ - s, out = launch(command,*args,**kwargs) - if s: - raise RuntimeError("Run failed with %d.\nCommand was:\n%s\n\n" - "Output was\n\n%s"% - (s,command,out)) - return s, out - - -def build_and_log(test): - """Run make and redirect the output to a log file. Prints input - - On Windows, does nothing because executable should have already - been built - - """ - - if os.name == "nt": - return - - print("Making {}".format(test)) - - if os.path.exists("makefile") or os.path.exists("Makefile"): - return shell_safe("make > make.log") - - ctest_filename = "CTestTestfile.cmake" - if not os.path.exists(ctest_filename): - raise RuntimeError("Could not build: no makefile and no CMake files detected") - - # We're using CMake, but we need to know the target name. If - # bout_add_integrated_test was used (which it should have been!), - # then the test name is the same as the target name - with open(ctest_filename, "r") as f: - contents = f.read() - match = re.search("add_test.(.*) ", contents) - if match is None: - raise RuntimeError("Using CMake, but could not determine test name") - test_name = match.group(1).split()[0] - - # Now we need to find the build directory. 
It'll be the first - # parent containing CMakeCache.txt - here = pathlib.Path(".").absolute() - for parent in here.parents: - if (parent / "CMakeCache.txt").exists(): - return shell_safe( - "cmake --build {} --target {} > make.log".format(parent, test_name) - ) - - # We've just looked up the entire directory structure and not - # found the build directory, this could happen if CMakeCache was - # deleted, in which case we can't build anyway - raise RuntimeError("Using CMake, but could not find build directory") diff --git a/tools/pylib/boututils/showdata.py b/tools/pylib/boututils/showdata.py deleted file mode 100644 index 4c0eb974f6..0000000000 --- a/tools/pylib/boututils/showdata.py +++ /dev/null @@ -1,702 +0,0 @@ -""" -Visualisation and animation routines - -Written by Luke Easy -le590@york.ac.uk -Last Updated 19/3/2015 -Additional functionality by George Breyiannis 26/12/2014 - -""" -from __future__ import print_function -from __future__ import division -from builtins import str, chr, range - -from matplotlib import pyplot as plt -from matplotlib import animation -from numpy import linspace, meshgrid, array, min, max, abs, floor, pi, isclose -from boutdata.collect import collect -from boututils.boutwarnings import alwayswarn - -#################################################################### -# Specify manually ffmpeg path -#plt.rcParams['animation.ffmpeg_path'] = '/usr/bin/ffmpeg' - -FFwriter = animation.FFMpegWriter() -#################################################################### - - -################### -#https://stackoverflow.com/questions/16732379/stop-start-pause-in-python-matplotlib-animation -# -j=-2 -pause = False -################### - - -def showdata(vars, titles=[], legendlabels=[], surf=[], polar=[], tslice=0, t_array=None, - movie=0, fps=28, dpi=200, intv=1, Ncolors=25, x=[], y=[], - global_colors=False, symmetric_colors=False, hold_aspect=False, - cmap=None, clear_between_frames=None, return_animation=False, window_title=""): - """A Function to animate time dependent data from BOUT++ - - To animate multiple variables on different axes: - - >>> showdata([var1, var2, var3]) - - To animate more than one line on a single axes: - - >>> showdata([[var1, var2, var3]]) - - The default graph types are: - 2D (time + 1 spatial dimension) arrays = animated line plot - 3D (time + 2 spatial dimensions) arrays = animated contour plot. - - To use surface or polar plots: - - >>> showdata(var, surf=1) - >>> showdata(var, polar=1) - - Can plot different graph types on different axes. Default graph types will - be used depending on the dimensions of the input arrays. To specify - polar/surface plots on different axes: - - >>> showdata([var1, var2], surf=[1, 0], polar=[0, 1]) - - Movies require FFmpeg (for .mp4) and/or ImageMagick (for .gif) to be - installed. The 'movie' option can be set to 1 (which will produce an mp4 - called 'animation.mp4'), to a name with no extension (which will produce an - mp4 called '.mp4') - - The `tslice` variable is used to control the time value that is printed on - each frame of the animation. If the input data matches the time values - found within BOUT++'s dmp data files, then these time values will be used. - Otherwise, an integer counter is used. - - The `cmap` variable (if specified) will set the colormap used in the plot - cmap must be a matplotlib colormap instance, or the name of a registered - matplotlib colormap - - During animation click once to stop in the current frame. Click again to - continue. 
-
-    Parameters
-    ----------
-    vars : array_like or list of array_like
-        Variable or list of variables to plot
-    titles : str or list of str, optional
-        Title or list of titles for each axis
-    legendlabels : str or list of str, optional
-        Legend or list of legends for each variable
-    surf : list of int
-        Which axes to plot as a surface plot
-    polar : list of int
-        Which axes to plot as a polar plot
-    tslice : list of int
-        Use these time values from a dump file (see above)
-    t_array : array
-        Pass in t_array using this argument to use the simulation time in plot
-        titles. Otherwise, just use the t-index.
-    movie : int
-        If 1, save the animation to file
-    fps : int
-        Number of frames per second to use when saving animation
-    dpi : int
-        Dots per inch to use when saving animation
-    intv : int
-        ???
-    Ncolors : int
-        Number of levels in contour plots
-    x, y : array_like, list of array_like
-        X, Y coordinates
-    global_colors : bool
-        If "vars" is a list, the colorlevels are determined from the
-        maximum of the maxima and the minimum of the minima in all
-        fields in vars
-    symmetric_colors : bool
-        Colour levels are symmetric
-    hold_aspect : bool
-        Use equal aspect ratio in plots
-    cmap : colormap
-        A matplotlib colormap instance to use
-    clear_between_frames : bool, optional
-        - Default (None) - all plots except line plots will clear between frames
-        - True - all plots will clear between frames
-        - False - no plots will clear between frames
-    return_animation : bool
-        Return the matplotlib animation instance
-    window_title : str
-        Give a title for the animation window
-
-    TODO
-    ----
-    - Replace empty lists in signature with None
-    - Use bools in sensible places
-    - Put massive list of arguments in kwargs
-    - Speed up animations ????
-    - Look at theta in polar plots - periodic?!?
-    - Log axes, colorbars
-    - Figureplot
-
-    """
-    plt.ioff()
-
-    # Check to see whether vars is a list or not.
-    if isinstance(vars, list):
-        Nvar = len(vars)
-    else:
-        vars = [vars]
-        Nvar = len(vars)
-
-    if Nvar < 1:
-        raise ValueError("No data supplied")
-
-    # Check to see whether each variable is a list - used for line plots only
-    Nlines = []
-    for i in range(0, Nvar):
-        if isinstance(vars[i], list):
-            Nlines.append(len(vars[i]))
-        else:
-            Nlines.append(1)
-            vars[i] = [vars[i]]
-
-    # Sort out titles
-    if len(titles) == 0:
-        for i in range(0, Nvar):
-            titles.append(('Var' + str(i + 1)))
-    elif len(titles) != Nvar:
-        raise ValueError('The length of the titles input list must match the length of the vars list.')
-
-    # Sort out legend labels
-    if len(legendlabels) == 0:
-        for i in range(0, Nvar):
-            legendlabels.append([])
-            for j in range(0, Nlines[i]):
-                legendlabels[i].append(chr(97 + j))
-    elif (isinstance(legendlabels[0], list) != 1):
-        if Nvar != 1:
-            check = 0
-            for i in range(0, Nvar):
-                if len(legendlabels) != Nlines[i]:
-                    check = check + 1
-            if check == 0:
-                alwayswarn("The legendlabels list does not contain a sublist for each variable, but its length matches the number of lines on each plot. Will apply labels to each plot")
-                legendlabelsdummy = []
-                for i in range(0, Nvar):
-                    legendlabelsdummy.append([])
-                    for j in range(0, Nlines[i]):
-                        legendlabelsdummy[i].append(legendlabels[j])
-                legendlabels = legendlabelsdummy
-            else:
-                alwayswarn("The legendlabels list does not contain a sublist for each variable, and its length does not match the number of lines on each plot.
Will default apply labels to each plot") - legendlabels = [] - for i in range(0,Nvar): - legendlabels.append([]) - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - if (Nlines[0] == len(legendlabels)): - legendlabels = [legendlabels] - elif len(legendlabels) != Nvar: - alwayswarn("The length of the legendlabels list does not match the length of the vars list, will continue with default values") - legendlabels = [] - for i in range(0,Nvar): - legendlabels.append([]) - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - for i in range(0,Nvar): - if isinstance(legendlabels[i], list): - if len(legendlabels[i]) != Nlines[i]: - alwayswarn('The length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values') - legendlabels[i] = [] - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - else: - legendlabels[i] = [legendlabels[i]] - if len(legendlabels[i]) != Nlines[i]: - alwayswarn('The length of the legendlabel (sub)list for each plot does not match the number of datasets for each plot. Will continue with default values') - legendlabels[i] = [] - for j in range(0,Nlines[i]): - legendlabels[i].append(chr(97+j)) - - - # Sort out surf list - if isinstance(surf, list): - if (len(surf) == Nvar): - for i in range(0, Nvar): - if surf[i] >= 1: - surf[i] = 1 - else: - surf[i] = 0 - elif (len(surf) == 1): - if surf[0] >= 1: - surf[0] = 1 - else: - surf[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - surf.append(surf[0]) - elif (len(surf) == 0): - for i in range(0,Nvar): - surf.append(0) - else: - alwayswarn('Length of surf list does not match number of variables. Will default to no polar plots') - for i in range(0,Nvar): - surf.append(0) - - else: - surf = [surf] - if surf[0] >= 1: - surf[0] = 1 - else: - surf[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - surf.append(surf[0]) - - # Sort out polar list - if isinstance(polar, list): - if (len(polar) == Nvar): - for i in range(0, Nvar): - if polar[i] >= 1: - polar[i] = 1 - else: - polar[i] = 0 - elif (len(polar) == 1): - if polar[0] >= 1: - polar[0] = 1 - else: - polar[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - polar.append(polar[0]) - elif (len(polar) == 0): - for i in range(0,Nvar): - polar.append(0) - else: - alwayswarn('Length of polar list does not match number of variables. Will default to no polar plots') - for i in range(0,Nvar): - polar.append(0) - else: - polar = [polar] - if polar[0] >= 1: - polar[0] = 1 - else: - polar[0] = 0 - if (Nvar > 1): - for i in range(1,Nvar): - polar.append(polar[0]) - - # Determine shapes of arrays - dims = [] - Ndims = [] - lineplot = [] - contour = [] - for i in range(0,Nvar): - dims.append([]) - Ndims.append([]) - for j in range(0, Nlines[i]): - dims[i].append(array((vars[i][j].shape))) - Ndims[i].append(dims[i][j].shape[0]) - # Perform check to make sure that data is either 2D or 3D - if (Ndims[i][j] < 2): - raise ValueError('data must be either 2 or 3 dimensional. Exiting') - - if (Ndims[i][j] > 3): - raise ValueError('data must be either 2 or 3 dimensional. Exiting') - - if ((Ndims[i][j] == 2) & (polar[i] != 0)): - alwayswarn('Data must be 3 dimensional (time, r, theta) for polar plots. Will plot lineplot instead') - - if ((Ndims[i][j] == 2) & (surf[i] != 0)): - alwayswarn('Data must be 3 dimensional (time, x, y) for surface plots. 
-    # Determine shapes of arrays
-    dims = []
-    Ndims = []
-    lineplot = []
-    contour = []
-    for i in range(0, Nvar):
-        dims.append([])
-        Ndims.append([])
-        for j in range(0, Nlines[i]):
-            dims[i].append(array(vars[i][j].shape))
-            Ndims[i].append(dims[i][j].shape[0])
-            # Check that the data is either 2D or 3D
-            if (Ndims[i][j] < 2) or (Ndims[i][j] > 3):
-                raise ValueError('Data must be either 2 or 3 dimensional')
-
-            if (Ndims[i][j] == 2) and (polar[i] != 0):
-                alwayswarn('Data must be 3 dimensional (time, r, theta) for polar plots. Will plot lineplot instead')
-
-            if (Ndims[i][j] == 2) and (surf[i] != 0):
-                alwayswarn('Data must be 3 dimensional (time, x, y) for surface plots. Will plot lineplot instead')
-
-            if (Ndims[i][j] == 3) and (Nlines[i] != 1):
-                raise ValueError('Cannot have multiple sets of 3D (time + 2 spatial dimensions) data on one subplot')
-
-            if (Ndims[i][j] != Ndims[i][0]):
-                raise ValueError('The number of dimensions must be the same for all variables on each plot')
-
-        if (Ndims[i][0] == 2):  # Set polar and surf list entries to 0
-            polar[i] = 0
-            surf[i] = 0
-            lineplot.append(1)
-            contour.append(0)
-        else:
-            if (polar[i] == 1) and (surf[i] == 1):
-                alwayswarn('Cannot do polar and surface plots at the same time. Default to contour plot')
-                contour.append(1)
-                lineplot.append(0)
-                polar[i] = 0
-                surf[i] = 0
-            elif (polar[i] == 1) or (surf[i] == 1):
-                contour.append(0)
-                lineplot.append(0)
-            else:
-                contour.append(1)
-                lineplot.append(0)
-
-    # Obtain size of data arrays
-    Nt = []
-    Nx = []
-    Ny = []
-    for i in range(0, Nvar):
-        Nt.append([])
-        Nx.append([])
-        Ny.append([])
-        for j in range(0, Nlines[i]):
-            Nt[i].append(vars[i][j].shape[0])
-            Nx[i].append(vars[i][j].shape[1])
-            if (Nt[i][j] != Nt[0][0]):
-                raise ValueError('The time dimension must be the same length for all variables')
-
-            # if (Nx[i][j] != Nx[i][0]):
-            #     raise ValueError('Dimensions must be the same for all variables on each plot.')
-
-            if (Ndims[i][j] == 3):
-                Ny[i].append(vars[i][j].shape[2])
-                # if (Ny[i][j] != Ny[i][0]):
-                #     raise ValueError('Dimensions must be the same for all variables.')
-
-    # Obtain number of frames
-    Nframes = int(Nt[0][0] / intv)
-
-    # Generate grids for plotting
-    # Try to use provided grids where possible
-    # If x and/or y are not lists, apply to all variables
-    if not isinstance(x, (list, tuple)):
-        x = [x] * Nvar  # Make list of x with length Nvar
-    if not isinstance(y, (list, tuple)):
-        y = [y] * Nvar  # Make list of y with length Nvar
-    xnew = []
-    ynew = []
-    for i in range(0, Nvar):
-        xnew.append([])
-        try:
-            xnew[i].append(x[i])
-            if not (x[i].shape == (Nx[i][0],) or x[i].shape == (Nx[i][0], Ny[i][0]) or x[i].shape == (Nt[i][0], Nx[i][0], Ny[i][0])):
-                raise ValueError("For variable number " + str(i) + ", " + titles[i] + ", the shape of x is not compatible with the shape of the variable. The shape of x should be (Nx), (Nx,Ny) or (Nt,Nx,Ny).")
-        except (IndexError, AttributeError):
-            # No x provided for this variable: default to cell indices
-            for j in range(0, Nlines[i]):
-                xnew[i].append(linspace(0, Nx[i][j] - 1, Nx[i][j]))
-
-        if (Ndims[i][0] == 3):
-            try:
-                ynew.append(y[i])
-                if not (y[i].shape == (Ny[i][0],) or y[i].shape == (Nx[i][0], Ny[i][0]) or y[i].shape == (Nt[i][0], Nx[i][0], Ny[i][0])):
-                    raise ValueError("For variable number " + str(i) + ", " + titles[i] + ", the shape of y is not compatible with the shape of the variable. The shape of y should be (Ny), (Nx,Ny) or (Nt,Nx,Ny).")
-            except (IndexError, AttributeError):
-                ynew.append(linspace(0, Ny[i][0] - 1, Ny[i][0]))
-        else:
-            ynew.append(0)
-    x = xnew
-    y = ynew
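The accepted coordinate shapes, sketched for a hypothetical (Nt, Nx, Ny) array n:

    from numpy import linspace

    x = linspace(0.0, 1.0, n.shape[1])    # (Nx,); (Nx, Ny) and (Nt, Nx, Ny) also accepted
    y = linspace(-0.5, 0.5, n.shape[2])   # same options for y
    showdata(n, x=x, y=y)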
-    # Determine range of data. Used to ensure constant colour map and
-    # to set y scale of line plot.
-    fmax = []
-    fmin = []
-    xmax = []
-    dummymax = []
-    dummymin = []
-    clevels = []
-
-    for i in range(0, Nvar):
-        dummymax.append([])
-        dummymin.append([])
-        for j in range(0, Nlines[i]):
-            dummymax[i].append(max(vars[i][j]))
-            dummymin[i].append(min(vars[i][j]))
-
-        fmax.append(max(dummymax[i]))
-        fmin.append(min(dummymin[i]))
-
-        if symmetric_colors:
-            absmax = max(abs(fmax[i]), abs(fmin[i]))
-            fmax[i] = absmax
-            fmin[i] = -absmax
-
-        for j in range(0, Nlines[i]):
-            dummymax[i][j] = max(x[i][j])
-        xmax.append(max(dummymax[i]))
-
-        if not global_colors:
-            if isclose(fmin[i], fmax[i]):
-                # Add/subtract a very small constant in case fmin = fmax = 0
-                thiscontourmin = fmin[i] - 3.e-15 * abs(fmin[i]) - 1.e-36
-                thiscontourmax = fmax[i] + 3.e-15 * abs(fmax[i]) + 1.e-36
-                alwayswarn("Contour levels too close, adding padding to colorbar range")
-                clevels.append(linspace(thiscontourmin, thiscontourmax, Ncolors))
-            else:
-                clevels.append(linspace(fmin[i], fmax[i], Ncolors))
-
-    if global_colors:
-        fmaxglobal = max(fmax)
-        fminglobal = min(fmin)
-        if isclose(fminglobal, fmaxglobal):
-            fminglobal = fminglobal - 3.e-15 * abs(fminglobal) - 1.e-36
-            fmaxglobal = fmaxglobal + 3.e-15 * abs(fmaxglobal) + 1.e-36
-        for i in range(0, Nvar):
-            clevels.append(linspace(fminglobal, fmaxglobal, Ncolors))
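The colour-range options above in combination, again for hypothetical fields f and g:

    # One colorbar range shared by both subplots, centred on zero
    showdata([f, g], global_colors=True, symmetric_colors=True)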
-    # Create figures for animation plotting
-    if (Nvar < 2):
-        row = 1
-        col = 1
-        h = 6.0
-        w = 8.0
-    elif (Nvar < 3):
-        row = 1
-        col = 2
-        h = 6.0
-        w = 12.0
-    elif (Nvar < 5):
-        row = 2
-        col = 2
-        h = 8.0
-        w = 12.0
-    elif (Nvar < 7):
-        row = 2
-        col = 3
-        h = 8.0
-        w = 14.0
-    elif (Nvar < 10):
-        row = 3
-        col = 3
-        h = 12.0
-        w = 14.0
-    else:
-        raise ValueError('Too many variables; maximum is 9')
-
-    fig = plt.figure(window_title, figsize=(w, h))
-    title = fig.suptitle(r' ', fontsize=14)
-
-    # Initiate all list variables required for plotting here
-    ax = []
-    lines = []
-    plots = []
-    cbars = []
-    xstride = []
-    ystride = []
-    r = []
-    theta = []
-
-    # Initiate figure frame
-    for i in range(0, Nvar):
-        lines.append([])
-        if (lineplot[i] == 1):
-            ax.append(fig.add_subplot(row, col, i + 1))
-            ax[i].set_xlim((0, xmax[i]))
-            ax[i].set_ylim((fmin[i], fmax[i]))
-            for j in range(0, Nlines[i]):
-                lines[i].append(ax[i].plot([], [], lw=2, label=legendlabels[i][j])[0])
-                # Need the [0] to 'unpack' the line object from the tuple.
-                # Alternatively: lines[i], = lines[i]
-            ax[i].set_xlabel(r'x')
-            ax[i].set_ylabel(titles[i])
-            if (Nlines[i] != 1):
-                # Only add a legend if this number of lines hasn't already
-                # been labelled on an earlier subplot
-                legendneeded = 1
-                for k in range(0, i):
-                    if (Nlines[i] == Nlines[k]):
-                        legendneeded = 0
-                if (legendneeded == 1):
-                    plt.axes(ax[i])
-                    plt.legend(loc=0)
-            # Pad out unused list variables with zeros
-            plots.append(0)
-            cbars.append(0)
-            xstride.append(0)
-            ystride.append(0)
-            r.append(0)
-            theta.append(0)
-
-        elif (contour[i] == 1):
-            ax.append(fig.add_subplot(row, col, i + 1))
-            ax[i].set_xlim(min(x[i]), max(x[i]))
-            ax[i].set_ylim(min(y[i]), max(y[i]))
-            ax[i].set_xlabel(r'x')
-            ax[i].set_ylabel(r'y')
-            ax[i].set_title(titles[i])
-            if hold_aspect:
-                ax[i].set_aspect('equal')
-            thisx = x[i][0]
-            if len(thisx.shape) == 3:
-                thisx = thisx[0]
-            thisy = y[i]
-            if len(thisy.shape) == 3:
-                thisy = thisy[0]
-            plots.append(ax[i].contourf(thisx.T, thisy.T, vars[i][0][0, :, :].T, Ncolors, cmap=cmap, lw=0, levels=clevels[i]))
-            plt.axes(ax[i])
-            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
-            # Pad out unused list variables with zeros
-            lines[i].append(0)
-            xstride.append(0)
-            ystride.append(0)
-            r.append(0)
-            theta.append(0)
-
-        elif (surf[i] == 1):
-            if (len(x[i][0].shape) == 1 and len(y[i].shape) == 1):
-                # plot_wireframe() requires 2d arrays for x and y coordinates
-                x[i][0], y[i] = meshgrid(x[i][0], y[i])
-            thisx = x[i][0]
-            if len(thisx.shape) == 3:
-                thisx = thisx[0]
-            thisy = y[i]
-            if len(thisy.shape) == 3:
-                thisy = thisy[0]
-            if (Nx[i][0] <= 20):
-                xstride.append(1)
-            else:
-                xstride.append(int(floor(Nx[i][0] / 20)))
-            if (Ny[i][0] <= 20):
-                ystride.append(1)
-            else:
-                ystride.append(int(floor(Ny[i][0] / 20)))
-            ax.append(fig.add_subplot(row, col, i + 1, projection='3d'))
-            plots.append(ax[i].plot_wireframe(thisx, thisy, vars[i][0][0, :, :].T, rstride=ystride[i], cstride=xstride[i]))
-            title = fig.suptitle(r'', fontsize=14)
-            ax[i].set_xlabel(r'x')
-            ax[i].set_ylabel(r'y')
-            ax[i].set_zlabel(titles[i])
-            # Pad out unused list variables with zeros
-            lines[i].append(0)
-            cbars.append(0)
-            r.append(0)
-            theta.append(0)
-
-        elif (polar[i] == 1):
-            r.append(linspace(1, Nx[i][0], Nx[i][0]))
-            theta.append(linspace(0, 2 * pi, Ny[i][0]))
-            r[i], theta[i] = meshgrid(r[i], theta[i])
-            ax.append(fig.add_subplot(row, col, i + 1, projection='polar'))
-            plots.append(ax[i].contourf(theta[i], r[i], vars[i][0][0, :, :].T, cmap=cmap, levels=clevels[i]))
-            plt.axes(ax[i])
-            cbars.append(fig.colorbar(plots[i], format='%1.1e'))
-            ax[i].set_rmax(Nx[i][0] - 1)
-            ax[i].set_title(titles[i])
-            # Pad out unused list variables with zeros
-            lines[i].append(0)
-            xstride.append(0)
-            ystride.append(0)
-
-    def onClick(event):
-        # Toggle the pause flag when the figure is clicked
-        global pause
-        pause ^= True
-
-    def control():
-        # Advance the frame counter unless paused, wrapping at the end
-        global j, pause
-        if j == Nframes - 1:
-            j = -1
-        if not pause:
-            j = j + 1
-        return j
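The click-to-pause pattern used by onClick() and control() above, reduced to a self-contained sketch (plain matplotlib, no BOUT++ dependencies):

    import numpy as np
    import matplotlib.pyplot as plt
    from matplotlib import animation

    fig, ax = plt.subplots()
    xdata = np.linspace(0.0, 2.0 * np.pi, 100)
    line, = ax.plot(xdata, np.sin(xdata))
    state = {"pause": False}

    def on_click(event):
        # Clicking anywhere in the figure toggles the pause flag
        state["pause"] = not state["pause"]

    def step(frame):
        if not state["pause"]:
            line.set_ydata(np.sin(xdata + 0.1 * frame))
        return line,

    fig.canvas.mpl_connect("button_press_event", on_click)
    anim = animation.FuncAnimation(fig, step, frames=200, interval=50)
    plt.show()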
-    # Animation function
-    def animate(i):
-        frame = control()
-        index = frame * intv
-
-        for j in range(0, Nvar):
-            # Default to clearing the axis between frames on all plots
-            # except line plots
-            if (clear_between_frames is None and lineplot[j] != 1) or clear_between_frames is True:
-                ax[j].cla()  # Clear axis between frames so that masked arrays can be plotted
-            if (lineplot[j] == 1):
-                for k in range(0, Nlines[j]):
-                    lines[j][k].set_data(x[j][k], vars[j][k][index, :])
-            elif (contour[j] == 1):
-                thisx = x[j][0]
-                if len(thisx.shape) == 3:
-                    thisx = thisx[index]
-                thisy = y[j]
-                if len(thisy.shape) == 3:
-                    thisy = thisy[index]
-                plots[j] = ax[j].contourf(thisx.T, thisy.T, vars[j][0][index, :, :].T, Ncolors, cmap=cmap, lw=0, levels=clevels[j])
-                ax[j].set_xlabel(r'x')
-                ax[j].set_ylabel(r'y')
-                ax[j].set_title(titles[j])
-            elif (surf[j] == 1):
-                thisx = x[j][0]
-                if len(thisx.shape) == 3:
-                    thisx = thisx[index]
-                thisy = y[j]
-                if len(thisy.shape) == 3:
-                    thisy = thisy[index]
-                ax[j] = fig.add_subplot(row, col, j + 1, projection='3d')
-                plots[j] = ax[j].plot_wireframe(thisx, thisy, vars[j][0][index, :, :].T, rstride=ystride[j], cstride=xstride[j])
-                ax[j].set_zlim(fmin[j], fmax[j])
-                ax[j].set_xlabel(r'x')
-                ax[j].set_ylabel(r'y')
-                ax[j].set_title(titles[j])
-            elif (polar[j] == 1):
-                plots[j] = ax[j].contourf(theta[j], r[j], vars[j][0][index, :, :].T, cmap=cmap, levels=clevels[j])
-                ax[j].set_rmax(Nx[j][0] - 1)
-                ax[j].set_title(titles[j])
-
-        if t_array is not None:
-            title.set_text('t = %1.2e' % t_array[index])
-        else:
-            title.set_text('t = %i' % index)
-        return plots
-
-    def init():
-        global j, pause
-        j = -2
-        pause = False
-        return animate(0)
-
-    # Call Animation function
-    fig.canvas.mpl_connect('button_press_event', onClick)
-    anim = animation.FuncAnimation(fig, animate, init_func=init, frames=Nframes)
-
-    # If movie is not passed as a string assign the default filename
-    if (movie == 1):
-        movie = 'animation.mp4'
-
-    # Save movie with given or default name
-    if isinstance(movie, str):
-        movietype = movie.split('.')[-1]
-        if movietype == 'mp4':
-            try:
-                anim.save(movie, writer=FFwriter, fps=fps, dpi=dpi, extra_args=['-vcodec', 'libx264'])
-            except Exception:
-                # Try specifying the writer by string if ffmpeg not found
-                try:
-                    anim.save(movie, writer='ffmpeg', fps=fps, dpi=dpi, extra_args=['-vcodec', 'libx264'])
-                except Exception:
-                    print('Save failed: Check ffmpeg path')
-                    raise
-        elif movietype == 'gif':
-            anim.save(movie, writer='imagemagick', fps=fps, dpi=dpi)
-        else:
-            raise ValueError("Unrecognized file type for movie. Supported types are .mp4 and .gif")
-
-    # Show animation if not saved or returned, otherwise close the plot
-    if movie == 0 and not return_animation:
-        plt.show()
-    else:
-        plt.close()
-    # Return animation object
-    if return_animation:
-        return anim
diff --git a/tools/pylib/boututils/spectrogram.py b/tools/pylib/boututils/spectrogram.py
deleted file mode 100644
index d1c2a3617b..0000000000
--- a/tools/pylib/boututils/spectrogram.py
+++ /dev/null
@@ -1,163 +0,0 @@
-"""Creates spectrograms using the Gabor transform to maintain time and
-frequency resolution
-
-written by: Jarrod Leddy
-updated: 23/06/2016
-
-"""
-from __future__ import print_function
-from __future__ import division
-from builtins import range
-
-from numpy import arange, zeros, exp, power, transpose, sin, cos, linspace, min, max
-from scipy import fftpack, pi
-
-
-def spectrogram(data, dx, sigma, clip=1.0, optimise_clipping=True, nskip=1.0):
-    """Creates spectrograms using the Gabor transform to maintain time
-    and frequency resolution
-
-    .. note:: Very early and very late times will have some issues due
-          to the method - truncate them after taking the spectrogram
-          if they are below your required standards
-
-    .. note:: If you are seeing issues at the top or bottom of the
-          frequency range, you need a longer time series
-
-    written by: Jarrod Leddy
-    updated: 23/06/2016
-
-    Parameters
-    ----------
-    data : array_like
-        The time series you want spectrogrammed
-    dx : float
-        Time resolution
-    sigma : float
-        Used in the Gabor transform; balances time and frequency
-        resolution. The suggested value is 1.0, but it may need to be
-        adjusted manually until the result is as desired:
-
-        - If bands are too tall, raise sigma
-        - If bands are too wide, lower sigma
-    clip : float, optional
-        Makes the spectrogram run faster, but decreases frequency
-        resolution. clip is the factor by which the time window is
-        clipped --> N_new = N / clip
-    optimise_clipping : bool
-        If True (default), change the clipped data length to a power of
-        two (rounded down from your input clip value) to make the FFTs
-        fast
-    nskip : float
-        Scales the final time axis, skipping points over which to centre
-        the Gaussian window for the FFTs
-
-    Returns
-    -------
-    tuple : (array_like, array_like, array_like)
-        A tuple containing the spectrogram, frequency and time
-
-    """
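A hedged usage sketch: a slow chirp sampled at 1 kHz (made-up numbers):

    import numpy as np

    dx = 1.0e-3                     # sample spacing in seconds
    t = np.arange(0.0, 2.0, dx)
    signal = np.sin(2.0 * np.pi * (50.0 + 20.0 * t) * t)
    spec, freq, time = spectrogram(signal, dx, sigma=1.0, clip=4.0, nskip=10)
    # spec has shape (len(freq), len(time)): plt.contourf(time, freq, spec)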
-    n = data.size
-    nnew = int(n / nskip)
-    xx = arange(n) * dx
-    xxnew = arange(nnew) * dx * nskip
-    sigma = sigma * dx
-
-    n_clipped = int(n / clip)
-
-    # Round n_clipped down to a power of two for FFT speed
-    if optimise_clipping:
-        nn = n_clipped
-        two_count = 1
-        while True:
-            nn = nn / 2.0
-            if nn <= 2.0:
-                n_clipped = 2 ** two_count
-                print('clipping window length from ', n, ' to ', n_clipped, ' points')
-                break
-            else:
-                two_count += 1
-    else:
-        print('using full window length: ', n_clipped, ' points')
-
-    halfclip = int(n_clipped / 2)
-    spectra = zeros((nnew, halfclip))
-
-    omega = fftpack.fftfreq(n_clipped, dx)
-    omega = omega[0:halfclip]
-
-    for i in range(nnew):
-        # Centre the Gaussian window on point i*nskip, clamping the
-        # window at the ends of the series
-        beg = int(i * nskip) - halfclip
-        end = int(i * nskip) + halfclip - 1
-
-        if beg < 0:
-            end = end - beg
-            beg = 0
-        elif end >= n:
-            end = n - 1
-            beg = end - n_clipped + 1
-
-        gaussian = 1.0 / (sigma * 2.0 * pi) * exp(-0.5 * power((xx[beg:end] - xx[int(i * nskip)]), 2.0) / (2.0 * sigma))
-        fftt = abs(fftpack.fft(data[beg:end] * gaussian))
-        fftt = fftt[:halfclip]
-        spectra[i, :] = fftt
-
-    return (transpose(spectra), omega, xxnew)
-
-
-def test_spectrogram(n, d, s):
-    """Function used to test the performance of spectrogram with various
-    values of sigma
-
-    Parameters
-    ----------
-    n : int
-        Number of points
-    d : float
-        Grid spacing
-    s : float
-        Initial sigma
-
-    """
-    import matplotlib.pyplot as plt
-
-    nskip = 10
-    xx = arange(n) / d
-    test_data = sin(2.0 * pi * 512.0 * xx * (1.0 + 0.005 * cos(xx * 50.0))) + 0.5 * exp(xx) * cos(2.0 * pi * 100.0 * power(xx, 2))
-    test_sigma = s
-    dx = 1.0 / d
-
-    s1 = test_sigma * 0.1
-    s2 = test_sigma
-    s3 = test_sigma * 10.0
-
-    (spec2, omega2, xx) = spectrogram(test_data, dx, s2, clip=5.0, nskip=nskip)
-    (spec3, omega3, xx) = spectrogram(test_data, dx, s3, clip=5.0, nskip=nskip)
-    (spec1, omega1, xx) = spectrogram(test_data, dx, s1, clip=5.0, nskip=nskip)
-
-    levels = linspace(min(spec1), max(spec1), 100)
-    plt.subplot(311)
-    plt.contourf(xx, omega1, spec1, levels=levels)
-    plt.ylabel("frequency")
-    plt.xlabel(r"$t$")
-    plt.title(r"Spectrogram of test signal with $\sigma=$%3.1f" % s1)
-
-    levels = linspace(min(spec2), max(spec2), 100)
-    plt.subplot(312)
-    plt.contourf(xx, omega2, spec2, levels=levels)
-    plt.ylabel("frequency")
-    plt.xlabel(r"$t$")
-    plt.title(r"Spectrogram of test signal with $\sigma=$%3.1f" % s2)
-
-    levels = linspace(min(spec3), max(spec3), 100)
-    plt.subplot(313)
-    plt.contourf(xx, omega3, spec3, levels=levels)
-    plt.ylabel("frequency")
-    plt.xlabel(r"$t$")
-    plt.title(r"Spectrogram of test signal with $\sigma=$%3.1f" % s3)
-    plt.tight_layout()
-    plt.show()
-
-
-if __name__ == "__main__":
-    test_spectrogram(2048, 2048.0, 0.01)  # array size, divisions per unit, sigma of gaussian
diff --git a/tools/pylib/boututils/surface_average.py b/tools/pylib/boututils/surface_average.py
deleted file mode 100644
index 340d252a69..0000000000
--- a/tools/pylib/boututils/surface_average.py
+++ /dev/null
@@ -1,93 +0,0 @@
-"""Average over a surface
-
-"""
-
-from __future__ import print_function
-from __future__ import absolute_import
-from __future__ import division
-from builtins import range
-from past.utils import old_div
-import numpy as np
-from boututils.calculus import deriv
-from boututils.int_func import int_func
-from .idl_tabulate import idl_tabulate
-
-
-def surface_average(var, grid, area=None):
-    """Average a variable over a surface
-
-    Parameters
-    ----------
-    var : array_like
-        3D or 4D variable to integrate (either [x,y,z] or [t,x,y,z])
-    grid : dict
-        A dictionary of various grid quantities
-    area : bool
-        Average by flux-surface area = (B/Bp)*dl * R*dz
-
-    Returns
-    -------
-    ndarray
-        Surface average of variable: a radial profile [x], or [x,t] for
-        4D input
-
-    """
-    s = np.ndim(var)
-
-    if s == 4:
-        # 4D [t,x,y,z]: average each time slice separately
-        nx = np.shape(var)[1]
-        ny = np.shape(var)[2]
-        nt = np.shape(var)[0]
-
-        result = np.zeros((nx, nt))
-        for t in range(nt):
-            result[:, t] = surface_average(var[t, :, :, :], grid, area=area)
-
-        return result
-    elif s != 3:
-        raise RuntimeError("ERROR: surface_average var must be 3D or 4D")
-
-    # 3D [x,y,z]
-    nx = np.shape(var)[0]
-    ny = np.shape(var)[1]
-
-    # Calculate poloidal angle from grid
-    theta = np.zeros((nx, ny))
-
-    # status = gen_surface(mesh=grid) ; Start generator
-    xi = -1
-    yi = np.arange(0, ny, dtype=int)
-    last = 0
-    while True:
-        # yi = gen_surface(last=last, xi=xi, period=periodic)
-        xi = xi + 1
-        if xi == nx - 1:
-            last = 1
-
-        dtheta = 2. * np.pi / float(ny)
-        r = grid['Rxy'][xi, yi]
-        z = grid['Zxy'][xi, yi]
-        n = np.size(r)
-
-        dl = old_div(np.sqrt(deriv(r) ** 2 + deriv(z) ** 2), dtheta)
-        if area:
-            dA = (old_div(grid['Bxy'][xi, yi], grid['Bpxy'][xi, yi])) * r * dl
-            A = int_func(np.arange(n), dA)
-            theta[xi, yi] = 2. * np.pi * A / A[n - 1]
-        else:
-            nu = dl * (grid['Btxy'][xi, yi]) / ((grid['Bpxy'][xi, yi]) * r)
-            theta[xi, yi] = int_func(np.arange(n) * dtheta, nu)
-            theta[xi, yi] = 2. * np.pi * theta[xi, yi] / theta[xi, yi[n - 1]]
-
-        if last == 1:
-            break
-
-    vy = np.zeros(ny)
-    result = np.zeros(nx)
-    for x in range(nx):
-        for y in range(ny):
-            vy[y] = np.mean(var[x, y, :])
-
-        result[x] = old_div(idl_tabulate(theta[x, :], vy), (2. * np.pi))
-
-    return result
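For reference, a sketch of calling this routine on a made-up circular-flux-surface grid (the dict keys follow those read above; a constant field should average to roughly 1 on every surface):

    import numpy as np

    nx, ny, nz = 4, 64, 8
    theta = np.linspace(0.0, 2.0 * np.pi, ny, endpoint=False)
    rminor = np.arange(1, nx + 1)[:, None]       # minor radius of each surface
    grid = {
        "Rxy": 1.0 + 0.1 * rminor * np.cos(theta),
        "Zxy": 0.1 * rminor * np.sin(theta),
        "Bxy": np.ones((nx, ny)),                # total magnetic field
        "Bpxy": np.ones((nx, ny)),               # poloidal field
        "Btxy": np.ones((nx, ny)),               # toroidal field
    }
    print(surface_average(np.ones((nx, ny, nz)), grid))  # ~[1. 1. 1. 1.]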
diff --git a/tools/pylib/boututils/volume_integral.py b/tools/pylib/boututils/volume_integral.py
deleted file mode 100644
index 7e5d4da5cd..0000000000
--- a/tools/pylib/boututils/volume_integral.py
+++ /dev/null
@@ -1,102 +0,0 @@
-"""Integrate over a volume
-
-"""
-
-from __future__ import print_function
-from __future__ import division
-from builtins import range
-from past.utils import old_div
-import numpy as np
-from boututils.calculus import deriv
-
-
-def volume_integral(var, grid, xr=False):
-    """Integrate a variable over a volume
-
-    Parameters
-    ----------
-    var : array_like
-        Variable to integrate
-    grid : dict
-        A dictionary of various grid quantities
-    xr : (int, int), optional
-        Range of x indices (default: all of x)
-
-    Returns
-    -------
-    float
-        Volume integral of variable
-
-    """
-    s = np.ndim(var)
-
-    if s == 4:
-        # 4D [t,x,y,z] - integrate for each t
-        nx = np.shape(var)[1]
-        ny = np.shape(var)[2]
-        nt = np.shape(var)[0]
-
-        result = np.zeros(nt)
-        for t in range(nt):
-            result[t] = volume_integral(var[t, :, :, :], grid, xr=xr)
-        return result
-
-    elif s == 3:
-        # 3D [x,y,z] - average in Z
-        nx = np.shape(var)[0]
-        ny = np.shape(var)[1]
-        # nz = np.shape(var)[2]
-
-        zi = np.zeros((nx, ny))
-        for x in range(nx):
-            for y in range(ny):
-                zi[x, y] = np.mean(var[x, y, :])
-
-        return volume_integral(zi, grid, xr=xr)
-
-    elif s != 2:
-        raise RuntimeError("ERROR: volume_integral var must be 2, 3 or 4D")
-
-    # 2D [x,y]
-    nx = np.shape(var)[0]
-    ny = np.shape(var)[1]
-
-    if xr is False:
-        xr = [0, nx - 1]
-
-    result = 0.0
-
-    # status = gen_surface(mesh=grid) ; Start generator
-    xi = -1
-    yi = np.arange(0, ny, dtype=int)
-    last = 0
-    while True:
-        # yi = gen_surface(last=last, xi=xi, period=periodic)
-        xi = xi + 1
-        if xi == nx - 1:
-            last = 1
-
-        if (xi >= np.min(xr)) and (xi <= np.max(xr)):
-            dtheta = 2. * np.pi / float(ny)
-            r = grid['Rxy'][xi, yi]
-            z = grid['Zxy'][xi, yi]
-            n = np.size(r)
-            dl = old_div(np.sqrt(deriv(r) ** 2 + deriv(z) ** 2), dtheta)
-
-            # Area of flux-surface
-            dA = (grid['Bxy'][xi, yi] / grid['Bpxy'][xi, yi] * dl) * (r * 2. * np.pi)
-            # Volume
-            if xi == nx - 1:
-                dpsi = (grid['psixy'][xi, yi] - grid['psixy'][xi - 1, yi])
-            else:
-                dpsi = (grid['psixy'][xi + 1, yi] - grid['psixy'][xi, yi])
-
-            dV = dA * dpsi / (r * (grid['Bpxy'][xi, yi]))  # May need factor of 2pi
-            dV = np.abs(dV)
-
-            result = result + np.sum(var[xi, yi] * dV)
-
-        if last == 1:
-            break
-
-    return result
diff --git a/tools/pylib/boututils/watch.py b/tools/pylib/boututils/watch.py
deleted file mode 100644
index e7d038c4e1..0000000000
--- a/tools/pylib/boututils/watch.py
+++ /dev/null
@@ -1,84 +0,0 @@
-"""
-Routines for watching files for changes
-
-"""
-from __future__ import print_function
-from builtins import zip
-
-import time
-import os
-
-
-def watch(files, timeout=None, poll=2):
-    """Watch a given file or collection of files until one changes. Uses
-    polling.
-
-    Parameters
-    ----------
-    files : str or list of str
-        Name of one or more files to watch
-    timeout : int, optional
-        Timeout in seconds (default is no timeout)
-    poll : int, optional
-        Polling interval in seconds (default: 2)
-
-    Returns
-    -------
-    str
-        The name of the first changed file, or None if timed out before
-        any changes
-
-    Examples
-    --------
-    To watch one file, timing out after 60 seconds:
-
-    >>> watch('file1', timeout=60)
-
-    To watch 2 files, never timing out:
-
-    >>> watch(['file1', 'file2'])
-
-    Author: Ben Dudson
-
-    """
-    # Get modification time of file(s). Note that strings are iterable,
-    # so check for them explicitly first
-    try:
-        if not isinstance(files, str) and hasattr(files, '__iter__'):
-            # Iterable -> a collection of files
-            lastmod = [os.stat(f).st_mtime for f in files]
-            iterable = True
-        else:
-            # Not iterable -> just one file
-            lastmod = os.stat(files).st_mtime
-            iterable = False
-    except OSError:
-        print("Can't test modified time. Wrong file name?")
-        raise
-
-    start_time = time.time()
-    running = True
-    while running:
-        sleepfor = poll
-        if timeout:
-            # Check if the timeout will be reached before the next poll
-            if time.time() - start_time + sleepfor > timeout:
-                # Adjust the sleep so that we finish at the timeout
-                sleepfor = timeout - (time.time() - start_time)
-                running = False  # Stop after the next test
-
-        time.sleep(sleepfor)
-
-        if iterable:
-            for last_t, f in zip(lastmod, files):
-                # Get the new modification time
-                t = os.stat(f).st_mtime
-                if t > last_t + 1.0:  # +1 to reduce the risk of false alarms
-                    # File has been modified
-                    return f
-        else:
-            t = os.stat(files).st_mtime
-            if t > lastmod + 1.0:
-                return files
-    return None