diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index e30841eb..ff808dad 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -11,13 +11,14 @@ jobs: run: | sudo rm -rf /home/linuxbrew sudo apt-get update -y - sudo apt-get install libasound2-dev libsoundio-dev libsndfile1-dev fftw3-dev -y + sudo apt-get install libasound2-dev libsndfile1-dev fftw3-dev -y sudo apt-get install python3 python3-setuptools python3-pip # Requires setuptools >= 62.1 for `python setup.py test`, as earlier versions # used a different build path to the .so file as located in tests/__init__.py # 2024-07-29: Require importlib_metadata for now due to this: # https://github.com/pypa/setuptools/issues/4478 sudo pip3 install -U pytest numpy scipy setuptools>=62.1.0 importlib_metadata + curl https://raw.githubusercontent.com/mackron/miniaudio/master/miniaudio.h -o source/include/signalflow/node/io/output/miniaudio-library.h - name: Configure run: mkdir build && cd build && cmake .. - name: Make diff --git a/.github/workflows/wheels.yml b/.github/workflows/wheels.yml index 19d82286..6076f318 100644 --- a/.github/workflows/wheels.yml +++ b/.github/workflows/wheels.yml @@ -21,16 +21,15 @@ jobs: - name: Build wheels run: python -m cibuildwheel --output-dir wheelhouse env: - CIBW_BUILD: cp38-manylinux* cp39-manylinux* cp310-manylinux* cp311-manylinux* cp312-manylinux* + CIBW_BUILD: cp38-manylinux* cp39-manylinux* cp310-manylinux* cp311-manylinux* cp312-manylinux* cp313-manylinux* CIBW_ARCHS_MACOS: arm64 x86_64 CIBW_ARCHS_LINUX: x86_64 CIBW_BEFORE_ALL_LINUX: > yum install -y fftw-devel wget python3 sudo gcc && wget https://github.com/jackaudio/jack2/archive/v1.9.22.tar.gz && tar xzf v1.9.22.tar.gz && cd jack2-1.9.22 && python3 ./waf configure && /usr/bin/sudo python3 ./waf install && cd .. 
&& wget https://www.alsa-project.org/files/pub/lib/alsa-lib-1.2.9.tar.bz2 && tar xjf alsa-lib-1.2.9.tar.bz2 && cd alsa-lib-1.2.9 && ./configure && make && /usr/bin/sudo make install && cd .. && - git clone https://github.com/libsndfile/libsndfile.git && cd libsndfile && cmake -DBUILD_SHARED_LIBS=1 . && make && make install && cd .. && - git clone https://github.com/andrewrk/libsoundio.git && cd libsoundio && cmake . && make && make install - CIBW_BEFORE_ALL_MACOS: brew install cmake python libsndfile libsoundio + git clone https://github.com/libsndfile/libsndfile.git && cd libsndfile && cmake -DBUILD_SHARED_LIBS=1 . && make && make install && cd .. + CIBW_BEFORE_ALL_MACOS: brew install cmake python libsndfile - uses: actions/upload-artifact@v3 with: diff --git a/.gitignore b/.gitignore index e78446fd..476da572 100644 --- a/.gitignore +++ b/.gitignore @@ -30,3 +30,5 @@ dist .coverage .ipynb_checkpoints wheelhouse/ +.vscode +.vs diff --git a/CHANGELOG.md b/CHANGELOG.md index f3b7bc72..59c34c4c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,23 @@ # CHANGELOG +## [v0.5.0](https://github.com/ideoforms/signalflow/tree/v0.5.0) (2024-10-28) + +- Replaced the `libsoundio` audio abstraction layer with `miniaudio`, heralding first-class Windows and Linux support. 
+- Retired historical `AudioOut` classes for different operating systems, and refactored querying of inputs/outputs/backends +- `AudioGraphConfig`: Added `auto_record` flag, to automatically record all output in timestamped audio files +- Added support for instantiating `AudioGraph` and `AudioGraphConfig` with the path of a config file +- Modified `AudioGraph` to become a singleton, and throw a warning instead of an exception upon attempting to create a second `AudioGraph` +- Added Python bindings and added unit tests for `SampleRingBuffer` and `SampleRingQueue` classes +- Nodes: + - Added `Bus` node, to act as a fixed-channel summer with variable inputs + - Added `Maraca` node, a simple physically-inspired model of a shaker, after Cook (1997) + - Added `ChannelOffset` node to offset a node's output by `N` channels, and `node.play(output_channel=N)` syntax + - Added `SelectInput` node, to pass the output of an input whose index can be modulated at audio rate + - Added `HistoryBufferWriter` node to capture a rolling signal history window, useful for oscilloscope UI display + - Added `Accumulator` node, to accumulate energy with some leaky decay coefficient, and accompanying `calculate_decay_coefficient` function + - Added abstract `VariableInputNode` class + - Added `stutter_probability` and `stutter_advance_time` inputs to `Stutter` + ## [v0.4.10](https://github.com/ideoforms/signalflow/tree/v0.4.10) (2024-08-13) - Added `TriggerRoundRobin` node, to sequentially distribute triggers across outputs diff --git a/CMakeLists.txt b/CMakeLists.txt index 7fe20629..76b75122 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -4,42 +4,47 @@ #------------------------------------------------------------------------------- cmake_minimum_required(VERSION 3.15.0) +#-------------------------------------------------------------------------------- +# Allow deployment on older versions of macOS (back to 10.14 Mojave), +# and default to the include/lib paths of the current Python 
virtualenv +# (important for cross-compiling wheels) +#-------------------------------------------------------------------------------- +set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum macOS deployment version" FORCE) +set(Python_FIND_VIRTUALENV STANDARD) +set(Python_FIND_FRAMEWORKS LAST) + +#------------------------------------------------------------------------------- +# Note that project() call should come after set CMAKE_OSX_DEPLOYMENT_TARGET, +# but CMAKE_SYSTEM_NAME is only available *after* project(), so any platform- +# dependent code should come later on. +#------------------------------------------------------------------------------- +project(SignalFlow C CXX) +if (NOT CMAKE_BUILD_TYPE) + set(CMAKE_BUILD_TYPE Develop) +endif() + if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") #------------------------------------------------------------------------------- # On Apple, build the current native system by default #------------------------------------------------------------------------------- if (NOT CMAKE_OSX_ARCHITECTURES) execute_process(COMMAND uname -m - OUTPUT_VARIABLE CMAKE_OSX_ARCHITECTURES - OUTPUT_STRIP_TRAILING_WHITESPACE) + OUTPUT_VARIABLE CMAKE_OSX_ARCHITECTURES + OUTPUT_STRIP_TRAILING_WHITESPACE) endif() #------------------------------------------------------------------------------- - # Select the appropriate homebrew prefix by architecture + # Select the appropriate homebrew prefix by architecture. + # This is necessary so that the library is correctly linked against + # dependencies later on. 
#------------------------------------------------------------------------------- if (CMAKE_OSX_ARCHITECTURES STREQUAL "arm64") set(CMAKE_PREFIX_PATH /opt/homebrew) else() set(CMAKE_PREFIX_PATH /usr/local) endif() - - #-------------------------------------------------------------------------------- - # Allow deployment on older versions of macOS (back to 10.14 Mojave), - # and default to the include/lib paths of the current Python virtualenv - # (important for cross-compiling wheels) - #-------------------------------------------------------------------------------- - set(CMAKE_OSX_DEPLOYMENT_TARGET "10.15" CACHE STRING "Minimum macOS deployment version" FORCE) - set(Python_FIND_VIRTUALENV STANDARD) - set(Python_FIND_FRAMEWORKS LAST) -endif() - -# project call should come after set CMAKE_OSX_DEPLOYMENT_TARGET -project(SignalFlow C CXX) -if (NOT CMAKE_BUILD_TYPE) - set(CMAKE_BUILD_TYPE Develop) endif() - #-------------------------------------------------------------------------------- # Print config setup to help with debugging #-------------------------------------------------------------------------------- @@ -55,25 +60,32 @@ set(CMAKE_CXX_STANDARD 11) set(CMAKE_MACOSX_RPATH 1) #------------------------------------------------------------------------------- -# Shared compiler flags. +# Compiler flags for optimisations, warnings, etc. 
#------------------------------------------------------------------------------- - -if (NOT MSVC) +if (MSVC) + #------------------------------------------------------------------------------- + # Windows Visual C: Enable parallelisation + #------------------------------------------------------------------------------- + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} /MP") +else() + #------------------------------------------------------------------------------- + # GCC/Clang: Enable strict compiler warnings + #------------------------------------------------------------------------------- add_compile_options( -pedantic -fPIC -Wall ) -endif() -#------------------------------------------------------------------------------- -# Hide superfluous compiler warnings on macOS -#------------------------------------------------------------------------------- -if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") - add_compile_options( - -Wno-gnu-zero-variadic-macro-arguments - -Wno-vla-extension - ) + #------------------------------------------------------------------------------- + # Hide superfluous compiler warnings on macOS + #------------------------------------------------------------------------------- + if (CMAKE_SYSTEM_NAME STREQUAL "Darwin") + add_compile_options( + -Wno-gnu-zero-variadic-macro-arguments + -Wno-vla-extension + ) + endif() endif() include_directories( @@ -84,15 +96,30 @@ include_directories( source/lib/pybind11/include ) +#------------------------------------------------------------------------------- +# Compiler flags for debug vs release vs dev mode +#------------------------------------------------------------------------------- if (${CMAKE_BUILD_TYPE} STREQUAL "Debug") message("Building in debug mode") - add_compile_options(-ggdb3 -O0 -DDEBUG) + if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + add_compile_options(-O1) + else() + add_compile_options(-ggdb3 -O0 -DDEBUG) + endif() elseif (${CMAKE_BUILD_TYPE} STREQUAL "Release") message("Building in release mode") - 
add_compile_options(-O3 -funroll-loops) + if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + add_compile_options(-O2) + else() + add_compile_options(-O3 -funroll-loops) + endif() else() message("Building in dev mode") - add_compile_options(-O0) + if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + add_compile_options(-O1) + else() + add_compile_options(-O0) + endif() endif() #------------------------------------------------------------------------------- @@ -130,57 +157,36 @@ add_library(signalflow SHARED ${SRC}) add_compile_definitions(SIGNALFLOW_VERSION="${SIGNALFLOW_VERSION}") #------------------------------------------------------------------------------- -# Dependencies +# Dependency: libsndfile #------------------------------------------------------------------------------- - -set(SOUNDIO_BUILD_DIR "" CACHE PATH "Path to built SoundIO library (will use find_library if blank)") - -if (SOUNDIO_BUILD_DIR) - set(SOUNDIO_INCLUDE_DIR "${SOUNDIO_BUILD_DIR}/.." CACHE PATH "Path to SoundIO include directory (ignored if SOUNDIO_BUILD_DIR is blank") - add_definitions(-DHAVE_SOUNDIO) - target_link_libraries(signalflow "${SOUNDIO_BUILD_DIR}/x64-Debug/soundio.lib") - include_directories(signalflow "${SOUNDIO_BUILD_DIR}/$/") - include_directories(signalflow "${SOUNDIO_INCLUDE_DIR}/") -else() - find_library(SOUNDIO soundio) - if (SOUNDIO) - message("Found libsoundio") - add_definitions(-DHAVE_SOUNDIO) - target_link_libraries(signalflow ${SOUNDIO}) - else() - message(SEND_ERROR "Couldn't find libsoundio") - endif() -endif() - - -set(SNDFILE_BUILD_DIR "" CACHE PATH "Path to build sndfile library (will use find_library if blank)") - -if (SNDFILE_BUILD_DIR) - set(SNDFILE_INCLUDE_DIR "${SNDFILE_BUILD_DIR}/../include" CACHE PATH "Path to sndfile include directory (ignored if SNDFILE_BUILD_DIR is blank") +if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + set(SNDFILE_BINARY_DIR "${PROJECT_SOURCE_DIR}/../libsndfile-1.2.2-win64" CACHE PATH "For Windows, path to downloaded sndfile directory") 
add_definitions(-DHAVE_SNDFILE) - target_link_libraries(signalflow "${SNDFILE_BUILD_DIR}/sndfile") - include_directories(signalflow "${SNDFILE_BUILD_DIR}/include/") - include_directories(signalflow "${SNDFILE_INCLUDE_DIR}/") + target_link_libraries(signalflow "${SNDFILE_BINARY_DIR}/lib/sndfile.lib") + include_directories(signalflow "${SNDFILE_BINARY_DIR}/include/") else() find_library(SNDFILE sndfile) if (SNDFILE) message("Found sndfile") add_definitions(-DHAVE_SNDFILE) target_link_libraries(signalflow ${SNDFILE}) - else() - message(SEND_ERROR "Couldn't find libsndfile") + else() + message(FATAL_ERROR "Couldn't find libsndfile") endif() endif() +#------------------------------------------------------------------------------- +# Dependency: fftw3 +#------------------------------------------------------------------------------- if (NOT CMAKE_SYSTEM_NAME STREQUAL "Darwin") - set(FFTW_BUILD_DIR "" CACHE PATH "Path to prebuilt FFTW library (will use find_library if blank)") - if (FFTW_BUILD_DIR) + if (CMAKE_SYSTEM_NAME STREQUAL "Windows") + set(FFTW_BUILD_DIR "${PROJECT_SOURCE_DIR}/../fftw-3.3.5-dll64" CACHE PATH "Path to prebuilt FFTW library (will use find_library if blank)") include_directories("${FFTW_BUILD_DIR}") add_definitions(-DFFT_FFTW) target_link_libraries(signalflow - "${FFTW_BUILD_DIR}/libfftw3-3" - "${FFTW_BUILD_DIR}/libfftw3f-3" - "${FFTW_BUILD_DIR}/libfftw3l-3" + "${FFTW_BUILD_DIR}/libfftw3-3.lib" + "${FFTW_BUILD_DIR}/libfftw3f-3.lib" + "${FFTW_BUILD_DIR}/libfftw3l-3.lib" ) else() find_library(FFTW3F fftw3f) @@ -225,4 +231,4 @@ endif() # Install shared lib and all includes #------------------------------------------------------------------------------- install(TARGETS signalflow DESTINATION lib) -install(DIRECTORY source/include/signalflow DESTINATION include) +install(DIRECTORY source/include/signalflow DESTINATION include) \ No newline at end of file diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index fe207e69..3041fd20 100644 --- a/CONTRIBUTING.md 
+++ b/CONTRIBUTING.md @@ -17,7 +17,7 @@ Building from source assumes that you have a working installation of Python 3, i To build on macOS from source, install dependencies with Homebrew: ``` -brew install cmake libsndfile libsoundio +brew install cmake libsndfile ``` Clone this repository, then build and install with `pip`: @@ -35,13 +35,8 @@ SignalFlow supports Linux (verified on Ubuntu 20.04 and Raspberry Pi OS buster) To build the Python library from source on Linux, install dependencies with apt: ``` -apt-get install -y git cmake g++ python3-pip libasound2-dev libsndfile1-dev libsoundio-dev fftw3-dev -``` - -If you experience an error on Raspberry Pi `libf77blas.so.3: cannot open shared object file`: - -``` -sudo apt-get install -y libatlas-base-dev +# If on Raspberry Pi: libfftw3-dev +apt-get install -y git cmake g++ python3-pip libasound2-dev libsndfile1-dev fftw3-dev ``` Clone this repository, then build and install with `pip`: @@ -56,19 +51,13 @@ pip3 install . ### Windows -This is work in progress. - -Currently, dependencies need to be downloaded and built by hand. These can be placed anywhere. - -- https://github.com/timmb/libsoundio - check out the `fix-msvc` branch. - - Use CMake GUI to build libsoundio with Visual Studio 2019 with binaries in a subfolder of that repo named `build`. (Configure, Generate, Open project, Batch build all configurations) -- https://github.com/libsndfile/libsndfile - - Use CMake GUI to build libsndfile with Visual Studio 2019 with binaries in a subfolder of that repo named `build`. (Configure, Generate, Open project, Batch build all configurations) -- Download Windows binaries of FFTW from http://fftw.org/install/windows.html. - -To build SignalFlow, use the CMake GUI. Press configure and you will see three empty fields to fill in with the path to the two build folders and the FFTW binaries folder (see above). Set these parameters then press Configure, then Generate then Open. Then build in Visual Studio 2019. 
+The build process for SignalFlow on 64-bit Windows has been verified with Visual Studio 2022 and CMake. -As of 2021-03-03, only the signalflow project has been ported to build correctly on Windows. Only tested in x64 and for Debug builds. Tested using Visual Studio 2019. +- Download Windows binaries of [FFTW](http://fftw.org/install/windows.html) and [libsndfile](https://github.com/libsndfile/libsndfile/releases/), and unzip them in the same filesystem location as the `signalflow` source directory +- Install Python 3, and dependencies: `python -m pip install build delvewheel` +- Build the binary wheel: `python -m build --wheel` +- Copy the libsndfile and fftw binaries into `dlls` +- Bundle the DLL dependencies with the wheel: `python -m delvewheel repair --add-path=dlls *.whl` diff --git a/auxiliary/cibuildwheel/make-macos-x86-arm64.sh b/auxiliary/cibuildwheel/make-macos-x86-arm64.sh index 1c1b4f55..b407b240 100755 --- a/auxiliary/cibuildwheel/make-macos-x86-arm64.sh +++ b/auxiliary/cibuildwheel/make-macos-x86-arm64.sh @@ -8,22 +8,30 @@ ROOT=auxiliary/cibuildwheel -for VERSION in 38 39 310 311 312 +for VERSION in 38 39 310 311 312 313 do rm -r build export CIBW_BUILD="cp${VERSION}-*" - export CIBW_BUILD_VERBOSITY=2 + export CIBW_BUILD_VERBOSITY=1 . $ROOT/venv-$VERSION/bin/activate - pip3 install cibuildwheel + pip3 install cibuildwheel delocate + + # For some reason, Python 3.13 seems to do additional validation on delocate which + # throws an exception when dependencies have a deployment target version set too high, + # and many of the dependencies on my build machine have a target of macOS 13 (Ventura). + # Need to verify whether the pre-3.13 builds are actually truly compatible with pre-Ventura! + if [ "$VERSION" == "313" ]; then + export MACOSX_DEPLOYMENT_TARGET=13.0 + fi #-------------------------------------------------------------------------------- - # Make x86 + # Make x86.
#-------------------------------------------------------------------------------- export REPAIR_LIBRARY_PATH=/usr/local/lib export CIBW_ARCHS_MACOS="x86_64" export CMAKE_OSX_ARCHITECTURES=x86_64 - export CIBW_REPAIR_WHEEL_COMMAND_MACOS="DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}" + export CIBW_REPAIR_WHEEL_COMMAND_MACOS="DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel -w {dest_dir} -v {wheel}" python3 -m cibuildwheel --output-dir wheelhouse --platform macos @@ -33,7 +41,7 @@ do export REPAIR_LIBRARY_PATH=/opt/homebrew/lib export CIBW_ARCHS_MACOS="arm64" export CMAKE_OSX_ARCHITECTURES=arm64 - export CIBW_REPAIR_WHEEL_COMMAND_MACOS="DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel --require-archs {delocate_archs} -w {dest_dir} -v {wheel}" + export CIBW_REPAIR_WHEEL_COMMAND_MACOS="DYLD_LIBRARY_PATH=$REPAIR_LIBRARY_PATH delocate-wheel -w {dest_dir} -v {wheel}" python3 -m cibuildwheel --output-dir wheelhouse --platform macos done diff --git a/auxiliary/libs/signalflow_cli/__init__.py b/auxiliary/libs/signalflow_cli/__init__.py index 904fb73b..7c432f1d 100755 --- a/auxiliary/libs/signalflow_cli/__init__.py +++ b/auxiliary/libs/signalflow_cli/__init__.py @@ -58,24 +58,24 @@ def run_version(): print(signalflow.__version__) -def run_list_output_device_names(output_backend_name: str = None): - config = AudioGraphConfig() - if output_backend_name: - config.output_backend_name = output_backend_name - config.output_device_name = "" - graph = AudioGraph(config=config, start=False) +def run_list_output_device_names(backend_name: str = None): + output_device_names = AudioGraph.get_output_device_names(backend_name) print("Available output device names:") - for name in graph.output_device_names: + for name in output_device_names: print(" - %s" % name) -def run_list_output_backend_names(): - config = AudioGraphConfig() - config.output_backend_name = "dummy" - config.output_device_name = "" - graph = 
AudioGraph(config=config, start=False) +def run_list_input_device_names(backend_name: str = None): + input_device_names = AudioGraph.get_input_device_names(backend_name) + print("Available input device names:") + for name in input_device_names: + print(" - %s" % name) + + +def run_list_backend_names(): + backend_names = AudioGraph.get_backend_names() print("Available output backend names:") - for name in graph.output_backend_names: + for name in backend_names: print(" - %s" % name) @@ -136,15 +136,23 @@ def main(): # Command: list-output-device-names # -------------------------------------------------------------------------------- list_output_device_names = subparsers.add_parser('list-output-device-names', help='list available output devices') - list_output_device_names.add_argument('--output-backend-name', type=str, - help='name of output backend to use (default: system default backend)', + list_output_device_names.add_argument('--backend-name', type=str, + help='name of audio backend to use (default: system default backend)', default=None) + # -------------------------------------------------------------------------------- + # Command: list-input-device-names + # -------------------------------------------------------------------------------- + list_input_device_names = subparsers.add_parser('list-input-device-names', help='list available input devices') + list_input_device_names.add_argument('--backend-name', type=str, + help='name of audio backend to use (default: system default backend)', + default=None) + # -------------------------------------------------------------------------------- # Command: list-output-backend-names # -------------------------------------------------------------------------------- - list_output_backend_names = subparsers.add_parser('list-output-backend-names', - help='list available output backends') + list_backend_names = subparsers.add_parser('list-backend-names', + help='list available output backends') help = 
subparsers.add_parser('help', help='show help') # -------------------------------------------------------------------------------- @@ -175,9 +183,11 @@ def main(): elif args.command == 'test': run_test(args.frequency, args.gain, args.output_backend_name, args.output_device_name) elif args.command == 'list-output-device-names': - run_list_output_device_names(args.output_backend_name) - elif args.command == 'list-output-backend-names': - run_list_output_backend_names() + run_list_output_device_names(args.backend_name) + elif args.command == 'list-input-device-names': + run_list_input_device_names(args.backend_name) + elif args.command == 'list-backend-names': + run_list_backend_names() elif args.command == 'list-midi-output-device-names': run_list_midi_output_device_names() elif args.command == 'list-midi-input-device-names': diff --git a/auxiliary/scripts/auto-generator.py b/auxiliary/scripts/auto-generator.py index 53a48484..137c7c91 100755 --- a/auxiliary/scripts/auto-generator.py +++ b/auxiliary/scripts/auto-generator.py @@ -140,7 +140,10 @@ def extract_docs(doxygen: str) -> str: if re.search(r"^\s*/\*", line) or re.search(r"\*/\s*$", line): continue line = re.sub(r"^\s*\*\s*", "", line) - output = output + line + " " + + # Escape quote marks to avoid breaking auto-generated pydocs + line = re.sub('"', '\\"', line) + output = output + line + "\\n" return output.strip() diff --git a/docs/index.md b/docs/index.md index fa2fbca8..12701fe7 100644 --- a/docs/index.md +++ b/docs/index.md @@ -2,11 +2,9 @@ title: Explore sound synthesis and DSP with Python # SignalFlow: Explore sound synthesis and DSP with Python -SignalFlow is a sound synthesis framework whose goal is to make it quick and intuitive to explore complex sonic ideas. It has a simple Python API, allowing for rapid prototyping in Jupyter notebooks or on the command-line. 
It comes with over 100 signal processing classes for creative exploration, from filters and delays to FFT-based spectral processing and Euclidean rhythm generators. +SignalFlow is a sound synthesis framework designed for quick and intuitive expression of complex sonic ideas. It has a simple Python API, allowing for rapid prototyping in Jupyter notebooks or on the command-line. It comes with over 100 signal processing classes for creative exploration, from filters and delays to FFT-based spectral processing and Euclidean rhythm generators. -Its core is implemented in efficient C++11, with cross-platform hardware acceleration. - -SignalFlow has robust support for macOS and Linux (including Raspberry Pi), and has work-in-progress support for Windows. The overall project is currently in alpha status, and interfaces may change without warning. +Its core is implemented in efficient C++11, with cross-platform hardware acceleration, and cross-platform support for macOS, Linux (including Raspberry Pi) and Windows. --- diff --git a/docs/installation/macos/easy.md b/docs/installation/easy.md similarity index 78% rename from docs/installation/macos/easy.md rename to docs/installation/easy.md index d36a248f..f6b88d05 100644 --- a/docs/installation/macos/easy.md +++ b/docs/installation/easy.md @@ -1,14 +1,14 @@ -# SignalFlow: Easy install for macOS +# SignalFlow: Easy install with Visual Studio Code -The simplest way to start exploring SignalFlow is with the free [Visual Studio Code](https://code.visualstudio.com/) editor. Visual Studio Code can edit interactive "Jupyter" notebooks, which allow you to run and modify blocks of Python code in real-time, which is a great way to experiment live with audio synthesis. +The simplest way to start exploring SignalFlow is with the free [Visual Studio Code](https://code.visualstudio.com/) editor.
Visual Studio Code can edit interactive Jupyter notebooks, allowing you to run and modify blocks of Python code in real-time, which is a great way to experiment live with audio synthesis. -You'll only need to do this installation process once. Once setup, experimenting with SignalFlow is as simple as opening Visual Studio Code. +You'll only need to do this installation process once. Once set up, experimenting with SignalFlow is as simple as opening Visual Studio Code. --- ## 1. Install Python -Download and install the latest version of Python (currently 3.12). +Download and install the latest version of Python. [Download Python](https://www.python.org/downloads/){ .md-button } @@ -20,7 +20,7 @@ Download and install the latest version of Visual Studio Code. [Download Visual Studio Code](https://code.visualstudio.com/Download){ .md-button } -Once installed, open `Applications` and run `Visual Studio Code`. +Once installed, open Visual Studio Code. --- @@ -28,7 +28,7 @@ Once installed, open `Applications` and run `Visual Studio Code`. Visual Studio Code requires extensions to be installed to handle Python and Jupyter files. -In Visual Studio Code, select the `Extensions` icon from in the far-left column (or press `⇧⌘X`), and install the `Python` and `Jupyter` extensions by searching for their names and clicking "Install" on each. +In Visual Studio Code, select the `Extensions` icon from in the far-left column, and install the `Python` and `Jupyter` extensions by searching for their names and clicking "Install" on each. Once installation has finished, close the `Extensions` tab. @@ -51,7 +51,7 @@ In Visual Studio code, create a new folder to contain your new SignalFlow projec ## 5. Create a notebook -Select `File → New File...` (`^⌥⌘N`), and select `Jupyter Notebook`. You should see the screen layout change to display an empty black text block (in Jupyter parlance, a "cell"). +Select `File → New File...`, and select `Jupyter Notebook`. 
You should see the screen layout change to display an empty black text block (in Jupyter parlance, a "cell"). --- @@ -62,14 +62,14 @@ Click the button marked `Select Kernel` in the top right. - Select `Python Environments...` - Select `Create Python Environment` - Select `Venv` - - Finally, select the version of Python you just installed (`3.12.x`). + - Finally, select the version of Python you just installed. !!! info "Multiple versions of Python?" If you already have one or more versions of Python installed, any version from Python 3.8 upwards is fine. Visual Studio Code will launch into some activity, in which it is installing necessary libraries and creating a Python "virtual environment", which is an isolated area of the filesystem containing all the packages needed for this working space. Working in different virtual environments for different projects is good practice to minimise the likelihood of conflicts and disruptions. -When the setup is complete, the button in the top right should change to say `.venv (Python 3.12.x)`. +When the setup is complete, the button in the top right should change to say `.venv (Python 3.x.x)`. !!! info New notebooks created within this workspace will share the same Python virtual environment. @@ -84,7 +84,7 @@ In the first block, copy and paste the below: %pip install signalflow ``` -To run the cell, press `^↵` (control-enter). After a minute, you should see some output saying `Successfully installed signalflow`. +To run the cell, press `Ctrl-Enter`. After a minute, you should see some output saying `Successfully installed signalflow`. !!! info "Running cells with '.venv' requires the ipykernel package." If you are given a prompt that the `ipykernel` package is required, press "Install" to install the package. @@ -102,7 +102,7 @@ print("Hello") print("world!") ``` -Press `^↵` (control-enter) to run the cell. You should see "Hello world!" appear below the cell. +Press `Ctrl-Enter` to run the cell. You should see "Hello world!" 
appear below the cell. !!! info "Keyboard shortcuts" - Navigate between cells with the arrow keys @@ -120,7 +120,7 @@ Clear the first cell, and replace it with: from signalflow import * ``` -Run the cell with `^↵`. This command imports all of the SignalFlow commands and classes, and only needs to be run once per session. +Run the cell with `Ctrl-Enter`. This command imports all of the SignalFlow commands and classes, and only needs to be run once per session. Create a new cell by pressing `b`, and in the new cell, run: diff --git a/docs/installation/index.md b/docs/installation/index.md index 96598412..f3714398 100644 --- a/docs/installation/index.md +++ b/docs/installation/index.md @@ -2,26 +2,24 @@ ## Requirements -SignalFlow supports macOS, Linux (including Raspberry Pi), and has alpha support for Windows. +SignalFlow supports macOS, Linux (including Raspberry Pi), and Windows (64-bit). It requires Python 3.8 or above. ## Installation ---- - -### macOS +If you're new to Python or getting started from scratch, the tutorial below will walk you through the setup process with Visual Studio Code. -{% - include-markdown "installation/macos/buttons.md" -%} +[Easy install with Visual Studio Code](easy.md){ .md-button } ---- +## Command-line installation -### Linux +If you are an existing Python user and confident with the command line: -{% include-markdown "installation/linux/buttons.md" %} +[Install from the command line](command-line-generic.md){ .md-button } --- ## Examples [Several example scripts](https://github.com/ideoforms/signalflow/tree/master/examples) are included within the repo, covering simple control and modulation, FM synthesis, sample granulation, MIDI control, chaotic functions, etc. 
+ +--- diff --git a/docs/installation/macos/buttons.md b/docs/installation/macos/buttons.md index 1c4c26e0..6c8db519 100644 --- a/docs/installation/macos/buttons.md +++ b/docs/installation/macos/buttons.md @@ -1,6 +1,6 @@ If you're new to Python or getting started from scratch: -[macOS: Easy install with Visual Studio Code](easy.md){ .md-button } +[macOS: Easy install with Visual Studio Code](../easy.md){ .md-button } If you are an existing Python user and confident with the command line: diff --git a/examples/audio-through-example.py b/examples/audio-through-example.py index a319006d..4b9b9626 100755 --- a/examples/audio-through-example.py +++ b/examples/audio-through-example.py @@ -23,7 +23,7 @@ def main(): #-------------------------------------------------------------------------------- # Add some delay, and play #-------------------------------------------------------------------------------- - output = audio_in + CombDelay(audio_in, 0.2, 0.8) * 0.3 + output = audio_in stereo = StereoPanner(output) graph.play(stereo) diff --git a/examples/euclidean-rhythm-example.py b/examples/euclidean-rhythm-example.py index 4cc182ed..48a302af 100755 --- a/examples/euclidean-rhythm-example.py +++ b/examples/euclidean-rhythm-example.py @@ -68,7 +68,7 @@ def __init__(self, input=0, delay_time=1/8, feedback=0.7, wet=0.3): pingpong = PingPongDelayPatch(mix) pingpong.play() - graph.wait() + graph.wait(20) if __name__ == "__main__": diff --git a/examples/hello-world-example.py b/examples/hello-world-example.py index 33702f64..1412ffb4 100755 --- a/examples/hello-world-example.py +++ b/examples/hello-world-example.py @@ -24,7 +24,7 @@ def main(): # Play the #------------------------------------------------------------------------ graph.play(stereo) - graph.wait() + graph.wait(2) if __name__ == "__main__": main() \ No newline at end of file diff --git a/setup.cfg b/setup.cfg index b845b0e4..0ea7d53d 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,9 +1,9 @@ [metadata] name = signalflow 
-version = 0.4.10 +version = 0.5.0 author = Daniel Jones author_email = dan@erase.net -description = SignalFlow is a sound synthesis library designed to make it quick and intuitive to explore complex sonic ideas +description = SignalFlow is a sound synthesis library designed for clear and concise expression of sonic ideas long_description = file: README.md long_description_content_type = text/markdown keywords = audio, sound, synthesis, dsp, sound-synthesis @@ -23,13 +23,6 @@ install_requires = numpy package_dir = = auxiliary/libs -packages = - signalflow-stubs - signalflow_midi - signalflow_cli - signalflow_examples - signalflow_visualisation - signalflow_analysis include_package_data = true [options.extras_require] diff --git a/setup.py b/setup.py index cf15e948..9a90f222 100644 --- a/setup.py +++ b/setup.py @@ -69,6 +69,7 @@ def build_extension(self, ext): signalflow_package_data = ['*.pyd'] setup( + packages=signalflow_packages, ext_modules=[CMakeExtension('signalflow')], cmdclass=dict(build_ext=CMakeBuild), ) diff --git a/source/include/signalflow/buffer/ringbuffer.h b/source/include/signalflow/buffer/ringbuffer.h index ee95d27e..0c327f0a 100644 --- a/source/include/signalflow/buffer/ringbuffer.h +++ b/source/include/signalflow/buffer/ringbuffer.h @@ -7,9 +7,12 @@ *--------------------------------------------------------------------------------*/ #include +#include +#include #include #include #include +#include enum signalflow_interpolation_mode_t : unsigned int; @@ -20,27 +23,55 @@ template class RingBuffer { public: - RingBuffer(int size); + RingBuffer(unsigned int capacity); ~RingBuffer(); void append(T value); - void extend(T *ptr, int count); + void extend(T *ptr, unsigned int count); + void extend(std::vector vec); T get(double index); T operator[](double index) { return this->get(index); } + unsigned int get_capacity() { return this->capacity; } + unsigned int get_write_position() { return this->write_position; } -private: +protected: T *data = nullptr; 
- int size; - int position; + unsigned int capacity; + unsigned int write_position; signalflow_interpolation_mode_t interpolation_mode; }; template -RingBuffer::RingBuffer(int size) +class RingQueue : public RingBuffer +{ +public: + RingQueue(unsigned int capacity) + : RingBuffer(capacity) + { + this->read_position = this->capacity - 1; + this->filled_count = 0; + } + T pop(); + + int get_filled_count() { return this->filled_count; } + void append(T value); + +private: + unsigned int read_position; + int filled_count; + std::mutex mutex; +}; + +template +RingBuffer::RingBuffer(unsigned int capacity) { - this->data = new T[size](); - this->position = 0; - this->size = size; + if (capacity == 0) + { + throw std::runtime_error("RingBuffer must have a capacity greater than zero"); + } + this->data = new T[capacity](); + this->write_position = capacity - 1; + this->capacity = capacity; } template @@ -52,34 +83,61 @@ RingBuffer::~RingBuffer() template void RingBuffer::append(T value) { - this->data[this->position] = value; - this->position = (this->position + 1) % this->size; + this->write_position = (this->write_position + 1) % this->capacity; + this->data[this->write_position] = value; } template -void RingBuffer::extend(T *ptr, int count) +void RingBuffer::extend(T *ptr, unsigned int count) { for (int i = 0; i < count; i++) this->append(ptr[i]); } +template +void RingBuffer::extend(std::vector vec) +{ + for (auto item : vec) + this->append(item); +} + template T RingBuffer::get(double index) { - double frame = index + this->position; + double frame = index + this->write_position; while (frame < 0) { - frame += this->size; + frame += this->capacity; } - frame = fmod(frame, this->size); + frame = fmod(frame, this->capacity); double frame_frac = (frame - (int) frame); int frame_index = (int) frame; - int next_frame_index = ((int) ceil(frame)) % size; + int next_frame_index = ((int) ceil(frame)) % this->capacity; - T rv = ((1.0 - frame_frac) * data[frame_index]) + 
(frame_frac * data[next_frame_index]); + T rv = ((1.0 - frame_frac) * this->data[frame_index]) + (frame_frac * this->data[next_frame_index]); return rv; } +template +T RingQueue::pop() +{ + mutex.lock(); + this->read_position = (this->read_position + 1) % this->capacity; + this->filled_count--; + T rv = this->data[this->read_position]; + mutex.unlock(); + return rv; +} + +template +void RingQueue::append(T value) +{ + mutex.lock(); + this->RingBuffer::append(value); + this->filled_count++; + mutex.unlock(); +} + } diff --git a/source/include/signalflow/core/constants.h b/source/include/signalflow/core/constants.h index a7765dd8..656249c5 100644 --- a/source/include/signalflow/core/constants.h +++ b/source/include/signalflow/core/constants.h @@ -14,6 +14,7 @@ typedef float sample; typedef sample *frame; typedef RingBuffer SampleRingBuffer; +typedef RingQueue SampleRingQueue; #if defined(__APPLE__) && !defined(FFT_FFTW) #define FFT_ACCELERATE diff --git a/source/include/signalflow/core/graph.h b/source/include/signalflow/core/graph.h index 9892aff3..7b91d271 100644 --- a/source/include/signalflow/core/graph.h +++ b/source/include/signalflow/core/graph.h @@ -199,7 +199,15 @@ class AudioGraph * @return The list of device names. * *--------------------------------------------------------------------------------*/ - std::list get_output_device_names(); + static std::list get_output_device_names(std::string backend_name = ""); + + /**-------------------------------------------------------------------------------- + * Returns a list of available audio I/O input devices. + * + * @return The list of device names. + * + *--------------------------------------------------------------------------------*/ + static std::list get_input_device_names(std::string backend_name = ""); /**-------------------------------------------------------------------------------- * Returns a list of available audio I/O output backends. 
@@ -207,7 +215,7 @@ class AudioGraph * @return The list of backend names. * *--------------------------------------------------------------------------------*/ - std::list get_output_backend_names(); + static std::list get_backend_names(); /**-------------------------------------------------------------------------------- * Schedule a node for rendering without connecting the node to the graph's output. diff --git a/source/include/signalflow/core/util.h b/source/include/signalflow/core/util.h index 15d683ec..d3992ba0 100644 --- a/source/include/signalflow/core/util.h +++ b/source/include/signalflow/core/util.h @@ -18,6 +18,7 @@ namespace signalflow { double signalflow_timestamp(); +void signalflow_msleep(int millis); long signalflow_create_random_seed(); double signalflow_clip(double value, double min, double max); diff --git a/source/include/signalflow/node/io/input/abstract.h b/source/include/signalflow/node/io/input/abstract.h index a1c28beb..500a8be4 100644 --- a/source/include/signalflow/node/io/input/abstract.h +++ b/source/include/signalflow/node/io/input/abstract.h @@ -7,16 +7,18 @@ namespace signalflow { + class AudioIn_Abstract : public Node { public: AudioIn_Abstract(); - virtual int init() = 0; - virtual int start() = 0; - virtual int stop() = 0; - virtual int destroy() = 0; + virtual void init() = 0; + virtual void start() = 0; + virtual void stop() = 0; + virtual void destroy() = 0; virtual void process(Buffer &out, int num_samples) = 0; }; + } diff --git a/source/include/signalflow/node/io/input/miniaudio.h b/source/include/signalflow/node/io/input/miniaudio.h new file mode 100644 index 00000000..ec836e05 --- /dev/null +++ b/source/include/signalflow/node/io/input/miniaudio.h @@ -0,0 +1,32 @@ +#pragma once + +#include + +#include "abstract.h" + +#include "../output/miniaudio-library.h" +#include "signalflow/core/graph.h" + +namespace signalflow +{ + +class AudioIn : public AudioIn_Abstract +{ +public: + AudioIn(unsigned int num_channels = 1); + 
virtual ~AudioIn() override; + virtual void init() override; + virtual void start() override; + virtual void stop() override; + virtual void destroy() override; + virtual void process(Buffer &out, int num_samples) override; + +private: + std::string backend_name; + std::string device_name; + ma_context context; + ma_device device; + unsigned int num_channels; +}; + +} diff --git a/source/include/signalflow/node/io/input/soundio.h b/source/include/signalflow/node/io/input/soundio.h deleted file mode 100644 index 90e4e273..00000000 --- a/source/include/signalflow/node/io/input/soundio.h +++ /dev/null @@ -1,40 +0,0 @@ -#pragma once - -#ifdef HAVE_SOUNDIO - -#define AudioIn AudioIn_SoundIO - -#include -#include - -#include "abstract.h" - -#include "signalflow/core/graph.h" - -namespace signalflow -{ - -class AudioIn_SoundIO : public AudioIn_Abstract -{ -public: - AudioIn_SoundIO(unsigned int num_channels = 1); - virtual ~AudioIn_SoundIO() override; - virtual int init() override; - virtual int start() override; - virtual int stop() override; - virtual int destroy() override; - virtual void process(Buffer &out, int num_samples) override; - - struct SoundIo *soundio; - struct SoundIoDevice *device; - struct SoundIoInStream *instream; - - Buffer *buffer; - int read_pos; - int write_pos; - unsigned int num_channels_requested; -}; - -} - -#endif diff --git a/source/include/signalflow/node/io/output/abstract.h b/source/include/signalflow/node/io/output/abstract.h index a891e4a2..c6b6c6a4 100644 --- a/source/include/signalflow/node/io/output/abstract.h +++ b/source/include/signalflow/node/io/output/abstract.h @@ -13,10 +13,10 @@ class AudioOut_Abstract : public Node AudioOut_Abstract(); virtual void process(Buffer &out, int num_samples); - virtual int init() = 0; - virtual int start() = 0; - virtual int stop() = 0; - virtual int destroy() = 0; + virtual void init() = 0; + virtual void start() = 0; + virtual void stop() = 0; + virtual void destroy() = 0; virtual void 
add_input(NodeRef node); virtual void remove_input(NodeRef node); @@ -26,10 +26,16 @@ class AudioOut_Abstract : public Node virtual void set_channels(int num_input_channels, int num_output_channels); + /**-------------------------------------------------------------------------------- + * Returns the audio output's sample rate. Note that this may not be the + * same as the audio hardware's sample rate if the user has specified + * a non-zero sample rate in AudioGraphConfig. + *-------------------------------------------------------------------------------*/ unsigned int get_sample_rate(); /**-------------------------------------------------------------------------------- - * Returns the buffer size required by the audio hardware. + * Returns the buffer size observed by the audio HAL. Note that this is + * served by miniaudio. * * @return The buffer size, in frames. *-------------------------------------------------------------------------------*/ diff --git a/source/include/signalflow/node/io/output/dummy.h b/source/include/signalflow/node/io/output/dummy.h index 73e102c7..7d79f7b9 100644 --- a/source/include/signalflow/node/io/output/dummy.h +++ b/source/include/signalflow/node/io/output/dummy.h @@ -11,10 +11,10 @@ class AudioOut_Dummy : public AudioOut_Abstract public: AudioOut_Dummy(int num_channels = 2, int buffer_size = 256); - virtual int init() { return 0; } - virtual int start() { return 0; } - virtual int stop() { return 0; } - virtual int destroy() { return 0; } + virtual void init() {} + virtual void start() {} + virtual void stop() {} + virtual void destroy() {} }; REGISTER(AudioOut_Dummy, "audioout-dummy") diff --git a/source/include/signalflow/node/io/output/ios.h b/source/include/signalflow/node/io/output/ios.h deleted file mode 100644 index bdc83f01..00000000 --- a/source/include/signalflow/node/io/output/ios.h +++ /dev/null @@ -1,33 +0,0 @@ -#pragma once - -#include "signalflow/core/platform.h" - -#if __APPLE__ - -#if TARGET_OS_IPHONE - -#define 
AudioOut AudioOut_iOS - -#include "abstract.h" - -#include "signalflow/core/graph.h" -#include "signalflow/node/node.h" - -namespace signalflow -{ - -class AudioOut_iOS : public AudioOut_Abstract -{ -public: - AudioOut_iOS(AudioGraph *graph); - - virtual int init() override; - virtual int start() override; - virtual int close() override; -}; - -} // namespace signalflow - -#endif - -#endif diff --git a/source/include/signalflow/node/io/output/miniaudio.h b/source/include/signalflow/node/io/output/miniaudio.h new file mode 100644 index 00000000..460b37ec --- /dev/null +++ b/source/include/signalflow/node/io/output/miniaudio.h @@ -0,0 +1,48 @@ +#pragma once + +#include + +#include "abstract.h" + +#include "miniaudio-library.h" +#include "signalflow/core/graph.h" +#include "signalflow/node/node.h" + +namespace signalflow +{ + +class AudioOut : public AudioOut_Abstract +{ +public: + AudioOut(const std::string &backend_name = "", + const std::string &device_name = "", + unsigned int sample_rate = 0, + unsigned int buffer_size = 0); + + virtual void init() override; + virtual void start() override; + virtual void stop() override; + virtual void destroy() override; + + static std::list get_output_device_names(std::string backend_name = ""); + static std::list get_input_device_names(std::string backend_name = ""); + static std::list get_backend_names(); + + /*-------------------------------------------------------------------------------- + * Initialise a new miniaudio context, using the specified backend name if + * present, or the default backend otherwise. + * + * Public because AudioIn also uses this method. 
+ *-------------------------------------------------------------------------------*/ + static void init_context(ma_context *context, std::string backend_name = ""); + +private: + std::string backend_name; + std::string device_name; + ma_context context; + ma_device device; +}; + +REGISTER(AudioOut, "audioout") + +} // namespace signalflow diff --git a/source/include/signalflow/node/io/output/soundio.h b/source/include/signalflow/node/io/output/soundio.h deleted file mode 100644 index 518cc988..00000000 --- a/source/include/signalflow/node/io/output/soundio.h +++ /dev/null @@ -1,48 +0,0 @@ -#pragma once - -#ifdef HAVE_SOUNDIO - -#define AudioOut AudioOut_SoundIO - -#include -#include - -#include "abstract.h" - -#include "signalflow/core/graph.h" -#include "signalflow/node/node.h" - -namespace signalflow -{ - -class AudioOut_SoundIO : public AudioOut_Abstract -{ -public: - AudioOut_SoundIO(const std::string &backend_name = "", - const std::string &device_name = "", - unsigned int sample_rate = 0, - unsigned int buffer_size = 0); - - virtual int init() override; - virtual int start() override; - virtual int stop() override; - virtual int destroy() override; - - std::list get_output_device_names(); - std::list get_output_backend_names(); - int get_default_output_device_index(); - - struct SoundIo *soundio; - struct SoundIoDevice *device; - struct SoundIoOutStream *outstream; - -private: - std::string backend_name; - std::string device_name; -}; - -REGISTER(AudioOut_SoundIO, "audioout-soundio") - -} // namespace signalflow - -#endif diff --git a/source/include/signalflow/signalflow.h b/source/include/signalflow/signalflow.h index 59937362..64b20819 100644 --- a/source/include/signalflow/signalflow.h +++ b/source/include/signalflow/signalflow.h @@ -65,13 +65,11 @@ /*------------------------------------------------------------------------ * I/O *-----------------------------------------------------------------------*/ +#include +#include #include #include -#include 
-#include - -#include -#include +#include /*------------------------------------------------------------------------ * Oscillators diff --git a/source/src/CMakeLists.txt b/source/src/CMakeLists.txt index 7576c3d9..36db5f25 100644 --- a/source/src/CMakeLists.txt +++ b/source/src/CMakeLists.txt @@ -83,10 +83,10 @@ set(SRC ${SRC} ${CMAKE_CURRENT_SOURCE_DIR}/node/processors/fold.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/processors/wetdry.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/io/input/abstract.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/node/io/input/soundio.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/node/io/input/miniaudio.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/io/output/abstract.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/io/output/dummy.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/node/io/output/soundio.cpp + ${CMAKE_CURRENT_SOURCE_DIR}/node/io/output/miniaudio.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/operators/add.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/operators/amplitude-to-decibels.cpp ${CMAKE_CURRENT_SOURCE_DIR}/node/operators/bus.cpp diff --git a/source/src/core/graph.cpp b/source/src/core/graph.cpp index 6b2c941d..728b3eb1 100644 --- a/source/src/core/graph.cpp +++ b/source/src/core/graph.cpp @@ -3,8 +3,7 @@ #include "signalflow/core/graph.h" #include "signalflow/node/io/output/abstract.h" #include "signalflow/node/io/output/dummy.h" -#include "signalflow/node/io/output/ios.h" -#include "signalflow/node/io/output/soundio.h" +#include "signalflow/node/io/output/miniaudio.h" #include "signalflow/node/node.h" #include "signalflow/node/oscillators/constant.h" #include "signalflow/patch/patch.h" @@ -17,7 +16,7 @@ #include #include -#include +//#include namespace signalflow { @@ -49,7 +48,7 @@ AudioGraph::AudioGraph(AudioGraphConfig *config, std::string output_device, bool this->config = *config; } - if (output_device == "dummy") + if (output_device == "dummy" || this->config.get_output_device_name() == "dummy") { this->output = new AudioOut_Dummy(); } @@ -170,6 +169,7 @@ void AudioGraph::start() std::string 
recording_filename = recordings_dir + "/signalflow-" + timestamp_str + ".wav"; // TODO: This is all very POSIX-specific and won't work on Windows + /* struct stat st; if (stat(SIGNALFLOW_USER_DIR.c_str(), &st) == -1) { @@ -187,6 +187,7 @@ void AudioGraph::start() throw std::runtime_error("AudioGraph: Failed creating recordings directory for auto_record (" + recordings_dir + ")"); } } + */ this->start_recording(recording_filename, this->output->get_num_input_channels()); } } @@ -588,16 +589,22 @@ std::list AudioGraph::get_outputs() return output->get_inputs(); } -std::list AudioGraph::get_output_device_names() +// static +std::list AudioGraph::get_output_device_names(std::string backend_name) { - AudioOut_SoundIO *output = (AudioOut_SoundIO *) (this->output.get()); - return output->get_output_device_names(); + return AudioOut::get_output_device_names(backend_name); } -std::list AudioGraph::get_output_backend_names() +// static +std::list AudioGraph::get_input_device_names(std::string backend_name) { - AudioOut_SoundIO *output = (AudioOut_SoundIO *) (this->output.get()); - return output->get_output_backend_names(); + return AudioOut::get_input_device_names(backend_name); +} + +// static +std::list AudioGraph::get_backend_names() +{ + return AudioOut::get_backend_names(); } NodeRef AudioGraph::add_node(NodeRef node) diff --git a/source/src/core/util.cpp b/source/src/core/util.cpp index 92caf3e5..7ba9c878 100644 --- a/source/src/core/util.cpp +++ b/source/src/core/util.cpp @@ -12,6 +12,7 @@ #include #include #include +#include namespace signalflow { @@ -28,6 +29,11 @@ double signalflow_timestamp() / 1000000.0; } +void signalflow_msleep(int millis) +{ + std::this_thread::sleep_for(std::chrono::milliseconds(millis)); +} + long signalflow_create_random_seed() { /*--------------------------------------------------------------------* diff --git a/source/src/node/io/input/abstract.cpp b/source/src/node/io/input/abstract.cpp index 76a266e4..18d34b10 100644 --- 
a/source/src/node/io/input/abstract.cpp +++ b/source/src/node/io/input/abstract.cpp @@ -2,15 +2,9 @@ namespace signalflow { -AudioIn_Abstract *shared_in = nullptr; AudioIn_Abstract::AudioIn_Abstract() { - if (shared_in) - throw std::runtime_error("Multiple AudioIn nodes are not yet supported."); - - shared_in = this; - this->name = "audioin"; this->set_channels(0, 1); } diff --git a/source/src/node/io/input/miniaudio.cpp b/source/src/node/io/input/miniaudio.cpp new file mode 100644 index 00000000..cfdc22be --- /dev/null +++ b/source/src/node/io/input/miniaudio.cpp @@ -0,0 +1,178 @@ +#include "signalflow/node/io/input/miniaudio.h" + +#include "signalflow/core/graph.h" +#include "signalflow/node/io/output/miniaudio.h" + +#include +#include +#include + +static bool is_processing = false; + +namespace signalflow +{ + +AudioIn *shared_in; +std::vector input_queue; + +void read_callback(ma_device *pDevice, + void *pOutput, + const void *pInput, + ma_uint32 frameCount) +{ + is_processing = true; + + AudioIn *input_node = (AudioIn *) shared_in; + if (!input_node) + return; + + float *input_samples = (float *) pInput; + + // TODO: the number of channels at the mic input might not be the same as the number of channels of this device + int num_channels = input_node->get_num_output_channels(); + for (unsigned int frame = 0; frame < frameCount; frame++) + { + for (int channel = 0; channel < num_channels; channel++) + { + input_queue[channel]->append(input_samples[frame * num_channels + channel]); + } + } + + is_processing = false; +} + +AudioIn::AudioIn(unsigned int num_channels) + : AudioIn_Abstract() +{ + shared_in = this; + this->name = "audioin-miniaudio"; + this->num_channels = num_channels; + this->init(); +} + +AudioIn::~AudioIn() +{ + // TODO: call superclass destructor to set shared_in to null + this->destroy(); +} + +void AudioIn::init() +{ + ma_result rv; + ma_device_config config = ma_device_config_init(ma_device_type_capture); + config.capture.format = 
ma_format_f32; + config.capture.channels = this->num_channels; + config.periodSizeInFrames = this->get_graph()->get_output_buffer_size(); + config.sampleRate = this->get_graph()->get_sample_rate(); + config.dataCallback = read_callback; + + ma_device_info *capture_devices; + ma_uint32 capture_device_count; + + // TODO: Add get_input_backend_name + AudioOut::init_context(&this->context, this->get_graph()->get_config().get_output_backend_name()); + + rv = ma_context_get_devices(&this->context, + NULL, + NULL, + &capture_devices, + &capture_device_count); + int selected_device_index = -1; + std::string device_name = this->get_graph()->get_config().get_input_device_name(); + + if (!device_name.empty()) + { + for (unsigned int i = 0; i < capture_device_count; i++) + { + /*-----------------------------------------------------------------------* + * For ease of use, SignalFlow allows for partial matches so that only + * the first part of the device names needs to be specified. However, + * an errors is thrown if the match is ambiguous. 
+ *-----------------------------------------------------------------------*/ + if (strncmp(capture_devices[i].name, device_name.c_str(), strlen(device_name.c_str())) == 0) + { + if (selected_device_index != -1) + { + throw audio_io_exception("More than one audio device found matching name '" + device_name + "'"); + } + selected_device_index = i; + } + } + if (selected_device_index == -1) + { + throw audio_io_exception("No audio device found matching name '" + device_name + "'"); + } + + config.capture.pDeviceID = &capture_devices[selected_device_index].id; + } + + rv = ma_device_init(NULL, &config, &device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error initialising input device"); + } + + this->set_channels(0, device.capture.internalChannels); + + /*-------------------------------------------------------------------------------- + * Note that the underlying sample rate used by the recording hardware + * (`device.capture.internalSampleRate`) may not be the same as that used + * by `AudioIn`: SignalFlow requires that the input and output streams are both + * on the same sample rate, so miniaudio's resampling is used to unify them. + *-------------------------------------------------------------------------------*/ + std::string s = device.capture.internalChannels == 1 ? 
"" : "s"; + std::cerr << "[miniaudio] Input device: " << std::string(device.capture.name) << " (" << device.capture.internalSampleRate << "Hz, " + << "buffer size " << device.capture.internalPeriodSizeInFrames << " samples, " << device.capture.internalChannels << " channel" << s << ")" + << std::endl; + + for (int channel = 0; channel < device.capture.internalChannels; channel++) + { + SampleRingQueue *queue = new SampleRingQueue(device.capture.internalPeriodSizeInFrames * 8); + std::vector silence(device.capture.internalPeriodSizeInFrames, 0); + queue->extend(silence); + input_queue.push_back(queue); + } + + this->start(); +} + +void AudioIn::start() +{ + ma_result rv = ma_device_start(&device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error starting device"); + } +} + +void AudioIn::stop() +{ + ma_result rv = ma_device_stop(&device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error stopping device"); + } +} + +void AudioIn::destroy() +{ + while (is_processing) + { + } + + this->stop(); + shared_in = nullptr; +} + +void AudioIn::process(Buffer &out, int num_samples) +{ + for (int channel = 0; channel < this->num_output_channels; channel++) + { + for (int frame = 0; frame < num_samples; frame++) + { + out[channel][frame] = input_queue[channel]->pop(); + } + } +} + +} diff --git a/source/src/node/io/input/soundio.cpp b/source/src/node/io/input/soundio.cpp deleted file mode 100644 index fa529c5d..00000000 --- a/source/src/node/io/input/soundio.cpp +++ /dev/null @@ -1,177 +0,0 @@ -#include "signalflow/node/io/input/soundio.h" - -#ifdef HAVE_SOUNDIO - -#define SIGNALFLOW_AUDIO_IN_DEFAULT_BUFFER_SIZE 1024 - -#include "signalflow/core/graph.h" -#include "signalflow/node/io/output/soundio.h" - -#include -#include -#include -#include -#include - -static bool is_processing = false; - -namespace signalflow -{ -extern AudioIn_Abstract *shared_in; - -void read_callback(struct SoundIoInStream *instream, - int frame_count_min, 
int frame_count_max) -{ - is_processing = true; - - AudioIn_SoundIO *input = (AudioIn_SoundIO *) shared_in; - if (!shared_in) - return; - - struct SoundIoChannelArea *areas; - int frame_count = frame_count_max; - int frames_left = frame_count_max; - - /*-----------------------------------------------------------------------* - * On some drivers (eg Linux), we cannot write all samples at once. - * Keep reading as many as we can until we have cleared the buffer. - *-----------------------------------------------------------------------*/ - while (frames_left > 0) - { - int err; - - if ((err = soundio_instream_begin_read(instream, &areas, &frame_count))) - throw audio_io_exception("libsoundio error on begin read: " + std::string(soundio_strerror(err))); - - if (!input) - continue; - // throw std::runtime_error("libsoundio error: No global input created"); - - for (int frame = 0; frame < frame_count; frame++) - { - for (unsigned int channel = 0; channel < input->buffer->get_num_channels(); channel += 1) - { - float *ptr = reinterpret_cast(areas[channel].ptr + areas[channel].step * frame); - input->buffer->data[channel][input->write_pos] = *ptr; - } - input->write_pos = (input->write_pos + 1) % input->buffer->get_num_frames(); - } - - if ((err = soundio_instream_end_read(instream))) - throw audio_io_exception("libsoundio error on end read: " + std::string(soundio_strerror(err))); - - frames_left -= frame_count; - } - - is_processing = false; -} - -AudioIn_SoundIO::AudioIn_SoundIO(unsigned int num_channels) - : AudioIn_Abstract() -{ - // Allocate enough buffer for twice our block size, else - // we risk overwriting our input buffer from the audio in - // while it is still being read from. 
- // TODO: Bad hardcoded block size - - this->num_channels_requested = num_channels; - this->read_pos = 0; - this->write_pos = (int) (SIGNALFLOW_AUDIO_IN_DEFAULT_BUFFER_SIZE / 2); - this->name = "audioin_soundio"; - this->buffer = NULL; - - this->init(); -} - -AudioIn_SoundIO::~AudioIn_SoundIO() -{ - this->destroy(); -} - -int AudioIn_SoundIO::init() -{ - int err; - - this->soundio = ((AudioOut_SoundIO *) this->graph->get_output().get())->soundio; - - if (!this->soundio) - throw audio_io_exception("libsoundio init error: No output node found in graph (initialising input before output?)"); - - int default_in_device_index = soundio_default_input_device_index(this->soundio); - if (default_in_device_index < 0) - throw device_not_found_exception("No input devices found. More information: https://signalflow.dev/troubleshooting/device_not_found_exception/"); - - this->device = soundio_get_input_device(this->soundio, default_in_device_index); - if (!device) - throw audio_io_exception("libsoundio init error: out of memory."); - - this->instream = soundio_instream_create(device); - this->instream->format = SoundIoFormatFloat32NE; - this->instream->read_callback = read_callback; - this->instream->sample_rate = device->sample_rate_current; - this->instream->software_latency = 256.0 / this->instream->sample_rate; - - if ((err = soundio_instream_open(this->instream))) - { - throw audio_io_exception("libsoundio init error: unable to open device: " + std::string(soundio_strerror(err))); - } - - if ((err = soundio_instream_start(instream))) - { - throw audio_io_exception("libsoundio init error: unable to start device: " + std::string(soundio_strerror(err))); - } - - if (this->num_channels_requested > (unsigned int) this->instream->layout.channel_count) - { - throw audio_io_exception("AudioIn: Not enough input channels available (requested " + std::to_string(this->num_channels_requested) + ", available " + std::to_string(this->instream->layout.channel_count) + ")"); - } - 
this->set_channels(0, this->num_channels_requested); - this->buffer = new Buffer(this->num_output_channels, SIGNALFLOW_AUDIO_IN_DEFAULT_BUFFER_SIZE); - - int buffer_size = this->instream->software_latency * this->instream->sample_rate; - std::string s = num_output_channels == 1 ? "" : "s"; - - std::cerr << "Input device: " << device->name << " (" << this->instream->sample_rate << "Hz, " - << "buffer size " << buffer_size << " samples, " << num_output_channels << " channel" << s << ")" << std::endl; - - return 0; -} - -int AudioIn_SoundIO::start() -{ - return 0; -} - -int AudioIn_SoundIO::stop() -{ - return 0; -} - -int AudioIn_SoundIO::destroy() -{ - while (is_processing) - { - } - - shared_in = nullptr; - soundio_instream_destroy(this->instream); - soundio_device_unref(this->device); - - return 0; -} - -void AudioIn_SoundIO::process(Buffer &out, int num_frames) -{ - for (int frame = 0; frame < num_frames; frame++) - { - for (int channel = 0; channel < num_output_channels; channel++) - { - out[channel][frame] = this->buffer->data[channel][this->read_pos]; - } - this->read_pos = (this->read_pos + 1) % this->buffer->get_num_frames(); - } -} - -} - -#endif diff --git a/source/src/node/io/output/abstract.cpp b/source/src/node/io/output/abstract.cpp index e766ada9..2862ad44 100644 --- a/source/src/node/io/output/abstract.cpp +++ b/source/src/node/io/output/abstract.cpp @@ -7,7 +7,6 @@ namespace signalflow AudioOut_Abstract::AudioOut_Abstract() { this->name = "audioout"; - // do we need to set num_output channels to allocate the right number of output buffers? 
this->set_channels(2, 0); this->no_input_upmix = true; this->has_variable_inputs = true; @@ -105,6 +104,11 @@ void AudioOut_Abstract::replace_input(NodeRef node, NodeRef other) void AudioOut_Abstract::set_channels(int num_input_channels, int num_output_channels) { Node::set_channels(num_input_channels, num_output_channels); + + /*-------------------------------------------------------------------------------- + * Typically, Node objects allocate an output buffer per output channel. + * In this unique case, allocate an output buffer per input channel. + *--------------------------------------------------------------------------------*/ this->resize_output_buffers(num_input_channels); } diff --git a/source/src/node/io/output/ios.mm b/source/src/node/io/output/ios.mm deleted file mode 100644 index 9eb2f2a0..00000000 --- a/source/src/node/io/output/ios.mm +++ /dev/null @@ -1,63 +0,0 @@ -#include "signalflow/node/io/output/ios.h" - -#if TARGET_OS_IPHONE - -#include "signalflow/core/graph.h" - -#ifdef __OBJC__ -#include "AudioIOManager.h" -#endif - -#include -#include -#include -#include -#include - - -namespace libsignal -{ - -extern AudioGraph *shared_graph; - -void audio_callback(float **data, int num_channels, int num_frames) -{ - shared_graph->pull_input(num_frames); - - for (int frame = 0; frame < num_frames; frame++) - { - for (int channel = 0; channel < num_channels; channel++) - { - data[channel][frame] = shared_graph->get_output()->out[channel][frame]; - } - } -} - -AudioOut_iOS::AudioOut_iOS(AudioGraph *graph) : AudioOut_Abstract(graph) -{ - this->init(); -} - -int AudioOut_iOS::init() -{ - AudioIOManager *ioManager = [[AudioIOManager alloc] initWithCallback:audio_callback]; - [ioManager start]; - - return 0; -} - -int AudioOut_iOS::start() -{ - return 0; -} - -int AudioOut_iOS::close() -{ - return 0; -} - - -} // namespace libsignal - -#endif /* TARGET_OS_IPHONE */ - diff --git a/source/src/node/io/output/miniaudio.cpp 
b/source/src/node/io/output/miniaudio.cpp new file mode 100644 index 00000000..9b0c257b --- /dev/null +++ b/source/src/node/io/output/miniaudio.cpp @@ -0,0 +1,329 @@ +#include "signalflow/node/io/output/miniaudio.h" + +#define MINIAUDIO_IMPLEMENTATION +#include "signalflow/node/io/output/miniaudio-library.h" + +#include "signalflow/core/graph.h" + +#include +#include +#include +#include +#include +#include +#include + +static bool is_processing = false; + +namespace signalflow +{ + +extern AudioGraph *shared_graph; + +std::unordered_map possible_backend_names = { + { "wasapi", ma_backend_wasapi }, + { "dsound", ma_backend_dsound }, + { "ma_backend_winmm", ma_backend_winmm }, + { "coreaudio", ma_backend_coreaudio }, + { "sndio", ma_backend_sndio }, + { "audio4", ma_backend_audio4 }, + { "oss", ma_backend_oss }, + { "pulseaudio", ma_backend_pulseaudio }, + { "alsa", ma_backend_alsa }, + { "jack", ma_backend_jack }, + { "aaudio", ma_backend_aaudio }, + { "opensl", ma_backend_opensl }, + { "webaudio", ma_backend_webaudio }, + { "null", ma_backend_null }, +}; + +void data_callback(ma_device *ma_device_ptr, + void *ma_output_pointer, + const void *ma_input_pointer, + ma_uint32 ma_frame_count) +{ + is_processing = true; + int channel_count = ma_device_ptr->playback.channels; + + /*-----------------------------------------------------------------------* + * Do nothing if the shared_graph hasn't been initialized yet. 
+ *-----------------------------------------------------------------------*/ + if (!shared_graph || !shared_graph->get_output()) + { + return; + } + + float *output_pointer = (float *) ma_output_pointer; + + try + { + shared_graph->render(ma_frame_count); + } + catch (const std::exception &e) + { + std::cerr << "Exception in AudioGraph: " << e.what() << std::endl; + exit(1); + } + + NodeRef output = shared_graph->get_output(); + for (unsigned int frame = 0; frame < ma_frame_count; frame++) + { + for (int channel = 0; channel < channel_count; channel += 1) + { + output_pointer[channel_count * frame + channel] = output->out[channel][frame]; + } + } + + is_processing = false; +} + +AudioOut::AudioOut(const std::string &backend_name, + const std::string &device_name, + unsigned int sample_rate, + unsigned int buffer_size) + : AudioOut_Abstract() +{ + this->backend_name = backend_name; + this->device_name = device_name; + this->sample_rate = sample_rate; + this->buffer_size = buffer_size; + this->name = "audioout"; + + this->init(); +} + +void AudioOut::init() +{ + ma_device_config config = ma_device_config_init(ma_device_type_playback); + + ma_device_info *playback_devices; + ma_uint32 playback_device_count; + ma_result rv; + + AudioOut::init_context(&this->context, this->backend_name); + + rv = ma_context_get_devices(&this->context, + &playback_devices, + &playback_device_count, + NULL, + NULL); + int selected_device_index = -1; + if (!this->device_name.empty()) + { + for (unsigned int i = 0; i < playback_device_count; i++) + { + /*-----------------------------------------------------------------------* + * For ease of use, SignalFlow allows for partial matches so that only + * the first part of the device names needs to be specified. However, + * an errors is thrown if the match is ambiguous. 
+ *-----------------------------------------------------------------------*/ + if (strncmp(playback_devices[i].name, device_name.c_str(), strlen(device_name.c_str())) == 0) + { + if (selected_device_index != -1) + { + throw audio_io_exception("More than one audio device found matching name '" + device_name + "'"); + } + selected_device_index = i; + } + } + if (selected_device_index == -1) + { + throw audio_io_exception("No audio device found matching name '" + device_name + "'"); + } + + config.playback.pDeviceID = &playback_devices[selected_device_index].id; + } + + // Set to ma_format_unknown to use the device's native format. + config.playback.format = ma_format_f32; + + // Set to 0 to use the device's native channel count. + config.playback.channels = 0; + + // Set to 0 to use the device's native buffer size. + config.periodSizeInFrames = buffer_size; + + // Note that the underlying connection always uses the device's native sample rate. + // Setting values other than zero instantiates miniaudio's internal resampler. + config.sampleRate = this->sample_rate; + config.dataCallback = data_callback; + + // On Core Audio, let the application select a preferred sample rate. + config.coreaudio.allowNominalSampleRateChange = 1; + + rv = ma_device_init(NULL, &config, &device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error initialising output device"); + } + + this->set_channels(device.playback.internalChannels, 0); + + /*-------------------------------------------------------------------------------- + * If no specified sample rate was given, update AudioOut's sample rate to + * reflect the actual underlying sample rate. + * + * Otherwise, SignalFlow will use the user-specified sample rate, and miniaudio + * will perform sample-rate conversion. 
+ *-------------------------------------------------------------------------------*/ + if (this->sample_rate == 0) + { + this->sample_rate = device.playback.internalSampleRate; + } + + /*-------------------------------------------------------------------------------- + * Update AudioOut's buffer size to reflect the actual underlying buffer size. + *-------------------------------------------------------------------------------*/ + this->buffer_size = device.playback.internalPeriodSizeInFrames; + + std::string s = device.playback.internalChannels == 1 ? "" : "s"; + std::cerr << "[miniaudio] Output device: " << std::string(device.playback.name) << " (" << device.playback.internalSampleRate << "Hz, " + << "buffer size " << this->buffer_size << " samples, " << device.playback.internalChannels << " channel" << s << ")" + << std::endl; +} + +void AudioOut::start() +{ + ma_result rv = ma_device_start(&device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error starting output device"); + } + this->set_state(SIGNALFLOW_NODE_STATE_ACTIVE); +} + +void AudioOut::stop() +{ + ma_result rv = ma_device_stop(&device); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error stopping output device"); + } + this->set_state(SIGNALFLOW_NODE_STATE_STOPPED); +} + +void AudioOut::destroy() +{ + while (is_processing) + { + } + + ma_device_uninit(&device); +} + +// static +void AudioOut::init_context(ma_context *context, std::string backend_name) +{ + if (!backend_name.empty()) + { + if (possible_backend_names.find(backend_name) == possible_backend_names.end()) + { + throw audio_io_exception("miniaudio: Backend name not recognised: " + backend_name); + } + ma_backend backend = possible_backend_names[backend_name]; + + if (ma_context_init(&backend, 1, NULL, context) != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Error initialising context"); + } + } + else + { + if (ma_context_init(NULL, 0, NULL, context) != MA_SUCCESS) + { + throw 
audio_io_exception("miniaudio: Error initialising context"); + } + } +} + +std::list AudioOut::get_output_device_names(std::string backend_name) +{ + std::list device_names; + + ma_result rv; + ma_device_info *playback_devices; + ma_uint32 playback_device_count; + ma_context context; + + AudioOut::init_context(&context, backend_name); + + rv = ma_context_get_devices(&context, + &playback_devices, + &playback_device_count, + NULL, + NULL); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Failure querying audio devices"); + } + for (unsigned int i = 0; i < playback_device_count; i++) + { + device_names.push_back(std::string(playback_devices[i].name)); + } + + ma_context_uninit(&context); + + return device_names; +} + +std::list AudioOut::get_input_device_names(std::string backend_name) +{ + std::list device_names; + + ma_result rv; + ma_device_info *capture_devices; + ma_uint32 capture_device_count; + ma_context context; + + AudioOut::init_context(&context, backend_name); + + rv = ma_context_get_devices(&context, + NULL, + NULL, + &capture_devices, + &capture_device_count); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Failure querying audio devices"); + } + for (unsigned int i = 0; i < capture_device_count; i++) + { + device_names.push_back(std::string(capture_devices[i].name)); + } + + ma_context_uninit(&context); + + return device_names; +} + +std::list AudioOut::get_backend_names() +{ + std::list backend_names; + ma_backend enabled_backends[MA_BACKEND_COUNT]; + size_t enabled_backend_count; + ma_result rv; + + rv = ma_get_enabled_backends(enabled_backends, MA_BACKEND_COUNT, &enabled_backend_count); + if (rv != MA_SUCCESS) + { + throw audio_io_exception("miniaudio: Failure querying backend devices"); + } + for (unsigned int i = 0; i < enabled_backend_count; i++) + { + for (auto pair : possible_backend_names) + { + if (pair.second == enabled_backends[i]) + { + std::string backend_name = pair.first; + if (backend_name != 
"null") + { + backend_names.push_back(backend_name); + } + } + } + } + + return backend_names; +} + +} // namespace signalflow diff --git a/source/src/node/io/output/soundio.cpp b/source/src/node/io/output/soundio.cpp deleted file mode 100644 index 2dee13f3..00000000 --- a/source/src/node/io/output/soundio.cpp +++ /dev/null @@ -1,343 +0,0 @@ -#include "signalflow/node/io/output/soundio.h" - -#ifdef HAVE_SOUNDIO - -#include "signalflow/core/graph.h" - -#include -#include -#include -#include -#include -#include - -static bool is_processing = false; - -namespace signalflow -{ - -extern AudioGraph *shared_graph; - -void write_callback(struct SoundIoOutStream *outstream, int frame_count_min, int frame_count_max) -{ - is_processing = true; - - const struct SoundIoChannelLayout *layout = &outstream->layout; - struct SoundIoChannelArea *areas; - int frame_count = frame_count_max; - int frames_left = frame_count_max; - - /*-----------------------------------------------------------------------* - * Return if the shared_graph hasn't been initialized yet. - * (The libsoundio Pulse Audio driver calls the write_callback once - * on initialization, so this may happen legitimately.) - *-----------------------------------------------------------------------*/ - if (!shared_graph || !shared_graph->get_output()) - { - return; - } - - AudioOut_SoundIO *out_node = (AudioOut_SoundIO *) outstream->userdata; - - /*-----------------------------------------------------------------------* - * On some drivers (eg Linux), we cannot write all samples at once. - * Keep writing as many as we can until we have cleared the buffer. 
- *-----------------------------------------------------------------------*/ - while (frames_left > 0) - { - int err; - - if ((err = soundio_outstream_begin_write(outstream, &areas, &frame_count))) - { - throw audio_io_exception("libsoundio error on begin write: " + std::string(soundio_strerror(err))); - } - if (out_node->get_state() == SIGNALFLOW_NODE_STATE_ACTIVE) - { - try - { - shared_graph->render(frame_count); - } - catch (const std::exception &e) - { - std::cerr << "Exception in AudioGraph: " << e.what() << std::endl; - exit(1); - } - - NodeRef output = shared_graph->get_output(); - for (int frame = 0; frame < frame_count; frame++) - { - for (int channel = 0; channel < layout->channel_count; channel += 1) - { - if (outstream->format == SoundIoFormatFloat32NE) - { - float *ptr = reinterpret_cast(areas[channel].ptr + areas[channel].step * frame); - *ptr = output->out[channel][frame]; - /*-----------------------------------------------------------------------* - * Hard limiter. - *-----------------------------------------------------------------------*/ - if (*ptr > 1.0) - *ptr = 1.0; - if (*ptr < -1.0) - *ptr = -1.0; - } - else if (outstream->format == SoundIoFormatS16LE) - { - int16_t *ptr = reinterpret_cast(areas[channel].ptr + areas[channel].step * frame); - *ptr = (int16_t)(output->out[channel][frame] * 32768.0f); - } - } - } - } - else - { - for (int frame = 0; frame < frame_count; frame++) - { - for (int channel = 0; channel < layout->channel_count; channel += 1) - { - float *ptr = reinterpret_cast(areas[channel].ptr + areas[channel].step * frame); - *ptr = 0; - } - } - } - - if ((err = soundio_outstream_end_write(outstream))) - { - throw audio_io_exception("libsoundio error on end write: " + std::string(soundio_strerror(err))); - } - - frames_left -= frame_count; - } - - is_processing = false; -} - -int soundio_get_device_by_name(struct SoundIo *soundio, const char *name) -{ - int output_count = soundio_output_device_count(soundio); - for (int i = 0; i 
< output_count; i++) - { - struct SoundIoDevice *device = soundio_get_output_device(soundio, i); - if (strcmp(device->name, name) == 0) - { - return i; - } - } - std::cerr << "Couldn't find output device " << std::string(name) << std::endl; - - return -1; -} - -AudioOut_SoundIO::AudioOut_SoundIO(const std::string &backend_name, - const std::string &device_name, - unsigned int sample_rate, - unsigned int buffer_size) - : AudioOut_Abstract() -{ - this->backend_name = backend_name; - this->device_name = device_name; - this->sample_rate = sample_rate; - this->buffer_size = buffer_size; - this->name = "audioout-soundio"; - - this->init(); -} - -int AudioOut_SoundIO::init() -{ - int err; - - this->soundio = soundio_create(); - - if (!this->soundio) - throw audio_io_exception("libsoundio error: out of memory"); - - if (!this->backend_name.empty()) - { - // Backend name is specified; connect to the given backend - std::vector possible_backend_names = { - "none", - "jack", - "pulseaudio", - "alsa", - "coreaudio", - "wasapi", - "dummy" - }; - - auto location = std::find(possible_backend_names.begin(), - possible_backend_names.end(), - this->backend_name); - - if (location == possible_backend_names.end()) - { - throw audio_io_exception("libsoundio error: could not find backend name " + this->backend_name); - } - enum SoundIoBackend backend_index = (enum SoundIoBackend)(location - possible_backend_names.begin()); - if ((err = soundio_connect_backend(this->soundio, backend_index))) - throw audio_io_exception("libsoundio error: could not connect (" + std::string(soundio_strerror(err)) + ")"); - } - else - { - if ((err = soundio_connect(this->soundio))) - throw audio_io_exception("libsoundio error: could not connect (" + std::string(soundio_strerror(err)) + ")"); - } - - soundio_flush_events(this->soundio); - - int default_out_device_index = soundio_default_output_device_index(this->soundio); - if (default_out_device_index < 0) - throw device_not_found_exception("No audio devices 
were found. More information: https://signalflow.dev/troubleshooting/device_not_found_exception/"); - - if (!this->device_name.empty()) - { - int index = soundio_get_device_by_name(this->soundio, this->device_name.c_str()); - if (index == -1) - { - throw device_not_found_exception("Could not find device name: " + this->device_name + ". More information: https://signalflow.dev/troubleshooting/device_not_found_exception/"); - } - this->device = soundio_get_output_device(this->soundio, index); - } - else - { - this->device = soundio_get_output_device(this->soundio, default_out_device_index); - } - - if (!device) - throw audio_io_exception("libsoundio error: out of memory."); - - this->outstream = soundio_outstream_create(device); - if (soundio_device_supports_format(device, SoundIoFormatFloat32NE)) - { - this->outstream->format = SoundIoFormatFloat32NE; - } - else if (soundio_device_supports_format(device, SoundIoFormatS16LE)) - { - this->outstream->format = SoundIoFormatS16LE; - } - else - { - /*-----------------------------------------------------------------------* - * SignalFlow currently only supports float32 sample output - *-----------------------------------------------------------------------*/ - throw audio_io_exception("libsoundio error: Output device does not support float32 or int16le samples"); - } - this->outstream->write_callback = write_callback; - if (!this->sample_rate) - { - this->sample_rate = this->device->sample_rate_current; - } - this->outstream->sample_rate = this->sample_rate; - this->outstream->software_latency = (double) this->buffer_size / this->outstream->sample_rate; - this->outstream->userdata = (void *) this; - // With a device with multiple layouts, use the first. - // To check: is this always the layout with the most channels? 
- this->outstream->layout = device->layouts[0]; - - if ((err = soundio_outstream_open(this->outstream))) - { - throw audio_io_exception("libsoundio error: unable to open device: " + std::string(soundio_strerror(err))); - } - - if (this->outstream->layout_error) - { - /*-------------------------------------------------------------------------------- - * This should not be a fatal error (see example in libsoundio sio_sine.c). - * Should just generate a warning instead. - * Experienced on Raspberry Pi 4 with raspi-audio interface. - *-------------------------------------------------------------------------------*/ - std::cerr << "libsoundio warning: unable to set channel layout: " - << std::string(soundio_strerror(this->outstream->layout_error)) << std::endl; - } - - this->num_output_channels = this->outstream->layout.channel_count; - - // update based on the actual buffer size - this->buffer_size = (int) round(this->outstream->software_latency * this->outstream->sample_rate); - - std::string s = num_output_channels == 1 ? "" : "s"; - - std::cerr << "Output device: " << device->name << " (" << sample_rate << "Hz, " - << "buffer size " << buffer_size << " samples, " << num_output_channels << " channel" << s << ")" - << std::endl; - - // do we need to set num_output channels to allocate the right number of output buffers? 
- this->set_channels(num_output_channels, 0); - - return 0; -} - -int AudioOut_SoundIO::start() -{ - int err; - if ((err = soundio_outstream_start(outstream))) - throw audio_io_exception("libsoundio error: unable to start device: " + std::string(soundio_strerror(err))); - this->set_state(SIGNALFLOW_NODE_STATE_ACTIVE); - - return 0; -} - -int AudioOut_SoundIO::stop() -{ - this->set_state(SIGNALFLOW_NODE_STATE_STOPPED); - return 0; -} - -int AudioOut_SoundIO::destroy() -{ - while (is_processing) - { - } - - soundio_outstream_destroy(this->outstream); - soundio_device_unref(this->device); - soundio_destroy(this->soundio); - - return 0; -} - -std::list AudioOut_SoundIO::get_output_device_names() -{ - int output_count = soundio_output_device_count(this->soundio); - - std::list device_names; - - for (int i = 0; i < output_count; i++) - { - struct SoundIoDevice *device = soundio_get_output_device(soundio, i); - device_names.push_back(std::string(device->name)); - } - - return device_names; -} - -int AudioOut_SoundIO::get_default_output_device_index() -{ - unsigned int default_output = soundio_default_output_device_index(this->soundio); - return default_output; -} - -std::list AudioOut_SoundIO::get_output_backend_names() -{ - std::list backend_names; - std::vector possible_backend_names = { - "none", - "jack", - "pulseaudio", - "alsa", - "coreaudio", - "wasapi", - "dummy" - }; - for (int i = 0; i < soundio_backend_count(this->soundio); i++) - { - int backend_index = soundio_get_backend(this->soundio, i); - std::string backend_name = possible_backend_names[backend_index]; - backend_names.push_back(backend_name); - } - - return backend_names; -} - -} // namespace signalflow - -#endif diff --git a/source/src/node/processors/delays/comb.cpp b/source/src/node/processors/delays/comb.cpp index 1f7cde05..91f4d639 100644 --- a/source/src/node/processors/delays/comb.cpp +++ b/source/src/node/processors/delays/comb.cpp @@ -46,7 +46,7 @@ void CombDelay::process(Buffer &out, int 
num_frames) signalflow_audio_thread_error("CombDelay: Delay time exceeds maximum. Reduce the delay_time, or increase max_delay_time."); } - sample rv = input->out[channel][frame] + (feedback * buffers[channel]->get(-offset)); + sample rv = input->out[channel][frame] + (feedback * buffers[channel]->get(-offset + 1)); out[channel][frame] = rv; buffers[channel]->append(rv); } diff --git a/source/src/node/processors/delays/onetap.cpp b/source/src/node/processors/delays/onetap.cpp index b7826853..a9108824 100644 --- a/source/src/node/processors/delays/onetap.cpp +++ b/source/src/node/processors/delays/onetap.cpp @@ -42,7 +42,7 @@ void OneTapDelay::process(Buffer &out, int num_frames) * through the current frame immediately *-------------------------------------------------------------------------------*/ buffers[channel]->append(this->input->out[channel][frame]); - out[channel][frame] = buffers[channel]->get(-offset - 1); + out[channel][frame] = buffers[channel]->get(-offset); } } } diff --git a/source/src/python/buffer.cpp b/source/src/python/buffer.cpp index e2ae6c54..f887294f 100644 --- a/source/src/python/buffer.cpp +++ b/source/src/python/buffer.cpp @@ -2,6 +2,25 @@ void init_python_buffer(py::module &m) { + py::class_(m, "SampleRingBuffer", "A circular buffer of audio samples with a single read/write head") + .def(py::init(), "capacity"_a, R"pbdoc(Create a new ring buffer)pbdoc") + .def("append", &SampleRingBuffer::append, R"pbdoc(Append an item to the ring buffer.)pbdoc") + .def( + "extend", [](SampleRingBuffer &buf, std::vector vec) { buf.extend(vec); }, + R"pbdoc(Extend the ring buffer.)pbdoc") + .def("get", &SampleRingBuffer::get, R"pbdoc(Retrieve an item from the ring buffer, with offset relative to the read head.)pbdoc") + .def("get_capacity", &SampleRingBuffer::get_capacity, R"pbdoc(Returns the capacity of the ring buffer.)pbdoc"); + + py::class_(m, "SampleRingQueue", "A circular queue of audio samples with separate read/write heads") + .def(py::init(), 
"capacity"_a, R"pbdoc(Create a new ring queue)pbdoc") + .def("append", &SampleRingQueue::append, R"pbdoc(Append an item to the ring queue.)pbdoc") + .def( + "extend", [](SampleRingQueue &buf, std::vector vec) { buf.extend(vec); }, + R"pbdoc(Extend the ring queue.)pbdoc") + .def("pop", &SampleRingQueue::pop, R"pbdoc(Pop an item from the ring queue.)pbdoc") + .def("get_capacity", &SampleRingQueue::get_capacity, R"pbdoc(Returns the capacity of the ring queue.)pbdoc") + .def("get_filled_count", &SampleRingQueue::get_filled_count, R"pbdoc(Returns the number of items filled in the ring queue.)pbdoc"); + /*-------------------------------------------------------------------------------- * Buffer *-------------------------------------------------------------------------------*/ diff --git a/source/src/python/graph.cpp b/source/src/python/graph.cpp index a6c82393..b08cb540 100644 --- a/source/src/python/graph.cpp +++ b/source/src/python/graph.cpp @@ -1,19 +1,47 @@ #include "signalflow/python/python.h" +void graph_created_warning() +{ + std::cerr << "AudioGraph: The global audio graph has already been created. To create a new graph, call .destroy() first." << std::endl; +} + void init_python_graph(py::module &m) { /*-------------------------------------------------------------------------------- - * Graph + * AudioGraph. + * + * This class is a singleton, which is handled by this block of constructors. + * If a shared_graph already exists, return it, with a warning. 
*-------------------------------------------------------------------------------*/ - py::class_(m, "AudioGraph", "The global audio signal processing graph") - .def(py::init(), "config"_a = nullptr, "output_device"_a = nullptr, - "start"_a = true) - .def(py::init(), "config"_a = nullptr, "output_device"_a = "", - "start"_a = true) - .def(py::init(), "config_name"_a = nullptr, "output_device"_a = nullptr, - "start"_a = true) - .def(py::init(), "config_name"_a = nullptr, "output_device"_a = "", - "start"_a = true) + py::class_>(m, "AudioGraph", "The global audio signal processing graph") + .def(py::init<>([](AudioGraphConfig *config, NodeRef output_device, bool start) { + AudioGraph *graph = AudioGraph::get_shared_graph(); + if (graph) + graph_created_warning(); + return graph ? graph : new AudioGraph(config, output_device, start); + }), + "config"_a = nullptr, "output_device"_a = nullptr, "start"_a = true) + .def(py::init<>([](AudioGraphConfig *config, std::string output_device, bool start) { + AudioGraph *graph = AudioGraph::get_shared_graph(); + if (graph) + graph_created_warning(); + return graph ? graph : new AudioGraph(config, output_device, start); + }), + "config"_a = nullptr, "output_device"_a = nullptr, "start"_a = true) + .def(py::init<>([](std::string config_name, NodeRef output_device, bool start) { + AudioGraph *graph = AudioGraph::get_shared_graph(); + if (graph) + graph_created_warning(); + return graph ? graph : new AudioGraph(config_name, output_device, start); + }), + "config"_a = nullptr, "output_device"_a = nullptr, "start"_a = true) + .def(py::init<>([](std::string config_name, NodeRef output_device, bool start) { + AudioGraph *graph = AudioGraph::get_shared_graph(); + if (graph) + graph_created_warning(); + return graph ? 
graph : new AudioGraph(config_name, output_device, start); + }), + "config"_a = nullptr, "output_device"_a = nullptr, "start"_a = true) .def_static("get_shared_graph", &AudioGraph::get_shared_graph) /*-------------------------------------------------------------------------------- @@ -36,10 +64,6 @@ void init_python_graph(py::module &m) .def_property_readonly( "outputs", &AudioGraph::get_outputs, R"pbdoc(int: Get the list of Node objects currently connected to the graph's output.)pbdoc") - .def_property_readonly("output_device_names", &AudioGraph::get_output_device_names, - R"pbdoc(list[str]: List the available output device names.)pbdoc") - .def_property_readonly("output_backend_names", &AudioGraph::get_output_backend_names, - R"pbdoc(list[str]: List the available output backend names.)pbdoc") .def_property_readonly( "status", &AudioGraph::get_status, R"pbdoc(int: Get a text representation of the AudioGraph's status (node count, patch count, CPU usage).)pbdoc") @@ -50,41 +74,46 @@ void init_python_graph(py::module &m) R"pbdoc(int: Get/set the graph's sample rate.)pbdoc") .def_property("output", &AudioGraph::get_output, &AudioGraph::set_output) + /*-------------------------------------------------------------------------------- + * Static methods + *-------------------------------------------------------------------------------*/ + .def_static( + "get_output_device_names", [](py::object backend_name) { + std::string backend_name_str = backend_name.is_none() ? "" : backend_name.cast(); + return AudioGraph::get_output_device_names(backend_name_str); + }, + "backend_name"_a = "", R"pbdoc(list[str]: List the available output device names.)pbdoc") + .def_static( + "get_input_device_names", [](py::object backend_name) { + std::string backend_name_str = backend_name.is_none() ? 
"" : backend_name.cast(); + return AudioGraph::get_input_device_names(backend_name_str); + }, + "backend_name"_a = "", R"pbdoc(list[str]: List the available input device names.)pbdoc") + .def_static("get_backend_names", &AudioGraph::get_backend_names, R"pbdoc(list[str]: List the available audio backend names.)pbdoc") + /*-------------------------------------------------------------------------------- * Methods *-------------------------------------------------------------------------------*/ .def("start", &AudioGraph::start, R"pbdoc(Start the AudioGraph processing.)pbdoc") .def( "stop", [](AudioGraph &graph) { graph.stop(); }, R"pbdoc(Stop the AudioGraph processing.)pbdoc") - .def("clear", &AudioGraph::clear, - R"pbdoc(Remove all Node and Patches objects currently in the processing graph.)pbdoc") - .def("destroy", &AudioGraph::destroy, - R"pbdoc(Clear the AudioGraph and deallocate its memory, ready to create a new AudioGraph.)pbdoc") + .def("clear", &AudioGraph::clear, R"pbdoc(Remove all Node and Patches objects currently in the processing graph.)pbdoc") + .def("destroy", &AudioGraph::destroy, R"pbdoc(Clear the AudioGraph and deallocate its memory, ready to create a new AudioGraph.)pbdoc") .def( - "show_structure", [](AudioGraph &graph) { graph.show_structure(); }, - R"pbdoc(Print the AudioGraph's node connectivity structure to stdout.)pbdoc") - .def( - "poll", [](AudioGraph &graph, float frequency) { graph.poll(frequency); }, "frequency"_a, - R"pbdoc(Begin polling the AudioGraph's status every `frequency` seconds, printing it to stdout.)pbdoc") + "show_structure", [](AudioGraph &graph) { graph.show_structure(); }, R"pbdoc(Print the AudioGraph's node connectivity structure to stdout.)pbdoc") .def( - "poll", [](AudioGraph &graph) { graph.poll(); }, - R"pbdoc(Begin polling the AudioGraph's status every 1.0 seconds, printing it to stdout.)pbdoc") + "poll", [](AudioGraph &graph, float frequency) { graph.poll(frequency); }, "frequency"_a, R"pbdoc(Begin polling the 
AudioGraph's status every `frequency` seconds, printing it to stdout.)pbdoc") .def( - "render", [](AudioGraph &graph) { graph.render(); }, - R"pbdoc(Render a single block (of `output_buffer_size` frames) of the AudioGraph's output.)pbdoc") + "poll", [](AudioGraph &graph) { graph.poll(); }, R"pbdoc(Begin polling the AudioGraph's status every 1.0 seconds, printing it to stdout.)pbdoc") .def( - "render", [](AudioGraph &graph, int num_frames) { graph.render(num_frames); }, "num_frames"_a, - R"pbdoc(Render a specified number of samples of the AudioGraph's output.)pbdoc") + "render", [](AudioGraph &graph) { graph.render(); }, R"pbdoc(Render a single block (of `output_buffer_size` frames) of the AudioGraph's output.)pbdoc") .def( - "render_to_buffer", &AudioGraph::render_to_buffer, "buffer"_a, - R"pbdoc(Render the graph's output to the specified buffer, for the same number of frames as the buffer's length.)pbdoc") + "render", [](AudioGraph &graph, int num_frames) { graph.render(num_frames); }, "num_frames"_a, R"pbdoc(Render a specified number of samples of the AudioGraph's output.)pbdoc") + .def("render_to_buffer", &AudioGraph::render_to_buffer, "buffer"_a, R"pbdoc(Render the graph's output to the specified buffer, for the same number of frames as the buffer's length.)pbdoc") + .def("render_to_new_buffer", &AudioGraph::render_to_new_buffer, "num_frames"_a, R"pbdoc(Render the graph's output for the specified number of frames, and return the resultant buffer.)pbdoc") .def( - "render_to_new_buffer", &AudioGraph::render_to_new_buffer, "num_frames"_a, - R"pbdoc(Render the graph's output for the specified number of frames, and return the resultant buffer.)pbdoc") - .def( - "render_subgraph", - [](AudioGraph &graph, NodeRef node, int num_frames, bool reset) { + "render_subgraph", [](AudioGraph &graph, NodeRef node, int num_frames, bool reset) { if (reset) { graph.reset_subgraph(node); @@ -98,61 +127,47 @@ void init_python_graph(py::module &m) graph.render_subgraph(node); } }, - 
"node"_a, "num_frames"_a = 0, "reset"_a = false, - R"pbdoc(Recursively render the nodes in the tree starting at `node`. If `reset` is true, call `reset_subgraph` first.)pbdoc") - .def("reset_subgraph", &AudioGraph::reset_subgraph, - R"pbdoc(Reset the `played` status of nodes in the tree starting at `node`.)pbdoc") - .def( - "play", [](AudioGraph &graph, NodeRef node) { graph.play(node); }, "node"_a, - R"pbdoc(Begin playback of `node` (by connecting it to the output of the graph))pbdoc") + "node"_a, "num_frames"_a = 0, "reset"_a = false, R"pbdoc(Recursively render the nodes in the tree starting at `node`. If `reset` is true, call `reset_subgraph` first.)pbdoc") + .def("reset_subgraph", &AudioGraph::reset_subgraph, R"pbdoc(Reset the `played` status of nodes in the tree starting at `node`.)pbdoc") .def( - "play", [](AudioGraph &graph, PatchRef patch) { graph.play(patch); }, "patch"_a, - R"pbdoc(Begin playback of `patch` (by connecting it to the output of the graph))pbdoc") + "play", [](AudioGraph &graph, NodeRef node) { graph.play(node); }, "node"_a, R"pbdoc(Begin playback of `node` (by connecting it to the output of the graph))pbdoc") .def( - "stop", [](AudioGraph &graph, NodeRef node) { graph.stop(node); }, "node"_a, - R"pbdoc(Stop playback of `node` (by disconnecting it from the output of the graph))pbdoc") + "play", [](AudioGraph &graph, PatchRef patch) { graph.play(patch); }, "patch"_a, R"pbdoc(Begin playback of `patch` (by connecting it to the output of the graph))pbdoc") .def( - "stop", [](AudioGraph &graph, PatchRef patch) { graph.stop(patch); }, "patch"_a, - R"pbdoc(Stop playback of `patch]` (by disconnecting it from the output of the graph))pbdoc") + "stop", [](AudioGraph &graph, NodeRef node) { graph.stop(node); }, "node"_a, R"pbdoc(Stop playback of `node` (by disconnecting it from the output of the graph))pbdoc") .def( - "replace", [](AudioGraph &graph, NodeRef node, NodeRef other) { graph.replace(node, other); }, "node"_a, - "other"_a, R"pbdoc(Replace 
`node` in the graph's output with `other`.)pbdoc") + "stop", [](AudioGraph &graph, PatchRef patch) { graph.stop(patch); }, "patch"_a, R"pbdoc(Stop playback of `patch]` (by disconnecting it from the output of the graph))pbdoc") .def( - "add_node", &AudioGraph::add_node, "node"_a, - R"pbdoc(Add `node` to the graph so that it is processed in future blocks, without connecting it to the graph's output. Useful for non-playback nodes (e.g. BufferRecorder).)pbdoc") - .def("remove_node", &AudioGraph::remove_node, "node"_a, - R"pbdoc(Remove a `node` that has previously been added with `add_node()`)pbdoc") + "replace", [](AudioGraph &graph, NodeRef node, NodeRef other) { graph.replace(node, other); }, "node"_a, "other"_a, R"pbdoc(Replace `node` in the graph's output with `other`.)pbdoc") + .def("add_node", &AudioGraph::add_node, "node"_a, R"pbdoc(Add `node` to the graph so that it is processed in future blocks, without connecting it to the graph's output. Useful for non-playback nodes (e.g. BufferRecorder).)pbdoc") + .def("remove_node", &AudioGraph::remove_node, "node"_a, R"pbdoc(Remove a `node` that has previously been added with `add_node()`)pbdoc") - .def( - "start_recording", &AudioGraph::start_recording, "filename"_a = "", "num_channels"_a = 0, - R"pbdoc(Start recording the graph's output to an audio file, with the same number of channels as the AudioGraph or `num_channels` if specified.)pbdoc") + .def("start_recording", &AudioGraph::start_recording, "filename"_a = "", "num_channels"_a = 0, R"pbdoc(Start recording the graph's output to an audio file, with the same number of channels as the AudioGraph or `num_channels` if specified.)pbdoc") .def("stop_recording", &AudioGraph::stop_recording, R"pbdoc(Stop recording the graph's output.)pbdoc") - .def("wait", - [](AudioGraph &graph) { - /*-------------------------------------------------------------------------------- - * Interruptible wait - * 
https://pybind11.readthedocs.io/en/stable/faq.html#how-can-i-properly-handle-ctrl-c-in-long-running-functions - *-------------------------------------------------------------------------------*/ - for (;;) - { - if (PyErr_CheckSignals() != 0) - throw py::error_already_set(); - /*-------------------------------------------------------------------------------- - * Release the GIL so that other threads can do processing. - *-------------------------------------------------------------------------------*/ - py::gil_scoped_release release; + .def("wait", [](AudioGraph &graph) { + /*-------------------------------------------------------------------------------- + * Interruptible wait + * https://pybind11.readthedocs.io/en/stable/faq.html#how-can-i-properly-handle-ctrl-c-in-long-running-functions + *-------------------------------------------------------------------------------*/ + for (;;) + { + if (PyErr_CheckSignals() != 0) + throw py::error_already_set(); + /*-------------------------------------------------------------------------------- + * Release the GIL so that other threads can do processing. 
+ *-------------------------------------------------------------------------------*/ + py::gil_scoped_release release; + + signalflow_msleep(5); - if (graph.has_raised_audio_thread_error()) - break; - } - }) + if (graph.has_raised_audio_thread_error()) + break; + } + }) .def( - "wait", - [](AudioGraph &graph, float timeout_seconds) { - timeval tv; - gettimeofday(&tv, NULL); - double t0 = tv.tv_sec + tv.tv_usec / 1000000.0; + "wait", [](AudioGraph &graph, float timeout_seconds) { + double t0 = signalflow_timestamp(); for (;;) { @@ -161,8 +176,7 @@ void init_python_graph(py::module &m) if (timeout_seconds) { - gettimeofday(&tv, NULL); - double t1 = tv.tv_sec + tv.tv_usec / 1000000.0; + double t1 = signalflow_timestamp(); if (t1 - t0 > timeout_seconds) { break; @@ -174,6 +188,8 @@ void init_python_graph(py::module &m) *-------------------------------------------------------------------------------*/ py::gil_scoped_release release; + signalflow_msleep(5); + if (graph.has_raised_audio_thread_error()) break; } diff --git a/source/src/python/nodes.cpp b/source/src/python/nodes.cpp index 4bc0ff07..8655e398 100644 --- a/source/src/python/nodes.cpp +++ b/source/src/python/nodes.cpp @@ -16,461 +16,461 @@ void init_python_nodes(py::module &m) py::class_>(m, "AudioOut", "Audio output") .def(py::init(), "backend_name"_a = "", "device_name"_a = "", "sample_rate"_a = 0, "buffer_size"_a = 0); - py::class_>(m, "CrossCorrelate", "Outputs the cross-correlation of the input signal with the given buffer. 
If hop_size is zero, calculates the cross-correlation every sample.") + py::class_>(m, "CrossCorrelate", "Outputs the cross-correlation of the input signal with the given buffer.\nIf hop_size is zero, calculates the cross-correlation every sample.\n") .def(py::init(), "input"_a = nullptr, "buffer"_a = nullptr, "hop_size"_a = 0); - py::class_>(m, "NearestNeighbour", "Nearest Neighbour.") + py::class_>(m, "NearestNeighbour", "Nearest Neighbour.\n") .def(py::init(), "buffer"_a = nullptr, "target"_a = 0.0); - py::class_>(m, "OnsetDetector", "Simple time-domain onset detector. Outputs an impulse when an onset is detected in the input. Maintains short-time and long-time averages. An onset is registered when the short-time average is threshold x the long-time average. min_interval is the minimum interval between onsets, in seconds.") + py::class_>(m, "OnsetDetector", "Simple time-domain onset detector. Outputs an impulse when an onset is detected\nin the input. Maintains short-time and long-time averages. 
An onset is registered\nwhen the short-time average is threshold x the long-time average.\nmin_interval is the minimum interval between onsets, in seconds.\n") .def(py::init(), "input"_a = 0.0, "threshold"_a = 2.0, "min_interval"_a = 0.1); #ifdef HAVE_VAMP - py::class_>(m, "VampAnalysis", "Feature extraction using the Vamp plugin toolkit.") + py::class_>(m, "VampAnalysis", "Feature extraction using the Vamp plugin toolkit.\n") .def(py::init(), "input"_a = 0.0, "plugin_id"_a = "vamp-example-plugins:spectralcentroid:linearcentroid") .def("list_plugins", &VampAnalysis::list_plugins, R"pbdoc(list[str]: List the available plugin names.)pbdoc"); #endif - py::class_>(m, "BeatCutter", "Cuts a buffer into segment_count segments, and stutters/jumps with the given probabilities.") + py::class_>(m, "BeatCutter", "Cuts a buffer into segment_count segments, and stutters/jumps with\nthe given probabilities.\n") .def(py::init(), "buffer"_a = nullptr, "segment_count"_a = 8, "stutter_probability"_a = 0.0, "stutter_count"_a = 1, "jump_probability"_a = 0.0, "duty_cycle"_a = 1.0, "rate"_a = 1.0, "segment_rate"_a = 1.0); - py::class_>(m, "BufferLooper", "Read and write from a buffer concurrently, with controllable overdub.") + py::class_>(m, "BufferLooper", "Read and write from a buffer concurrently, with controllable overdub.\n") .def(py::init(), "buffer"_a = nullptr, "input"_a = 0.0, "feedback"_a = 0.0, "loop_playback"_a = 0, "loop_record"_a = 0, "start_time"_a = nullptr, "end_time"_a = nullptr, "looper_level"_a = 1.0, "playthrough_level"_a = 0.0); - py::class_>(m, "BufferPlayer", "Plays the contents of the given buffer. `start_time`/`end_time` are in seconds. When a `clock` signal is received, rewinds to the `start_time`. Set `clock` to `0` to prevent the buffer from being triggered immediately.") + py::class_>(m, "BufferPlayer", "Plays the contents of the given buffer. 
`start_time`/`end_time` are in seconds.\nWhen a `clock` signal is received, rewinds to the `start_time`.\n\nSet `clock` to `0` to prevent the buffer from being triggered immediately.\n") .def(py::init(), "buffer"_a = nullptr, "rate"_a = 1.0, "loop"_a = 0, "start_time"_a = nullptr, "end_time"_a = nullptr, "clock"_a = nullptr); - py::class_>(m, "BufferRecorder", "Records the input to a buffer. feedback controls overdub.") + py::class_>(m, "BufferRecorder", "Records the input to a buffer. feedback controls overdub.\n") .def(py::init(), "buffer"_a = nullptr, "input"_a = 0.0, "feedback"_a = 0.0, "loop"_a = false); - py::class_>(m, "FeedbackBufferReader", "Counterpart to FeedbackBufferWriter.") + py::class_>(m, "FeedbackBufferReader", "Counterpart to FeedbackBufferWriter.\n") .def(py::init(), "buffer"_a = nullptr); - py::class_>(m, "FeedbackBufferWriter", "Counterpart to FeedbackBufferReader.") + py::class_>(m, "FeedbackBufferWriter", "Counterpart to FeedbackBufferReader.\n") .def(py::init(), "buffer"_a = nullptr, "input"_a = 0.0, "delay_time"_a = 0.1); - py::class_>(m, "HistoryBufferWriter", "Writes a rolling history buffer of a given duration. At a given moment in time, the contents of the buffer will be equal to the past N seconds of the audio generated by `input`. This is useful for (e.g.) a visual display of a rolling waveform or LFO window. `downsample` can be used to downsample the input; for example, with `downsample` of 10, a 1-second buffer can be used to display 10 seconds of historical audio.") + py::class_>(m, "HistoryBufferWriter", "Writes a rolling history buffer of a given duration. At a given moment in time,\nthe contents of the buffer will be equal to the past N seconds of the audio\ngenerated by `input`. This is useful for (e.g.) a visual display of a rolling\nwaveform or LFO window. 
`downsample` can be used to downsample the input;\nfor example, with `downsample` of 10, a 1-second buffer can be used to display\n10 seconds of historical audio.\n") .def(py::init(), "buffer"_a = nullptr, "input"_a = 0.0, "downsample"_a = 1); - py::class_>(m, "SegmentPlayer", "Trigger segments of `buffer` at the given list of `onsets` positions, in seconds. `index` determines the index of the onset to play back at, which can also be passed as an argument to trigger(). `rate` determines the playback rate, and `clock` can be used to retrigger based on the output of another Node. If `continue_after_segment` is non-zero, playback will continue after the subsequent onset.") + py::class_>(m, "SegmentPlayer", "Trigger segments of `buffer` at the given list of `onsets` positions, in\nseconds. `index` determines the index of the onset to play back at, which can\nalso be passed as an argument to trigger(). `rate` determines the playback rate,\nand `clock` can be used to retrigger based on the output of another Node.\nIf `continue_after_segment` is non-zero, playback will continue after the\nsubsequent onset.\n") .def(py::init, NodeRef, NodeRef, NodeRef, NodeRef, NodeRef>(), "buffer"_a = nullptr, "onsets"_a = 0, "index"_a = nullptr, "rate"_a = 1.0, "start_offset"_a = nullptr, "clock"_a = nullptr, "continue_after_segment"_a = 0); - py::class_>(m, "SegmentedGranulator", "Segmented Granulator.") + py::class_>(m, "SegmentedGranulator", "Segmented Granulator.\n") .def(py::init, std::vector, NodeRef, NodeRef, NodeRef, NodeRef>(), "buffer"_a = nullptr, "onset_times"_a = 0, "durations"_a = 0, "index"_a = 0.0, "rate"_a = 1.0, "clock"_a = 0, "max_grains"_a = 2048); - py::class_>(m, "Granulator", "Granulator. Generates a grain from the given buffer each time a trigger is received on the `clock` input. Each new grain uses the given `duration`, `amplitude`, `pan` and `rate` values presented at each input at the moment the grain is created. The input buffer can be mono or stereo. 
If `wrap` is true, grain playback can wrap around the end/start of the buffer.") + py::class_>(m, "Granulator", "Granulator. Generates a grain from the given buffer each time a trigger is\nreceived on the `clock` input. Each new grain uses the given `duration`,\n`amplitude`, `pan` and `rate` values presented at each input at the moment the\ngrain is created. The input buffer can be mono or stereo. If `wrap` is true,\ngrain playback can wrap around the end/start of the buffer.\n") .def(py::init(), "buffer"_a = nullptr, "clock"_a = 0, "pos"_a = 0, "duration"_a = 0.1, "amplitude"_a = 1.0, "pan"_a = 0.0, "rate"_a = 1.0, "max_grains"_a = 2048, "wrap"_a = false); #ifdef __APPLE__ - py::class_>(m, "MouseX", "Outputs the normalised cursor X position, from 0 to 1. Currently only supported on macOS.") + py::class_>(m, "MouseX", "Outputs the normalised cursor X position, from 0 to 1.\nCurrently only supported on macOS.\n") .def(py::init<>()); #endif #ifdef __APPLE__ - py::class_>(m, "MouseY", "Outputs the normalised cursor Y position, from 0 to 1. Currently only supported on macOS.") + py::class_>(m, "MouseY", "Outputs the normalised cursor Y position, from 0 to 1.\nCurrently only supported on macOS.\n") .def(py::init<>()); #endif #ifdef __APPLE__ - py::class_>(m, "MouseDown", "Outputs 1 if the left mouse button is down, 0 otherwise. Currently only supported on macOS.") + py::class_>(m, "MouseDown", "Outputs 1 if the left mouse button is down, 0 otherwise.\nCurrently only supported on macOS.\n") .def(py::init(), "button_index"_a = 0); #endif - py::class_>(m, "Accumulator", "Accumulator with decay.") + py::class_>(m, "Accumulator", "Accumulator with decay.\n") .def(py::init(), "strike_force"_a = 0.5, "decay_coefficient"_a = 0.9999, "trigger"_a = nullptr); - py::class_>(m, "ADSREnvelope", "Attack-decay-sustain-release envelope. 
Sustain portion is held until gate is zero.") + py::class_>(m, "ADSREnvelope", "Attack-decay-sustain-release envelope.\nSustain portion is held until gate is zero.\n") .def(py::init(), "attack"_a = 0.1, "decay"_a = 0.1, "sustain"_a = 0.5, "release"_a = 0.1, "gate"_a = 0); - py::class_>(m, "ASREnvelope", "Attack-sustain-release envelope.") + py::class_>(m, "ASREnvelope", "Attack-sustain-release envelope.\n") .def(py::init(), "attack"_a = 0.1, "sustain"_a = 0.5, "release"_a = 0.1, "curve"_a = 1.0, "clock"_a = nullptr); - py::class_>(m, "DetectSilence", "Detects blocks of silence below the threshold value. Used as an auto-free node to terminate a Patch after processing is complete.") + py::class_>(m, "DetectSilence", "Detects blocks of silence below the threshold value. Used as an auto-free\nnode to terminate a Patch after processing is complete.\n") .def(py::init(), "input"_a = nullptr, "threshold"_a = 0.00001); - py::class_>(m, "Envelope", "Generic envelope constructor, given an array of levels, times and curves.") + py::class_>(m, "Envelope", "Generic envelope constructor, given an array of levels, times and curves.\n") .def(py::init, std::vector, std::vector, NodeRef, bool>(), "levels"_a = std::vector(), "times"_a = std::vector(), "curves"_a = std::vector(), "clock"_a = nullptr, "loop"_a = false); - py::class_>(m, "Line", "Line segment with the given start/end values, and duration (in seconds). If loop is true, repeats indefinitely. 
Retriggers on a clock signal.") + py::class_>(m, "Line", "Line segment with the given start/end values, and duration (in seconds).\nIf loop is true, repeats indefinitely.\nRetriggers on a clock signal.\n") .def(py::init(), "start"_a = 0.0, "end"_a = 1.0, "time"_a = 1.0, "loop"_a = 0, "clock"_a = nullptr); - py::class_>(m, "RectangularEnvelope", "Rectangular envelope with the given sustain duration.") + py::class_>(m, "RectangularEnvelope", "Rectangular envelope with the given sustain duration.\n") .def(py::init(), "sustain_duration"_a = 1.0, "clock"_a = nullptr); - py::class_>(m, "FFTContinuousPhaseVocoder", "Continuous phase vocoder. Requires an FFT* input.") + py::class_>(m, "FFTContinuousPhaseVocoder", "Continuous phase vocoder.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = nullptr, "rate"_a = 1.0); #ifdef __APPLE__ - py::class_>(m, "FFTConvolve", "Frequency-domain convolution, using overlap-add. Useful for convolution reverb, with the input buffer containing an impulse response. Requires an FFT* input.") + py::class_>(m, "FFTConvolve", "Frequency-domain convolution, using overlap-add.\nUseful for convolution reverb, with the input buffer containing an impulse response.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = nullptr, "buffer"_a = nullptr); #endif - py::class_>(m, "FFTBufferPlayer", "FFTBufferPlayer. Plays from a buffer of audio spectra in mag/phase format.") + py::class_>(m, "FFTBufferPlayer", "FFTBufferPlayer.\nPlays from a buffer of audio spectra in mag/phase format.\n") .def(py::init(), "buffer"_a = nullptr, "rate"_a = 1.0); - py::class_>(m, "FFTContrast", "FFT Contrast. Requires an FFT* input.") + py::class_>(m, "FFTContrast", "FFT Contrast.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "contrast"_a = 1); - py::class_>(m, "FFTCrossFade", "FFT FFTCrossFade. 
Requires two FFT* inputs.") + py::class_>(m, "FFTCrossFade", "FFT FFTCrossFade.\nRequires two FFT* inputs.\n") .def(py::init(), "inputA"_a = 0, "inputB"_a = 0, "crossfade"_a = 0.0); - py::class_>(m, "FFTLFO", "FFT LFO. Requires an FFT* input.") + py::class_>(m, "FFTLFO", "FFT LFO.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "frequency"_a = 1.0, "spectral_cycles"_a = 1.0); - py::class_>(m, "FFTMagnitudePhaseArray", "Fixed mag/phase array.") + py::class_>(m, "FFTMagnitudePhaseArray", "Fixed mag/phase array.\n") .def(py::init, std::vector>(), "input"_a = 0, "magnitudes"_a = 0, "phases"_a = 0) .def("set_magnitudes", &FFTMagnitudePhaseArray::set_magnitudes); - py::class_>(m, "FFTRandomPhase", "Randomise phase values.") + py::class_>(m, "FFTRandomPhase", "Randomise phase values.\n") .def(py::init(), "input"_a = 0, "level"_a = 1.0); - py::class_>(m, "FFTScaleMagnitudes", "Randomise phase values.") + py::class_>(m, "FFTScaleMagnitudes", "Randomise phase values.\n") .def(py::init>(), "input"_a = 0, "scale"_a = 0); - py::class_>(m, "FFTTransform", "Transforms the FFT magnitude spectrum in the X axis. Requires an FFT* input.") + py::class_>(m, "FFTTransform", "Transforms the FFT magnitude spectrum in the X axis.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "flip"_a = 0, "rotate"_a = 0); - py::class_>(m, "FFT", "Fast Fourier Transform. Takes a time-domain input, and generates a frequency-domain (FFT) output.") + py::class_>(m, "FFT", "Fast Fourier Transform.\nTakes a time-domain input, and generates a frequency-domain (FFT) output.\n") .def(py::init(), "input"_a = 0.0, "fft_size"_a = SIGNALFLOW_DEFAULT_FFT_SIZE, "hop_size"_a = SIGNALFLOW_DEFAULT_FFT_HOP_SIZE, "window_size"_a = 0, "do_window"_a = true); - py::class_>(m, "FFTFindPeaks", "Find peaks in the FFT magnitude spectrum. 
Requires an FFT* input.") + py::class_>(m, "FFTFindPeaks", "Find peaks in the FFT magnitude spectrum.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "prominence"_a = 1, "threshold"_a = 0.000001, "count"_a = SIGNALFLOW_MAX_CHANNELS, "interpolate"_a = true); - py::class_>(m, "IFFT", "Inverse Fast Fourier Transform. Requires an FFT* input, generates a time-domain output.") + py::class_>(m, "IFFT", "Inverse Fast Fourier Transform.\nRequires an FFT* input, generates a time-domain output.\n") .def(py::init(), "input"_a = nullptr, "do_window"_a = false); - py::class_>(m, "FFTLPF", "FFT-based brick wall low pass filter. Requires an FFT* input.") + py::class_>(m, "FFTLPF", "FFT-based brick wall low pass filter.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "frequency"_a = 2000); - py::class_>(m, "FFTNoiseGate", "FFT-based noise gate. Requires an FFT* input.") + py::class_>(m, "FFTNoiseGate", "FFT-based noise gate.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "threshold"_a = 0.5, "invert"_a = 0.0); - py::class_>(m, "FFTPhaseVocoder", "Phase vocoder. Requires an FFT* input.") + py::class_>(m, "FFTPhaseVocoder", "Phase vocoder.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = nullptr); - py::class_>(m, "FFTTonality", "Tonality filter. Requires an FFT* input.") + py::class_>(m, "FFTTonality", "Tonality filter.\nRequires an FFT* input.\n") .def(py::init(), "input"_a = 0, "level"_a = 0.5, "smoothing"_a = 0.9); - py::class_>(m, "Add", "Add each sample of a to each sample of b. 
Can also be written as a + b") + py::class_>(m, "Add", "Add each sample of a to each sample of b.\nCan also be written as a + b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "AmplitudeToDecibels", "Map a linear amplitude value to decibels.") + py::class_>(m, "AmplitudeToDecibels", "Map a linear amplitude value to decibels.\n") .def(py::init(), "a"_a = 0); py::class_>(m, "DecibelsToAmplitude", "DecibelsToAmplitude") .def(py::init(), "a"_a = 0); - py::class_>(m, "Bus", "Bus is a node with a fixed number of input channels and arbitrary number of inputs, used to aggregate multiple sources. It is similar to Sum, but with a defined channel count that does not adapt to its inputs.") + py::class_>(m, "Bus", "Bus is a node with a fixed number of input channels and arbitrary number of\ninputs, used to aggregate multiple sources. It is similar to Sum, but with\na defined channel count that does not adapt to its inputs.\n") .def(py::init(), "num_channels"_a = 1); - py::class_>(m, "ChannelArray", "Takes an array of inputs and spreads them across multiple channels of output.") + py::class_>(m, "ChannelArray", "Takes an array of inputs and spreads them across multiple channels of output.\n") .def(py::init<>()) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a); - py::class_>(m, "ChannelCrossfade", "Given a multichannel input, crossfades between channels based on the given position within the virtual array, producing a single-channel output.") + py::class_>(m, "ChannelCrossfade", "Given a multichannel input, crossfades between channels based on the given\nposition within the virtual array, producing a single-channel output.\n") .def(py::init(), "input"_a = nullptr, "index"_a = nullptr, "num_output_channels"_a = 1); - py::class_>(m, "ChannelMixer", "Downmix a multichannel input to a lower-channel output. If num_channels is greater than one, spreads the input channels across the field. 
If amplitude_compensation is enabled, scale down the amplitude based on the ratio of input to output channels.") + py::class_>(m, "ChannelMixer", "Downmix a multichannel input to a lower-channel output. If num_channels is\ngreater than one, spreads the input channels across the field.\nIf amplitude_compensation is enabled, scale down the amplitude based on the\nratio of input to output channels.\n") .def(py::init(), "num_channels"_a = 1, "input"_a = 0, "amplitude_compensation"_a = true); - py::class_>(m, "ChannelOffset", "Offsets the input by a specified number of channels. With an N-channel input and an offset of M, the output will have M+N channels.") + py::class_>(m, "ChannelOffset", "Offsets the input by a specified number of channels. With an N-channel input\nand an offset of M, the output will have M+N channels.\n") .def(py::init(), "offset"_a = 0, "input"_a = nullptr); - py::class_>(m, "ChannelSelect", "Select a subset of channels from a multichannel input, starting at offset, up to a maximum of maximum, with the given step.") + py::class_>(m, "ChannelSelect", "Select a subset of channels from a multichannel input, starting at offset,\nup to a maximum of maximum, with the given step.\n") .def(py::init(), "input"_a = nullptr, "offset"_a = 0, "maximum"_a = 0, "step"_a = 1); - py::class_>(m, "Equal", "Compares the output of a to the output of b. Outputs 1 when equal, 0 otherwise. Can also be written as a == b") + py::class_>(m, "Equal", "Compares the output of a to the output of b. Outputs 1 when equal, 0 otherwise.\nCan also be written as a == b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "NotEqual", "Compares the output of a to the output of b. Outputs 0 when equal, 1 otherwise. Can also be written as a != b") + py::class_>(m, "NotEqual", "Compares the output of a to the output of b. 
Outputs 0 when equal, 1 otherwise.\nCan also be written as a != b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "GreaterThan", "Compares the output of a to the output of b. Outputs 1 when a > b, 0 otherwise. Can also be written as a > b") + py::class_>(m, "GreaterThan", "Compares the output of a to the output of b. Outputs 1 when a > b, 0 otherwise.\nCan also be written as a > b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "GreaterThanOrEqual", "Compares the output of a to the output of b. Outputs 1 when a >= b, 0 otherwise. Can also be written as a >= b") + py::class_>(m, "GreaterThanOrEqual", "Compares the output of a to the output of b. Outputs 1 when a >= b, 0 otherwise.\nCan also be written as a >= b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "LessThan", "Compares the output of a to the output of b. Outputs 1 when a < b, 0 otherwise. Can also be written as a < b") + py::class_>(m, "LessThan", "Compares the output of a to the output of b. Outputs 1 when a < b, 0 otherwise.\nCan also be written as a < b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "LessThanOrEqual", "Compares the output of a to the output of b. Outputs 1 when a <= b, 0 otherwise. Can also be written as a <= b") + py::class_>(m, "LessThanOrEqual", "Compares the output of a to the output of b. Outputs 1 when a <= b, 0 otherwise.\nCan also be written as a <= b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "Modulo", "Outputs the value of a modulo b, per sample. Supports fractional values. Can also be written as a % b") + py::class_>(m, "Modulo", "Outputs the value of a modulo b, per sample. Supports fractional values.\nCan also be written as a % b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "Abs", "Outputs the absolute value of a, per sample. 
Can also be written as abs(a)") + py::class_>(m, "Abs", "Outputs the absolute value of a, per sample.\nCan also be written as abs(a)\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "If", "Outputs value_if_true for each non-zero value of a, value_if_false for all other values.") + py::class_>(m, "If", "Outputs value_if_true for each non-zero value of a, value_if_false for all\nother values.\n") .def(py::init(), "a"_a = 0, "value_if_true"_a = 0, "value_if_false"_a = 0); - py::class_>(m, "Divide", "Divide each sample of a by each sample of b. Can also be written as a / b") + py::class_>(m, "Divide", "Divide each sample of a by each sample of b.\nCan also be written as a / b\n") .def(py::init(), "a"_a = 1, "b"_a = 1); - py::class_>(m, "FrequencyToMidiNote", "Map a frequency to a MIDI note (where 440Hz = A4 = 69), with floating-point output.") + py::class_>(m, "FrequencyToMidiNote", "Map a frequency to a MIDI note (where 440Hz = A4 = 69), with floating-point\noutput.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "MidiNoteToFrequency", "Map a MIDI note to a frequency (where 440Hz = A4 = 69), supporting floating-point input.") + py::class_>(m, "MidiNoteToFrequency", "Map a MIDI note to a frequency (where 440Hz = A4 = 69), supporting floating-point\ninput.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Multiply", "Multiply each sample of a by each sample of b. Can also be written as a * b") + py::class_>(m, "Multiply", "Multiply each sample of a by each sample of b.\nCan also be written as a * b\n") .def(py::init(), "a"_a = 1.0, "b"_a = 1.0); - py::class_>(m, "Pow", "Outputs a to the power of b, per sample. Can also be written as a ** b") + py::class_>(m, "Pow", "Outputs a to the power of b, per sample.\nCan also be written as a ** b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "RoundToScale", "Given a frequency input, generates a frequency output that is rounded to the nearest MIDI note. 
(TODO: Not very well named)") + py::class_>(m, "RoundToScale", "Given a frequency input, generates a frequency output that is rounded to the nearest MIDI note.\n(TODO: Not very well named)\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Round", "Round the input to the nearest integer value.") + py::class_>(m, "Round", "Round the input to the nearest integer value.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "ScaleLinExp", "Scales the input from a linear range (between a and b) to an exponential range (between c and d).") + py::class_>(m, "ScaleLinExp", "Scales the input from a linear range (between a and b)\nto an exponential range (between c and d).\n") .def(py::init(), "input"_a = 0, "a"_a = 0, "b"_a = 1, "c"_a = 1, "d"_a = 10); - py::class_>(m, "ScaleLinLin", "Scales the input from a linear range (between a and b) to a linear range (between c and d).") + py::class_>(m, "ScaleLinLin", "Scales the input from a linear range (between a and b)\nto a linear range (between c and d).\n") .def(py::init(), "input"_a = 0, "a"_a = 0, "b"_a = 1, "c"_a = 1, "d"_a = 10); - py::class_>(m, "SelectInput", "Pass through the output of one or more `inputs`, based on the integer input index specified in `index`. Unlike `ChannelSelect`, inputs may be multichannel, and `index` can be modulated in real time.") + py::class_>(m, "SelectInput", "Pass through the output of one or more `inputs`, based on the integer input index\nspecified in `index`. Unlike `ChannelSelect`, inputs may be multichannel,\nand `index` can be modulated in real time.\n") .def(py::init(), "index"_a = 0) .def(py::init, NodeRef>(), "inputs"_a, "index"_a = 0) .def(py::init, NodeRef>(), "inputs"_a, "index"_a = 0) .def(py::init, NodeRef>(), "inputs"_a, "index"_a = 0) .def(py::init, NodeRef>(), "inputs"_a, "index"_a = 0); - py::class_>(m, "Subtract", "Subtract each sample of b from each sample of a. 
Can also be written as a - b") + py::class_>(m, "Subtract", "Subtract each sample of b from each sample of a.\nCan also be written as a - b\n") .def(py::init(), "a"_a = 0, "b"_a = 0); - py::class_>(m, "Sum", "Sums the output of all of the input nodes, by sample.") + py::class_>(m, "Sum", "Sums the output of all of the input nodes, by sample.\n") .def(py::init<>()) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a) .def(py::init>(), "inputs"_a); - py::class_>(m, "TimeShift", "TimeShift") + py::class_>(m, "TimeShift", "TimeShift\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Sin", "Outputs sin(a), per sample.") + py::class_>(m, "Sin", "Outputs sin(a), per sample.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Cos", "Outputs cos(a), per sample.") + py::class_>(m, "Cos", "Outputs cos(a), per sample.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Tan", "Outputs tan(a), per sample.") + py::class_>(m, "Tan", "Outputs tan(a), per sample.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Tanh", "Outputs tanh(a), per sample. Can be used as a soft clipper.") + py::class_>(m, "Tanh", "Outputs tanh(a), per sample.\nCan be used as a soft clipper.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "Constant", "Produces a constant value.") + py::class_>(m, "Constant", "Produces a constant value.\n") .def(py::init(), "value"_a = 0); - py::class_>(m, "Impulse", "Produces a value of 1 at the given `frequency`, with output of 0 at all other times. 
If frequency is 0, produces a single impulse.") + py::class_>(m, "Impulse", "Produces a value of 1 at the given `frequency`, with output of 0 at all other times.\nIf frequency is 0, produces a single impulse.\n") .def(py::init(), "frequency"_a = 1.0); py::class_>(m, "LFO", "LFO") .def(py::init(), "frequency"_a = 1.0, "min"_a = 0.0, "max"_a = 1.0, "phase"_a = 0.0); - py::class_>(m, "SawLFO", "Produces a sawtooth LFO at the given `frequency` and `phase` offset, with output ranging from `min` to `max`.") + py::class_>(m, "SawLFO", "Produces a sawtooth LFO at the given `frequency` and `phase` offset, with output ranging from `min` to `max`.\n") .def(py::init(), "frequency"_a = 1.0, "min"_a = 0.0, "max"_a = 1.0, "phase"_a = 0.0); - py::class_>(m, "SawOscillator", "Produces a (non-band-limited) sawtooth wave, with the given `frequency` and `phase` offset. When a `reset` or trigger is received, resets the phase to zero.") + py::class_>(m, "SawOscillator", "Produces a (non-band-limited) sawtooth wave, with the given `frequency` and\n`phase` offset. 
When a `reset` or trigger is received, resets the phase to zero.\n") .def(py::init(), "frequency"_a = 440, "phase_offset"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "SineLFO", "Produces a sinusoidal LFO at the given `frequency` and `phase` offset, with output ranging from `min` to `max`.") + py::class_>(m, "SineLFO", "Produces a sinusoidal LFO at the given `frequency` and `phase` offset,\nwith output ranging from `min` to `max`.\n") .def(py::init(), "frequency"_a = 1.0, "min"_a = 0.0, "max"_a = 1.0, "phase"_a = 0.0); - py::class_>(m, "SineOscillator", "Produces a sine wave at the given `frequency`.") + py::class_>(m, "SineOscillator", "Produces a sine wave at the given `frequency`.\n") .def(py::init(), "frequency"_a = 440, "phase_offset"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "SquareLFO", "Produces a pulse wave LFO with the given `frequency` and pulse `width`, ranging from `min` to `max`, where `width` of `0.5` is a square wave and other values produce a rectangular wave.") + py::class_>(m, "SquareLFO", "Produces a pulse wave LFO with the given `frequency` and pulse `width`, \nranging from `min` to `max`, where `width` of `0.5` is a square wave and\nother values produce a rectangular wave.\n") .def(py::init(), "frequency"_a = 1.0, "min"_a = 0.0, "max"_a = 1.0, "width"_a = 0.5, "phase"_a = 0.0); - py::class_>(m, "SquareOscillator", "Produces a pulse wave with the given `frequency` and pulse `width`, where `width` of `0.5` is a square wave and other values produce a rectangular wave.") + py::class_>(m, "SquareOscillator", "Produces a pulse wave with the given `frequency` and pulse `width`, \nwhere `width` of `0.5` is a square wave and other values produce a\nrectangular wave.\n") .def(py::init(), "frequency"_a = 440, "width"_a = 0.5); - py::class_>(m, "TriangleLFO", "Produces a triangle LFO with the given `frequency` and `phase` offset, ranging from `min` to `max`.") + py::class_>(m, "TriangleLFO", "Produces a triangle LFO with the given 
`frequency` and `phase` offset,\nranging from `min` to `max`.\n") .def(py::init(), "frequency"_a = 1.0, "min"_a = 0.0, "max"_a = 1.0, "phase"_a = 0.0); - py::class_>(m, "TriangleOscillator", "Produces a triangle wave with the given `frequency`.") + py::class_>(m, "TriangleOscillator", "Produces a triangle wave with the given `frequency`.\n") .def(py::init(), "frequency"_a = 440); - py::class_>(m, "Wavetable", "Plays the wavetable stored in buffer at the given `frequency` and `phase` offset. `sync` can be used to provide a hard sync input, which resets the wavetable's phase at each zero-crossing.") + py::class_>(m, "Wavetable", "Plays the wavetable stored in buffer at the given `frequency` and `phase` offset.\n`sync` can be used to provide a hard sync input, which resets the wavetable's\nphase at each zero-crossing.\n") .def(py::init(), "buffer"_a = nullptr, "frequency"_a = 440, "phase_offset"_a = 0, "sync"_a = 0, "phase_map"_a = nullptr); py::class_>(m, "Wavetable2D", "Wavetable2D") .def(py::init(), "buffer"_a = nullptr, "frequency"_a = 440, "crossfade"_a = 0.0, "phase_offset"_a = 0.0, "sync"_a = 0); - py::class_>(m, "Maraca", "Model of maraca") + py::class_>(m, "Maraca", "Physically-inspired model of a maraca.\n\nParameters:\n- `num_beans`: The number of simulated beans in the maraca (1-1024)\n- `shake_decay`: Decay constant for the energy injected per shake\n- `grain_decay`: Decay constant for the energy created per bean collision\n- `shake_duration`: Duration of each shake action, milliseconds\n- `shell_frequency`: Resonant frequency of the maraca's shell, hertz\n- `shell_resonance`: Resonanc of the maraca's shell (0-1)\n- `clock`: If specified, triggers shake actions\n- `energy`: If specified, injects energy into the maraca\n\nFrom Cook (1997), \"Physically Informed Sonic Modeling (PhISM): Synthesis of\nPercussive Sounds\", Computer Music Journal.\n") .def(py::init(), "num_beans"_a = 64, "shake_decay"_a = 0.99, "grain_decay"_a = 0.99, "shake_duration"_a = 0.02, 
"shell_frequency"_a = 12000, "shell_resonance"_a = 0.9, "clock"_a = nullptr, "energy"_a = nullptr); - py::class_>(m, "Clip", "Clip the input to `min`/`max`.") + py::class_>(m, "Clip", "Clip the input to `min`/`max`.\n") .def(py::init(), "input"_a = nullptr, "min"_a = -1.0, "max"_a = 1.0); - py::class_>(m, "Fold", "Fold the input beyond `min`/`max`, reflecting the excess back.") + py::class_>(m, "Fold", "Fold the input beyond `min`/`max`, reflecting the excess back.\n") .def(py::init(), "input"_a = nullptr, "min"_a = -1.0, "max"_a = 1.0); - py::class_>(m, "Smooth", "Smooth the input with a given smoothing coefficient. When `smooth` = 0, applies no smoothing.") + py::class_>(m, "Smooth", "Smooth the input with a given smoothing coefficient.\nWhen `smooth` = 0, applies no smoothing.\n") .def(py::init(), "input"_a = nullptr, "smooth"_a = 0.99); - py::class_>(m, "WetDry", "Takes `wet` and `dry` inputs, and outputs a mix determined by `wetness`.") + py::class_>(m, "WetDry", "Takes `wet` and `dry` inputs, and outputs a mix determined by `wetness`.\n") .def(py::init(), "dry_input"_a = nullptr, "wet_input"_a = nullptr, "wetness"_a = 0.0); - py::class_>(m, "Wrap", "Wrap the input beyond `min`/`max`.") + py::class_>(m, "Wrap", "Wrap the input beyond `min`/`max`.\n") .def(py::init(), "input"_a = nullptr, "min"_a = -1.0, "max"_a = 1.0); - py::class_>(m, "AllpassDelay", "All-pass delay, with `feedback` between 0 and 1. `delay_time` must be less than or equal to `max_delay_time`.") + py::class_>(m, "AllpassDelay", "All-pass delay, with `feedback` between 0 and 1.\n`delay_time` must be less than or equal to `max_delay_time`.\n") .def(py::init(), "input"_a = 0.0, "delay_time"_a = 0.1, "feedback"_a = 0.5, "max_delay_time"_a = 0.5); - py::class_>(m, "CombDelay", "Comb delay, with `feedback` between 0 and 1. 
`delay_time` must be less than or equal to `max_delay_time`.") + py::class_>(m, "CombDelay", "Comb delay, with `feedback` between 0 and 1.\n`delay_time` must be less than or equal to `max_delay_time`.\n") .def(py::init(), "input"_a = 0.0, "delay_time"_a = 0.1, "feedback"_a = 0.5, "max_delay_time"_a = 0.5); - py::class_>(m, "OneTapDelay", "Single-tap delay line. `delay_time` must be less than or equal to `max_delay_time`.") + py::class_>(m, "OneTapDelay", "Single-tap delay line.\n`delay_time` must be less than or equal to `max_delay_time`.\n") .def(py::init(), "input"_a = 0.0, "delay_time"_a = 0.1, "max_delay_time"_a = 0.5); - py::class_>(m, "Stutter", "Stutters the input whenever a trigger is received on `clock`. Generates `stutter_count` repeats, with duration of `stutter_time`.") + py::class_>(m, "Stutter", "Stutters the input whenever a trigger is received on `clock`.\nGenerates `stutter_count` repeats, with duration of `stutter_time`.\n") .def(py::init(), "input"_a = 0.0, "stutter_time"_a = 0.1, "stutter_count"_a = 1, "stutter_probability"_a = 1.0, "stutter_advance_time"_a = 0.0, "clock"_a = nullptr, "max_stutter_time"_a = 1.0); - py::class_>(m, "Resample", "Resampler and bit crusher. `sample_rate` is in Hz, `bit_rate` is an integer between 0 and 16.") + py::class_>(m, "Resample", "Resampler and bit crusher. 
`sample_rate` is in Hz, `bit_rate` is an integer\nbetween 0 and 16.\n") .def(py::init(), "input"_a = 0, "sample_rate"_a = 44100, "bit_rate"_a = 16); - py::class_>(m, "SampleAndHold", "Samples and holds the input each time a trigger is received on `clock`.") + py::class_>(m, "SampleAndHold", "Samples and holds the input each time a trigger is received on `clock`.\n") .def(py::init(), "input"_a = nullptr, "clock"_a = nullptr); - py::class_>(m, "Squiz", "Implementation of Dan Stowell's Squiz algorithm, a kind of downsampler.") + py::class_>(m, "Squiz", "Implementation of Dan Stowell's Squiz algorithm, a kind of downsampler.\n") .def(py::init(), "input"_a = 0.0, "rate"_a = 2.0, "chunk_size"_a = 1); - py::class_>(m, "WaveShaper", "Applies wave-shaping as described in the WaveShaperBuffer `buffer`.") + py::class_>(m, "WaveShaper", "Applies wave-shaping as described in the WaveShaperBuffer `buffer`.\n") .def(py::init(), "input"_a = 0.0, "buffer"_a = nullptr); - py::class_>(m, "Compressor", "Dynamic range compression, with optional `sidechain` input. 
When the input amplitude is above `threshold`, compresses the amplitude with the given `ratio`, following the given `attack_time` and `release_time` in seconds.") + py::class_>(m, "Compressor", "Dynamic range compression, with optional `sidechain` input.\nWhen the input amplitude is above `threshold`, compresses the amplitude with\nthe given `ratio`, following the given `attack_time` and `release_time`\nin seconds.\n") .def(py::init(), "input"_a = 0.0, "threshold"_a = 0.1, "ratio"_a = 2, "attack_time"_a = 0.01, "release_time"_a = 0.1, "sidechain"_a = nullptr); - py::class_>(m, "Gate", "Outputs the input value when it is above the given `threshold`, otherwise zero.") + py::class_>(m, "Gate", "Outputs the input value when it is above the given `threshold`, otherwise zero.\n") .def(py::init(), "input"_a = 0.0, "threshold"_a = 0.1); - py::class_>(m, "Maximiser", "Gain maximiser.") + py::class_>(m, "Maximiser", "Gain maximiser.\n") .def(py::init(), "input"_a = 0.0, "ceiling"_a = 0.5, "attack_time"_a = 1.0, "release_time"_a = 1.0); - py::class_>(m, "RMS", "Outputs the root-mean-squared value of the input, in buffers equal to the graph's current buffer size.") + py::class_>(m, "RMS", "Outputs the root-mean-squared value of the input, in buffers equal to the\ngraph's current buffer size.\n") .def(py::init(), "input"_a = 0.0); - py::class_>(m, "BiquadFilter", "Biquad filter. filter_type can be 'low_pass', 'band_pass', 'high_pass', 'notch', 'peak', 'low_shelf', 'high_shelf'. 
Not recommended for real-time modulation; for this, use SVFilter.") + py::class_>(m, "BiquadFilter", "Biquad filter.\nfilter_type can be 'low_pass', 'band_pass', 'high_pass', 'notch', 'peak',\n'low_shelf', 'high_shelf'.\nNot recommended for real-time modulation; for this, use SVFilter.\n") .def(py::init(), "input"_a = 0.0, "filter_type"_a = SIGNALFLOW_FILTER_TYPE_LOW_PASS, "cutoff"_a = 440, "resonance"_a = 0.0, "peak_gain"_a = 0.0) .def(py::init(), "input"_a, "filter_type"_a, "cutoff"_a = 440, "resonance"_a = 0.0, "peak_gain"_a = 0.0); - py::class_>(m, "DCFilter", "Remove low-frequency and DC content from a signal.") + py::class_>(m, "DCFilter", "Remove low-frequency and DC content from a signal.\n") .def(py::init(), "input"_a = 0.0); - py::class_>(m, "EQ", "Three-band EQ.") + py::class_>(m, "EQ", "Three-band EQ.\n") .def(py::init(), "input"_a = 0.0, "low_gain"_a = 1.0, "mid_gain"_a = 1.0, "high_gain"_a = 1.0, "low_freq"_a = 500, "high_freq"_a = 5000); - py::class_>(m, "MoogVCF", "Simulation of the Moog ladder low-pass filter. `cutoff` sets the cutoff frequency; `resonance` should typically be between 0..1.") + py::class_>(m, "MoogVCF", "Simulation of the Moog ladder low-pass filter. `cutoff` sets the cutoff\nfrequency; `resonance` should typically be between 0..1.\n") .def(py::init(), "input"_a = 0.0, "cutoff"_a = 200.0, "resonance"_a = 0.0); - py::class_>(m, "SVFilter", "State variable filter. `filter_type` can be 'low_pass', 'band_pass', 'high_pass', 'notch', 'peak', 'low_shelf', 'high_shelf'. `resonance` should be between `[0..1]`.") + py::class_>(m, "SVFilter", "State variable filter.\n`filter_type` can be 'low_pass', 'band_pass', 'high_pass', 'notch', 'peak',\n'low_shelf', 'high_shelf'. 
`resonance` should be between `[0..1]`.\n") .def(py::init(), "input"_a = 0.0, "filter_type"_a = SIGNALFLOW_FILTER_TYPE_LOW_PASS, "cutoff"_a = 440, "resonance"_a = 0.0) .def(py::init(), "input"_a, "filter_type"_a, "cutoff"_a = 440, "resonance"_a = 0.0); - py::class_>(m, "AzimuthPanner", "Pan input around an equally-spaced ring of `num_channels` speakers. `pan` is the pan position from -1..+1, where 0 = centre front. `width` is the source's width, where 1.0 spans exactly between an adjacent pair of channels.") + py::class_>(m, "AzimuthPanner", "Pan input around an equally-spaced ring of `num_channels` speakers.\n`pan` is the pan position from -1..+1, where 0 = centre front.\n`width` is the source's width, where 1.0 spans exactly between an adjacent pair of channels.\n") .def(py::init(), "num_channels"_a = 2, "input"_a = 0, "pan"_a = 0.0, "width"_a = 1.0); - py::class_>(m, "ChannelPanner", "Pan the input between a linear series of channels, where `pan` 0 = channel 0, 1 = channel 1, etc. No wrapping is applied.") + py::class_>(m, "ChannelPanner", "Pan the input between a linear series of channels, where `pan` 0 = channel 0,\n1 = channel 1, etc. No wrapping is applied.\n") .def(py::init(), "num_channels"_a = 2, "input"_a = 0, "pan"_a = 0.0, "width"_a = 1.0); - py::class_>(m, "SpatialPanner", "Implements a spatial panning algorithm, applied to a given SpatialEnvironment. 
Currently, only DBAP is supported.") + py::class_>(m, "SpatialPanner", "Implements a spatial panning algorithm, applied to a given SpatialEnvironment.\nCurrently, only DBAP is supported.\n") .def(py::init, NodeRef, NodeRef, NodeRef, NodeRef, NodeRef, NodeRef, std::string>(), "env"_a = nullptr, "input"_a = 0.0, "x"_a = 0.0, "y"_a = 0.0, "z"_a = 0.0, "radius"_a = 1.0, "use_delays"_a = 1.0, "algorithm"_a = "dbap"); - py::class_>(m, "StereoBalance", "Takes a stereo input and rebalances it, where `balance` of `0` is unchanged, `-1` is hard left, and `1` is hard right.") + py::class_>(m, "StereoBalance", "Takes a stereo input and rebalances it, where `balance` of `0` is unchanged,\n`-1` is hard left, and `1` is hard right.\n") .def(py::init(), "input"_a = 0, "balance"_a = 0); - py::class_>(m, "StereoPanner", "Pans a mono input to a stereo output. `pan` should be between -1 (hard left) to +1 (hard right), with 0 = centre.") + py::class_>(m, "StereoPanner", "Pans a mono input to a stereo output. `pan` should be between -1 (hard left) to\n+1 (hard right), with 0 = centre.\n") .def(py::init(), "input"_a = 0, "pan"_a = 0.0); - py::class_>(m, "StereoWidth", "Reduces the width of a stereo signal. When `width` = 1, input is unchanged. When `width` = 0, outputs a pair of identical channels both containing L+R.") + py::class_>(m, "StereoWidth", "Reduces the width of a stereo signal.\nWhen `width` = 1, input is unchanged.\nWhen `width` = 0, outputs a pair of identical channels both containing L+R.\n") .def(py::init(), "input"_a = 0, "width"_a = 1); - py::class_>(m, "ClockDivider", "When given a `clock` input (e.g., an Impulse), divides the clock by the given `factor`. factor must be an integer greater than or equal to 1.") + py::class_>(m, "ClockDivider", "When given a `clock` input (e.g., an Impulse), divides the clock by the given\n`factor`. 
factor must be an integer greater than or equal to 1.\n") .def(py::init(), "clock"_a = 0, "factor"_a = 1); - py::class_>(m, "Counter", "Count upwards from `min` to `max`, driven by `clock`.") + py::class_>(m, "Counter", "Count upwards from `min` to `max`, driven by `clock`.\n") .def(py::init(), "clock"_a = 0, "min"_a = 0, "max"_a = 2147483647); - py::class_>(m, "Euclidean", "Euclidean rhythm as described by Toussaint, with `sequence_length` (n) and `num_events` (k), driven by `clock`.") + py::class_>(m, "Euclidean", "Euclidean rhythm as described by Toussaint, with `sequence_length` (n)\nand `num_events` (k), driven by `clock`.\n") .def(py::init(), "clock"_a = 0, "sequence_length"_a = 0, "num_events"_a = 0); - py::class_>(m, "FlipFlop", "Flips from 0/1 on each `clock`.") + py::class_>(m, "FlipFlop", "Flips from 0/1 on each `clock`.\n") .def(py::init(), "clock"_a = 0); - py::class_>(m, "ImpulseSequence", "Each time a `clock` or trigger is received, outputs the next value in `sequence`. At all other times, outputs zero.") + py::class_>(m, "ImpulseSequence", "Each time a `clock` or trigger is received, outputs the next value in\n`sequence`. At all other times, outputs zero.\n") .def(py::init, NodeRef>(), "sequence"_a = std::vector(), "clock"_a = nullptr) .def(py::init(), "sequence"_a, "clock"_a = nullptr); - py::class_>(m, "Index", "Outputs the value in `list` corresponding to `index`.") + py::class_>(m, "Index", "Outputs the value in `list` corresponding to `index`.\n") .def(py::init, NodeRef>(), "list"_a = 0, "index"_a = 0); - py::class_>(m, "Latch", "Initially outputs 0. When a trigger is received at `set`, outputs 1. 
When a trigger is subsequently received at `reset`, outputs 0, until the next `set`.") + py::class_>(m, "Latch", "Initially outputs 0.\nWhen a trigger is received at `set`, outputs 1.\nWhen a trigger is subsequently received at `reset`, outputs 0, until the next\n`set`.\n") .def(py::init(), "set"_a = 0, "reset"_a = 0); - py::class_>(m, "Sequence", "Outputs the elements in `sequence`, incrementing position on each `clock`.") + py::class_>(m, "Sequence", "Outputs the elements in `sequence`, incrementing position on each `clock`.\n") .def(py::init, NodeRef>(), "sequence"_a = std::vector(), "clock"_a = nullptr); - py::class_>(m, "TriggerMult", "Distribute any triggers to all output nodes.") + py::class_>(m, "TriggerMult", "Distribute any triggers to all output nodes.\n") .def(py::init(), "a"_a = 0); - py::class_>(m, "TriggerRoundRobin", "Relay trigger() events to a single node from the list of connected outputs, with `direction` determining the direction: 1 (or above) = move forwards by N, -1 = move backwards by N, 0 = stationary.") + py::class_>(m, "TriggerRoundRobin", "Relay trigger() events to a single node from the list of connected outputs,\nwith `direction` determining the direction: 1 (or above) = move forwards by N,\n-1 = move backwards by N, 0 = stationary.\n") .def(py::init(), "direction"_a = 1); - py::class_>(m, "Logistic", "Logistic noise.") + py::class_>(m, "Logistic", "Logistic noise.\n") .def(py::init(), "chaos"_a = 3.7, "frequency"_a = 0.0); - py::class_>(m, "PinkNoise", "Pink noise, with specified low/high cutoffs.") + py::class_>(m, "PinkNoise", "Pink noise, with specified low/high cutoffs.\n") .def(py::init(), "low_cutoff"_a = 20.0, "high_cutoff"_a = 20000.0, "reset"_a = nullptr); - py::class_>(m, "RandomBrownian", "Outputs Brownian noise between min/max, with a mean change of delta between samples. 
If a clock is passed, only generates a new value on a clock tick.") + py::class_>(m, "RandomBrownian", "Outputs Brownian noise between min/max, with a mean change of delta between samples.\nIf a clock is passed, only generates a new value on a clock tick.\n") .def(py::init(), "min"_a = -1.0, "max"_a = 1.0, "delta"_a = 0.01, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomChoice", "Pick a random value from the given array. If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomChoice", "Pick a random value from the given array.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init, NodeRef, NodeRef>(), "values"_a = std::vector(), "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomCoin", "Flip a coin with the given probability. If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomCoin", "Flip a coin with the given probability.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init(), "probability"_a = 0.5, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomExponentialDist", "Generate an random value following the exponential distribution. If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomExponentialDist", "Generate an random value following the exponential distribution.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init(), "scale"_a = 0.0, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomExponential", "Generate an random exponential value between min/max. 
If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomExponential", "Generate an random exponential value between min/max.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init(), "min"_a = 0.001, "max"_a = 1.0, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomGaussian", "Generate an random Gaussian value, with given mean and sigma. If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomGaussian", "Generate an random Gaussian value, with given mean and sigma.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init(), "mean"_a = 0.0, "sigma"_a = 0.0, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomImpulseSequence", "Generates a random sequence of 0/1 bits with the given length, and the given probability each each bit = 1. The position of the sequence is incremented on each clock signal. explore and generate are trigger inputs which cause the sequence to mutate and re-generate respectively.") + py::class_>(m, "RandomImpulseSequence", "Generates a random sequence of 0/1 bits with the given length, and the given\nprobability each each bit = 1. The position of the sequence is incremented\non each clock signal. 
explore and generate are trigger inputs which cause\nthe sequence to mutate and re-generate respectively.\n") .def(py::init(), "probability"_a = 0.5, "length"_a = 8, "clock"_a = nullptr, "explore"_a = nullptr, "generate"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "RandomImpulse", "Generate random impulses at the given frequency, with either uniform or poisson distribution.") + py::class_>(m, "RandomImpulse", "Generate random impulses at the given frequency, with either uniform\nor poisson distribution.\n") .def(py::init(), "frequency"_a = 1.0, "distribution"_a = SIGNALFLOW_EVENT_DISTRIBUTION_UNIFORM, "reset"_a = nullptr) .def(py::init(), "frequency"_a, "distribution"_a, "reset"_a = nullptr); - py::class_>(m, "RandomUniform", "Generates a uniformly random value between min/max. If a clock is passed, only picks a new value on a clock tick.") + py::class_>(m, "RandomUniform", "Generates a uniformly random value between min/max.\nIf a clock is passed, only picks a new value on a clock tick.\n") .def(py::init(), "min"_a = 0.0, "max"_a = 1.0, "clock"_a = nullptr, "reset"_a = nullptr); - py::class_>(m, "WhiteNoise", "Generates whitenoise between min/max. If frequency is zero, generates at audio rate. For frequencies lower than audio rate, interpolate applies linear interpolation between values, and random_interval specifies whether new random values should be equally-spaced or randomly-spaced.") + py::class_>(m, "WhiteNoise", "Generates whitenoise between min/max. If frequency is zero, generates at\naudio rate. 
For frequencies lower than audio rate, interpolate applies linear\ninterpolation between values, and random_interval specifies whether new\nrandom values should be equally-spaced or randomly-spaced.\n") .def(py::init(), "frequency"_a = 0.0, "min"_a = -1.0, "max"_a = 1.0, "interpolate"_a = true, "random_interval"_a = true, "reset"_a = nullptr); } diff --git a/tests/test_buffer.py b/tests/test_buffer.py index f3795142..01cdc491 100644 --- a/tests/test_buffer.py +++ b/tests/test_buffer.py @@ -1,4 +1,4 @@ -from signalflow import Buffer, Buffer2D +from signalflow import Buffer, Buffer2D, SampleRingBuffer, SampleRingQueue from signalflow import SIGNALFLOW_INTERPOLATION_MODE_NONE, SIGNALFLOW_INTERPOLATION_MODE_LINEAR from signalflow import GraphNotCreatedException import numpy as np @@ -202,3 +202,37 @@ def test_buffer_2d(graph): assert b2d.get2D(1.5, 1.00) == 5 # TODO: Test with no interpolation + + +def test_ring_buffer(): + buf = SampleRingBuffer(128) + assert buf.get_capacity() == 128 + + assert buf.get(0) == 0.0 + buf.append(7) + buf.append(9) + buf.append(8) + assert buf.get(0) == 8 + assert buf.get(-1) == 9 + assert buf.get(-2) == 7 + + buf.extend([1, 2, 3]) + assert buf.get(0) == 3 + assert buf.get(-1) == 2 + assert buf.get(-2) == 1 + +def test_ring_queue(): + queue = SampleRingQueue(128) + assert queue.get_capacity() == 128 + assert queue.get_filled_count() == 0 + queue.append(7) + queue.append(8) + assert queue.get_filled_count() == 2 + assert queue.pop() == 7 + assert queue.pop() == 8 + assert queue.get_filled_count() == 0 + + queue.extend([1, 2, 3]) + assert queue.pop() == 1 + assert queue.pop() == 2 + assert queue.pop() == 3 diff --git a/tests/test_graph.py b/tests/test_graph.py index 80f8be54..53f87ad5 100644 --- a/tests/test_graph.py +++ b/tests/test_graph.py @@ -81,7 +81,7 @@ def test_graph_num_output_channels(): output = AudioOut_Dummy(5) graph = AudioGraph(output_device=output, start=False) assert graph.num_output_channels == 5 - del graph + 
graph.destroy() def test_graph_render_to_buffer(graph):