AnalogTapeModel

Physical modelling signal processing for analog tape recording

commit cc960fe9e884e8fb2f8cc3a71ac499434fc405b3
parent dabe5925b335daec05cdbcdfaaf684f1dbfaf371
Author: jatinchowdhury18 <jatinchowdhury18@gmail.com>
Date:   Tue, 23 Feb 2021 19:59:26 -0800

Refactor STN mode to use RTNeural (#141)

* Get rid of STN inferencing code and replace with RTNeural library

* Benchmark STN mode with XSIMD backend

* Apply clang-format

Co-authored-by: jatinchowdhury18 <jatinchowdhury18@users.noreply.github.com>
Co-authored-by: github-actions[bot] <github-actions[bot]@users.noreply.github.com>
Diffstat:
M .github/workflows/cmake.yml | 2 +-
M .gitmodules | 3 +++
M Plugin/CMakeLists.txt | 1 +
M Plugin/Source/Headless/Benchmarks.cpp | 2 +-
M Plugin/Source/Processors/CMakeLists.txt | 1 -
M Plugin/Source/Processors/Hysteresis/HysteresisProcessing.cpp | 1 -
M Plugin/Source/Processors/Hysteresis/HysteresisSTN.cpp | 42 ++++++++++++++++++++++++++----------------
M Plugin/Source/Processors/Hysteresis/HysteresisSTN.h | 5 +++--
D Plugin/Source/Processors/Hysteresis/RTNeural/.gitignore | 4 ----
D Plugin/Source/Processors/Hysteresis/RTNeural/CMakeLists.txt | 20 --------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/LICENSE | 29 -----------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/README.md | 15 ---------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/CMakeLists.txt | 13 -------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.cpp | 173 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.h | 25 -------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/Layer.h | 28 ----------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/Model.h | 79 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/activation.h | 85 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/dense.h | 120 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/dense_eigen.h | 72 ------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.cpp | 155 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.h | 87 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.cpp | 109 -------------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.h | 72 ------------------------------------------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/tests/CMakeLists.txt | 6 ------
D Plugin/Source/Processors/Hysteresis/RTNeural/tests/dense.cpp | 37 -------------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/tests/load_csv.hpp | 32 --------------------------------
D Plugin/Source/Processors/Hysteresis/RTNeural/tests/model_loader.hpp | 86 -------------------------------------------------------------------------------
M Plugin/modules/CMakeLists.txt | 5 +++++
A Plugin/modules/RTNeural | 1 +
30 files changed, 41 insertions(+), 1269 deletions(-)
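
Before the patch itself, here is the asynchronous loading pattern that this commit introduces in HysteresisSTN::HysteresisSTN(), reduced to a standalone sketch. Only the std::async/std::future mechanics are taken from the patch; the file names and the loadModelSet body below are placeholders, not code from the plugin:

    // Standalone sketch of per-file async loading, standard library only.
    #include <future>
    #include <iostream>
    #include <string>
    #include <vector>

    // Stand-in for the real work (JSON parsing, copying weights into models).
    static void loadModelSet (const std::string& file)
    {
        std::cout << "loading " << file << '\n';
    }

    int main()
    {
        const std::vector<std::string> files { "width_0.json", "width_50.json", "width_100.json" };

        // Launch one task per file; std::launch::async forces a separate thread.
        std::vector<std::future<void>> futures;
        for (const auto& file : files)
            futures.push_back (std::async (std::launch::async, [file] { loadModelSet (file); }));

        // Block until every loader has finished before using the models.
        for (auto& f : futures)
            f.wait();
    }

Loading the per-width model sets in parallel this way is what the commit comment credits for cutting the two-channel load time from roughly 100 ms to roughly 30 ms.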

diff --git a/.github/workflows/cmake.yml b/.github/workflows/cmake.yml
@@ -41,7 +41,7 @@ jobs:
     - name: Checkout code
       uses: actions/checkout@v2
       with:
-        submodules: true
+        submodules: recursive
 
     # - name: Setup debug session
     #   if: runner.os == 'Linux'
diff --git a/.gitmodules b/.gitmodules
@@ -7,3 +7,6 @@
 [submodule "Plugin/modules/chowdsp_utils"]
 	path = Plugin/modules/chowdsp_utils
 	url = https://github.com/Chowdhury-DSP/chowdsp_utils
+[submodule "Plugin/modules/RTNeural"]
+	path = Plugin/modules/RTNeural
+	url = https://github.com/jatinchowdhury18/RTNeural
diff --git a/Plugin/CMakeLists.txt b/Plugin/CMakeLists.txt
@@ -4,6 +4,7 @@ set(CMAKE_CXX_STANDARD 17)
 project(CHOWTapeModel VERSION 2.7.0)
 
 add_subdirectory(modules)
+include_directories(modules/RTNeural)
 
 # set up plugin project
 # juce_set_vst2_sdk_path(C:/SDKs/VST_SDK/VST2_SDK/)
diff --git a/Plugin/Source/Headless/Benchmarks.cpp b/Plugin/Source/Headless/Benchmarks.cpp
@@ -46,7 +46,7 @@ void setParameters (AudioProcessor* plugin, int mode)
                   << ": " << param->getText (param->getValue(), 1024) << std::endl;
     }
 
-    if (param->getName (1024) == "Mode")
+    if (param->getName (1024) == "Tape Mode")
     {
         param->setValue ((float) mode / 5.0f); // STN
         std::cout << "Setting parameter " << param->getName (1024)
diff --git a/Plugin/Source/Processors/CMakeLists.txt b/Plugin/Source/Processors/CMakeLists.txt
@@ -6,7 +6,6 @@ target_sources(CHOWTapeModel PRIVATE
     Hysteresis/HysteresisProcessor.cpp
     Hysteresis/HysteresisSTN.cpp
     Hysteresis/ToneControl.cpp
-    Hysteresis/RTNeural/src/Json2RnnParser.cpp
 
     Input_Filters/InputFilters.cpp
     Loss_Effects/LossFilter.cpp
diff --git a/Plugin/Source/Processors/Hysteresis/HysteresisProcessing.cpp b/Plugin/Source/Processors/Hysteresis/HysteresisProcessing.cpp
@@ -1,5 +1,4 @@
 #include "HysteresisProcessing.h"
-#include "RTNeural/src/Json2RnnParser.h"
 #include <math.h>
 
 namespace
diff --git a/Plugin/Source/Processors/Hysteresis/HysteresisSTN.cpp b/Plugin/Source/Processors/Hysteresis/HysteresisSTN.cpp
@@ -1,5 +1,4 @@
 #include "HysteresisSTN.h"
-#include "RTNeural/src/Json2RnnParser.h"
 #include <future>
 
 namespace
@@ -43,27 +42,38 @@ std::unique_ptr<MemoryInputStream> getModelFileStream (const String& modelFile)
 
 HysteresisSTN::HysteresisSTN()
 {
-    // load models
+    // Since we have a lot of models to load
+    // let's split them up and load them asychronously!
+    // This cuts down the model loading time for both
+    // channels from ~100 ms to ~30 ms
     size_t widthLoadIdx = 0;
+    std::vector<std::future<void>> futures;
     for (const auto& width : widthTags)
     {
-        auto modelsStream = getModelFileStream ("hyst_width_" + width + ".json");
-        jassert (modelsStream != nullptr);
+        auto loadModelSet = [=] (size_t widthModelIdx) {
+            auto modelsStream = getModelFileStream ("hyst_width_" + width + ".json");
+            jassert (modelsStream != nullptr);
 
-        auto modelsJson = JSON::parse (*modelsStream.get());
-        size_t satLoadIdx = 0;
-        for (const auto& sat : satTags)
-        {
-            String modelTag = "drive_" + sat + "_" + width;
-            auto modelJson = modelsJson[modelTag.toRawUTF8()];
-            stnModels[widthLoadIdx][satLoadIdx] = Json2RnnParser::parseJson<double> (modelJson);
+            auto modelsJson = nlohmann::json::parse (modelsStream->readEntireStreamAsString().toStdString());
+            size_t satLoadIdx = 0;
+            for (const auto& sat : satTags)
+            {
+                String modelTag = "drive_" + sat + "_" + width;
+                auto thisModelJson = modelsJson[modelTag.toStdString()];
+                stnModels[widthModelIdx][satLoadIdx] = RTNeural::json_parser::parseJson<double> (thisModelJson);
 
-            jassert (stnModels[widthLoadIdx][satLoadIdx] != nullptr);
-            jassert (stnModels[widthLoadIdx][satLoadIdx]->layers[0]->in_size == inputSize);
-            satLoadIdx++;
-        }
-        widthLoadIdx++;
+                jassert (stnModels[widthModelIdx][satLoadIdx] != nullptr);
+                jassert (stnModels[widthModelIdx][satLoadIdx]->layers[0]->in_size == inputSize);
+                satLoadIdx++;
+            }
+        };
+
+        futures.push_back (std::async (std::launch::async,
+                                       [=, &widthLoadIdx] { loadModelSet (widthLoadIdx++); }));
     }
+
+    for (auto& f : futures)
+        f.wait();
 }
 
 void HysteresisSTN::prepare (double sampleRate)
diff --git a/Plugin/Source/Processors/Hysteresis/HysteresisSTN.h b/Plugin/Source/Processors/Hysteresis/HysteresisSTN.h
@@ -1,8 +1,9 @@
 #ifndef HYSTERESISSTN_H_INCLUDED
 #define HYSTERESISSTN_H_INCLUDED
 
-#include "RTNeural/src/Model.h"
+// #include "RTNeural/src/Model.h"
 #include <JuceHeader.h>
+#include <RTNeural/RTNeural.h>
 
 /**
  * Class that implements a "State Transition Network" for
@@ -33,7 +34,7 @@ public:
     };
 
 private:
-    std::unique_ptr<MLUtils::Model<double>> stnModels[numWidthModels][numSatModels];
+    std::unique_ptr<RTNeural::Model<double>> stnModels[numWidthModels][numSatModels];
     double sampleRateCorr = 1.0;
     size_t widthIdx = 0;
     size_t satIdx = 0;
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/.gitignore b/Plugin/Source/Processors/Hysteresis/RTNeural/.gitignore
@@ -1,4 +0,0 @@
-/.vscode
-/build
-__pycache__
-
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/CMakeLists.txt b/Plugin/Source/Processors/Hysteresis/RTNeural/CMakeLists.txt
@@ -1,20 +0,0 @@
-cmake_minimum_required(VERSION 3.8)
-project(MLUtils)
-
-set(CMAKE_CXX_STANDARD 17)
-set(CMAKE_CXX_FLAGS "-O3")
-
-add_definitions(-DUSE_EIGEN)
-
-add_subdirectory(src)
-
-option(BUILD_TESTS "Build MLUtils accuracy tests" OFF)
-if(BUILD_TESTS)
-    message("Building tests...")
-    add_subdirectory(tests)
-endif()
-
-option(BUILD_BENCH "Build MLUtils benchmarks" OFF)
-if(BUILD_BENCH)
-    message("Building benchmarks...")
-endif()
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/LICENSE b/Plugin/Source/Processors/Hysteresis/RTNeural/LICENSE
@@ -1,29 +0,0 @@
-BSD 3-Clause License
-
-Copyright (c) 2020, jatinchowdhury18
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are met:
-
-1. Redistributions of source code must retain the above copyright notice, this
-   list of conditions and the following disclaimer.
-
-2. Redistributions in binary form must reproduce the above copyright notice,
-   this list of conditions and the following disclaimer in the documentation
-   and/or other materials provided with the distribution.
-
-3. Neither the name of the copyright holder nor the names of its
-   contributors may be used to endorse or promote products derived from
-   this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
-AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
-IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
-DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
-FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
-DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
-SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
-CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
-OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/README.md b/Plugin/Source/Processors/Hysteresis/RTNeural/README.md
@@ -1,15 +0,0 @@
-# MLUtils
-
-A lightweight neural network inferencing engine written in C++.
-This library was designed with the intention of being used in
-real-time audio processing.
-
-Currently supported layers:
-
-  - [x] dense
-  - [ ] GRU
-
-Currently supported activations:
-  - [x] tanh
-  - [ ] sigmoid
-  - [ ] ReLU
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/CMakeLists.txt b/Plugin/Source/Processors/Hysteresis/RTNeural/src/CMakeLists.txt
@@ -1,13 +0,0 @@
-include_directories(../modules/Eigen)
-
-add_library(MLUtils STATIC
-    gru.h
-    gru.cpp
-    gru_eigen.h
-    gru_eigen.cpp
-    dense.h
-    dense_eigen.h
-    activation.h
-    Model.h
-    Layer.h
-)
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.cpp b/Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.cpp
@@ -1,173 +0,0 @@
-#include "Json2RnnParser.h"
-
-using namespace MLUtils;
-
-template <typename T>
-std::unique_ptr<Model<T>> Json2RnnParser::parseJson (InputStream& input)
-{
-    auto parent = JSON::parse (input);
-    return parseJson<T> (parent);
-}
-
-template <typename T>
-std::unique_ptr<MLUtils::Model<T>> Json2RnnParser::parseJson (const var& parent)
-{
-    auto shape = parent["in_shape"];
-    auto layers = parent["layers"];
-
-    if (! shape.isArray() || ! layers.isArray())
-        return nullptr;
-
-    auto nDims = int (shape.getArray()->getLast());
-    // std::cout << "# dimensions: " << nDims <<std::endl;
-    auto model = std::make_unique<Model<T>> (nDims);
-
-    for (int i = 0; i < layers.getArray()->size(); ++i)
-    {
-        auto l = layers.getArray()->getUnchecked (i);
-        const auto type = l["type"].toString();
-
-        const auto layerShape = l["shape"];
-        auto layerDims = int (layerShape.getArray()->getLast());
-
-        auto weights = l["weights"];
-        // std::cout << type << " " << layerDims << std::endl;
-
-        if (type == "time-distributed-dense" || type == "dense")
-        {
-            auto dense = createDense<T> (model->getNextInSize(), layerDims, weights);
-            model->addLayer (dense.release());
-
-            const auto activationType = l["activation"].toString();
-            if (activationType == "tanh")
-            {
-                auto activation = std::make_unique<TanhActivation<T>> (layerDims);
-                model->addLayer (activation.release());
-            }
-        }
-        else if (type == "gru")
-        {
-            auto gru = createGRU<T> (model->getNextInSize(), layerDims, weights);
-            model->addLayer (gru.release());
-        }
-        else
-        {
-            jassertfalse;
-        }
-    }
-
-    return std::move (model);
-}
-
-template <typename T>
-std::unique_ptr<Dense<T>> Json2RnnParser::createDense (size_t in_size, size_t out_size, var& weights)
-{
-    auto dense = std::make_unique<Dense<T>> (in_size, out_size);
-
-    // load kernel weights
-    T** denseWeights;
-    denseWeights = new T*[out_size];
-    for (size_t i = 0; i < out_size; ++i)
-        denseWeights[i] = new T[in_size];
-
-    auto layerWeights = weights.getArray()->getUnchecked (0);
-    for (int i = 0; i < layerWeights.getArray()->size(); ++i)
-    {
-        auto lw = layerWeights.getArray()->getUnchecked (i);
-        for (int j = 0; j < lw.getArray()->size(); ++j)
-            denseWeights[j][i] = (T) (double) lw.getArray()->getUnchecked (j);
-    }
-
-    dense->setWeights (denseWeights);
-
-    for (size_t i = 0; i < out_size; ++i)
-        delete[] denseWeights[i];
-    delete[] denseWeights;
-
-    // load bias
-    std::vector<T> denseBias (out_size, 0.0f);
-    auto layerBiases = weights.getArray()->getUnchecked (1);
-    for (int i = 0; i < layerBiases.getArray()->size(); ++i)
-        denseBias[i] = (T) (double) layerBiases.getArray()->getUnchecked (i);
-
-    dense->setBias (denseBias.data());
-
-    return std::move (dense);
-}
-
-template <typename T>
-std::unique_ptr<GRULayer<T>> Json2RnnParser::createGRU (size_t in_size, size_t out_size, var& weights)
-{
-    auto gru = std::make_unique<GRULayer<T>> (in_size, out_size);
-
-    // load kernel weights
-    T** kernelWeights;
-    kernelWeights = new T*[in_size];
-    for (size_t i = 0; i < in_size; ++i)
-        kernelWeights[i] = new T[3 * out_size];
-
-    auto layerWeights = weights.getArray()->getUnchecked (0);
-    for (int i = 0; i < layerWeights.getArray()->size(); ++i)
-    {
-        auto lw = layerWeights.getArray()->getUnchecked (i);
-        for (int j = 0; j < lw.getArray()->size(); ++j)
-            kernelWeights[i][j] = (T) (double) lw.getArray()->getUnchecked (j);
-    }
-
-    gru->setWVals (kernelWeights);
-
-    for (size_t i = 0; i < in_size; ++i)
-        delete[] kernelWeights[i];
-    delete[] kernelWeights;
-
-    // load recurrent weights
-    T** recurrentWeights;
-    recurrentWeights = new T*[out_size];
-    for (size_t i = 0; i < out_size; ++i)
-        recurrentWeights[i] = new T[3 * out_size];
-
-    auto layerWeights2 = weights.getArray()->getUnchecked (1);
-    for (int i = 0; i < layerWeights2.getArray()->size(); ++i)
-    {
-        auto lw = layerWeights2.getArray()->getUnchecked (i);
-        for (int j = 0; j < lw.getArray()->size(); ++j)
-            recurrentWeights[i][j] = (T) (double) lw.getArray()->getUnchecked (j);
-    }
-
-    gru->setUVals (recurrentWeights);
-
-    for (size_t i = 0; i < out_size; ++i)
-        delete[] recurrentWeights[i];
-    delete[] recurrentWeights;
-
-    // load biases
-    T** gruBias;
-    gruBias = new T*[2];
-    for (size_t i = 0; i < 2; ++i)
-        gruBias[i] = new T[3 * out_size];
-
-    auto layerBias = weights.getArray()->getUnchecked (2);
-    for (int i = 0; i < layerBias.getArray()->size(); ++i)
-    {
-        auto lw = layerBias.getArray()->getUnchecked (i);
-        for (int j = 0; j < lw.getArray()->size(); ++j)
-            gruBias[i][j] = (T) (double) lw.getArray()->getUnchecked (j);
-    }
-
-    gru->setBVals (gruBias);
-
-    for (size_t i = 0; i < 2; ++i)
-        delete[] gruBias[i];
-    delete[] gruBias;
-
-    return std::move (gru);
-}
-
-template std::unique_ptr<MLUtils::Model<float>> Json2RnnParser::parseJson (InputStream& input);
-template std::unique_ptr<MLUtils::Model<double>> Json2RnnParser::parseJson (InputStream& input);
-
-template std::unique_ptr<Dense<float>> Json2RnnParser::createDense (size_t in_size, size_t out_size, var& weights);
-template std::unique_ptr<Dense<double>> Json2RnnParser::createDense (size_t in_size, size_t out_size, var& weights);
-
-template std::unique_ptr<GRULayer<float>> Json2RnnParser::createGRU (size_t in_size, size_t out_size, var& weights);
-template std::unique_ptr<GRULayer<double>> Json2RnnParser::createGRU (size_t in_size, size_t out_size, var& weights);
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/Json2RnnParser.h
@@ -1,25 +0,0 @@
-#ifndef JSON2RNNPARSER_H_INCLUDED
-#define JSON2RNNPARSER_H_INCLUDED
-
-#include "Model.h"
-#include <JuceHeader.h>
-
-class Json2RnnParser
-{
-public:
-    Json2RnnParser() {}
-
-    template <typename T>
-    static std::unique_ptr<MLUtils::Model<T>> parseJson (InputStream& input);
-
-    template <typename T>
-    static std::unique_ptr<MLUtils::Model<T>> parseJson (const var& json);
-
-    template <typename T>
-    static std::unique_ptr<MLUtils::Dense<T>> createDense (size_t in_size, size_t out_size, var& weights);
-
-    template <typename T>
-    static std::unique_ptr<MLUtils::GRULayer<T>> createGRU (size_t in_size, size_t out_size, var& weights);
-};
-
-#endif // JSON2RNNPARSER_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/Layer.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/Layer.h
@@ -1,28 +0,0 @@
-#ifndef LAYER_H_INCLUDED
-#define LAYER_H_INCLUDED
-
-#include <stddef.h>
-
-namespace MLUtils
-{
-/** Neural network layer */
-template <typename T>
-class Layer
-{
-public:
-    Layer (size_t in_size, size_t out_size) : in_size (in_size),
-                                              out_size (out_size)
-    {
-    }
-
-    virtual ~Layer() {}
-
-    virtual void forward (const T* input, T* out) = 0;
-
-    const size_t in_size;
-    const size_t out_size;
-};
-
-} // namespace MLUtils
-
-#endif // LAYER_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/Model.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/Model.h
@@ -1,79 +0,0 @@
-#ifndef MODEL_H_INCLUDED
-#define MODEL_H_INCLUDED
-
-#include <iostream>
-#include <vector>
-
-#include "Layer.h"
-#include "activation.h"
-#include "dense.h"
-#include "gru.cpp"
-#include "gru.h"
-
-namespace MLUtils
-{
-/** Neural network model */
-template <typename T>
-class Model
-{
-public:
-    Model (size_t in_size) : in_size (in_size)
-    {
-    }
-
-    ~Model()
-    {
-        for (auto l : layers)
-            delete l;
-        layers.clear();
-
-        for (auto o : outs)
-            delete[] o;
-        outs.clear();
-    }
-
-    size_t getNextInSize()
-    {
-        if (layers.empty())
-            return in_size;
-
-        return layers.back()->out_size;
-    }
-
-    void addLayer (Layer<T>* layer)
-    {
-        layers.push_back (layer);
-        outs.push_back (new T[layer->out_size]);
-    }
-
-    void reset()
-    {
-        for (auto* l : layers)
-        {
-            if (auto* lCast = dynamic_cast<GRULayer<T>*> (l))
-                lCast->reset();
-        }
-    }
-
-    inline T forward (const T* input)
-    {
-        layers[0]->forward (input, outs[0]);
-
-        for (size_t i = 1; i < layers.size(); ++i)
-        {
-            layers[i]->forward (outs[i - 1], outs[i]);
-        }
-
-        return outs.back()[0];
-    }
-
-    std::vector<Layer<T>*> layers;
-
-private:
-    const size_t in_size;
-    std::vector<T*> outs;
-};
-
-} // namespace MLUtils
-
-#endif // MODEL_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/activation.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/activation.h
@@ -1,85 +0,0 @@
-#ifndef ACTIVATION_H_INCLUDED
-#define ACTIVATION_H_INCLUDED
-
-#include "Layer.h"
-#include <functional>
-
-namespace MLUtils
-{
-template <typename T>
-class Activation : public Layer<T>
-{
-public:
-    Activation (size_t size, std::function<T (T)> func) : Layer<T> (size, size),
-                                                          func (func)
-    {
-    }
-
-    virtual ~Activation() {}
-
-    inline void forward (const T* input, T* out) override
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            out[i] = func (input[i]);
-    }
-
-private:
-    const std::function<T (T)> func;
-};
-
-} // namespace MLUtils
-
-#ifdef USE_EIGEN
-#include <Eigen/Dense>
-
-namespace MLUtils
-{
-template <typename T>
-class TanhActivation : public Activation<T>
-{
-public:
-    TanhActivation (size_t size) : Activation<T> (size, {})
-    {
-        inVec.resize (size, 1);
-        outVec.resize (size, 1);
-    }
-
-    inline void forward (const T* input, T* out) override
-    {
-        inVec = Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>> (input, Layer<T>::in_size, 1);
-        outVec = inVec.array().tanh();
-
-        std::copy (outVec.data(), outVec.data() + Layer<T>::in_size, out);
-    }
-
-    Eigen::Matrix<T, Eigen::Dynamic, 1> inVec;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> outVec;
-};
-
-} // namespace MLUtils
-
-#else
-#include <cmath>
-
-namespace MLUtils
-{
-template <typename T>
-class TanhActivation : public Activation<T>
-{
-public:
-    TanhActivation (size_t size) : Activation<T> (size, [] (T x) { return std::tanh (x); })
-    {
-    }
-
-    inline void forward (const T* input, T* out) override
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            out[i] = std::tanh (input[i]);
-    }
-};
-
-} // namespace MLUtils
-
-#endif // USE_EIGEN
-
-#endif // ACTIVATION_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/dense.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/dense.h
@@ -1,120 +0,0 @@
-#ifndef DENSE_H_INCLUDED
-#define DENSE_H_INCLUDED
-
-#include <algorithm>
-#include <numeric>
-#include <vector>
-
-#ifdef USE_EIGEN
-#include "dense_eigen.h"
-#else
-#include "Layer.h"
-
-namespace MLUtils
-{
-template <typename T>
-class Dense1
-{
-public:
-    Dense1 (size_t in_size) : in_size (in_size)
-    {
-        weights = new T[in_size];
-    }
-
-    ~Dense1()
-    {
-        delete[] weights;
-    }
-
-    inline T forward (const T* input)
-    {
-        return std::inner_product (weights, weights + in_size, input, (T) 0) + bias;
-    }
-
-    void setWeights (const T* newWeights)
-    {
-        for (size_t i = 0; i < in_size; ++i)
-            weights[i] = newWeights[i];
-    }
-
-    void setBias (T b) { bias = b; }
-
-    T getWeight (size_t i) const noexcept
-    {
-        return weights[i];
-    }
-
-    T getBias() const noexcept
-    {
-        return bias;
-    }
-
-private:
-    const size_t in_size;
-    T bias;
-
-    T* weights;
-};
-
-template <typename T>
-class Dense : public Layer<T>
-{
-public:
-    Dense (size_t in_size, size_t out_size) : Layer<T> (in_size, out_size)
-    {
-        subLayers = new Dense1<T>*[out_size];
-        for (size_t i = 0; i < out_size; ++i)
-            subLayers[i] = new Dense1<T> (in_size);
-    }
-
-    virtual ~Dense()
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            delete subLayers[i];
-
-        delete[] subLayers;
-    }
-
-    inline void forward (const T* input, T* out) override
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            out[i] = subLayers[i]->forward (input);
-    }
-
-    void setWeights (const std::vector<std::vector<T>>& newWeights)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            subLayers[i]->setWeights (newWeights[i].data());
-    }
-
-    void setWeights (T** newWeights)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            subLayers[i]->setWeights (newWeights[i]);
-    }
-
-    void setBias (T* b)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            subLayers[i]->setBias (b[i]);
-    }
-
-    T getWeight (size_t i, size_t k) const noexcept
-    {
-        return subLayers[i]->getWeight (k);
-    }
-
-    T getBias (size_t i) const noexcept
-    {
-        return subLayers[i]->getBias();
-    }
-
-private:
-    Dense1<T>** subLayers;
-};
-
-} // namespace MLUtils
-
-#endif // USE_EIGEN
-
-#endif // DENSE_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/dense_eigen.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/dense_eigen.h
@@ -1,72 +0,0 @@
-#ifndef DENSEEIGEN_H_INCLUDED
-#define DENSEEIGEN_H_INCLUDED
-
-#include "Layer.h"
-#include <Eigen/Dense>
-
-namespace MLUtils
-{
-template <typename T>
-class Dense : public Layer<T>
-{
-public:
-    Dense (size_t in_size, size_t out_size) : Layer<T> (in_size, out_size)
-    {
-        weights.resize (out_size, in_size);
-        bias.resize (out_size, 1);
-
-        inVec.resize (in_size, 1);
-        outVec.resize (out_size, 1);
-    }
-
-    virtual ~Dense() {}
-
-    inline void forward (const T* input, T* out) override
-    {
-        inVec = Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>> (input, Layer<T>::in_size, 1);
-        outVec = weights * inVec + bias;
-
-        std::copy (outVec.data(), outVec.data() + Layer<T>::out_size, out);
-    }
-
-    void setWeights (const std::vector<std::vector<T>>& newWeights)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            for (size_t k = 0; k < Layer<T>::in_size; ++k)
-                weights (i, k) = newWeights[i][k];
-    }
-
-    void setWeights (T** newWeights)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            for (size_t k = 0; k < Layer<T>::in_size; ++k)
-                weights (i, k) = newWeights[i][k];
-    }
-
-    void setBias (T* b)
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-            bias (i, 0) = b[i];
-    }
-
-    T getWeight (size_t i, size_t k) const noexcept
-    {
-        return weights (i, k);
-    }
-
-    T getBias (size_t i) const noexcept
-    {
-        return bias (i, 0);
-    }
-
-private:
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> weights;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> bias;
-
-    Eigen::Matrix<T, Eigen::Dynamic, 1> inVec;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> outVec;
-};
-
-} // namespace MLUtils
-
-#endif // DENSEEIGEN_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.cpp b/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.cpp
@@ -1,155 +0,0 @@
-#include "gru.h"
-
-namespace MLUtils
-{
-#ifndef USE_EIGEN
-template <typename T>
-GRULayer<T>::GRULayer (size_t in_size, size_t out_size) : Layer<T> (in_size, out_size),
-                                                          zWeights (in_size, out_size),
-                                                          rWeights (in_size, out_size),
-                                                          cWeights (in_size, out_size)
-{
-    ht1 = new T[out_size];
-    zVec = new T[out_size];
-    rVec = new T[out_size];
-    cVec = new T[out_size];
-}
-
-template <typename T>
-GRULayer<T>::~GRULayer()
-{
-    delete[] ht1;
-    delete[] zVec;
-    delete[] rVec;
-    delete[] cVec;
-}
-
-template <typename T>
-GRULayer<T>::WeightSet::WeightSet (size_t in_size, size_t out_size) : out_size (out_size)
-{
-    W = new T*[out_size];
-    U = new T*[out_size];
-    b[0] = new T[out_size];
-    b[1] = new T[out_size];
-
-    for (size_t i = 0; i < out_size; ++i)
-    {
-        W[i] = new T[in_size];
-        U[i] = new T[out_size];
-    }
-}
-
-template <typename T>
-GRULayer<T>::WeightSet::~WeightSet()
-{
-    delete[] b[0];
-    delete[] b[1];
-
-    for (size_t i = 0; i < out_size; ++i)
-    {
-        delete[] W[i];
-        delete[] U[i];
-    }
-
-    delete[] W;
-    delete[] U;
-}
-
-template <typename T>
-void GRULayer<T>::setWVals (T** wVals)
-{
-    for (size_t i = 0; i < Layer<T>::in_size; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            zWeights.W[k][i] = wVals[i][k];
-            rWeights.W[k][i] = wVals[i][k + Layer<T>::out_size];
-            cWeights.W[k][i] = wVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-void GRULayer<T>::setUVals (T** uVals)
-{
-    for (size_t i = 0; i < Layer<T>::out_size; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            zWeights.U[k][i] = uVals[i][k];
-            rWeights.U[k][i] = uVals[i][k + Layer<T>::out_size];
-            cWeights.U[k][i] = uVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-void GRULayer<T>::setBVals (T** bVals)
-{
-    for (size_t i = 0; i < 2; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            zWeights.b[i][k] = bVals[i][k];
-            rWeights.b[i][k] = bVals[i][k + Layer<T>::out_size];
-            cWeights.b[i][k] = bVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-T GRULayer<T>::getWVal (size_t i, size_t k) const noexcept
-{
-    T** set = zWeights.W;
-    if (k > 2 * Layer<T>::out_size)
-    {
-        k -= 2 * Layer<T>::out_size;
-        set = cWeights.W;
-    }
-    else if (k > Layer<T>::out_size)
-    {
-        k -= Layer<T>::out_size;
-        set = rWeights.W;
-    }
-
-    return set[i][k];
-}
-
-template <typename T>
-T GRULayer<T>::getUVal (size_t i, size_t k) const noexcept
-{
-    T** set = zWeights.U;
-    if (k > 2 * Layer<T>::out_size)
-    {
-        k -= 2 * Layer<T>::out_size;
-        set = cWeights.U;
-    }
-    else if (k > Layer<T>::out_size)
-    {
-        k -= Layer<T>::out_size;
-        set = rWeights.U;
-    }
-
-    return set[i][k];
-}
-
-template <typename T>
-T GRULayer<T>::getBVal (size_t i, size_t k) const noexcept
-{
-    T** set = zWeights.b;
-    if (k > 2 * Layer<T>::out_size)
-    {
-        k -= 2 * Layer<T>::out_size;
-        set = cWeights.b;
-    }
-    else if (k > Layer<T>::out_size)
-    {
-        k -= Layer<T>::out_size;
-        set = rWeights.b;
-    }
-
-    return set[i][k];
-}
-#endif // USE_EIGEN
-
-} // namespace MLUtils
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru.h
@@ -1,87 +0,0 @@
-#ifndef GRU_H_INCLUDED
-#define GRU_H_INCLUDED
-
-#include <algorithm>
-#include <cmath>
-#include <cstring>
-#include <numeric>
-
-#ifdef USE_EIGEN
-#include "gru_eigen.cpp"
-#include "gru_eigen.h"
-#else
-#include "Layer.h"
-
-namespace MLUtils
-{
-template <typename T>
-class GRULayer : public Layer<T>
-{
-public:
-    GRULayer (size_t in_size, size_t out_size);
-    virtual ~GRULayer();
-
-    virtual void reset()
-    {
-        std::fill (ht1, ht1 + Layer<T>::out_size, (T) 0);
-    }
-
-    virtual inline void forward (const T* input, T* h) override
-    {
-        for (size_t i = 0; i < Layer<T>::out_size; ++i)
-        {
-            zVec[i] = sigmoid (vMult (zWeights.W[i], input, Layer<T>::in_size) + vMult (zWeights.U[i], ht1, Layer<T>::out_size) + zWeights.b[0][i] + zWeights.b[1][i]);
-            rVec[i] = sigmoid (vMult (rWeights.W[i], input, Layer<T>::in_size) + vMult (rWeights.U[i], ht1, Layer<T>::out_size) + rWeights.b[0][i] + rWeights.b[1][i]);
-            cVec[i] = std::tanh (vMult (cWeights.W[i], input, Layer<T>::in_size) + rVec[i] * (vMult (cWeights.U[i], ht1, Layer<T>::out_size) + cWeights.b[1][i]) + cWeights.b[0][i]);
-            h[i] = ((T) 1 - zVec[i]) * cVec[i] + zVec[i] * ht1[i];
-        }
-
-        std::copy (h, h + Layer<T>::out_size, ht1);
-    }
-
-    inline T vMult (const T* arg1, const T* arg2, size_t dim)
-    {
-        return std::inner_product (arg1, arg1 + dim, arg2, (T) 0);
-    }
-
-    inline T sigmoid (T value)
-    {
-        return (T) 1 / ((T) 1 + std::exp (-value));
-    }
-
-    void setWVals (T** wVals);
-    void setUVals (T** uVals);
-    void setBVals (T** bVals);
-
-    T getWVal (size_t i, size_t k) const noexcept;
-    T getUVal (size_t i, size_t k) const noexcept;
-    T getBVal (size_t i, size_t k) const noexcept;
-
-protected:
-    T* ht1;
-
-    struct WeightSet
-    {
-        WeightSet (size_t in_size, size_t out_size);
-        ~WeightSet();
-
-        T** W;
-        T** U;
-        T* b[2];
-        const size_t out_size;
-    };
-
-    WeightSet zWeights;
-    WeightSet rWeights;
-    WeightSet cWeights;
-
-    T* zVec;
-    T* rVec;
-    T* cVec;
-};
-
-} // namespace MLUtils
-
-#endif // USE_EIGEN
-
-#endif // GRU_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.cpp b/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.cpp
@@ -1,109 +0,0 @@
-#ifdef USE_EIGEN
-
-#include "gru_eigen.h"
-
-namespace MLUtils
-{
-template <typename T>
-GRULayer<T>::GRULayer (size_t in_size, size_t out_size) : Layer<T> (in_size, out_size)
-{
-    wVec_z.resize (out_size, in_size);
-    wVec_r.resize (out_size, in_size);
-    wVec_c.resize (out_size, in_size);
-    uVec_z.resize (out_size, out_size);
-    uVec_r.resize (out_size, out_size);
-    uVec_c.resize (out_size, out_size);
-    bVec_z.resize (out_size, 2);
-    bVec_r.resize (out_size, 2);
-    bVec_c.resize (out_size, 2);
-
-    ht1.resize (out_size, 1);
-    zVec.resize (out_size, 1);
-    rVec.resize (out_size, 1);
-    cVec.resize (out_size, 1);
-
-    inVec.resize (in_size, 1);
-    ones = Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>::Ones (out_size, 1);
-}
-
-template <typename T>
-void GRULayer<T>::setWVals (T** wVals)
-{
-    for (size_t i = 0; i < Layer<T>::in_size; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            wVec_z (k, i) = wVals[i][k];
-            wVec_r (k, i) = wVals[i][k + Layer<T>::out_size];
-            wVec_c (k, i) = wVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-void GRULayer<T>::setUVals (T** uVals)
-{
-    for (size_t i = 0; i < Layer<T>::out_size; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            uVec_z (k, i) = uVals[i][k];
-            uVec_r (k, i) = uVals[i][k + Layer<T>::out_size];
-            uVec_c (k, i) = uVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-void GRULayer<T>::setBVals (T** bVals)
-{
-    for (size_t i = 0; i < 2; ++i)
-    {
-        for (size_t k = 0; k < Layer<T>::out_size; ++k)
-        {
-            bVec_z (k, i) = bVals[i][k];
-            bVec_r (k, i) = bVals[i][k + Layer<T>::out_size];
-            bVec_c (k, i) = bVals[i][k + Layer<T>::out_size * 2];
-        }
-    }
-}
-
-template <typename T>
-T GRULayer<T>::getWVal (size_t i, size_t k) const noexcept
-{
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> set = wVec_z;
-    if (k > 2 * Layer<T>::out_size)
-        set = wVec_c;
-    else if (k > Layer<T>::out_size)
-        set = wVec_r;
-
-    return set (k % Layer<T>::out_size, i);
-}
-
-template <typename T>
-T GRULayer<T>::getUVal (size_t i, size_t k) const noexcept
-{
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> set = uVec_z;
-    if (k > 2 * Layer<T>::out_size)
-        set = uVec_c;
-    else if (k > Layer<T>::out_size)
-        set = uVec_r;
-
-    return set (k % Layer<T>::out_size, i);
-}
-
-template <typename T>
-T GRULayer<T>::getBVal (size_t i, size_t k) const noexcept
-{
-    Eigen::Matrix<T, Eigen::Dynamic, 2> set = bVec_z;
-    if (k > 2 * Layer<T>::out_size)
-        set = bVec_c;
-    else if (k > Layer<T>::out_size)
-        set = bVec_r;
-
-    return set (k % Layer<T>::out_size, i);
-}
-
-} // namespace MLUtils
-
-#endif // USE_EIGEN
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.h b/Plugin/Source/Processors/Hysteresis/RTNeural/src/gru_eigen.h
@@ -1,72 +0,0 @@
-#ifndef GRUEIGEN_H_INCLUDED
-#define GRUEIGEN_H_INCLUDED
-
-#include "Layer.h"
-#include <Eigen/Dense>
-
-namespace MLUtils
-{
-template <typename T>
-class GRULayer : public Layer<T>
-{
-public:
-    GRULayer (size_t in_size, size_t out_size);
-    virtual ~GRULayer() {}
-
-    void reset()
-    {
-        std::fill (ht1.data(), ht1.data() + Layer<T>::out_size, (T) 0);
-    }
-
-    inline void forward (const T* input, T* h) override
-    {
-        inVec = Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, 1>> (input, Layer<T>::in_size, 1);
-
-        zVec = wVec_z * inVec + uVec_z * ht1 + bVec_z.col (0) + bVec_z.col (1);
-        rVec = wVec_r * inVec + uVec_r * ht1 + bVec_r.col (0) + bVec_r.col (1);
-        sigmoid (zVec);
-        sigmoid (rVec);
-
-        cVec = wVec_c * inVec + rVec.cwiseProduct (uVec_c * ht1 + bVec_c.col (1)) + bVec_c.col (0);
-        cVec = cVec.array().tanh();
-
-        ht1 = (ones - zVec).cwiseProduct (cVec) + zVec.cwiseProduct (ht1);
-        std::copy (ht1.data(), ht1.data() + Layer<T>::out_size, h);
-    }
-
-    inline void sigmoid (Eigen::Matrix<T, Eigen::Dynamic, 1>& vector)
-    {
-        vector = (T) 1 / (((T) -1 * vector.array()).array().exp() + (T) 1);
-    }
-
-    void setWVals (T** wVals);
-    void setUVals (T** uVals);
-    void setBVals (T** bVals);
-
-    T getWVal (size_t i, size_t k) const noexcept;
-    T getUVal (size_t i, size_t k) const noexcept;
-    T getBVal (size_t i, size_t k) const noexcept;
-
-private:
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> wVec_z;
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> wVec_r;
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> wVec_c;
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> uVec_z;
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> uVec_r;
-    Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic> uVec_c;
-    Eigen::Matrix<T, Eigen::Dynamic, 2> bVec_z;
-    Eigen::Matrix<T, Eigen::Dynamic, 2> bVec_r;
-    Eigen::Matrix<T, Eigen::Dynamic, 2> bVec_c;
-
-    Eigen::Matrix<T, Eigen::Dynamic, 1> ht1;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> zVec;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> rVec;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> cVec;
-
-    Eigen::Matrix<T, Eigen::Dynamic, 1> inVec;
-    Eigen::Matrix<T, Eigen::Dynamic, 1> ones;
-};
-
-} // namespace MLUtils
-
-#endif // GRUEIGEN_H_INCLUDED
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/tests/CMakeLists.txt b/Plugin/Source/Processors/Hysteresis/RTNeural/tests/CMakeLists.txt
@@ -1,6 +0,0 @@
-include_directories(../src)
-include_directories(../modules/Eigen)
-include_directories(../modules/json)
-
-add_executable(dense dense.cpp)
-target_link_libraries(dense LINK_PUBLIC MLUtils)
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/tests/dense.cpp b/Plugin/Source/Processors/Hysteresis/RTNeural/tests/dense.cpp
@@ -1,37 +0,0 @@
-#include "load_csv.hpp"
-#include "model_loader.hpp"
-#include <fstream>
-#include <iostream>
-
-using T = double;
-
-int main()
-{
-    std::cout << "TESTING DENSE IMPLEMENTATION..." << std::endl;
-
-    std::ifstream jsonStream ("models/dense.json", std::ifstream::binary);
-    auto model = json_parser::parseJson<T> (jsonStream);
-
-    std::ifstream pythonX ("test_data/dense_x_python.csv");
-    auto xData = load_csv::loadFile<T> (pythonX);
-
-    std::ifstream pythonY ("test_data/dense_y_python.csv");
-    const auto yRefData = load_csv::loadFile<T> (pythonY);
-
-    std::vector<T> yData (xData.size(), (T) 0);
-    for (size_t n = 0; n < xData.size(); ++n)
-        yData[n] = model->forward (xData[n].data());
-
-    constexpr T THRESH = 1.0e-6; // 2.0e-8;
-    for (size_t n = 0; n < xData.size(); ++n)
-    {
-        auto err = std::abs (yData[n] - yRefData[n][0]);
-        if (err > THRESH)
-        {
-            std::cout << "ERR: " << err << ", idx: " << n << std::endl;
-            break;
-        }
-    }
-
-    return 0;
-}
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/tests/load_csv.hpp b/Plugin/Source/Processors/Hysteresis/RTNeural/tests/load_csv.hpp
@@ -1,32 +0,0 @@
-#include <fstream>
-#include <vector>
-#include <string>
-#include <sstream>
-
-namespace load_csv {
-
-template<typename T>
-std::vector<std::vector<T>> loadFile(std::ifstream& stream)
-{
-    std::vector<std::vector<T>> vec;
-
-    std::string line;
-    if(stream.is_open()) {
-        while(std::getline(stream, line)) {
-            std::stringstream ss(line);
-
-            std::string number;
-            std::vector<T> lineVec;
-            while(std::getline(ss, number, ','))
-                lineVec.push_back(static_cast<T>(std::stod(number)));
-
-            vec.push_back(lineVec);
-        }
-
-        stream.close();
-    }
-
-    return vec;
-}
-
-} // namespace load_csv
diff --git a/Plugin/Source/Processors/Hysteresis/RTNeural/tests/model_loader.hpp b/Plugin/Source/Processors/Hysteresis/RTNeural/tests/model_loader.hpp
@@ -1,86 +0,0 @@
-#pragma once
-
-#include <iostream>
-#include <fstream>
-#include <string>
-#include <memory>
-#include "../src/Model.h"
-#include "../modules/json/json.hpp"
-
-namespace json_parser {
-
-using json = nlohmann::json;
-
-template<typename T>
-std::unique_ptr<MLUtils::Dense<T>> createDense(size_t in_size, size_t out_size, const json& weights)
-{
-    auto dense = std::make_unique<MLUtils::Dense<T>>(in_size, out_size);
-
-    // load weights
-    std::vector<std::vector<T>> denseWeights (out_size);
-    for(auto& w : denseWeights)
-        w.resize(in_size, (T) 0);
-
-    auto layerWeights = weights[0];
-    for(size_t i = 0; i < layerWeights.size(); ++i)
-    {
-        auto lw = layerWeights[i];
-        for(size_t j = 0; j < lw.size(); ++j)
-            denseWeights[j][i] = lw[j].get<T>();
-    }
-
-    dense->setWeights(denseWeights);
-
-    // load biases
-    std::vector<T> denseBias = weights[1].get<std::vector<T>>();
-    dense->setBias(denseBias.data());
-
-    return std::move(dense);
-}
-
-template<typename T>
-std::unique_ptr<MLUtils::Model<T>> parseJson (std::ifstream& jsonStream)
-{
-    json parent;
-    jsonStream >> parent;
-    auto shape = parent["in_shape"];
-    auto layers = parent["layers"];
-
-    if(! shape.is_array() || ! layers.is_array())
-        return {};
-
-    const auto nDims = shape.back().get<int>();
-    std::cout << "# dimensions: " << nDims << std::endl;
-
-    auto model = std::make_unique<MLUtils::Model<T>> (nDims);
-
-    for(const auto& l : layers)
-    {
-        const auto type = l["type"].get<std::string>();
-        std::cout << "Layer: " << type << std::endl;
-
-        const auto layerShape = l["shape"];
-        const auto layerDims = layerShape.back().get<int>();
-        std::cout << "  Dims: " << layerDims << std::endl;
-
-        const auto weights = l["weights"];
-
-        if(type == "dense")
-        {
-            auto dense = createDense<T>(model->getNextInSize(), layerDims, weights);
-            model->addLayer(dense.release());
-
-            const auto activationType = l["activation"].get<std::string>();
-            if(activationType == "tanh")
-            {
-                std::cout << "  activation: " << activationType << std::endl;
-                auto activation = std::make_unique<MLUtils::TanhActivation<T>> (layerDims);
-                model->addLayer(activation.release());
-            }
-        }
-    }
-
-    return std::move(model);
-}
-
-} // namespace json_parser
diff --git a/Plugin/modules/CMakeLists.txt b/Plugin/modules/CMakeLists.txt
@@ -3,6 +3,10 @@ add_subdirectory(JUCE)
 juce_add_modules(foleys_gui_magic)
 juce_add_modules(chowdsp_utils)
 
+# Using RTNeural with XSimd backend
+set(RTNEURAL_XSIMD ON CACHE BOOL "Use RTNeural with XSimd backend" FORCE)
+add_subdirectory(RTNeural)
+
 add_library(juce_plugin_modules STATIC)
 target_link_libraries(juce_plugin_modules
     PRIVATE
@@ -11,6 +15,7 @@ target_link_libraries(juce_plugin_modules
         juce::juce_audio_plugin_client
         foleys_gui_magic
         chowdsp_utils
+        RTNeural
     PUBLIC
         juce::juce_recommended_config_flags
        juce::juce_recommended_lto_flags
diff --git a/Plugin/modules/RTNeural b/Plugin/modules/RTNeural
@@ -0,0 +1 @@
+Subproject commit 26b1e81e1f305695e66c45c2490eb22d504c27a5
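
For reference, the RTNeural run-time API that this commit switches to (replacing the in-tree Json2RnnParser) boils down to the following minimal sketch. It assumes RTNeural's dynamic model loader as used in the patch above; "model.json" and the 4-element input buffer are placeholders, and the input size must match the in_size of the model actually loaded (the STN's inputSize in this plugin):

    // Minimal sketch: load a JSON model with RTNeural and run one sample.
    #include <RTNeural/RTNeural.h>
    #include <fstream>
    #include <iostream>

    int main()
    {
        // Parse a JSON model description into a run-time RTNeural model,
        // mirroring the RTNeural::json_parser::parseJson call in
        // HysteresisSTN.cpp above.
        std::ifstream jsonStream ("model.json", std::ifstream::binary);
        auto model = RTNeural::json_parser::parseJson<double> (jsonStream);

        if (model == nullptr)
            return 1;

        model->reset(); // clear any recurrent state before processing

        // Run one frame through the network, as the STN does per sample.
        double input[] = { 0.0, 0.0, 0.0, 0.0 };
        std::cout << model->forward (input) << std::endl;
    }

With the XSimd backend enabled via RTNEURAL_XSIMD (see Plugin/modules/CMakeLists.txt above), the same code runs with SIMD-accelerated layer kernels, which is what the commit's STN benchmark exercises.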