AnalogTapeModel

Physical modelling signal processing for analog tape recording
Log | Files | Refs | Submodules | README | LICENSE

commit 0fa05461cd89f8045ec7c501a3f4924b17c423a5
parent 4f0fd9d3d0c991da6b1d2620cf93b060aa76c492
Author: Jatin Chowdhury <[email protected]>
Date:   Thu, 16 Dec 2021 21:49:53 +0000

Remove stale code from STNModel.h

Diffstat:
M Plugin/Source/Processors/Hysteresis/STNModel.cpp | 143 +++++++------------------------------------------------------------------------
M Plugin/Source/Processors/Hysteresis/STNModel.h | 187 +------------------------------------------------------------------------------
2 files changed, 13 insertions(+), 317 deletions(-)

diff --git a/Plugin/Source/Processors/Hysteresis/STNModel.cpp b/Plugin/Source/Processors/Hysteresis/STNModel.cpp @@ -2,147 +2,28 @@ namespace STNSpace { -Dense54::Dense54() -{ - for (size_t i = 0; i < 3; ++i) - ins[i] = v_type (0.0); - - for (size_t i = 0; i < 16; ++i) - weights[i] = v_type (0.0); - - for (size_t i = 0; i < 2; ++i) - bias[i] = v_type (0.0); -} - -void Dense54::setWeights (std::vector<std::vector<double>>& w) -{ - for (size_t i = 0; i < out_size; ++i) - for (size_t j = 0; j < in_size; ++j) - weights[i * out_size + j / 2].set (j % 2, w[i][j]); -} - -void Dense54::setBias (std::vector<double>& b) -{ - for (size_t i = 0; i < out_size; ++i) - bias[i / 2].set (i % 2, b[i]); -} - -//=========================================================== -Dense44::Dense44() -{ - for (size_t i = 0; i < 16; ++i) - weights[i] = v_type (0.0); - - for (size_t i = 0; i < 2; ++i) - bias[i] = v_type (0.0); -} - -void Dense44::setWeights (std::vector<std::vector<double>>& w) -{ - for (size_t i = 0; i < out_size; ++i) - for (size_t j = 0; j < in_size; ++j) - weights[i * out_size + j / 2].set (j % 2, w[i][j]); -} - -void Dense44::setBias (std::vector<double>& b) -{ - for (size_t i = 0; i < out_size; ++i) - bias[i / 2].set (i % 2, b[i]); -} - -//=========================================================== -Dense41::Dense41() -{ - for (size_t i = 0; i < 2; ++i) - weights[i] = v_type (0.0); - - bias = 0.0; -} +static bool printed = false; -void Dense41::setWeights (std::vector<std::vector<double>>& w) +STNModel::STNModel() { - for (size_t i = 0; i < out_size; ++i) - for (size_t j = 0; j < in_size; ++j) - weights[j / 2].set (j % 2, w[i][j]); -} - -void Dense41::setBias (std::vector<double>& b) -{ - bias = b[0]; + // useful for debugging... + if (! 
printed) + { +#if USE_RTNEURAL_STATIC + DBG ("Using RTNeural ModelT STN"); +#elif USE_RTNEURAL_POLY + DBG ("Using RTNeural polymorphic STN"); +#endif + printed = true; + } } -//=========================================================== void STNModel::loadModel (const nlohmann::json& modelJ) { #if USE_RTNEURAL_STATIC model.parseJson (modelJ); #elif USE_RTNEURAL_POLY model = RTNeural::json_parser::parseJson<double> (modelJ); -#else - auto layers = modelJ["layers"]; - - const auto weights_l0 = layers.at (0)["weights"]; - { - // load weights - std::vector<std::vector<double>> denseWeights (4); - for (auto& w : denseWeights) - w.resize (5, 0.0); - - auto layerWeights = weights_l0[0]; - for (size_t i = 0; i < layerWeights.size(); ++i) - { - auto lw = layerWeights[i]; - for (size_t j = 0; j < lw.size(); ++j) - denseWeights[j][i] = lw[j].get<double>(); - } - dense54.setWeights (denseWeights); - - // load biases - std::vector<double> denseBias = weights_l0[1].get<std::vector<double>>(); - dense54.setBias (denseBias); - } - - const auto weights_l1 = layers.at (1)["weights"]; - { - // load weights - std::vector<std::vector<double>> denseWeights (4); - for (auto& w : denseWeights) - w.resize (4, 0.0); - - auto layerWeights = weights_l1[0]; - for (size_t i = 0; i < layerWeights.size(); ++i) - { - auto lw = layerWeights[i]; - for (size_t j = 0; j < lw.size(); ++j) - denseWeights[j][i] = lw[j].get<double>(); - } - dense44.setWeights (denseWeights); - - // load biases - std::vector<double> denseBias = weights_l1[1].get<std::vector<double>>(); - dense44.setBias (denseBias); - } - - const auto weights_l2 = layers.at (2)["weights"]; - { - // load weights - std::vector<std::vector<double>> denseWeights (1); - for (auto& w : denseWeights) - w.resize (4, 0.0); - - auto layerWeights = weights_l2[0]; - for (size_t i = 0; i < layerWeights.size(); ++i) - { - auto lw = layerWeights[i]; - for (size_t j = 0; j < lw.size(); ++j) - denseWeights[j][i] = lw[j].get<double>(); - } - 
dense41.setWeights (denseWeights); - - // load biases - std::vector<double> denseBias = weights_l2[1].get<std::vector<double>>(); - dense41.setBias (denseBias); - } #endif } diff --git a/Plugin/Source/Processors/Hysteresis/STNModel.h b/Plugin/Source/Processors/Hysteresis/STNModel.h @@ -3,188 +3,15 @@ #include <JuceHeader.h> #include <RTNeural/RTNeural.h> -// include <Accelerate> on Apple devices so we can use vvtanh -#if JUCE_MAC || JUCE_IOS -#define Point CarbonDummyPointName -#define Component CarbonDummyCompName -#include <Accelerate/Accelerate.h> -#undef Point -#undef Component -#endif - #define USE_RTNEURAL_POLY 0 #define USE_RTNEURAL_STATIC 1 namespace STNSpace { -using v_type = dsp::SIMDRegister<double>; -constexpr auto v_size = v_type::SIMDNumElements; -static_assert (v_size == 2, "SIMD double size is required to be 2."); - -class Dense54 -{ -public: - Dense54(); - - void setWeights (std::vector<std::vector<double>>& w); - void setBias (std::vector<double>& b); - - inline void forward (const double* input) noexcept - { - ins[0] = v_type::fromRawArray (input); - ins[1] = v_type::fromRawArray (input + v_size); - ins[2] = v_type::fromRawArray (input + 2 * v_size); - - outs[0] = v_type (0.0); - outs[1] = v_type (0.0); - for (size_t k = 0; k < 3; ++k) - { - // output 0 - outs[0].set (0, outs[0].get (0) + (ins[k] * weights[0 * out_size + k]).sum()); - // output 1 - outs[0].set (1, outs[0].get (1) + (ins[k] * weights[1 * out_size + k]).sum()); - // output 2 - outs[1].set (0, outs[1].get (0) + (ins[k] * weights[2 * out_size + k]).sum()); - // output 3 - outs[1].set (1, outs[1].get (1) + (ins[k] * weights[3 * out_size + k]).sum()); - } - - outs[0] += bias[0]; - outs[1] += bias[1]; - } - - v_type outs[2]; - -private: - static constexpr size_t in_size = 5; - static constexpr size_t out_size = 4; - - v_type ins[3]; - v_type bias[2]; - v_type weights[16]; -}; - -//=========================================================== -class Dense44 -{ -public: - Dense44(); - - 
void setWeights (std::vector<std::vector<double>>& w); - void setBias (std::vector<double>& b); - - inline void forward (const v_type* ins) noexcept - { - outs[0] = v_type (0.0); - outs[1] = v_type (0.0); - for (size_t k = 0; k < 2; ++k) - { - // output 0 - outs[0].set (0, outs[0].get (0) + (ins[k] * weights[0 * out_size + k]).sum()); - // output 1 - outs[0].set (1, outs[0].get (1) + (ins[k] * weights[1 * out_size + k]).sum()); - // output 2 - outs[1].set (0, outs[1].get (0) + (ins[k] * weights[2 * out_size + k]).sum()); - // output 3 - outs[1].set (1, outs[1].get (1) + (ins[k] * weights[3 * out_size + k]).sum()); - } - - outs[0] += bias[0]; - outs[1] += bias[1]; - } - - v_type outs[2]; - -private: - static constexpr size_t in_size = 4; - static constexpr size_t out_size = 4; - - v_type bias[2]; - v_type weights[16]; -}; - -//=========================================================== -class Dense41 -{ -public: - Dense41(); - - void setWeights (std::vector<std::vector<double>>& w); - void setBias (std::vector<double>& b); - - inline double forward (const v_type* ins) const noexcept - { - double out = 0.0; - for (size_t k = 0; k < 2; ++k) - out += (ins[k] * weights[k]).sum(); - - return out + bias; - } - -private: - static constexpr size_t in_size = 4; - static constexpr size_t out_size = 1; - - double bias; - v_type weights[2]; -}; - -//=========================================================== -class Tanh -{ -public: - Tanh() = default; - - inline void forward (const v_type* input) noexcept - { -#if defined(_M_ARM64) || defined(__arm64__) || defined(__aarch64__) - alignas (16) double x[4]; - input[0].copyToRawArray (x); - input[1].copyToRawArray (&x[2]); - - vvtanh (x, x, &size); - - outs[0] = v_type::fromRawArray (x); - outs[1] = v_type::fromRawArray (x + 2); -#elif USE_XSIMD - using x_type = xsimd::simd_type<double>; - outs[0] = v_type (xsimd::tanh (static_cast<x_type> (input[0].value))); - outs[1] = v_type (xsimd::tanh (static_cast<x_type> (input[1].value))); 
-#else - // fallback - outs[0].set (0, std::tanh (input[0].get (0))); - outs[0].set (1, std::tanh (input[0].get (1))); - outs[1].set (0, std::tanh (input[1].get (0))); - outs[1].set (1, std::tanh (input[1].get (1))); -#endif - } - - v_type outs[2]; - -private: - static constexpr int size = 4; -}; - -static bool printed = false; - class STNModel { public: - STNModel() - { - // useful for debugging... - if (! printed) - { -#if USE_RTNEURAL_STATIC - DBG ("Using RTNeural ModelT STN"); -#elif USE_RTNEURAL_POLY - DBG ("Using RTNeural polymorphic STN"); -#else - DBG ("Using hand-coded STN"); -#endif - printed = true; - } - } + STNModel(); inline double forward (const double* input) noexcept { @@ -192,12 +19,6 @@ public: return model.forward (input); #elif USE_RTNEURAL_POLY return model->forward (input); -#else - dense54.forward (input); - tanh1.forward (dense54.outs); - dense44.forward (tanh1.outs); - tanh2.forward (dense44.outs); - return dense41.forward (tanh2.outs); #endif } @@ -208,12 +29,6 @@ private: RTNeural::ModelT<double, 5, 1, RTNeural::DenseT<double, 5, 4>, RTNeural::TanhActivationT<double, 4>, RTNeural::DenseT<double, 4, 4>, RTNeural::TanhActivationT<double, 4>, RTNeural::DenseT<double, 4, 1>> model; #elif USE_RTNEURAL_POLY std::unique_ptr<RTNeural::Model<double>> model; -#else - Dense54 dense54; - Tanh tanh1; - Dense44 dense44; - Tanh tanh2; - Dense41 dense41; #endif JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (STNModel)