NeuralAmpModelerPlugin

Plugin for Neural Amp Modeler

commit ed0294bd7f14b91fdf6d2210ee01ecbd24f5aecb
parent 3b0cc199e92bdfb720df4bf7a0466133e982ed1a
Author: Steven Atkinson <[email protected]>
Date:   Sat, 25 Feb 2023 23:02:23 -0800

Catch NaNs in WaveNet and RecursiveLinearFilter (#76)

* Catch and zero out NaNs in WaveNet and RecursiveLinearFilter

* Default tone stack to on
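
The fix has the same shape in both DSP paths: compute the output sample, test it, and substitute silence before the value reaches any state that later samples depend on. For the recursive filter this matters most, since the output feeds back into the next sample and a single NaN would otherwise stick forever; the WaveNet has no feedback, so zeroed samples simply flush out. A minimal sketch of the pattern (a hypothetical one-pole filter, not the plugin's code):

    #include <cmath> // std::isnan

    // One-pole IIR step. Once a NaN lands in the feedback state `y`,
    // every later output would be NaN too; flushing it to zero before
    // it is stored keeps the filter from jamming.
    double onePoleStep(double x, double &y, double a1) {
      double out = x - a1 * y; // feedback through the previous output
      if (std::isnan(out))
        out = 0.0; // the commit's fix: replace NaN with silence
      y = out; // stored as history for the next sample
      return out;
    }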

Diffstat:
  M NeuralAmpModeler/NeuralAmpModeler.cpp          |  2 +-
  M NeuralAmpModeler/dsp/RecursiveLinearFilter.cpp |  4 ++++
  M NeuralAmpModeler/dsp/wavenet.cpp               | 27 +++++++++++++++++++++++----
  M NeuralAmpModeler/dsp/wavenet.h                 |  5 +++++

4 files changed, 33 insertions(+), 5 deletions(-)

diff --git a/NeuralAmpModeler/NeuralAmpModeler.cpp b/NeuralAmpModeler/NeuralAmpModeler.cpp
@@ -111,7 +111,7 @@ NeuralAmpModeler::NeuralAmpModeler(const InstanceInfo &info)
   this->GetParam(kOutputLevel)->InitGain("Output", 0.0, -40.0, 40.0, 0.1);
   this->GetParam(kNoiseGateThreshold)
       ->InitGain("Noise Gate", -80.0, -100.0, 0.0, 0.1);
-  this->GetParam(kEQActive)->InitBool("ToneStack", false);
+  this->GetParam(kEQActive)->InitBool("ToneStack", true);
 
   this->mNoiseGateTrigger.AddListener(&this->mNoiseGateGain);
 
diff --git a/NeuralAmpModeler/dsp/RecursiveLinearFilter.cpp b/NeuralAmpModeler/dsp/RecursiveLinearFilter.cpp
@@ -7,6 +7,7 @@
 // See: https://webaudio.github.io/Audio-EQ-Cookbook/audio-eq-cookbook.html
 
 #include <algorithm> // std::fill
+#include <cmath>     // isnan
 #include <stdexcept>
 
 #include "RecursiveLinearFilter.h"
@@ -53,6 +54,9 @@ iplug::sample **recursive_linear_filter::Base::Process(iplug::sample **inputs,
       for (auto i = 1; i < outputDegree; i++)
         out += this->mOutputCoefficients[i] *
                this->mOutputHistory[c][(outputStart + i) % outputDegree];
+      // Prevent a NaN from jamming the filter!
+      if (isnan(out))
+        out = 0.0;
       // Store the output!
       if (outputDegree >= 1)
         this->mOutputHistory[c][outputStart] = out;
diff --git a/NeuralAmpModeler/dsp/wavenet.cpp b/NeuralAmpModeler/dsp/wavenet.cpp
@@ -69,9 +69,11 @@ wavenet::_LayerArray::_LayerArray(const int input_size,
     this->_layers.push_back(_Layer(condition_size, channels, kernel_size,
                                    dilations[i], activation, gated));
   const long receptive_field = this->_get_receptive_field();
-  for (int i = 0; i < dilations.size(); i++)
+  for (int i = 0; i < dilations.size(); i++) {
     this->_layer_buffers.push_back(Eigen::MatrixXf(
         channels, LAYER_ARRAY_BUFFER_SIZE + receptive_field - 1));
+    this->_layer_buffers[i].setZero();
+  }
   this->_buffer_start = this->_get_receptive_field() - 1;
 }
@@ -88,6 +90,13 @@ long wavenet::_LayerArray::get_receptive_field() const {
 }
 
 void wavenet::_LayerArray::prepare_for_frames_(const long num_frames) {
+  // Example:
+  // _buffer_start = 0
+  // num_frames = 64
+  // buffer_size = 64
+  // -> this will write on indices 0 through 63, inclusive.
+  // -> No illegal writes.
+  // -> no rewind needed.
   if (this->_buffer_start + num_frames > this->_get_buffer_size())
     this->_rewind_buffers_();
 }
@@ -136,6 +145,7 @@ long wavenet::_LayerArray::_get_channels() const {
 }
 
 long wavenet::_LayerArray::_get_receptive_field() const {
+  // TODO remove this and use get_receptive_field() instead!
   long res = 1;
   for (int i = 0; i < this->_layers.size(); i++)
     res += (this->_layers[i].get_kernel_size() - 1) *
@@ -300,6 +310,11 @@ void wavenet::WaveNet::_process_core_() {
   this->_set_num_frames_(num_frames);
   this->_prepare_for_frames_(num_frames);
 
+  // NOTE: During warm-up, weird things can happen that NaN out the layers.
+  // We could solve this by anti-popping the *input*. But, it's easier to check
+  // the outputs for NaNs and zero them out.
+  // They'll flush out eventually because the model doesn't use any feedback.
+
   // Fill into condition array:
   // Clumsy...
   for (int j = 0; j < num_frames; j++) {
@@ -330,9 +345,13 @@ void wavenet::WaveNet::_process_core_() {
   const long final_head_array = this->_head_arrays.size() - 1;
   assert(this->_head_arrays[final_head_array].rows() == 1);
 
-  for (int s = 0; s < num_frames; s++)
-    this->_core_dsp_output[s] =
-        this->_head_scale * this->_head_arrays[final_head_array](0, s);
+  for (int s = 0; s < num_frames; s++) {
+    float out = this->_head_scale * this->_head_arrays[final_head_array](0, s);
+    // This is the NaN check that we could fix with anti-popping the input
+    if (isnan(out))
+      out = 0.0;
+    this->_core_dsp_output[s] = out;
+  }
   // Apply anti-pop
   this->_anti_pop_();
 }
diff --git a/NeuralAmpModeler/dsp/wavenet.h b/NeuralAmpModeler/dsp/wavenet.h
@@ -101,6 +101,8 @@ public:
   void set_num_frames_(const long num_frames);
   void set_params_(std::vector<float>::iterator &it);
 
+  // "Zero-indexed" receptive field.
+  // E.g. a 1x1 convolution has a z.i.r.f. of zero.
   long get_receptive_field() const;
 
 private:
@@ -122,6 +124,9 @@ private:
     return this->_layer_buffers.size() > 0 ? this->_layer_buffers[0].cols() : 0;
   };
   long _get_channels() const;
+  // "One-indexed" receptive field
+  // TODO remove!
+  // E.g. a 1x1 convolution has a o.i.r.f. of one.
   long _get_receptive_field() const;
   void _rewind_buffers_();
 };
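
A note on the two receptive-field conventions that the new wavenet.h comments distinguish: the public get_receptive_field() is "zero-indexed" (a 1x1 convolution has a receptive field of zero), while the private _get_receptive_field() is "one-indexed" (its sum in wavenet.cpp starts from 1), which is why the constructor sets _buffer_start to _get_receptive_field() - 1. A standalone sketch of the arithmetic, using hypothetical free functions rather than the class methods:

    #include <vector>

    // "One-indexed": mirrors the loop in _LayerArray::_get_receptive_field().
    // A 1x1 convolution (kernel size 1) yields 1.
    long receptiveFieldOneIndexed(const std::vector<int> &kernelSizes,
                                  const std::vector<int> &dilations) {
      long res = 1;
      for (size_t i = 0; i < kernelSizes.size(); i++)
        res += static_cast<long>(kernelSizes[i] - 1) * dilations[i];
      return res;
    }

    // "Zero-indexed": the public convention; a 1x1 convolution yields 0.
    long receptiveFieldZeroIndexed(const std::vector<int> &kernelSizes,
                                   const std::vector<int> &dilations) {
      return receptiveFieldOneIndexed(kernelSizes, dilations) - 1;
    }

The buffer bookkeeping in prepare_for_frames_ follows the same arithmetic: when _buffer_start + num_frames equals the buffer size, the frame writes indices _buffer_start through buffer_size - 1 and stays in bounds, so only a strictly greater sum triggers _rewind_buffers_().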