commit 3a9ad761136c76a8e9f2e6bded017b23b130ed3c
parent e88a8ec94c679f1562ff911880b3d855ae712ab1
Author: mtseng15 <49569515+mtseng15@users.noreply.github.com>
Date: Tue, 9 Nov 2021 20:37:53 -0800
Merge pull request #2 from mlamsk/MT_20211105
Mt 20211105
Diffstat:
7 files changed, 91 insertions(+), 60 deletions(-)
diff --git a/NeuralPi.jucer b/NeuralPi.jucer
@@ -79,9 +79,9 @@
</LINUX_MAKE>
<VS2019 targetFolder="Builds/VisualStudio2019">
<CONFIGURATIONS>
- <CONFIGURATION isDebug="1" name="Debug" targetName="NeuralPi" headerPath="C:\Users\rache\Desktop\dev\json-develop\include C:\Users\rache\Desktop\dev\NeuralPi\modules\RTNeural C:\Users\rache\Desktop\dev\NeuralPi\modules\RTNeural\modules\xsimd\include"
+ <CONFIGURATION isDebug="1" name="Debug" targetName="NeuralPi" headerPath="C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\json\include C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\RTNeural C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\RTNeural\modules\xsimd\include"
defines="USE_XSIMD=1"/>
- <CONFIGURATION isDebug="0" name="Release" targetName="NeuralPi" headerPath="C:\Users\rache\Desktop\dev\json-develop\include C:\Users\rache\Desktop\dev\NeuralPi\modules\RTNeural C:\Users\rache\Desktop\dev\NeuralPi\modules\RTNeural\modules\xsimd\include"
+ <CONFIGURATION isDebug="0" name="Release" targetName="NeuralPi" headerPath="C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\json\include C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\RTNeural C:\Users\tseng\Personal\mlamsk\NeuralPi\modules\RTNeural\modules\xsimd\include"
defines="USE_XSIMD=1"/>
</CONFIGURATIONS>
<MODULEPATHS>
diff --git a/Source/PluginEditor.cpp b/Source/PluginEditor.cpp
@@ -573,7 +573,7 @@ NeuralPiAudioProcessorEditor::NeuralPiAudioProcessorEditor (NeuralPiAudioProcess
setSize(345, 455);
// Set gain knob color based on conditioned/snapshot model
- setGainKnobColor();
+ setParamKnobColor();
}
NeuralPiAudioProcessorEditor::~NeuralPiAudioProcessorEditor()
@@ -657,7 +657,7 @@ void NeuralPiAudioProcessorEditor::modelSelectChanged()
}
auto newValue = static_cast<float>(processor.current_model_index / (processor.num_models - 1.0));
modelKnob.setValue(newValue);
- setGainKnobColor();
+ setParamKnobColor();
}
void NeuralPiAudioProcessorEditor::irSelectChanged()
@@ -714,7 +714,7 @@ void NeuralPiAudioProcessorEditor::loadButtonClicked()
}
}
}
- setGainKnobColor();
+ setParamKnobColor();
}
void NeuralPiAudioProcessorEditor::loadIRClicked()
@@ -1085,12 +1085,20 @@ void NeuralPiAudioProcessorEditor::setParameterValue(const String& paramId, floa
}
-void NeuralPiAudioProcessorEditor::setGainKnobColor()
+void NeuralPiAudioProcessorEditor::setParamKnobColor()
{
- if (processor.is_conditioned == false) {
+ // Color the knobs red when they control a conditioning parameter of the loaded model
+ if (processor.params == 0) {
ampGainKnob.setLookAndFeel(&blueLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&blueLookAndFeel);
}
- else {
+ else if (processor.params == 1) {
ampGainKnob.setLookAndFeel(&redLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&blueLookAndFeel);
}
+ else if (processor.params == 2) {
+ ampGainKnob.setLookAndFeel(&redLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&redLookAndFeel);
+ }
+
}
\ No newline at end of file
diff --git a/Source/PluginEditor.h b/Source/PluginEditor.h
@@ -175,7 +175,7 @@ private:
float getParameterValue(const String& paramId);
void setParameterValue(const String& paramId, float value);
- void setGainKnobColor();
+ void setParamKnobColor();
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (NeuralPiAudioProcessorEditor)
};
diff --git a/Source/PluginProcessor.cpp b/Source/PluginProcessor.cpp
@@ -223,9 +223,13 @@ void NeuralPiAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffe
// Process LSTM based on input_size (snapshot model or conditioned model)
if (LSTM.input_size == 1) {
LSTM.process(buffer.getReadPointer(0), buffer.getWritePointer(0), numSamples);
- } else {
+ }
+ else if (LSTM.input_size == 2) {
LSTM.process(buffer.getReadPointer(0), gain, buffer.getWritePointer(0), numSamples);
}
+ else if (LSTM.input_size == 3) {
+ LSTM.process(buffer.getReadPointer(0), gain, master, buffer.getWritePointer(0), numSamples);
+ }
}
// Process IR
@@ -243,7 +247,9 @@ void NeuralPiAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffe
}
// Master Volume
- buffer.applyGain(master * 2.0); // Adding volume range (2x) mainly for clean models
+ if (LSTM.input_size == 1 || LSTM.input_size == 2) {
+ buffer.applyGain(master * 2.0); // Adding volume range (2x) mainly for clean models
+ }
// Process Delay, and Reverb
set_delayParams(delay);
@@ -334,22 +340,21 @@ void NeuralPiAudioProcessor::loadConfig(File configFile)
char_filename = path.toUTF8();
try {
- // Check input size for conditioned models
- // read JSON file
- std::ifstream i2(char_filename);
- nlohmann::json weights_json;
- i2 >> weights_json;
-
- int input_size_json = weights_json["/model_data/input_size"_json_pointer];
- LSTM.input_size = input_size_json;
- if (input_size_json == 1) {
- is_conditioned = false;
- LSTM.load_json(char_filename);
+ // Load the JSON file into the correct model
+ LSTM.load_json(char_filename);
+
+ // Check the input size and update the GUI appropriately
+ if (LSTM.input_size == 1) {
+ params = 0;
+ }
+ else if (LSTM.input_size == 2) {
+ params = 1;
}
- else {
- is_conditioned = true;
- LSTM.load_json2(char_filename);
+ else if (LSTM.input_size == 3) {
+ params = 2;
}
+
+ // Loading succeeded, so flag the model as loaded
model_loaded = 1;
}
catch (const std::exception& e) {
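For reference, the loadConfig branching above maps the model's input size onto the editor-facing params value by a fixed offset of one. A minimal standalone sketch of that mapping (hypothetical helper name, not part of the plugin):

    #include <cassert>

    // Hypothetical helper mirroring the if/else chain in loadConfig:
    //   input_size 1 (snapshot)                  -> params 0
    //   input_size 2 (gain-conditioned)          -> params 1
    //   input_size 3 (gain + master conditioned) -> params 2
    static int paramsForInputSize(int input_size)
    {
        return input_size - 1;
    }

    int main()
    {
        assert(paramsForInputSize(1) == 0);
        assert(paramsForInputSize(2) == 1);
        assert(paramsForInputSize(3) == 2);
        return 0;
    }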
diff --git a/Source/PluginProcessor.h b/Source/PluginProcessor.h
@@ -130,7 +130,10 @@ public:
int current_ir_index = 0;
int ir_index = 0;
- bool is_conditioned = false;
+ // Number of conditioning parameters for the loaded model
+ // 0 means a snapshot (unconditioned) model
+ // The PluginEditor uses this to determine which knobs to color red
+ int params = 0;
RT_LSTM LSTM;
diff --git a/Source/RTNeuralLSTM.cpp b/Source/RTNeuralLSTM.cpp
@@ -17,11 +17,12 @@ Vec2d transpose(const Vec2d& x)
return y;
}
-void RT_LSTM::load_json(const char* filename)
+template <typename T1>
+void RT_LSTM::set_weights(T1 model, const char* filename)
{
-
- auto& lstm = model.get<0>();
- auto& dense = model.get<1>();
+ // Get references to the layers of the model passed in
+ auto& lstm = (*model).get<0>();
+ auto& dense = (*model).get<1>();
// read a JSON file
std::ifstream i2(filename);
@@ -45,38 +46,32 @@ void RT_LSTM::load_json(const char* filename)
std::vector<float> dense_bias = weights_json["/state_dict/lin.bias"_json_pointer];
dense.setBias(dense_bias.data());
+
}
-
-void RT_LSTM::load_json2(const char* filename)
+void RT_LSTM::load_json(const char* filename)
{
-
- auto& lstm = model_cond1.get<0>();
- auto& dense = model_cond1.get<1>();
-
- // read a JSON file
+ // Read in the JSON file
std::ifstream i2(filename);
- nlohmann::json weights_json;
- i2 >> weights_json;
-
- Vec2d lstm_weights_ih = weights_json["/state_dict/rec.weight_ih_l0"_json_pointer];
- lstm.setWVals(transpose(lstm_weights_ih));
-
- Vec2d lstm_weights_hh = weights_json["/state_dict/rec.weight_hh_l0"_json_pointer];
- lstm.setUVals(transpose(lstm_weights_hh));
-
- std::vector<float> lstm_bias_ih = weights_json["/state_dict/rec.bias_ih_l0"_json_pointer];
- std::vector<float> lstm_bias_hh = weights_json["/state_dict/rec.bias_hh_l0"_json_pointer];
- for (int i = 0; i < 80; ++i)
- lstm_bias_hh[i] += lstm_bias_ih[i];
- lstm.setBVals(lstm_bias_hh);
+ nlohmann::json weights_json;
+ i2 >> weights_json;
- Vec2d dense_weights = weights_json["/state_dict/lin.weight"_json_pointer];
- dense.setWeights(dense_weights);
+ // Get the input size of the JSON file
+ int input_size_json = weights_json["/model_data/input_size"_json_pointer];
+ input_size = input_size_json;
- std::vector<float> dense_bias = weights_json["/state_dict/lin.bias"_json_pointer];
- dense.setBias(dense_bias.data());
+ // Load the appropriate model
+ if (input_size == 1) {
+ set_weights(&model, filename);
+ }
+ else if (input_size == 2) {
+ set_weights(&model_cond1, filename);
+ }
+ else if (input_size == 3) {
+ set_weights(&model_cond2, filename);
+ }
}
+
void RT_LSTM::reset()
{
if (input_size == 1) {
@@ -95,8 +90,19 @@ void RT_LSTM::process(const float* inData, float* outData, int numSamples)
void RT_LSTM::process(const float* inData, float param, float* outData, int numSamples)
{
for (int i = 0; i < numSamples; ++i) {
- inArray[0] = inData[i];
- inArray[1] = param;
- outData[i] = model_cond1.forward(inArray) + inData[i];
+ inArray1[0] = inData[i];
+ inArray1[1] = param;
+ outData[i] = model_cond1.forward(inArray1) + inData[i];
+ }
+}
+
+void RT_LSTM::process(const float* inData, float param1, float param2, float* outData, int numSamples)
+{
+ for (int i = 0; i < numSamples; ++i) {
+ inArray2[0] = inData[i];
+ inArray2[1] = param1;
+ inArray2[2] = param2;
+ outData[i] = model_cond2.forward(inArray2) + inData[i];
}
}
+
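A rough usage sketch of the reworked RT_LSTM interface, mirroring how PluginProcessor drives it: load_json() now reads input_size from the JSON and fills the matching model, and the caller picks the process() overload accordingly. The block size, parameter values, and model path below are placeholders, not values from this commit:

    #include "RTNeuralLSTM.h"

    int main()
    {
        RT_LSTM lstm;
        lstm.load_json("model.json"); // placeholder path; sets input_size and loads weights
        lstm.reset();

        float in[64] = {};            // placeholder mono input block
        float out[64] = {};
        float gain = 0.5f, master = 0.5f;

        if (lstm.input_size == 1)
            lstm.process(in, out, 64);               // snapshot model
        else if (lstm.input_size == 2)
            lstm.process(in, gain, out, 64);         // gain-conditioned model
        else if (lstm.input_size == 3)
            lstm.process(in, gain, master, out, 64); // gain + master conditioned model

        return 0;
    }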
diff --git a/Source/RTNeuralLSTM.h b/Source/RTNeuralLSTM.h
@@ -9,10 +9,13 @@ public:
void reset();
void load_json(const char* filename);
- void load_json2(const char* filename);
+ template <typename T1>
+
+ void set_weights(T1 model, const char* filename);
void process(const float* inData, float* outData, int numSamples);
void process(const float* inData, float param, float* outData, int numSamples);
+ void process(const float* inData, float param1, float param2, float* outData, int numSamples);
int input_size = 1;
@@ -25,5 +28,11 @@ private:
RTNeural::LSTMLayerT<float, 2, 20>,
RTNeural::DenseT<float, 20, 1>> model_cond1;
- float inArray[2] = { 0.0, 0.0 };
+ RTNeural::ModelT<float, 3, 1,
+ RTNeural::LSTMLayerT<float, 3, 20>,
+ RTNeural::DenseT<float, 20, 1>> model_cond2;
+
+ // Pre-allocate arrays for feeding the conditioned models
+ float inArray1[2] = { 0.0, 0.0 };
+ float inArray2[3] = { 0.0, 0.0, 0.0 };
};
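The three ModelT instantiations differ only in their input width, which is why the single set_weights template can service all of them through get<0>() / get<1>(). A compile-time sketch of that relationship; the snapshot model's shape is assumed from the unchanged part of this header, and the RTNeural headers are assumed to be on the include path:

    #include <RTNeural/RTNeural.h>

    // The three network shapes used by RT_LSTM (input width 1, 2, or 3).
    using Snapshot = RTNeural::ModelT<float, 1, 1,
        RTNeural::LSTMLayerT<float, 1, 20>, RTNeural::DenseT<float, 20, 1>>;
    using Cond1 = RTNeural::ModelT<float, 2, 1,
        RTNeural::LSTMLayerT<float, 2, 20>, RTNeural::DenseT<float, 20, 1>>;
    using Cond2 = RTNeural::ModelT<float, 3, 1,
        RTNeural::LSTMLayerT<float, 3, 20>, RTNeural::DenseT<float, 20, 1>>;

    // Same access pattern as set_weights: get<0>() is the LSTM, get<1>() the dense layer.
    template <typename Model>
    void checkLayers(Model* model)
    {
        auto& lstm  = (*model).get<0>();
        auto& dense = (*model).get<1>();
        (void) lstm;
        (void) dense;
    }

    int main()
    {
        Snapshot m0; Cond1 m1; Cond2 m2;
        checkLayers(&m0);
        checkLayers(&m1);
        checkLayers(&m2);
        return 0;
    }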