commit f86bb93e253d2fc5788c1e96cbcff95e16b4f0f1
parent 4c789d9e136fe426cd6842b933922bf287a5cd53
Author: mtseng15 <[email protected]>
Date: Wed, 10 Nov 2021 04:31:39 -0800
Added support for 2 parameter models
Diffstat:
6 files changed, 67 insertions(+), 31 deletions(-)
diff --git a/Source/PluginEditor.cpp b/Source/PluginEditor.cpp
@@ -573,7 +573,7 @@ NeuralPiAudioProcessorEditor::NeuralPiAudioProcessorEditor (NeuralPiAudioProcess
setSize(345, 455);
// Set gain knob color based on conditioned/snapshot model
- setGainKnobColor();
+ setParamKnobColor();
}
NeuralPiAudioProcessorEditor::~NeuralPiAudioProcessorEditor()
@@ -657,7 +657,7 @@ void NeuralPiAudioProcessorEditor::modelSelectChanged()
}
auto newValue = static_cast<float>(processor.current_model_index / (processor.num_models - 1.0));
modelKnob.setValue(newValue);
- setGainKnobColor();
+ setParamKnobColor();
}
void NeuralPiAudioProcessorEditor::irSelectChanged()
@@ -714,7 +714,7 @@ void NeuralPiAudioProcessorEditor::loadButtonClicked()
}
}
}
- setGainKnobColor();
+ setParamKnobColor();
}
void NeuralPiAudioProcessorEditor::loadIRClicked()
@@ -1085,12 +1085,20 @@ void NeuralPiAudioProcessorEditor::setParameterValue(const String& paramId, floa
}
-void NeuralPiAudioProcessorEditor::setGainKnobColor()
+void NeuralPiAudioProcessorEditor::setParamKnobColor()
{
- if (processor.is_conditioned == false) {
+ // If the knob is used for a parameter, change it to red
+ if (processor.params == 0) {
ampGainKnob.setLookAndFeel(&blueLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&blueLookAndFeel);
}
- else {
+ else if (processor.params == 1) {
ampGainKnob.setLookAndFeel(&redLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&blueLookAndFeel);
}
+ else if (processor.params == 2) {
+ ampGainKnob.setLookAndFeel(&redLookAndFeel);
+ ampMasterKnob.setLookAndFeel(&redLookAndFeel);
+ }
+
}
\ No newline at end of file
diff --git a/Source/PluginEditor.h b/Source/PluginEditor.h
@@ -175,7 +175,7 @@ private:
float getParameterValue(const String& paramId);
void setParameterValue(const String& paramId, float value);
- void setGainKnobColor();
+ void setParamKnobColor();
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (NeuralPiAudioProcessorEditor)
};
diff --git a/Source/PluginProcessor.cpp b/Source/PluginProcessor.cpp
@@ -223,9 +223,13 @@ void NeuralPiAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffe
// Process LSTM based on input_size (snapshot model or conditioned model)
if (LSTM.input_size == 1) {
LSTM.process(buffer.getReadPointer(0), buffer.getWritePointer(0), numSamples);
- } else {
+ }
+ else if (LSTM.input_size == 2) {
LSTM.process(buffer.getReadPointer(0), gain, buffer.getWritePointer(0), numSamples);
}
+ else if (LSTM.input_size == 3) {
+ LSTM.process(buffer.getReadPointer(0), gain, master, buffer.getWritePointer(0), numSamples);
+ }
}
// Process IR
@@ -243,7 +247,9 @@ void NeuralPiAudioProcessor::processBlock (AudioBuffer<float>& buffer, MidiBuffe
}
// Master Volume
- buffer.applyGain(master * 2.0); // Adding volume range (2x) mainly for clean models
+ if (LSTM.input_size == 1 || LSTM.input_size == 2) {
+ buffer.applyGain(master * 2.0); // Adding volume range (2x) mainly for clean models
+ }
// Process Delay, and Reverb
set_delayParams(delay);
@@ -334,22 +340,21 @@ void NeuralPiAudioProcessor::loadConfig(File configFile)
char_filename = path.toUTF8();
try {
- // Check input size for conditioned models
- // read JSON file
- std::ifstream i2(char_filename);
- nlohmann::json weights_json;
- i2 >> weights_json;
-
- int input_size_json = weights_json["/model_data/input_size"_json_pointer];
- LSTM.input_size = input_size_json;
- if (input_size_json == 1) {
- is_conditioned = false;
- LSTM.load_json(char_filename);
+ // Load the JSON file into the correct model
+ LSTM.load_json(char_filename);
+
+ // Check what the input size is and then update the GUI appropriately
+ if (LSTM.input_size == 1) {
+ params = 0;
+ }
+ else if (LSTM.input_size == 2) {
+ params = 1;
}
- else {
- is_conditioned = true;
- LSTM.load_json2(char_filename);
+ else if (LSTM.input_size == 3) {
+ params = 2;
}
+
+ // Loading succeeded (no exception thrown): flag the model as loaded
model_loaded = 1;
}
catch (const std::exception& e) {
diff --git a/Source/PluginProcessor.h b/Source/PluginProcessor.h
@@ -130,7 +130,10 @@ public:
int current_ir_index = 0;
int ir_index = 0;
- bool is_conditioned = false;
+ // The number of parameters for the model
// 0 is for a snapshot model
// The PluginEditor uses this to determine which knobs to color red
+ int params = 0;
RT_LSTM LSTM;
diff --git a/Source/RTNeuralLSTM.cpp b/Source/RTNeuralLSTM.cpp
@@ -63,10 +63,12 @@ void RT_LSTM::load_json(const char* filename)
if (input_size == 1) {
set_weights(&model, filename);
}
- else {
+ else if (input_size == 2) {
set_weights(&model_cond1, filename);
- }
-
+ }
+ else if (input_size == 3) {
+ set_weights(&model_cond2, filename);
+ }
}
@@ -88,8 +90,19 @@ void RT_LSTM::process(const float* inData, float* outData, int numSamples)
void RT_LSTM::process(const float* inData, float param, float* outData, int numSamples)
{
for (int i = 0; i < numSamples; ++i) {
- inArray[0] = inData[i];
- inArray[1] = param;
- outData[i] = model_cond1.forward(inArray) + inData[i];
+ inArray1[0] = inData[i];
+ inArray1[1] = param;
+ outData[i] = model_cond1.forward(inArray1) + inData[i];
+ }
+}
+
+void RT_LSTM::process(const float* inData, float param1, float param2, float* outData, int numSamples)
+{
+ for (int i = 0; i < numSamples; ++i) {
+ inArray2[0] = inData[i];
+ inArray2[1] = param1;
+ inArray2[2] = param2;
+ outData[i] = model_cond2.forward(inArray2) + inData[i];
}
}
+
diff --git a/Source/RTNeuralLSTM.h b/Source/RTNeuralLSTM.h
@@ -15,6 +15,7 @@ public:
void process(const float* inData, float* outData, int numSamples);
void process(const float* inData, float param, float* outData, int numSamples);
+ void process(const float* inData, float param1, float param2, float* outData, int numSamples);
int input_size = 1;
@@ -27,5 +28,11 @@ private:
RTNeural::LSTMLayerT<float, 2, 20>,
RTNeural::DenseT<float, 20, 1>> model_cond1;
- float inArray[2] = { 0.0, 0.0 };
+ RTNeural::ModelT<float, 3, 1,
+ RTNeural::LSTMLayerT<float, 3, 20>,
+ RTNeural::DenseT<float, 20, 1>> model_cond2;
+
+ // Pre-allocate arrays for feeding the models
+ float inArray1[2] = { 0.0, 0.0 };
+ float inArray2[3] = { 0.0, 0.0, 0.0 };
};