commit 99b0d392ea4b7d1fb1b42b72bc0ef0b683b6d17a
parent c0c9be367ac1177bb74be417342ec6b1fa7fbf8b
Author: Steven Atkinson <steven@atkinson.mn>
Date: Sun, 5 Feb 2023 15:10:42 -0800
Clean up implicit conversions (#61)
* Clean up implicit conversions
Get rid of a bunch of warnings
* Get rid of choc and comment out unused variables in cnpy
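Background note (illustrative sketch, not code from this diff): Eigen's index
accessors such as MatrixXf::rows(), cols(), and VectorXf::size() return
Eigen::Index (std::ptrdiff_t by default, a signed 64-bit type on most
platforms), so storing the result in an int narrows it and triggers
conversion warnings (e.g. Clang's -Wshorten-64-to-32 or MSVC's C4244).
The commit widens these to long instead. A minimal sketch of that pattern,
using a hypothetical helper and matrix w:

    #include <Eigen/Dense>

    void scale_rows(Eigen::MatrixXf &w) {
      // Before: "int rows = w.rows();" narrows Eigen::Index to int and warns.
      // After: keep the wider signed type that Eigen returns.
      const long rows = w.rows();
      for (long i = 0; i < rows; i++)
        w.row(i) *= 0.5f; // placeholder per-row operation
    }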
Diffstat:
9 files changed, 50 insertions(+), 55 deletions(-)
diff --git a/NeuralAmpModeler/NeuralAmpModeler.h b/NeuralAmpModeler/NeuralAmpModeler.h
@@ -1,7 +1,5 @@
#pragma once
-#include "choc_DisableAllWarnings.h"
-#include "choc_ReenableAllWarnings.h"
#include "dsp.h"
#include "dsp/ImpulseResponse.h"
#include "dsp/RecursiveLinearFilter.h"
diff --git a/NeuralAmpModeler/dsp/Resample.h b/NeuralAmpModeler/dsp/Resample.h
@@ -55,7 +55,7 @@ void dsp::ResampleCubic(const std::vector<T> &inputs,
while (time < endTimeOriginal) {
// Find the index of the sample in the original audio file that is just
// before the current time in the resampled audio file
- int index = (long)std::floor(time / timeIncrement);
+ long index = (long)std::floor(time / timeIncrement);
// Calculate the time difference between the current time in the resampled
// audio file and the sample in the original audio file
diff --git a/NeuralAmpModeler/dsp/cnpy.cpp b/NeuralAmpModeler/dsp/cnpy.cpp
@@ -85,8 +85,8 @@ std::vector<char> &cnpy::operator+=(std::vector<char> &lhs, const char *rhs) {
void cnpy::parse_npy_header(unsigned char *buffer, size_t &word_size,
std::vector<size_t> &shape, bool &fortran_order) {
// std::string magic_string(buffer,6);
- uint8_t major_version = *reinterpret_cast<uint8_t *>(buffer + 6);
- uint8_t minor_version = *reinterpret_cast<uint8_t *>(buffer + 7);
+ // uint8_t major_version = *reinterpret_cast<uint8_t *>(buffer + 6);
+ // uint8_t minor_version = *reinterpret_cast<uint8_t *>(buffer + 7);
uint16_t header_len = *reinterpret_cast<uint16_t *>(buffer + 8);
std::string header(reinterpret_cast<char *>(buffer + 9), header_len);
diff --git a/NeuralAmpModeler/dsp/dsp.cpp b/NeuralAmpModeler/dsp/dsp.cpp
@@ -206,12 +206,12 @@ void tanh_(Eigen::MatrixXf &x) { tanh_(x, 0, x.rows(), 0, x.cols()); }
void Conv1D::set_params_(std::vector<float>::iterator &params) {
if (this->_weight.size() > 0) {
- const int out_channels = this->_weight[0].rows();
- const int in_channels = this->_weight[0].cols();
+ const long out_channels = this->_weight[0].rows();
+ const long in_channels = this->_weight[0].cols();
// Crazy ordering because that's how it gets flattened.
- for (int i = 0; i < out_channels; i++)
- for (int j = 0; j < in_channels; j++)
- for (int k = 0; k < this->_weight.size(); k++)
+ for (auto i = 0; i < out_channels; i++)
+ for (auto j = 0; j < in_channels; j++)
+ for (auto k = 0; k < this->_weight.size(); k++)
this->_weight[k](i, j) = *(params++);
}
for (int i = 0; i < this->_bias.size(); i++)
@@ -319,7 +319,7 @@ void convnet::BatchNorm::process_(Eigen::MatrixXf &x, const long i_start,
const long i_end) const {
// todo using colwise?
// #speed but conv probably dominates
- for (int i = i_start; i < i_end; i++) {
+ for (auto i = i_start; i < i_end; i++) {
x.col(i) = x.col(i).cwiseProduct(this->scale);
x.col(i) += this->loc;
}
@@ -356,7 +356,7 @@ void convnet::ConvNetBlock::process_(const Eigen::MatrixXf &input,
throw std::runtime_error("Unrecognized activation");
}
-int convnet::ConvNetBlock::get_out_channels() const {
+long convnet::ConvNetBlock::get_out_channels() const {
return this->conv.get_out_channels();
}
@@ -384,7 +384,6 @@ convnet::ConvNet::ConvNet(const int channels, const std::vector<int> &dilations,
this->_verify_params(channels, dilations, batchnorm, params.size());
this->_blocks.resize(dilations.size());
std::vector<float>::iterator it = params.begin();
- int in_channels = 1;
for (int i = 0; i < dilations.size(); i++)
this->_blocks[i].set_params_(i == 0 ? 1 : channels, channels, dilations[i],
batchnorm, activation, it);
@@ -399,13 +398,13 @@ convnet::ConvNet::ConvNet(const int channels, const std::vector<int> &dilations,
void convnet::ConvNet::_process_core_() {
this->_update_buffers_();
// Main computation!
- const int i_start = this->_input_buffer_offset;
+ const long i_start = this->_input_buffer_offset;
const long num_frames = this->_input_post_gain.size();
const long i_end = i_start + num_frames;
// TODO one unnecessary copy :/ #speed
- for (int i = i_start; i < i_end; i++)
+ for (auto i = i_start; i < i_end; i++)
this->_block_vals[0](0, i) = this->_input_buffer[i];
- for (long i = 0; i < this->_blocks.size(); i++)
+ for (auto i = 0; i < this->_blocks.size(); i++)
this->_blocks[i].process_(this->_block_vals[i], this->_block_vals[i + 1],
i_start, i_end);
// TODO clean up this allocation
@@ -421,7 +420,7 @@ void convnet::ConvNet::_process_core_() {
void convnet::ConvNet::_verify_params(const int channels,
const std::vector<int> &dilations,
const bool batchnorm,
- const int actual_params) {
+ const size_t actual_params) {
// TODO
}
diff --git a/NeuralAmpModeler/dsp/dsp.h b/NeuralAmpModeler/dsp/dsp.h
@@ -196,7 +196,7 @@ public:
// :return: (N,Cout) or (Cout,), respectively
Eigen::MatrixXf process(const Eigen::MatrixXf &input) const;
- int get_out_channels() const { return this->_weight.rows(); };
+ long get_out_channels() const { return this->_weight.rows(); };
private:
Eigen::MatrixXf _weight;
@@ -239,7 +239,7 @@ public:
std::vector<float>::iterator &params);
void process_(const Eigen::MatrixXf &input, Eigen::MatrixXf &output,
const long i_start, const long i_end) const;
- int get_out_channels() const;
+ long get_out_channels() const;
Conv1D conv;
private:
@@ -272,7 +272,7 @@ protected:
Eigen::VectorXf _head_output;
_Head _head;
void _verify_params(const int channels, const std::vector<int> &dilations,
- const bool batchnorm, const int actual_params);
+ const bool batchnorm, const size_t actual_params);
void _update_buffers_() override;
void _rewind_buffers_() override;
diff --git a/NeuralAmpModeler/dsp/lstm.cpp b/NeuralAmpModeler/dsp/lstm.cpp
@@ -15,7 +15,6 @@ lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size,
this->_c.resize(hidden_size);
// Assign in row-major because that's how PyTorch goes.
- int count = 0;
for (int i = 0; i < this->_w.rows(); i++)
for (int j = 0; j < this->_w.cols(); j++)
this->_w(i, j) = *(params++);
@@ -29,23 +28,23 @@ lstm::LSTMCell::LSTMCell(const int input_size, const int hidden_size,
}
void lstm::LSTMCell::process_(const Eigen::VectorXf &x) {
- const int hidden_size = this->_get_hidden_size();
- const int input_size = this->_get_input_size();
+ const long hidden_size = this->_get_hidden_size();
+ const long input_size = this->_get_input_size();
// Assign inputs
this->_xh(Eigen::seq(0, input_size - 1)) = x;
// The matmul
this->_ifgo = this->_w * this->_xh + this->_b;
// Elementwise updates (apply nonlinearities here)
- const int i_offset = 0;
- const int f_offset = hidden_size;
- const int g_offset = 2 * hidden_size;
- const int o_offset = 3 * hidden_size;
- for (int i = 0; i < hidden_size; i++)
+ const long i_offset = 0;
+ const long f_offset = hidden_size;
+ const long g_offset = 2 * hidden_size;
+ const long o_offset = 3 * hidden_size;
+ for (auto i = 0; i < hidden_size; i++)
this->_c[i] =
activations::sigmoid(this->_ifgo[i + f_offset]) * this->_c[i] +
activations::sigmoid(this->_ifgo[i + i_offset]) *
tanhf(this->_ifgo[i + g_offset]);
- const int h_offset = input_size;
+ const long h_offset = input_size;
for (int i = 0; i < hidden_size; i++)
this->_xh[i + h_offset] =
activations::sigmoid(this->_ifgo[i + o_offset]) * tanhf(this->_c[i]);
@@ -108,4 +107,4 @@ float lstm::LSTM::_process_sample(const float x) {
return this->_head_weight.dot(
this->_layers[this->_layers.size() - 1].get_hidden_state()) +
this->_head_bias;
-}
\ No newline at end of file
+}
diff --git a/NeuralAmpModeler/dsp/lstm.h b/NeuralAmpModeler/dsp/lstm.h
@@ -42,8 +42,8 @@ private:
// Cell state
Eigen::VectorXf _c;
- int _get_hidden_size() const { return this->_b.size() / 4; };
- int _get_input_size() const {
+ long _get_hidden_size() const { return this->_b.size() / 4; };
+ long _get_input_size() const {
return this->_xh.size() - this->_get_hidden_size();
};
};
diff --git a/NeuralAmpModeler/dsp/wavenet.cpp b/NeuralAmpModeler/dsp/wavenet.cpp
@@ -49,7 +49,7 @@ void wavenet::_Layer::process_(const Eigen::MatrixXf &input,
this->_1x1.process(this->_z.topRows(channels));
}
-void wavenet::_Layer::set_num_frames_(const int num_frames) {
+void wavenet::_Layer::set_num_frames_(const long num_frames) {
this->_z.resize(this->_conv.get_out_channels(), num_frames);
}
@@ -87,7 +87,7 @@ long wavenet::_LayerArray::get_receptive_field() const {
return result;
}
-void wavenet::_LayerArray::prepare_for_frames_(const int num_frames) {
+void wavenet::_LayerArray::prepare_for_frames_(const long num_frames) {
if (this->_buffer_start + num_frames > this->_get_buffer_size())
this->_rewind_buffers_();
}
@@ -99,8 +99,8 @@ void wavenet::_LayerArray::process_(const Eigen::MatrixXf &layer_inputs,
Eigen::MatrixXf &head_outputs) {
this->_layer_buffers[0].middleCols(this->_buffer_start, layer_inputs.cols()) =
this->_rechannel.process(layer_inputs);
- const int last_layer = this->_layers.size() - 1;
- for (int i = 0; i < this->_layers.size(); i++) {
+ const long last_layer = this->_layers.size() - 1;
+ for (auto i = 0; i < this->_layers.size(); i++) {
this->_layers[i].process_(
this->_layer_buffers[i], condition, head_inputs,
i == last_layer ? layer_outputs : this->_layer_buffers[i + 1],
@@ -109,7 +109,7 @@ void wavenet::_LayerArray::process_(const Eigen::MatrixXf &layer_inputs,
head_outputs = this->_head_rechannel.process(head_inputs);
}
-void wavenet::_LayerArray::set_num_frames_(const int num_frames) {
+void wavenet::_LayerArray::set_num_frames_(const long num_frames) {
// Wavenet checks for unchanged num_frames; if we made it here, there's
// something to do.
if (LAYER_ARRAY_BUFFER_SIZE - num_frames < this->_get_receptive_field()) {
@@ -131,7 +131,7 @@ void wavenet::_LayerArray::set_params_(std::vector<float>::iterator &params) {
this->_head_rechannel.set_params_(params);
}
-int wavenet::_LayerArray::_get_channels() const {
+long wavenet::_LayerArray::_get_channels() const {
return this->_layers.size() > 0 ? this->_layers[0].get_channels() : 0;
}
@@ -181,7 +181,7 @@ void wavenet::_Head::set_params_(std::vector<float>::iterator &params) {
void wavenet::_Head::process_(Eigen::MatrixXf &inputs,
Eigen::MatrixXf &outputs) {
- const int num_layers = this->_layers.size();
+ const size_t num_layers = this->_layers.size();
this->_apply_activation_(inputs);
if (num_layers == 1)
outputs = this->_layers[0].process(inputs);
@@ -197,7 +197,7 @@ void wavenet::_Head::process_(Eigen::MatrixXf &inputs,
}
}
-void wavenet::_Head::set_num_frames_(const int num_frames) {
+void wavenet::_Head::set_num_frames_(const long num_frames) {
for (int i = 0; i < this->_buffers.size(); i++)
this->_buffers[i].resize(this->_channels, num_frames);
}
@@ -290,8 +290,8 @@ void wavenet::WaveNet::_init_parametric_(nlohmann::json &parametric) {
std::sort(this->_param_names.begin(), this->_param_names.end());
}
-void wavenet::WaveNet::_prepare_for_frames_(const int num_frames) {
- for (int i = 0; i < this->_layer_arrays.size(); i++)
+void wavenet::WaveNet::_prepare_for_frames_(const long num_frames) {
+ for (auto i = 0; i < this->_layer_arrays.size(); i++)
this->_layer_arrays[i].prepare_for_frames_(num_frames);
}
@@ -328,7 +328,7 @@ void wavenet::WaveNet::_process_core_() {
// Hack: apply head scale here; revisit when/if I activate the head.
// assert(this->_head_output.rows() == 1);
- const int final_head_array = this->_head_arrays.size() - 1;
+ const long final_head_array = this->_head_arrays.size() - 1;
assert(this->_head_arrays[final_head_array].rows() == 1);
for (int s = 0; s < num_frames; s++)
this->_core_dsp_output[s] =
@@ -337,7 +337,7 @@ void wavenet::WaveNet::_process_core_() {
this->_anti_pop_();
}
-void wavenet::WaveNet::_set_num_frames_(const int num_frames) {
+void wavenet::WaveNet::_set_num_frames_(const long num_frames) {
if (num_frames == this->_num_frames)
return;
diff --git a/NeuralAmpModeler/dsp/wavenet.h b/NeuralAmpModeler/dsp/wavenet.h
@@ -31,10 +31,10 @@ public:
void process_(const Eigen::MatrixXf &input, const Eigen::MatrixXf &condition,
Eigen::MatrixXf &head_input, Eigen::MatrixXf &output,
const long i_start, const long j_start);
- void set_num_frames_(const int num_frames);
- int get_channels() const { return this->_conv.get_in_channels(); };
+ void set_num_frames_(const long num_frames);
+ long get_channels() const { return this->_conv.get_in_channels(); };
int get_dilation() const { return this->_conv.get_dilation(); };
- int get_kernel_size() const { return this->_conv.get_kernel_size(); };
+ long get_kernel_size() const { return this->_conv.get_kernel_size(); };
private:
// The dilated convolution at the front of the block
@@ -89,7 +89,7 @@ public:
// Rewind buffers if needed
// Shift index to prepare
//
- void prepare_for_frames_(const int num_frames);
+ void prepare_for_frames_(const long num_frames);
// All arrays are "short".
void process_(const Eigen::MatrixXf &layer_inputs, // Short
@@ -98,7 +98,7 @@ public:
Eigen::MatrixXf &head_inputs, // Sum up on this.
Eigen::MatrixXf &head_outputs // post head-rechannel
);
- void set_num_frames_(const int num_frames);
+ void set_num_frames_(const long num_frames);
void set_params_(std::vector<float>::iterator &it);
long get_receptive_field() const;
@@ -121,7 +121,7 @@ private:
long _get_buffer_size() const {
return this->_layer_buffers.size() > 0 ? this->_layer_buffers[0].cols() : 0;
};
- int _get_channels() const;
+ long _get_channels() const;
long _get_receptive_field() const;
void _rewind_buffers_();
};
@@ -136,7 +136,7 @@ public:
// NOTE: the head transforms the provided input by applying a nonlinearity
// to it in-place!
void process_(Eigen::MatrixXf &inputs, Eigen::MatrixXf &outputs);
- void set_num_frames_(const int num_frames);
+ void set_num_frames_(const long num_frames);
private:
int _channels;
@@ -168,7 +168,7 @@ public:
void set_params_(std::vector<float> &params);
private:
- int _num_frames;
+ long _num_frames;
std::vector<_LayerArray> _layer_arrays;
// Their outputs
std::vector<Eigen::MatrixXf> _layer_array_outputs;
@@ -188,12 +188,12 @@ private:
void _advance_buffers_(const int num_frames);
// Get the info from the parametric config
void _init_parametric_(nlohmann::json &parametric);
- void _prepare_for_frames_(const int num_frames);
+ void _prepare_for_frames_(const long num_frames);
// Reminder: From ._input_post_gain to ._core_dsp_output
void _process_core_() override;
// Ensure that all buffer arrays are the right size for this num_frames
- void _set_num_frames_(const int num_frames);
+ void _set_num_frames_(const long num_frames);
// The net starts with random parameters inside; we need to wait for a full
// receptive field to pass through before we can count on the output being