commit 004db80652c973555c4664db790261461836ede1
parent d8e11aab711904fcb9f25107edc4efd51254d257
Author: jatinchowdhury18 <[email protected]>
Date: Wed, 11 Nov 2020 00:19:15 -0800
Update playhead loss processing
Diffstat:
6 files changed, 200 insertions(+), 56 deletions(-)
diff --git a/Plugin/Source/GUI/Assets/gui.xml b/Plugin/Source/GUI/Assets/gui.xml
@@ -91,17 +91,17 @@
<View flex-direction="column" tab-caption="Loss" tab-color="" background-color="FF31323A"
padding="0" margin="0">
<View flex-grow="0.1" background-color="00000000"/>
- <Slider caption="Gap [cm]" parameter="gap" slider-type="linear-horizontal"
+ <Slider caption="Gap [microns]" parameter="gap" slider-type="linear-horizontal"
class="Slider" padding="0" slider-background="ff595c6b" slider-track="ff9cbcbd"
name="Gap" tooltip="Sets the width of the playhead gap. Certain frequencies that resonate with the gap width will be emphasized."
slidertext-height="18" caption-placement="top-left"/>
<View flex-grow="0.2" background-color="00000000"/>
- <Slider caption="Thickness [cm]" parameter="thick" class="Slider" slider-type="linear-horizontal"
+ <Slider caption="Thickness [microns]" parameter="thick" class="Slider" slider-type="linear-horizontal"
padding="0" slider-background="ff595c6b" slider-track="ff9cbcbd"
name="Thickness" tooltip="Sets the thickness of the tape. Thicker tape has a more muted high-frequency response."
caption-placement="top-left"/>
<View flex-grow="0.2" background-color="00000000"/>
- <Slider caption="Spacing [cm]" parameter="spacing" slider-type="linear-horizontal"
+ <Slider caption="Spacing [microns]" parameter="spacing" slider-type="linear-horizontal"
class="Slider" padding="0" slider-background="ff595c6b" slider-track="ff9cbcbd"
name="Spacing" tooltip="Sets the spacing between the tape and the playhead. A larger spacing means more high frequency signal is lost during playback."
caption-placement="top-left"/>
@@ -112,13 +112,13 @@
caption-placement="top-left"/>
<View flex-grow="0.55" margin="0" padding="2" background-color="00000000">
<TextButton margin="0" padding="2" text="3.75" button-color="00000000" background-color="00000000"
- onClick="set_speed_3.75" lookAndFeel="SpeedButtonLNF"/>
+ onClick="set_speed_3.75" lookAndFeel="SpeedButtonLNF" button-on-color="00000000"/>
<TextButton text="7.5" margin="0" padding="2" button-color="00000000" background-color="00000000"
- onClick="set_speed_7.50" lookAndFeel="SpeedButtonLNF"/>
- <TextButton margin="0" padding="2" text="15" button-color="00000000" border-color=""
+ onClick="set_speed_7.50" lookAndFeel="SpeedButtonLNF" button-on-color="00000000"/>
+ <TextButton margin="0" padding="2" text="15" button-color="00000000" button-on-color="00000000"
background-color="00000000" onClick="set_speed_15.00" lookAndFeel="SpeedButtonLNF"/>
<TextButton margin="0" padding="2" background-color="00000000" onClick="set_speed_30.00"
- lookAndFeel="SpeedButtonLNF" text="30" button-color="00000000"/>
+ lookAndFeel="SpeedButtonLNF" text="30" button-color="00000000" button-on-color="00000000"/>
</View>
<View flex-grow="0.01" background-color="00000000"/>
</View>
diff --git a/Plugin/Source/PluginProcessor.cpp b/Plugin/Source/PluginProcessor.cpp
@@ -163,11 +163,11 @@ void ChowtapeModelAudioProcessor::prepareToPlay (double sampleRate, int samplesP
degrade.prepareToPlay (sampleRate, samplesPerBlock);
chewer.prepare (sampleRate);
- dryDelay.prepare ({ sampleRate, (uint32) samplesPerBlock, 2 });
- dryDelay.setDelay (calcLatencySamples());
-
for (int ch = 0; ch < 2; ++ch)
lossFilter[ch]->prepare ((float) sampleRate, samplesPerBlock);
+
+ dryDelay.prepare ({ sampleRate, (uint32) samplesPerBlock, 2 });
+ dryDelay.setDelay (calcLatencySamples());
flutter.prepareToPlay (sampleRate, samplesPerBlock);
outGain.prepareToPlay (sampleRate, samplesPerBlock);
@@ -188,7 +188,7 @@ void ChowtapeModelAudioProcessor::releaseResources()
float ChowtapeModelAudioProcessor::calcLatencySamples() const noexcept
{
- return hysteresis.getLatencySamples();
+ return lossFilter[0]->getLatencySamples() + hysteresis.getLatencySamples();
}
#ifndef JucePlugin_PreferredChannelConfigurations
diff --git a/Plugin/Source/Processors/Loss_Effects/FIRFilter.h b/Plugin/Source/Processors/Loss_Effects/FIRFilter.h
@@ -4,6 +4,7 @@
#include "JuceHeader.h"
#include <numeric>
+/** FIR filter using a double-buffer and std::inner_product */
class FIRFilter
{
public:
@@ -11,7 +12,7 @@ public:
order (order)
{
h = new float[order];
- z = new float[order];
+ z = new float[2 * order];
}
~FIRFilter()
@@ -23,12 +24,12 @@ public:
void reset()
{
zPtr = 0;
- std::fill (z, &z[order], 0.0f);
+ FloatVectorOperations::fill (z, 0.0f, 2 * order);
}
void setCoefs (float* coefs)
{
- std::copy (coefs, &coefs[order], h);
+ FloatVectorOperations::copy (h, coefs, order);
}
inline void process (float* buffer, int numSamples)
@@ -36,12 +37,18 @@ public:
float y = 0.0f;
for (int n = 0; n < numSamples; ++n)
{
+ // insert input into double-buffered state
z[zPtr] = buffer[n];
+ z[zPtr + order] = buffer[n];
- y = std::inner_product (z + zPtr, z + order, h, 0.0f);
- y = std::inner_product (z, z + zPtr, h + (order - zPtr), y);
+#ifdef JUCE_USE_VDSP_FRAMEWORK
+ y = 0.0f;
+ vDSP_dotpr (z + zPtr, 1, h, 1, &y, order); // use Accelerate inner product (if available)
+#else
+ y = std::inner_product (z + zPtr, z + zPtr + order, h, 0.0f); // compute inner product
+#endif
- zPtr = (zPtr == 0 ? order - 1 : zPtr - 1);
+ zPtr = (zPtr == 0 ? order - 1 : zPtr - 1); // iterate state pointer in reverse
buffer[n] = y;
}
}
diff --git a/Plugin/Source/Processors/Loss_Effects/LossFilter.cpp b/Plugin/Source/Processors/Loss_Effects/LossFilter.cpp
@@ -1,4 +1,4 @@
-#include "LossFilter.h"
+#include "LossFilter.h"
LossFilter::LossFilter (AudioProcessorValueTreeState& vts, int order) :
order (order)
@@ -16,38 +16,37 @@ LossFilter::LossFilter (AudioProcessorValueTreeState& vts, int order) :
void LossFilter::createParameterLayout (std::vector<std::unique_ptr<RangedAudioParameter>>& params)
{
- constexpr float minDist = (float) 1.0e-6;
- constexpr float centreSkew = 5.0f;
+ constexpr float minDist = 0.1f;
- auto valueToString = [] (float value, int) { return String (100.0f * value, 4); };
- auto stringToValue = [] (const String& text) { return text.getFloatValue() / 100.0f; };
+ auto valueToString = [] (float value, int) { return String (value, 4); };
+ auto stringToValue = [] (const String& text) { return text.getFloatValue(); };
- NormalisableRange<float> speedRange (1.0f, 100.0f); // meters per second
+ NormalisableRange<float> speedRange (1.0f, 50.0f); // meters per second
speedRange.setSkewForCentre (15.0f);
- NormalisableRange<float> spaceRange (minDist, 100.0f);
- spaceRange.setSkewForCentre (centreSkew);
+ NormalisableRange<float> spaceRange (minDist, 20.0f);
+ spaceRange.setSkewForCentre (5.0f);
- NormalisableRange<float> thickRange (minDist, 10.0f);
- thickRange.setSkewForCentre (centreSkew / 15.0f);
+ NormalisableRange<float> thickRange (minDist, 50.0f);
+ thickRange.setSkewForCentre (5.0f);
- NormalisableRange<float> gapRange (minDist, 100.0f);
- gapRange.setSkewForCentre (centreSkew);
+ NormalisableRange<float> gapRange (1.0f, 50.0f);
+ gapRange.setSkewForCentre (10.0f);
params.push_back (std::make_unique<AudioParameterFloat> ("speed", "Speed [ips]",
- speedRange, 15.0f, String(), AudioProcessorParameter::genericParameter,
+ speedRange, 30.0f, String(), AudioProcessorParameter::genericParameter,
[] (float value, int) { return String (value, 2); }));
- params.push_back (std::make_unique<AudioParameterFloat> ("spacing", "Spacing [cm]",
+ params.push_back (std::make_unique<AudioParameterFloat> ("spacing", "Spacing [microns]",
spaceRange, minDist, String(), AudioProcessorParameter::genericParameter,
valueToString, stringToValue));
- params.push_back (std::make_unique<AudioParameterFloat> ("thick", "Thickness [cm]",
+ params.push_back (std::make_unique<AudioParameterFloat> ("thick", "Thickness [microns]",
thickRange, minDist, String(), AudioProcessorParameter::genericParameter,
valueToString, stringToValue));
- params.push_back (std::make_unique<AudioParameterFloat> ("gap", "Gap",
- gapRange, minDist, String(), AudioProcessorParameter::genericParameter,
+ params.push_back (std::make_unique<AudioParameterFloat> ("gap", "Gap [microns]",
+ gapRange, 1.0f, String(), AudioProcessorParameter::genericParameter,
valueToString, stringToValue));
}
@@ -57,14 +56,17 @@ void LossFilter::prepare (float sampleRate, int samplesPerBlock)
fadeBuffer.setSize (1, samplesPerBlock);
fsFactor = (float) fs / 44100.0f;
- const int curOrder = int (order * fsFactor);
+ curOrder = int (order * fsFactor);
filters.clear();
filters.add (new FIRFilter (curOrder));
filters.add (new FIRFilter (curOrder));
currentCoefs.resize (curOrder);
+ Hcoefs.resize (curOrder);
filters[0]->reset();
filters[1]->reset();
+ bumpFilter[0].prepare ({ (double) sampleRate, (uint32) samplesPerBlock, 1 });
+ bumpFilter[1].prepare ({ (double) sampleRate, (uint32) samplesPerBlock, 1 });
calcCoefs();
filters[0]->setCoefs (currentCoefs.getRawDataPointer());
@@ -78,50 +80,55 @@ void LossFilter::prepare (float sampleRate, int samplesPerBlock)
starting = true;
}
+static void calcHeadBumpFilter (float speedIps, float gapMeters, double fs, dsp::IIR::Filter<float>& filter)
+{
+ auto bumpFreq = speedIps * 0.0254f / (gapMeters * 500.0f);
+ auto gain = jmax (1.5f * (1000.0f - std::abs (bumpFreq - 100.0f)) / 1000.0f, 1.0f);
+ filter.coefficients = dsp::IIR::Coefficients<float>::makePeakFilter (fs, bumpFreq, 2.0f, gain);
+}
+
void LossFilter::calcCoefs()
{
// Set freq domain multipliers
- const int curOrder = int (order * fsFactor);
binWidth = fs / (float) curOrder;
- std::unique_ptr<float[]> H (new float[curOrder]);
+ auto H = Hcoefs.getRawDataPointer();
for (int k = 0; k < curOrder / 2; k++)
{
- const auto freq = ((float) k * binWidth); // + (binWidth / 2.0f);
+ const auto freq = (float) k * binWidth;
const auto waveNumber = MathConstants<float>::twoPi * jmax (freq, 20.0f) / (*speed * 0.0254f);
- const auto thickTimesK = waveNumber * (*thickness * (float) 1.0e-3);
- const auto kGapOverTwo = waveNumber * (*gap * (float) 1.0e-3) / 2.0f;
+ const auto thickTimesK = waveNumber * (*thickness * (float) 1.0e-6);
+ const auto kGapOverTwo = waveNumber * (*gap * (float) 1.0e-6) / 2.0f;
- H[k] = expf (-1.0f * waveNumber * (*spacing * (float) 1.0e-3)); // Spacing loss formula
- H[k] *= (1.0f - expf (-thickTimesK)) / thickTimesK;
- H[k] *= sinf (kGapOverTwo) / kGapOverTwo;
+ H[k] = expf (-waveNumber * (*spacing * (float) 1.0e-6)); // Spacing loss
+ H[k] *= (1.0f - expf (-thickTimesK)) / thickTimesK; // Thickness loss
+ H[k] *= sinf (kGapOverTwo) / kGapOverTwo; // Gap loss
H[curOrder - k - 1] = H[k];
}
- // Create time domain filter signals
+ // Create time domain filter signal
auto h = currentCoefs.getRawDataPointer();
- for (int n = 0; n < curOrder; n++)
+ for (int n = 0; n < curOrder / 2; n++)
{
+ const size_t idx = curOrder / 2 + n;
for (int k = 0; k < curOrder; k++)
- h[n] += H[k] * cosf (MathConstants<float>::twoPi * (float) k * (float) n / (float) curOrder);
+ h[idx] += Hcoefs[k] * cosf (MathConstants<float>::twoPi * (float) k * (float) n / (float) curOrder);
- h[n] /= (float) curOrder;
+ h[idx] /= (float) curOrder;
+ h[curOrder / 2 - n] = h[idx];
}
+
+ // compute head bump filters
+ calcHeadBumpFilter (*speed, *gap * (float) 1.0e-6, (double) fs, bumpFilter[! activeFilter]);
}
void LossFilter::processBlock (float* buffer, const int numSamples)
{
- if (*spacing == (float) 1.0e-6 && *thickness == (float) 1.0e-6 && *gap == (float) 1.0e-6
- && *spacing == prevSpacing && *thickness == prevThickness && *gap == prevGap)
- {
- filters[activeFilter]->processBypassed (buffer, numSamples);
- return;
- }
-
if ((*speed != prevSpeed || *spacing != prevSpacing ||
*thickness != prevThickness || *gap != prevGap) && fadeCount == 0)
{
calcCoefs();
filters[! activeFilter]->setCoefs (currentCoefs.getRawDataPointer());
+ bumpFilter[! activeFilter].reset();
fadeCount = fadeLength;
prevSpeed = *speed;
@@ -141,7 +148,12 @@ void LossFilter::processBlock (float* buffer, const int numSamples)
}
if (! starting)
+ {
filters[activeFilter]->process (buffer, numSamples);
+ dsp::AudioBlock<float> block (&buffer, 1, numSamples);
+ dsp::ProcessContextReplacing<float> ctx (block);
+ bumpFilter[activeFilter].process (ctx);
+ }
else
{
starting = false;
@@ -152,6 +164,9 @@ void LossFilter::processBlock (float* buffer, const int numSamples)
{
auto* fadePtr = fadeBuffer.getWritePointer (0);
filters[! activeFilter]->process (fadePtr, numSamples);
+ dsp::AudioBlock<float> block (&fadePtr, 1, numSamples);
+ dsp::ProcessContextReplacing<float> ctx (block);
+ bumpFilter[! activeFilter].process (ctx);
for (int n = 0; n < numSamples; ++n)
{
diff --git a/Plugin/Source/Processors/Loss_Effects/LossFilter.h b/Plugin/Source/Processors/Loss_Effects/LossFilter.h
@@ -6,7 +6,7 @@
class LossFilter
{
public:
- LossFilter (AudioProcessorValueTreeState& vts, int order = 100);
+ LossFilter (AudioProcessorValueTreeState& vts, int order = 64);
~LossFilter() {}
static void createParameterLayout (std::vector<std::unique_ptr<RangedAudioParameter>>& params);
@@ -14,12 +14,14 @@ public:
void prepare (float sampleRate, int samplesPerBlock);
void calcCoefs();
void processBlock (float* buffer, const int numSamples);
+ float getLatencySamples() const noexcept { return (float) curOrder / 2.0f; }
private:
OwnedArray<FIRFilter> filters;
+ dsp::IIR::Filter<float> bumpFilter[2];
int activeFilter = 0;
int fadeCount = 0;
- const int fadeLength = 512;
+ const int fadeLength = 1024;
AudioBuffer<float> fadeBuffer;
bool starting = false;
@@ -38,7 +40,9 @@ private:
float binWidth = fs / 100.0f;
const int order;
+ int curOrder;
Array<float> currentCoefs;
+ Array<float> Hcoefs;
JUCE_DECLARE_NON_COPYABLE_WITH_LEAK_DETECTOR (LossFilter)
};
diff --git a/Simulations/LossEffects/loss_effects2.py b/Simulations/LossEffects/loss_effects2.py
@@ -0,0 +1,118 @@
+# %%
+import numpy as np
+import matplotlib.pyplot as plt
+from scipy import signal
+
+# %%
+FREQS = np.logspace(1.3, 4.3, 50)
+N = 128
+FS = 48000.0
+f = np.linspace (0, FS, N)
+n = range(N)
+
+def ips_to_mps(speed):
+ return speed * 0.0254
+
+def freq_to_k(tape_speed_ips, freq):
+ wavelength = ips_to_mps(tape_speed_ips) / freq
+ return 2 * np.pi / wavelength
+
+def gain_to_db(x):
+ return 20 * np.log10(x)
+
+def plot_test(speed, H_func = lambda k: 1):
+ bin_width = FS / N
+ H = np.zeros(N)
+ H[0] = 1
+ for k in range(N // 2):
+ freq = k * bin_width
+ wave_num = 2 * np.pi * max(freq, 20) / (speed * 0.0254)
+ H[k] = H_func(wave_num)
+ H[N - k - 1] = H[k]
+
+ # plt.semilogx(f[:N//2], gain_to_db(H[:N//2]), '--')
+
+ h = np.zeros(N)
+ for n in range(N // 2):
+ idx = N//2 + n
+ for k in range(N):
+ h[idx] += H[k] * np.cos(2 * np.pi * k * n / N)
+ h[idx] /= N
+ h[N//2 - n] = h[idx]
+
+ # h = np.concatenate([h[N//2:], h[:N//2]]) # np.concatenate((h[N//2:])) #, H[:N//2]))
+
+ w, h_z = signal.freqz(h, fs=FS, worN=FREQS)
+ plt.semilogx(w, gain_to_db(h_z), '--')
+
+# %%
+def spacing_loss(tape_speed_ips, dist_meter, freqs):
+ K = freq_to_k(tape_speed_ips, freqs)
+ return np.exp(-1 * K * dist_meter)
+
+# SPEED = 7.5 # ips
+# dist = 5.0e-6 # meters
+
+for dist in [1.0e-6, 5e-6, 10e-6]:
+ for SPEED in [7.5]: #, 15, 30]:
+ space_loss = spacing_loss(SPEED, dist, FREQS)
+ plt.semilogx(FREQS, gain_to_db(space_loss), label=f'{SPEED}, {dist}')
+ plot_test(SPEED, lambda k : np.exp(-k * dist))
+
+# plt.ylim(-6, 0)
+# plt.ylim(0, 3.5)
+plt.legend()
+plt.title('Spacing Loss')
+
+# %%
+def thickness_loss(tape_speed_ips, thick_meter):
+ K = freq_to_k(tape_speed_ips, FREQS)
+ return (1 - np.exp(-1 * K * thick_meter)) / (K * thick_meter)
+
+for thick in [1.0e-6, 10.e-6]:
+ for SPEED in [7.5, 15, 30]:
+ loss = thickness_loss(SPEED, thick)
+ plt.semilogx(FREQS, gain_to_db(loss), label=f'{SPEED}, {thick}')
+
+plt.ylim(-6, 1)
+plt.legend()
+plt.title('Thickness Loss')
+# %%
+def gap_loss(tape_speed_ips, gap_meter):
+ K = freq_to_k(tape_speed_ips, FREQS)
+ return np.sin(K * gap_meter / 2.0) / (K * gap_meter / 2.0)
+
+for gap in [1.0e-6, 10.0e-6, 20.e-6]:
+ for SPEED in [7.5]: #, 15, 30]:
+ loss = gap_loss(SPEED, gap)
+ plt.semilogx(FREQS, gain_to_db(loss), label=f'{SPEED}, {gap}')
+
+plt.ylim(-6, 0)
+plt.legend()
+plt.title('Gap Loss')
+
+# %%
+def head_bump(tape_speed_ips, gap_meter):
+ bump_freq = tape_speed_ips * 0.0254 / (gap_meter * 5e2)
+ print(bump_freq)
+ width = bump_freq * 0.4
+ gain = 1.5 * (2e3 - abs(bump_freq - 100)) / 2e3
+ A = -(gain - 1) / (width/ 2)**2
+ H = np.zeros_like(FREQS)
+ for i, f in enumerate(FREQS):
+ if abs(f - bump_freq) > width / 2:
+ H[i] = 1.0
+ else:
+ H[i] = max(A * (f - bump_freq)**2 + gain, 1)
+ return H
+
+for gap in [5.0e-6, 10.0e-6, 20.e-6]:
+ for SPEED in [7.5, 15, 30]:
+ loss = head_bump(SPEED, gap)
+ plt.semilogx(FREQS, gain_to_db(loss), label=f'{SPEED}, {gap}')
+
+# plt.ylim(-6, 0)
+plt.legend()
+plt.title('Head Bump')
+
+# %%