Unbind waterfall scrolling from the display frame rate -- prevent low FPS from limiting the scroll speed

This commit is contained in:
2026-04-09 10:19:54 +02:00
parent f52de19de5
commit 793df68ba1
3 changed files with 74 additions and 23 deletions

View File

@@ -2,6 +2,7 @@
#include "audio/FileSource.h" #include "audio/FileSource.h"
#include <algorithm> #include <algorithm>
#include <chrono>
#include <cmath> #include <cmath>
#include <cstdio> #include <cstdio>
#include <cstring> #include <cstring>
@@ -157,19 +158,38 @@ int AudioEngine::processAudio() {
if (hopFrames < 1) hopFrames = 1; if (hopFrames < 1) hopFrames = 1;
audioBuf_.resize(hopFrames * channels); audioBuf_.resize(hopFrames * channels);
constexpr int kMaxSpectraPerFrame = 8; // Drain all available audio so the scroll rate is independent of the
// display refresh rate (vsync). Real-time sources self-limit via their
// ring buffer; file sources are capped to wall-clock time so playback
// runs at 1× speed regardless of frame rate.
// For file sources, compute how many samples correspond to elapsed time.
size_t fileSampleCap = SIZE_MAX; // unlimited for real-time
if (!audioSource_->isRealTime()) {
using Clock = std::chrono::steady_clock;
static Clock::time_point lastFileTime = Clock::now();
auto now = Clock::now();
double elapsed = std::chrono::duration<double>(now - lastFileTime).count();
lastFileTime = now;
// Clamp elapsed to avoid huge bursts after pauses or stalls.
if (elapsed > 0.1) elapsed = 0.1;
fileSampleCap = static_cast<size_t>(elapsed * settings_.sampleRate) + 1;
}
// Process primary source. // Process primary source.
int spectraThisFrame = 0; int spectraThisFrame = 0;
while (spectraThisFrame < kMaxSpectraPerFrame) { size_t samplesRead = 0;
for (;;) {
if (samplesRead >= fileSampleCap) break;
size_t framesRead = audioSource_->read(audioBuf_.data(), hopFrames); size_t framesRead = audioSource_->read(audioBuf_.data(), hopFrames);
if (framesRead == 0) break; if (framesRead == 0) break;
samplesRead += framesRead;
analyzer_.pushSamples(audioBuf_.data(), framesRead); analyzer_.pushSamples(audioBuf_.data(), framesRead);
if (analyzer_.hasNewSpectrum()) if (analyzer_.hasNewSpectrum())
++spectraThisFrame; ++spectraThisFrame;
} }
// Process extra devices independently. // Process extra devices independently (always real-time).
for (auto& ed : extraDevices_) { for (auto& ed : extraDevices_) {
int edCh = ed->source->channels(); int edCh = ed->source->channels();
const auto& edSettings = ed->analyzer.settings(); const auto& edSettings = ed->analyzer.settings();
@@ -177,13 +197,10 @@ int AudioEngine::processAudio() {
if (edHop < 1) edHop = 1; if (edHop < 1) edHop = 1;
ed->audioBuf.resize(edHop * edCh); ed->audioBuf.resize(edHop * edCh);
int edSpectra = 0; for (;;) {
while (edSpectra < kMaxSpectraPerFrame) {
size_t framesRead = ed->source->read(ed->audioBuf.data(), edHop); size_t framesRead = ed->source->read(ed->audioBuf.data(), edHop);
if (framesRead == 0) break; if (framesRead == 0) break;
ed->analyzer.pushSamples(ed->audioBuf.data(), framesRead); ed->analyzer.pushSamples(ed->audioBuf.data(), framesRead);
if (ed->analyzer.hasNewSpectrum())
++edSpectra;
} }
} }
@@ -226,6 +243,18 @@ const std::vector<float>& AudioEngine::getSpectrum(int globalCh) const {
return analyzer_.channelSpectrum(0); return analyzer_.channelSpectrum(0);
} }
// Map a global channel index onto the analyzer that owns it and return that
// channel's waterfall history. Channels are numbered with the primary
// analyzer's channels first, followed by each extra device's channels in
// order. An out-of-range index falls back to the primary analyzer's
// channel 0, mirroring the behavior of getSpectrum()/getComplex().
const std::deque<std::vector<float>>& AudioEngine::getWaterfallHistory(int globalCh) const {
    int remaining = globalCh;
    const int primaryCount = analyzer_.numSpectra();
    if (remaining < primaryCount)
        return analyzer_.waterfallHistory(remaining);
    remaining -= primaryCount;
    for (const auto& ed : extraDevices_) {
        const int deviceCount = ed->analyzer.numSpectra();
        if (remaining < deviceCount)
            return ed->analyzer.waterfallHistory(remaining);
        remaining -= deviceCount;
    }
    // Index exceeded every analyzer's channel count — fall back safely.
    return analyzer_.waterfallHistory(0);
}
const std::vector<std::complex<float>>& AudioEngine::getComplex(int globalCh) const { const std::vector<std::complex<float>>& AudioEngine::getComplex(int globalCh) const {
int n = analyzer_.numSpectra(); int n = analyzer_.numSpectra();
if (globalCh < n) return analyzer_.channelComplex(globalCh); if (globalCh < n) return analyzer_.channelComplex(globalCh);

View File

@@ -42,6 +42,7 @@ public:
// ── Unified channel view across all analyzers ── // ── Unified channel view across all analyzers ──
int totalNumSpectra() const; int totalNumSpectra() const;
const std::vector<float>& getSpectrum(int globalCh) const; const std::vector<float>& getSpectrum(int globalCh) const;
const std::deque<std::vector<float>>& getWaterfallHistory(int globalCh) const;
const std::vector<std::complex<float>>& getComplex(int globalCh) const; const std::vector<std::complex<float>>& getComplex(int globalCh) const;
const char* getDeviceName(int globalCh) const; const char* getDeviceName(int globalCh) const;
int spectrumSize() const { return analyzer_.spectrumSize(); } int spectrumSize() const { return analyzer_.spectrumSize(); }

View File

@@ -312,16 +312,31 @@ void Application::processAudio() {
const auto& mathChannels = audio_.mathChannels(); const auto& mathChannels = audio_.mathChannels();
const auto& mathSpectra = audio_.mathSpectra(); const auto& mathSpectra = audio_.mathSpectra();
// Push ALL new spectra to the waterfall so that the scroll rate
// is determined by the audio sample rate, not the display refresh.
if (ui_.waterfallMultiCh && nSpec > 1) { if (ui_.waterfallMultiCh && nSpec > 1) {
// For multi-channel: replay the last spectraThisFrame entries
// from channel 0's history to get per-step data. Other
// channels have the same count of new entries.
const auto& hist0 = audio_.getWaterfallHistory(0);
int histSz = static_cast<int>(hist0.size());
int start = std::max(0, histSz - spectraThisFrame);
for (int si = start; si < histSz; ++si) {
std::vector<std::vector<float>> wfSpectra; std::vector<std::vector<float>> wfSpectra;
std::vector<WaterfallChannelInfo> wfInfo; std::vector<WaterfallChannelInfo> wfInfo;
for (int ch = 0; ch < nSpec; ++ch) { for (int ch = 0; ch < nSpec; ++ch) {
const auto& c = ui_.channelColors[ch % kMaxChannels]; const auto& c = ui_.channelColors[ch % kMaxChannels];
wfSpectra.push_back(audio_.getSpectrum(ch)); const auto& hist = audio_.getWaterfallHistory(ch);
int idx = std::max(0, static_cast<int>(hist.size()) - (histSz - si));
wfSpectra.push_back(hist[idx]);
wfInfo.push_back({c.x, c.y, c.z, wfInfo.push_back({c.x, c.y, c.z,
ui_.channelEnabled[ch % kMaxChannels]}); ui_.channelEnabled[ch % kMaxChannels]});
} }
// Math channels only available for the latest spectrum;
// include them only on the last iteration.
if (si == histSz - 1) {
for (size_t mi = 0; mi < mathChannels.size(); ++mi) { for (size_t mi = 0; mi < mathChannels.size(); ++mi) {
if (mathChannels[mi].enabled && mathChannels[mi].waterfall && if (mathChannels[mi].enabled && mathChannels[mi].waterfall &&
mi < mathSpectra.size()) { mi < mathSpectra.size()) {
@@ -330,10 +345,16 @@ void Application::processAudio() {
wfInfo.push_back({c[0], c[1], c[2], true}); wfInfo.push_back({c[0], c[1], c[2], true});
} }
} }
}
waterfall_.pushLineMulti(wfSpectra, wfInfo, ui_.minDB, ui_.maxDB); waterfall_.pushLineMulti(wfSpectra, wfInfo, ui_.minDB, ui_.maxDB);
}
} else { } else {
int wfCh = std::clamp(ui_.waterfallChannel, 0, nSpec - 1); int wfCh = std::clamp(ui_.waterfallChannel, 0, nSpec - 1);
waterfall_.pushLine(audio_.getSpectrum(wfCh), ui_.minDB, ui_.maxDB); const auto& hist = audio_.getWaterfallHistory(wfCh);
int histSz = static_cast<int>(hist.size());
int start = std::max(0, histSz - spectraThisFrame);
for (int si = start; si < histSz; ++si)
waterfall_.pushLine(hist[si], ui_.minDB, ui_.maxDB);
} }
int curCh = std::clamp(ui_.waterfallChannel, 0, nSpec - 1); int curCh = std::clamp(ui_.waterfallChannel, 0, nSpec - 1);
cursors_.update(audio_.getSpectrum(curCh), cursors_.update(audio_.getSpectrum(curCh),