Add support for the inactive color scheme to the audio renderers

Originally committed to SVN as r5886.
Thomas Goyne 2011-11-18 22:58:02 +00:00
parent e3b15bf587
commit 3cf1718ef6
6 changed files with 106 additions and 133 deletions

(Visual C++ project file)

@@ -1763,6 +1763,10 @@
 RelativePath="..\..\src\audio_renderer_waveform.h"
 >
 </File>
+<File
+RelativePath="..\..\src\audio_rendering_style.h"
+>
+</File>
 <File
 RelativePath="..\..\src\audio_timing_dialogue.cpp"
 >

src/audio_renderer.h

@@ -35,6 +35,7 @@
 ///
 /// Base classes for audio renderers (spectrum, waveform, ...)
 
+#pragma once
 
 #ifndef AGI_PRE
 #include <memory>

@@ -44,6 +45,7 @@
 #include <wx/gdicmn.h>
 #endif
 
+#include "audio_rendering_style.h"
 #include "block_cache.h"
 
 // Some forward declarations for outside stuff

@@ -53,41 +55,6 @@ class AudioProvider;
 class AudioRendererBitmapProvider;
 class AudioRenderer;
 
-/// @brief Styles audio may be rendered in
-///
-/// The constants are ordered by priority:
-/// Selected has highest priority and should overlap active, which should
-/// overlap inactive, which should overlap normal regions.
-enum AudioRenderingStyle {
-	/// Regular audio with no special properties
-	AudioStyle_Normal,
-	/// Audio belonging to objects that can not be manipulated currently
-	AudioStyle_Inactive,
-	/// Audio that may be manipulated indirectly, usually part of selected lines
-	AudioStyle_Active,
-	/// Primary selection for work, usually coinciding with the primary playback range
-	AudioStyle_Selected,
-	/// Number of audio styles
-	AudioStyle_MAX
-};
-
-/// @class AudioRenderingStyleRanges
-/// @brief Abstract container for ranges of audio rendering styles
-///
-/// Interface for producers of audio rendering ranges, consumers should implement
-/// this interface for objects to pass to producers.
-class AudioRenderingStyleRanges {
-public:
-	/// @brief Add a range to the line
-	/// @param start First sample index in range
-	/// @param end One past last sample index in range
-	/// @param style Style of the range added
-	virtual void AddRange(int64_t start, int64_t end, AudioRenderingStyle style) = 0;
-};
-
 /// @class AudioRendererBitmapCacheBitmapFactory
 /// @brief Produces wxBitmap objects for DataBlockCache storage for the audio renderer
 struct AudioRendererBitmapCacheBitmapFactory {
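The AudioRenderingStyle enum and the AudioRenderingStyleRanges interface removed here are still used by both renderers below, so they presumably move verbatim into the new src/audio_rendering_style.h added to the project file above; that header's contents are not part of this diff. A minimal sketch of what it likely contains, reconstructed from the removed lines (the include guard and the stdint.h include are assumptions):

// src/audio_rendering_style.h: reconstruction, not part of this diff
#pragma once

#ifndef AGI_PRE
#include <stdint.h>
#endif

/// Styles audio may be rendered in, ordered by priority:
/// Selected should overlap Active, which should overlap Inactive,
/// which should overlap Normal regions.
enum AudioRenderingStyle {
	AudioStyle_Normal,    ///< Regular audio with no special properties
	AudioStyle_Inactive,  ///< Audio belonging to objects that can not be manipulated currently
	AudioStyle_Active,    ///< Audio that may be manipulated indirectly, usually part of selected lines
	AudioStyle_Selected,  ///< Primary selection for work
	AudioStyle_MAX        ///< Number of audio styles
};

/// Abstract container for ranges of audio rendering styles; consumers of
/// style ranges implement this interface and pass it to producers.
class AudioRenderingStyleRanges {
public:
	/// @brief Add a range to the line
	/// @param start First sample index in range
	/// @param end One past last sample index in range
	/// @param style Style of the range added
	virtual void AddRange(int64_t start, int64_t end, AudioRenderingStyle style) = 0;
};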

src/audio_renderer_spectrum.cpp

@@ -36,6 +36,18 @@
 #include "config.h"
 
+#include "audio_renderer_spectrum.h"
+
+#include "audio_colorscheme.h"
+#ifndef WITH_FFTW
+#include "fft.h"
+#endif
+#include "include/aegisub/audio_provider.h"
+#include "main.h"
+#include "utils.h"
+
+#include <libaegisub/log.h>
+
 #ifndef AGI_PRE
 #include <algorithm>

@@ -44,22 +56,6 @@
 #include <wx/dcmemory.h>
 #endif
 
-#include <libaegisub/log.h>
-
-#include "block_cache.h"
-#include "include/aegisub/audio_provider.h"
-#include "audio_colorscheme.h"
-#include "audio_renderer.h"
-#include "audio_renderer_spectrum.h"
-
-#ifdef WITH_FFTW
-#include <fftw3.h>
-#else
-#include "fft.h"
-#endif
-
-#include "main.h"
-#include "utils.h"
 
 /// Allocates blocks of derived data for the audio spectrum
 struct AudioSpectrumCacheBlockFactory {

@@ -99,36 +95,31 @@ struct AudioSpectrumCacheBlockFactory {
 /// @brief Cache for audio spectrum frequency-power data
 class AudioSpectrumCache
 : public DataBlockCache<float, 10, AudioSpectrumCacheBlockFactory> {
 public:
 	AudioSpectrumCache(size_t block_count, AudioSpectrumRenderer *renderer)
 	: DataBlockCache<float, 10, AudioSpectrumCacheBlockFactory>(
 		block_count, AudioSpectrumCacheBlockFactory(renderer))
 	{
 	}
 };
 
 AudioSpectrumRenderer::AudioSpectrumRenderer()
-: AudioRendererBitmapProvider()
-, cache(0)
-, colors_normal(12)
-, colors_selected(12)
+: colors_normal(new AudioColorScheme(12))
+, colors_selected(new AudioColorScheme(12))
+, colors_inactive(new AudioColorScheme(12))
 , derivation_size(8)
 , derivation_dist(8)
 #ifdef WITH_FFTW
 , dft_plan(0)
 , dft_input(0)
 , dft_output(0)
-#else
-, fft_scratch(0)
 #endif
-, audio_scratch(0)
 {
-	colors_normal.InitIcyBlue(AudioStyle_Normal);
-	colors_selected.InitIcyBlue(AudioStyle_Selected);
+	colors_normal->InitIcyBlue(AudioStyle_Normal);
+	colors_selected->InitIcyBlue(AudioStyle_Selected);
+	colors_inactive->InitIcyBlue(AudioStyle_Inactive);
 }

@@ -138,14 +129,8 @@ AudioSpectrumRenderer::~AudioSpectrumRenderer()
 	RecreateCache();
 }
 
 void AudioSpectrumRenderer::RecreateCache()
 {
-	delete cache;
-	delete[] audio_scratch;
-	cache = 0;
-	audio_scratch = 0;
-
 #ifdef WITH_FFTW
 	if (dft_plan)
 	{

@@ -156,15 +141,12 @@ void AudioSpectrumRenderer::RecreateCache()
 		dft_input = 0;
 		dft_output = 0;
 	}
-#else
-	delete[] fft_scratch;
-	fft_scratch = 0;
 #endif
 
 	if (provider)
 	{
 		size_t block_count = (size_t)((provider->GetNumSamples() + (size_t)(1<<derivation_dist) - 1) >> derivation_dist);
-		cache = new AudioSpectrumCache(block_count, this);
+		cache.reset(new AudioSpectrumCache(block_count, this));
 
 #ifdef WITH_FFTW
 		dft_input = (double*)fftw_malloc(sizeof(double) * (2<<derivation_size));

@@ -179,19 +161,17 @@ void AudioSpectrumRenderer::RecreateCache()
 		// 2x for the input sample data
 		// 2x for the real part of the output
 		// 2x for the imaginary part of the output
-		fft_scratch = new float[6<<derivation_size];
+		fft_scratch.resize(6 << derivation_size);
 #endif
 
-		audio_scratch = new int16_t[2<<derivation_size];
+		audio_scratch.resize(2 << derivation_size);
 	}
 }
 
 void AudioSpectrumRenderer::OnSetProvider()
 {
 	RecreateCache();
 }
 
 void AudioSpectrumRenderer::SetResolution(size_t _derivation_size, size_t _derivation_dist)
 {
 	if (derivation_dist != _derivation_dist)

@@ -208,6 +188,13 @@ void AudioSpectrumRenderer::SetResolution(size_t _derivation_size, size_t _deriv
 	}
 }
 
+template<class T>
+void AudioSpectrumRenderer::ConvertToFloat(size_t count, T &dest) {
+	for (size_t si = 0; si < count; ++si)
+	{
+		dest[si] = (float)(audio_scratch[si]) / 32768.f;
+	}
+}
 
 void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
 {

@@ -215,14 +202,10 @@ void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
 	assert(block);
 
 	int64_t first_sample = ((int64_t)block_index) << derivation_dist;
-	provider->GetAudio(audio_scratch, first_sample, 2 << derivation_size);
+	provider->GetAudio(&audio_scratch[0], first_sample, 2 << derivation_size);
 
 #ifdef WITH_FFTW
-	// Convert audio data to float range [-1;+1)
-	for (size_t si = 0; si < (size_t)(2<<derivation_size); ++si)
-	{
-		dft_input[si] = (float)(audio_scratch[si]) / 32768.f;
-	}
+	ConvertToFloat(2 << derivation_size, dft_input);
 
 	fftw_execute(dft_plan);

@@ -235,16 +218,11 @@ void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
 		o++;
 	}
 #else
-	float *fft_input = fft_scratch;
-	float *fft_real = fft_scratch + (2 << derivation_size);
-	float *fft_imag = fft_scratch + (4 << derivation_size);
-
-	// Convert audio data to float range [-1;+1)
-	for (size_t si = 0; si < (size_t)(2<<derivation_size); ++si)
-	{
-		fft_input[si] = (float)(audio_scratch[si]) / 32768.f;
-	}
-	fft_input = fft_scratch;
+	ConvertToFloat(2 << derivation_size, fft_scratch);
+
+	float *fft_input = &fft_scratch[0];
+	float *fft_real = &fft_scratch[0] + (2 << derivation_size);
+	float *fft_imag = &fft_scratch[0] + (4 << derivation_size);
 
 	FFT fft;
 	fft.Transform(2<<derivation_size, fft_input, fft_real, fft_imag);

@@ -262,7 +240,6 @@ void AudioSpectrumRenderer::FillBlock(size_t block_index, float *block)
 #endif
 }
 
-
 void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle style)
 {
 	if (!cache)

@@ -283,7 +260,7 @@ void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
 	ptrdiff_t stride = img.GetWidth()*3;
 	int imgheight = img.GetHeight();
 
-	AudioColorScheme *pal = style == AudioStyle_Selected ? &colors_selected : &colors_normal;
+	const AudioColorScheme *pal = GetColorScheme(style);
 
 	/// @todo Make minband and maxband configurable
 	int minband = 0;

@@ -339,24 +316,26 @@ void AudioSpectrumRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
 	targetdc.DrawBitmap(tmpbmp, 0, 0);
 }
 
 void AudioSpectrumRenderer::RenderBlank(wxDC &dc, const wxRect &rect, AudioRenderingStyle style)
 {
 	// Get the colour of silence
-	AudioColorScheme *pal = style == AudioStyle_Selected ? &colors_selected : &colors_normal;
-	unsigned char color_raw[4];
-	pal->map(0.0, color_raw);
-	wxColour col(color_raw[0], color_raw[1], color_raw[2]);
+	wxColour col = GetColorScheme(style)->get(0.0f);
 
 	dc.SetBrush(wxBrush(col));
 	dc.SetPen(wxPen(col));
 	dc.DrawRectangle(rect);
 }
 
 void AudioSpectrumRenderer::AgeCache(size_t max_size)
 {
 	if (cache)
 		cache->Age(max_size);
 }
+
+const AudioColorScheme *AudioSpectrumRenderer::GetColorScheme(AudioRenderingStyle style) const
+{
+	switch (style)
+	{
+	case AudioStyle_Selected: return colors_selected.get();
+	case AudioStyle_Inactive: return colors_inactive.get();
+	default: return colors_normal.get();
+	}
+}

src/audio_renderer_spectrum.h

@@ -37,21 +37,21 @@
 #ifndef AGI_PRE
 #include <stdint.h>
+#include <vector>
 #endif
 
+#include "audio_renderer.h"
+
+#include <libaegisub/scoped_ptr.h>
 
 #ifdef WITH_FFTW
 #include <fftw3.h>
 #endif
 
-// Specified and implemented in cpp file, to avoid pulling in too much
-// complex template code in this header.
+class AudioColorScheme;
+
 class AudioSpectrumCache;
 struct AudioSpectrumCacheBlockFactory;
 
 /// @class AudioSpectrumRenderer
 /// @brief Render frequency-power spectrum graphs for audio data.
 ///

@@ -61,13 +61,16 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
 	friend struct AudioSpectrumCacheBlockFactory;
 
 	/// Internal cache management for the spectrum
-	AudioSpectrumCache *cache;
+	agi::scoped_ptr<AudioSpectrumCache> cache;
 
 	/// Colour table used for regular rendering
-	AudioColorScheme colors_normal;
+	agi::scoped_ptr<AudioColorScheme> colors_normal;
 
 	/// Colour table used for rendering the audio selection
-	AudioColorScheme colors_selected;
+	agi::scoped_ptr<AudioColorScheme> colors_selected;
+
+	/// Colour table used for rendering inactive lines
+	agi::scoped_ptr<AudioColorScheme> colors_inactive;
 
 	/// Binary logarithm of number of samples to use in deriving frequency-power data
 	size_t derivation_size;

@@ -84,7 +87,7 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
 	/// @brief Recreates the cache
 	///
 	/// To be called when the number of blocks in cache might have changed,
-	// eg. new audio provider or new resolution.
+	/// e.g. new audio provider or new resolution.
 	void RecreateCache();
 
 	/// @brief Fill a block with frequency-power data for a time range

@@ -92,6 +95,12 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
 	/// @param[out] block Address to write the data to
 	void FillBlock(size_t block_index, float *block);
 
+	/// @brief Convert audio data to float range [-1;+1)
+	/// @param count Samples to convert
+	/// @param dest Buffer to fill
+	template<class T>
+	void ConvertToFloat(size_t count, T &dest);
+
 #ifdef WITH_FFTW
 	/// FFTW plan data
 	fftw_plan dft_plan;

@@ -101,21 +110,24 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
 	fftw_complex *dft_output;
 #else
 	/// Pre-allocated scratch area for doing FFT derivations
-	float *fft_scratch;
+	std::vector<float> fft_scratch;
 #endif
 
 	/// Pre-allocated scratch area for storing raw audio data
-	int16_t *audio_scratch;
+	std::vector<int16_t> audio_scratch;
+
+	/// Get the color scheme for a rendering style
+	const AudioColorScheme *GetColorScheme(AudioRenderingStyle style) const;
 
 public:
 	/// @brief Constructor
 	AudioSpectrumRenderer();
 
 	/// @brief Destructor
-	virtual ~AudioSpectrumRenderer();
+	~AudioSpectrumRenderer();
 
 	/// @brief Render a range of audio spectrum
-	/// @param bmp [in,out] Bitmap to render into, also carries lenght information
+	/// @param bmp [in,out] Bitmap to render into, also carries length information
 	/// @param start First column of pixel data in display to render
 	/// @param style Style to render audio in
 	void Render(wxBitmap &bmp, int start, AudioRenderingStyle style);

src/audio_renderer_waveform.cpp

@@ -53,10 +53,12 @@ AudioWaveformRenderer::AudioWaveformRenderer()
 : AudioRendererBitmapProvider()
 , colors_normal(6)
 , colors_selected(6)
+, colors_inactive(6)
 , audio_buffer(0)
 {
-	colors_normal.InitIcyBlue_Normal();
-	colors_selected.InitIcyBlue_Selected();
+	colors_normal.InitIcyBlue(AudioStyle_Normal);
+	colors_selected.InitIcyBlue(AudioStyle_Selected);
+	colors_inactive.InitIcyBlue(AudioStyle_Inactive);
 }

@@ -72,7 +74,7 @@ void AudioWaveformRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
 	wxRect rect(wxPoint(0, 0), bmp.GetSize());
 	int midpoint = rect.height / 2;
 
-	AudioColorScheme *pal = style == AudioStyle_Selected ? &colors_selected : &colors_normal;
+	const AudioColorScheme *pal = GetColorScheme(style);
 
 	// Fill the background
 	dc.SetBrush(wxBrush(pal->get(0.0f)));

@@ -137,8 +139,7 @@ void AudioWaveformRenderer::Render(wxBitmap &bmp, int start, AudioRenderingStyle
 void AudioWaveformRenderer::RenderBlank(wxDC &dc, const wxRect &rect, AudioRenderingStyle style)
 {
-	AudioColorScheme *pal = style == AudioStyle_Selected ? &colors_selected : &colors_normal;
+	const AudioColorScheme *pal = GetColorScheme(style);
 	wxColor line(pal->get(1.0));
 	wxColor bg(pal->get(0.0));

@@ -155,19 +156,24 @@ void AudioWaveformRenderer::RenderBlank(wxDC &dc, const wxRect &rect, AudioRende
 	dc.DrawLine(rect.x, rect.y+halfheight, rect.x+rect.width, rect.y+halfheight);
 }
 
 void AudioWaveformRenderer::OnSetProvider()
 {
 	delete[] audio_buffer;
 	audio_buffer = 0;
 }
 
 void AudioWaveformRenderer::OnSetSamplesPerPixel()
 {
 	delete[] audio_buffer;
 	audio_buffer = 0;
 }
+
+const AudioColorScheme *AudioWaveformRenderer::GetColorScheme(AudioRenderingStyle style) const
+{
+	switch (style)
+	{
+	case AudioStyle_Selected: return &colors_selected;
+	case AudioStyle_Inactive: return &colors_inactive;
+	default: return &colors_normal;
+	}
+}

src/audio_renderer_waveform.h

@@ -35,9 +35,9 @@
 ///
 /// Render a waveform display of PCM audio data
 
+#ifndef AGI_PRE
 #include <stdint.h>
+#endif
 
 class AudioWaveformRenderer : public AudioRendererBitmapProvider {
 	/// Colour table used for regular rendering

@@ -46,22 +46,27 @@ class AudioWaveformRenderer : public AudioRendererBitmapProvider {
 	/// Colour table used for rendering the audio selection
 	AudioColorScheme colors_selected;
 
+	/// Colour table used for rendering inactive lines
+	AudioColorScheme colors_inactive;
+
 	/// Pre-allocated buffer for audio fetched from provider
 	char *audio_buffer;
 
-protected:
-	virtual void OnSetProvider();
-	virtual void OnSetSamplesPerPixel();
+	/// Get the color scheme for a rendering style
+	const AudioColorScheme *GetColorScheme(AudioRenderingStyle style) const;
+
+	void OnSetProvider();
+	void OnSetSamplesPerPixel();
 
 public:
 	/// @brief Constructor
 	AudioWaveformRenderer();
 
 	/// @brief Destructor
-	virtual ~AudioWaveformRenderer();
+	~AudioWaveformRenderer();
 
 	/// @brief Render a range of audio waveform
-	/// @param bmp [in,out] Bitmap to render into, also carries lenght information
+	/// @param bmp [in,out] Bitmap to render into, also carries length information
 	/// @param start First column of pixel data in display to render
 	/// @param style Style to render audio in
 	void Render(wxBitmap &bmp, int start, AudioRenderingStyle style);
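With these changes both renderers resolve their colour table through GetColorScheme(), so callers only need to pick the right AudioRenderingStyle. A hypothetical caller-side sketch (not code from this commit; the helper names are invented) showing how the new inactive scheme fits the priority order documented on the enum:

// Hypothetical illustration only; assumes audio_renderer.h and audio_rendering_style.h
// are included so AudioRendererBitmapProvider and the style enum are visible.
static AudioRenderingStyle StyleForRange(bool selected, bool active, bool inactive)
{
	// Priority: Selected > Active > Inactive > Normal
	if (selected) return AudioStyle_Selected;
	if (active)   return AudioStyle_Active;
	if (inactive) return AudioStyle_Inactive;
	return AudioStyle_Normal;
}

static void PaintSilence(AudioRendererBitmapProvider &renderer, wxDC &dc, const wxRect &rect,
                         bool selected, bool active, bool inactive)
{
	// Both renderers fill the rectangle with the chosen scheme's colour for zero power
	renderer.RenderBlank(dc, rect, StyleForRange(selected, active, inactive));
}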