Don't block the UI while decoding audio for the caches
Simply zero the memory for audio which hasn't been decoded yet, modify the audio renderer to avoid caching blocks which aren't ready yet, and add a progress indicator to the audio display scrollbar.
parent 7dfd494a46
commit a30d6121fd
15 changed files with 280 additions and 190 deletions
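In outline, each caching audio provider now tracks how many samples its background decoder thread has finished writing, and FillBuffer() zero-fills any requested samples past that point instead of waiting for the decoder. A minimal sketch of that pattern, modelled on the FillBuffer() changes in the diff below (CopyFromCache() is a hypothetical stand-in for the real memory-mapped or RAM block-cache read, and the byte math is simplified to one byte per sample):

#include <algorithm>
#include <atomic>
#include <cstdint>
#include <cstring>

struct CachingProviderSketch {
	// Advanced by the background decoder thread as blocks are written to the cache.
	std::atomic<int64_t> decoded_samples{0};

	// Copy `count` samples starting at `start` into `buf`; anything the decoder
	// has not reached yet is zeroed so the caller never blocks on decoding.
	void FillBuffer(char *buf, int64_t start, int64_t count) const {
		const int64_t decoded = decoded_samples;
		const int64_t missing = std::min(count, start + count - decoded);
		if (missing > 0) {
			std::memset(buf + (count - missing), 0, missing); // silence for undecoded audio
			count -= missing;
		}
		if (count > 0)
			CopyFromCache(buf, start, count); // read the part that has already been decoded
	}

	void CopyFromCache(char *, int64_t, int64_t) const {} // placeholder for the cache read
};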
@@ -151,10 +151,10 @@ temp_file_mapping::temp_file_mapping(fs::path const& filename, uint64_t size)
 temp_file_mapping::~temp_file_mapping() { }

 const char *temp_file_mapping::read(int64_t offset, uint64_t length) {
-return write(offset, length);
+return map(offset, length, read_only, file_size, file, read_region, read_mapping_start);
 }

 char *temp_file_mapping::write(int64_t offset, uint64_t length) {
-return map(offset, length, read_write, file_size, file, region, mapping_start);
+return map(offset, length, read_write, file_size, file, write_region, write_mapping_start);
 }
 }
@@ -49,10 +49,13 @@ namespace agi {

 class temp_file_mapping {
 file_mapping file;
-std::unique_ptr<boost::interprocess::mapped_region> region;
-uint64_t mapping_start = 0;
 uint64_t file_size = 0;

+std::unique_ptr<boost::interprocess::mapped_region> read_region;
+uint64_t read_mapping_start = 0;
+std::unique_ptr<boost::interprocess::mapped_region> write_region;
+uint64_t write_mapping_start = 0;
+
 public:
 temp_file_mapping(fs::path const& filename, uint64_t size);
 ~temp_file_mapping();
@@ -44,6 +44,7 @@
 #include "audio_timing.h"
 #include "block_cache.h"
 #include "compat.h"
+#include "include/aegisub/audio_provider.h"
 #include "include/aegisub/context.h"
 #include "include/aegisub/hotkey.h"
 #include "options.h"
@@ -59,6 +60,7 @@
 #include <wx/dcclient.h>
 #include <wx/mousestate.h>

+namespace {
 /// @brief Colourscheme-based UI colour provider
 ///
 /// This class provides UI colours corresponding to the supplied audio colour
@@ -74,11 +76,8 @@ class UIColours {
 wxColour dark_focused_colour; ///< Dark focused colour from the colour scheme
 wxColour sel_focused_colour; ///< Selection focused colour from the colour scheme

-bool focused; ///< Use the focused colours?
+bool focused = false; ///< Use the focused colours?
 public:
-/// Constructor
-UIColours() : focused(false) { }
-
 /// Set the colour scheme to load colours from
 /// @param name Name of the colour scheme
 void SetColourScheme(std::string const& name)
@@ -113,14 +112,14 @@ class AudioDisplayScrollbar final : public AudioDisplayInteractionObject {
 wxRect bounds;
 wxRect thumb;

-bool dragging; ///< user is dragging with the primary mouse button
+bool dragging = false; ///< user is dragging with the primary mouse button

-int data_length; ///< total amount of data in control
-int page_length; ///< amount of data in one page
-int position; ///< first item displayed
+int data_length = 1; ///< total amount of data in control
+int page_length = 1; ///< amount of data in one page
+int position = 0; ///< first item displayed

-int sel_start; ///< first data item in selection
-int sel_length; ///< number of data items in selection
+int sel_start = -1; ///< first data item in selection
+int sel_length = 0; ///< number of data items in selection

 UIColours colours; ///< Colour provider

@@ -138,13 +137,7 @@ class AudioDisplayScrollbar final : public AudioDisplayInteractionObject {

 public:
 AudioDisplayScrollbar(AudioDisplay *display)
-: dragging(false)
-, data_length(1)
-, page_length(1)
-, position(0)
-, sel_start(-1)
-, sel_length(0)
-, display(display)
+: display(display)
 {
 }

@@ -217,7 +210,7 @@ public:
 return dragging;
 }

-void Paint(wxDC &dc, bool has_focus)
+void Paint(wxDC &dc, bool has_focus, int load_progress)
 {
 colours.SetFocused(has_focus);

@@ -236,6 +229,14 @@ public:
 dc.SetBrush(*wxTRANSPARENT_BRUSH);
 dc.DrawRectangle(bounds);

+if (load_progress > 0 && load_progress < data_length)
+{
+wxRect marker(
+(int64_t)bounds.width * load_progress / data_length - 25, bounds.y + 1,
+25, bounds.height - 2);
+dc.GradientFillLinear(marker, colours.Dark(), colours.Light());
+}
+
 dc.SetPen(wxPen(colours.Light()));
 dc.SetBrush(wxBrush(colours.Light()));
 dc.DrawRectangle(thumb);
@@ -245,14 +246,14 @@ public:
 const int AudioDisplayScrollbar::min_width;

 class AudioDisplayTimeline final : public AudioDisplayInteractionObject {
-int duration; ///< Total duration in ms
-double ms_per_pixel; ///< Milliseconds per pixel
-int pixel_left; ///< Leftmost visible pixel (i.e. scroll position)
+int duration = 0; ///< Total duration in ms
+double ms_per_pixel = 1.0; ///< Milliseconds per pixel
+int pixel_left = 0; ///< Leftmost visible pixel (i.e. scroll position)

 wxRect bounds;

 wxPoint drag_lastpos;
-bool dragging;
+bool dragging = false;

 enum Scale {
 Sc_Millisecond,
@@ -276,11 +277,7 @@ class AudioDisplayTimeline final : public AudioDisplayInteractionObject {

 public:
 AudioDisplayTimeline(AudioDisplay *display)
-: duration(0)
-, ms_per_pixel(1.0)
-, pixel_left(0)
-, dragging(false)
-, display(display)
+: display(display)
 {
 int width, height;
 display->GetTextExtent("0123456789:.", &width, &height);
@@ -457,48 +454,6 @@ public:
 }
 };

-class AudioMarkerInteractionObject final : public AudioDisplayInteractionObject {
-// Object-pair being interacted with
-std::vector<AudioMarker*> markers;
-AudioTimingController *timing_controller;
-// Audio display drag is happening on
-AudioDisplay *display;
-// Mouse button used to initiate the drag
-wxMouseButton button_used;
-// Default to snapping to snappable markers
-bool default_snap;
-// Range in pixels to snap at
-int snap_range;
-
-public:
-AudioMarkerInteractionObject(std::vector<AudioMarker*> markers, AudioTimingController *timing_controller, AudioDisplay *display, wxMouseButton button_used)
-: markers(std::move(markers))
-, timing_controller(timing_controller)
-, display(display)
-, button_used(button_used)
-, default_snap(OPT_GET("Audio/Snap/Enable")->GetBool())
-, snap_range(OPT_GET("Audio/Snap/Distance")->GetInt())
-{
-}
-
-bool OnMouseEvent(wxMouseEvent &event) override
-{
-if (event.Dragging())
-{
-timing_controller->OnMarkerDrag(
-markers,
-display->TimeFromRelativeX(event.GetPosition().x),
-default_snap != event.ShiftDown() ? display->TimeFromAbsoluteX(snap_range) : 0);
-}
-
-// We lose the marker drag if the button used to initiate it goes up
-return !event.ButtonUp(button_used);
-}
-
-/// Get the position in milliseconds of this group of markers
-int GetPosition() const { return markers.front()->GetPosition(); }
-};
-
 class AudioStyleRangeMerger final : public AudioRenderingStyleRanges {
 typedef std::map<int, AudioRenderingStyle> style_map;
 public:
@@ -548,6 +503,50 @@ public:
 iterator end() { return points.end(); }
 };

+}
+
+class AudioMarkerInteractionObject final : public AudioDisplayInteractionObject {
+// Object-pair being interacted with
+std::vector<AudioMarker*> markers;
+AudioTimingController *timing_controller;
+// Audio display drag is happening on
+AudioDisplay *display;
+// Mouse button used to initiate the drag
+wxMouseButton button_used;
+// Default to snapping to snappable markers
+bool default_snap;
+// Range in pixels to snap at
+int snap_range;
+
+public:
+AudioMarkerInteractionObject(std::vector<AudioMarker*> markers, AudioTimingController *timing_controller, AudioDisplay *display, wxMouseButton button_used)
+: markers(std::move(markers))
+, timing_controller(timing_controller)
+, display(display)
+, button_used(button_used)
+, default_snap(OPT_GET("Audio/Snap/Enable")->GetBool())
+, snap_range(OPT_GET("Audio/Snap/Distance")->GetInt())
+{
+}
+
+bool OnMouseEvent(wxMouseEvent &event) override
+{
+if (event.Dragging())
+{
+timing_controller->OnMarkerDrag(
+markers,
+display->TimeFromRelativeX(event.GetPosition().x),
+default_snap != event.ShiftDown() ? display->TimeFromAbsoluteX(snap_range) : 0);
+}
+
+// We lose the marker drag if the button used to initiate it goes up
+return !event.ButtonUp(button_used);
+}
+
+/// Get the position in milliseconds of this group of markers
+int GetPosition() const { return markers.front()->GetPosition(); }
+};
+
 AudioDisplay::AudioDisplay(wxWindow *parent, AudioController *controller, agi::Context *context)
 : wxWindow(parent, -1, wxDefaultPosition, wxDefaultSize, wxWANTS_CHARS|wxBORDER_SIMPLE)
 , audio_open_connection(controller->AddAudioOpenListener(&AudioDisplay::OnAudioOpen, this))
@@ -582,6 +581,7 @@ AudioDisplay::AudioDisplay(wxWindow *parent, AudioController *controller, agi::C
 Bind(wxEVT_CHAR_HOOK, &AudioDisplay::OnKeyDown, this);
 Bind(wxEVT_KEY_DOWN, &AudioDisplay::OnKeyDown, this);
 scroll_timer.Bind(wxEVT_TIMER, &AudioDisplay::OnScrollTimer, this);
+load_timer.Bind(wxEVT_TIMER, &AudioDisplay::OnLoadTimer, this);
 }

 AudioDisplay::~AudioDisplay()
@@ -754,9 +754,43 @@ void AudioDisplay::ReloadRenderingSettings()
 Refresh();
 }

+void AudioDisplay::OnLoadTimer(wxTimerEvent&)
+{
+using namespace std::chrono;
+if (provider)
+{
+const auto now = steady_clock::now();
+const auto elapsed = duration_cast<milliseconds>(now - audio_load_start_time).count();
+if (elapsed == 0) return;
+
+const int64_t new_decoded_count = provider->GetDecodedSamples();
+if (new_decoded_count != last_sample_decoded)
+audio_load_speed = (audio_load_speed + (double)new_decoded_count / elapsed) / 2;
+if (audio_load_speed == 0) return;
+
+int new_pos = AbsoluteXFromTime(elapsed * audio_load_speed * 1000.0 / provider->GetSampleRate());
+if (new_pos > audio_load_position)
+audio_load_position = new_pos;
+
+const double left = last_sample_decoded * 1000.0 / provider->GetSampleRate() / ms_per_pixel;
+const double right = new_decoded_count * 1000.0 / provider->GetSampleRate() / ms_per_pixel;
+
+if (left < scroll_left + pixel_audio_width && right >= scroll_left)
+Refresh();
+else
+RefreshRect(scrollbar->GetBounds());
+last_sample_decoded = new_decoded_count;
+}
+
+if (!provider || last_sample_decoded == provider->GetNumSamples()) {
+load_timer.Stop();
+audio_load_position = -1;
+}
+}
+
 void AudioDisplay::OnPaint(wxPaintEvent&)
 {
-if (!audio_renderer_provider) return;
+if (!audio_renderer_provider || !provider) return;

 wxAutoBufferedPaintDC dc(this);

@@ -787,7 +821,7 @@ void AudioDisplay::OnPaint(wxPaintEvent&)
 PaintTrackCursor(dc);

 if (redraw_scrollbar)
-scrollbar->Paint(dc, HasFocus());
+scrollbar->Paint(dc, HasFocus(), audio_load_position);
 if (redraw_timeline)
 timeline->Paint(dc);
 }
@@ -1142,6 +1176,8 @@ void AudioDisplay::OnFocus(wxFocusEvent &)

 void AudioDisplay::OnAudioOpen(AudioProvider *provider)
 {
+this->provider = provider;
+
 if (!audio_renderer_provider)
 ReloadRenderingSettings();

@@ -1171,6 +1207,13 @@ void AudioDisplay::OnAudioOpen(AudioProvider *provider)

 OnTimingController();
 }
+
+last_sample_decoded = provider->GetDecodedSamples();
+audio_load_position = -1;
+audio_load_speed = 0;
+audio_load_start_time = std::chrono::steady_clock::now();
+if (last_sample_decoded != provider->GetNumSamples())
+load_timer.Start(100);
 }
 else
 {
@@ -33,6 +33,7 @@
 /// @ingroup audio_ui
 ///

+#include <chrono>
 #include <cstdint>
 #include <deque>
 #include <map>
@@ -55,9 +56,11 @@ class AudioProvider;
 class TimeRange;

 // Helper classes used in implementation of the audio display
-class AudioDisplayScrollbar;
-class AudioDisplayTimeline;
-class AudioDisplaySelection;
+namespace {
+class AudioDisplayScrollbar;
+class AudioDisplayTimeline;
+class AudioDisplaySelection;
+}
 class AudioMarkerInteractionObject;

 /// @class AudioDisplayInteractionObject
@@ -91,7 +94,6 @@ public:
 virtual ~AudioDisplayInteractionObject() { }
 };

-
 /// @class AudioDisplay
 /// @brief Primary view/UI for interaction with audio timing
 ///
@@ -111,7 +113,9 @@ class AudioDisplay: public wxWindow {
 std::unique_ptr<AudioRendererBitmapProvider> audio_renderer_provider;

 /// The controller managing us
-AudioController *controller;
+AudioController *controller = nullptr;
+
+AudioProvider *provider = nullptr;

 /// Scrollbar helper object
 std::unique_ptr<AudioDisplayScrollbar> scrollbar;
@@ -132,6 +136,15 @@ class AudioDisplay: public wxWindow {
 /// Timer for scrolling when markers are dragged out of the displayed area
 wxTimer scroll_timer;

+wxTimer load_timer;
+int64_t last_sample_decoded = 0;
+/// Time at which audio loading began, for calculating loading speed
+std::chrono::steady_clock::time_point audio_load_start_time;
+/// Estimated speed of audio decoding in samples per ms
+double audio_load_speed = 0.0;
+/// Current position of the audio loading progress in absolute pixels
+int audio_load_position = 0;
+
 /// Leftmost pixel in the virtual audio image being displayed
 int scroll_left = 0;

@@ -219,6 +232,7 @@ class AudioDisplay: public wxWindow {
 /// wxWidgets keypress event
 void OnKeyDown(wxKeyEvent& event);
 void OnScrollTimer(wxTimerEvent &event);
+void OnLoadTimer(wxTimerEvent &);
 void OnMouseEnter(wxMouseEvent&);
 void OnMouseLeave(wxMouseEvent&);

@@ -108,8 +108,8 @@ std::unique_ptr<AudioProvider> CreateFFmpegSourceAudioProvider(agi::fs::path con

 std::unique_ptr<AudioProvider> CreateConvertAudioProvider(std::unique_ptr<AudioProvider> source_provider);
 std::unique_ptr<AudioProvider> CreateLockAudioProvider(std::unique_ptr<AudioProvider> source_provider);
-std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> source_provider, agi::BackgroundRunner *br);
-std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> source_provider, agi::BackgroundRunner *br);
+std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> source_provider);
+std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> source_provider);

 namespace {
 struct factory {
@@ -187,10 +187,10 @@ std::unique_ptr<AudioProvider> AudioProviderFactory::GetProvider(agi::fs::path c
 return CreateLockAudioProvider(std::move(provider));

 // Convert to RAM
-if (cache == 1) return CreateRAMAudioProvider(std::move(provider), br);
+if (cache == 1) return CreateRAMAudioProvider(std::move(provider));

 // Convert to HD
-if (cache == 2) return CreateHDAudioProvider(std::move(provider), br);
+if (cache == 2) return CreateHDAudioProvider(std::move(provider));

 throw agi::AudioCacheOpenError("Unknown caching method", nullptr);
 }
@@ -132,7 +132,7 @@ void AvisynthAudioProvider::LoadFromClip(AVSValue clip) {

 // Read properties
 channels = vi.AudioChannels();
-num_samples = vi.num_audio_samples;
+decoded_samples = num_samples = vi.num_audio_samples;
 sample_rate = vi.SamplesPerSecond();
 bytes_per_sample = vi.BytesPerAudioSample();
 float_samples = false;
@@ -143,6 +143,7 @@ public:

 sample_rate *= 2;
 num_samples *= 2;
+decoded_samples = decoded_samples * 2;
 }

 void FillBuffer(void *buf, int64_t start, int64_t count) const override {
@@ -81,7 +81,7 @@ public:
 sample_rate = 44100;
 bytes_per_sample = 2;
 float_samples = false;
-num_samples = (int64_t)5*30*60*1000 * sample_rate / 1000;
+decoded_samples = num_samples = (int64_t)5*30*60*1000 * sample_rate / 1000;
 }
 };
 }
@@ -173,6 +173,7 @@ void FFmpegSourceAudioProvider::LoadAudio(agi::fs::path const& filename) {
 channels = AudioInfo.Channels;
 sample_rate = AudioInfo.SampleRate;
 num_samples = AudioInfo.NumSamples;
+decoded_samples = AudioInfo.NumSamples;
 if (channels <= 0 || sample_rate <= 0 || num_samples <= 0)
 throw agi::AudioProviderOpenError("sanity check failed, consult your local psychiatrist", nullptr);

@@ -20,7 +20,6 @@
 #include "compat.h"
 #include "options.h"

-#include <libaegisub/background_runner.h>
 #include <libaegisub/file_mapping.h>
 #include <libaegisub/fs.h>
 #include <libaegisub/path.h>
@@ -31,54 +30,67 @@
 #include <boost/filesystem.hpp>
 #include <boost/format.hpp>
 #include <boost/interprocess/detail/os_thread_functions.hpp>
+#include <thread>
 #include <wx/intl.h>

 namespace {
 class HDAudioProvider final : public AudioProviderWrapper {
 std::unique_ptr<agi::temp_file_mapping> file;
+std::atomic<bool> cancelled = {false};
+std::thread decoder;

 void FillBuffer(void *buf, int64_t start, int64_t count) const override {
-start *= channels * bytes_per_sample;
-count *= channels * bytes_per_sample;
-memcpy(buf, file->read(start, count), count);
+auto missing = std::min(count, start + count - decoded_samples);
+if (missing > 0) {
+memset(static_cast<int16_t*>(buf) + count - missing, 0, missing * bytes_per_sample);
+count -= missing;
+}
+
+if (count > 0) {
+start *= bytes_per_sample;
+count *= bytes_per_sample;
+memcpy(buf, file->read(start, count), count);
+}
 }

 public:
-HDAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br)
+HDAudioProvider(std::unique_ptr<AudioProvider> src)
 : AudioProviderWrapper(std::move(src))
 {
+decoded_samples = 0;
+
 auto path = OPT_GET("Audio/Cache/HD/Location")->GetString();
 if (path == "default")
 path = "?temp";
 auto cache_dir = config::path->MakeAbsolute(config::path->Decode(path), "?temp");

-auto bps = bytes_per_sample * channels;
-
 // Check free space
-if ((uint64_t)num_samples * bps > agi::fs::FreeSpace(cache_dir))
+if ((uint64_t)num_samples * bytes_per_sample > agi::fs::FreeSpace(cache_dir))
 throw agi::AudioCacheOpenError("Not enough free disk space in " + cache_dir.string() + " to cache the audio", nullptr);

 auto filename = str(boost::format("audio-%lld-%lld")
 % (long long)time(nullptr)
 % (long long)boost::interprocess::ipcdetail::get_current_process_id());

-file = agi::util::make_unique<agi::temp_file_mapping>(cache_dir / filename, num_samples * bps);
-br->Run([&] (agi::ProgressSink *ps) {
-ps->SetTitle(from_wx(_("Load audio")));
-ps->SetMessage(from_wx(_("Reading to Hard Disk cache")));
-
+file = agi::util::make_unique<agi::temp_file_mapping>(cache_dir / filename, num_samples * bytes_per_sample);
+decoder = std::thread([&] {
 int64_t block = 65536;
 for (int64_t i = 0; i < num_samples; i += block) {
+if (cancelled) break;
 block = std::min(block, num_samples - i);
-source->GetAudio(file->write(i * bps, block * bps), i, block);
-ps->SetProgress(i, num_samples);
-if (ps->IsCancelled()) return;
+source->GetAudio(file->write(i * bytes_per_sample, block * bytes_per_sample), i, block);
+decoded_samples += block;
 }
 });
 }
+
+~HDAudioProvider() {
+cancelled = true;
+decoder.join();
+}
 };
 }

-std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br) {
-return agi::util::make_unique<HDAudioProvider>(std::move(src), br);
+std::unique_ptr<AudioProvider> CreateHDAudioProvider(std::unique_ptr<AudioProvider> src) {
+return agi::util::make_unique<HDAudioProvider>(std::move(src));
 }

@@ -222,6 +222,8 @@ public:
 data_left -= (ch.size + 1) & ~1;
 filepos += (ch.size + 1) & ~1;
 }
+
+decoded_samples = num_samples;
 }
 };

@@ -353,6 +355,8 @@ public:
 data_left -= (chunk_size + 7) & ~7;
 filepos += (chunk_size + 7) & ~7;
 }
+
+decoded_samples = num_samples;
 }
 };

@@ -37,11 +37,11 @@
 #include "audio_controller.h"
 #include "compat.h"

-#include <libaegisub/background_runner.h>
 #include <libaegisub/util.h>

 #include <array>
 #include <boost/container/stable_vector.hpp>
+#include <thread>
 #include <wx/intl.h>

 namespace {
@@ -51,17 +51,21 @@ namespace {

 class RAMAudioProvider final : public AudioProviderWrapper {
 #ifdef _MSC_VER
-boost::container::stable_vector<char[1 << 22]> blockcache;
+boost::container::stable_vector<char[CacheBlockSize]> blockcache;
 #else
-boost::container::stable_vector<std::array<char, 1 << 22>> blockcache;
+boost::container::stable_vector<std::array<char, CacheBlockSize>> blockcache;
 #endif
+std::atomic<bool> cancelled = {false};
+std::thread decoder;

 void FillBuffer(void *buf, int64_t start, int64_t count) const override;

 public:
-RAMAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br)
+RAMAudioProvider(std::unique_ptr<AudioProvider> src)
 : AudioProviderWrapper(std::move(src))
 {
+decoded_samples = 0;
+
 try {
 blockcache.resize((source->GetNumSamples() * source->GetBytesPerSample() + CacheBlockSize - 1) >> CacheBits);
 }
@@ -69,39 +73,44 @@ public:
 throw agi::AudioCacheOpenError("Couldn't open audio, not enough ram available.", nullptr);
 }

-br->Run([&](agi::ProgressSink *ps) {
-ps->SetTitle(from_wx(_("Load audio")));
-ps->SetMessage(from_wx(_("Reading into RAM")));
-
+decoder = std::thread([&] {
 int64_t readsize = CacheBlockSize / source->GetBytesPerSample();
 for (size_t i = 0; i < blockcache.size(); i++) {
-if (ps->IsCancelled()) return;
-ps->SetProgress(i + 1, blockcache.size());
-source->GetAudio(&blockcache[i][0], i * readsize, std::min<int64_t>(readsize, num_samples - i * readsize));
+if (cancelled) break;
+auto actual_read = std::min<int64_t>(readsize, num_samples - i * readsize);
+source->GetAudio(&blockcache[i][0], i * readsize, actual_read);
+decoded_samples += actual_read;
 }
 });
 }
+
+~RAMAudioProvider() {
+cancelled = true;
+decoder.join();
+}
 };

 void RAMAudioProvider::FillBuffer(void *buf, int64_t start, int64_t count) const {
 char *charbuf = static_cast<char *>(buf);
-int i = (start * bytes_per_sample) >> CacheBits;
-int start_offset = (start * bytes_per_sample) & (CacheBlockSize-1);
-int64_t bytesremaining = count * bytes_per_sample;
-
-while (bytesremaining) {
-int readsize = std::min<int>(bytesremaining, CacheBlockSize - start_offset);
-
-memcpy(charbuf, &blockcache[i++][start_offset], readsize);
-
-charbuf += readsize;
-start_offset = 0;
-bytesremaining -= readsize;
+for (int64_t bytes_remaining = count * bytes_per_sample; bytes_remaining; ) {
+if (start >= decoded_samples) {
+memset(charbuf, 0, bytes_remaining);
+break;
+}
+
+int i = (start * bytes_per_sample) >> CacheBits;
+int start_offset = (start * bytes_per_sample) & (CacheBlockSize-1);
+int read_size = std::min<int>(bytes_remaining, CacheBlockSize - start_offset);
+
+memcpy(charbuf, &blockcache[i++][start_offset], read_size);
+charbuf += read_size;
+
+bytes_remaining -= read_size;
+start += CacheBlockSize / bytes_per_sample;
 }
 }
 }

-std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> src, agi::BackgroundRunner *br) {
-return agi::util::make_unique<RAMAudioProvider>(std::move(src), br);
+std::unique_ptr<AudioProvider> CreateRAMAudioProvider(std::unique_ptr<AudioProvider> src) {
+return agi::util::make_unique<RAMAudioProvider>(std::move(src));
 }

@@ -43,6 +43,16 @@
 #include <wx/bitmap.h>
 #include <wx/dcmemory.h>

+namespace {
+template<typename T>
+bool compare_and_set(T &var, T new_value)
+{
+if (var == new_value) return false;
+var = new_value;
+return true;
+}
+}
+
 AudioRendererBitmapCacheBitmapFactory::AudioRendererBitmapCacheBitmapFactory(AudioRenderer *renderer)
 : renderer(renderer)
 {
@@ -71,64 +81,59 @@ AudioRenderer::AudioRenderer()

 void AudioRenderer::SetMillisecondsPerPixel(double new_pixel_ms)
 {
-if (pixel_ms == new_pixel_ms) return;
-pixel_ms = new_pixel_ms;
-if (renderer)
-renderer->SetMillisecondsPerPixel(pixel_ms);
-
-ResetBlockCount();
+if (compare_and_set(pixel_ms, new_pixel_ms))
+{
+if (renderer)
+renderer->SetMillisecondsPerPixel(pixel_ms);
+
+ResetBlockCount();
+}
 }

 void AudioRenderer::SetHeight(int _pixel_height)
 {
-if (pixel_height == _pixel_height) return;
-pixel_height = _pixel_height;
-Invalidate();
+if (compare_and_set(pixel_height, _pixel_height))
+Invalidate();
 }

 void AudioRenderer::SetAmplitudeScale(float _amplitude_scale)
 {
-if (amplitude_scale == _amplitude_scale) return;
+if (compare_and_set(amplitude_scale, _amplitude_scale))
+{
 // A scaling of 0 or a negative scaling makes no sense
-assert(_amplitude_scale > 0);
-amplitude_scale = _amplitude_scale;
-if (renderer)
-renderer->SetAmplitudeScale(amplitude_scale);
-Invalidate();
+assert(amplitude_scale > 0);
+if (renderer)
+renderer->SetAmplitudeScale(amplitude_scale);
+Invalidate();
+}
 }

 void AudioRenderer::SetRenderer(AudioRendererBitmapProvider *_renderer)
 {
-if (renderer == _renderer) return;
-
-renderer = _renderer;
-Invalidate();
-
-if (renderer)
+if (compare_and_set(renderer, _renderer))
 {
-renderer->SetProvider(provider);
-renderer->SetAmplitudeScale(amplitude_scale);
-renderer->SetMillisecondsPerPixel(pixel_ms);
+Invalidate();
+
+if (renderer)
+{
+renderer->SetProvider(provider);
+renderer->SetAmplitudeScale(amplitude_scale);
+renderer->SetMillisecondsPerPixel(pixel_ms);
+}
 }
 }

 void AudioRenderer::SetAudioProvider(AudioProvider *_provider)
 {
-if (provider == _provider) return;
-provider = _provider;
-Invalidate();
-
-if (renderer)
-renderer->SetProvider(provider);
-
-ResetBlockCount();
+if (compare_and_set(provider, _provider))
+{
+Invalidate();
+
+if (renderer)
+renderer->SetProvider(provider);
+
+ResetBlockCount();
+}
 }

 void AudioRenderer::SetCacheMaxSize(size_t max_size)
@@ -145,13 +150,17 @@ void AudioRenderer::ResetBlockCount()
 {
 if (provider)
 {
-double duration = provider->GetNumSamples() * 1000.0 / provider->GetSampleRate();
-size_t rendered_width = (size_t)ceil(duration / pixel_ms);
-cache_numblocks = rendered_width / cache_bitmap_width;
-for (auto& bmp : bitmaps) bmp.SetBlockCount(cache_numblocks);
+const size_t total_blocks = NumBlocks(provider->GetNumSamples());
+for (auto& bmp : bitmaps) bmp.SetBlockCount(total_blocks);
 }
 }

+size_t AudioRenderer::NumBlocks(const int64_t samples) const
+{
+const double duration = samples * 1000.0 / provider->GetSampleRate();
+return static_cast<size_t>(duration / pixel_ms / cache_bitmap_width);
+}
+
 const wxBitmap *AudioRenderer::GetCachedBitmap(int i, AudioRenderingStyle style)
 {
 assert(provider);
@@ -187,7 +196,7 @@ void AudioRenderer::Render(wxDC &dc, wxPoint origin, int start, int length, Audi
 // And the offset in it to start its use at
 int firstbitmapoffset = start % cache_bitmap_width;
 // The last bitmap required
-int lastbitmap = std::min<int>(end / cache_bitmap_width, cache_numblocks - 1);
+int lastbitmap = std::min<int>(end / cache_bitmap_width, NumBlocks(provider->GetDecodedSamples()) - 1);

 // Set a clipping region so that the first and last bitmaps don't draw
 // outside the requested range
@@ -202,9 +211,7 @@ void AudioRenderer::Render(wxDC &dc, wxPoint origin, int start, int length, Audi

 // Now render blank audio from origin to end
 if (origin.x < lastx)
-{
 renderer->RenderBlank(dc, wxRect(origin.x-1, origin.y, lastx-origin.x+1, pixel_height), style);
-}

 if (needs_age)
 {
@@ -222,27 +229,18 @@ void AudioRenderer::Invalidate()

 void AudioRendererBitmapProvider::SetProvider(AudioProvider *_provider)
 {
-if (provider == _provider) return;
-
-provider = _provider;
-
-OnSetProvider();
+if (compare_and_set(provider, _provider))
+OnSetProvider();
 }

 void AudioRendererBitmapProvider::SetMillisecondsPerPixel(double new_pixel_ms)
 {
-if (pixel_ms == new_pixel_ms) return;
-
-pixel_ms = new_pixel_ms;
-
-OnSetMillisecondsPerPixel();
+if (compare_and_set(pixel_ms, new_pixel_ms))
+OnSetMillisecondsPerPixel();
 }

 void AudioRendererBitmapProvider::SetAmplitudeScale(float _amplitude_scale)
 {
-if (amplitude_scale == _amplitude_scale) return;
-
-amplitude_scale = _amplitude_scale;
-
-OnSetAmplitudeScale();
+if (compare_and_set(amplitude_scale, _amplitude_scale))
+OnSetAmplitudeScale();
 }
@@ -100,8 +100,6 @@ class AudioRenderer {

 /// Cached bitmaps for audio ranges
 std::vector<AudioRendererBitmapCache> bitmaps;
-/// Number of blocks in the bitmap caches
-size_t cache_numblocks = 0;
 /// The maximum allowed size of each bitmap cache, in bytes
 size_t cache_bitmap_maxsize = 0;
 /// The maximum allowed size of the renderer's cache, in bytes
@@ -131,11 +129,14 @@ class AudioRenderer {
 /// has changed.
 void ResetBlockCount();

+/// Calculate the number of cache blocks needed for a given number of samples
+size_t NumBlocks(int64_t samples) const;
+
 public:
 /// @brief Constructor
 ///
-/// Initialises audio rendering to a do-nothing state. An audio provider and bitmap
-/// provider must be set before the audio renderer is functional.
+/// Initialises audio rendering to a do-nothing state. An audio provider
+/// and bitmap provider must be set before the audio renderer is functional.
 AudioRenderer();

 /// @brief Set horizontal zoom
@@ -37,6 +37,7 @@
 #include <libaegisub/exception.h>
 #include <libaegisub/fs_fwd.h>

+#include <atomic>
 #include <boost/filesystem/path.hpp>

 class AudioProvider {
@@ -45,6 +46,7 @@ protected:

 /// for one channel, ie. number of PCM frames
 int64_t num_samples;
+std::atomic<int64_t> decoded_samples;
 int sample_rate;
 int bytes_per_sample;
 bool float_samples;
@@ -62,6 +64,7 @@ public:

 agi::fs::path GetFilename() const { return filename; }
 int64_t GetNumSamples() const { return num_samples; }
+int64_t GetDecodedSamples() const { return decoded_samples; }
 int GetSampleRate() const { return sample_rate; }
 int GetBytesPerSample() const { return bytes_per_sample; }
 int GetChannels() const { return channels; }
@@ -81,6 +84,7 @@ public:
 {
 channels = source->GetChannels();
 num_samples = source->GetNumSamples();
+decoded_samples = source->GetDecodedSamples();
 sample_rate = source->GetSampleRate();
 bytes_per_sample = source->GetBytesPerSample();
 float_samples = source->AreSamplesFloat();