forked from mia/Aegisub
Use NSDMIs where applicable
This commit is contained in:
parent d6a5d9c458
commit 6fad60e58d
114 changed files with 285 additions and 572 deletions
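The change is mechanical throughout: constructors whose member-initializer lists only set fixed defaults are trimmed, and the defaults move onto the member declarations as non-static data member initializers (NSDMIs, C++11). A minimal sketch of the pattern with made-up names, not code from this diff:

    // Before: defaults live in the constructor's initializer list
    class Counter {
        int value;
        bool enabled;
    public:
        Counter() : value(0), enabled(false) { }
    };

    // After: defaults live on the declarations; any constructor that does
    // not mention a member in its initializer list gets the NSDMI value
    class Counter {
        int value = 0;
        bool enabled = false;
    public:
        Counter() { }
    };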
@@ -20,14 +20,9 @@ TODO:
 */
 
-namespace json
-{
+namespace json {
 
-Writer::Writer(std::ostream& ostr)
-: m_ostr(ostr)
-, tab_depth(0)
-{
-}
+Writer::Writer(std::ostream& ostr) : m_ostr(ostr) { }
 
 void Writer::Write(Array const& array) {
 	if (array.empty())
@@ -285,9 +285,7 @@ namespace {
 namespace agi { namespace charset {
 
 IconvWrapper::IconvWrapper(const char* sourceEncoding, const char* destEncoding, bool enableSubst)
-: toNulLen(0)
-, fromNulLen(0)
-, conv(get_converter(enableSubst, sourceEncoding, destEncoding))
+: conv(get_converter(enableSubst, sourceEncoding, destEncoding))
 {
 	// These need to be set only after we verify that the source and dest
 	// charsets are valid
@@ -23,15 +23,11 @@
 
 namespace agi {
 
-Color::Color() : r(0), g(0), b(0), a(0) { }
-
 Color::Color(unsigned char r, unsigned char g, unsigned char b, unsigned char a)
 : r(r), g(g), b(b), a(a)
 { }
 
-Color::Color(std::string const& str)
-: r(0), g(0), b(0), a(0)
-{
+Color::Color(std::string const& str) {
 	parser::parse(*this, str);
 }
@@ -44,10 +44,7 @@ LogSink *log;
 /// Keep this ordered the same as Severity
 const char *Severity_ID = "EAWID";
 
-LogSink::LogSink()
-: messages(250)
-, queue(dispatch::Create())
-{ }
+LogSink::LogSink() : queue(dispatch::Create()) { }
 
 LogSink::~LogSink() {
 	// The destructor for emitters may try to log messages, so disable all the
@@ -86,13 +83,8 @@ decltype(LogSink::messages) LogSink::GetMessages() const {
 
 Message::Message(const char *section, Severity severity, const char *file, const char *func, int line)
 : msg(buffer, sizeof buffer)
+, sm{section, severity, file, func, line, util::time_log(), ""}
 {
-	sm.section = section;
-	sm.severity = severity;
-	sm.file = file;
-	sm.func = func;
-	sm.line = line;
-	sm.tv = util::time_log();
 }
 
 Message::~Message() {
@@ -151,8 +151,6 @@ namespace vfr {
 Framerate::Framerate(double fps)
 : denominator(default_denominator)
 , numerator(int64_t(fps * denominator))
-, last(0)
-, drop(false)
 {
 	if (fps < 0.) throw BadFPS("FPS must be greater than zero");
 	if (fps > 1000.) throw BadFPS("FPS must not be greater than 1000");
@@ -162,7 +160,6 @@ Framerate::Framerate(double fps)
 Framerate::Framerate(int64_t numerator, int64_t denominator, bool drop)
 : denominator(denominator)
 , numerator(numerator)
-, last(0)
 , drop(drop && numerator % denominator != 0)
 {
 	if (numerator <= 0 || denominator <= 0)
@@ -181,14 +178,12 @@ void Framerate::SetFromTimecodes() {
 
 Framerate::Framerate(std::vector<int> timecodes)
 : timecodes(std::move(timecodes))
-, drop(false)
 {
 	SetFromTimecodes();
 }
 
 Framerate::Framerate(std::initializer_list<int> timecodes)
 : timecodes(timecodes)
-, drop(false)
 {
 	SetFromTimecodes();
 }
@@ -203,8 +198,6 @@ void Framerate::swap(Framerate &right) throw() {
 
 Framerate::Framerate(fs::path const& filename)
 : denominator(default_denominator)
-, numerator(0)
-, drop(false)
 {
 	auto file = agi::io::Open(filename);
 	auto encoding = agi::charset::Detect(filename);
@@ -35,7 +35,7 @@ class Writer : private ConstVisitor {
 	void Visit(const Null& null) override;
 
 	std::ostream& m_ostr;
-	int tab_depth;
+	int tab_depth = 0;
 
 public:
 	template <typename ElementTypeT>
@@ -51,5 +51,4 @@ inline std::ostream& operator <<(std::ostream& ostr, UnknownElement const& eleme
 	return ostr;
 }
 
-
 } // End namespace
@@ -46,8 +46,8 @@ struct Converter {
 
 /// @brief A C++ wrapper for iconv
 class IconvWrapper {
-	size_t toNulLen;
-	size_t fromNulLen;
+	size_t toNulLen = 0;
+	size_t fromNulLen = 0;
 	std::unique_ptr<Converter> conv;
 
 public:
@@ -18,12 +18,12 @@
 
 namespace agi {
 struct Color {
-	unsigned char r; ///< Red component
-	unsigned char g; ///< Green component
-	unsigned char b; ///< Blue component
-	unsigned char a; ///< Alpha component
+	unsigned char r = 0; ///< Red component
+	unsigned char g = 0; ///< Green component
+	unsigned char b = 0; ///< Blue component
+	unsigned char a = 0; ///< Alpha component
 
-	Color();
+	Color() { }
 	Color(unsigned char r, unsigned char g, unsigned char b, unsigned char a = 0);
 	Color(std::string const& str);
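One detail worth keeping in mind for the Color change above: a constructor's member-initializer list always takes precedence over an NSDMI, so the parameterized constructor keeps its list while the default constructor can now have an empty body and rely on the in-class values. An illustrative sketch with a hypothetical type, not code from this repository:

    struct Pixel {
        unsigned char r = 0, g = 0, b = 0, a = 0;

        Pixel() { } // every member takes its NSDMI value, 0
        Pixel(unsigned char r, unsigned char g, unsigned char b, unsigned char a)
        : r(r), g(g), b(b), a(a) { } // the initializer list overrides the NSDMIs
    };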
@@ -83,7 +83,7 @@ class Emitter;
 
 /// Log sink, single destination for all messages
 class LogSink {
-	boost::circular_buffer<SinkMessage> messages;
+	boost::circular_buffer<SinkMessage> messages{250};
 	std::unique_ptr<dispatch::Queue> queue;
 
 	/// List of pointers to emitters
@@ -64,24 +64,24 @@ class Framerate {
 	/// Denominator of the FPS
 	///
 	/// For v1 VFR, the assumed FPS is used, for v2 the average FPS
-	int64_t denominator;
+	int64_t denominator = 0;
 	/// Numerator of the FPS
 	///
 	/// For v1 VFR, the assumed FPS is used, for v2 the average FPS
-	int64_t numerator;
+	int64_t numerator = 0;
 
 	/// Unrounded frame-seconds of the final frame in timecodes. For CFR and v2,
 	/// this is simply frame count * denominator, but for v1 it's the
 	/// "unrounded" frame count, since override ranges generally don't exactly
 	/// cover timebase-unit ranges of time. This is needed to match mkvmerge's
 	/// rounding past the end of the final override range.
-	int64_t last;
+	int64_t last = 0;
 
 	/// Start time in milliseconds of each frame
 	std::vector<int> timecodes;
 
 	/// Does this frame rate need drop frames and have them enabled?
-	bool drop;
+	bool drop = false;
 
 	/// Set FPS properties from the timecodes vector
 	void SetFromTimecodes();
@@ -55,11 +55,6 @@ static int next_id = 0;
 
 AssDialogue::AssDialogue()
 : Id(++next_id)
-, Comment(false)
-, Layer(0)
-, Start(0)
-, End(5000)
-, Style("Default")
 {
 	memset(Margin, 0, sizeof Margin);
 }
@@ -136,17 +136,17 @@ public:
 	const int Id;
 
 	/// Is this a comment line?
-	bool Comment;
+	bool Comment = false;
 	/// Layer number
-	int Layer;
+	int Layer = 0;
 	/// Margins: 0 = Left, 1 = Right, 2 = Top (Vertical)
 	int Margin[3];
 	/// Starting time
-	AssTime Start;
+	AssTime Start = 0;
 	/// Ending time
-	AssTime End;
+	AssTime End = 5000;
 	/// Style name
-	boost::flyweight<std::string> Style;
+	boost::flyweight<std::string> Style{ "Default" };
 	/// Actor name
 	boost::flyweight<std::string> Actor;
 	/// Effect name
@@ -45,11 +45,7 @@
 #include <memory>
 #include <wx/sizer.h>
 
-AssExporter::AssExporter(agi::Context *c)
-: c(c)
-, is_default(true)
-{
-}
+AssExporter::AssExporter(agi::Context *c) : c(c) { }
 
 void AssExporter::DrawSettings(wxWindow *parent, wxSizer *target_sizer) {
 	is_default = false;
@@ -58,7 +58,7 @@ class AssExporter {
 
 	/// Have the config windows been created, or should filters simply use
 	/// their default settings
-	bool is_default;
+	bool is_default = true;
 
 public:
 	AssExporter(agi::Context *c);
@@ -50,9 +50,7 @@ std::string AssKaraoke::Syllable::GetText(bool k_tag) const {
 }
 
 
-AssKaraoke::AssKaraoke(AssDialogue *line, bool auto_split, bool normalize)
-: no_announce(false)
-{
+AssKaraoke::AssKaraoke(AssDialogue *line, bool auto_split, bool normalize) {
 	if (line) SetLine(line, auto_split, normalize);
 }
@@ -50,7 +50,7 @@ public:
 private:
 	std::vector<Syllable> syls;
 
-	bool no_announce;
+	bool no_announce = false;
 
 	agi::signal::Signal<> AnnounceSyllablesChanged;
 	void ParseSyllables(AssDialogue *line, Syllable &syl);
@@ -54,7 +54,6 @@ using namespace boost::adaptors;
 AssOverrideParameter::AssOverrideParameter(VariableDataType type, AssParameterClass classification)
 : type(type)
 , classification(classification)
-, omitted(true)
 {
 }
@@ -77,7 +77,7 @@ public:
 	AssParameterClass classification;
 
 	/// Is this parameter actually present?
-	bool omitted;
+	bool omitted = true;
 
 	VariableDataType GetType() const { return type; }
 	template<class T> void Set(T param);
@@ -31,7 +31,6 @@
 AssParser::AssParser(AssFile *target, int version)
 : target(target)
 , version(version)
-, attach(nullptr)
 , state(&AssParser::ParseScriptInfoLine)
 {
 	std::fill(begin(insertion_positions), end(insertion_positions), nullptr);
@@ -49,26 +49,7 @@
 
 #include <wx/intl.h>
 
-AssStyle::AssStyle()
-: name("Default")
-, font("Arial")
-, fontsize(20.)
-, primary(255, 255, 255)
-, secondary(255, 0, 0)
-, bold(false)
-, italic(false)
-, underline(false)
-, strikeout(false)
-, scalex(100.)
-, scaley(100.)
-, spacing(0.)
-, angle(0.)
-, borderstyle(1)
-, outline_w(2.)
-, shadow_w(2.)
-, alignment(2)
-, encoding(1)
-{
+AssStyle::AssStyle() {
 	std::fill(Margin.begin(), Margin.end(), 10);
 
 	UpdateData();
@@ -43,30 +43,30 @@ class AssStyle : public AssEntry {
 	std::string data;
 
 public:
-	std::string name; ///< Name of the style; must be case-insensitively unique within a file despite being case-sensitive
-	std::string font; ///< Font face name
-	double fontsize; ///< Font size
+	std::string name = "Default"; ///< Name of the style; must be case-insensitively unique within a file despite being case-sensitive
+	std::string font = "Arial"; ///< Font face name
+	double fontsize = 20.; ///< Font size
 
-	agi::Color primary; ///< Default text color
-	agi::Color secondary; ///< Text color for not-yet-reached karaoke syllables
-	agi::Color outline; ///< Outline color
-	agi::Color shadow; ///< Shadow color
+	agi::Color primary{ 255, 255, 255 }; ///< Default text color
+	agi::Color secondary{ 255, 0, 0 }; ///< Text color for not-yet-reached karaoke syllables
+	agi::Color outline{ 0, 0, 0 }; ///< Outline color
+	agi::Color shadow{ 0, 0, 0 }; ///< Shadow color
 
-	bool bold;
-	bool italic;
-	bool underline;
-	bool strikeout;
+	bool bold = false;
+	bool italic = false;
+	bool underline = false;
+	bool strikeout = false;
 
-	double scalex; ///< Font x scale with 100 = 100%
-	double scaley; ///< Font y scale with 100 = 100%
-	double spacing; ///< Additional spacing between characters in pixels
-	double angle; ///< Counterclockwise z rotation in degrees
-	int borderstyle; ///< 1: Normal; 3: Opaque box; others are unused in Aegisub
-	double outline_w; ///< Outline width in pixels
-	double shadow_w; ///< Shadow distance in pixels
-	int alignment; ///< \an-style line alignment
+	double scalex = 100.; ///< Font x scale with 100 = 100%
+	double scaley = 100.; ///< Font y scale with 100 = 100%
+	double spacing = 0.; ///< Additional spacing between characters in pixels
+	double angle = 0.; ///< Counterclockwise z rotation in degrees
+	int borderstyle = 1; ///< 1: Normal; 3: Opaque box; others are unused in Aegisub
+	double outline_w = 2.; ///< Outline width in pixels
+	double shadow_w = 2.; ///< Shadow distance in pixels
+	int alignment = 2; ///< \an-style line alignment
 	std::array<int, 3> Margin; ///< Left / Right / Vertical
-	int encoding; ///< ASS font encoding needed for some non-unicode fonts
+	int encoding = 1; ///< ASS font encoding needed for some non-unicode fonts
 
 	/// Update the raw line data after one or more of the public members have been changed
 	void UpdateData();
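As the AssStyle and AssDialogue headers show, an NSDMI can use either the = value form or braced initialization; parenthesized initializers are not allowed in this position, which is why multi-argument defaults such as the colors and the flyweight string use braces. A short illustrative fragment (assumed example, not taken from the diff):

    #include <array>

    struct Example {
        double scale = 100.;                     // copy-initialization form
        std::array<int, 3> margin{ 10, 10, 10 }; // braced (list-initialization) form
        // double angle(0.);                     // ill-formed: an NSDMI cannot use parentheses
    };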
@ -35,9 +35,7 @@
|
|||
|
||||
AssTime::AssTime(int time) : time(mid(0, time, 10 * 60 * 60 * 1000 - 1)) { }
|
||||
|
||||
AssTime::AssTime(std::string const& text)
|
||||
: time(0)
|
||||
{
|
||||
AssTime::AssTime(std::string const& text) {
|
||||
int after_decimal = -1;
|
||||
int current = 0;
|
||||
for (char c : text | boost::adaptors::filtered(boost::is_any_of(",.0123456789:"))) {
|
||||
|
|
|
@ -27,7 +27,7 @@
|
|||
|
||||
class AssTime {
|
||||
/// Time in milliseconds
|
||||
int time;
|
||||
int time = 0;
|
||||
|
||||
public:
|
||||
AssTime(int ms = 0);
|
||||
|
|
|
@ -83,7 +83,6 @@ AudioBox::AudioBox(wxWindow *parent, agi::Context *context)
|
|||
, HorizontalZoom(new wxSlider(panel, Audio_Horizontal_Zoom, -OPT_GET("Audio/Zoom/Horizontal")->GetInt(), -50, 30, wxDefaultPosition, wxSize(-1, 20), wxSL_VERTICAL|wxSL_BOTH))
|
||||
, VerticalZoom(new wxSlider(panel, Audio_Vertical_Zoom, OPT_GET("Audio/Zoom/Vertical")->GetInt(), 0, 100, wxDefaultPosition, wxSize(-1, 20), wxSL_VERTICAL|wxSL_BOTH|wxSL_INVERSE))
|
||||
, VolumeBar(new wxSlider(panel, Audio_Volume, OPT_GET("Audio/Volume")->GetInt(), 0, 100, wxDefaultPosition, wxSize(-1, 20), wxSL_VERTICAL|wxSL_BOTH|wxSL_INVERSE))
|
||||
, mouse_zoom_accum(0)
|
||||
{
|
||||
SetSashVisible(wxSASH_BOTTOM, true);
|
||||
Bind(wxEVT_SASH_DRAGGED, &AudioBox::OnSashDrag, this);
|
||||
|
|
|
@ -74,7 +74,7 @@ class AudioBox : public wxSashWindow {
|
|||
wxSlider *VolumeBar;
|
||||
|
||||
// Mouse wheel zoom accumulator
|
||||
int mouse_zoom_accum;
|
||||
int mouse_zoom_accum = 0;
|
||||
|
||||
void SetHorizontalZoom(int new_zoom);
|
||||
void OnAudioOpen();
|
||||
|
|
|
@ -559,14 +559,6 @@ AudioDisplay::AudioDisplay(wxWindow *parent, AudioController *controller, agi::C
|
|||
, controller(controller)
|
||||
, scrollbar(agi::util::make_unique<AudioDisplayScrollbar>(this))
|
||||
, timeline(agi::util::make_unique<AudioDisplayTimeline>(this))
|
||||
, dragged_object(nullptr)
|
||||
, scroll_left(0)
|
||||
, pixel_audio_width(0)
|
||||
, ms_per_pixel(0.0)
|
||||
, scale_amplitude(1.0f)
|
||||
, audio_top(0)
|
||||
, audio_height(0)
|
||||
, track_cursor_pos(-1)
|
||||
{
|
||||
style_ranges[0] = AudioStyle_Normal;
|
||||
|
||||
|
|
|
@ -124,7 +124,7 @@ class AudioDisplay: public wxWindow {
|
|||
|
||||
|
||||
/// Current object on display being dragged, if any
|
||||
AudioDisplayInteractionObject *dragged_object;
|
||||
AudioDisplayInteractionObject *dragged_object = nullptr;
|
||||
/// Change the dragged object and update mouse capture
|
||||
void SetDraggedObject(AudioDisplayInteractionObject *new_obj);
|
||||
|
||||
|
@ -133,22 +133,22 @@ class AudioDisplay: public wxWindow {
|
|||
wxTimer scroll_timer;
|
||||
|
||||
/// Leftmost pixel in the virtual audio image being displayed
|
||||
int scroll_left;
|
||||
int scroll_left = 0;
|
||||
|
||||
/// Total width of the audio in pixels
|
||||
int pixel_audio_width;
|
||||
int pixel_audio_width = 0;
|
||||
|
||||
/// Horizontal zoom level, measured in milliseconds per pixel
|
||||
double ms_per_pixel;
|
||||
double ms_per_pixel = 0.;
|
||||
|
||||
/// Amplitude scaling ("vertical zoom") as a factor, 1.0 is neutral
|
||||
float scale_amplitude;
|
||||
float scale_amplitude = 1.f;
|
||||
|
||||
/// Top of the main audio area in pixels
|
||||
int audio_top;
|
||||
int audio_top = 0;
|
||||
|
||||
/// Height of main audio area in pixels
|
||||
int audio_height;
|
||||
int audio_height = 0;
|
||||
|
||||
/// Width of the audio marker feet in pixels
|
||||
static const int foot_size = 6;
|
||||
|
@ -157,7 +157,7 @@ class AudioDisplay: public wxWindow {
|
|||
int zoom_level;
|
||||
|
||||
/// Absolute pixel position of the tracking cursor (mouse or playback)
|
||||
int track_cursor_pos;
|
||||
int track_cursor_pos = -1;
|
||||
/// Label to show by track cursor
|
||||
wxString track_cursor_label;
|
||||
/// Bounding rectangle last drawn track cursor label
|
||||
|
|
|
@ -68,15 +68,7 @@ AudioKaraoke::AudioKaraoke(wxWindow *parent, agi::Context *c)
|
|||
, audio_opened(c->audioController->AddAudioOpenListener(&AudioKaraoke::OnAudioOpened, this))
|
||||
, audio_closed(c->audioController->AddAudioCloseListener(&AudioKaraoke::OnAudioClosed, this))
|
||||
, active_line_changed(c->selectionController->AddActiveLineListener(&AudioKaraoke::OnActiveLineChanged, this))
|
||||
, active_line(nullptr)
|
||||
, kara(agi::util::make_unique<AssKaraoke>())
|
||||
, scroll_x(0)
|
||||
, scroll_dir(0)
|
||||
, char_height(0)
|
||||
, char_width(0)
|
||||
, mouse_pos(0)
|
||||
, click_will_remove_split(false)
|
||||
, enabled(false)
|
||||
{
|
||||
using std::bind;
|
||||
|
||||
|
|
|
@ -76,7 +76,7 @@ class AudioKaraoke : public wxWindow {
|
|||
agi::signal::Connection active_line_changed;
|
||||
|
||||
/// Currently active dialogue line
|
||||
AssDialogue *active_line;
|
||||
AssDialogue *active_line = nullptr;
|
||||
/// Karaoke data
|
||||
std::unique_ptr<AssKaraoke> kara;
|
||||
|
||||
|
@ -101,18 +101,18 @@ class AudioKaraoke : public wxWindow {
|
|||
/// Cached width of characters from GetTextExtent
|
||||
std::unordered_map<std::string, int> char_widths;
|
||||
|
||||
int scroll_x; ///< Distance the display has been shifted to the left in pixels
|
||||
int scroll_dir; ///< Direction the display will be scrolled on scroll_timer ticks (+/- 1)
|
||||
int scroll_x = 0; ///< Distance the display has been shifted to the left in pixels
|
||||
int scroll_dir = 0; ///< Direction the display will be scrolled on scroll_timer ticks (+/- 1)
|
||||
wxTimer scroll_timer; ///< Timer to scroll every 50ms when user holds down scroll button
|
||||
|
||||
int char_height; ///< Maximum character height in pixels
|
||||
int char_width; ///< Maximum character width in pixels
|
||||
int mouse_pos; ///< Last x coordinate of the mouse
|
||||
bool click_will_remove_split; ///< If true a click at mouse_pos will remove a split rather than adding one
|
||||
int char_height = 0; ///< Maximum character height in pixels
|
||||
int char_width = 0; ///< Maximum character width in pixels
|
||||
int mouse_pos = 0; ///< Last x coordinate of the mouse
|
||||
bool click_will_remove_split = false; ///< If true a click at mouse_pos will remove a split rather than adding one
|
||||
|
||||
wxFont split_font; ///< Font used in the split/join interface
|
||||
|
||||
bool enabled; ///< Is karaoke mode enabled?
|
||||
bool enabled = false; ///< Is karaoke mode enabled?
|
||||
|
||||
wxButton *accept_button; ///< Accept pending splits button
|
||||
wxButton *cancel_button; ///< Revert pending changes
|
||||
|
|
|
@ -52,20 +52,8 @@ DEFINE_SIMPLE_EXCEPTION(OpenALException, agi::AudioPlayerOpenError, "audio/open/
|
|||
|
||||
OpenALPlayer::OpenALPlayer(AudioProvider *provider)
|
||||
: AudioPlayer(provider)
|
||||
, playing(false)
|
||||
, volume(1.f)
|
||||
, samplerate(provider->GetSampleRate())
|
||||
, bpf(provider->GetChannels() * provider->GetBytesPerSample())
|
||||
, start_frame(0)
|
||||
, cur_frame(0)
|
||||
, end_frame(0)
|
||||
, device(nullptr)
|
||||
, context(nullptr)
|
||||
, source(0)
|
||||
, buf_first_free(0)
|
||||
, buf_first_queued(0)
|
||||
, buffers_free(0)
|
||||
, buffers_played(0)
|
||||
{
|
||||
try {
|
||||
// Open device
|
||||
|
|
|
@ -55,32 +55,32 @@ class OpenALPlayer : public AudioPlayer, wxTimer {
|
|||
/// Number of OpenAL buffers to use
|
||||
static const ALsizei num_buffers = 8;
|
||||
|
||||
bool playing; ///< Is audio currently playing?
|
||||
bool playing = false; ///< Is audio currently playing?
|
||||
|
||||
float volume; ///< Current audio volume
|
||||
float volume = 1.f; ///< Current audio volume
|
||||
ALsizei samplerate; ///< Sample rate of the audio
|
||||
int bpf; ///< Bytes per frame
|
||||
|
||||
int64_t start_frame; ///< First frame of playback
|
||||
int64_t cur_frame; ///< Next frame to write to playback buffers
|
||||
int64_t end_frame; ///< Last frame to play
|
||||
int64_t start_frame = 0; ///< First frame of playback
|
||||
int64_t cur_frame = 0; ///< Next frame to write to playback buffers
|
||||
int64_t end_frame = 0; ///< Last frame to play
|
||||
|
||||
ALCdevice *device; ///< OpenAL device handle
|
||||
ALCcontext *context; ///< OpenAL sound context
|
||||
ALCdevice *device = nullptr; ///< OpenAL device handle
|
||||
ALCcontext *context = nullptr; ///< OpenAL sound context
|
||||
ALuint buffers[num_buffers]; ///< OpenAL sound buffers
|
||||
ALuint source; ///< OpenAL playback source
|
||||
ALuint source = 0; ///< OpenAL playback source
|
||||
|
||||
/// Index into buffers, first free (unqueued) buffer to be filled
|
||||
ALsizei buf_first_free;
|
||||
ALsizei buf_first_free = 0;
|
||||
|
||||
/// Index into buffers, first queued (non-free) buffer
|
||||
ALsizei buf_first_queued;
|
||||
ALsizei buf_first_queued = 0;
|
||||
|
||||
/// Number of free buffers
|
||||
ALsizei buffers_free;
|
||||
ALsizei buffers_free = 0;
|
||||
|
||||
/// Number of buffers which have been fully played since playback was last started
|
||||
ALsizei buffers_played;
|
||||
ALsizei buffers_played = 0;
|
||||
|
||||
wxStopWatch playback_segment_timer;
|
||||
|
||||
|
|
|
@ -50,15 +50,6 @@ DEFINE_SIMPLE_EXCEPTION(OSSError, agi::AudioPlayerOpenError, "audio/player/open/
|
|||
|
||||
OSSPlayer::OSSPlayer(AudioProvider *provider)
|
||||
: AudioPlayer(provider)
|
||||
, rate(0)
|
||||
, thread(0)
|
||||
, playing(false)
|
||||
, volume(1.0f)
|
||||
, start_frame(0)
|
||||
, cur_frame(0)
|
||||
, end_frame(0)
|
||||
, bpf(0)
|
||||
, dspdev(0)
|
||||
{
|
||||
OpenStream();
|
||||
}
|
||||
|
|
|
@ -69,31 +69,31 @@ class OSSPlayer : public AudioPlayer {
|
|||
friend class OSSPlayerThread;
|
||||
|
||||
/// sample rate of audio
|
||||
unsigned int rate;
|
||||
unsigned int rate = 0;
|
||||
|
||||
/// Worker thread that does the actual writing
|
||||
OSSPlayerThread *thread;
|
||||
OSSPlayerThread *thread = nullptr;
|
||||
|
||||
/// Is the player currently playing?
|
||||
volatile bool playing;
|
||||
volatile bool playing = false;
|
||||
|
||||
/// Current volume level
|
||||
volatile float volume;
|
||||
volatile float volume = 1.f;
|
||||
|
||||
/// first frame of playback
|
||||
volatile unsigned long start_frame;
|
||||
volatile unsigned long start_frame = 0;
|
||||
|
||||
/// last written frame + 1
|
||||
volatile unsigned long cur_frame;
|
||||
volatile unsigned long cur_frame = 0;
|
||||
|
||||
/// last frame to play
|
||||
volatile unsigned long end_frame;
|
||||
volatile unsigned long end_frame = 0;
|
||||
|
||||
/// bytes per frame
|
||||
unsigned long bpf;
|
||||
unsigned long bpf = 0;
|
||||
|
||||
/// OSS audio device handle
|
||||
volatile int dspdev;
|
||||
volatile int dspdev = 0;
|
||||
|
||||
void OpenStream();
|
||||
|
||||
|
|
|
@ -69,12 +69,7 @@ static const PaHostApiTypeId pa_host_api_priority[] = {
|
|||
};
|
||||
static const size_t pa_host_api_priority_count = sizeof(pa_host_api_priority) / sizeof(pa_host_api_priority[0]);
|
||||
|
||||
PortAudioPlayer::PortAudioPlayer(AudioProvider *provider)
|
||||
: AudioPlayer(provider)
|
||||
, volume(1.0f)
|
||||
, pa_start(0.0)
|
||||
, stream(0)
|
||||
{
|
||||
PortAudioPlayer::PortAudioPlayer(AudioProvider *provider) : AudioPlayer(provider) {
|
||||
PaError err = Pa_Initialize();
|
||||
|
||||
if (err != paNoError)
|
||||
|
@ -131,7 +126,7 @@ PortAudioPlayer::~PortAudioPlayer() {
|
|||
}
|
||||
|
||||
void PortAudioPlayer::OpenStream() {
|
||||
DeviceVec *device_ids = 0;
|
||||
DeviceVec *device_ids = nullptr;
|
||||
std::string device_name = OPT_GET("Player/Audio/PortAudio/Device Name")->GetString();
|
||||
|
||||
if (devices.count(device_name)) {
|
||||
|
|
|
@ -57,13 +57,13 @@ class PortAudioPlayer : public AudioPlayer {
|
|||
/// The index of the default output devices sorted by host API priority
|
||||
DeviceVec default_device;
|
||||
|
||||
float volume; ///< Current volume level
|
||||
int64_t current; ///< Current position
|
||||
int64_t start; ///< Start position
|
||||
int64_t end; ///< End position
|
||||
float volume = 1.f; ///< Current volume level
|
||||
int64_t current = 0; ///< Current position
|
||||
int64_t start = 0; ///< Start position
|
||||
int64_t end = 0; ///< End position
|
||||
PaTime pa_start; ///< PortAudio internal start position
|
||||
|
||||
PaStream *stream; ///< PortAudio stream
|
||||
PaStream *stream = nullptr; ///< PortAudio stream
|
||||
|
||||
/// @brief PortAudio callback, used to fill buffer for playback, and prime the playback buffer.
|
||||
/// @param inputBuffer Input buffer.
|
||||
|
|
|
@ -48,20 +48,7 @@
|
|||
|
||||
#include <libaegisub/log.h>
|
||||
|
||||
PulseAudioPlayer::PulseAudioPlayer(AudioProvider *provider)
|
||||
: AudioPlayer(provider)
|
||||
, volume(1.0f)
|
||||
, is_playing(false)
|
||||
, start_frame(0)
|
||||
, cur_frame(0)
|
||||
, end_frame(0)
|
||||
, bpf(0)
|
||||
, context_notify(0, 1)
|
||||
, context_success(0, 1)
|
||||
, stream_notify(0, 1)
|
||||
, stream_success(0, 1)
|
||||
, paerror(0)
|
||||
{
|
||||
PulseAudioPlayer::PulseAudioPlayer(AudioProvider *provider) : AudioPlayer(provider) {
|
||||
// Initialise a mainloop
|
||||
mainloop = pa_threaded_mainloop_new();
|
||||
if (!mainloop)
|
||||
|
|
|
@ -40,34 +40,34 @@
|
|||
class PulseAudioPlayer;
|
||||
|
||||
class PulseAudioPlayer : public AudioPlayer {
|
||||
float volume;
|
||||
bool is_playing;
|
||||
float volume = 1.f;
|
||||
bool is_playing = false;
|
||||
|
||||
volatile unsigned long start_frame;
|
||||
volatile unsigned long cur_frame;
|
||||
volatile unsigned long end_frame;
|
||||
volatile unsigned long start_frame = 0;
|
||||
volatile unsigned long cur_frame = 0;
|
||||
volatile unsigned long end_frame = 0;
|
||||
|
||||
unsigned long bpf; // bytes per frame
|
||||
unsigned long bpf = 0; // bytes per frame
|
||||
|
||||
|
||||
wxSemaphore context_notify;
|
||||
wxSemaphore context_success;
|
||||
wxSemaphore context_notify{0, 1};
|
||||
wxSemaphore context_success{0, 1};
|
||||
volatile int context_success_val;
|
||||
|
||||
wxSemaphore stream_notify;
|
||||
wxSemaphore stream_success;
|
||||
wxSemaphore stream_notify{0, 1};
|
||||
wxSemaphore stream_success{0, 1};
|
||||
volatile int stream_success_val;
|
||||
|
||||
pa_threaded_mainloop *mainloop; // pulseaudio mainloop handle
|
||||
pa_context *context; // connection context
|
||||
pa_threaded_mainloop *mainloop = nullptr; // pulseaudio mainloop handle
|
||||
pa_context *context = nullptr; // connection context
|
||||
volatile pa_context_state_t cstate;
|
||||
|
||||
pa_stream *stream;
|
||||
pa_stream *stream = nullptr;
|
||||
volatile pa_stream_state_t sstate;
|
||||
|
||||
volatile pa_usec_t play_start_time; // timestamp when playback was started
|
||||
|
||||
int paerror;
|
||||
int paerror = 0;
|
||||
|
||||
/// Called by PA to notify about context operation completion
|
||||
static void pa_context_success(pa_context *c, int success, PulseAudioPlayer *thread);
|
||||
|
|
|
@ -110,12 +110,10 @@ void AudioProvider::GetAudio(void *buf, int64_t start, int64_t count) const {
|
|||
|
||||
namespace {
|
||||
struct provider_creator {
|
||||
bool found_file;
|
||||
bool found_audio;
|
||||
bool found_file = false;
|
||||
bool found_audio = false;
|
||||
std::string msg;
|
||||
|
||||
provider_creator() : found_file(false) , found_audio(false) { }
|
||||
|
||||
template<typename Factory>
|
||||
std::unique_ptr<AudioProvider> try_create(std::string const& name, Factory&& create) {
|
||||
try {
|
||||
|
|
|
@ -52,11 +52,8 @@
|
|||
#endif
|
||||
|
||||
PCMAudioProvider::PCMAudioProvider(agi::fs::path const& filename)
|
||||
: current_mapping(nullptr)
|
||||
, mapping_start(0)
|
||||
, mapping_length(0)
|
||||
#ifdef _WIN32
|
||||
, file_handle(0, CloseHandle)
|
||||
: file_handle(0, CloseHandle)
|
||||
, file_mapping(0, CloseHandle)
|
||||
{
|
||||
file_handle = CreateFile(
|
||||
|
@ -81,7 +78,7 @@ PCMAudioProvider::PCMAudioProvider(agi::fs::path const& filename)
|
|||
if (file_mapping == 0)
|
||||
throw agi::AudioProviderOpenError("Failed creating file mapping", 0);
|
||||
#else
|
||||
, file_handle(open(filename.c_str(), O_RDONLY), close)
|
||||
: file_handle(open(filename.c_str(), O_RDONLY), close)
|
||||
{
|
||||
if (file_handle == -1)
|
||||
throw agi::fs::FileNotFound(filename.string());
|
||||
|
|
|
@ -44,17 +44,17 @@
|
|||
#include <libaegisub/scoped_ptr.h>
|
||||
|
||||
class PCMAudioProvider : public AudioProvider {
|
||||
mutable void *current_mapping;
|
||||
mutable void *current_mapping = nullptr;
|
||||
|
||||
#ifdef _WIN32
|
||||
mutable int64_t mapping_start;
|
||||
mutable size_t mapping_length;
|
||||
mutable int64_t mapping_start = 0;
|
||||
mutable size_t mapping_length = 0;
|
||||
|
||||
agi::scoped_holder<HANDLE, BOOL (__stdcall *)(HANDLE)> file_handle;
|
||||
agi::scoped_holder<HANDLE, BOOL (__stdcall *)(HANDLE)> file_mapping;
|
||||
#else
|
||||
mutable off_t mapping_start;
|
||||
mutable size_t mapping_length;
|
||||
mutable off_t mapping_start = 0;
|
||||
mutable size_t mapping_length = 0;
|
||||
|
||||
agi::scoped_holder<int, int(*)(int)> file_handle;
|
||||
#endif
|
||||
|
@ -65,7 +65,7 @@ protected:
|
|||
char * EnsureRangeAccessible(int64_t range_start, int64_t range_length) const; // Ensure that the given range of bytes are accessible in the file mapping and return a pointer to the first byte of the requested range
|
||||
|
||||
/// Size of the opened file
|
||||
int64_t file_size;
|
||||
int64_t file_size = 0;
|
||||
|
||||
struct IndexPoint {
|
||||
int64_t start_byte;
|
||||
|
|
|
@ -65,14 +65,6 @@ size_t AudioRendererBitmapCacheBitmapFactory::GetBlockSize() const
|
|||
|
||||
|
||||
AudioRenderer::AudioRenderer()
|
||||
: pixel_ms(0)
|
||||
, pixel_height(0)
|
||||
, amplitude_scale(0)
|
||||
, cache_bitmap_width(32) // arbitrary value for now
|
||||
, cache_bitmap_maxsize(0)
|
||||
, cache_renderer_maxsize(0)
|
||||
, renderer(nullptr)
|
||||
, provider(nullptr)
|
||||
{
|
||||
for (int i = 0; i < AudioStyle_MAX; ++i)
|
||||
bitmaps.emplace_back(256, AudioRendererBitmapCacheBitmapFactory(this));
|
||||
|
|
|
@ -89,31 +89,31 @@ class AudioRenderer {
|
|||
friend struct AudioRendererBitmapCacheBitmapFactory;
|
||||
|
||||
/// Horizontal zoom level, milliseconds per pixel
|
||||
double pixel_ms;
|
||||
double pixel_ms = 0.f;
|
||||
/// Rendering height in pixels
|
||||
int pixel_height;
|
||||
int pixel_height = 0;
|
||||
/// Vertical zoom level/amplitude scale
|
||||
float amplitude_scale;
|
||||
float amplitude_scale = 0.f;
|
||||
|
||||
/// Width of bitmaps to store in cache
|
||||
const int cache_bitmap_width;
|
||||
const int cache_bitmap_width = 32; // Completely arbitrary value
|
||||
|
||||
/// Cached bitmaps for audio ranges
|
||||
std::vector<AudioRendererBitmapCache> bitmaps;
|
||||
/// Number of blocks in the bitmap caches
|
||||
size_t cache_numblocks;
|
||||
size_t cache_numblocks = 0;
|
||||
/// The maximum allowed size of each bitmap cache, in bytes
|
||||
size_t cache_bitmap_maxsize;
|
||||
size_t cache_bitmap_maxsize = 0;
|
||||
/// The maximum allowed size of the renderer's cache, in bytes
|
||||
size_t cache_renderer_maxsize;
|
||||
size_t cache_renderer_maxsize = 0;
|
||||
/// Do the caches need to be aged?
|
||||
bool needs_age;
|
||||
bool needs_age = false;
|
||||
|
||||
/// Actual renderer for bitmaps
|
||||
AudioRendererBitmapProvider *renderer;
|
||||
AudioRendererBitmapProvider *renderer = nullptr;
|
||||
|
||||
/// Audio provider to use as source
|
||||
AudioProvider *provider;
|
||||
AudioProvider *provider = nullptr;
|
||||
|
||||
/// @brief Make sure bitmap index i is in cache
|
||||
/// @param i Index of bitmap to get into cache
|
||||
|
|
|
@ -97,13 +97,6 @@ public:
|
|||
|
||||
|
||||
AudioSpectrumRenderer::AudioSpectrumRenderer(std::string const& color_scheme_name)
|
||||
: derivation_size(8)
|
||||
, derivation_dist(8)
|
||||
#ifdef WITH_FFTW3
|
||||
, dft_plan(nullptr)
|
||||
, dft_input(nullptr)
|
||||
, dft_output(nullptr)
|
||||
#endif
|
||||
{
|
||||
colors.reserve(AudioStyle_MAX);
|
||||
for (int i = 0; i < AudioStyle_MAX; ++i)
|
||||
|
|
|
@ -62,10 +62,10 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
|
|||
std::vector<AudioColorScheme> colors;
|
||||
|
||||
/// Binary logarithm of number of samples to use in deriving frequency-power data
|
||||
size_t derivation_size;
|
||||
size_t derivation_size = 0;
|
||||
|
||||
/// Binary logarithm of number of samples between the start of derivations
|
||||
size_t derivation_dist;
|
||||
size_t derivation_dist = 0;
|
||||
|
||||
/// @brief Reset in response to changing audio provider
|
||||
///
|
||||
|
@ -92,11 +92,11 @@ class AudioSpectrumRenderer : public AudioRendererBitmapProvider {
|
|||
|
||||
#ifdef WITH_FFTW3
|
||||
/// FFTW plan data
|
||||
fftw_plan dft_plan;
|
||||
fftw_plan dft_plan = nullptr;
|
||||
/// Pre-allocated input array for FFTW
|
||||
double *dft_input;
|
||||
double *dft_input = nullptr;
|
||||
/// Pre-allocated output array for FFTW
|
||||
fftw_complex *dft_output;
|
||||
fftw_complex *dft_output = nullptr;
|
||||
#else
|
||||
/// Pre-allocated scratch area for doing FFT derivations
|
||||
std::vector<float> fft_scratch;
|
||||
|
|
|
@ -55,8 +55,7 @@ enum {
|
|||
};
|
||||
|
||||
AudioWaveformRenderer::AudioWaveformRenderer(std::string const& color_scheme_name)
|
||||
: audio_buffer(nullptr)
|
||||
, render_averages(OPT_GET("Audio/Display/Waveform Style")->GetInt() == Waveform_MaxAvg)
|
||||
: render_averages(OPT_GET("Audio/Display/Waveform Style")->GetInt() == Waveform_MaxAvg)
|
||||
{
|
||||
colors.reserve(AudioStyle_MAX);
|
||||
for (int i = 0; i < AudioStyle_MAX; ++i)
|
||||
|
|
|
@ -45,7 +45,7 @@ class AudioWaveformRenderer : public AudioRendererBitmapProvider {
|
|||
std::vector<AudioColorScheme> colors;
|
||||
|
||||
/// Pre-allocated buffer for audio fetched from provider
|
||||
char *audio_buffer;
|
||||
char *audio_buffer = nullptr;
|
||||
|
||||
/// Whether to render max+avg or just max
|
||||
bool render_averages;
|
||||
|
|
|
@ -89,7 +89,7 @@ class AudioTimingControllerKaraoke : public AudioTimingController {
|
|||
AssDialogue *active_line; ///< Currently active line
|
||||
AssKaraoke *kara; ///< Parsed karaoke model provided by karaoke controller
|
||||
|
||||
size_t cur_syl; ///< Index of currently selected syllable in the line
|
||||
size_t cur_syl = 0; ///< Index of currently selected syllable in the line
|
||||
|
||||
/// Pen used for the mid-syllable markers
|
||||
Pen separator_pen;
|
||||
|
@ -115,7 +115,7 @@ class AudioTimingControllerKaraoke : public AudioTimingController {
|
|||
std::vector<AudioLabel> labels;
|
||||
|
||||
bool auto_commit; ///< Should changes be automatically committed?
|
||||
int commit_id; ///< Last commit id used for an autocommit
|
||||
int commit_id = -1; ///< Last commit id used for an autocommit
|
||||
bool pending_changes; ///< Are there any pending changes to be committed?
|
||||
|
||||
void OnAutoCommitChange(agi::OptionValue const& opt);
|
||||
|
@ -160,7 +160,6 @@ AudioTimingControllerKaraoke::AudioTimingControllerKaraoke(agi::Context *c, AssK
|
|||
, c(c)
|
||||
, active_line(c->selectionController->GetActiveLine())
|
||||
, kara(kara)
|
||||
, cur_syl(0)
|
||||
, separator_pen("Colour/Audio Display/Syllable Boundaries", "Audio/Line Boundaries Thickness", wxPENSTYLE_DOT)
|
||||
, start_pen("Colour/Audio Display/Line boundary Start", "Audio/Line Boundaries Thickness")
|
||||
, end_pen("Colour/Audio Display/Line boundary End", "Audio/Line Boundaries Thickness")
|
||||
|
@ -169,7 +168,6 @@ AudioTimingControllerKaraoke::AudioTimingControllerKaraoke(agi::Context *c, AssK
|
|||
, keyframes_provider(c, "Audio/Display/Draw/Keyframes in Karaoke Mode")
|
||||
, video_position_provider(c)
|
||||
, auto_commit(OPT_GET("Audio/Auto/Commit")->GetBool())
|
||||
, commit_id(-1)
|
||||
{
|
||||
slots.push_back(kara->AddSyllablesChangedListener(&AudioTimingControllerKaraoke::Revert, this));
|
||||
slots.push_back(OPT_SUB("Audio/Auto/Commit", &AudioTimingControllerKaraoke::OnAutoCommitChange, this));
|
||||
|
|
|
@ -100,16 +100,8 @@ namespace std {
|
|||
|
||||
BaseGrid::BaseGrid(wxWindow* parent, agi::Context *context, const wxSize& size, long style, const wxString& name)
|
||||
: wxWindow(parent, -1, wxDefaultPosition, size, style, name)
|
||||
, lineHeight(1) // non-zero to avoid div by 0
|
||||
, holding(false)
|
||||
, scrollBar(new wxScrollBar(this, GRID_SCROLLBAR, wxDefaultPosition, wxDefaultSize, wxSB_VERTICAL))
|
||||
, byFrame(false)
|
||||
, extendRow(-1)
|
||||
, active_line(nullptr)
|
||||
, batch_level(0)
|
||||
, batch_active_line_changed(false)
|
||||
, seek_listener(context->videoController->AddSeekListener(std::bind(&BaseGrid::Refresh, this, false, nullptr)))
|
||||
, yPos(0)
|
||||
, context(context)
|
||||
{
|
||||
scrollBar->SetScrollbar(0,10,100,10);
|
||||
|
|
|
@ -50,27 +50,27 @@ namespace agi {
|
|||
class AssDialogue;
|
||||
|
||||
class BaseGrid : public wxWindow, public SubtitleSelectionController {
|
||||
int lineHeight; ///< Height of a line in pixels in the current font
|
||||
bool holding; ///< Is a drag selection in process?
|
||||
int lineHeight = 1; ///< Height of a line in pixels in the current font
|
||||
bool holding = false; ///< Is a drag selection in process?
|
||||
wxFont font; ///< Current grid font
|
||||
wxScrollBar *scrollBar; ///< The grid's scrollbar
|
||||
bool byFrame; ///< Should times be displayed as frame numbers
|
||||
bool byFrame = false; ///< Should times be displayed as frame numbers
|
||||
wxBrush rowColors[7]; ///< Cached brushes used for row backgrounds
|
||||
|
||||
/// Row from which the selection shrinks/grows from when selecting via the
|
||||
/// keyboard, shift-clicking or dragging
|
||||
int extendRow;
|
||||
int extendRow = -1;
|
||||
|
||||
Selection selection; ///< Currently selected lines
|
||||
AssDialogue *active_line; ///< The currently active line or 0 if none
|
||||
AssDialogue *active_line = nullptr; ///< The currently active line or 0 if none
|
||||
std::vector<AssDialogue*> index_line_map; ///< Row number -> dialogue line
|
||||
std::map<AssDialogue*,int> line_index_map; ///< Dialogue line -> row number
|
||||
|
||||
/// Selection batch nesting depth; changes are committed only when this
|
||||
/// hits zero
|
||||
int batch_level;
|
||||
int batch_level = 0;
|
||||
/// Has the active line been changed in the current batch?
|
||||
bool batch_active_line_changed;
|
||||
bool batch_active_line_changed = false;
|
||||
/// Lines which will be added to the selection when the current batch is
|
||||
/// completed; should be disjoint from selection
|
||||
Selection batch_selection_added;
|
||||
|
@ -112,7 +112,7 @@ class BaseGrid : public wxWindow, public SubtitleSelectionController {
|
|||
|
||||
bool showCol[10]; ///< Column visibility mask
|
||||
|
||||
int yPos;
|
||||
int yPos = 0;
|
||||
|
||||
void AdjustScrollbar();
|
||||
void SetColumnWidths();
|
||||
|
|
|
@ -451,8 +451,6 @@ bool KaraokeLineMatchDisplay::UndoMatch()
|
|||
DialogKanjiTimer::DialogKanjiTimer(agi::Context *c)
|
||||
: wxDialog(c->parent, -1, _("Kanji timing"))
|
||||
, subs(c->ass)
|
||||
, currentSourceLine(nullptr)
|
||||
, currentDestinationLine(nullptr)
|
||||
{
|
||||
SetIcon(GETICON(kara_timing_copier_16));
|
||||
|
||||
|
|
|
@ -55,8 +55,8 @@ class DialogKanjiTimer : public wxDialog {
|
|||
|
||||
std::vector<std::pair<AssDialogue*, std::string>> LinesToChange;
|
||||
|
||||
AssEntry *currentSourceLine;
|
||||
AssEntry *currentDestinationLine;
|
||||
AssEntry *currentSourceLine = nullptr;
|
||||
AssEntry *currentDestinationLine = nullptr;
|
||||
|
||||
void OnClose(wxCommandEvent &event);
|
||||
void OnStart(wxCommandEvent &event);
|
||||
|
|
|
@ -115,8 +115,6 @@ public:
|
|||
DialogProgress::DialogProgress(wxWindow *parent, wxString const& title_text, wxString const& message)
|
||||
: wxDialog(parent, -1, title_text, wxDefaultPosition, wxDefaultSize, wxBORDER_RAISED)
|
||||
, pulse_timer(GetEventHandler())
|
||||
, progress_current(0)
|
||||
, progress_target(0)
|
||||
{
|
||||
title = new wxStaticText(this, -1, title_text, wxDefaultPosition, wxDefaultSize, wxALIGN_CENTRE | wxST_NO_AUTORESIZE);
|
||||
gauge = new wxGauge(this, -1, 300, wxDefaultPosition, wxSize(300,20));
|
||||
|
|
|
@ -44,11 +44,11 @@ class DialogProgress : public wxDialog, public agi::BackgroundRunner {
|
|||
wxTimer pulse_timer;
|
||||
|
||||
wxString pending_log;
|
||||
int progress_anim_start_value;
|
||||
int progress_current;
|
||||
int progress_target;
|
||||
int progress_anim_start_value = 0;
|
||||
int progress_current = 0;
|
||||
int progress_target = 0;
|
||||
std::chrono::steady_clock::time_point progress_anim_start_time;
|
||||
int progress_anim_duration;
|
||||
int progress_anim_duration = 0;
|
||||
|
||||
void OnShow(wxShowEvent&);
|
||||
void OnCancel(wxCommandEvent &);
|
||||
|
|
|
@ -54,9 +54,6 @@ DialogSpellChecker::DialogSpellChecker(agi::Context *context)
|
|||
: wxDialog(context->parent, -1, _("Spell Checker"))
|
||||
, context(context)
|
||||
, spellchecker(SpellCheckerFactory::GetSpellChecker())
|
||||
, start_line(nullptr)
|
||||
, active_line(nullptr)
|
||||
, has_looped(false)
|
||||
{
|
||||
SetIcon(GETICON(spellcheck_toolbutton_16));
|
||||
|
||||
|
|
|
@ -59,9 +59,9 @@ class DialogSpellChecker : public wxDialog {
|
|||
wxButton *add_button; ///< Add word to currently active dictionary
|
||||
wxButton *remove_button; ///< Remove word from currently active dictionary
|
||||
|
||||
AssDialogue *start_line; ///< The first line checked
|
||||
AssDialogue *active_line; ///< The most recently checked line
|
||||
bool has_looped; ///< Has the search already looped from the end to beginning?
|
||||
AssDialogue *start_line = nullptr; ///< The first line checked
|
||||
AssDialogue *active_line = nullptr; ///< The most recently checked line
|
||||
bool has_looped = false; ///< Has the search already looped from the end to beginning?
|
||||
|
||||
/// Find the next misspelled word and close the dialog if there are none
|
||||
/// @return Are there any more misspelled words?
|
||||
|
|
|
@ -142,7 +142,6 @@ static wxTextCtrl *num_text_ctrl(wxWindow *parent, double *value, bool allow_neg
|
|||
DialogStyleEditor::DialogStyleEditor(wxWindow *parent, AssStyle *style, agi::Context *c, AssStyleStorage *store, std::string const& new_name, wxArrayString const& font_list)
|
||||
: wxDialog (parent, -1, _("Style Editor"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER)
|
||||
, c(c)
|
||||
, is_new(false)
|
||||
, style(style)
|
||||
, store(store)
|
||||
{
|
||||
|
|
|
@ -54,7 +54,7 @@ class DialogStyleEditor : public wxDialog {
|
|||
/// If true, the style was just created and so the user should not be
|
||||
/// asked if they want to change any existing lines should they rename
|
||||
/// the style
|
||||
bool is_new;
|
||||
bool is_new = false;
|
||||
|
||||
/// The style currently being edited
|
||||
AssStyle *style;
|
||||
|
|
|
@ -56,7 +56,6 @@ DialogStyling::DialogStyling(agi::Context *context)
|
|||
: wxDialog(context->parent, -1, _("Styling Assistant"), wxDefaultPosition, wxDefaultSize, wxDEFAULT_DIALOG_STYLE | wxRESIZE_BORDER | wxMINIMIZE_BOX)
|
||||
, c(context)
|
||||
, active_line_connection(context->selectionController->AddActiveLineListener(&DialogStyling::OnActiveLineChanged, this))
|
||||
, active_line(nullptr)
|
||||
{
|
||||
SetIcon(GETICON(styling_toolbutton_16));
|
||||
|
||||
|
|
|
@ -55,7 +55,7 @@ class DialogStyling : public wxDialog {
|
|||
|
||||
void OnActiveLineChanged(AssDialogue *);
|
||||
|
||||
AssDialogue *active_line;
|
||||
AssDialogue *active_line = nullptr;
|
||||
|
||||
std::unique_ptr<PersistLocation> persist;
|
||||
|
||||
|
|
|
@ -66,10 +66,8 @@ DialogTranslation::DialogTranslation(agi::Context *c)
|
|||
, file_change_connection(c->ass->AddCommitListener(&DialogTranslation::OnExternalCommit, this))
|
||||
, active_line_connection(c->selectionController->AddActiveLineListener(&DialogTranslation::OnActiveLineChanged, this))
|
||||
, active_line(c->selectionController->GetActiveLine())
|
||||
, cur_block(0)
|
||||
, line_count(count_if(c->ass->Line.begin(), c->ass->Line.end(), cast<AssDialogue*>()))
|
||||
, line_number(count_if(c->ass->Line.begin(), c->ass->Line.iterator_to(*active_line), cast<AssDialogue*>()) + 1)
|
||||
, switching_lines(false)
|
||||
{
|
||||
SetIcon(GETICON(translation_toolbutton_16));
|
||||
|
||||
|
|
|
@ -47,7 +47,7 @@ class DialogTranslation : public wxDialog {
|
|||
/// The parsed dialogue blocks for the active line
|
||||
boost::ptr_vector<AssDialogueBlock> blocks;
|
||||
/// Which dialogue block in the active line is currently being translated
|
||||
size_t cur_block;
|
||||
size_t cur_block = 0;
|
||||
|
||||
/// Total number of dialogue lines in the file
|
||||
size_t line_count;
|
||||
|
@ -55,7 +55,7 @@ class DialogTranslation : public wxDialog {
|
|||
size_t line_number;
|
||||
|
||||
/// Should active line change announcements be ignored?
|
||||
bool switching_lines;
|
||||
bool switching_lines = false;
|
||||
|
||||
wxStaticText *line_number_display;
|
||||
ScintillaTextCtrl *original_text;
|
||||
|
|
|
@ -59,14 +59,6 @@ AssTransformFramerateFilter::AssTransformFramerateFilter()
|
|||
: AssExportFilter(from_wx(_("Transform Framerate")),
|
||||
from_wx(_("Transform subtitle times, including those in override tags, from an input framerate to an output framerate.\n\nThis is useful for converting regular time subtitles to VFRaC time subtitles for hardsubbing.\nIt can also be used to convert subtitles to a different speed video, such as NTSC to PAL speedup.")),
|
||||
1000)
|
||||
, c(nullptr)
|
||||
, line(nullptr)
|
||||
, newStart(0)
|
||||
, newEnd(0)
|
||||
, newK(0)
|
||||
, oldK(0)
|
||||
, Input(nullptr)
|
||||
, Output(nullptr)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -45,16 +45,16 @@ class wxTextCtrl;
|
|||
/// @class AssTransformFramerateFilter
|
||||
/// @brief Transform subtitle times, including those in override tags, from an input framerate to an output framerate
|
||||
class AssTransformFramerateFilter : public AssExportFilter {
|
||||
agi::Context *c;
|
||||
AssDialogue *line;
|
||||
int newStart;
|
||||
int newEnd;
|
||||
int newK;
|
||||
int oldK;
|
||||
agi::Context *c = nullptr;
|
||||
AssDialogue *line = nullptr;
|
||||
int newStart = 0;
|
||||
int newEnd = 0;
|
||||
int newK = 0;
|
||||
int oldK = 0;
|
||||
|
||||
// Yes, these are backwards
|
||||
const agi::vfr::Framerate *Input; ///< Destination frame rate
|
||||
const agi::vfr::Framerate *Output; ///< Source frame rate
|
||||
// Yes, these are backwards. It sort of makes sense if you think about what it's doing.
|
||||
const agi::vfr::Framerate *Input = nullptr; ///< Destination frame rate
|
||||
const agi::vfr::Framerate *Output = nullptr; ///< Source frame rate
|
||||
|
||||
agi::vfr::Framerate t1,t2;
|
||||
|
||||
|
|
|
@ -64,8 +64,6 @@ namespace {
|
|||
FontCollector::FontCollector(FontCollectorStatusCallback status_callback, FontFileLister &lister)
|
||||
: status_callback(std::move(status_callback))
|
||||
, lister(lister)
|
||||
, missing(0)
|
||||
, missing_glyphs(0)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -87,9 +87,9 @@ class FontCollector {
|
|||
/// Paths to found required font files
|
||||
std::set<agi::fs::path> results;
|
||||
/// Number of fonts which could not be found
|
||||
int missing;
|
||||
int missing = 0;
|
||||
/// Number of fonts which were found, but did not contain all used glyphs
|
||||
int missing_glyphs;
|
||||
int missing_glyphs = 0;
|
||||
|
||||
/// Gather all of the unique styles with text on a line
|
||||
void ProcessDialogueLine(const AssDialogue *line, int index);
|
||||
|
|
|
@ -183,10 +183,6 @@ public:
|
|||
FrameMain::FrameMain()
|
||||
: wxFrame(nullptr, -1, "", wxDefaultPosition, wxSize(920,700), wxDEFAULT_FRAME_STYLE | wxCLIP_CHILDREN)
|
||||
, context(agi::util::make_unique<agi::Context>())
|
||||
, showVideo(true)
|
||||
, showAudio(true)
|
||||
, blockVideoLoad(false)
|
||||
, blockAudioLoad(false)
|
||||
{
|
||||
StartupLog("Entering FrameMain constructor");
|
||||
|
||||
|
|
|
@ -66,14 +66,14 @@ class FrameMain: public wxFrame {
|
|||
void Thaw(void) {}
|
||||
#endif
|
||||
|
||||
bool showVideo; ///< Is the video display shown?
|
||||
bool showAudio; ///< Is the audio display shown?
|
||||
bool showVideo = true; ///< Is the video display shown?
|
||||
bool showAudio = true; ///< Is the audio display shown?
|
||||
wxTimer StatusClear; ///< Status bar timeout timer
|
||||
/// Block video loading; used when both video and subtitles are opened at
|
||||
/// the same time, so that the video associated with the subtitles (if any)
|
||||
/// isn't loaded
|
||||
bool blockVideoLoad;
|
||||
bool blockAudioLoad;
|
||||
bool blockVideoLoad = false;
|
||||
bool blockAudioLoad = false;
|
||||
|
||||
void InitToolbar();
|
||||
void InitContents();
|
||||
|
|
|
@ -229,19 +229,8 @@ public:
|
|||
|
||||
}
|
||||
|
||||
OpenGLText::OpenGLText()
|
||||
: r(1.f)
|
||||
, g(1.f)
|
||||
, b(1.f)
|
||||
, a(1.f)
|
||||
, fontSize(0)
|
||||
, fontBold(false)
|
||||
, fontItalics(false)
|
||||
{
|
||||
}
|
||||
|
||||
OpenGLText::~OpenGLText() {
|
||||
}
|
||||
OpenGLText::OpenGLText() { }
|
||||
OpenGLText::~OpenGLText() { }
|
||||
|
||||
void OpenGLText::SetFont(std::string const& face, int size, bool bold, bool italics) {
|
||||
// No change required
|
||||
|
|
|
@ -50,11 +50,11 @@ namespace agi { struct Color; }
|
|||
typedef boost::container::map<int, OpenGLTextGlyph> glyphMap;
|
||||
|
||||
class OpenGLText {
|
||||
float r,g,b,a;
|
||||
float r = 1.f, g = 1.f, b = 1.f, a = 1.f;
|
||||
|
||||
int fontSize;
|
||||
bool fontBold;
|
||||
bool fontItalics;
|
||||
int fontSize = 0;
|
||||
bool fontBold = false;
|
||||
bool fontItalics = false;
|
||||
std::string fontFace;
|
||||
wxFont font;
|
||||
|
||||
|
@ -62,8 +62,8 @@ class OpenGLText {
|
|||
|
||||
std::vector<OpenGLTextTexture> textures;
|
||||
|
||||
OpenGLText(OpenGLText const&);
|
||||
OpenGLText& operator=(OpenGLText const&);
|
||||
OpenGLText(OpenGLText const&) = delete;
|
||||
OpenGLText& operator=(OpenGLText const&) = delete;
|
||||
|
||||
/// @brief Get the glyph for the character chr, creating it if necessary
|
||||
/// @param chr Character to get the glyph of
|
||||
|
|
|
@ -261,7 +261,6 @@ public:
|
|||
HotkeyDataViewModel::HotkeyDataViewModel(Preferences *parent)
|
||||
: root(agi::util::make_unique<HotkeyModelRoot>(this))
|
||||
, parent(parent)
|
||||
, has_pending_changes(false)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -31,7 +31,7 @@ class Preferences;
|
|||
class HotkeyDataViewModel : public wxDataViewModel {
|
||||
std::unique_ptr<HotkeyModelRoot> root;
|
||||
Preferences *parent;
|
||||
bool has_pending_changes;
|
||||
bool has_pending_changes = false;
|
||||
|
||||
/// Get the real item from the wrapper, or root if it's wrapping nullptr
|
||||
const HotkeyModelItem *get(wxDataViewItem const& item) const;
|
||||
|
|
|
@ -66,8 +66,8 @@ public:
|
|||
MkvStdIO(agi::fs::path const& filename);
|
||||
~MkvStdIO() { if (fp) fclose(fp); }
|
||||
|
||||
FILE *fp;
|
||||
int error;
|
||||
FILE *fp = nullptr;
|
||||
int error = 0;
|
||||
};
|
||||
|
||||
#define CACHESIZE 1024
|
||||
|
@ -284,9 +284,7 @@ longlong StdIoGetFileSize(InputStream *st) {
|
|||
return epos;
|
||||
}
|
||||
|
||||
MkvStdIO::MkvStdIO(agi::fs::path const& filename)
|
||||
: error(0)
|
||||
{
|
||||
MkvStdIO::MkvStdIO(agi::fs::path const& filename) {
|
||||
read = StdIoRead;
|
||||
scan = StdIoScan;
|
||||
getcachesize = [](InputStream *) -> unsigned int { return CACHESIZE; };
|
||||
|
|
|
@ -46,8 +46,6 @@
|
|||
|
||||
Spline::Spline(const VisualToolBase &tl)
|
||||
: coord_translator(tl)
|
||||
, scale(1)
|
||||
, raw_scale(1)
|
||||
{
|
||||
}
|
||||
|
||||
|
|
|
@ -43,8 +43,8 @@ class Spline : private std::list<SplineCurve> {
|
|||
/// Visual tool to do the conversion between script and video pixels
|
||||
const VisualToolBase &coord_translator;
|
||||
/// Spline scale
|
||||
int scale;
|
||||
int raw_scale;
|
||||
int scale = 0;
|
||||
int raw_scale = 0;
|
||||
|
||||
/// Video coordinates -> Script coordinates
|
||||
Vector2D ToScript(Vector2D vec) const;
|
||||
|
|
|
@ -59,9 +59,6 @@ struct SubsController::UndoInfo {
|
|||
SubsController::SubsController(agi::Context *context)
|
||||
: context(context)
|
||||
, undo_connection(context->ass->AddUndoManager(&SubsController::OnCommit, this))
|
||||
, commit_id(0)
|
||||
, saved_commit_id(0)
|
||||
, autosaved_commit_id(0)
|
||||
{
|
||||
autosave_timer_changed(&autosave_timer);
|
||||
OPT_SUB("App/Auto/Save", autosave_timer_changed, &autosave_timer);
|
||||
|
|
|
@ -37,11 +37,11 @@ class SubsController {
|
|||
boost::container::list<UndoInfo> redo_stack;
|
||||
|
||||
/// Revision counter for undo coalescing and modified state tracking
|
||||
int commit_id;
|
||||
int commit_id = 0;
|
||||
/// Last saved version of this file
|
||||
int saved_commit_id;
|
||||
int saved_commit_id = 0;
|
||||
/// Last autosaved version of this file
|
||||
int autosaved_commit_id;
|
||||
int autosaved_commit_id = 0;
|
||||
|
||||
/// Timer for triggering autosaves
|
||||
wxTimer autosave_timer;
|
||||
|
|
|
@ -93,11 +93,7 @@ void time_edit_char_hook(wxKeyEvent &event) {
|
|||
|
||||
SubsEditBox::SubsEditBox(wxWindow *parent, agi::Context *context)
|
||||
: wxPanel(parent, -1, wxDefaultPosition, wxDefaultSize, wxTAB_TRAVERSAL | wxRAISED_BORDER, "SubsEditBox")
|
||||
, line(nullptr)
|
||||
, button_bar_split(true)
|
||||
, controls_enabled(true)
|
||||
, c(context)
|
||||
, commit_id(-1)
|
||||
, undo_timer(GetEventHandler())
|
||||
{
|
||||
using std::bind;
|
||||
|
|
|
@ -78,14 +78,14 @@ class SubsEditBox : public wxPanel {
|
|||
std::deque<agi::signal::Connection> connections;
|
||||
|
||||
/// Currently active dialogue line
|
||||
AssDialogue *line;
|
||||
AssDialogue *line = nullptr;
|
||||
/// Last seen grid selection
|
||||
SubtitleSelection sel;
|
||||
|
||||
/// Are the buttons currently split into two lines?
|
||||
bool button_bar_split;
|
||||
bool button_bar_split = true;
|
||||
/// Are the controls currently enabled?
|
||||
bool controls_enabled;
|
||||
bool controls_enabled = true;
|
||||
|
||||
agi::Context *c;
|
||||
|
||||
|
@ -120,7 +120,7 @@ class SubsEditBox : public wxPanel {
|
|||
void CommitText(wxString const& desc);
|
||||
|
||||
/// Last commit ID for undo coalescing
|
||||
int commit_id;
|
||||
int commit_id = -1;
|
||||
|
||||
/// Last used commit message to avoid coalescing different types of changes
|
||||
wxString last_commit_type;
|
||||
|
|
|
@ -86,7 +86,6 @@ SubsTextEditCtrl::SubsTextEditCtrl(wxWindow* parent, wxSize wsize, long style, a
|
|||
: ScintillaTextCtrl(parent, -1, "", wxDefaultPosition, wsize, style)
|
||||
, spellchecker(SpellCheckerFactory::GetSpellChecker())
|
||||
, context(context)
|
||||
, calltip_position(0)
|
||||
{
|
||||
// Set properties
|
||||
SetWrapMode(wxSTC_WRAP_WORD);
|
||||
|
|
|
@ -78,7 +78,7 @@ class SubsTextEditCtrl : public ScintillaTextCtrl {
|
|||
std::string calltip_text;
|
||||
|
||||
/// Position of the currently show calltip
|
||||
size_t calltip_position;
|
||||
size_t calltip_position = 0;
|
||||
|
||||
/// Cursor position which the current calltip is for
|
||||
int cursor_pos;
|
||||
|
|
|
@@ -83,10 +83,7 @@ void msg_callback(int level, const char *fmt, va_list args, void *) {
#endif
}

LibassSubtitlesProvider::LibassSubtitlesProvider(std::string)
: ass_renderer(nullptr)
, ass_track(nullptr)
{
LibassSubtitlesProvider::LibassSubtitlesProvider(std::string) {
auto done = std::make_shared<bool>(false);
auto renderer = std::make_shared<ASS_Renderer*>(nullptr);
cache_queue->Async([=]{

@@ -39,8 +39,8 @@ extern "C" {
}

class LibassSubtitlesProvider : public SubtitlesProvider {
ASS_Renderer* ass_renderer;
ASS_Track* ass_track;
ASS_Renderer* ass_renderer = nullptr;
ASS_Track* ass_track = nullptr;

public:
LibassSubtitlesProvider(std::string);

@@ -114,10 +114,6 @@ ThreadedFrameSource::ThreadedFrameSource(agi::fs::path const& video_filename, st
, subs_provider(get_subs_provider(parent))
, video_provider(VideoProviderFactory::GetProvider(video_filename, colormatrix))
, parent(parent)
, frame_number(-1)
, time(-1.)
, single_frame(-1)
, version(0)
{
}

@@ -50,8 +50,8 @@ class ThreadedFrameSource {
/// Event handler to send FrameReady events to
wxEvtHandler *parent;

int frame_number; ///< Last frame number requested
double time; ///< Time of the frame to pass to the subtitle renderer
int frame_number = -1; ///< Last frame number requested
double time = -1.; ///< Time of the frame to pass to the subtitle renderer

/// Copy of the subtitles file to avoid having to touch the project context
std::unique_ptr<AssFile> subs;
@@ -59,7 +59,7 @@ class ThreadedFrameSource {
/// If >= 0, the subtitles provider currently has just the lines visible on
/// that frame loaded. If -1, the entire file is loaded. If -2, the
/// currently loaded file is out of date.
int single_frame;
int single_frame = -1;

std::shared_ptr<VideoFrame> ProcFrame(int frame, double time, bool raw = false);
@@ -68,7 +68,7 @@ class ThreadedFrameSource {

/// Monotonic counter used to drop frames when changes arrive faster than
/// they can be rendered
std::atomic<uint_fast32_t> version;
std::atomic<uint_fast32_t> version{ 0 };

public:
/// @brief Load the passed subtitle file
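One detail worth noting in the hunk above: the atomic counter is brace-initialized rather than written as "= 0". Assuming a pre-C++17 compiler (this codebase targets C++11), copy-initialization of a std::atomic member would need its deleted copy constructor, so the NSDMI has to use direct-list-initialization. A small self-contained sketch:

#include <atomic>
#include <cstdint>

class FrameSourceSketch {
	// Brace form works; "std::atomic<std::uint_fast32_t> version = 0;" would be
	// copy-initialization, which is ill-formed before C++17 because
	// std::atomic's copy constructor is deleted.
	std::atomic<std::uint_fast32_t> version{ 0 };
public:
	void Bump() { ++version; }
	std::uint_fast32_t Current() const { return version.load(); }
};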
@@ -60,7 +60,6 @@ enum {
TimeEdit::TimeEdit(wxWindow* parent, wxWindowID id, agi::Context *c, const std::string& value, const wxSize& size, bool asEnd)
: wxTextCtrl(parent, id, to_wx(value), wxDefaultPosition, size, wxTE_CENTRE | wxTE_PROCESS_ENTER)
, c(c)
, byFrame(false)
, isEnd(asEnd)
, insert(!OPT_GET("Subtitle/Time Edit/Insert Mode")->GetBool())
, insert_opt(OPT_SUB("Subtitle/Time Edit/Insert Mode", &TimeEdit::OnInsertChanged, this))

@@ -48,8 +48,8 @@ namespace agi {
/// This control constrains values to valid times, and can display the time
/// being edited as either an h:mm:ss.cc formatted time, or a frame number
class TimeEdit : public wxTextCtrl {
bool byFrame = false; ///< Is the time displayed as a frame number?
agi::Context *c; ///< Project context
bool byFrame; ///< Is the time displayed as a frame number?
bool isEnd; ///< Should the time be treated as an end time for time <-> frame conversions?
AssTime time; ///< The time, which may be displayed as either a frame number or time
bool insert; ///< If true, disable overwriting behavior in time mode
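The TimeEdit hunks illustrate where the commit draws the line: byFrame(false) is a constant and becomes an NSDMI, while c, isEnd and insert stay in the member-initializer list because their values come from constructor arguments or a runtime option lookup. A sketch of that split, using a hypothetical stand-in for the option accessor (the real code uses OPT_GET):

struct OptionValue { bool GetBool() const { return true; } };

// Stand-in for a runtime option lookup; illustrative only, not Aegisub API.
inline const OptionValue* GetOption(const char*) {
	static OptionValue opt;
	return &opt;
}

class TimeEditSketch {
	bool byFrame = false;  // constant default: NSDMI
	bool isEnd;            // comes from a constructor argument
	bool insert;           // comes from a runtime option, so it stays in the ctor
public:
	explicit TimeEditSketch(bool asEnd)
	: isEnd(asEnd)
	, insert(!GetOption("Subtitle/Time Edit/Insert Mode")->GetBool())
	{ }
};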
@@ -60,14 +60,7 @@

VideoContext::VideoContext()
: playback(this)
, start_ms(0)
, end_frame(0)
, frame_n(0)
, ar_value(1.)
, ar_type(AspectRatio::Default)
, has_subtitles(false)
, playAudioOnStep(OPT_GET("Audio/Plays When Stepping Video"))
, no_amend(false)
{
Bind(EVT_VIDEO_ERROR, &VideoContext::OnVideoError, this);
Bind(EVT_SUBTITLES_ERROR, &VideoContext::OnSubtitlesError, this);

@@ -110,24 +110,24 @@ class VideoContext : public wxEvtHandler {

/// The start time of the first frame of the current playback; undefined if
/// video is not currently playing
int start_ms;
int start_ms = 0;

/// The last frame to play if video is currently playing
int end_frame;
int end_frame = 0;

/// The frame number which was last requested from the video provider,
/// which may not be the same thing as the currently displayed frame
int frame_n;
int frame_n = 0;

/// The picture aspect ratio of the video if the aspect ratio has been
/// overridden by the user
double ar_value;
double ar_value = 1.;

/// The current AR type
AspectRatio ar_type;
AspectRatio ar_type = AspectRatio::Default;

/// Does the currently loaded video file have subtitles muxed into it?
bool has_subtitles;
bool has_subtitles = false;

/// Filename of the currently loaded timecodes file, or empty if timecodes
/// have not been overridden

@@ -140,7 +140,7 @@ class VideoContext : public wxEvtHandler {
/// be kept in perfect sync. Saving the file can add lines to the file
/// without a commit, breaking this sync, so force a non-amend after each
/// save.
bool no_amend;
bool no_amend = false;

void OnPlayTimer(wxTimerEvent &event);

@@ -95,11 +95,6 @@ VideoDisplay::VideoDisplay(
: wxGLCanvas(parent, -1, attribList)
, autohideTools(OPT_GET("Tool/Visual/Autohide"))
, con(c)
, viewport_left(0)
, viewport_width(0)
, viewport_bottom(0)
, viewport_top(0)
, viewport_height(0)
, zoomValue(OPT_GET("Video/Default Zoom")->GetInt() * .125 + .125)
, toolBar(visualSubToolBar)
, zoomBox(zoomBox)

@@ -73,15 +73,15 @@ class VideoDisplay : public wxGLCanvas {
Vector2D last_mouse_pos, mouse_pos;

/// Screen pixels between the left of the canvas and the left of the video
int viewport_left;
int viewport_left = 0;
/// The width of the video in screen pixels
int viewport_width;
int viewport_width = 0;
/// Screen pixels between the bottom of the canvas and the bottom of the video; used for glViewport
int viewport_bottom;
int viewport_bottom = 0;
/// Screen pixels between the bottom of the canvas and the top of the video; used for coordinate space conversion
int viewport_top;
int viewport_top = 0;
/// The height of the video in screen pixels
int viewport_height;
int viewport_height = 0;

/// The current zoom level, where 1.0 = 100%
double zoomValue;
@@ -50,16 +50,10 @@

/// @brief Structure tracking all precomputable information about a subtexture
struct VideoOutGL::TextureInfo {
GLuint textureID;
int dataOffset;
int sourceH;
int sourceW;
TextureInfo()
: textureID(0)
, dataOffset(0)
, sourceH(0)
, sourceW(0)
{ }
GLuint textureID = 0;
int dataOffset = 0;
int sourceH = 0;
int sourceW = 0;
};

/// @brief Test if a texture can be created
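With NSDMIs on every field, the hand-written TextureInfo constructor above becomes redundant: the implicitly generated default constructor applies the member initializers. A sketch of the resulting usage, with plain types standing in for the OpenGL typedefs:

#include <vector>

struct TextureInfoSketch {
	unsigned int textureID = 0;  // GLuint in the real header
	int dataOffset = 0;
	int sourceH = 0;
	int sourceW = 0;
	// No user-declared constructor needed; the implicit one uses the NSDMIs.
};

int main() {
	// Every element is default-constructed with all members zeroed.
	std::vector<TextureInfoSketch> grid(16);
	return grid[0].textureID;  // 0
}

One trade-off worth keeping in mind: in C++11 a struct with NSDMIs is no longer an aggregate, so member-by-member brace initialization of such a struct only becomes legal again in C++14.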
@@ -76,21 +70,7 @@ static bool TestTexture(int width, int height, GLint format) {
return format != 0;
}

VideoOutGL::VideoOutGL()
: maxTextureSize(0)
, supportsRectangularTextures(false)
, internalFormat(0)
, frameWidth(0)
, frameHeight(0)
, frameFormat(0)
, frameFlipped(false)
, textureIdList()
, textureList()
, dl(0)
, textureCount(0)
, textureRows(0)
, textureCols(0)
{ }
VideoOutGL::VideoOutGL() { }

/// @brief Runtime detection of required OpenGL capabilities
void VideoOutGL::DetectOpenGLCapabilities() {
@@ -31,38 +31,38 @@ class VideoOutGL {
struct TextureInfo;

/// The maximum texture size supported by the user's graphics card
int maxTextureSize;
int maxTextureSize = 0;
/// Whether rectangular textures are supported by the user's graphics card
bool supportsRectangularTextures;
bool supportsRectangularTextures = false;
/// The internalformat to use
int internalFormat;
int internalFormat = 0;

/// The frame width which the texture grid has been set up for
int frameWidth;
int frameWidth = 0;
/// The frame height which the texture grid has been set up for
int frameHeight;
int frameHeight = 0;
/// The frame format which the texture grid has been set up for
GLenum frameFormat;
GLenum frameFormat = 0;
/// Whether the grid is set up for flipped video
bool frameFlipped;
bool frameFlipped = false;
/// List of OpenGL texture ids used in the grid
std::vector<GLuint> textureIdList;
/// List of precalculated texture display information
std::vector<TextureInfo> textureList;
/// OpenGL display list which draws the frames
GLuint dl;
GLuint dl = 0;
/// The total texture count
int textureCount;
int textureCount = 0;
/// The number of rows of textures
int textureRows;
int textureRows = 0;
/// The number of columns of textures
int textureCols;
int textureCols = 0;

void DetectOpenGLCapabilities();
void InitTextures(int width, int height, GLenum format, int bpp, bool flipped);

VideoOutGL(const VideoOutGL &);
VideoOutGL& operator=(const VideoOutGL&);
VideoOutGL(const VideoOutGL &) = delete;
VideoOutGL& operator=(const VideoOutGL&) = delete;
public:
/// @brief Set the frame to be displayed when Render() is called
/// @param frame The frame to be displayed
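Alongside the NSDMIs, the hunk above also replaces the old declare-but-don't-define idiom for the copy operations with C++11 deleted functions, turning accidental copies into a clear compile-time error instead of a link error. A minimal illustration (names are not from Aegisub):

// C++98 style: private declarations with no definitions; misuse fails at
// link time, or not at all from inside the class itself.
class NonCopyableOld {
	NonCopyableOld(const NonCopyableOld&);
	NonCopyableOld& operator=(const NonCopyableOld&);
public:
	NonCopyableOld() { }
};

// C++11 style: deleted functions are rejected by the compiler everywhere,
// with a diagnostic that names the deleted function.
class NonCopyableNew {
public:
	NonCopyableNew() = default;
	NonCopyableNew(const NonCopyableNew&) = delete;
	NonCopyableNew& operator=(const NonCopyableNew&) = delete;
};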
@@ -74,9 +74,6 @@ std::string colormatrix_description(int cs, int cr) {

FFmpegSourceVideoProvider::FFmpegSourceVideoProvider(agi::fs::path const& filename, std::string const& colormatrix) try
: VideoSource(nullptr, FFMS_DestroyVideoSource)
, VideoInfo(nullptr)
, Width(-1)
, Height(-1)
{
ErrInfo.Buffer = FFMSErrMsg;
ErrInfo.BufferSize = sizeof(FFMSErrMsg);

@@ -41,10 +41,10 @@
class FFmpegSourceVideoProvider : public VideoProvider, FFmpegSourceProvider {
/// video source object
agi::scoped_holder<FFMS_VideoSource*, void (FFMS_CC*)(FFMS_VideoSource*)> VideoSource;
const FFMS_VideoProperties *VideoInfo; ///< video properties
const FFMS_VideoProperties *VideoInfo = nullptr; ///< video properties

int Width; ///< width in pixels
int Height; ///< height in pixels
int Width = -1; ///< width in pixels
int Height = -1; ///< height in pixels
double DAR; ///< display aspect ratio
std::vector<int> KeyFramesList; ///< list of keyframes
agi::vfr::Framerate Timecodes; ///< vfr object

@@ -58,15 +58,7 @@

/// @brief Constructor
/// @param filename The filename to open
YUV4MPEGVideoProvider::YUV4MPEGVideoProvider(agi::fs::path const& filename, std::string const&)
: sf(nullptr)
, inited(false)
, w (0)
, h (0)
, num_frames(-1)
, pixfmt(Y4M_PIXFMT_NONE)
, imode(Y4M_ILACE_NOTSET)
{
YUV4MPEGVideoProvider::YUV4MPEGVideoProvider(agi::fs::path const& filename, std::string const&) {
fps_rat.num = -1;
fps_rat.den = 1;
@@ -100,17 +100,17 @@ class YUV4MPEGVideoProvider : public VideoProvider {
Y4M_FFLAG_C_UNKNOWN = 0x0800 /// unknown (only allowed for non-4:2:0 sampling)
};

FILE *sf; /// source file
bool inited; /// initialization state
FILE *sf = nullptr; /// source file
bool inited = false; /// initialization state

int w, h; /// frame width/height
int num_frames; /// length of file in frames
int w = 0, h = 0; /// frame width/height
int num_frames = -1; /// length of file in frames
int frame_sz; /// size of each frame in bytes
int luma_sz; /// size of the luma plane of each frame, in bytes
int chroma_sz; /// size of one of the two chroma planes of each frame, in bytes

Y4M_PixelFormat pixfmt; /// colorspace/pixel format
Y4M_InterlacingMode imode; /// interlacing mode (for the entire stream)
Y4M_PixelFormat pixfmt = Y4M_PIXFMT_NONE; /// colorspace/pixel format
Y4M_InterlacingMode imode = Y4M_ILACE_NOTSET; /// interlacing mode (for the entire stream)
struct {
int num; /// numerator
int den; /// denominator
@@ -50,8 +50,6 @@
VideoSlider::VideoSlider (wxWindow* parent, agi::Context *c)
: wxWindow(parent, -1, wxDefaultPosition, wxDefaultSize, wxWANTS_CHARS | wxFULL_REPAINT_ON_RESIZE)
, c(c)
, val(0)
, max(1)
{
SetClientSize(20,25);
SetMinSize(wxSize(20, 25));

@@ -49,8 +49,8 @@ class VideoSlider: public wxWindow {
std::vector<int> keyframes; ///< Currently loaded keyframes
std::vector<agi::signal::Connection> slots;

int val; ///< Current frame number
int max; ///< Last frame number
int val = 0; ///< Current frame number
int max = 1; ///< Last frame number

/// Get the frame number for the given x coordinate
int GetValueAtX(int x);
@@ -37,13 +37,6 @@
#include "gl_wrap.h"
#include "visual_feature.h"

VisualDraggableFeature::VisualDraggableFeature()
: type(DRAG_NONE)
, layer(0)
, line(nullptr)
{
}

bool VisualDraggableFeature::IsMouseOver(Vector2D mouse_pos) const {
if (!pos) return false;

@@ -60,13 +60,10 @@ class VisualDraggableFeature : public boost::intrusive::make_list_base_hook<boos
Vector2D start; ///< position before the last drag operation began

public:
/// Constructor
VisualDraggableFeature();

DraggableFeatureType type; ///< Shape of feature
DraggableFeatureType type = DRAG_NONE; ///< Shape of feature
Vector2D pos; ///< Position of this feature
int layer; ///< Layer; Higher = above
AssDialogue* line; ///< The dialogue line this feature is for; may be nullptr
int layer = 0; ///< Layer; Higher = above
AssDialogue* line = nullptr; ///< The dialogue line this feature is for; may be nullptr

/// @brief Is the given point over this feature?
/// @param mouse_pos Position of the mouse
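VisualDraggableFeature is the limiting case of the pattern: once type, layer and line have NSDMIs (and pos/start are Vector2D values with their own default constructors), the declared default constructor and its out-of-line definition in the .cpp can be dropped entirely. A compact sketch of the end state, with a reduced, illustrative enum:

enum DraggableFeatureType { DRAG_NONE, DRAG_BIG_SQUARE };  // illustrative subset

struct FeatureSketch {
	DraggableFeatureType type = DRAG_NONE;
	int layer = 0;
	void* line = nullptr;  // stands in for AssDialogue*
	// No constructor is declared: the implicitly generated default
	// constructor applies the initializers above, so nothing is left
	// to define in the .cpp file.
};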