
Merge branches 'video_panning_option', 'bugfixes' and 'avisynth' into feature

arch1t3cht 2022-07-27 15:26:33 +02:00
commit 9cc3fe9e71
20 changed files with 234 additions and 836 deletions

.gitignore
View File

@ -18,6 +18,7 @@ tools/repack-thes-dict.dSYM
# Meson
build*/
subprojects/avisynth
subprojects/boost*/
subprojects/cairo*
subprojects/ffmpeg

View File

@ -15,22 +15,13 @@
#include <libaegisub/util.h>
#include <cstddef>
#ifdef _LIBCPP_VERSION
#include <thread>
#else
#include <boost/thread.hpp>
#endif
namespace agi { namespace util {
void SetThreadName(const char *) { }
void sleep_for(int ms) {
#ifdef __clang__
std::this_thread::sleep_for(std::chrono::milliseconds(ms));
#else
boost::this_thread::sleep_for(boost::chrono::milliseconds(ms));
#endif
}
} }

View File

@ -1,6 +1,6 @@
project('Aegisub', ['c', 'cpp'],
license: 'BSD-3-Clause',
meson_version: '>=0.56.1',
meson_version: '>=0.57.0',
default_options: ['cpp_std=c++14', 'buildtype=debugoptimized'],
version: '3.2.2')
@ -226,6 +226,15 @@ endforeach
if host_machine.system() == 'windows' and get_option('avisynth').enabled()
conf.set('WITH_AVISYNTH', 1) # bundled separately with installer
deps += cc.find_library('avifil32', required: true)
avs_opt = cmake.subproject_options()
avs_opt.add_cmake_defines({
'HEADERS_ONLY': true
})
avs = cmake.subproject('avisynth', options: avs_opt)
deps_inc += avs.include_directories('AviSynth-Headers')
endif
if host_machine.system() == 'windows' and not get_option('directsound').disabled()

View File

@ -1,751 +0,0 @@
// Avisynth v2.5. Copyright 2002 Ben Rudiak-Gould et al.
// http://www.avisynth.org
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
// http://www.gnu.org/copyleft/gpl.html .
//
// Linking Avisynth statically or dynamically with other modules is making a
// combined work based on Avisynth. Thus, the terms and conditions of the GNU
// General Public License cover the whole combination.
//
// As a special exception, the copyright holders of Avisynth give you
// permission to link Avisynth with independent modules that communicate with
// Avisynth solely through the interfaces defined in avisynth.h, regardless of the license
// terms of these independent modules, and to copy and distribute the
// resulting combined work under terms of your choice, provided that
// every copy of the combined work is accompanied by a complete copy of
// the source code of Avisynth (the version of Avisynth used to produce the
// combined work), being distributed under the terms of the GNU General
// Public License plus this exception. An independent module is a module
// which is not derived from or based on Avisynth, such as 3rd-party filters,
// import and export plugins, or graphical user interfaces.
#ifndef __AVISYNTH_H__
#define __AVISYNTH_H__
enum { AVISYNTH_INTERFACE_VERSION = 3 };
/* Define all types necessary for interfacing with avisynth.dll
Moved from internal.h */
// Win32 API macros, notably the types BYTE, DWORD, ULONG, etc.
#include <windef.h>
// COM interface macros
#include <objbase.h>
// Raster types used by VirtualDub & Avisynth
#define in64 (__int64)(unsigned short)
typedef unsigned long Pixel; // this will break on 64-bit machines!
typedef unsigned long Pixel32;
typedef unsigned char Pixel8;
typedef long PixCoord;
typedef long PixDim;
typedef long PixOffset;
/* Compiler-specific crap */
// Tell MSVC to stop precompiling here
#ifdef _MSC_VER
#pragma hdrstop
#endif
// Set up debugging macros for MS compilers; for others, step down to the
// standard <assert.h> interface
#ifdef _MSC_VER
#include <crtdbg.h>
#else
#define _RPT0(a,b) ((void)0)
#define _RPT1(a,b,c) ((void)0)
#define _RPT2(a,b,c,d) ((void)0)
#define _RPT3(a,b,c,d,e) ((void)0)
#define _RPT4(a,b,c,d,e,f) ((void)0)
#define _ASSERTE(x) assert(x)
#include <assert.h>
#endif
// I had problems with Premiere wanting 1-byte alignment for its structures,
// so I now set the Avisynth struct alignment explicitly here.
#pragma pack(push,8)
#define FRAME_ALIGN 16
// Default frame alignment is 16 bytes, to help P4, when using SSE2
// The VideoInfo struct holds global information about a clip (i.e.
// information that does not depend on the frame number). The GetVideoInfo
// method in IClip returns this struct.
// Audio Sample information
typedef float SFLOAT;
enum {SAMPLE_INT8 = 1<<0,
SAMPLE_INT16 = 1<<1,
SAMPLE_INT24 = 1<<2, // Int24 is a very stupid thing to code, but it's supported by some hardware.
SAMPLE_INT32 = 1<<3,
SAMPLE_FLOAT = 1<<4};
enum {
PLANAR_Y=1<<0,
PLANAR_U=1<<1,
PLANAR_V=1<<2,
PLANAR_ALIGNED=1<<3,
PLANAR_Y_ALIGNED=PLANAR_Y|PLANAR_ALIGNED,
PLANAR_U_ALIGNED=PLANAR_U|PLANAR_ALIGNED,
PLANAR_V_ALIGNED=PLANAR_V|PLANAR_ALIGNED,
};
struct VideoInfo {
int width, height; // width=0 means no video
unsigned fps_numerator, fps_denominator;
int num_frames;
// This is more extensible than previous versions. More properties can be added seeminglesly.
// Colorspace properties.
enum {
CS_BGR = 1<<28,
CS_YUV = 1<<29,
CS_INTERLEAVED = 1<<30,
CS_PLANAR = 1<<31
};
// Specific colorformats
enum { CS_UNKNOWN = 0,
CS_BGR24 = 1<<0 | CS_BGR | CS_INTERLEAVED,
CS_BGR32 = 1<<1 | CS_BGR | CS_INTERLEAVED,
CS_YUY2 = 1<<2 | CS_YUV | CS_INTERLEAVED,
CS_YV12 = 1<<3 | CS_YUV | CS_PLANAR, // y-v-u, planar
CS_I420 = 1<<4 | CS_YUV | CS_PLANAR, // y-u-v, planar
CS_IYUV = 1<<4 | CS_YUV | CS_PLANAR // same as above
};
int pixel_type; // changed to int as of 2.5
int audio_samples_per_second; // 0 means no audio
int sample_type; // as of 2.5
__int64 num_audio_samples; // changed as of 2.5
int nchannels; // as of 2.5
// Imagetype properties
int image_type;
enum {
IT_BFF = 1<<0,
IT_TFF = 1<<1,
IT_FIELDBASED = 1<<2
};
// useful functions of the above
bool HasVideo() const { return (width!=0); }
bool HasAudio() const { return (audio_samples_per_second!=0); }
bool IsRGB() const { return !!(pixel_type&CS_BGR); }
bool IsRGB24() const { return (pixel_type&CS_BGR24)==CS_BGR24; } // Clear out additional properties
bool IsRGB32() const { return (pixel_type & CS_BGR32) == CS_BGR32 ; }
bool IsYUV() const { return !!(pixel_type&CS_YUV ); }
bool IsYUY2() const { return (pixel_type & CS_YUY2) == CS_YUY2; }
bool IsYV12() const { return ((pixel_type & CS_YV12) == CS_YV12)||((pixel_type & CS_I420) == CS_I420); }
bool IsColorSpace(int c_space) const { return ((pixel_type & c_space) == c_space); }
bool Is(int property) const { return ((pixel_type & property)==property ); }
bool IsPlanar() const { return !!(pixel_type & CS_PLANAR); }
bool IsFieldBased() const { return !!(image_type & IT_FIELDBASED); }
bool IsParityKnown() const { return ((image_type & IT_FIELDBASED)&&(image_type & (IT_BFF|IT_TFF))); }
bool IsBFF() const { return !!(image_type & IT_BFF); }
bool IsTFF() const { return !!(image_type & IT_TFF); }
bool IsVPlaneFirst() const {return ((pixel_type & CS_YV12) == CS_YV12); } // Don't use this
int BytesFromPixels(int pixels) const { return pixels * (BitsPerPixel()>>3); } // Will not work on planar images, but will return only luma planes
int RowSize() const { return BytesFromPixels(width); } // Also only returns first plane on planar images
int BMPSize() const { if (IsPlanar()) {int p = height * ((RowSize()+3) & ~3); p+=p>>1; return p; } return height * ((RowSize()+3) & ~3); }
__int64 AudioSamplesFromFrames(__int64 frames) const { return (fps_numerator && HasVideo()) ? ((__int64)(frames) * audio_samples_per_second * fps_denominator / fps_numerator) : 0; }
int FramesFromAudioSamples(__int64 samples) const { return (fps_denominator && HasAudio()) ? (int)((samples * (__int64)fps_numerator)/((__int64)fps_denominator * (__int64)audio_samples_per_second)) : 0; }
__int64 AudioSamplesFromBytes(__int64 bytes) const { return HasAudio() ? bytes / BytesPerAudioSample() : 0; }
__int64 BytesFromAudioSamples(__int64 samples) const { return samples * BytesPerAudioSample(); }
int AudioChannels() const { return nchannels; }
int SampleType() const{ return sample_type;}
bool IsSampleType(int testtype) const{ return !!(sample_type&testtype);}
int SamplesPerSecond() const { return audio_samples_per_second; }
int BytesPerAudioSample() const { return nchannels*BytesPerChannelSample();}
void SetFieldBased(bool isfieldbased) { if (isfieldbased) image_type|=IT_FIELDBASED; else image_type&=~IT_FIELDBASED; }
void Set(int property) { image_type|=property; }
void Clear(int property) { image_type&=~property; }
int BitsPerPixel() const {
switch (pixel_type) {
case CS_BGR24:
return 24;
case CS_BGR32:
return 32;
case CS_YUY2:
return 16;
case CS_YV12:
case CS_I420:
return 12;
default:
return 0;
}
}
int BytesPerChannelSample() const {
switch (sample_type) {
case SAMPLE_INT8:
return sizeof(signed char);
case SAMPLE_INT16:
return sizeof(signed short);
case SAMPLE_INT24:
return 3;
case SAMPLE_INT32:
return sizeof(signed int);
case SAMPLE_FLOAT:
return sizeof(SFLOAT);
default:
_ASSERTE("Sample type not recognized!");
return 0;
}
}
// useful mutator
void SetFPS(unsigned numerator, unsigned denominator) {
if ((numerator == 0) || (denominator == 0)) {
fps_numerator = 0;
fps_denominator = 1;
}
else {
unsigned x=numerator, y=denominator;
while (y) { // find gcd
unsigned t = x%y; x = y; y = t;
}
fps_numerator = numerator/x;
fps_denominator = denominator/x;
}
}
// Range protected multiply-divide of FPS
void MulDivFPS(unsigned multiplier, unsigned divisor) {
unsigned __int64 numerator = UInt32x32To64(fps_numerator, multiplier);
unsigned __int64 denominator = UInt32x32To64(fps_denominator, divisor);
unsigned __int64 x=numerator, y=denominator;
while (y) { // find gcd
unsigned __int64 t = x%y; x = y; y = t;
}
numerator /= x; // normalize
denominator /= x;
unsigned __int64 temp = numerator | denominator; // Just looking top bit
unsigned u = 0;
while (temp & 0xffffffff80000000) { // or perhaps > 16777216*2
temp = Int64ShrlMod32(temp, 1);
u++;
}
if (u) { // Scale to fit
const unsigned round = 1 << (u-1);
SetFPS( (unsigned)Int64ShrlMod32(numerator + round, u),
(unsigned)Int64ShrlMod32(denominator + round, u) );
}
else {
fps_numerator = (unsigned)numerator;
fps_denominator = (unsigned)denominator;
}
}
// Test for same colorspace
bool IsSameColorspace(const VideoInfo& vi) const {
if (vi.pixel_type == pixel_type) return TRUE;
if (IsYV12() && vi.IsYV12()) return TRUE;
return FALSE;
}
};
// VideoFrameBuffer holds information about a memory block which is used
// for video data. For efficiency, instances of this class are not deleted
// when the refcount reaches zero; instead they're stored in a linked list
// to be reused. The instances are deleted when the corresponding AVS
// file is closed.
class VideoFrameBuffer {
BYTE* const data;
const int data_size;
// sequence_number is incremented every time the buffer is changed, so
// that stale views can tell they're no longer valid.
long sequence_number;
friend class VideoFrame;
friend class Cache;
friend class ScriptEnvironment;
long refcount;
public:
VideoFrameBuffer(int size);
VideoFrameBuffer();
~VideoFrameBuffer();
const BYTE* GetReadPtr() const { return data; }
BYTE* GetWritePtr() { ++sequence_number; return data; }
int GetDataSize() { return data_size; }
int GetSequenceNumber() { return sequence_number; }
int GetRefcount() { return refcount; }
};
class IClip;
class PClip;
class PVideoFrame;
class IScriptEnvironment;
class AVSValue;
// VideoFrame holds a "window" into a VideoFrameBuffer. Operator new
// is overloaded to recycle class instances.
class VideoFrame {
int refcount;
VideoFrameBuffer* const vfb;
const int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
friend class PVideoFrame;
void AddRef() { InterlockedIncrement((long *)&refcount); }
void Release() { if (refcount==1) InterlockedDecrement(&vfb->refcount); InterlockedDecrement((long *)&refcount); }
friend class ScriptEnvironment;
friend class Cache;
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height);
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height, int _offsetU, int _offsetV, int _pitchUV);
void* operator new(size_t size);
// TESTME: OFFSET U/V may be switched to what could be expected from AVI standard!
public:
int GetPitch() const { return pitch; }
int GetPitch(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: return pitchUV;} return pitch; }
int GetRowSize() const { return row_size; }
int GetRowSize(int plane) const {
switch (plane) {
case PLANAR_U: case PLANAR_V: if (pitchUV) return row_size>>1; else return 0;
case PLANAR_U_ALIGNED: case PLANAR_V_ALIGNED:
if (pitchUV) {
int r = ((row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)) )>>1; // Aligned rowsize
if (r<=pitchUV)
return r;
return row_size>>1;
} else return 0;
case PLANAR_Y_ALIGNED:
int r = (row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)); // Aligned rowsize
if (r<=pitch)
return r;
return row_size;
}
return row_size; }
int GetHeight() const { return height; }
int GetHeight(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: if (pitchUV) return height>>1; return 0;} return height; }
// generally you shouldn't use these three
VideoFrameBuffer* GetFrameBuffer() const { return vfb; }
int GetOffset() const { return offset; }
int GetOffset(int plane) const { switch (plane) {case PLANAR_U: return offsetU;case PLANAR_V: return offsetV;default: return offset;}; }
// in plugins use env->SubFrame()
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height) const;
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int pitchUV) const;
const BYTE* GetReadPtr() const { return vfb->GetReadPtr() + offset; }
const BYTE* GetReadPtr(int plane) const { return vfb->GetReadPtr() + GetOffset(plane); }
bool IsWritable() const { return (refcount == 1 && vfb->refcount == 1); }
BYTE* GetWritePtr() const {
if (vfb->GetRefcount()>1) {
_ASSERT(FALSE);
//throw AvisynthError("Internal Error - refcount was more than one!");
}
return IsWritable() ? (vfb->GetWritePtr() + offset) : 0;
}
BYTE* GetWritePtr(int plane) const {
if (plane==PLANAR_Y) {
if (vfb->GetRefcount()>1) {
_ASSERT(FALSE);
// throw AvisynthError("Internal Error - refcount was more than one!");
}
return IsWritable() ? vfb->GetWritePtr() + GetOffset(plane) : 0;
}
return vfb->data + GetOffset(plane);
}
~VideoFrame() { InterlockedDecrement(&vfb->refcount); }
};
enum {
CACHE_NOTHING=0,
CACHE_RANGE=1,
CACHE_ALL=2,
CACHE_AUDIO=3,
CACHE_AUDIO_NONE=4
};
// Base class for all filters.
class IClip {
friend class PClip;
friend class AVSValue;
int refcnt;
void AddRef() { InterlockedIncrement((long *)&refcnt); }
void Release() { InterlockedDecrement((long *)&refcnt); if (!refcnt) delete this; }
public:
IClip() : refcnt(0) {}
virtual int __stdcall GetVersion() { return AVISYNTH_INTERFACE_VERSION; }
virtual PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) = 0;
virtual bool __stdcall GetParity(int n) = 0; // return field parity if field_based, else parity of first field in frame
virtual void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) = 0; // start and count are in samples
virtual void __stdcall SetCacheHints(int cachehints,int frame_range) = 0 ; // We do not pass cache requests upwards, only to the next filter.
virtual const VideoInfo& __stdcall GetVideoInfo() = 0;
virtual __stdcall ~IClip() {}
};
// smart pointer to IClip
class PClip {
IClip* p;
IClip* GetPointerWithAddRef() const { if (p) p->AddRef(); return p; }
friend class AVSValue;
friend class VideoFrame;
void Init(IClip* x) {
if (x) x->AddRef();
p=x;
}
void Set(IClip* x) {
if (x) x->AddRef();
if (p) p->Release();
p=x;
}
public:
PClip() { p = 0; }
PClip(const PClip& x) { Init(x.p); }
PClip(IClip* x) { Init(x); }
void operator=(IClip* x) { Set(x); }
void operator=(const PClip& x) { Set(x.p); }
IClip* operator->() const { return p; }
// useful in conditional expressions
operator void*() const { return p; }
bool operator!() const { return !p; }
~PClip() { if (p) p->Release(); }
};
// smart pointer to VideoFrame
class PVideoFrame {
VideoFrame* p;
void Init(VideoFrame* x) {
if (x) x->AddRef();
p=x;
}
void Set(VideoFrame* x) {
if (x) x->AddRef();
if (p) p->Release();
p=x;
}
public:
PVideoFrame() { p = 0; }
PVideoFrame(const PVideoFrame& x) { Init(x.p); }
PVideoFrame(VideoFrame* x) { Init(x); }
void operator=(VideoFrame* x) { Set(x); }
void operator=(const PVideoFrame& x) { Set(x.p); }
VideoFrame* operator->() const { return p; }
// for conditional expressions
operator void*() const { return p; }
bool operator!() const { return !p; }
~PVideoFrame() { if (p) p->Release();}
};
class AVSValue {
public:
AVSValue() { type = 'v'; }
AVSValue(IClip* c) { type = 'c'; clip = c; if (c) c->AddRef(); }
AVSValue(const PClip& c) { type = 'c'; clip = c.GetPointerWithAddRef(); }
AVSValue(bool b) { type = 'b'; boolean = b; }
AVSValue(int i) { type = 'i'; integer = i; }
// AVSValue(__int64 l) { type = 'l'; longlong = l; }
AVSValue(float f) { type = 'f'; floating_pt = f; }
AVSValue(double f) { type = 'f'; floating_pt = float(f); }
AVSValue(const char* s) { type = 's'; string = s; }
AVSValue(const AVSValue* a, int size) { type = 'a'; array = a; array_size = size; }
AVSValue(const AVSValue& v) { Assign(&v, true); }
~AVSValue() { if (IsClip() && clip) clip->Release(); }
AVSValue& operator=(const AVSValue& v) { Assign(&v, false); return *this; }
// Note that we transparently allow 'int' to be treated as 'float'.
// There are no int<->bool conversions, though.
bool Defined() const { return type != 'v'; }
bool IsClip() const { return type == 'c'; }
bool IsBool() const { return type == 'b'; }
bool IsInt() const { return type == 'i'; }
// bool IsLong() const { return (type == 'l'|| type == 'i'); }
bool IsFloat() const { return type == 'f' || type == 'i'; }
bool IsString() const { return type == 's'; }
bool IsArray() const { return type == 'a'; }
PClip AsClip() const { _ASSERTE(IsClip()); return IsClip()?clip:0; }
bool AsBool() const { _ASSERTE(IsBool()); return boolean; }
int AsInt() const { _ASSERTE(IsInt()); return integer; }
// int AsLong() const { _ASSERTE(IsLong()); return longlong; }
const char* AsString() const { _ASSERTE(IsString()); return IsString()?string:0; }
double AsFloat() const { _ASSERTE(IsFloat()); return IsInt()?integer:floating_pt; }
bool AsBool(bool def) const { _ASSERTE(IsBool()||!Defined()); return IsBool() ? boolean : def; }
int AsInt(int def) const { _ASSERTE(IsInt()||!Defined()); return IsInt() ? integer : def; }
double AsFloat(double def) const { _ASSERTE(IsFloat()||!Defined()); return IsInt() ? integer : type=='f' ? floating_pt : def; }
const char* AsString(const char* def) const { _ASSERTE(IsString()||!Defined()); return IsString() ? string : def; }
int ArraySize() const { _ASSERTE(IsArray()); return IsArray()?array_size:1; }
const AVSValue& operator[](int index) const {
_ASSERTE(IsArray() && index>=0 && index<array_size);
return (IsArray() && index>=0 && index<array_size) ? array[index] : *this;
}
private:
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
short array_size;
union {
IClip* clip;
bool boolean;
int integer;
float floating_pt;
const char* string;
const AVSValue* array;
// __int64 longlong;
};
void Assign(const AVSValue* src, bool init) {
if (src->IsClip() && src->clip)
src->clip->AddRef();
if (!init && IsClip() && clip)
clip->Release();
// make sure this copies the whole struct!
//((__int32*)this)[0] = ((__int32*)src)[0];
//((__int32*)this)[1] = ((__int32*)src)[1];
memcpy(this, src, sizeof(AVSValue));
}
};
// instantiable null filter
class GenericVideoFilter : public IClip {
protected:
PClip child;
VideoInfo vi;
public:
GenericVideoFilter(PClip _child) : child(_child) { vi = child->GetVideoInfo(); }
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) { return child->GetFrame(n, env); }
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { child->GetAudio(buf, start, count, env); }
const VideoInfo& __stdcall GetVideoInfo() { return vi; }
bool __stdcall GetParity(int n) { return child->GetParity(n); }
void __stdcall SetCacheHints(int cachehints,int frame_range) { } ; // We do not pass cache requests upwards, only to the next filter.
};
class AvisynthError /* exception */ {
public:
const char* const msg;
AvisynthError(const char* _msg) : msg(_msg) {}
};
/* Helper classes useful to plugin authors */
class AlignPlanar : public GenericVideoFilter
{
public:
AlignPlanar(PClip _clip);
static PClip Create(PClip clip);
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
};
class FillBorder : public GenericVideoFilter
{
public:
FillBorder(PClip _clip);
static PClip Create(PClip clip);
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
};
class ConvertAudio : public GenericVideoFilter
/**
* Helper class to convert audio to any format
**/
{
public:
ConvertAudio(PClip _clip, int prefered_format);
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env);
void __stdcall SetCacheHints(int cachehints,int frame_range); // We do pass cache requests upwards, to the cache!
static PClip Create(PClip clip, int sample_type, int prefered_type);
static AVSValue __cdecl Create_float(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_32bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_24bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_16bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_8bit(AVSValue args, void*, IScriptEnvironment*);
virtual ~ConvertAudio();
private:
void convertToFloat(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_3DN(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_SSE(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_SSE2(char* inbuf, float* outbuf, char sample_type, int count);
void convertFromFloat(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_3DN(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_SSE(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_SSE2(float* inbuf, void* outbuf, char sample_type, int count);
__inline int Saturate_int8(float n);
__inline short Saturate_int16(float n);
__inline int Saturate_int24(float n);
__inline int Saturate_int32(float n);
char src_format;
char dst_format;
int src_bps;
char *tempbuffer;
SFLOAT *floatbuffer;
int tempbuffer_size;
};
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
enum {
/* slowest CPU to support extension */
CPUF_FORCE = 0x01, // N/A
CPUF_FPU = 0x02, // 386/486DX
CPUF_MMX = 0x04, // P55C, K6, PII
CPUF_INTEGER_SSE = 0x08, // PIII, Athlon
CPUF_SSE = 0x10, // PIII, Athlon XP/MP
CPUF_SSE2 = 0x20, // PIV, Hammer
CPUF_3DNOW = 0x40, // K6-2
CPUF_3DNOW_EXT = 0x80, // Athlon
CPUF_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2, which only Hammer
// will have anyway)
CPUF_SSE3 = 0x100, // Some P4 & Athlon 64.
};
#define MAX_INT 0x7fffffff
#define MIN_INT -0x7fffffff
class IScriptEnvironment {
public:
virtual __stdcall ~IScriptEnvironment() {}
virtual /*static*/ long __stdcall GetCPUFlags() = 0;
virtual char* __stdcall SaveString(const char* s, int length = -1) = 0;
virtual char* __stdcall Sprintf(const char* fmt, ...) = 0;
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
virtual char* __stdcall VSprintf(const char* fmt, void* val) = 0;
__declspec(noreturn) virtual void __stdcall ThrowError(const char* fmt, ...) = 0;
class NotFound /*exception*/ {}; // thrown by Invoke and GetVar
typedef AVSValue (__cdecl *ApplyFunc)(AVSValue args, void* user_data, IScriptEnvironment* env);
virtual void __stdcall AddFunction(const char* name, const char* params, ApplyFunc apply, void* user_data) = 0;
virtual bool __stdcall FunctionExists(const char* name) = 0;
virtual AVSValue __stdcall Invoke(const char* name, const AVSValue args, const char** arg_names=0) = 0;
virtual AVSValue __stdcall GetVar(const char* name) = 0;
virtual bool __stdcall SetVar(const char* name, const AVSValue& val) = 0;
virtual bool __stdcall SetGlobalVar(const char* name, const AVSValue& val) = 0;
virtual void __stdcall PushContext(int level=0) = 0;
virtual void __stdcall PopContext() = 0;
// align should be 4 or 8
virtual PVideoFrame __stdcall NewVideoFrame(const VideoInfo& vi, int align=FRAME_ALIGN) = 0;
virtual bool __stdcall MakeWritable(PVideoFrame* pvf) = 0;
virtual /*static*/ void __stdcall BitBlt(BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height) = 0;
typedef void (__cdecl *ShutdownFunc)(void* user_data, IScriptEnvironment* env);
virtual void __stdcall AtExit(ShutdownFunc function, void* user_data) = 0;
virtual void __stdcall CheckVersion(int version = AVISYNTH_INTERFACE_VERSION) = 0;
virtual PVideoFrame __stdcall Subframe(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height) = 0;
virtual int __stdcall SetMemoryMax(int mem) = 0;
virtual int __stdcall SetWorkingDir(const char * newdir) = 0;
virtual void* __stdcall ManageCache(int key, void* data) = 0;
enum PlanarChromaAlignmentMode {
PlanarChromaAlignmentOff,
PlanarChromaAlignmentOn,
PlanarChromaAlignmentTest };
virtual bool __stdcall PlanarChromaAlignment(PlanarChromaAlignmentMode key) = 0;
virtual PVideoFrame __stdcall SubframePlanar(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV) = 0;
};
// avisynth.dll exports this; it's a way to use it as a library, without
// writing an AVS script or without going through AVIFile.
IScriptEnvironment* __stdcall CreateScriptEnvironment(int version = AVISYNTH_INTERFACE_VERSION);
#pragma pack(pop)
#endif //__AVISYNTH_H__

View File

@ -47,6 +47,8 @@ namespace {
IScriptEnvironment *env = nullptr;
std::mutex AviSynthMutex;
}
// This needs to be visible so Avisynth sees it
const AVS_Linkage *AVS_linkage = nullptr;
typedef IScriptEnvironment* __stdcall FUNC(int);
@ -61,15 +63,13 @@ AviSynthWrapper::AviSynthWrapper() {
if (!CreateScriptEnv)
throw AvisynthError("Failed to get address of CreateScriptEnv from avisynth.dll");
// Require Avisynth 2.5.6+?
if (OPT_GET("Provider/Avisynth/Allow Ancient")->GetBool())
env = CreateScriptEnv(AVISYNTH_INTERFACE_VERSION-1);
else
env = CreateScriptEnv(AVISYNTH_INTERFACE_VERSION);
env = CreateScriptEnv(AVISYNTH_INTERFACE_VERSION);
if (!env)
throw AvisynthError("Failed to create a new avisynth script environment. Avisynth is too old?");
AVS_linkage = env->GetAVSLinkage();
// Set memory limit
const int memoryMax = OPT_GET("Provider/Avisynth/Memory Max")->GetInt();
if (memoryMax)
@ -80,6 +80,7 @@ AviSynthWrapper::AviSynthWrapper() {
AviSynthWrapper::~AviSynthWrapper() {
if (!--avs_refcount) {
delete env;
AVS_linkage = nullptr;
FreeLibrary(hLib);
}
}
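
For context, the wrapper in the hunks above loads avisynth.dll at runtime, resolves CreateScriptEnvironment, and records the AVS_Linkage pointer that the AviSynth+ headers (pulled in as the HEADERS_ONLY CMake subproject earlier in meson.build) expect. A minimal sketch of that pattern — the OpenAvisynth name and the error handling are illustrative, not the exact Aegisub code:

#include <windows.h>
#include <avisynth.h>  // AviSynth+ headers from the HEADERS_ONLY subproject

// Must be defined once in the client so the AviSynth+ headers can see it,
// mirroring the "const AVS_Linkage *AVS_linkage = nullptr;" line in the hunk.
const AVS_Linkage *AVS_linkage = nullptr;

typedef IScriptEnvironment* __stdcall FUNC(int);

IScriptEnvironment *OpenAvisynth(HINSTANCE &hLib) {
	hLib = LoadLibraryW(L"avisynth.dll");
	if (!hLib) return nullptr;
	FUNC *CreateScriptEnv = reinterpret_cast<FUNC *>(
		GetProcAddress(hLib, "CreateScriptEnvironment"));
	if (!CreateScriptEnv) return nullptr;
	IScriptEnvironment *env = CreateScriptEnv(AVISYNTH_INTERFACE_VERSION);
	if (env)
		AVS_linkage = env->GetAVSLinkage();  // as done in AviSynthWrapper above
	return env;
}

On teardown the order matches the destructor hunk: delete env, clear AVS_linkage, then FreeLibrary(hLib).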

View File

@ -603,6 +603,17 @@ struct video_opt_autoscroll final : public Command {
}
};
struct video_pan_reset final : public validator_video_loaded {
CMD_NAME("video/pan_reset")
STR_MENU("Reset video pan")
STR_DISP("Reset video pan")
STR_HELP("Reset the video pan to the original value")
void operator()(agi::Context *c) override {
c->videoDisplay->ResetPan();
}
};
struct video_play final : public validator_video_loaded {
CMD_NAME("video/play")
CMD_ICON(button_play)
@ -658,7 +669,7 @@ public:
void operator()(agi::Context *c) override {
c->videoController->Stop();
c->videoDisplay->SetZoom(1.);
c->videoDisplay->SetWindowZoom(1.);
}
};
@ -689,7 +700,7 @@ public:
void operator()(agi::Context *c) override {
c->videoController->Stop();
c->videoDisplay->SetZoom(2.);
c->videoDisplay->SetWindowZoom(2.);
}
};
@ -707,7 +718,7 @@ public:
void operator()(agi::Context *c) override {
c->videoController->Stop();
c->videoDisplay->SetZoom(.5);
c->videoDisplay->SetWindowZoom(.5);
}
};
@ -719,7 +730,7 @@ struct video_zoom_in final : public validator_video_attached {
STR_HELP("Zoom video in")
void operator()(agi::Context *c) override {
c->videoDisplay->SetZoom(c->videoDisplay->GetZoom() + .125);
c->videoDisplay->SetWindowZoom(c->videoDisplay->GetZoom() + .125);
}
};
@ -731,7 +742,7 @@ struct video_zoom_out final : public validator_video_attached {
STR_HELP("Zoom video out")
void operator()(agi::Context *c) override {
c->videoDisplay->SetZoom(c->videoDisplay->GetZoom() - .125);
c->videoDisplay->SetWindowZoom(c->videoDisplay->GetZoom() - .125);
}
};
}
@ -767,6 +778,7 @@ namespace cmd {
reg(agi::make_unique<video_open>());
reg(agi::make_unique<video_open_dummy>());
reg(agi::make_unique<video_opt_autoscroll>());
reg(agi::make_unique<video_pan_reset>());
reg(agi::make_unique<video_play>());
reg(agi::make_unique<video_play_line>());
reg(agi::make_unique<video_show_overscan>());

View File

@ -276,9 +276,9 @@ void FrameMain::OnVideoOpen(AsyncVideoProvider *provider) {
double zoom = context->videoDisplay->GetZoom();
wxSize windowSize = GetSize();
if (vidx*3*zoom > windowSize.GetX()*4 || vidy*4*zoom > windowSize.GetY()*6)
context->videoDisplay->SetZoom(zoom * .25);
context->videoDisplay->SetWindowZoom(zoom * .25);
else if (vidx*3*zoom > windowSize.GetX()*2 || vidy*4*zoom > windowSize.GetY()*3)
context->videoDisplay->SetZoom(zoom * .5);
context->videoDisplay->SetWindowZoom(zoom * .5);
SetDisplayMode(1,-1);

View File

@ -335,8 +335,7 @@
}
},
"Avisynth" : {
"Allow Ancient" : false,
"Memory Max" : 64
"Memory Max" : 128
},
"FFmpegSource" : {
"Cache" : {
@ -611,6 +610,8 @@
"Fast Jump Step" : 10,
"Show Keyframes" : true
},
"Subtitle Sync" : true
"Subtitle Sync" : true,
"Video Pan": false,
"Default to UI Zoom": false
}
}

View File

@ -156,6 +156,7 @@
{ "submenu" : "main/video/set zoom", "text" : "Set &Zoom" },
{ "submenu" : "main/video/override ar", "text" : "Override &AR" },
{ "command" : "video/show_overscan" },
{ "command" : "video/pan_reset" },
{},
{ "command" : "video/jump" },
{ "command" : "video/jump/start" },

View File

@ -1,3 +1,5 @@
fs = import('fs')
respack = find_program(meson.project_source_root() / 'tools/respack.py')
resrc = [
@ -12,17 +14,26 @@ conf_platform_json = configure_file(input: 'default_config_platform.json.in',
configuration: conf_platform)
if host_machine.system() == 'darwin'
resrc += custom_target('default_config.{cpp,h}',
command: [respack, '@INPUT0@', '@OUTPUT@'],
input: [files('manifest_osx.respack'), conf_platform_json],
output: ['default_config.cpp', 'default_config.h'])
resmanifest = 'manifest_osx.respack'
else
resrc += custom_target('default_config.{cpp,h}',
command: [respack, '@INPUT0@', '@OUTPUT@'],
input: [files('manifest.respack'), conf_platform_json],
output: ['default_config.cpp', 'default_config.h'])
resmanifest = 'manifest.respack'
endif
resmanifest_files = [conf_platform_json]
# Filter out the files we've generated ourselves
foreach rfile : fs.read(resmanifest).strip().split('\n')
rfile_s = rfile.strip()
if fs.is_file(rfile_s)
resmanifest_files += files(rfile_s)
endif
endforeach
resrc += custom_target('default_config.{cpp,h}',
command: [respack, '@INPUT0@', '@OUTPUT@'],
input: [files(resmanifest)],
depend_files: resmanifest_files,
output: ['default_config.cpp', 'default_config.h'])
libresrc_inc = include_directories('.')
libresrc = static_library('resrc', 'libresrc.cpp', resrc, include_directories: deps_inc, dependencies: deps)

View File

@ -335,8 +335,7 @@
}
},
"Avisynth" : {
"Allow Ancient" : false,
"Memory Max" : 64
"Memory Max" : 128
},
"FFmpegSource" : {
"Cache" : {
@ -611,6 +610,8 @@
"Fast Jump Step" : 10,
"Show Keyframes" : true
},
"Subtitle Sync" : true
"Subtitle Sync" : true,
"Video Pan": false,
"Default to UI Zoom": false
}
}

View File

@ -174,6 +174,7 @@ elif host_machine.system() == 'windows'
'avisynth_wrap.cpp',
'font_file_lister_gdi.cpp',
# 'libass_gdi_fontselect.cpp',
'audio_provider_avs.cpp',
'video_provider_avs.cpp',
)

View File

@ -433,11 +433,13 @@ void Advanced_Video(wxTreebook *book, Preferences *parent) {
wxArrayString sp_choice = to_wx(SubtitlesProviderFactory::GetClasses());
p->OptionChoice(expert, _("Subtitles provider"), sp_choice, "Subtitle/Provider");
p->OptionAdd(expert, _("Video Panning"), "Video/Video Pan");
p->OptionAdd(expert, _("Default to UI Zoom"), "Video/Default to UI Zoom");
#ifdef WITH_AVISYNTH
auto avisynth = p->PageSizer("Avisynth");
p->OptionAdd(avisynth, _("Allow pre-2.56a Avisynth"), "Provider/Avisynth/Allow Ancient");
p->CellSkip(avisynth);
p->OptionAdd(avisynth, _("Avisynth memory limit"), "Provider/Avisynth/Memory Max");
#endif

View File

@ -53,7 +53,6 @@ Project::Project(agi::Context *c) : context(c) {
OPT_SUB("Audio/Cache/Type", &Project::ReloadAudio, this);
OPT_SUB("Audio/Provider", &Project::ReloadAudio, this);
OPT_SUB("Provider/Audio/FFmpegSource/Decode Error Handling", &Project::ReloadAudio, this);
OPT_SUB("Provider/Avisynth/Allow Ancient", &Project::ReloadVideo, this);
OPT_SUB("Provider/Avisynth/Memory Max", &Project::ReloadVideo, this);
OPT_SUB("Provider/Video/FFmpegSource/Decoding Threads", &Project::ReloadVideo, this);
OPT_SUB("Provider/Video/FFmpegSource/Unsafe Seeking", &Project::ReloadVideo, this);
@ -220,7 +219,7 @@ void Project::LoadUnloadFiles(ProjectProperties properties) {
vc->SetAspectRatio(properties.ar_value);
else
vc->SetAspectRatio(ar_mode);
context->videoDisplay->SetZoom(properties.video_zoom);
context->videoDisplay->SetWindowZoom(properties.video_zoom);
}
}

View File

@ -82,19 +82,20 @@ VideoDisplay::VideoDisplay(wxToolBar *toolbar, bool freeSize, wxComboBox *zoomBo
: wxGLCanvas(parent, -1, attribList)
, autohideTools(OPT_GET("Tool/Visual/Autohide"))
, con(c)
, zoomValue(OPT_GET("Video/Default Zoom")->GetInt() * .125 + .125)
, windowZoomValue(OPT_GET("Video/Default Zoom")->GetInt() * .125 + .125)
, videoZoomValue(1)
, toolBar(toolbar)
, zoomBox(zoomBox)
, freeSize(freeSize)
, retina_helper(agi::make_unique<RetinaHelper>(this))
, scale_factor(retina_helper->GetScaleFactor())
, scale_factor_connection(retina_helper->AddScaleFactorListener([=](int new_scale_factor) {
double new_zoom = zoomValue * new_scale_factor / scale_factor;
double new_zoom = windowZoomValue * new_scale_factor / scale_factor;
scale_factor = new_scale_factor;
SetZoom(new_zoom);
SetWindowZoom(new_zoom);
}))
{
zoomBox->SetValue(fmt_wx("%g%%", zoomValue * 100.));
zoomBox->SetValue(fmt_wx("%g%%", windowZoomValue * 100.));
zoomBox->Bind(wxEVT_COMBOBOX, &VideoDisplay::SetZoomFromBox, this);
zoomBox->Bind(wxEVT_TEXT_ENTER, &VideoDisplay::SetZoomFromBoxText, this);
@ -113,6 +114,8 @@ VideoDisplay::VideoDisplay(wxToolBar *toolbar, bool freeSize, wxComboBox *zoomBo
Bind(wxEVT_LEFT_DCLICK, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_LEFT_DOWN, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_LEFT_UP, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_MIDDLE_DOWN, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_MIDDLE_UP, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_MOTION, &VideoDisplay::OnMouseEvent, this);
Bind(wxEVT_MOUSEWHEEL, &VideoDisplay::OnMouseWheel, this);
@ -191,11 +194,14 @@ void VideoDisplay::Render() try {
PositionVideo();
videoOut->Render(viewport_left, viewport_bottom, viewport_width, viewport_height);
E(glViewport(0, std::min(viewport_bottom, 0), videoSize.GetWidth(), videoSize.GetHeight()));
int client_w, client_h;
GetClientSize(&client_w, &client_h);
E(glViewport(0, 0, client_w, client_h));
E(glMatrixMode(GL_PROJECTION));
E(glLoadIdentity());
E(glOrtho(0.0f, videoSize.GetWidth() / scale_factor, videoSize.GetHeight() / scale_factor, 0.0f, -1000.0f, 1000.0f));
E(glOrtho(0.0f, client_w / scale_factor, client_h / scale_factor, 0.0f, -1000.0f, 1000.0f));
if (OPT_GET("Video/Overscan Mask")->GetBool()) {
double ar = con->videoController->GetAspectRatioValue();
@ -268,8 +274,11 @@ void VideoDisplay::PositionVideo() {
auto provider = con->project->VideoProvider();
if (!provider || !IsShownOnScreen()) return;
int client_w, client_h;
GetClientSize(&client_w, &client_h);
viewport_left = 0;
viewport_bottom = GetClientSize().GetHeight() * scale_factor - videoSize.GetHeight();
viewport_bottom = client_h * scale_factor - videoSize.GetHeight();
viewport_top = 0;
viewport_width = videoSize.GetWidth();
viewport_height = videoSize.GetHeight();
@ -279,27 +288,33 @@ void VideoDisplay::PositionVideo() {
int vidH = provider->GetHeight();
AspectRatio arType = con->videoController->GetAspectRatioType();
double displayAr = double(viewport_width) / viewport_height;
double displayAr = double(client_w) / client_h;
double videoAr = arType == AspectRatio::Default ? double(vidW) / vidH : con->videoController->GetAspectRatioValue();
// Window is wider than video, blackbox left/right
if (displayAr - videoAr > 0.01) {
int delta = viewport_width - videoAr * viewport_height;
int delta = client_w - videoAr * client_h;
viewport_left = delta / 2;
viewport_width -= delta;
}
// Video is wider than window, blackbox top/bottom
else if (videoAr - displayAr > 0.01) {
int delta = viewport_height - viewport_width / videoAr;
viewport_top = viewport_bottom = delta / 2;
int delta = client_h - client_w / videoAr;
viewport_top += delta / 2;
viewport_bottom += delta / 2;
viewport_height -= delta;
viewport_width = viewport_height * videoAr;
}
}
if (tool)
viewport_left += pan_x;
viewport_top += pan_y;
viewport_bottom -= pan_y;
if (tool) {
tool->SetClientSize(client_w * scale_factor, client_h * scale_factor);
tool->SetDisplayArea(viewport_left / scale_factor, viewport_top / scale_factor,
viewport_width / scale_factor, viewport_height / scale_factor);
}
Render();
}
@ -308,23 +323,24 @@ void VideoDisplay::UpdateSize() {
if (!provider || !IsShownOnScreen()) return;
videoSize.Set(provider->GetWidth(), provider->GetHeight());
videoSize *= zoomValue;
if (con->videoController->GetAspectRatioType() != AspectRatio::Default)
videoSize.SetWidth(videoSize.GetHeight() * con->videoController->GetAspectRatioValue());
videoSize *= videoZoomValue * windowZoomValue;
wxEventBlocker blocker(this);
if (freeSize) {
wxWindow *top = GetParent();
while (!top->IsTopLevel()) top = top->GetParent();
wxSize cs = GetClientSize();
wxSize oldClientSize = GetClientSize();
double csAr = (double)oldClientSize.GetWidth() / (double)oldClientSize.GetHeight();
wxSize newClientSize = wxSize(std::lround(provider->GetHeight() * csAr), provider->GetHeight()) * windowZoomValue / scale_factor;
wxSize oldSize = top->GetSize();
top->SetSize(top->GetSize() + videoSize / scale_factor - cs);
SetClientSize(cs + top->GetSize() - oldSize);
top->SetSize(oldSize + (newClientSize - oldClientSize));
SetClientSize(oldClientSize + (top->GetSize() - oldSize));
}
else {
SetMinClientSize(videoSize / scale_factor);
SetMaxClientSize(videoSize / scale_factor);
wxSize newSize = wxSize(provider->GetWidth(), provider->GetHeight()) * windowZoomValue / scale_factor;
SetMinClientSize(newSize);
SetMaxClientSize(newSize);
GetGrandParent()->Layout();
}
@ -334,11 +350,16 @@ void VideoDisplay::UpdateSize() {
void VideoDisplay::OnSizeEvent(wxSizeEvent &event) {
if (freeSize) {
videoSize = GetClientSize() * scale_factor;
PositionVideo();
zoomValue = double(viewport_height) / con->project->VideoProvider()->GetHeight();
zoomBox->ChangeValue(fmt_wx("%g%%", zoomValue * 100.));
con->ass->Properties.video_zoom = zoomValue;
/* If the video is not moved */
if (videoZoomValue == 1.0f && pan_x == 0 && pan_y == 0)
videoSize = GetClientSize() * scale_factor;
/* If the video is moving, we only need to update the size in this case */
else if (videoSize.GetWidth() == 0 && videoSize.GetHeight() == 0)
videoSize = GetClientSize() * videoZoomValue * scale_factor;
windowZoomValue = double(GetClientSize().GetHeight()) / con->project->VideoProvider()->GetHeight();
zoomBox->ChangeValue(fmt_wx("%g%%", windowZoomValue * 100.));
con->ass->Properties.video_zoom = windowZoomValue;
UpdateSize();
}
else {
PositionVideo();
@ -351,6 +372,30 @@ void VideoDisplay::OnMouseEvent(wxMouseEvent& event) {
last_mouse_pos = mouse_pos = event.GetPosition();
///if video pan
bool videoPan = OPT_GET("Video/Video Pan")->GetBool();
if (videoPan){
if (event.GetButton() == wxMOUSE_BTN_MIDDLE) {
if ((panning = event.ButtonDown()))
pan_last_pos = event.GetPosition();
}
if (panning && event.Dragging()) {
pan_x += event.GetX() - pan_last_pos.X();
pan_y += event.GetY() - pan_last_pos.Y();
pan_last_pos = event.GetPosition();
PositionVideo();
}
}
else if ((pan_x != 0 || pan_y != 0) && !videoPan)
{
pan_x = pan_y = 0;
PositionVideo();
}
///
if (tool)
tool->OnMouseEvent(event);
}
@ -362,9 +407,15 @@ void VideoDisplay::OnMouseLeave(wxMouseEvent& event) {
}
void VideoDisplay::OnMouseWheel(wxMouseEvent& event) {
bool videoPan = OPT_GET("Video/Video Pan")->GetBool();
if (int wheel = event.GetWheelRotation()) {
if (ForwardMouseWheelEvent(this, event))
SetZoom(zoomValue + .125 * (wheel / event.GetWheelDelta()));
if (ForwardMouseWheelEvent(this, event)) {
if (!videoPan || (event.ControlDown() != OPT_GET("Video/Default to UI Zoom")->GetBool())) {
SetWindowZoom(windowZoomValue + .125 * (wheel / event.GetWheelDelta()));
} else {
SetVideoZoom(wheel / event.GetWheelDelta());
}
}
}
}
@ -378,22 +429,63 @@ void VideoDisplay::OnKeyDown(wxKeyEvent &event) {
hotkey::check("Video", con, event);
}
void VideoDisplay::SetZoom(double value) {
void VideoDisplay::ResetPan() {
pan_x = pan_y = 0;
videoZoomValue = 1;
UpdateSize();
PositionVideo();
}
void VideoDisplay::SetWindowZoom(double value) {
if (value == 0) return;
zoomValue = std::max(value, .125);
size_t selIndex = zoomValue / .125 - 1;
value = std::max(value, .125);
pan_x *= value / windowZoomValue;
pan_y *= value / windowZoomValue;
windowZoomValue = value;
size_t selIndex = windowZoomValue / .125 - 1;
if (selIndex < zoomBox->GetCount())
zoomBox->SetSelection(selIndex);
zoomBox->ChangeValue(fmt_wx("%g%%", zoomValue * 100.));
con->ass->Properties.video_zoom = zoomValue;
zoomBox->ChangeValue(fmt_wx("%g%%", windowZoomValue * 100.));
con->ass->Properties.video_zoom = windowZoomValue;
UpdateSize();
}
void VideoDisplay::SetVideoZoom(int step) {
if (step == 0) return;
double newVideoZoom = videoZoomValue + (.125 * step) * videoZoomValue;
if (newVideoZoom < 0.125 || newVideoZoom > 10.0)
return;
// With the current blackbox algorithm in PositionVideo(), viewport_{width,height} could go negative. Stop that here
wxSize cs = GetClientSize();
wxSize videoNewSize = videoSize * (newVideoZoom / videoZoomValue);
float windowAR = (float)cs.GetWidth() / cs.GetHeight();
float videoAR = (float)videoNewSize.GetWidth() / videoNewSize.GetHeight();
if (windowAR < videoAR) {
int delta = cs.GetHeight() - cs.GetWidth() / videoAR;
if (videoNewSize.GetHeight() - delta < 0)
return;
}
// Mouse coordinates, relative to the video, at the current zoom level
Vector2D mp = GetMousePosition() * videoZoomValue * windowZoomValue;
// The video size will change by this many pixels
int pixelChangeW = std::lround(videoSize.GetWidth() * (newVideoZoom / videoZoomValue - 1.0));
int pixelChangeH = std::lround(videoSize.GetHeight() * (newVideoZoom / videoZoomValue - 1.0));
pan_x -= pixelChangeW * (mp.X() / videoSize.GetWidth());
pan_y -= pixelChangeH * (mp.Y() / videoSize.GetHeight());
videoZoomValue = newVideoZoom;
UpdateSize();
}
void VideoDisplay::SetZoomFromBox(wxCommandEvent &) {
int sel = zoomBox->GetSelection();
if (sel != wxNOT_FOUND) {
zoomValue = (sel + 1) * .125;
con->ass->Properties.video_zoom = zoomValue;
windowZoomValue = (sel + 1) * .125;
con->ass->Properties.video_zoom = windowZoomValue;
UpdateSize();
}
}
@ -405,7 +497,7 @@ void VideoDisplay::SetZoomFromBoxText(wxCommandEvent &) {
double value;
if (strValue.ToDouble(&value))
SetZoom(value / 100.);
SetWindowZoom(value / 100.);
}
void VideoDisplay::SetTool(std::unique_ptr<VisualToolBase> new_tool) {

View File

@ -84,8 +84,19 @@ class VideoDisplay final : public wxGLCanvas {
/// The height of the video in screen pixels
int viewport_height = 0;
/// The current zoom level, where 1.0 = 100%
double zoomValue;
/// The current window zoom level, where 1.0 = 100%
double windowZoomValue;
/// The current video zoom level, where 1.0 = 100% relative to the display window size
double videoZoomValue;
/// The last position of the mouse, when dragging
Vector2D pan_last_pos;
/// True if middle mouse button is down, and we should update pan_{x,y}
bool panning = false;
/// The current video pan offset width
int pan_x = 0;
/// The current video pan offset height
int pan_y = 0;
/// The video renderer
std::unique_ptr<VideoOutGL> videoOut;
@ -156,9 +167,13 @@ public:
/// @brief Set the zoom level
/// @param value The new zoom level
void SetZoom(double value);
void SetWindowZoom(double value);
void SetVideoZoom(int step);
/// @brief Get the current zoom level
double GetZoom() const { return zoomValue; }
double GetZoom() const { return windowZoomValue; }
/// @brief Reset the video pan
void ResetPan();
/// Get the last seen position of the mouse in script coordinates
Vector2D GetMousePosition() const;
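
The new pan/zoom state above (windowZoomValue, videoZoomValue, pan_x, pan_y) feeds the SetVideoZoom hunk earlier, which shifts the pan so that the pixel under the cursor stays put while the video scales. A stripped-down sketch of just that relationship, using hypothetical names and plain ints instead of the wx/Vector2D types:

#include <cmath>

// Given a zoom change, shift the pan so the point under the mouse keeps its
// on-screen position. mouse_x/mouse_y are in video pixels at the current zoom.
void AdjustPanForZoom(double old_zoom, double new_zoom,
                      int video_w, int video_h,
                      double mouse_x, double mouse_y,
                      int &pan_x, int &pan_y) {
	// How many pixels the rendered video grows (or shrinks) by.
	int dw = static_cast<int>(std::lround(video_w * (new_zoom / old_zoom - 1.0)));
	int dh = static_cast<int>(std::lround(video_h * (new_zoom / old_zoom - 1.0)));
	// Distribute that growth around the mouse position, as SetVideoZoom() does
	// with pan_x -= pixelChangeW * (mp.X() / videoSize.GetWidth()).
	pan_x -= static_cast<int>(dw * (mouse_x / video_w));
	pan_y -= static_cast<int>(dh * (mouse_y / video_h));
}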

View File

@ -132,6 +132,10 @@ AssDialogue* VisualToolBase::GetActiveDialogueLine() {
return nullptr;
}
void VisualToolBase::SetClientSize(int w, int h) {
client_size = Vector2D(w, h);
}
void VisualToolBase::SetDisplayArea(int x, int y, int w, int h) {
if (x == video_pos.X() && y == video_pos.Y() && w == video_res.X() && h == video_res.Y()) return;

View File

@ -104,6 +104,7 @@ protected:
Vector2D script_res; ///< Script resolution
Vector2D video_pos; ///< Top-left corner of the video in the display area
Vector2D video_res; ///< Video resolution
Vector2D client_size; ///< The size of the display area
const agi::OptionValue *highlight_color_primary_opt;
const agi::OptionValue *highlight_color_secondary_opt;
@ -144,6 +145,7 @@ public:
// Stuff called by VideoDisplay
virtual void OnMouseEvent(wxMouseEvent &event)=0;
virtual void Draw()=0;
virtual void SetClientSize(int w, int h);
virtual void SetDisplayArea(int x, int y, int w, int h);
virtual void SetToolbar(wxToolBar *) { }
virtual ~VisualToolBase() = default;

View File

@ -70,9 +70,9 @@ void VisualToolCross::Draw() {
gl.SetLineColour(*wxWHITE, 1.0, 1);
float lines[] = {
0.f, mouse_pos.Y(),
video_res.X() + video_pos.X() * 2, mouse_pos.Y(),
client_size.X(), mouse_pos.Y(),
mouse_pos.X(), 0.f,
mouse_pos.X(), video_res.Y() + video_pos.Y() * 2
mouse_pos.X(), client_size.Y(),
};
gl.DrawLines(2, lines, 4);
gl.ClearInvert();
@ -87,12 +87,12 @@ void VisualToolCross::Draw() {
// Place the text in the corner of the cross closest to the center of the video
int dx = mouse_pos.X();
int dy = mouse_pos.Y();
if (dx > video_res.X() / 2)
if (dx > client_size.X() / 2)
dx -= tw + 4;
else
dx += 4;
if (dy < video_res.Y() / 2)
if (dy < client_size.Y() / 2)
dy += 3;
else
dy -= th + 3;

View File

@ -0,0 +1,6 @@
[wrap-git]
url = https://github.com/AviSynth/AviSynthPlus.git
revision = v3.7.2
[provide]
avisynth = avisynth_dep