Simplify the avisynth filters
Small API change => you can now list a group of desired colorspace output formats for automatic selection; fewer functions can return errors
Fewer signedness warnings

Originally committed to SVN as r2978.
This commit is contained in:
Fredrik Mellbin 2009-05-23 14:18:51 +00:00
parent e772ad5e10
commit 3220a2301a
13 changed files with 101 additions and 129 deletions

View file

@ -62,7 +62,7 @@ void TAudioCache::CacheBlock(int64_t Start, int64_t Samples, uint8_t *SrcData) {
} }
push_front(new TAudioBlock(Start, Samples, SrcData, Samples * BytesPerSample)); push_front(new TAudioBlock(Start, Samples, SrcData, Samples * BytesPerSample));
if (size() >= MaxCacheBlocks) { if (static_cast<int>(size()) >= MaxCacheBlocks) {
delete back(); delete back();
pop_back(); pop_back();
} }
@ -427,9 +427,9 @@ int FFMatroskaAudio::GetAudio(void *Buf, int64_t Start, int64_t Count, char *Err
} }
CurrentAudioBlock++; CurrentAudioBlock++;
if (CurrentAudioBlock < Frames.size()) if (CurrentAudioBlock < static_cast<int>(Frames.size()))
CurrentSample = Frames[CurrentAudioBlock].SampleStart; CurrentSample = Frames[CurrentAudioBlock].SampleStart;
} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size()); } while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < static_cast<int>(Frames.size()));
return 0; return 0;
} }
@ -666,7 +666,7 @@ int FFHaaliAudio::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorM
if (CacheEnd == Start + Count) if (CacheEnd == Start + Count)
return 0; return 0;
int64_t CurrentAudioBlock; int CurrentAudioBlock;
// Is seeking required to decode the requested samples? // Is seeking required to decode the requested samples?
// if (!(CurrentSample >= Start && CurrentSample <= CacheEnd)) { // if (!(CurrentSample >= Start && CurrentSample <= CacheEnd)) {
if (CurrentSample != CacheEnd) { if (CurrentSample != CacheEnd) {
@ -702,9 +702,9 @@ int FFHaaliAudio::GetAudio(void *Buf, int64_t Start, int64_t Count, char *ErrorM
} }
CurrentAudioBlock++; CurrentAudioBlock++;
if (CurrentAudioBlock < Frames.size()) if (CurrentAudioBlock < static_cast<int>(Frames.size()))
CurrentSample = Frames[CurrentAudioBlock].SampleStart; CurrentSample = Frames[CurrentAudioBlock].SampleStart;
} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size()); } while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < static_cast<int>(Frames.size()));
return 0; return 0;
} }

View file

@ -18,14 +18,11 @@
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE. // THE SOFTWARE.
#include <libavutil/pixfmt.h>
#include "ffavisynth.h" #include "ffavisynth.h"
#include "utils.h" #include <cmath>
AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) { AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize) {
memset(&VI, 0, sizeof(VI)); memset(&VI, 0, sizeof(VI));
SWS = NULL;
ConvertToFormat = PIX_FMT_NONE;
this->FPSNum = FPSNum; this->FPSNum = FPSNum;
this->FPSDen = FPSDen; this->FPSDen = FPSDen;
@ -33,105 +30,92 @@ AvisynthVideoSource::AvisynthVideoSource(const char *SourceFile, int Track, FFIn
if (!V) if (!V)
Env->ThrowError(ErrorMsg); Env->ThrowError(ErrorMsg);
const TVideoProperties VP = *FFMS_GetVideoProperties(V);
VI.image_type = VideoInfo::IT_TFF;
VI.width = VP.Width;
VI.height = VP.Height;
if (FPSNum > 0 && FPSDen > 0) {
VI.fps_denominator = FPSDen;
VI.fps_numerator = FPSNum;
VI.num_frames = static_cast<int>(ceil(((VP.LastTime - VP.FirstTime) * FPSNum) / FPSDen));
} else {
VI.fps_denominator = VP.FPSDenominator;
VI.fps_numerator = VP.FPSNumerator;
VI.num_frames = VP.NumFrames;
}
try { try {
InitOutputFormat(static_cast<PixelFormat>(VP.VPixelFormat), Env); InitOutputFormat(Env);
} catch (AvisynthError &) { } catch (AvisynthError &) {
FFMS_DestroyVideoSource(V); FFMS_DestroyVideoSource(V);
throw; throw;
} }
const TVideoProperties *VP = FFMS_GetVideoProperties(V);
if (FPSNum > 0 && FPSDen > 0) {
VI.fps_denominator = FPSDen;
VI.fps_numerator = FPSNum;
VI.num_frames = static_cast<int>(ceil(((VP->LastTime - VP->FirstTime) * FPSNum) / FPSDen));
} else {
VI.fps_denominator = VP->FPSDenominator;
VI.fps_numerator = VP->FPSNumerator;
VI.num_frames = VP->NumFrames;
}
// Set AR variables // Set AR variables
Env->SetVar("FFSAR_NUM", VP.SARNum); Env->SetVar("FFSAR_NUM", VP->SARNum);
Env->SetVar("FFSAR_DEN", VP.SARDen); Env->SetVar("FFSAR_DEN", VP->SARDen);
if (VP.SARNum > 0 && VP.SARDen > 0) if (VP->SARNum > 0 && VP->SARDen > 0)
Env->SetVar("FFSAR", VP.SARNum / (double)VP.SARDen); Env->SetVar("FFSAR", VP->SARNum / (double)VP->SARDen);
// Set crop variables // Set crop variables
Env->SetVar("FFCROP_LEFT", VP.CropLeft); Env->SetVar("FFCROP_LEFT", VP->CropLeft);
Env->SetVar("FFCROP_RIGHT", VP.CropRight); Env->SetVar("FFCROP_RIGHT", VP->CropRight);
Env->SetVar("FFCROP_TOP", VP.CropTop); Env->SetVar("FFCROP_TOP", VP->CropTop);
Env->SetVar("FFCROP_BOTTOM", VP.CropBottom); Env->SetVar("FFCROP_BOTTOM", VP->CropBottom);
} }
AvisynthVideoSource::~AvisynthVideoSource() { AvisynthVideoSource::~AvisynthVideoSource() {
if (SWS)
sws_freeContext(SWS);
FFMS_DestroyVideoSource(V); FFMS_DestroyVideoSource(V);
} }
void AvisynthVideoSource::InitOutputFormat(enum PixelFormat CurrentFormat, IScriptEnvironment *Env) { void AvisynthVideoSource::InitOutputFormat(IScriptEnvironment *Env) {
int Loss; const TVideoProperties *VP = FFMS_GetVideoProperties(V);
enum PixelFormat BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), CurrentFormat, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);
switch (BestFormat) { if (FFMS_SetOutputFormat(V, (1 << FFMS_GetPixFmt("yuvj420p")) |
case PIX_FMT_YUVJ420P: // stupid yv12 distinctions, also inexplicably completely undeniably incompatible with all other supported output formats (1 << FFMS_GetPixFmt("yuv420p")) | (1 << FFMS_GetPixFmt("yuyv422")) |
case PIX_FMT_YUV420P: VI.pixel_type = VideoInfo::CS_I420; break; (1 << FFMS_GetPixFmt("rgb32")) | (1 << FFMS_GetPixFmt("bgr24")),
case PIX_FMT_YUYV422: VI.pixel_type = VideoInfo::CS_YUY2; break; VP->Width, VP->Height, NULL, 0))
case PIX_FMT_RGB32: VI.pixel_type = VideoInfo::CS_BGR32; break; Env->ThrowError("FFVideoSource: No suitable output format found");
case PIX_FMT_BGR24: VI.pixel_type = VideoInfo::CS_BGR24; break;
default:
Env->ThrowError("FFVideoSource: No suitable output format found");
}
if (BestFormat != CurrentFormat) { VP = FFMS_GetVideoProperties(V);
ConvertToFormat = BestFormat;
SWS = sws_getContext(VI.width, VI.height, CurrentFormat, VI.width, VI.height, ConvertToFormat, GetCPUFlags() | SWS_BICUBIC, NULL, NULL, NULL);
}
if (BestFormat == PIX_FMT_YUVJ420P || BestFormat == PIX_FMT_YUV420P) { if (VP->VPixelFormat == FFMS_GetPixFmt("yuvj420p") || VP->VPixelFormat == FFMS_GetPixFmt("yuv420p"))
VI.pixel_type = VideoInfo::CS_I420;
else if (VP->VPixelFormat == FFMS_GetPixFmt("yuyv422"))
VI.pixel_type = VideoInfo::CS_YUY2;
else if (VP->VPixelFormat == FFMS_GetPixFmt("rgb32"))
VI.pixel_type = VideoInfo::CS_BGR32;
else if (VP->VPixelFormat == FFMS_GetPixFmt("bgr24"))
VI.pixel_type = VideoInfo::CS_BGR24;
else
Env->ThrowError("FFVideoSource: No suitable output format found");
VI.image_type = VideoInfo::IT_TFF;
VI.width = VP->Width;
VI.height = VP->Height;
// Crop to obey avisynth's even width/height requirements
if (VP->VPixelFormat == FFMS_GetPixFmt("yuvj420p") || VP->VPixelFormat == FFMS_GetPixFmt("yuv420p")) {
VI.height -= VI.height & 1; VI.height -= VI.height & 1;
VI.width -= VI.width & 1; VI.width -= VI.width & 1;
} }
if (BestFormat == PIX_FMT_YUYV422) { if (VP->VPixelFormat == FFMS_GetPixFmt("yuyv422")) {
VI.width -= VI.width & 1; VI.width -= VI.width & 1;
} }
} }
PVideoFrame AvisynthVideoSource::OutputFrame(const TAVFrameLite *Frame, IScriptEnvironment *Env) { PVideoFrame AvisynthVideoSource::OutputFrame(const TAVFrameLite *Frame, IScriptEnvironment *Env) {
// Yes, this function is overly complex and could probably be simplified // Yes, this function is overly complex and could probably be simplified
AVPicture *SrcPicture = reinterpret_cast<AVPicture *>(const_cast<TAVFrameLite *>(Frame)); TAVFrameLite *SrcPicture = const_cast<TAVFrameLite *>(Frame);
PVideoFrame Dst = Env->NewVideoFrame(VI); PVideoFrame Dst = Env->NewVideoFrame(VI);
if (ConvertToFormat != PIX_FMT_NONE && VI.pixel_type == VideoInfo::CS_I420) { if (VI.pixel_type == VideoInfo::CS_I420) {
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)}; Env->BitBlt(Dst->GetWritePtr(PLANAR_Y), Dst->GetPitch(PLANAR_Y), SrcPicture->Data[0], SrcPicture->Linesize[0], Dst->GetRowSize(PLANAR_Y), Dst->GetHeight(PLANAR_Y));
int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)}; Env->BitBlt(Dst->GetWritePtr(PLANAR_U), Dst->GetPitch(PLANAR_U), SrcPicture->Data[1], SrcPicture->Linesize[1], Dst->GetRowSize(PLANAR_U), Dst->GetHeight(PLANAR_U));
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride); Env->BitBlt(Dst->GetWritePtr(PLANAR_V), Dst->GetPitch(PLANAR_V), SrcPicture->Data[2], SrcPicture->Linesize[2], Dst->GetRowSize(PLANAR_V), Dst->GetHeight(PLANAR_V));
} else if (ConvertToFormat != PIX_FMT_NONE) { } else if (VI.IsRGB()) {
if (VI.IsRGB()) { Env->BitBlt(Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1), -Dst->GetPitch(), SrcPicture->Data[0], SrcPicture->Linesize[0], Dst->GetRowSize(), Dst->GetHeight());
uint8_t *DstData[1] = {Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1)};
int DstStride[1] = {-Dst->GetPitch()};
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
} else {
uint8_t *DstData[1] = {Dst->GetWritePtr()};
int DstStride[1] = {Dst->GetPitch()};
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
}
} else if (VI.pixel_type == VideoInfo::CS_I420) {
Env->BitBlt(Dst->GetWritePtr(PLANAR_Y), Dst->GetPitch(PLANAR_Y), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(PLANAR_Y), Dst->GetHeight(PLANAR_Y));
Env->BitBlt(Dst->GetWritePtr(PLANAR_U), Dst->GetPitch(PLANAR_U), SrcPicture->data[1], SrcPicture->linesize[1], Dst->GetRowSize(PLANAR_U), Dst->GetHeight(PLANAR_U));
Env->BitBlt(Dst->GetWritePtr(PLANAR_V), Dst->GetPitch(PLANAR_V), SrcPicture->data[2], SrcPicture->linesize[2], Dst->GetRowSize(PLANAR_V), Dst->GetHeight(PLANAR_V));
} else { } else {
if (VI.IsRGB()) Env->BitBlt(Dst->GetWritePtr(), Dst->GetPitch(), SrcPicture->Data[0], SrcPicture->Linesize[0], Dst->GetRowSize(), Dst->GetHeight());
Env->BitBlt(Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1), -Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
else
Env->BitBlt(Dst->GetWritePtr(), Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
} }
return Dst; return Dst;

View file

@ -21,13 +21,6 @@
#ifndef FFAVISYNTH_H #ifndef FFAVISYNTH_H
#define FFAVISYNTH_H #define FFAVISYNTH_H
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libpostproc/postprocess.h>
}
#include <windows.h> #include <windows.h>
#include "avisynth.h" #include "avisynth.h"
#include "ffms.h" #include "ffms.h"
@ -36,12 +29,10 @@ class AvisynthVideoSource : public IClip {
private: private:
VideoInfo VI; VideoInfo VI;
FFVideo *V; FFVideo *V;
SwsContext *SWS;
PixelFormat ConvertToFormat;
int FPSNum; int FPSNum;
int FPSDen; int FPSDen;
void InitOutputFormat(enum PixelFormat CurrentFormat, IScriptEnvironment *Env); void InitOutputFormat(IScriptEnvironment *Env);
PVideoFrame OutputFrame(const TAVFrameLite *SrcPicture, IScriptEnvironment *Env); PVideoFrame OutputFrame(const TAVFrameLite *SrcPicture, IScriptEnvironment *Env);
public: public:
AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize); AvisynthVideoSource(const char *SourceFile, int Track, FFIndex *Index, int FPSNum, int FPSDen, const char *PP, int Threads, int SeekMode, IScriptEnvironment* Env, char *ErrorMsg, unsigned MsgSize);

View file

@ -138,7 +138,7 @@ static AVSValue __cdecl CreateFFVideoSource(AVSValue Args, void* UserData, IScri
Env->ThrowError("FFVideoSource: No video track found"); Env->ThrowError("FFVideoSource: No video track found");
if (strcmp(Timecodes, "")) { if (strcmp(Timecodes, "")) {
if (FFMS_WriteTimecodes(FFMS_GetTrackFromIndex(Index, Track, ErrorMsg, MsgSize), Timecodes, ErrorMsg, MsgSize)) { if (FFMS_WriteTimecodes(FFMS_GetTrackFromIndex(Index, Track), Timecodes, ErrorMsg, MsgSize)) {
FFMS_DestroyFFIndex(Index); FFMS_DestroyFFIndex(Index);
Env->ThrowError("FFVideoSource: %s", ErrorMsg); Env->ThrowError("FFVideoSource: %s", ErrorMsg);
} }

View file

@ -126,8 +126,8 @@ FFMS_API(int) FFMS_GetAudio(FFAudio *A, void *Buf, int64_t Start, int64_t Count,
return A->GetAudio(Buf, Start, Count, ErrorMsg, MsgSize); return A->GetAudio(Buf, Start, Count, ErrorMsg, MsgSize);
} }
FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize) { FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int64_t TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
return V->SetOutputFormat(TargetFormat, Width, Height, ErrorMsg, MsgSize); return V->SetOutputFormat(TargetFormats, Width, Height, ErrorMsg, MsgSize);
} }
FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V) { FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V) {
@ -174,22 +174,12 @@ FFMS_API(int) FFMS_GetNumFrames(FFTrack *T) {
return T->size(); return T->size();
} }
FFMS_API(const FFFrameInfo *) FFMS_GetFrameInfo(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize) { FFMS_API(const FFFrameInfo *) FFMS_GetFrameInfo(FFTrack *T, int Frame) {
if (Frame < 0 || Frame >= static_cast<int>(T->size())) { return reinterpret_cast<FFFrameInfo *>(&(*T)[Frame]);
_snprintf(ErrorMsg, MsgSize, "Invalid frame specified");
return NULL;
} else {
return reinterpret_cast<FFFrameInfo *>(&(*T)[Frame]);
}
} }
FFMS_API(FFTrack *) FFMS_GetTrackFromIndex(FFIndex *Index, int Track, char *ErrorMsg, unsigned MsgSize) { FFMS_API(FFTrack *) FFMS_GetTrackFromIndex(FFIndex *Index, int Track) {
if (Track < 0 || Track >= static_cast<int>(Index->size())) { return &(*Index)[Track];
_snprintf(ErrorMsg, MsgSize, "Invalid track specified");
return NULL;
} else {
return &(*Index)[Track];
}
} }
FFMS_API(FFTrack *) FFMS_GetTrackFromVideo(FFVideo *V) { FFMS_API(FFTrack *) FFMS_GetTrackFromVideo(FFVideo *V) {

View file

@ -135,7 +135,7 @@ FFMS_API(const TAudioProperties *) FFMS_GetAudioProperties(FFAudio *A);
FFMS_API(const TAVFrameLite *) FFMS_GetFrame(FFVideo *V, int n, char *ErrorMsg, unsigned MsgSize); FFMS_API(const TAVFrameLite *) FFMS_GetFrame(FFVideo *V, int n, char *ErrorMsg, unsigned MsgSize);
FFMS_API(const TAVFrameLite *) FFMS_GetFrameByTime(FFVideo *V, double Time, char *ErrorMsg, unsigned MsgSize); FFMS_API(const TAVFrameLite *) FFMS_GetFrameByTime(FFVideo *V, double Time, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_GetAudio(FFAudio *A, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize); FFMS_API(int) FFMS_GetAudio(FFAudio *A, void *Buf, int64_t Start, int64_t Count, char *ErrorMsg, unsigned MsgSize);
FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int TargetFormat, int Width, int Height, char *ErrorMsg, unsigned MsgSize); FFMS_API(int) FFMS_SetOutputFormat(FFVideo *V, int64_t TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize);
FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V); FFMS_API(void) FFMS_ResetOutputFormat(FFVideo *V);
FFMS_API(void) FFMS_DestroyFFIndex(FFIndex *Index); FFMS_API(void) FFMS_DestroyFFIndex(FFIndex *Index);
FFMS_API(int) FFMS_GetFirstTrackOfType(FFIndex *Index, int TrackType, char *ErrorMsg, unsigned MsgSize); FFMS_API(int) FFMS_GetFirstTrackOfType(FFIndex *Index, int TrackType, char *ErrorMsg, unsigned MsgSize);
@ -146,8 +146,8 @@ FFMS_API(FFMS_TrackType) FFMS_GetTrackType(FFTrack *T);
FFMS_API(FFMS_TrackType) FFMS_GetTrackTypeI(FFIndexer *Indexer, int Track); FFMS_API(FFMS_TrackType) FFMS_GetTrackTypeI(FFIndexer *Indexer, int Track);
FFMS_API(const char *) FFMS_CodecName(FFIndexer *Indexer, int Track); FFMS_API(const char *) FFMS_CodecName(FFIndexer *Indexer, int Track);
FFMS_API(int) FFMS_GetNumFrames(FFTrack *T); FFMS_API(int) FFMS_GetNumFrames(FFTrack *T);
FFMS_API(const FFFrameInfo *) FFMS_GetFrameInfo(FFTrack *T, int Frame, char *ErrorMsg, unsigned MsgSize); FFMS_API(const FFFrameInfo *) FFMS_GetFrameInfo(FFTrack *T, int Frame);
FFMS_API(FFTrack *) FFMS_GetTrackFromIndex(FFIndex *Index, int Track, char *ErrorMsg, unsigned MsgSize); FFMS_API(FFTrack *) FFMS_GetTrackFromIndex(FFIndex *Index, int Track);
FFMS_API(FFTrack *) FFMS_GetTrackFromVideo(FFVideo *V); FFMS_API(FFTrack *) FFMS_GetTrackFromVideo(FFVideo *V);
FFMS_API(FFTrack *) FFMS_GetTrackFromAudio(FFAudio *A); FFMS_API(FFTrack *) FFMS_GetTrackFromAudio(FFAudio *A);
FFMS_API(const TTrackTimeBase *) FFMS_GetTimeBase(FFTrack *T); FFMS_API(const TTrackTimeBase *) FFMS_GetTimeBase(FFTrack *T);

View file

@ -118,15 +118,14 @@ TAVFrameLite *FFVideo::GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgS
return GetFrame(Frame, ErrorMsg, MsgSize); return GetFrame(Frame, ErrorMsg, MsgSize);
} }
int FFVideo::SetOutputFormat(int TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize) { int FFVideo::SetOutputFormat(int64_t TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize) {
// FIXME: investigate the possible bug in avcodec_find_best_pix_fmt int Loss;
// int Loss; PixelFormat OutputFormat = avcodec_find_best_pix_fmt(TargetFormats,
// int OutputFormat = avcodec_find_best_pix_fmt(TargetFormats, CodecContext->pix_fmt, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);
// CodecContext->pix_fmt, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss); if (OutputFormat == PIX_FMT_NONE) {
// if (OutputFormat == -1) _snprintf(ErrorMsg, MsgSize, "No suitable output format found");
// return -1; return -1;
}
PixelFormat OutputFormat = static_cast<PixelFormat>(TargetFormats);
SwsContext *NewSWS = NULL; SwsContext *NewSWS = NULL;
if (CodecContext->pix_fmt != OutputFormat || Width != CodecContext->width || Height != CodecContext->height) { if (CodecContext->pix_fmt != OutputFormat || Width != CodecContext->width || Height != CodecContext->height) {

View file

@ -69,7 +69,7 @@ public:
FFTrack *GetFFTrack() { return &Frames; } FFTrack *GetFFTrack() { return &Frames; }
virtual TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize) = 0; virtual TAVFrameLite *GetFrame(int n, char *ErrorMsg, unsigned MsgSize) = 0;
TAVFrameLite *GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize); TAVFrameLite *GetFrameByTime(double Time, char *ErrorMsg, unsigned MsgSize);
int SetOutputFormat(int TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize); int SetOutputFormat(int64_t TargetFormats, int Width, int Height, char *ErrorMsg, unsigned MsgSize);
void ResetOutputFormat(); void ResetOutputFormat();
}; };

View file

@ -511,6 +511,14 @@ FFIndexer *FFIndexer::CreateFFIndexer(const char *Filename, char *ErrorMsg, unsi
return new FFLAVFIndexer(Filename, FormatContext, ErrorMsg, MsgSize); return new FFLAVFIndexer(Filename, FormatContext, ErrorMsg, MsgSize);
} }
// Allocates the shared audio decoding scratch buffer up front so per-frame
// decoding never has to allocate. Sized at 5x AVCODEC_MAX_AUDIO_FRAME_SIZE
// to leave headroom for codecs that emit more than one frame's worth of
// samples per packet. Moved out-of-line from the header (see ffindexer.h
// hunk above) so the libavcodec constant is only needed in this TU.
FFIndexer::FFIndexer() {
DecodingBuffer = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE * 5];
}
// Releases the decoding scratch buffer allocated in the constructor.
// Declared virtual in the class definition so deleting derived indexers
// through an FFIndexer* is safe.
FFIndexer::~FFIndexer() {
delete[] DecodingBuffer;
}
FFLAVFIndexer::FFLAVFIndexer(const char *Filename, AVFormatContext *FormatContext, char *ErrorMsg, unsigned MsgSize) { FFLAVFIndexer::FFLAVFIndexer(const char *Filename, AVFormatContext *FormatContext, char *ErrorMsg, unsigned MsgSize) {
SourceFile = Filename; SourceFile = Filename;
this->FormatContext = FormatContext; this->FormatContext = FormatContext;

View file

@ -24,7 +24,7 @@
#include "utils.h" #include "utils.h"
#include "wave64writer.h" #include "wave64writer.h"
#define INDEXVERSION 25 #define INDEXVERSION 26
#define INDEXID 0x53920873 #define INDEXID 0x53920873
struct IndexHeader { struct IndexHeader {
@ -71,8 +71,8 @@ protected:
bool WriteAudio(SharedAudioContext &AudioContext, FFIndex *Index, int Track, int DBSize, char *ErrorMsg, unsigned MsgSize); bool WriteAudio(SharedAudioContext &AudioContext, FFIndex *Index, int Track, int DBSize, char *ErrorMsg, unsigned MsgSize);
public: public:
static FFIndexer *CreateFFIndexer(const char *Filename, char *ErrorMsg, unsigned MsgSize); static FFIndexer *CreateFFIndexer(const char *Filename, char *ErrorMsg, unsigned MsgSize);
FFIndexer() { DecodingBuffer = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE * 5]; } FFIndexer();
virtual ~FFIndexer() { delete[] DecodingBuffer; } virtual ~FFIndexer();
void SetIndexMask(int IndexMask) { this->IndexMask = IndexMask; } void SetIndexMask(int IndexMask) { this->IndexMask = IndexMask; }
void SetDumpMask(int DumpMask) { this->DumpMask = DumpMask; } void SetDumpMask(int DumpMask) { this->DumpMask = DumpMask; }
void SetIgnoreDecodeErrors(bool IgnoreDecodeErrors) { this->IgnoreDecodeErrors = IgnoreDecodeErrors; } void SetIgnoreDecodeErrors(bool IgnoreDecodeErrors) { this->IgnoreDecodeErrors = IgnoreDecodeErrors; }

View file

@ -156,17 +156,17 @@ int FFIndex::WriteIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize)
IndexStream.write(reinterpret_cast<char *>(&IH), sizeof(IH)); IndexStream.write(reinterpret_cast<char *>(&IH), sizeof(IH));
for (unsigned int i = 0; i < IH.Tracks; i++) { for (unsigned int i = 0; i < IH.Tracks; i++) {
int TT = at(i).TT; FFMS_TrackType TT = at(i).TT;
IndexStream.write(reinterpret_cast<char *>(&TT), sizeof(TT)); IndexStream.write(reinterpret_cast<char *>(&TT), sizeof(TT));
int64_t Num = at(i).TB.Num; int64_t Num = at(i).TB.Num;
IndexStream.write(reinterpret_cast<char *>(&Num), sizeof(Num)); IndexStream.write(reinterpret_cast<char *>(&Num), sizeof(Num));
int64_t Den = at(i).TB.Den; int64_t Den = at(i).TB.Den;
IndexStream.write(reinterpret_cast<char *>(&Den), sizeof(Den)); IndexStream.write(reinterpret_cast<char *>(&Den), sizeof(Den));
size_t Frames = at(i).size(); int64_t Frames = at(i).size();
IndexStream.write(reinterpret_cast<char *>(&Frames), sizeof(Frames)); IndexStream.write(reinterpret_cast<char *>(&Frames), sizeof(Frames));
for (size_t j = 0; j < Frames; j++) for (FFTrack::iterator Cur=at(i).begin(); Cur!=at(i).end(); Cur++)
IndexStream.write(reinterpret_cast<char *>(&(at(i)[j])), sizeof(TFrameInfo)); IndexStream.write(reinterpret_cast<char *>(&*Cur), sizeof(TFrameInfo));
} }
return 0; return 0;
@ -211,7 +211,7 @@ int FFIndex::ReadIndex(const char *IndexFile, char *ErrorMsg, unsigned MsgSize)
Index.read(reinterpret_cast<char *>(&Num), sizeof(Num)); Index.read(reinterpret_cast<char *>(&Num), sizeof(Num));
int64_t Den; int64_t Den;
Index.read(reinterpret_cast<char *>(&Den), sizeof(Den)); Index.read(reinterpret_cast<char *>(&Den), sizeof(Den));
size_t Frames; int64_t Frames;
Index.read(reinterpret_cast<char *>(&Frames), sizeof(Frames)); Index.read(reinterpret_cast<char *>(&Frames), sizeof(Frames));
push_back(FFTrack(Num, Den, TT)); push_back(FFTrack(Num, Den, TT));

View file

@ -98,7 +98,7 @@ void FFmpegSourceAudioProvider::LoadAudio(Aegisub::String filename) {
} }
for (int i = 0; i < NumTracks; i++) { for (int i = 0; i < NumTracks; i++) {
FFTrack *FrameData = FFMS_GetTrackFromIndex(Index, i, FFMSErrMsg, MsgSize); FFTrack *FrameData = FFMS_GetTrackFromIndex(Index, i);
if (FrameData == NULL) { if (FrameData == NULL) {
FFMS_DestroyFFIndex(Index); FFMS_DestroyFFIndex(Index);
Index = NULL; Index = NULL;

View file

@ -166,7 +166,7 @@ void FFmpegSourceVideoProvider::LoadVideo(Aegisub::String filename, double fps)
// build list of keyframes and timecodes // build list of keyframes and timecodes
for (int CurFrameNum = 0; CurFrameNum < VideoInfo->NumFrames; CurFrameNum++) { for (int CurFrameNum = 0; CurFrameNum < VideoInfo->NumFrames; CurFrameNum++) {
CurFrameData = FFMS_GetFrameInfo(FrameData, CurFrameNum, FFMSErrorMessage, MessageSize); CurFrameData = FFMS_GetFrameInfo(FrameData, CurFrameNum);
if (CurFrameData == NULL) { if (CurFrameData == NULL) {
wxString temp(FFMSErrorMessage, wxConvUTF8); wxString temp(FFMSErrorMessage, wxConvUTF8);
ErrorMsg << _T("Couldn't get framedata for frame ") << CurFrameNum << _T(": ") << temp; ErrorMsg << _T("Couldn't get framedata for frame ") << CurFrameNum << _T(": ") << temp;
@ -254,7 +254,7 @@ const AegiVideoFrame FFmpegSourceVideoProvider::GetFrame(int _n, int FormatType)
// requested format was changed since last time we were called, (re)set output format // requested format was changed since last time we were called, (re)set output format
if (LastDstFormat != DstFormat) { if (LastDstFormat != DstFormat) {
if (FFMS_SetOutputFormat(VideoSource, DstFormat, w, h, FFMSErrorMessage, MessageSize)) { if (FFMS_SetOutputFormat(VideoSource, 1 << DstFormat, w, h, FFMSErrorMessage, MessageSize)) {
wxString temp(FFMSErrorMessage, wxConvUTF8); wxString temp(FFMSErrorMessage, wxConvUTF8);
ErrorMsg << _T("Failed to set output format: ") << temp; ErrorMsg << _T("Failed to set output format: ") << temp;
throw ErrorMsg; throw ErrorMsg;