Delete the obsolete FFmpegSource

Originally committed to SVN as r2618.
This commit is contained in:
Fredrik Mellbin 2009-01-03 15:28:02 +00:00
parent 54c721cc49
commit bb61298853
17 changed files with 0 additions and 7566 deletions

File diff suppressed because it is too large Load diff

View file

@@ -1,399 +0,0 @@
/*
* Copyright (c) 2004-2006 Mike Matsnev. All Rights Reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* 1. Redistributions of source code must retain the above copyright
* notice immediately at the beginning of the file, without modification,
* this list of conditions, and the following disclaimer.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. Absolutely no warranty of function or purpose is made by the author
* Mike Matsnev.
*
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
* $Id: MatroskaParser.h,v 1.19 2006/03/11 10:57:13 mike Exp $
*
*/
#ifndef MATROSKA_PARSER_H
#define MATROSKA_PARSER_H
/* Random notes:
*
* The parser does not process frame data in any way and does not read it into
* the queue. The app should read it via mkv_ReadData if it is interested.
*
* The code here is 64-bit clean and was tested on FreeBSD/sparc 64-bit big endian
* system
*/
/* X marks every public entry point. It expands to the proper __declspec when
* building the MatroskaParser DLL (MPDLLBUILD) or consuming it (MPDLL, which
* also pulls in the import library); otherwise it expands to nothing and the
* functions have plain external linkage. */
#ifdef MPDLLBUILD
#define X __declspec(dllexport)
#else
#ifdef MPDLL
#define X __declspec(dllimport)
#pragma comment(lib,"MatroskaParser")
#else
#define X
#endif
#endif
/* Compile-time feature switches for this copy of the parser. */
#define MATROSKA_COMPRESSION_SUPPORT /* enable the cs_* compressed-stream API below */
#define MATROSKA_INTEGER_ONLY /* MKFLOAT becomes an integer box; see mkv_TruncFloat */
#ifdef __cplusplus
extern "C" {
#endif
/* 64-bit integers: MSVC for WinCE lacks "long long", so use __int64 there */
#ifdef _WIN32_WCE
typedef signed __int64 longlong;
typedef unsigned __int64 ulonglong;
#else
typedef signed long long longlong;
typedef unsigned long long ulonglong;
#endif
/* MKFLOATing point: with MATROSKA_INTEGER_ONLY this is an integer
* representation wrapped in a struct (presumably fixed point -- see
* mkv_TruncFloat for conversion); otherwise a plain double. */
#ifdef MATROSKA_INTEGER_ONLY
typedef struct {
longlong v;
} MKFLOAT;
#else
typedef double MKFLOAT;
#endif
/* generic I/O: the application supplies all file access and memory
* management through this callback table; the parser never touches the OS
* directly. The same InputStream pointer is passed back as "cc" so the
* callbacks can reach application state. */
struct InputStream {
/* read "count" bytes at absolute offset "pos" into "buffer"
* (return semantics not documented here; presumably bytes read, <0 on error) */
int (*read)(struct InputStream *cc,ulonglong pos,void *buffer,int count);
/* scan forward from "start" for a four byte signature, bytes must be nonzero */
longlong (*scan)(struct InputStream *cc,ulonglong start,unsigned signature);
/* get cache size, this is used to cap readahead */
unsigned (*getcachesize)(struct InputStream *cc);
/* fetch last error message */
const char *(*geterror)(struct InputStream *cc);
/* memory allocation, malloc/realloc/free semantics */
void *(*memalloc)(struct InputStream *cc,size_t size);
void *(*memrealloc)(struct InputStream *cc,void *mem,size_t newsize);
void (*memfree)(struct InputStream *cc,void *mem);
/* progress callback; zero return causes parser to abort open */
int (*progress)(struct InputStream *cc,ulonglong cur,ulonglong max);
/* get file size, optional, can be NULL or return -1 if filesize is unknown */
longlong (*getfilesize)(struct InputStream *cc);
};
typedef struct InputStream InputStream;
/* matroska file */
struct MatroskaFile; /* opaque parser state, created by mkv_Open/mkv_OpenEx */
typedef struct MatroskaFile MatroskaFile;
/* track compression methods, see TrackInfo.CompMethod */
#define COMP_ZLIB 0
#define COMP_BZIP 1
#define COMP_LZO1X 2
#define COMP_PREPEND 3 /* presumably header stripping: bytes to prepend per frame */
/* track types, see TrackInfo.Type */
#define TT_VIDEO 1
#define TT_AUDIO 2
#define TT_SUB 17
/* Per-track metadata parsed from a Matroska TrackEntry element. Fields
* mirror the corresponding Matroska elements; consult the Matroska
* specification for their exact semantics. */
struct TrackInfo {
unsigned char Number; /* track number as used in block headers */
unsigned char Type; /* one of the TT_* constants */
unsigned char TrackOverlay;
ulonglong UID;
ulonglong MinCache;
ulonglong MaxCache;
ulonglong DefaultDuration; /* default frame duration (presumably ns -- confirm) */
MKFLOAT TimecodeScale;
void *CodecPrivate; /* codec initialization data, CodecPrivateSize bytes */
unsigned CodecPrivateSize;
unsigned CompMethod; /* COMP_* constant; meaningful when CompEnabled is set */
void *CompMethodPrivate; /* compression side data, CompMethodPrivateSize bytes */
unsigned CompMethodPrivateSize;
unsigned MaxBlockAdditionID;
/* flag bits from the TrackEntry */
struct {
unsigned int Enabled:1;
unsigned int Default:1;
unsigned int Lacing:1;
unsigned int DecodeAll:1;
unsigned int CompEnabled:1; /* frames are compressed; see CompMethod */
};
/* type-specific data: AV.Video is valid for TT_VIDEO tracks,
* AV.Audio for TT_AUDIO tracks */
union {
struct {
unsigned char StereoMode;
unsigned char DisplayUnit;
unsigned char AspectRatioType;
unsigned int PixelWidth; /* coded frame dimensions */
unsigned int PixelHeight;
unsigned int DisplayWidth; /* intended display dimensions */
unsigned int DisplayHeight;
unsigned int CropL, CropT, CropR, CropB;
unsigned int ColourSpace;
MKFLOAT GammaValue;
struct {
unsigned int Interlaced:1;
};
} Video;
struct {
MKFLOAT SamplingFreq;
MKFLOAT OutputSamplingFreq;
unsigned char Channels;
unsigned char BitDepth;
} Audio;
} AV;
/* various strings */
char *Name;
char Language[4]; /* language code, 4 bytes (presumably 3 letters + NUL) */
char *CodecID;
};
typedef struct TrackInfo TrackInfo;
/* Global information about the Matroska segment (SegmentInfo element). */
struct SegmentInfo {
char UID[16]; /* 128-bit segment UID */
char PrevUID[16]; /* UIDs/filenames of linked previous/next segments */
char NextUID[16];
char *Filename;
char *PrevFilename;
char *NextFilename;
char *Title;
char *MuxingApp;
char *WritingApp;
ulonglong TimecodeScale; /* nanoseconds per timecode unit */
ulonglong Duration;
longlong DateUTC;
char DateUTCValid; /* nonzero if DateUTC was present in the file */
};
typedef struct SegmentInfo SegmentInfo;
/* An attached file (cover art, fonts, ...). Only the location of the data
* within the stream is recorded; the data itself is not loaded. */
struct Attachment {
ulonglong Position; /* absolute offset of the attachment data */
ulonglong Length; /* size of the attachment data in bytes */
ulonglong UID;
char *Name;
char *Description;
char *MimeType;
};
typedef struct Attachment Attachment;
/* One localized display name for a chapter. */
struct ChapterDisplay {
char *String;
char Language[4];
char Country[4];
};
/* A single command of a chapter process. */
struct ChapterCommand {
unsigned Time;
unsigned CommandLength;
void *Command; /* CommandLength bytes of codec-specific command data */
};
struct ChapterProcess {
unsigned CodecID;
unsigned CodecPrivateLength;
void *CodecPrivate;
unsigned nCommands,nCommandsSize; /* presumably used count / allocated capacity */
struct ChapterCommand *Commands;
};
/* A chapter; chapters nest via Children. The nX,nXSize pairs throughout
* appear to be used count / allocated capacity. */
struct Chapter {
ulonglong UID;
ulonglong Start; /* chapter start/end timecodes */
ulonglong End;
unsigned nTracks,nTracksSize;
ulonglong *Tracks; /* UIDs of the tracks this chapter applies to */
unsigned nDisplay,nDisplaySize;
struct ChapterDisplay *Display;
unsigned nChildren,nChildrenSize;
struct Chapter *Children;
unsigned nProcess,nProcessSize;
struct ChapterProcess *Process;
char SegmentUID[16];
struct {
unsigned int Hidden:1;
unsigned int Enabled:1;
// Editions
unsigned int Default:1;
unsigned int Ordered:1;
};
};
typedef struct Chapter Chapter;
/* tag target types, see struct Target */
#define TARGET_TRACK 0
#define TARGET_CHAPTER 1
#define TARGET_ATTACHMENT 2
#define TARGET_EDITION 3
/* What a tag applies to: the UID of a track/chapter/attachment/edition. */
struct Target {
ulonglong UID;
unsigned Type; /* one of the TARGET_* constants */
};
/* A single name/value metadata pair. */
struct SimpleTag {
char *Name;
char *Value;
char Language[4];
unsigned Default:1;
};
/* A tag: a set of targets plus the simple tags that apply to them. */
struct Tag {
unsigned nTargets,nTargetsSize;
struct Target *Targets;
unsigned nSimpleTags,nSimpleTagsSize;
struct SimpleTag *SimpleTags;
};
typedef struct Tag Tag;
/* Open a matroska file
* io pointer is recorded inside MatroskaFile
* (presumably returns NULL on failure with a message in err_msg -- the
* err_msg buffer is msgsize bytes and supplied by the caller)
*/
X MatroskaFile *mkv_Open(/* in */ InputStream *io,
/* out */ char *err_msg,
/* in */ unsigned msgsize);
#define MKVF_AVOID_SEEKS 1 /* use sequential reading only */
/* As mkv_Open, but parsing starts at byte offset "base" and behaviour can
* be adjusted with MKVF_* flags. */
X MatroskaFile *mkv_OpenEx(/* in */ InputStream *io,
/* in */ ulonglong base,
/* in */ unsigned flags,
/* out */ char *err_msg,
/* in */ unsigned msgsize);
/* Close and deallocate mf
* NULL pointer is ok and is simply ignored
*/
X void mkv_Close(/* in */ MatroskaFile *mf);
/* Fetch the error message of the last failed operation */
X const char *mkv_GetLastError(/* in */ MatroskaFile *mf);
/* Get file information */
X SegmentInfo *mkv_GetFileInfo(/* in */ MatroskaFile *mf);
/* Get track information */
X unsigned int mkv_GetNumTracks(/* in */ MatroskaFile *mf);
X TrackInfo *mkv_GetTrackInfo(/* in */ MatroskaFile *mf,/* in */ unsigned track);
/* chapters, tags and attachments: an array pointer and its element count
* are returned through the out parameters.
* NOTE(review): storage appears to remain owned by the parser -- confirm
* before freeing anything on the caller side. */
X void mkv_GetAttachments(/* in */ MatroskaFile *mf,
/* out */ Attachment **at,
/* out */ unsigned *count);
X void mkv_GetChapters(/* in */ MatroskaFile *mf,
/* out */ Chapter **ch,
/* out */ unsigned *count);
X void mkv_GetTags(/* in */ MatroskaFile *mf,
/* out */ Tag **tag,
/* out */ unsigned *count);
X ulonglong mkv_GetSegmentTop(MatroskaFile *mf);
/* Seek to specified timecode,
* if timecode is past end of file,
* all tracks are set to return EOF
* on next read
*/
#define MKVF_SEEK_TO_PREV_KEYFRAME 1
X void mkv_Seek(/* in */ MatroskaFile *mf,
/* in */ ulonglong timecode /* in ns */,
/* in */ unsigned flags);
X void mkv_SkipToKeyframe(MatroskaFile *mf);
X ulonglong mkv_GetLowestQTimecode(MatroskaFile *mf);
/* Convert an MKFLOAT to int (needed because MKFLOAT may not be a native
* floating type, see MATROSKA_INTEGER_ONLY). */
X int mkv_TruncFloat(MKFLOAT f);
/*************************************************************************
* reading data, pull model
*/
/* frame flags returned via mkv_ReadFrame's FrameFlags */
#define FRAME_UNKNOWN_START 0x00000001 /* start time was not coded in the file */
#define FRAME_UNKNOWN_END 0x00000002 /* end time was not coded in the file */
#define FRAME_KF 0x00000004 /* keyframe */
#define FRAME_GAP 0x00800000
#define FRAME_STREAM_MASK 0xff000000 /* top byte carries a stream number */
#define FRAME_STREAM_SHIFT 24
/* This sets the masking flags for the parser,
* masked tracks [with 1s in their bit positions]
* will be ignored when reading file data.
* This call discards all parsed and queued frames
*/
X void mkv_SetTrackMask(/* in */ MatroskaFile *mf,/* in */ unsigned int mask);
/* Read one frame from the queue.
* mask specifies what tracks to ignore.
* Returns -1 if there are no more frames in the specified
* set of tracks, 0 on success
*/
X int mkv_ReadFrame(/* in */ MatroskaFile *mf,
/* in */ unsigned int mask,
/* out */ unsigned int *track,
/* out */ ulonglong *StartTime /* in ns */,
/* out */ ulonglong *EndTime /* in ns */,
/* out */ ulonglong *FilePos /* in bytes from start of file */,
/* out */ unsigned int *FrameSize /* in bytes */,
/* out */ unsigned int *FrameFlags);
#ifdef MATROSKA_COMPRESSION_SUPPORT
/* Compressed streams support: cs_* wraps a single track and transparently
* decompresses its frame data. */
struct CompressedStream; /* opaque */
typedef struct CompressedStream CompressedStream;
/* create a decompressor for track "tracknum"; on failure presumably returns
* NULL with a message in errormsg (msgsize bytes) */
X CompressedStream *cs_Create(/* in */ MatroskaFile *mf,
/* in */ unsigned tracknum,
/* out */ char *errormsg,
/* in */ unsigned msgsize);
X void cs_Destroy(/* in */ CompressedStream *cs);
/* advance to the next frame in matroska stream, you need to pass values returned
* by mkv_ReadFrame */
X void cs_NextFrame(/* in */ CompressedStream *cs,
/* in */ ulonglong pos,
/* in */ unsigned size);
/* read and decode more data from current frame, return number of bytes decoded,
* 0 on end of frame, or -1 on error */
X int cs_ReadData(CompressedStream *cs,char *buffer,unsigned bufsize);
/* return error message for the last error */
X const char *cs_GetLastError(CompressedStream *cs);
#endif
#ifdef __cplusplus
}
#endif
#undef X
#endif

View file

@@ -1,749 +0,0 @@
// Avisynth v2.5. Copyright 2002 Ben Rudiak-Gould et al.
// http://www.avisynth.org
// This program is free software; you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation; either version 2 of the License, or
// (at your option) any later version.
//
// This program is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with this program; if not, write to the Free Software
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
// http://www.gnu.org/copyleft/gpl.html .
//
// Linking Avisynth statically or dynamically with other modules is making a
// combined work based on Avisynth. Thus, the terms and conditions of the GNU
// General Public License cover the whole combination.
//
// As a special exception, the copyright holders of Avisynth give you
// permission to link Avisynth with independent modules that communicate with
// Avisynth solely through the interfaces defined in avisynth.h, regardless of the license
// terms of these independent modules, and to copy and distribute the
// resulting combined work under terms of your choice, provided that
// every copy of the combined work is accompanied by a complete copy of
// the source code of Avisynth (the version of Avisynth used to produce the
// combined work), being distributed under the terms of the GNU General
// Public License plus this exception. An independent module is a module
// which is not derived from or based on Avisynth, such as 3rd-party filters,
// import and export plugins, or graphical user interfaces.
#ifndef __AVISYNTH_H__
#define __AVISYNTH_H__
// Plugin interface version; returned by IClip::GetVersion().
enum { AVISYNTH_INTERFACE_VERSION = 3 };
/* Define all types necessary for interfacing with avisynth.dll
Moved from internal.h */
// Win32 API macros, notably the types BYTE, DWORD, ULONG, etc.
#include <windef.h>
// COM interface macros
#include <objbase.h>
// Raster types used by VirtualDub & Avisynth
// in64: truncate to the low 16 bits, then widen to __int64.
#define in64 (__int64)(unsigned short)
typedef unsigned long Pixel; // this will break on 64-bit machines!
typedef unsigned long Pixel32;
typedef unsigned char Pixel8;
typedef long PixCoord;
typedef long PixDim;
typedef long PixOffset;
/* Compiler-specific crap */
// Tell MSVC to stop precompiling here
#ifdef _MSC_VER
#pragma hdrstop
#endif
// Set up debugging macros for MS compilers; for others, step down to the
// standard <assert.h> interface (_RPTn tracing becomes a no-op).
#ifdef _MSC_VER
#include <crtdbg.h>
#else
#define _RPT0(a,b) ((void)0)
#define _RPT1(a,b,c) ((void)0)
#define _RPT2(a,b,c,d) ((void)0)
#define _RPT3(a,b,c,d,e) ((void)0)
#define _RPT4(a,b,c,d,e,f) ((void)0)
#define _ASSERTE(x) assert(x)
#include <assert.h>
#endif
// I had problems with Premiere wanting 1-byte alignment for its structures,
// so I now set the Avisynth struct alignment explicitly here.
// (Popped nowhere in this fragment; the matching pop is at the end of the
// original header.)
#pragma pack(push,8)
#define FRAME_ALIGN 16
// Default frame alignment is 16 bytes, to help P4, when using SSE2
// The VideoInfo struct holds global information about a clip (i.e.
// information that does not depend on the frame number). The GetVideoInfo
// method in IClip returns this struct.
// Audio Sample information
typedef float SFLOAT;
// Audio sample formats, used as bit flags in VideoInfo.sample_type.
enum {SAMPLE_INT8 = 1<<0,
SAMPLE_INT16 = 1<<1,
SAMPLE_INT24 = 1<<2, // Int24 is a very stupid thing to code, but it's supported by some hardware.
SAMPLE_INT32 = 1<<3,
SAMPLE_FLOAT = 1<<4};
// Plane selectors for VideoFrame accessors; the *_ALIGNED variants request
// the FRAME_ALIGN-rounded row size.
enum {
PLANAR_Y=1<<0,
PLANAR_U=1<<1,
PLANAR_V=1<<2,
PLANAR_ALIGNED=1<<3,
PLANAR_Y_ALIGNED=PLANAR_Y|PLANAR_ALIGNED,
PLANAR_U_ALIGNED=PLANAR_U|PLANAR_ALIGNED,
PLANAR_V_ALIGNED=PLANAR_V|PLANAR_ALIGNED,
};
// Global (frame-number independent) description of a clip: dimensions,
// frame rate, colorspace, and audio format, plus helper queries/mutators.
struct VideoInfo {
int width, height; // width=0 means no video
unsigned fps_numerator, fps_denominator;
int num_frames;
// This is more extensible than previous versions. More properties can be added seeminglesly.
// Colorspace properties.
// Category bits; combined with the format-specific low bits below.
enum {
CS_BGR = 1<<28,
CS_YUV = 1<<29,
CS_INTERLEAVED = 1<<30,
CS_PLANAR = 1<<31
};
// Specific colorformats
enum { CS_UNKNOWN = 0,
CS_BGR24 = 1<<0 | CS_BGR | CS_INTERLEAVED,
CS_BGR32 = 1<<1 | CS_BGR | CS_INTERLEAVED,
CS_YUY2 = 1<<2 | CS_YUV | CS_INTERLEAVED,
CS_YV12 = 1<<3 | CS_YUV | CS_PLANAR, // y-v-u, planar
CS_I420 = 1<<4 | CS_YUV | CS_PLANAR, // y-u-v, planar
CS_IYUV = 1<<4 | CS_YUV | CS_PLANAR // same as above
};
int pixel_type; // changed to int as of 2.5
int audio_samples_per_second; // 0 means no audio
int sample_type; // as of 2.5; one of the SAMPLE_* flags
__int64 num_audio_samples; // changed as of 2.5
int nchannels; // as of 2.5
// Imagetype properties
int image_type;
enum {
IT_BFF = 1<<0,
IT_TFF = 1<<1,
IT_FIELDBASED = 1<<2
};
// useful functions of the above
bool HasVideo() const { return (width!=0); }
bool HasAudio() const { return (audio_samples_per_second!=0); }
bool IsRGB() const { return !!(pixel_type&CS_BGR); }
bool IsRGB24() const { return (pixel_type&CS_BGR24)==CS_BGR24; } // Clear out additional properties
bool IsRGB32() const { return (pixel_type & CS_BGR32) == CS_BGR32 ; }
bool IsYUV() const { return !!(pixel_type&CS_YUV ); }
bool IsYUY2() const { return (pixel_type & CS_YUY2) == CS_YUY2; }
// YV12 and I420 differ only in U/V plane order; both count as "YV12" here.
bool IsYV12() const { return ((pixel_type & CS_YV12) == CS_YV12)||((pixel_type & CS_I420) == CS_I420); }
bool IsColorSpace(int c_space) const { return ((pixel_type & c_space) == c_space); }
bool Is(int property) const { return ((pixel_type & property)==property ); }
bool IsPlanar() const { return !!(pixel_type & CS_PLANAR); }
bool IsFieldBased() const { return !!(image_type & IT_FIELDBASED); }
bool IsParityKnown() const { return ((image_type & IT_FIELDBASED)&&(image_type & (IT_BFF|IT_TFF))); }
bool IsBFF() const { return !!(image_type & IT_BFF); }
bool IsTFF() const { return !!(image_type & IT_TFF); }
bool IsVPlaneFirst() const {return ((pixel_type & CS_YV12) == CS_YV12); } // Don't use this
int BytesFromPixels(int pixels) const { return pixels * (BitsPerPixel()>>3); } // Will not work on planar images, but will return only luma planes
int RowSize() const { return BytesFromPixels(width); } // Also only returns first plane on planar images
// BMP-style size: rows padded to DWORD (4-byte) boundaries; planar formats
// add half again for the two quarter-size chroma planes.
int BMPSize() const { if (IsPlanar()) {int p = height * ((RowSize()+3) & ~3); p+=p>>1; return p; } return height * ((RowSize()+3) & ~3); }
__int64 AudioSamplesFromFrames(__int64 frames) const { return (fps_numerator && HasVideo()) ? ((__int64)(frames) * audio_samples_per_second * fps_denominator / fps_numerator) : 0; }
int FramesFromAudioSamples(__int64 samples) const { return (fps_denominator && HasAudio()) ? (int)((samples * (__int64)fps_numerator)/((__int64)fps_denominator * (__int64)audio_samples_per_second)) : 0; }
__int64 AudioSamplesFromBytes(__int64 bytes) const { return HasAudio() ? bytes / BytesPerAudioSample() : 0; }
__int64 BytesFromAudioSamples(__int64 samples) const { return samples * BytesPerAudioSample(); }
int AudioChannels() const { return HasAudio() ? nchannels : 0; }
int SampleType() const{ return sample_type;}
bool IsSampleType(int testtype) const{ return !!(sample_type&testtype);}
int SamplesPerSecond() const { return audio_samples_per_second; }
int BytesPerAudioSample() const { return nchannels*BytesPerChannelSample();}
void SetFieldBased(bool isfieldbased) { if (isfieldbased) image_type|=IT_FIELDBASED; else image_type&=~IT_FIELDBASED; }
void Set(int property) { image_type|=property; }
void Clear(int property) { image_type&=~property; }
// Bits per pixel of the whole packed/averaged frame (e.g. YV12 = 12).
// Returns 0 for unknown formats; note BytesFromPixels relies on this.
int BitsPerPixel() const {
switch (pixel_type) {
case CS_BGR24:
return 24;
case CS_BGR32:
return 32;
case CS_YUY2:
return 16;
case CS_YV12:
case CS_I420:
return 12;
default:
return 0;
}
}
int BytesPerChannelSample() const {
switch (sample_type) {
case SAMPLE_INT8:
return sizeof(signed char);
case SAMPLE_INT16:
return sizeof(signed short);
case SAMPLE_INT24:
return 3;
case SAMPLE_INT32:
return sizeof(signed int);
case SAMPLE_FLOAT:
return sizeof(SFLOAT);
default:
_ASSERTE("Sample type not recognized!");
return 0;
}
}
// useful mutator: stores the rate reduced to lowest terms; 0/x becomes 0/1
void SetFPS(unsigned numerator, unsigned denominator) {
if ((numerator == 0) || (denominator == 0)) {
fps_numerator = 0;
fps_denominator = 1;
}
else {
unsigned x=numerator, y=denominator;
while (y) { // find gcd
unsigned t = x%y; x = y; y = t;
}
fps_numerator = numerator/x;
fps_denominator = denominator/x;
}
}
// Range protected multiply-divide of FPS
void MulDivFPS(unsigned multiplier, unsigned divisor) {
unsigned __int64 numerator = UInt32x32To64(fps_numerator, multiplier);
unsigned __int64 denominator = UInt32x32To64(fps_denominator, divisor);
unsigned __int64 x=numerator, y=denominator;
while (y) { // find gcd
unsigned __int64 t = x%y; x = y; y = t;
}
numerator /= x; // normalize
denominator /= x;
unsigned __int64 temp = numerator | denominator; // Just looking top bit
unsigned u = 0;
// Count how far we must shift right so both values fit in 31 bits.
while (temp & 0xffffffff80000000) { // or perhaps > 16777216*2
temp = Int64ShrlMod32(temp, 1);
u++;
}
if (u) { // Scale to fit
const unsigned round = 1 << (u-1);
SetFPS( (unsigned)Int64ShrlMod32(numerator + round, u),
(unsigned)Int64ShrlMod32(denominator + round, u) );
}
else {
fps_numerator = (unsigned)numerator;
fps_denominator = (unsigned)denominator;
}
}
// Test for same colorspace
bool IsSameColorspace(const VideoInfo& vi) const {
if (vi.pixel_type == pixel_type) return TRUE;
if (IsYV12() && vi.IsYV12()) return TRUE;
return FALSE;
}
};
// VideoFrameBuffer holds information about a memory block which is used
// for video data. For efficiency, instances of this class are not deleted
// when the refcount reaches zero; instead they're stored in a linked list
// to be reused. The instances are deleted when the corresponding AVS
// file is closed.
class VideoFrameBuffer {
BYTE* const data;
const int data_size;
// sequence_number is incremented every time the buffer is changed, so
// that stale views can tell they're no longer valid.
long sequence_number;
friend class VideoFrame;
friend class Cache;
friend class ScriptEnvironment;
long refcount; // number of VideoFrames currently viewing this buffer
public:
VideoFrameBuffer(int size);
VideoFrameBuffer();
~VideoFrameBuffer();
const BYTE* GetReadPtr() const { return data; }
// Taking a write pointer bumps the sequence number, invalidating stale views.
BYTE* GetWritePtr() { ++sequence_number; return data; }
int GetDataSize() { return data_size; }
int GetSequenceNumber() { return sequence_number; }
int GetRefcount() { return refcount; }
};
class IClip;
class PClip;
class PVideoFrame;
class IScriptEnvironment;
class AVSValue;
// VideoFrame holds a "window" into a VideoFrameBuffer. Operator new
// is overloaded to recycle class instances.
class VideoFrame {
int refcount;
VideoFrameBuffer* const vfb;
const int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
friend class PVideoFrame;
void AddRef() { InterlockedIncrement((long *)&refcount); }
// When this is the last reference, also drop our hold on the underlying
// buffer before decrementing our own count.
void Release() { if (refcount==1) InterlockedDecrement(&vfb->refcount); InterlockedDecrement((long *)&refcount); }
friend class ScriptEnvironment;
friend class Cache;
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height);
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height, int _offsetU, int _offsetV, int _pitchUV);
void* operator new(unsigned size);
// TESTME: OFFSET U/V may be switched to what could be expected from AVI standard!
public:
int GetPitch() const { return pitch; }
int GetPitch(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: return pitchUV;} return pitch; }
int GetRowSize() const { return row_size; }
// Row size in bytes for the given plane; chroma planes are half width,
// *_ALIGNED selectors round up to FRAME_ALIGN when that still fits the pitch.
int GetRowSize(int plane) const {
switch (plane) {
case PLANAR_U: case PLANAR_V: if (pitchUV) return row_size>>1; else return 0;
case PLANAR_U_ALIGNED: case PLANAR_V_ALIGNED:
if (pitchUV) {
int r = ((row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)) )>>1; // Aligned rowsize
if (r<=pitchUV)
return r;
return row_size>>1;
} else return 0;
case PLANAR_Y_ALIGNED:
int r = (row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)); // Aligned rowsize
if (r<=pitch)
return r;
return row_size;
}
return row_size; }
int GetHeight() const { return height; }
int GetHeight(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: if (pitchUV) return height>>1; return 0;} return height; }
// generally you shouldn't use these three
VideoFrameBuffer* GetFrameBuffer() const { return vfb; }
int GetOffset() const { return offset; }
int GetOffset(int plane) const { switch (plane) {case PLANAR_U: return offsetU;case PLANAR_V: return offsetV;default: return offset;}; }
// in plugins use env->SubFrame()
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height) const;
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int pitchUV) const;
const BYTE* GetReadPtr() const { return vfb->GetReadPtr() + offset; }
const BYTE* GetReadPtr(int plane) const { return vfb->GetReadPtr() + GetOffset(plane); }
// Writable only when both this frame and its buffer are singly referenced.
bool IsWritable() const { return (refcount == 1 && vfb->refcount == 1); }
// Returns 0 when the frame is not writable.
BYTE* GetWritePtr() const {
if (vfb->GetRefcount()>1) {
_ASSERT(FALSE);
//throw AvisynthError("Internal Error - refcount was more than one!");
}
return IsWritable() ? (vfb->GetWritePtr() + offset) : 0;
}
// Per-plane write pointer. NOTE(review): only the Y plane is
// writability-checked; U/V return a raw pointer unconditionally.
BYTE* GetWritePtr(int plane) const {
if (plane==PLANAR_Y) {
if (vfb->GetRefcount()>1) {
_ASSERT(FALSE);
// throw AvisynthError("Internal Error - refcount was more than one!");
}
return IsWritable() ? vfb->GetWritePtr() + GetOffset(plane) : 0;
}
return vfb->data + GetOffset(plane);
}
~VideoFrame() { InterlockedDecrement(&vfb->refcount); }
};
// Cache hint values for IClip::SetCacheHints.
enum {
CACHE_NOTHING=0,
CACHE_RANGE=1,
CACHE_ALL=2,
CACHE_AUDIO=3,
CACHE_AUDIO_NONE=4
};
// Base class for all filters. Reference counted: PClip/AVSValue manage the
// count, and the object deletes itself when the count drops to zero.
class IClip {
friend class PClip;
friend class AVSValue;
int refcnt;
void AddRef() { InterlockedIncrement((long *)&refcnt); }
void Release() { InterlockedDecrement((long *)&refcnt); if (!refcnt) delete this; }
public:
IClip() : refcnt(0) {}
virtual int __stdcall GetVersion() { return AVISYNTH_INTERFACE_VERSION; }
virtual PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) = 0;
virtual bool __stdcall GetParity(int n) = 0; // return field parity if field_based, else parity of first field in frame
virtual void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) = 0; // start and count are in samples
virtual void __stdcall SetCacheHints(int cachehints,int frame_range) = 0 ; // We do not pass cache requests upwards, only to the next filter.
virtual const VideoInfo& __stdcall GetVideoInfo() = 0;
virtual __stdcall ~IClip() {} // MSVC-specific placement of __stdcall on a destructor
};
// Reference-counted smart pointer to IClip.
//
// Copying a PClip bumps the clip's refcount; destruction or reassignment
// drops it. The IClip deletes itself when its count reaches zero, so
// filters can pass PClips around by value without explicit cleanup.
class PClip {
  IClip* p;

  // Hand out the raw pointer with an extra reference already taken
  // (used by AVSValue, which stores a raw IClip*).
  IClip* GetPointerWithAddRef() const {
    if (p)
      p->AddRef();
    return p;
  }

  friend class AVSValue;
  friend class VideoFrame;

  // First-time initialization: take a reference; there is no previous
  // value to drop.
  void Init(IClip* x) {
    if (x)
      x->AddRef();
    p = x;
  }

  // Reassignment. AddRef the incoming clip BEFORE releasing the old one,
  // so self-assignment cannot destroy the object out from under us.
  void Set(IClip* x) {
    if (x)
      x->AddRef();
    if (p)
      p->Release();
    p = x;
  }

public:
  PClip() { p = 0; }
  PClip(const PClip& x) { Init(x.p); }
  PClip(IClip* x) { Init(x); }

  void operator=(IClip* x) { Set(x); }
  void operator=(const PClip& x) { Set(x.p); }

  IClip* operator->() const { return p; }

  // Allow use in boolean contexts: "if (clip)" / "if (!clip)".
  operator void*() const { return p; }
  bool operator!() const { return !p; }

  ~PClip() {
    if (p)
      p->Release();
  }
};
// Reference-counted smart pointer to VideoFrame.
//
// Mirrors PClip: copying bumps the frame's refcount, destruction or
// reassignment releases it (VideoFrame::Release also drops the hold on
// the underlying VideoFrameBuffer when the last reference goes away).
class PVideoFrame {
  VideoFrame* p;

  // First-time initialization: take a reference; nothing to drop yet.
  void Init(VideoFrame* x) {
    if (x)
      x->AddRef();
    p = x;
  }

  // Reassignment. AddRef the incoming frame BEFORE releasing the old one,
  // so self-assignment is safe.
  void Set(VideoFrame* x) {
    if (x)
      x->AddRef();
    if (p)
      p->Release();
    p = x;
  }

public:
  PVideoFrame() { p = 0; }
  PVideoFrame(const PVideoFrame& x) { Init(x.p); }
  PVideoFrame(VideoFrame* x) { Init(x); }

  void operator=(VideoFrame* x) { Set(x); }
  void operator=(const PVideoFrame& x) { Set(x.p); }

  VideoFrame* operator->() const { return p; }

  // Allow use in boolean contexts: "if (frame)" / "if (!frame)".
  operator void*() const { return p; }
  bool operator!() const { return !p; }

  ~PVideoFrame() {
    if (p)
      p->Release();
  }
};
// Tagged variant type used for all script values and filter arguments.
// The tag is a single character in "type"; see the comment on that field.
class AVSValue {
public:
AVSValue() { type = 'v'; }
AVSValue(IClip* c) { type = 'c'; clip = c; if (c) c->AddRef(); }
AVSValue(const PClip& c) { type = 'c'; clip = c.GetPointerWithAddRef(); }
AVSValue(bool b) { type = 'b'; boolean = b; }
AVSValue(int i) { type = 'i'; integer = i; }
// AVSValue(__int64 l) { type = 'l'; longlong = l; }
AVSValue(float f) { type = 'f'; floating_pt = f; }
AVSValue(double f) { type = 'f'; floating_pt = float(f); } // narrows to float
AVSValue(const char* s) { type = 's'; string = s; } // string is NOT copied
AVSValue(const AVSValue* a, int size) { type = 'a'; array = a; array_size = size; }
AVSValue(const AVSValue& v) { Assign(&v, true); }
~AVSValue() { if (IsClip() && clip) clip->Release(); }
AVSValue& operator=(const AVSValue& v) { Assign(&v, false); return *this; }
// Note that we transparently allow 'int' to be treated as 'float'.
// There are no int<->bool conversions, though.
bool Defined() const { return type != 'v'; }
bool IsClip() const { return type == 'c'; }
bool IsBool() const { return type == 'b'; }
bool IsInt() const { return type == 'i'; }
// bool IsLong() const { return (type == 'l'|| type == 'i'); }
bool IsFloat() const { return type == 'f' || type == 'i'; }
bool IsString() const { return type == 's'; }
bool IsArray() const { return type == 'a'; }
// The As* accessors assert the type in debug builds and fall back to a
// harmless value (0 / current value) in release builds.
PClip AsClip() const { _ASSERTE(IsClip()); return IsClip()?clip:0; }
bool AsBool() const { _ASSERTE(IsBool()); return boolean; }
int AsInt() const { _ASSERTE(IsInt()); return integer; }
// int AsLong() const { _ASSERTE(IsLong()); return longlong; }
const char* AsString() const { _ASSERTE(IsString()); return IsString()?string:0; }
double AsFloat() const { _ASSERTE(IsFloat()); return IsInt()?integer:floating_pt; }
// Defaulted variants: return def when the value is undefined.
bool AsBool(bool def) const { _ASSERTE(IsBool()||!Defined()); return IsBool() ? boolean : def; }
int AsInt(int def) const { _ASSERTE(IsInt()||!Defined()); return IsInt() ? integer : def; }
double AsFloat(double def) const { _ASSERTE(IsFloat()||!Defined()); return IsInt() ? integer : type=='f' ? floating_pt : def; }
const char* AsString(const char* def) const { _ASSERTE(IsString()||!Defined()); return IsString() ? string : def; }
int ArraySize() const { _ASSERTE(IsArray()); return IsArray()?array_size:1; }
const AVSValue& operator[](int index) const {
_ASSERTE(IsArray() && index>=0 && index<array_size);
return (IsArray() && index>=0 && index<array_size) ? array[index] : *this;
}
private:
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
short array_size;
union {
IClip* clip;
bool boolean;
int integer;
float floating_pt;
const char* string;
const AVSValue* array;
// __int64 longlong;
};
// Copy helper shared by the copy constructor (init=true, no old value to
// release) and operator= (init=false).
void Assign(const AVSValue* src, bool init) {
if (src->IsClip() && src->clip)
src->clip->AddRef();
if (!init && IsClip() && clip)
clip->Release();
// make sure this copies the whole struct!
// NOTE(review): copies exactly 8 bytes (two __int32 words), i.e.
// type+array_size plus a 4-byte union -- only valid where pointers
// are 32 bits.
((__int32*)this)[0] = ((__int32*)src)[0];
((__int32*)this)[1] = ((__int32*)src)[1];
}
};
// instantiable null filter: forwards every request to its child clip.
// Filter authors derive from this and override only what they change.
class GenericVideoFilter : public IClip {
protected:
PClip child; // upstream clip all default implementations delegate to
VideoInfo vi; // copied from child at construction; subclasses may edit it
public:
GenericVideoFilter(PClip _child) : child(_child) { vi = child->GetVideoInfo(); }
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) { return child->GetFrame(n, env); }
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { child->GetAudio(buf, start, count, env); }
const VideoInfo& __stdcall GetVideoInfo() { return vi; }
bool __stdcall GetParity(int n) { return child->GetParity(n); }
void __stdcall SetCacheHints(int cachehints,int frame_range) { } ; // We do not pass cache requests upwards, only to the next filter.
};
// Exception type thrown throughout AviSynth; carries a static message
// string (the pointed-to text is not owned or freed).
class AvisynthError /* exception */ {
public:
const char* const msg;
AvisynthError(const char* _msg) : msg(_msg) {}
};
/* Helper classes useful to plugin authors */
// Pads planar frames so every row meets the FRAME_ALIGN requirement.
// (Implementation lives in avisynth.dll; only declared here.)
class AlignPlanar : public GenericVideoFilter
{
public:
AlignPlanar(PClip _clip);
static PClip Create(PClip clip);
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
};
// Fills the padding introduced by alignment. (Declared here, defined in the DLL.)
class FillBorder : public GenericVideoFilter
{
public:
FillBorder(PClip _clip);
static PClip Create(PClip clip);
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
};
class ConvertAudio : public GenericVideoFilter
/**
* Helper class to convert audio to any format
**/
{
public:
ConvertAudio(PClip _clip, int prefered_format);
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env);
void __stdcall SetCacheHints(int cachehints,int frame_range); // We do pass cache requests upwards, to the cache!
// Factory: converts to one of sample_type if already matching, else prefered_type.
static PClip Create(PClip clip, int sample_type, int prefered_type);
// Script-visible factory functions, one per target sample format.
static AVSValue __cdecl Create_float(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_32bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_24bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_16bit(AVSValue args, void*, IScriptEnvironment*);
static AVSValue __cdecl Create_8bit(AVSValue args, void*, IScriptEnvironment*);
virtual ~ConvertAudio();
private:
// Conversion kernels; the _3DN/_SSE/_SSE2 variants are CPU-dispatched
// implementations of the same conversions.
void convertToFloat(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_3DN(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_SSE(char* inbuf, float* outbuf, char sample_type, int count);
void convertToFloat_SSE2(char* inbuf, float* outbuf, char sample_type, int count);
void convertFromFloat(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_3DN(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_SSE(float* inbuf, void* outbuf, char sample_type, int count);
void convertFromFloat_SSE2(float* inbuf, void* outbuf, char sample_type, int count);
// Clamp a float sample into the target integer range.
__inline int Saturate_int8(float n);
__inline short Saturate_int16(float n);
__inline int Saturate_int24(float n);
__inline int Saturate_int32(float n);
char src_format;
char dst_format;
int src_bps;
char *tempbuffer;
SFLOAT *floatbuffer;
int tempbuffer_size;
};
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
// Bit flags: each value occupies (at least) one distinct bit so results of
// GetCPUFlags() can be tested with bitwise AND. These values are part of
// the public plugin ABI and must never be renumbered.
enum {
  /* slowest CPU to support extension */
  CPUF_FORCE = 0x01, // N/A
  CPUF_FPU = 0x02, // 386/486DX
  CPUF_MMX = 0x04, // P55C, K6, PII
  CPUF_INTEGER_SSE = 0x08, // PIII, Athlon
  CPUF_SSE = 0x10, // PIII, Athlon XP/MP
  CPUF_SSE2 = 0x20, // PIV, Hammer
  CPUF_3DNOW = 0x40, // K6-2
  CPUF_3DNOW_EXT = 0x80, // Athlon
  // Deliberately NOT a fresh bit: 0xA0 == CPUF_3DNOW_EXT | CPUF_SSE2,
  // as the original comment below explains.
  CPUF_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2, which only Hammer
  // will have anyway)
  CPUF_SSE3 = 0x100, // Some P4 & Athlon 64.
};
/* Integer range limits used by AviSynth headers.
 * Parenthesized so the expansion cannot change meaning inside a larger
 * expression (an unparenthesized leading '-' is a classic macro hazard).
 * NOTE: MIN_INT is historically -0x7fffffff, i.e. INT_MIN + 1 on two's
 * complement targets; the value is kept as-is for ABI/behavior
 * compatibility with existing plugins. */
#define MAX_INT (0x7fffffff)
#define MIN_INT (-0x7fffffff)
// Abstract host interface handed to every AviSynth filter. The vtable
// layout is part of the public plugin ABI: the order, signatures, and
// calling conventions of these pure-virtual methods must not change.
class IScriptEnvironment {
public:
virtual __stdcall ~IScriptEnvironment() {}
// Bitmask of CPUF_* flags (see enum above).
virtual /*static*/ long __stdcall GetCPUFlags() = 0;
// Copies s into environment-owned storage; returned pointer outlives the caller's buffer.
virtual char* __stdcall SaveString(const char* s, int length = -1) = 0;
virtual char* __stdcall Sprintf(const char* fmt, ...) = 0;
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
virtual char* __stdcall VSprintf(const char* fmt, void* val) = 0;
// Does not return: raises an AviSynth error with a printf-style message.
__declspec(noreturn) virtual void __stdcall ThrowError(const char* fmt, ...) = 0;
class NotFound /*exception*/ {}; // thrown by Invoke and GetVar
typedef AVSValue (__cdecl *ApplyFunc)(AVSValue args, void* user_data, IScriptEnvironment* env);
// Script-function registration and invocation.
virtual void __stdcall AddFunction(const char* name, const char* params, ApplyFunc apply, void* user_data) = 0;
virtual bool __stdcall FunctionExists(const char* name) = 0;
virtual AVSValue __stdcall Invoke(const char* name, const AVSValue args, const char** arg_names=0) = 0;
// Script-variable access; GetVar throws NotFound for unknown names.
virtual AVSValue __stdcall GetVar(const char* name) = 0;
virtual bool __stdcall SetVar(const char* name, const AVSValue& val) = 0;
virtual bool __stdcall SetGlobalVar(const char* name, const AVSValue& val) = 0;
virtual void __stdcall PushContext(int level=0) = 0;
virtual void __stdcall PopContext() = 0;
// align should be 4 or 8
virtual PVideoFrame __stdcall NewVideoFrame(const VideoInfo& vi, int align=FRAME_ALIGN) = 0;
virtual bool __stdcall MakeWritable(PVideoFrame* pvf) = 0;
virtual /*static*/ void __stdcall BitBlt(BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height) = 0;
typedef void (__cdecl *ShutdownFunc)(void* user_data, IScriptEnvironment* env);
virtual void __stdcall AtExit(ShutdownFunc function, void* user_data) = 0;
virtual void __stdcall CheckVersion(int version = AVISYNTH_INTERFACE_VERSION) = 0;
// Zero-copy cropped view into an existing frame.
virtual PVideoFrame __stdcall Subframe(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height) = 0;
virtual int __stdcall SetMemoryMax(int mem) = 0;
virtual int __stdcall SetWorkingDir(const char * newdir) = 0;
virtual void* __stdcall ManageCache(int key, void* data) = 0;
enum PlanarChromaAlignmentMode {
PlanarChromaAlignmentOff,
PlanarChromaAlignmentOn,
PlanarChromaAlignmentTest };
virtual bool __stdcall PlanarChromaAlignment(PlanarChromaAlignmentMode key) = 0;
// Subframe variant carrying separate chroma-plane offsets/pitch.
virtual PVideoFrame __stdcall SubframePlanar(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV) = 0;
};
// avisynth.dll exports this; it's a way to use it as a library, without
// writing an AVS script or without going through AVIFile.
IScriptEnvironment* __stdcall CreateScriptEnvironment(int version = AVISYNTH_INTERFACE_VERSION);
// Restore the structure packing that was pushed earlier in this header.
#pragma pack(pop)
#endif //__AVISYNTH_H__

View file

@ -1,92 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Constructs the shared audio-source base: zeroes the AviSynth VideoInfo
// struct and allocates the persistent scratch buffer that decoded audio is
// written into (AVCODEC_MAX_AUDIO_FRAME_SIZE is libavcodec's worst case).
// Fix: dropped the stray ';' after the function body (ill-formed at
// namespace scope in pre-C++11 dialects).
FFAudioBase::FFAudioBase() {
	memset(&VI, 0, sizeof(VI));
	DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];
}
// Releases the decoding scratch buffer allocated in the constructor.
// Fix: dropped the stray ';' after the function body (ill-formed at
// namespace scope in pre-C++11 dialects).
FFAudioBase::~FFAudioBase() {
	delete[] DecodingBuffer;
}
// Returns the index of the audio block to start decoding from for a seek
// to Sample: the keyframe block starting exactly at Sample, otherwise the
// block just before the first keyframe past Sample, otherwise the last
// block. NOTE: the returned block i-1 is not itself guaranteed to be a
// keyframe; callers compensate by backing up further (see GetAudio's -10).
size_t FFAudioBase::FindClosestAudioKeyFrame(int64_t Sample) {
	// Fix: the original returned i - 1 unconditionally, which underflowed
	// the unsigned index to SIZE_MAX when the very first keyframe already
	// lay past Sample (and likewise SI.size() - 1 underflowed for an empty
	// index). Clamp to 0 instead; the only caller in this file cast the
	// result through int64_t and clamped negatives to 0 anyway, so
	// observed behavior is preserved without relying on wraparound.
	if (SI.empty())
		return 0;
	for (size_t i = 0; i < SI.size(); i++) {
		if (SI[i].SampleStart == Sample && SI[i].KeyFrame)
			return i;
		else if (SI[i].SampleStart > Sample && SI[i].KeyFrame)
			return (i == 0) ? 0 : i - 1;
	}
	return SI.size() - 1;
}
// Loads a previously saved audio sample index ("<source>.ffas<track>cache"
// by default) into SI and VI.num_audio_samples. Returns false when the
// cache is missing, malformed, or truncated, in which case the caller
// re-indexes the track from scratch.
bool FFAudioBase::LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack) {
	char DefaultCacheFilename[1024];
	// snprintf instead of sprintf: ASource is a caller-supplied path and
	// may not fit in 1024 bytes.
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffas%dcache", ASource, AAudioTrack);
	if (!strcmp(AAudioCacheFile, ""))
		AAudioCacheFile = DefaultCacheFilename;
	FILE *CacheFile = fopen(AAudioCacheFile, "r");
	if (!CacheFile)
		return false;
	// Fix: %u must be paired with unsigned int -- the original scanned
	// into a size_t, which is undefined behavior on 64-bit targets.
	unsigned int AudioBlocks = 0;
	if (fscanf(CacheFile, "%lld %u\r\n", &VI.num_audio_samples, &AudioBlocks) != 2 || VI.num_audio_samples <= 0 || AudioBlocks == 0) {
		VI.num_audio_samples = 0;
		fclose(CacheFile);
		return false;
	}
	for (unsigned int i = 0; i < AudioBlocks; i++) {
		int64_t SampleStart;
		int64_t FilePos;
		unsigned int FrameSize;
		int Flags;
		// Fix: the original ignored fscanf's result and pushed
		// uninitialized garbage for truncated caches; reject instead.
		if (fscanf(CacheFile, "%lld %lld %u %d\r\n", &SampleStart, &FilePos, &FrameSize, &Flags) != 4) {
			SI.clear();
			VI.num_audio_samples = 0;
			fclose(CacheFile);
			return false;
		}
		SI.push_back(SampleInfo(SampleStart, FilePos, FrameSize, (Flags & 1) != 0));
	}
	fclose(CacheFile);
	return true;
}
// Writes the in-memory sample index SI (plus the total sample count) to
// the audio cache file so subsequent opens can skip indexing. Returns
// false when the file cannot be opened or fully flushed.
bool FFAudioBase::SaveSampleInfoToFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack) {
	char DefaultCacheFilename[1024];
	// snprintf instead of sprintf: ASource is caller-supplied and may not fit.
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffas%dcache", ASource, AAudioTrack);
	if (!strcmp(AAudioCacheFile, ""))
		AAudioCacheFile = DefaultCacheFilename;
	FILE *CacheFile = fopen(AAudioCacheFile, "wb");
	if (!CacheFile)
		return false;
	// Fix: SI.size() is size_t; passing it for %u is undefined behavior on
	// 64-bit targets, so cast explicitly (block counts fit easily).
	fprintf(CacheFile, "%lld %u\r\n", VI.num_audio_samples, (unsigned int)SI.size());
	for (size_t i = 0; i < SI.size(); i++) {
		int Flags = SI[i].KeyFrame ? 1 : 0;
		fprintf(CacheFile, "%lld %lld %u %d\r\n", SI[i].SampleStart, SI[i].FilePos, SI[i].FrameSize, Flags);
	}
	// fclose flushes buffered output; report failure so a bad cache is
	// rewritten next run instead of silently trusted.
	return fclose(CacheFile) == 0;
}

View file

@ -1,293 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
int FFBase::FrameFromDTS(int64_t ADTS) {
for (int i = 0; i < (int)Frames.size(); i++)
if (Frames[i].DTS == ADTS)
return i;
return -1;
}
int FFBase::ClosestFrameFromDTS(int64_t ADTS) {
int Frame = 0;
int64_t BestDiff = 0xFFFFFFFFFFFFFFLL; // big number
for (int i = 0; i < (int)Frames.size(); i++) {
int64_t CurrentDiff = FFABS(Frames[i].DTS - ADTS);
if (CurrentDiff < BestDiff) {
BestDiff = CurrentDiff;
Frame = i;
}
}
return Frame;
}
// Walks backwards from AFrame and returns the index of the nearest
// preceding keyframe; falls back to frame 0 when none is marked.
int FFBase::FindClosestKeyFrame(int AFrame) {
	int Index = AFrame;
	while (Index > 0) {
		if (Frames[Index].KeyFrame)
			return Index;
		Index--;
	}
	return 0;
}
// Loads a previously saved video frame index ("<source>.ffv<track>cache"
// by default) into Frames and VI.num_frames. Returns false when the cache
// is missing, malformed, or truncated, triggering a full re-index.
bool FFBase::LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack) {
	char DefaultCacheFilename[1024];
	// snprintf instead of sprintf: ASource is caller-supplied and may not fit.
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffv%dcache", ASource, AVideoTrack);
	if (!strcmp(AVideoCacheFile, ""))
		AVideoCacheFile = DefaultCacheFilename;
	FILE *CacheFile = fopen(AVideoCacheFile, "r");
	if (!CacheFile)
		return false;
	if (fscanf(CacheFile, "%d\r\n", &VI.num_frames) != 1 || VI.num_frames <= 0) {
		VI.num_frames = 0;
		fclose(CacheFile);
		return false;
	}
	for (int i = 0; i < VI.num_frames; i++) {
		int64_t DTSTemp;
		int KFTemp;
		// Fix: the original ignored fscanf's result and pushed
		// uninitialized garbage for truncated caches; reject instead.
		if (fscanf(CacheFile, "%lld %d\r\n", &DTSTemp, &KFTemp) != 2) {
			Frames.clear();
			VI.num_frames = 0;
			fclose(CacheFile);
			return false;
		}
		Frames.push_back(FrameInfo(DTSTemp, KFTemp != 0));
	}
	fclose(CacheFile);
	return true;
}
// Writes the in-memory frame index (count, then one "DTS keyflag" line per
// frame) to the video cache file. Returns false when the file cannot be
// opened or fully flushed.
bool FFBase::SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack) {
	char DefaultCacheFilename[1024];
	// snprintf instead of sprintf: ASource is caller-supplied and may not fit.
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffv%dcache", ASource, AVideoTrack);
	if (!strcmp(AVideoCacheFile, ""))
		AVideoCacheFile = DefaultCacheFilename;
	FILE *CacheFile = fopen(AVideoCacheFile, "wb");
	if (!CacheFile)
		return false;
	fprintf(CacheFile, "%d\r\n", VI.num_frames);
	for (int i = 0; i < VI.num_frames; i++)
		fprintf(CacheFile, "%lld %d\r\n", Frames[i].DTS, (int)(Frames[i].KeyFrame ? 1 : 0));
	// fclose flushes buffered output; report failure so a bad cache is
	// rewritten next run instead of silently trusted.
	return fclose(CacheFile) == 0;
}
// Writes a "timecode format v2" file (header line, then one scaled
// timestamp per line) for the indexed frames. Returns true on success or
// when no timecode file was requested (empty filename).
// NOTE(review): DTS values are routed through a std::set, so duplicate
// timestamps are collapsed and output is sorted ascending -- the file may
// contain fewer lines than VI.num_frames.
bool FFBase::SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int64_t ScaleN) {
if (!strcmp(ATimecodeFile, ""))
return true;
FILE *TimecodeFile = fopen(ATimecodeFile, "wb");
if (!TimecodeFile)
return false;
std::set<int64_t> Timecodes;
for (int i = 0; i < VI.num_frames; i++)
Timecodes.insert(Frames[i].DTS);
fprintf(TimecodeFile, "# timecode format v2\r\n");
// DTS * ScaleD / ScaleN rescales the container time base to the output
// unit (presumably milliseconds -- confirm against callers).
for (std::set<int64_t>::iterator Cur=Timecodes.begin(); Cur!=Timecodes.end(); Cur++)
fprintf(TimecodeFile, "%f\r\n", (*Cur * ScaleD) / (double)ScaleN);
fclose(TimecodeFile);
return true;
}
// Tries to open an existing raw audio cache ("<source>.ffa<track>cache" by
// default). On success the object takes ownership of the FILE* in
// RawAudioCache, switches AudioCacheType to acRaw, and derives the total
// sample count from the file size. Returns false when the cache is absent
// or empty (the track must then be decoded and cached from scratch).
// NOTE(review): the default filename is built with unbounded sprintf; a
// source path longer than ~1000 chars overflows DefaultCacheFilename.
bool FFBase::OpenAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
char DefaultCacheFilename[1024];
sprintf(DefaultCacheFilename, "%s.ffa%dcache", ASource, AAudioTrack);
if (!strcmp(AAudioCacheFile, ""))
AAudioCacheFile = DefaultCacheFilename;
// Is an empty file?
FILE *FCFile = fopen(AAudioCacheFile, "rb");
int64_t CacheSize;
if (FCFile) {
// Seek to the end to measure the file, then rewind for reading.
_fseeki64(FCFile, 0, SEEK_END);
CacheSize = _ftelli64(FCFile);
_fseeki64(FCFile, 0, SEEK_SET);
if (CacheSize <= 0) {
fclose(FCFile);
FCFile = NULL;
return false;
}
} else {
return false;
}
// Raw audio
// The cache stores raw PCM in the output format, so its byte length maps
// directly to a sample count.
VI.num_audio_samples = VI.AudioSamplesFromBytes(CacheSize);
AudioCacheType = acRaw;
RawAudioCache = FCFile;
FCFile = NULL;
return true;
}
// Opens (truncating) the raw audio cache file for writing and returns the
// stream; throws through Env on failure. The caller owns the FILE* and
// must close it with CloseRawCacheWriter().
FILE *FFBase::NewRawCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env) {
	char DefaultCacheFilename[1024];
	// snprintf instead of sprintf: ASource is caller-supplied and may not fit.
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffa%dcache", ASource, AAudioTrack);
	if (!strcmp(AAudioCacheFile, ""))
		AAudioCacheFile = DefaultCacheFilename;
	FILE *RCF = fopen(AAudioCacheFile, "wb");
	if (RCF == NULL)
		Env->ThrowError("FFmpegSource: Failed to open '%s' for writing", AAudioCacheFile);
	return RCF;
}
// Flushes and closes a cache stream obtained from NewRawCacheWriter().
// NOTE(review): fclose's return value is ignored, so a failed final flush
// of the cache file goes undetected.
void FFBase::CloseRawCacheWriter(FILE *ARawCache) {
fclose(ARawCache);
}
// Sets up libpostproc for the requested filter string/quality and input
// pixel format: allocates PPMode, PPContext, and the PPPicture scratch
// frame used by OutputFrame(). A no-op when APPString is empty; throws
// through Env on any invalid setting or allocation failure.
void FFBase::InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env) {
if (!strcmp(APPString, ""))
return;
if (AQuality < 0 || AQuality > PP_QUALITY_MAX)
Env->ThrowError("FFmpegSource: Quality is out of range");
// Unsafe?
PPMode = pp_get_mode_by_name_and_quality(APPString, AQuality);
if (!PPMode)
Env->ThrowError("FFmpegSource: Invalid postprocesing settings");
// libpostproc needs both the CPU capability flags and the chroma
// subsampling of the input format.
int Flags = GetPPCPUFlags(Env);
switch (APixelFormat) {
case PIX_FMT_YUV420P: Flags |= PP_FORMAT_420; break;
case PIX_FMT_YUV422P: Flags |= PP_FORMAT_422; break;
case PIX_FMT_YUV411P: Flags |= PP_FORMAT_411; break;
case PIX_FMT_YUV444P: Flags |= PP_FORMAT_444; break;
default:
Env->ThrowError("FFmpegSource: Input format is not supported for postprocessing");
}
PPContext = pp_get_context(VI.width, VI.height, Flags);
// PPPicture receives the postprocessed output before conversion/blitting.
if (avpicture_alloc(&PPPicture, APixelFormat, AWidth, AHeight) < 0)
Env->ThrowError("FFmpegSource: Failed to allocate picture");
}
// Chooses the AviSynth output colorspace closest to the decoder's pixel
// format (among I420/YUY2/RGB32/RGB24), sets VI.pixel_type accordingly,
// and prepares an swscale context when an actual conversion is needed.
// Also rounds frame dimensions down to even where the chosen format
// requires chroma subsampling alignment.
void FFBase::SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env) {
int Loss;
int BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUVJ420P) | (1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), ACurrentFormat, 1 /* Required to prevent pointless RGB32 => RGB24 conversion */, &Loss);
switch (BestFormat) {
case PIX_FMT_YUVJ420P: // stupid yv12 distinctions, also inexplicably completely undeniably incompatible with all other supported output formats
case PIX_FMT_YUV420P: VI.pixel_type = VideoInfo::CS_I420; break;
case PIX_FMT_YUYV422: VI.pixel_type = VideoInfo::CS_YUY2; break;
case PIX_FMT_RGB32: VI.pixel_type = VideoInfo::CS_BGR32; break;
case PIX_FMT_BGR24: VI.pixel_type = VideoInfo::CS_BGR24; break;
default:
Env->ThrowError("FFmpegSource: No suitable output format found");
}
if (BestFormat != ACurrentFormat) {
// Remember the target format; OutputFrame() checks ConvertToFormat to
// decide between sws_scale and a plain blit.
ConvertToFormat = BestFormat;
SWS = sws_getContext(VI.width, VI.height, ACurrentFormat, VI.width, VI.height, ConvertToFormat, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
}
// 4:2:0 output needs even width and height; YUY2 needs even width.
if (BestFormat == PIX_FMT_YUVJ420P || BestFormat == PIX_FMT_YUV420P) {
VI.height -= VI.height & 1;
VI.width -= VI.width & 1;
}
if (BestFormat == PIX_FMT_YUYV422) {
VI.width -= VI.width & 1;
}
}
// Converts a decoded libavcodec frame into a new AviSynth video frame:
// optionally postprocesses (libpostproc), then either colorspace-converts
// via swscale (when SetOutputFormat() decided a conversion is needed) or
// blits the planes directly. RGB output is written bottom-up (negative
// stride from the last row), matching AviSynth's RGB layout.
PVideoFrame FFBase::OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env) {
AVPicture *SrcPicture = (AVPicture *)AFrame;
if (PPContext) {
// Postprocess into PPPicture (allocated by InitPP) and use it as the source.
pp_postprocess(const_cast<const uint8_t **>(AFrame->data), AFrame->linesize, PPPicture.data, PPPicture.linesize, VI.width, VI.height, AFrame->qscale_table, AFrame->qstride, PPMode, PPContext, AFrame->pict_type | (AFrame->qscale_type ? PP_PICT_TYPE_QP2 : 0));
SrcPicture = &PPPicture;
}
PVideoFrame Dst = Env->NewVideoFrame(VI);
if (ConvertToFormat != PIX_FMT_NONE && VI.pixel_type == VideoInfo::CS_I420) {
// Planar conversion: hand all three plane pointers/pitches to swscale.
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
} else if (ConvertToFormat != PIX_FMT_NONE) {
if (VI.IsRGB()) {
// Point at the last row with a negative pitch to flip vertically.
uint8_t *DstData[1] = {Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1)};
int DstStride[1] = {-Dst->GetPitch()};
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
} else {
uint8_t *DstData[1] = {Dst->GetWritePtr()};
int DstStride[1] = {Dst->GetPitch()};
sws_scale(SWS, SrcPicture->data, SrcPicture->linesize, 0, VI.height, DstData, DstStride);
}
} else if (VI.pixel_type == VideoInfo::CS_I420) {
// No conversion needed: copy each plane straight across.
Env->BitBlt(Dst->GetWritePtr(PLANAR_Y), Dst->GetPitch(PLANAR_Y), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(PLANAR_Y), Dst->GetHeight(PLANAR_Y));
Env->BitBlt(Dst->GetWritePtr(PLANAR_U), Dst->GetPitch(PLANAR_U), SrcPicture->data[1], SrcPicture->linesize[1], Dst->GetRowSize(PLANAR_U), Dst->GetHeight(PLANAR_U));
Env->BitBlt(Dst->GetWritePtr(PLANAR_V), Dst->GetPitch(PLANAR_V), SrcPicture->data[2], SrcPicture->linesize[2], Dst->GetRowSize(PLANAR_V), Dst->GetHeight(PLANAR_V));
} else {
if (VI.IsRGB())
Env->BitBlt(Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1), -Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
else
Env->BitBlt(Dst->GetWritePtr(), Dst->GetPitch(), SrcPicture->data[0], SrcPicture->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
}
return Dst;
}
// Serves an AviSynth audio request straight from the raw PCM cache file;
// throws when no audio cache is active.
// NOTE(review): the fread result is ignored -- a short read (e.g. a
// request running past EOF) leaves the tail of Buf with stale contents
// rather than zeroes or an error.
void FFBase::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment* Env) {
if (AudioCacheType == acRaw) {
_fseeki64(RawAudioCache, VI.BytesFromAudioSamples(Start), SEEK_SET);
fread(Buf, 1, static_cast<size_t>(VI.BytesFromAudioSamples(Count)), RawAudioCache);
} else {
Env->ThrowError("FFmpegSource: Audio requested but none available");
}
}
// Base-class constructor: zeroes the AviSynth VideoInfo, nulls every
// optional subsystem (audio cache, postprocessing, swscale), and allocates
// the two persistent decode buffers (audio scratch + reusable AVFrame).
FFBase::FFBase() {
memset(&VI, 0, sizeof(VI));
AudioCacheType = acNone;
RawAudioCache = NULL;
PPContext = NULL;
PPMode = NULL;
SWS = NULL;
LastFrameNum = -1; // no frame decoded yet
DecodingBuffer = new uint8_t[AVCODEC_MAX_AUDIO_FRAME_SIZE];
ConvertToFormat = PIX_FMT_NONE; // SetOutputFormat() may change this later
memset(&PPPicture, 0, sizeof(PPPicture));
DecodeFrame = avcodec_alloc_frame();
}
// Releases everything the constructor and the optional init helpers
// (InitPP, SetOutputFormat, OpenAudioCache) may have allocated. Each
// pointer is only freed when its subsystem was actually set up.
FFBase::~FFBase() {
delete [] DecodingBuffer;
if (RawAudioCache)
fclose(RawAudioCache);
if (SWS)
sws_freeContext(SWS);
if (PPMode)
pp_free_mode(PPMode);
if (PPContext)
pp_free_context(PPContext);
// PPPicture was zeroed in the ctor, so this is safe even when InitPP
// never allocated it (presumably avpicture_free tolerates null planes --
// confirm against the ffmpeg version in use).
avpicture_free(&PPPicture);
av_free(DecodeFrame);
}

View file

@ -1,297 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Resolves a user-supplied track number to a validated Matroska track
// index of the requested type. Index == -1 means "first track of this
// type"; values <= -2 are passed through as the sentinel -2 (presumably
// "track explicitly disabled" -- confirm against callers). Throws through
// Env for missing, out-of-range, or wrong-type tracks.
// NOTE(review): duplicated verbatim in FFMatroskaSource::GetTrackIndex;
// a shared free function would remove the duplication.
int FFMatroskaAudioSource::GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType & TT_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType & TT_VIDEO) ? "video" : "audio");
TrackInfo *TI = mkv_GetTrackInfo(MF, Index);
if (TI->Type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType & TT_VIDEO) ? "video" : "audio");
return Index;
}
// Opens a Matroska file as an audio-only AviSynth source: wires the Haali
// parser to stdio, selects and opens the audio track's codec, fills in the
// AviSynth audio properties, and either loads the sample index from the
// cache file or builds it by scanning (and, when the codec has no fixed
// frame size, decoding) every audio block. Throws through Env on any
// failure; on success the object owns ST.fp, MF, AudioCS, and
// AudioCodecContext (released in the destructor).
FFMatroskaAudioSource::FFMatroskaAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, IScriptEnvironment *Env) {
int AudioTrack;
AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
TrackInfo *VideoTI = NULL;
BufferSize = 0;
Buffer = NULL;
AudioCS = NULL;
// Hook the Matroska parser's I/O callbacks up to plain stdio wrappers.
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
ST.base.scan = (longlong (__cdecl *)(InputStream *,ulonglong,unsigned int))StdIoScan;
ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
ST.base.memalloc = (void *(__cdecl *)(InputStream *,size_t))StdIoMalloc;
ST.base.memrealloc = (void *(__cdecl *)(InputStream *,void *,size_t))StdIoRealloc;
ST.base.memfree = (void (__cdecl *)(InputStream *,void *)) StdIoFree;
ST.base.progress = (int (__cdecl *)(InputStream *,ulonglong,ulonglong))StdIoProgress;
ST.fp = fopen(ASource, "rb");
if (ST.fp == NULL)
Env->ThrowError("FFmpegSource: Can't open '%s': %s", ASource, strerror(errno));
setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);
MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
if (MF == NULL) {
fclose(ST.fp);
Env->ThrowError("FFmpegSource: Can't parse Matroska file: %s", ErrorMessage);
}
AudioTrack = GetTrackIndex(AAudioTrack, TT_AUDIO, Env);
// Deliver only the selected audio track from mkv_ReadFrame.
mkv_SetTrackMask(MF, ~(1 << AudioTrack));
TrackInfo *AudioTI = mkv_GetTrackInfo(MF, AudioTrack);
// Content compression (e.g. header stripping) needs a decompressor.
if (AudioTI->CompEnabled) {
AudioCS = cs_Create(MF, AudioTrack, ErrorMessage, sizeof(ErrorMessage));
if (AudioCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
AudioCodecContext = avcodec_alloc_context();
// extradata points into the parser-owned CodecPrivate blob; it must not
// be freed by libavcodec.
AudioCodecContext->extradata = (uint8_t *)AudioTI->CodecPrivate;
AudioCodecContext->extradata_size = AudioTI->CodecPrivateSize;
AudioCodec = avcodec_find_decoder(MatroskaToFFCodecID(AudioTI));
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
// Fix for ac3 and other codecs where decoding a block of audio is required to get information about it
if (AudioCodecContext->channels == 0 || AudioCodecContext->sample_rate == 0) {
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
}
// Rewind and reset decoder state so indexing below starts from the top.
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(AudioCodecContext);
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
//load audio cache
bool ACacheIsValid = LoadSampleInfoFromFile(AAudioCache, ASource, AudioTrack);
// Needs to be indexed?
if (!ACacheIsValid) {
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
// One SampleInfo per Matroska block; sample counts come either from the
// codec's fixed frame size or from actually decoding the block.
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
SI.push_back(SampleInfo(VI.num_audio_samples, FilePos, FrameSize, (FrameFlags & FRAME_KF) != 0));
if (AudioCodecContext->frame_size > 0) {
VI.num_audio_samples += AudioCodecContext->frame_size;
} else {
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
if (Ret > 0) {
int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
}
}
}
}
// Leave the parser and decoder positioned at the start for playback.
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
avcodec_flush_buffers(AudioCodecContext);
if (!SaveSampleInfoToFile(AAudioCache, ASource, AudioTrack))
Env->ThrowError("FFmpegSource: Failed to save audio cache index");
}
if (VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
}
// Tears down everything the constructor acquired: the realloc'd frame
// buffer, the Matroska parser handle, the underlying stdio stream, and the
// codec context.
// NOTE(review): AudioCodecContext->extradata points into parser-owned
// memory (set in the ctor), and MF is closed before the codec -- verify
// neither avcodec_close nor av_free touches extradata after mkv_Close.
FFMatroskaAudioSource::~FFMatroskaAudioSource() {
free(Buffer);
mkv_Close(MF);
fclose(ST.fp);
if (AudioCodecContext)
avcodec_close(AudioCodecContext);
av_free(AudioCodecContext);
}
int FFMatroskaAudioSource::ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env) {
if (ACS) {
char CSBuffer[4096];
unsigned int DecompressedFrameSize = 0;
cs_NextFrame(ACS, AFilePos, AFrameSize);
for (;;) {
int ReadBytes = cs_ReadData(ACS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(ACS));
if (ReadBytes == 0) {
return DecompressedFrameSize;
}
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (_fseeki64(ST.fp, AFilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < AFrameSize) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, AFrameSize, ST.fp);
if (ReadBytes != AFrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
Env->ThrowError("FFmpegSource: Unknown read error");
}
return AFrameSize;
}
return 0;
}
// Serves an AviSynth audio request by decoding forward from a point
// safely before the target: the closest indexed keyframe minus 10 blocks
// (FindClosestAudioKeyFrame may return a non-keyframe neighbour, and the
// decoder needs lead-in after the flush). Decoded samples overlapping the
// [Start, Start+Count) window are copied into Buf, which is zeroed first
// so any unserved tail stays silent.
void __stdcall FFMatroskaAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env) {
	size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 10, (int64_t)0);
	avcodec_flush_buffers(AudioCodecContext);
	memset(Buf, 0, VI.BytesFromAudioSamples(Count));
	uint8_t *DstBuf = (uint8_t *)Buf;
	int64_t RemainingSamples = Count;
	int64_t DecodeCount;
	do {
		int64_t DecodeStart = SI[CurrentAudioBlock].SampleStart;
		int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, SI[CurrentAudioBlock].FilePos, SI[CurrentAudioBlock].FrameSize, Env);
		if (Ret < 0)
			// Fix: was "Bleh, bad audio decoding" -- brought in line with
			// the "FFmpegSource: ..." error convention used everywhere else.
			Env->ThrowError("FFmpegSource: Audio decoding error");
		CurrentAudioBlock++;
		// Copy only the part of the decoded block that overlaps the request.
		int64_t OffsetBytes = VI.BytesFromAudioSamples(FFMAX(0, Start - DecodeStart));
		int64_t CopyBytes = FFMAX(0, VI.BytesFromAudioSamples(FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart))));
		memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
		DstBuf += CopyBytes;
		RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
	} while (RemainingSamples > 0 && CurrentAudioBlock < SI.size());
}
// Reads the Matroska block at AFilePos/AFrameSize, decodes all of it into
// ABuf, and reports the number of decoded samples via *ACount. Returns the
// last avcodec_decode_audio2 result: >= 0 on success, < 0 when the decoder
// reported an error mid-block (callers treat that as fatal).
// NOTE(review): ABuf is the shared AVCODEC_MAX_AUDIO_FRAME_SIZE buffer; a
// block decoding to more than that would overrun it -- relies on the
// indexer having produced one decodable block per SampleInfo entry.
int FFMatroskaAudioSource::DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env) {
int Ret = -1;
*ACount = 0;
int FrameSize = ReadFrame(AFilePos, AFrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
int Size = FrameSize;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
// Ret is the number of INPUT bytes consumed; TempOutputBufSize comes
// back holding the number of output bytes produced.
Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)ABuf, &TempOutputBufSize, Data, Size);
if (Ret < 0) // throw error or something?
goto Done;
if (Ret > 0) {
Size -= Ret;
Data += Ret;
ABuf += TempOutputBufSize;
*ACount += VI.AudioSamplesFromBytes(TempOutputBufSize);
}
}
Done:
return Ret;
}

View file

@ -1,422 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Resolves a user-supplied track number to a validated Matroska track
// index of the requested type. Index == -1 means "first track of this
// type"; values <= -2 are passed through as the sentinel -2 (presumably
// "track explicitly disabled" -- confirm against callers). Throws through
// Env for missing, out-of-range, or wrong-type tracks.
// NOTE(review): byte-for-byte duplicate of
// FFMatroskaAudioSource::GetTrackIndex; consider a shared free function.
int FFMatroskaSource::GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env) {
if (Index == -1)
for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
if (mkv_GetTrackInfo(MF, i)->Type == ATrackType) {
Index = i;
break;
}
if (Index == -1)
Env->ThrowError("FFmpegSource: No %s track found", (ATrackType & TT_VIDEO) ? "video" : "audio");
if (Index <= -2)
return -2;
if (Index >= (int)mkv_GetNumTracks(MF))
Env->ThrowError("FFmpegSource: Invalid %s track number", (ATrackType & TT_VIDEO) ? "video" : "audio");
TrackInfo *TI = mkv_GetTrackInfo(MF, Index);
if (TI->Type != ATrackType)
Env->ThrowError("FFmpegSource: Selected track is not %s", (ATrackType & TT_VIDEO) ? "video" : "audio");
return Index;
}
FFMatroskaSource::FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes,
bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString,
int AQuality, int AThreads, IScriptEnvironment* Env) {
CurrentFrame = 0;
int VideoTrack;
int AudioTrack;
unsigned int TrackMask = ~0;
AVCodecContext *AudioCodecContext = NULL;
AVCodec *AudioCodec = NULL;
VideoCodecContext = NULL;
AVCodec *VideoCodec = NULL;
TrackInfo *VideoTI = NULL;
BufferSize = 0;
Buffer = NULL;
VideoCS = NULL;
AudioCS = NULL;
memset(&ST,0,sizeof(ST));
ST.base.read = (int (__cdecl *)(InputStream *,ulonglong,void *,int))StdIoRead;
ST.base.scan = (longlong (__cdecl *)(InputStream *,ulonglong,unsigned int))StdIoScan;
ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
ST.base.memalloc = (void *(__cdecl *)(InputStream *,size_t))StdIoMalloc;
ST.base.memrealloc = (void *(__cdecl *)(InputStream *,void *,size_t))StdIoRealloc;
ST.base.memfree = (void (__cdecl *)(InputStream *,void *)) StdIoFree;
ST.base.progress = (int (__cdecl *)(InputStream *,ulonglong,ulonglong))StdIoProgress;
ST.fp = fopen(ASource, "rb");
if (ST.fp == NULL)
Env->ThrowError("FFmpegSource: Can't open '%s': %s", ASource, strerror(errno));
setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);
MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
if (MF == NULL) {
fclose(ST.fp);
Env->ThrowError("FFmpegSource: Can't parse Matroska file: %s", ErrorMessage);
}
VideoTrack = GetTrackIndex(AVideoTrack, TT_VIDEO, Env);
AudioTrack = GetTrackIndex(AAudioTrack, TT_AUDIO, Env);
bool VCacheIsValid = true;
bool ACacheIsValid = true;
if (VideoTrack >= 0) {
VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
VideoTI = mkv_GetTrackInfo(MF, VideoTrack);
if (VideoTI->CompEnabled) {
VideoCS = cs_Create(MF, VideoTrack, ErrorMessage, sizeof(ErrorMessage));
if (VideoCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
VideoCodecContext = avcodec_alloc_context();
VideoCodecContext->extradata = (uint8_t *)VideoTI->CodecPrivate;
VideoCodecContext->extradata_size = VideoTI->CodecPrivateSize;
VideoCodecContext->thread_count = AThreads;
VideoCodec = avcodec_find_decoder(MatroskaToFFCodecID(VideoTI));
if (VideoCodec == NULL)
Env->ThrowError("FFmpegSource: Video codec not found");
if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open video codec");
// Fix for mpeg2 and other formats where decoding a frame is necessary to get information about the stream
if (VideoCodecContext->pix_fmt == PIX_FMT_NONE || VideoCodecContext->width == 0 || VideoCodecContext->height == 0) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
int64_t Dummy;
DecodeNextFrame(DecodeFrame, &Dummy, Env);
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.image_type = VideoInfo::IT_TFF;
VI.width = VideoCodecContext->width;
VI.height = VideoCodecContext->height;;
VI.fps_denominator = 1;
VI.fps_numerator = 30;
if (VI.width <= 0 || VI.height <= 0)
Env->ThrowError("FFmpegSource: Codec returned zero size video");
SetOutputFormat(VideoCodecContext->pix_fmt, Env);
InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
if (!VCacheIsValid)
TrackMask &= ~(1 << VideoTrack);
}
if (AudioTrack >= 0) {
TrackInfo *AudioTI = mkv_GetTrackInfo(MF, AudioTrack);
if (AudioTI->CompEnabled) {
AudioCS = cs_Create(MF, AudioTrack, ErrorMessage, sizeof(ErrorMessage));
if (AudioCS == NULL)
Env->ThrowError("FFmpegSource: Can't create decompressor: %s", ErrorMessage);
}
AudioCodecContext = avcodec_alloc_context();
AudioCodecContext->extradata = (uint8_t *)AudioTI->CodecPrivate;
AudioCodecContext->extradata_size = AudioTI->CodecPrivateSize;
AudioCodec = avcodec_find_decoder(MatroskaToFFCodecID(AudioTI));
if (AudioCodec == NULL)
Env->ThrowError("FFmpegSource: Audio codec not found");
if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
Env->ThrowError("FFmpegSource: Could not open audio codec");
// Fix for ac3 and other codecs where decoding a block of audio is required to get information about it
if (AudioCodecContext->channels == 0 || AudioCodecContext->sample_rate == 0) {
mkv_SetTrackMask(MF, ~(1 << AudioTrack));
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags);
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
Size -= Ret;
Data += Ret;
}
mkv_Seek(MF, 0, MKVF_SEEK_TO_PREV_KEYFRAME);
}
VI.nchannels = AudioCodecContext->channels;
VI.audio_samples_per_second = AudioCodecContext->sample_rate;
switch (AudioCodecContext->sample_fmt) {
case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
default:
Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
}
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
TrackMask &= ~(1 << AudioTrack);
}
mkv_SetTrackMask(MF, TrackMask);
// Needs to be indexed?
if (!ACacheIsValid || !VCacheIsValid) {
FILE *RawCache = NULL;
if (!ACacheIsValid)
AudioCacheType = acRaw;
switch (AudioCacheType) {
case acRaw: RawCache = NewRawCacheWriter(AAudioCache, ASource, AudioTrack, Env); break;
}
uint64_t StartTime, EndTime, FilePos;
unsigned int Track, FrameFlags, FrameSize;
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0)
if (Track == VideoTrack && !VCacheIsValid) {
Frames.push_back(FrameInfo(StartTime, (FrameFlags & FRAME_KF) != 0));
VI.num_frames++;
} else if (Track == AudioTrack && !ACacheIsValid) {
int Size = ReadFrame(FilePos, FrameSize, AudioCS, Env);
uint8_t *Data = Buffer;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
if (Ret < 0)
Env->ThrowError("FFmpegSource: Audio decoding error");
int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
Size -= Ret;
Data += Ret;
VI.num_audio_samples += DecodedSamples;
if (AudioCacheType == acRaw) {
fwrite(DecodingBuffer, 1, TempOutputBufSize, RawCache);
}
}
}
if (!ACacheIsValid) {
switch (AudioCacheType) {
case acRaw: CloseRawCacheWriter(RawCache); break;
}
ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
if (!ACacheIsValid)
Env->ThrowError("FFmpegSource: Failed to open newly created audio cache for reading");
}
if (VideoTrack >= 0 && VI.num_frames == 0)
Env->ThrowError("FFmpegSource: Video track contains no frames");
if (AudioTrack >= 0 && VI.num_audio_samples == 0)
Env->ThrowError("FFmpegSource: Audio track contains no samples");
if (VideoTrack >= 0)
mkv_Seek(MF, Frames.front().DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
if (AVCache && !VCacheIsValid)
if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
Env->ThrowError("FFmpegSource: Failed to write video cache info");
}
if (AudioTrack >= 0) {
avcodec_close(AudioCodecContext);
av_free(AudioCodecContext);
}
if (VideoTrack >= 0) {
mkv_SetTrackMask(MF, ~(1 << VideoTrack));
// Calculate the average framerate
if (Frames.size() >= 2) {
double DTSDiff = (double)(Frames.back().DTS - Frames.front().DTS);
VI.fps_denominator = (unsigned int)(DTSDiff * mkv_TruncFloat(VideoTI->TimecodeScale) / (double)1000 / (double)(VI.num_frames - 1) + 0.5);
VI.fps_numerator = 1000000;
}
if (!SaveTimecodesToFile(ATimecodes, mkv_TruncFloat(VideoTI->TimecodeScale), 1000000))
Env->ThrowError("FFmpegSource: Failed to write timecodes");
// Set AR variables
int ffsar_num = VideoTI->AV.Video.DisplayWidth * VideoTI->AV.Video.PixelHeight;
int ffsar_den = VideoTI->AV.Video.DisplayHeight * VideoTI->AV.Video.PixelWidth;
Env->SetVar("FFSAR_NUM", ffsar_num);
Env->SetVar("FFSAR_DEN", ffsar_den);
Env->SetVar("FFSAR", ffsar_num / (double)ffsar_den);
// Set crop variables
Env->SetVar("FFCROP_LEFT", (int)VideoTI->AV.Video.CropL);
Env->SetVar("FFCROP_RIGHT", (int)VideoTI->AV.Video.CropR);
Env->SetVar("FFCROP_TOP", (int)VideoTI->AV.Video.CropT);
Env->SetVar("FFCROP_BOTTOM", (int)VideoTI->AV.Video.CropB);
}
}
// Release decoder, demuxer and file resources acquired by the constructor.
FFMatroskaSource::~FFMatroskaSource() {
	// Tear down the video decoder first; the context pointer may be NULL
	// when no video track was opened (av_free accepts NULL).
	if (VideoCodecContext)
		avcodec_close(VideoCodecContext);
	av_free(VideoCodecContext);
	// Then the matroska demuxer and its underlying stdio stream.
	mkv_Close(MF);
	fclose(ST.fp);
	// Finally the frame read buffer (free(NULL) is a no-op).
	free(Buffer);
}
int FFMatroskaSource::ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env) {
if (ACS) {
char CSBuffer[4096];
unsigned int DecompressedFrameSize = 0;
cs_NextFrame(ACS, AFilePos, AFrameSize);
for (;;) {
int ReadBytes = cs_ReadData(ACS, CSBuffer, sizeof(CSBuffer));
if (ReadBytes < 0)
Env->ThrowError("FFmpegSource: Error decompressing data: %s", cs_GetLastError(ACS));
if (ReadBytes == 0) {
return DecompressedFrameSize;
}
if (BufferSize < DecompressedFrameSize + ReadBytes) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
memcpy(Buffer + DecompressedFrameSize, CSBuffer, ReadBytes);
DecompressedFrameSize += ReadBytes;
}
} else {
if (_fseeki64(ST.fp, AFilePos, SEEK_SET))
Env->ThrowError("FFmpegSource: fseek(): %s", strerror(errno));
if (BufferSize < AFrameSize) {
BufferSize = AFrameSize;
Buffer = (uint8_t *)realloc(Buffer, BufferSize);
if (Buffer == NULL)
Env->ThrowError("FFmpegSource: Out of memory");
}
size_t ReadBytes = fread(Buffer, 1, AFrameSize, ST.fp);
if (ReadBytes != AFrameSize) {
if (ReadBytes == 0) {
if (feof(ST.fp))
Env->ThrowError("FFmpegSource: Unexpected EOF while reading frame");
else
Env->ThrowError("FFmpegSource: Error reading frame: %s", strerror(errno));
} else
Env->ThrowError("FFmpegSource: Short read while reading frame");
Env->ThrowError("FFmpegSource: Unknown read error");
}
return AFrameSize;
}
return 0;
}
// Decodes frames from the matroska file until one complete picture is
// available in AFrame. *AFirstStartTime receives the timestamp of the first
// frame read after this call (-1 if none was read). Returns the last
// avcodec_decode_video result.
int FFMatroskaSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env) {
	int FrameFinished = 0;
	int Ret = -1;
	uint64_t StartTime, EndTime, FilePos;
	unsigned int Track, FrameFlags, FrameSize;
	*AFirstStartTime = -1;
	// Feed frames to the decoder until it reports a finished picture.
	while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
		if (*AFirstStartTime < 0)
			*AFirstStartTime = StartTime;
		FrameSize = ReadFrame(FilePos, FrameSize, VideoCS, Env);
		Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, Buffer, FrameSize);
		if (FrameFinished)
			return Ret;
	}
	// End of file: flush delayed pictures out of decoders that buffer B-frames.
	if (VideoCodecContext->has_b_frames)
		Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, NULL, 0);
	// Failure to produce a frame is reported through Ret, as before.
	return Ret;
}
// Returns frame n, seeking via the keyframe index when that is cheaper than
// decoding linearly from the current position.
PVideoFrame FFMatroskaSource::GetFrame(int n, IScriptEnvironment* Env) {
	// One-frame cache: repeated requests for the same frame are free.
	if (LastFrameNum == n)
		return LastFrame;
	bool HasSeeked = false;
	// Seek when the target is behind us, or when a keyframe closer to n lies
	// ahead of the current decode position.
	if (n < CurrentFrame || FindClosestKeyFrame(n) > CurrentFrame) {
		mkv_Seek(MF, Frames[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
		avcodec_flush_buffers(VideoCodecContext);
		HasSeeked = true;
	}
	// Decode forward until frame n has been produced.
	do {
		int64_t StartTime;
		int Ret = DecodeNextFrame(DecodeFrame, &StartTime, Env);
		if (HasSeeked) {
			HasSeeked = false;
			// Re-establish CurrentFrame from the timestamp of the first frame
			// seen after the seek; if that timestamp maps to no known frame,
			// frame-accurate output is impossible for this file.
			if (StartTime < 0 || (CurrentFrame = FrameFromDTS(StartTime)) < 0)
				Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
		}
		CurrentFrame++;
	} while (CurrentFrame <= n);
	LastFrame = OutputFrame(DecodeFrame, Env);
	LastFrameNum = n;
	return LastFrame;
}

View file

@ -1,222 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Resolves a user-supplied track number to a real stream index.
// Index == -1 selects the first track of the requested type; Index <= -2
// means "disabled" and is passed through as -2; anything else is validated.
int FFmpegAudioSource::GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env) {
	const char *TypeName = (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio";
	if (Index == -1) {
		for (unsigned int Stream = 0; Stream < FormatContext->nb_streams; Stream++) {
			if (FormatContext->streams[Stream]->codec->codec_type == ATrackType) {
				Index = Stream;
				break;
			}
		}
	}
	if (Index == -1)
		Env->ThrowError("FFmpegSource: No %s track found", TypeName);
	if (Index <= -2)
		return -2;
	if (Index >= (int)FormatContext->nb_streams)
		Env->ThrowError("FFmpegSource: Invalid %s track number", TypeName);
	if (FormatContext->streams[Index]->codec->codec_type != ATrackType)
		Env->ThrowError("FFmpegSource: Selected track is not %s", TypeName);
	return Index;
}
// Loads the sample index (via the base class) and opens the matching raw
// audio cache for reading. Returns false when either part is missing, in
// which case the caller re-indexes the source.
bool FFmpegAudioSource::LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *AAudioCacheFile2, const char *ASource, int AAudioTrack) {
	if (!FFAudioBase::LoadSampleInfoFromFile(AAudioCacheFile, ASource, AAudioTrack))
		return false;
	// Derive the default raw-cache filename when none was supplied.
	// BUGFIX: bounded snprintf instead of sprintf — a long source path could
	// overflow the 1024-byte buffer.
	char DefaultCacheFilename[1024];
	snprintf(DefaultCacheFilename, sizeof(DefaultCacheFilename), "%s.ffasd%dcache", ASource, AAudioTrack);
	if (!strcmp(AAudioCacheFile2, ""))
		AAudioCacheFile2 = DefaultCacheFilename;
	RawCache = fopen(AAudioCacheFile2, "rb");
	if (!RawCache)
		return false;
	return true;
}
// Opens ASource with libavformat, selects/validates the audio track, and
// builds (or reuses) a sample index plus an on-disk raw packet cache so
// GetAudio can later seek by sample position.
FFmpegAudioSource::FFmpegAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, const char *AAudioCache2, IScriptEnvironment *Env) {
	BufferSize = 0;
	Buffer = NULL;
	RawCache = NULL;
	FormatContext = NULL;
	AudioCodecContext = NULL;
	AVCodec *AudioCodec = NULL;
	if (av_open_input_file(&FormatContext, ASource, NULL, 0, NULL) != 0)
		Env->ThrowError("FFmpegSource: Couldn't open '%s'", ASource);
	if (av_find_stream_info(FormatContext) < 0)
		Env->ThrowError("FFmpegSource: Couldn't find stream information");
	AudioTrack = GetTrackIndex(AAudioTrack, CODEC_TYPE_AUDIO, Env);
	AudioCodecContext = FormatContext->streams[AudioTrack]->codec;
	AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
	if (AudioCodec == NULL)
		Env->ThrowError("FFmpegSource: Audio codec not found");
	if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
		Env->ThrowError("FFmpegSource: Could not open audio codec");
	VI.nchannels = AudioCodecContext->channels;
	VI.audio_samples_per_second = AudioCodecContext->sample_rate;
	// Map the libavcodec sample format onto the Avisynth sample type.
	switch (AudioCodecContext->sample_fmt) {
		case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
		case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
		case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
		case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
		case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
		default:
			Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
	}
	//load cache
	bool ACacheIsValid = LoadSampleInfoFromFile(AAudioCache, AAudioCache2, ASource, AudioTrack);
	// NOTE(review): unbounded sprintf; a very long source path could overflow
	// this 1024-byte buffer — consider snprintf.
	char DefaultCacheFilename[1024];
	sprintf(DefaultCacheFilename, "%s.ffasd%dcache", ASource, AudioTrack);
	if (!strcmp(AAudioCache2, ""))
		AAudioCache2 = DefaultCacheFilename;
	// Create the raw cache for writing when loading it for reading failed.
	if (!RawCache)
		RawCache = fopen(AAudioCache2, "wb+");
	// Needs to be indexed?
	if (!ACacheIsValid) {
		AVPacket Packet;
		// Demux every packet of the audio track, append its payload to the
		// raw cache, and record each block's starting sample and file offset.
		while (av_read_frame(FormatContext, &Packet) >= 0) {
			if (Packet.stream_index == AudioTrack) {
				SI.push_back(SampleInfo(VI.num_audio_samples, _ftelli64(RawCache), Packet.size, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
				fwrite(Packet.data, 1, Packet.size, RawCache);
				if (AudioCodecContext->frame_size > 0) {
					// Fixed frame size: the sample count is known without decoding.
					VI.num_audio_samples += AudioCodecContext->frame_size;
				} else {
					// Variable frame size: decode the packet to count its samples.
					int Size = Packet.size;
					uint8_t *Data = Packet.data;
					while (Size > 0) {
						int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
						int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
						if (Ret < 0)
							Env->ThrowError("FFmpegSource: Audio decoding error");
						// NOTE(review): if the decoder ever returns 0 with
						// Size > 0 this loop spins forever — verify against
						// the lavc version in use.
						if (Ret > 0) {
							int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
							Size -= Ret;
							Data += Ret;
							VI.num_audio_samples += DecodedSamples;
						}
					}
				}
			}
			av_free_packet(&Packet);
		}
		// Rewind and reset decoder state so playback starts from sample 0.
		av_seek_frame(FormatContext, AudioTrack, 0, AVSEEK_FLAG_BACKWARD);
		avcodec_flush_buffers(AudioCodecContext);
		if (!SaveSampleInfoToFile(AAudioCache, ASource, AudioTrack))
			Env->ThrowError("FFmpegSource: Failed to save audio cache index");
	}
	if (VI.num_audio_samples == 0)
		Env->ThrowError("FFmpegSource: Audio track contains no samples");
}
int FFmpegAudioSource::DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env) {
int Ret = -1;
*ACount = 0;
_fseeki64(RawCache, AFilePos, SEEK_SET);
if (AFrameSize > BufferSize) {
Buffer = (uint8_t *)realloc(Buffer, AFrameSize);
BufferSize = AFrameSize;
}
fread(Buffer, 1, AFrameSize, RawCache);
uint8_t *Data = Buffer;
int Size = AFrameSize;
while (Size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)ABuf, &TempOutputBufSize, Data, Size);
if (Ret < 0) // throw error or something?
goto Done;
if (Ret > 0) {
Size -= Ret;
Data += Ret;
ABuf += TempOutputBufSize;
*ACount += VI.AudioSamplesFromBytes(TempOutputBufSize);
}
}
Done:
return Ret;
}
// Fills Buf with Count samples starting at sample Start by decoding cached
// audio blocks that overlap the requested range.
void FFmpegAudioSource::GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env) {
	// Begin 10 blocks before the closest indexed keyframe so the decoder has
	// converged by the time the requested range is reached (clamped to 0).
	size_t CurrentAudioBlock = FFMAX((int64_t)FindClosestAudioKeyFrame(Start) - 10, (int64_t)0);
	avcodec_flush_buffers(AudioCodecContext);
	// Pre-zero the output so any samples the loop fails to cover are silence.
	memset(Buf, 0, VI.BytesFromAudioSamples(Count));
	uint8_t *DstBuf = (uint8_t *)Buf;
	int64_t RemainingSamples = Count;
	int64_t DecodeCount;
	do {
		int64_t DecodeStart = SI[CurrentAudioBlock].SampleStart;
		int Ret = DecodeNextAudioBlock(DecodingBuffer, &DecodeCount, SI[CurrentAudioBlock].FilePos, SI[CurrentAudioBlock].FrameSize, Env);
		if (Ret < 0)
			Env->ThrowError("Bleh, bad audio decoding");
		CurrentAudioBlock++;
		// Copy only the overlap between this decoded block and
		// [Start, Start + Count): skip OffsetBytes of lead-in, then at most
		// RemainingSamples worth of data.
		int64_t OffsetBytes = VI.BytesFromAudioSamples(FFMAX(0, Start - DecodeStart));
		int64_t CopyBytes = FFMAX(0, VI.BytesFromAudioSamples(FFMIN(RemainingSamples, DecodeCount - FFMAX(0, Start - DecodeStart))));
		memcpy(DstBuf, DecodingBuffer + OffsetBytes, CopyBytes);
		DstBuf += CopyBytes;
		RemainingSamples -= VI.AudioSamplesFromBytes(CopyBytes);
	} while (RemainingSamples > 0 && DecodeCount > 0);
}
// Releases the decoder, the demuxer and the raw cache file handle.
FFmpegAudioSource::~FFmpegAudioSource() {
	// Close the decoder before tearing down the format context that owns
	// its codec context.
	if (AudioCodecContext)
		avcodec_close(AudioCodecContext);
	av_close_input_file(FormatContext);
	if (RawCache)
		fclose(RawCache);
}

View file

@ -1,330 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Resolves a user-supplied track number to a real stream index.
// Index == -1 selects the first track of the requested type; Index <= -2
// means "disabled" and is passed through as -2; anything else is validated.
int FFmpegSource::GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env) {
	const char *TypeName = (ATrackType == CODEC_TYPE_VIDEO) ? "video" : "audio";
	if (Index == -1) {
		for (unsigned int Stream = 0; Stream < FormatContext->nb_streams; Stream++) {
			if (FormatContext->streams[Stream]->codec->codec_type == ATrackType) {
				Index = Stream;
				break;
			}
		}
	}
	if (Index == -1)
		Env->ThrowError("FFmpegSource: No %s track found", TypeName);
	if (Index <= -2)
		return -2;
	if (Index >= (int)FormatContext->nb_streams)
		Env->ThrowError("FFmpegSource: Invalid %s track number", TypeName);
	if (FormatContext->streams[Index]->codec->codec_type != ATrackType)
		Env->ThrowError("FFmpegSource: Selected track is not %s", TypeName);
	return Index;
}
// Opens ASource with libavformat, selects the requested video/audio tracks,
// probes decoder parameters, and (re)builds the frame index and audio cache
// when the on-disk caches are missing or stale.
FFmpegSource::FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes,
	bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString,
	int AQuality, int AThreads, int ASeekMode, IScriptEnvironment *Env) {
	CurrentFrame = 0;
	SeekMode = ASeekMode;
	// The audio decoder is only needed during indexing; it is closed again
	// at the end of this constructor.
	AVCodecContext *AudioCodecContext = NULL;
	AVCodec *AudioCodec;
	AVCodec *VideoCodec;
	FormatContext = NULL;
	VideoCodecContext = NULL;
	VideoCodec = NULL;
	if (av_open_input_file(&FormatContext, ASource, NULL, 0, NULL) != 0)
		Env->ThrowError("FFmpegSource: Couldn't open '%s'", ASource);
	if (av_find_stream_info(FormatContext) < 0)
		Env->ThrowError("FFmpegSource: Couldn't find stream information");
	// -1 = autodetect first track of the type; <= -2 = track disabled.
	VideoTrack = GetTrackIndex(AVideoTrack, CODEC_TYPE_VIDEO, Env);
	int AudioTrack = GetTrackIndex(AAudioTrack, CODEC_TYPE_AUDIO, Env);
	bool VCacheIsValid = true;
	bool ACacheIsValid = true;
	if (VideoTrack >= 0) {
		// SeekMode -1 is pure linear access; every other mode needs a
		// seekable video track.
		if (SeekMode >= 0 && av_seek_frame(FormatContext, VideoTrack, 0, AVSEEK_FLAG_BACKWARD) < 0)
			Env->ThrowError("FFmpegSource: Video track is unseekable")
;
		VCacheIsValid = LoadFrameInfoFromFile(AVideoCache, ASource, VideoTrack);
		VideoCodecContext = FormatContext->streams[VideoTrack]->codec;
		VideoCodecContext->thread_count = AThreads;
		VideoCodec = avcodec_find_decoder(VideoCodecContext->codec_id);
		if (VideoCodec == NULL)
			Env->ThrowError("FFmpegSource: Video codec not found");
		if (avcodec_open(VideoCodecContext, VideoCodec) < 0)
			Env->ThrowError("FFmpegSource: Could not open video codec");
		// Fix for mpeg2 and other formats where decoding a frame is necessary to get information about the stream
		if (SeekMode >= 0 && (VideoCodecContext->pix_fmt == PIX_FMT_NONE || VideoCodecContext->width == 0 || VideoCodecContext->height == 0)) {
			int64_t Dummy;
			DecodeNextFrame(DecodeFrame, &Dummy);
			av_seek_frame(FormatContext, VideoTrack, 0, AVSEEK_FLAG_BACKWARD);
		}
		VI.image_type = VideoInfo::IT_TFF;
		VI.width = VideoCodecContext->width;
		VI.height = VideoCodecContext->height;
		// Provisional FPS from the stream time base; refined further below
		// once the first two frame timestamps are known.
		VI.fps_denominator = FormatContext->streams[VideoTrack]->time_base.num;
		VI.fps_numerator = FormatContext->streams[VideoTrack]->time_base.den;
		if (VI.width <= 0 || VI.height <= 0)
			Env->ThrowError("FFmpegSource: Codec returned zero size video");
		// sanity check framerate
		if (VI.fps_denominator > VI.fps_numerator || VI.fps_denominator <= 0 || VI.fps_numerator <= 0) {
			VI.fps_denominator = 1;
			VI.fps_numerator = 30;
		}
		SetOutputFormat(VideoCodecContext->pix_fmt, Env);
		InitPP(VI.width, VI.height, APPString, AQuality, VideoCodecContext->pix_fmt, Env);
	}
	if (AudioTrack >= 0) {
		AudioCodecContext = FormatContext->streams[AudioTrack]->codec;
		AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);
		if (AudioCodec == NULL)
			Env->ThrowError("FFmpegSource: Audio codec not found");
		if (avcodec_open(AudioCodecContext, AudioCodec) < 0)
			Env->ThrowError("FFmpegSource: Could not open audio codec");
		// Map the libavcodec sample format onto the Avisynth sample type.
		switch (AudioCodecContext->sample_fmt) {
			case SAMPLE_FMT_U8: VI.sample_type = SAMPLE_INT8; break;
			case SAMPLE_FMT_S16: VI.sample_type = SAMPLE_INT16; break;
			case SAMPLE_FMT_S24: VI.sample_type = SAMPLE_INT24; break;
			case SAMPLE_FMT_S32: VI.sample_type = SAMPLE_INT32; break;
			case SAMPLE_FMT_FLT: VI.sample_type = SAMPLE_FLOAT; break;
			default:
				Env->ThrowError("FFmpegSource: Unsupported/unknown sample format");
		}
		VI.nchannels = AudioCodecContext->channels;
		VI.audio_samples_per_second = AudioCodecContext->sample_rate;
		ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
	}
	// Linear-only mode cannot perform the indexing pass below.
	if ((!ACacheIsValid || !VCacheIsValid) && SeekMode == -1)
		Env->ThrowError("FFmpegSource: Unusual indexing error, report on doom9");
	// Needs to be indexed?
	if (!ACacheIsValid || !VCacheIsValid) {
		FILE *RawCache = NULL;
		if (!ACacheIsValid)
			AudioCacheType = acRaw;
		switch (AudioCacheType) {
			case acRaw: RawCache = NewRawCacheWriter(AAudioCache, ASource, AudioTrack, Env); break;
		}
		// Single demux pass: record every video frame's DTS/keyframe flag and
		// decode+cache every audio packet to count its samples.
		AVPacket Packet;
		while (av_read_frame(FormatContext, &Packet) >= 0) {
			if (Packet.stream_index == VideoTrack && !VCacheIsValid) {
				Frames.push_back(FrameInfo(Packet.dts, (Packet.flags & PKT_FLAG_KEY) ? 1 : 0));
				VI.num_frames++;
			} else if (Packet.stream_index == AudioTrack && !ACacheIsValid) {
				int Size = Packet.size;
				uint8_t *Data = Packet.data;
				while (Size > 0) {
					int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
					int Ret = avcodec_decode_audio2(AudioCodecContext, (int16_t *)DecodingBuffer, &TempOutputBufSize, Data, Size);
					if (Ret < 0)
						Env->ThrowError("FFmpegSource: Audio decoding error");
					int DecodedSamples = (int)VI.AudioSamplesFromBytes(TempOutputBufSize);
					Size -= Ret;
					Data += Ret;
					VI.num_audio_samples += DecodedSamples;
					if (AudioCacheType == acRaw) {
						fwrite(DecodingBuffer, 1, TempOutputBufSize, RawCache);
					}
				}
			}
			av_free_packet(&Packet);
		}
		if (!ACacheIsValid) {
			switch (AudioCacheType) {
				case acRaw: CloseRawCacheWriter(RawCache); break;
			}
			// Re-open the cache just written; failure here means the write
			// itself went wrong.
			ACacheIsValid = OpenAudioCache(AAudioCache, ASource, AudioTrack, Env);
			if (!ACacheIsValid)
				Env->ThrowError("FFmpegSource: Failed to open newly created audio cache for reading");
		}
		if (VideoTrack >= 0 && VI.num_frames == 0)
			Env->ThrowError("FFmpegSource: Video track contains no frames");
		if (AudioTrack >= 0 && VI.num_audio_samples == 0)
			Env->ThrowError("FFmpegSource: Audio track contains no samples");
		// Rewind to the first indexed frame for subsequent decoding.
		if (VideoTrack >= 0)
			av_seek_frame(FormatContext, VideoTrack, Frames.front().DTS, AVSEEK_FLAG_BACKWARD);
		if (AVCache && !VCacheIsValid)
			if (!SaveFrameInfoToFile(AVideoCache, ASource, VideoTrack))
				Env->ThrowError("FFmpegSource: Failed to write video cache info");
	}
	// Audio decoding was only needed for indexing; further audio access goes
	// through the cache.
	if (AudioTrack >= 0)
		avcodec_close(AudioCodecContext);
	if (VideoTrack >= 0) {
		if (!SaveTimecodesToFile(ATimecodes, FormatContext->streams[VideoTrack]->time_base.num * 1000, FormatContext->streams[VideoTrack]->time_base.den))
			Env->ThrowError("FFmpegSource: Failed to write timecodes");
		// Adjust framerate to match the duration of the first frame
		if (Frames.size() >= 2) {
			unsigned int DTSDiff = (unsigned int)FFMAX(Frames[1].DTS - Frames[0].DTS, 1);
			VI.fps_denominator *= DTSDiff;
		}
		// Set AR variables
		Env->SetVar("FFSAR_NUM", VideoCodecContext->sample_aspect_ratio.num);
		Env->SetVar("FFSAR_DEN", VideoCodecContext->sample_aspect_ratio.den);
		Env->SetVar("FFSAR", av_q2d(VideoCodecContext->sample_aspect_ratio));
		// Set crop variables (lavf exposes no crop info, so always zero here)
		Env->SetVar("FFCROP_LEFT", (int)0);
		Env->SetVar("FFCROP_RIGHT", (int)0);
		Env->SetVar("FFCROP_TOP", (int)0);
		Env->SetVar("FFCROP_BOTTOM", (int)0);
	}
}
// Close the video decoder (its context is owned by FormatContext's stream,
// so it is closed but not freed here) and then the demuxer itself.
FFmpegSource::~FFmpegSource() {
	if (VideoTrack >= 0)
		avcodec_close(VideoCodecContext);
	av_close_input_file(FormatContext);
}
// Feeds packets from the selected video track to the decoder until one
// complete picture is available in AFrame. *AStartTime receives the DTS of
// the first video packet read (-1 if none). Returns the last
// avcodec_decode_video result; decode errors are not acted upon here.
int FFmpegSource::DecodeNextFrame(AVFrame *AFrame, int64_t *AStartTime) {
	AVPacket Packet;
	int FrameFinished = 0;
	int Ret = -1;
	*AStartTime = -1;
	while (av_read_frame(FormatContext, &Packet) >= 0) {
		if (Packet.stream_index == VideoTrack) {
			if (*AStartTime < 0)
				*AStartTime = Packet.dts;
			Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, Packet.data, Packet.size);
		}
		av_free_packet(&Packet);
		if (FrameFinished)
			return Ret;
	}
	// End of file: flush delayed pictures out of decoders that buffer B-frames.
	if (VideoCodecContext->has_b_frames)
		Ret = avcodec_decode_video(VideoCodecContext, AFrame, &FrameFinished, NULL, 0);
	// Ignore errors for now, as before; the caller inspects Ret.
	return Ret;
}
// Returns frame n, choosing a seek strategy based on SeekMode:
//   -1 linear only, 0 linear with rewind-to-start, 1 frame-accurate,
//   2/3 fuzzy (snap to closest frame when the seek target is ambiguous).
PVideoFrame FFmpegSource::GetFrame(int n, IScriptEnvironment* Env) {
	// One-frame cache: repeated requests for the same frame are free.
	if (LastFrameNum == n)
		return LastFrame;
	bool HasSeeked = false;
	if (SeekMode >= 0) {
		int ClosestKF = FindClosestKeyFrame(n);
		if (SeekMode == 0) {
			// Mode 0: never seek forward; rewinding means decoding from the
			// very start of the file.
			if (n < CurrentFrame) {
				av_seek_frame(FormatContext, VideoTrack, 0, AVSEEK_FLAG_BACKWARD);
				avcodec_flush_buffers(VideoCodecContext);
				CurrentFrame = 0;
			}
		} else {
			// 10 frames is used as a margin to prevent excessive seeking since the predicted best keyframe isn't always selected by avformat
			if (n < CurrentFrame || ClosestKF > CurrentFrame + 10 || (SeekMode == 3 && n > CurrentFrame + 10)) {
				av_seek_frame(FormatContext, VideoTrack, (SeekMode == 3) ? Frames[n].DTS : Frames[ClosestKF].DTS, AVSEEK_FLAG_BACKWARD);
				avcodec_flush_buffers(VideoCodecContext);
				HasSeeked = true;
			}
		}
	} else if (n < CurrentFrame) {
		Env->ThrowError("FFmpegSource: Non-linear access attempted");
	}
	// Decode forward until frame n has been produced.
	do {
		int64_t StartTime;
		DecodeNextFrame(DecodeFrame, &StartTime);
		if (HasSeeked) {
			HasSeeked = false;
			// Is the seek destination time known? Does it belong to a frame?
			if (StartTime < 0 || (CurrentFrame = FrameFromDTS(StartTime)) < 0) {
				switch (SeekMode) {
					case 1:
						// Frame-accurate mode cannot tolerate an unmappable
						// seek position.
						Env->ThrowError("FFmpegSource: Frame accurate seeking is not possible in this file");
					case 2:
					case 3:
						// Fuzzy modes: snap to the frame whose DTS is closest.
						CurrentFrame = ClosestFrameFromDTS(StartTime);
						break;
					default:
						Env->ThrowError("FFmpegSource: Failed assertion");
				}
			}
		}
		CurrentFrame++;
	} while (CurrentFrame <= n);
	LastFrame = OutputFrame(DecodeFrame, Env);
	LastFrameNum = n;
	return LastFrame;
}

View file

@ -1,246 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef FFMPEGSOURCE_H
#define FFMPEGSOURCE_H
#include <windows.h>
#include <stdio.h>
#include <vector>
#include <set>
#include <stdlib.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <io.h>
extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#include <libpostproc/postprocess.h>
#include "stdiostream.h"
}
#include "MatroskaParser.h"
#include "avisynth.h"
#define strcmpi _strcmpi
// On-disk audio cache layouts supported by the indexer.
enum AudioCacheFormat {acNone, acRaw};

// Timestamp and keyframe flag for one video frame, in decode order.
struct FrameInfo {
	int64_t DTS;
	bool KeyFrame;
	FrameInfo(int64_t ADTS, bool AKeyFrame) {
		DTS = ADTS;
		KeyFrame = AKeyFrame;
	}
};

typedef std::vector<FrameInfo> FrameInfoVector;
// Location and size of one cached audio block, together with the absolute
// sample position at which it starts.
struct SampleInfo {
	int64_t SampleStart;
	int64_t FilePos;
	unsigned int FrameSize;
	bool KeyFrame;
	SampleInfo(int64_t ASampleStart, int64_t AFilePos, unsigned int AFrameSize, bool AKeyFrame)
		: SampleStart(ASampleStart), FilePos(AFilePos), FrameSize(AFrameSize), KeyFrame(AKeyFrame) {
	}
};

typedef std::vector<SampleInfo> SampleInfoVector;
// Shared helper declarations (implemented elsewhere in the project).
int GetPPCPUFlags(IScriptEnvironment *Env);              // CPU capability flags for libpostproc
int GetSWSCPUFlags(IScriptEnvironment *Env);             // CPU capability flags for swscale
int CSNameToPIXFMT(const char * ACSName, int ADefault);  // colourspace name -> PIX_FMT; ADefault presumably on no match — TODO confirm
int ResizerNameToSWSResizer(const char *AResizerName);   // resizer name -> SWS_* flag
int GetNumberOfLogicalCPUs();
CodecID MatroskaToFFCodecID(TrackInfo *TI);              // matroska codec id -> FFmpeg CodecID
// Avisynth filter that runs libpostproc post-processing on every frame of
// its child clip.
class FFPP : public GenericVideoFilter {
private:
	pp_context_t *PPContext; // libpostproc context
	pp_mode_t *PPMode;       // parsed from APPString in the constructor
	// NOTE(review): the two SwsContexts presumably convert to/from YUV422P
	// around the postproc step — confirm in the implementation file.
	SwsContext *SWSTo422P;
	SwsContext *SWSFrom422P;
	AVPicture InputPicture;
	AVPicture OutputPicture;
public:
	FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env);
	~FFPP();
	PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};
// Avisynth filter that resizes and/or converts the colourspace of its child
// clip using libswscale.
class SWScale : public GenericVideoFilter {
private:
	SwsContext *Context;
	int OrigWidth;    // child clip dimensions before scaling
	int OrigHeight;
	bool FlipOutput;  // whether the scaled output must be flipped vertically
public:
	SWScale(PClip AChild, int AResizeToWidth, int AResizeToHeight, const char *AResizer, const char *AConvertToFormat, IScriptEnvironment *Env);
	~SWScale();
	PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env);
};
// Shared base for the A/V source filters: owns the Avisynth VideoInfo, the
// decode frame, the frame/audio index data, and the colourspace/postproc
// output machinery used by OutputFrame.
class FFBase : public IClip {
private:
	pp_context_t *PPContext;
	pp_mode_t *PPMode;
	SwsContext *SWS;
	int ConvertToFormat;
	AVPicture PPPicture;
protected:
	VideoInfo VI;
	AVFrame *DecodeFrame;          // scratch frame reused for every decode
	AudioCacheFormat AudioCacheType;
	FILE *RawAudioCache;
	PVideoFrame LastFrame;         // one-frame output cache
	int LastFrameNum;
	uint8_t *DecodingBuffer;
	FrameInfoVector Frames;        // per-frame DTS/keyframe index
	int FindClosestKeyFrame(int AFrame);
	int FrameFromDTS(int64_t ADTS);
	int ClosestFrameFromDTS(int64_t ADTS);
	bool LoadFrameInfoFromFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
	bool SaveFrameInfoToFile(const char *AVideoCacheFile, const char *ASource, int AVideoTrack);
	bool SaveTimecodesToFile(const char *ATimecodeFile, int64_t ScaleD, int64_t ScaleN);
	bool OpenAudioCache(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
	// BUGFIX: the extra "FFBase::" qualification on the next two declarations
	// was removed — qualified names on in-class member declarations are
	// ill-formed in standard C++ (accepted only as an MSVC extension).
	FILE *NewRawCacheWriter(const char *AAudioCacheFile, const char *ASource, int AAudioTrack, IScriptEnvironment *Env);
	void CloseRawCacheWriter(FILE *ARawCache);
	void InitPP(int AWidth, int AHeight, const char *APPString, int AQuality, int APixelFormat, IScriptEnvironment *Env);
	void SetOutputFormat(int ACurrentFormat, IScriptEnvironment *Env);
	PVideoFrame OutputFrame(AVFrame *AFrame, IScriptEnvironment *Env);
public:
	FFBase();
	~FFBase();
	bool __stdcall GetParity(int n) { return false; }
	void __stdcall SetCacheHints(int cachehints, int frame_range) { }
	const VideoInfo& __stdcall GetVideoInfo() { return VI; }
	void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
};
// Video/audio source backed entirely by libavformat demuxing.
class FFmpegSource : public FFBase {
private:
	AVFormatContext *FormatContext;
	AVCodecContext *VideoCodecContext; // owned by FormatContext's stream
	int VideoTrack;
	int CurrentFrame;  // decoder position, in frames
	int SeekMode;      // -1 linear, 0 rewind-only, 1 accurate, 2/3 fuzzy
	int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
	int DecodeNextFrame(AVFrame *Frame, int64_t *DTS);
public:
	FFmpegSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString, int AQuality, int AThreads, int ASeekMode, IScriptEnvironment *Env);
	~FFmpegSource();
	PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env);
};
// Video/audio source that demuxes matroska files with Haali's MatroskaParser
// and decodes with libavcodec.
class FFMatroskaSource : public FFBase {
private:
	StdIoStream ST;            // stdio-backed input stream for the parser
	unsigned int BufferSize;   // current capacity of Buffer
	CompressedStream *VideoCS; // non-NULL when the track uses content compression
	CompressedStream *AudioCS;
	AVCodecContext *VideoCodecContext; // allocated and freed by this class
	MatroskaFile *MF;
	char ErrorMessage[256];
	uint8_t *Buffer;           // shared frame read buffer, grown on demand
	int CurrentFrame;          // decoder position, in frames
	int ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env);
	int DecodeNextFrame(AVFrame *AFrame, int64_t *AFirstStartTime, IScriptEnvironment* Env);
	int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env);
public:
	FFMatroskaSource(const char *ASource, int AVideoTrack, int AAudioTrack, const char *ATimecodes, bool AVCache, const char *AVideoCache, const char *AAudioCache, const char *APPString, int AQuality, int AThreads, IScriptEnvironment *Env);
	~FFMatroskaSource();
	PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env);
};
// Common base for the audio-only sources (FFAudioSource script function).
// Holds the sample index used to map audio sample positions to file
// positions plus a shared decoding buffer. Implements the video-related
// IClip members as no-ops since these clips carry audio only.
class FFAudioBase : public IClip{
protected:
VideoInfo VI; // audio-only VideoInfo reported to Avisynth
uint8_t *DecodingBuffer; // scratch buffer for decoded audio
SampleInfoVector SI; // per-block sample index (sample position -> file position)
// Index into SI of the closest indexed block usable to reach Sample -- exact rounding defined in the .cpp.
size_t FindClosestAudioKeyFrame(int64_t Sample);
// Load/save the sample index; return value presumably indicates success -- confirm in the .cpp.
bool LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack);
bool SaveSampleInfoToFile(const char *AAudioCacheFile, const char *ASource, int AAudioTrack);
public:
FFAudioBase();
~FFAudioBase();
bool __stdcall GetParity(int n) { return false; }
void __stdcall SetCacheHints(int cachehints, int frame_range) { }
const VideoInfo& __stdcall GetVideoInfo() { return VI; }
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment *Env) { return NULL; } // no video in an audio-only clip
};
// Audio source backed by libavformat/libavcodec, for non-matroska
// containers. Uses an on-disk raw cache (RawCache) in addition to the
// sample index; see the acachefile2 argument in the documentation.
class FFmpegAudioSource : public FFAudioBase {
private:
AVFormatContext *FormatContext; // demuxer handle for the opened file
AVCodecContext *AudioCodecContext; // decoder for the selected audio track
int AudioTrack; // avformat stream index of the selected audio track
FILE *RawCache; // demuxed/raw audio cache file (the second cache file)
unsigned int BufferSize; // current capacity of Buffer in bytes
uint8_t *Buffer; // packet read buffer
// Overload taking both cache file names; shadows the base-class version intentionally.
bool LoadSampleInfoFromFile(const char *AAudioCacheFile, const char *AAudioCacheFile2, const char *ASource, int AAudioTrack);
int DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env);
// Resolve a user-supplied track number (-1 = first suitable) to a stream index, or throw via Env.
int GetTrackIndex(int Index, CodecType ATrackType, IScriptEnvironment *Env);
public:
FFmpegAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, const char *AAudioCacheFile2, IScriptEnvironment *Env);
~FFmpegAudioSource();
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
};
// Audio source for matroska files, demuxed with Haali's MatroskaParser
// and decoded with libavcodec. Unlike FFmpegAudioSource it needs no
// second raw cache file (matroska can be read block-accurately).
class FFMatroskaAudioSource : public FFAudioBase {
private:
StdIoStream ST; // stdio-based I/O callback state for MatroskaParser
CompressedStream *AudioCS; // non-NULL when the audio track uses content compression
AVCodecContext *AudioCodecContext; // decoder for the selected audio track
MatroskaFile *MF; // parsed matroska file handle
char ErrorMessage[256]; // scratch buffer for MatroskaParser error strings
unsigned int BufferSize; // current capacity of Buffer in bytes
uint8_t *Buffer; // frame read buffer
// Read (and decompress through ACS if set) one block's raw data into Buffer.
int ReadFrame(uint64_t AFilePos, unsigned int AFrameSize, CompressedStream *ACS, IScriptEnvironment *Env);
int DecodeNextAudioBlock(uint8_t *ABuf, int64_t *ACount, uint64_t AFilePos, unsigned int AFrameSize, IScriptEnvironment *Env);
// Resolve a user-supplied track number (-1 = first suitable) to a matroska track index, or throw via Env.
int GetTrackIndex(int Index, unsigned char ATrackType, IScriptEnvironment *Env);
public:
FFMatroskaAudioSource(const char *ASource, int AAudioTrack, const char *AAudioCache, IScriptEnvironment *Env);
~FFMatroskaAudioSource();
void __stdcall GetAudio(void* Buf, __int64 Start, __int64 Count, IScriptEnvironment *Env);
};
#endif

View file

@ -1,337 +0,0 @@
<html>
<head>
<title>
FFmpegSource Documentation
</title>
</head>
<body>
<h1>FFmpegSource Documentation</h1>
<p>
Opens files using ffmpeg and nothing else. May be frame accurate on good days. The source is BSD licensed and can be obtained from https://spaceboyz.net/svn/aegisub/trunk/FFmpegSource. The precompiled binary is GPL licensed.
</p>
<h2>Compatibility - Video</h2>
<ul>
<li>AVI, MKV, MP4, FLV: Frame accurate</li>
<li>WMV: Frame accurate(?) but avformat seems to pick keyframes relatively far away</li>
<li>OGM: Messed up first frame and seeking produces smearing with seekmode=3, incredibly slow seeking without, remux to mkv or avi</li>
<li>VOB: No rff flags applied, frame accurate?</li>
<li>MPG: Seeking seems to be off by one or two frames now and then</li>
<li>M2TS, TS: Linear access only (seekmode=-1)</li>
<li>Image files: most formats can be opened if seekmode=-1 is set</li>
</ul>
<h2>Compatibility - Audio</h2>
<ul>
<li>Should be sample accurate in all containers with audio cache</li>
<li>Can produce very bad distortions when caching is not used for certain formats like aac</li>
</ul>
<h2>Usage</h2>
<p>
<b>FFmpegSource(string source, int vtrack = -1, int atrack = -2, string timecodes, bool vcache = true, string vcachefile, string acachefile, string pp, int ppquality = 6, int threads = -1, int seekmode = 1)</b><br />
</p>
<p>
Note that the audio cache will always be created when opening files with audio and that it will be huge since it stores all audio as raw signed 16/24/32 bit pcm, unsigned 8 bit pcm or as float; it is also possible to compress it with FLAC to reduce the size by half.
</p>
<p>
<b>FFAudioSource(string source, int atrack = -1, string acachefile, string acachefile2)</b><br />
</p>
<p>
Experimental, may or may not be accurate enough for real usage.
</p>
<p>
<b>FFPP(clip, string pp, int ppquality = 6)</b><br />
Separate postprocessing which also seems to include a few simple deinterlacers
</p>
<p>
<b>SWScale(clip, width = -1, height = -1, resizer = "BICUBIC", colorspace = "")</b><br />
Separate postprocessing which also seems to include a few simple deinterlacers
</p>
<p>
<b>source:</b>
Source file.
</p>
<p>
<b>atrack &amp; vtrack:</b>
Track number as seen by the relevant demuxer, starts from 0, -1 means it will pick the first suitable track and -2 means it's disabled.
</p>
<p>
<b>timecodes:</b>
File to output timecodes to, if the file exists it will be overwritten.
</p>
<p>
<b>vcache:</b>
Write video indexing information to a file for later use. This setting does not control whether the video index is loaded; it is always loaded if it exists.
</p>
<p>
<b>vcachefile, acachefile &amp; acachefile2:</b>
Specifies the file to store the index information or raw audio in, if nothing is specified (source).ffv(tracknumber)cache is used for video and (source).ffa(d if FFAudioSource is used)(tracknumber)cache for audio. The second audio cache file is only required/created when not opening matroska files.
</p>
<p>
<b>pp:</b>
See the table below for a full description, an empty string means no processing. It is recommended to avoid the autoq option since it's currently unknown what effect it will have on the processing.
</p>
<p>
<b>ppquality:</b>
The quality to use for the specified postprocessing. Valid values are 0-6 where 0 usually means that no actual processing is done.
</p>
<p>
<b>threads:</b>
Sets the number of decoder threads used. Defaults to the number of cpus reported by windows. Ignored by lavc if the used decoder doesn't implement it.
</p>
<p>
<b>seekmode:</b>
Force how seeking is handled, has no effect on matroska files which always use the equivalent of seekmode=1<br />
<b>-1:</b> linear access without rewind, will throw an error if each successive requested frame number isn't bigger than the last one, only intended for opening images but might work well with some obscure video format<br />
<b>0:</b> linear access, the definition of slow but should make some formats "usable"<br />
<b>1:</b> safe normal, bases seeking decisions on the reported keyframe positions<br />
<b>2:</b> unsafe normal, same as 1 but no error will be thrown if the exact destination has to be guessed<br />
<b>3:</b> aggressive, seek in the forward direction even if no closer keyframe is known to exist, only useful for testing and containers where avformat doesn't report keyframes properly
</p>
<p>
<b>width &amp; height:</b>
Width and height to resize to. Value below or equal to 0 is the same as specifying the input dimensions.
</p>
<p>
<b>resizer:</b>
Selects the resizer used for resampling the chroma planes and normal resizing. The available methods are: FAST_BILINEAR, BILINEAR, BICUBIC, X, POINT, AREA, BICUBLIN, GAUSS, SINC, LANCZOS and SPLINE.
</p>
<p>
<b>colorspace:</b>
The colorspace to convert to. The names are YV12, YUY2, RGB24, RGB32 and the empty string for same as input.
</p>
<h2>PP string format</h2>
<pre>
Available postprocessing filters:
Filters Options
short long name short long option Description
* * a autoq CPU power dependent enabler
c chrom chrominance filtering enabled
y nochrom chrominance filtering disabled
n noluma luma filtering disabled
hb hdeblock (2 threshold) horizontal deblocking filter
1. difference factor: default=32, higher -> more deblocking
2. flatness threshold: default=39, lower -> more deblocking
the h & v deblocking filters share these
so you can't set different thresholds for h / v
vb vdeblock (2 threshold) vertical deblocking filter
ha hadeblock (2 threshold) horizontal deblocking filter
va vadeblock (2 threshold) vertical deblocking filter
h1 x1hdeblock experimental h deblock filter 1
v1 x1vdeblock experimental v deblock filter 1
dr dering deringing filter
al autolevels automatic brightness / contrast
f fullyrange stretch luminance to (0..255)
lb linblenddeint linear blend deinterlacer
li linipoldeint linear interpolating deinterlace
ci cubicipoldeint cubic interpolating deinterlacer
md mediandeint median deinterlacer
fd ffmpegdeint ffmpeg deinterlacer
l5 lowpass5 FIR lowpass deinterlacer
de default hb:a,vb:a,dr:a
fa fast h1:a,v1:a,dr:a
ac ha:a:128:7,va:a,dr:a
tn tmpnoise (3 threshold) temporal noise reducer
1. <= 2. <= 3. larger -> stronger filtering
fq forceQuant <quantizer> force quantizer
Usage:
<filterName>[:<option>[:<option>...]][[,|/][-]<filterName>[:<option>...]]...
long form example:
vdeblock:autoq/hdeblock:autoq/linblenddeint default,-vdeblock
short form example:
vb:a/hb:a/lb de,-vb
more examples:
tn:64:128:256
</pre>
<h2>Compiling</h2>
<p><b>zlib</b> from http://www.zlib.net/</p>
<p><b>FFmpeg svn</b> from http://ffmpeg.mplayerhq.hu/</p>
<p><b>Required FFmpeg Configuration:</b>
./configure --enable-memalign-hack --enable-gpl --enable-swscale --enable-postproc</p>
<p><b>Suggested Additional Options:</b>
--enable-w32threads --disable-encoders --disable-muxers --enable-small --enable-libfaad --disable-debug</p>
<p>
Note that --enable-w32threads is required for multithreaded decoding to work.
</p>
<h2>Changes</h2>
<ul>
<li>1.21<ul>
<li>Updated FFmpeg to rev 14845 (No more avi opening issues)</li>
</ul></li>
<li>1.20<ul>
<li>Updated FFmpeg to rev 14461</li>
</ul></li>
<li>1.19<ul>
<li>Now automatically detects the number of cpus and uses it as the default for the number of decoding threads</li>
<li>Added SWScale filter which can perform colorspace conversions and resizing and has many different resizers to choose from</li>
<li>Now exports the stored cropping values in mkv files in the variables FFCROP_LEFT, FFCROP_RIGHT, FFCROP_TOP and FFCROP_BOTTOM</li>
<li>Updated FFmpeg to rev 13572</li>
</ul></li>
<li>1.18<ul>
<li>Reverted error handling because it broke vc1</li>
</ul></li>
<li>1.17<ul>
<li>Now sets the video SAR (if any) in the variables FFSAR, FFSAR_NUM and FFSAR_DEN when being invoked</li>
<li>Changed error handling slightly in video decoding (most errors are still ignored)</li>
<li>Fixed a bug where the last frame(s) wouldn't be returned properly in h264 with b-frames+pyramid</li>
<li>Updated FFmpeg to rev 12685</li>
</ul></li>
<li>1.16<ul>
<li>Added many new and missing matroska codec ids</li>
<li>Added threads argument to set the number of decoding threads used</li>
<li>Completely removed FLAC cache</li>
<li>Updated FFmpeg to rev 12382</li>
</ul></li>
<li>1.15<ul>
<li>Updated FFmpeg to rev 11518</li>
</ul></li>
<li>1.14<ul>
<li>If the output colorspace is YV12 or YUY2 the width and height may be automatically cropped by one pixel to make it an even number</li>
<li>FLAC cache is disabled because the static FLAC lib doesn't want to link</li>
<li>Added the experimental FFAudioSource which doesn't need a huge uncompressed cache</li>
<li>The plugin is now statically compiled</li>
<li>Updated FFmpeg to rev 11413</li>
</ul></li>
<li>1.13<ul>
<li>Now always sorts the output timecodes so native avc in mkv won't have out of order values</li>
<li>Fixed the missing '# timecode format v2' line in saved timecode files</li>
<li>Now properly handles video files where the output resolution isn't known until a frame has been decoded (seems to fix flv4)</li>
<li>Now throws an error if the video decoder returns zero size video</li>
<li>Added an avsi file for easy autoloading</li>
<li>Updated libFLAC to 1.2.1</li>
<li>Updated FFmpeg to rev 10671 + camtasia swapped colors fix</li>
</ul></li>
<li>1.12<ul>
<li>Now caches the last fully decoded frame to increase the reliability of seekmode=-1 and possibly reduce seeking in other modes</li>
<li>Video that needs to be converted to a suitable output format should now always have correct colors (was reversed in 1.11 and inconsistent in earlier versions)</li>
<li>Added seekmode=-1 which is mostly useful for opening image files very carefully</li>
<li>Now throws an error if the container is unseekable and seekmode=-1 isn't set</li>
<li>Updated FFmpeg to rev 10492 + camtasia swapped colors fix</li>
</ul></li>
<li>1.11<ul>
<li>Now officially uses the MIT license</li>
<li>Much cleaner source</li>
<li>Can be compiled without support for compressing the audio cache with FLAC</li>
<li>Supports more audio formats in matroska</li>
<li>RGB24 output no longer has swapped colors if the video is converted to it for output (there still seems to be some bugs lurking when conversion is done with libswscale)</li>
<li>Fixed an access violation on close when no audio is opened (introduced in 1.10)</li>
<li>Updated FFmpeg to rev 10423</li>
</ul></li>
<li>1.10<ul>
<li>The audio cache compression level is now ignored if the source isn't 16bit and the raw format is used instead</li>
<li>FLAC is now actually initialized properly so the cache actually works for files that aren't stereo (16bit limit still applies)</li>
<li>Now uses proper callbacks for FLAC so it works with larger than 2GB files</li>
<li>Doesn't (over)write the video cache with an empty one in certain cases when avformat is used for the source</li>
</ul></li>
<li>1.9<ul>
<li>Added the possibility to compress the audio cache with FLAC (currently only works with 16bit audio)</li>
<li>Added another planar YUV 4:2:0 format to the supported output formats (fixes certain mov files)</li>
<li>Less memory is now allocated on the stack which makes av_find_stream_info() work for all files (fixes certain mov files)</li>
<li>Updated FFmpeg to rev 10186</li>
</ul></li>
<li>1.8<ul>
<li>Updated FFmpeg to rev 10141</li>
</ul></li>
<li>1.7<ul>
<li>Updated FFmpeg</li>
<li>Fixed error with mkv for codecs without codec private data and the first packet doesn't belong to them</li>
</ul></li>
<li>1.6<ul>
<li>Fixed ac3 and other formats stored in mkv</li>
<li>Skip unnecessary seeking when index information already exists (gif file opening only 3/4 broken now)</li>
<li>Throws an error when the selected audio/video track has no frames/samples</li>
</ul></li>
<li>1.5<ul>
<li>Fixed a bug that made avformat opened files only return audio if only the audio cache needed to be created</li>
<li>Rejects more corrupt cache files</li>
<li>Fixed crash when a 0 byte audio cache file is present</li>
<li>Improved framerate guessing for avformat which now takes the duration of the first frame into account</li>
<li>Fixed a bug introduced in 1.4 that would make the number of reported frames too high for files opened with avformat</li>
<li>Fixed mpeg2 and probably some other formats stored in mkv</li>
<li>Fixed issues with large mkv files and large audio cache files</li>
<li>FFmpeg is now compiled with liba52 and faad2</li>
</ul></li>
<li>1.4<ul>
<li>Uses the average framerate for mkv files</li>
<li>Naming scheme of cache files changed to prevent confusion with the default names in files with multiple tracks of the same type</li>
<li>Use mmx optimizations in swscaler when possible</li>
<li>Now uses normal windows linebreaks in all files</li>
<li>Removed FFAudioSource</li>
<li>Merged FFVideoSource and FFAudioRefSource into FFmpegSource</li>
<li>Added postprocessing with libpostproc in FFmpegSource and separately in FFPP</li>
</ul></li>
<li>1.3<ul>
<li>Compiled against ffmpeg rev9620</li>
<li>Added FFAudioRefSource</li>
<li>Added FFAudioSource (has big issues)</li>
<li>Renamed FFmpegSource to FFVideoSource</li>
<li>Adjusted seeking in the forward direction to only be done if the requested frame is more than 10 frames away to reduce unnecessary seeking</li>
<li>Now outputs the last frame properly when there are decoding delays</li>
</ul></li>
<li>1.2<ul>
<li>Compiled against ffmpeg rev9451</li>
<li>Somewhat cleaner source code</li>
<li>Linear access in addition to a few other modes of seeking can now be forced</li>
<li>Can now save the index information to a file which makes subsequent file opening fast</li>
<li>No longer skips indexing for any format</li>
</ul></li>
<li>1.1<ul>
<li>Skip indexing for avi</li>
<li>Prefix all error messages with the plugin name</li>
<li>Can write v2 timecodes to a file</li>
<li>Fixed reported framerate</li>
</ul></li>
</ul>
</body>
</html>

View file

@ -1,97 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// FFPP: standalone libpostproc filter for Avisynth clips.
// Validates the pp string and quality, builds the libpostproc mode and
// context, and -- for YUY2 input -- sets up packed<->planar 4:2:2
// conversion contexts, since libpostproc operates on planar data only.
// Throws through Env on any invalid argument or setup failure.
// Fix: corrected the misspelled user-facing error message
// ("postprocesing" -> "postprocessing").
FFPP::FFPP(PClip AChild, const char *APPString, int AQuality, IScriptEnvironment *Env) : GenericVideoFilter(AChild) {
if (!strcmp(APPString, ""))
Env->ThrowError("FFPP: PP argument is empty");
if (AQuality < 0 || AQuality > PP_QUALITY_MAX)
Env->ThrowError("FFPP: Quality is out of range");
// Null everything first so the destructor is safe even if we throw below.
PPContext = NULL;
PPMode = NULL;
SWSTo422P = NULL;
SWSFrom422P = NULL;
memset(&InputPicture, 0, sizeof(InputPicture));
memset(&OutputPicture, 0, sizeof(OutputPicture));
PPMode = pp_get_mode_by_name_and_quality((char *)APPString, AQuality);
if (!PPMode)
Env->ThrowError("FFPP: Invalid postprocessing settings");
int Flags = GetPPCPUFlags(Env);
if (vi.IsYV12()) {
Flags |= PP_FORMAT_420;
} else if (vi.IsYUY2()) {
Flags |= PP_FORMAT_422;
// Packed YUY2 must be converted to planar 4:2:2 and back around pp_postprocess.
SWSTo422P = sws_getContext(vi.width, vi.height, PIX_FMT_YUV422, vi.width, vi.height, PIX_FMT_YUV422P, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
SWSFrom422P = sws_getContext(vi.width, vi.height, PIX_FMT_YUV422P, vi.width, vi.height, PIX_FMT_YUV422, GetSWSCPUFlags(Env) | SWS_BICUBIC, NULL, NULL, NULL);
avpicture_alloc(&InputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
avpicture_alloc(&OutputPicture, PIX_FMT_YUV422P, vi.width, vi.height);
} else {
Env->ThrowError("FFPP: Only YV12 and YUY2 video supported");
}
PPContext = pp_get_context(vi.width, vi.height, Flags);
if (!PPContext)
Env->ThrowError("FFPP: Failed to create context");
}
// Release everything acquired in the constructor. Each pointer is
// guarded since construction may have thrown partway through; the
// pictures were zeroed up front so avpicture_free is always safe.
FFPP::~FFPP() {
	if (SWSTo422P) {
		sws_freeContext(SWSTo422P);
	}
	if (SWSFrom422P) {
		sws_freeContext(SWSFrom422P);
	}
	if (PPMode) {
		pp_free_mode(PPMode);
	}
	if (PPContext) {
		pp_free_context(PPContext);
	}
	avpicture_free(&InputPicture);
	avpicture_free(&OutputPicture);
}
// Fetch frame n from the child clip, run libpostproc on it and return
// the processed frame. YV12 is processed plane-by-plane in place;
// YUY2 is converted packed -> planar 4:2:2, processed, and converted
// back, using the contexts and pictures set up in the constructor.
PVideoFrame FFPP::GetFrame(int n, IScriptEnvironment* Env) {
PVideoFrame Src = child->GetFrame(n, Env);
PVideoFrame Dst = Env->NewVideoFrame(vi);
if (vi.IsYV12()) {
const uint8_t *SrcData[3] = {(uint8_t *)Src->GetReadPtr(PLANAR_Y), (uint8_t *)Src->GetReadPtr(PLANAR_U), (uint8_t *)Src->GetReadPtr(PLANAR_V)};
int SrcStride[3] = {Src->GetPitch(PLANAR_Y), Src->GetPitch(PLANAR_U), Src->GetPitch(PLANAR_V)};
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
pp_postprocess(SrcData, SrcStride, DstData, DstStride, vi.width, vi.height, NULL, 0, PPMode, PPContext, 0);
} else if (vi.IsYUY2()) {
uint8_t *SrcData[1] = {(uint8_t *)Src->GetReadPtr()};
int SrcStride[1] = {Src->GetPitch()};
// Packed -> planar, postprocess the planar copy, then planar -> packed into Dst.
sws_scale(SWSTo422P, SrcData, SrcStride, 0, vi.height, InputPicture.data, InputPicture.linesize);
pp_postprocess(const_cast<const uint8_t **>(InputPicture.data), InputPicture.linesize, OutputPicture.data, OutputPicture.linesize, vi.width, vi.height, NULL, 0, PPMode, PPContext, 0);
uint8_t *DstData[1] = {Dst->GetWritePtr()};
int DstStride[1] = {Dst->GetPitch()};
sws_scale(SWSFrom422P, OutputPicture.data, OutputPicture.linesize, 0, vi.height, DstData, DstStride);
}
return Dst;
}

View file

@ -1,191 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// Translate the CPU capabilities reported by Avisynth into libpostproc
// PP_CPU_CAPS_* flags.
// Bug fix: the capability bits were OR'd into the wrong variable
// (CPUFlags, the input, instead of Flags, the accumulator), so the
// function always returned 0 and postprocessing never used its
// MMX/MMX2/3DNow optimized paths.
int GetPPCPUFlags(IScriptEnvironment *Env) {
	int Flags = 0;
	long CPUFlags = Env->GetCPUFlags();
	if (CPUFlags & CPUF_MMX)
		Flags |= PP_CPU_CAPS_MMX;
	if (CPUFlags & CPUF_INTEGER_SSE)
		Flags |= PP_CPU_CAPS_MMX2;
	if (CPUFlags & CPUF_3DNOW)
		Flags |= PP_CPU_CAPS_3DNOW;
	return Flags;
}
// Translate the CPU capabilities reported by Avisynth into libswscale
// SWS_CPU_CAPS_* flags.
// Bug fix: same defect as GetPPCPUFlags -- the capability bits were
// OR'd into CPUFlags (the input) instead of Flags (the accumulator),
// so 0 was always returned and swscale never used MMX/MMX2/3DNow.
int GetSWSCPUFlags(IScriptEnvironment *Env) {
	int Flags = 0;
	long CPUFlags = Env->GetCPUFlags();
	if (CPUFlags & CPUF_MMX)
		Flags |= SWS_CPU_CAPS_MMX;
	if (CPUFlags & CPUF_INTEGER_SSE)
		Flags |= SWS_CPU_CAPS_MMX2;
	if (CPUFlags & CPUF_3DNOW)
		Flags |= SWS_CPU_CAPS_3DNOW;
	return Flags;
}
int CSNameToPIXFMT(const char * ACSName, int ADefault) {
if (!strcmpi(ACSName, ""))
return ADefault;
if (!strcmpi(ACSName, "YV12"))
return PIX_FMT_YUV420P;
if (!strcmpi(ACSName, "YUY2"))
return PIX_FMT_YUYV422;
if (!strcmpi(ACSName, "RGB24"))
return PIX_FMT_BGR24;
if (!strcmpi(ACSName, "RGB32"))
return PIX_FMT_RGB32;
return PIX_FMT_NONE;
}
int ResizerNameToSWSResizer(const char *AResizerName) {
if (!strcmpi(AResizerName, "FAST_BILINEAR"))
return SWS_FAST_BILINEAR;
if (!strcmpi(AResizerName, "BILINEAR"))
return SWS_BILINEAR;
if (!strcmpi(AResizerName, "BICUBIC"))
return SWS_BICUBIC;
if (!strcmpi(AResizerName, "X"))
return SWS_X;
if (!strcmpi(AResizerName, "POINT"))
return SWS_POINT;
if (!strcmpi(AResizerName, "AREA"))
return SWS_AREA;
if (!strcmpi(AResizerName, "BICUBLIN"))
return SWS_BICUBLIN;
if (!strcmpi(AResizerName, "GAUSS"))
return SWS_GAUSS;
if (!strcmpi(AResizerName, "SINC"))
return SWS_SINC;
if (!strcmpi(AResizerName, "LANCZOS"))
return SWS_LANCZOS;
if (!strcmpi(AResizerName, "SPLINE"))
return SWS_SPLINE;
return 0;
}
int GetNumberOfLogicalCPUs() {
SYSTEM_INFO SI;
GetSystemInfo(&SI);
return SI.dwNumberOfProcessors;
}
// Avisynth entry point for FFmpegSource(...). Performs one-time lavf
// registration (tracked via UserData), validates the script arguments,
// probes the container, and dispatches to either the MatroskaParser or
// the avformat backed implementation.
AVSValue __cdecl CreateFFmpegSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!UserData) {
// First invocation only: global libavformat codec/format registration.
av_register_all();
UserData = (void *)-1;
}
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
// Argument order matches the signature registered in AvisynthPluginInit2.
const char *Source = Args[0].AsString();
int VTrack = Args[1].AsInt(-1);
int ATrack = Args[2].AsInt(-2);
const char *Timecodes = Args[3].AsString("");
bool VCache = Args[4].AsBool(true);
const char *VCacheFile = Args[5].AsString("");
const char *ACacheFile = Args[6].AsString("");
const char *PPString = Args[7].AsString("");
int PPQuality = Args[8].AsInt(PP_QUALITY_MAX);
int Threads = Args[9].AsInt(-1);
int SeekMode = Args[10].AsInt(1);
if (VTrack <= -2 && ATrack <= -2)
Env->ThrowError("FFmpegSource: No tracks selected");
if (SeekMode < -1 || SeekMode > 3)
Env->ThrowError("FFmpegSource: Invalid seekmode selected");
// Threads <= 0 means "auto": use the number of logical CPUs.
if (Threads <= 0)
Threads = GetNumberOfLogicalCPUs();
if (Threads < 1)
Env->ThrowError("FFmpegSource: Invalid thread count");
// Open the file just long enough to identify the container format.
AVFormatContext *FormatContext;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open %s", Args[0].AsString());
bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
av_close_input_file(FormatContext);
if (IsMatroska) {
return new FFMatroskaSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, PPString, PPQuality, Threads, Env);
} else {
// Do a separate indexing pass, enjoy the constructor sideeffects
if (SeekMode == -1)
delete new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, PPString, PPQuality, Threads, -2, Env);
return new FFmpegSource(Source, VTrack, ATrack, Timecodes, VCache, VCacheFile, ACacheFile, PPString, PPQuality, Threads, SeekMode, Env);
}
}
// Avisynth entry point for FFAudioSource(...). Mirrors
// CreateFFmpegSource: one-time lavf registration, argument validation,
// container probe, then dispatch to the matroska or avformat audio source.
AVSValue __cdecl CreateFFAudioSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
if (!UserData) {
// First invocation only: global libavformat codec/format registration.
av_register_all();
UserData = (void *)1;
}
if (!Args[0].Defined())
Env->ThrowError("FFmpegSource: No source specified");
const char *Source = Args[0].AsString();
int ATrack = Args[1].AsInt(-1);
const char *ACacheFile = Args[2].AsString("");
const char *ADemuxedFile = Args[3].AsString("");
if (ATrack <= -2)
Env->ThrowError("FFmpegSource: No tracks selected");
// Open the file just long enough to identify the container format.
AVFormatContext *FormatContext;
if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
Env->ThrowError("FFmpegSource: Couldn't open %s", Args[0].AsString());
bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");
av_close_input_file(FormatContext);
if (IsMatroska) {
return new FFMatroskaAudioSource(Source, ATrack, ACacheFile, Env);
} else {
// The demuxed-audio cache file is only used by the avformat path.
return new FFmpegAudioSource(Source, ATrack, ACacheFile, ADemuxedFile, Env);
}
}
// Avisynth entry point for FFPP(clip, pp, ppquality).
AVSValue __cdecl CreateFFPP(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
return new FFPP(Args[0].AsClip(), Args[1].AsString(""), Args[2].AsInt(PP_QUALITY_MAX), Env);
}
// Avisynth entry point for SWScale(clip, width, height, resizer, colorspace).
// Width/height default to 0, which SWScale treats as "keep input dimension".
AVSValue __cdecl CreateSWScale(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
return new SWScale(Args[0].AsClip(), Args[1].AsInt(0), Args[2].AsInt(0), Args[3].AsString("BICUBIC"), Args[4].AsString(""), Env);
}
// Plugin entry point invoked by Avisynth when the DLL is loaded;
// registers every script-visible function with its parameter signature.
// Cleanup: removed the stray ';' that followed the function body.
extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
	Env->AddFunction("FFmpegSource", "[source]s[vtrack]i[atrack]i[timecodes]s[vcache]b[vcachefile]s[acachefile]s[pp]s[ppquality]i[threads]i[seekmode]i", CreateFFmpegSource, 0);
	Env->AddFunction("FFAudioSource", "[source]s[atrack]i[acachefile]s[acachefile2]s", CreateFFAudioSource, 0);
	Env->AddFunction("FFPP", "c[pp]s[ppquality]i", CreateFFPP, 0);
	Env->AddFunction("SWScale", "c[width]i[height]i[resizer]s[colorspace]s", CreateSWScale, 0);
	return "FFmpegSource";
}

View file

@ -1,99 +0,0 @@
// Copyright (c) 2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
// SWScale: colorspace conversion and resizing for Avisynth clips via
// libswscale. Maps the input Avisynth pixel type to an ffmpeg pixel
// format, applies the requested output size/colorspace/resizer, and
// creates the conversion context. Throws through Env on bad arguments.
// Fix: removed the leftover AResizer argument passed to two ThrowError
// calls whose format strings contain no conversion specifier (a
// copy-paste remnant from the "Invalid resizer" message above them).
SWScale::SWScale(PClip AChild, int AResizeToWidth, int AResizeToHeight, const char *AResizer, const char *AConvertToFormat, IScriptEnvironment *Env) : GenericVideoFilter(AChild) {
Context = NULL;
OrigWidth = vi.width;
OrigHeight = vi.height;
// Flip when exactly one side of the conversion is YUV (see the XOR below)
// -- presumably because Avisynth stores RGB bottom-up; confirm against
// the Avisynth frame layout documentation.
FlipOutput = vi.IsYUV();
int ConvertFromFormat = PIX_FMT_NONE;
if (vi.IsYV12())
ConvertFromFormat = PIX_FMT_YUV420P;
if (vi.IsYUY2())
ConvertFromFormat = PIX_FMT_YUYV422;
if (vi.IsRGB24())
ConvertFromFormat = PIX_FMT_BGR24;
if (vi.IsRGB32())
ConvertFromFormat = PIX_FMT_RGB32;
// A size <= 0 means "keep the input dimension".
if (AResizeToHeight <= 0)
AResizeToHeight = OrigHeight;
else
vi.height = AResizeToHeight;
if (AResizeToWidth <= 0)
AResizeToWidth = OrigWidth;
else
vi.width = AResizeToWidth;
int ConvertToFormat = CSNameToPIXFMT(AConvertToFormat, ConvertFromFormat);
if (ConvertToFormat == PIX_FMT_NONE)
Env->ThrowError("SWScale: Invalid colorspace specified (%s)", AConvertToFormat);
switch (ConvertToFormat) {
case PIX_FMT_YUV420P: vi.pixel_type = VideoInfo::CS_I420; break;
case PIX_FMT_YUYV422: vi.pixel_type = VideoInfo::CS_YUY2; break;
case PIX_FMT_BGR24: vi.pixel_type = VideoInfo::CS_BGR24; break;
case PIX_FMT_RGB32: vi.pixel_type = VideoInfo::CS_BGR32; break;
}
FlipOutput ^= vi.IsYUV();
int Resizer = ResizerNameToSWSResizer(AResizer);
if (Resizer == 0)
Env->ThrowError("SWScale: Invalid resizer specified (%s)", AResizer);
// Subsampled output formats need even dimensions.
if (ConvertToFormat == PIX_FMT_YUV420P && vi.height & 1)
Env->ThrowError("SWScale: mod 2 output height required");
if ((ConvertToFormat == PIX_FMT_YUV420P || ConvertToFormat == PIX_FMT_YUYV422) && vi.width & 1)
Env->ThrowError("SWScale: mod 2 output width required");
// may one day need a SWS_CS_DEFAULT in flags
Context = sws_getContext(OrigWidth, OrigHeight, ConvertFromFormat, vi.width, vi.height, ConvertToFormat, GetSWSCPUFlags(Env) | Resizer, NULL, NULL, NULL);
}
// Free the libswscale conversion context if one was successfully created.
SWScale::~SWScale() {
	if (Context != NULL) {
		sws_freeContext(Context);
	}
}
// Fetch frame n from the child clip and run it through the swscale
// context. When FlipOutput is set (YUV<->RGB conversion, see the
// constructor) the destination is written bottom-up by pointing at the
// last row of each plane and using negative strides.
PVideoFrame SWScale::GetFrame(int n, IScriptEnvironment *Env) {
PVideoFrame Src = child->GetFrame(n, Env);
PVideoFrame Dst = Env->NewVideoFrame(vi);
uint8_t *SrcData[3] = {(uint8_t *)Src->GetReadPtr(PLANAR_Y), (uint8_t *)Src->GetReadPtr(PLANAR_U), (uint8_t *)Src->GetReadPtr(PLANAR_V)};
int SrcStride[3] = {Src->GetPitch(PLANAR_Y), Src->GetPitch(PLANAR_U), Src->GetPitch(PLANAR_V)};
if (FlipOutput) {
// Start at the last row of each destination plane and step backwards.
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y) + Dst->GetPitch(PLANAR_Y) * (Dst->GetHeight(PLANAR_Y) - 1), Dst->GetWritePtr(PLANAR_U) + Dst->GetPitch(PLANAR_U) * (Dst->GetHeight(PLANAR_U) - 1), Dst->GetWritePtr(PLANAR_V) + Dst->GetPitch(PLANAR_V) * (Dst->GetHeight(PLANAR_V) - 1)};
int DstStride[3] = {-Dst->GetPitch(PLANAR_Y), -Dst->GetPitch(PLANAR_U), -Dst->GetPitch(PLANAR_V)};
sws_scale(Context, SrcData, SrcStride, 0, OrigHeight, DstData, DstStride);
} else {
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
sws_scale(Context, SrcData, SrcStride, 0, OrigHeight, DstData, DstStride);
}
return Dst;
}

View file

@ -1,307 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "ffmpegsource.h"
/* Translate a Matroska track's CodecID string (plus, for "V_MS/VFW/FOURCC"
 * VfW-compatibility tracks, the FOURCC stored in the BITMAPINFOHEADER codec
 * private data) into the FFmpeg CodecID used to open a decoder.
 * Returns CODEC_ID_NONE when the codec is unknown or deliberately unsupported.
 */
CodecID MatroskaToFFCodecID(TrackInfo *TI) {
	char *Codec = TI->CodecID;
	/* Video Codecs */
	if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
		// fourcc list from ffdshow
		switch (((BITMAPINFOHEADER *)TI->CodecPrivate)->biCompression) {
			case MAKEFOURCC('F', 'F', 'D', 'S'):
			case MAKEFOURCC('F', 'V', 'F', 'W'):
			case MAKEFOURCC('X', 'V', 'I', 'D'):
			case MAKEFOURCC('D', 'I', 'V', 'X'):
			case MAKEFOURCC('D', 'X', '5', '0'):
			case MAKEFOURCC('M', 'P', '4', 'V'):
			case MAKEFOURCC('3', 'I', 'V', 'X'):
			case MAKEFOURCC('W', 'V', '1', 'F'):
			case MAKEFOURCC('F', 'M', 'P', '4'):
			case MAKEFOURCC('S', 'M', 'P', '4'):
				return CODEC_ID_MPEG4;
			case MAKEFOURCC('D', 'I', 'V', '3'):
			case MAKEFOURCC('D', 'V', 'X', '3'):
			case MAKEFOURCC('M', 'P', '4', '3'):
				return CODEC_ID_MSMPEG4V3;
			case MAKEFOURCC('M', 'P', '4', '2'):
				return CODEC_ID_MSMPEG4V2;
			case MAKEFOURCC('M', 'P', '4', '1'):
				return CODEC_ID_MSMPEG4V1;
			case MAKEFOURCC('W', 'M', 'V', '1'):
				return CODEC_ID_WMV1;
			case MAKEFOURCC('W', 'M', 'V', '2'):
				return CODEC_ID_WMV2;
			case MAKEFOURCC('W', 'M', 'V', '3'):
				return CODEC_ID_WMV3;
/*
			case MAKEFOURCC('M', 'S', 'S', '1'):
			case MAKEFOURCC('M', 'S', 'S', '2'):
			case MAKEFOURCC('W', 'V', 'P', '2'):
			case MAKEFOURCC('W', 'M', 'V', 'P'):
				return CODEC_ID_WMV9_LIB;
*/
			case MAKEFOURCC('W', 'V', 'C', '1'):
				return CODEC_ID_VC1;
			case MAKEFOURCC('V', 'P', '5', '0'):
				return CODEC_ID_VP5;
			case MAKEFOURCC('V', 'P', '6', '0'):
			case MAKEFOURCC('V', 'P', '6', '1'):
			case MAKEFOURCC('V', 'P', '6', '2'):
				return CODEC_ID_VP6;
			case MAKEFOURCC('V', 'P', '6', 'F'):
			case MAKEFOURCC('F', 'L', 'V', '4'):
				return CODEC_ID_VP6F;
			case MAKEFOURCC('C', 'A', 'V', 'S'):
				return CODEC_ID_CAVS;
			case MAKEFOURCC('M', 'P', 'G', '1'):
			case MAKEFOURCC('M', 'P', 'E', 'G'):
				return CODEC_ID_MPEG2VIDEO; // not a typo
			case MAKEFOURCC('M', 'P', 'G', '2'):
			case MAKEFOURCC('E', 'M', '2', 'V'):
			case MAKEFOURCC('M', 'M', 'E', 'S'):
				return CODEC_ID_MPEG2VIDEO;
			case MAKEFOURCC('H', '2', '6', '3'):
			case MAKEFOURCC('S', '2', '6', '3'):
			case MAKEFOURCC('L', '2', '6', '3'):
			case MAKEFOURCC('M', '2', '6', '3'):
			case MAKEFOURCC('U', '2', '6', '3'):
			case MAKEFOURCC('X', '2', '6', '3'):
				return CODEC_ID_H263;
			case MAKEFOURCC('H', '2', '6', '4'):
			case MAKEFOURCC('X', '2', '6', '4'):
			case MAKEFOURCC('V', 'S', 'S', 'H'):
			case MAKEFOURCC('D', 'A', 'V', 'C'):
			case MAKEFOURCC('P', 'A', 'V', 'C'):
			case MAKEFOURCC('A', 'V', 'C', '1'):
				return CODEC_ID_H264;
			case MAKEFOURCC('M', 'J', 'P', 'G'):
			case MAKEFOURCC('L', 'J', 'P', 'G'):
			case MAKEFOURCC('M', 'J', 'L', 'S'):
			case MAKEFOURCC('J', 'P', 'E', 'G'): // questionable fourcc?
			case MAKEFOURCC('A', 'V', 'R', 'N'):
			case MAKEFOURCC('M', 'J', 'P', 'A'):
				return CODEC_ID_MJPEG;
			case MAKEFOURCC('D', 'V', 'S', 'D'):
			case MAKEFOURCC('D', 'V', '2', '5'):
			case MAKEFOURCC('D', 'V', '5', '0'):
			case MAKEFOURCC('C', 'D', 'V', 'C'):
			case MAKEFOURCC('C', 'D', 'V', '5'):
			case MAKEFOURCC('D', 'V', 'I', 'S'):
			case MAKEFOURCC('P', 'D', 'V', 'C'):
				return CODEC_ID_DVVIDEO;
			case MAKEFOURCC('H', 'F', 'Y', 'U'):
				return CODEC_ID_HUFFYUV;
			case MAKEFOURCC('F', 'F', 'V', 'H'):
				return CODEC_ID_FFVHUFF;
			case MAKEFOURCC('C', 'Y', 'U', 'V'):
				return CODEC_ID_CYUV;
			case MAKEFOURCC('A', 'S', 'V', '1'):
				return CODEC_ID_ASV1;
			case MAKEFOURCC('A', 'S', 'V', '2'):
				return CODEC_ID_ASV2;
			case MAKEFOURCC('V', 'C', 'R', '1'):
				return CODEC_ID_VCR1;
			case MAKEFOURCC('T', 'H', 'E', 'O'):
				return CODEC_ID_THEORA;
			case MAKEFOURCC('S', 'V', 'Q', '1'):
				return CODEC_ID_SVQ1;
			case MAKEFOURCC('S', 'V', 'Q', '3'):
				return CODEC_ID_SVQ3;
			case MAKEFOURCC('R', 'P', 'Z', 'A'):
				return CODEC_ID_RPZA;
			case MAKEFOURCC('F', 'F', 'V', '1'):
				return CODEC_ID_FFV1;
			case MAKEFOURCC('V', 'P', '3', '1'):
				return CODEC_ID_VP3;
			case MAKEFOURCC('R', 'L', 'E', '8'):
				return CODEC_ID_MSRLE;
			case MAKEFOURCC('M', 'S', 'Z', 'H'):
				return CODEC_ID_MSZH;
			case MAKEFOURCC('Z', 'L', 'I', 'B'):
				return CODEC_ID_ZLIB;
			case MAKEFOURCC('F', 'L', 'V', '1'):
				return CODEC_ID_FLV1;
/*
			case MAKEFOURCC('P', 'N', 'G', '1'):
				return CODEC_ID_COREPNG;
*/
			case MAKEFOURCC('M', 'P', 'N', 'G'):
				return CODEC_ID_PNG;
/*
			case MAKEFOURCC('A', 'V', 'I', 'S'):
				return CODEC_ID_AVISYNTH;
*/
			case MAKEFOURCC('C', 'R', 'A', 'M'):
				return CODEC_ID_MSVIDEO1;
			case MAKEFOURCC('R', 'T', '2', '1'):
				return CODEC_ID_INDEO2;
			case MAKEFOURCC('I', 'V', '3', '2'):
			case MAKEFOURCC('I', 'V', '3', '1'):
				return CODEC_ID_INDEO3;
			case MAKEFOURCC('C', 'V', 'I', 'D'):
				return CODEC_ID_CINEPAK;
			case MAKEFOURCC('R', 'V', '1', '0'):
				return CODEC_ID_RV10;
			case MAKEFOURCC('R', 'V', '2', '0'):
				return CODEC_ID_RV20;
			case MAKEFOURCC('8', 'B', 'P', 'S'):
				return CODEC_ID_8BPS;
			case MAKEFOURCC('Q', 'R', 'L', 'E'):
				return CODEC_ID_QTRLE;
			case MAKEFOURCC('D', 'U', 'C', 'K'):
				return CODEC_ID_TRUEMOTION1;
			case MAKEFOURCC('T', 'M', '2', '0'):
				return CODEC_ID_TRUEMOTION2;
			case MAKEFOURCC('T', 'S', 'C', 'C'):
				return CODEC_ID_TSCC;
			case MAKEFOURCC('S', 'N', 'O', 'W'):
				return CODEC_ID_SNOW;
			case MAKEFOURCC('Q', 'P', 'E', 'G'):
			case MAKEFOURCC('Q', '1', '_', '0'):
			case MAKEFOURCC('Q', '1', '_', '1'):
				return CODEC_ID_QPEG;
			case MAKEFOURCC('H', '2', '6', '1'):
			case MAKEFOURCC('M', '2', '6', '1'):
				return CODEC_ID_H261;
			case MAKEFOURCC('L', 'O', 'C', 'O'):
				return CODEC_ID_LOCO;
			case MAKEFOURCC('W', 'N', 'V', '1'):
				return CODEC_ID_WNV1;
			case MAKEFOURCC('C', 'S', 'C', 'D'):
				return CODEC_ID_CSCD;
			case MAKEFOURCC('Z', 'M', 'B', 'V'):
				return CODEC_ID_ZMBV;
			case MAKEFOURCC('U', 'L', 'T', 'I'):
				return CODEC_ID_ULTI;
			case MAKEFOURCC('V', 'I', 'X', 'L'):
				return CODEC_ID_VIXL;
			case MAKEFOURCC('A', 'A', 'S', 'C'):
				return CODEC_ID_AASC;
			case MAKEFOURCC('F', 'P', 'S', '1'):
				return CODEC_ID_FRAPS;
			default:
				return CODEC_ID_NONE;
		}
	} else if (!strcmp(Codec, "V_MPEG4/ISO/AVC"))
		return CODEC_ID_H264;
	else if (!strcmp(Codec, "V_MPEG4/ISO/AP"))
		return CODEC_ID_MPEG4;
	else if (!strcmp(Codec, "V_MPEG4/ISO/ASP"))
		return CODEC_ID_MPEG4;
	else if (!strcmp(Codec, "V_MPEG4/ISO/SP"))
		return CODEC_ID_MPEG4;
	else if (!strcmp(Codec, "V_MPEG4/MS/V3"))
		return CODEC_ID_MSMPEG4V3;
	else if (!strcmp(Codec, "V_MPEG2"))
		return CODEC_ID_MPEG2VIDEO;
	else if (!strcmp(Codec, "V_MPEG1"))
		return CODEC_ID_MPEG2VIDEO; // still not a typo
	else if (!strcmp(Codec, "V_VC1"))
		return CODEC_ID_VC1;
	else if (!strcmp(Codec, "V_SNOW"))
		return CODEC_ID_SNOW;
	else if (!strcmp(Codec, "V_THEORA"))
		return CODEC_ID_THEORA;
	else if (!strcmp(Codec, "V_UNCOMPRESSED"))
		return CODEC_ID_NONE; // bleh
	else if (!strcmp(Codec, "V_QUICKTIME"))
		return CODEC_ID_SVQ3;
	else if (!strcmp(Codec, "V_CIPC"))
		return CODEC_ID_NONE; // don't know, don't care
	else if (!strncmp(Codec, "V_REAL/RV", 9)) {
		// RealVideo version is the character immediately after "V_REAL/RV"
		switch (Codec[9]) {
			case '1':
				return CODEC_ID_RV10;
			case '2':
				return CODEC_ID_RV20;
			case '3':
				return CODEC_ID_RV30;
			case '4':
				return CODEC_ID_RV40;
			default:
				return CODEC_ID_NONE;
		}
	/* Audio Codecs */
	} else if (!strcmp(Codec, "A_AC3"))
		return CODEC_ID_AC3;
	else if (!strcmp(Codec, "A_EAC3"))
		return CODEC_ID_AC3;
	else if (!strcmp(Codec, "A_MPEG/L3"))
		return CODEC_ID_MP3;
	else if (!strcmp(Codec, "A_MPEG/L2"))
		return CODEC_ID_MP2;
	else if (!strcmp(Codec, "A_MPEG/L1"))
		return CODEC_ID_MP2; // correct?
	else if (!strcmp(Codec, "A_DTS"))
		return CODEC_ID_DTS;
	else if (!strcmp(Codec, "A_PCM/INT/LIT")) {
		// little-endian integer PCM: pick the id matching the sample bit depth
		switch (TI->AV.Audio.BitDepth) {
			case 8: return CODEC_ID_PCM_S8;
			case 16: return CODEC_ID_PCM_S16LE;
			case 24: return CODEC_ID_PCM_S24LE;
			case 32: return CODEC_ID_PCM_S32LE;
			default: return CODEC_ID_NONE;
		}
	} else if (!strcmp(Codec, "A_PCM/INT/BIG")) {
		// big-endian integer PCM
		switch (TI->AV.Audio.BitDepth) {
			case 8: return CODEC_ID_PCM_S8;
			case 16: return CODEC_ID_PCM_S16BE;
			case 24: return CODEC_ID_PCM_S24BE;
			case 32: return CODEC_ID_PCM_S32BE;
			default: return CODEC_ID_NONE;
		}
	} else if (!strcmp(Codec, "A_PCM/FLOAT/IEEE"))
		return CODEC_ID_NONE; // no float codec id?
	else if (!strcmp(Codec, "A_FLAC"))
		return CODEC_ID_FLAC;
	else if (!strcmp(Codec, "A_MPC"))
		return CODEC_ID_MUSEPACK8;
	else if (!strcmp(Codec, "A_TTA1"))
		return CODEC_ID_TTA;
	else if (!strcmp(Codec, "A_WAVPACK4"))
		return CODEC_ID_WAVPACK;
	else if (!strcmp(Codec, "A_VORBIS"))
		return CODEC_ID_VORBIS;
	else if (!strcmp(Codec, "A_REAL/14_4"))
		return CODEC_ID_RA_144;
	else if (!strcmp(Codec, "A_REAL/28_8"))
		return CODEC_ID_RA_288;
	else if (!strcmp(Codec, "A_REAL/COOK"))
		return CODEC_ID_COOK;
	else if (!strcmp(Codec, "A_REAL/SIPR"))
		return CODEC_ID_NONE; // no sipr codec id?
	else if (!strcmp(Codec, "A_REAL/ATRC"))
		return CODEC_ID_ATRAC3;
	else if (!strncmp(Codec, "A_AAC", 5))
		return CODEC_ID_AAC;
	else if (!strcmp(Codec, "A_SPEEX"))
		return CODEC_ID_SPEEX;
	else if (!strcmp(Codec, "A_QUICKTIME"))
		return CODEC_ID_NONE; // no
	else if (!strcmp(Codec, "A_MS/ACM")) {
		// nothing useful here anyway?
		//#include "Mmreg.h"
		//((WAVEFORMATEX *)TI->CodecPrivate)->wFormatTag
		return CODEC_ID_NONE;
	} else
		return CODEC_ID_NONE;
}

View file

@ -1,92 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#include "stdiostream.h"
/* StdIoStream methods */
/* read count bytes into buffer starting at file position pos
* return the number of bytes read, -1 on error or 0 on EOF
*/
/* read count bytes into buffer starting at file position pos
 * return the number of bytes read, -1 on error or 0 on EOF
 */
int StdIoRead(StdIoStream *st, ulonglong pos, void *buffer, int count) {
  size_t rd;
  /* A non-positive request can never transfer data; return 0 immediately so
   * the zero-item fread() below is not misreported as an error with a stale
   * errno value. */
  if (count <= 0)
    return 0;
  if (_fseeki64(st->fp, pos, SEEK_SET)) {
    st->error = errno;
    return -1;
  }
  rd = fread(buffer, 1, (size_t)count, st->fp);
  if (rd == 0) {
    if (feof(st->fp))
      return 0;
    st->error = errno;
    return -1;
  }
  /* rd <= count <= INT_MAX here, so the narrowing cast is safe */
  return (int)rd;
}
/* scan for a signature sig(big-endian) starting at file position pos
* return position of the first byte of signature or -1 if error/not found
*/
/* scan for a signature sig(big-endian) starting at file position start;
 * returns the offset of the signature's first byte, or -1 when it is not
 * found or the initial seek fails. */
longlong StdIoScan(StdIoStream *st, ulonglong start, unsigned signature) {
  FILE *stream = st->fp;
  unsigned window = 0;
  int byte;

  if (_fseeki64(stream, start, SEEK_SET))
    return -1;

  for (byte = getc(stream); byte != EOF; byte = getc(stream)) {
    /* slide each new byte into a 32-bit rolling window and compare */
    window = ((window << 8) | (unsigned)byte) & 0xffffffff;
    if (window == signature)
      return _ftelli64(stream) - 4;
  }
  return -1;
}
/* return cache size, this is used to limit readahead */
/* return cache size, this is used to limit readahead */
unsigned StdIoGetCacheSize(StdIoStream *st) {
  (void)st; /* limit is a compile-time constant, independent of the stream */
  return CACHESIZE;
}
/* return last error message */
/* return a human-readable message for the last recorded errno value */
const char *StdIoGetLastError(StdIoStream *st) {
  int code = st->error;
  return strerror(code);
}
/* memory allocation, this is done via stdlib */
/* parser allocation callback; delegates straight to the stdlib heap */
void *StdIoMalloc(StdIoStream *st, size_t size) {
  (void)st; /* per-stream state is not needed for allocation */
  return malloc(size);
}
/* parser reallocation callback; delegates straight to the stdlib heap */
void *StdIoRealloc(StdIoStream *st, void *mem, size_t size) {
  (void)st;
  return realloc(mem, size);
}
/* parser deallocation callback; releases memory obtained from StdIoMalloc
 * or StdIoRealloc */
void StdIoFree(StdIoStream *st, void *mem) {
  (void)st;
  free(mem);
}
/* progress report handler for lengthy operations
* returns 0 to abort operation, nonzero to continue
*/
/* progress report handler for lengthy operations
 * returns 0 to abort operation, nonzero to continue
 */
int StdIoProgress(StdIoStream *st, ulonglong cur, ulonglong max) {
  (void)st;
  (void)cur;
  (void)max;
  return 1; /* never request an abort */
}

View file

@ -1,82 +0,0 @@
// Copyright (c) 2007-2008 Fredrik Mellbin
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
#ifndef STDIOSTREAM_H
#define STDIOSTREAM_H
#include <stdlib.h>
#include <stdio.h>
#include <errno.h>
#include <string.h>
#include <fcntl.h>
#include <io.h>
#include "MatroskaParser.h"
#define CACHESIZE 65536
/************\
* Structures *
\************/
/* first we need to create an I/O object that the parser will use to read the
* source file
*/
/* I/O object handed to the Matroska parser. The generic InputStream callback
 * table is the first member, so a StdIoStream* can be passed wherever the
 * parser expects an InputStream*. */
struct StdIoStream {
  struct InputStream base; /* parser callback interface; keep as first member */
  FILE *fp;                /* underlying stdio stream */
  int error;               /* errno captured by the last failing operation */
};
typedef struct StdIoStream StdIoStream;
/***********\
* Functions *
\***********/
/* read count bytes into buffer starting at file position pos
* return the number of bytes read, -1 on error or 0 on EOF
*/
int StdIoRead(StdIoStream *st, ulonglong pos, void *buffer, int count);
/* scan for a signature sig(big-endian) starting at file position pos
* return position of the first byte of signature or -1 if error/not found
*/
longlong StdIoScan(StdIoStream *st, ulonglong start, unsigned signature);
/* return cache size, this is used to limit readahead */
unsigned StdIoGetCacheSize(StdIoStream *st);
/* return last error message */
const char *StdIoGetLastError(StdIoStream *st);
/* memory allocation, this is done via stdlib */
void *StdIoMalloc(StdIoStream *st, size_t size);
void *StdIoRealloc(StdIoStream *st, void *mem, size_t size);
void StdIoFree(StdIoStream *st, void *mem);
/* progress report handler for lengthy operations
* returns 0 to abort operation, nonzero to continue
*/
int StdIoProgress(StdIoStream *st, ulonglong cur, ulonglong max);
#endif /* #ifndef STDIOSTREAM_H */