Imported FFmpegSource
Originally committed to SVN as r1223.
This commit is contained in:
parent
5b8273d67c
commit
3e2ff3da77
6 changed files with 5251 additions and 0 deletions
3311
FFmpegSource/MatroskaParser.c
Normal file
3311
FFmpegSource/MatroskaParser.c
Normal file
File diff suppressed because it is too large
Load diff
399
FFmpegSource/MatroskaParser.h
Normal file
399
FFmpegSource/MatroskaParser.h
Normal file
|
@ -0,0 +1,399 @@
|
|||
/*
|
||||
* Copyright (c) 2004-2006 Mike Matsnev. All Rights Reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
* are met:
|
||||
*
|
||||
* 1. Redistributions of source code must retain the above copyright
|
||||
* notice immediately at the beginning of the file, without modification,
|
||||
* this list of conditions, and the following disclaimer.
|
||||
* 2. Redistributions in binary form must reproduce the above copyright
|
||||
* notice, this list of conditions and the following disclaimer in the
|
||||
* documentation and/or other materials provided with the distribution.
|
||||
* 3. Absolutely no warranty of function or purpose is made by the author
|
||||
* Mike Matsnev.
|
||||
*
|
||||
* THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
|
||||
* IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
|
||||
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
|
||||
* IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
|
||||
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
|
||||
* NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
|
||||
* DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
|
||||
* THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
|
||||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* $Id: MatroskaParser.h,v 1.19 2006/03/11 10:57:13 mike Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef MATROSKA_PARSER_H
|
||||
#define MATROSKA_PARSER_H
|
||||
|
||||
/* Random notes:
|
||||
*
|
||||
* The parser does not process frame data in any way and does not read it into
|
||||
* the queue. The app should read it via mkv_ReadData if it is interested.
|
||||
*
|
||||
* The code here is 64-bit clean and was tested on FreeBSD/sparc 64-bit big endian
|
||||
* system
|
||||
*/
|
||||
|
||||
#ifdef MPDLLBUILD
|
||||
#define X __declspec(dllexport)
|
||||
#else
|
||||
#ifdef MPDLL
|
||||
#define X __declspec(dllimport)
|
||||
#pragma comment(lib,"MatroskaParser")
|
||||
#else
|
||||
#define X
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define MATROSKA_COMPRESSION_SUPPORT
|
||||
#define MATROSKA_INTEGER_ONLY
|
||||
|
||||
#ifdef __cplusplus
|
||||
extern "C" {
|
||||
#endif
|
||||
|
||||
/* 64-bit integers */
|
||||
#ifdef _WIN32_WCE
|
||||
typedef signed __int64 longlong;
|
||||
typedef unsigned __int64 ulonglong;
|
||||
#else
|
||||
typedef signed long long longlong;
|
||||
typedef unsigned long long ulonglong;
|
||||
#endif
|
||||
|
||||
/* MKFLOATing point */
|
||||
#ifdef MATROSKA_INTEGER_ONLY
|
||||
typedef struct {
|
||||
longlong v;
|
||||
} MKFLOAT;
|
||||
#else
|
||||
typedef double MKFLOAT;
|
||||
#endif
|
||||
|
||||
/* generic I/O */
|
||||
struct InputStream {
|
||||
/* read bytes from stream */
|
||||
int (*read)(struct InputStream *cc,ulonglong pos,void *buffer,int count);
|
||||
/* scan for a four byte signature, bytes must be nonzero */
|
||||
longlong (*scan)(struct InputStream *cc,ulonglong start,unsigned signature);
|
||||
/* get cache size, this is used to cap readahead */
|
||||
unsigned (*getcachesize)(struct InputStream *cc);
|
||||
/* fetch last error message */
|
||||
const char *(*geterror)(struct InputStream *cc);
|
||||
/* memory allocation */
|
||||
void *(*memalloc)(struct InputStream *cc,size_t size);
|
||||
void *(*memrealloc)(struct InputStream *cc,void *mem,size_t newsize);
|
||||
void (*memfree)(struct InputStream *cc,void *mem);
|
||||
/* zero return causes parser to abort open */
|
||||
int (*progress)(struct InputStream *cc,ulonglong cur,ulonglong max);
|
||||
/* get file size, optional, can be NULL or return -1 if filesize is unknown */
|
||||
longlong (*getfilesize)(struct InputStream *cc);
|
||||
};
|
||||
|
||||
typedef struct InputStream InputStream;
|
||||
|
||||
/* matroska file */
|
||||
struct MatroskaFile; /* opaque */
|
||||
|
||||
typedef struct MatroskaFile MatroskaFile;
|
||||
|
||||
#define COMP_ZLIB 0
|
||||
#define COMP_BZIP 1
|
||||
#define COMP_LZO1X 2
|
||||
#define COMP_PREPEND 3
|
||||
|
||||
#define TT_VIDEO 1
|
||||
#define TT_AUDIO 2
|
||||
#define TT_SUB 17
|
||||
|
||||
struct TrackInfo {
|
||||
unsigned char Number;
|
||||
unsigned char Type;
|
||||
unsigned char TrackOverlay;
|
||||
ulonglong UID;
|
||||
ulonglong MinCache;
|
||||
ulonglong MaxCache;
|
||||
ulonglong DefaultDuration;
|
||||
MKFLOAT TimecodeScale;
|
||||
void *CodecPrivate;
|
||||
unsigned CodecPrivateSize;
|
||||
unsigned CompMethod;
|
||||
void *CompMethodPrivate;
|
||||
unsigned CompMethodPrivateSize;
|
||||
unsigned MaxBlockAdditionID;
|
||||
struct {
|
||||
unsigned int Enabled:1;
|
||||
unsigned int Default:1;
|
||||
unsigned int Lacing:1;
|
||||
unsigned int DecodeAll:1;
|
||||
unsigned int CompEnabled:1;
|
||||
};
|
||||
|
||||
union {
|
||||
struct {
|
||||
unsigned char StereoMode;
|
||||
unsigned char DisplayUnit;
|
||||
unsigned char AspectRatioType;
|
||||
unsigned int PixelWidth;
|
||||
unsigned int PixelHeight;
|
||||
unsigned int DisplayWidth;
|
||||
unsigned int DisplayHeight;
|
||||
unsigned int CropL, CropT, CropR, CropB;
|
||||
unsigned int ColourSpace;
|
||||
MKFLOAT GammaValue;
|
||||
struct {
|
||||
unsigned int Interlaced:1;
|
||||
};
|
||||
} Video;
|
||||
struct {
|
||||
MKFLOAT SamplingFreq;
|
||||
MKFLOAT OutputSamplingFreq;
|
||||
unsigned char Channels;
|
||||
unsigned char BitDepth;
|
||||
} Audio;
|
||||
} AV;
|
||||
|
||||
/* various strings */
|
||||
char *Name;
|
||||
char Language[4];
|
||||
char *CodecID;
|
||||
};
|
||||
|
||||
typedef struct TrackInfo TrackInfo;
|
||||
|
||||
struct SegmentInfo {
|
||||
char UID[16];
|
||||
char PrevUID[16];
|
||||
char NextUID[16];
|
||||
char *Filename;
|
||||
char *PrevFilename;
|
||||
char *NextFilename;
|
||||
char *Title;
|
||||
char *MuxingApp;
|
||||
char *WritingApp;
|
||||
ulonglong TimecodeScale;
|
||||
ulonglong Duration;
|
||||
longlong DateUTC;
|
||||
char DateUTCValid;
|
||||
};
|
||||
|
||||
typedef struct SegmentInfo SegmentInfo;
|
||||
|
||||
struct Attachment {
|
||||
ulonglong Position;
|
||||
ulonglong Length;
|
||||
ulonglong UID;
|
||||
char *Name;
|
||||
char *Description;
|
||||
char *MimeType;
|
||||
};
|
||||
|
||||
typedef struct Attachment Attachment;
|
||||
|
||||
struct ChapterDisplay {
|
||||
char *String;
|
||||
char Language[4];
|
||||
char Country[4];
|
||||
};
|
||||
|
||||
struct ChapterCommand {
|
||||
unsigned Time;
|
||||
unsigned CommandLength;
|
||||
void *Command;
|
||||
};
|
||||
|
||||
struct ChapterProcess {
|
||||
unsigned CodecID;
|
||||
unsigned CodecPrivateLength;
|
||||
void *CodecPrivate;
|
||||
unsigned nCommands,nCommandsSize;
|
||||
struct ChapterCommand *Commands;
|
||||
};
|
||||
|
||||
struct Chapter {
|
||||
ulonglong UID;
|
||||
ulonglong Start;
|
||||
ulonglong End;
|
||||
|
||||
unsigned nTracks,nTracksSize;
|
||||
ulonglong *Tracks;
|
||||
unsigned nDisplay,nDisplaySize;
|
||||
struct ChapterDisplay *Display;
|
||||
unsigned nChildren,nChildrenSize;
|
||||
struct Chapter *Children;
|
||||
unsigned nProcess,nProcessSize;
|
||||
struct ChapterProcess *Process;
|
||||
|
||||
char SegmentUID[16];
|
||||
|
||||
struct {
|
||||
unsigned int Hidden:1;
|
||||
unsigned int Enabled:1;
|
||||
|
||||
// Editions
|
||||
unsigned int Default:1;
|
||||
unsigned int Ordered:1;
|
||||
};
|
||||
};
|
||||
|
||||
typedef struct Chapter Chapter;
|
||||
|
||||
#define TARGET_TRACK 0
|
||||
#define TARGET_CHAPTER 1
|
||||
#define TARGET_ATTACHMENT 2
|
||||
#define TARGET_EDITION 3
|
||||
struct Target {
|
||||
ulonglong UID;
|
||||
unsigned Type;
|
||||
};
|
||||
|
||||
struct SimpleTag {
|
||||
char *Name;
|
||||
char *Value;
|
||||
char Language[4];
|
||||
unsigned Default:1;
|
||||
};
|
||||
|
||||
struct Tag {
|
||||
unsigned nTargets,nTargetsSize;
|
||||
struct Target *Targets;
|
||||
|
||||
unsigned nSimpleTags,nSimpleTagsSize;
|
||||
struct SimpleTag *SimpleTags;
|
||||
};
|
||||
|
||||
typedef struct Tag Tag;
|
||||
|
||||
/* Open a matroska file
|
||||
* io pointer is recorded inside MatroskaFile
|
||||
*/
|
||||
X MatroskaFile *mkv_Open(/* in */ InputStream *io,
|
||||
/* out */ char *err_msg,
|
||||
/* in */ unsigned msgsize);
|
||||
|
||||
#define MKVF_AVOID_SEEKS 1 /* use sequential reading only */
|
||||
|
||||
X MatroskaFile *mkv_OpenEx(/* in */ InputStream *io,
|
||||
/* in */ ulonglong base,
|
||||
/* in */ unsigned flags,
|
||||
/* out */ char *err_msg,
|
||||
/* in */ unsigned msgsize);
|
||||
|
||||
/* Close and deallocate mf
|
||||
* NULL pointer is ok and is simply ignored
|
||||
*/
|
||||
X void mkv_Close(/* in */ MatroskaFile *mf);
|
||||
|
||||
/* Fetch the error message of the last failed operation */
|
||||
X const char *mkv_GetLastError(/* in */ MatroskaFile *mf);
|
||||
|
||||
/* Get file information */
|
||||
X SegmentInfo *mkv_GetFileInfo(/* in */ MatroskaFile *mf);
|
||||
|
||||
/* Get track information */
|
||||
X unsigned int mkv_GetNumTracks(/* in */ MatroskaFile *mf);
|
||||
X TrackInfo *mkv_GetTrackInfo(/* in */ MatroskaFile *mf,/* in */ unsigned track);
|
||||
|
||||
/* chapters, tags and attachments */
|
||||
X void mkv_GetAttachments(/* in */ MatroskaFile *mf,
|
||||
/* out */ Attachment **at,
|
||||
/* out */ unsigned *count);
|
||||
X void mkv_GetChapters(/* in */ MatroskaFile *mf,
|
||||
/* out */ Chapter **ch,
|
||||
/* out */ unsigned *count);
|
||||
X void mkv_GetTags(/* in */ MatroskaFile *mf,
|
||||
/* out */ Tag **tag,
|
||||
/* out */ unsigned *count);
|
||||
|
||||
X ulonglong mkv_GetSegmentTop(MatroskaFile *mf);
|
||||
|
||||
/* Seek to specified timecode,
|
||||
* if timecode is past end of file,
|
||||
* all tracks are set to return EOF
|
||||
* on next read
|
||||
*/
|
||||
#define MKVF_SEEK_TO_PREV_KEYFRAME 1
|
||||
|
||||
X void mkv_Seek(/* in */ MatroskaFile *mf,
|
||||
/* in */ ulonglong timecode /* in ns */,
|
||||
/* in */ unsigned flags);
|
||||
|
||||
X void mkv_SkipToKeyframe(MatroskaFile *mf);
|
||||
|
||||
X ulonglong mkv_GetLowestQTimecode(MatroskaFile *mf);
|
||||
|
||||
X int mkv_TruncFloat(MKFLOAT f);
|
||||
|
||||
/*************************************************************************
|
||||
* reading data, pull model
|
||||
*/
|
||||
|
||||
/* frame flags */
|
||||
#define FRAME_UNKNOWN_START 0x00000001
|
||||
#define FRAME_UNKNOWN_END 0x00000002
|
||||
#define FRAME_KF 0x00000004
|
||||
#define FRAME_GAP 0x00800000
|
||||
#define FRAME_STREAM_MASK 0xff000000
|
||||
#define FRAME_STREAM_SHIFT 24
|
||||
|
||||
/* This sets the masking flags for the parser,
|
||||
* masked tracks [with 1s in their bit positions]
|
||||
* will be ignored when reading file data.
|
||||
* This call discards all parsed and queued frames
|
||||
*/
|
||||
X void mkv_SetTrackMask(/* in */ MatroskaFile *mf,/* in */ unsigned int mask);
|
||||
|
||||
/* Read one frame from the queue.
|
||||
* mask specifies what tracks to ignore.
|
||||
* Returns -1 if there are no more frames in the specified
|
||||
* set of tracks, 0 on success
|
||||
*/
|
||||
X int mkv_ReadFrame(/* in */ MatroskaFile *mf,
|
||||
/* in */ unsigned int mask,
|
||||
/* out */ unsigned int *track,
|
||||
/* out */ ulonglong *StartTime /* in ns */,
|
||||
/* out */ ulonglong *EndTime /* in ns */,
|
||||
/* out */ ulonglong *FilePos /* in bytes from start of file */,
|
||||
/* out */ unsigned int *FrameSize /* in bytes */,
|
||||
/* out */ unsigned int *FrameFlags);
|
||||
|
||||
#ifdef MATROSKA_COMPRESSION_SUPPORT
|
||||
/* Compressed streams support */
|
||||
struct CompressedStream;
|
||||
|
||||
typedef struct CompressedStream CompressedStream;
|
||||
|
||||
X CompressedStream *cs_Create(/* in */ MatroskaFile *mf,
|
||||
/* in */ unsigned tracknum,
|
||||
/* out */ char *errormsg,
|
||||
/* in */ unsigned msgsize);
|
||||
X void cs_Destroy(/* in */ CompressedStream *cs);
|
||||
|
||||
/* advance to the next frame in matroska stream, you need to pass values returned
|
||||
* by mkv_ReadFrame */
|
||||
X void cs_NextFrame(/* in */ CompressedStream *cs,
|
||||
/* in */ ulonglong pos,
|
||||
/* in */ unsigned size);
|
||||
|
||||
/* read and decode more data from current frame, return number of bytes decoded,
|
||||
* 0 on end of frame, or -1 on error */
|
||||
X int cs_ReadData(CompressedStream *cs,char *buffer,unsigned bufsize);
|
||||
|
||||
/* return error message for the last error */
|
||||
X const char *cs_GetLastError(CompressedStream *cs);
|
||||
#endif
|
||||
|
||||
#ifdef __cplusplus
|
||||
}
|
||||
#endif
|
||||
|
||||
#undef X
|
||||
|
||||
#endif
|
749
FFmpegSource/avisynth.h
Normal file
749
FFmpegSource/avisynth.h
Normal file
|
@ -0,0 +1,749 @@
|
|||
// Avisynth v2.5. Copyright 2002 Ben Rudiak-Gould et al.
|
||||
// http://www.avisynth.org
|
||||
|
||||
// This program is free software; you can redistribute it and/or modify
|
||||
// it under the terms of the GNU General Public License as published by
|
||||
// the Free Software Foundation; either version 2 of the License, or
|
||||
// (at your option) any later version.
|
||||
//
|
||||
// This program is distributed in the hope that it will be useful,
|
||||
// but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
||||
// GNU General Public License for more details.
|
||||
//
|
||||
// You should have received a copy of the GNU General Public License
|
||||
// along with this program; if not, write to the Free Software
|
||||
// Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA, or visit
|
||||
// http://www.gnu.org/copyleft/gpl.html .
|
||||
//
|
||||
// Linking Avisynth statically or dynamically with other modules is making a
|
||||
// combined work based on Avisynth. Thus, the terms and conditions of the GNU
|
||||
// General Public License cover the whole combination.
|
||||
//
|
||||
// As a special exception, the copyright holders of Avisynth give you
|
||||
// permission to link Avisynth with independent modules that communicate with
|
||||
// Avisynth solely through the interfaces defined in avisynth.h, regardless of the license
|
||||
// terms of these independent modules, and to copy and distribute the
|
||||
// resulting combined work under terms of your choice, provided that
|
||||
// every copy of the combined work is accompanied by a complete copy of
|
||||
// the source code of Avisynth (the version of Avisynth used to produce the
|
||||
// combined work), being distributed under the terms of the GNU General
|
||||
// Public License plus this exception. An independent module is a module
|
||||
// which is not derived from or based on Avisynth, such as 3rd-party filters,
|
||||
// import and export plugins, or graphical user interfaces.
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
#ifndef __AVISYNTH_H__
|
||||
#define __AVISYNTH_H__
|
||||
|
||||
enum { AVISYNTH_INTERFACE_VERSION = 3 };
|
||||
|
||||
|
||||
/* Define all types necessary for interfacing with avisynth.dll
|
||||
Moved from internal.h */
|
||||
|
||||
// Win32 API macros, notably the types BYTE, DWORD, ULONG, etc.
|
||||
#include <windef.h>
|
||||
|
||||
// COM interface macros
|
||||
#include <objbase.h>
|
||||
|
||||
|
||||
// Raster types used by VirtualDub & Avisynth
|
||||
#define in64 (__int64)(unsigned short)
|
||||
typedef unsigned long Pixel; // this will break on 64-bit machines!
|
||||
typedef unsigned long Pixel32;
|
||||
typedef unsigned char Pixel8;
|
||||
typedef long PixCoord;
|
||||
typedef long PixDim;
|
||||
typedef long PixOffset;
|
||||
|
||||
|
||||
/* Compiler-specific crap */
|
||||
|
||||
// Tell MSVC to stop precompiling here
|
||||
#ifdef _MSC_VER
|
||||
#pragma hdrstop
|
||||
#endif
|
||||
|
||||
// Set up debugging macros for MS compilers; for others, step down to the
|
||||
// standard <assert.h> interface
|
||||
#ifdef _MSC_VER
|
||||
#include <crtdbg.h>
|
||||
#else
|
||||
#define _RPT0(a,b) ((void)0)
|
||||
#define _RPT1(a,b,c) ((void)0)
|
||||
#define _RPT2(a,b,c,d) ((void)0)
|
||||
#define _RPT3(a,b,c,d,e) ((void)0)
|
||||
#define _RPT4(a,b,c,d,e,f) ((void)0)
|
||||
|
||||
#define _ASSERTE(x) assert(x)
|
||||
#include <assert.h>
|
||||
#endif
|
||||
|
||||
|
||||
|
||||
// I had problems with Premiere wanting 1-byte alignment for its structures,
|
||||
// so I now set the Avisynth struct alignment explicitly here.
|
||||
#pragma pack(push,8)
|
||||
|
||||
#define FRAME_ALIGN 16
|
||||
// Default frame alignment is 16 bytes, to help P4, when using SSE2
|
||||
|
||||
// The VideoInfo struct holds global information about a clip (i.e.
|
||||
// information that does not depend on the frame number). The GetVideoInfo
|
||||
// method in IClip returns this struct.
|
||||
|
||||
// Audio Sample information
|
||||
typedef float SFLOAT;
|
||||
|
||||
enum {SAMPLE_INT8 = 1<<0,
|
||||
SAMPLE_INT16 = 1<<1,
|
||||
SAMPLE_INT24 = 1<<2, // Int24 is a very stupid thing to code, but it's supported by some hardware.
|
||||
SAMPLE_INT32 = 1<<3,
|
||||
SAMPLE_FLOAT = 1<<4};
|
||||
|
||||
enum {
|
||||
PLANAR_Y=1<<0,
|
||||
PLANAR_U=1<<1,
|
||||
PLANAR_V=1<<2,
|
||||
PLANAR_ALIGNED=1<<3,
|
||||
PLANAR_Y_ALIGNED=PLANAR_Y|PLANAR_ALIGNED,
|
||||
PLANAR_U_ALIGNED=PLANAR_U|PLANAR_ALIGNED,
|
||||
PLANAR_V_ALIGNED=PLANAR_V|PLANAR_ALIGNED,
|
||||
};
|
||||
|
||||
struct VideoInfo {
|
||||
int width, height; // width=0 means no video
|
||||
unsigned fps_numerator, fps_denominator;
|
||||
int num_frames;
|
||||
// This is more extensible than previous versions. More properties can be added seeminglesly.
|
||||
|
||||
// Colorspace properties.
|
||||
enum {
|
||||
CS_BGR = 1<<28,
|
||||
CS_YUV = 1<<29,
|
||||
CS_INTERLEAVED = 1<<30,
|
||||
CS_PLANAR = 1<<31
|
||||
};
|
||||
|
||||
// Specific colorformats
|
||||
enum { CS_UNKNOWN = 0,
|
||||
CS_BGR24 = 1<<0 | CS_BGR | CS_INTERLEAVED,
|
||||
CS_BGR32 = 1<<1 | CS_BGR | CS_INTERLEAVED,
|
||||
CS_YUY2 = 1<<2 | CS_YUV | CS_INTERLEAVED,
|
||||
CS_YV12 = 1<<3 | CS_YUV | CS_PLANAR, // y-v-u, planar
|
||||
CS_I420 = 1<<4 | CS_YUV | CS_PLANAR, // y-u-v, planar
|
||||
CS_IYUV = 1<<4 | CS_YUV | CS_PLANAR // same as above
|
||||
};
|
||||
int pixel_type; // changed to int as of 2.5
|
||||
|
||||
|
||||
int audio_samples_per_second; // 0 means no audio
|
||||
int sample_type; // as of 2.5
|
||||
__int64 num_audio_samples; // changed as of 2.5
|
||||
int nchannels; // as of 2.5
|
||||
|
||||
// Imagetype properties
|
||||
|
||||
int image_type;
|
||||
|
||||
enum {
|
||||
IT_BFF = 1<<0,
|
||||
IT_TFF = 1<<1,
|
||||
IT_FIELDBASED = 1<<2
|
||||
};
|
||||
|
||||
// useful functions of the above
|
||||
bool HasVideo() const { return (width!=0); }
|
||||
bool HasAudio() const { return (audio_samples_per_second!=0); }
|
||||
bool IsRGB() const { return !!(pixel_type&CS_BGR); }
|
||||
bool IsRGB24() const { return (pixel_type&CS_BGR24)==CS_BGR24; } // Clear out additional properties
|
||||
bool IsRGB32() const { return (pixel_type & CS_BGR32) == CS_BGR32 ; }
|
||||
bool IsYUV() const { return !!(pixel_type&CS_YUV ); }
|
||||
bool IsYUY2() const { return (pixel_type & CS_YUY2) == CS_YUY2; }
|
||||
bool IsYV12() const { return ((pixel_type & CS_YV12) == CS_YV12)||((pixel_type & CS_I420) == CS_I420); }
|
||||
bool IsColorSpace(int c_space) const { return ((pixel_type & c_space) == c_space); }
|
||||
bool Is(int property) const { return ((pixel_type & property)==property ); }
|
||||
bool IsPlanar() const { return !!(pixel_type & CS_PLANAR); }
|
||||
bool IsFieldBased() const { return !!(image_type & IT_FIELDBASED); }
|
||||
bool IsParityKnown() const { return ((image_type & IT_FIELDBASED)&&(image_type & (IT_BFF|IT_TFF))); }
|
||||
bool IsBFF() const { return !!(image_type & IT_BFF); }
|
||||
bool IsTFF() const { return !!(image_type & IT_TFF); }
|
||||
|
||||
bool IsVPlaneFirst() const {return ((pixel_type & CS_YV12) == CS_YV12); } // Don't use this
|
||||
int BytesFromPixels(int pixels) const { return pixels * (BitsPerPixel()>>3); } // Will not work on planar images, but will return only luma planes
|
||||
int RowSize() const { return BytesFromPixels(width); } // Also only returns first plane on planar images
|
||||
int BMPSize() const { if (IsPlanar()) {int p = height * ((RowSize()+3) & ~3); p+=p>>1; return p; } return height * ((RowSize()+3) & ~3); }
|
||||
__int64 AudioSamplesFromFrames(__int64 frames) const { return (fps_numerator && HasVideo()) ? ((__int64)(frames) * audio_samples_per_second * fps_denominator / fps_numerator) : 0; }
|
||||
int FramesFromAudioSamples(__int64 samples) const { return (fps_denominator && HasAudio()) ? (int)((samples * (__int64)fps_numerator)/((__int64)fps_denominator * (__int64)audio_samples_per_second)) : 0; }
|
||||
__int64 AudioSamplesFromBytes(__int64 bytes) const { return HasAudio() ? bytes / BytesPerAudioSample() : 0; }
|
||||
__int64 BytesFromAudioSamples(__int64 samples) const { return samples * BytesPerAudioSample(); }
|
||||
int AudioChannels() const { return HasAudio() ? nchannels : 0; }
|
||||
int SampleType() const{ return sample_type;}
|
||||
bool IsSampleType(int testtype) const{ return !!(sample_type&testtype);}
|
||||
int SamplesPerSecond() const { return audio_samples_per_second; }
|
||||
int BytesPerAudioSample() const { return nchannels*BytesPerChannelSample();}
|
||||
void SetFieldBased(bool isfieldbased) { if (isfieldbased) image_type|=IT_FIELDBASED; else image_type&=~IT_FIELDBASED; }
|
||||
void Set(int property) { image_type|=property; }
|
||||
void Clear(int property) { image_type&=~property; }
|
||||
|
||||
int BitsPerPixel() const {
|
||||
switch (pixel_type) {
|
||||
case CS_BGR24:
|
||||
return 24;
|
||||
case CS_BGR32:
|
||||
return 32;
|
||||
case CS_YUY2:
|
||||
return 16;
|
||||
case CS_YV12:
|
||||
case CS_I420:
|
||||
return 12;
|
||||
default:
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
int BytesPerChannelSample() const {
|
||||
switch (sample_type) {
|
||||
case SAMPLE_INT8:
|
||||
return sizeof(signed char);
|
||||
case SAMPLE_INT16:
|
||||
return sizeof(signed short);
|
||||
case SAMPLE_INT24:
|
||||
return 3;
|
||||
case SAMPLE_INT32:
|
||||
return sizeof(signed int);
|
||||
case SAMPLE_FLOAT:
|
||||
return sizeof(SFLOAT);
|
||||
default:
|
||||
_ASSERTE("Sample type not recognized!");
|
||||
return 0;
|
||||
}
|
||||
}
|
||||
|
||||
// useful mutator
|
||||
void SetFPS(unsigned numerator, unsigned denominator) {
|
||||
if ((numerator == 0) || (denominator == 0)) {
|
||||
fps_numerator = 0;
|
||||
fps_denominator = 1;
|
||||
}
|
||||
else {
|
||||
unsigned x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned t = x%y; x = y; y = t;
|
||||
}
|
||||
fps_numerator = numerator/x;
|
||||
fps_denominator = denominator/x;
|
||||
}
|
||||
}
|
||||
|
||||
// Range protected multiply-divide of FPS
|
||||
void MulDivFPS(unsigned multiplier, unsigned divisor) {
|
||||
unsigned __int64 numerator = UInt32x32To64(fps_numerator, multiplier);
|
||||
unsigned __int64 denominator = UInt32x32To64(fps_denominator, divisor);
|
||||
|
||||
unsigned __int64 x=numerator, y=denominator;
|
||||
while (y) { // find gcd
|
||||
unsigned __int64 t = x%y; x = y; y = t;
|
||||
}
|
||||
numerator /= x; // normalize
|
||||
denominator /= x;
|
||||
|
||||
unsigned __int64 temp = numerator | denominator; // Just looking top bit
|
||||
unsigned u = 0;
|
||||
while (temp & 0xffffffff80000000) { // or perhaps > 16777216*2
|
||||
temp = Int64ShrlMod32(temp, 1);
|
||||
u++;
|
||||
}
|
||||
if (u) { // Scale to fit
|
||||
const unsigned round = 1 << (u-1);
|
||||
SetFPS( (unsigned)Int64ShrlMod32(numerator + round, u),
|
||||
(unsigned)Int64ShrlMod32(denominator + round, u) );
|
||||
}
|
||||
else {
|
||||
fps_numerator = (unsigned)numerator;
|
||||
fps_denominator = (unsigned)denominator;
|
||||
}
|
||||
}
|
||||
|
||||
// Test for same colorspace
|
||||
bool IsSameColorspace(const VideoInfo& vi) const {
|
||||
if (vi.pixel_type == pixel_type) return TRUE;
|
||||
if (IsYV12() && vi.IsYV12()) return TRUE;
|
||||
return FALSE;
|
||||
}
|
||||
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
// VideoFrameBuffer holds information about a memory block which is used
|
||||
// for video data. For efficiency, instances of this class are not deleted
|
||||
// when the refcount reaches zero; instead they're stored in a linked list
|
||||
// to be reused. The instances are deleted when the corresponding AVS
|
||||
// file is closed.
|
||||
|
||||
class VideoFrameBuffer {
|
||||
BYTE* const data;
|
||||
const int data_size;
|
||||
// sequence_number is incremented every time the buffer is changed, so
|
||||
// that stale views can tell they're no longer valid.
|
||||
long sequence_number;
|
||||
|
||||
friend class VideoFrame;
|
||||
friend class Cache;
|
||||
friend class ScriptEnvironment;
|
||||
long refcount;
|
||||
|
||||
public:
|
||||
VideoFrameBuffer(int size);
|
||||
VideoFrameBuffer();
|
||||
~VideoFrameBuffer();
|
||||
|
||||
const BYTE* GetReadPtr() const { return data; }
|
||||
BYTE* GetWritePtr() { ++sequence_number; return data; }
|
||||
int GetDataSize() { return data_size; }
|
||||
int GetSequenceNumber() { return sequence_number; }
|
||||
int GetRefcount() { return refcount; }
|
||||
};
|
||||
|
||||
|
||||
class IClip;
|
||||
class PClip;
|
||||
class PVideoFrame;
|
||||
class IScriptEnvironment;
|
||||
class AVSValue;
|
||||
|
||||
|
||||
// VideoFrame holds a "window" into a VideoFrameBuffer. Operator new
|
||||
// is overloaded to recycle class instances.
|
||||
|
||||
class VideoFrame {
|
||||
int refcount;
|
||||
VideoFrameBuffer* const vfb;
|
||||
const int offset, pitch, row_size, height, offsetU, offsetV, pitchUV; // U&V offsets are from top of picture.
|
||||
|
||||
friend class PVideoFrame;
|
||||
void AddRef() { InterlockedIncrement((long *)&refcount); }
|
||||
void Release() { if (refcount==1) InterlockedDecrement(&vfb->refcount); InterlockedDecrement((long *)&refcount); }
|
||||
|
||||
friend class ScriptEnvironment;
|
||||
friend class Cache;
|
||||
|
||||
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height);
|
||||
VideoFrame(VideoFrameBuffer* _vfb, int _offset, int _pitch, int _row_size, int _height, int _offsetU, int _offsetV, int _pitchUV);
|
||||
|
||||
void* operator new(unsigned size);
|
||||
// TESTME: OFFSET U/V may be switched to what could be expected from AVI standard!
|
||||
public:
|
||||
int GetPitch() const { return pitch; }
|
||||
int GetPitch(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: return pitchUV;} return pitch; }
|
||||
int GetRowSize() const { return row_size; }
|
||||
int GetRowSize(int plane) const {
|
||||
switch (plane) {
|
||||
case PLANAR_U: case PLANAR_V: if (pitchUV) return row_size>>1; else return 0;
|
||||
case PLANAR_U_ALIGNED: case PLANAR_V_ALIGNED:
|
||||
if (pitchUV) {
|
||||
int r = ((row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)) )>>1; // Aligned rowsize
|
||||
if (r<=pitchUV)
|
||||
return r;
|
||||
return row_size>>1;
|
||||
} else return 0;
|
||||
case PLANAR_Y_ALIGNED:
|
||||
int r = (row_size+FRAME_ALIGN-1)&(~(FRAME_ALIGN-1)); // Aligned rowsize
|
||||
if (r<=pitch)
|
||||
return r;
|
||||
return row_size;
|
||||
}
|
||||
return row_size; }
|
||||
int GetHeight() const { return height; }
|
||||
int GetHeight(int plane) const { switch (plane) {case PLANAR_U: case PLANAR_V: if (pitchUV) return height>>1; return 0;} return height; }
|
||||
|
||||
// generally you shouldn't use these three
|
||||
VideoFrameBuffer* GetFrameBuffer() const { return vfb; }
|
||||
int GetOffset() const { return offset; }
|
||||
int GetOffset(int plane) const { switch (plane) {case PLANAR_U: return offsetU;case PLANAR_V: return offsetV;default: return offset;}; }
|
||||
|
||||
// in plugins use env->SubFrame()
|
||||
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height) const;
|
||||
VideoFrame* Subframe(int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int pitchUV) const;
|
||||
|
||||
|
||||
const BYTE* GetReadPtr() const { return vfb->GetReadPtr() + offset; }
|
||||
const BYTE* GetReadPtr(int plane) const { return vfb->GetReadPtr() + GetOffset(plane); }
|
||||
|
||||
bool IsWritable() const { return (refcount == 1 && vfb->refcount == 1); }
|
||||
|
||||
BYTE* GetWritePtr() const {
|
||||
if (vfb->GetRefcount()>1) {
|
||||
_ASSERT(FALSE);
|
||||
//throw AvisynthError("Internal Error - refcount was more than one!");
|
||||
}
|
||||
return IsWritable() ? (vfb->GetWritePtr() + offset) : 0;
|
||||
}
|
||||
|
||||
BYTE* GetWritePtr(int plane) const {
|
||||
if (plane==PLANAR_Y) {
|
||||
if (vfb->GetRefcount()>1) {
|
||||
_ASSERT(FALSE);
|
||||
// throw AvisynthError("Internal Error - refcount was more than one!");
|
||||
}
|
||||
return IsWritable() ? vfb->GetWritePtr() + GetOffset(plane) : 0;
|
||||
}
|
||||
return vfb->data + GetOffset(plane);
|
||||
}
|
||||
|
||||
~VideoFrame() { InterlockedDecrement(&vfb->refcount); }
|
||||
};
|
||||
|
||||
enum {
|
||||
CACHE_NOTHING=0,
|
||||
CACHE_RANGE=1,
|
||||
CACHE_ALL=2,
|
||||
CACHE_AUDIO=3,
|
||||
CACHE_AUDIO_NONE=4
|
||||
};
|
||||
|
||||
// Base class for all filters.
|
||||
class IClip {
|
||||
friend class PClip;
|
||||
friend class AVSValue;
|
||||
int refcnt;
|
||||
void AddRef() { InterlockedIncrement((long *)&refcnt); }
|
||||
void Release() { InterlockedDecrement((long *)&refcnt); if (!refcnt) delete this; }
|
||||
public:
|
||||
IClip() : refcnt(0) {}
|
||||
|
||||
virtual int __stdcall GetVersion() { return AVISYNTH_INTERFACE_VERSION; }
|
||||
|
||||
virtual PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) = 0;
|
||||
virtual bool __stdcall GetParity(int n) = 0; // return field parity if field_based, else parity of first field in frame
|
||||
virtual void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) = 0; // start and count are in samples
|
||||
virtual void __stdcall SetCacheHints(int cachehints,int frame_range) = 0 ; // We do not pass cache requests upwards, only to the next filter.
|
||||
virtual const VideoInfo& __stdcall GetVideoInfo() = 0;
|
||||
virtual __stdcall ~IClip() {}
|
||||
};
|
||||
|
||||
|
||||
// smart pointer to IClip
|
||||
class PClip {
|
||||
|
||||
IClip* p;
|
||||
|
||||
IClip* GetPointerWithAddRef() const { if (p) p->AddRef(); return p; }
|
||||
friend class AVSValue;
|
||||
friend class VideoFrame;
|
||||
|
||||
void Init(IClip* x) {
|
||||
if (x) x->AddRef();
|
||||
p=x;
|
||||
}
|
||||
void Set(IClip* x) {
|
||||
if (x) x->AddRef();
|
||||
if (p) p->Release();
|
||||
p=x;
|
||||
}
|
||||
|
||||
public:
|
||||
PClip() { p = 0; }
|
||||
PClip(const PClip& x) { Init(x.p); }
|
||||
PClip(IClip* x) { Init(x); }
|
||||
void operator=(IClip* x) { Set(x); }
|
||||
void operator=(const PClip& x) { Set(x.p); }
|
||||
|
||||
IClip* operator->() const { return p; }
|
||||
|
||||
// useful in conditional expressions
|
||||
operator void*() const { return p; }
|
||||
bool operator!() const { return !p; }
|
||||
|
||||
~PClip() { if (p) p->Release(); }
|
||||
};
|
||||
|
||||
|
||||
// smart pointer to VideoFrame
|
||||
class PVideoFrame {
|
||||
|
||||
VideoFrame* p;
|
||||
|
||||
void Init(VideoFrame* x) {
|
||||
if (x) x->AddRef();
|
||||
p=x;
|
||||
}
|
||||
void Set(VideoFrame* x) {
|
||||
if (x) x->AddRef();
|
||||
if (p) p->Release();
|
||||
p=x;
|
||||
}
|
||||
|
||||
public:
|
||||
PVideoFrame() { p = 0; }
|
||||
PVideoFrame(const PVideoFrame& x) { Init(x.p); }
|
||||
PVideoFrame(VideoFrame* x) { Init(x); }
|
||||
void operator=(VideoFrame* x) { Set(x); }
|
||||
void operator=(const PVideoFrame& x) { Set(x.p); }
|
||||
|
||||
VideoFrame* operator->() const { return p; }
|
||||
|
||||
// for conditional expressions
|
||||
operator void*() const { return p; }
|
||||
bool operator!() const { return !p; }
|
||||
|
||||
~PVideoFrame() { if (p) p->Release();}
|
||||
};
|
||||
|
||||
|
||||
class AVSValue {
|
||||
public:
|
||||
|
||||
AVSValue() { type = 'v'; }
|
||||
AVSValue(IClip* c) { type = 'c'; clip = c; if (c) c->AddRef(); }
|
||||
AVSValue(const PClip& c) { type = 'c'; clip = c.GetPointerWithAddRef(); }
|
||||
AVSValue(bool b) { type = 'b'; boolean = b; }
|
||||
AVSValue(int i) { type = 'i'; integer = i; }
|
||||
// AVSValue(__int64 l) { type = 'l'; longlong = l; }
|
||||
AVSValue(float f) { type = 'f'; floating_pt = f; }
|
||||
AVSValue(double f) { type = 'f'; floating_pt = float(f); }
|
||||
AVSValue(const char* s) { type = 's'; string = s; }
|
||||
AVSValue(const AVSValue* a, int size) { type = 'a'; array = a; array_size = size; }
|
||||
AVSValue(const AVSValue& v) { Assign(&v, true); }
|
||||
|
||||
~AVSValue() { if (IsClip() && clip) clip->Release(); }
|
||||
AVSValue& operator=(const AVSValue& v) { Assign(&v, false); return *this; }
|
||||
|
||||
// Note that we transparently allow 'int' to be treated as 'float'.
|
||||
// There are no int<->bool conversions, though.
|
||||
|
||||
bool Defined() const { return type != 'v'; }
|
||||
bool IsClip() const { return type == 'c'; }
|
||||
bool IsBool() const { return type == 'b'; }
|
||||
bool IsInt() const { return type == 'i'; }
|
||||
// bool IsLong() const { return (type == 'l'|| type == 'i'); }
|
||||
bool IsFloat() const { return type == 'f' || type == 'i'; }
|
||||
bool IsString() const { return type == 's'; }
|
||||
bool IsArray() const { return type == 'a'; }
|
||||
|
||||
PClip AsClip() const { _ASSERTE(IsClip()); return IsClip()?clip:0; }
|
||||
bool AsBool() const { _ASSERTE(IsBool()); return boolean; }
|
||||
int AsInt() const { _ASSERTE(IsInt()); return integer; }
|
||||
// int AsLong() const { _ASSERTE(IsLong()); return longlong; }
|
||||
const char* AsString() const { _ASSERTE(IsString()); return IsString()?string:0; }
|
||||
double AsFloat() const { _ASSERTE(IsFloat()); return IsInt()?integer:floating_pt; }
|
||||
|
||||
bool AsBool(bool def) const { _ASSERTE(IsBool()||!Defined()); return IsBool() ? boolean : def; }
|
||||
int AsInt(int def) const { _ASSERTE(IsInt()||!Defined()); return IsInt() ? integer : def; }
|
||||
double AsFloat(double def) const { _ASSERTE(IsFloat()||!Defined()); return IsInt() ? integer : type=='f' ? floating_pt : def; }
|
||||
const char* AsString(const char* def) const { _ASSERTE(IsString()||!Defined()); return IsString() ? string : def; }
|
||||
|
||||
int ArraySize() const { _ASSERTE(IsArray()); return IsArray()?array_size:1; }
|
||||
|
||||
const AVSValue& operator[](int index) const {
|
||||
_ASSERTE(IsArray() && index>=0 && index<array_size);
|
||||
return (IsArray() && index>=0 && index<array_size) ? array[index] : *this;
|
||||
}
|
||||
|
||||
private:
|
||||
|
||||
short type; // 'a'rray, 'c'lip, 'b'ool, 'i'nt, 'f'loat, 's'tring, 'v'oid, or 'l'ong
|
||||
short array_size;
|
||||
union {
|
||||
IClip* clip;
|
||||
bool boolean;
|
||||
int integer;
|
||||
float floating_pt;
|
||||
const char* string;
|
||||
const AVSValue* array;
|
||||
// __int64 longlong;
|
||||
};
|
||||
|
||||
void Assign(const AVSValue* src, bool init) {
|
||||
if (src->IsClip() && src->clip)
|
||||
src->clip->AddRef();
|
||||
if (!init && IsClip() && clip)
|
||||
clip->Release();
|
||||
// make sure this copies the whole struct!
|
||||
((__int32*)this)[0] = ((__int32*)src)[0];
|
||||
((__int32*)this)[1] = ((__int32*)src)[1];
|
||||
}
|
||||
};
|
||||
|
||||
|
||||
// instantiable null filter
|
||||
class GenericVideoFilter : public IClip {
|
||||
protected:
|
||||
PClip child;
|
||||
VideoInfo vi;
|
||||
public:
|
||||
GenericVideoFilter(PClip _child) : child(_child) { vi = child->GetVideoInfo(); }
|
||||
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env) { return child->GetFrame(n, env); }
|
||||
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { child->GetAudio(buf, start, count, env); }
|
||||
const VideoInfo& __stdcall GetVideoInfo() { return vi; }
|
||||
bool __stdcall GetParity(int n) { return child->GetParity(n); }
|
||||
void __stdcall SetCacheHints(int cachehints,int frame_range) { } ; // We do not pass cache requests upwards, only to the next filter.
|
||||
};
|
||||
|
||||
|
||||
class AvisynthError /* exception */ {
|
||||
public:
|
||||
const char* const msg;
|
||||
AvisynthError(const char* _msg) : msg(_msg) {}
|
||||
};
|
||||
|
||||
|
||||
|
||||
|
||||
/* Helper classes useful to plugin authors */
|
||||
|
||||
class AlignPlanar : public GenericVideoFilter
|
||||
{
|
||||
public:
|
||||
AlignPlanar(PClip _clip);
|
||||
static PClip Create(PClip clip);
|
||||
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
|
||||
};
|
||||
|
||||
|
||||
|
||||
class FillBorder : public GenericVideoFilter
|
||||
{
|
||||
public:
|
||||
FillBorder(PClip _clip);
|
||||
static PClip Create(PClip clip);
|
||||
PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* env);
|
||||
};
|
||||
|
||||
|
||||
|
||||
class ConvertAudio : public GenericVideoFilter
|
||||
/**
|
||||
* Helper class to convert audio to any format
|
||||
**/
|
||||
{
|
||||
public:
|
||||
ConvertAudio(PClip _clip, int prefered_format);
|
||||
void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env);
|
||||
void __stdcall SetCacheHints(int cachehints,int frame_range); // We do pass cache requests upwards, to the cache!
|
||||
|
||||
static PClip Create(PClip clip, int sample_type, int prefered_type);
|
||||
static AVSValue __cdecl Create_float(AVSValue args, void*, IScriptEnvironment*);
|
||||
static AVSValue __cdecl Create_32bit(AVSValue args, void*, IScriptEnvironment*);
|
||||
static AVSValue __cdecl Create_24bit(AVSValue args, void*, IScriptEnvironment*);
|
||||
static AVSValue __cdecl Create_16bit(AVSValue args, void*, IScriptEnvironment*);
|
||||
static AVSValue __cdecl Create_8bit(AVSValue args, void*, IScriptEnvironment*);
|
||||
virtual ~ConvertAudio();
|
||||
|
||||
private:
|
||||
void convertToFloat(char* inbuf, float* outbuf, char sample_type, int count);
|
||||
void convertToFloat_3DN(char* inbuf, float* outbuf, char sample_type, int count);
|
||||
void convertToFloat_SSE(char* inbuf, float* outbuf, char sample_type, int count);
|
||||
void convertToFloat_SSE2(char* inbuf, float* outbuf, char sample_type, int count);
|
||||
void convertFromFloat(float* inbuf, void* outbuf, char sample_type, int count);
|
||||
void convertFromFloat_3DN(float* inbuf, void* outbuf, char sample_type, int count);
|
||||
void convertFromFloat_SSE(float* inbuf, void* outbuf, char sample_type, int count);
|
||||
void convertFromFloat_SSE2(float* inbuf, void* outbuf, char sample_type, int count);
|
||||
|
||||
__inline int Saturate_int8(float n);
|
||||
__inline short Saturate_int16(float n);
|
||||
__inline int Saturate_int24(float n);
|
||||
__inline int Saturate_int32(float n);
|
||||
|
||||
char src_format;
|
||||
char dst_format;
|
||||
int src_bps;
|
||||
char *tempbuffer;
|
||||
SFLOAT *floatbuffer;
|
||||
int tempbuffer_size;
|
||||
};
|
||||
|
||||
|
||||
// For GetCPUFlags. These are backwards-compatible with those in VirtualDub.
|
||||
enum {
|
||||
/* slowest CPU to support extension */
|
||||
CPUF_FORCE = 0x01, // N/A
|
||||
CPUF_FPU = 0x02, // 386/486DX
|
||||
CPUF_MMX = 0x04, // P55C, K6, PII
|
||||
CPUF_INTEGER_SSE = 0x08, // PIII, Athlon
|
||||
CPUF_SSE = 0x10, // PIII, Athlon XP/MP
|
||||
CPUF_SSE2 = 0x20, // PIV, Hammer
|
||||
CPUF_3DNOW = 0x40, // K6-2
|
||||
CPUF_3DNOW_EXT = 0x80, // Athlon
|
||||
CPUF_X86_64 = 0xA0, // Hammer (note: equiv. to 3DNow + SSE2, which only Hammer
|
||||
// will have anyway)
|
||||
CPUF_SSE3 = 0x100, // Some P4 & Athlon 64.
|
||||
};
|
||||
#define MAX_INT 0x7fffffff
|
||||
#define MIN_INT -0x7fffffff
|
||||
|
||||
|
||||
|
||||
class IScriptEnvironment {
|
||||
public:
|
||||
virtual __stdcall ~IScriptEnvironment() {}
|
||||
|
||||
virtual /*static*/ long __stdcall GetCPUFlags() = 0;
|
||||
|
||||
virtual char* __stdcall SaveString(const char* s, int length = -1) = 0;
|
||||
virtual char* __stdcall Sprintf(const char* fmt, ...) = 0;
|
||||
// note: val is really a va_list; I hope everyone typedefs va_list to a pointer
|
||||
virtual char* __stdcall VSprintf(const char* fmt, void* val) = 0;
|
||||
|
||||
__declspec(noreturn) virtual void __stdcall ThrowError(const char* fmt, ...) = 0;
|
||||
|
||||
class NotFound /*exception*/ {}; // thrown by Invoke and GetVar
|
||||
|
||||
typedef AVSValue (__cdecl *ApplyFunc)(AVSValue args, void* user_data, IScriptEnvironment* env);
|
||||
|
||||
virtual void __stdcall AddFunction(const char* name, const char* params, ApplyFunc apply, void* user_data) = 0;
|
||||
virtual bool __stdcall FunctionExists(const char* name) = 0;
|
||||
virtual AVSValue __stdcall Invoke(const char* name, const AVSValue args, const char** arg_names=0) = 0;
|
||||
|
||||
virtual AVSValue __stdcall GetVar(const char* name) = 0;
|
||||
virtual bool __stdcall SetVar(const char* name, const AVSValue& val) = 0;
|
||||
virtual bool __stdcall SetGlobalVar(const char* name, const AVSValue& val) = 0;
|
||||
|
||||
virtual void __stdcall PushContext(int level=0) = 0;
|
||||
virtual void __stdcall PopContext() = 0;
|
||||
|
||||
// align should be 4 or 8
|
||||
virtual PVideoFrame __stdcall NewVideoFrame(const VideoInfo& vi, int align=FRAME_ALIGN) = 0;
|
||||
|
||||
virtual bool __stdcall MakeWritable(PVideoFrame* pvf) = 0;
|
||||
|
||||
virtual /*static*/ void __stdcall BitBlt(BYTE* dstp, int dst_pitch, const BYTE* srcp, int src_pitch, int row_size, int height) = 0;
|
||||
|
||||
typedef void (__cdecl *ShutdownFunc)(void* user_data, IScriptEnvironment* env);
|
||||
virtual void __stdcall AtExit(ShutdownFunc function, void* user_data) = 0;
|
||||
|
||||
virtual void __stdcall CheckVersion(int version = AVISYNTH_INTERFACE_VERSION) = 0;
|
||||
|
||||
virtual PVideoFrame __stdcall Subframe(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height) = 0;
|
||||
|
||||
virtual int __stdcall SetMemoryMax(int mem) = 0;
|
||||
|
||||
virtual int __stdcall SetWorkingDir(const char * newdir) = 0;
|
||||
|
||||
virtual void* __stdcall ManageCache(int key, void* data) = 0;
|
||||
|
||||
enum PlanarChromaAlignmentMode {
|
||||
PlanarChromaAlignmentOff,
|
||||
PlanarChromaAlignmentOn,
|
||||
PlanarChromaAlignmentTest };
|
||||
|
||||
virtual bool __stdcall PlanarChromaAlignment(PlanarChromaAlignmentMode key) = 0;
|
||||
|
||||
virtual PVideoFrame __stdcall SubframePlanar(PVideoFrame src, int rel_offset, int new_pitch, int new_row_size, int new_height, int rel_offsetU, int rel_offsetV, int new_pitchUV) = 0;
|
||||
};
|
||||
|
||||
|
||||
// avisynth.dll exports this; it's a way to use it as a library, without
|
||||
// writing an AVS script or without going through AVIFile.
|
||||
IScriptEnvironment* __stdcall CreateScriptEnvironment(int version = AVISYNTH_INTERFACE_VERSION);
|
||||
|
||||
|
||||
#pragma pack(pop)
|
||||
|
||||
#endif //__AVISYNTH_H__
|
686
FFmpegSource/ffmpegsource.cpp
Normal file
686
FFmpegSource/ffmpegsource.cpp
Normal file
|
@ -0,0 +1,686 @@
|
|||
#include <windows.h>
|
||||
#include <stdio.h>
|
||||
#include <map>
|
||||
#include <vector>
|
||||
#include <assert.h>
|
||||
|
||||
#include <stdlib.h>
|
||||
#include <stdio.h>
|
||||
#include <errno.h>
|
||||
#include <string.h>
|
||||
#include <fcntl.h>
|
||||
#include <io.h>
|
||||
|
||||
extern "C" {
|
||||
#include <ffmpeg\avformat.h>
|
||||
#include <ffmpeg\avcodec.h>
|
||||
#include <ffmpeg\swscale.h>
|
||||
}
|
||||
|
||||
#include "MatroskaParser.h"
|
||||
#include "avisynth.h"
|
||||
#include "stdiostream.cpp"
|
||||
|
||||
class FFBase : public IClip {
|
||||
private:
|
||||
SwsContext *SWS;
|
||||
int ConvertToFormat;
|
||||
int ConvertFromFormat;
|
||||
protected:
|
||||
VideoInfo VI;
|
||||
|
||||
void SetOutputFormat(int CurrentFormat, IScriptEnvironment *Env) {
|
||||
int Loss;
|
||||
int BestFormat = avcodec_find_best_pix_fmt((1 << PIX_FMT_YUV420P) | (1 << PIX_FMT_YUYV422) | (1 << PIX_FMT_RGB32) | (1 << PIX_FMT_BGR24), CurrentFormat, 1, &Loss);
|
||||
|
||||
switch (BestFormat) {
|
||||
case PIX_FMT_YUV420P: VI.pixel_type = VideoInfo::CS_I420; break;
|
||||
case PIX_FMT_YUYV422: VI.pixel_type = VideoInfo::CS_YUY2; break;
|
||||
case PIX_FMT_RGB32: VI.pixel_type = VideoInfo::CS_BGR32; break;
|
||||
case PIX_FMT_BGR24: VI.pixel_type = VideoInfo::CS_BGR24; break;
|
||||
default:
|
||||
Env->ThrowError("No suitable output format found");
|
||||
}
|
||||
|
||||
if (BestFormat != CurrentFormat) {
|
||||
ConvertFromFormat = CurrentFormat;
|
||||
ConvertToFormat = BestFormat;
|
||||
SWS = sws_getContext(VI.width, VI.height, ConvertFromFormat, VI.width, VI.height, ConvertToFormat, SWS_LANCZOS, NULL, NULL, NULL);
|
||||
}
|
||||
}
|
||||
|
||||
PVideoFrame OutputFrame(AVFrame *Frame, IScriptEnvironment *Env) {
|
||||
PVideoFrame Dst = Env->NewVideoFrame(VI);
|
||||
|
||||
if (ConvertToFormat != PIX_FMT_NONE && VI.pixel_type == VideoInfo::CS_I420) {
|
||||
uint8_t *DstData[3] = {Dst->GetWritePtr(PLANAR_Y), Dst->GetWritePtr(PLANAR_U), Dst->GetWritePtr(PLANAR_V)};
|
||||
int DstStride[3] = {Dst->GetPitch(PLANAR_Y), Dst->GetPitch(PLANAR_U), Dst->GetPitch(PLANAR_V)};
|
||||
sws_scale(SWS, Frame->data, Frame->linesize, 0, VI.height, DstData, DstStride);
|
||||
} else if (ConvertToFormat != PIX_FMT_NONE) {
|
||||
if (VI.IsRGB()) {
|
||||
uint8_t *DstData[1] = {Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1)};
|
||||
int DstStride[1] = {-Dst->GetPitch()};
|
||||
sws_scale(SWS, Frame->data, Frame->linesize, 0, VI.height, DstData, DstStride);
|
||||
} else {
|
||||
uint8_t *DstData[1] = {Dst->GetWritePtr()};
|
||||
int DstStride[1] = {Dst->GetPitch()};
|
||||
sws_scale(SWS, Frame->data, Frame->linesize, 0, VI.height, DstData, DstStride);
|
||||
}
|
||||
} else if (VI.pixel_type == VideoInfo::CS_I420) {
|
||||
Env->BitBlt(Dst->GetWritePtr(PLANAR_Y), Dst->GetPitch(PLANAR_Y), Frame->data[0], Frame->linesize[0], Dst->GetRowSize(PLANAR_Y), Dst->GetHeight(PLANAR_Y));
|
||||
Env->BitBlt(Dst->GetWritePtr(PLANAR_U), Dst->GetPitch(PLANAR_U), Frame->data[1], Frame->linesize[1], Dst->GetRowSize(PLANAR_U), Dst->GetHeight(PLANAR_U));
|
||||
Env->BitBlt(Dst->GetWritePtr(PLANAR_V), Dst->GetPitch(PLANAR_V), Frame->data[2], Frame->linesize[2], Dst->GetRowSize(PLANAR_V), Dst->GetHeight(PLANAR_V));
|
||||
} else {
|
||||
if (VI.IsRGB())
|
||||
Env->BitBlt(Dst->GetWritePtr() + Dst->GetPitch() * (Dst->GetHeight() - 1), -Dst->GetPitch(), Frame->data[0], Frame->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
|
||||
else
|
||||
Env->BitBlt(Dst->GetWritePtr(), Dst->GetPitch(), Frame->data[0], Frame->linesize[0], Dst->GetRowSize(), Dst->GetHeight());
|
||||
}
|
||||
return Dst;
|
||||
}
|
||||
|
||||
public:
|
||||
virtual bool __stdcall GetParity(int n) { return 0; }
|
||||
virtual void __stdcall GetAudio(void* buf, __int64 start, __int64 count, IScriptEnvironment* env) { }
|
||||
virtual void __stdcall SetCacheHints(int cachehints, int frame_range) { }
|
||||
virtual const VideoInfo& __stdcall GetVideoInfo() { return VI; }
|
||||
|
||||
FFBase() {
|
||||
SWS = NULL;
|
||||
ConvertToFormat = PIX_FMT_NONE;
|
||||
ConvertFromFormat = PIX_FMT_NONE;
|
||||
memset(&VI, 0, sizeof(VI));
|
||||
}
|
||||
};
|
||||
|
||||
class FFMKVSource : public FFBase {
|
||||
private:
|
||||
AVCodecContext CodecContext;
|
||||
AVCodec *Codec;
|
||||
AVFrame *Frame;
|
||||
int CurrentFrame;
|
||||
|
||||
struct FFMKVFrameInfo {
|
||||
ulonglong DTS;
|
||||
bool KeyFrame;
|
||||
};
|
||||
|
||||
std::vector<FFMKVFrameInfo> FrameToDTS;
|
||||
std::map<ulonglong, int> DTSToFrame;
|
||||
|
||||
StdIoStream ST;
|
||||
MatroskaFile *MF;
|
||||
|
||||
char ErrorMessage[256];
|
||||
unsigned int BufferSize;
|
||||
void *Buffer;
|
||||
CompressedStream *CS;
|
||||
|
||||
int ReadNextFrame(AVFrame *Frame, ulonglong *StartTime, IScriptEnvironment* Env);
|
||||
|
||||
CodecID MatroskaToFFCodecID(TrackInfo *TI) {
|
||||
char *Codec = TI->CodecID;
|
||||
if (!strcmp(Codec, "V_MS/VFW/FOURCC")) {
|
||||
switch (((BITMAPINFOHEADER *)TI->CodecPrivate)->biCompression) {
|
||||
case MAKEFOURCC('F', 'F', 'D', 'S'):
|
||||
case MAKEFOURCC('F', 'V', 'F', 'W'):
|
||||
case MAKEFOURCC('X', 'V', 'I', 'D'):
|
||||
case MAKEFOURCC('D', 'I', 'V', 'X'):
|
||||
case MAKEFOURCC('D', 'X', '5', '0'):
|
||||
case MAKEFOURCC('M', 'P', '4', 'V'):
|
||||
case MAKEFOURCC('3', 'I', 'V', 'X'):
|
||||
case MAKEFOURCC('W', 'V', '1', 'F'):
|
||||
case MAKEFOURCC('F', 'M', 'P', '4'):
|
||||
case MAKEFOURCC('S', 'M', 'P', '4'):
|
||||
return CODEC_ID_MPEG4;
|
||||
case MAKEFOURCC('D', 'I', 'V', '3'):
|
||||
case MAKEFOURCC('D', 'V', 'X', '3'):
|
||||
case MAKEFOURCC('M', 'P', '4', '3'):
|
||||
return CODEC_ID_MSMPEG4V3;
|
||||
case MAKEFOURCC('M', 'P', '4', '2'):
|
||||
return CODEC_ID_MSMPEG4V2;
|
||||
case MAKEFOURCC('M', 'P', '4', '1'):
|
||||
return CODEC_ID_MSMPEG4V1;
|
||||
case MAKEFOURCC('W', 'M', 'V', '1'):
|
||||
return CODEC_ID_WMV1;
|
||||
case MAKEFOURCC('W', 'M', 'V', '2'):
|
||||
return CODEC_ID_WMV2;
|
||||
case MAKEFOURCC('W', 'M', 'V', '3'):
|
||||
return CODEC_ID_WMV3;
|
||||
/*
|
||||
case MAKEFOURCC('M', 'S', 'S', '1'):
|
||||
case MAKEFOURCC('M', 'S', 'S', '2'):
|
||||
case MAKEFOURCC('W', 'V', 'P', '2'):
|
||||
case MAKEFOURCC('W', 'M', 'V', 'P'):
|
||||
return CODEC_ID_WMV9_LIB;
|
||||
*/
|
||||
case MAKEFOURCC('W', 'V', 'C', '1'):
|
||||
return CODEC_ID_VC1;
|
||||
case MAKEFOURCC('V', 'P', '5', '0'):
|
||||
return CODEC_ID_VP5;
|
||||
case MAKEFOURCC('V', 'P', '6', '0'):
|
||||
case MAKEFOURCC('V', 'P', '6', '1'):
|
||||
case MAKEFOURCC('V', 'P', '6', '2'):
|
||||
return CODEC_ID_VP6;
|
||||
case MAKEFOURCC('V', 'P', '6', 'F'):
|
||||
case MAKEFOURCC('F', 'L', 'V', '4'):
|
||||
return CODEC_ID_VP6F;
|
||||
case MAKEFOURCC('C', 'A', 'V', 'S'):
|
||||
return CODEC_ID_CAVS;
|
||||
case MAKEFOURCC('M', 'P', 'G', '1'):
|
||||
case MAKEFOURCC('M', 'P', 'E', 'G'):
|
||||
return CODEC_ID_MPEG2VIDEO; // not a typo
|
||||
case MAKEFOURCC('M', 'P', 'G', '2'):
|
||||
case MAKEFOURCC('E', 'M', '2', 'V'):
|
||||
case MAKEFOURCC('M', 'M', 'E', 'S'):
|
||||
return CODEC_ID_MPEG2VIDEO;
|
||||
case MAKEFOURCC('H', '2', '6', '3'):
|
||||
case MAKEFOURCC('S', '2', '6', '3'):
|
||||
case MAKEFOURCC('L', '2', '6', '3'):
|
||||
case MAKEFOURCC('M', '2', '6', '3'):
|
||||
case MAKEFOURCC('U', '2', '6', '3'):
|
||||
case MAKEFOURCC('X', '2', '6', '3'):
|
||||
return CODEC_ID_H263;
|
||||
case MAKEFOURCC('H', '2', '6', '4'):
|
||||
case MAKEFOURCC('X', '2', '6', '4'):
|
||||
case MAKEFOURCC('V', 'S', 'S', 'H'):
|
||||
case MAKEFOURCC('D', 'A', 'V', 'C'):
|
||||
case MAKEFOURCC('P', 'A', 'V', 'C'):
|
||||
case MAKEFOURCC('A', 'V', 'C', '1'):
|
||||
return CODEC_ID_H264;
|
||||
case MAKEFOURCC('M', 'J', 'P', 'G'):
|
||||
case MAKEFOURCC('L', 'J', 'P', 'G'):
|
||||
case MAKEFOURCC('M', 'J', 'L', 'S'):
|
||||
case MAKEFOURCC('J', 'P', 'E', 'G'): // questionable fourcc?
|
||||
case MAKEFOURCC('A', 'V', 'R', 'N'):
|
||||
case MAKEFOURCC('M', 'J', 'P', 'A'):
|
||||
return CODEC_ID_MJPEG;
|
||||
case MAKEFOURCC('D', 'V', 'S', 'D'):
|
||||
case MAKEFOURCC('D', 'V', '2', '5'):
|
||||
case MAKEFOURCC('D', 'V', '5', '0'):
|
||||
case MAKEFOURCC('C', 'D', 'V', 'C'):
|
||||
case MAKEFOURCC('C', 'D', 'V', '5'):
|
||||
case MAKEFOURCC('D', 'V', 'I', 'S'):
|
||||
case MAKEFOURCC('P', 'D', 'V', 'C'):
|
||||
return CODEC_ID_DVVIDEO;
|
||||
case MAKEFOURCC('H', 'F', 'Y', 'U'):
|
||||
case MAKEFOURCC('F', 'F', 'V', 'H'):
|
||||
return CODEC_ID_HUFFYUV;
|
||||
case MAKEFOURCC('C', 'Y', 'U', 'V'):
|
||||
return CODEC_ID_CYUV;
|
||||
case MAKEFOURCC('A', 'S', 'V', '1'):
|
||||
return CODEC_ID_ASV1;
|
||||
case MAKEFOURCC('A', 'S', 'V', '2'):
|
||||
return CODEC_ID_ASV2;
|
||||
case MAKEFOURCC('V', 'C', 'R', '1'):
|
||||
return CODEC_ID_VCR1;
|
||||
case MAKEFOURCC('T', 'H', 'E', 'O'):
|
||||
return CODEC_ID_THEORA;
|
||||
case MAKEFOURCC('S', 'V', 'Q', '1'):
|
||||
return CODEC_ID_SVQ1;
|
||||
case MAKEFOURCC('S', 'V', 'Q', '3'):
|
||||
return CODEC_ID_SVQ3;
|
||||
case MAKEFOURCC('R', 'P', 'Z', 'A'):
|
||||
return CODEC_ID_RPZA;
|
||||
case MAKEFOURCC('F', 'F', 'V', '1'):
|
||||
return CODEC_ID_FFV1;
|
||||
case MAKEFOURCC('V', 'P', '3', '1'):
|
||||
return CODEC_ID_VP3;
|
||||
case MAKEFOURCC('R', 'L', 'E', '8'):
|
||||
return CODEC_ID_MSRLE;
|
||||
case MAKEFOURCC('M', 'S', 'Z', 'H'):
|
||||
return CODEC_ID_MSZH;
|
||||
case MAKEFOURCC('Z', 'L', 'I', 'B'):
|
||||
return CODEC_ID_FLV1;
|
||||
case MAKEFOURCC('F', 'L', 'V', '1'):
|
||||
return CODEC_ID_ZLIB;
|
||||
/*
|
||||
case MAKEFOURCC('P', 'N', 'G', '1'):
|
||||
return CODEC_ID_COREPNG;
|
||||
*/
|
||||
case MAKEFOURCC('M', 'P', 'N', 'G'):
|
||||
return CODEC_ID_PNG;
|
||||
/*
|
||||
case MAKEFOURCC('A', 'V', 'I', 'S'):
|
||||
return CODEC_ID_AVISYNTH;
|
||||
*/
|
||||
case MAKEFOURCC('C', 'R', 'A', 'M'):
|
||||
return CODEC_ID_MSVIDEO1;
|
||||
case MAKEFOURCC('R', 'T', '2', '1'):
|
||||
return CODEC_ID_INDEO2;
|
||||
case MAKEFOURCC('I', 'V', '3', '2'):
|
||||
case MAKEFOURCC('I', 'V', '3', '1'):
|
||||
return CODEC_ID_INDEO3;
|
||||
case MAKEFOURCC('C', 'V', 'I', 'D'):
|
||||
return CODEC_ID_CINEPAK;
|
||||
case MAKEFOURCC('R', 'V', '1', '0'):
|
||||
return CODEC_ID_RV10;
|
||||
case MAKEFOURCC('R', 'V', '2', '0'):
|
||||
return CODEC_ID_RV20;
|
||||
case MAKEFOURCC('8', 'B', 'P', 'S'):
|
||||
return CODEC_ID_8BPS;
|
||||
case MAKEFOURCC('Q', 'R', 'L', 'E'):
|
||||
return CODEC_ID_QTRLE;
|
||||
case MAKEFOURCC('D', 'U', 'C', 'K'):
|
||||
return CODEC_ID_TRUEMOTION1;
|
||||
case MAKEFOURCC('T', 'M', '2', '0'):
|
||||
return CODEC_ID_TRUEMOTION2;
|
||||
case MAKEFOURCC('T', 'S', 'C', 'C'):
|
||||
return CODEC_ID_TSCC;
|
||||
case MAKEFOURCC('S', 'N', 'O', 'W'):
|
||||
return CODEC_ID_SNOW;
|
||||
case MAKEFOURCC('Q', 'P', 'E', 'G'):
|
||||
case MAKEFOURCC('Q', '1', '_', '0'):
|
||||
case MAKEFOURCC('Q', '1', '_', '1'):
|
||||
return CODEC_ID_QPEG;
|
||||
case MAKEFOURCC('H', '2', '6', '1'):
|
||||
case MAKEFOURCC('M', '2', '6', '1'):
|
||||
return CODEC_ID_H261;
|
||||
case MAKEFOURCC('L', 'O', 'C', 'O'):
|
||||
return CODEC_ID_LOCO;
|
||||
case MAKEFOURCC('W', 'N', 'V', '1'):
|
||||
return CODEC_ID_WNV1;
|
||||
case MAKEFOURCC('C', 'S', 'C', 'D'):
|
||||
return CODEC_ID_CSCD;
|
||||
case MAKEFOURCC('Z', 'M', 'B', 'V'):
|
||||
return CODEC_ID_ZMBV;
|
||||
case MAKEFOURCC('U', 'L', 'T', 'I'):
|
||||
return CODEC_ID_ULTI;
|
||||
case MAKEFOURCC('V', 'I', 'X', 'L'):
|
||||
return CODEC_ID_VIXL;
|
||||
case MAKEFOURCC('A', 'A', 'S', 'C'):
|
||||
return CODEC_ID_AASC;
|
||||
case MAKEFOURCC('F', 'P', 'S', '1'):
|
||||
return CODEC_ID_FRAPS;
|
||||
default:
|
||||
return CODEC_ID_NONE;
|
||||
}
|
||||
} else if (!strcmp(Codec, "V_MPEG4/ISO/AVC"))
|
||||
return CODEC_ID_H264;
|
||||
else if (!strcmp(Codec, "V_MPEG4/ISO/ASP"))
|
||||
return CODEC_ID_MPEG4;
|
||||
else if (!strcmp(Codec, "V_MPEG2"))
|
||||
return CODEC_ID_MPEG2VIDEO;
|
||||
else if (!strcmp(Codec, "V_MPEG1"))
|
||||
return CODEC_ID_MPEG2VIDEO; // still not a typo
|
||||
else if (!strcmp(Codec, "V_SNOW"))
|
||||
return CODEC_ID_SNOW;
|
||||
else if (!strcmp(Codec, "V_THEORA"))
|
||||
return CODEC_ID_THEORA;
|
||||
else if (!strncmp(Codec, "V_REAL/RV", 9)) {
|
||||
switch (Codec[9]) {
|
||||
case '1':
|
||||
return CODEC_ID_RV10;
|
||||
case '2':
|
||||
return CODEC_ID_RV20;
|
||||
case '3':
|
||||
return CODEC_ID_RV30;
|
||||
case '4':
|
||||
return CODEC_ID_RV40;
|
||||
default:
|
||||
return CODEC_ID_NONE;
|
||||
}
|
||||
} else
|
||||
return CODEC_ID_NONE;
|
||||
}
|
||||
public:
  FFMKVSource(const char *Source, int Track, IScriptEnvironment* Env) {
    BufferSize = 0;
    Buffer = NULL;
    Frame = NULL;
    CS = NULL;
    CurrentFrame = 0;

    // wire up the stdio-backed InputStream callbacks that MatroskaParser will use
    memset(&ST, 0, sizeof(ST));
    ST.base.read = (int (__cdecl *)(InputStream *, ulonglong, void *, int))StdIoRead;
    ST.base.scan = (longlong (__cdecl *)(InputStream *, ulonglong, unsigned int))StdIoScan;
    ST.base.getcachesize = (unsigned int (__cdecl *)(InputStream *))StdIoGetCacheSize;
    ST.base.geterror = (const char *(__cdecl *)(InputStream *))StdIoGetLastError;
    ST.base.memalloc = (void *(__cdecl *)(InputStream *, size_t))StdIoMalloc;
    ST.base.memrealloc = (void *(__cdecl *)(InputStream *, void *, size_t))StdIoRealloc;
    ST.base.memfree = (void (__cdecl *)(InputStream *, void *))StdIoFree;
    ST.base.progress = (int (__cdecl *)(InputStream *, ulonglong, ulonglong))StdIoProgress;

    ST.fp = fopen(Source, "rb");
    if (ST.fp == NULL)
      Env->ThrowError("Can't open '%s': %s\n", Source, strerror(errno));

    setvbuf(ST.fp, NULL, _IOFBF, CACHESIZE);

    MF = mkv_OpenEx(&ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
    if (MF == NULL) {
      fclose(ST.fp);
      Env->ThrowError("Can't parse Matroska file: %s\n", ErrorMessage);
    }

    // a negative track number means "pick the first video track"
    if (Track < 0)
      for (unsigned int i = 0; i < mkv_GetNumTracks(MF); i++)
        if (mkv_GetTrackInfo(MF, i)->Type == TT_VIDEO) {
          Track = i;
          break;
        }

    if (Track < 0)
      Env->ThrowError("No video track found");

    if ((unsigned)Track >= mkv_GetNumTracks(MF))
      Env->ThrowError("Invalid track number: %d\n", Track);

    TrackInfo *TI = mkv_GetTrackInfo(MF, Track);

    if (TI->Type != TT_VIDEO)
      Env->ThrowError("Selected track is not video");

    mkv_SetTrackMask(MF, ~(1 << Track));

    if (TI->CompEnabled) {
      CS = cs_Create(MF, Track, ErrorMessage, sizeof(ErrorMessage));
      if (CS == NULL)
        Env->ThrowError("Can't create decompressor: %s\n", ErrorMessage);
    }

    avcodec_get_context_defaults(&CodecContext);
    CodecContext.extradata = (uint8_t *)TI->CodecPrivate;
    CodecContext.extradata_size = TI->CodecPrivateSize;

    Codec = avcodec_find_decoder(MatroskaToFFCodecID(TI));
    if (Codec == NULL)
      Env->ThrowError("Codec not found");

    if (avcodec_open(&CodecContext, Codec) < 0)
      Env->ThrowError("Could not open codec");

    VI.image_type = VideoInfo::IT_TFF;
    VI.width = TI->AV.Video.PixelWidth;
    VI.height = TI->AV.Video.PixelHeight;
    VI.fps_denominator = 1;
    VI.fps_numerator = 30;

    SetOutputFormat(CodecContext.pix_fmt, Env);

    unsigned TrackNumber, FrameSize, FrameFlags;
    ulonglong StartTime, EndTime, FilePos;

    // walk the whole file once to build the frame number <-> timestamp index
    while (mkv_ReadFrame(MF, 0, &TrackNumber, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
      FFMKVFrameInfo FI;
      FI.DTS = StartTime;
      FI.KeyFrame = (FrameFlags & FRAME_KF) != 0;

      FrameToDTS.push_back(FI);
      DTSToFrame[StartTime] = VI.num_frames;
      VI.num_frames++;
    }

    Frame = avcodec_alloc_frame();

    mkv_Seek(MF, FrameToDTS[0].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
  }

  ~FFMKVSource() {
    free(Buffer);
    mkv_Close(MF);
    fclose(ST.fp);
    av_free(Frame);
    avcodec_close(&CodecContext);
  }

  PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};

int FFMKVSource::ReadNextFrame(AVFrame *Frame, ulonglong *StartTime, IScriptEnvironment* Env) {
  unsigned TrackNumber, FrameFlags, FrameSize;
  ulonglong EndTime, FilePos, StartTime2;
  *StartTime = -1;
  int FrameFinished = 0;
  int Ret = -1;

  while (mkv_ReadFrame(MF, 0, &TrackNumber, &StartTime2, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
    if ((longlong)*StartTime < 0)
      *StartTime = StartTime2;
    if (CS) {
      // the track uses content compression; feed the frame through MatroskaParser's decompressor
      char CSBuffer[1024];

      cs_NextFrame(CS, FilePos, FrameSize);
      for (;;) {
        int ReadBytes = cs_ReadData(CS, CSBuffer, sizeof(CSBuffer));
        if (ReadBytes < 0)
          Env->ThrowError("Error decompressing data: %s\n", cs_GetLastError(CS));
        if (ReadBytes == 0)
          break;
        Ret = avcodec_decode_video(&CodecContext, Frame, &FrameFinished, (uint8_t *)CSBuffer, ReadBytes);
        if (FrameFinished)
          goto Done;
      }
    } else {
      // uncompressed track: read the frame straight from the file
      size_t ReadBytes;

      if (fseek(ST.fp, FilePos, SEEK_SET))
        Env->ThrowError("fseek(): %s\n", strerror(errno));

      if (BufferSize < FrameSize) {
        BufferSize = FrameSize;
        Buffer = realloc(Buffer, BufferSize);
        if (Buffer == NULL)
          Env->ThrowError("Out of memory\n");
      }

      ReadBytes = fread(Buffer, 1, FrameSize, ST.fp);
      if (ReadBytes != FrameSize) {
        if (ReadBytes == 0) {
          if (feof(ST.fp))
            fprintf(stderr, "Unexpected EOF while reading frame\n");
          else
            fprintf(stderr, "Error reading frame: %s\n", strerror(errno));
        } else
          fprintf(stderr, "Short read while reading frame\n");
        goto Done;
      }

      Ret = avcodec_decode_video(&CodecContext, Frame, &FrameFinished, (uint8_t *)Buffer, FrameSize);

      if (FrameFinished)
        goto Done;
    }
  }

Done:
  return Ret;
}

PVideoFrame __stdcall FFMKVSource::GetFrame(int n, IScriptEnvironment* Env) {
  bool HasSeeked = false;
  bool HasBiggerKF = false;

  // is there a keyframe between the current position and the requested frame?
  for (int i = CurrentFrame + 1; i <= n; i++)
    if (FrameToDTS[i].KeyFrame) {
      HasBiggerKF = true;
      break;
    }

  // seek when going backwards, or when a keyframe ahead makes seeking cheaper than decoding through
  if (n < CurrentFrame || HasBiggerKF) {
    mkv_Seek(MF, FrameToDTS[n].DTS, MKVF_SEEK_TO_PREV_KEYFRAME);
    avcodec_flush_buffers(&CodecContext);
    HasSeeked = true;
  }

  do {
    ulonglong StartTime;
    int Ret = ReadNextFrame(Frame, &StartTime, Env);

    if (HasSeeked) {
      CurrentFrame = DTSToFrame[StartTime];
      HasSeeked = false;
    }

    CurrentFrame++;
  } while (CurrentFrame <= n);

  return OutputFrame(Frame, Env);
}

class FFMpegSource : public FFBase {
private:
  AVFormatContext *FormatContext;
  AVCodecContext *CodecContext;
  AVCodec *Codec;
  AVFrame *Frame;
  int Track;
  int CurrentFrame;
  std::vector<int64_t> FrameToDTS;
  std::map<int64_t, int> DTSToFrame;
  bool ForceSeek;

  int ReadNextFrame(AVFrame *Frame, int64_t *DTS);
public:
  FFMpegSource(const char *Source, int _Track, bool _ForceSeek, IScriptEnvironment* Env) : Track(_Track), ForceSeek(_ForceSeek) {
    CurrentFrame = 0;

    if (av_open_input_file(&FormatContext, Source, NULL, 0, NULL) != 0)
      Env->ThrowError("Couldn't open \"%s\"", Source);

    if (av_find_stream_info(FormatContext) < 0)
      Env->ThrowError("Couldn't find stream information");

    if (Track >= (int)FormatContext->nb_streams)
      Env->ThrowError("Invalid track number");

    // a negative track number means "pick the first video stream"
    if (Track < 0)
      for (unsigned int i = 0; i < FormatContext->nb_streams; i++)
        if (FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
          Track = i;
          break;
        }

    if (Track < 0)
      Env->ThrowError("Couldn't find a video stream");

    if (FormatContext->streams[Track]->codec->codec_type != CODEC_TYPE_VIDEO)
      Env->ThrowError("Selected stream doesn't contain video");

    CodecContext = FormatContext->streams[Track]->codec;

    Codec = avcodec_find_decoder(CodecContext->codec_id);
    if (Codec == NULL)
      Env->ThrowError("Codec not found");

    if (avcodec_open(CodecContext, Codec) < 0)
      Env->ThrowError("Could not open codec");

    VI.image_type = VideoInfo::IT_TFF;
    VI.width = CodecContext->width;
    VI.height = CodecContext->height;
    VI.fps_denominator = CodecContext->time_base.num * 1000;
    VI.fps_numerator = CodecContext->time_base.den;

    // sanity check framerate
    if (VI.fps_numerator < VI.fps_denominator || CodecContext->time_base.num <= 0 || CodecContext->time_base.den <= 0) {
      VI.fps_denominator = 1;
      VI.fps_numerator = 30;
    }

    SetOutputFormat(CodecContext->pix_fmt, Env);

    AVPacket Packet;

    // demux the whole file once to build the frame number <-> DTS index
    while (av_read_frame(FormatContext, &Packet) >= 0) {
      if (Packet.stream_index == Track) {
        FrameToDTS.push_back(Packet.dts);
        DTSToFrame[Packet.dts] = VI.num_frames;
        VI.num_frames++;
      }
      av_free_packet(&Packet);
    }

    Frame = avcodec_alloc_frame();

    av_seek_frame(FormatContext, Track, 0, AVSEEK_FLAG_BACKWARD);
  }

  ~FFMpegSource() {
    av_free(Frame);
    avcodec_close(CodecContext);
    av_close_input_file(FormatContext);
  }

  PVideoFrame __stdcall GetFrame(int n, IScriptEnvironment* Env);
};

int FFMpegSource::ReadNextFrame(AVFrame *Frame, int64_t *DTS) {
  AVPacket Packet;
  int FrameFinished = 0;
  int Ret = -1;
  *DTS = -1;

  while (av_read_frame(FormatContext, &Packet) >= 0) {
    if (Packet.stream_index == Track) {
      Ret = avcodec_decode_video(CodecContext, Frame, &FrameFinished, Packet.data, Packet.size);

      if (*DTS < 0)
        *DTS = Packet.dts;
    }

    av_free_packet(&Packet);

    if (FrameFinished)
      break;
  }

  return Ret;
}

PVideoFrame __stdcall FFMpegSource::GetFrame(int n, IScriptEnvironment* Env) {
  bool HasSeeked = false;

  // find the closest index entry at or before the requested frame's DTS
  int IndexPosition = av_index_search_timestamp(FormatContext->streams[Track], FrameToDTS[n], AVSEEK_FLAG_BACKWARD);
  int64_t NearestIndexDTS = -1;
  if (IndexPosition >= 0)
    NearestIndexDTS = FormatContext->streams[Track]->index_entries[IndexPosition].timestamp;

  if (n < CurrentFrame || NearestIndexDTS > FrameToDTS[CurrentFrame] || (ForceSeek && IndexPosition == -1 && n > CurrentFrame + 10)) {
    av_seek_frame(FormatContext, Track, FrameToDTS[n], AVSEEK_FLAG_BACKWARD);
    avcodec_flush_buffers(CodecContext);
    HasSeeked = true;
  }

  do {
    int64_t DTS;
    int Ret = ReadNextFrame(Frame, &DTS);

    if (HasSeeked) {
      CurrentFrame = DTSToFrame[DTS];
      // stay in "just seeked" mode until a packet with a valid DTS has been seen
      HasSeeked = DTS < 0;
    }

    CurrentFrame++;
  } while (CurrentFrame <= n || HasSeeked);

  return OutputFrame(Frame, Env);
}

AVSValue __cdecl CreateFFMpegSource(AVSValue Args, void* UserData, IScriptEnvironment* Env) {
  if (!Args[0].Defined())
    Env->ThrowError("No source specified");

  av_register_all();

  AVFormatContext *FormatContext;

  if (av_open_input_file(&FormatContext, Args[0].AsString(), NULL, 0, NULL) != 0)
    Env->ThrowError("Couldn't open \"%s\"", Args[0].AsString());

  bool IsMatroska = !strcmp(FormatContext->iformat->name, "matroska");

  av_close_input_file(FormatContext);

  if (IsMatroska)
    return new FFMKVSource(Args[0].AsString(), Args[1].AsInt(-1), Env);
  else
    return new FFMpegSource(Args[0].AsString(), Args[1].AsInt(-1), Args[2].AsBool(false), Env);
}

extern "C" __declspec(dllexport) const char* __stdcall AvisynthPluginInit2(IScriptEnvironment* Env) {
  Env->AddFunction("FFMpegSource", "[source]s[track]i[forceseek]b", CreateFFMpegSource, 0);
  return "FFMpegSource";
}

24
FFmpegSource/ffmpegsource.txt
Normal file
@@ -0,0 +1,24 @@
Usage
FFMpegSource(string source, int track = -1, bool forceseek = false)
source: the source file
track: video track number as seen by the relevant demuxer; must be a video track,
       a negative value means the first video track found is used
forceseek: seek even if the format has no registered index; only useful for containers with limited avformat support

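Example: a minimal AviSynth script using the plugin might look like the following (the DLL path and file names here are placeholders, not part of the plugin):
LoadPlugin("FFMpegSource.dll")
FFMpegSource("clip.mkv")                            # first video track, no forced seeking
FFMpegSource("clip.avi", track=0, forceseek=true)   # explicit track, forced seeking
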
Compatibility
AVI, MKV, MP4, FLV1: Frame accurate
OGM: Messed-up first frame, and seeking produces smearing with forceseek=true; seeking is incredibly slow without it
WMV: No seeking
VOB: No RFF flags, otherwise it appears to work
TS: Seeking goes way off, and the total frame count is probably wrong too

Compiling
zlib (compiled DLL) from http://www.zlib.net/

ffmpeg svn from http://ffmpeg.mplayerhq.hu/

required configuration:
./configure --enable-shared --disable-static --enable-memalign-hack --enable-swscaler --enable-gpl

suggested additions:
--disable-encoders --disable-muxers

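For reference, combining the required flags with the suggested additions gives a configure line along these lines (any extra paths or options are up to the builder):
./configure --enable-shared --disable-static --enable-memalign-hack --enable-swscaler --enable-gpl --disable-encoders --disable-muxers
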
82
FFmpegSource/stdiostream.cpp
Normal file
@@ -0,0 +1,82 @@
/* first we need to create an I/O object that the parser will use to read the
 * source file
 */
struct StdIoStream {
  struct InputStream base;
  FILE *fp;
  int error;
};
typedef struct StdIoStream StdIoStream;

#define CACHESIZE 65536

/* StdIoStream methods */

/* read count bytes into buffer starting at file position pos
 * return the number of bytes read, -1 on error or 0 on EOF
 */
int StdIoRead(StdIoStream *st, ulonglong pos, void *buffer, int count) {
  size_t rd;
  if (fseek(st->fp, pos, SEEK_SET)) {
    st->error = errno;
    return -1;
  }
  rd = fread(buffer, 1, count, st->fp);
  if (rd == 0) {
    if (feof(st->fp))
      return 0;
    st->error = errno;
    return -1;
  }
  return rd;
}

/* scan for a signature sig (big-endian) starting at file position pos
 * return position of the first byte of signature or -1 if error/not found
 */
longlong StdIoScan(StdIoStream *st, ulonglong start, unsigned signature) {
  int c;
  unsigned cmp = 0;
  FILE *fp = st->fp;

  if (fseek(fp, start, SEEK_SET))
    return -1;

  while ((c = getc(fp)) != EOF) {
    cmp = ((cmp << 8) | c) & 0xffffffff;
    if (cmp == signature)
      return ftell(fp) - 4;
  }

  return -1;
}

/* return cache size, this is used to limit readahead */
unsigned StdIoGetCacheSize(StdIoStream *st) {
  return CACHESIZE;
}

/* return last error message */
const char *StdIoGetLastError(StdIoStream *st) {
  return strerror(st->error);
}

/* memory allocation, this is done via stdlib */
void *StdIoMalloc(StdIoStream *st, size_t size) {
  return malloc(size);
}

void *StdIoRealloc(StdIoStream *st, void *mem, size_t size) {
  return realloc(mem, size);
}

void StdIoFree(StdIoStream *st, void *mem) {
  free(mem);
}

/* progress report handler for lengthy operations
 * returns 0 to abort operation, nonzero to continue
 */
int StdIoProgress(StdIoStream *st, ulonglong cur, ulonglong max) {
  return 1;
}