forked from mia/Aegisub
update ffms2 to r407
Originally committed to SVN as r5149.
This commit is contained in:
parent
8f401a838a
commit
71928bc25b
20 changed files with 1180 additions and 1068 deletions
|
@ -22,15 +22,15 @@
|
|||
#define FFMS_H
|
||||
|
||||
// Version format: major - minor - micro - bump
|
||||
#define FFMS_VERSION ((2 << 24) | (14 << 16) | (0 << 8) | 0)
|
||||
#define FFMS_VERSION ((2 << 24) | (14 << 16) | (1 << 8) | 1)
|
||||
|
||||
#include <stdint.h>
|
||||
|
||||
#ifdef __cplusplus
|
||||
# define EXTERN_C extern "C"
|
||||
# define FFMS_EXTERN_C extern "C"
|
||||
# define FFMS_CLASS_TYPE class
|
||||
#else
|
||||
# define EXTERN_C
|
||||
# define FFMS_EXTERN_C
|
||||
# define FFMS_CLASS_TYPE struct
|
||||
#endif
|
||||
|
||||
|
@ -38,16 +38,16 @@
|
|||
# define FFMS_CC __stdcall
|
||||
# ifdef _MSC_VER
|
||||
# ifdef FFMS_EXPORTS
|
||||
# define FFMS_API(ret) EXTERN_C __declspec(dllexport) ret FFMS_CC
|
||||
# define FFMS_API(ret) FFMS_EXTERN_C __declspec(dllexport) ret FFMS_CC
|
||||
# else
|
||||
# define FFMS_API(ret) EXTERN_C __declspec(dllimport) ret FFMS_CC
|
||||
# define FFMS_API(ret) FFMS_EXTERN_C __declspec(dllimport) ret FFMS_CC
|
||||
# endif
|
||||
# else
|
||||
# define FFMS_API(ret) EXTERN_C ret FFMS_CC
|
||||
# define FFMS_API(ret) FFMS_EXTERN_C ret FFMS_CC
|
||||
# endif
|
||||
#else
|
||||
# define FFMS_CC
|
||||
# define FFMS_API(ret) EXTERN_C ret FFMS_CC
|
||||
# define FFMS_API(ret) FFMS_EXTERN_C ret FFMS_CC
|
||||
#endif
|
||||
|
||||
typedef struct {
|
||||
|
@ -106,7 +106,8 @@ enum FFMS_CPUFeatures {
|
|||
FFMS_CPU_CAPS_MMX2 = 0x02,
|
||||
FFMS_CPU_CAPS_3DNOW = 0x04,
|
||||
FFMS_CPU_CAPS_ALTIVEC = 0x08,
|
||||
FFMS_CPU_CAPS_BFIN = 0x10
|
||||
FFMS_CPU_CAPS_BFIN = 0x10,
|
||||
FFMS_CPU_CAPS_SSE2 = 0x20
|
||||
};
|
||||
|
||||
enum FFMS_SeekMode {
|
||||
|
@ -178,6 +179,12 @@ enum FFMS_Resizers {
|
|||
FFMS_RESIZER_SPLINE = 0x0400
|
||||
};
|
||||
|
||||
enum FFMS_AudioDelayModes {
|
||||
FFMS_DELAY_NO_SHIFT = -3,
|
||||
FFMS_DELAY_TIME_ZERO = -2,
|
||||
FFMS_DELAY_FIRST_VIDEO_TRACK = -1
|
||||
};
|
||||
|
||||
typedef struct {
|
||||
uint8_t *Data[4];
|
||||
int Linesize[4];
|
||||
|
@ -244,7 +251,7 @@ FFMS_API(void) FFMS_Init(int CPUFeatures, int UseUTF8Paths);
|
|||
FFMS_API(int) FFMS_GetLogLevel();
|
||||
FFMS_API(void) FFMS_SetLogLevel(int Level);
|
||||
FFMS_API(FFMS_VideoSource *) FFMS_CreateVideoSource(const char *SourceFile, int Track, FFMS_Index *Index, int Threads, int SeekMode, FFMS_ErrorInfo *ErrorInfo);
|
||||
FFMS_API(FFMS_AudioSource *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFMS_Index *Index, FFMS_ErrorInfo *ErrorInfo);
|
||||
FFMS_API(FFMS_AudioSource *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFMS_Index *Index, int DelayMode, FFMS_ErrorInfo *ErrorInfo);
|
||||
FFMS_API(void) FFMS_DestroyVideoSource(FFMS_VideoSource *V);
|
||||
FFMS_API(void) FFMS_DestroyAudioSource(FFMS_AudioSource *A);
|
||||
FFMS_API(const FFMS_VideoProperties *) FFMS_GetVideoProperties(FFMS_VideoSource *V);
|
||||
|
|
|
@ -45,6 +45,7 @@
|
|||
# if (LIBAVCODEC_VERSION_INT) >= (AV_VERSION_INT(52,29,0))
|
||||
# define FFMS_HAVE_FFMPEG_COLORSPACE_INFO
|
||||
# else
|
||||
# define AVCOL_RANGE_JPEG 2
|
||||
# ifdef _MSC_VER
|
||||
# pragma message("WARNING: Your FFmpeg is too old to support reporting colorspace and luma range information. The corresponding fields of FFMS_VideoProperties will be set to 0. Please update FFmpeg to get rid of this warning.")
|
||||
# else
|
||||
|
@ -67,6 +68,12 @@
|
|||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef LIBAVUTIL_VERSION_INT
|
||||
# if (LIBAVUTIL_VERSION_INT) < (AV_VERSION_INT(50, 8, 0))
|
||||
# define av_get_pix_fmt avcodec_get_pix_fmt
|
||||
# endif
|
||||
#endif
|
||||
|
||||
#ifdef LIBSWSCALE_VERSION_INT
|
||||
# if (LIBSWSCALE_VERSION_INT) < (AV_VERSION_INT(0,8,0))
|
||||
# define FFMS_SWS_CONST_PARAM
|
||||
|
|
|
@ -1,4 +1,4 @@
|
|||
// Copyright (c) 2007-2009 Fredrik Mellbin
|
||||
// Copyright (c) 2010 Thomas Goyne <tgoyne@gmail.com>
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
|
@ -20,105 +20,285 @@
|
|||
|
||||
#include "audiosource.h"
|
||||
|
||||
/* Audio Cache */
|
||||
#include <algorithm>
|
||||
#include <cassert>
|
||||
|
||||
TAudioBlock::TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, size_t SrcBytes) {
|
||||
this->Start = Start;
|
||||
this->Samples = Samples;
|
||||
Data = new uint8_t[SrcBytes];
|
||||
memcpy(Data, SrcData, SrcBytes);
|
||||
}
|
||||
|
||||
TAudioBlock::~TAudioBlock() {
|
||||
delete[] Data;
|
||||
}
|
||||
|
||||
TAudioCache::TAudioCache() {
|
||||
MaxCacheBlocks = 0;
|
||||
BytesPerSample = 0;
|
||||
}
|
||||
|
||||
TAudioCache::~TAudioCache() {
|
||||
for (TAudioCache::iterator it=begin(); it != end(); it++)
|
||||
delete *it;
|
||||
}
|
||||
|
||||
void TAudioCache::Initialize(int BytesPerSample, int MaxCacheBlocks) {
|
||||
this->BytesPerSample = BytesPerSample;
|
||||
this->MaxCacheBlocks = MaxCacheBlocks;
|
||||
}
|
||||
|
||||
void TAudioCache::CacheBlock(int64_t Start, int64_t Samples, uint8_t *SrcData) {
|
||||
if (BytesPerSample > 0) {
|
||||
for (TAudioCache::iterator it=begin(); it != end(); it++) {
|
||||
if ((*it)->Start == Start) {
|
||||
delete *it;
|
||||
erase(it);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
push_front(new TAudioBlock(Start, Samples, SrcData, static_cast<size_t>(Samples * BytesPerSample)));
|
||||
if (static_cast<int>(size()) >= MaxCacheBlocks) {
|
||||
delete back();
|
||||
pop_back();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
bool TAudioCache::AudioBlockComp(TAudioBlock *A, TAudioBlock *B) {
|
||||
return A->Start < B->Start;
|
||||
}
|
||||
|
||||
int64_t TAudioCache::FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst) {
|
||||
// May be better to move used blocks to the front
|
||||
std::list<TAudioBlock *> UsedBlocks;
|
||||
for (TAudioCache::iterator it=begin(); it != end(); it++) {
|
||||
int64_t SrcOffset = FFMAX(0, Start - (*it)->Start);
|
||||
int64_t DstOffset = FFMAX(0, (*it)->Start - Start);
|
||||
int64_t CopySamples = FFMIN((*it)->Samples - SrcOffset, Samples - DstOffset);
|
||||
if (CopySamples > 0) {
|
||||
memcpy(Dst + DstOffset * BytesPerSample, (*it)->Data + SrcOffset * BytesPerSample, static_cast<size_t>(CopySamples * BytesPerSample));
|
||||
UsedBlocks.push_back(*it);
|
||||
}
|
||||
}
|
||||
UsedBlocks.sort(AudioBlockComp);
|
||||
int64_t Ret = Start;
|
||||
for (std::list<TAudioBlock *>::iterator it = UsedBlocks.begin(); it != UsedBlocks.end(); it++) {
|
||||
if (it == UsedBlocks.begin() || Ret == (*it)->Start)
|
||||
Ret = (*it)->Start + (*it)->Samples;
|
||||
else
|
||||
break;
|
||||
}
|
||||
return FFMIN(Ret, Start + Samples);
|
||||
}
|
||||
|
||||
/* FFMS_AudioSource base class */
|
||||
|
||||
FFMS_AudioSource::FFMS_AudioSource(const char *SourceFile, FFMS_Index *Index, int Track) : DecodingBuffer(AVCODEC_MAX_AUDIO_FRAME_SIZE * 10), CurrentSample(0) {
|
||||
if (Track < 0 || Track >= static_cast<int>(Index->size()))
|
||||
FFMS_AudioSource::FFMS_AudioSource(const char *SourceFile, FFMS_Index &Index, int Track)
|
||||
: Delay(0)
|
||||
, MaxCacheBlocks(50)
|
||||
, BytesPerSample(0)
|
||||
, Decoded(0)
|
||||
, CurrentSample(-1)
|
||||
, PacketNumber(0)
|
||||
, CurrentFrame(NULL)
|
||||
, TrackNumber(Track)
|
||||
, SeekOffset(0)
|
||||
, DecodingBuffer(AVCODEC_MAX_AUDIO_FRAME_SIZE * 10)
|
||||
{
|
||||
if (Track < 0 || Track >= static_cast<int>(Index.size()))
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Out of bounds track index selected");
|
||||
|
||||
if (Index->at(Track).TT != FFMS_TYPE_AUDIO)
|
||||
if (Index[Track].TT != FFMS_TYPE_AUDIO)
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Not an audio track");
|
||||
|
||||
if (Index->at(Track).size() == 0)
|
||||
if (Index[Track].empty())
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Audio track contains no audio frames");
|
||||
|
||||
if (!Index->CompareFileSignature(SourceFile))
|
||||
Frames = Index[Track];
|
||||
|
||||
if (!Index.CompareFileSignature(SourceFile))
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_FILE_MISMATCH,
|
||||
"The index does not match the source file");
|
||||
}
|
||||
void FFMS_AudioSource::Init(FFMS_Index &Index, int DelayMode) {
|
||||
// The first packet after a seek is often decoded incorrectly, which
|
||||
// makes it impossible to ever correctly seek back to the beginning, so
|
||||
// store the first block now
|
||||
|
||||
FFMS_AudioSource::~FFMS_AudioSource() {
|
||||
// In addition, anything with the same PTS as the first packet can't be
|
||||
// distinguished from the first packet and so can't be seeked to, so
|
||||
// store those as well
|
||||
|
||||
// Some of LAVF's splitters don't like to seek to the beginning of the
|
||||
// file (ts and?), so cache a few blocks even if PTSes are unique
|
||||
// Packet 7 is the last packet I've had be unseekable to, so cache up to
|
||||
// 10 for a bit of an extra buffer
|
||||
CacheIterator end = Cache.end();
|
||||
while (PacketNumber < Frames.size() &&
|
||||
((Frames[0].PTS != ffms_av_nopts_value && Frames[PacketNumber].PTS == Frames[0].PTS) ||
|
||||
Cache.size() < 10)) {
|
||||
|
||||
DecodeNextBlock();
|
||||
if (Decoded)
|
||||
CacheBlock(end, CurrentSample, Decoded, &DecodingBuffer[0]);
|
||||
}
|
||||
// Store the iterator to the last element of the cache which is used for
|
||||
// correctness rather than speed, so that when looking for one to delete
|
||||
// we know how much to skip
|
||||
CacheNoDelete = Cache.end();
|
||||
--CacheNoDelete;
|
||||
|
||||
// Read properties of the audio which may not be available until the first
|
||||
// frame has been decoded
|
||||
FillAP(AP, CodecContext, Frames);
|
||||
|
||||
if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
|
||||
"Codec returned zero size audio");
|
||||
|
||||
if (DelayMode < FFMS_DELAY_NO_SHIFT)
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Bad audio delay compensation mode");
|
||||
|
||||
if (DelayMode == FFMS_DELAY_NO_SHIFT) return;
|
||||
|
||||
if (DelayMode > (signed)Index.size())
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Out of bounds track index selected for audio delay compensation");
|
||||
|
||||
if (DelayMode >= 0 && Index[DelayMode].TT != FFMS_TYPE_VIDEO)
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Audio delay compensation must be relative to a video track");
|
||||
|
||||
double AdjustRelative = 0;
|
||||
if (DelayMode != FFMS_DELAY_TIME_ZERO) {
|
||||
if (DelayMode == FFMS_DELAY_FIRST_VIDEO_TRACK) {
|
||||
for (size_t i = 0; i < Index.size(); ++i) {
|
||||
if (Index[i].TT == FFMS_TYPE_VIDEO) {
|
||||
DelayMode = i;
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (DelayMode >= 0) {
|
||||
const FFMS_Track &VTrack = Index[DelayMode];
|
||||
AdjustRelative = VTrack[0].PTS * VTrack.TB.Num / (double)VTrack.TB.Den / 1000.;
|
||||
}
|
||||
}
|
||||
|
||||
Delay = static_cast<int64_t>((AdjustRelative - Frames[0].PTS) * AP.SampleRate + .5);
|
||||
AP.NumSamples -= Delay;
|
||||
}
|
||||
|
||||
void FFMS_AudioSource::GetAudioCheck(int64_t Start, int64_t Count) {
|
||||
if (Start < 0 || Start + Count > AP.NumSamples)
|
||||
void FFMS_AudioSource::CacheBlock(CacheIterator &pos, int64_t Start, size_t Samples, uint8_t *SrcData) {
|
||||
Cache.insert(pos, AudioBlock(Start, Samples, SrcData, Samples * BytesPerSample));
|
||||
|
||||
if (Cache.size() >= MaxCacheBlocks) {
|
||||
// Kill the oldest one
|
||||
CacheIterator min = CacheNoDelete;
|
||||
// Never drop the first one as the first packet decoded after a seek
|
||||
// is often decoded incorrectly and we can't seek to before the first one
|
||||
++min;
|
||||
for (CacheIterator it = min; it != Cache.end(); ++it)
|
||||
if (it->Age < min->Age) min = it;
|
||||
if (min == pos) ++pos;
|
||||
Cache.erase(min);
|
||||
}
|
||||
}
|
||||
|
||||
void FFMS_AudioSource::DecodeNextBlock() {
|
||||
if (BytesPerSample == 0) BytesPerSample = (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels) / 8;
|
||||
|
||||
CurrentFrame = &Frames[PacketNumber];
|
||||
|
||||
AVPacket Packet;
|
||||
if (!ReadPacket(&Packet))
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_UNKNOWN, "ReadPacket unexpectedly failed to read a packet");
|
||||
|
||||
// ReadPacket may have changed the packet number
|
||||
CurrentFrame = &Frames[PacketNumber];
|
||||
CurrentSample = CurrentFrame->SampleStart;
|
||||
++PacketNumber;
|
||||
|
||||
uint8_t *Buf = &DecodingBuffer[0];
|
||||
uint8_t *Data = Packet.data;
|
||||
while (Packet.size > 0) {
|
||||
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 10 - (Buf - &DecodingBuffer[0]);
|
||||
int Ret = avcodec_decode_audio3(CodecContext, (int16_t *)Buf, &TempOutputBufSize, &Packet);
|
||||
|
||||
// Should only ever happen if the user chose to ignore decoding errors
|
||||
// during indexing, so continue to just ignore decoding errors
|
||||
if (Ret < 0) break;
|
||||
|
||||
if (Ret > 0) {
|
||||
Packet.size -= Ret;
|
||||
Packet.data += Ret;
|
||||
Buf += TempOutputBufSize;
|
||||
}
|
||||
}
|
||||
Packet.data = Data;
|
||||
FreePacket(&Packet);
|
||||
|
||||
Decoded = (Buf - &DecodingBuffer[0]) / BytesPerSample;
|
||||
if (Decoded == 0) {
|
||||
// zero sample packets aren't included in the index so we didn't
|
||||
// actually move to the next packet
|
||||
--PacketNumber;
|
||||
}
|
||||
}
|
||||
|
||||
static bool SampleStartComp(const TFrameInfo &a, const TFrameInfo &b) {
|
||||
return a.SampleStart < b.SampleStart;
|
||||
}
|
||||
|
||||
void FFMS_AudioSource::GetAudio(void *Buf, int64_t Start, int64_t Count) {
|
||||
if (Start < 0 || Start + Count > AP.NumSamples || Count < 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_INVALID_ARGUMENT,
|
||||
"Out of bounds audio samples requested");
|
||||
|
||||
uint8_t *Dst = static_cast<uint8_t*>(Buf);
|
||||
|
||||
// Apply audio delay (if any) and fill any samples before the start time with zero
|
||||
Start -= Delay;
|
||||
if (Start < 0) {
|
||||
size_t Bytes = static_cast<size_t>(BytesPerSample * FFMIN(-Start, Count));
|
||||
memset(Dst, 0, Bytes);
|
||||
|
||||
Count += Start;
|
||||
// Entire request was before the start of the audio
|
||||
if (Count <= 0) return;
|
||||
|
||||
Start = 0;
|
||||
Dst += Bytes;
|
||||
}
|
||||
|
||||
CacheIterator it = Cache.begin();
|
||||
|
||||
while (Count > 0) {
|
||||
// Find first useful cache block
|
||||
while (it != Cache.end() && it->Start + it->Samples <= Start) ++it;
|
||||
|
||||
// Cache has the next block we want
|
||||
if (it != Cache.end() && it->Start <= Start) {
|
||||
int64_t SrcOffset = FFMAX(0, Start - it->Start);
|
||||
int64_t DstOffset = FFMAX(0, it->Start - Start);
|
||||
int64_t CopySamples = FFMIN(it->Samples - SrcOffset, Count - DstOffset);
|
||||
size_t Bytes = static_cast<size_t>(CopySamples * BytesPerSample);
|
||||
|
||||
memcpy(Dst + DstOffset * BytesPerSample, &it->Data[SrcOffset * BytesPerSample], Bytes);
|
||||
Start += CopySamples;
|
||||
Count -= CopySamples;
|
||||
Dst += Bytes;
|
||||
++it;
|
||||
}
|
||||
// Decode another block
|
||||
else {
|
||||
if (Start < CurrentSample && SeekOffset == -1)
|
||||
throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_CODEC, "Audio stream is not seekable");
|
||||
|
||||
if (SeekOffset >= 0 && (Start < CurrentSample || Start > CurrentSample + Decoded * 5)) {
|
||||
TFrameInfo f;
|
||||
f.SampleStart = Start;
|
||||
int NewPacketNumber = std::distance(Frames.begin(), std::lower_bound(Frames.begin(), Frames.end(), f, SampleStartComp));
|
||||
NewPacketNumber = FFMAX(0, NewPacketNumber - SeekOffset - 15);
|
||||
while (NewPacketNumber > 0 && !Frames[NewPacketNumber].KeyFrame) --NewPacketNumber;
|
||||
|
||||
// Only seek forward if it'll actually result in moving forward
|
||||
if (Start < CurrentSample || NewPacketNumber > PacketNumber) {
|
||||
PacketNumber = NewPacketNumber;
|
||||
Decoded = 0;
|
||||
CurrentSample = -1;
|
||||
avcodec_flush_buffers(CodecContext);
|
||||
Seek();
|
||||
}
|
||||
}
|
||||
|
||||
// Decode everything between the last keyframe and the block we want
|
||||
while (CurrentSample + Decoded <= Start) DecodeNextBlock();
|
||||
if (CurrentSample > Start)
|
||||
throw FFMS_Exception(FFMS_ERROR_SEEKING, FFMS_ERROR_CODEC, "Seeking is severely broken");
|
||||
|
||||
CacheBlock(it, CurrentSample, Decoded, &DecodingBuffer[0]);
|
||||
|
||||
size_t FirstSample = static_cast<size_t>(Start - CurrentSample);
|
||||
size_t Samples = static_cast<size_t>(Decoded - FirstSample);
|
||||
size_t Bytes = FFMIN(Samples, static_cast<size_t>(Count)) * BytesPerSample;
|
||||
|
||||
memcpy(Dst, &DecodingBuffer[FirstSample * BytesPerSample], Bytes);
|
||||
|
||||
Start += Samples;
|
||||
Count -= Samples;
|
||||
Dst += Bytes;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
size_t GetSeekablePacketNumber(FFMS_Track const& Frames, size_t PacketNumber) {
|
||||
// Packets don't always have unique PTSes, so we may not be able to
|
||||
// uniquely identify the packet we want. This function attempts to find
|
||||
// a PTS we can seek to which will let us figure out which packet we're
|
||||
// on before we get to the packet we actually wanted
|
||||
|
||||
// MatroskaAudioSource doesn't need this, as it seeks by byte offset
|
||||
// rather than PTS. LAVF theoretically can seek by byte offset, but we
|
||||
// don't use it as not all demuxers support it and it's broken in some of
|
||||
// those that claim to support it
|
||||
|
||||
// However much we might wish to, we can't seek to before packet zero
|
||||
if (PacketNumber == 0) return PacketNumber;
|
||||
|
||||
// Desired packet's PTS is unique, so don't do anything
|
||||
if (Frames[PacketNumber].PTS != Frames[PacketNumber - 1].PTS &&
|
||||
(PacketNumber + 1 == Frames.size() || Frames[PacketNumber].PTS != Frames[PacketNumber + 1].PTS))
|
||||
return PacketNumber;
|
||||
|
||||
// When decoding, we only reliably know what packet we're at when the
|
||||
// newly parsed packet has a different PTS from the previous one. As such,
|
||||
// we walk backwards until we hit a different PTS and then seek to there,
|
||||
// so that we can then decode until we hit the PTS group we actually wanted
|
||||
// (and thereby know that we're at the first packet in the group rather
|
||||
// than whatever the splitter happened to choose)
|
||||
|
||||
// This doesn't work if our desired packet has the same PTS as the first
|
||||
// packet, but this scenario should never come up anyway; we permanently
|
||||
// cache the decoded results from those packets, so there's no need to ever
|
||||
// seek to them
|
||||
int64_t PTS = Frames[PacketNumber].PTS;
|
||||
while (PacketNumber > 0 && PTS == Frames[PacketNumber].PTS)
|
||||
--PacketNumber;
|
||||
return PacketNumber;
|
||||
}
|
||||
|
|
|
@ -28,7 +28,6 @@ extern "C" {
|
|||
|
||||
#include <vector>
|
||||
#include <list>
|
||||
#include <sstream>
|
||||
#include <memory>
|
||||
#include "indexing.h"
|
||||
#include "utils.h"
|
||||
|
@ -46,93 +45,118 @@ extern "C" {
|
|||
# include "guids.h"
|
||||
#endif
|
||||
|
||||
class TAudioBlock {
|
||||
public:
|
||||
int64_t Start;
|
||||
int64_t Samples;
|
||||
uint8_t *Data;
|
||||
|
||||
TAudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, size_t SrcBytes);
|
||||
~TAudioBlock();
|
||||
};
|
||||
|
||||
class TAudioCache : private std::list<TAudioBlock *> {
|
||||
private:
|
||||
int MaxCacheBlocks;
|
||||
int BytesPerSample;
|
||||
static bool AudioBlockComp(TAudioBlock *A, TAudioBlock *B);
|
||||
public:
|
||||
TAudioCache();
|
||||
~TAudioCache();
|
||||
void Initialize(int BytesPerSample, int MaxCacheBlocks);
|
||||
void CacheBlock(int64_t Start, int64_t Samples, uint8_t *SrcData);
|
||||
int64_t FillRequest(int64_t Start, int64_t Samples, uint8_t *Dst);
|
||||
};
|
||||
|
||||
class FFMS_AudioSource {
|
||||
friend class FFSourceResources<FFMS_AudioSource>;
|
||||
struct AudioBlock {
|
||||
int64_t Age;
|
||||
int64_t Start;
|
||||
int64_t Samples;
|
||||
std::vector<uint8_t> Data;
|
||||
|
||||
AudioBlock(int64_t Start, int64_t Samples, uint8_t *SrcData, size_t SrcBytes)
|
||||
: Start(Start)
|
||||
, Samples(Samples)
|
||||
, Data(SrcData, SrcData + SrcBytes)
|
||||
{
|
||||
static int64_t Now = 0;
|
||||
Age = Now++;
|
||||
}
|
||||
};
|
||||
typedef std::list<AudioBlock>::iterator CacheIterator;
|
||||
|
||||
// delay in samples to apply to the audio
|
||||
int64_t Delay;
|
||||
// cache of decoded audio blocks
|
||||
std::list<AudioBlock> Cache;
|
||||
// max size of the cache in blocks
|
||||
size_t MaxCacheBlocks;
|
||||
// pointer to last element of the cache which should never be deleted
|
||||
CacheIterator CacheNoDelete;
|
||||
// bytes per sample * number of channels
|
||||
size_t BytesPerSample;
|
||||
// Number of samples stored in the decoding buffer
|
||||
size_t Decoded;
|
||||
|
||||
// Insert a block into the cache
|
||||
void CacheBlock(CacheIterator &pos, int64_t Start, size_t Samples, uint8_t *SrcData);
|
||||
|
||||
// Called after seeking
|
||||
virtual void Seek() { };
|
||||
// Read the next packet from the file
|
||||
virtual bool ReadPacket(AVPacket *) = 0;
|
||||
virtual void FreePacket(AVPacket *) { }
|
||||
protected:
|
||||
TAudioCache AudioCache;
|
||||
// First sample which is stored in the decoding buffer
|
||||
int64_t CurrentSample;
|
||||
// Next packet to be read
|
||||
size_t PacketNumber;
|
||||
// Current audio frame
|
||||
TFrameInfo *CurrentFrame;
|
||||
// Track which this corresponds to
|
||||
int TrackNumber;
|
||||
// Number of packets which the demuxer requires to know where it is
|
||||
// If -1, seeking is assumed to be impossible
|
||||
int SeekOffset;
|
||||
|
||||
// Buffer which audio is decoded into
|
||||
AlignedBuffer<uint8_t> DecodingBuffer;
|
||||
FFMS_Track Frames;
|
||||
AVCodecContext *CodecContext;
|
||||
int AudioTrack;
|
||||
FFCodecContext CodecContext;
|
||||
FFMS_AudioProperties AP;
|
||||
|
||||
virtual void Free(bool CloseCodec) = 0;
|
||||
void DecodeNextBlock();
|
||||
// Initialization which has to be done after the codec is opened
|
||||
void Init(FFMS_Index &Index, int DelayMode);
|
||||
|
||||
FFMS_AudioSource(const char *SourceFile, FFMS_Index &Index, int Track);
|
||||
|
||||
public:
|
||||
FFMS_AudioSource(const char *SourceFile, FFMS_Index *Index, int Track);
|
||||
virtual ~FFMS_AudioSource();
|
||||
virtual ~FFMS_AudioSource() { };
|
||||
FFMS_Track *GetTrack() { return &Frames; }
|
||||
const FFMS_AudioProperties& GetAudioProperties() { return AP; }
|
||||
virtual void GetAudio(void *Buf, int64_t Start, int64_t Count) = 0;
|
||||
void GetAudioCheck(int64_t Start, int64_t Count);
|
||||
const FFMS_AudioProperties& GetAudioProperties() const { return AP; }
|
||||
void GetAudio(void *Buf, int64_t Start, int64_t Count);
|
||||
};
|
||||
|
||||
class FFLAVFAudio : public FFMS_AudioSource {
|
||||
private:
|
||||
AVFormatContext *FormatContext;
|
||||
FFSourceResources<FFMS_AudioSource> Res;
|
||||
|
||||
void DecodeNextAudioBlock(int64_t *Count);
|
||||
void Free(bool CloseCodec);
|
||||
bool ReadPacket(AVPacket *);
|
||||
void FreePacket(AVPacket *);
|
||||
void Seek();
|
||||
|
||||
public:
|
||||
FFLAVFAudio(const char *SourceFile, int Track, FFMS_Index *Index);
|
||||
void GetAudio(void *Buf, int64_t Start, int64_t Count);
|
||||
FFLAVFAudio(const char *SourceFile, int Track, FFMS_Index &Index, int DelayMode);
|
||||
~FFLAVFAudio();
|
||||
};
|
||||
|
||||
class FFMatroskaAudio : public FFMS_AudioSource {
|
||||
private:
|
||||
MatroskaFile *MF;
|
||||
MatroskaReaderContext MC;
|
||||
TrackCompressionContext *TCC;
|
||||
TrackInfo *TI;
|
||||
std::auto_ptr<TrackCompressionContext> TCC;
|
||||
char ErrorMessage[256];
|
||||
FFSourceResources<FFMS_AudioSource> Res;
|
||||
size_t PacketNumber;
|
||||
|
||||
void DecodeNextAudioBlock(int64_t *Count);
|
||||
void Free(bool CloseCodec);
|
||||
bool ReadPacket(AVPacket *);
|
||||
|
||||
public:
|
||||
FFMatroskaAudio(const char *SourceFile, int Track, FFMS_Index *Index);
|
||||
void GetAudio(void *Buf, int64_t Start, int64_t Count);
|
||||
FFMatroskaAudio(const char *SourceFile, int Track, FFMS_Index &Index, int DelayMode);
|
||||
~FFMatroskaAudio();
|
||||
};
|
||||
|
||||
#ifdef HAALISOURCE
|
||||
|
||||
class FFHaaliAudio : public FFMS_AudioSource {
|
||||
private:
|
||||
CComPtr<IMMContainer> pMMC;
|
||||
std::vector<uint8_t> CodecPrivate;
|
||||
FFSourceResources<FFMS_AudioSource> Res;
|
||||
CComPtr<IMMFrame> pMMF;
|
||||
|
||||
bool ReadPacket(AVPacket *);
|
||||
void Seek();
|
||||
|
||||
void DecodeNextAudioBlock(int64_t *AFirstStartTime, int64_t *Count);
|
||||
void Free(bool CloseCodec);
|
||||
public:
|
||||
FFHaaliAudio(const char *SourceFile, int Track, FFMS_Index *Index, enum FFMS_Sources SourceMode);
|
||||
void GetAudio(void *Buf, int64_t Start, int64_t Count);
|
||||
FFHaaliAudio(const char *SourceFile, int Track, FFMS_Index &Index, enum FFMS_Sources SourceMode, int DelayMode);
|
||||
};
|
||||
|
||||
#endif // HAALISOURCE
|
||||
|
||||
size_t GetSeekablePacketNumber(FFMS_Track const& Frames, size_t PacketNumber);
|
||||
|
||||
#endif
|
||||
|
|
|
@ -25,6 +25,11 @@
|
|||
#include "audiosource.h"
|
||||
#include "indexing.h"
|
||||
|
||||
extern "C" {
|
||||
#include <libavutil/pixdesc.h>
|
||||
}
|
||||
|
||||
|
||||
#ifdef FFMS_WIN_DEBUG
|
||||
# include <windows.h>
|
||||
#endif
|
||||
|
@ -41,33 +46,34 @@ bool GlobalUseUTF8Paths = false;
|
|||
extern "C" int av_log_level;
|
||||
|
||||
void av_log_windebug_callback(void* ptr, int level, const char* fmt, va_list vl) {
|
||||
static int print_prefix=1;
|
||||
static int count;
|
||||
static char line[1024], prev[1024];
|
||||
AVClass* avc= ptr ? *(AVClass**)ptr : NULL;
|
||||
if(level>av_log_level)
|
||||
return;
|
||||
#undef fprintf
|
||||
if(print_prefix && avc) {
|
||||
snprintf(line, sizeof(line), "[%s @ %p]", avc->item_name(ptr), ptr);
|
||||
}else
|
||||
line[0]=0;
|
||||
static int print_prefix=1;
|
||||
static int count;
|
||||
static char line[1024] = {0}, prev[1024] = {0};
|
||||
AVClass* avc = ptr ? *(AVClass**)ptr : NULL;
|
||||
if(level > av_log_level)
|
||||
return;
|
||||
|
||||
vsnprintf(line + strlen(line), sizeof(line) - strlen(line), fmt, vl);
|
||||
int written = 0;
|
||||
if(print_prefix && avc) {
|
||||
written = snprintf(line, sizeof(line), "[%s @ %p]", avc->item_name(ptr), ptr);
|
||||
}
|
||||
|
||||
print_prefix= line[strlen(line)-1] == '\n';
|
||||
if(print_prefix && !strcmp(line, prev)){
|
||||
count++;
|
||||
return;
|
||||
}
|
||||
if(count>0){
|
||||
written += vsnprintf(line + written, sizeof(line) - written, fmt, vl);
|
||||
|
||||
print_prefix = line[written-1] == '\n';
|
||||
line[sizeof(line) - 1] = 0;
|
||||
if(print_prefix && !strcmp(line, prev)){
|
||||
count++;
|
||||
return;
|
||||
}
|
||||
if(count > 0){
|
||||
std::stringstream ss;
|
||||
ss << " Last message repeated " << count << " times\n";
|
||||
OutputDebugStringA(ss.str().c_str());
|
||||
count=0;
|
||||
}
|
||||
count = 0;
|
||||
}
|
||||
OutputDebugStringA(line);
|
||||
strcpy(prev, line);
|
||||
strcpy(prev, line);
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -140,21 +146,21 @@ FFMS_API(FFMS_VideoSource *) FFMS_CreateVideoSource(const char *SourceFile, int
|
|||
}
|
||||
}
|
||||
|
||||
FFMS_API(FFMS_AudioSource *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFMS_Index *Index, FFMS_ErrorInfo *ErrorInfo) {
|
||||
FFMS_API(FFMS_AudioSource *) FFMS_CreateAudioSource(const char *SourceFile, int Track, FFMS_Index *Index, int DelayMode, FFMS_ErrorInfo *ErrorInfo) {
|
||||
try {
|
||||
switch (Index->Decoder) {
|
||||
case FFMS_SOURCE_LAVF:
|
||||
return new FFLAVFAudio(SourceFile, Track, Index);
|
||||
return new FFLAVFAudio(SourceFile, Track, *Index, DelayMode);
|
||||
case FFMS_SOURCE_MATROSKA:
|
||||
return new FFMatroskaAudio(SourceFile, Track, Index);
|
||||
return new FFMatroskaAudio(SourceFile, Track, *Index, DelayMode);
|
||||
#ifdef HAALISOURCE
|
||||
case FFMS_SOURCE_HAALIMPEG:
|
||||
if (HasHaaliMPEG)
|
||||
return new FFHaaliAudio(SourceFile, Track, Index, FFMS_SOURCE_HAALIMPEG);
|
||||
return new FFHaaliAudio(SourceFile, Track, *Index, FFMS_SOURCE_HAALIMPEG, DelayMode);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_NOT_AVAILABLE, "Haali MPEG/TS source unavailable");
|
||||
case FFMS_SOURCE_HAALIOGG:
|
||||
if (HasHaaliOGG)
|
||||
return new FFHaaliAudio(SourceFile, Track, Index, FFMS_SOURCE_HAALIOGG);
|
||||
return new FFHaaliAudio(SourceFile, Track, *Index, FFMS_SOURCE_HAALIOGG, DelayMode);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_NOT_AVAILABLE, "Haali OGG/OGM source unavailable");
|
||||
#endif
|
||||
default:
|
||||
|
@ -440,7 +446,7 @@ FFMS_API(int) FFMS_WriteIndex(const char *IndexFile, FFMS_Index *Index, FFMS_Err
|
|||
}
|
||||
|
||||
FFMS_API(int) FFMS_GetPixFmt(const char *Name) {
|
||||
return avcodec_get_pix_fmt(Name);
|
||||
return av_get_pix_fmt(Name);
|
||||
}
|
||||
|
||||
FFMS_API(int) FFMS_GetPresentSources() {
|
||||
|
|
|
@ -19,23 +19,28 @@
|
|||
// THE SOFTWARE.
|
||||
|
||||
#include "indexing.h"
|
||||
#include <iostream>
|
||||
#include <fstream>
|
||||
|
||||
#include <algorithm>
|
||||
#include <fstream>
|
||||
#include <iostream>
|
||||
#include <limits>
|
||||
|
||||
|
||||
extern "C" {
|
||||
#include <libavutil/sha1.h>
|
||||
#include <zlib.h>
|
||||
}
|
||||
|
||||
#undef max
|
||||
|
||||
#define INDEXID 0x53920873
|
||||
|
||||
extern bool HasHaaliMPEG;
|
||||
extern bool HasHaaliOGG;
|
||||
|
||||
#ifndef WITH_LIBPOSTPROC
|
||||
unsigned postproc_version(void) { return 0; } // ugly workaround to avoid lots of ifdeffing
|
||||
#endif // WITH_LIBPOSTPROC
|
||||
#ifndef FFMS_USE_POSTPROC
|
||||
unsigned postproc_version() { return 0; } // ugly workaround to avoid lots of ifdeffing
|
||||
#endif // FFMS_USE_POSTPROC
|
||||
|
||||
struct IndexHeader {
|
||||
uint32_t Id;
|
||||
|
@ -52,11 +57,11 @@ struct IndexHeader {
|
|||
};
|
||||
|
||||
struct TrackHeader {
|
||||
uint32_t TT;
|
||||
uint32_t Frames;
|
||||
int64_t Num;
|
||||
int64_t Den;
|
||||
uint32_t UseDTS;
|
||||
uint32_t TT;
|
||||
uint32_t Frames;
|
||||
int64_t Num;
|
||||
int64_t Den;
|
||||
uint32_t UseDTS;
|
||||
};
|
||||
|
||||
|
||||
|
@ -73,12 +78,8 @@ SharedVideoContext::~SharedVideoContext() {
|
|||
if (FreeCodecContext)
|
||||
av_freep(&CodecContext);
|
||||
}
|
||||
|
||||
if (Parser)
|
||||
av_parser_close(Parser);
|
||||
|
||||
if (TCC)
|
||||
delete TCC;
|
||||
av_parser_close(Parser);
|
||||
delete TCC;
|
||||
}
|
||||
|
||||
SharedAudioContext::SharedAudioContext(bool FreeCodecContext) {
|
||||
|
@ -96,9 +97,7 @@ SharedAudioContext::~SharedAudioContext() {
|
|||
if (FreeCodecContext)
|
||||
av_freep(&CodecContext);
|
||||
}
|
||||
|
||||
if (TCC)
|
||||
delete TCC;
|
||||
delete TCC;
|
||||
}
|
||||
|
||||
TFrameInfo::TFrameInfo() {
|
||||
|
@ -119,22 +118,20 @@ TFrameInfo TFrameInfo::VideoFrameInfo(int64_t PTS, int RepeatPict, bool KeyFrame
|
|||
return TFrameInfo(PTS, 0, 0, RepeatPict, KeyFrame, FilePos, FrameSize);
|
||||
}
|
||||
|
||||
TFrameInfo TFrameInfo::AudioFrameInfo(int64_t PTS, int64_t SampleStart, unsigned int SampleCount, bool KeyFrame, int64_t FilePos, unsigned int FrameSize) {
|
||||
return TFrameInfo(PTS, SampleStart, SampleCount, 0, KeyFrame, FilePos, FrameSize);
|
||||
TFrameInfo TFrameInfo::AudioFrameInfo(int64_t PTS, int64_t SampleStart, int64_t SampleCount, bool KeyFrame, int64_t FilePos, unsigned int FrameSize) {
|
||||
return TFrameInfo(PTS, SampleStart, static_cast<unsigned int>(SampleCount), 0, KeyFrame, FilePos, FrameSize);
|
||||
}
|
||||
|
||||
void FFMS_Track::WriteTimecodes(const char *TimecodeFile) {
|
||||
ffms_fstream Timecodes(TimecodeFile, std::ios::out | std::ios::trunc);
|
||||
|
||||
if (!Timecodes.is_open()) {
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to open '" << TimecodeFile << "' for writing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (!Timecodes.is_open())
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to open '") + TimecodeFile + "' for writing");
|
||||
|
||||
Timecodes << "# timecode format v2\n";
|
||||
|
||||
for (iterator Cur=begin(); Cur!=end(); Cur++)
|
||||
for (iterator Cur = begin(); Cur != end(); ++Cur)
|
||||
Timecodes << std::fixed << ((Cur->PTS * TB.Num) / (double)TB.Den) << "\n";
|
||||
}
|
||||
|
||||
|
@ -145,39 +142,26 @@ int FFMS_Track::FrameFromPTS(int64_t PTS) {
|
|||
return -1;
|
||||
}
|
||||
|
||||
int FFMS_Track::ClosestFrameFromPTS(int64_t PTS) {
|
||||
int Frame = 0;
|
||||
int64_t BestDiff = 0xFFFFFFFFFFFFFFLL; // big number
|
||||
for (int i = 0; i < static_cast<int>(size()); i++) {
|
||||
int64_t CurrentDiff = FFABS(at(i).PTS - PTS);
|
||||
if (CurrentDiff < BestDiff) {
|
||||
BestDiff = CurrentDiff;
|
||||
Frame = i;
|
||||
}
|
||||
}
|
||||
static bool PTSComparison(TFrameInfo FI1, TFrameInfo FI2) {
|
||||
return FI1.PTS < FI2.PTS;
|
||||
}
|
||||
|
||||
int FFMS_Track::ClosestFrameFromPTS(int64_t PTS) {
|
||||
TFrameInfo F;
|
||||
F.PTS = PTS;
|
||||
|
||||
iterator Pos = std::lower_bound(begin(), end(), F, PTSComparison);
|
||||
int Frame = std::distance(begin(), Pos);
|
||||
if ((Pos + 1) != end() && FFABS(Pos->PTS - PTS) > FFABS((Pos + 1)->PTS - PTS))
|
||||
Frame += 1;
|
||||
return Frame;
|
||||
}
|
||||
|
||||
int FFMS_Track::FindClosestVideoKeyFrame(int Frame) {
|
||||
Frame = FFMIN(FFMAX(Frame, 0), static_cast<int>(size()) - 1);
|
||||
for (int i = Frame; i > 0; i--)
|
||||
if (at(i).KeyFrame) {
|
||||
for (int j = i; j >= 0; j--)
|
||||
if (at(at(j).OriginalPos).KeyFrame)
|
||||
return j;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
|
||||
int FFMS_Track::FindClosestAudioKeyFrame(int64_t Sample) {
|
||||
for (size_t i = 0; i < size(); i++) {
|
||||
if (at(i).SampleStart == Sample && at(i).KeyFrame)
|
||||
return i;
|
||||
else if (at(i).SampleStart > Sample && at(i).KeyFrame)
|
||||
return i - 1;
|
||||
}
|
||||
return size() - 1;
|
||||
for (; Frame > 0 && !at(Frame).KeyFrame; Frame--) ;
|
||||
for (; Frame > 0 && !at(at(Frame).OriginalPos).KeyFrame; Frame--) ;
|
||||
return Frame;
|
||||
}
|
||||
|
||||
FFMS_Track::FFMS_Track() {
|
||||
|
@ -196,62 +180,53 @@ FFMS_Track::FFMS_Track(int64_t Num, int64_t Den, FFMS_TrackType TT, bool UseDTS)
|
|||
|
||||
void FFMS_Index::CalculateFileSignature(const char *Filename, int64_t *Filesize, uint8_t Digest[20]) {
|
||||
FILE *SFile = ffms_fopen(Filename,"rb");
|
||||
if (!SFile)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to open '") + Filename + "' for hashing");
|
||||
|
||||
if (SFile == NULL) {
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to open '" << Filename << "' for hashing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
|
||||
const int BlockSize = 1024*1024;
|
||||
std::vector<uint8_t> FileBuffer(BlockSize);
|
||||
std::vector<uint8_t> FileBuffer(1024*1024, 0);
|
||||
std::vector<uint8_t> ctxmem(av_sha1_size);
|
||||
AVSHA1 *ctx = (AVSHA1 *)&ctxmem[0];
|
||||
AVSHA1 *ctx = (AVSHA1 *)(&ctxmem[0]);
|
||||
av_sha1_init(ctx);
|
||||
|
||||
memset(&FileBuffer[0], 0, BlockSize);
|
||||
fread(&FileBuffer[0], 1, BlockSize, SFile);
|
||||
if (ferror(SFile) && !feof(SFile)) {
|
||||
av_sha1_final(ctx, Digest);
|
||||
fclose(SFile);
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to read '" << Filename << "' for hashing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
av_sha1_update(ctx, &FileBuffer[0], BlockSize);
|
||||
try {
|
||||
fread(&FileBuffer[0], 1, FileBuffer.size(), SFile);
|
||||
if (ferror(SFile) && !feof(SFile))
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to read '") + Filename + "' for hashing");
|
||||
|
||||
fseeko(SFile, -BlockSize, SEEK_END);
|
||||
memset(&FileBuffer[0], 0, BlockSize);
|
||||
fread(&FileBuffer[0], 1, BlockSize, SFile);
|
||||
if (ferror(SFile) && !feof(SFile)) {
|
||||
av_sha1_final(ctx, Digest);
|
||||
fclose(SFile);
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to seek with offset " << BlockSize << " from file end in '" << Filename << "' for hashing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
av_sha1_update(ctx, &FileBuffer[0], BlockSize);
|
||||
av_sha1_update(ctx, &FileBuffer[0], FileBuffer.size());
|
||||
|
||||
fseeko(SFile, 0, SEEK_END);
|
||||
if (ferror(SFile)) {
|
||||
av_sha1_final(ctx, Digest);
|
||||
fclose(SFile);
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to seek to end of '" << Filename << "' for hashing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
fseeko(SFile, -(int)FileBuffer.size(), SEEK_END);
|
||||
std::fill(FileBuffer.begin(), FileBuffer.end(), 0);
|
||||
fread(&FileBuffer[0], 1, FileBuffer.size(), SFile);
|
||||
if (ferror(SFile) && !feof(SFile)) {
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to seek with offset " << FileBuffer.size() << " from file end in '" << Filename << "' for hashing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
|
||||
av_sha1_update(ctx, &FileBuffer[0], FileBuffer.size());
|
||||
|
||||
fseeko(SFile, 0, SEEK_END);
|
||||
if (ferror(SFile))
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to seek to end of '") + Filename + "' for hashing");
|
||||
|
||||
*Filesize = ftello(SFile);
|
||||
}
|
||||
catch (...) {
|
||||
fclose(SFile);
|
||||
av_sha1_final(ctx, Digest);
|
||||
throw;
|
||||
}
|
||||
*Filesize = ftello(SFile);
|
||||
fclose(SFile);
|
||||
|
||||
av_sha1_final(ctx, Digest);
|
||||
}
|
||||
|
||||
static bool PTSComparison(TFrameInfo FI1, TFrameInfo FI2) {
|
||||
return FI1.PTS < FI2.PTS;
|
||||
}
|
||||
|
||||
void FFMS_Index::Sort() {
|
||||
for (FFMS_Index::iterator Cur=begin(); Cur!=end(); Cur++) {
|
||||
for (FFMS_Index::iterator Cur = begin(); Cur != end(); ++Cur) {
|
||||
if (Cur->size() > 2 && Cur->front().PTS >= Cur->back().PTS) Cur->pop_back();
|
||||
|
||||
for (size_t i = 0; i < Cur->size(); i++)
|
||||
Cur->at(i).OriginalPos = i;
|
||||
|
@ -304,11 +279,9 @@ static unsigned int z_def(ffms_fstream *IndexStream, z_stream *stream, void *in,
|
|||
void FFMS_Index::WriteIndex(const char *IndexFile) {
|
||||
ffms_fstream IndexStream(IndexFile, std::ios::out | std::ios::binary | std::ios::trunc);
|
||||
|
||||
if (!IndexStream.is_open()) {
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to open '" << IndexFile << "' for writing";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (!IndexStream.is_open())
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to open '") + IndexFile + "' for writing");
|
||||
|
||||
z_stream stream;
|
||||
memset(&stream, 0, sizeof(z_stream));
|
||||
|
@ -327,7 +300,6 @@ void FFMS_Index::WriteIndex(const char *IndexFile) {
|
|||
IH.LAVCVersion = avcodec_version();
|
||||
IH.LSWSVersion = swscale_version();
|
||||
IH.LPPVersion = postproc_version();
|
||||
IH.LPPVersion = 0;
|
||||
IH.FileSize = Filesize;
|
||||
memcpy(IH.FileSignature, Digest, sizeof(Digest));
|
||||
|
||||
|
@ -364,8 +336,6 @@ void FFMS_Index::WriteIndex(const char *IndexFile) {
|
|||
}
|
||||
|
||||
static unsigned int z_inf(ffms_fstream *Index, z_stream *stream, void *in, size_t in_sz, void *out, size_t out_sz) {
|
||||
int ret;
|
||||
|
||||
if (out_sz == 0 || out == 0) return 0;
|
||||
stream->next_out = (Bytef*) out;
|
||||
stream->avail_out = out_sz;
|
||||
|
@ -376,20 +346,16 @@ static unsigned int z_inf(ffms_fstream *Index, z_stream *stream, void *in, size_
|
|||
stream->next_in = (Bytef*) in;
|
||||
stream->avail_in += Index->gcount();
|
||||
|
||||
ret = inflate(stream, Z_SYNC_FLUSH);
|
||||
switch (ret) {
|
||||
switch (inflate(stream, Z_SYNC_FLUSH)) {
|
||||
case Z_NEED_DICT:
|
||||
inflateEnd(stream);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, "Failed to read data: Dictionary error.");
|
||||
break;
|
||||
case Z_DATA_ERROR:
|
||||
inflateEnd(stream);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, "Failed to read data: Data error.");
|
||||
break;
|
||||
case Z_MEM_ERROR:
|
||||
inflateEnd(stream);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, "Failed to read data: Memory error.");
|
||||
break;
|
||||
case Z_STREAM_END:
|
||||
inflateEnd(stream);
|
||||
return out_sz - stream->avail_out;
|
||||
|
@ -402,41 +368,34 @@ static unsigned int z_inf(ffms_fstream *Index, z_stream *stream, void *in, size_
|
|||
void FFMS_Index::ReadIndex(const char *IndexFile) {
|
||||
ffms_fstream Index(IndexFile, std::ios::in | std::ios::binary);
|
||||
|
||||
if (!Index.is_open()) {
|
||||
std::ostringstream buf;
|
||||
buf << "Failed to open '" << IndexFile << "' for reading";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (!Index.is_open())
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Failed to open '") + IndexFile + "' for reading");
|
||||
|
||||
z_stream stream;
|
||||
memset(&stream, 0, sizeof(z_stream));
|
||||
unsigned char in[CHUNK];
|
||||
if (inflateInit(&stream) != Z_OK) {
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, "Failed to initialize zlib");
|
||||
}
|
||||
if (inflateInit(&stream) != Z_OK)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
"Failed to initialize zlib");
|
||||
|
||||
// Read the index file header
|
||||
IndexHeader IH;
|
||||
z_inf(&Index, &stream, &in, CHUNK, &IH, sizeof(IndexHeader));
|
||||
if (IH.Id != INDEXID) {
|
||||
std::ostringstream buf;
|
||||
buf << "'" << IndexFile << "' is not a valid index file";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
|
||||
if (IH.Version != FFMS_VERSION) {
|
||||
std::ostringstream buf;
|
||||
buf << "'" << IndexFile << "' is not the expected index version";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (IH.Id != INDEXID)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("'") + IndexFile + "' is not a valid index file");
|
||||
|
||||
if (IH.Version != FFMS_VERSION)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("'") + IndexFile + "' is the expected index version");
|
||||
|
||||
if (IH.LAVUVersion != avutil_version() || IH.LAVFVersion != avformat_version() ||
|
||||
IH.LAVCVersion != avcodec_version() || IH.LSWSVersion != swscale_version() ||
|
||||
IH.LPPVersion != postproc_version()) {
|
||||
std::ostringstream buf;
|
||||
buf << "A different FFmpeg build was used to create '" << IndexFile << "'";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
IH.LPPVersion != postproc_version())
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("A different FFmpeg build was used to create '") + IndexFile + "'");
|
||||
|
||||
if (!(IH.Decoder & FFMS_GetEnabledSources()))
|
||||
throw FFMS_Exception(FFMS_ERROR_INDEX, FFMS_ERROR_NOT_AVAILABLE,
|
||||
|
@ -465,16 +424,17 @@ void FFMS_Index::ReadIndex(const char *IndexFile) {
|
|||
ctrack[j].SampleStart = ctrack[j].SampleStart + ctrack[j - 1].SampleStart;
|
||||
}
|
||||
}
|
||||
|
||||
} catch (...) {
|
||||
std::ostringstream buf;
|
||||
buf << "Unknown error while reading index information in '" << IndexFile << "'";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
catch (FFMS_Exception const&) {
|
||||
throw;
|
||||
}
|
||||
catch (...) {
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Unknown error while reading index information in '") + IndexFile + "'");
|
||||
}
|
||||
}
|
||||
|
||||
FFMS_Index::FFMS_Index() {
|
||||
|
||||
}
|
||||
|
||||
FFMS_Index::FFMS_Index(int64_t Filesize, uint8_t Digest[20]) {
|
||||
|
@ -511,11 +471,9 @@ void FFMS_Indexer::SetAudioNameCallback(TAudioNameCallback ANC, void *ANCPrivate
|
|||
FFMS_Indexer *FFMS_Indexer::CreateIndexer(const char *Filename) {
|
||||
AVFormatContext *FormatContext = NULL;
|
||||
|
||||
if (av_open_input_file(&FormatContext, Filename, NULL, 0, NULL) != 0) {
|
||||
std::ostringstream buf;
|
||||
buf << "Can't open '" << Filename << "'";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (av_open_input_file(&FormatContext, Filename, NULL, 0, NULL) != 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Can't open '") + Filename + "'");
|
||||
|
||||
// Do matroska indexing instead?
|
||||
if (!strncmp(FormatContext->iformat->name, "matroska", 8)) {
|
||||
|
@ -539,7 +497,7 @@ FFMS_Indexer *FFMS_Indexer::CreateIndexer(const char *Filename) {
|
|||
return new FFLAVFIndexer(Filename, FormatContext);
|
||||
}
|
||||
|
||||
FFMS_Indexer::FFMS_Indexer(const char *Filename) : DecodingBuffer(AVCODEC_MAX_AUDIO_FRAME_SIZE * 5) {
|
||||
FFMS_Indexer::FFMS_Indexer(const char *Filename) : DecodingBuffer(AVCODEC_MAX_AUDIO_FRAME_SIZE * 10) {
|
||||
IndexMask = 0;
|
||||
DumpMask = 0;
|
||||
ErrorHandling = FFMS_IEH_CLEAR_TRACK;
|
||||
|
@ -557,23 +515,89 @@ FFMS_Indexer::~FFMS_Indexer() {
|
|||
|
||||
void FFMS_Indexer::WriteAudio(SharedAudioContext &AudioContext, FFMS_Index *Index, int Track, int DBSize) {
|
||||
// Delay writer creation until after an audio frame has been decoded. This ensures that all parameters are known when writing the headers.
|
||||
if (DBSize > 0) {
|
||||
if (!AudioContext.W64Writer) {
|
||||
FFMS_AudioProperties AP;
|
||||
FillAP(AP, AudioContext.CodecContext, (*Index)[Track]);
|
||||
int FNSize = (*ANC)(SourceFile, Track, &AP, NULL, 0, ANCPrivate);
|
||||
std::vector<char> WName(FNSize);
|
||||
(*ANC)(SourceFile, Track, &AP, &WName[0], FNSize, ANCPrivate);
|
||||
std::string WN(&WName[0]);
|
||||
try {
|
||||
AudioContext.W64Writer = new Wave64Writer(WN.c_str(), av_get_bits_per_sample_fmt(AudioContext.CodecContext->sample_fmt),
|
||||
AudioContext.CodecContext->channels, AudioContext.CodecContext->sample_rate, (AudioContext.CodecContext->sample_fmt == AV_SAMPLE_FMT_FLT) || (AudioContext.CodecContext->sample_fmt == AV_SAMPLE_FMT_DBL));
|
||||
} catch (...) {
|
||||
throw FFMS_Exception(FFMS_ERROR_WAVE_WRITER, FFMS_ERROR_FILE_WRITE,
|
||||
"Failed to write wave data");
|
||||
}
|
||||
if (DBSize <= 0) return;
|
||||
|
||||
if (!AudioContext.W64Writer) {
|
||||
FFMS_AudioProperties AP;
|
||||
FillAP(AP, AudioContext.CodecContext, (*Index)[Track]);
|
||||
int FNSize = (*ANC)(SourceFile, Track, &AP, NULL, 0, ANCPrivate);
|
||||
if (FNSize <= 0) {
|
||||
DumpMask = DumpMask & ~(1 << Track);
|
||||
return;
|
||||
}
|
||||
|
||||
AudioContext.W64Writer->WriteData(&DecodingBuffer[0], DBSize);
|
||||
std::vector<char> WName(FNSize);
|
||||
(*ANC)(SourceFile, Track, &AP, &WName[0], FNSize, ANCPrivate);
|
||||
std::string WN(&WName[0]);
|
||||
try {
|
||||
AudioContext.W64Writer =
|
||||
new Wave64Writer(WN.c_str(),
|
||||
av_get_bits_per_sample_fmt(AudioContext.CodecContext->sample_fmt),
|
||||
AudioContext.CodecContext->channels,
|
||||
AudioContext.CodecContext->sample_rate,
|
||||
(AudioContext.CodecContext->sample_fmt == AV_SAMPLE_FMT_FLT) || (AudioContext.CodecContext->sample_fmt == AV_SAMPLE_FMT_DBL));
|
||||
} catch (...) {
|
||||
throw FFMS_Exception(FFMS_ERROR_WAVE_WRITER, FFMS_ERROR_FILE_WRITE,
|
||||
"Failed to write wave data");
|
||||
}
|
||||
}
|
||||
|
||||
AudioContext.W64Writer->WriteData(&DecodingBuffer[0], DBSize);
|
||||
}
|
||||
|
||||
int64_t FFMS_Indexer::IndexAudioPacket(int Track, AVPacket *Packet, SharedAudioContext &Context, FFMS_Index &TrackIndices) {
|
||||
AVCodecContext *CodecContext = Context.CodecContext;
|
||||
int64_t StartSample = Context.CurrentSample;
|
||||
int Read = 0;
|
||||
while (Packet->size > 0) {
|
||||
int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
|
||||
int Ret = avcodec_decode_audio3(CodecContext, (int16_t *)&DecodingBuffer[0], &dbsize, Packet);
|
||||
if (Ret < 0) {
|
||||
if (ErrorHandling == FFMS_IEH_ABORT) {
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING, "Audio decoding error");
|
||||
} else if (ErrorHandling == FFMS_IEH_CLEAR_TRACK) {
|
||||
TrackIndices[Track].clear();
|
||||
IndexMask &= ~(1 << Track);
|
||||
} else if (ErrorHandling == FFMS_IEH_STOP_TRACK) {
|
||||
IndexMask &= ~(1 << Track);
|
||||
}
|
||||
break;
|
||||
}
|
||||
Packet->size -= Ret;
|
||||
Packet->data += Ret;
|
||||
Read += Ret;
|
||||
|
||||
CheckAudioProperties(Track, CodecContext);
|
||||
|
||||
if (dbsize > 0)
|
||||
Context.CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels);
|
||||
|
||||
if (DumpMask & (1 << Track))
|
||||
WriteAudio(Context, &TrackIndices, Track, dbsize);
|
||||
}
|
||||
Packet->size += Read;
|
||||
Packet->data -= Read;
|
||||
return Context.CurrentSample - StartSample;
|
||||
}
|
||||
|
||||
void FFMS_Indexer::CheckAudioProperties(int Track, AVCodecContext *Context) {
|
||||
std::map<int, FFMS_AudioProperties>::iterator it = LastAudioProperties.find(Track);
|
||||
if (it == LastAudioProperties.end()) {
|
||||
FFMS_AudioProperties &AP = LastAudioProperties[Track];
|
||||
AP.SampleRate = Context->sample_rate;
|
||||
AP.SampleFormat = Context->sample_fmt;
|
||||
AP.Channels = Context->channels;
|
||||
}
|
||||
else if (it->second.SampleRate != Context->sample_rate ||
|
||||
it->second.SampleFormat != Context->sample_fmt ||
|
||||
it->second.Channels != Context->channels) {
|
||||
std::ostringstream buf;
|
||||
buf <<
|
||||
"Audio format change detected. This is currently unsupported."
|
||||
<< " Channels: " << it->second.Channels << " -> " << Context->channels << ";"
|
||||
<< " Sample rate: " << it->second.SampleRate << " -> " << Context->sample_rate << ";"
|
||||
<< " Sample format: " << GetLAVCSampleFormatName((AVSampleFormat)it->second.SampleFormat) << " -> "
|
||||
<< GetLAVCSampleFormatName(Context->sample_fmt);
|
||||
throw FFMS_Exception(FFMS_ERROR_UNSUPPORTED, FFMS_ERROR_DECODING, buf.str());
|
||||
}
|
||||
}
|
||||
|
|
|
@ -19,8 +19,9 @@
|
|||
// THE SOFTWARE.
|
||||
|
||||
#ifndef INDEXING_H
|
||||
#define INDEXING_H
|
||||
#define INDEXING_H
|
||||
|
||||
#include <map>
|
||||
#include <memory>
|
||||
#include "utils.h"
|
||||
#include "wave64writer.h"
|
||||
|
@ -74,7 +75,7 @@ public:
|
|||
|
||||
TFrameInfo();
|
||||
static TFrameInfo VideoFrameInfo(int64_t PTS, int RepeatPict, bool KeyFrame, int64_t FilePos = 0, unsigned int FrameSize = 0);
|
||||
static TFrameInfo AudioFrameInfo(int64_t PTS, int64_t SampleStart, unsigned int SampleCount, bool KeyFrame, int64_t FilePos = 0, unsigned int FrameSize = 0);
|
||||
static TFrameInfo AudioFrameInfo(int64_t PTS, int64_t SampleStart, int64_t SampleCount, bool KeyFrame, int64_t FilePos = 0, unsigned int FrameSize = 0);
|
||||
private:
|
||||
TFrameInfo(int64_t PTS, int64_t SampleStart, unsigned int SampleCount, int RepeatPict, bool KeyFrame, int64_t FilePos, unsigned int FrameSize);
|
||||
};
|
||||
|
@ -86,7 +87,6 @@ public:
|
|||
bool UseDTS;
|
||||
|
||||
int FindClosestVideoKeyFrame(int Frame);
|
||||
int FindClosestAudioKeyFrame(int64_t Sample);
|
||||
int FrameFromPTS(int64_t PTS);
|
||||
int ClosestFrameFromPTS(int64_t PTS);
|
||||
void WriteTimecodes(const char *TimecodeFile);
|
||||
|
@ -113,6 +113,7 @@ public:
|
|||
};
|
||||
|
||||
class FFMS_Indexer {
|
||||
std::map<int, FFMS_AudioProperties> LastAudioProperties;
|
||||
protected:
|
||||
int IndexMask;
|
||||
int DumpMask;
|
||||
|
@ -122,12 +123,14 @@ protected:
|
|||
TAudioNameCallback ANC;
|
||||
void *ANCPrivate;
|
||||
const char *SourceFile;
|
||||
AlignedBuffer<int16_t> DecodingBuffer;
|
||||
AlignedBuffer<uint8_t> DecodingBuffer;
|
||||
|
||||
int64_t Filesize;
|
||||
uint8_t Digest[20];
|
||||
|
||||
void WriteAudio(SharedAudioContext &AudioContext, FFMS_Index *Index, int Track, int DBSize);
|
||||
void CheckAudioProperties(int Track, AVCodecContext *Context);
|
||||
int64_t IndexAudioPacket(int Track, AVPacket *Packet, SharedAudioContext &Context, FFMS_Index &TrackIndices);
|
||||
public:
|
||||
static FFMS_Indexer *CreateIndexer(const char *Filename);
|
||||
FFMS_Indexer(const char *Filename);
|
||||
|
@ -144,8 +147,8 @@ public:
|
|||
};
|
||||
|
||||
class FFLAVFIndexer : public FFMS_Indexer {
|
||||
private:
|
||||
AVFormatContext *FormatContext;
|
||||
void ReadTS(const AVPacket &Packet, int64_t &TS, bool &UseDTS);
|
||||
public:
|
||||
FFLAVFIndexer(const char *Filename, AVFormatContext *FormatContext);
|
||||
~FFLAVFIndexer();
|
||||
|
@ -177,9 +180,6 @@ private:
|
|||
CComPtr<IMMContainer> pMMC;
int NumTracks;
FFMS_TrackType TrackType[32];
AVCodec *Codec[32];
std::vector<uint8_t> CodecPrivate[32];
int CodecPrivateSize[32];
CComQIPtr<IPropertyBag> PropertyBags[32];
int64_t Duration;
public:

@@ -1,4 +1,4 @@
// Copyright (c) 2007-2009 Fredrik Mellbin
// Copyright (c) 2011 Thomas Goyne <tgoyne@gmail.com>
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal

@@ -19,175 +19,68 @@
// THE SOFTWARE.

#include "audiosource.h"
#include <cassert>

void FFLAVFAudio::Free(bool CloseCodec) {
if (CloseCodec)
avcodec_close(CodecContext);
if (FormatContext)
av_close_input_file(FormatContext);
}

FFLAVFAudio::FFLAVFAudio(const char *SourceFile, int Track, FFMS_Index *Index)
: Res(FFSourceResources<FFMS_AudioSource>(this)), FFMS_AudioSource(SourceFile, Index, Track){
FormatContext = NULL;
AVCodec *Codec = NULL;
AudioTrack = Track;
Frames = (*Index)[AudioTrack];

FFLAVFAudio::FFLAVFAudio(const char *SourceFile, int Track, FFMS_Index &Index, int DelayMode)
: FFMS_AudioSource(SourceFile, Index, Track)
, FormatContext(NULL)
{
LAVFOpenFile(SourceFile, FormatContext);

CodecContext = FormatContext->streams[AudioTrack]->codec;
CodecContext.reset(FormatContext->streams[TrackNumber]->codec);
assert(CodecContext);

Codec = avcodec_find_decoder(CodecContext->codec_id);
if (Codec == NULL)
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
"Audio codec not found");
AVCodec *Codec = avcodec_find_decoder(CodecContext->codec_id);
try {
if (!Codec)
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
"Audio codec not found");

if (avcodec_open(CodecContext, Codec) < 0)
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
"Could not open audio codec");

Res.CloseCodec(true);

// Always try to decode a frame to make sure all required parameters are known
int64_t Dummy;
DecodeNextAudioBlock(&Dummy);

if (av_seek_frame(FormatContext, AudioTrack, Frames[0].PTS, AVSEEK_FLAG_BACKWARD) < 0)
av_seek_frame(FormatContext, AudioTrack, Frames[0].PTS, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY);
avcodec_flush_buffers(CodecContext);

FillAP(AP, CodecContext, Frames);

if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0)
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
"Codec returned zero size audio");

AudioCache.Initialize((AP.Channels * AP.BitsPerSample) / 8, 50);
}

void FFLAVFAudio::DecodeNextAudioBlock(int64_t *Count) {
const size_t SizeConst = (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels) / 8;
int Ret = -1;
*Count = 0;
uint8_t *Buf = &DecodingBuffer[0];
AVPacket Packet, TempPacket;
InitNullPacket(Packet);
InitNullPacket(TempPacket);

while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == AudioTrack) {
TempPacket.data = Packet.data;
TempPacket.size = Packet.size;

while (TempPacket.size > 0) {
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE * 10;
Ret = avcodec_decode_audio3(CodecContext, (int16_t *)Buf, &TempOutputBufSize, &TempPacket);

if (Ret < 0) {// throw error or something?
av_free_packet(&Packet);
goto Done;
}

if (Ret > 0) {
TempPacket.size -= Ret;
TempPacket.data += Ret;
Buf += TempOutputBufSize;
if (SizeConst)
*Count += TempOutputBufSize / SizeConst;
}
}

av_free_packet(&Packet);
goto Done;
}

av_free_packet(&Packet);
if (avcodec_open(CodecContext, Codec) < 0)
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
"Could not open audio codec");
}
catch (...) {
av_close_input_file(FormatContext);
throw;
}

Done:;
if (Frames.back().PTS == Frames.front().PTS)
SeekOffset = -1;
else
SeekOffset = 10;
Init(Index, DelayMode);
}

void FFLAVFAudio::GetAudio(void *Buf, int64_t Start, int64_t Count) {
GetAudioCheck(Start, Count);
FFLAVFAudio::~FFLAVFAudio() {
av_close_input_file(FormatContext);
}

const int64_t SizeConst = (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels) / 8;
memset(Buf, 0, static_cast<size_t>(SizeConst * Count));
void FFLAVFAudio::Seek() {
size_t TargetPacket = GetSeekablePacketNumber(Frames, PacketNumber);

unsigned int PreDecBlocks = 0;
uint8_t *DstBuf = static_cast<uint8_t *>(Buf);
if (av_seek_frame(FormatContext, TrackNumber, Frames[TargetPacket].PTS, AVSEEK_FLAG_BACKWARD) < 0)
av_seek_frame(FormatContext, TrackNumber, Frames[TargetPacket].PTS, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY);

// Fill with everything in the cache
int64_t CacheEnd = AudioCache.FillRequest(Start, Count, DstBuf);
// Was everything in the cache?
if (CacheEnd == Start + Count)
return;

size_t CurrentAudioBlock;
if (CurrentSample != CacheEnd) {
PreDecBlocks = 15;
CurrentAudioBlock = FFMAX((int64_t)Frames.FindClosestAudioKeyFrame(CacheEnd) - PreDecBlocks - 20, (int64_t)0);

if (CurrentAudioBlock <= PreDecBlocks) {
CurrentAudioBlock = 0;
PreDecBlocks = 0;
}

// Did the seeking fail?
if (av_seek_frame(FormatContext, AudioTrack, Frames[CurrentAudioBlock].PTS, AVSEEK_FLAG_BACKWARD) < 0)
av_seek_frame(FormatContext, AudioTrack, Frames[CurrentAudioBlock].PTS, AVSEEK_FLAG_BACKWARD | AVSEEK_FLAG_ANY);

avcodec_flush_buffers(CodecContext);

// Pretend we got to the first audio frame when PreDecBlocks = 0
if (PreDecBlocks > 0) {
AVPacket Packet;
InitNullPacket(Packet);

// Establish where we actually are
// Trigger on packet PTS difference since groups can otherwise be indistinguishable
int64_t LastPTS = - 1;
while (av_read_frame(FormatContext, &Packet) >= 0) {
if (Packet.stream_index == AudioTrack) {
if (LastPTS < 0) {
LastPTS = Packet.pts;
} else if (LastPTS != Packet.pts) {
for (size_t i = 0; i < Frames.size(); i++)
if (Frames[i].PTS == Packet.pts) {
// The current match was consumed
CurrentAudioBlock = i + 1;
break;
}

av_free_packet(&Packet);
break;
}
}

av_free_packet(&Packet);
}
}
} else {
CurrentAudioBlock = Frames.FindClosestAudioKeyFrame(CurrentSample);
if (TargetPacket != PacketNumber) {
// Decode until the PTS changes so we know where we are
int64_t LastPTS = Frames[PacketNumber].PTS;
while (LastPTS == Frames[PacketNumber].PTS) DecodeNextBlock();
}

int64_t DecodeCount;

do {
DecodeNextAudioBlock(&DecodeCount);

// Cache the block if enough blocks before it have been decoded to avoid garbage
if (PreDecBlocks == 0) {
AudioCache.CacheBlock(Frames[CurrentAudioBlock].SampleStart, DecodeCount, &DecodingBuffer[0]);
CacheEnd = AudioCache.FillRequest(CacheEnd, Start + Count - CacheEnd, DstBuf + (CacheEnd - Start) * SizeConst);
} else {
PreDecBlocks--;
}

CurrentAudioBlock++;
if (CurrentAudioBlock < Frames.size())
CurrentSample = Frames[CurrentAudioBlock].SampleStart;
} while (Start + Count - CacheEnd > 0 && CurrentAudioBlock < Frames.size());
}

bool FFLAVFAudio::ReadPacket(AVPacket *Packet) {
InitNullPacket(*Packet);

while (av_read_frame(FormatContext, Packet) >= 0) {
if (Packet->stream_index == TrackNumber) {
while (Frames[PacketNumber].PTS < Packet->pts) ++PacketNumber;
return true;
}
av_free_packet(Packet);
}
return false;
}
void FFLAVFAudio::FreePacket(AVPacket *Packet) {
av_free_packet(Packet);
}
@@ -49,11 +49,12 @@ FFMS_Index *FFLAVFIndexer::DoIndexing() {
FormatContext->streams[i]->time_base.den,
static_cast<FFMS_TrackType>(FormatContext->streams[i]->codec->codec_type)));

if (static_cast<FFMS_TrackType>(FormatContext->streams[i]->codec->codec_type) == FFMS_TYPE_VIDEO &&
(VideoContexts[i].Parser = av_parser_init(FormatContext->streams[i]->codec->codec_id))) {
if (FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
VideoContexts[i].Parser = av_parser_init(FormatContext->streams[i]->codec->codec_id);
if (!VideoContexts[i].Parser) continue;

AVCodec *VideoCodec = avcodec_find_decoder(FormatContext->streams[i]->codec->codec_id);
if (VideoCodec == NULL)
if (!VideoCodec)
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_UNSUPPORTED,
"Video codec not found");

@@ -63,9 +64,9 @@ FFMS_Index *FFLAVFIndexer::DoIndexing() {

VideoContexts[i].CodecContext = FormatContext->streams[i]->codec;
VideoContexts[i].Parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
IndexMask |= 1 << i;
}

if (IndexMask & (1 << i) && FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
else if (IndexMask & (1 << i) && FormatContext->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO) {
AVCodecContext *AudioCodecContext = FormatContext->streams[i]->codec;

AVCodec *AudioCodec = avcodec_find_decoder(AudioCodecContext->codec_id);

@@ -83,11 +84,8 @@ FFMS_Index *FFLAVFIndexer::DoIndexing() {
}
}

//

AVPacket Packet, TempPacket;
AVPacket Packet;
InitNullPacket(Packet);
InitNullPacket(TempPacket);
std::vector<int64_t> LastValidTS;
LastValidTS.resize(FormatContext->nb_streams, ffms_av_nopts_value);

@@ -99,106 +97,38 @@ FFMS_Index *FFLAVFIndexer::DoIndexing() {
throw FFMS_Exception(FFMS_ERROR_CANCELLED, FFMS_ERROR_USER,
"Cancelled by user");
}
if (!(IndexMask & (1 << Packet.stream_index))) {
av_free_packet(&Packet);
continue;
}

int Track = Packet.stream_index;
bool KeyFrame = !!(Packet.flags & AV_PKT_FLAG_KEY);
ReadTS(Packet, LastValidTS[Track], (*TrackIndices)[Track].UseDTS);

if (FormatContext->streams[Track]->codec->codec_type == CODEC_TYPE_VIDEO) {
if (LastValidTS[Track] == ffms_av_nopts_value)
throw FFMS_Exception(FFMS_ERROR_INDEXING, FFMS_ERROR_PARSER,
"Invalid initial pts and dts");

// Only create index entries for video for now to save space
if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_VIDEO) {
uint8_t *OB;
int OBSize;
int RepeatPict = -1;

// Duplicated code
if (!(*TrackIndices)[Packet.stream_index].UseDTS && Packet.pts != ffms_av_nopts_value)
LastValidTS[Packet.stream_index] = Packet.pts;
if (LastValidTS[Packet.stream_index] == ffms_av_nopts_value)
(*TrackIndices)[Packet.stream_index].UseDTS = true;
if ((*TrackIndices)[Packet.stream_index].UseDTS && Packet.dts != ffms_av_nopts_value)
LastValidTS[Packet.stream_index] = Packet.dts;
if (LastValidTS[Packet.stream_index] == ffms_av_nopts_value)
throw FFMS_Exception(FFMS_ERROR_INDEXING, FFMS_ERROR_PARSER,
"Invalid initial pts and dts");
//

if (VideoContexts[Packet.stream_index].Parser) {
av_parser_parse2(VideoContexts[Packet.stream_index].Parser, VideoContexts[Packet.stream_index].CodecContext, &OB, &OBSize, Packet.data, Packet.size, Packet.pts, Packet.dts, Packet.pos);
RepeatPict = VideoContexts[Packet.stream_index].Parser->repeat_pict;
if (VideoContexts[Track].Parser) {
uint8_t *OB;
int OBSize;
av_parser_parse2(VideoContexts[Track].Parser, VideoContexts[Track].CodecContext, &OB, &OBSize, Packet.data, Packet.size, Packet.pts, Packet.dts, Packet.pos);
RepeatPict = VideoContexts[Track].Parser->repeat_pict;
}

(*TrackIndices)[Packet.stream_index].push_back(TFrameInfo::VideoFrameInfo(LastValidTS[Packet.stream_index], RepeatPict, (Packet.flags & AV_PKT_FLAG_KEY) ? 1 : 0));
} else if (FormatContext->streams[Packet.stream_index]->codec->codec_type == CODEC_TYPE_AUDIO && (IndexMask & (1 << Packet.stream_index))) {
int64_t StartSample = AudioContexts[Packet.stream_index].CurrentSample;
AVCodecContext *AudioCodecContext = FormatContext->streams[Packet.stream_index]->codec;
TempPacket.data = Packet.data;
TempPacket.size = Packet.size;
TempPacket.flags = Packet.flags;
(*TrackIndices)[Track].push_back(TFrameInfo::VideoFrameInfo(LastValidTS[Track], RepeatPict, KeyFrame, Packet.pos));
}
else if (FormatContext->streams[Track]->codec->codec_type == CODEC_TYPE_AUDIO) {
int64_t StartSample = AudioContexts[Track].CurrentSample;
int64_t SampleCount = IndexAudioPacket(Track, &Packet, AudioContexts[Track], *TrackIndices);

// Duplicated code
if (!(*TrackIndices)[Packet.stream_index].UseDTS && Packet.pts != ffms_av_nopts_value)
LastValidTS[Packet.stream_index] = Packet.pts;
if (LastValidTS[Packet.stream_index] == ffms_av_nopts_value)
(*TrackIndices)[Packet.stream_index].UseDTS = true;
if ((*TrackIndices)[Packet.stream_index].UseDTS && Packet.dts != ffms_av_nopts_value)
LastValidTS[Packet.stream_index] = Packet.dts;
if (LastValidTS[Packet.stream_index] == ffms_av_nopts_value)
throw FFMS_Exception(FFMS_ERROR_INDEXING, FFMS_ERROR_PARSER,
"Invalid initial pts and dts");
//

bool first = true;
int LastNumChannels;
int LastSampleRate;
AVSampleFormat LastSampleFormat;
while (TempPacket.size > 0) {
int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
int Ret = avcodec_decode_audio3(AudioCodecContext, &DecodingBuffer[0], &dbsize, &TempPacket);
if (Ret < 0) {
if (ErrorHandling == FFMS_IEH_ABORT) {
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
"Audio decoding error");
} else if (ErrorHandling == FFMS_IEH_CLEAR_TRACK) {
(*TrackIndices)[Packet.stream_index].clear();
IndexMask &= ~(1 << Packet.stream_index);
break;
} else if (ErrorHandling == FFMS_IEH_STOP_TRACK) {
IndexMask &= ~(1 << Packet.stream_index);
break;
} else if (ErrorHandling == FFMS_IEH_IGNORE) {
break;
}
}

if (first) {
LastNumChannels = AudioCodecContext->channels;
LastSampleRate = AudioCodecContext->sample_rate;
LastSampleFormat = AudioCodecContext->sample_fmt;
first = false;
}

if (LastNumChannels != AudioCodecContext->channels || LastSampleRate != AudioCodecContext->sample_rate
|| LastSampleFormat != AudioCodecContext->sample_fmt) {
std::ostringstream buf;
buf <<
"Audio format change detected. This is currently unsupported."
<< " Channels: " << LastNumChannels << " -> " << AudioCodecContext->channels << ";"
<< " Sample rate: " << LastSampleRate << " -> " << AudioCodecContext->sample_rate << ";"
<< " Sample format: " << GetLAVCSampleFormatName(LastSampleFormat) << " -> "
<< GetLAVCSampleFormatName(AudioCodecContext->sample_fmt);
throw FFMS_Exception(FFMS_ERROR_UNSUPPORTED, FFMS_ERROR_DECODING, buf.str());
}

if (Ret > 0) {
TempPacket.size -= Ret;
TempPacket.data += Ret;
}

if (dbsize > 0)
AudioContexts[Packet.stream_index].CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_fmt(AudioCodecContext->sample_fmt) * AudioCodecContext->channels);

if (DumpMask & (1 << Packet.stream_index))
WriteAudio(AudioContexts[Packet.stream_index], TrackIndices.get(), Packet.stream_index, dbsize);
}

(*TrackIndices)[Packet.stream_index].push_back(TFrameInfo::AudioFrameInfo(LastValidTS[Packet.stream_index], StartSample,
static_cast<unsigned int>(AudioContexts[Packet.stream_index].CurrentSample - StartSample), (Packet.flags & AV_PKT_FLAG_KEY) ? 1 : 0));
if (SampleCount != 0)
(*TrackIndices)[Track].push_back(TFrameInfo::AudioFrameInfo(LastValidTS[Track],
StartSample, SampleCount, KeyFrame, Packet.pos));
}

av_free_packet(&Packet);

@@ -208,6 +138,15 @@ FFMS_Index *FFLAVFIndexer::DoIndexing() {
return TrackIndices.release();
}

void FFLAVFIndexer::ReadTS(const AVPacket &Packet, int64_t &TS, bool &UseDTS) {
if (!UseDTS && Packet.pts != ffms_av_nopts_value)
TS = Packet.pts;
if (TS == ffms_av_nopts_value)
UseDTS = true;
if (UseDTS && Packet.dts != ffms_av_nopts_value)
TS = Packet.dts;
}

int FFLAVFIndexer::GetNumberOfTracks() {
return FormatContext->nb_streams;
}
@@ -83,6 +83,15 @@ FFLAVFVideo::FFLAVFVideo(const char *SourceFile, int Track, FFMS_Index *Index,
VP.ColorSpace = 0;
VP.ColorRange = 0;
#endif
// these pixfmt's are deprecated but still used
if (
CodecContext->pix_fmt == PIX_FMT_YUVJ420P
|| CodecContext->pix_fmt == PIX_FMT_YUVJ422P
|| CodecContext->pix_fmt == PIX_FMT_YUVJ444P
)
VP.ColorRange = AVCOL_RANGE_JPEG;

VP.FirstTime = ((Frames.front().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
VP.LastTime = ((Frames.back().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;

@@ -96,14 +105,13 @@ FFLAVFVideo::FFLAVFVideo(const char *SourceFile, int Track, FFMS_Index *Index,
VP.FPSNumerator = 30;
}

// Adjust framerate to match the duration of the first frame
for (size_t i = 1; i < Frames.size(); i++) {
if (Frames[i].PTS != Frames[0].PTS) {
unsigned int PTSDiff = (unsigned int)(Frames[i].PTS - Frames[0].PTS);
VP.FPSDenominator *= PTSDiff;
VP.FPSDenominator /= i;
break;
}
// Calculate the average framerate
if (Frames.size() >= 2) {
double PTSDiff = (double)(Frames.back().PTS - Frames.front().PTS);
double TD = (double)(Frames.TB.Den);
double TN = (double)(Frames.TB.Num);
VP.FPSDenominator = (unsigned int)(((double)1000000) / (double)((VP.NumFrames - 1) / ((PTSDiff * TN/TD) / (double)1000)));
VP.FPSNumerator = 1000000;
}

// attempt to correct framerate to the proper NTSC fraction, if applicable

@ -1,4 +1,4 @@
|
|||
// Copyright (c) 2007-2009 Fredrik Mellbin
|
||||
// Copyright (c) 2011 Thomas Goyne <tgoyne@gmail.com>
|
||||
//
|
||||
// Permission is hereby granted, free of charge, to any person obtaining a copy
|
||||
// of this software and associated documentation files (the "Software"), to deal
|
||||
|
@ -19,161 +19,57 @@
|
|||
// THE SOFTWARE.
|
||||
|
||||
#include "audiosource.h"
|
||||
#include <cassert>
|
||||
|
||||
void FFMatroskaAudio::Free(bool CloseCodec) {
|
||||
if (TCC)
|
||||
delete TCC;
|
||||
if (MC.ST.fp) {
|
||||
mkv_Close(MF);
|
||||
fclose(MC.ST.fp);
|
||||
}
|
||||
if (CloseCodec)
|
||||
avcodec_close(CodecContext);
|
||||
av_freep(&CodecContext);
|
||||
}
|
||||
|
||||
FFMatroskaAudio::FFMatroskaAudio(const char *SourceFile, int Track, FFMS_Index *Index)
|
||||
: Res(FFSourceResources<FFMS_AudioSource>(this)), FFMS_AudioSource(SourceFile, Index, Track) {
|
||||
CodecContext = NULL;
|
||||
AVCodec *Codec = NULL;
|
||||
TrackInfo *TI = NULL;
|
||||
TCC = NULL;
|
||||
PacketNumber = 0;
|
||||
Frames = (*Index)[Track];
|
||||
|
||||
MC.ST.fp = ffms_fopen(SourceFile, "rb");
|
||||
if (MC.ST.fp == NULL) {
|
||||
std::ostringstream buf;
|
||||
buf << "Can't open '" << SourceFile << "': " << strerror(errno);
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
FFMatroskaAudio::FFMatroskaAudio(const char *SourceFile, int Track, FFMS_Index &Index, int DelayMode)
|
||||
: FFMS_AudioSource(SourceFile, Index, Track)
|
||||
, TI(NULL)
|
||||
{
|
||||
if (!(MC.ST.fp = ffms_fopen(SourceFile, "rb")))
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Can't open '") + SourceFile + "': " + strerror(errno));
|
||||
|
||||
setvbuf(MC.ST.fp, NULL, _IOFBF, CACHESIZE);
|
||||
|
||||
MF = mkv_OpenEx(&MC.ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
|
||||
if (MF == NULL) {
|
||||
fclose(MC.ST.fp);
|
||||
std::ostringstream buf;
|
||||
buf << "Can't parse Matroska file: " << ErrorMessage;
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
|
||||
if (!(MF = mkv_OpenEx(&MC.ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage))))
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Can't parse Matroska file: ") + ErrorMessage);
|
||||
|
||||
TI = mkv_GetTrackInfo(MF, Track);
|
||||
assert(TI);
|
||||
|
||||
if (TI->CompEnabled)
|
||||
TCC = new TrackCompressionContext(MF, TI, Track);
|
||||
TCC.reset(new TrackCompressionContext(MF, TI, Track));
|
||||
|
||||
CodecContext = avcodec_alloc_context();
|
||||
CodecContext.reset(avcodec_alloc_context(), DeleteMatroskaCodecContext);
|
||||
assert(CodecContext);
|
||||
|
||||
Codec = avcodec_find_decoder(MatroskaToFFCodecID(TI->CodecID, TI->CodecPrivate, 0, TI->AV.Audio.BitDepth));
|
||||
if (Codec == NULL)
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
|
||||
"Audio codec not found");
|
||||
AVCodec *Codec = avcodec_find_decoder(MatroskaToFFCodecID(TI->CodecID, TI->CodecPrivate, 0, TI->AV.Audio.BitDepth));
|
||||
if (!Codec) {
|
||||
mkv_Close(MF);
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC, "Audio codec not found");
|
||||
}
|
||||
|
||||
InitializeCodecContextFromMatroskaTrackInfo(TI, CodecContext);
|
||||
|
||||
if (avcodec_open(CodecContext, Codec) < 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
|
||||
"Could not open audio codec");
|
||||
|
||||
Res.CloseCodec(true);
|
||||
|
||||
// Always try to decode a frame to make sure all required parameters are known
|
||||
int64_t Dummy;
|
||||
DecodeNextAudioBlock(&Dummy);
|
||||
|
||||
avcodec_flush_buffers(CodecContext);
|
||||
|
||||
FillAP(AP, CodecContext, Frames);
|
||||
|
||||
if (AP.SampleRate <= 0 || AP.BitsPerSample <= 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC,
|
||||
"Codec returned zero size audio");
|
||||
|
||||
AudioCache.Initialize((AP.Channels * AP.BitsPerSample) / 8, 50);
|
||||
}
|
||||
|
||||
void FFMatroskaAudio::GetAudio(void *Buf, int64_t Start, int64_t Count) {
|
||||
GetAudioCheck(Start, Count);
|
||||
|
||||
const int64_t SizeConst = (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels) / 8;
|
||||
memset(Buf, 0, static_cast<size_t>(SizeConst * Count));
|
||||
|
||||
unsigned int PreDecBlocks = 0;
|
||||
uint8_t *DstBuf = static_cast<uint8_t *>(Buf);
|
||||
|
||||
// Fill with everything in the cache
|
||||
int64_t CacheEnd = AudioCache.FillRequest(Start, Count, DstBuf);
|
||||
// Was everything in the cache?
|
||||
if (CacheEnd == Start + Count)
|
||||
return;
|
||||
|
||||
if (CurrentSample != CacheEnd) {
|
||||
PreDecBlocks = 15;
|
||||
PacketNumber = FFMAX((int64_t)Frames.FindClosestAudioKeyFrame(CacheEnd) - PreDecBlocks, (int64_t)0);
|
||||
|
||||
if (PacketNumber <= PreDecBlocks) {
|
||||
PacketNumber = 0;
|
||||
PreDecBlocks = 0;
|
||||
}
|
||||
|
||||
avcodec_flush_buffers(CodecContext);
|
||||
if (avcodec_open(CodecContext, Codec) < 0) {
|
||||
mkv_Close(MF);
|
||||
throw FFMS_Exception(FFMS_ERROR_DECODING, FFMS_ERROR_CODEC, "Could not open audio codec");
|
||||
}
|
||||
|
||||
int64_t DecodeCount;
|
||||
|
||||
do {
|
||||
const TFrameInfo &FI = Frames[Frames[PacketNumber].OriginalPos];
|
||||
DecodeNextAudioBlock(&DecodeCount);
|
||||
|
||||
// Cache the block if enough blocks before it have been decoded to avoid garbage
|
||||
if (PreDecBlocks == 0) {
|
||||
AudioCache.CacheBlock(FI.SampleStart, DecodeCount, &DecodingBuffer[0]);
|
||||
CacheEnd = AudioCache.FillRequest(CacheEnd, Start + Count - CacheEnd, DstBuf + (CacheEnd - Start) * SizeConst);
|
||||
} else {
|
||||
PreDecBlocks--;
|
||||
}
|
||||
|
||||
} while (Start + Count - CacheEnd > 0 && PacketNumber < Frames.size());
|
||||
Init(Index, DelayMode);
|
||||
}
|
||||
|
||||
void FFMatroskaAudio::DecodeNextAudioBlock(int64_t *Count) {
|
||||
const size_t SizeConst = (av_get_bits_per_sample_fmt(CodecContext->sample_fmt) * CodecContext->channels) / 8;
|
||||
int Ret = -1;
|
||||
*Count = 0;
|
||||
uint8_t *Buf = &DecodingBuffer[0];
|
||||
AVPacket TempPacket;
|
||||
InitNullPacket(TempPacket);
|
||||
|
||||
const TFrameInfo &FI = Frames[Frames[PacketNumber].OriginalPos];
|
||||
unsigned int FrameSize = FI.FrameSize;
|
||||
CurrentSample = FI.SampleStart + FI.SampleCount;
|
||||
PacketNumber++;
|
||||
|
||||
ReadFrame(FI.FilePos, FrameSize, TCC, MC);
|
||||
TempPacket.data = MC.Buffer;
|
||||
TempPacket.size = (TCC && TCC->CompressionMethod == COMP_PREPEND) ? FrameSize + TCC->CompressedPrivateDataSize : FrameSize;
|
||||
if (FI.KeyFrame)
|
||||
TempPacket.flags = AV_PKT_FLAG_KEY;
|
||||
else
|
||||
TempPacket.flags = 0;
|
||||
|
||||
while (TempPacket.size > 0) {
|
||||
int TempOutputBufSize = AVCODEC_MAX_AUDIO_FRAME_SIZE;
|
||||
Ret = avcodec_decode_audio3(CodecContext, (int16_t *)Buf, &TempOutputBufSize, &TempPacket);
|
||||
|
||||
if (Ret < 0) // throw error or something?
|
||||
goto Done;
|
||||
|
||||
if (Ret > 0) {
|
||||
TempPacket.size -= Ret;
|
||||
TempPacket.data += Ret;
|
||||
Buf += TempOutputBufSize;
|
||||
if (SizeConst)
|
||||
*Count += TempOutputBufSize / SizeConst;
|
||||
}
|
||||
}
|
||||
|
||||
Done:;
|
||||
FFMatroskaAudio::~FFMatroskaAudio() {
|
||||
mkv_Close(MF);
|
||||
}
|
||||
|
||||
bool FFMatroskaAudio::ReadPacket(AVPacket *Packet) {
|
||||
ReadFrame(CurrentFrame->FilePos, CurrentFrame->FrameSize, TCC.get(), MC);
|
||||
InitNullPacket(*Packet);
|
||||
Packet->data = MC.Buffer;
|
||||
Packet->size = CurrentFrame->FrameSize + ((TCC.get() && TCC->CompressionMethod == COMP_PREPEND) ? TCC->CompressedPrivateDataSize : 0);
|
||||
Packet->flags = CurrentFrame->KeyFrame ? AV_PKT_FLAG_KEY : 0;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
|
|
@ -42,7 +42,6 @@ FFMatroskaIndexer::FFMatroskaIndexer(const char *Filename) : FFMS_Indexer(Filena
|
|||
|
||||
MF = mkv_OpenEx(&MC.ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
|
||||
if (MF == NULL) {
|
||||
fclose(MC.ST.fp);
|
||||
std::ostringstream buf;
|
||||
buf << "Can't parse Matroska file: " << ErrorMessage;
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
|
@ -56,11 +55,9 @@ FFMatroskaIndexer::FFMatroskaIndexer(const char *Filename) : FFMS_Indexer(Filena
|
|||
|
||||
FFMatroskaIndexer::~FFMatroskaIndexer() {
|
||||
mkv_Close(MF);
|
||||
fclose(MC.ST.fp);
|
||||
}
|
||||
|
||||
FFMS_Index *FFMatroskaIndexer::DoIndexing() {
|
||||
// char ErrorMessage[256];
|
||||
std::vector<SharedAudioContext> AudioContexts(mkv_GetNumTracks(MF), SharedAudioContext(true));
|
||||
std::vector<SharedVideoContext> VideoContexts(mkv_GetNumTracks(MF), SharedVideoContext(true));
|
||||
|
||||
|
@ -71,61 +68,43 @@ FFMS_Index *FFMatroskaIndexer::DoIndexing() {
|
|||
TrackInfo *TI = mkv_GetTrackInfo(MF, i);
|
||||
TrackIndices->push_back(FFMS_Track(mkv_TruncFloat(mkv_GetTrackInfo(MF, i)->TimecodeScale), 1000000, HaaliTrackTypeToFFTrackType(mkv_GetTrackInfo(MF, i)->Type)));
|
||||
|
||||
if (HaaliTrackTypeToFFTrackType(TI->Type) == FFMS_TYPE_VIDEO && Codec[i] && (VideoContexts[i].Parser = av_parser_init(Codec[i]->id))) {
|
||||
if (!Codec[i]) continue;
|
||||
|
||||
AVCodecContext *CodecContext = avcodec_alloc_context();
|
||||
AVCodecContext *CodecContext = avcodec_alloc_context();
|
||||
InitializeCodecContextFromMatroskaTrackInfo(TI, CodecContext);
|
||||
|
||||
InitializeCodecContextFromMatroskaTrackInfo(TI, CodecContext);
|
||||
try {
|
||||
if (TI->Type == TT_VIDEO && (VideoContexts[i].Parser = av_parser_init(Codec[i]->id))) {
|
||||
if (avcodec_open(CodecContext, Codec[i]) < 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
|
||||
"Could not open video codec");
|
||||
|
||||
if (avcodec_open(CodecContext, Codec[i]) < 0) {
|
||||
av_freep(&CodecContext);
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
|
||||
"Could not open video codec");
|
||||
if (TI->CompEnabled)
|
||||
VideoContexts[i].TCC = new TrackCompressionContext(MF, TI, i);
|
||||
|
||||
VideoContexts[i].CodecContext = CodecContext;
|
||||
VideoContexts[i].Parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
|
||||
}
|
||||
else if (IndexMask & (1 << i) && TI->Type == TT_AUDIO) {
|
||||
if (avcodec_open(CodecContext, Codec[i]) < 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
|
||||
"Could not open audio codec");
|
||||
|
||||
if (TI->CompEnabled)
|
||||
VideoContexts[i].TCC = new TrackCompressionContext(MF, TI, i);
|
||||
|
||||
VideoContexts[i].CodecContext = CodecContext;
|
||||
VideoContexts[i].Parser->flags = PARSER_FLAG_COMPLETE_FRAMES;
|
||||
}
|
||||
|
||||
if (IndexMask & (1 << i) && TI->Type == TT_AUDIO) {
|
||||
AVCodecContext *AudioCodecContext = avcodec_alloc_context();
|
||||
InitializeCodecContextFromMatroskaTrackInfo(TI, AudioCodecContext);
|
||||
AudioContexts[i].CodecContext = AudioCodecContext;
|
||||
|
||||
if (TI->CompEnabled) {
|
||||
try {
|
||||
if (TI->CompEnabled)
|
||||
AudioContexts[i].TCC = new TrackCompressionContext(MF, TI, i);
|
||||
} catch (FFMS_Exception &) {
|
||||
av_freep(&AudioCodecContext);
|
||||
AudioContexts[i].CodecContext = NULL;
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
AVCodec *AudioCodec = Codec[i];
|
||||
if (AudioCodec == NULL) {
|
||||
av_freep(&AudioCodecContext);
|
||||
AudioContexts[i].CodecContext = NULL;
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_UNSUPPORTED,
|
||||
"Audio codec not found");
|
||||
AudioContexts[i].CodecContext = CodecContext;
|
||||
} else {
|
||||
IndexMask &= ~(1 << i);
|
||||
av_freep(&CodecContext);
|
||||
}
|
||||
|
||||
if (avcodec_open(AudioCodecContext, AudioCodec) < 0) {
|
||||
av_freep(&AudioCodecContext);
|
||||
AudioContexts[i].CodecContext = NULL;
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
|
||||
"Could not open audio codec");
|
||||
}
|
||||
} else {
|
||||
IndexMask &= ~(1 << i);
|
||||
}
|
||||
catch (...) {
|
||||
av_freep(&CodecContext);
|
||||
throw;
|
||||
}
|
||||
}
|
||||
|
||||
//
|
||||
|
||||
ulonglong StartTime, EndTime, FilePos;
|
||||
unsigned int Track, FrameFlags, FrameSize;
|
||||
AVPacket TempPacket;
|
||||
|
@ -133,13 +112,9 @@ FFMS_Index *FFMatroskaIndexer::DoIndexing() {
|
|||
|
||||
while (mkv_ReadFrame(MF, 0, &Track, &StartTime, &EndTime, &FilePos, &FrameSize, &FrameFlags) == 0) {
|
||||
// Update progress
|
||||
if (IC) {
|
||||
if ((*IC)(ftello(MC.ST.fp), Filesize, ICPrivate))
|
||||
throw FFMS_Exception(FFMS_ERROR_CANCELLED, FFMS_ERROR_USER,
|
||||
"Cancelled by user");
|
||||
}
|
||||
if (IC && (*IC)(ftello(MC.ST.fp), Filesize, ICPrivate))
|
||||
throw FFMS_Exception(FFMS_ERROR_CANCELLED, FFMS_ERROR_USER, "Cancelled by user");
|
||||
|
||||
// Only create index entries for video for now to save space
|
||||
if (mkv_GetTrackInfo(MF, Track)->Type == TT_VIDEO) {
|
||||
uint8_t *OB;
|
||||
int OBSize;
|
||||
|
@ -153,73 +128,18 @@ FFMS_Index *FFMatroskaIndexer::DoIndexing() {
|
|||
(*TrackIndices)[Track].push_back(TFrameInfo::VideoFrameInfo(StartTime, RepeatPict, (FrameFlags & FRAME_KF) != 0, FilePos, FrameSize));
|
||||
} else if (mkv_GetTrackInfo(MF, Track)->Type == TT_AUDIO && (IndexMask & (1 << Track))) {
|
||||
TrackCompressionContext *TCC = AudioContexts[Track].TCC;
|
||||
int64_t StartSample = AudioContexts[Track].CurrentSample;
|
||||
unsigned int CompressedFrameSize = FrameSize;
|
||||
AVCodecContext *AudioCodecContext = AudioContexts[Track].CodecContext;
|
||||
ReadFrame(FilePos, FrameSize, TCC, MC);
|
||||
TempPacket.data = MC.Buffer;
|
||||
TempPacket.size = (TCC && TCC->CompressionMethod == COMP_PREPEND) ? FrameSize + TCC->CompressedPrivateDataSize : FrameSize;
|
||||
if ((FrameFlags & FRAME_KF) != 0)
|
||||
TempPacket.flags = AV_PKT_FLAG_KEY;
|
||||
else
|
||||
TempPacket.flags = 0;
|
||||
TempPacket.flags = FrameFlags & FRAME_KF ? AV_PKT_FLAG_KEY : 0;
|
||||
|
||||
bool first = true;
|
||||
int LastNumChannels;
|
||||
int LastSampleRate;
|
||||
AVSampleFormat LastSampleFormat;
|
||||
while (TempPacket.size > 0) {
|
||||
int dbsize = AVCODEC_MAX_AUDIO_FRAME_SIZE*10;
|
||||
int Ret = avcodec_decode_audio3(AudioCodecContext, &DecodingBuffer[0], &dbsize, &TempPacket);
|
||||
if (Ret < 0) {
|
||||
if (ErrorHandling == FFMS_IEH_ABORT) {
|
||||
throw FFMS_Exception(FFMS_ERROR_CODEC, FFMS_ERROR_DECODING,
|
||||
"Audio decoding error");
|
||||
} else if (ErrorHandling == FFMS_IEH_CLEAR_TRACK) {
|
||||
(*TrackIndices)[Track].clear();
|
||||
IndexMask &= ~(1 << Track);
|
||||
break;
|
||||
} else if (ErrorHandling == FFMS_IEH_STOP_TRACK) {
|
||||
IndexMask &= ~(1 << Track);
|
||||
break;
|
||||
} else if (ErrorHandling == FFMS_IEH_IGNORE) {
|
||||
break;
|
||||
}
|
||||
}
|
||||
int64_t StartSample = AudioContexts[Track].CurrentSample;
|
||||
int64_t SampleCount = IndexAudioPacket(Track, &TempPacket, AudioContexts[Track], *TrackIndices);
|
||||
|
||||
if (first) {
|
||||
LastNumChannels = AudioCodecContext->channels;
|
||||
LastSampleRate = AudioCodecContext->sample_rate;
|
||||
LastSampleFormat = AudioCodecContext->sample_fmt;
|
||||
first = false;
|
||||
}
|
||||
|
||||
if (LastNumChannels != AudioCodecContext->channels || LastSampleRate != AudioCodecContext->sample_rate
|
||||
|| LastSampleFormat != AudioCodecContext->sample_fmt) {
|
||||
std::ostringstream buf;
|
||||
buf <<
|
||||
"Audio format change detected. This is currently unsupported."
|
||||
<< " Channels: " << LastNumChannels << " -> " << AudioCodecContext->channels << ";"
|
||||
<< " Sample rate: " << LastSampleRate << " -> " << AudioCodecContext->sample_rate << ";"
|
||||
<< " Sample format: " << GetLAVCSampleFormatName(LastSampleFormat) << " -> "
|
||||
<< GetLAVCSampleFormatName(AudioCodecContext->sample_fmt);
|
||||
throw FFMS_Exception(FFMS_ERROR_UNSUPPORTED, FFMS_ERROR_DECODING, buf.str());
|
||||
}
|
||||
|
||||
if (Ret > 0) {
|
||||
TempPacket.size -= Ret;
|
||||
TempPacket.data += Ret;
|
||||
}
|
||||
|
||||
if (dbsize > 0)
|
||||
AudioContexts[Track].CurrentSample += (dbsize * 8) / (av_get_bits_per_sample_fmt(AudioCodecContext->sample_fmt) * AudioCodecContext->channels);
|
||||
|
||||
if (DumpMask & (1 << Track))
|
||||
WriteAudio(AudioContexts[Track], TrackIndices.get(), Track, dbsize);
|
||||
}
|
||||
|
||||
(*TrackIndices)[Track].push_back(TFrameInfo::AudioFrameInfo(StartTime, StartSample,
|
||||
static_cast<unsigned int>(AudioContexts[Track].CurrentSample - StartSample), (FrameFlags & FRAME_KF) != 0, FilePos, CompressedFrameSize));
|
||||
if (SampleCount != 0)
|
||||
(*TrackIndices)[Track].push_back(TFrameInfo::AudioFrameInfo(StartTime, StartSample,
|
||||
SampleCount, (FrameFlags & FRAME_KF) != 0, FilePos, CompressedFrameSize));
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -236,8 +156,5 @@ FFMS_TrackType FFMatroskaIndexer::GetTrackType(int Track) {
|
|||
}
|
||||
|
||||
const char *FFMatroskaIndexer::GetTrackCodec(int Track) {
|
||||
if (Codec[Track])
|
||||
return Codec[Track]->name;
|
||||
else
|
||||
return "Unsupported codec/Unknown codec name";
|
||||
return Codec[Track] ? Codec[Track]->name : NULL;
|
||||
}
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2004-2009 Mike Matsnev. All Rights Reserved.
|
||||
* Copyright (c) 2004-2008 Mike Matsnev. All Rights Reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
|
@ -25,6 +25,8 @@
|
|||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* $Id: MatroskaParser.c,v 1.73 2010/03/05 22:41:47 mike Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
#ifdef HAVE_CONFIG_H
|
||||
|
@ -298,8 +300,7 @@ static void myvsnprintf_int(char **pdest,char *de,int width,int zero,
|
|||
static void myvsnprintf(char *dest,unsigned dsize,const char *fmt,va_list ap) {
|
||||
// s,d,x,u,ll
|
||||
char *de = dest + dsize - 1;
|
||||
int state, width, zero, neg, ll;
|
||||
state = width = zero = neg = ll = 0;
|
||||
int state = 0, width, zero, neg, ll;
|
||||
|
||||
if (dsize <= 1) {
|
||||
if (dsize > 0)
|
||||
|
@ -720,12 +721,22 @@ static inline ulonglong readVLUInt(MatroskaFile *mf) {
|
|||
return readVLUIntImp(mf,NULL);
|
||||
}
|
||||
|
||||
static ulonglong readSize(MatroskaFile *mf) {
|
||||
static ulonglong readSizeUnspec(MatroskaFile *mf) {
|
||||
int m;
|
||||
ulonglong v = readVLUIntImp(mf,&m);
|
||||
|
||||
// see if it's unspecified
|
||||
if (v == (MAXU64 >> (57-m*7)))
|
||||
return MAXU64;
|
||||
|
||||
return v;
|
||||
}
|
||||
|
||||
static ulonglong readSize(MatroskaFile *mf) {
|
||||
ulonglong v = readSizeUnspec(mf);
|
||||
|
||||
// see if it's unspecified
|
||||
if (v == MAXU64)
|
||||
errorjmp(mf,"Unspecified element size is not supported here.");
|
||||
|
||||
return v;
|
||||
|
@ -873,7 +884,7 @@ static void readLangCC(MatroskaFile *mf, ulonglong len, char lcc[4]) {
|
|||
|
||||
///////////////////////////////////////////////////////////////////////////
|
||||
// file parser
|
||||
#define FOREACH(f,tl) \
|
||||
#define FOREACH2(f,tl,clid) \
|
||||
{ \
|
||||
ulonglong tmplen = (tl); \
|
||||
{ \
|
||||
|
@ -882,14 +893,18 @@ static void readLangCC(MatroskaFile *mf, ulonglong len, char lcc[4]) {
|
|||
int id; \
|
||||
for (;;) { \
|
||||
cur = filepos(mf); \
|
||||
if (cur == start + tmplen) \
|
||||
if (tmplen != MAXU64 && cur == start + tmplen) \
|
||||
break; \
|
||||
id = readID(f); \
|
||||
if (id==EOF) \
|
||||
errorjmp(mf,"Unexpected EOF while reading EBML container"); \
|
||||
len = readSize(mf); \
|
||||
len = id == clid ? readSizeUnspec(mf) : readSize(mf); \
|
||||
switch (id) {
|
||||
|
||||
#define FOREACH(f,tl) FOREACH2(f,tl,EOF)
|
||||
|
||||
#define RESTART() (tmplen=len),(start=cur)
|
||||
|
||||
#define ENDFOR1(f) \
|
||||
default: \
|
||||
skipbytes(f,len); \
|
||||
|
@ -917,6 +932,16 @@ static void readLangCC(MatroskaFile *mf, ulonglong len, char lcc[4]) {
|
|||
#define STRGETA(f,v,len) STRGETF(f,v,len,myalloca)
|
||||
#define STRGETM(f,v,len) STRGETF(f,v,len,f->cache->memalloc)
|
||||
|
||||
static int IsWritingApp(MatroskaFile *mf,const char *str) {
|
||||
const char *cp = mf->Seg.WritingApp;
|
||||
if (!cp)
|
||||
return 0;
|
||||
|
||||
while (*str && *str++==*cp++) ;
|
||||
|
||||
return !*str;
|
||||
}
|
||||
|
||||
static void parseEBML(MatroskaFile *mf,ulonglong toplen) {
|
||||
ulonglong v;
|
||||
char buf[32];
|
||||
|
@ -1077,31 +1102,57 @@ static void parseSegmentInfo(MatroskaFile *mf,ulonglong toplen) {
|
|||
}
|
||||
|
||||
static void parseFirstCluster(MatroskaFile *mf,ulonglong toplen) {
|
||||
ulonglong end = filepos(mf) + toplen;
|
||||
int seenTimecode = 0, seenBlock = 0;
|
||||
longlong tc;
|
||||
ulonglong clstart = filepos(mf);
|
||||
|
||||
mf->seen.Cluster = 1;
|
||||
mf->firstTimecode = 0;
|
||||
|
||||
FOREACH(mf,toplen)
|
||||
FOREACH2(mf,toplen,0x1f43b675)
|
||||
case 0xe7: // Timecode
|
||||
mf->firstTimecode += readUInt(mf,(unsigned)len);
|
||||
tc = readUInt(mf,(unsigned)len);
|
||||
if (!seenTimecode) {
|
||||
seenTimecode = 1;
|
||||
mf->firstTimecode += tc;
|
||||
}
|
||||
|
||||
if (seenBlock) {
|
||||
out:
|
||||
if (toplen != MAXU64)
|
||||
skipbytes(mf,clstart + toplen - filepos(mf));
|
||||
else if (len != MAXU64)
|
||||
skipbytes(mf,cur + len - filepos(mf));
|
||||
return;
|
||||
}
|
||||
break;
|
||||
case 0xa3: // BlockEx
|
||||
readVLUInt(mf); // track number
|
||||
mf->firstTimecode += readSInt(mf, 2);
|
||||
tc = readSInt(mf, 2);
|
||||
if (!seenBlock) {
|
||||
seenBlock = 1;
|
||||
mf->firstTimecode += tc;
|
||||
}
|
||||
|
||||
skipbytes(mf,end - filepos(mf));
|
||||
return;
|
||||
if (seenTimecode)
|
||||
goto out;
|
||||
break;
|
||||
case 0xa0: // BlockGroup
|
||||
FOREACH(mf,len)
|
||||
case 0xa1: // Block
|
||||
readVLUInt(mf); // track number
|
||||
mf->firstTimecode += readSInt(mf,2);
|
||||
tc = readSInt(mf,2);
|
||||
if (!seenBlock) {
|
||||
seenBlock = 1;
|
||||
mf->firstTimecode += tc;
|
||||
}
|
||||
|
||||
skipbytes(mf,end - filepos(mf));
|
||||
return;
|
||||
if (seenTimecode)
|
||||
goto out;
|
||||
ENDFOR(mf);
|
||||
break;
|
||||
case 0x1f43b675:
|
||||
return;
|
||||
ENDFOR(mf);
|
||||
}
|
||||
|
||||
|
@ -1215,6 +1266,10 @@ static void parseAudioInfo(MatroskaFile *mf,ulonglong toplen,struct TrackInfo *t
|
|||
break;
|
||||
case 0x6264: // BitDepth
|
||||
v = readUInt(mf,(unsigned)len);
|
||||
#if 0
|
||||
if ((v<1 || v>255) && !IsWritingApp(mf,"AVI-Mux GUI"))
|
||||
errorjmp(mf,"Invalid BitDepth: %d",(int)v);
|
||||
#endif
|
||||
ti->AV.Audio.BitDepth = (unsigned char)v;
|
||||
break;
|
||||
ENDFOR(mf);
|
||||
|
@ -1244,7 +1299,7 @@ static void parseTrackEntry(MatroskaFile *mf,ulonglong toplen) {
|
|||
ulonglong v;
|
||||
char *cp = NULL, *cs = NULL;
|
||||
size_t cplen = 0, cslen = 0, cpadd = 0;
|
||||
unsigned CompScope = 0, num_comp = 0;
|
||||
unsigned CompScope, num_comp = 0;
|
||||
|
||||
if (mf->nTracks >= MAX_TRACKS)
|
||||
errorjmp(mf,"Too many tracks.");
|
||||
|
@ -1430,7 +1485,7 @@ static void parseTrackEntry(MatroskaFile *mf,ulonglong toplen) {
|
|||
errorjmp(mf, "inflateInit failed");
|
||||
|
||||
zs.next_in = (Bytef *)cp;
|
||||
zs.avail_in = cplen;
|
||||
zs.avail_in = (uInt)cplen;
|
||||
|
||||
do {
|
||||
zs.next_out = tmp;
|
||||
|
@ -1448,7 +1503,7 @@ static void parseTrackEntry(MatroskaFile *mf,ulonglong toplen) {
|
|||
inflateReset(&zs);
|
||||
|
||||
zs.next_in = (Bytef *)cp;
|
||||
zs.avail_in = cplen;
|
||||
zs.avail_in = (uInt)cplen;
|
||||
zs.next_out = ncp;
|
||||
zs.avail_out = ncplen;
|
||||
|
||||
|
@ -1987,7 +2042,7 @@ static void parseSegment(MatroskaFile *mf,ulonglong toplen) {
|
|||
mf->flags &= ~MPF_ERROR;
|
||||
else {
|
||||
// we want to read data until we find a seekhead or a trackinfo
|
||||
FOREACH(mf,toplen)
|
||||
FOREACH2(mf,toplen,0x1f43b675)
|
||||
case 0x114d9b74: // SeekHead
|
||||
if (mf->flags & MKVF_AVOID_SEEKS) {
|
||||
skipbytes(mf,len);
|
||||
|
@ -2018,9 +2073,10 @@ static void parseSegment(MatroskaFile *mf,ulonglong toplen) {
|
|||
case 0x1f43b675: // Cluster
|
||||
if (!mf->pCluster)
|
||||
mf->pCluster = cur;
|
||||
if (mf->seen.Cluster)
|
||||
skipbytes(mf,len);
|
||||
else
|
||||
if (mf->seen.Cluster) {
|
||||
if (len != MAXU64)
|
||||
skipbytes(mf,len);
|
||||
} else
|
||||
parseFirstCluster(mf,len);
|
||||
break;
|
||||
case 0x1654ae6b: // Tracks
|
||||
|
@ -2346,8 +2402,8 @@ static int readMoreBlocks(MatroskaFile *mf) {
|
|||
if (cid == EOF)
|
||||
goto ex;
|
||||
if (cid == 0x1f43b675) {
|
||||
toplen = readSize(mf);
|
||||
if (toplen < MAXCLUSTER) {
|
||||
toplen = readSizeUnspec(mf);
|
||||
if (toplen < MAXCLUSTER || toplen == MAXU64) {
|
||||
// reset error flags
|
||||
mf->flags &= ~MPF_ERROR;
|
||||
ret = RBRESYNC;
|
||||
|
@ -2372,12 +2428,15 @@ static int readMoreBlocks(MatroskaFile *mf) {
|
|||
ret = EOF;
|
||||
break;
|
||||
}
|
||||
toplen = readSize(mf);
|
||||
toplen = cid == 0x1f43b675 ? readSizeUnspec(mf) : readSize(mf);
|
||||
|
||||
if (cid == 0x1f43b675) { // Cluster
|
||||
unsigned char have_timecode = 0;
|
||||
|
||||
FOREACH(mf,toplen)
|
||||
FOREACH2(mf,toplen,0x1f43b675)
|
||||
case 0x1f43b675:
|
||||
RESTART();
|
||||
break;
|
||||
case 0xe7: // Timecode
|
||||
mf->tcCluster = readUInt(mf,(unsigned)len);
|
||||
have_timecode = 1;
|
||||
|
@ -2517,7 +2576,10 @@ static void reindex(MatroskaFile *mf) {
|
|||
if (id != 0x1f43b675) // shouldn't happen
|
||||
continue;
|
||||
|
||||
size = readVLUInt(mf);
|
||||
size = readSizeUnspec(mf);
|
||||
if (size == MAXU64)
|
||||
break;
|
||||
|
||||
if (size >= MAXCLUSTER || size < 1024)
|
||||
continue;
|
||||
|
||||
|
@ -2650,7 +2712,6 @@ static void parseFile(MatroskaFile *mf) {
|
|||
ulonglong len = filepos(mf), adjust;
|
||||
unsigned i;
|
||||
int id = readID(mf);
|
||||
int m;
|
||||
|
||||
if (id==EOF)
|
||||
errorjmp(mf,"Unexpected EOF at start of file");
|
||||
|
@ -2671,12 +2732,11 @@ static void parseFile(MatroskaFile *mf) {
|
|||
if (id==EOF)
|
||||
errorjmp(mf,"No segments found in the file");
|
||||
segment:
|
||||
len = readVLUIntImp(mf,&m);
|
||||
// see if it's unspecified
|
||||
if (len == (MAXU64 >> (57-m*7)))
|
||||
len = MAXU64;
|
||||
len = readSizeUnspec(mf);
|
||||
if (id == 0x18538067) // Segment
|
||||
break;
|
||||
if (len == MAXU64)
|
||||
errorjmp(mf,"No segments found in the file");
|
||||
skipbytes(mf,len);
|
||||
}
|
||||
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
/*
|
||||
* Copyright (c) 2004-2009 Mike Matsnev. All Rights Reserved.
|
||||
* Copyright (c) 2004-2008 Mike Matsnev. All Rights Reserved.
|
||||
*
|
||||
* Redistribution and use in source and binary forms, with or without
|
||||
* modification, are permitted provided that the following conditions
|
||||
|
@ -25,6 +25,8 @@
|
|||
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
|
||||
* THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
|
||||
*
|
||||
* $Id: MatroskaParser.h,v 1.22 2008/04/29 21:03:09 mike Exp $
|
||||
*
|
||||
*/
|
||||
|
||||
#ifndef MATROSKA_PARSER_H
|
||||
|
|
|
@ -27,7 +27,6 @@ void FFMatroskaVideo::Free(bool CloseCodec) {
|
|||
delete TCC;
|
||||
if (MC.ST.fp) {
|
||||
mkv_Close(MF);
|
||||
fclose(MC.ST.fp);
|
||||
}
|
||||
if (CloseCodec)
|
||||
avcodec_close(CodecContext);
|
||||
|
@ -57,7 +56,6 @@ FFMatroskaVideo::FFMatroskaVideo(const char *SourceFile, int Track,
|
|||
|
||||
MF = mkv_OpenEx(&MC.ST.base, 0, 0, ErrorMessage, sizeof(ErrorMessage));
|
||||
if (MF == NULL) {
|
||||
fclose(MC.ST.fp);
|
||||
std::ostringstream buf;
|
||||
buf << "Can't parse Matroska file: " << ErrorMessage;
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
|
@ -107,6 +105,14 @@ FFMatroskaVideo::FFMatroskaVideo(const char *SourceFile, int Track,
|
|||
VP.ColorSpace = 0;
|
||||
VP.ColorRange = 0;
|
||||
#endif
|
||||
// these pixfmt's are deprecated but still used
|
||||
if (
|
||||
CodecContext->pix_fmt == PIX_FMT_YUVJ420P
|
||||
|| CodecContext->pix_fmt == PIX_FMT_YUVJ422P
|
||||
|| CodecContext->pix_fmt == PIX_FMT_YUVJ444P
|
||||
)
|
||||
VP.ColorRange = AVCOL_RANGE_JPEG;
|
||||
|
||||
VP.FirstTime = ((Frames.front().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
VP.LastTime = ((Frames.back().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
|
||||
|
|
|
@@ -21,6 +21,8 @@
#ifndef STDIOSTREAM_H
#define STDIOSTREAM_H

#undef __STRICT_ANSI__

#include <stdlib.h>
#include <stdio.h>
#include <string.h>

@ -45,6 +45,8 @@ extern const AVCodecTag ff_codec_bmp_tags[];
|
|||
extern const CodecTags ff_mkv_codec_tags[];
|
||||
extern const AVCodecTag ff_codec_movvideo_tags[];
|
||||
extern const AVCodecTag ff_codec_wav_tags[];
|
||||
|
||||
#include <libavutil/opt.h>
|
||||
}
|
||||
|
||||
extern int CPUFeatures;
|
||||
|
@ -107,8 +109,8 @@ TrackCompressionContext::~TrackCompressionContext() {
|
|||
cs_Destroy(CS);
|
||||
}
|
||||
|
||||
int GetSWSCPUFlags() {
|
||||
int Flags = 0;
|
||||
int64_t GetSWSCPUFlags() {
|
||||
int64_t Flags = 0;
|
||||
|
||||
if (CPUFeatures & FFMS_CPU_CAPS_MMX)
|
||||
Flags |= SWS_CPU_CAPS_MMX;
|
||||
|
@ -120,14 +122,64 @@ int GetSWSCPUFlags() {
|
|||
Flags |= SWS_CPU_CAPS_ALTIVEC;
|
||||
if (CPUFeatures & FFMS_CPU_CAPS_BFIN)
|
||||
Flags |= SWS_CPU_CAPS_BFIN;
|
||||
#ifdef SWS_CPU_CAPS_SSE2
|
||||
if (CPUFeatures & FFMS_CPU_CAPS_SSE2)
|
||||
Flags |= SWS_CPU_CAPS_SSE2;
|
||||
#endif
|
||||
|
||||
return Flags;
|
||||
}
|
||||
|
||||
static int handle_jpeg(PixelFormat *format)
|
||||
{
|
||||
switch (*format) {
|
||||
case PIX_FMT_YUVJ420P: *format = PIX_FMT_YUV420P; return 1;
|
||||
case PIX_FMT_YUVJ422P: *format = PIX_FMT_YUV422P; return 1;
|
||||
case PIX_FMT_YUVJ444P: *format = PIX_FMT_YUV444P; return 1;
|
||||
case PIX_FMT_YUVJ440P: *format = PIX_FMT_YUV440P; return 1;
|
||||
default: return 0;
|
||||
}
|
||||
}
|
||||
SwsContext *GetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat, int DstW, int DstH, PixelFormat DstFormat, int64_t Flags, int ColorSpace) {
|
||||
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
|
||||
return sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat, Flags, 0, 0, 0);
|
||||
#else
|
||||
SwsContext *Context = sws_alloc_context();
|
||||
if (!Context) return 0;
|
||||
|
||||
if (ColorSpace == -1)
|
||||
ColorSpace = (SrcW > 1024 || SrcH >= 600) ? SWS_CS_ITU709 : SWS_CS_DEFAULT;
|
||||
|
||||
int SrcRange = handle_jpeg(&SrcFormat);
|
||||
int DstRange = handle_jpeg(&DstFormat);
|
||||
|
||||
av_set_int(Context, "sws_flags", Flags);
|
||||
av_set_int(Context, "srcw", SrcW);
|
||||
av_set_int(Context, "srch", SrcH);
|
||||
av_set_int(Context, "dstw", DstW);
|
||||
av_set_int(Context, "dsth", DstH);
|
||||
av_set_int(Context, "src_range", SrcRange);
|
||||
av_set_int(Context, "dst_range", DstRange);
|
||||
av_set_int(Context, "src_format", SrcFormat);
|
||||
av_set_int(Context, "dst_format", DstFormat);
|
||||
|
||||
sws_setColorspaceDetails(Context, sws_getCoefficients(ColorSpace), SrcRange, sws_getCoefficients(ColorSpace), DstRange, 0, 1<<16, 1<<16);
|
||||
|
||||
if(sws_init_context(Context, 0, 0) < 0){
|
||||
sws_freeContext(Context);
|
||||
return 0;
|
||||
}
|
||||
|
||||
return Context;
|
||||
#endif
|
||||
|
||||
}
|
||||
|
||||
|
||||
int GetPPCPUFlags() {
|
||||
int Flags = 0;
|
||||
|
||||
#ifdef WITH_LIBPOSTPROC
|
||||
#ifdef FFMS_USE_POSTPROC
|
||||
// not exactly a pretty solution but it'll never get called anyway
|
||||
if (CPUFeatures & FFMS_CPU_CAPS_MMX)
|
||||
Flags |= PP_CPU_CAPS_MMX;
|
||||
|
@ -137,7 +189,7 @@ int GetPPCPUFlags() {
|
|||
Flags |= PP_CPU_CAPS_3DNOW;
|
||||
if (CPUFeatures & FFMS_CPU_CAPS_ALTIVEC)
|
||||
Flags |= PP_CPU_CAPS_ALTIVEC;
|
||||
#endif // WITH_LIBPOSTPROC
|
||||
#endif // FFMS_USE_POSTPROC
|
||||
|
||||
return Flags;
|
||||
}
|
||||
|
@ -172,6 +224,17 @@ const char *GetLAVCSampleFormatName(AVSampleFormat s) {
|
|||
}
|
||||
}
|
||||
|
||||
template<class T> static void safe_realloc(T *&ptr, size_t size) {
|
||||
void *newalloc = realloc(ptr, size);
|
||||
if (newalloc) {
|
||||
ptr = static_cast<T*>(newalloc);
|
||||
}
|
||||
else {
|
||||
free(ptr);
|
||||
ptr = 0;
|
||||
}
|
||||
}
|
||||
|
||||
void ReadFrame(uint64_t FilePos, unsigned int &FrameSize, TrackCompressionContext *TCC, MatroskaReaderContext &Context) {
|
||||
if (TCC && TCC->CS) {
|
||||
CompressedStream *CS = TCC->CS;
|
||||
|
@ -196,7 +259,7 @@ void ReadFrame(uint64_t FilePos, unsigned int &FrameSize, TrackCompressionContex
|
|||
|
||||
if (Context.BufferSize < DecompressedFrameSize + ReadBytes) {
|
||||
Context.BufferSize = DecompressedFrameSize + ReadBytes;
|
||||
Context.Buffer = (uint8_t *)realloc(Context.Buffer, Context.BufferSize + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
safe_realloc(Context.Buffer, Context.BufferSize + FF_INPUT_BUFFER_PADDING_SIZE);
|
||||
if (Context.Buffer == NULL)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_ALLOCATION_FAILED,
|
||||
"Out of memory");
|
||||
|
@ -216,7 +279,7 @@ void ReadFrame(uint64_t FilePos, unsigned int &FrameSize, TrackCompressionContex
|
|||
unsigned ReqBufsize = FrameSize + TCC->CompressedPrivateDataSize + 16;
|
||||
if (Context.BufferSize < ReqBufsize) {
|
||||
Context.BufferSize = FrameSize + TCC->CompressedPrivateDataSize;
|
||||
Context.Buffer = (uint8_t *)realloc(Context.Buffer, ReqBufsize);
|
||||
safe_realloc(Context.Buffer, ReqBufsize);
|
||||
if (Context.Buffer == NULL)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_ALLOCATION_FAILED, "Out of memory");
|
||||
}
|
||||
|
@ -230,7 +293,7 @@ void ReadFrame(uint64_t FilePos, unsigned int &FrameSize, TrackCompressionContex
|
|||
}
|
||||
else if (Context.BufferSize < FrameSize) {
|
||||
Context.BufferSize = FrameSize;
|
||||
Context.Buffer = (uint8_t *)realloc(Context.Buffer, Context.BufferSize + 16);
|
||||
safe_realloc(Context.Buffer, Context.BufferSize + 16);
|
||||
if (Context.Buffer == NULL)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_ALLOCATION_FAILED,
|
||||
"Out of memory");
|
||||
|
@ -279,9 +342,11 @@ void FillAP(FFMS_AudioProperties &AP, AVCodecContext *CTX, FFMS_Track &Frames) {
|
|||
AP.Channels = CTX->channels;
|
||||
AP.ChannelLayout = CTX->channel_layout;
|
||||
AP.SampleRate = CTX->sample_rate;
|
||||
AP.NumSamples = (Frames.back()).SampleStart + (Frames.back()).SampleCount;
|
||||
AP.FirstTime = ((Frames.front().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
AP.LastTime = ((Frames.back().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
if (Frames.size() > 0) {
|
||||
AP.NumSamples = (Frames.back()).SampleStart + (Frames.back()).SampleCount;
|
||||
AP.FirstTime = ((Frames.front().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
AP.LastTime = ((Frames.back().PTS * Frames.TB.Num) / (double)Frames.TB.Den) / 1000;
|
||||
}
|
||||
}
|
||||
|
||||
#ifdef HAALISOURCE
|
||||
|
@ -383,42 +448,80 @@ void InitializeCodecContextFromMatroskaTrackInfo(TrackInfo *TI, AVCodecContext *
|
|||
|
||||
#ifdef HAALISOURCE
|
||||
|
||||
void InitializeCodecContextFromHaaliInfo(CComQIPtr<IPropertyBag> pBag, AVCodecContext *CodecContext) {
|
||||
if (pBag) {
|
||||
CComVariant pV;
|
||||
FFCodecContext InitializeCodecContextFromHaaliInfo(CComQIPtr<IPropertyBag> pBag) {
|
||||
CComVariant pV;
|
||||
if (FAILED(pBag->Read(L"Type", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
return FFCodecContext();
|
||||
|
||||
unsigned int TT = pV.uintVal;
|
||||
|
||||
FFCodecContext CodecContext(avcodec_alloc_context(), DeleteHaaliCodecContext);
|
||||
|
||||
unsigned int FourCC = 0;
|
||||
if (TT == TT_VIDEO) {
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Video.PixelWidth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->coded_width = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Type", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4))) {
|
||||
if (SUCCEEDED(pBag->Read(L"Video.PixelHeight", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->coded_height = pV.uintVal;
|
||||
|
||||
unsigned int TT = pV.uintVal;
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"FOURCC", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
FourCC = pV.uintVal;
|
||||
|
||||
if (TT == TT_VIDEO) {
|
||||
// Reconstruct the missing codec private part for VC1
|
||||
FFMS_BITMAPINFOHEADER bih;
|
||||
memset(&bih, 0, sizeof bih);
|
||||
bih.biSize = sizeof bih;
|
||||
bih.biCompression = FourCC;
|
||||
bih.biBitCount = 24;
|
||||
bih.biPlanes = 1;
|
||||
bih.biHeight = CodecContext->coded_height;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Video.PixelWidth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->coded_width = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Video.PixelHeight", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->coded_height = pV.uintVal;
|
||||
|
||||
} else if (TT == TT_AUDIO) {
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.SamplingFreq", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->sample_rate = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.BitDepth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->bits_per_coded_sample = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.Channels", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->channels = pV.uintVal;
|
||||
|
||||
}
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"CodecPrivate", &pV, NULL))) {
|
||||
bih.biSize += vtSize(pV);
|
||||
CodecContext->extradata = static_cast<uint8_t*>(av_malloc(bih.biSize));
|
||||
memcpy(CodecContext->extradata, &bih, sizeof bih);
|
||||
vtCopy(pV, CodecContext->extradata + sizeof bih);
|
||||
}
|
||||
}
|
||||
else {
|
||||
CodecContext->extradata = static_cast<uint8_t*>(av_malloc(bih.biSize));
|
||||
memcpy(CodecContext->extradata, &bih, sizeof bih);
|
||||
}
|
||||
CodecContext->extradata_size = bih.biSize;
|
||||
}
|
||||
else if (TT == TT_AUDIO) {
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"CodecPrivate", &pV, NULL))) {
|
||||
CodecContext->extradata_size = vtSize(pV);
|
||||
CodecContext->extradata = static_cast<uint8_t*>(av_malloc(CodecContext->extradata_size));
|
||||
vtCopy(pV, CodecContext->extradata);
|
||||
}
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.SamplingFreq", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->sample_rate = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.BitDepth", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->bits_per_coded_sample = pV.uintVal;
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"Audio.Channels", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_UI4)))
|
||||
CodecContext->channels = pV.uintVal;
|
||||
}
|
||||
|
||||
pV.Clear();
|
||||
if (SUCCEEDED(pBag->Read(L"CodecID", &pV, NULL)) && SUCCEEDED(pV.ChangeType(VT_BSTR))) {
|
||||
char CodecStr[2048];
|
||||
wcstombs(CodecStr, pV.bstrVal, 2000);
|
||||
|
||||
CodecContext->codec = avcodec_find_decoder(MatroskaToFFCodecID(CodecStr, CodecContext->extradata, FourCC, CodecContext->bits_per_coded_sample));
|
||||
}
|
||||
return CodecContext;
|
||||
}
|
||||
|
||||
#endif
|
||||
|
@ -603,11 +706,9 @@ CComPtr<IMMContainer> HaaliOpenFile(const char *SourceFile, enum FFMS_Sources So
|
|||
#endif
|
||||
|
||||
void LAVFOpenFile(const char *SourceFile, AVFormatContext *&FormatContext) {
|
||||
if (av_open_input_file(&FormatContext, SourceFile, NULL, 0, NULL) != 0) {
|
||||
std::ostringstream buf;
|
||||
buf << "Couldn't open '" << SourceFile << "'";
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ, buf.str());
|
||||
}
|
||||
if (av_open_input_file(&FormatContext, SourceFile, NULL, 0, NULL) != 0)
|
||||
throw FFMS_Exception(FFMS_ERROR_PARSER, FFMS_ERROR_FILE_READ,
|
||||
std::string("Couldn't open '") + SourceFile + "'");
|
||||
|
||||
if (av_find_stream_info(FormatContext) < 0) {
|
||||
av_close_input_file(FormatContext);
|
||||
|
|
|
@ -34,9 +34,9 @@ extern "C" {
|
|||
#include <libavformat/avformat.h>
|
||||
#include <libavcodec/avcodec.h>
|
||||
#include <libswscale/swscale.h>
|
||||
#ifdef WITH_LIBPOSTPROC
|
||||
#ifdef FFMS_USE_POSTPROC
|
||||
#include <libpostproc/postprocess.h>
|
||||
#endif // WITH_LIBPOSTPROC
|
||||
#endif // FFMS_USE_POSTPROC
|
||||
}
|
||||
|
||||
// must be included after ffmpeg headers
|
||||
|
@@ -60,17 +60,17 @@ const int64_t ffms_av_nopts_value = static_cast<int64_t>(1) << 63;

// used for matroska<->ffmpeg codec ID mapping to avoid Win32 dependency
typedef struct FFMS_BITMAPINFOHEADER {
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
uint16_t biPlanes;
uint16_t biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biClrUsed;
uint32_t biClrImportant;
uint32_t biSize;
int32_t biWidth;
int32_t biHeight;
uint16_t biPlanes;
uint16_t biBitCount;
uint32_t biCompression;
uint32_t biSizeImage;
int32_t biXPelsPerMeter;
int32_t biYPelsPerMeter;
uint32_t biClrUsed;
uint32_t biClrImportant;
} FFMS_BITMAPINFOHEADER;

class FFMS_Exception : public std::exception {
@@ -113,6 +113,33 @@ public:
_Arg = Arg;
}
};
// auto_ptr-ish holder for AVCodecContexts with overridable deleter
class FFCodecContext {
AVCodecContext *CodecContext;
void (*Deleter)(AVCodecContext *);
public:
FFCodecContext() : CodecContext(0), Deleter(0) { }
FFCodecContext(FFCodecContext &r) : CodecContext(r.CodecContext), Deleter(r.Deleter) { r.CodecContext = 0; }
FFCodecContext(AVCodecContext *c, void (*d)(AVCodecContext *)) : CodecContext(c), Deleter(d) { }
FFCodecContext& operator=(FFCodecContext r) { reset(r.CodecContext, r.Deleter); r.CodecContext = 0; return *this; }
~FFCodecContext() { reset(); }
AVCodecContext* operator->() { return CodecContext; }
operator AVCodecContext*() { return CodecContext; }
void reset(AVCodecContext *c = 0, void (*d)(AVCodecContext *) = 0) {
if (CodecContext && Deleter) Deleter(CodecContext);
CodecContext = c;
Deleter = d;
}
};

inline void DeleteHaaliCodecContext(AVCodecContext *CodecContext) {
av_freep(&CodecContext->extradata);
av_freep(&CodecContext);
}
inline void DeleteMatroskaCodecContext(AVCodecContext *CodecContext) {
avcodec_close(CodecContext);
av_freep(&CodecContext);
}

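The FFCodecContext holder above ties an AVCodecContext to a matching cleanup routine, and InitializeCodecContextFromHaaliInfo now returns one of these by value. A minimal usage sketch (hypothetical call site; avcodec_alloc_context() is the allocation call of the libavcodec versions targeted here):

    void ExampleUse() {
        // allocate a context and tie it to the Haali-specific cleanup routine above
        FFCodecContext CodecContext(avcodec_alloc_context(), DeleteHaaliCodecContext);
        AVCodecContext *Raw = CodecContext;  // implicit conversion for passing to lavc calls
        (void)Raw;
        CodecContext.reset();                // or simply let it go out of scope; either way
                                             // DeleteHaaliCodecContext runs exactly once
    }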
struct MatroskaReaderContext {
public:

@@ -129,6 +156,7 @@ public:

~MatroskaReaderContext() {
free(Buffer);
if (ST.fp) fclose(ST.fp);
}
};

@@ -170,7 +198,8 @@ public:
};

int GetSWSCPUFlags();
int64_t GetSWSCPUFlags();
SwsContext *GetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat, int DstW, int DstH, PixelFormat DstFormat, int64_t Flags, int ColorSpace = -1);
int GetPPCPUFlags();
void ClearErrorInfo(FFMS_ErrorInfo *ErrorInfo);
FFMS_TrackType HaaliTrackTypeToFFTrackType(int TT);
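The new GetSwsContext declaration extends a plain sws_getContext call with an optional colorspace hint. A hedged sketch of what a wrapper along these lines can look like (illustration only, not the patch's actual body; hypothetical name, assumes <libswscale/swscale.h> is included as above):

    // Illustrative wrapper: build the scaler, then install coefficient tables for the
    // requested colorspace while keeping the ranges/levels the context was created with.
    SwsContext *ExampleGetSwsContext(int SrcW, int SrcH, PixelFormat SrcFormat,
        int DstW, int DstH, PixelFormat DstFormat, int64_t Flags, int ColorSpace) {
        SwsContext *Context = sws_getContext(SrcW, SrcH, SrcFormat, DstW, DstH, DstFormat,
            static_cast<int>(Flags), NULL, NULL, NULL);
        if (Context && ColorSpace != -1) {
            int *Inv, *Table, SrcRange, DstRange, Brightness, Contrast, Saturation;
            if (sws_getColorspaceDetails(Context, &Inv, &SrcRange, &Table, &DstRange,
                &Brightness, &Contrast, &Saturation) != -1)
                sws_setColorspaceDetails(Context, sws_getCoefficients(ColorSpace), SrcRange,
                    sws_getCoefficients(ColorSpace), DstRange, Brightness, Contrast, Saturation);
        }
        return Context;
    }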
@@ -178,11 +207,13 @@ void ReadFrame(uint64_t FilePos, unsigned int &FrameSize, TrackCompressionContex
bool AudioFMTIsFloat(AVSampleFormat FMT);
void InitNullPacket(AVPacket &pkt);
void FillAP(FFMS_AudioProperties &AP, AVCodecContext *CTX, FFMS_Track &Frames);

#ifdef HAALISOURCE
unsigned vtSize(VARIANT &vt);
void vtCopy(VARIANT& vt,void *dest);
void InitializeCodecContextFromHaaliInfo(CComQIPtr<IPropertyBag> pBag, AVCodecContext *CodecContext);
FFCodecContext InitializeCodecContextFromHaaliInfo(CComQIPtr<IPropertyBag> pBag);
#endif

void InitializeCodecContextFromMatroskaTrackInfo(TrackInfo *TI, AVCodecContext *CodecContext);
CodecID MatroskaToFFCodecID(char *Codec, void *CodecPrivate, unsigned int FourCC = 0, unsigned int BitsPerSample = 0);
FILE *ffms_fopen(const char *filename, const char *mode);
@@ -28,7 +28,7 @@ void FFMS_VideoSource::GetFrameCheck(int n) {

void FFMS_VideoSource::SetPP(const char *PP) {

#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
if (PPMode)
pp_free_mode(PPMode);
PPMode = NULL;
@@ -48,11 +48,11 @@ void FFMS_VideoSource::SetPP(const char *PP) {
#else
throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_UNSUPPORTED,
"FFMS2 was not compiled with postprocessing support");
#endif /* WITH_LIBPOSTPROC */
#endif /* FFMS_USE_POSTPROC */
}

void FFMS_VideoSource::ResetPP() {
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
if (PPContext)
pp_free_context(PPContext);
PPContext = NULL;
@@ -61,12 +61,12 @@ void FFMS_VideoSource::ResetPP() {
pp_free_mode(PPMode);
PPMode = NULL;

#endif /* WITH_LIBPOSTPROC */
#endif /* FFMS_USE_POSTPROC */
OutputFrame(DecodeFrame);
}

void FFMS_VideoSource::ReAdjustPP(PixelFormat VPixelFormat, int Width, int Height) {
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
if (PPContext)
pp_free_context(PPContext);
PPContext = NULL;
@@ -77,10 +77,17 @@ void FFMS_VideoSource::ReAdjustPP(PixelFormat VPixelFormat, int Width, int Heigh
int Flags = GetPPCPUFlags();

switch (VPixelFormat) {
case PIX_FMT_YUV420P: Flags |= PP_FORMAT_420; break;
case PIX_FMT_YUV422P: Flags |= PP_FORMAT_422; break;
case PIX_FMT_YUV411P: Flags |= PP_FORMAT_411; break;
case PIX_FMT_YUV444P: Flags |= PP_FORMAT_444; break;
case PIX_FMT_YUV420P:
case PIX_FMT_YUVJ420P:
Flags |= PP_FORMAT_420; break;
case PIX_FMT_YUV422P:
case PIX_FMT_YUVJ422P:
Flags |= PP_FORMAT_422; break;
case PIX_FMT_YUV411P:
Flags |= PP_FORMAT_411; break;
case PIX_FMT_YUV444P:
case PIX_FMT_YUVJ444P:
Flags |= PP_FORMAT_444; break;
default:
ResetPP();
throw FFMS_Exception(FFMS_ERROR_POSTPROCESSING, FFMS_ERROR_UNSUPPORTED,
@@ -93,7 +100,7 @@ void FFMS_VideoSource::ReAdjustPP(PixelFormat VPixelFormat, int Width, int Heigh
avpicture_alloc(&PPFrame, VPixelFormat, Width, Height);
#else
return;
#endif /* WITH_LIBPOSTPROC */
#endif /* FFMS_USE_POSTPROC */

}

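The widened switch above maps both limited-range and full-range (J) planar YUV formats onto the same libpostproc layout flags. The same mapping as a small stand-alone helper, for illustration only (hypothetical name, not part of the patch):

    // returns the PP_FORMAT_* flag for a supported planar YUV format, or 0 for anything else
    static int ExamplePPFormatFlag(PixelFormat Fmt) {
        switch (Fmt) {
            case PIX_FMT_YUV420P: case PIX_FMT_YUVJ420P: return PP_FORMAT_420;
            case PIX_FMT_YUV422P: case PIX_FMT_YUVJ422P: return PP_FORMAT_422;
            case PIX_FMT_YUV411P:                        return PP_FORMAT_411;
            case PIX_FMT_YUV444P: case PIX_FMT_YUVJ444P: return PP_FORMAT_444;
            default:                                     return 0; // caller must reject unsupported formats
        }
    }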
@@ -113,7 +120,7 @@ FFMS_Frame *FFMS_VideoSource::OutputFrame(AVFrame *Frame) {
ReAdjustOutputFormat(TargetPixelFormats, TargetWidth, TargetHeight, TargetResizer);
}

#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
if (PPMode) {
pp_postprocess(const_cast<const uint8_t **>(Frame->data), Frame->linesize, PPFrame.data, PPFrame.linesize, CodecContext->width, CodecContext->height, Frame->qscale_table, Frame->qstride, PPMode, PPContext, Frame->pict_type | (Frame->qscale_type ? PP_PICT_TYPE_QP2 : 0));
if (SWS) {
@@ -134,7 +141,7 @@ FFMS_Frame *FFMS_VideoSource::OutputFrame(AVFrame *Frame) {
}
}
}
#else // WITH_LIBPOSTPROC
#else // FFMS_USE_POSTPROC
if (SWS) {
sws_scale(SWS, const_cast<FFMS_SWS_CONST_PARAM uint8_t **>(Frame->data), Frame->linesize, 0, CodecContext->height, SWSFrame.data, SWSFrame.linesize);
CopyAVPictureFields(SWSFrame, LocalFrame);
@@ -145,7 +152,7 @@ FFMS_Frame *FFMS_VideoSource::OutputFrame(AVFrame *Frame) {
LocalFrame.Linesize[i] = Frame->linesize[i];
}
}
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC

LocalFrame.EncodedWidth = CodecContext->width;
LocalFrame.EncodedHeight = CodecContext->height;
@@ -184,10 +191,10 @@ FFMS_VideoSource::FFMS_VideoSource(const char *SourceFile, FFMS_Index *Index, in
"The index does not match the source file");

memset(&VP, 0, sizeof(VP));
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
PPContext = NULL;
PPMode = NULL;
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC
SWS = NULL;
LastFrameNum = 0;
CurrentFrame = 1;
@@ -205,14 +212,14 @@ FFMS_VideoSource::FFMS_VideoSource(const char *SourceFile, FFMS_Index *Index, in
DecodeFrame = avcodec_alloc_frame();

// Dummy allocations so the unallocated case doesn't have to be handled later
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
avpicture_alloc(&PPFrame, PIX_FMT_GRAY8, 16, 16);
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC
avpicture_alloc(&SWSFrame, PIX_FMT_GRAY8, 16, 16);
}

FFMS_VideoSource::~FFMS_VideoSource() {
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
if (PPMode)
pp_free_mode(PPMode);

@@ -220,7 +227,7 @@ FFMS_VideoSource::~FFMS_VideoSource() {
pp_free_context(PPContext);

avpicture_free(&PPFrame);
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC

if (SWS)
sws_freeContext(SWS);
@@ -259,8 +266,10 @@ void FFMS_VideoSource::ReAdjustOutputFormat(int64_t TargetFormats, int Width, in
}

if (CodecContext->pix_fmt != OutputFormat || Width != CodecContext->width || Height != CodecContext->height) {
SWS = sws_getContext(CodecContext->width, CodecContext->height, CodecContext->pix_fmt, Width, Height,
OutputFormat, GetSWSCPUFlags() | Resizer, NULL, NULL, NULL);
int ColorSpace = CodecContext->colorspace;
if (ColorSpace == AVCOL_SPC_UNSPECIFIED) ColorSpace = -1;
SWS = GetSwsContext(CodecContext->width, CodecContext->height, CodecContext->pix_fmt, Width, Height,
OutputFormat, GetSWSCPUFlags() | Resizer, ColorSpace);
if (SWS == NULL) {
ResetOutputFormat();
throw FFMS_Exception(FFMS_ERROR_SCALING, FFMS_ERROR_INVALID_ARGUMENT,
@@ -25,9 +25,9 @@ extern "C" {
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
#include <libpostproc/postprocess.h>
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC
}

// must be included after ffmpeg headers
@@ -54,10 +54,10 @@ extern "C" {
class FFMS_VideoSource {
friend class FFSourceResources<FFMS_VideoSource>;
private:
#ifdef WITH_LIBPOSTPROC
#ifdef FFMS_USE_POSTPROC
pp_context_t *PPContext;
pp_mode_t *PPMode;
#endif // WITH_LIBPOSTPROC
#endif // FFMS_USE_POSTPROC
SwsContext *SWS;
int LastFrameHeight;
int LastFrameWidth;
@@ -117,7 +117,7 @@ class FFMatroskaVideo : public FFMS_VideoSource {
private:
MatroskaFile *MF;
MatroskaReaderContext MC;
TrackCompressionContext *TCC;
TrackCompressionContext *TCC;
char ErrorMessage[256];
FFSourceResources<FFMS_VideoSource> Res;
size_t PacketNumber;
@@ -127,13 +127,13 @@ protected:
void Free(bool CloseCodec);
public:
FFMatroskaVideo(const char *SourceFile, int Track, FFMS_Index *Index, int Threads);
FFMS_Frame *GetFrame(int n);
FFMS_Frame *GetFrame(int n);
};

#ifdef HAALISOURCE

class FFHaaliVideo : public FFMS_VideoSource {
private:
FFCodecContext HCodecContext;
CComPtr<IMMContainer> pMMC;
AVBitStreamFilterContext *BitStreamFilter;
FFSourceResources<FFMS_VideoSource> Res;

@@ -143,7 +143,7 @@ protected:
void Free(bool CloseCodec);
public:
FFHaaliVideo(const char *SourceFile, int Track, FFMS_Index *Index, int Threads, enum FFMS_Sources SourceMode);
FFMS_Frame *GetFrame(int n);
FFMS_Frame *GetFrame(int n);
};

#endif // HAALISOURCE