//
//  libavg - Media Playback Engine.
//  Copyright (C) 2003-2008 Ulrich von Zadow
//
//  This library is free software; you can redistribute it and/or
//  modify it under the terms of the GNU Lesser General Public
//  License as published by the Free Software Foundation; either
//  version 2 of the License, or (at your option) any later version.
//
//  This library is distributed in the hope that it will be useful,
//  but WITHOUT ANY WARRANTY; without even the implied warranty of
//  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
//  Lesser General Public License for more details.
//
//  You should have received a copy of the GNU Lesser General Public
//  License along with this library; if not, write to the Free Software
//  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
//
//  Current versions can be found at www.libavg.de
22
#include "FFMpegDecoder.h"
23
#include "AsyncDemuxer.h"
24
#include "FFMpegDemuxer.h"
26
#include "../base/Exception.h"
27
#include "../base/Logger.h"
28
#include "../base/ScopeTimer.h"
29
#include "../base/ObjectCounter.h"
31
#include "../graphics/Filterflipuv.h"
32
#include "../graphics/Filterfliprgba.h"
42
using namespace boost;
44
#define SAMPLE_BUFFER_SIZE ((AVCODEC_MAX_AUDIO_FRAME_SIZE*3))
45
#define VOLUME_FADE_SAMPLES 100
47
#if LIBAVFORMAT_BUILD < ((50<<16)+(0<<8)+0)
48
#define PIX_FMT_BGRA PIX_FMT_RGBA32
49
#define PIX_FMT_YUYV422 PIX_FMT_YUV422
54
bool FFMpegDecoder::s_bInitialized = false;
55
mutex FFMpegDecoder::s_OpenMutex;
57
FFMpegDecoder::FFMpegDecoder()
63
m_bUseStreamFPS(true),
67
m_pAudioResampleContext(0),
74
m_bFirstPacket(false),
75
m_VideoStartTimestamp(-1),
76
m_LastVideoFrameTime(-1),
79
ObjectCounter::get()->incRef(&typeid(*this));
83
FFMpegDecoder::~FFMpegDecoder()
85
if (m_pFormatContext) {
88
ObjectCounter::get()->decRef(&typeid(*this));
92
void avcodecError(const string& sFilename, int err)
95
case AVERROR_NUMEXPECTED:
96
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
97
sFilename + ": Incorrect image filename syntax (use %%d to specify the image number:");
98
case AVERROR_INVALIDDATA:
99
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
100
sFilename + ": Error while parsing header");
102
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
103
sFilename + ": Unknown format");
106
s << "'" << sFilename << "': Error while opening file (Num:" << err << ")";
107
throw Exception(AVG_ERR_VIDEO_INIT_FAILED, s.str());
112
void dump_stream_info(AVFormatContext *s)
115
fprintf(stderr, " Track: %d\n", s->track);
116
if (s->title[0] != '\0')
117
fprintf(stderr, " Title: %s\n", s->title);
118
if (s->author[0] != '\0')
119
fprintf(stderr, " Author: %s\n", s->author);
120
if (s->album[0] != '\0')
121
fprintf(stderr, " Album: %s\n", s->album);
123
fprintf(stderr, " Year: %d\n", s->year);
124
if (s->genre[0] != '\0')
125
fprintf(stderr, " Genre: %s\n", s->genre);
128
int openCodec(AVFormatContext* pFormatContext, int streamIndex)
131
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
132
enc = &(pFormatContext->streams[streamIndex]->codec);
134
enc = pFormatContext->streams[streamIndex]->codec;
136
// enc->debug = 0x0001; // see avcodec.h
138
AVCodec * codec = avcodec_find_decoder(enc->codec_id);
139
if (!codec || avcodec_open(enc, codec) < 0) {
145
void FFMpegDecoder::open(const string& sFilename, bool bThreadedDemuxer)
147
mutex::scoped_lock lock(s_OpenMutex);
148
m_bThreadedDemuxer = bThreadedDemuxer;
151
m_bEOFPending = false;
152
m_VideoStartTimestamp = -1;
153
AVFormatParameters params;
155
m_sFilename = sFilename;
157
AVG_TRACE(Logger::MEMORY, "Opening " << sFilename);
158
memset(¶ms, 0, sizeof(params));
160
err = av_open_input_file(&m_pFormatContext, sFilename.c_str(),
164
avcodecError(sFilename, err);
167
err = av_find_stream_info(m_pFormatContext);
169
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
170
sFilename + ": Could not find codec parameters.");
172
// dump_format(m_pFormatContext, 0, sFilename.c_str(), 0);
174
av_read_play(m_pFormatContext);
176
// Find audio and video streams in the file
179
for (unsigned i = 0; i < m_pFormatContext->nb_streams; i++) {
180
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
181
AVCodecContext *enc = &m_pFormatContext->streams[i]->codec;
183
AVCodecContext *enc = m_pFormatContext->streams[i]->codec;
185
switch (enc->codec_type) {
186
case CODEC_TYPE_VIDEO:
187
if (m_VStreamIndex < 0) {
191
case CODEC_TYPE_AUDIO:
192
// Ignore the audio stream if we're using sync demuxing.
193
if (m_AStreamIndex < 0 && bThreadedDemuxer) {
201
// dump_format(m_pFormatContext, 0, m_sFilename.c_str(), 0);
202
// dump_stream_info(m_pFormatContext);
204
// Enable video stream demuxing
205
if (m_VStreamIndex >= 0) {
206
m_pVStream = m_pFormatContext->streams[m_VStreamIndex];
209
// Set video parameters
210
m_TimeUnitsPerSecond = 1.0/av_q2d(m_pVStream->time_base);
211
if (m_bUseStreamFPS) {
212
m_FPS = getNominalFPS();
214
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
215
m_Size = IntPoint(m_pVStream->codec.width, m_pVStream->codec.height);
217
m_Size = IntPoint(m_pVStream->codec->width, m_pVStream->codec->height);
219
m_bFirstPacket = true;
220
m_sFilename = sFilename;
221
m_LastVideoFrameTime = -1;
223
int rc = openCodec(m_pFormatContext, m_VStreamIndex);
227
avcodec_string(szBuf, sizeof(szBuf), m_pVStream->codec, 0);
228
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
229
sFilename + ": unsupported codec ("+szBuf+").");
231
m_PF = calcPixelFormat(true);
233
// Enable audio stream demuxing.
234
if (m_AStreamIndex >= 0) {
235
m_pAStream = m_pFormatContext->streams[m_AStreamIndex];
238
m_AudioPacketData = 0;
239
m_AudioPacketSize = 0;
241
m_LastAudioFrameTime = 0;
242
m_AudioStartTimestamp = 0;
244
if (m_pAStream->start_time != AV_NOPTS_VALUE) {
245
m_AudioStartTimestamp = double(av_q2d(m_pAStream->time_base))
246
*m_pAStream->start_time;
248
m_EffectiveSampleRate = (int)(m_pAStream->codec->sample_rate);
249
int rc = openCodec(m_pFormatContext, m_AStreamIndex);
253
avcodec_string(szBuf, sizeof(szBuf), m_pAStream->codec, 0);
255
AVG_TRACE(Logger::WARNING,
256
sFilename + ": unsupported codec ("+szBuf+"). Disabling audio.");
263
void FFMpegDecoder::startDecoding(bool bDeliverYCbCr, const AudioParams* pAP)
265
AVG_ASSERT(m_State == OPENED);
266
if (m_VStreamIndex >= 0) {
267
m_PF = calcPixelFormat(bDeliverYCbCr);
269
bool bAudioEnabled = (pAP && m_bThreadedDemuxer);
275
avcodec_close(m_pAStream->codec);
280
if (m_AStreamIndex >= 0) {
281
if (m_pAStream->codec->channels > m_AP.m_Channels) {
282
AVG_TRACE(Logger::WARNING,
283
m_sFilename << ": unsupported number of channels (" <<
284
m_pAStream->codec->channels << "). Disabling audio.");
288
m_pSampleBuffer = (char*)av_mallocz(SAMPLE_BUFFER_SIZE);
289
m_SampleBufferStart = 0;
290
m_SampleBufferEnd = 0;
291
m_SampleBufferLeft = SAMPLE_BUFFER_SIZE;
293
m_ResampleBufferSize = 0;
294
m_pResampleBuffer = 0;
295
m_ResampleBufferStart = 0;
296
m_ResampleBufferEnd = 0;
300
if (m_VStreamIndex < 0 && m_AStreamIndex < 0) {
301
throw Exception(AVG_ERR_VIDEO_INIT_FAILED,
302
m_sFilename + " does not contain any valid audio or video streams.");
306
AVG_ASSERT(!m_pDemuxer);
307
vector<int> streamIndexes;
308
if (m_VStreamIndex >= 0) {
309
streamIndexes.push_back(m_VStreamIndex);
311
if (m_AStreamIndex >= 0) {
312
streamIndexes.push_back(m_AStreamIndex);
314
if (m_bThreadedDemuxer) {
315
m_pDemuxer = new AsyncDemuxer(m_pFormatContext, streamIndexes);
317
m_pDemuxer = new FFMpegDemuxer(m_pFormatContext, streamIndexes);
323
void FFMpegDecoder::close()
325
mutex::scoped_lock lock(s_OpenMutex);
326
mutex::scoped_lock lock2(m_AudioMutex);
327
AVG_TRACE(Logger::MEMORY, "Closing " << m_sFilename);
332
// Close audio and video codecs
334
avcodec_close(m_pVStream->codec);
340
avcodec_close(m_pAStream->codec);
342
av_free_packet(m_AudioPacket);
343
delete m_AudioPacket;
346
if (m_pAudioResampleContext) {
347
audio_resample_close(m_pAudioResampleContext);
348
m_pAudioResampleContext = 0;
351
if (m_pSampleBuffer) {
352
av_free(m_pSampleBuffer);
355
if (m_pResampleBuffer) {
356
av_free(m_pResampleBuffer);
357
m_pResampleBuffer = 0;
360
m_AudioPacketData = 0;
361
m_AudioPacketSize = 0;
363
m_SampleBufferStart = 0;
364
m_SampleBufferEnd = 0;
365
m_SampleBufferLeft = 0;
367
m_ResampleBufferStart = 0;
368
m_ResampleBufferEnd = 0;
369
m_ResampleBufferSize = 0;
371
m_LastAudioFrameTime = 0;
372
m_AudioStartTimestamp = 0;
377
if (m_pFormatContext) {
378
av_close_input_file(m_pFormatContext);
379
m_pFormatContext = 0;
383
sws_freeContext(m_pSwsContext);
389
IVideoDecoder::DecoderState FFMpegDecoder::getState() const
394
VideoInfo FFMpegDecoder::getVideoInfo() const
396
AVG_ASSERT(m_State != CLOSED);
398
if (m_pVStream || m_pAStream) {
399
duration = getDuration();
401
VideoInfo info(duration, m_pFormatContext->bit_rate, m_pVStream != 0,
404
info.setVideoData(m_Size, getStreamPF(), getNumFrames(), getNominalFPS(), m_FPS,
405
m_pVStream->codec->codec->name);
408
AVCodecContext * pACodec = m_pAStream->codec;
409
info.setAudioData(pACodec->codec->name, pACodec->sample_rate,
415
void FFMpegDecoder::seek(double destTime)
417
AVG_ASSERT(m_State == DECODING);
418
if (m_bFirstPacket && m_pVStream) {
422
m_pDemuxer->seek(destTime + getStartTime());
424
m_LastVideoFrameTime = destTime - 1.0/m_FPS;
427
mutex::scoped_lock lock(m_AudioMutex);
428
m_LastAudioFrameTime = destTime;
429
m_SampleBufferStart = m_SampleBufferEnd = 0;
430
m_SampleBufferLeft = SAMPLE_BUFFER_SIZE;
431
m_ResampleBufferStart = m_ResampleBufferEnd = 0;
432
m_AudioPacketSize = 0;
438
void FFMpegDecoder::loop()
443
IntPoint FFMpegDecoder::getSize() const
445
AVG_ASSERT(m_State != CLOSED);
449
int FFMpegDecoder::getCurFrame() const
451
AVG_ASSERT(m_State != CLOSED);
452
return int(m_LastVideoFrameTime*getNominalFPS()+0.5);
455
int FFMpegDecoder::getNumFramesQueued() const
460
double FFMpegDecoder::getCurTime(StreamSelect stream) const
462
AVG_ASSERT(m_State != CLOSED);
466
AVG_ASSERT(m_pVStream);
467
return m_LastVideoFrameTime;
469
AVG_ASSERT(m_pAStream);
470
return m_LastAudioFrameTime;
476
double FFMpegDecoder::getDuration() const
478
AVG_ASSERT(m_State != CLOSED);
480
AVRational time_base;
482
duration=m_pVStream->duration;
483
time_base=m_pVStream->time_base;
485
duration=m_pAStream->duration;
486
time_base=m_pAStream->time_base;
488
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
489
return double(duration)/AV_TIME_BASE;
491
return double(duration)*av_q2d(time_base);
495
double FFMpegDecoder::getNominalFPS() const
497
AVG_ASSERT(m_State != CLOSED);
498
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
499
return m_pVStream->r_frame_rate;
501
return av_q2d(m_pVStream->r_frame_rate);
505
double FFMpegDecoder::getFPS() const
507
AVG_ASSERT(m_State != CLOSED);
511
void FFMpegDecoder::setFPS(double fps)
513
m_bUseStreamFPS = (fps == 0);
515
m_FPS = calcStreamFPS();
521
double FFMpegDecoder::getVolume() const
523
AVG_ASSERT(m_State != CLOSED);
527
void FFMpegDecoder::setVolume(double volume)
530
if (m_State != DECODING) {
531
m_LastVolume = volume;
535
void copyPlaneToBmp(BitmapPtr pBmp, unsigned char * pData, int stride)
537
unsigned char * pSrc=pData;
538
unsigned char * pDest= pBmp->getPixels();
539
int destStride = pBmp->getStride();
540
int height = pBmp->getSize().y;
541
int width = pBmp->getSize().x;
542
for (int y = 0; y < height; y++) {
543
memcpy(pDest, pSrc, width);
549
static ProfilingZoneID RenderToBmpProfilingZone("FFMpeg: renderToBmp");
550
static ProfilingZoneID CopyImageProfilingZone("FFMpeg: copy image");
552
FrameAvailableCode FFMpegDecoder::renderToBmps(vector<BitmapPtr>& pBmps,
555
AVG_ASSERT(m_State == DECODING);
556
ScopeTimer timer(RenderToBmpProfilingZone);
558
FrameAvailableCode frameAvailable;
559
if (timeWanted == -1) {
561
frameAvailable = FA_NEW_FRAME;
563
frameAvailable = readFrameForTime(frame, timeWanted);
565
if (!m_bVideoEOF && frameAvailable == FA_NEW_FRAME) {
566
if (pixelFormatIsPlanar(m_PF)) {
567
ScopeTimer timer(CopyImageProfilingZone);
568
for (unsigned i = 0; i < pBmps.size(); ++i) {
569
copyPlaneToBmp(pBmps[i], frame.data[i], frame.linesize[i]);
572
convertFrameToBmp(frame, pBmps[0]);
576
return FA_USE_LAST_FRAME;
579
void FFMpegDecoder::throwAwayFrame(double timeWanted)
581
AVG_ASSERT(m_State == DECODING);
583
readFrameForTime(frame, timeWanted);
586
bool FFMpegDecoder::isEOF(StreamSelect stream) const
588
AVG_ASSERT(m_State == DECODING);
591
return (!m_pAStream || m_bAudioEOF);
593
return (!m_pVStream || m_bVideoEOF);
595
return isEOF(SS_VIDEO) && isEOF(SS_AUDIO);
601
int FFMpegDecoder::copyRawAudio(unsigned char* pBuffer, int size)
603
int bytesWritten = min(m_SampleBufferEnd - m_SampleBufferStart, size);
604
memcpy(pBuffer, m_pSampleBuffer + m_SampleBufferStart, bytesWritten);
606
m_SampleBufferStart += bytesWritten;
608
if (m_SampleBufferStart == m_SampleBufferEnd) {
609
m_SampleBufferStart = 0;
610
m_SampleBufferEnd = 0;
611
m_SampleBufferLeft = SAMPLE_BUFFER_SIZE;
617
int FFMpegDecoder::copyResampledAudio(unsigned char* pBuffer, int size)
619
int bytesWritten = 0;
621
// If there is no buffered resampled data, resample some more
622
if (m_ResampleBufferStart >= m_ResampleBufferEnd) {
626
// If we have some data in the resample buffer, copy it over
627
if (m_ResampleBufferStart < m_ResampleBufferEnd) {
628
bytesWritten = min(m_ResampleBufferEnd - m_ResampleBufferStart, size);
629
memcpy(pBuffer, m_pResampleBuffer + m_ResampleBufferStart, bytesWritten);
631
m_ResampleBufferStart += bytesWritten;
632
if (m_ResampleBufferStart >= m_ResampleBufferEnd) {
633
m_ResampleBufferStart = 0;
634
m_ResampleBufferEnd = 0;
637
if (m_SampleBufferStart == m_SampleBufferEnd) {
638
m_SampleBufferStart = 0;
639
m_SampleBufferEnd = 0;
640
m_SampleBufferLeft = SAMPLE_BUFFER_SIZE;
647
void FFMpegDecoder::resampleAudio()
649
if (!m_pAudioResampleContext) {
650
m_pAudioResampleContext = audio_resample_init(
651
m_AP.m_Channels, m_pAStream->codec->channels,
652
m_AP.m_SampleRate, m_EffectiveSampleRate);
655
if (!m_pResampleBuffer) {
656
m_ResampleBufferSize = (int)(SAMPLE_BUFFER_SIZE *
657
((double)m_AP.m_SampleRate / (double)m_EffectiveSampleRate));
658
m_pResampleBuffer = (char*)av_mallocz(m_ResampleBufferSize);
662
(m_SampleBufferEnd - m_SampleBufferStart) /
663
(2 * m_pAStream->codec->channels);
665
int outputSamples = audio_resample(m_pAudioResampleContext,
666
(short*)m_pResampleBuffer,
667
(short*)(m_pSampleBuffer + m_SampleBufferStart),
670
// Adjust buffer pointers
671
m_ResampleBufferEnd += outputSamples * 2 * m_AP.m_Channels;
672
m_SampleBufferStart += inputSamples * 2 * m_pAStream->codec->channels;
675
int FFMpegDecoder::decodeAudio()
677
// Save current size of the audio buffer
678
int lastSampleBufferSize = m_SampleBufferLeft;
680
#if LIBAVCODEC_BUILD > ((51<<16)+(11<<8)+0)
681
// Decode some data from packet into the audio buffer
682
int packetBytesDecoded = avcodec_decode_audio2(
684
(short*)(m_pSampleBuffer + m_SampleBufferEnd),
689
int packetBytesDecoded = avcodec_decode_audio(
691
(short*)(m_pSampleBuffer + m_SampleBufferEnd),
697
// Skip frame on error
698
if (packetBytesDecoded < 0) {
702
// Did not get any data, try again
703
if (packetBytesDecoded == 0) {
707
// Adjust audio buffer pointers
708
m_SampleBufferEnd += m_SampleBufferLeft;
709
m_SampleBufferLeft = lastSampleBufferSize - m_SampleBufferLeft;
711
// Adjust packet data pointers
712
m_AudioPacketData += packetBytesDecoded;
713
m_AudioPacketSize -= packetBytesDecoded;
714
return packetBytesDecoded;
717
int FFMpegDecoder::fillAudioBuffer(AudioBufferPtr pBuffer)
719
AVG_ASSERT(m_State == DECODING);
720
mutex::scoped_lock lock(m_AudioMutex);
722
unsigned char* pOutputBuffer = (unsigned char*)(pBuffer->getData());
723
int outputAudioBufferSize = pBuffer->getNumBytes();
725
AVG_ASSERT (m_pAStream);
730
int packetBytesDecoded;
732
unsigned char* pCurBufferPos = pOutputBuffer;
733
int bufferLeft = outputAudioBufferSize;
734
bool bFormatMatch = (m_EffectiveSampleRate == m_AP.m_SampleRate &&
735
m_pAStream->codec->channels == m_AP.m_Channels);
739
// Consume any data left in the sample buffers
740
while (m_SampleBufferStart < m_SampleBufferEnd ||
741
m_ResampleBufferStart < m_ResampleBufferEnd)
743
// If the output format is different from the decoded format,
744
// then convert it, else copy it over
746
bytesProduced = copyRawAudio(pCurBufferPos, bufferLeft);
748
bytesProduced = copyResampledAudio(pCurBufferPos, bufferLeft);
751
pCurBufferPos += bytesProduced;
752
bufferLeft -= bytesProduced;
754
m_LastAudioFrameTime += (double(bytesProduced) /
755
(2 * m_AP.m_Channels * m_AP.m_SampleRate));
756
if (bufferLeft == 0) {
758
return pBuffer->getNumFrames();
762
if (m_AudioPacketSize <= 0)
765
packetBytesDecoded = decodeAudio();
767
// Skip frame on error
768
if (packetBytesDecoded < 0)
771
// Did not get any data, try again
772
if (packetBytesDecoded == 0)
776
// We have decoded all data in the packet, free it
778
av_free_packet(m_AudioPacket);
779
delete m_AudioPacket;
782
// Get a new packet from the audio stream
783
m_AudioPacket = m_pDemuxer->getPacket(m_AStreamIndex);
784
if (!m_AudioPacket) {
787
return pBuffer->getNumFrames()-bufferLeft/(pBuffer->getFrameSize());
790
// Initialize packet data pointers
791
m_AudioPacketData = m_AudioPacket->data;
792
m_AudioPacketSize = m_AudioPacket->size;
796
PixelFormat FFMpegDecoder::calcPixelFormat(bool bUseYCbCr)
798
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
799
AVCodecContext *enc = &m_pVStream->codec;
801
AVCodecContext *enc = m_pVStream->codec;
804
switch(enc->pix_fmt) {
805
case PIX_FMT_YUV420P:
807
case PIX_FMT_YUVJ420P:
809
case PIX_FMT_YUVA420P:
815
if (enc->pix_fmt == PIX_FMT_BGRA || enc->pix_fmt == PIX_FMT_YUVA420P) {
821
static ProfilingZoneID ConvertImageLibavgProfilingZone(
822
"FFMpeg: colorspace conv (libavg)");
823
static ProfilingZoneID ConvertImageSWSProfilingZone("FFMpeg: colorspace conv (SWS)");
824
static ProfilingZoneID SetAlphaProfilingZone("FFMpeg: set alpha channel");
826
void FFMpegDecoder::convertFrameToBmp(AVFrame& frame, BitmapPtr pBmp)
829
unsigned char * pDestBits = pBmp->getPixels();
830
destPict.data[0] = pDestBits;
831
destPict.linesize[0] = pBmp->getStride();
832
#if LIBAVFORMAT_BUILD >= ((50<<16)+(0<<8)+0)
833
::PixelFormat destFmt;
837
switch(pBmp->getPixelFormat()) {
840
// XXX: Unused and broken.
841
destFmt = PIX_FMT_BGRA;
845
destFmt = PIX_FMT_BGRA;
848
destFmt = PIX_FMT_RGB24;
851
destFmt = PIX_FMT_BGR24;
854
destFmt = PIX_FMT_YUYV422;
857
AVG_TRACE(Logger::ERROR, "FFMpegDecoder: Dest format "
858
<< pBmp->getPixelFormat() << " not supported.");
860
destFmt = PIX_FMT_BGRA;
862
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
863
AVCodecContext *enc = &m_pVStream->codec;
865
AVCodecContext *enc = m_pVStream->codec;
868
if (destFmt == PIX_FMT_BGRA && (enc->pix_fmt == PIX_FMT_YUV420P ||
869
enc->pix_fmt == PIX_FMT_YUVJ420P))
871
ScopeTimer timer(ConvertImageLibavgProfilingZone);
872
BitmapPtr pBmpY(new Bitmap(pBmp->getSize(), I8, frame.data[0],
873
frame.linesize[0], false));
874
BitmapPtr pBmpU(new Bitmap(pBmp->getSize(), I8, frame.data[1],
875
frame.linesize[1], false));
876
BitmapPtr pBmpV(new Bitmap(pBmp->getSize(), I8, frame.data[2],
877
frame.linesize[2], false));
878
pBmp->copyYUVPixels(*pBmpY, *pBmpU, *pBmpV, enc->pix_fmt == PIX_FMT_YUVJ420P);
880
if (!m_pSwsContext) {
881
m_pSwsContext = sws_getContext(enc->width, enc->height, enc->pix_fmt,
882
enc->width, enc->height, destFmt, SWS_BICUBIC,
884
AVG_ASSERT(m_pSwsContext);
887
ScopeTimer timer(ConvertImageSWSProfilingZone);
888
sws_scale(m_pSwsContext, frame.data, frame.linesize, 0,
889
enc->height, destPict.data, destPict.linesize);
891
if (pBmp->getPixelFormat() == B8G8R8X8) {
892
ScopeTimer timer(SetAlphaProfilingZone);
893
// Make sure the alpha channel is white.
894
// TODO: This is slow. Make OpenGL do it.
895
unsigned char * pLine = pBmp->getPixels();
896
IntPoint size = pBmp->getSize();
897
for (int y = 0; y < size.y; ++y) {
898
unsigned char * pPixel = pLine;
899
for (int x = 0; x < size.x; ++x) {
903
pLine = pLine + pBmp->getStride();
910
PixelFormat FFMpegDecoder::getPixelFormat() const
912
AVG_ASSERT(m_State != CLOSED);
916
void FFMpegDecoder::initVideoSupport()
918
if (!s_bInitialized) {
920
s_bInitialized = true;
921
// Tune libavcodec console spam.
922
// av_log_set_level(AV_LOG_DEBUG);
923
av_log_set_level(AV_LOG_QUIET);
927
int FFMpegDecoder::getNumFrames() const
929
AVG_ASSERT(m_State != CLOSED);
930
// This is broken for some videos, but the code here is correct.
931
// So fix ffmpeg :-).
932
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
933
return m_pVStream->r_frame_rate*(m_pVStream->duration/AV_TIME_BASE);
935
return int(m_pVStream->nb_frames);
939
FrameAvailableCode FFMpegDecoder::readFrameForTime(AVFrame& frame, double timeWanted)
941
AVG_ASSERT(m_State == DECODING);
942
// cerr << " readFrameForTime " << timeWanted << ", LastFrameTime= "
943
// << m_LastVideoFrameTime << ", diff= " << m_LastVideoFrameTime-timeWanted
945
AVG_ASSERT(timeWanted != -1);
946
double timePerFrame = 1.0/m_FPS;
947
if (timeWanted-m_LastVideoFrameTime < 0.5*timePerFrame) {
948
// cerr << "DISPLAY AGAIN." << endl;
949
// The last frame is still current. Display it again.
950
return FA_USE_LAST_FRAME;
952
double frameTime = -1;
953
while (frameTime-timeWanted < -0.5*timePerFrame && !m_bVideoEOF) {
954
frameTime = readFrame(frame);
955
// cerr << " readFrame returned time " << frameTime << ", diff= " <<
956
// frameTime-timeWanted << endl;
958
// cerr << "NEW FRAME." << endl;
963
static ProfilingZoneID DecodeProfilingZone("FFMpeg: decode");
965
double FFMpegDecoder::readFrame(AVFrame& frame)
967
AVG_ASSERT(m_State == DECODING);
968
ScopeTimer timer(DecodeProfilingZone);
972
m_bEOFPending = false;
973
return m_LastVideoFrameTime;
975
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
976
AVCodecContext *enc = &m_pVStream->codec;
978
AVCodecContext *enc = m_pVStream->codec;
981
AVPacket* pPacket = 0;
982
double frameTime = -1;
983
while (!bGotPicture && !m_bVideoEOF) {
984
pPacket = m_pDemuxer->getPacket(m_VStreamIndex);
985
m_bFirstPacket = false;
987
int len1 = avcodec_decode_video(enc, &frame, &bGotPicture, pPacket->data,
990
AVG_ASSERT(len1 == pPacket->size);
993
frameTime = getFrameTime(pPacket->dts);
995
av_free_packet(pPacket);
998
// No more packets -> EOF. Decode the last data we got.
999
avcodec_decode_video(enc, &frame, &bGotPicture, 0, 0);
1001
m_bEOFPending = true;
1005
// We don't have a timestamp for the last frame, so we'll
1006
// calculate it based on the frame before.
1007
frameTime = m_LastVideoFrameTime+1.0/m_FPS;
1008
m_LastVideoFrameTime = frameTime;
1011
AVG_ASSERT(frameTime != -1)
1014
cerr << "coded_picture_number: " << frame.coded_picture_number <<
1015
", display_picture_number: " << frame.display_picture_number <<
1016
", pts: " << frame.pts << endl;
1018
cerr << "key_frame: " << frame.key_frame <<
1019
", pict_type: " << frame.pict_type << endl;
1020
AVFrac spts = m_pVStream->pts;
1021
cerr << "Stream.pts: " << spts.val + double(spts.num)/spts.den << endl;
1025
double FFMpegDecoder::getFrameTime(long long dts)
1027
if (m_VideoStartTimestamp == -1) {
1028
m_VideoStartTimestamp = double(dts)/m_TimeUnitsPerSecond;
1031
if (m_bUseStreamFPS) {
1032
frameTime = double(dts)/m_TimeUnitsPerSecond-m_VideoStartTimestamp;
1034
if (m_LastVideoFrameTime == -1) {
1037
frameTime = m_LastVideoFrameTime + 1.0/m_FPS;
1040
m_LastVideoFrameTime = frameTime;
1044
double FFMpegDecoder::getStartTime()
1047
return m_VideoStartTimestamp;
1049
return m_AudioStartTimestamp;
1053
double FFMpegDecoder::calcStreamFPS() const
1055
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
1056
return m_pVStream->r_frame_rate;
1058
return (m_pVStream->r_frame_rate.num/m_pVStream->r_frame_rate.den);
1062
string FFMpegDecoder::getStreamPF() const
1064
#if LIBAVFORMAT_BUILD < ((49<<16)+(0<<8)+0)
1065
AVCodecContext *pCodec = &m_pVStream->codec;
1067
AVCodecContext *pCodec = m_pVStream->codec;
1069
::PixelFormat pf = pCodec->pix_fmt;
1070
return avcodec_get_pix_fmt_name(pf);
1073
// TODO: this should be logarithmic...
1074
void FFMpegDecoder::volumize(AudioBufferPtr pBuffer)
1076
double curVol = m_Volume;
1077
double volDiff = m_LastVolume - curVol;
1079
if (curVol == 1.0 && volDiff == 0.0) {
1083
short * pData = pBuffer->getData();
1084
for (int i = 0; i < pBuffer->getNumFrames()*pBuffer->getNumChannels(); i++) {
1086
if (volDiff != 0 && i < VOLUME_FADE_SAMPLES) {
1087
fadeVol = volDiff * (VOLUME_FADE_SAMPLES - i) / VOLUME_FADE_SAMPLES;
1090
int s = int(pData[i] * (curVol + fadeVol));
1099
m_LastVolume = curVol;