caps = gst_ffmpeg_codectype_to_caps (oclass->in_plugin->type,
    ffmpegdec->context);

/* If a demuxer provided a framerate then use it (#313970) */

switch (oclass->in_plugin->type) {
  case CODEC_TYPE_VIDEO:
    /* If a demuxer provided a framerate then use it (#313970) */
    if (ffmpegdec->format.video.fps_n != -1) {
      gst_caps_set_simple (caps, "framerate",
          GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
          ffmpegdec->format.video.fps_d, NULL);

    gst_ffmpegdec_add_pixel_aspect_ratio (ffmpegdec,
        gst_caps_get_structure (caps, 0));

  case CODEC_TYPE_AUDIO:

if (!gst_pad_set_caps (ffmpegdec->srcpad, caps))

gst_caps_unref (caps);

  GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
      ("could not find caps for codec (%s), unknown type",
          oclass->in_plugin->name));

  GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
      ("Could not set caps for ffmpeg decoder (%s), not fixed?",
          oclass->in_plugin->name));
  gst_caps_unref (caps);
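/* Illustrative sketch (not part of the original file): what the framerate
 * fixup above amounts to for a demuxer-provided 25/1 fps. The caps come from
 * gst_ffmpeg_codectype_to_caps(); the function name and values are invented. */
static void
set_framerate_example (GstCaps * caps)
{
  /* e.g. video/x-raw-yuv,format=(fourcc)I420,width=320,height=240
   * becomes ...,framerate=(fraction)25/1 before gst_pad_set_caps() */
  gst_caps_set_simple (caps, "framerate", GST_TYPE_FRACTION, 25, 1, NULL);
}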
/* perform qos calculations before decoding the next frame.
 * Sets the hurry_up flag and if things are really bad, skips to the next
 * Returns TRUE if the frame should be decoded, FALSE if the frame can be dropped
gst_ffmpegdec_do_qos (GstFFMpegDec * ffmpegdec, GstClockTime timestamp,
    gboolean * mode_switch)
  GstClockTimeDiff diff;
  GstClockTime qostime, earliest_time;

  *mode_switch = FALSE;

  /* no timestamp, can't do QoS */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (timestamp)))

  /* get latest QoS observation values */
  gst_ffmpegdec_read_qos (ffmpegdec, &proportion, &earliest_time);

  /* skip qos if we have no observation (yet) */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (earliest_time))) {
    /* no hurry_up initially */
    ffmpegdec->context->hurry_up = 0;

  /* qos is done on running time */
  qostime = gst_segment_to_running_time (&ffmpegdec->segment, GST_FORMAT_TIME,

  /* see how our next timestamp relates to the latest qos timestamp. negative
   * values mean we are early, positive values mean we are too late. */
  diff = GST_CLOCK_DIFF (qostime, earliest_time);

  GST_DEBUG_OBJECT (ffmpegdec, "QOS: qostime %" GST_TIME_FORMAT
      ", earliest %" GST_TIME_FORMAT, GST_TIME_ARGS (qostime),
      GST_TIME_ARGS (earliest_time));

  /* if we're using less than 40% of the available time, we can try to
   * speed up again when we were slow. */
  if (proportion < 0.4 && diff < 0) {

  /* if we're more than two seconds late, switch to the next keyframe */
  /* FIXME, let the demuxer decide what's the best since we might be dropping
   * a lot of frames when the keyframe is far away or we even might not get a new
   * keyframe at all.. */
  if (diff > ((GstClockTimeDiff) GST_SECOND * 2)
      && !ffmpegdec->waiting_for_key) {
    goto skip_to_keyframe;
  } else if (diff >= 0) {
    /* we're too slow, try to speed up */
    if (ffmpegdec->waiting_for_key) {
      /* we were waiting for a keyframe, that's ok */

  /* switch to hurry_up mode */

  if (ffmpegdec->context->hurry_up != 0) {
    ffmpegdec->context->hurry_up = 0;

  GST_DEBUG_OBJECT (ffmpegdec, "QOS: normal mode %g < 0.4", proportion);

  ffmpegdec->context->hurry_up = 1;
  ffmpegdec->waiting_for_key = TRUE;
  *mode_switch = TRUE;
  GST_DEBUG_OBJECT (ffmpegdec,
      "QOS: keyframe, %" G_GINT64_FORMAT " > GST_SECOND/2", diff);
  /* we can skip the current frame */

  if (ffmpegdec->context->hurry_up != 1) {
    ffmpegdec->context->hurry_up = 1;
    *mode_switch = TRUE;
    GST_DEBUG_OBJECT (ffmpegdec,
        "QOS: hurry up, diff %" G_GINT64_FORMAT " >= 0", diff);
/* returns TRUE if buffer is within segment, else FALSE.
 * if the buffer is on a segment border, its timestamp and duration will be clipped */
clip_video_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
    GstClockTime in_dur)
  gboolean res = TRUE;
  gint64 cstart, cstop;

  GST_LOG_OBJECT (dec,
      "timestamp:%" GST_TIME_FORMAT " , duration:%" GST_TIME_FORMAT,
      GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur));

  /* can't clip without TIME segment */
  if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))

  /* we need a start time */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))

  /* generate valid stop, if duration unknown, we have unknown stop */
  stop =
      GST_CLOCK_TIME_IS_VALID (in_dur) ? (in_ts + in_dur) : GST_CLOCK_TIME_NONE;

  res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &cstart,

  if (G_UNLIKELY (!res))

  /* update timestamp and possibly duration if the clipped stop time is
  GST_BUFFER_TIMESTAMP (buf) = cstart;
  if (GST_CLOCK_TIME_IS_VALID (cstop))
    GST_BUFFER_DURATION (buf) = cstop - cstart;

  GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));
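/* Illustrative usage sketch (not part of the original file): clipping a
 * hypothetical buffer against a 1 s .. 10 s TIME segment with
 * gst_segment_clip(), which is what the function above relies on. The
 * function name and all values are invented for the example. */
static void
clip_example (void)
{
  GstSegment seg;
  gint64 cstart, cstop;

  gst_segment_init (&seg, GST_FORMAT_TIME);
  gst_segment_set_newsegment (&seg, FALSE, 1.0, GST_FORMAT_TIME,
      1 * GST_SECOND, 10 * GST_SECOND, 1 * GST_SECOND);

  /* buffer 0.9 s .. 1.1 s: only the part inside the segment survives */
  if (gst_segment_clip (&seg, GST_FORMAT_TIME, 900 * GST_MSECOND,
          1100 * GST_MSECOND, &cstart, &cstop)) {
    /* cstart == 1 s, cstop == 1.1 s -> clipped duration is 100 ms;
     * a buffer entirely outside the segment would return FALSE and be dropped */
  }
}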
/* figure out if the current picture is a keyframe, return TRUE if that is
check_keyframe (GstFFMpegDec * ffmpegdec)
  GstFFMpegDecClass *oclass;
  gboolean is_itype = FALSE;
  gboolean is_reference = FALSE;
  gboolean iskeyframe;

  /* figure out if we are dealing with a keyframe */
  oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
  is_reference = (ffmpegdec->picture->reference == 1);

  iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
      || (oclass->in_plugin->id == CODEC_ID_INDEO3)
      || (oclass->in_plugin->id == CODEC_ID_MSZH)
      || (oclass->in_plugin->id == CODEC_ID_ZLIB)
      || (oclass->in_plugin->id == CODEC_ID_VP3)
      || (oclass->in_plugin->id == CODEC_ID_HUFFYUV);

  GST_LOG_OBJECT (ffmpegdec,
      "current picture: type: %d, is_keyframe:%d, is_itype:%d, is_reference:%d",
      ffmpegdec->picture->pict_type, iskeyframe, is_itype, is_reference);
/* get an outbuf buffer with the current picture */
static GstFlowReturn
get_output_buffer (GstFFMpegDec * ffmpegdec, GstBuffer ** outbuf)

  ret = GST_FLOW_ERROR;

  /* libavcodec constantly crashes on stupid buffer allocation
   * errors inside. This drives me crazy, so we let it allocate
   * its own buffers and copy to our own buffer afterwards... */
  /* BUFFER CREATION */
  if (ffmpegdec->picture->opaque != NULL) {
    *outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
    if (*outbuf == ffmpegdec->last_buffer)
      ffmpegdec->last_buffer = NULL;
    if (*outbuf != NULL)

  /* see if we need renegotiation */
  if (G_UNLIKELY (!gst_ffmpegdec_negotiate (ffmpegdec)))
    goto negotiate_failed;

  fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
      ffmpegdec->context->width, ffmpegdec->context->height);

  if (!ffmpegdec->context->palctrl) {
    ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
        GST_BUFFER_OFFSET_NONE, fsize,
        GST_PAD_CAPS (ffmpegdec->srcpad), outbuf);
    if (G_UNLIKELY (ret != GST_FLOW_OK))

    /* for paletted data we can't use pad_alloc_buffer(), because
     * fsize contains the size of the palette, so the overall size
     * is bigger than ffmpegcolorspace's unit size, which will
     * prompt GstBaseTransform to complain endlessly ... */
    *outbuf = gst_buffer_new_and_alloc (fsize);
    gst_buffer_set_caps (*outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));

  /* original ffmpeg code does not handle odd sizes correctly.
   * This patched up version does */
  gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (*outbuf),
      ffmpegdec->context->pix_fmt,
      ffmpegdec->context->width, ffmpegdec->context->height);

  /* the original convert function did not do the right thing, this
   * is a patched up version that adjusts width/height so that the
   * ffmpeg one works correctly. */
  gst_ffmpeg_img_convert (&pic, ffmpegdec->context->pix_fmt,
      (AVPicture *) ffmpegdec->picture,
      ffmpegdec->context->pix_fmt,
      ffmpegdec->context->width, ffmpegdec->context->height);

  GST_DEBUG_OBJECT (ffmpegdec, "negotiate failed");
  return GST_FLOW_NOT_NEGOTIATED;

  GST_DEBUG_OBJECT (ffmpegdec, "pad_alloc failed");
/* gst_ffmpegdec_[video|audio]_frame:
 * data: pointer to the data to decode
 * size: size of data in bytes
 * in_timestamp: incoming timestamp.
 * in_duration: incoming duration.
 * outbuf: outgoing buffer. Different from NULL ONLY if it contains decoded data.
 * Returns: number of bytes used in decoding. The check for successful decode is
 *   outbuf being non-NULL.
gst_ffmpegdec_video_frame (GstFFMpegDec * ffmpegdec,
    guint8 * data, guint size,
    GstClockTime in_timestamp, GstClockTime in_duration,
    GstBuffer ** outbuf, GstFlowReturn * ret)
  gboolean iskeyframe;
  gboolean mode_switch;

  ffmpegdec->context->opaque = ffmpegdec;

  /* run QoS code, returns FALSE if we can skip decoding this
   * frame entirely. */
  if (G_UNLIKELY (!gst_ffmpegdec_do_qos (ffmpegdec, in_timestamp, &mode_switch)))

  /* in case we skip frames */
  ffmpegdec->picture->pict_type = -1;

  /* now decode the frame */
  len = avcodec_decode_video (ffmpegdec->context,
      ffmpegdec->picture, &have_data, data, size);

  GST_DEBUG_OBJECT (ffmpegdec, "after decode: len %d, have_data %d",

  /* when we are in hurry_up mode, don't complain when ffmpeg returned
   * no data because we told it to skip stuff. */
  if (len < 0 && (mode_switch || ffmpegdec->context->hurry_up))

  /* no data, we're done */
  if (len < 0 || have_data <= 0)

  GST_DEBUG_OBJECT (ffmpegdec, "picture: pts %" G_GUINT64_FORMAT, ffmpegdec->picture->pts);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: num %d", ffmpegdec->picture->coded_picture_number);
  GST_DEBUG_OBJECT (ffmpegdec, "picture: display %d", ffmpegdec->picture->display_picture_number);

  /* check if we are dealing with a keyframe here */
  iskeyframe = check_keyframe (ffmpegdec);

  /* when we're waiting for a keyframe, see if we have one or drop the current
  if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
    if (G_LIKELY (!iskeyframe))
      goto drop_non_keyframe;

    /* we have a keyframe, we can stop waiting for one */
    ffmpegdec->waiting_for_key = FALSE;

  /* get a handle to the output buffer */
  *ret = get_output_buffer (ffmpegdec, outbuf);
  if (G_UNLIKELY (*ret != GST_FLOW_OK))

   * 1) Copy parse context timestamp if present and valid (FIXME)
   * 2) Copy input timestamp if valid
   * 3) else interpolate from previous input timestamp

  /* this does not work reliably, for some files this works fine, for other
   * files it returns the same timestamp twice. Leaving the code here for when
   * the parsers are improved in ffmpeg. */
  if (ffmpegdec->pctx) {
    GST_DEBUG_OBJECT (ffmpegdec, "picture: ffpts %" G_GUINT64_FORMAT, ffmpegdec->pctx->pts);
    if (ffmpegdec->pctx->pts != AV_NOPTS_VALUE) {
      in_timestamp = gst_ffmpeg_time_ff_to_gst (ffmpegdec->pctx->pts,
          ffmpegdec->context->time_base);

  if (!GST_CLOCK_TIME_IS_VALID (in_timestamp)) {
    GST_LOG_OBJECT (ffmpegdec, "using timestamp returned by ffmpeg");
    /* Get (interpolated) timestamp from FFMPEG */
    in_timestamp = gst_ffmpeg_time_ff_to_gst ((guint64) ffmpegdec->picture->pts,
        ffmpegdec->context->time_base);

  GST_BUFFER_TIMESTAMP (*outbuf) = in_timestamp;
   * 1) Copy input duration if valid
   * 2) else use input framerate
   * 3) else use ffmpeg framerate

  if (!GST_CLOCK_TIME_IS_VALID (in_duration)) {
    /* if we have an input framerate, use that */
    if (ffmpegdec->format.video.fps_n != -1) {

      gst_structure_set (gst_caps_get_structure (caps, 0), "framerate",
          GST_TYPE_FRACTION, ffmpegdec->format.video.fps_n,
          ffmpegdec->format.video.fps_d, NULL);

      gst_ffmpegdec_add_pixel_aspect_ratio (ffmpegdec,
          gst_caps_get_structure (caps, 0));

      if (caps == NULL || !gst_pad_set_caps (ffmpegdec->srcpad, caps)) {
        GST_ELEMENT_ERROR (ffmpegdec, CORE, NEGOTIATION, (NULL),
            ("Failed to link ffmpeg decoder (%s) to next element",
                oclass->in_plugin->name));

      gst_caps_unref (caps);

      gst_caps_unref (caps);

      GST_LOG_OBJECT (ffmpegdec, "using input framerate for duration");
      in_duration = gst_util_uint64_scale_int (GST_SECOND,
          ffmpegdec->format.video.fps_d, ffmpegdec->format.video.fps_n);

      /* don't try to use the decoder's framerate when it seems a bit abnormal,
       * which we assume when den >= 1000... */
      if (ffmpegdec->context->time_base.num != 0 &&
          (ffmpegdec->context->time_base.den > 0 &&
              ffmpegdec->context->time_base.den < 1000)) {
        GST_LOG_OBJECT (ffmpegdec, "using decoder's framerate for duration");
        in_duration = gst_util_uint64_scale_int (GST_SECOND,
            ffmpegdec->context->time_base.num,
            ffmpegdec->context->time_base.den);

      GST_LOG_OBJECT (ffmpegdec, "no valid duration found");

  /* Take repeat_pict into account */
  if (GST_CLOCK_TIME_IS_VALID (in_duration)) {
    in_duration += in_duration * ffmpegdec->picture->repeat_pict / 2;

  GST_BUFFER_DURATION (*outbuf) = in_duration;

  /* palette is not part of raw video frame in gst and the size
   * of the outgoing buffer needs to be adjusted accordingly */
  if (ffmpegdec->context->palctrl != NULL)
    GST_BUFFER_SIZE (*outbuf) -= AVPALETTE_SIZE;

  /* now see if we need to clip the buffer against the segment boundaries. */
  if (G_UNLIKELY (!clip_video_buffer (ffmpegdec, *outbuf, in_timestamp, in_duration)))

  /* mark as keyframe or delta unit */
  GST_BUFFER_FLAG_SET (*outbuf, GST_BUFFER_FLAG_DELTA_UNIT);

  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);
  GST_WARNING_OBJECT (ffmpegdec, "Dropping frame because of QoS");

  GST_WARNING_OBJECT (ffmpegdec, "Dropping non-keyframe (seek/init)");

  GST_DEBUG_OBJECT (ffmpegdec, "no output buffer");

  GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
  gst_buffer_unref (*outbuf);
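/* Illustrative sketch (not part of the original file): the frame-duration
 * arithmetic used above, as a standalone helper with a hypothetical name.
 * For a 30000/1001 (NTSC) stream this yields ~33.37 ms; with repeat_pict = 1
 * (one repeated field) it becomes ~50 ms. */
static GstClockTime
frame_duration_example (gint fps_n, gint fps_d, gint repeat_pict)
{
  GstClockTime dur;

  /* e.g. gst_util_uint64_scale_int (GST_SECOND, 1001, 30000) = 33366666 ns */
  dur = gst_util_uint64_scale_int (GST_SECOND, fps_d, fps_n);
  /* repeat_pict counts extra half-frames */
  dur += dur * repeat_pict / 2;
  return dur;
}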
/* returns TRUE if buffer is within segment, else FALSE.
 * if the buffer is on a segment border, its timestamp and duration will be clipped */
clip_audio_buffer (GstFFMpegDec * dec, GstBuffer * buf, GstClockTime in_ts,
    GstClockTime in_dur)
  gint64 diff, ctime, cstop;
  gboolean res = TRUE;

  GST_LOG_OBJECT (dec,
      "timestamp:%" GST_TIME_FORMAT ", duration:%" GST_TIME_FORMAT
      ", size %u", GST_TIME_ARGS (in_ts), GST_TIME_ARGS (in_dur),
      GST_BUFFER_SIZE (buf));

  /* can't clip without TIME segment */
  if (G_UNLIKELY (dec->segment.format != GST_FORMAT_TIME))

  /* we need a start time */
  if (G_UNLIKELY (!GST_CLOCK_TIME_IS_VALID (in_ts)))

  /* trust duration */
  stop = in_ts + in_dur;

  res = gst_segment_clip (&dec->segment, GST_FORMAT_TIME, in_ts, stop, &ctime,

  if (G_UNLIKELY (!res))
    goto out_of_segment;

  /* see if some clipping happened */
  if (G_UNLIKELY ((diff = ctime - in_ts) > 0)) {
    /* bring clipped time to bytes */
    diff =
        gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
        GST_SECOND) * (2 * dec->format.audio.channels);

    GST_DEBUG_OBJECT (dec, "clipping start to %" GST_TIME_FORMAT " %"
        G_GINT64_FORMAT " bytes", GST_TIME_ARGS (ctime), diff);

    GST_BUFFER_SIZE (buf) -= diff;
    GST_BUFFER_DATA (buf) += diff;

  if (G_UNLIKELY ((diff = stop - cstop) > 0)) {
    /* bring clipped time to bytes */
    diff =
        gst_util_uint64_scale_int (diff, dec->format.audio.samplerate,
        GST_SECOND) * (2 * dec->format.audio.channels);

    GST_DEBUG_OBJECT (dec, "clipping stop to %" GST_TIME_FORMAT " %"
        G_GINT64_FORMAT " bytes", GST_TIME_ARGS (cstop), diff);

    GST_BUFFER_SIZE (buf) -= diff;

  GST_BUFFER_TIMESTAMP (buf) = ctime;
  GST_BUFFER_DURATION (buf) = cstop - ctime;

  GST_LOG_OBJECT (dec, "%sdropping", (res ? "not " : ""));

  GST_LOG_OBJECT (dec, "out of segment");
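/* Illustrative sketch (not part of the original file): converting a clipped
 * amount of running time into a byte count for interleaved 16-bit audio,
 * mirroring the arithmetic above. The function name is hypothetical.
 * Example: 10 ms at 44100 Hz stereo -> 441 samples -> 1764 bytes. */
static gint64
clipped_time_to_bytes_example (GstClockTime diff, gint samplerate, gint channels)
{
  /* samples for the clipped time, then 2 bytes per sample per channel */
  return gst_util_uint64_scale_int (diff, samplerate, GST_SECOND) *
      (2 * channels);
}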
gst_ffmpegdec_audio_frame (GstFFMpegDec * ffmpegdec,
    guint8 * data, guint size,
    GstClockTime in_timestamp, GstClockTime in_duration,
    GstBuffer ** outbuf, GstFlowReturn * ret)

  GST_DEBUG_OBJECT (ffmpegdec,
      "size:%d, ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT ", ffmpegdec->next_ts:%"
      GST_TIME_FORMAT, size, GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration),
      GST_TIME_ARGS (ffmpegdec->next_ts));

  /* outgoing buffer */
  if (!ffmpegdec->last_buffer)
    *outbuf = gst_buffer_new_and_alloc (AVCODEC_MAX_AUDIO_FRAME_SIZE);

    *outbuf = ffmpegdec->last_buffer;
    ffmpegdec->last_buffer = NULL;

  len = avcodec_decode_audio (ffmpegdec->context,
      (int16_t *) GST_BUFFER_DATA (*outbuf), &have_data, data, size);
  GST_DEBUG_OBJECT (ffmpegdec,
      "Decode audio: len=%d, have_data=%d", len, have_data);

  if (len >= 0 && have_data > 0) {
    GST_DEBUG_OBJECT (ffmpegdec, "Creating output buffer");
    if (!gst_ffmpegdec_negotiate (ffmpegdec)) {
      gst_buffer_unref (*outbuf);

    GST_BUFFER_SIZE (*outbuf) = have_data;

     * 1) Copy input timestamp if valid
     * 2) else interpolate from previous input timestamp

    /* always take timestamps from the input buffer if any */
    if (!GST_CLOCK_TIME_IS_VALID (in_timestamp)) {
      in_timestamp = ffmpegdec->next_ts;

     * 1) calculate based on number of samples

    in_duration = gst_util_uint64_scale_int (have_data, GST_SECOND,
        2 * ffmpegdec->context->channels * ffmpegdec->context->sample_rate);

    GST_DEBUG_OBJECT (ffmpegdec,
        "Buffer created. Size:%d , timestamp:%" GST_TIME_FORMAT " , duration:%"
        GST_TIME_FORMAT, have_data,
        GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration));

    GST_BUFFER_TIMESTAMP (*outbuf) = in_timestamp;
    GST_BUFFER_DURATION (*outbuf) = in_duration;

    /* the next timestamp we'll use when interpolating */
    ffmpegdec->next_ts = in_timestamp + in_duration;

    /* now see if we need to clip the buffer against the segment boundaries. */
    if (G_UNLIKELY (!clip_audio_buffer (ffmpegdec, *outbuf, in_timestamp, in_duration)))

  } else if (len > 0 && have_data == 0) {
    /* cache output, because it may be used for caching (in-place) */
    ffmpegdec->last_buffer = *outbuf;

    gst_buffer_unref (*outbuf);

  GST_DEBUG_OBJECT (ffmpegdec, "return flow %d, out %p, len %d",
      *ret, *outbuf, len);

  GST_DEBUG_OBJECT (ffmpegdec, "buffer clipped");
  gst_buffer_unref (*outbuf);
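/* Illustrative arithmetic (not part of the original file): how the audio
 * duration above follows from the decoded byte count for 16-bit samples.
 *   have_data = 4096 bytes, stereo, 44100 Hz
 *   in_duration = gst_util_uint64_scale_int (4096, GST_SECOND, 2 * 2 * 44100)
 *               = 23219954 ns  (~23.2 ms, i.e. 1024 samples per channel) */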
/* gst_ffmpegdec_frame:
 * data: pointer to the data to decode
 * size: size of data in bytes
 * got_data: 0 if no data was decoded, != 0 otherwise.
 * in_time: timestamp of data
 * in_duration: duration of data
 * ret: GstFlowReturn to return in the chain function
 *
 * Decodes the given frame and pushes it downstream.
 *
 * Returns: Number of bytes used in decoding, -1 on error/failure.

gst_ffmpegdec_frame (GstFFMpegDec * ffmpegdec,
    guint8 * data, guint size, gint * got_data, guint64 * in_ts,
    GstBuffer * inbuf, GstFlowReturn * ret)
    guint8 * data, guint size, gint * got_data,
    GstClockTime in_timestamp, GstClockTime in_duration,
    GstFlowReturn * ret)

  GstFFMpegDecClass *oclass =
      (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  GstFFMpegDecClass *oclass;

  GstBuffer *outbuf = NULL;

  gint have_data = 0, len = 0;

  if (ffmpegdec->context->codec == NULL)
  if (G_UNLIKELY (ffmpegdec->context->codec == NULL))

  GST_LOG_OBJECT (ffmpegdec,
      "data:%p, size:%d, *in_ts:%" GST_TIME_FORMAT " inbuf:%p inbuf.ts:%"
      GST_TIME_FORMAT, data, size, GST_TIME_ARGS (*in_ts), inbuf,
      GST_TIME_ARGS ((inbuf) ? GST_BUFFER_TIMESTAMP (inbuf) : 0));
      "data:%p, size:%d, ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
      data, size, GST_TIME_ARGS (in_timestamp), GST_TIME_ARGS (in_duration));
  ffmpegdec->context->frame_number++;

  oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  switch (oclass->in_plugin->type) {
    case CODEC_TYPE_VIDEO:

      gboolean iskeyframe = FALSE;
      gboolean is_itype = FALSE;
      gboolean is_reference = FALSE;

      ffmpegdec->picture->pict_type = -1;       /* in case we skip frames */

      ffmpegdec->context->opaque = ffmpegdec;

      len = avcodec_decode_video (ffmpegdec->context,
          ffmpegdec->picture, &have_data, data, size);
      is_itype = (ffmpegdec->picture->pict_type == FF_I_TYPE);
      is_reference = (ffmpegdec->picture->reference == 1);
      iskeyframe = (is_itype || is_reference || ffmpegdec->picture->key_frame)
          || (oclass->in_plugin->id == CODEC_ID_INDEO3)
          || (oclass->in_plugin->id == CODEC_ID_MSZH)
          || (oclass->in_plugin->id == CODEC_ID_ZLIB)
          || (oclass->in_plugin->id == CODEC_ID_VP3)
          || (oclass->in_plugin->id == CODEC_ID_HUFFYUV);
      GST_LOG_OBJECT (ffmpegdec,
          "Decoded video: len=%d, have_data=%d, "
          "is_keyframe:%d, is_itype:%d, is_reference:%d",
          len, have_data, iskeyframe, is_itype, is_reference);
      if (ffmpegdec->waiting_for_key) {

        ffmpegdec->waiting_for_key = FALSE;

        GST_WARNING_OBJECT (ffmpegdec, "Dropping non-keyframe (seek/init)");

      /* note that ffmpeg sometimes gets the FPS wrong.
       * For B-frame containing movies, we get all pictures delayed
       * except for the I frames, so we synchronize only on I frames
       * and keep an internal counter based on FPS for the others. */
      if (!(oclass->in_plugin->capabilities & CODEC_CAP_DELAY) ||
          ((iskeyframe || !GST_CLOCK_TIME_IS_VALID (ffmpegdec->next_ts)) &&
              GST_CLOCK_TIME_IS_VALID (*in_ts))) {
        GST_LOG_OBJECT (ffmpegdec, "setting next_ts to %" GST_TIME_FORMAT,
            GST_TIME_ARGS (*in_ts));
        ffmpegdec->next_ts = *in_ts;
        *in_ts = GST_CLOCK_TIME_NONE;

      /* precise seeking.... */
      if (GST_CLOCK_TIME_IS_VALID (ffmpegdec->synctime)) {
        if (ffmpegdec->next_ts >= ffmpegdec->synctime) {
          ffmpegdec->synctime = GST_CLOCK_TIME_NONE;

          GST_WARNING_OBJECT (ffmpegdec,
              "Dropping frame for synctime %" GST_TIME_FORMAT
              ", expected(next_ts) %" GST_TIME_FORMAT,
              GST_TIME_ARGS (ffmpegdec->synctime),
              GST_TIME_ARGS (ffmpegdec->next_ts));

          if (ffmpegdec->picture->opaque != NULL) {
            outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
            gst_buffer_unref (outbuf);

          /* don't break here! Timestamps are updated below */
      if (ffmpegdec->waiting_for_key && !iskeyframe) {

      } else if (len >= 0 && have_data > 0) {
        /* libavcodec constantly crashes on stupid buffer allocation
         * errors inside. This drives me crazy, so we let it allocate
         * its own buffers and copy to our own buffer afterwards... */

        if (ffmpegdec->picture->opaque != NULL) {
          outbuf = (GstBuffer *) ffmpegdec->picture->opaque;
          if (outbuf == ffmpegdec->last_buffer)
            ffmpegdec->last_buffer = NULL;

          fsize = gst_ffmpeg_avpicture_get_size (ffmpegdec->context->pix_fmt,
              ffmpegdec->context->width, ffmpegdec->context->height);

          if (!gst_ffmpegdec_negotiate (ffmpegdec))

          if (!ffmpegdec->context->palctrl) {
            if ((ret = gst_pad_alloc_buffer_and_set_caps (ffmpegdec->srcpad,
                        GST_BUFFER_OFFSET_NONE, fsize,
                        GST_PAD_CAPS (ffmpegdec->srcpad),
                        &outbuf)) != GST_FLOW_OK)

            /* for paletted data we can't use pad_alloc_buffer(), because
             * fsize contains the size of the palette, so the overall size
             * is bigger than ffmpegcolorspace's unit size, which will
             * prompt GstBaseTransform to complain endlessly ... */
            outbuf = gst_buffer_new_and_alloc (fsize);
            gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));

          /* original ffmpeg code does not handle odd sizes correctly.
           * This patched up version does */
          gst_ffmpeg_avpicture_fill (&pic, GST_BUFFER_DATA (outbuf),
              ffmpegdec->context->pix_fmt,
              ffmpegdec->context->width, ffmpegdec->context->height);

          /* the original convert function did not do the right thing, this
           * is a patched up version that adjusts width/height so that the
           * ffmpeg one works correctly. */
          gst_ffmpeg_img_convert (&pic, ffmpegdec->context->pix_fmt,
              (AVPicture *) ffmpegdec->picture,
              ffmpegdec->context->pix_fmt,
              ffmpegdec->context->width, ffmpegdec->context->height);
        ffmpegdec->waiting_for_key = FALSE;

        GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DELTA_UNIT);

        /* If we have used the framerate from the demuxer then
         * also use the demuxer's timestamp information (#317596) */
        if (ffmpegdec->format.video.fps_n != -1 && inbuf != NULL) {
          GST_LOG_OBJECT (ffmpegdec, "using incoming buffer's timestamps");
          GST_LOG_OBJECT (ffmpegdec, "incoming timestamp %" GST_TIME_FORMAT,
              GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)));
          gst_buffer_stamp (outbuf, inbuf);

          GST_LOG_OBJECT (ffmpegdec, "using decoder's timestamps");
          GST_BUFFER_TIMESTAMP (outbuf) = ffmpegdec->next_ts;
          if (ffmpegdec->context->time_base.num != 0 &&
              ffmpegdec->context->time_base.den != 0) {
            GST_BUFFER_DURATION (outbuf) =
                gst_util_uint64_scale_int (GST_SECOND,
                ffmpegdec->context->time_base.num,
                ffmpegdec->context->time_base.den);

            /* Take repeat_pict into account */
            GST_BUFFER_DURATION (outbuf) += GST_BUFFER_DURATION (outbuf)
                * ffmpegdec->picture->repeat_pict / 2;
            GST_DEBUG_OBJECT (ffmpegdec,
                "advancing next_ts by duration of %" GST_TIME_FORMAT,
                GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));
            ffmpegdec->next_ts += GST_BUFFER_DURATION (outbuf);

            GST_DEBUG_OBJECT (ffmpegdec, "setting next_ts to NONE");
            ffmpegdec->next_ts = GST_CLOCK_TIME_NONE;

          GST_LOG_OBJECT (ffmpegdec, "outgoing timestamp %" GST_TIME_FORMAT,
              GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)));
        } else if (ffmpegdec->picture->pict_type != -1 &&
            oclass->in_plugin->capabilities & CODEC_CAP_DELAY) {
          /* update time for skip-frame */

              (iskeyframe || !GST_CLOCK_TIME_IS_VALID (ffmpegdec->next_ts))
              && GST_CLOCK_TIME_IS_VALID (*in_ts)) {
            GST_DEBUG_OBJECT (ffmpegdec, "setting next_ts to *in_ts");
            ffmpegdec->next_ts = *in_ts;
            *in_ts = GST_CLOCK_TIME_NONE;

          if (ffmpegdec->context->time_base.num != 0 &&
              ffmpegdec->context->time_base.den != 0) {
            guint64 dur = GST_SECOND *
                ffmpegdec->context->time_base.num /
                ffmpegdec->context->time_base.den;

            /* Take repeat_pict into account */
            dur += dur * ffmpegdec->picture->repeat_pict / 2;
            GST_DEBUG_OBJECT (ffmpegdec,
                "Advancing next_ts by dur:%" GST_TIME_FORMAT,
                GST_TIME_ARGS (dur));
            ffmpegdec->next_ts += dur;

            GST_DEBUG_OBJECT (ffmpegdec, "setting next_ts to NONE");
            ffmpegdec->next_ts = GST_CLOCK_TIME_NONE;

        /* palette is not part of raw video frame in gst and the size
         * of the outgoing buffer needs to be adjusted accordingly */
        if (ffmpegdec->context->palctrl != NULL && outbuf != NULL)
          GST_BUFFER_SIZE (outbuf) -= AVPALETTE_SIZE;
    case CODEC_TYPE_VIDEO:
      gst_ffmpegdec_video_frame (ffmpegdec, data, size, in_timestamp, in_duration, &outbuf,

    case CODEC_TYPE_AUDIO:
      if (!ffmpegdec->last_buffer)
        outbuf = gst_buffer_new_and_alloc (AVCODEC_MAX_AUDIO_FRAME_SIZE);

        outbuf = ffmpegdec->last_buffer;
        ffmpegdec->last_buffer = NULL;

      len = avcodec_decode_audio (ffmpegdec->context,
          (int16_t *) GST_BUFFER_DATA (outbuf), &have_data, data, size);
      GST_DEBUG_OBJECT (ffmpegdec,
          "Decode audio: len=%d, have_data=%d", len, have_data);

      if (len >= 0 && have_data > 0) {
        if (!gst_ffmpegdec_negotiate (ffmpegdec)) {
          gst_buffer_unref (outbuf);

        GST_BUFFER_SIZE (outbuf) = have_data;
        if (GST_CLOCK_TIME_IS_VALID (*in_ts)) {
          ffmpegdec->next_ts = *in_ts;

        GST_BUFFER_TIMESTAMP (outbuf) = ffmpegdec->next_ts;
        GST_BUFFER_DURATION (outbuf) = (have_data * GST_SECOND) /
            (2 * ffmpegdec->context->channels *
            ffmpegdec->context->sample_rate);
        ffmpegdec->next_ts += GST_BUFFER_DURATION (outbuf);
        if (GST_CLOCK_TIME_IS_VALID (*in_ts))
          *in_ts += GST_BUFFER_DURATION (outbuf);
      } else if (len > 0 && have_data == 0) {
        /* cache output, because it may be used for caching (in-place) */
        ffmpegdec->last_buffer = outbuf;

        gst_buffer_unref (outbuf);

      gst_ffmpegdec_audio_frame (ffmpegdec, data, size, in_timestamp, in_duration, &outbuf,

      g_assert_not_reached ();
  if (len < 0 || have_data < 0) {
    GST_ERROR_OBJECT (ffmpegdec,
    GST_WARNING_OBJECT (ffmpegdec,
        "ffdec_%s: decoding error (len: %d, have_data: %d)",
        oclass->in_plugin->name, len, have_data);

  } else if (len == 0 && have_data == 0) {

    /* this is where I lost my last clue on ffmpeg... */
    *got_data = 1;              //(ffmpegdec->pctx || have_data) ? 1 : 0;

    GST_LOG_OBJECT (ffmpegdec, "Decoded data, now pushing with timestamp %"
        GST_TIME_FORMAT, GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)));
    GST_LOG_OBJECT (ffmpegdec,
        "Decoded data, now pushing buffer with timestamp %" GST_TIME_FORMAT
        " and duration %" GST_TIME_FORMAT,
        GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (outbuf)),
        GST_TIME_ARGS (GST_BUFFER_DURATION (outbuf)));

    /* mark pending discont */
    if (ffmpegdec->discont) {
      GST_BUFFER_FLAG_SET (outbuf, GST_BUFFER_FLAG_DISCONT);
      ffmpegdec->discont = FALSE;

    gst_buffer_set_caps (outbuf, GST_PAD_CAPS (ffmpegdec->srcpad));

    *ret = gst_pad_push (ffmpegdec->srcpad, outbuf);

    GST_DEBUG_OBJECT (ffmpegdec, "We didn't get a decoded buffer");
  GST_ERROR_OBJECT (ffmpegdec, "no codec context");

gst_ffmpegdec_flush_pcache (GstFFMpegDec * ffmpegdec)
  if (ffmpegdec->pcache) {
    gst_buffer_unref (ffmpegdec->pcache);
    ffmpegdec->pcache = NULL;

  if (ffmpegdec->pctx) {
    GstFFMpegDecClass *oclass;

    oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

    av_parser_close (ffmpegdec->pctx);
    ffmpegdec->pctx = av_parser_init (oclass->in_plugin->id);
static gboolean
gst_ffmpegdec_sink_event (GstPad * pad, GstEvent * event)
  GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) GST_OBJECT_PARENT (pad);
  GstFFMpegDecClass *oclass =
      (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  GstFFMpegDec *ffmpegdec;
  GstFFMpegDecClass *oclass;
  gboolean ret = FALSE;

  ffmpegdec = (GstFFMpegDec *) gst_pad_get_parent (pad);
  oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  GST_DEBUG_OBJECT (ffmpegdec, "Handling %s event",
      GST_EVENT_TYPE_NAME (event));

      if (ffmpegdec->opened) {
        avcodec_flush_buffers (ffmpegdec->context);

      ffmpegdec->next_ts = GST_CLOCK_TIME_NONE;
      gst_ffmpegdec_reset_qos (ffmpegdec);
      gst_ffmpegdec_flush_pcache (ffmpegdec);
      ffmpegdec->waiting_for_key = TRUE;
      gst_segment_init (&ffmpegdec->segment, GST_FORMAT_TIME);
    case GST_EVENT_NEWSEGMENT:{
      gint64 base, start, end;

      gst_event_parse_new_segment (event, NULL, &rate, &fmt, &start, &end,

      if (fmt == GST_FORMAT_TIME) {
        ffmpegdec->next_ts = start;
        GST_DEBUG_OBJECT (ffmpegdec,
            "Discont to time (next_ts) %" GST_TIME_FORMAT " -- %"
            GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (end));
      } else if (ffmpegdec->context->bit_rate && fmt == GST_FORMAT_BYTES) {
        ffmpegdec->next_ts = start * GST_SECOND / ffmpegdec->context->bit_rate;
        GST_DEBUG_OBJECT (ffmpegdec,
            "Newsegment in bytes from byte %" G_GINT64_FORMAT
            " (time %" GST_TIME_FORMAT ") to byte % " G_GINT64_FORMAT
            " (time %" GST_TIME_FORMAT ")",
            start, GST_TIME_ARGS (ffmpegdec->next_ts),
            GST_TIME_ARGS (end * GST_SECOND / ffmpegdec->context->bit_rate));

        gst_event_unref (event);
        event = gst_event_new_new_segment (FALSE, rate, fmt,
            start * GST_SECOND / ffmpegdec->context->bit_rate,
            end == -1 ? -1 : end * GST_SECOND / ffmpegdec->context->bit_rate,
            base * GST_SECOND / ffmpegdec->context->bit_rate);

        GST_WARNING_OBJECT (ffmpegdec,
            "Received discont with no useful value...");

      if (ffmpegdec->opened) {
        avcodec_flush_buffers (ffmpegdec->context);

        if (ffmpegdec->context->codec_id == CODEC_ID_MPEG2VIDEO ||
            ffmpegdec->context->codec_id == CODEC_ID_MPEG4 ||
            ffmpegdec->context->codec_id == CODEC_ID_H264) {
          ffmpegdec->waiting_for_key = TRUE;
      gint64 start, stop, time;
      gdouble rate, arate;

      gst_event_parse_new_segment_full (event, &update, &rate, &arate, &fmt,
          &start, &stop, &time);

      /* no negative rates for now */
        goto newseg_wrong_rate;

        case GST_FORMAT_TIME:
          /* fine, our native segment format */

        case GST_FORMAT_BYTES:
          bit_rate = ffmpegdec->context->bit_rate;

          /* convert to time or fail */

          GST_DEBUG_OBJECT (ffmpegdec, "bitrate: %d", bit_rate);

          /* convert values to TIME */
          start = gst_util_uint64_scale_int (start, GST_SECOND, bit_rate);
          stop = gst_util_uint64_scale_int (stop, GST_SECOND, bit_rate);
          time = gst_util_uint64_scale_int (time, GST_SECOND, bit_rate);

          /* unref old event */
          gst_event_unref (event);

          /* create new converted time segment */
          fmt = GST_FORMAT_TIME;
          /* FIXME, bitrate is not good enough to find a good stop, let's
           * hope start and time were 0... meh. */
          event = gst_event_new_new_segment (update, rate, fmt,

          /* invalid format */
          goto invalid_format;
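          /* Illustrative arithmetic (not part of the original file): the
           * BYTES-to-TIME conversion above for bit_rate = 128000 and
           * start = 64000 bytes:
           *   gst_util_uint64_scale_int (64000, GST_SECOND, 128000)
           *     = 500000000 ns = 0.5 s
           * Since bit_rate comes from libavcodec in bits per second, this is
           * only a rough approximation of the byte offset's position in time,
           * which is part of what the FIXME above alludes to. */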
      ffmpegdec->waiting_for_key = TRUE;
      ffmpegdec->synctime = ffmpegdec->next_ts;

      GST_DEBUG_OBJECT (ffmpegdec,
          "NEWSEGMENT in time start %" GST_TIME_FORMAT " -- stop %"
          GST_TIME_FORMAT, GST_TIME_ARGS (start), GST_TIME_ARGS (stop));

      /* and store the values */
      gst_segment_set_newsegment_full (&ffmpegdec->segment, update,
          rate, arate, fmt, start, stop, time);

  ret = gst_pad_event_default (ffmpegdec->sinkpad, event);

      /* and push segment downstream */
      ret = gst_pad_push_event (ffmpegdec->srcpad, event);

  gst_object_unref (ffmpegdec);

  GST_WARNING_OBJECT (ffmpegdec, "negative rates not supported yet");
  gst_event_unref (event);

  GST_WARNING_OBJECT (ffmpegdec, "no bitrate to convert BYTES to TIME");
  gst_event_unref (event);

  GST_WARNING_OBJECT (ffmpegdec, "unknown format received in NEWSEGMENT");
  gst_event_unref (event);
static GstFlowReturn
gst_ffmpegdec_chain (GstPad * pad, GstBuffer * inbuf)
  GstFFMpegDec *ffmpegdec = (GstFFMpegDec *) (GST_PAD_PARENT (pad));
  GstFFMpegDecClass *oclass =
      (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));
  guint8 *bdata, *data;
  gint bsize, size, len, have_data;
  guint64 in_ts = GST_BUFFER_TIMESTAMP (inbuf);
  GstFFMpegDec *ffmpegdec;
  GstFFMpegDecClass *oclass;
  guint8 *data, *bdata;
  gint size, bsize, len, have_data;
  GstFlowReturn ret = GST_FLOW_OK;

  if (!ffmpegdec->opened)
  GstClockTime in_timestamp, in_duration;
  GstClockTime next_timestamp, next_duration;
  GstClockTime pending_timestamp, pending_duration;

  ffmpegdec = (GstFFMpegDec *) (GST_PAD_PARENT (pad));

  if (G_UNLIKELY (!ffmpegdec->opened))
    goto not_negotiated;

  /* The discont flag marks a buffer that is not continuous with the previous
   * buffer. This means we need to clear whatever data we currently have. We
   * currently also wait for a new keyframe, which might be suboptimal in the
   * case of a network error; better to show the errors than to drop all data. */
  if (G_UNLIKELY (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DISCONT))) {
    GST_DEBUG_OBJECT (ffmpegdec, "received DISCONT");
    gst_ffmpegdec_flush_pcache (ffmpegdec);
    avcodec_flush_buffers (ffmpegdec->context);
    ffmpegdec->waiting_for_key = TRUE;
    ffmpegdec->discont = TRUE;
    ffmpegdec->next_ts = GST_CLOCK_TIME_NONE;
  oclass = (GstFFMpegDecClass *) (G_OBJECT_GET_CLASS (ffmpegdec));

  /* do an early keyframe check; it is pretty bad to rely on the keyframe flag
   * in the source for this, as it might not even be parsed (UDP/file/..). */
  if (G_UNLIKELY (ffmpegdec->waiting_for_key)) {
    if (GST_BUFFER_FLAG_IS_SET (inbuf, GST_BUFFER_FLAG_DELTA_UNIT) &&
        oclass->in_plugin->type != CODEC_TYPE_AUDIO)

    GST_DEBUG_OBJECT (ffmpegdec, "got keyframe");
    ffmpegdec->waiting_for_key = FALSE;

  pending_timestamp = GST_BUFFER_TIMESTAMP (inbuf);
  pending_duration = GST_BUFFER_DURATION (inbuf);

  GST_LOG_OBJECT (ffmpegdec,
      "Received new data of size %d, time %" GST_TIME_FORMAT " next_ts %"
      GST_TIME_FORMAT, GST_BUFFER_SIZE (inbuf),
      GST_TIME_ARGS (GST_BUFFER_TIMESTAMP (inbuf)),
      GST_TIME_ARGS (ffmpegdec->next_ts));
      "Received new data of size %d, ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
      GST_BUFFER_SIZE (inbuf), GST_TIME_ARGS (pending_timestamp), GST_TIME_ARGS (pending_duration));

  /* parse cache joining */
  /* parse cache joining. If there is cached data, its timestamp will be what we
   * send to the parser. */
  if (ffmpegdec->pcache) {
    GstClockTime timestamp = GST_CLOCK_TIME_NONE;
    GstClockTime duration = GST_CLOCK_TIME_NONE;

    /* decide on resulting timestamp/duration before we give away our ref */
    /* since the cache is all data that did not result in an outgoing frame,
     * we should timestamp with the new incoming buffer. This is probably
     * not entirely correct though, but better than nothing. */
    if (GST_BUFFER_TIMESTAMP_IS_VALID (inbuf))
      timestamp = GST_BUFFER_TIMESTAMP (inbuf);

    if (GST_BUFFER_DURATION_IS_VALID (ffmpegdec->pcache)
        && GST_BUFFER_DURATION_IS_VALID (inbuf))
      duration = GST_BUFFER_DURATION (ffmpegdec->pcache) +
          GST_BUFFER_DURATION (inbuf);

    /* keep track of how many bytes to consume before we can use the incoming
     * timestamp, which we have stored in pending_timestamp. */
    left = GST_BUFFER_SIZE (ffmpegdec->pcache);

    /* use timestamp and duration of what is in the cache */
    in_timestamp = GST_BUFFER_TIMESTAMP (ffmpegdec->pcache);
    in_duration = GST_BUFFER_DURATION (ffmpegdec->pcache);

    /* join with previous data */
    inbuf = gst_buffer_join (ffmpegdec->pcache, inbuf);
    /* inbuf = gst_buffer_span (ffmpegdec->pcache, 0, inbuf, */
    /*     GST_BUFFER_SIZE (ffmpegdec->pcache) + GST_BUFFER_SIZE (inbuf)); */

    /* update time info as appropriate */
    GST_BUFFER_TIMESTAMP (inbuf) = timestamp;
    GST_BUFFER_DURATION (inbuf) = duration;
    GST_LOG_OBJECT (ffmpegdec, "joined parse cache, inbuf now has ts %" GST_TIME_FORMAT
        " and duration %" GST_TIME_FORMAT, GST_TIME_ARGS (timestamp),
        GST_TIME_ARGS (duration));
    GST_LOG_OBJECT (ffmpegdec,
        "joined parse cache, inbuf now has ts:%" GST_TIME_FORMAT,
        GST_TIME_ARGS (in_timestamp));

    /* no more cached data, we assume we can consume the complete cache */
    ffmpegdec->pcache = NULL;
    bdata = GST_BUFFER_DATA (inbuf);
    bsize = GST_BUFFER_SIZE (inbuf);

    /* no cache, input timestamp matches the buffer we try to decode */
    in_timestamp = pending_timestamp;
    in_duration = pending_duration;
  /* workarounds, functions write to buffers:
   * libavcodec/svq1.c:svq1_decode_frame writes to the given buffer.
   * libavcodec/svq3.c:svq3_decode_slice_header too.
   * ffmpeg devs know about it and will fix it (they said). */
  else if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
  if (oclass->in_plugin->id == CODEC_ID_SVQ1 ||
      oclass->in_plugin->id == CODEC_ID_SVQ3) {
    inbuf = gst_buffer_make_writable (inbuf);
    bdata = GST_BUFFER_DATA (inbuf);
    bsize = GST_BUFFER_SIZE (inbuf);

    bdata = GST_BUFFER_DATA (inbuf);
    bsize = GST_BUFFER_SIZE (inbuf);

  bdata = GST_BUFFER_DATA (inbuf);
  bsize = GST_BUFFER_SIZE (inbuf);
    /* parse, if at all possible */
    if (ffmpegdec->pctx) {
      ffpts = gst_ffmpeg_time_gst_to_ff (in_ts, ffmpegdec->context->time_base);

      /* convert timestamp to ffmpeg timestamp */
      ffpts = gst_ffmpeg_time_gst_to_ff (in_timestamp, ffmpegdec->context->time_base);

      GST_LOG_OBJECT (ffmpegdec,
          "Calling av_parser_parse with ts:%" GST_TIME_FORMAT ", ffpts:%" G_GINT64_FORMAT,
          GST_TIME_ARGS (in_timestamp), ffpts);

      /* feed the parser */
      res = av_parser_parse (ffmpegdec->pctx, ffmpegdec->context,
          &data, &size, bdata, bsize, ffpts, ffpts);

      GST_LOG_OBJECT (ffmpegdec, "Parsed video frame, res=%d, size=%d",

      in_ts = gst_ffmpeg_time_ff_to_gst (ffmpegdec->pctx->pts,
          ffmpegdec->context->time_base);
      if (res == 0 || size == 0)

      GST_LOG_OBJECT (ffmpegdec,
          "parser returned res %d and size %d", res, size);

      GST_LOG_OBJECT (ffmpegdec, "consuming %d bytes. Next ts at %d, ffpts:%"
          G_GINT64_FORMAT, size, left, ffmpegdec->pctx->pts);

      /* there is output, set pointers for next round. */

      /* if there is no output, we must break and wait for more data. also the
       * timestamp in the context is not updated. */

        /* activate the pending timestamp/duration and mark it invalid */
        next_timestamp = pending_timestamp;
        next_duration = pending_duration;

        pending_timestamp = GST_CLOCK_TIME_NONE;
        pending_duration = GST_CLOCK_TIME_NONE;

        GST_LOG_OBJECT (ffmpegdec, "activated ts:%" GST_TIME_FORMAT ", dur:%" GST_TIME_FORMAT,
            GST_TIME_ARGS (next_timestamp), GST_TIME_ARGS (next_duration));

        /* get new timestamp from the parser, this could be interpolated by the
         * parser. We lost track of duration here. */
        next_timestamp = gst_ffmpeg_time_ff_to_gst (ffmpegdec->pctx->pts,
            ffmpegdec->context->time_base);
        next_duration = GST_CLOCK_TIME_NONE;
        GST_LOG_OBJECT (ffmpegdec, "parse context next ts:%" GST_TIME_FORMAT ", ffpts:%" G_GINT64_FORMAT,
            GST_TIME_ARGS (next_timestamp), ffpts);
      /* after decoding this input buffer, we don't know the timestamp anymore
       * of any other decodable frame in this buffer, we let the interpolation
      next_timestamp = GST_CLOCK_TIME_NONE;
      next_duration = GST_CLOCK_TIME_NONE;

    if ((len = gst_ffmpegdec_frame (ffmpegdec, data, size,
                &have_data, &in_ts, inbuf, &ret)) < 0 || ret != GST_FLOW_OK)

    /* decode a frame of audio/video now */
    len = gst_ffmpegdec_frame (ffmpegdec, data, size, &have_data, in_timestamp, in_duration, &ret);
    if (len < 0 || ret != GST_FLOW_OK)

    /* we decoded something, prepare to use next_timestamp in the next round */
    in_timestamp = next_timestamp;
    in_duration = next_duration;

    if (!ffmpegdec->pctx) {

    if (!have_data) {
      GST_LOG_OBJECT (ffmpegdec, "Decoding didn't return any data, breaking");

    GST_LOG_OBJECT (ffmpegdec, "Before (while bsize>0). bsize:%d , bdata:%p",
  } while (bsize > 0);

  /* keep left-over */
  if ((ffmpegdec->pctx || oclass->in_plugin->id == CODEC_ID_MP3) && bsize > 0) {
    GST_LOG_OBJECT (ffmpegdec, "Keeping %d bytes of data", bsize);
    GST_LOG_OBJECT (ffmpegdec,
        "Keeping %d bytes of data with timestamp %" GST_TIME_FORMAT, bsize,
        GST_TIME_ARGS (in_timestamp));

    ffmpegdec->pcache = gst_buffer_create_sub (inbuf,
        GST_BUFFER_SIZE (inbuf) - bsize, bsize);
    /* we keep timestamp, even though all we really know is that the correct
     * timestamp is not below the one from inbuf */
    GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = GST_BUFFER_TIMESTAMP (inbuf);
    GST_BUFFER_TIMESTAMP (ffmpegdec->pcache) = in_timestamp;
  } else if (bsize > 0) {
    GST_DEBUG_OBJECT (ffmpegdec, "Dropping %d bytes of data", bsize);