3
// Port to avidemux2 by mean
4
// Original filter by M Niedermayer
7
Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
14
This program is distributed in the hope that it will be useful,
15
but WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17
GNU General Public License for more details.
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27
#include <ADM_assert.h>
28
#include "ADM_lavcodec.h"
38
#include "ADM_toolkit/toolkit.hxx"
39
#include "ADM_editor/ADM_edit.hxx"
40
#include "ADM_video/ADM_genvideo.hxx"
42
#include "ADM_filter/video_filters.h"
43
#include "ADM_video/ADM_cache.h"
45
#include "ADM_vidMcDeint_param.h"
47
#include "ADM_userInterfaces/ADM_commonUI/DIA_factory.h"
49
static FILTER_PARAM mcDeintParam={3,{"mode","qp","initial_parity"}};
62
AVCodecContext *avctx_enc;
68
class AVDMVideoMCDeint:public AVDMGenericVideoStream
72
MCDEINT_PARAM *_param;
78
virtual char *printConf(void) ;
79
AVDMVideoMCDeint( AVDMGenericVideoStream *in,CONFcouple *setup);
81
virtual uint8_t getFrameNumberNoAlloc(uint32_t frame, uint32_t *len,
82
ADMImage *data,uint32_t *flags);
84
virtual uint8_t configure( AVDMGenericVideoStream *instream);
85
virtual uint8_t getCoupledConf( CONFcouple **couples);
88
SCRIPT_CREATE(mcdeint_script,AVDMVideoMCDeint,mcDeintParam);
89
BUILD_CREATE(mcdeint_create,AVDMVideoMCDeint);
91
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height);
94
char *AVDMVideoMCDeint::printConf( void )
98
sprintf((char *)buf," MC deinterlacer : Mode %d, qp %d, parity %d ",_param->mode,_param->qp,_param->initial_parity);
101
uint8_t AVDMVideoMCDeint::configure(AVDMGenericVideoStream * instream)
104
diaMenuEntry menuMode[4]={{0,QT_TR_NOOP("Fast"),NULL},
105
{1,QT_TR_NOOP("Medium"),NULL},
106
{2,QT_TR_NOOP("Slow iterative motion search"),NULL},
107
{3,QT_TR_NOOP("Extra slow (same as 3+multiple reference frames)"),NULL}
109
diaMenuEntry menuField[2]={{0,QT_TR_NOOP("Top"),NULL},
110
{1,QT_TR_NOOP("Bottom"),NULL}
113
diaElemMenu menu1(&(_param->mode),QT_TR_NOOP("_Mode:"), 4,menuMode);
114
diaElemMenu menu2(&(_param->initial_parity),QT_TR_NOOP("_Field dominance:"), 2,menuField);
115
diaElemUInteger qp(&(_param->qp),QT_TR_NOOP("_Qp:"),1,60);
117
diaElem *elems[3]={&menu1,&menu2,&qp};
119
return diaFactoryRun(QT_TR_NOOP("mcDeinterlace"),3,elems);
122
uint8_t AVDMVideoMCDeint::getCoupledConf( CONFcouple **couples)
126
*couples=new CONFcouple(3);
128
#define CSET(x) (*couples)->setCouple((char *)#x,(_param->x))
131
CSET(initial_parity);
136
//_______________________________________________________________
137
AVDMVideoMCDeint::AVDMVideoMCDeint(AVDMGenericVideoStream *in,CONFcouple *couples)
141
memcpy(&_info,_in->getInfo(),sizeof(_info));
142
_param=NEW(MCDEINT_PARAM);
143
vidCache=new VideoCache(4,_in);
154
_param->initial_parity=0;
163
// ___ destructor_____________
164
AVDMVideoMCDeint::~AVDMVideoMCDeint()
172
uint8_t AVDMVideoMCDeint::getFrameNumberNoAlloc(uint32_t frame,
177
if(frame>=_info.nb_frames)
179
printf("MPdelogo : Filter : out of bound!\n");
187
curImage=vidCache->getImage(frame);
190
printf("MCDeint : error getting frame\n");
194
// Prepare to call filter...
195
uint8_t *dplanes[3],*splanes[3];
196
int dstride[3],sstride[3];
198
dstride[0]=sstride[0]=_info.width;
199
dstride[2]=sstride[2]=dstride[1]=sstride[1]=_info.width>>1;
201
splanes[0]=YPLANE(curImage);
202
splanes[1]=UPLANE(curImage);
203
splanes[2]=VPLANE(curImage);
205
dplanes[0]=YPLANE(data);
206
dplanes[1]=UPLANE(data);
207
dplanes[2]=VPLANE(data);
210
filter(&priv, dplanes, splanes, dstride, sstride, _info.width, _info.height);
211
vidCache->unlockAll();
214
uint8_t AVDMVideoMCDeint::init( void )
216
memset(&priv,0,sizeof(priv));
219
AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);
224
AVCodecContext *avctx_enc;
225
avctx_enc= priv.avctx_enc= avcodec_alloc_context();
226
avctx_enc->width = _info.width;
227
avctx_enc->height = _info.height;
228
avctx_enc->time_base= (AVRational){1,25}; // meaningless
229
avctx_enc->gop_size = 300;
230
avctx_enc->max_b_frames= 0;
231
avctx_enc->pix_fmt = PIX_FMT_YUV420P;
232
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
233
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
234
avctx_enc->global_quality= 1;
235
avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
237
avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
238
avctx_enc->mb_cmp= FF_CMP_SSE;
245
avctx_enc->me_method= ME_ITER;
247
avctx_enc->flags |= CODEC_FLAG_4MV;
248
avctx_enc->dia_size=2;
249
// avctx_enc->mb_decision = MB_DECISSION_RD;
251
avctx_enc->flags |= CODEC_FLAG_QPEL;
254
avcodec_open(avctx_enc, enc);
257
priv.frame= avcodec_alloc_frame();
259
priv.outbuf_size= _info.width*_info.height*10;
260
priv.outbuf= (uint8_t *)ADM_alloc(priv.outbuf_size);
261
priv.parity=_param->initial_parity;
264
uint8_t AVDMVideoMCDeint::cleanup( void )
267
avcodec_close(priv.avctx_enc);
268
av_free(priv.avctx_enc);
269
ADM_dezalloc(priv.outbuf);
270
memset(&priv,0,sizeof(priv));
277
* The motion estimation is somewhat at the mercy of the input, if the input
278
frames are created purely based on spatial interpolation then for example
279
a thin black line or another random and not interpolateable pattern
281
Note: completely ignoring the "unavailable" lines during motion estimation
282
didn't look any better, so the most obvious solution would be to improve
283
tfields or penalize problematic motion vectors ...
285
* If non iterative ME is used then snow currently ignores the OBMC window
286
and as a result sometimes creates artifacts
288
* only past frames are used, we should ideally use future frames too, something
289
like filtering the whole movie in forward and then backward direction seems
290
like an interesting idea, but the current filter framework is FAR from
291
supporting such things
293
* combining the motion compensated image with the input image also isn't
294
as trivial as it seems; simply blindly taking even lines from one and
295
odd ones from the other doesn't work at all, as ME/MC sometimes simply
296
has nothing in the previous frames which matches the current, the current
297
algo has been found by trial and error and almost certainly can be
303
#define MIN(a,b) ((a) > (b) ? (b) : (a))
304
#define MAX(a,b) ((a) < (b) ? (b) : (a))
305
#define ABS(a) ((a) > 0 ? (a) : (-(a)))
307
//===========================================================================//
310
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
315
p->frame->data[i]= src[i];
316
p->frame->linesize[i]= src_stride[i];
319
p->avctx_enc->me_cmp=
320
p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
321
p->frame->quality= p->qp*FF_QP2LAMBDA;
322
out_size = avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
323
p->frame_dec = p->avctx_enc->coded_frame;
327
int w= width >>is_chroma;
328
int h= height>>is_chroma;
329
int fils= p->frame_dec->linesize[i];
330
int srcs= src_stride[i];
333
if((y ^ p->parity) & 1){
335
if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
336
uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
337
uint8_t *srcp= &src[i][x + y*srcs];
338
int diff0= filp[-fils] - srcp[-srcs];
339
int diff1= filp[+fils] - srcp[+srcs];
340
int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
341
+ABS(srcp[-srcs ] - srcp[+srcs ])
342
+ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
346
{ int score= ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
347
+ ABS(srcp[-srcs +j] - srcp[+srcs -j])\
348
+ ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
349
if(score < spatial_score){\
350
spatial_score= score;\
351
diff0= filp[-fils+j] - srcp[-srcs+j];\
352
diff1= filp[+fils-j] - srcp[+srcs-j];
354
CHECK(-1) CHECK(-2) }} }}
355
CHECK( 1) CHECK( 2) }} }}
357
if((diff0 ^ diff1) > 0){
358
int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
362
if(diff0 + diff1 > 0)
363
temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
365
temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
367
temp-= (diff0 + diff1)/2;
371
dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
373
dst[i][x + y*dst_stride[i]]= filp[0];
374
filp[0]= temp > 255U ? ~(temp>>31) : temp;
377
dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
382
if(!((y ^ p->parity) & 1)){
385
p->frame_dec->data[i][x + y*fils]=
386
dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
388
dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
389
p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
398
#ifdef titititititi_II
399
static int config(struct vf_instance_s* vf,
400
int width, int height, int d_width, int d_height,
401
unsigned int flags, unsigned int outfmt){
403
AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);
406
AVCodecContext *avctx_enc;
409
int w= ((width + 31) & (~31))>>is_chroma;
410
int h= ((height + 31) & (~31))>>is_chroma;
412
vf->priv->temp_stride[i]= w;
413
vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
414
vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
417
vf->priv->avctx_enc= avcodec_alloc_context();
418
avctx_enc->width = width;
419
avctx_enc->height = height;
420
avctx_enc->time_base= (AVRational){1,25}; // meaningless
421
avctx_enc->gop_size = 300;
422
avctx_enc->max_b_frames= 0;
423
avctx_enc->pix_fmt = PIX_FMT_YUV420P;
424
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
425
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
426
avctx_enc->global_quality= 1;
427
avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
429
avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
430
avctx_enc->mb_cmp= FF_CMP_SSE;
432
switch(vf->priv->mode){
436
avctx_enc->me_method= ME_ITER;
438
avctx_enc->flags |= CODEC_FLAG_4MV;
439
avctx_enc->dia_size=2;
440
// avctx_enc->mb_decision = MB_DECISSION_RD;
442
avctx_enc->flags |= CODEC_FLAG_QPEL;
445
avcodec_open(avctx_enc, enc);
448
vf->priv->frame= avcodec_alloc_frame();
450
vf->priv->outbuf_size= width*height*10;
451
vf->priv->outbuf= malloc(vf->priv->outbuf_size);
453
return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
456
static void get_image(struct vf_instance_s* vf, mp_image_t *mpi){
457
if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
458
return; //caused problems, dunno why
459
// ok, we can do pp in-place (or pp disabled):
460
vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
461
mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
462
mpi->planes[0]=vf->dmpi->planes[0];
463
mpi->stride[0]=vf->dmpi->stride[0];
464
mpi->width=vf->dmpi->width;
465
if(mpi->flags&MP_IMGFLAG_PLANAR){
466
mpi->planes[1]=vf->dmpi->planes[1];
467
mpi->planes[2]=vf->dmpi->planes[2];
468
mpi->stride[1]=vf->dmpi->stride[1];
469
mpi->stride[2]=vf->dmpi->stride[2];
471
mpi->flags|=MP_IMGFLAG_DIRECT;
474
static int put_image(struct vf_instance_s* vf, mp_image_t *mpi, double pts){
477
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
478
// no DR, so get a new image! hope we'll get DR buffer:
479
dmpi=vf_get_image(vf->next,mpi->imgfmt,
481
MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
482
mpi->width,mpi->height);
483
vf_clone_mpi_attributes(dmpi, mpi);
488
filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);
490
return vf_next_put_image(vf,dmpi, pts);
493
static void uninit(struct vf_instance_s* vf){
494
if(!vf->priv) return;
498
if(vf->priv->temp[i]) free(vf->priv->temp[i]);
499
vf->priv->temp[i]= NULL;
500
if(vf->priv->src[i]) free(vf->priv->src[i]);
501
vf->priv->src[i]= NULL;
504
av_freep(&vf->priv->avctx_enc);
506
free(vf->priv->outbuf);