3
// Port to avidemux2 by mean
4
// Original filter by M Niedermayer
7
Copyright (C) 2006 Michael Niedermayer <michaelni@gmx.at>
9
This program is free software; you can redistribute it and/or modify
10
it under the terms of the GNU General Public License as published by
11
the Free Software Foundation; either version 2 of the License, or
12
(at your option) any later version.
14
This program is distributed in the hope that it will be useful,
15
but WITHOUT ANY WARRANTY; without even the implied warranty of
16
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17
GNU General Public License for more details.
19
You should have received a copy of the GNU General Public License
20
along with this program; if not, write to the Free Software
21
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
27
#include <ADM_assert.h>
28
#include "ADM_lavcodec.h"
38
#include "ADM_toolkit/toolkit.hxx"
39
#include "ADM_editor/ADM_edit.hxx"
40
#include "ADM_video/ADM_genvideo.hxx"
42
#include "ADM_filter/video_filters.h"
43
#include "ADM_video/ADM_cache.h"
45
#include "ADM_video/ADM_vidMcDeint_param.h"
47
// Script/config parameter descriptor: 3 couples named "mode", "qp" and
// "initial_parity" (matching the MCDEINT_PARAM fields used below).
static FILTER_PARAM mcDeintParam={3,{"mode","qp","initial_parity"}};
60
// NOTE(review): file-scope global that is shadowed by a local of the same
// name inside init() below and appears otherwise unused in this view —
// consider making it static or removing it (confirm no other TU uses it).
AVCodecContext *avctx_enc;
66
class AVDMVideoMCDeint:public AVDMGenericVideoStream
70
MCDEINT_PARAM *_param;
76
virtual char *printConf(void) ;
77
AVDMVideoMCDeint( AVDMGenericVideoStream *in,CONFcouple *setup);
79
virtual uint8_t getFrameNumberNoAlloc(uint32_t frame, uint32_t *len,
80
ADMImage *data,uint32_t *flags);
82
virtual uint8_t configure( AVDMGenericVideoStream *instream);
83
virtual uint8_t getCoupledConf( CONFcouple **couples);
86
SCRIPT_CREATE(mcdeint_script,AVDMVideoMCDeint,mcDeintParam);
87
BUILD_CREATE(mcdeint_create,AVDMVideoMCDeint);
89
// Forward declaration of the core deinterlacing pass (defined later in this
// file): motion-compensates src into dst using the encoder state held in *p.
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height);
90
// Configuration dialog, implemented elsewhere; its return value is used as a
// boolean success flag by AVDMVideoMCDeint::configure().
uint8_t DIA_mcDeint(MCDEINT_PARAM *param);
93
char *AVDMVideoMCDeint::printConf( void )
97
sprintf((char *)buf," MC deinterlacer : Mode %d, qp %d, parity %d ",_param->mode,_param->qp,_param->initial_parity);
100
uint8_t AVDMVideoMCDeint::configure(AVDMGenericVideoStream * instream)
102
if( DIA_mcDeint(_param))
110
uint8_t AVDMVideoMCDeint::getCoupledConf( CONFcouple **couples)
114
*couples=new CONFcouple(3);
116
// Helper for getCoupledConf(): stores field x of _param into *couples
// under the stringized key "x".
#define CSET(x) (*couples)->setCouple((char *)#x,(_param->x))
119
CSET(initial_parity);
124
//_______________________________________________________________
125
AVDMVideoMCDeint::AVDMVideoMCDeint(AVDMGenericVideoStream *in,CONFcouple *couples)
129
memcpy(&_info,_in->getInfo(),sizeof(_info));
130
_param=NEW(MCDEINT_PARAM);
131
vidCache=new VideoCache(4,_in);
142
_param->initial_parity=0;
151
// ___ destructor_____________
152
AVDMVideoMCDeint::~AVDMVideoMCDeint()
160
uint8_t AVDMVideoMCDeint::getFrameNumberNoAlloc(uint32_t frame,
165
if(frame>=_info.nb_frames)
167
printf("MPdelogo : Filter : out of bound!\n");
175
curImage=vidCache->getImage(frame);
178
printf("MCDeint : error getting frame\n");
182
// Prepare to call filter...
183
uint8_t *dplanes[3],*splanes[3];
184
int dstride[3],sstride[3];
186
dstride[0]=sstride[0]=_info.width;
187
dstride[2]=sstride[2]=dstride[1]=sstride[1]=_info.width>>1;
189
splanes[0]=YPLANE(curImage);
190
splanes[1]=UPLANE(curImage);
191
splanes[2]=VPLANE(curImage);
193
dplanes[0]=YPLANE(data);
194
dplanes[1]=UPLANE(data);
195
dplanes[2]=VPLANE(data);
198
filter(&priv, dplanes, splanes, dstride, sstride, _info.width, _info.height);
199
vidCache->unlockAll();
202
uint8_t AVDMVideoMCDeint::init( void )
204
memset(&priv,0,sizeof(priv));
207
AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);
212
AVCodecContext *avctx_enc;
213
avctx_enc= priv.avctx_enc= avcodec_alloc_context();
214
avctx_enc->width = _info.width;
215
avctx_enc->height = _info.height;
216
avctx_enc->time_base= (AVRational){1,25}; // meaningless
217
avctx_enc->gop_size = 300;
218
avctx_enc->max_b_frames= 0;
219
avctx_enc->pix_fmt = PIX_FMT_YUV420P;
220
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
221
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
222
avctx_enc->global_quality= 1;
223
avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
225
avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
226
avctx_enc->mb_cmp= FF_CMP_SSE;
233
avctx_enc->me_method= ME_ITER;
235
avctx_enc->flags |= CODEC_FLAG_4MV;
236
avctx_enc->dia_size=2;
237
// avctx_enc->mb_decision = MB_DECISSION_RD;
239
avctx_enc->flags |= CODEC_FLAG_QPEL;
242
avcodec_open(avctx_enc, enc);
245
priv.frame= avcodec_alloc_frame();
247
priv.outbuf_size= _info.width*_info.height*10;
248
priv.outbuf= (uint8_t *)ADM_alloc(priv.outbuf_size);
249
priv.parity=_param->initial_parity;
252
uint8_t AVDMVideoMCDeint::cleanup( void )
255
avcodec_close(priv.avctx_enc);
256
av_free(priv.avctx_enc);
257
ADM_dezalloc(priv.outbuf);
258
memset(&priv,0,sizeof(priv));
265
* The motion estimation is somewhat at the mercy of the input, if the input
266
frames are created purely based on spatial interpolation then for example
267
a thin black line or another random and not interpolateable pattern
269
Note: completely ignoring the "unavailable" lines during motion estimation
270
didn't look any better, so the most obvious solution would be to improve
271
tfields or penalize problematic motion vectors ...
273
* If non iterative ME is used then snow currently ignores the OBMC window
274
and as a result sometimes creates artifacts
276
* only past frames are used, we should ideally use future frames too, something
277
like filtering the whole movie in forward and then backward direction seems
278
like an interesting idea, but the current filter framework is FAR from
279
supporting such things
281
* combining the motion compensated image with the input image also isn't
282
as trivial as it seems, simple blindly taking even lines from one and
283
odd ones from the other doesn't work at all, as ME/MC sometimes simply
284
has nothing in the previous frames which matches the current, the current
285
algo has been found by trial and error and almost certainly can be
291
/* Small arithmetic helpers.
 * WARNING: classic function-like macros — each argument may be evaluated
 * more than once, so never pass expressions with side effects (e.g. i++). */
#define MIN(a,b) ((a) > (b) ? (b) : (a))
#define MAX(a,b) ((a) < (b) ? (b) : (a))
#define ABS(a)   ((a) > 0 ? (a) : (-(a)))
295
//===========================================================================//
298
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height){
303
p->frame->data[i]= src[i];
304
p->frame->linesize[i]= src_stride[i];
307
p->avctx_enc->me_cmp=
308
p->avctx_enc->me_sub_cmp= FF_CMP_SAD /*| (p->parity ? FF_CMP_ODD : FF_CMP_EVEN)*/;
309
p->frame->quality= p->qp*FF_QP2LAMBDA;
310
out_size = avcodec_encode_video(p->avctx_enc, p->outbuf, p->outbuf_size, p->frame);
311
p->frame_dec = p->avctx_enc->coded_frame;
315
int w= width >>is_chroma;
316
int h= height>>is_chroma;
317
int fils= p->frame_dec->linesize[i];
318
int srcs= src_stride[i];
321
if((y ^ p->parity) & 1){
323
if((x-2)+(y-1)*w>=0 && (x+2)+(y+1)*w<w*h){ //FIXME either alloc larger images or optimize this
324
uint8_t *filp= &p->frame_dec->data[i][x + y*fils];
325
uint8_t *srcp= &src[i][x + y*srcs];
326
int diff0= filp[-fils] - srcp[-srcs];
327
int diff1= filp[+fils] - srcp[+srcs];
328
int spatial_score= ABS(srcp[-srcs-1] - srcp[+srcs-1])
329
+ABS(srcp[-srcs ] - srcp[+srcs ])
330
+ABS(srcp[-srcs+1] - srcp[+srcs+1]) - 1;
334
{ int score= ABS(srcp[-srcs-1+j] - srcp[+srcs-1-j])\
335
+ ABS(srcp[-srcs +j] - srcp[+srcs -j])\
336
+ ABS(srcp[-srcs+1+j] - srcp[+srcs+1-j]);\
337
if(score < spatial_score){\
338
spatial_score= score;\
339
diff0= filp[-fils+j] - srcp[-srcs+j];\
340
diff1= filp[+fils-j] - srcp[+srcs-j];
342
CHECK(-1) CHECK(-2) }} }}
343
CHECK( 1) CHECK( 2) }} }}
345
if((diff0 ^ diff1) > 0){
346
int mindiff= ABS(diff0) > ABS(diff1) ? diff1 : diff0;
350
if(diff0 + diff1 > 0)
351
temp-= (diff0 + diff1 - ABS( ABS(diff0) - ABS(diff1) )/2)/2;
353
temp-= (diff0 + diff1 + ABS( ABS(diff0) - ABS(diff1) )/2)/2;
355
temp-= (diff0 + diff1)/2;
359
dst[i][x + y*dst_stride[i]]= temp > 255U ? ~(temp>>31) : temp;
361
dst[i][x + y*dst_stride[i]]= filp[0];
362
filp[0]= temp > 255U ? ~(temp>>31) : temp;
365
dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
370
if(!((y ^ p->parity) & 1)){
373
p->frame_dec->data[i][x + y*fils]=
374
dst[i][x + y*dst_stride[i]]= src[i][x + y*srcs];
376
dst[i][x + y*dst_stride[i]]= p->frame_dec->data[i][x + y*fils];
377
p->frame_dec->data[i][x + y*fils]= src[i][x + y*srcs];
386
#ifdef titititititi_II
387
static int config(struct vf_instance_s* vf,
388
int width, int height, int d_width, int d_height,
389
unsigned int flags, unsigned int outfmt){
391
AVCodec *enc= avcodec_find_encoder(CODEC_ID_SNOW);
394
AVCodecContext *avctx_enc;
397
int w= ((width + 31) & (~31))>>is_chroma;
398
int h= ((height + 31) & (~31))>>is_chroma;
400
vf->priv->temp_stride[i]= w;
401
vf->priv->temp[i]= malloc(vf->priv->temp_stride[i]*h*sizeof(int16_t));
402
vf->priv->src [i]= malloc(vf->priv->temp_stride[i]*h*sizeof(uint8_t));
405
vf->priv->avctx_enc= avcodec_alloc_context();
406
avctx_enc->width = width;
407
avctx_enc->height = height;
408
avctx_enc->time_base= (AVRational){1,25}; // meaningless
409
avctx_enc->gop_size = 300;
410
avctx_enc->max_b_frames= 0;
411
avctx_enc->pix_fmt = PIX_FMT_YUV420P;
412
avctx_enc->flags = CODEC_FLAG_QSCALE | CODEC_FLAG_LOW_DELAY;
413
avctx_enc->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
414
avctx_enc->global_quality= 1;
415
avctx_enc->flags2= CODEC_FLAG2_MEMC_ONLY;
417
avctx_enc->me_sub_cmp= FF_CMP_SAD; //SSE;
418
avctx_enc->mb_cmp= FF_CMP_SSE;
420
switch(vf->priv->mode){
424
avctx_enc->me_method= ME_ITER;
426
avctx_enc->flags |= CODEC_FLAG_4MV;
427
avctx_enc->dia_size=2;
428
// avctx_enc->mb_decision = MB_DECISSION_RD;
430
avctx_enc->flags |= CODEC_FLAG_QPEL;
433
avcodec_open(avctx_enc, enc);
436
vf->priv->frame= avcodec_alloc_frame();
438
vf->priv->outbuf_size= width*height*10;
439
vf->priv->outbuf= malloc(vf->priv->outbuf_size);
441
return vf_next_config(vf,width,height,d_width,d_height,flags,outfmt);
444
static void get_image(struct vf_instance_s* vf, mp_image_t *mpi){
445
if(mpi->flags&MP_IMGFLAG_PRESERVE) return; // don't change
446
return; //caused problems, dunno why
447
// ok, we can do pp in-place (or pp disabled):
448
vf->dmpi=vf_get_image(vf->next,mpi->imgfmt,
449
mpi->type, mpi->flags | MP_IMGFLAG_READABLE, mpi->width, mpi->height);
450
mpi->planes[0]=vf->dmpi->planes[0];
451
mpi->stride[0]=vf->dmpi->stride[0];
452
mpi->width=vf->dmpi->width;
453
if(mpi->flags&MP_IMGFLAG_PLANAR){
454
mpi->planes[1]=vf->dmpi->planes[1];
455
mpi->planes[2]=vf->dmpi->planes[2];
456
mpi->stride[1]=vf->dmpi->stride[1];
457
mpi->stride[2]=vf->dmpi->stride[2];
459
mpi->flags|=MP_IMGFLAG_DIRECT;
462
static int put_image(struct vf_instance_s* vf, mp_image_t *mpi, double pts){
465
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
466
// no DR, so get a new image! hope we'll get DR buffer:
467
dmpi=vf_get_image(vf->next,mpi->imgfmt,
469
MP_IMGFLAG_ACCEPT_STRIDE|MP_IMGFLAG_PREFER_ALIGNED_STRIDE,
470
mpi->width,mpi->height);
471
vf_clone_mpi_attributes(dmpi, mpi);
476
filter(vf->priv, dmpi->planes, mpi->planes, dmpi->stride, mpi->stride, mpi->w, mpi->h);
478
return vf_next_put_image(vf,dmpi, pts);
481
static void uninit(struct vf_instance_s* vf){
482
if(!vf->priv) return;
486
if(vf->priv->temp[i]) free(vf->priv->temp[i]);
487
vf->priv->temp[i]= NULL;
488
if(vf->priv->src[i]) free(vf->priv->src[i]);
489
vf->priv->src[i]= NULL;
492
av_freep(&vf->priv->avctx_enc);
494
free(vf->priv->outbuf);