60
61
void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
61
62
void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block);
62
63
void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block);
64
void ff_h264_idct_add16_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
65
void ff_h264_idct_add16intra_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
66
void ff_h264_idct8_add4_c(uint8_t *dst, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
67
void ff_h264_idct_add8_c(uint8_t **dest, const int *blockoffset, DCTELEM *block, int stride, const uint8_t nnzc[6*8]);
64
69
void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1,
65
70
const float *src2, int src3, int blocksize, int step);
204
209
void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy,
205
210
int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
211
void (*clear_block)(DCTELEM *block/*align 16*/);
206
212
void (*clear_blocks)(DCTELEM *blocks/*align 16*/);
207
213
int (*pix_sum)(uint8_t * pix, int line_size);
208
214
int (*pix_norm1)(uint8_t * pix, int line_size);
/* h264 in-loop deblocking filters. v_* filter a horizontal edge (vertical
 * filtering direction), h_* a vertical edge. tc0 carries the per-4-pixel
 * clipping thresholds; *_intra variants take no tc0 (strong filtering). */
void (*h264_v_loop_filter_luma)(uint8_t *pix/*align 16*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_luma)(uint8_t *pix/*align 4 */, int stride, int alpha, int beta, int8_t *tc0);
/* v/h_loop_filter_luma_intra: align 16 */
void (*h264_v_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_h_loop_filter_luma_intra)(uint8_t *pix, int stride, int alpha, int beta);
void (*h264_v_loop_filter_chroma)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_h_loop_filter_chroma)(uint8_t *pix/*align 4*/, int stride, int alpha, int beta, int8_t *tc0);
void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix/*align 8*/, int stride, int alpha, int beta);
435
443
#define EDGE_WIDTH 16
437
445
/* h264 functions */
446
/* NOTE!!! if you implement any of h264_idct8_add, h264_idct8_add4 then you must implement all of them
447
NOTE!!! if you implement any of h264_idct_add, h264_idct_add16, h264_idct_add16intra, h264_idct_add8 then you must implement all of them
448
The reason for above, is that no 2 out of one list may use a different permutation.
438
450
void (*h264_idct_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
439
451
void (*h264_idct8_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
440
452
void (*h264_idct_dc_add)(uint8_t *dst/*align 4*/, DCTELEM *block/*align 16*/, int stride);
441
453
void (*h264_idct8_dc_add)(uint8_t *dst/*align 8*/, DCTELEM *block/*align 16*/, int stride);
442
454
void (*h264_dct)(DCTELEM block[4][4]);
455
void (*h264_idct_add16)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
456
void (*h264_idct8_add4)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
457
void (*h264_idct_add8)(uint8_t **dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
458
void (*h264_idct_add16intra)(uint8_t *dst/*align 16*/, const int *blockoffset, DCTELEM *block/*align 16*/, int stride, const uint8_t nnzc[6*8]);
444
460
/* snow wavelet */
445
461
void (*vertical_compose97i)(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
/**
 * Scalar product of two int16 vectors.
 * NOTE(review): the opening of this doxygen comment was lost in extraction;
 * only the @param line below is original — confirm remaining parameter docs
 * against the full header.
 * @param shift number of bits to discard from product
 */
int32_t (*scalarproduct_int16)(int16_t *v1, int16_t *v2/*align 16*/, int len, int shift);
505
qpel_mc_func put_rv30_tpel_pixels_tab[4][16];
506
qpel_mc_func avg_rv30_tpel_pixels_tab[4][16];
509
qpel_mc_func put_rv40_qpel_pixels_tab[4][16];
510
qpel_mc_func avg_rv40_qpel_pixels_tab[4][16];
511
h264_chroma_mc_func put_rv40_chroma_pixels_tab[3];
512
h264_chroma_mc_func avg_rv40_chroma_pixels_tab[3];
489
515
void dsputil_static_init(void);
547
573
int mm_support(void);
549
575
void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
550
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx);
576
void dsputil_init_arm(DSPContext* c, AVCodecContext *avctx);
551
577
void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
552
578
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);
553
579
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
583
609
void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);
/* Per-architecture alignment configuration (branches of an #if chain whose
 * head and #endif lie outside this chunk). On these platforms "8-byte"
 * alignment requests are widened to 16 and the picture stride is 16-aligned. */
#elif defined(ARCH_ARMV4L)

extern int mm_flags;

# define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
# define STRIDE_ALIGN 16

#elif defined(ARCH_POWERPC)

extern int mm_flags;

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
#define STRIDE_ALIGN 16

#elif defined(HAVE_MMI)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
#define STRIDE_ALIGN 16
706
732
extern float ff_sine_512 [ 512];
707
733
extern float ff_sine_1024[1024];
708
734
extern float ff_sine_2048[2048];
709
extern float *ff_sine_windows[5];
735
extern float ff_sine_4096[4096];
736
extern float *ff_sine_windows[6];
711
738
int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
712
739
void ff_imdct_calc_c(MDCTContext *s, FFTSample *output, const FFTSample *input);