@@ ... @@
             zeros_left= get_vlc2(gb, (total_zeros_vlc-1)[ total_coeff ].table, TOTAL_ZEROS_VLC_BITS, 1);
     }

-    scantable += zeros_left + total_coeff - 1;
-    if(n >= LUMA_DC_BLOCK_INDEX){
-        block[*scantable] = level[0];
-        for(i=1;i<total_coeff && zeros_left > 0;i++) {
-            if(zeros_left < 7)
-                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1);
-            else
-                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
-            zeros_left -= run_before;
-            scantable -= 1 + run_before;
-            block[*scantable]= level[i];
-        }
-        for(;i<total_coeff;i++) {
-            scantable--;
-            block[*scantable]= level[i];
-        }
-    }else{
-        block[*scantable] = (level[0] * qmul[*scantable] + 32)>>6;
-        for(i=1;i<total_coeff && zeros_left > 0;i++) {
-            if(zeros_left < 7)
-                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1);
-            else
-                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2);
-            zeros_left -= run_before;
-            scantable -= 1 + run_before;
-            block[*scantable]= (level[i] * qmul[*scantable] + 32)>>6;
-        }
-        for(;i<total_coeff;i++) {
-            scantable--;
-            block[*scantable]= (level[i] * qmul[*scantable] + 32)>>6;
-        }
-    }
+#define STORE_BLOCK(type) \
+    scantable += zeros_left + total_coeff - 1; \
+    if(n >= LUMA_DC_BLOCK_INDEX){ \
+        ((type*)block)[*scantable] = level[0]; \
+        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
+            if(zeros_left < 7) \
+                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1); \
+            else \
+                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
+            zeros_left -= run_before; \
+            scantable -= 1 + run_before; \
+            ((type*)block)[*scantable]= level[i]; \
+        } \
+        for(;i<total_coeff;i++) { \
+            scantable--; \
+            ((type*)block)[*scantable]= level[i]; \
+        } \
+    }else{ \
+        ((type*)block)[*scantable] = ((int)(level[0] * qmul[*scantable] + 32))>>6; \
+        for(i=1;i<total_coeff && zeros_left > 0;i++) { \
+            if(zeros_left < 7) \
+                run_before= get_vlc2(gb, (run_vlc-1)[zeros_left].table, RUN_VLC_BITS, 1); \
+            else \
+                run_before= get_vlc2(gb, run7_vlc.table, RUN7_VLC_BITS, 2); \
+            zeros_left -= run_before; \
+            scantable -= 1 + run_before; \
+            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
+        } \
+        for(;i<total_coeff;i++) { \
+            scantable--; \
+            ((type*)block)[*scantable]= ((int)(level[i] * qmul[*scantable] + 32))>>6; \
+        } \
+    }
+
+    if (h->pixel_shift) {
+        STORE_BLOCK(int32_t)
+    } else {
+        STORE_BLOCK(int16_t)
+    }

     if(zeros_left<0){
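The hunk above folds the two nearly identical coefficient-store loops into a single STORE_BLOCK(type) macro, so the same zero-run bookkeeping can write either 16-bit or 32-bit coefficients depending on pixel_shift. A minimal standalone sketch of that pattern, under the assumption that pixel_shift is 0 for 8-bit content and 1 otherwise (names here are illustrative, not FFmpeg API):

    #include <stdint.h>

    /* Sketch only: the coefficient buffer is handed around as an untyped
     * pointer and the store is expanded once per coefficient width, so the
     * scan/run logic itself is written only once. */
    #define STORE_COEFF(type, block, idx, val) \
        (((type *)(block))[idx] = (val))

    static void store_coeff(void *block, int idx, int val, int pixel_shift)
    {
        if (pixel_shift)                  /* presumably >8-bit content: 32-bit coeffs */
            STORE_COEFF(int32_t, block, idx, val);
        else                              /* 8-bit content: 16-bit coeffs */
            STORE_COEFF(int16_t, block, idx, val);
    }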
@@ ... @@
     int partition_count;
     unsigned int mb_type, cbp;
     int dct8x8_allowed= h->pps.transform_8x8_mode;
+    const int pixel_shift = h->pixel_shift;

     mb_xy = h->mb_xy = s->mb_x + s->mb_y*s->mb_stride;

     tprintf(s->avctx, "pic:%d mb:%d/%d\n", h->frame_num, s->mb_x, s->mb_y);
     cbp = 0; /* avoid warning. FIXME: find a solution without slowing
                 down the code */
-    if(h->slice_type_nos != FF_I_TYPE){
+    if(h->slice_type_nos != AV_PICTURE_TYPE_I){
         if(s->mb_skip_run==-1)
             s->mb_skip_run= get_ue_golomb(&s->gb);
@@ ... @@
                 goto decode_intra_mb;
@@ ... @@
-        assert(h->slice_type_nos == FF_I_TYPE);
-        if(h->slice_type == FF_SI_TYPE && mb_type)
+        assert(h->slice_type_nos == AV_PICTURE_TYPE_I);
+        if(h->slice_type == AV_PICTURE_TYPE_SI && mb_type)
             mb_type--;
 decode_intra_mb:
         if(mb_type > 25){
-            av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_pict_type_char(h->slice_type), s->mb_x, s->mb_y);
+            av_log(h->s.avctx, AV_LOG_ERROR, "mb_type %d in %c slice too large at %d %d\n", mb_type, av_get_picture_type_char(h->slice_type), s->mb_x, s->mb_y);
             return -1;
         }
         partition_count=0;
@@ ... @@
     }else if(partition_count==4){
         int i, j, sub_partition_count[4], list, ref[2][4];

-        if(h->slice_type_nos == FF_B_TYPE){
+        if(h->slice_type_nos == AV_PICTURE_TYPE_B){
             for(i=0; i<4; i++){
                 h->sub_mb_type[i]= get_ue_golomb_31(&s->gb);
                 if(h->sub_mb_type[i] >=13){
@@ ... @@
         s->qscale += dquant;

-        if(((unsigned)s->qscale) > 51){
-            if(s->qscale<0) s->qscale+= 52;
-            else            s->qscale-= 52;
-            if(((unsigned)s->qscale) > 51){
+        if(((unsigned)s->qscale) > max_qp){
+            if(s->qscale<0) s->qscale+= max_qp+1;
+            else            s->qscale-= max_qp+1;
+            if(((unsigned)s->qscale) > max_qp){
                 av_log(h->s.avctx, AV_LOG_ERROR, "dquant out of range (%d) at %d %d\n", dquant, s->mb_x, s->mb_y);
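The hard-coded 51 becomes max_qp, whose definition is not part of this hunk; presumably it is derived from the luma bit depth, since H.264 widens the QP range by 6 for each bit above 8 (QpBdOffsetY). A hedged sketch of such a definition (the field name h->sps.bit_depth_luma is an assumption, not confirmed by this diff):

    /* Assumption: qscale is stored with QpBdOffsetY already folded in, so the
     * valid range is [0, 51 + 6*(bit_depth_luma - 8)]. */
    const int max_qp = 51 + 6 * (h->sps.bit_depth_luma - 8);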
@@ ... @@
         if(IS_INTRA16x16(mb_type)){
             AV_ZERO128(h->mb_luma_dc+0);
             AV_ZERO128(h->mb_luma_dc+8);
+            AV_ZERO128(h->mb_luma_dc+16);
+            AV_ZERO128(h->mb_luma_dc+24);
             if( decode_residual(h, h->intra_gb_ptr, h->mb_luma_dc, LUMA_DC_BLOCK_INDEX, scan, h->dequant4_coeff[0][s->qscale], 16) < 0){
                 return -1; //FIXME continue if partitioned and other return -1 too
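The two extra AV_ZERO128 calls follow from the wider coefficients: the 16 luma DC values occupy 32 bytes as int16_t (two 16-byte clears) but 64 bytes as int32_t (four). A tiny check of that arithmetic, not part of the patch:

    #include <assert.h>
    #include <stdint.h>

    int main(void)
    {
        /* 16 luma DC coefficients per macroblock */
        assert(16 * sizeof(int16_t) == 2 * 16);   /* 8-bit depth: 2 x AV_ZERO128 */
        assert(16 * sizeof(int32_t) == 4 * 16);   /* >8-bit depth: 4 x AV_ZERO128 */
        return 0;
    }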
@@ ... @@
             for(i8x8=0; i8x8<4; i8x8++){
                 for(i4x4=0; i4x4<4; i4x4++){
                     const int index= i4x4 + 4*i8x8;
-                    if( decode_residual(h, h->intra_gb_ptr, h->mb + 16*index, index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
+                    if( decode_residual(h, h->intra_gb_ptr, h->mb + (16*index << pixel_shift), index, scan + 1, h->dequant4_coeff[0][s->qscale], 15) < 0 ){
@@ ... @@
                 for(i4x4=0; i4x4<4; i4x4++){
                     const int index= i4x4 + 4*i8x8;

-                    if( decode_residual(h, gb, h->mb + 16*index, index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
+                    if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index, scan, h->dequant4_coeff[IS_INTRA( mb_type ) ? 0:3][s->qscale], 16) <0 ){
@@ ... @@
         for(chroma_idx=0; chroma_idx<2; chroma_idx++)
-            if( decode_residual(h, gb, h->mb + 256 + 16*4*chroma_idx, CHROMA_DC_BLOCK_INDEX+chroma_idx, chroma_dc_scan, NULL, 4) < 0){
+            if( decode_residual(h, gb, h->mb + ((256 + 16*4*chroma_idx) << pixel_shift), CHROMA_DC_BLOCK_INDEX+chroma_idx, chroma_dc_scan, NULL, 4) < 0){
@@ ... @@
             const uint32_t *qmul = h->dequant4_coeff[chroma_idx+1+(IS_INTRA( mb_type ) ? 0:3)][h->chroma_qp[chroma_idx]];
             for(i4x4=0; i4x4<4; i4x4++){
                 const int index= 16 + 4*chroma_idx + i4x4;
-                if( decode_residual(h, gb, h->mb + 16*index, index, scan + 1, qmul, 15) < 0){
+                if( decode_residual(h, gb, h->mb + (16*index << pixel_shift), index, scan + 1, qmul, 15) < 0){
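All of these decode_residual call sites now scale their offset into h->mb by pixel_shift; presumably the buffer remains addressed in 16-bit units, so with 32-bit coefficients every per-block element offset doubles. A self-contained sketch of that addressing trick (names illustrative, not FFmpeg's declarations):

    #include <stdint.h>

    /* Illustrative only: 'mb' is a coefficient buffer addressed in 16-bit units.
     * With pixel_shift == 0 each 4x4 block holds 16 int16_t coefficients; with
     * pixel_shift == 1 it holds 16 int32_t coefficients, i.e. twice as many
     * 16-bit slots, so the shift doubles the element offset. */
    static void *block_ptr(int16_t *mb, int index, int pixel_shift)
    {
        return mb + (16 * index << pixel_shift);
    }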