#include "internal.h"
#include "avcodec.h"
#include "mpegvideo.h"
#include "mpegutils.h"
#include "rectangle.h"
#include "thread.h"

#include <assert.h>
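/* Temporal-direct distance scale factor for list-0 reference i:
 * with td = clip(-128, 127, poc1 - poc0) and tb = clip(-128, 127, poc - poc0),
 * tx = (16384 + |td|/2) / td and the result is clip(-1024, 1023, (tb*tx + 32) >> 6);
 * 256 (unit scale) is returned for zero distance or long-term references. */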
static int get_scale_factor(H264Context *const h, int poc, int poc1, int i)
{
    int poc0 = h->ref_list[0][i].poc;
    int td   = av_clip(poc1 - poc0, -128, 127);
    if (td == 0 || h->ref_list[0][i].long_ref) {
        return 256;
    } else {
        int tb = av_clip(poc - poc0, -128, 127);
        int tx = (16384 + (FFABS(td) >> 1)) / td;
        return av_clip((tb * tx + 32) >> 6, -1024, 1023);
    }
}
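/* Precomputes dist_scale_factor[] for every list-0 reference of the current
 * slice (and, for MBAFF frames, the per-field dist_scale_factor_field[][]),
 * so temporal direct prediction can scale colocated motion vectors with a
 * simple table lookup. */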
void ff_h264_direct_dist_scale_factor(H264Context *const h)
{
    const int poc  = FIELD_PICTURE(h) ? h->cur_pic_ptr->field_poc[h->picture_structure == PICT_BOTTOM_FIELD]
                                      : h->cur_pic_ptr->poc;
    const int poc1 = h->ref_list[1][0].poc;
    int i, field;

    if (FRAME_MBAFF(h))
        for (field = 0; field < 2; field++) {
            const int poc  = h->cur_pic_ptr->field_poc[field];
            const int poc1 = h->ref_list[1][0].field_poc[field];
            for (i = 0; i < 2 * h->ref_count[0]; i++)
                h->dist_scale_factor_field[field][i ^ field] =
                    get_scale_factor(h, poc, poc1, i + 16);
        }

    for (i = 0; i < h->ref_count[0]; i++)
        h->dist_scale_factor[i] = get_scale_factor(h, poc, poc1, i);
}
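/* Builds map_col_to_list0: for each reference of the colocated (list-1)
 * picture, finds the entry of the current list 0 that refers to the same
 * frame/field, so colocated reference indices can be remapped during
 * temporal direct prediction. */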
static void fill_colmap(H264Context *h, int map[2][16 + 32], int list,
                        int field, int colfield, int mbafi)
{
    H264Picture *const ref1 = &h->ref_list[1][0];
    int j, old_ref, rfield;
    int start  = mbafi ? 16                       : 0;
    int end    = mbafi ? 16 + 2 * h->ref_count[0] : h->ref_count[0];
    int interl = mbafi || h->picture_structure != PICT_FRAME;

    /* bogus; fills in for missing frames */
    memset(map[list], 0, sizeof(map[list]));

    for (rfield = 0; rfield < 2; rfield++) {
        for (old_ref = 0; old_ref < ref1->ref_count[colfield][list]; old_ref++) {
            int poc = ref1->ref_poc[colfield][list][old_ref];

            if (!interl)
                poc |= 3;
            // FIXME: store all MBAFF references so this is not needed
            else if (interl && (poc & 3) == 3)
                poc = (poc & ~3) + rfield + 1;

            for (j = start; j < end; j++) {
                if (4 * h->ref_list[0][j].frame_num +
                    (h->ref_list[0][j].reference & 3) == poc) {
                    int cur_ref = mbafi ? (j - 16) ^ field : j;
                    if (ref1->mbaff)
                        map[list][2 * old_ref + (rfield ^ field) + 16] = cur_ref;
                    if (rfield == field || !interl)
                        map[list][old_ref] = cur_ref;
                    break;
                }
            }
        }
    }
}
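/* Caches the reference counts/POCs of the current picture, determines the
 * colocated picture's parity and field offset, and (for temporal direct
 * slices) fills the colocated-to-list0 maps used by pred_temp_direct_motion(). */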
void ff_h264_direct_ref_list_init(H264Context *const h)
{
    H264Picture *const ref1 = &h->ref_list[1][0];
    H264Picture *const cur  = h->cur_pic_ptr;
    int list, j, field;
    int sidx     = (h->picture_structure & 1) ^ 1;
    int ref1sidx = (ref1->reference      & 1) ^ 1;

    for (list = 0; list < 2; list++) {
        cur->ref_count[sidx][list] = h->ref_count[list];
        for (j = 0; j < h->ref_count[list]; j++)
            cur->ref_poc[sidx][list][j] = 4 * h->ref_list[list][j].frame_num +
                                          (h->ref_list[list][j].reference & 3);
    }

    if (h->picture_structure == PICT_FRAME) {
        memcpy(cur->ref_count[1], cur->ref_count[0], sizeof(cur->ref_count[0]));
        memcpy(cur->ref_poc[1],   cur->ref_poc[0],   sizeof(cur->ref_poc[0]));
    }

    cur->mbaff = FRAME_MBAFF(h);

    h->col_fieldoff = 0;

    if (h->picture_structure == PICT_FRAME) {
        int cur_poc  = h->cur_pic_ptr->poc;
        int *col_poc = h->ref_list[1]->field_poc;
        h->col_parity = (FFABS(col_poc[0] - cur_poc) >=
                         FFABS(col_poc[1] - cur_poc));
        ref1sidx      =
        sidx          = h->col_parity;
    // FL -> FL & differ parity
    } else if (!(h->picture_structure & h->ref_list[1][0].reference) &&
               !h->ref_list[1][0].mbaff) {
        h->col_fieldoff = 2 * h->ref_list[1][0].reference - 3;
    }

    if (h->slice_type_nos != AV_PICTURE_TYPE_B || h->direct_spatial_mv_pred)
        return;

    for (list = 0; list < 2; list++) {
        fill_colmap(h, h->map_col_to_list0, list, sidx, ref1sidx, 0);
        if (FRAME_MBAFF(h))
            for (field = 0; field < 2; field++)
                fill_colmap(h, h->map_col_to_list0_field[field], list, field,
                            field, 1);
    }
}
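/* With frame threading, waits until the colocated reference picture has been
 * decoded at least up to the macroblock row we are about to read from,
 * clamped to the reference's height. */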
static void await_reference_mb_row(H264Context * const h, Picture *ref, int mb_y)
155
static void await_reference_mb_row(H264Context *const h, H264Picture *ref,
145
int ref_field = ref->reference - 1;
158
int ref_field = ref->reference - 1;
146
159
int ref_field_picture = ref->field_picture;
147
int ref_height = 16*h->mb_height >> ref_field_picture;
160
int ref_height = 16 * h->mb_height >> ref_field_picture;
149
if(!HAVE_THREADS || !(h->avctx->active_thread_type&FF_THREAD_FRAME))
162
if (!HAVE_THREADS || !(h->avctx->active_thread_type & FF_THREAD_FRAME))
152
//FIXME it can be safe to access mb stuff
153
//even if pixels aren't deblocked yet
165
/* FIXME: It can be safe to access mb stuff
166
* even if pixels aren't deblocked yet. */
155
168
ff_thread_await_progress(&ref->tf,
156
FFMIN(16 * mb_y >> ref_field_picture, ref_height - 1),
169
FFMIN(16 * mb_y >> ref_field_picture,
157
171
ref_field_picture && ref_field);
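/* Spatial direct mode: derives one reference index and motion vector per
 * list from the neighbouring macroblocks (left, top, top-right), then zeroes
 * the prediction for those 8x8/4x4 blocks whose colocated block in the
 * list-1 picture is a near-stationary short-term list-0 reference. */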
static void pred_spatial_direct_motion(H264Context *const h, int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = h->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type = MB_TYPE_L0L1;
    int i8, i4;
    int ref[2];
    int mv[2];
    int list;

    assert(h->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0],
                           h->mb_y + !!IS_INTERLACED(*mb_type));

#define MB_TYPE_16x16_OR_INTRA (MB_TYPE_16x16 | MB_TYPE_INTRA4x4 | \
                                MB_TYPE_INTRA16x16 | MB_TYPE_INTRA_PCM)
    /* ref = min(neighbors) */
    for (list = 0; list < 2; list++) {
        int left_ref     = h->ref_cache[list][scan8[0] - 1];
        int top_ref      = h->ref_cache[list][scan8[0] - 8];
        int refc         = h->ref_cache[list][scan8[0] - 8 + 4];
        const int16_t *C = h->mv_cache[list][scan8[0] - 8 + 4];
        if (refc == PART_NOT_AVAILABLE) {
            refc = h->ref_cache[list][scan8[0] - 8 - 1];
            C    = h->mv_cache[list][scan8[0] - 8 - 1];
        }
        ref[list] = FFMIN3((unsigned)left_ref,
                           (unsigned)top_ref,
                           (unsigned)refc);
        if (ref[list] >= 0) {
            /* This is just pred_motion() but with the cases removed that
             * cannot happen for direct blocks. */
            const int16_t *const A = h->mv_cache[list][scan8[0] - 1];
            const int16_t *const B = h->mv_cache[list][scan8[0] - 8];

            int match_count = (left_ref == ref[list]) +
                              (top_ref  == ref[list]) +
                              (refc     == ref[list]);

            if (match_count > 1) { // most common
                mv[list] = pack16to32(mid_pred(A[0], B[0], C[0]),
                                      mid_pred(A[1], B[1], C[1]));
            } else {
                assert(match_count == 1);
                if (left_ref == ref[list])
                    mv[list] = AV_RN32A(A);
                else if (top_ref == ref[list])
                    mv[list] = AV_RN32A(B);
                else
                    mv[list] = AV_RN32A(C);
            }
        } else {
            int mask = ~(MB_TYPE_L0 << (2 * list));
            mv[list]  = 0;
            ref[list] = -1;
            if (!is_b8x8)
                *mb_type &= mask;
            sub_mb_type &= mask;
        }
    }
    if (ref[0] < 0 && ref[1] < 0) {
        ref[0] = ref[1] = 0;
        if (!is_b8x8)
            *mb_type |= MB_TYPE_L0L1;
        sub_mb_type |= MB_TYPE_L0L1;
    }

    if (!(is_b8x8 | mv[0] | mv[1])) {
        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
        *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                 MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                   MB_TYPE_16x16 | MB_TYPE_DIRECT2;
        return;
    }
    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                    //     AFR/FR    -> AFL/FL
            mb_y  = (h->mb_y & ~1) + h->col_parity;
            mb_xy = h->mb_x +
                    ((h->mb_y & ~1) + h->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += h->col_fieldoff;
            mb_xy += h->mb_stride * h->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                        // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {              // AFL       /FL -> AFR/FR
            mb_y  =  h->mb_y & ~1;
            mb_xy = (h->mb_y & ~1) * h->mb_stride + h->mb_x;
            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_DIRECT2; /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8;
            }
        } else {                                    //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];

            sub_mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_DIRECT2; /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->sps.direct_8x8_inference_flag) {
                    /* FIXME: Save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use. */
                    sub_mb_type += (MB_TYPE_8x8 - MB_TYPE_16x16); /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8;
            }
        }
    }
    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].ref_index[0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (h->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
    }
    if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            int x8  = i8 & 1;
            int y8  = i8 >> 1;
            int xy8 = x8     + y8 * b8_stride;
            int xy4 = x8 * 3 + y8 * b4_stride;
            int a, b;

            if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);
            if (!IS_INTRA(mb_type_col[y8]) && !h->ref_list[1][0].long_ref &&
                ((l1ref0[xy8] == 0 &&
                  FFABS(l1mv0[xy4][0]) <= 1 &&
                  FFABS(l1mv0[xy4][1]) <= 1) ||
                 (l1ref0[xy8] < 0 &&
                  l1ref1[xy8] == 0 &&
                  FFABS(l1mv1[xy4][0]) <= 1 &&
                  FFABS(l1mv1[xy4][1]) <= 1))) {
                a =
                b = 0;
                if (ref[0] > 0)
                    a = mv[0];
                if (ref[1] > 0)
                    b = mv[1];
                n++;
            } else {
                a = mv[0];
                b = mv[1];
            }
            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, a, 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, b, 4);
        }
        if (!is_b8x8 && !(n & 3))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    } else if (IS_16X16(*mb_type)) {
        int a, b;

        fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, (uint8_t)ref[0], 1);
        fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, (uint8_t)ref[1], 1);
        if (!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref &&
            ((l1ref0[0] == 0 &&
              FFABS(l1mv0[0][0]) <= 1 &&
              FFABS(l1mv0[0][1]) <= 1) ||
             (l1ref0[0] < 0 && !l1ref1[0] &&
              FFABS(l1mv1[0][0]) <= 1 &&
              FFABS(l1mv1[0][1]) <= 1 &&
              h->x264_build > 33U))) {
            a = b = 0;
            if (ref[0] > 0)
                a = mv[0];
            if (ref[1] > 0)
                b = mv[1];
        } else {
            a = mv[0];
            b = mv[1];
        }
        fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, a, 4);
        fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, b, 4);
    } else {
        int n = 0;
        for (i8 = 0; i8 < 4; i8++) {
            const int x8 = i8 & 1;
            const int y8 = i8 >> 1;

            if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                continue;
            h->sub_mb_type[i8] = sub_mb_type;

            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, mv[0], 4);
            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, mv[1], 4);
            fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[0], 1);
            fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8,
                           (uint8_t)ref[1], 1);

            assert(b8_stride == 2);
            if (!IS_INTRA(mb_type_col[0]) && !h->ref_list[1][0].long_ref &&
                (l1ref0[i8] == 0 ||
                 (l1ref0[i8] < 0 &&
                  l1ref1[i8] == 0 &&
                  h->x264_build > 33U))) {
                const int16_t (*l1mv)[2] = l1ref0[i8] == 0 ? l1mv0 : l1mv1;
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                        if (ref[0] == 0)
                            fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        if (ref[1] == 0)
                            fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2,
                                           8, 0, 4);
                        n += 4;
                    }
                } else {
                    int m = 0;
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        if (FFABS(mv_col[0]) <= 1 && FFABS(mv_col[1]) <= 1) {
                            if (ref[0] == 0)
                                AV_ZERO32(h->mv_cache[0][scan8[i8 * 4 + i4]]);
                            if (ref[1] == 0)
                                AV_ZERO32(h->mv_cache[1][scan8[i8 * 4 + i4]]);
                            m++;
                        }
                    }
                    if (m == 4)
                        h->sub_mb_type[i8] += MB_TYPE_16x16 - MB_TYPE_8x8;
                    n += m;
                }
            }
        }
        if (!is_b8x8 && !(n & 15))
            *mb_type = (*mb_type & ~(MB_TYPE_8x8 | MB_TYPE_16x8 | MB_TYPE_8x16 |
                                     MB_TYPE_P1L0 | MB_TYPE_P1L1)) |
                       MB_TYPE_16x16 | MB_TYPE_DIRECT2;
    }
}
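/* Temporal direct mode: takes reference indices and motion vectors from the
 * colocated macroblock of the first list-1 reference, remaps the colocated
 * references to list 0 and scales each vector by dist_scale_factor; the
 * list-1 vector is then the scaled list-0 vector minus the colocated one. */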
static void pred_temp_direct_motion(H264Context *const h, int *mb_type)
{
    int b8_stride = 2;
    int b4_stride = h->b_stride;
    int mb_xy = h->mb_xy, mb_y = h->mb_y;
    int mb_type_col[2];
    const int16_t (*l1mv0)[2], (*l1mv1)[2];
    const int8_t *l1ref0, *l1ref1;
    const int is_b8x8 = IS_8X8(*mb_type);
    unsigned int sub_mb_type;
    int i8, i4;

    assert(h->ref_list[1][0].reference & 3);

    await_reference_mb_row(h, &h->ref_list[1][0],
                           h->mb_y + !!IS_INTERLACED(*mb_type));
    if (IS_INTERLACED(h->ref_list[1][0].mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
        if (!IS_INTERLACED(*mb_type)) {                    //     AFR/FR    -> AFL/FL
            mb_y  = (h->mb_y & ~1) + h->col_parity;
            mb_xy = h->mb_x +
                    ((h->mb_y & ~1) + h->col_parity) * h->mb_stride;
            b8_stride = 0;
        } else {
            mb_y  += h->col_fieldoff;
            mb_xy += h->mb_stride * h->col_fieldoff; // non-zero for FL -> FL & differ parity
        }
        goto single_col;
    } else {                                        // AFL/AFR/FR/FL -> AFR/FR
        if (IS_INTERLACED(*mb_type)) {              // AFL       /FL -> AFR/FR
            mb_y  = h->mb_y & ~1;
            mb_xy = h->mb_x + (h->mb_y & ~1) * h->mb_stride;
            mb_type_col[0] = h->ref_list[1][0].mb_type[mb_xy];
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy + h->mb_stride];
            b8_stride      = 2 + 4 * h->mb_stride;
            b4_stride     *= 6;
            if (IS_INTERLACED(mb_type_col[0]) !=
                IS_INTERLACED(mb_type_col[1])) {
                mb_type_col[0] &= ~MB_TYPE_INTERLACED;
                mb_type_col[1] &= ~MB_TYPE_INTERLACED;
            }

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                /* B_SUB_8x8 */

            if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
                (mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
                !is_b8x8) {
                *mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
                            MB_TYPE_DIRECT2;              /* B_16x8 */
            } else {
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        } else {                                    //     AFR/FR    -> AFR/FR
single_col:
            mb_type_col[0] =
            mb_type_col[1] = h->ref_list[1][0].mb_type[mb_xy];

            sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                          MB_TYPE_DIRECT2;                /* B_SUB_8x8 */
            if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
                *mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                            MB_TYPE_DIRECT2;              /* B_16x16 */
            } else if (!is_b8x8 &&
                       (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
                *mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
                            (mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
            } else {
                if (!h->sps.direct_8x8_inference_flag) {
                    /* FIXME: save sub mb types from previous frames (or derive
                     * from MVs) so we know exactly what block size to use */
                    sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
                                  MB_TYPE_DIRECT2;        /* B_SUB_4x4 */
                }
                *mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
            }
        }
    }
    await_reference_mb_row(h, &h->ref_list[1][0], mb_y);

    l1mv0  = &h->ref_list[1][0].motion_val[0][h->mb2b_xy[mb_xy]];
    l1mv1  = &h->ref_list[1][0].motion_val[1][h->mb2b_xy[mb_xy]];
    l1ref0 = &h->ref_list[1][0].ref_index[0][4 * mb_xy];
    l1ref1 = &h->ref_list[1][0].ref_index[1][4 * mb_xy];
    if (!b8_stride) {
        if (h->mb_y & 1) {
            l1ref0 += 2;
            l1ref1 += 2;
            l1mv0  += 2 * b4_stride;
            l1mv1  += 2 * b4_stride;
        }
    }
    {
        const int *map_col_to_list0[2] = { h->map_col_to_list0[0],
                                           h->map_col_to_list0[1] };
        const int *dist_scale_factor = h->dist_scale_factor;
        int ref_offset;

        if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
            map_col_to_list0[0] = h->map_col_to_list0_field[h->mb_y & 1][0];
            map_col_to_list0[1] = h->map_col_to_list0_field[h->mb_y & 1][1];
            dist_scale_factor   = h->dist_scale_factor_field[h->mb_y & 1];
        }
        /* 16 if h->ref_list[1][0].mbaff && IS_INTERLACED(mb_type_col[0]), else 0 */
        ref_offset = (h->ref_list[1][0].mbaff << 4) & (mb_type_col[0] >> 3);
        if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
            int y_shift = 2 * !IS_INTERLACED(*mb_type);
            assert(h->sps.direct_8x8_inference_flag);

            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;

                fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[y8])) {
                    fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                ref0 = l1ref0[x8 + y8 * b8_stride];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
                                               ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];
                fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);

                {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
                    int my_col            = (mv_col[1] << y_shift) / 2;
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * my_col    + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - my_col), 4);
                }
            }
            return;
        }
        if (IS_16X16(*mb_type)) {
            int ref, mv0, mv1;

            fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
            if (IS_INTRA(mb_type_col[0])) {
                ref = mv0 = mv1 = 0;
            } else {
                const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
                                                : map_col_to_list0[1][l1ref1[0] + ref_offset];
                const int scale = dist_scale_factor[ref0];
                const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
                int mv_l0[2];
                mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                ref      = ref0;
                mv0      = pack16to32(mv_l0[0], mv_l0[1]);
                mv1      = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
            }
            fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
            fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
            fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
        } else {
            for (i8 = 0; i8 < 4; i8++) {
                const int x8 = i8 & 1;
                const int y8 = i8 >> 1;
                int ref0, scale;
                const int16_t (*l1mv)[2] = l1mv0;

                if (is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
                    continue;
                h->sub_mb_type[i8] = sub_mb_type;
                fill_rectangle(&h->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                if (IS_INTRA(mb_type_col[0])) {
                    fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
                    continue;
                }

                assert(b8_stride == 2);
                ref0 = l1ref0[i8];
                if (ref0 >= 0)
                    ref0 = map_col_to_list0[0][ref0 + ref_offset];
                else {
                    ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
                    l1mv = l1mv1;
                }
                scale = dist_scale_factor[ref0];

                fill_rectangle(&h->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
                               ref0, 1);
                if (IS_SUB_8X8(sub_mb_type)) {
                    const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
                    int mx                = (scale * mv_col[0] + 128) >> 8;
                    int my                = (scale * mv_col[1] + 128) >> 8;
                    fill_rectangle(&h->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx, my), 4);
                    fill_rectangle(&h->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
                                   pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
                } else {
                    for (i4 = 0; i4 < 4; i4++) {
                        const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
                                                     (y8 * 2 + (i4 >> 1)) * b4_stride];
                        int16_t *mv_l0 = h->mv_cache[0][scan8[i8 * 4 + i4]];
                        mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
                        mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
                        AV_WN32A(h->mv_cache[1][scan8[i8 * 4 + i4]],
                                 pack16to32(mv_l0[0] - mv_col[0],
                                            mv_l0[1] - mv_col[1]));
                    }
                }
            }
        }
    }
}
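/* Entry point: dispatches to the spatial or temporal direct predictor
 * according to the slice's direct_spatial_mv_pred flag. */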
void ff_h264_pred_direct_motion(H264Context *const h, int *mb_type)
{
    if (h->direct_spatial_mv_pred)
        pred_spatial_direct_motion(h, mb_type);
    else
        pred_temp_direct_motion(h, mb_type);
}