/*
 * VP9 compatible video decoder
 *
 * Copyright (C) 2013 Ronald S. Bultje <rsbultje gmail com>
 * Copyright (C) 2013 Clément Bœsch <u pkh me>
 *
 * This file is part of Libav.
 *
 * Libav is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * Libav is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with Libav; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */
static av_always_inline void clamp_mv(VP56mv *dst, const VP56mv *src,
32
dst->x = av_clip(src->x, s->min_mv.x, s->max_mv.x);
33
dst->y = av_clip(src->y, s->min_mv.y, s->max_mv.y);
36
/**
 * Find a predicted motion vector (written through pmv) for reference frame
 * 'ref' of the current block (s->b). Candidates are scanned in priority
 * order: already-decoded sibling sub-block MVs (for sub-8x8 partitions),
 * the above/left MV contexts, spatial neighbours from mv_ref_blk_off[],
 * the co-located MV in the previous frame, and finally MVs coded with a
 * *different* reference frame (negated when the two references' sign
 * biases differ). Every candidate returned through RETURN_MV is clamped
 * via clamp_mv().
 *
 * NOTE(review): this excerpt is missing interior lines (the full macro
 * bodies, several guards and closing braces), so the comments below only
 * describe what is visible here.
 *
 * @param s   decoder context; current block is s->b
 * @param pmv output MV
 * @param ref reference frame index, matched against stored ref[0]/ref[1]
 * @param z   MV index within b->mv[block][] (first or second MV)
 * @param idx presumably selects first vs. second best candidate — the
 *            lines using it are not visible in this excerpt; TODO confirm
 * @param sb  sub-block index: bit 0 picks the above-ctx column half
 *            (sb & 1), bit 1 the left-ctx row half (sb >> 1); values 1/2
 *            trigger the direct sibling-MV path below
 */
static void find_ref_mvs(VP9Context *s,
37
VP56mv *pmv, int ref, int z, int idx, int sb)
39
/* Per-block-size table of 8 neighbour positions, stored as
 * { column delta, row delta } in 8x8-block units — see the
 * `c = p[i][0] + col, r = p[i][1] + row` use below. */
static const int8_t mv_ref_blk_off[N_BS_SIZES][8][2] = {
40
[BS_64x64] = { { 3, -1 }, { -1, 3 }, { 4, -1 }, { -1, 4 },
41
{ -1, -1 }, { 0, -1 }, { -1, 0 }, { 6, -1 } },
42
[BS_64x32] = { { 0, -1 }, { -1, 0 }, { 4, -1 }, { -1, 2 },
43
{ -1, -1 }, { 0, -3 }, { -3, 0 }, { 2, -1 } },
44
[BS_32x64] = { { -1, 0 }, { 0, -1 }, { -1, 4 }, { 2, -1 },
45
{ -1, -1 }, { -3, 0 }, { 0, -3 }, { -1, 2 } },
46
[BS_32x32] = { { 1, -1 }, { -1, 1 }, { 2, -1 }, { -1, 2 },
47
{ -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
48
[BS_32x16] = { { 0, -1 }, { -1, 0 }, { 2, -1 }, { -1, -1 },
49
{ -1, 1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
50
[BS_16x32] = { { -1, 0 }, { 0, -1 }, { -1, 2 }, { -1, -1 },
51
{ 1, -1 }, { -3, 0 }, { 0, -3 }, { -3, -3 } },
52
[BS_16x16] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, 1 },
53
{ -1, -1 }, { 0, -3 }, { -3, 0 }, { -3, -3 } },
54
[BS_16x8] = { { 0, -1 }, { -1, 0 }, { 1, -1 }, { -1, -1 },
55
{ 0, -2 }, { -2, 0 }, { -2, -1 }, { -1, -2 } },
56
[BS_8x16] = { { -1, 0 }, { 0, -1 }, { -1, 1 }, { -1, -1 },
57
{ -2, 0 }, { 0, -2 }, { -1, -2 }, { -2, -1 } },
58
[BS_8x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
59
{ -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
60
[BS_8x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
61
{ -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
62
[BS_4x8] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
63
{ -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
64
[BS_4x4] = { { 0, -1 }, { -1, 0 }, { -1, -1 }, { 0, -2 },
65
{ -2, 0 }, { -1, -2 }, { -2, -1 }, { -2, -2 } },
67
VP9Block *const b = &s->b;
68
int row = b->row, col = b->col, row7 = b->row7;
69
const int8_t (*p)[2] = mv_ref_blk_off[b->bs];
70
/* sentinel: no candidate stored yet (both MV components = 0x8000) */
#define INVALID_MV 0x80008000U
71
uint32_t mem = INVALID_MV;
74
/* Record/return a candidate MV verbatim, without clamping.
 * NOTE(review): most of the macro body is missing from this excerpt;
 * the visible fragments compare against the INVALID_MV sentinel and
 * reject duplicates (m != mem). */
#define RETURN_DIRECT_MV(mv) \
76
uint32_t m = AV_RN32A(&mv); \
80
} else if (mem == INVALID_MV) { \
82
} else if (m != mem) { \
89
/* sub-8x8 partitions: take already-decoded sibling sub-block MVs first */
if (sb == 2 || sb == 1) {
90
RETURN_DIRECT_MV(b->mv[0][z]);
92
RETURN_DIRECT_MV(b->mv[2][z]);
93
RETURN_DIRECT_MV(b->mv[1][z]);
94
RETURN_DIRECT_MV(b->mv[0][z]);
97
/* Same as RETURN_DIRECT_MV but the candidate is clamped into the legal
 * range first (clamp_mv); also fragmentary in this excerpt. */
#define RETURN_MV(mv) \
102
clamp_mv(&tmp, &mv, s); \
103
m = AV_RN32A(&tmp); \
107
} else if (mem == INVALID_MV) { \
109
} else if (m != mem) { \
114
uint32_t m = AV_RN32A(&mv); \
116
clamp_mv(pmv, &mv, s); \
118
} else if (mem == INVALID_MV) { \
120
} else if (m != mem) { \
121
clamp_mv(pmv, &mv, s); \
128
/* candidate from the block directly above (row - 1); the enclosing
 * row > 0 guard is not visible in this excerpt */
VP9MVRefPair *mv = &s->mv[0][(row - 1) * s->sb_cols * 8 + col];
130
if (mv->ref[0] == ref)
131
RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][0]);
132
else if (mv->ref[1] == ref)
133
RETURN_MV(s->above_mv_ctx[2 * col + (sb & 1)][1]);
135
/* candidate from the block to the left, only within the current tile */
if (col > s->tiling.tile_col_start) {
136
VP9MVRefPair *mv = &s->mv[0][row * s->sb_cols * 8 + col - 1];
138
if (mv->ref[0] == ref)
139
RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][0]);
140
else if (mv->ref[1] == ref)
141
RETURN_MV(s->left_mv_ctx[2 * row7 + (sb >> 1)][1]);
148
// previously coded MVs in the neighborhood, using same reference frame
150
int c = p[i][0] + col, r = p[i][1] + row;
152
/* stay inside the tile horizontally and the frame vertically */
if (c >= s->tiling.tile_col_start && c < s->cols &&
153
r >= 0 && r < s->rows) {
154
VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];
156
if (mv->ref[0] == ref)
157
RETURN_MV(mv->mv[0]);
158
else if (mv->ref[1] == ref)
159
RETURN_MV(mv->mv[1]);
163
// MV at this position in previous frame, using same reference frame
164
if (s->use_last_frame_mvs) {
165
VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];
167
if (mv->ref[0] == ref)
168
RETURN_MV(mv->mv[0]);
169
else if (mv->ref[1] == ref)
170
RETURN_MV(mv->mv[1]);
173
/* Like RETURN_MV, but the candidate used a different reference frame:
 * when 'scale' is set (sign biases differ) the MV is negated first. */
#define RETURN_SCALE_MV(mv, scale) \
176
VP56mv mv_temp = { -mv.x, -mv.y }; \
177
RETURN_MV(mv_temp); \
183
// previously coded MVs in the neighborhood, using different reference frame
184
for (i = 0; i < 8; i++) {
185
int c = p[i][0] + col, r = p[i][1] + row;
187
if (c >= s->tiling.tile_col_start && c < s->cols &&
188
r >= 0 && r < s->rows) {
189
VP9MVRefPair *mv = &s->mv[0][r * s->sb_cols * 8 + c];
191
if (mv->ref[0] != ref && mv->ref[0] >= 0)
192
RETURN_SCALE_MV(mv->mv[0],
193
s->signbias[mv->ref[0]] != s->signbias[ref]);
194
if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
195
// BUG - libvpx has this condition regardless of whether
196
// we used the first ref MV and pre-scaling
197
AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
198
RETURN_SCALE_MV(mv->mv[1],
199
s->signbias[mv->ref[1]] != s->signbias[ref]);
204
// MV at this position in previous frame, using different reference frame
205
if (s->use_last_frame_mvs) {
206
VP9MVRefPair *mv = &s->mv[1][row * s->sb_cols * 8 + col];
208
if (mv->ref[0] != ref && mv->ref[0] >= 0)
209
RETURN_SCALE_MV(mv->mv[0],
210
s->signbias[mv->ref[0]] != s->signbias[ref]);
211
if (mv->ref[1] != ref && mv->ref[1] >= 0 &&
212
// BUG - libvpx has this condition regardless of whether
213
// we used the first ref MV and pre-scaling
214
AV_RN32A(&mv->mv[0]) != AV_RN32A(&mv->mv[1])) {
215
RETURN_SCALE_MV(mv->mv[1],
216
s->signbias[mv->ref[1]] != s->signbias[ref]);
223
#undef RETURN_SCALE_MV
226
/**
 * Decode one motion-vector component delta from the range coder.
 *
 * Reads a sign bit and an MV-class symbol, then the class-dependent
 * magnitude bits, fractional-precision (fp) bits and, when enabled, the
 * high-precision (hp) bit. Every decoded symbol is also tallied into
 * s->counts.mv_comp[idx] for backward probability adaptation.
 *
 * NOTE(review): this excerpt is missing the conditionals that select the
 * class0 vs. general-class paths and the hp gating, so the branch
 * structure cannot be fully confirmed from what is visible here.
 *
 * @param s   decoder context (coder in s->c, probabilities in
 *            s->prob.p.mv_comp[idx])
 * @param idx component index — callers pass 0 for y and 1 for x
 *            (see ff_vp9_fill_mv)
 * @param hp  nonzero to read the high-precision bit; otherwise the hp
 *            count is presumably forced to 1 (see the libvpx-bug
 *            comments below) — TODO confirm against missing lines
 * @return    the signed component delta: sign ? -(n + 1) : (n + 1)
 */
static av_always_inline int read_mv_component(VP9Context *s, int idx, int hp)
228
int bit, sign = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].sign);
229
int n, c = vp8_rac_get_tree(&s->c, ff_vp9_mv_class_tree,
230
s->prob.p.mv_comp[idx].classes);
232
/* tally decoded symbols for backward adaptation */
s->counts.mv_comp[idx].sign[sign]++;
233
s->counts.mv_comp[idx].classes[c]++;
237
/* general class path: read c magnitude bits, LSB first */
for (n = 0, m = 0; m < c; m++) {
238
bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].bits[m]);
240
s->counts.mv_comp[idx].bits[m][bit]++;
243
/* fractional-precision bits (fp tree) */
bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
244
s->prob.p.mv_comp[idx].fp);
246
s->counts.mv_comp[idx].fp[bit]++;
248
bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].hp);
249
s->counts.mv_comp[idx].hp[bit]++;
253
// bug in libvpx - we count for bw entropy purposes even if the
255
s->counts.mv_comp[idx].hp[1]++;
259
/* class0 path: single integer bit plus per-bit fp tree */
n = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0);
260
s->counts.mv_comp[idx].class0[n]++;
261
bit = vp8_rac_get_tree(&s->c, ff_vp9_mv_fp_tree,
262
s->prob.p.mv_comp[idx].class0_fp[n]);
263
s->counts.mv_comp[idx].class0_fp[n][bit]++;
264
/* pack integer part (<<3) and fp part (<<1) into eighth-pel units */
n = (n << 3) | (bit << 1);
266
bit = vp56_rac_get_prob(&s->c, s->prob.p.mv_comp[idx].class0_hp);
267
s->counts.mv_comp[idx].class0_hp[bit]++;
271
// bug in libvpx - we count for bw entropy purposes even if the
273
s->counts.mv_comp[idx].class0_hp[1]++;
277
return sign ? -(n + 1) : (n + 1);
280
void ff_vp9_fill_mv(VP9Context *s, VP56mv *mv, int mode, int sb)
282
VP9Block *const b = &s->b;
284
if (mode == ZEROMV) {
285
memset(mv, 0, sizeof(*mv) * 2);
289
// FIXME cache this value and reuse for other subblocks
290
find_ref_mvs(s, &mv[0], b->ref[0], 0, mode == NEARMV,
291
mode == NEWMV ? -1 : sb);
292
// FIXME maybe move this code into find_ref_mvs()
293
if ((mode == NEWMV || sb == -1) &&
294
!(hp = s->highprecisionmvs &&
295
abs(mv[0].x) < 64 && abs(mv[0].y) < 64)) {
310
enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
313
s->counts.mv_joint[j]++;
315
mv[0].y += read_mv_component(s, 0, hp);
317
mv[0].x += read_mv_component(s, 1, hp);
321
// FIXME cache this value and reuse for other subblocks
322
find_ref_mvs(s, &mv[1], b->ref[1], 1, mode == NEARMV,
323
mode == NEWMV ? -1 : sb);
324
if ((mode == NEWMV || sb == -1) &&
325
!(hp = s->highprecisionmvs &&
326
abs(mv[1].x) < 64 && abs(mv[1].y) < 64)) {
341
enum MVJoint j = vp8_rac_get_tree(&s->c, ff_vp9_mv_joint_tree,
344
s->counts.mv_joint[j]++;
346
mv[1].y += read_mv_component(s, 0, hp);
348
mv[1].x += read_mv_component(s, 1, hp);