The current code is a generalization of the earlier progressive MV
prediction code, intended to predict MVs for both interlaced and
progressive pictures. However, the interlaced MV prediction is buggy
and works mostly by luck.

This patch partially fixes interlaced MV prediction.
---
libavcodec/vc1dec.c | 189 ++++++++++++++++++++++-----------------------------
1 files changed, 80 insertions(+), 109 deletions(-)
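
As a rough orientation for review (not code from vc1dec.c), the shape the
prediction takes after this patch can be summarized by the standalone
sketch below. It is illustrative only: the Neighbour struct, predict_mv(),
mid3() and scale_to_field() are stand-ins invented for the sketch,
scale_to_field() is just a placeholder for the real scaleforsame()/
scaleforopp() (which use reference-distance tables from the bitstream),
and the numbers in main() are arbitrary. The point is the restructuring:
each neighbour keeps one predictor pair plus its field flag and is rescaled
at most once, when its field differs from the field the current MV ends up
referencing (derived from 'opposit'), and only then the code picks either
the single-neighbour fallback (A, then C, then B) or the median of A, B
and C, instead of keeping parallel samefield_*/oppfield_* copies of every
predictor.

#include <stdio.h>

/* Median of three, in the spirit of FFmpeg's mid_pred(). */
static int mid3(int a, int b, int c)
{
    int mn = a < b ? a : b;
    int mx = a > b ? a : b;
    return c < mn ? mn : c > mx ? mx : c;
}

typedef struct {
    int valid;     /* neighbour exists and is not intra            */
    int opp_field; /* neighbour's MV references the opposite field */
    int mv[2];     /* stored MV, unscaled                          */
} Neighbour;

/* Placeholder for scaleforsame()/scaleforopp(). */
static int scale_to_field(int v, int to_opp)
{
    return to_opp ? v + v / 4 : v - v / 4;
}

/* A = n[0], B = n[1], C = n[2]; cur_opp is the field the current MV
 * will reference. */
static void predict_mv(Neighbour n[3], int cur_opp, int *px, int *py)
{
    int count = 0;

    for (int i = 0; i < 3; i++) {
        if (!n[i].valid) {
            n[i].mv[0] = n[i].mv[1] = 0;   /* invalid neighbours predict 0 */
            continue;
        }
        count++;
        if (n[i].opp_field != cur_opp) {   /* rescale only on field mismatch */
            n[i].mv[0] = scale_to_field(n[i].mv[0], cur_opp);
            n[i].mv[1] = scale_to_field(n[i].mv[1], cur_opp);
        }
    }

    if (count > 1) {                       /* median of A, B, C */
        *px = mid3(n[0].mv[0], n[1].mv[0], n[2].mv[0]);
        *py = mid3(n[0].mv[1], n[1].mv[1], n[2].mv[1]);
    } else if (n[0].valid) {               /* fallback order: A, then C, then B */
        *px = n[0].mv[0]; *py = n[0].mv[1];
    } else if (n[2].valid) {
        *px = n[2].mv[0]; *py = n[2].mv[1];
    } else if (n[1].valid) {
        *px = n[1].mv[0]; *py = n[1].mv[1];
    } else {
        *px = *py = 0;
    }
}

int main(void)
{
    Neighbour n[3] = {
        { 1, 0, {  4,  8 } },   /* A, same field     */
        { 1, 1, { 12, -2 } },   /* B, opposite field */
        { 0, 0, {  0,  0 } },   /* C, not available  */
    };
    int px, py;

    predict_mv(n, 0, &px, &py); /* current MV references the same field */
    printf("pred = (%d, %d)\n", px, py);
    return 0;
}
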
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index e389040..3253292 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -1475,12 +1475,10 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
int px, py;
int sum;
int mixedmv_pic, num_samefield = 0, num_oppfield = 0;
- int opposit, f;
- int16_t samefield_pred[2], oppfield_pred[2];
- int16_t samefield_predA[2], oppfield_predA[2];
- int16_t samefield_predB[2], oppfield_predB[2];
- int16_t samefield_predC[2], oppfield_predC[2];
- int16_t *predA, *predC;
+ int opposit, a_f, b_f, c_f;
+ int16_t field_predA[2];
+ int16_t field_predB[2];
+ int16_t field_predC[2];
int a_valid, b_valid, c_valid;
int hybridmv_thresh, y_bias = 0;
@@ -1554,96 +1552,34 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
}
if (a_valid) {
- f = v->mv_f[dir][xy - wrap + v->blocks_off];
- num_oppfield += f;
- num_samefield += 1 - f;
- if (f) {
- oppfield_predA[0] = A[0];
- oppfield_predA[1] = A[1];
- samefield_predA[0] = scaleforsame(v, 0, A[0], 0, dir);
- samefield_predA[1] = scaleforsame(v, n, A[1], 1, dir);
- } else {
- samefield_predA[0] = A[0];
- samefield_predA[1] = A[1];
- if (v->numref)
- oppfield_predA[0] = scaleforopp(v, A[0], 0, dir);
- if (v->numref)
- oppfield_predA[1] = scaleforopp(v, A[1], 1, dir);
- }
+ a_f = v->mv_f[dir][xy - wrap + v->blocks_off];
+ num_oppfield += a_f;
+ num_samefield += 1 - a_f;
+ field_predA[0] = A[0];
+ field_predA[1] = A[1];
} else {
- samefield_predA[0] = samefield_predA[1] = 0;
- oppfield_predA[0] = oppfield_predA[1] = 0;
+ field_predA[0] = field_predA[1] = 0;
+ a_f = 0;
}
if (c_valid) {
- f = v->mv_f[dir][xy - 1 + v->blocks_off];
- num_oppfield += f;
- num_samefield += 1 - f;
- if (f) {
- oppfield_predC[0] = C[0];
- oppfield_predC[1] = C[1];
- samefield_predC[0] = scaleforsame(v, 0, C[0], 0, dir);
- samefield_predC[1] = scaleforsame(v, n, C[1], 1, dir);
- } else {
- samefield_predC[0] = C[0];
- samefield_predC[1] = C[1];
- if (v->numref)
- oppfield_predC[0] = scaleforopp(v, C[0], 0, dir);
- if (v->numref)
- oppfield_predC[1] = scaleforopp(v, C[1], 1, dir);
- }
+ c_f = v->mv_f[dir][xy - 1 + v->blocks_off];
+ num_oppfield += c_f;
+ num_samefield += 1 - c_f;
+ field_predC[0] = C[0];
+ field_predC[1] = C[1];
} else {
- samefield_predC[0] = samefield_predC[1] = 0;
- oppfield_predC[0] = oppfield_predC[1] = 0;
+ field_predC[0] = field_predC[1] = 0;
+ c_f = 0;
}
if (b_valid) {
- f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
- num_oppfield += f;
- num_samefield += 1 - f;
- if (f) {
- oppfield_predB[0] = B[0];
- oppfield_predB[1] = B[1];
- samefield_predB[0] = scaleforsame(v, 0, B[0], 0, dir);
- samefield_predB[1] = scaleforsame(v, n, B[1], 1, dir);
- } else {
- samefield_predB[0] = B[0];
- samefield_predB[1] = B[1];
- if (v->numref)
- oppfield_predB[0] = scaleforopp(v, B[0], 0, dir);
- if (v->numref)
- oppfield_predB[1] = scaleforopp(v, B[1], 1, dir);
- }
- } else {
- samefield_predB[0] = samefield_predB[1] = 0;
- oppfield_predB[0] = oppfield_predB[1] = 0;
- }
-
- if (a_valid) {
- samefield_pred[0] = samefield_predA[0];
- samefield_pred[1] = samefield_predA[1];
- oppfield_pred[0] = oppfield_predA[0];
- oppfield_pred[1] = oppfield_predA[1];
- } else if (c_valid) {
- samefield_pred[0] = samefield_predC[0];
- samefield_pred[1] = samefield_predC[1];
- oppfield_pred[0] = oppfield_predC[0];
- oppfield_pred[1] = oppfield_predC[1];
- } else if (b_valid) {
- samefield_pred[0] = samefield_predB[0];
- samefield_pred[1] = samefield_predB[1];
- oppfield_pred[0] = oppfield_predB[0];
- oppfield_pred[1] = oppfield_predB[1];
+ b_f = v->mv_f[dir][xy - wrap + off + v->blocks_off];
+ num_oppfield += b_f;
+ num_samefield += 1 - b_f;
+ field_predB[0] = B[0];
+ field_predB[1] = B[1];
} else {
- samefield_pred[0] = samefield_pred[1] = 0;
- oppfield_pred[0] = oppfield_pred[1] = 0;
- }
-
- if (num_samefield + num_oppfield > 1) {
- samefield_pred[0] = mid_pred(samefield_predA[0], samefield_predB[0], samefield_predC[0]);
- samefield_pred[1] = mid_pred(samefield_predA[1], samefield_predB[1], samefield_predC[1]);
- if (v->numref)
- oppfield_pred[0] = mid_pred(oppfield_predA[0], oppfield_predB[0], oppfield_predC[0]);
- if (v->numref)
- oppfield_pred[1] = mid_pred(oppfield_predA[1], oppfield_predB[1], oppfield_predC[1]);
+ field_predB[0] = field_predB[1] = 0;
+ b_f = 0;
}
if (v->field_mode) {
@@ -1654,21 +1590,56 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
} else
opposit = 0;
if (opposit) {
- px = oppfield_pred[0];
- py = oppfield_pred[1];
- predA = oppfield_predA;
- predC = oppfield_predC;
- v->mv_f[dir][xy + v->blocks_off] = f = 1;
+ if (a_valid && !a_f) {
+ field_predA[0] = scaleforopp(v, field_predA[0], 0, dir);
+ field_predA[1] = scaleforopp(v, field_predA[1], 1, dir);
+ }
+ if (b_valid && !b_f) {
+ field_predB[0] = scaleforopp(v, field_predB[0], 0, dir);
+ field_predB[1] = scaleforopp(v, field_predB[1], 1, dir);
+ }
+ if (c_valid && !c_f) {
+ field_predC[0] = scaleforopp(v, field_predC[0], 0, dir);
+ field_predC[1] = scaleforopp(v, field_predC[1], 1, dir);
+ }
+ v->mv_f[dir][xy + v->blocks_off] = 1;
v->ref_field_type[dir] = !v->cur_field_type;
} else {
- px = samefield_pred[0];
- py = samefield_pred[1];
- predA = samefield_predA;
- predC = samefield_predC;
- v->mv_f[dir][xy + v->blocks_off] = f = 0;
+ if (a_valid && a_f) {
+ field_predA[0] = scaleforsame(v, n, field_predA[0], 0, dir);
+ field_predA[1] = scaleforsame(v, n, field_predA[1], 1, dir);
+ }
+ if (b_valid && b_f) {
+ field_predB[0] = scaleforsame(v, n, field_predB[0], 0, dir);
+ field_predB[1] = scaleforsame(v, n, field_predB[1], 1, dir);
+ }
+ if (c_valid && c_f) {
+ field_predC[0] = scaleforsame(v, n, field_predC[0], 0, dir);
+ field_predC[1] = scaleforsame(v, n, field_predC[1], 1, dir);
+ }
+ v->mv_f[dir][xy + v->blocks_off] = 0;
v->ref_field_type[dir] = v->cur_field_type;
}
+ if (a_valid) {
+ px = field_predA[0];
+ py = field_predA[1];
+ } else if (c_valid) {
+ px = field_predC[0];
+ py = field_predC[1];
+ } else if (b_valid) {
+ px = field_predB[0];
+ py = field_predB[1];
+ } else {
+ px = 0;
+ py = 0;
+ }
+
+ if (num_samefield + num_oppfield > 1) {
+ px = mid_pred(field_predA[0], field_predB[0], field_predC[0]);
+ py = mid_pred(field_predA[1], field_predB[1], field_predC[1]);
+ }
+
/* Pullback MV as specified in 8.3.5.3.4 */
if (!v->field_mode) {
int qx, qy, X, Y;
@@ -1697,27 +1668,27 @@ static inline void vc1_pred_mv(VC1Context *v, int n, int dmv_x, int dmv_y,
if (is_intra[xy - wrap])
sum = FFABS(px) + FFABS(py);
else
- sum = FFABS(px - predA[0]) + FFABS(py - predA[1]);
+ sum = FFABS(px - field_predA[0]) + FFABS(py - field_predA[1]);
if (sum > hybridmv_thresh) {
if (get_bits1(&s->gb)) { // read HYBRIDPRED bit
- px = predA[0];
- py = predA[1];
+ px = field_predA[0];
+ py = field_predA[1];
} else {
- px = predC[0];
- py = predC[1];
+ px = field_predC[0];
+ py = field_predC[1];
}
} else {
if (is_intra[xy - 1])
sum = FFABS(px) + FFABS(py);
else
- sum = FFABS(px - predC[0]) + FFABS(py - predC[1]);
+ sum = FFABS(px - field_predC[0]) + FFABS(py - field_predC[1]);
if (sum > hybridmv_thresh) {
if (get_bits1(&s->gb)) {
- px = predA[0];
- py = predA[1];
+ px = field_predA[0];
+ py = field_predA[1];
} else {
- px = predC[0];
- py = predC[1];
+ px = field_predC[0];
+ py = field_predC[1];
}
}
}
--
1.7.4.1
_______________________________________________
libav-devel mailing list
[email protected]
https://lists.libav.org/mailman/listinfo/libav-devel