On 17/08/2011 2:08, Ronald S. Bultje wrote:
> I don't believe this works for negative numbers. Same for
> effect_params1/2. Probably should be abs(sd->coefs[sprite][i]) in the
> last line.

Yeah, fixed. (Not that there should be any negative numbers in valid files though.)
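
For reference, a minimal sketch of the fixed-point debug printing in question (the helper name is made up; the expression mirrors what the patch now does):

    /* Print a signed 16.16 fixed-point value as "int.frac" for debugging.
       The integer part truncates toward zero; abs() keeps the fractional
       digits non-negative when the value is negative. */
    static void print_fp16(AVCodecContext *avctx, int val)
    {
        av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
               val / (1 << 16),
               (abs(val) & 0xFFFF) * 1000 / (1 << 16));
    }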

Also simplified the vertical clipping logic and factored out the dst pointer calculation.
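
Roughly, the vertical clipping now just clamps the 16.16 start offset into the sprite and bounds the per-row advance so every output row still samples inside the sprite (variable names as in the patch below):

    yoff[i] = av_clip(sd->coefs[i][5], 0, (v->sprite_height - 1) << 16);
    yadv[i] = av_clip(sd->coefs[i][4], 0,
                      ((v->sprite_height << 16) - yoff[i]) / v->output_height);

and dst is computed once per row from sprite_output_frame's data/linesize instead of being recomputed per sprite.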

> So coming back to this old one - is there some way we can signal at
> the buffer request phase which buffer is to be "displayed" and at
> display height, and which isn't? That'd allow us to keep using
> ff_thread_release_buffer() at least for these, which can be a
> significant performance gain.

All the buffers handled in mpegvideo.c are for internal use. The ones that are to be displayed get allocated and released in vc1_decode_sprites() using the callbacks as normal.
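
Concretely, the displayed frame keeps going through the user-visible callbacks in vc1_decode_sprites() (see the vc1dec.c hunk below), while the sprite-sized pictures that mpegvideo.c manages internally fall back to the default allocator:

    /* in mpegvideo.c: internal, sprite-sized buffers bypass the threading path */
    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
        r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
    else
        r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);

    /* in vc1_decode_sprites(): display-sized output frame via the normal callbacks */
    v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
    v->sprite_output_frame.reference    = 0;
    if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0)
        return -1;
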
From 259c66be72e9271f7f5aa879e245a7220a0994bc Mon Sep 17 00:00:00 2001
From: Alberto Delmás <adel...@gmail.com>
Date: Wed, 17 Aug 2011 14:24:42 +0200
Subject: [PATCH] Windows Media Image decoder (WMVP/WVP2)

---
 Changelog              |    1 +
 configure              |    2 +
 doc/general.texi       |    1 +
 libavcodec/allcodecs.c |    2 +
 libavcodec/avcodec.h   |    2 +
 libavcodec/h263dec.c   |    2 +
 libavcodec/mpegvideo.c |   12 ++-
 libavcodec/vc1.c       |    3 -
 libavcodec/vc1.h       |    3 +
 libavcodec/vc1dec.c    |  404 +++++++++++++++++++++++++++++++++++++++---------
 libavcodec/vc1dsp.c    |   68 ++++++++
 libavcodec/vc1dsp.h    |   10 ++
 libavcodec/version.h   |    2 +-
 libavformat/riff.c     |    4 +-
 14 files changed, 431 insertions(+), 85 deletions(-)

diff --git a/Changelog b/Changelog
index c43412c..e8fe826 100644
--- a/Changelog
+++ b/Changelog
@@ -39,6 +39,7 @@ easier to use. The changes are:
     * Presets in avconv are disabled, because only libx264 used them and
       presets for libx264 can now be specified using a private option
       '-preset <presetname>'.
+- Windows Media Image decoder
 
 
 version 0.7:
diff --git a/configure b/configure
index 9c6ce0a..d4e352e 100755
--- a/configure
+++ b/configure
@@ -1354,6 +1354,7 @@ vc1_dxva2_hwaccel_deps="dxva2api_h DXVA_PictureParameters_wDecodedPictureIndex"
 vc1_dxva2_hwaccel_select="dxva2 vc1_decoder"
 vc1_vaapi_hwaccel_select="vaapi vc1_decoder"
 vc1_vdpau_decoder_select="vdpau vc1_decoder"
+vc1image_decoder_select="vc1_decoder"
 vorbis_decoder_select="mdct"
 vorbis_encoder_select="mdct"
 vp6_decoder_select="huffman"
@@ -1374,6 +1375,7 @@ wmv3_decoder_select="vc1_decoder"
 wmv3_dxva2_hwaccel_select="vc1_dxva2_hwaccel"
 wmv3_vaapi_hwaccel_select="vc1_vaapi_hwaccel"
 wmv3_vdpau_decoder_select="vc1_vdpau_decoder"
+wmv3image_decoder_select="wmv3_decoder"
 zlib_decoder_select="zlib"
 zlib_encoder_select="zlib"
 zmbv_decoder_select="zlib"
diff --git a/doc/general.texi b/doc/general.texi
index be5b570..49ee367 100644
--- a/doc/general.texi
+++ b/doc/general.texi
@@ -516,6 +516,7 @@ following image formats are supported:
 @item VMware Screen Codec / VMware Video  @tab     @tab  X
     @tab Codec used in videos captured by VMware.
 @item Westwood Studios VQA (Vector Quantized Animation) video  @tab     @tab  X
+@item Windows Media Image    @tab     @tab  X
 @item Windows Media Video 7  @tab  X  @tab  X
 @item Windows Media Video 8  @tab  X  @tab  X
 @item Windows Media Video 9  @tab     @tab  X
diff --git a/libavcodec/allcodecs.c b/libavcodec/allcodecs.c
index dcef0d6..8f3de58 100644
--- a/libavcodec/allcodecs.c
+++ b/libavcodec/allcodecs.c
@@ -203,6 +203,7 @@ void avcodec_register_all(void)
     REGISTER_DECODER (VB, vb);
     REGISTER_DECODER (VC1, vc1);
     REGISTER_DECODER (VC1_VDPAU, vc1_vdpau);
+    REGISTER_DECODER (VC1IMAGE, vc1image);
     REGISTER_DECODER (VCR1, vcr1);
     REGISTER_DECODER (VMDVIDEO, vmdvideo);
     REGISTER_DECODER (VMNC, vmnc);
@@ -217,6 +218,7 @@ void avcodec_register_all(void)
     REGISTER_ENCDEC  (WMV2, wmv2);
     REGISTER_DECODER (WMV3, wmv3);
     REGISTER_DECODER (WMV3_VDPAU, wmv3_vdpau);
+    REGISTER_DECODER (WMV3IMAGE, wmv3image);
     REGISTER_DECODER (WNV1, wnv1);
     REGISTER_DECODER (XAN_WC3, xan_wc3);
     REGISTER_DECODER (XAN_WC4, xan_wc4);
diff --git a/libavcodec/avcodec.h b/libavcodec/avcodec.h
index 0859e0e..5698f51 100644
--- a/libavcodec/avcodec.h
+++ b/libavcodec/avcodec.h
@@ -208,6 +208,8 @@ enum CodecID {
     CODEC_ID_PRORES,
     CODEC_ID_JV,
     CODEC_ID_DFA,
+    CODEC_ID_WMV3IMAGE,
+    CODEC_ID_VC1IMAGE,
 
     /* various PCM "codecs" */
     CODEC_ID_FIRST_AUDIO = 0x10000,     ///< A dummy id pointing at the start of audio codecs
diff --git a/libavcodec/h263dec.c b/libavcodec/h263dec.c
index cba0c5a..8786679 100644
--- a/libavcodec/h263dec.c
+++ b/libavcodec/h263dec.c
@@ -91,6 +91,8 @@ av_cold int ff_h263_decode_init(AVCodecContext *avctx)
         break;
     case CODEC_ID_VC1:
     case CODEC_ID_WMV3:
+    case CODEC_ID_VC1IMAGE:
+    case CODEC_ID_WMV3IMAGE:
         s->h263_pred = 1;
         s->msmpeg4_version=6;
         avctx->chroma_sample_location = AVCHROMA_LOC_LEFT;
diff --git a/libavcodec/mpegvideo.c b/libavcodec/mpegvideo.c
index 365a0d4..8d40492 100644
--- a/libavcodec/mpegvideo.c
+++ b/libavcodec/mpegvideo.c
@@ -208,7 +208,12 @@ void ff_copy_picture(Picture *dst, Picture *src){
  */
 static void free_frame_buffer(MpegEncContext *s, Picture *pic)
 {
-    ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+    /* Windows Media Image codecs allocate internal buffers with different
+       dimensions; ignore user defined callbacks for these */
+    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+        ff_thread_release_buffer(s->avctx, (AVFrame*)pic);
+    else
+        avcodec_default_release_buffer(s->avctx, (AVFrame*)pic);
     av_freep(&pic->f.hwaccel_picture_private);
 }
 
@@ -230,7 +235,10 @@ static int alloc_frame_buffer(MpegEncContext *s, Picture *pic)
         }
     }
 
-    r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+    if (s->codec_id != CODEC_ID_WMV3IMAGE && s->codec_id != CODEC_ID_VC1IMAGE)
+        r = ff_thread_get_buffer(s->avctx, (AVFrame*)pic);
+    else
+        r = avcodec_default_get_buffer(s->avctx, (AVFrame*)pic);
 
     if (r < 0 || !pic->f.age || !pic->f.type || !pic->f.data[0]) {
         av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n",
diff --git a/libavcodec/vc1.c b/libavcodec/vc1.c
index 243bef5..fe9781b 100644
--- a/libavcodec/vc1.c
+++ b/libavcodec/vc1.c
@@ -314,9 +314,6 @@ int vc1_decode_sequence_header(AVCodecContext *avctx, VC1Context *v, GetBitConte
                    "Old interlaced mode is not supported\n");
             return -1;
         }
-        if (v->res_sprite) {
-            av_log(avctx, AV_LOG_ERROR, "WMVP is not fully supported\n");
-        }
     }
 
     // (fps-2)/4 (->30)
diff --git a/libavcodec/vc1.h b/libavcodec/vc1.h
index 96e5744..e1365ab 100644
--- a/libavcodec/vc1.h
+++ b/libavcodec/vc1.h
@@ -311,6 +311,9 @@ typedef struct VC1Context{
     //@{
     int new_sprite;
     int two_sprites;
+    AVFrame sprite_output_frame;
+    int output_width, output_height, sprite_width, sprite_height;
+    uint8_t* sr_rows[2][2];      ///< Sprite resizer line cache
     //@}
 
     int p_frame_skipped;
diff --git a/libavcodec/vc1dec.c b/libavcodec/vc1dec.c
index ae7906d..7de2244 100644
--- a/libavcodec/vc1dec.c
+++ b/libavcodec/vc1dec.c
@@ -3278,116 +3278,279 @@ static void vc1_decode_blocks(VC1Context *v)
     }
 }
 
-static inline float get_float_val(GetBitContext* gb)
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+typedef struct {
+    /**
+     * Transform coefficients for both sprites in 16.16 fixed point format,
+     * in the order they appear in the bitstream:
+     *  x scale
+     *  rotation 1 (unused)
+     *  x offset
+     *  rotation 2 (unused)
+     *  y scale
+     *  y offset
+     *  alpha
+     */
+    int coefs[2][7];
+
+    int effect_type, effect_flag;
+    int effect_pcount1, effect_pcount2;   ///< amount of effect parameters stored in effect_params
+    int effect_params1[15], effect_params2[10]; ///< effect parameters in 16.16 fixed point format
+} SpriteData;
+
+static inline int get_fp_val(GetBitContext* gb)
 {
-    return (float)get_bits_long(gb, 30) / (1<<15) - (1<<14);
+    return (get_bits_long(gb, 30) - (1<<29)) << 1;
 }
 
-static void vc1_sprite_parse_transform(VC1Context *v, GetBitContext* gb, float c[7])
+static void vc1_sprite_parse_transform(GetBitContext* gb, int c[7])
 {
-    c[1] = c[3] = 0.0f;
+    c[1] = c[3] = 0;
 
     switch (get_bits(gb, 2)) {
     case 0:
-        c[0] = 1.0f;
-        c[2] = get_float_val(gb);
-        c[4] = 1.0f;
+        c[0] = 1<<16;
+        c[2] = get_fp_val(gb);
+        c[4] = 1<<16;
         break;
     case 1:
-        c[0] = c[4] = get_float_val(gb);
-        c[2] = get_float_val(gb);
+        c[0] = c[4] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
         break;
     case 2:
-        c[0] = get_float_val(gb);
-        c[2] = get_float_val(gb);
-        c[4] = get_float_val(gb);
+        c[0] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
+        c[4] = get_fp_val(gb);
         break;
     case 3:
-        av_log_ask_for_sample(v->s.avctx, NULL);
-        c[0] = get_float_val(gb);
-        c[1] = get_float_val(gb);
-        c[2] = get_float_val(gb);
-        c[3] = get_float_val(gb);
-        c[4] = get_float_val(gb);
+        c[0] = get_fp_val(gb);
+        c[1] = get_fp_val(gb);
+        c[2] = get_fp_val(gb);
+        c[3] = get_fp_val(gb);
+        c[4] = get_fp_val(gb);
         break;
     }
-    c[5] = get_float_val(gb);
+    c[5] = get_fp_val(gb);
     if (get_bits1(gb))
-        c[6] = get_float_val(gb);
+        c[6] = get_fp_val(gb);
     else
-        c[6] = 1.0f;
+        c[6] = 1<<16;
 }
 
-static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb)
+static void vc1_parse_sprites(VC1Context *v, GetBitContext* gb, SpriteData* sd)
 {
-    int effect_type, effect_flag, effect_pcount1, effect_pcount2, i;
-    float effect_params1[14], effect_params2[10];
-
-    float coefs[2][7];
-    vc1_sprite_parse_transform(v, gb, coefs[0]);
-    av_log(v->s.avctx, AV_LOG_DEBUG, "S1:");
-    for (i = 0; i < 7; i++)
-        av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[0][i]);
-    av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
-
-    if (v->two_sprites) {
-        vc1_sprite_parse_transform(v, gb, coefs[1]);
-        av_log(v->s.avctx, AV_LOG_DEBUG, "S2:");
+    AVCodecContext *avctx = v->s.avctx;
+    int sprite, i;
+
+    for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+        vc1_sprite_parse_transform(gb, sd->coefs[sprite]);
+        if (sd->coefs[sprite][1] || sd->coefs[sprite][3])
+            av_log_ask_for_sample(avctx, "Rotation coefficients are not zero");
+        av_log(avctx, AV_LOG_DEBUG, sprite ? "S2:" : "S1:");
         for (i = 0; i < 7; i++)
-            av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", coefs[1][i]);
-        av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, " %d.%.3d",
+                   sd->coefs[sprite][i] / (1<<16),
+                   (abs(sd->coefs[sprite][i]) & 0xFFFF) * 1000 / (1<<16));
+        av_log(avctx, AV_LOG_DEBUG, "\n");
     }
+
     skip_bits(gb, 2);
-    if (effect_type = get_bits_long(gb, 30)){
-        switch (effect_pcount1 = get_bits(gb, 4)) {
-        case 2:
-            effect_params1[0] = get_float_val(gb);
-            effect_params1[1] = get_float_val(gb);
-            break;
+    if (sd->effect_type = get_bits_long(gb, 30)) {
+        switch (sd->effect_pcount1 = get_bits(gb, 4)) {
         case 7:
-            vc1_sprite_parse_transform(v, gb, effect_params1);
+            vc1_sprite_parse_transform(gb, sd->effect_params1);
             break;
         case 14:
-            vc1_sprite_parse_transform(v, gb, effect_params1);
-            vc1_sprite_parse_transform(v, gb, &effect_params1[7]);
+            vc1_sprite_parse_transform(gb, sd->effect_params1);
+            vc1_sprite_parse_transform(gb, sd->effect_params1 + 7);
             break;
         default:
-            av_log_ask_for_sample(v->s.avctx, NULL);
-            return;
+            for (i = 0; i < sd->effect_pcount1; i++)
+                sd->effect_params1[i] = get_fp_val(gb);
         }
-        if (effect_type != 13 || effect_params1[0] != coefs[0][6]) {
+        if (sd->effect_type != 13 || sd->effect_params1[0] != sd->coefs[0][6]) {
             // effect 13 is simple alpha blending and matches the opacity above
-            av_log(v->s.avctx, AV_LOG_DEBUG, "Effect: %d; params: ", effect_type);
-            for (i = 0; i < effect_pcount1; i++)
-                av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params1[i]);
-            av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, "Effect: %d; params: ", sd->effect_type);
+            for (i = 0; i < sd->effect_pcount1; i++)
+                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+                       sd->effect_params1[i] / (1<<16),
+                       (abs(sd->effect_params1[i]) & 0xFFFF) * 1000 / (1<<16));
+            av_log(avctx, AV_LOG_DEBUG, "\n");
         }
 
-        effect_pcount2 = get_bits(gb, 16);
-        if (effect_pcount2 > 10) {
-            av_log(v->s.avctx, AV_LOG_ERROR, "Too many effect parameters\n");
+        sd->effect_pcount2 = get_bits(gb, 16);
+        if (sd->effect_pcount2 > 10) {
+            av_log(avctx, AV_LOG_ERROR, "Too many effect parameters\n");
             return;
-        } else if (effect_pcount2) {
-            i = 0;
-            av_log(v->s.avctx, AV_LOG_DEBUG, "Effect params 2: ");
-            while (i < effect_pcount2){
-                effect_params2[i] = get_float_val(gb);
-                av_log(v->s.avctx, AV_LOG_DEBUG, " %.3f", effect_params2[i]);
-                i++;
+        } else if (sd->effect_pcount2) {
+            i = -1;
+            av_log(avctx, AV_LOG_DEBUG, "Effect params 2: ");
+            while (++i < sd->effect_pcount2){
+                sd->effect_params2[i] = get_fp_val(gb);
+                av_log(avctx, AV_LOG_DEBUG, " %d.%.2d",
+                       sd->effect_params2[i] / (1<<16),
+                       (abs(sd->effect_params2[i]) & 0xFFFF) * 1000 / (1<<16));
             }
-            av_log(v->s.avctx, AV_LOG_DEBUG, "\n");
+            av_log(avctx, AV_LOG_DEBUG, "\n");
         }
     }
-    if (effect_flag = get_bits1(gb))
-        av_log(v->s.avctx, AV_LOG_DEBUG, "Effect flag set\n");
+    if (sd->effect_flag = get_bits1(gb))
+        av_log(avctx, AV_LOG_DEBUG, "Effect flag set\n");
 
     if (get_bits_count(gb) >= gb->size_in_bits +
-       (v->s.avctx->codec_id == CODEC_ID_WMV3 ? 64 : 0))
-        av_log(v->s.avctx, AV_LOG_ERROR, "Buffer overrun\n");
+       (avctx->codec_id == CODEC_ID_WMV3IMAGE ? 64 : 0))
+        av_log(avctx, AV_LOG_ERROR, "Buffer overrun\n");
     if (get_bits_count(gb) < gb->size_in_bits - 8)
-        av_log(v->s.avctx, AV_LOG_WARNING, "Buffer not fully read\n");
+        av_log(avctx, AV_LOG_WARNING, "Buffer not fully read\n");
+}
+
+static void vc1_draw_sprites(VC1Context *v, SpriteData* sd)
+{
+    int i, plane, row, sprite;
+    int sr_cache[2][2] = { { -1, -1 }, { -1, -1 } };
+    uint8_t* src_h[2][2];
+    int xoff[2], xadv[2], yoff[2], yadv[2], alpha;
+    int ysub[2];
+    MpegEncContext *s = &v->s;
+
+    for (i = 0; i < 2; i++) {
+        xoff[i] = av_clip(sd->coefs[i][2], 0, v->sprite_width-1 << 16);
+        xadv[i] = sd->coefs[i][0];
+        if (xadv[i] != 1<<16 || (v->sprite_width<<16) - (v->output_width<<16) - xoff[i])
+            xadv[i] = av_clip(xadv[i], 0, ((v->sprite_width<<16) - xoff[i] - 1) / v->output_width);
+
+        yoff[i] = av_clip(sd->coefs[i][5], 0, v->sprite_height-1 << 16);
+        yadv[i] = av_clip(sd->coefs[i][4], 0, ((v->sprite_height<<16) - yoff[i]) / v->output_height);
+    }
+    alpha = av_clip(sd->coefs[1][6], 0, (1<<16) - 1);
+
+    for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++) {
+        int width = v->output_width>>!!plane;
+
+        for (row = 0; row < v->output_height>>!!plane; row++) {
+            uint8_t *dst = v->sprite_output_frame.data[plane] +
+                           v->sprite_output_frame.linesize[plane] * row;
+
+            for (sprite = 0; sprite <= v->two_sprites; sprite++) {
+                uint8_t *iplane = s->current_picture.f.data[plane];
+                int      iline  = s->current_picture.f.linesize[plane];
+                int      ycoord = yoff[sprite] + yadv[sprite]*row;
+                int      yline  = ycoord>>16;
+                ysub[sprite] = ycoord&0xFFFF;
+                if (sprite) {
+                    iplane = s->last_picture.f.data[plane];
+                    iline  = s->last_picture.f.linesize[plane];
+                }
+                if (!(xoff[sprite]&0xFFFF) && xadv[sprite] == 1<<16) {
+                        src_h[sprite][0] = iplane+(xoff[sprite]>>16)+ yline   *iline;
+                    if (ysub[sprite])
+                        src_h[sprite][1] = iplane+(xoff[sprite]>>16)+(yline+1)*iline;
+                } else {
+                    if (sr_cache[sprite][0] != yline) {
+                        if (sr_cache[sprite][1] == yline) {
+                            FFSWAP(uint8_t*, v->sr_rows[sprite][0], v->sr_rows[sprite][1]);
+                            FFSWAP(int,        sr_cache[sprite][0],   sr_cache[sprite][1]);
+                        } else {
+                            v->vc1dsp.sprite_h(v->sr_rows[sprite][0], iplane+yline*iline, xoff[sprite], xadv[sprite], width);
+                            sr_cache[sprite][0] = yline;
+                        }
+                    }
+                    if (ysub[sprite] && sr_cache[sprite][1] != yline + 1) {
+                        v->vc1dsp.sprite_h(v->sr_rows[sprite][1], iplane+(yline+1)*iline, xoff[sprite], xadv[sprite], width);
+                        sr_cache[sprite][1] = yline + 1;
+                    }
+                    src_h[sprite][0] = v->sr_rows[sprite][0];
+                    src_h[sprite][1] = v->sr_rows[sprite][1];
+                }
+            }
+
+            if (!v->two_sprites) {
+                if (ysub[0]) {
+                    v->vc1dsp.sprite_v_single(dst, src_h[0][0], src_h[0][1], ysub[0], width);
+                } else {
+                    memcpy(dst, src_h[0][0], width);
+                }
+            } else {
+                if (ysub[0] && ysub[1]) {
+                    v->vc1dsp.sprite_v_double_twoscale(dst, src_h[0][0], src_h[0][1], ysub[0],
+                                                       src_h[1][0], src_h[1][1], ysub[1], alpha, width);
+                } else if (ysub[0]) {
+                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[0][0], src_h[0][1], ysub[0],
+                                                       src_h[1][0], alpha, width);
+                } else if (ysub[1]) {
+                    v->vc1dsp.sprite_v_double_onescale(dst, src_h[1][0], src_h[1][1], ysub[1],
+                                                       src_h[0][0], (1<<16)-1-alpha, width);
+                } else {
+                    v->vc1dsp.sprite_v_double_noscale(dst, src_h[0][0], src_h[1][0], alpha, width);
+                }
+            }
+        }
+
+        if (!plane) {
+            for (i = 0; i < 2; i++) {
+                xoff[i] >>= 1;
+                yoff[i] >>= 1;
+            }
+        }
+
+    }
+}
+
+
+static int vc1_decode_sprites(VC1Context *v, GetBitContext* gb)
+{
+    MpegEncContext *s = &v->s;
+    AVCodecContext *avctx = s->avctx;
+    SpriteData sd;
+
+    vc1_parse_sprites(v, gb, &sd);
+
+    if (!s->current_picture.f.data[0]) {
+        av_log(avctx, AV_LOG_ERROR, "Got no sprites\n");
+        return -1;
+    }
+
+    if (v->two_sprites && (!s->last_picture_ptr || !s->last_picture.f.data[0])) {
+        av_log(avctx, AV_LOG_WARNING, "Need two sprites, only got one\n");
+        v->two_sprites = 0;
+    }
+
+    if (v->sprite_output_frame.data[0])
+        avctx->release_buffer(avctx, &v->sprite_output_frame);
+
+    v->sprite_output_frame.buffer_hints = FF_BUFFER_HINTS_VALID;
+    v->sprite_output_frame.reference = 0;
+    if (avctx->get_buffer(avctx, &v->sprite_output_frame) < 0) {
+        av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
+        return -1;
+    }
+
+    vc1_draw_sprites(v, &sd);
+
+    return 0;
+}
+
+static void vc1_sprite_flush(AVCodecContext *avctx)
+{
+    VC1Context *v = avctx->priv_data;
+    MpegEncContext *s = &v->s;
+    AVFrame *f = &s->current_picture.f;
+    int plane, i;
+
+    /* Windows Media Image codecs have a convergence interval of two keyframes.
+       Since we can't enforce it, clear to black the missing sprite. This is
+       wrong but it looks better than doing nothing. */
+
+    if (f->data[0])
+        for (plane = 0; plane < (s->flags&CODEC_FLAG_GRAY ? 1 : 3); plane++)
+            for (i = 0; i < v->sprite_height>>!!plane; i++)
+                memset(f->data[plane]+i*f->linesize[plane],
+                       plane ? 128 : 0, f->linesize[plane]);
 }
 
+#endif
+
 /** Initialize a VC1/WMV3 decoder
  * @todo TODO: Handle VC-1 IDUs (Transport level?)
  * @todo TODO: Decypher remaining bits in extra_data
@@ -3399,6 +3562,10 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     GetBitContext gb;
     int i, cur_width, cur_height;
 
+    /* save the container output size for WMImage */
+    v->output_width  = avctx->width;
+    v->output_height = avctx->height;
+
     if (!avctx->extradata_size || !avctx->extradata) return -1;
     if (!(avctx->flags & CODEC_FLAG_GRAY))
         avctx->pix_fmt = avctx->get_format(avctx, avctx->codec->pix_fmts);
@@ -3420,7 +3587,7 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
 
     cur_width = avctx->coded_width = avctx->width;
     cur_height = avctx->coded_height = avctx->height;
-    if (avctx->codec_id == CODEC_ID_WMV3)
+    if (avctx->codec_id == CODEC_ID_WMV3 || avctx->codec_id == CODEC_ID_WMV3IMAGE)
     {
         int count = 0;
 
@@ -3562,6 +3729,25 @@ static av_cold int vc1_decode_init(AVCodecContext *avctx)
     }
 
     ff_intrax8_common_init(&v->x8,s);
+
+    if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+        for (i = 0; i < 4; i++)
+            if (!(v->sr_rows[i>>1][i%2] = av_malloc(v->output_width))) return -1;
+
+        s->low_delay = 1;
+
+        v->sprite_width  = avctx->coded_width;
+        v->sprite_height = avctx->coded_height;
+
+        avctx->coded_width  = avctx->width  = v->output_width;
+        avctx->coded_height = avctx->height = v->output_height;
+
+        // prevent 16.16 overflows
+        if (v->sprite_width  > 1<<14 ||
+            v->sprite_height > 1<<14 ||
+            v->output_width  > 1<<14 ||
+            v->output_height > 1<<14) return -1;
+    }
     return 0;
 }
 
@@ -3614,7 +3800,7 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     }
 
     //for advanced profile we may need to parse and unescape data
-    if (avctx->codec_id == CODEC_ID_VC1) {
+    if (avctx->codec_id == CODEC_ID_VC1 || avctx->codec_id == CODEC_ID_VC1IMAGE) {
         int buf_size2 = 0;
         buf2 = av_mallocz(buf_size + FF_INPUT_BUFFER_PADDING_SIZE);
 
@@ -3679,8 +3865,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
     if (v->res_sprite) {
         v->new_sprite = !get_bits1(&s->gb);
         v->two_sprites = get_bits1(&s->gb);
-        if (!v->new_sprite)
-            goto end;
+        /* res_sprite means a Windows Media Image stream, CODEC_ID_*IMAGE means
+           we're using the sprite compositor. These are intentionally kept separate
+           so you can get the raw sprites by using the wmv3 decoder for WMVP or
+           the vc1 one for WVP2 */
+        if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+            if (v->new_sprite) {
+                // switch AVCodecContext parameters to those of the sprites
+                avctx->width  = avctx->coded_width  = v->sprite_width;
+                avctx->height = avctx->coded_height = v->sprite_height;
+            } else {
+                goto image;
+            }
+        }
     }
 
     // do parse frame header
@@ -3694,8 +3891,10 @@ static int vc1_decode_frame(AVCodecContext *avctx,
         }
     }
 
-    if (v->res_sprite && s->pict_type!=AV_PICTURE_TYPE_I) {
-        av_log(v->s.avctx, AV_LOG_WARNING, "Sprite decoder: expected I-frame\n");
+    if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+        && s->pict_type!=AV_PICTURE_TYPE_I) {
+        av_log(v->s.avctx, AV_LOG_ERROR, "Sprite decoder: expected I-frame\n");
+        goto err;
     }
 
     // for skipping the frame
@@ -3758,6 +3957,19 @@ static int vc1_decode_frame(AVCodecContext *avctx,
 
 assert(s->current_picture.f.pict_type == s->current_picture_ptr->f.pict_type);
 assert(s->current_picture.f.pict_type == s->pict_type);
+
+    if (avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE) {
+image:
+        avctx->width  = avctx->coded_width  = v->output_width;
+        avctx->height = avctx->coded_height = v->output_height;
+        if (avctx->skip_frame >= AVDISCARD_NONREF) goto end;
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+        if (vc1_decode_sprites(v, &s->gb)) goto err;
+#endif
+        *pict = v->sprite_output_frame;
+        *data_size = sizeof(AVFrame);
+    } else {
+
     if (s->pict_type == AV_PICTURE_TYPE_B || s->low_delay) {
         *pict= *(AVFrame*)s->current_picture_ptr;
     } else if (s->last_picture_ptr != NULL) {
@@ -3769,9 +3981,9 @@ assert(s->current_picture.f.pict_type == s->pict_type);
         ff_print_debug_info(s, pict);
     }
 
+    }
+
 end:
-    if (v->res_sprite)
-        vc1_parse_sprites(v, &s->gb);
     av_free(buf2);
     for (i = 0; i < n_slices; i++)
         av_free(slices[i].buf);
@@ -3793,7 +4005,13 @@ err:
 static av_cold int vc1_decode_end(AVCodecContext *avctx)
 {
     VC1Context *v = avctx->priv_data;
+    int i;
 
+    if ((avctx->codec_id == CODEC_ID_WMV3IMAGE || avctx->codec_id == CODEC_ID_VC1IMAGE)
+        && v->sprite_output_frame.data[0])
+        avctx->release_buffer(avctx, &v->sprite_output_frame);
+    for (i = 0; i < 4; i++)
+        av_freep(&v->sr_rows[i>>1][i%2]);
     av_freep(&v->hrd_rate);
     av_freep(&v->hrd_buffer);
     MPV_common_end(&v->s);
@@ -3880,3 +4098,35 @@ AVCodec ff_vc1_vdpau_decoder = {
     .profiles = NULL_IF_CONFIG_SMALL(profiles)
 };
 #endif
+
+#if CONFIG_WMV3IMAGE_DECODER
+AVCodec ff_wmv3image_decoder = {
+    .name           = "wmv3image",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_WMV3IMAGE,
+    .priv_data_size = sizeof(VC1Context),
+    .init           = vc1_decode_init,
+    .close          = vc1_decode_end,
+    .decode         = vc1_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
+    .flush          = vc1_sprite_flush,
+    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image"),
+    .pix_fmts = ff_pixfmt_list_420
+};
+#endif
+
+#if CONFIG_VC1IMAGE_DECODER
+AVCodec ff_vc1image_decoder = {
+    .name           = "vc1image",
+    .type           = AVMEDIA_TYPE_VIDEO,
+    .id             = CODEC_ID_VC1IMAGE,
+    .priv_data_size = sizeof(VC1Context),
+    .init           = vc1_decode_init,
+    .close          = vc1_decode_end,
+    .decode         = vc1_decode_frame,
+    .capabilities   = CODEC_CAP_DR1,
+    .flush          = vc1_sprite_flush,
+    .long_name = NULL_IF_CONFIG_SMALL("Windows Media Video 9 Image v2"),
+    .pix_fmts = ff_pixfmt_list_420
+};
+#endif
diff --git a/libavcodec/vc1dsp.c b/libavcodec/vc1dsp.c
index 7d0e406..4dd5672 100644
--- a/libavcodec/vc1dsp.c
+++ b/libavcodec/vc1dsp.c
@@ -713,6 +713,66 @@ static void avg_no_rnd_vc1_chroma_mc8_c(uint8_t *dst/*align 8*/, uint8_t *src/*a
     }
 }
 
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+
+static void sprite_h_c(uint8_t *dst, const uint8_t *src, int offset, int advance, int count)
+{
+    while (count--) {
+        int a = src[(offset >> 16)    ];
+        int b = src[(offset >> 16) + 1];
+        *dst++ = a + ((b - a) * (offset&0xFFFF) >> 16);
+        offset += advance;
+    }
+}
+
+static av_always_inline void sprite_v_template(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                            int two_sprites, const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                            int alpha, int scaled, int width)
+{
+    int a1, b1, a2, b2;
+    while (width--) {
+        a1 = *src1a++;
+        if (scaled) {
+            b1 = *src1b++;
+            a1 = a1 + ((b1 - a1) * offset1 >> 16);
+        }
+        if (two_sprites) {
+            a2 = *src2a++;
+            if (scaled > 1) {
+                b2 = *src2b++;
+                a2 = a2 + ((b2 - a2) * offset2 >> 16);
+            }
+            a1 = a1 + ((a2 - a1) * alpha >> 16);
+        }
+        *dst++ = a1;
+    }
+}
+
+static void sprite_v_single_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset, 0, NULL, NULL, 0, 0, 1, width);
+}
+
+static void sprite_v_double_noscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width)
+{
+    sprite_v_template(dst, src1a, NULL, 0, 1, src2a, NULL, 0, alpha, 0, width);
+}
+
+static void sprite_v_double_onescale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                                     const uint8_t *src2a, int alpha, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, NULL, 0, alpha, 1, width);
+}
+
+static void sprite_v_double_twoscale_c(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                                     const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                       int alpha, int width)
+{
+    sprite_v_template(dst, src1a, src1b, offset1, 1, src2a, src2b, offset2, alpha, 2, width);
+}
+
+#endif
+
 av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_c;
     dsp->vc1_inv_trans_4x8 = vc1_inv_trans_4x8_c;
@@ -770,6 +830,14 @@ av_cold void ff_vc1dsp_init(VC1DSPContext* dsp) {
     dsp->put_no_rnd_vc1_chroma_pixels_tab[0]= put_no_rnd_vc1_chroma_mc8_c;
     dsp->avg_no_rnd_vc1_chroma_pixels_tab[0]= avg_no_rnd_vc1_chroma_mc8_c;
 
+#if CONFIG_WMV3IMAGE_DECODER || CONFIG_VC1IMAGE_DECODER
+    dsp->sprite_h = sprite_h_c;
+    dsp->sprite_v_single = sprite_v_single_c;
+    dsp->sprite_v_double_noscale = sprite_v_double_noscale_c;
+    dsp->sprite_v_double_onescale = sprite_v_double_onescale_c;
+    dsp->sprite_v_double_twoscale = sprite_v_double_twoscale_c;
+#endif
+
     if (HAVE_ALTIVEC)
         ff_vc1dsp_init_altivec(dsp);
     if (HAVE_MMX)
diff --git a/libavcodec/vc1dsp.h b/libavcodec/vc1dsp.h
index e1b6ba0..3e0a88e 100644
--- a/libavcodec/vc1dsp.h
+++ b/libavcodec/vc1dsp.h
@@ -60,6 +60,16 @@ typedef struct VC1DSPContext {
     /* This is really one func used in VC-1 decoding */
     h264_chroma_mc_func put_no_rnd_vc1_chroma_pixels_tab[3];
     h264_chroma_mc_func avg_no_rnd_vc1_chroma_pixels_tab[3];
+
+    /* Windows Media Image functions */
+    void (*sprite_h)(uint8_t *dst, const uint8_t *src, int offset, int advance, int count);
+    void (*sprite_v_single)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset, int width);
+    void (*sprite_v_double_noscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src2a, int alpha, int width);
+    void (*sprite_v_double_onescale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                                   const uint8_t *src2a, int alpha, int width);
+    void (*sprite_v_double_twoscale)(uint8_t *dst, const uint8_t *src1a, const uint8_t *src1b, int offset1,
+                                                   const uint8_t *src2a, const uint8_t *src2b, int offset2,
+                                     int alpha, int width);
 } VC1DSPContext;
 
 void ff_vc1dsp_init(VC1DSPContext* c);
diff --git a/libavcodec/version.h b/libavcodec/version.h
index 24a33d7..43efcaf 100644
--- a/libavcodec/version.h
+++ b/libavcodec/version.h
@@ -21,7 +21,7 @@
 #define AVCODEC_VERSION_H
 
 #define LIBAVCODEC_VERSION_MAJOR 53
-#define LIBAVCODEC_VERSION_MINOR  8
+#define LIBAVCODEC_VERSION_MINOR  9
 #define LIBAVCODEC_VERSION_MICRO  0
 
 #define LIBAVCODEC_VERSION_INT  AV_VERSION_INT(LIBAVCODEC_VERSION_MAJOR, \
diff --git a/libavformat/riff.c b/libavformat/riff.c
index c426ae5..4b81eb3 100644
--- a/libavformat/riff.c
+++ b/libavformat/riff.c
@@ -237,10 +237,10 @@ const AVCodecTag ff_codec_bmp_tags[] = {
     { CODEC_ID_QPEG,         MKTAG('Q', '1', '.', '0') },
     { CODEC_ID_QPEG,         MKTAG('Q', '1', '.', '1') },
     { CODEC_ID_WMV3,         MKTAG('W', 'M', 'V', '3') },
-    { CODEC_ID_WMV3,         MKTAG('W', 'M', 'V', 'P') },
+    { CODEC_ID_WMV3IMAGE,    MKTAG('W', 'M', 'V', 'P') },
     { CODEC_ID_VC1,          MKTAG('W', 'V', 'C', '1') },
     { CODEC_ID_VC1,          MKTAG('W', 'M', 'V', 'A') },
-    { CODEC_ID_VC1,          MKTAG('W', 'V', 'P', '2') },
+    { CODEC_ID_VC1IMAGE,     MKTAG('W', 'V', 'P', '2') },
     { CODEC_ID_LOCO,         MKTAG('L', 'O', 'C', 'O') },
     { CODEC_ID_WNV1,         MKTAG('W', 'N', 'V', '1') },
     { CODEC_ID_AASC,         MKTAG('A', 'A', 'S', 'C') },
-- 
1.7.5.1
