An example of usage of AVPacket side data: the demuxers attach the palette to the packet as AV_PKT_DATA_PALETTE and the decoders read it from there instead of avctx->palctrl, so this effectively deprecates AVPaletteControl too.
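
The change follows the same two patterns in every file touched below. A minimal sketch for reference (not a complete function: the ctx, has_palette, palette and pal names stand in for the per-demuxer and per-decoder context fields added by this patch, and the side-data calls mirror the asfdec.c/mov.c hunks):

    /* demuxer side: once a palette change has been parsed, attach it to the
     * next packet returned for that stream */
    if (ctx->has_palette) {
        if (av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE)) {
            av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
        } else {
            memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
                   ctx->palette, AVPALETTE_SIZE);
            ctx->has_palette = 0;
        }
    }

    /* decoder side: pick the palette up from the packet side data, cache it
     * in the codec private context and export it with every decoded frame */
    if (avctx->bits_per_coded_sample <= 8) {
        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
        if (pal) {
            memcpy(ctx->pal, pal, AVPALETTE_SIZE);
            frame->palette_has_changed = 1;
        }
        memcpy(frame->data[1], ctx->pal, AVPALETTE_SIZE);
    }

Since side data only shows up on packets where the demuxer actually saw a palette change, each decoder keeps its own uint32_t pal[256] copy and reuses it for the frames in between, instead of reading a palette shared through avctx->palctrl on every call.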

---
 libavcodec/8bps.c           |   19 +++++++++----------
 libavcodec/cinepak.c        |   19 +++++++++++--------
 libavcodec/idcinvideo.c     |   12 ++++++------
 libavcodec/interplayvideo.c |   21 ++++++++++-----------
 libavcodec/kmvc.c           |   16 ++++++----------
 libavcodec/msrle.c          |   17 +++++++++--------
 libavcodec/msvideo1.c       |   24 +++++++++++++-----------
 libavcodec/qpeg.c           |   12 +++++-------
 libavcodec/qtrle.c          |   12 ++++++++----
 libavcodec/rawdec.c         |   10 +++++++---
 libavcodec/smc.c            |   13 ++++++++-----
 libavcodec/targa.c          |    7 -------
 libavcodec/tscc.c           |   10 +++++++---
 libavformat/asf.h           |    2 ++
 libavformat/asfdec.c        |   20 ++++++++++++++------
 libavformat/avidec.c        |   23 ++++++++++-------------
 libavformat/idcin.c         |   16 ++++++++++------
 libavformat/ipmovie.c       |   21 +++++++++++++--------
 libavformat/isom.h          |    2 ++
 libavformat/mov.c           |   18 +++++++++++++-----
 20 files changed, 163 insertions(+), 131 deletions(-)

diff --git a/libavcodec/8bps.c b/libavcodec/8bps.c
index 1c6d406..8356a4f 100644
--- a/libavcodec/8bps.c
+++ b/libavcodec/8bps.c
@@ -50,6 +50,8 @@ typedef struct EightBpsContext {
 
         unsigned char planes;
         unsigned char planemap[4];
+
+        uint32_t pal[256];
 } EightBpsContext;
 
 
@@ -129,13 +131,14 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
                 }
         }
 
-        if (avctx->palctrl) {
-                memcpy (c->pic.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-                if (avctx->palctrl->palette_changed) {
+        if (avctx->bits_per_coded_sample <= 8) {
+                const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+                if (pal) {
                         c->pic.palette_has_changed = 1;
-                        avctx->palctrl->palette_changed = 0;
-                } else
-                        c->pic.palette_has_changed = 0;
+                        memcpy(c->pal, pal, AVPALETTE_SIZE);
+                }
+
+                memcpy (c->pic.data[1], c->pal, AVPALETTE_SIZE);
         }
 
         *data_size = sizeof(AVFrame);
@@ -164,10 +167,6 @@ static av_cold int decode_init(AVCodecContext *avctx)
                         avctx->pix_fmt = PIX_FMT_PAL8;
                         c->planes = 1;
                         c->planemap[0] = 0; // 1st plane is palette indexes
-                        if (avctx->palctrl == NULL) {
-                                av_log(avctx, AV_LOG_ERROR, "Error: PAL8 format but no palette from demuxer.\n");
-                                return -1;
-                        }
                         break;
                 case 24:
                         avctx->pix_fmt = avctx->get_format(avctx, pixfmt_rgb24);
diff --git a/libavcodec/cinepak.c b/libavcodec/cinepak.c
index f325bdb..c005bc0 100644
--- a/libavcodec/cinepak.c
+++ b/libavcodec/cinepak.c
@@ -67,6 +67,7 @@ typedef struct CinepakContext {
 
     int sega_film_skip_bytes;
 
+    uint32_t pal[256];
 } CinepakContext;
 
 static void cinepak_decode_codebook (cvid_codebook *codebook,
@@ -395,7 +396,7 @@ static av_cold int cinepak_decode_init(AVCodecContext *avctx)
     s->sega_film_skip_bytes = -1;  /* uninitialized state */
 
     // check for paletted data
-    if ((avctx->palctrl == NULL) || (avctx->bits_per_coded_sample == 40)) {
+    if (avctx->bits_per_coded_sample != 8) {
         s->palette_video = 0;
         avctx->pix_fmt = PIX_FMT_YUV420P;
     } else {
@@ -427,17 +428,19 @@ static int cinepak_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    cinepak_decode(s);
-
     if (s->palette_video) {
-        memcpy (s->frame.data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+        if (pal) {
             s->frame.palette_has_changed = 1;
-            avctx->palctrl->palette_changed = 0;
-        } else
-            s->frame.palette_has_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+        }
     }
 
+    cinepak_decode(s);
+
+    if (s->palette_video)
+        memcpy (s->frame.data[1], s->pal, AVPALETTE_SIZE);
+
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->frame;
 
diff --git a/libavcodec/idcinvideo.c b/libavcodec/idcinvideo.c
index b8d47ad..5ded6eb 100644
--- a/libavcodec/idcinvideo.c
+++ b/libavcodec/idcinvideo.c
@@ -72,6 +72,7 @@ typedef struct IdcinContext {
     hnode huff_nodes[256][HUF_TOKENS*2];
     int num_huff_nodes[256];
 
+    uint32_t pal[256];
 } IdcinContext;
 
 /*
@@ -213,7 +214,7 @@ static int idcin_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     IdcinContext *s = avctx->priv_data;
-    AVPaletteControl *palette_control = avctx->palctrl;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
 
     s->buf = buf;
     s->size = buf_size;
@@ -228,13 +229,12 @@ static int idcin_decode_frame(AVCodecContext *avctx,
 
     idcin_decode_vlcs(s);
 
-    /* make the palette available on the way out */
-    memcpy(s->frame.data[1], palette_control->palette, PALETTE_COUNT * 4);
-    /* If palette changed inform application*/
-    if (palette_control->palette_changed) {
-        palette_control->palette_changed = 0;
+    if (pal) {
         s->frame.palette_has_changed = 1;
+        memcpy(s->pal, pal, AVPALETTE_SIZE);
     }
+    /* make the palette available on the way out */
+    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->frame;
diff --git a/libavcodec/interplayvideo.c b/libavcodec/interplayvideo.c
index 8dbe6f6..36d0550 100644
--- a/libavcodec/interplayvideo.c
+++ b/libavcodec/interplayvideo.c
@@ -77,6 +77,7 @@ typedef struct IpvideoContext {
     int stride;
     int upper_motion_limit_offset;
 
+    uint32_t pal[256];
 } IpvideoContext;
 
 #define CHECK_STREAM_PTR(stream_ptr, stream_end, n) \
@@ -969,7 +970,7 @@ static void ipvideo_decode_opcodes(IpvideoContext *s)
 
     if (!s->is_16bpp) {
         /* this is PAL8, so make the palette available */
-        memcpy(s->current_frame.data[1], s->avctx->palctrl->palette, PALETTE_COUNT * 4);
+        memcpy(s->current_frame.data[1], s->pal, AVPALETTE_SIZE);
 
         s->stride = s->current_frame.linesize[0];
         s->stream_ptr = s->buf + 14;  /* data starts 14 bytes in */
@@ -1023,10 +1024,6 @@ static av_cold int ipvideo_decode_init(AVCodecContext *avctx)
 
     s->is_16bpp = avctx->bits_per_coded_sample == 16;
     avctx->pix_fmt = s->is_16bpp ? PIX_FMT_RGB555 : PIX_FMT_PAL8;
-    if (!s->is_16bpp && s->avctx->palctrl == NULL) {
-        av_log(avctx, AV_LOG_ERROR, " Interplay video: palette expected.\n");
-        return -1;
-    }
 
     dsputil_init(&s->dsp, avctx);
 
@@ -1046,7 +1043,6 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     IpvideoContext *s = avctx->priv_data;
-    AVPaletteControl *palette_control = avctx->palctrl;
 
     /* compressed buffer needs to be large enough to at least hold an entire
      * decoding map */
@@ -1063,13 +1059,16 @@ static int ipvideo_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    ipvideo_decode_opcodes(s);
-
-    if (!s->is_16bpp && palette_control->palette_changed) {
-        palette_control->palette_changed = 0;
-        s->current_frame.palette_has_changed = 1;
+    if (!s->is_16bpp) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+        if (pal) {
+            s->current_frame.palette_has_changed = 1;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+        }
     }
 
+    ipvideo_decode_opcodes(s);
+
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = s->current_frame;
 
diff --git a/libavcodec/kmvc.c b/libavcodec/kmvc.c
index 2671cc6..a0ceaf0 100644
--- a/libavcodec/kmvc.c
+++ b/libavcodec/kmvc.c
@@ -233,6 +234,7 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
     int i;
     int header;
     int blocksize;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
 
     if (ctx->pic.data[0])
         avctx->release_buffer(avctx, &ctx->pic);
@@ -264,13 +265,6 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
         ctx->pic.pict_type = FF_P_TYPE;
     }
 
-    /* if palette has been changed, copy it from palctrl */
-    if (ctx->avctx->palctrl && ctx->avctx->palctrl->palette_changed) {
-        memcpy(ctx->pal, ctx->avctx->palctrl->palette, AVPALETTE_SIZE);
-        ctx->setpal = 1;
-        ctx->avctx->palctrl->palette_changed = 0;
-    }
-
     if (header & KMVC_PALETTE) {
         ctx->pic.palette_has_changed = 1;
         // palette starts from index 1 and has 127 entries
@@ -279,6 +273,11 @@ static int decode_frame(AVCodecContext * avctx, void *data, int *data_size, AVPa
         }
     }
 
+    if (pal) {
+        ctx->pic.palette_has_changed = 1;
+        memcpy(ctx->pal, pal, AVPALETTE_SIZE);
+    }
+
     if (ctx->setpal) {
         ctx->setpal = 0;
         ctx->pic.palette_has_changed = 1;
@@ -374,9 +373,6 @@ static av_cold int decode_init(AVCodecContext * avctx)
             src += 4;
         }
         c->setpal = 1;
-        if (c->avctx->palctrl) {
-            c->avctx->palctrl->palette_changed = 0;
-        }
     }
 
     avctx->pix_fmt = PIX_FMT_PAL8;
diff --git a/libavcodec/msrle.c b/libavcodec/msrle.c
index a400589..1c11d04 100644
--- a/libavcodec/msrle.c
+++ b/libavcodec/msrle.c
@@ -26,9 +26,6 @@
  *   http://www.pcisys.net/~melanson/codecs/
  *
  * The MS RLE decoder outputs PAL8 colorspace data.
- *
- * Note that this decoder expects the palette colors from the end of the
- * BITMAPINFO header passed through palctrl.
  */
 
 #include <stdio.h>
@@ -46,6 +43,7 @@ typedef struct MsrleContext {
     const unsigned char *buf;
     int size;
 
+    uint32_t pal[256];
 } MsrleContext;
 
 static av_cold int msrle_decode_init(AVCodecContext *avctx)
@@ -91,13 +89,16 @@ static int msrle_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
-    if (s->avctx->palctrl) {
-        /* make the palette available */
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
+    if (avctx->bits_per_coded_sample <= 8) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+
+        if (pal) {
             s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
         }
+
+        /* make the palette available */
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
     }
 
     /* FIXME how to correctly detect RLE ??? */
diff --git a/libavcodec/msvideo1.c b/libavcodec/msvideo1.c
index e01ddf5..7b3d37e 100644
--- a/libavcodec/msvideo1.c
+++ b/libavcodec/msvideo1.c
@@ -25,9 +25,6 @@
  * For more information about the MS Video-1 format, visit:
  *   http://www.pcisys.net/~melanson/codecs/
  *
- * This decoder outputs either PAL8 or RGB555 data, depending on the
- * whether a RGB palette was passed through palctrl;
- * if it's present, then the data is PAL8; RGB555 otherwise.
  */
 
 #include <stdio.h>
@@ -55,6 +52,7 @@ typedef struct Msvideo1Context {
 
     int mode_8bit;  /* if it's not 8-bit, it's 16-bit */
 
+    uint32_t pal[256];
 } Msvideo1Context;
 
 static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
@@ -64,7 +62,7 @@ static av_cold int msvideo1_decode_init(AVCodecContext *avctx)
     s->avctx = avctx;
 
     /* figure out the colorspace based on the presence of a palette */
-    if (s->avctx->palctrl) {
+    if (s->avctx->bits_per_coded_sample == 8) {
         s->mode_8bit = 1;
         avctx->pix_fmt = PIX_FMT_PAL8;
     } else {
@@ -173,13 +171,8 @@ static void msvideo1_decode_8bit(Msvideo1Context *s)
     }
 
     /* make the palette available on the way out */
-    if (s->avctx->pix_fmt == PIX_FMT_PAL8) {
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
-            s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
-        }
-    }
+    if (s->avctx->pix_fmt == PIX_FMT_PAL8)
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 }
 
 static void msvideo1_decode_16bit(Msvideo1Context *s)
@@ -309,6 +302,15 @@ static int msvideo1_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
+    if (s->mode_8bit) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+
+        if (pal) {
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
+            s->frame.palette_has_changed = 1;
+        }
+    }
+
     if (s->mode_8bit)
         msvideo1_decode_8bit(s);
     else
diff --git a/libavcodec/qpeg.c b/libavcodec/qpeg.c
index ccd634a..bdae9c1 100644
--- a/libavcodec/qpeg.c
+++ b/libavcodec/qpeg.c
@@ -30,6 +30,7 @@ typedef struct QpegContext{
     AVCodecContext *avctx;
     AVFrame pic;
     uint8_t *refdata;
+    uint32_t pal[256];
 } QpegContext;
 
 static void qpeg_decode_intra(const uint8_t *src, uint8_t *dst, int size,
@@ -256,6 +257,7 @@ static int decode_frame(AVCodecContext *avctx,
     AVFrame * const p= (AVFrame*)&a->pic;
     uint8_t* outdata;
     int delta;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
 
     if(p->data[0])
         avctx->release_buffer(avctx, p);
@@ -274,11 +276,11 @@ static int decode_frame(AVCodecContext *avctx,
     }
 
     /* make the palette available on the way out */
-    memcpy(a->pic.data[1], a->avctx->palctrl->palette, AVPALETTE_SIZE);
-    if (a->avctx->palctrl->palette_changed) {
+    if (pal) {
         a->pic.palette_has_changed = 1;
-        a->avctx->palctrl->palette_changed = 0;
+        memcpy(a->pal, pal, AVPALETTE_SIZE);
     }
+    memcpy(a->pic.data[1], a->pal, AVPALETTE_SIZE);
 
     *data_size = sizeof(AVFrame);
     *(AVFrame*)data = a->pic;
@@ -289,10 +291,6 @@ static int decode_frame(AVCodecContext *avctx,
 static av_cold int decode_init(AVCodecContext *avctx){
     QpegContext * const a = avctx->priv_data;
 
-    if (!avctx->palctrl) {
-        av_log(avctx, AV_LOG_FATAL, "Missing required palette via palctrl\n");
-        return -1;
-    }
     a->avctx = avctx;
     avctx->pix_fmt= PIX_FMT_PAL8;
     a->refdata = av_malloc(avctx->width * avctx->height);
diff --git a/libavcodec/qtrle.c b/libavcodec/qtrle.c
index a8cc903..2a5f2d4 100644
--- a/libavcodec/qtrle.c
+++ b/libavcodec/qtrle.c
@@ -46,6 +46,7 @@ typedef struct QtrleContext {
     const unsigned char *buf;
     int size;
 
+    uint32_t pal[256];
 } QtrleContext;
 
 #define CHECK_STREAM_PTR(n) \
@@ -511,12 +512,15 @@ static int qtrle_decode_frame(AVCodecContext *avctx,
     }
 
     if(has_palette) {
-        /* make the palette available on the way out */
-        memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (s->avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+
+        if (pal) {
             s->frame.palette_has_changed = 1;
-            s->avctx->palctrl->palette_changed = 0;
+            memcpy(s->pal, pal, AVPALETTE_SIZE);
         }
+
+        /* make the palette available on the way out */
+        memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
     }
 
 done:
diff --git a/libavcodec/rawdec.c b/libavcodec/rawdec.c
index 3c38829..53b2714 100644
--- a/libavcodec/rawdec.c
+++ b/libavcodec/rawdec.c
@@ -158,9 +158,13 @@ static int raw_decode(AVCodecContext *avctx,
         (av_pix_fmt_descriptors[avctx->pix_fmt].flags & PIX_FMT_PAL))){
         frame->data[1]= context->palette;
     }
-    if (avctx->palctrl && avctx->palctrl->palette_changed) {
-        memcpy(frame->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        avctx->palctrl->palette_changed = 0;
+    if (avctx->pix_fmt == PIX_FMT_PAL8) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+
+        if (pal) {
+            memcpy(frame->data[1], pal, AVPALETTE_SIZE);
+            frame->palette_has_changed = 1;
+        }
     }
     if(avctx->pix_fmt==PIX_FMT_BGR24 && ((frame->linesize[0]+3)&~3)*avctx->height <= buf_size)
         frame->linesize[0] = (frame->linesize[0]+3)&~3;
diff --git a/libavcodec/smc.c b/libavcodec/smc.c
index fe92b43..44397cb 100644
--- a/libavcodec/smc.c
+++ b/libavcodec/smc.c
@@ -54,6 +54,7 @@ typedef struct SmcContext {
     unsigned char color_quads[COLORS_PER_TABLE * CQUAD];
     unsigned char color_octets[COLORS_PER_TABLE * COCTET];
 
+    uint32_t pal[256];
 } SmcContext;
 
 #define GET_BLOCK_COUNT() \
@@ -110,11 +111,7 @@ static void smc_decode_stream(SmcContext *s)
     int color_octet_index = 0;
 
     /* make the palette available */
-    memcpy(s->frame.data[1], s->avctx->palctrl->palette, AVPALETTE_SIZE);
-    if (s->avctx->palctrl->palette_changed) {
-        s->frame.palette_has_changed = 1;
-        s->avctx->palctrl->palette_changed = 0;
-    }
+    memcpy(s->frame.data[1], s->pal, AVPALETTE_SIZE);
 
     chunk_size = AV_RB32(&s->buf[stream_ptr]) & 0x00FFFFFF;
     stream_ptr += 4;
@@ -440,6 +437,7 @@ static int smc_decode_frame(AVCodecContext *avctx,
     const uint8_t *buf = avpkt->data;
     int buf_size = avpkt->size;
     SmcContext *s = avctx->priv_data;
+    const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
 
     s->buf = buf;
     s->size = buf_size;
@@ -452,6 +450,11 @@ static int smc_decode_frame(AVCodecContext *avctx,
         return -1;
     }
 
+    if (pal) {
+        s->frame.palette_has_changed = 1;
+        memcpy(s->pal, pal, AVPALETTE_SIZE);
+    }
+
     smc_decode_stream(s);
 
     *data_size = sizeof(AVFrame);
diff --git a/libavcodec/targa.c b/libavcodec/targa.c
index 06f87e4..910cc1b 100644
--- a/libavcodec/targa.c
+++ b/libavcodec/targa.c
@@ -171,13 +171,6 @@ static int decode_frame(AVCodecContext *avctx,
         stride = -p->linesize[0];
     }
 
-    if(avctx->pix_fmt == PIX_FMT_PAL8 && avctx->palctrl){
-        memcpy(p->data[1], avctx->palctrl->palette, AVPALETTE_SIZE);
-        if(avctx->palctrl->palette_changed){
-            p->palette_has_changed = 1;
-            avctx->palctrl->palette_changed = 0;
-        }
-    }
     if(colors){
         size_t pal_size;
         if((colors + first_clr) > 256){
diff --git a/libavcodec/tscc.c b/libavcodec/tscc.c
index f695973..02226e2 100644
--- a/libavcodec/tscc.c
+++ b/libavcodec/tscc.c
@@ -60,6 +60,8 @@ typedef struct TsccContext {
     unsigned char* decomp_buf;
     int height;
     z_stream zstream;
+
+    uint32_t pal[256];
 } CamtasiaContext;
 
 /*
@@ -111,11 +113,13 @@ static int decode_frame(AVCodecContext *avctx, void *data, int *data_size, AVPac
 
     /* make the palette available on the way out */
     if (c->avctx->pix_fmt == PIX_FMT_PAL8) {
-        memcpy(c->pic.data[1], c->avctx->palctrl->palette, AVPALETTE_SIZE);
-        if (c->avctx->palctrl->palette_changed) {
+        const uint8_t *pal = av_packet_get_side_data(avpkt, AV_PKT_DATA_PALETTE);
+
+        if (pal) {
             c->pic.palette_has_changed = 1;
-            c->avctx->palctrl->palette_changed = 0;
+            memcpy(c->pal, pal, AVPALETTE_SIZE);
         }
+        memcpy(c->pic.data[1], c->pal, AVPALETTE_SIZE);
     }
 
     *data_size = sizeof(AVFrame);
diff --git a/libavformat/asf.h b/libavformat/asf.h
index b563459..b72445d 100644
--- a/libavformat/asf.h
+++ b/libavformat/asf.h
@@ -44,6 +44,8 @@ typedef struct {
 
     uint16_t stream_language_index;
 
+    int      palette_changed;
+    uint32_t palette[256];
 } ASFStream;
 
 typedef uint8_t ff_asf_guid[16];
diff --git a/libavformat/asfdec.c b/libavformat/asfdec.c
index 20b4987..2cf8cfb 100644
--- a/libavformat/asfdec.c
+++ b/libavformat/asfdec.c
@@ -363,15 +363,14 @@ static int asf_read_stream_properties(AVFormatContext *s, int64_t size)
         /* This is true for all paletted codecs implemented in ffmpeg */
         if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
             int av_unused i;
-            st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
 #if HAVE_BIGENDIAN
             for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
-                st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
+                asf_st->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
 #else
-            memcpy(st->codec->palctrl->palette, st->codec->extradata,
-                    FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
+            memcpy(asf_st->palette, st->codec->extradata,
+                   FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
 #endif
-            st->codec->palctrl->palette_changed = 1;
+            asf_st->palette_changed = 1;
         }
 
         st->codec->codec_tag = tag1;
@@ -964,6 +963,16 @@ static int ff_asf_parse_packet(AVFormatContext *s, AVIOContext *pb, AVPacket *pk
             asf_st->pkt.stream_index = asf->stream_index;
             asf_st->pkt.pos =
             asf_st->packet_pos= asf->packet_pos;
+            if (asf_st->pkt.data && asf_st->palette_changed) {
+                if (av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                            AVPALETTE_SIZE)) {
+                    av_log(s, AV_LOG_ERROR, "Cannot append palette to packet\n");
+                } else {
+                    memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
+                           asf_st->palette, AVPALETTE_SIZE);
+                    asf_st->palette_changed = 0;
+                }
+            }
 //printf("new packet: stream:%d key:%d packet_key:%d audio:%d size:%d\n",
 //asf->stream_index, asf->packet_key_frame, asf_st->pkt.flags & AV_PKT_FLAG_KEY,
 //s->streams[asf->stream_index]->codec->codec_type == AVMEDIA_TYPE_AUDIO, asf->packet_obj_size);
@@ -1125,7 +1134,6 @@ static int asf_read_close(AVFormatContext *s)
     asf_reset_header(s);
     for(i=0;i<s->nb_streams;i++) {
         AVStream *st = s->streams[i];
-        av_free(st->codec->palctrl);
     }
     return 0;
 }
diff --git a/libavformat/avidec.c b/libavformat/avidec.c
index ae8d320..665f088 100644
--- a/libavformat/avidec.c
+++ b/libavformat/avidec.c
@@ -589,15 +589,14 @@ static int avi_read_header(AVFormatContext *s, AVFormatParameters *ap)
                     /* This code assumes that extradata contains only palette. */
                     /* This is true for all paletted codecs implemented in FFmpeg. */
                     if (st->codec->extradata_size && (st->codec->bits_per_coded_sample <= 8)) {
-                        st->codec->palctrl = av_mallocz(sizeof(AVPaletteControl));
 #if HAVE_BIGENDIAN
                         for (i = 0; i < FFMIN(st->codec->extradata_size, AVPALETTE_SIZE)/4; i++)
-                            st->codec->palctrl->palette[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
+                            ast->pal[i] = av_bswap32(((uint32_t*)st->codec->extradata)[i]);
 #else
-                        memcpy(st->codec->palctrl->palette, st->codec->extradata,
+                        memcpy(ast->pal, st->codec->extradata,
                                FFMIN(st->codec->extradata_size, AVPALETTE_SIZE));
 #endif
-                        st->codec->palctrl->palette_changed = 1;
+                        ast->has_pal = 1;
                     }
 
                     print_tag("video", tag1, 0);
@@ -929,14 +928,13 @@ resync:
             return err;
 
         if(ast->has_pal && pkt->data && pkt->size<(unsigned)INT_MAX/2){
-            void *ptr= av_realloc(pkt->data, pkt->size + 4*256 + FF_INPUT_BUFFER_PADDING_SIZE);
-            if(ptr){
-            ast->has_pal=0;
-            pkt->size += 4*256;
-            pkt->data= ptr;
-                memcpy(pkt->data + pkt->size - 4*256, ast->pal, 4*256);
-            }else
-                av_log(s, AV_LOG_ERROR, "Failed to append palette\n");
+            if(av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE)){
+                av_log(s, AV_LOG_ERROR, "Failed to allocate data for palette\n");
+            }else{
+                memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
+                       ast->pal, AVPALETTE_SIZE);
+                ast->has_pal = 0;
+            }
         }
 
         if (CONFIG_DV_DEMUXER && avi->dv_demux) {
@@ -1337,7 +1335,6 @@ static int avi_read_close(AVFormatContext *s)
     for(i=0;i<s->nb_streams;i++) {
         AVStream *st = s->streams[i];
         AVIStream *ast = st->priv_data;
-        av_free(st->codec->palctrl);
         if (ast) {
             if (ast->sub_ctx) {
                 av_freep(&ast->sub_ctx->pb);
diff --git a/libavformat/idcin.c b/libavformat/idcin.c
index e6883c7..c7031ba 100644
--- a/libavformat/idcin.c
+++ b/libavformat/idcin.c
@@ -86,8 +86,6 @@ typedef struct IdcinDemuxContext {
     int audio_present;
 
     int64_t pts;
-
-    AVPaletteControl palctrl;
 } IdcinDemuxContext;
 
 static int idcin_probe(AVProbeData *p)
@@ -172,8 +170,6 @@ static int idcin_read_header(AVFormatContext *s,
     if (avio_read(pb, st->codec->extradata, HUFFMAN_TABLE_SIZE) !=
         HUFFMAN_TABLE_SIZE)
         return AVERROR(EIO);
-    /* save a reference in order to transport the palette */
-    st->codec->palctrl = &idcin->palctrl;
 
     /* if sample rate is 0, assume no audio */
     if (sample_rate) {
@@ -226,6 +222,7 @@ static int idcin_read_packet(AVFormatContext *s,
     int palette_scale;
     unsigned char r, g, b;
     unsigned char palette_buffer[768];
+    uint32_t palette[256];
 
     if (s->pb->eof_reached)
         return AVERROR(EIO);
@@ -236,7 +233,6 @@ static int idcin_read_packet(AVFormatContext *s,
             return AVERROR(EIO);
         } else if (command == 1) {
             /* trigger a palette change */
-            idcin->palctrl.palette_changed = 1;
             if (avio_read(pb, palette_buffer, 768) != 768)
                 return AVERROR(EIO);
             /* scale the palette as necessary */
@@ -251,7 +247,7 @@ static int idcin_read_packet(AVFormatContext *s,
                 r = palette_buffer[i * 3    ] << palette_scale;
                 g = palette_buffer[i * 3 + 1] << palette_scale;
                 b = palette_buffer[i * 3 + 2] << palette_scale;
-                idcin->palctrl.palette[i] = (r << 16) | (g << 8) | (b);
+                palette[i] = (r << 16) | (g << 8) | (b);
             }
         }
 
@@ -262,6 +258,14 @@ static int idcin_read_packet(AVFormatContext *s,
         ret= av_get_packet(pb, pkt, chunk_size);
         if (ret < 0)
             return ret;
+        if (command == 1) {
+            ret = av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                          AVPALETTE_SIZE);
+            if (ret < 0)
+                return ret;
+            memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
+                   palette, AVPALETTE_SIZE);
+        }
         pkt->stream_index = idcin->video_stream_index;
         pkt->pts = idcin->pts;
     } else {
diff --git a/libavformat/ipmovie.c b/libavformat/ipmovie.c
index e1d08df..ac1cc8f 100644
--- a/libavformat/ipmovie.c
+++ b/libavformat/ipmovie.c
@@ -97,6 +97,8 @@ typedef struct IPMVEContext {
     unsigned int video_width;
     unsigned int video_height;
     int64_t video_pts;
+    uint32_t     palette[256];
+    int          has_palette;
 
     unsigned int audio_bits;
     unsigned int audio_channels;
@@ -116,8 +118,6 @@ typedef struct IPMVEContext {
 
     int64_t next_chunk_offset;
 
-    AVPaletteControl palette_control;
-
 } IPMVEContext;
 
 static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
@@ -162,6 +162,15 @@ static int load_ipmovie_packet(IPMVEContext *s, AVIOContext *pb,
         if (av_new_packet(pkt, s->decode_map_chunk_size + s->video_chunk_size))
             return CHUNK_NOMEM;
 
+        if (s->has_palette) {
+            if (!av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE,
+                                         AVPALETTE_SIZE)) {
+                memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
+                       s->palette, AVPALETTE_SIZE);
+                s->has_palette = 0;
+            }
+        }
+
         pkt->pos= s->decode_map_chunk_offset;
         avio_seek(pb, s->decode_map_chunk_offset, SEEK_SET);
         s->decode_map_chunk_offset = 0;
@@ -456,10 +465,9 @@ static int process_ipmovie_chunk(IPMVEContext *s, AVIOContext *pb,
                 r = scratch[j++] * 4;
                 g = scratch[j++] * 4;
                 b = scratch[j++] * 4;
-                s->palette_control.palette[i] = (r << 16) | (g << 8) | (b);
+                s->palette[i] = (r << 16) | (g << 8) | (b);
             }
-            /* indicate a palette change */
-            s->palette_control.palette_changed = 1;
+            s->has_palette = 1;
             break;
 
         case OPCODE_SET_PALETTE_COMPRESSED:
@@ -573,9 +581,6 @@ static int ipmovie_read_header(AVFormatContext *s,
     st->codec->height = ipmovie->video_height;
     st->codec->bits_per_coded_sample = ipmovie->video_bpp;
 
-    /* palette considerations */
-    st->codec->palctrl = &ipmovie->palette_control;
-
     if (ipmovie->audio_type) {
         st = av_new_stream(s, 0);
         if (!st)
diff --git a/libavformat/isom.h b/libavformat/isom.h
index fba4963..48e0bcf 100644
--- a/libavformat/isom.h
+++ b/libavformat/isom.h
@@ -123,6 +123,8 @@ typedef struct MOVStreamContext {
     int width;            ///< tkhd width
     int height;           ///< tkhd height
     int dts_shift;        ///< dts shift when ctts is negative
+    uint32_t palette[256];
+    int has_palette;
 } MOVStreamContext;
 
 typedef struct MOVContext {
diff --git a/libavformat/mov.c b/libavformat/mov.c
index 61ceaac..9d7eb82 100644
--- a/libavformat/mov.c
+++ b/libavformat/mov.c
@@ -1027,7 +1026,6 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                 unsigned int color_start, color_count, color_end;
                 unsigned char r, g, b;
 
-                st->codec->palctrl = av_malloc(sizeof(*st->codec->palctrl));
                 if (color_greyscale) {
                     int color_index, color_dec;
                     /* compute the greyscale palette */
@@ -1037,7 +1036,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                     color_dec = 256 / (color_count - 1);
                     for (j = 0; j < color_count; j++) {
                         r = g = b = color_index;
-                        st->codec->palctrl->palette[j] =
+                        sc->palette[j] =
                             (r << 16) | (g << 8) | (b);
                         color_index -= color_dec;
                         if (color_index < 0)
@@ -1058,7 +1057,7 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                         r = color_table[j * 3 + 0];
                         g = color_table[j * 3 + 1];
                         b = color_table[j * 3 + 2];
-                        st->codec->palctrl->palette[j] =
+                        sc->palette[j] =
                             (r << 16) | (g << 8) | (b);
                     }
                 } else {
@@ -1080,12 +1079,12 @@ int ff_mov_read_stsd_entries(MOVContext *c, AVIOContext *pb, int entries)
                             avio_r8(pb);
                             b = avio_r8(pb);
                             avio_r8(pb);
-                            st->codec->palctrl->palette[j] =
+                            sc->palette[j] =
                                 (r << 16) | (g << 8) | (b);
                         }
                     }
                 }
-                st->codec->palctrl->palette_changed = 1;
+                sc->has_palette = 1;
             }
         } else if(st->codec->codec_type==AVMEDIA_TYPE_AUDIO) {
             int bits_per_sample, flags;
@@ -2433,6 +2432,15 @@ static int mov_read_packet(AVFormatContext *s, AVPacket *pkt)
         ret = av_get_packet(sc->pb, pkt, sample->size);
         if (ret < 0)
             return ret;
+        if (sc->has_palette) {
+            if (av_packet_new_side_data(pkt, AV_PKT_DATA_PALETTE, AVPALETTE_SIZE)) {
+                av_log(mov->fc, AV_LOG_ERROR, "Cannot append palette to packet\n");
+            } else {
+                memcpy(av_packet_get_side_data(pkt, AV_PKT_DATA_PALETTE),
+                       sc->palette, AVPALETTE_SIZE);
+                sc->has_palette = 0;
+            }
+        }
 #if CONFIG_DV_DEMUXER
         if (mov->dv_demux && sc->dv_audio_container) {
             dv_produce_packet(mov->dv_demux, pkt, pkt->data, pkt->size);
-- 
1.7.0.4
