I am forwarding this question from ffmpeg-user to libav-user as I have
gotten no responses.
---------- Forwarded message ----------
From: Kaspar Bumke <[email protected]>
Date: Sat, Feb 27, 2010 at 10:34 PM
Subject: swscale and AVFrame use of const
To: [email protected]
Hi,
I have been trying to follow a simple tutorial on writing a player for
ffmpeg: http://dranger.com/ffmpeg/tutorial01.html
It is a little outdated as I am using ffmpeg SVN-r21688 but I have replaced
img_convert with sws_scale as follows:
--- tutorial01_old.c 2010-02-27 22:06:43.000000000 +0000
+++ tutorial01.c 2010-02-27 22:17:35.000000000 +0000
(...)
@@ -124,17 +125,23 @@
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
- avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
- packet.data, packet.size);
+ avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
+ &packet);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
- img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
- (AVPicture*)pFrame, pCodecCtx->pix_fmt,
pCodecCtx->width,
- pCodecCtx->height);
-
- // Save the frame to disk
+
+ convert_ctx = sws_getContext( pCodecCtx->width,
+ pCodecCtx->height,
+ pCodecCtx->pix_fmt,
+ pCodecCtx->width,
+ pCodecCtx->height,
+ PIX_FMT_RGB24,
+ SWS_BILINEAR, 0, 0, 0);
+
+ sws_scale(convert_ctx, pFrame->data, pFrame->linesize, 0,
pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);
+
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
I have attached the whole modified file which should be compiled with:
gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz
The problem is during compilation it says sws_scale expects a ‘const uint8_t
* const*’ but argument is of type ‘uint8_t **’ in the second field. I have
been looking at the doxygen documentation and other people's players and
trying all sorts of things, but I cannot figure out how to change an
AVFrame->data to a const, or what I should really be passing to
sws_scale in the second field.
Any help at all would be greatly appreciated.
--Kaspar
P.S.
This similar implementation has exactly the same problem:
http://web.me.com/dhoerl/Home/Tech_Blog/Entries/2009/1/22_Revised_avcodec_sample.c_files/avcodec_sample.0.5.0.c
#include <stdio.h>

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
// SaveFrame writes one frame of packed RGB24 pixel data to disk as a
// binary PPM ("P6") image named frameN.ppm.
// pFrame must hold RGB24 data in data[0] with row stride linesize[0].
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
    FILE *pFile;
    char szFilename[32];
    int y;

    // Build the output filename and open the file in binary mode.
    // BUG FIX: the original did sprintf(szFilename, "wb") — writing "wb"
    // into the filename buffer — and never called fopen at all, so pFile
    // was read uninitialized (undefined behavior).
    snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    // PPM header: magic number, dimensions, maximum channel value.
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    // Write the pixels one row at a time: linesize[0] may be larger than
    // width*3 due to alignment padding, so the buffer cannot be dumped
    // in a single fwrite.
    for (y = 0; y < height; y++)
        fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

    fclose(pFile);
}
int main (int argc, char *argv[])
{
//register codecs and fileformats
av_register_all();
//open file
AVFormatContext *pFormatCtx;
if(av_open_input_file(&pFormatCtx, argv[1],NULL,0,NULL) !=0)
{
fprintf(stderr, "Couldn't open file\n");
return -1; // Couldn't open file
}
//retrieve stream information
if (av_find_stream_info (pFormatCtx) <0 )
{
fprintf(stderr,"Couldn't find stream info\n");
return -1; // Couldn't find stream info
}
// Dump info about file onto standard error
dump_format(pFormatCtx, 0, argv[1], 0);
// Find the first video stream
int i, videoStream;
AVCodecContext *pCodecCtx;
videoStream=-1;
for (i=0; i <pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
{
videoStream=i;
break;
}
if(videoStream==-1)
{
fprintf(stderr, "Didn't find a video stream\n");
return -1; // Didn't find a video stream
}
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->stream[videoStream]->codec;
//Find the decoder for the video stream
AVCodec *pCodec;
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL)
{
fprintf(stderr, "Unsupported codec!\n");
return -1; //Codec not found
}
//Open codec
if(avcodec_open(pCodecCtx, pCodec) <0)
{
fprintf(stderr, "Could not open codec");
return -1; // Could not open codec
}
// Allocate vide frame
AVFram *pFrame;
pframe=avcodec_alloc_frame();
//Allocate and AVFrame structure
pFrameRGB=avcodec_alloc_fram();
if(pFrameRGB==NULL)
{
fprintf(stderr, "Could not allocate frame\n");
return -1;
}
//determine size and allocate space for buffer
uint8_t *buffer;
int numBytes;
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t)); //av_malloc is ffmpeg's wrapper for malloc
/* Assign appropriate parts of buffer to image planes in pFrameRGB
pFrameRGB is an AVFRame but AVFram is a superset of AVPicture */
avpicture_fill((ACPicture *)pFrameRGB, buffer, PIX_FMT_RGB24, pCodecCtx->width,pCodecCtx->height);
// Reading the Data
int frameFinished;
AVPacket packet;
i=0;
while(av_read_frame(pFormatCtx, &packet) >= 0)
{
// query if it is a packet from the video stream
if(packet.stream_indec==videoStream)
{
//decode frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
//did we get a frame?
if(frameFinished)
{
//convert to RGB
img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24, (AVPicture*)pframe, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
}
// save frame to disk
if(++i<=5)
{
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
}
}
//Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
//Close the codec
avcodec_close(pCodecCtx);
//Close the video file
av_close_input_file(pFormatCtx);
return 0;
}
_______________________________________________
libav-user mailing list
[email protected]
https://lists.mplayerhq.hu/mailman/listinfo/libav-user