Hi,
I'm trying to use ffmpeg to capture images from a webcam. I already made
it work on Linux, and I'm trying to do the same on Windows. I'm using an
ffmpeg codebase snapshot from 9th July 2009, which I compiled myself.
As a basis for the sample code, I'm using an updated version of this
tutorial: http://www.dranger.com/ffmpeg/tutorial01.html source code (see
attached).
I can get as far as av_open_input_file() in the code, and I get the
following output for dump_format() right afterwards:
Input #0, vfwcap, from '0':
Duration: N/A, bitrate: N/A
Stream #0.0: Video: rawvideo, bgr24, 640x480, 1k tbn, 25 tbc
but then, the call to av_find_stream_info() blocks forever. I wonder what
the problem could be here.
if I simply omit the call to av_find_stream_info(), then on the first call
to av_read_frame(), I get the following output:
swScaler: Exactly one scaler algorithm must be chosen
I wonder what I'm doing wrong.
all help would be appreciated...
Akos
// tutorial01.c
// Code based on a tutorial by Martin Bohme
([email protected])
// Tested on Gentoo, CVS version 5/01/07 compiled with GCC 4.1.1
// A small sample program that shows how to use libavformat and libavcodec to
// read video from a file.
//
// Use
//
// gcc -o tutorial01 tutorial01.c -lavformat -lavcodec -lz
//
// to build (assuming libavformat and libavcodec are correctly installed
// your system).
//
// Run using
//
// tutorial01 myvideofile.mpg
//
// to write the first five frames from "myvideofile.mpg" to disk in PPM
// format.
extern "C" {
#include <avcodec.h>
#include <avformat.h>
#include <avdevice.h>
#include <swscale.h>
}
#include <stdio.h>
#include <string.h>
/*
 * Write one frame to disk as a binary PPM ("P6") image named
 * "frame<iFrame>.ppm".
 *
 * pFrame  - frame whose data[0] plane holds packed RGB24 pixels
 *           (assumes the frame was converted to PIX_FMT_RGB24 — the
 *           caller in main() guarantees this)
 * width   - image width in pixels
 * height  - image height in pixels
 * iFrame  - frame index used to build the output filename
 *
 * Errors (unopenable file, short write) are handled by returning
 * early / stopping; no status is reported to the caller.
 */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
    FILE *pFile;
    char szFilename[32];
    int y;

    /* snprintf instead of sprintf: bounds the write into szFilename */
    snprintf(szFilename, sizeof szFilename, "frame%d.ppm", iFrame);
    pFile = fopen(szFilename, "wb");
    if (pFile == NULL)
        return;

    /* PPM header: magic, dimensions, max channel value */
    fprintf(pFile, "P6\n%d %d\n255\n", width, height);

    /* Write row by row: linesize[0] may include padding beyond width*3,
       so we cannot dump the plane in one fwrite. Stop on a short write. */
    for (y = 0; y < height; y++) {
        if (fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1,
                   (size_t)width * 3, pFile) != (size_t)width * 3)
            break;
    }

    fclose(pFile);
}
int main(int argc, char *argv[]) {
AVFormatContext *pFormatCtx;
int i, videoStream;
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
AVInputFormat *pFormat;
AVFormatParameters formatParams;
AVFrame *pFrame;
AVFrame *pFrameRGB;
AVPacket packet;
int frameFinished;
int numBytes;
uint8_t *buffer;
SwsContext *scaleCtx;
const char formatName[] = "vfwcap";
if(argc < 2) {
printf("Please provide a movie file\n");
return -1;
}
// Register all formats and codecs
av_register_all();
avdevice_register_all();
if (!(pFormat = av_find_input_format(formatName))) {
printf("can't find input format %s, %p\n", formatName, pFormat);
return -1;
}
memset(&formatParams, 0, sizeof(AVFormatParameters));
formatParams.channel = 0;
formatParams.width = 640;
formatParams.height = 480;
formatParams.time_base.num = 1;
formatParams.time_base.den = 25;
// Open video file
if(av_open_input_file(&pFormatCtx, argv[1], pFormat, 4096, &formatParams)!=0)
{
printf("can't find open input file %s\n", argv[1]);
return -1; // Couldn't open file
}
// Dump information about file onto standard error
dump_format(pFormatCtx, 0, argv[1], 0);
// Retrieve stream information
if(av_find_stream_info(pFormatCtx)<0)
return -1; // Couldn't find stream information
// Find the first video stream
videoStream=-1;
for(i=0; i<pFormatCtx->nb_streams; i++)
if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
videoStream=i;
break;
}
if(videoStream==-1)
return -1; // Didn't find a video stream
// Get a pointer to the codec context for the video stream
pCodecCtx=pFormatCtx->streams[videoStream]->codec;
// Find the decoder for the video stream
pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
if(pCodec==NULL) {
fprintf(stderr, "Unsupported codec!\n");
return -1; // Codec not found
}
// Open codec
if(avcodec_open(pCodecCtx, pCodec)<0)
return -1; // Could not open codec
// Allocate video frame
pFrame=avcodec_alloc_frame();
// Allocate an AVFrame structure
pFrameRGB=avcodec_alloc_frame();
if(pFrameRGB==NULL)
return -1;
// Determine required buffer size and allocate buffer
numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
pCodecCtx->height);
buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
// Assign appropriate parts of buffer to image planes in pFrameRGB
// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
// of AVPicture
avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
pCodecCtx->width, pCodecCtx->height);
scaleCtx = sws_getContext(pCodecCtx->width,
pCodecCtx->height,
pCodecCtx->pix_fmt,
pCodecCtx->width,
pCodecCtx->height,
PIX_FMT_RGB24,
0, 0, 0, 0);
// Read frames and save first five frames to disk
i=0;
while(av_read_frame(pFormatCtx, &packet)>=0) {
// Is this a packet from the video stream?
if(packet.stream_index==videoStream) {
// Decode video frame
avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
packet.data, packet.size);
// Did we get a video frame?
if(frameFinished) {
// Convert the image from its native format to RGB
sws_scale(scaleCtx,
pFrame->data, pFrame->linesize,
0, pCodecCtx->height,
pFrameRGB->data, pFrameRGB->linesize);
// Save the frame to disk
if(++i<=5)
SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
i);
}
}
// Free the packet that was allocated by av_read_frame
av_free_packet(&packet);
}
sws_freeContext(scaleCtx);
// Free the RGB image
av_free(buffer);
av_free(pFrameRGB);
// Free the YUV frame
av_free(pFrame);
// Close the codec
avcodec_close(pCodecCtx);
// Close the video file
av_close_input_file(pFormatCtx);
return 0;
}_______________________________________________
libav-user mailing list
[email protected]
https://lists.mplayerhq.hu/mailman/listinfo/libav-user