I solved that by passing the short name "mpeg2video" as the first
parameter to guess_format.

Now I get a segmentation fault in avcodec_encode_video, and I have
verified that none of its arguments are NULL.  Is there anything else I
should check about those arguments?
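
In case it helps, this is the kind of pre-flight check I can bolt on
before the encode call.  It is only a sketch: the helper name is mine,
and the FF_MIN_BUFFER_SIZE bound is a guess at "big enough" rather than
anything from the documentation.

static int encode_args_look_sane(AVCodecContext *c, AVFrame *frame,
                                 int buf_size)
{
    /* beyond non-NULL pointers, the frame's planes and the output
       buffer size matter too */
    if (!frame->data[0] || frame->linesize[0] <= 0)
        return 0;   /* no pixel data at all */
    if (c->pix_fmt == PIX_FMT_YUV420P &&
        (!frame->data[1] || !frame->data[2]))
        return 0;   /* planar YUV needs all three planes filled in */
    if (buf_size < FF_MIN_BUFFER_SIZE)
        return 0;   /* output buffer probably too small (my guess) */
    return 1;
}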

New code:

#include "avcodec.h"
#include "avformat.h"
#include "imgconvert.h"
#include "stdio.h"
#include "libswscale/swscale.h"

#define STREAM_DURATION 5.0

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = 25;
    c->time_base.num = 1;
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = PIX_FMT_YUV420P;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int video_outbuf_size;

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;

    if(argc < 3) {
        printf("Usage error.\n");
        return -1;
    }
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    AVOutputFormat *fmt = guess_format("mpeg2video", argv[2], NULL);
    if (!fmt)
    {
        return -1;
    }

    AVFormatContext *oc = av_alloc_format_context();
    if (!oc)
    {
        return -1;
    }

    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", argv[2]);

    AVStream *video_st = NULL;
    double video_pts;

    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }
    if (video_st == NULL)
        return -5;
    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, argv[2], 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, argv[2], URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", argv[2]);
            exit(1);
        }
    }

    av_write_header(oc);

    static struct SwsContext *img_convert_ctx;

    int frame_count = 0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
                            (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                            pCodecCtx->width, pCodecCtx->height);

                // Track the output stream's current time
                if (video_st)
                    video_pts = (double)video_st->pts.val *
                                video_st->time_base.num /
                                video_st->time_base.den;

                if (video_pts >= STREAM_DURATION)
                    break;

                AVCodecContext *c = video_st->codec;
                if (1) { /* was: if (c->pix_fmt != PIX_FMT_YUV420P) */
                    if (img_convert_ctx == NULL) {
                        img_convert_ctx = sws_getContext(c->width, c->height,
                                                         PIX_FMT_YUV420P,
                                                         c->width, c->height,
                                                         c->pix_fmt,
                                                         SWS_BICUBIC,
                                                         NULL, NULL, NULL);
                        if (img_convert_ctx == NULL)
                            return -1;
                    }
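                    /* note: pFrameRGB was filled as PIX_FMT_RGB24 above,
                       while this encoder was opened for PIX_FMT_YUV420P */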

                    int out_size = avcodec_encode_video(c, video_outbuf,
                                                        video_outbuf_size,
                                                        pFrameRGB);

                    AVPacket pkt;
                    av_init_packet(&pkt);

                    if (c->coded_frame->pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(c->coded_frame->pts,
                                               c->time_base,
                                               video_st->time_base);
                    if (c->coded_frame->key_frame)
                        pkt.flags |= PKT_FLAG_KEY;
                    pkt.stream_index = video_st->index;
                    pkt.data = video_outbuf;
                    pkt.size = out_size;

                    if (av_interleaved_write_frame(oc, &pkt) != 0)
                        return -1;
                }
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    av_write_trailer(oc);
    avcodec_close(video_st->codec);
    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);

    url_fclose(oc->pb);
    av_free(oc);

    return 0;
}
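
Re-reading the loop while writing this up, I notice img_convert_ctx gets
created but sws_scale is never called, so the frame handed to
avcodec_encode_video is still the RGB24 pFrameRGB even though the
encoder was opened for PIX_FMT_YUV420P.  For what it's worth, this is
roughly the conversion step I was aiming for (a sketch only:
encode_scaled is my name, and it leans on the globals picture,
video_outbuf and video_outbuf_size from the listing above):

static int encode_scaled(AVCodecContext *c, AVCodecContext *dec,
                         AVFrame *decoded)
{
    static struct SwsContext *sws;

    if (!sws) {
        /* build the scaler from the decoder's real geometry and
           format, converting into whatever the encoder expects */
        sws = sws_getContext(dec->width, dec->height, dec->pix_fmt,
                             c->width, c->height, c->pix_fmt,
                             SWS_BICUBIC, NULL, NULL, NULL);
        if (!sws)
            return -1;
    }
    /* fill the encoder-sized YUV420P picture from the decoded frame */
    sws_scale(sws, decoded->data, decoded->linesize, 0, dec->height,
              picture->data, picture->linesize);
    /* hand the encoder a frame in the format it was opened with */
    return avcodec_encode_video(c, video_outbuf, video_outbuf_size,
                                picture);
}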

New output:

test.c: In function ‘main’:
test.c:209: warning: ‘av_alloc_format_context’ is deprecated (declared at /home/ricky/ffmpeg/libavformat/avformat.h:873)
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '/home/ricky/cavity_flow_movie.mp4':
  Duration: 00:00:41.66, start: 0.000000, bitrate: 2301 kb/s
    Stream #0.0(eng): Video: mpeg4, yuv420p, 320x240 [PAR 1:1 DAR 4:3], 30.00 tb(r)
    Stream #0.1(eng): Data: mp4s / 0x7334706D
    Stream #0.2(eng): Data: mp4s / 0x7334706D
Output #0, mpeg2video, to '/home/ricky/cavity_flow_movie.mp2':
    Stream #0.0: Video: mpeg2video, yuv420p, 352x288, q=2-31, 400 kb/s, 25.00 tb(c)
/bin/bash: line 1: 12670 Segmentation fault      ./a.out ~/cavity_flow_movie.mp4 ~/cavity_flow_movie.mp2

-- 
Ricky Clarkson
Java Programmer, AD Holdings
+44 1565 770804
Skype: ricky_clarkson
Google Talk: [email protected]