I've now got to the same point in C that I was at in Java - no segfault,
just an unplayable file.

The code (rewritten; basically dranger's tutorial and output_example.c merged):

#include "avcodec.h"
#include "avformat.h"
#include "imgconvert.h"
#include "stdio.h"
#include "libswscale/swscale.h"

#define STREAM_DURATION 5.0

/* add a video output stream */
static AVStream *add_video_stream(AVFormatContext *oc, int codec_id)
{
    AVCodecContext *c;
    AVStream *st;

    st = av_new_stream(oc, 0);
    if (!st) {
        fprintf(stderr, "Could not alloc stream\n");
        exit(1);
    }

    c = st->codec;
    c->codec_id = codec_id;
    c->codec_type = CODEC_TYPE_VIDEO;

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* time base: this is the fundamental unit of time (in seconds) in terms
       of which frame timestamps are represented. for fixed-fps content,
       timebase should be 1/framerate and timestamp increments should be
       identically 1. */
    c->time_base.den = 25;
    c->time_base.num = 1;
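    /* worked example: with num = 1 and den = 25 one pts tick is 1/25 s,
       so incrementing pts by 1 per frame gives 25 fps (40 ms/frame) */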
    c->gop_size = 12; /* emit one intra frame every twelve frames at most */
    c->pix_fmt = PIX_FMT_YUV420P;
    if (c->codec_id == CODEC_ID_MPEG2VIDEO) {
        /* just for testing, we also add B frames */
        c->max_b_frames = 2;
    }
    if (c->codec_id == CODEC_ID_MPEG1VIDEO){
        /* Needed to avoid using macroblocks in which some coeffs overflow.
           This does not happen with normal video, it just happens here as
           the motion of the chroma plane does not match the luma plane. */
        c->mb_decision=2;
    }
    // some formats want stream headers to be separate
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
        c->flags |= CODEC_FLAG_GLOBAL_HEADER;

    return st;
}

AVFrame *picture, *tmp_picture;
uint8_t *video_outbuf;
int video_outbuf_size;

static AVFrame *alloc_picture(int pix_fmt, int width, int height)
{
    AVFrame *picture;
    uint8_t *picture_buf;
    int size;

    picture = avcodec_alloc_frame();
    if (!picture)
        return NULL;
    size = avpicture_get_size(pix_fmt, width, height);
    picture_buf = av_malloc(size);
    if (!picture_buf) {
        av_free(picture);
        return NULL;
    }
    avpicture_fill((AVPicture *)picture, picture_buf,
                   pix_fmt, width, height);
    return picture;
}

static void open_video(AVFormatContext *oc, AVStream *st)
{
    AVCodec *codec;
    AVCodecContext *c;

    c = st->codec;

    /* find the video encoder */
    codec = avcodec_find_encoder(c->codec_id);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    /* open the codec */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    video_outbuf = NULL;
    if (!(oc->oformat->flags & AVFMT_RAWPICTURE)) {
        /* allocate output buffer */
        /* XXX: API change will be done */
        /* buffers passed into lav* can be allocated any way you prefer,
           as long as they're aligned enough for the architecture, and
           they're freed appropriately (such as using av_free for buffers
           allocated with av_malloc) */
        video_outbuf_size = 200000;
        video_outbuf = av_malloc(video_outbuf_size);
    }

    /* allocate the encoded raw picture */
    picture = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!picture) {
        fprintf(stderr, "Could not allocate picture\n");
        exit(1);
    }

    /* if the output format is not YUV420P, then a temporary YUV420P
       picture is needed too. It is then converted to the required
       output format */
    tmp_picture = NULL;
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        tmp_picture = alloc_picture(PIX_FMT_YUV420P, c->width, c->height);
        if (!tmp_picture) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }
}

int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;

    if(argc < 3) {
        printf("Usage error.\n");
        return -1;
    }
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=avcodec_alloc_frame();
    if(pFrame==NULL)
        return -1;

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                                pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                   pCodecCtx->width, pCodecCtx->height);

    // Guess the output container format from the output filename
    AVOutputFormat *fmt = guess_format(NULL, argv[2], NULL);
    if (!fmt)
    {
        return -1;
    }

    // Allocate the output format context and attach the guessed format
    AVFormatContext *oc = av_alloc_format_context();
    if (!oc)
    {
        return -1;
    }

    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", argv[2]);

    AVStream *video_st = NULL;
    double video_pts;

    if (fmt->video_codec != CODEC_ID_NONE) {
        video_st = add_video_stream(oc, fmt->video_codec);
    }

    /* set the output parameters (must be done even if no
       parameters). */
    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, argv[2], 1);

    /* now that all the parameters are set, we can open the audio and
       video codecs and allocate the necessary encode buffers */
    if (video_st)
        open_video(oc, video_st);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (url_fopen(&oc->pb, argv[2], URL_WRONLY) < 0) {
            fprintf(stderr, "Could not open '%s'\n", argv[2]);
            exit(1);
        }
    }

    av_write_header(oc);

    static struct SwsContext *img_convert_ctx;

    // Read packets from the input, decode them, and re-encode video
    // frames into the output file
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
                                 packet.data, packet.size);

            // Did we get a video frame?
            if(frameFinished) {
                // Convert the image from its native format to RGB
                img_convert((AVPicture *)pFrameRGB, PIX_FMT_RGB24,
                            (AVPicture *)pFrame, pCodecCtx->pix_fmt,
                            pCodecCtx->width, pCodecCtx->height);

                // Save the frame to disk
                if (video_st)
                    video_pts = (double)video_st->pts.val *
                                video_st->time_base.num /
                                video_st->time_base.den;
                else
                    video_pts = 0.0;

                if (!video_st || video_pts >= STREAM_DURATION)
                    break;

                AVCodecContext *c = video_st->codec;
                if (c->pix_fmt != PIX_FMT_YUV420P) {
                    if (img_convert_ctx == NULL) {
                        img_convert_ctx = sws_getContext(c->width, c->height,
                                                         PIX_FMT_YUV420P,
                                                         c->width, c->height,
                                                         c->pix_fmt,
                                                         SWS_BICUBIC,
                                                         NULL, NULL, NULL);
                        if (img_convert_ctx == NULL)
                            return -1;
                    }
                    int out_size = avcodec_encode_video(c, video_outbuf,
                                                        video_outbuf_size,
                                                        pFrameRGB);
                    AVPacket pkt;
                    av_init_packet(&pkt);

                    if (c->coded_frame->pts != AV_NOPTS_VALUE)
                        pkt.pts = av_rescale_q(c->coded_frame->pts,
                                               c->time_base,
                                               video_st->time_base);
                    if (c->coded_frame->key_frame)
                        pkt.flags |= PKT_FLAG_KEY;
                    pkt.stream_index = video_st->index;
                    pkt.data = video_outbuf;
                    pkt.size = out_size;

                    if (av_interleaved_write_frame(oc, &pkt) != 0)
                        return -1;
                }
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    av_free(buffer);
    av_free(pFrameRGB);
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    av_close_input_file(pFormatCtx);

    return 0;
}
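
For comparison, the conversion/encode step in output_example.c (one of
the two sources this was merged from) always hands the encoder a frame
in the codec's pix_fmt, converting with sws_scale first, and it
finishes the file after the loop. This is a from-memory sketch of the
SVN-era source, not a tested drop-in, so check it against your tree:

    /* in output_example.c, 'picture' is in c->pix_fmt and
       'tmp_picture' holds the YUV420P source frame */
    if (c->pix_fmt != PIX_FMT_YUV420P) {
        /* convert the YUV420P source into the encoder's pix_fmt */
        sws_scale(img_convert_ctx, tmp_picture->data, tmp_picture->linesize,
                  0, c->height, picture->data, picture->linesize);
    }
    /* the encoder is always fed a frame in c->pix_fmt */
    out_size = avcodec_encode_video(c, video_outbuf,
                                    video_outbuf_size, picture);

    /* ... and once all frames have been written: */
    av_write_trailer(oc);
    if (!(fmt->flags & AVFMT_NOFILE))
        url_fclose(oc->pb);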

The output:

test.c: In function ‘main’:
test.c:209: warning: ‘av_alloc_format_context’ is deprecated (declared at /home/ricky/ffmpeg/libavformat/avformat.h:873)
Input #0, mov,mp4,m4a,3gp,3g2,mj2, from '/home/ricky/cavity_flow_movie.mp4':
  Duration: 00:00:41.66, start: 0.000000, bitrate: 2301 kb/s
    Stream #0.0(eng): Video: mpeg4, yuv420p, 320x240 [PAR 1:1 DAR 4:3], 30.00 tb(r)
    Stream #0.1(eng): Data: mp4s / 0x7334706D
    Stream #0.2(eng): Data: mp4s / 0x7334706D
Output #0, mp2, to '/home/ricky/cavity_flow_movie.mp2':

The problem:

ri...@ricky-desktop:~/ffmpegplay$ ffplay ~/cavity_flow_movie.mp2
FFplay version 0.5-svn17737+3:0.svn20090303-1ubuntu6, Copyright (c)
2003-2009 Fabrice Bellard, et al.
  configuration: --enable-gpl --enable-postproc --enable-swscale
--enable-x11grab --extra-version=svn17737+3:0.svn20090303-1ubuntu6
--prefix=/usr --enable-avfilter --enable-avfilter-lavf --enable-libgsm
--enable-libschroedinger --enable-libspeex --enable-libtheora
--enable-libvorbis --enable-pthreads --disable-stripping
--disable-vhook --enable-libdc1394 --disable-armv5te --disable-armv6
--disable-armv6t2 --disable-armvfp --disable-neon --disable-altivec
--disable-vis --enable-shared --disable-static
  libavutil     49.15. 0 / 49.15. 0
  libavcodec    52.20. 0 / 52.20. 0
  libavformat   52.31. 0 / 52.31. 0
  libavdevice   52. 1. 0 / 52. 1. 0
  libavfilter    0. 4. 0 /  0. 4. 0
  libswscale     0. 7. 1 /  0. 7. 1
  libpostproc   51. 2. 0 / 51. 2. 0
  built on Apr 10 2009 23:18:41, gcc: 4.3.3
/home/ricky/cavity_flow_movie.mp2: Unknown format

Any suggestions?

2009/6/22 Art Clarke <[email protected]>:
> On Mon, Jun 22, 2009 at 6:54 AM, Ricky Clarkson 
> <[email protected]>wrote:
>
>> I already have some Java code that converts our in-memory streams to
>> AVI files, and was originally hoping to adapt that.  I did so, but
>> found the results unplayable (ffmpeg -i theoutput.mp2 reports 'Unknown
>> format').  I already noticed xuggle and was planning to look into it
>> further.  Is it as straightforward to use from a 'programmatic' data
>> source as it is directly from files?
>>
>
> I suggest joining the http://groups.google.com/group/xuggler-users group for
> detailed questions, but we allow you to use files, InputStreams,
> OutputStreams, ReadableByteChannels, WritableByteChannels, DataInput,
> DataOutput and RandomAccessFile objects.  Or if none of those work, you can
> do a custom IO handler by implementing the IURLProtocolHandler interface in
> Xuggler and opening that.
>
> - Art
>
> --
> http://www.xuggle.com/
> xu‧ggle (zŭ' gl) v. To freely encode, decode, and experience audio and
> video.
>
> Use Xuggle to get the power of FFMPEG in Java.
>



-- 
Ricky Clarkson
Java Programmer, AD Holdings
+44 1565 770804
Skype: ricky_clarkson
Google Talk: [email protected]