Dear Andreas Unterweger and all,


   I am trying to compress an AVI file: the uncompressed (raw) video
stream is identified and passed on for MPEG-4 compression. The code
works fine for a few frames and then it crashes.



 When I open the codec I get an error suggesting there may be a problem with
my ffmpeg build, so should I check in that regard, or is the problem in my
code? I have posted my code below for reference.


If we can get this code working we can reuse it for other decoding
purposes, which would be helpful for all of us. Here it crashes only after
decoding a few frames.


What should the values of time_base.num and time_base.den be? I have used 25
and 1; should I change that? Looking forward to your replies.
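
In the code below I currently hard-code time_base.den = 25 and time_base.num = 1.
My understanding (untested, so please correct me if this is wrong) is that time_base
is the duration of one tick, i.e. 1/25 s per frame for a 25 fps source, and that it
could instead be derived from the input stream's r_frame_rate, roughly like this:

    AVRational fr = gDecStreamCtxVideo->m_stream->r_frame_rate; /* e.g. 25/1 */
    gEncVideoCodecCtx->time_base.num = fr.den;  /* 1  */
    gEncVideoCodecCtx->time_base.den = fr.num;  /* 25 */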






#define inline _inline



#include <stdio.h>

#include <stdlib.h>

#include <string.h>

#include <math.h>

#include "libavformat/avformat.h"

#include "libavcodec/avcodec.h"

#include "libavutil/avutil.h"

#include "libavutil/fifo.h"

#include "libswscale/swscale.h"

#include "libswscale/rgb2rgb.h"



#include "stdint.h" // svw

typedef int bool ;





AVRational myAVTIMEBASEQ = {1, AV_TIME_BASE}; // svw

/*******************************************************************************

* Demuxer and Decoder Functions

*******************************************************************************/

/* streamContext

* The video (and, if existent) audio streams in the input file are described

* by a streamContext structure.

*/

typedef struct _streamContext

{

AVStream * m_stream; // an AVStream * in gDecFormatCtx->streams[]

AVCodecContext * m_codecCtx; // pointer to m_stream->codec

AVPacket m_pkt; // the last AVPacket we read for this stream

int m_pktValid; // is m_pkt valid?

uint8_t * m_pktDataPtr; // pointer into m_pkt.data

int m_pktRemainingSiz;

int64_t m_pts; // the current presentation time of the input stream

int64_t m_ptsOffset; // packets associated with stream are relative to this

int m_frameValid; // is m_decodedVideoFrame/m_decodedAudioSamples valid?

AVFrame m_decodedVideoFrame; // decoded video frames stored here

int16_t * m_decodedAudioSamples; // decoded audio samples stored here

int m_decodedAudioSamplesSiz; // current size of m_decodedAudioSamples

int m_decodedAudioSamplesValidSiz; // # valid bytes in m_decodedAudioSamples

} streamContext;

extern void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame);

#define doFlush1 false

int muxInitVideoStreamCodec(); // svw

int decodeFrame(streamContext *streamCtx, int bFlushDecoder); // svw

void demuxEndSession(); // svw

int demuxInitStreamCodec(AVStream *streamIn); // svw

streamContext * demuxReadNextFrame(); // svw

int demuxStartSession(char *szFileIn); // svw

int encodeAudioFrame(int16_t *pFrame, int frameSize); // svw

int encodeVideoFrame(AVFrame *pFrame, bool doFlush1); // svw

int muxInitAudioStreamCodec(); // svw

int muxInitVideoStreamCodec(); // svw

int muxStartSession(char *szFileOut); // svw

AVFormatContext * gDecFormatCtx = NULL;

streamContext * gDecStreamCtxVideo = NULL;

streamContext * gDecStreamCtxAudio = NULL;

int64_t gDecContainerStartTime = 0;

#if 1 // MSVC has no lrintf

#include <emmintrin.h>

// static INLINE long lrintf(float f) {

static inline long lrintf(float f) {

return _mm_cvtss_si32(_mm_load_ss(&f));

}

#endif



/* demuxStartSession

* Opens an input file, creates the relevant streamContext structures and initialises their
* decoders. On return frames can be read from the input file by calling demuxReadNextFrame().

*

* Returns: 0 on success or -1 on failure.

*/

int demuxStartSession(char *szFileIn)

{

int i;

// Ask libavformat to open the specified input file.

if (av_open_input_file(&gDecFormatCtx, szFileIn, NULL, 0, NULL) != 0)

{

printf("DEMUX: ERROR - Can't open input file '%s'!\n", szFileIn);

return -1;

}

// svw alloc codec context

// streamCtx->m_codecCtx = avcodec_alloc_context();

// svw end

// Determine which streams the file contains. On success gDecFormatCtx->nb_streams and

// the streams[] array will be populated for us.

if (av_find_stream_info(gDecFormatCtx) < 0)

{

printf("DEMUX: ERROR - Can't determine stream details.\n");

return -1;

}

dump_format(gDecFormatCtx, 0, szFileIn, 0); // print some debugging information

// The "container" (input file) can specify a start time (in AV_TIME_BASE
units) which

// specifies that the presentation timestamps of all streams should take
into account.

//

// eg. if start_time == 3000 and a packet read back from a stream has a
PTS/DTS == 3000,

// it is really positioned at time 0 on the decoding timeline.

if (gDecFormatCtx->start_time != AV_NOPTS_VALUE)

{

printf("DEMUX: Container start time is non-zero (%lld AV_TIME_BASE
units).\n", gDecFormatCtx->start_time);

gDecContainerStartTime = gDecFormatCtx->start_time;

}

if (gDecFormatCtx->duration != AV_NOPTS_VALUE)

printf("DEMUX: Container duration is %lld (AV_TIME_BASE units)\n",
gDecFormatCtx->duration);

// Interrogate each stream found in the input file and try to set up streamContexts for
// streams that we're interested in. Once this is done we're ready to read & decode frames.

for (i = 0; i < gDecFormatCtx->nb_streams; i++)

demuxInitStreamCodec(gDecFormatCtx->streams[i]);

if (!gDecStreamCtxVideo)

{

printf("DEMUX: ERROR - No video stream found!\n");

return -1;

}

return 0;

}



/* demuxEndSession

* Performs cleanup of all demux related data structures, should be called on exit.

*/

void demuxEndSession()

{

// svw: in the used ffmpeg version this will free some data in the streams

av_close_input_file(gDecFormatCtx);

if (gDecStreamCtxVideo != NULL)

{

if (gDecStreamCtxVideo->m_codecCtx) {

// svw is already done in av_close_input_file() avcodec_close(gDecStreamCtxVideo->m_codecCtx);

}

free(gDecStreamCtxVideo);

}

if (gDecStreamCtxAudio != NULL)

{

if (gDecStreamCtxAudio->m_codecCtx) {

// svw is already done in av_close_input_file() avcodec_close(gDecStreamCtxAudio->m_codecCtx);

}

if (gDecStreamCtxAudio->m_decodedAudioSamples != NULL)

av_free(gDecStreamCtxAudio->m_decodedAudioSamples);

free(gDecStreamCtxAudio);

}

}



/* demuxInitStreamCodec

* streamIn is a stream that exists in the input file we're demuxing from. It could be a
* video, audio, subtitle or other stream. We're only interested in video or audio streams
* and ignore the others. The video and audio streams are allocated a streamContext
* structure that wraps up context details for the demuxing process as it proceeds.
*
* Each stream is associated with a specific codec (ie. WMV/MPEG2/MP4) whose decoder could
* be compiled into the libavcodec we're linked against. If the decoder module is present,
* we open it so we can then use it to decode frames read from the stream.

*

* Returns: 0 on success or -1 on failure.

*/

int demuxInitStreamCodec(AVStream *streamIn)

{

streamContext * streamCtx = NULL;

AVCodec * codec = NULL;

if (streamIn->codec->codec_type == CODEC_TYPE_VIDEO)

{

printf("DEMUX: STREAM[%d] - Video stream found.\n", streamIn->index);

gDecStreamCtxVideo = (streamContext*)malloc(sizeof(streamContext));

memset(gDecStreamCtxVideo, 0, sizeof(streamContext));

streamCtx = gDecStreamCtxVideo;

}

else if (streamIn->codec->codec_type == CODEC_TYPE_AUDIO)

{

printf("DEMUX: STREAM[%d] - Audio stream found.\n", streamIn->index);

gDecStreamCtxAudio = (streamContext*)malloc(sizeof(streamContext));

memset(gDecStreamCtxAudio, 0, sizeof(streamContext));

streamCtx = gDecStreamCtxAudio;

printf("DEMUX: STREAM[%d] - Audio Input Codec Frame Size: %d samples\n",

streamIn->index, streamIn->codec->frame_size);

}

else

{

printf("DEMUX: STREAM[%d] - ERROR - Stream has unsupported codec type
(%d).\n",

streamIn->index, streamIn->codec->codec_type);

return -1;

}

streamCtx->m_stream = streamIn;

streamCtx->m_codecCtx = streamIn->codec; // describes the codec used by the stream

printf("DEMUX: STREAM[%d] - Input Stream time base: %d / %d\n",
streamIn->index,

streamIn->time_base.num, streamIn->time_base.den);

// Just as the entire container itself can have a start time, each stream can also have a
// start time. Unlike the container start time, the stream start time is specified in the
// units of the AVStream itself (AVStream.time_base). For simplicity, all our calculations
// are done in AV_TIME_BASE units, so we need to scale from the stream timebase to that.

if (streamIn->start_time != AV_NOPTS_VALUE)

{

printf("DEMUX: STREAM[%d] - Start time is non-zero (%lld in
AVStream.time_base units).\n",

streamIn->index, streamIn->start_time);

// svw streamCtx->m_ptsOffset = av_rescale_q(streamIn->start_time, streamIn->time_base, AV_TIME_BASE_Q);

streamCtx->m_ptsOffset = av_rescale_q(streamIn->start_time, streamIn->time_base, myAVTIMEBASEQ);

}

if (streamIn->duration != AV_NOPTS_VALUE)

{

printf("DEMUX: STREAM[%d] - Duration is %lld (AVStream.time_base units)\n",

streamIn->index, streamIn->duration);

}

if (streamCtx->m_codecCtx->lowres)

streamCtx->m_codecCtx->flags |= CODEC_FLAG_EMU_EDGE;

// See if a decoder for the codec used by the stream is compiled into libavcodec.

codec = avcodec_find_decoder(streamCtx->m_codecCtx->codec_id);

if (codec == NULL)

{

printf("DEMUX: STREAM[%d] - ERROR - Can't find decoder for codec ID %d!\n",

streamIn->index, streamCtx->m_codecCtx->codec_id);

return -1;

}

// Tell the codec we're capable of handling frames that span packets.

if (codec->capabilities & CODEC_CAP_TRUNCATED)

streamCtx->m_codecCtx->flags |= CODEC_FLAG_TRUNCATED;

// Attach an instance of the decoder to the codec context so we can use it to decode frames.

if (avcodec_open(streamCtx->m_codecCtx, codec) < 0)

{

printf("DEMUX: STREAM[%d] - ERROR - Can't open decoder for codec ID %d!\n",

streamIn->index, streamCtx->m_codecCtx->codec_id);

return -1;

}

return 0;

}



/* demuxReadNextFrame

* Once the input file has been opened and the stream decoders initialised we can start
* reading frames from the file as 'packets'. Each packet read by av_read_frame() is
* associated with one and only one stream and may or may not represent enough data to
* decode a raw frame of video or audio from.
*
* Additionally, in the case of audio frames, the packet may actually contain multiple
* audio frames so the decoding process must be able to deal with this.
*
* Returns: The streamContext structure associated with the packet or NULL.
*
* If the stream is found the m_pkt field is initialised to the packet just read from
* the file. It is then the caller's responsibility to free any buffers associated with
* the packet by calling av_free_packet(). Additionally, the contents of m_pkt may be
* invalid after the next call to av_read_frame() and so should be consumed before the
* next call to demuxReadNextFrame().

*/

streamContext * demuxReadNextFrame()

{

streamContext * streamCtx = NULL;

AVPacket pkt; // copied into streamCtx, buffers must be freed by caller

av_init_packet(&pkt);

// Read a new packet from the input file.

if (av_read_frame(gDecFormatCtx, &pkt) < 0)

{

printf("DEMUX: Can't read next frame from input file.\n");

return NULL;

}



printf("DEMUX: STREAM[%d] - Got new %d byte frame.\n", pkt.stream_index,
pkt.size);

 // Work out whether this packet belongs to the video, audio or another stream.

if (pkt.stream_index == gDecStreamCtxVideo->m_stream->index)

streamCtx = gDecStreamCtxVideo;

else if (gDecStreamCtxAudio && (pkt.stream_index ==
gDecStreamCtxAudio->m_stream->index))

streamCtx = gDecStreamCtxAudio;

if (streamCtx == NULL)

{

printf("DEMUX: ERROR - Can't determine which stream this packet belongs
to!\n");

av_free_packet(&pkt);

return NULL;

}

// 'Give' the packet to the stream context, the caller is responsible for freeing it.

memcpy(&streamCtx->m_pkt, &pkt, sizeof(AVPacket));

streamCtx->m_pktValid = 1;

streamCtx->m_pktDataPtr = pkt.data;

streamCtx->m_pktRemainingSiz = pkt.size;

return streamCtx;

}

/* decodeFrame

* Use the decoder associated with the stream described by streamCtx to decode a frame of the
* stream. The input (encoded) data exists in streamCtx->m_pkt which has just been populated
* by calling demuxReadNextFrame().
*
* decodeFrame() should also be called once more after we're finished reading the input file
* with bFlushDecoder == '1' to flush any data remaining in the decoders which potentially may
* generate another frame.
*
* Returns: 0 on success or -1 on failure.
*
* Success does NOT indicate that a raw, decoded frame is actually available to be displayed
* or re-encoded into the output file, it just means the decoder was able to take the packet
* as more input, it may still not have enough data to generate output.
*
* If, on success, streamCtx->m_frameValid is '1', then a decoded frame was generated and is
* available to be displayed / re-encoded. It is in streamCtx->m_decodedVideoFrame if this is
* a video stream or, if an audio stream, the raw audio samples are in m_decodedAudioSamples
* (and there are m_decodedAudioSamplesValidSiz bytes of them).
*
* NOTE: If an audio stream, streamCtx->m_pkt may actually contain multiple raw audio frames
* but a call to avcodec_decode_audio2() will only decode one of them. This is why we
* maintain streamCtx->m_pktDataPtr & m_pktRemainingSiz: so decodeFrame() can be called
* multiple times for each demuxReadNextFrame() call.

*/
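
/* Illustrative caller pattern only (a sketch of my understanding; main() below
 * does essentially the same thing):
 *
 *   streamContext *ctx = demuxReadNextFrame();
 *   while (ctx != NULL && ctx->m_pktRemainingSiz > 0)
 *   {
 *       if (decodeFrame(ctx, 0) < 0)
 *           break;                        // decode error, skip the rest of this packet
 *       if (ctx->m_frameValid)
 *           ;                             // consume the decoded frame here
 *   }
 *   if (ctx != NULL && ctx->m_pktValid)
 *   {
 *       av_free_packet(&ctx->m_pkt);      // caller owns the packet buffers
 *       ctx->m_pktValid = 0;
 *   }
 */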

int decodeFrame(streamContext *streamCtx, int bFlushDecoder)

{

int bGotVideoFrame; // if decoding video, did we decode a raw frame?

int nBytesDecoded;

uint8_t * pDecode = streamCtx->m_pktDataPtr;

int nDecodeSiz = streamCtx->m_pktRemainingSiz;

streamCtx->m_frameValid = 0;

if (bFlushDecoder)

{

// If we're flushing the decoders we don't actually have any new data to decode.

pDecode = NULL;

nDecodeSiz = 0;

}

else

{

if (!streamCtx->m_pktValid || streamCtx->m_pktRemainingSiz <= 0)

{

printf("DEMUX: STREAM[%d] - No more data in this packet!\n",
streamCtx->m_stream->index);

return -1;

}

}

// Call appropriate decoding funcs in libavcodec depending on the type of stream.

if (streamCtx->m_codecCtx->codec_type == CODEC_TYPE_VIDEO)

{

avcodec_get_frame_defaults(&streamCtx->m_decodedVideoFrame);

nBytesDecoded = avcodec_decode_video(streamCtx->m_codecCtx,

&streamCtx->m_decodedVideoFrame, &bGotVideoFrame, // out

pDecode, nDecodeSiz); // in

streamCtx->m_stream->quality = streamCtx->m_decodedVideoFrame.quality;

if (nBytesDecoded < 0)

{

// Decoding failed. This does not necessarily mean the caller should stop reading frames.

printf("DEMUX: STREAM[%d] - Can't decode video frame from packet.\n",
streamCtx->m_stream->index);

return -1;

}

// We don't HAVE to get a full frame back from the packet - it could be a P/B frame inside
// in which case we may have to wait and get more data to rebuild the full frame. If we DID
// get a frame though, we can return it to the caller to display or re-encode.

if (bGotVideoFrame != 0)

{

streamCtx->m_frameValid = 1;

if (bFlushDecoder)

{

// Synthesise the PTS here as we don't actually have a packet to calculate it from.

// Just increment it by 1 tick in stream time (but expressed in AV_TIME_BASE units).

if (streamCtx->m_codecCtx->time_base.num != 0)

streamCtx->m_pts += ((int64_t)AV_TIME_BASE *
streamCtx->m_codecCtx->time_base.num) /

streamCtx->m_codecCtx->time_base.den;

}

else if (streamCtx->m_pkt.dts != AV_NOPTS_VALUE)

{

// The packet DTS is expressed in the owner AVStream's time_base and relative to
// the owner AVStream's start_time however we want it expressed in AV_TIME_BASE
// units, so we'll have to scale it. Must also take any stream start_time offset
// into account.

// svw streamCtx->m_pts = av_rescale_q(streamCtx->m_pkt.dts, streamCtx->m_stream->time_base, AV_TIME_BASE_Q);

streamCtx->m_pts = av_rescale_q(streamCtx->m_pkt.dts, streamCtx->m_stream->time_base, myAVTIMEBASEQ);

streamCtx->m_pts -= streamCtx->m_ptsOffset; // m_ptsOffset is in AV_TIME_BASE units

}

printf("DEMUX: STREAM[%d] - Got VIDEO frame. pkt[pts:%lld dts:%lld
duration:%d] (AVStream.time_base) pts: %lld (AV_TIME_BASE)\n",

streamCtx->m_stream->index, streamCtx->m_pkt.pts, streamCtx->m_pkt.dts,

streamCtx->m_pkt.duration, streamCtx->m_pts);

}

// Even if we didn't decode a raw frame we've exhausted all data in the packet.

streamCtx->m_pktRemainingSiz = 0;

}

else if (streamCtx->m_codecCtx->codec_type == CODEC_TYPE_AUDIO)

{

// Reallocate the audio sample buffer if it's smaller than the frame size.

if (!bFlushDecoder)

{

// av_fast_realloc() will only reallocate the buffer if m_decodedAudioSamplesSiz is
// smaller than the third parameter. It also returns the new size in m_decodedAudioSamplesSiz.

streamCtx->m_decodedAudioSamples =

(int16_t *)av_fast_realloc(streamCtx->m_decodedAudioSamples,

(unsigned int*)&streamCtx->m_decodedAudioSamplesSiz,

FFMAX(streamCtx->m_pkt.size, AVCODEC_MAX_AUDIO_FRAME_SIZE));

// streamCtx->m_decodedAudioSamplesSiz = FFMAX(streamCtx->m_currentPkt.size, AVCODEC_MAX_AUDIO_FRAME_SIZE);

if (streamCtx->m_decodedAudioSamples == NULL)

{

printf("DEMUX: ERROR - Can't (re)allocate %d byte audio sample buffer.\n",
streamCtx->m_decodedAudioSamplesSiz);

return -1;

}

}

// avcodec_decode_audio2() expects the size of the output buffer as the 3rd parameter but
// also returns the number of bytes it decoded in the same parameter.

streamCtx->m_decodedAudioSamplesValidSiz =
streamCtx->m_decodedAudioSamplesSiz;

nBytesDecoded = avcodec_decode_audio2(streamCtx->m_codecCtx,

streamCtx->m_decodedAudioSamples, // out

&streamCtx->m_decodedAudioSamplesValidSiz, // in/out

pDecode, nDecodeSiz); // in

if (nBytesDecoded < 0)

{

// Decoding failed. This does not necessarily mean the caller should stop reading frames.

printf("DEMUX: STREAM[%d] - Can't decode audio frame from packet.\n",
streamCtx->m_stream->index);

return -1;

}

// We may not have read all of the data from this packet. If so, the user can call again.

// Whether or not they do depends on if m_pktRemainingSiz == 0 (they can check).

streamCtx->m_pktDataPtr += nBytesDecoded;

streamCtx->m_pktRemainingSiz -= nBytesDecoded;

// At this point it's normally safe to assume that we've read some samples. However, the MPEG
// audio decoder is broken. If this is the case then we just return with m_frameValid == 0
// but m_pktRemainingSiz perhaps != 0, so the user can call again.

if (streamCtx->m_decodedAudioSamplesValidSiz > 0)

{

streamCtx->m_frameValid = 1; // otherwise maybe call us again

printf("DEMUX: STREAM[%d] - Got AUDIO frame. pkt[pts:%lld dts:%lld
duration:%lld]\n",

streamCtx->m_stream->index, streamCtx->m_pkt.pts, streamCtx->m_pkt.dts,
streamCtx->m_pkt.duration);

}

else

{

printf("DEMUX: STREAM[%d] - Broken MPEG audio decode, caller can recall
decoder.\n", streamCtx->m_stream->index);

}

}

else

{

printf("DEMUX: STREAM[%d] - Unknown codec class, not decoding this frame.\n",
streamCtx->m_stream->index);

}

return 0;

}



/*******************************************************************************

* Encoder and Multiplexer Functions

*******************************************************************************/

AVFormatContext * gEncFormatCtx = NULL; // libavformat's context for our output file

AVOutputFormat * gEncFormatDesc = NULL; // describes our output file to libavformat

AVStream * gEncVideoStream = NULL; // the output video stream

AVCodecContext * gEncVideoCodecCtx = NULL; // the encoder for the output video stream

AVCodecContext * gEncVideoCodecCtx_dummy = NULL; // the encoder for the output video stream

uint8_t * gEncVideoEncodedBuf = NULL; // buffer to hold frames encoded by the encoder

int gEncVideoEncodedBufSiz = 200000;

struct SwsContext * gEncVideoConvertCtx = NULL; // allocated if we must do CC conversion/scaling

AVFrame * gEncVideoConvertFrame = NULL; // where our converted frames are stored

int64_t gEncVideoFrameCount = 0; // # of video frames written to output so far

AVStream * gEncAudioStream = NULL; // the output audio stream (may remain NULL)

AVCodecContext * gEncAudioCodecCtx = NULL; // the encoder for the output audio stream

uint8_t * gEncAudioEncodedBuf = NULL; // buffer to hold frames encoded by the encoder

int gEncAudioEncodedBufSiz = 4 * AVCODEC_MAX_AUDIO_FRAME_SIZE;

AVFifoBuffer gEncAudioFifo; // FIFO to write incoming audio samples into

uint8_t * gEncAudioFifoOutBuf = NULL; // buffer to read _out_ of the FIFO into

ReSampleContext * gEncAudioConvertCtx; // allocated if we must up/downsample audio

uint8_t * gEncAudioConvertBuf = NULL; // allocated to store resampled audio into



/* muxStartSession

* Allocates an AVFormatContext to describe an output file of a specific format (ie. MPEG2/WMV).
* AVStreams for any input video or audio streams (from the demuxer) are created and attached to
* the AVFormatContext. The codecs for these streams are also initialised.
*
* On return frames can be written to the output file by calling encodeVideoFrame() and/or
* encodeAudioFrame().

*

* Returns: 0 on success or -1 on failure.

*/

int muxStartSession(char *szFileOut)

{

AVFormatParameters fpOutFile;

AVCodec *OutputCodec;

AVFrame *InputBufferFrame;

// See if libavformat has modules that can write our output format. If so, gEncFormatDesc
// will describe the functions used to write the format (used internally by libavformat)
// and the default video/audio codecs that the format uses.

// svw if ((gEncFormatDesc = guess_format("avi", NULL, NULL)) == NULL)

if ((gEncFormatDesc = guess_format ("avi", szFileOut , NULL)) == NULL)

{

printf("MUX: ERROR - Can't determine format description for file.\n");

return -1;

}

 //gEncFormatDesc->video_codec=CODEC_ID_RAWVIDEO;

// gEncFormatCtx is used by libavformat to carry around context data re our output file.

if ((gEncFormatCtx = av_alloc_format_context()) == NULL)

{

printf("MUX: ERROR - Can't allocate output format context.\n");

return -1;

}

// Initialise the output format context.

gEncFormatCtx->oformat = gEncFormatDesc;

// svw snprintf(gEncFormatCtx->filename, sizeof(gEncFormatCtx->filename), szFileOut);

_snprintf(gEncFormatCtx->filename, sizeof(gEncFormatCtx->filename), szFileOut);

// Add a video stream that uses the format's default video codec to the format context's
// streams[] array. We can then encode video frames into this stream.

if ((gEncVideoStream = av_new_stream(gEncFormatCtx, 0)) == NULL)

{

printf("MUX: ERROR - Can't add video stream to output file.\n");

return -1;

}

// Do we have an input audio stream to encode into the output file?

if (gDecStreamCtxAudio != NULL)

{

if ((gEncAudioStream = av_new_stream(gEncFormatCtx, 1)) == NULL)

{

printf("MUX: ERROR - Can't add audio stream to output file.\n");

return -1;

}

}

gEncFormatCtx->timestamp = 0;

// Open the output file.

if (!(gEncFormatDesc->flags & AVFMT_NOFILE))

{

if (url_fopen(&gEncFormatCtx->pb, szFileOut, URL_WRONLY) < 0)

{

printf("MUX: ERROR - Can't open output file '%s' to write.\n", szFileOut);

return -1;

}

}

// Set default parameters on the format context.

memset(&fpOutFile, 0, sizeof(AVFormatParameters));

if (av_set_parameters(gEncFormatCtx, &fpOutFile) < 0)

{

printf("MUX: ERROR - Can't set output parameters for output file.\n");

return -1;

}

gEncFormatCtx->preload = (int)(0.5 * AV_TIME_BASE);

gEncFormatCtx->max_delay = (int)(0.7 * AV_TIME_BASE);

// Open the video stream's codec and initialise any stream related data.

if (muxInitVideoStreamCodec() < 0)

return -1;

// Open the audio stream's codec and initialise any stream related data.

if ((gDecStreamCtxAudio != NULL) && (muxInitAudioStreamCodec() < 0))

return -1;

// Write headers to the output file.

if (av_write_header(gEncFormatCtx) < 0)

{

printf("MUX: ERROR - Can't write output file headers.\n");

return -1;

}

return 0;

}



/* muxEndSession

* This should be called to perform muxer / encoder cleanup when we've finished transcoding.
*
* Its most important task is to flush the encoders of any remaining data they have in them
* as doing so may generate a new output frame. To flush the audio encoder we should first
* flush any audio frame fragments out of the audio FIFO.

*/

void muxEndSession()

{

int i, nEncodedBytes;

// Flush the audio FIFO and encoder.

#if 1 // svw no audio for testing!

if (gEncAudioFifo.buffer != NULL) {

for (;;)

{

AVPacket pkt;

int nFifoBytes = av_fifo_size(&gEncAudioFifo); // any bytes left in audio FIFO?

nEncodedBytes = 0;

// Flush the audio FIFO first if necessary. It won't contain a _full_ audio frame because
// if it did we'd have pulled it from the FIFO during the last encodeAudioFrame() call -
// the encoder must support short/incomplete frames for this to work.

#if 0

if (nFifoBytes > 0 && gEncAudioCodecCtx->codec->capabilities &
CODEC_CAP_SMALL_LAST_FRAME)

{

int nFrameSizeTmp = gEncAudioCodecCtx->frame_size;

// The last frame is going to contain a smaller than usual number of
samples.

gEncAudioCodecCtx->frame_size = nFifoBytes / (gEncAudioCodecCtx->channels *
sizeof(int16_t));

printf("MUX: Audio FIFO still contains %d bytes, writing short %d sample
frame ...\n",

nFifoBytes, gEncAudioCodecCtx->frame_size);

// Pull the bytes out from the FIFO and feed them to the encoder.

if (av_fifo_read(&gEncAudioFifo, gEncAudioFifoOutBuf, nFifoBytes) == 0)

nEncodedBytes = avcodec_encode_audio(gEncAudioCodecCtx, gEncAudioEncodedBuf,
gEncAudioEncodedBufSiz, (int16_t*)gEncAudioFifoOutBuf);

gEncAudioCodecCtx->frame_size = nFrameSizeTmp; // restore the native frame
size

}

#endif

// Now flush the encoder.

if (nEncodedBytes <= 0)// svw see forum thread

nEncodedBytes = avcodec_encode_audio(gEncAudioCodecCtx, gEncAudioEncodedBuf,
gEncAudioEncodedBufSiz, NULL);

if (nEncodedBytes <= 0)

break;

// Okay, we got a final encoded frame we can write to the output file.

av_init_packet(&pkt);

pkt.stream_index = gEncAudioStream->index;

pkt.data = gEncAudioEncodedBuf;

pkt.size = nEncodedBytes;

pkt.flags |= PKT_FLAG_KEY;

// Set presentation time of frame (currently in the codec's timebase) in the stream timebase.

if(gEncAudioCodecCtx->coded_frame && gEncAudioCodecCtx->coded_frame->pts !=
AV_NOPTS_VALUE)

pkt.pts = av_rescale_q(gEncAudioCodecCtx->coded_frame->pts,
gEncAudioCodecCtx->time_base, gEncAudioStream->time_base);

if (av_interleaved_write_frame(gEncFormatCtx, &pkt) != 0)

{

printf("MUX: ERROR - Couldn't write last audio frame to output file.\n");

break;

}

}

}

#endif

// Flush the video encoder.

for (;;)

{

AVPacket pkt;

printf("MUX: Flushing video encoder ...\n");

nEncodedBytes = avcodec_encode_video(gEncVideoCodecCtx, gEncVideoEncodedBuf,
gEncVideoEncodedBufSiz, NULL);

if (nEncodedBytes <= 0)

break;

// Okay, we got a final encoded frame we can write to the output file.

av_init_packet(&pkt);

pkt.stream_index = gEncVideoStream->index;

pkt.data = gEncVideoEncodedBuf;

pkt.size = nEncodedBytes;

if (gEncVideoCodecCtx->coded_frame &&
gEncVideoCodecCtx->coded_frame->key_frame)

pkt.flags |= PKT_FLAG_KEY;

// Set presentation time of frame (currently in the codec's timebase) in the stream timebase.

if(gEncVideoCodecCtx->coded_frame && gEncVideoCodecCtx->coded_frame->pts !=
AV_NOPTS_VALUE)

pkt.pts = av_rescale_q(gEncVideoCodecCtx->coded_frame->pts,
gEncVideoCodecCtx->time_base, gEncVideoStream->time_base);

if (av_interleaved_write_frame(gEncFormatCtx, &pkt) != 0)

{

printf("MUX: ERROR - Couldn't write last video frame to output file.\n");

break;

}

}

// Close the codecs.

if (gEncVideoStream != NULL)

avcodec_close(gEncVideoStream->codec);

if (gEncAudioStream != NULL)

avcodec_close(gEncAudioStream->codec);

// Write any file trailers.

av_write_trailer(gEncFormatCtx);

for (i = 0; i < gEncFormatCtx->nb_streams; i++)

{

av_freep(&gEncFormatCtx->streams[i]->codec);

av_freep(&gEncFormatCtx->streams[i]);

}

// Close the output file if we created it.

if (!(gEncFormatDesc->flags & AVFMT_NOFILE))

// svw url_fclose(&gEncFormatCtx->pb);

url_fclose(gEncFormatCtx->pb);

// Free any buffers or structures we allocated.

av_free(gEncFormatCtx);

if (gEncVideoEncodedBuf != NULL)

av_free(gEncVideoEncodedBuf);

if (gEncAudioEncodedBuf != NULL)

av_free(gEncAudioEncodedBuf);

if (gEncAudioFifoOutBuf != NULL)

av_free(gEncAudioFifoOutBuf);

if (gEncVideoConvertFrame)

{

av_free(gEncVideoConvertFrame->data[0]);

av_free(gEncVideoConvertFrame);

}

if (gEncVideoConvertCtx)

sws_freeContext(gEncVideoConvertCtx);

if (gEncAudioConvertBuf)

av_free(gEncAudioConvertBuf);

av_fifo_free(&gEncAudioFifo);

}



/* muxInitVideoStreamCodec

* The output format we're using has a default video codec associated with it, which we need to
* open and configure before we can use its encoder to encode raw input video frames.

*

* Returns: 0 on success or -1 on failure.

*/

int muxInitVideoStreamCodec()

{

AVCodec * codec = NULL;

AVCodecContext *c= NULL;

// Configure the video stream's codec context.

gEncVideoCodecCtx = gEncVideoStream->codec;

avcodec_get_context_defaults2(gEncVideoCodecCtx, CODEC_TYPE_VIDEO);

gEncVideoCodecCtx->codec_id = gEncFormatDesc->video_codec;

gEncVideoCodecCtx->codec_type = CODEC_TYPE_VIDEO;

// Is the encoder module actually compiled into libavcodec?

if ((codec = avcodec_find_encoder(gEncVideoCodecCtx->codec_id)) == NULL)

{

printf("MUX: ERROR - Can't find video codec.\n");

return -1;

}







// libavformat and libavcodec maintain several different measures of time, the most important
// of which are stream time (all AVPackets read from / written to an AVStream have a PTS/DTS
// expressed in these units) and codec time (frames returned after being encoded by the encoder
// are expressed in this time).
//
// For a fixed framerate video codec, the timebase is generally expressed as 1 / framerate.
// eg. codec.time_base = 1 / 25 = 0.04 (40 ms per frame).
//
// When a frame is encoded by the codec (after a call to avcodec_encode_video()), the PTS of
// the coded_frame will be expressed in the codec's time_base (ie. 1 == 0.04 s, 2 == 0.08 s).
// Before we write the coded_frame into the output stream we must re-express the frame's PTS
// in the stream's time_base, which is normally 1 / 1000. This involves the following math:
//
// tStream = (tCodec * tCodecTimeBase) / tStreamTimeBase, eg.
// tStream = (1 * 0.04) / (1 / 1000) = 0.04 * 1000 = 40.
//
// This is exactly what av_rescale_q() does.
//
// Because we want the output file framerate to match the input file framerate, we set the
// output video codec's time_base to the inverse of the framerate specified in the _input_
// video stream.
//
// eg. If gDecStreamCtxVideo->m_stream->r_frame_rate = 601 / 12 (50.08 fps)
// gEncVideoCodecCtx->time_base = 12 / 601 (~20 ms per frame)
//
// NOTE: We also maintain the presentation time of the decoded input streams _as we decode_
// them. gDecStreamCtxVideo/gDecStreamCtxAudio->m_pts always specify the presentation
// time of the frame we've just read from the input file in AV_TIME_BASE units *not*
// the units of their owning AVStreams (we do this conversion to make it easier to
// decouple the timebases of the input and output streams). This value is used as we
// re-encode these frames to achieve sync between the input & output streams.
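
// svw note: purely for illustration (not part of the transcode flow), the maths above maps
// onto av_rescale_q() like this, assuming a 1/25 codec timebase and a 1/1000 stream timebase:
//
//   AVRational codecTb  = {1, 25};    // 0.04 s per codec tick
//   AVRational streamTb = {1, 1000};  // 1 ms per stream tick
//   int64_t streamPts = av_rescale_q(1, codecTb, streamTb); // == 40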

gEncVideoCodecCtx->time_base.den = 25; // gDecStreamCtxVideo->m_stream->r_frame_rate.num; eg. 601

gEncVideoCodecCtx->time_base.num = 1;  // gDecStreamCtxVideo->m_stream->r_frame_rate.den; eg. 12

// We want the frames output by the encoder to match these parameters.

// C:\src\ffmpeg_test\Debug>ffmpeg_test_app.exe > log1.txt

//Input #0, mov,mp4,m4a,3gp,3g2,mj2, from 'output3.mpeg4':

// Duration: 00:00:14.1, start: 0.000000, bitrate: 450 kb/s

// Stream #0.0(und): Video: mpeg4, yuv420p, 640x480 [PAR 1:1 DAR 4:3], 30.00 tb(r)



//gEncVideoCodecCtx->width = 240;

//gEncVideoCodecCtx->height = 180;

gEncVideoCodecCtx->width = 640;

gEncVideoCodecCtx->height = 480;

gEncVideoCodecCtx->pix_fmt = PIX_FMT_YUV420P; // H.263 requires YUV420P input frames, we convert from RGB24

gEncVideoCodecCtx->bit_rate = 400000; // 400 kbps

gEncVideoCodecCtx->gop_size = 12; // Emit 1 I-Frame every 12 frames at most

gEncVideoCodecCtx->max_qdiff = 3;

gEncVideoCodecCtx->rc_eq = "tex^qComp";

gEncVideoCodecCtx->rc_override_count = 0;

if (!gEncVideoCodecCtx->rc_initial_buffer_occupancy)

gEncVideoCodecCtx->rc_initial_buffer_occupancy =
gEncVideoCodecCtx->rc_buffer_size * 3/4;

gEncVideoCodecCtx->me_threshold = 0;

gEncVideoCodecCtx->intra_dc_precision = 0;

gEncVideoCodecCtx->strict_std_compliance = 0;

gEncVideoCodecCtx->me_method = ME_EPZS;

// some formats want stream headers to be separate

// if(!strcmp(fmtCtx->oformat->name, "mp4") || !strcmp(fmtCtx->oformat->name, "mov") || !strcmp(fmtCtx->oformat->name, "3gp"))

// codecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

// Open the codec.







if (avcodec_open(gEncVideoCodecCtx, codec) < 0)

{

printf("MUX: ERROR - Can't open video codec.\n");

return -1;

}

printf("MUX: Video Output Codec Timebase: %d / %d\n",

gEncVideoCodecCtx->time_base.num, gEncVideoCodecCtx->time_base.den);

// Input frames can come to us in any size or pixel format but our codec only wants them in the
// size and format it's setup to encode to. If the two don't match we'll have to resize or do
// colourspace conversion on incoming frames before feeding the encoder.

if ((gDecStreamCtxVideo->m_codecCtx->width != gEncVideoCodecCtx->width) ||

(gDecStreamCtxVideo->m_codecCtx->height != gEncVideoCodecCtx->height) ||

(gDecStreamCtxVideo->m_codecCtx->pix_fmt != gEncVideoCodecCtx->pix_fmt))

{

uint8_t * bufFrame = NULL; // buf to hold converted frames (attached to gEncVideoConvertFrame)

int bufFrameSiz;

printf("MUX: Video size or colourspace conversion will be required (IN:
%dx%d @ %dcs, OUT: %dx%d @ %dcs)\n",

gDecStreamCtxVideo->m_codecCtx->width,
gDecStreamCtxVideo->m_codecCtx->height,

gDecStreamCtxVideo->m_codecCtx->pix_fmt,

gEncVideoCodecCtx->width, gEncVideoCodecCtx->height,

gEncVideoCodecCtx->pix_fmt);

// Allocate an AVFrame structure in which the swscaler can store converted frames.

if ((gEncVideoConvertFrame = avcodec_alloc_frame()) == NULL)

{

printf("MUX: ERROR - Can't allocate video conversion frame structure.\n");

return -1;

}

// This AVFrame structure needs a backing buffer too, how big should it be?

bufFrameSiz = avpicture_get_size(gEncVideoCodecCtx->pix_fmt,

gEncVideoCodecCtx->width, gEncVideoCodecCtx->height);

if ((bufFrame = (uint8_t*)av_malloc(bufFrameSiz)) == NULL)

{

printf("MUX: ERROR - Can't allocate video conversion frame backing
buffer.\n");

return -1;

}

// Attach the backing buffer to the AVFrame.

avpicture_fill((AVPicture *)gEncVideoConvertFrame, bufFrame,
gEncVideoCodecCtx->pix_fmt,

gEncVideoCodecCtx->width, gEncVideoCodecCtx->height);

// Create the swscaler conversion context.

gEncVideoConvertCtx = sws_getContext(gDecStreamCtxVideo->m_codecCtx->width, // input width

gDecStreamCtxVideo->m_codecCtx->height, // input height

gDecStreamCtxVideo->m_codecCtx->pix_fmt, // input format

gEncVideoCodecCtx->width, // output width

gEncVideoCodecCtx->height, // output height

gEncVideoCodecCtx->pix_fmt, // output format

SWS_BICUBIC, NULL, NULL, NULL); // do bicubic resizing

if (!gEncVideoConvertCtx)

{

printf("MUX: ERROR - Can't create input to output video frame conversion
context.\n");

return -1;

}

}

// We're always going to need a buffer for the encoder to store its encoded data into.

if ((gEncVideoEncodedBuf = (uint8_t*)av_malloc(gEncVideoEncodedBufSiz)) ==
NULL)

{

printf("MUX: ERROR - Can't allocate buffer to hold encoded video.\n");

return -1;

}

return 0;

}



/* muxInitAudioStreamCodec

* The output format we're using has a default audio codec associated with it, which we need to
* open and configure before we can use its encoder to encode raw input audio frames.

*

* Returns: 0 on success or -1 on failure.

*/

int muxInitAudioStreamCodec()

{

AVCodec * codec = NULL;

// Configure the audio stream's codec context.

gEncAudioCodecCtx = gEncAudioStream->codec;

avcodec_get_context_defaults2(gEncAudioCodecCtx, CODEC_TYPE_AUDIO);

gEncAudioCodecCtx->codec_id = gEncFormatDesc->audio_codec;

gEncAudioCodecCtx->codec_type = CODEC_TYPE_AUDIO;

// Codec parameters (22050Hz stereo at ~40kbps)

gEncAudioCodecCtx->bit_rate = 40000;

gEncAudioCodecCtx->sample_rate = 22050;

gEncAudioCodecCtx->channels = 2;

gEncAudioCodecCtx->time_base.num = 1;

gEncAudioCodecCtx->time_base.den = 22050;

// Is the required audio codec compiled into libavcodec?

if ((codec = avcodec_find_encoder(gEncAudioCodecCtx->codec_id)) == NULL)

{

printf("MUX: ERROR - Can't find audio codec.\n");

return -1;

}

// Open the codec.

if (avcodec_open(gEncAudioCodecCtx, codec) < 0)

{

printf("MUX: ERROR - Can't open audio codec.\n");

return -1;

}

printf("MUX: Audio Output Codec Timebase: %d / %d\n",
gEncAudioCodecCtx->time_base.num, gEncAudioCodecCtx->time_base.den);

printf("MUX: Audio Output Codec Frame Size: %d samples\n",
gEncAudioCodecCtx->frame_size);

// Allocate a buffer for the encoder to store encoded audio frames into.

if ((gEncAudioEncodedBuf = (uint8_t*)av_malloc(gEncAudioEncodedBufSiz)) ==
NULL)

{

printf("MUX: ERROR - Can't allocate buffer to hold encoded audio.\n");

return -1;

}

#if 0

// The encoder may require a minimum number of raw audio samples for each
encoding but we can't

// guarantee we'll get this minimum each time an audio frame is decoded from
the input file so

// we use a FIFO to store up incoming raw samples until we have enough for
one call to the codec.

av_fifo_init(&gEncAudioFifo, 2 * AVCODEC_MAX_AUDIO_FRAME_SIZE);

// Allocate a buffer to read OUT of the FIFO into. The FIFO maintains its
own buffer internally.

if ((gEncAudioFifoOutBuf = (uint8_t*)av_malloc(2 *
AVCODEC_MAX_AUDIO_FRAME_SIZE)) == NULL)

{

printf("MUX: ERROR - Can't allocate buffer to read into from audio
FIFO.\n");

return -1;

}

#endif

// Do we need to resample the audio?

if ((gDecStreamCtxAudio->m_codecCtx->sample_rate !=
gEncAudioCodecCtx->sample_rate) ||

(gDecStreamCtxAudio->m_codecCtx->channels != gEncAudioCodecCtx->channels))

{

printf("MUX: Audio resampling required (IN: %dhz @ %d channels, OUT: %dhz @
%d channels)\n",

gDecStreamCtxAudio->m_codecCtx->sample_rate,
gDecStreamCtxAudio->m_codecCtx->channels,

gEncAudioCodecCtx->sample_rate, gEncAudioCodecCtx->channels);

gEncAudioConvertCtx = audio_resample_init(gEncAudioCodecCtx->channels,

gDecStreamCtxAudio->m_codecCtx->channels,

gEncAudioCodecCtx->sample_rate,

gDecStreamCtxAudio->m_codecCtx->sample_rate);

if (gEncAudioConvertCtx == NULL)

{

printf("MUX: ERROR - Can't create resampling context.\n");

return -1;

}

// Allocate a buffer for the audio resampler to store resampled samples in.

if ((gEncAudioConvertBuf = (uint8_t*)av_malloc(2 *
AVCODEC_MAX_AUDIO_FRAME_SIZE)) == NULL)

{

printf("MUX: ERROR - Can't create resampling buffer.\n");

return -1;

}

}

return 0;

}



/* encodeVideoFrame

* Takes an entire raw video frame, in any size and pixel format, performs any required
* colourspace conversion or resizing required to meet the video encoder's requirements
* and then encodes the frame. If the encoder generates an encoded frame from the input
* this encoded frame is written to the output file.
*
* This function also maintains synchronisation between the input video stream and the
* output video stream. This may be needed if the input stream has too many or too few
* frames for its specified frame rate.

*

* Returns: 0 on success or -1 on failure.

*/

int encodeVideoFrame(AVFrame *pFrame, bool doFlush) // svw doFlush added

{

int i;

int nEncodedBytes;

int nFrameCount = 1; // by default output a single frame

AVFrame * pRawFrame = pFrame;

double inPts ;

double outFrames ;

double vdelta ;



// If the input frames don't match the encoder requirements we must resize / colourspace
// convert them and use the converted frame as the raw frame.

if (gEncVideoConvertCtx != NULL)

{

sws_scale(gEncVideoConvertCtx, pFrame->data, pFrame->linesize, 0,
gEncVideoCodecCtx->height, gEncVideoConvertFrame->data,
gEncVideoConvertFrame->linesize);

pRawFrame = gEncVideoConvertFrame;

}

// The input stream which feeds this output stream has an idea of what its current presentation
// time is as we're decoding it. If our presentation time starts to lead or lag that of the input
// we need to drop or duplicate frames.
//
// The input stream's m_pts has already been scaled into AV_TIME_BASE units -- work out how many
// frames this equates to in our output codec timebase and compare it to how many frames we've
// actually output.

inPts = (double)(gDecStreamCtxVideo->m_pts) / AV_TIME_BASE; // how many seconds into input stream are we?

outFrames = inPts / av_q2d(gEncVideoCodecCtx->time_base); // how many frames of output does this equate to?

vdelta = outFrames - gEncVideoFrameCount; // what's the difference?

printf("MUX: inPts: %lld (AV_TIME_BASE) %.2f (secs), equiv out frames: %.2f,
vdelta: %.2f\n",

gDecStreamCtxVideo->m_pts, inPts, outFrames, vdelta);

if (vdelta < -1.1)

{

nFrameCount = 0;

printf("MUX: DROPPING frame!\n");

}

else if (vdelta > 1.1)

{

// svw nFrameCount = lrintf(vdelta);

nFrameCount = lrintf(vdelta); // svw or : nFrameCount = (int) floor(vdelta);

printf("MUX: DUPING %d frames\n", nFrameCount - 1);

}

// Encode the raw frame and write it to the output file, duplicating frames if necessary but
// don't duplicate more than 10 frames in case we get a runaway duplicate calculation.

for (i = 0; i < nFrameCount && i < 10; i++)

{

pRawFrame->pict_type = 0;

pRawFrame->quality = gEncVideoStream->quality;

pRawFrame->pts = gEncVideoFrameCount;

// Encode the frame.

#if 0

nEncodedBytes = avcodec_encode_video(gEncVideoCodecCtx,

gEncVideoEncodedBuf, gEncVideoEncodedBufSiz, // out

pRawFrame); // in

#else

if(!doFlush)

nEncodedBytes = avcodec_encode_video(gEncVideoCodecCtx,

gEncVideoEncodedBuf, gEncVideoEncodedBufSiz, // out

pRawFrame); // in

else

nEncodedBytes = avcodec_encode_video(gEncVideoCodecCtx,

gEncVideoEncodedBuf, gEncVideoEncodedBufSiz, // out

NULL);

#endif

// The codec doesn't necessarily generate an encoded frame from our input, it might buffer
// it. We can only write a frame to the output if it generated one though.

if (nEncodedBytes > 0)

{

AVPacket pkt;

av_init_packet(&pkt);

pkt.stream_index = gEncVideoStream->index;

pkt.data = gEncVideoEncodedBuf;

pkt.size = nEncodedBytes;

if(gEncVideoCodecCtx->coded_frame &&
gEncVideoCodecCtx->coded_frame->key_frame)

pkt.flags |= PKT_FLAG_KEY;

// Encoder will have specified the encoded frame's PTS in codec time_base but we must
// scale it back into the stream time_base before writing it to the output file.

if (gEncVideoCodecCtx->coded_frame && gEncVideoCodecCtx->coded_frame->pts !=
AV_NOPTS_VALUE)

{

pkt.pts = av_rescale_q(gEncVideoCodecCtx->coded_frame->pts,
gEncVideoCodecCtx->time_base, gEncVideoStream->time_base);

printf("MUX: Rescaled video PTS from %lld (codec time_base) to %lld (stream
time_base).\n", gEncVideoCodecCtx->coded_frame->pts, pkt.pts);

}

printf("MUX: (%lld) Writing video frame with PTS: %lld.\n",
gEncVideoFrameCount, pkt.pts);

// Write the encoded frame in the output file.

if (av_interleaved_write_frame(gEncFormatCtx, &pkt) != 0)

{

printf("MUX: ERROR - Video frame write failed.\n");

return -1;

}

gEncVideoFrameCount++;

if (gEncVideoFrameCount == 83)

gEncVideoFrameCount = gEncVideoFrameCount; // svw: deliberate no-op, used as a breakpoint spot while debugging the crash

}

else if (nEncodedBytes < 0)

{

printf("MUX: ERROR - Video encoding failed.\n");

return -1;

}

}

return 0;

}



/* encodeAudioFrame

* Takes a frame of raw audio samples, read & decoded from the input file, performs any resampling
* required to match the output codec's sample rate, encodes them and then writes the encoded
* frame into the output file.
*
* The output codec has a minimum number of samples it requires to generate an encoded frame, the
* number of bytes this equates to is stored in nAudioFrameSizeOut. Incoming raw audio frames may
* not be this big however, so we store them in a FIFO until we've got enough to extract in groups
* of nAudioFrameSizeOut and encode.

*/

int encodeAudioFrame(int16_t *pFrame, int frameSize)

{

AVPacket pkt;

int nBytesToWrite = 0;

uint8_t * pRawSamples = NULL;

int nAudioFrameSizeOut = gEncAudioCodecCtx->frame_size *
gEncAudioCodecCtx->channels * sizeof(int16_t);

// Do we need to resample the audio first?

if (gEncAudioConvertCtx)

{

int nSampleCountIn = frameSize / (gDecStreamCtxAudio->m_codecCtx->channels *
sizeof(int16_t));

int nSampleCountOut = audio_resample(gEncAudioConvertCtx,

(short *)gEncAudioConvertBuf, // out

(short *)pFrame, nSampleCountIn); // in

nBytesToWrite = nSampleCountOut * gEncAudioCodecCtx->channels * sizeof
(int16_t);

pRawSamples = gEncAudioConvertBuf;

}

else

{

nBytesToWrite = frameSize;

pRawSamples = (uint8_t*)pFrame;

}

#if 0

// Put the (possibly resampled) raw audio samples into the FIFO.

av_fifo_write(&gEncAudioFifo, pRawSamples, nBytesToWrite);

// assert(nAudioFrameSizeOut <= (2 * AVCODEC_MAX_AUDIO_FRAME_SIZE));

// Read raw audio samples out of the FIFO in nAudioFrameSizeOut byte-sized
groups to encode.

while (av_fifo_read(&gEncAudioFifo, gEncAudioFifoOutBuf, nAudioFrameSizeOut)
== 0)

{

av_init_packet(&pkt);

pkt.size = avcodec_encode_audio(gEncAudioCodecCtx,

gEncAudioEncodedBuf, gEncAudioEncodedBufSiz, // out

(int16_t*)gEncAudioFifoOutBuf); // in

if (pkt.size < 0)

{

printf("MUX: ERROR - Can't encode audio frame.\n");

return -1;

}

else if (pkt.size == 0)

{

printf("MUX: Audio codec buffered samples ...\n");

return 0;

}

// Rescale from the codec time_base to the AVStream time_base.

if (gEncAudioCodecCtx->coded_frame && gEncAudioCodecCtx->coded_frame->pts !=
AV_NOPTS_VALUE)

pkt.pts = av_rescale_q(gEncAudioCodecCtx->coded_frame->pts,
gEncAudioCodecCtx->time_base, gEncAudioStream->time_base);

printf("MUX: (%d) Writing audio frame with PTS: %lld.\n",
gEncAudioCodecCtx->frame_number, pkt.pts);

pkt.stream_index = gDecStreamCtxAudio->m_stream->index;

pkt.data = gEncAudioEncodedBuf;

pkt.flags |= PKT_FLAG_KEY;

// Write the encoded audio frame to the output file.

if (av_interleaved_write_frame(gEncFormatCtx, &pkt) != 0)

{

printf("MUX: ERROR - Failed to write audio frame to file.\n");

return -1;

}

}

#endif

return 0;

}





/*******************************************************************************

* Main

*

* The code is broken into two functional groups - a demultiplexer / decoder

* which reads and decodes frames from the input file and an encoder /

* multiplexer which encodes and writes frames to the output file. The result

* is a transcoding from the input format to the output format.

*

* See the comments along the way to understand how the various vagaries of

* the libraries (ie. time bases, presentation and decompression time stamps

* etc work) and for tips on how to achieve tasks such as synchronisation.

*

*******************************************************************************/

int main()

{

streamContext *streamCtx = NULL;

int frameIndex = 0;

// Tell libavcodec and libavformat to register all their codecs, parsers,

// demultiplexers and multiplexers.

av_register_all();

// Open the input file and initialise all demux and decoder structures.

// if (demuxStartSession("test.mpg") == 0)

if (demuxStartSession("test.avi") == 0)

{

#if 1 // svw test only

// Open the output file and initialise all encoder and muxer structures.

// if (muxStartSession("test-out.avi") == 0)

if (muxStartSession("video-out.avi") == 0)

{

// Read frames from the input streams.

while ((streamCtx = demuxReadNextFrame()) != NULL)

{

// Decode frames while the input packet contains them.

while (streamCtx->m_pktRemainingSiz > 0)

{

if (decodeFrame(streamCtx, 0) < 0)

break;

if (streamCtx->m_frameValid)

{

// We decoded a valid frame, pass it to the encoder to encode & output.

if (streamCtx->m_codecCtx->codec_type == CODEC_TYPE_VIDEO) {

// svw save frame as .ppm for debugging

// SaveFrame(&gDecStreamCtxVideo->m_decodedVideoFrame, streamCtx->m_codecCtx->width, streamCtx->m_codecCtx->height, frameIndex++);

// svw end

encodeVideoFrame(&gDecStreamCtxVideo->m_decodedVideoFrame,0);

}

else if (streamCtx->m_codecCtx->codec_type == CODEC_TYPE_AUDIO)

encodeAudioFrame(gDecStreamCtxAudio->m_decodedAudioSamples,
gDecStreamCtxAudio->m_decodedAudioSamplesValidSiz);

}

}

// Release the frame.

if (streamCtx->m_pktValid)

{

av_free_packet(&streamCtx->m_pkt);

streamCtx->m_pktValid = 0;

}

}

// Flush the decoders. (for last frame)

// video

if (decodeFrame(gDecStreamCtxVideo, 1) == 0)

{

bool doFlush = 1; // svw added

encodeVideoFrame(&gDecStreamCtxVideo->m_decodedVideoFrame, doFlush);

if (gDecStreamCtxVideo->m_pktValid)

{

av_free_packet(&gDecStreamCtxVideo->m_pkt);

gDecStreamCtxVideo->m_pktValid = 0;

}

}

// audio

if ((gDecStreamCtxAudio != NULL) && (decodeFrame(gDecStreamCtxAudio, 1) ==
0))

{

encodeAudioFrame(gDecStreamCtxAudio->m_decodedAudioSamples,
gDecStreamCtxAudio->m_decodedAudioSamplesValidSiz);

if (gDecStreamCtxAudio->m_pktValid)

{

av_free_packet(&gDecStreamCtxAudio->m_pkt);

gDecStreamCtxAudio->m_pktValid = 0;

}

}

}

#endif

demuxEndSession();

muxEndSession();

}

return 0;

}