Hi, I am recording video in WEBM ( VP8 ), My program screenshots 3
times per sec & adds it to video. The problem is video plays faster,
instead of playing like 1,2,3.. it jumps like 1,3,5 & shows wrong
video length ( means if I recorded it for 1min it will show 44sec )

I am giving timestamp to every bitmap i add to video but still some problem.

c# part:

public static void StartRecording()
        {
            DateTime RecordingStartTime = DateTime.Now;
                        vencoder.InitFile(filename, 1000, 3,
Screen.PrimaryScreen.Bounds.Width,
Screen.PrimaryScreen.Bounds.Height);
                        
                        while(!Environment.HasShutdownStarted & !abort)
                        {
                                Bitmap snap = takescreeshot();
                vencoder.AddBitmap(snap, DateTime.Now - RecordingStartTime);
                                Thread.sleep(300);
                        }
                        vencoder.Finish();
                }
                
C++/Cli part:

bool VideoEncoder::InitFile(String^ inputFile,int^ bitrate,int^
fps,int^ width,int ^height)
{
        bool res = false;

        // convert specified managed String to unmanaged string
        IntPtr ptr = 
System::Runtime::InteropServices::Marshal::StringToHGlobalUni(
inputFile );
    wchar_t* nativeFileNameUnicode = (wchar_t*) ptr.ToPointer( );
    int utf8StringSize = WideCharToMultiByte( CP_UTF8, 0,
nativeFileNameUnicode, -1, NULL, 0, NULL, NULL );
    char* nativeFileName = new char[utf8StringSize];
    WideCharToMultiByte( CP_UTF8, 0, nativeFileNameUnicode, -1,
nativeFileName, utf8StringSize, NULL, NULL );

        const char * filename = nativeFileName;
        outputFilename = inputFile;
        
        SYSTEM_INFO sysinfo;
        GetSystemInfo( &sysinfo );
        DWORD numCPU = sysinfo.dwNumberOfProcessors;

  // Initialize libavcodec
  av_register_all();

  // Create format
  pOutFormat = av_guess_format(NULL, filename, NULL);
  AVCodec * avCodec = avcodec_find_encoder_by_name("libvpx");
  pOutFormat->video_codec=avCodec->id;
  //init shared variable
  BITRATE=(int)bitrate;
  FPS=(int)fps;
  WIDTH=(int)width;
  HEIGHT=(int)height;
  //

  if (pOutFormat)
  {
    // allocate context
    pFormatContext = avformat_alloc_context();
        //pFormatContext->bit_rate

    if (pFormatContext)
    {
      pFormatContext->oformat = pOutFormat;
      memcpy(pFormatContext->filename, filename, min(strlen(filename),
        sizeof(pFormatContext->filename)));

      // Add video and audio stream
      pVideoStream   = AddVideoStream(pFormatContext, pOutFormat->video_codec);

          pVideoStream->codec->bit_rate=BITRATE; //2500; //5000
          pVideoStream->codec->time_base.num=1;
          pVideoStream->codec->time_base.den=FPS;
          pVideoStream->codec->thread_count=numCPU;

      // Set the output parameters (must be done even if no
      // parameters).
      av_dump_format(pFormatContext, 0, filename, 1);

        // Open Video and Audio stream
        res = false;
        if (pVideoStream)
        {
          res = OpenVideo(pFormatContext, pVideoStream);
        }

        if (res && !(pOutFormat->flags & AVFMT_NOFILE))
        {
          if (avio_open(&pFormatContext->pb, filename, AVIO_FLAG_WRITE) < 0)
          {
            res = false;
            printf("Cannot open file\n");
          }
        }

        if (res)
        {
          avformat_write_header(pFormatContext,NULL);
                  pImgConvertCtx =
sws_getContext(pVideoStream->codec->width,pVideoStream->codec->height,PIX_FMT_GRAY8,pVideoStream->codec->width,pVideoStream->codec->height,PIX_FMT_YUV420P,SWS_FAST_BILINEAR,NULL,NULL,NULL);
//SWS_POINT //SWS_BICUBIC //SWS_FAST_BILINEAR
          res = true;
        }
      //}
    }
  }

  if (!res)
  {
    Free();
    printf("Cannot init file\n");
  }

  inputFile=nullptr;

  return res;
}

AVStream *VideoEncoder::AddVideoStream(AVFormatContext *pContext,
CodecID codec_id)
{
  AVCodecContext *pCodecCxt = NULL;
  AVStream *st    = NULL;

  st = av_new_stream(pContext, 0);
  if (!st)
  {
    printf("Cannot add new vidoe stream\n");
    return NULL;
  }

  pCodecCxt = st->codec;
  pCodecCxt->codec_id = (CodecID)codec_id;
  pCodecCxt->codec_type = AVMEDIA_TYPE_VIDEO;
  pCodecCxt->frame_number = 0;
  pCodecCxt->bit_rate = BITRATE;
  pCodecCxt->width  = WIDTH;
  pCodecCxt->height = HEIGHT;
  pCodecCxt->time_base.num = 1;
  pCodecCxt->time_base.den = FPS;
  pCodecCxt->pix_fmt = PIX_FMT_YUV420P;

  // Some formats want stream headers to be separate.
  if(pContext->oformat->flags & AVFMT_GLOBALHEADER)
  {
      pCodecCxt->flags |= CODEC_FLAG_GLOBAL_HEADER;
  }

  return st;
}

bool VideoEncoder::OpenVideo(AVFormatContext *oc, AVStream *pStream)
{
  AVCodec *pCodec;
  AVCodecContext *pContext;

  pContext = pStream->codec;

  // Find the video encoder.
  pCodec = avcodec_find_encoder(pContext->codec_id);
  if (!pCodec)
  {
    throw gcnew Exception( "Cannot find video codec." );
    return false;
  }

  // Open the codec.
  if (avcodec_open2(pContext, pCodec,NULL) < 0)
  {
          throw gcnew Exception( "Cannot open video codec." );
      return false;
  }

  pVideoEncodeBuffer = NULL;
  if (!(pFormatContext->oformat->flags & AVFMT_RAWPICTURE))
  {
        nSizeVideoEncodeBuffer = WIDTH*HEIGHT*16;
    pVideoEncodeBuffer = (uint8_t *)av_malloc(nSizeVideoEncodeBuffer);
  }

  return true;
}

bool VideoEncoder::AddBitmap(Bitmap^ bitmap,TimeSpan timestamp)
{
        System::Drawing::Imaging::BitmapData^ bitmapData = bitmap->LockBits(
System::Drawing::Rectangle( 0, 0,bitmap->Width, bitmap->Height
),System::Drawing::Imaging::ImageLockMode::ReadOnly,System::Drawing::Imaging::PixelFormat::Format8bppIndexed);
        uint8_t* ptr = reinterpret_cast<uint8_t*>( static_cast<void*>(
bitmapData->Scan0 ) );
        uint8_t* srcData[4] = { ptr, NULL, NULL, NULL };
        int srcLinesize[4] = { bitmapData->Stride, 0, 0, 0 };
        
        pCurrentPicture = CreateFFmpegPicture(pVideoStream->codec->pix_fmt,
pVideoStream->codec->width, pVideoStream->codec->height);
        sws_scale(pImgConvertCtx, srcData, srcLinesize, 0, bitmap->Height,
pCurrentPicture->data, pCurrentPicture->linesize );

        bitmap->UnlockBits( bitmapData );

        if ( timestamp.Ticks >= 0 )
        {
                const double frameNumber = timestamp.TotalSeconds * FPS;
                pCurrentPicture->pts = static_cast<int64_t>( frameNumber );
        }

        write_video_frame();

        bitmapData=nullptr;
        ptr=NULL;

        return true;
}

AVFrame * VideoEncoder::CreateFFmpegPicture(int pix_fmt, int nWidth,
int nHeight)
{

  AVFrame *picture     = NULL;
  uint8_t *picture_buf = NULL;
  int size;

  picture = avcodec_alloc_frame();
  if ( !picture)
  {
    printf("Cannot create frame\n");
    return NULL;
  }

  size = avpicture_get_size((PixelFormat)pix_fmt, nWidth, nHeight);

  picture_buf = (uint8_t *) av_malloc(size);

  if (!picture_buf)
  {
    av_free(picture);
    printf("Cannot allocate buffer\n");
    return NULL;
  }

  avpicture_fill((AVPicture *)picture,
picture_buf,(PixelFormat)pix_fmt, nWidth, nHeight);

  return picture;
}

void VideoEncoder::write_video_frame() //set pcurrentpicture to frame to encode
{
        AVCodecContext* codecContext=NULL;

        codecContext = pVideoStream->codec;

        int out_size, ret = 0;

        if ( pFormatContext->oformat->flags & AVFMT_RAWPICTURE )
        {
                Console::WriteLine( "raw picture must be written" );
        }
        else
        {
                // encode the image
                out_size = avcodec_encode_video( codecContext,
pVideoEncodeBuffer,nSizeVideoEncodeBuffer, pCurrentPicture );

                // if zero size, it means the image was buffered
                if ( out_size > 0 )
                {
                        AVPacket packet;
                        av_init_packet( &packet );

                        //if commented process explorer is visible now else ...
                        if ( codecContext->coded_frame->pts != AV_NOPTS_VALUE )
                        {
                                packet.pts = av_rescale_q( 
codecContext->coded_frame->pts,
codecContext->time_base, pVideoStream->time_base );
                        }

                        if ( codecContext->coded_frame->pkt_dts != 
AV_NOPTS_VALUE )
                        {
                                packet.dts = av_rescale_q( 
codecContext->coded_frame->.pkt_dts,
codecContext->time_base, pVideoStream->time_base );
                        }

                        if ( codecContext->coded_frame->key_frame )
                        {
                                packet.flags |= AV_PKT_FLAG_KEY;
                        }

                        packet.stream_index = pVideoStream->index;
                        packet.data = pVideoEncodeBuffer;
                        packet.size = out_size;

                        // write the compressed frame to the media file
                        ret = av_interleaved_write_frame( pFormatContext, 
&packet );

                        av_free_packet(&packet);
                        av_free(pCurrentPicture->data[0]);
                        av_free(pCurrentPicture);
                        pCurrentPicture=NULL;
                }
                else
                {
                        // image was buffered
                }
        }

        if ( ret != 0 )
        {
                throw gcnew Exception( "Error while writing video frame." );
        }
}

void VideoEncoder::CloseVideo(AVFormatContext *pContext, AVStream *pStream)
{
  avcodec_close(pStream->codec);
  if (pCurrentPicture)
  {
    if (pCurrentPicture->data)
    {
                av_free(pCurrentPicture->data[0]);
                pCurrentPicture->data[0] = NULL;
        }
    av_free(pCurrentPicture);
    pCurrentPicture = NULL;
  }

  if (pVideoEncodeBuffer)
  {
    av_free(pVideoEncodeBuffer);
    pVideoEncodeBuffer = NULL;
  }
  nSizeVideoEncodeBuffer = 0;
}

// Flushes delayed frames to disk
void VideoEncoder::Flush( )
{
        // This function goes by the data->VideoOutputBuffer extracting
        // and saving to disk one frame at time, using mostly the same
        // code which can be found on write_video_frame.

                int out_size, ret = 0;

                AVCodecContext* codecContext = pVideoStream->codec;
                
                while ( 1 ) // while there are still delayed frames
                {
                        AVPacket packet;
                    av_init_packet(&packet);

                        // attempt to extract a single delayed frame from the 
buffer
                        out_size = avcodec_encode_video(codecContext, 
pVideoEncodeBuffer ,
nSizeVideoEncodeBuffer , NULL);

                        if (out_size <= 0)
                                break; // there are no more frames to be written

                        // TODO: consider refactoring with write_video_frame?
                        if ( codecContext->coded_frame->pts != AV_NOPTS_VALUE )
                        {
                                packet.pts = av_rescale_q( 
codecContext->coded_frame->pts,
codecContext->time_base, pVideoStream->time_base );
                        }

                        if ( codecContext->coded_frame->key_frame )
                        {
                                packet.flags |= AV_PKT_FLAG_KEY;
                        }

                        packet.stream_index = pVideoStream->index;
                        packet.data = pVideoEncodeBuffer;
                        packet.size = out_size;

                        // write the compressed frame to the media file
                        ret = av_interleaved_write_frame( pFormatContext, 
&packet );

                        if ( ret != 0 )
                        {
                                throw gcnew Exception( "Error while writing 
video frame." );
                        }
                }

                avcodec_flush_buffers(pVideoStream->codec);
}


bool VideoEncoder::Finish()
{
  bool res = true;

  Flush();

  if (pFormatContext)
  {
    av_write_trailer(pFormatContext);
    Free();
  }

  return res;
}

void VideoEncoder::Free()
{
  bool res = true;

  if (pFormatContext)
  {
    // close video stream
    if (pVideoStream)
    {
      CloseVideo(pFormatContext, pVideoStream);
    }

    // Free the streams.
    for(size_t i = 0; i < pFormatContext->nb_streams; i++)
    {
      av_freep(&pFormatContext->streams[i]->codec);
      av_freep(&pFormatContext->streams[i]);
    }

    if (!(pFormatContext->flags & AVFMT_NOFILE) && pFormatContext->pb)
    {
      avio_close(pFormatContext->pb);
    }

    // Free the stream.
    av_free(pFormatContext);
    pFormatContext = NULL;
        av_free(pImgConvertCtx);
  }
}

Tried too much times to correct this but not working. Guys please
suggest some fix

Another Que:
I am using NAudio, how can i add wave samples to vorbis audio to this?

I am developing free desktop recording program ( to generate Video Tutorials ).
_______________________________________________
Libav-user mailing list
Libav-user@ffmpeg.org
http://ffmpeg.org/mailman/listinfo/libav-user

Reply via email to