/*
 * Fullpath.cpp
 *
 *  Created on: May 26, 2016
 *      Author: forman
 */

extern "C"
{
#include "libavformat/avformat.h"
#include "x264.h"
#include <libswscale/swscale.h>
#include <libavutil/opt.h>
#include <libavcodec/avcodec.h>
#include <libavutil/channel_layout.h>
#include <libavutil/common.h>
#include <libavutil/imgutils.h>
#include <libavutil/mathematics.h>
#include <libavutil/samplefmt.h>
}

#include <math.h>
#include </usr/include/SDL/SDL.h>
#include </usr/include/SDL/SDL_thread.h>

#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <unistd.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <assert.h>
#include <opencv/highgui.h>
#include <opencv/cv.h>
#include <getopt.h>             /* getopt_long() */
#include <fcntl.h>              /* low-level i/o */
#include <errno.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/time.h>
#include <sys/mman.h>
#include <sys/ioctl.h>

#include <linux/videodev2.h>
#include "Channel.h"

/* One encoded H.264 NAL unit in transit from the encoder to the decoder.
 * Ownership: the producer mallocs `data`; decode_img() only reads it, so
 * the producer is responsible for freeing it afterwards. */
typedef struct eframe
{
        int nal_type;       /* x264 NAL type (nal_t.i_type), e.g. SPS/PPS/slice */
        int data_size;      /* payload length in bytes */
        uint8_t *data;      /* heap-allocated NAL payload */

} eframe;

void outputNals( int nalCount, x264_nal_t *nals );
static SDL_Overlay* create_display( AVCodecContext *c );
void encoder_init();
void encode_img( unsigned char* picBuf );
void decoder_init();
void decode_img( eframe* img );

/* Raw camera frame: 1280x800 at 3 bytes/pixel.
 * NOTE(review): the capture format used below is YUYV 4:2:2 (2 bytes/pixel),
 * so 3 bytes/pixel over-allocates — harmless, but confirm intent. */
#define FRAME_SIZE (800*1280*3)
/* Staging buffer sized in `long`s; on LP64 this holds >= FRAME_SIZE bytes. */
#define BUFF_SIZE ((FRAME_SIZE/8)+1)
static long int buf[BUFF_SIZE];

/* Encoder-side state (x264 + swscale colour conversion). */
static x264_t *encoder;
static x264_picture_t pic_in, pic_out;
static x264_nal_t *nals;              /* output NALs of the latest encode call */
static int i_nals, out_width = 640, out_height = 480;
static struct SwsContext* sws;        /* YUYV422 -> I420 pre-encode converter */
/* Decoder-side state (libavcodec + SDL display). */
AVCodecContext *pCodecCtx;
AVCodec *pCodec;
SDL_Event event;

/* Entry point for the full capture->encode->decode->display path:
 * simply hands the raw frame to the encoder. */
void startFullPath( unsigned char *buf )
{
    encode_img( buf );
}

void outputNals( int nalCount, x264_nal_t *nals )
{
    eframe *frame = ( eframe * ) malloc( sizeof(eframe) );
    ;

    for ( int i = 0; i < nalCount; i++ )
    {
        frame->data = ( uint8_t* ) malloc(
                sizeof(uint8_t) * nals[i].i_payload );
        memcpy( frame->data, nals->p_payload, nals[i].i_payload );
        frame->data_size = nals[i].i_payload;
        frame->nal_type = nals->i_type;

        printf( "Encoder: NAL sent (type=%02x, size=%d)\n", frame->nal_type,
                frame->data_size );
        //printf("Nal data = %c,%c\n",frame->data[0],frame->data[1]);

        decode_img( frame );

    }

}

/* Create the 1280x800 SDL window and a YV12 overlay to blit decoded frames
 * into.  Exits on failure.  Fix vs. original: the overlay creation result is
 * now checked (SDL_CreateYUVOverlay can return NULL). */
static SDL_Overlay* create_display( AVCodecContext *c )
{
    SDL_Surface *screen;
    SDL_Overlay *bmp;

    (void) c;  /* geometry is currently fixed; codec context unused */

    /* NOTE(review): 12 bpp is an unusual request — SDL will pick the nearest
     * supported depth, but 24 or 0 ("current depth") is likely intended. */
    screen = SDL_SetVideoMode( 1280, 800, 12, 0 );
    if ( !screen )
    {
        fprintf( stderr, "SDL: could not set video mode - exiting\n" );
        exit( 1 );
    }

    bmp = SDL_CreateYUVOverlay( 1280, 800, SDL_YV12_OVERLAY, screen );
    if ( !bmp )
    {
        fprintf( stderr, "SDL: could not create YUV overlay - exiting\n" );
        exit( 1 );
    }

    return bmp;
}

/* Initialise the swscale converter and the x264 encoder, then fetch the
 * stream headers (SPS/PPS/SEI).  Exits on any failure.
 * Fixes vs. original: sws_getContext() result is checked; i_fps_den is set
 * explicitly; the header eframe is freed (it was leaked — its delivery to
 * the decoder is still disabled, as in the original); unused locals removed. */
void encoder_init()
{
    x264_param_t x264Param;
    int fps = 30, width = 1280, height = 800, r = 0;
    int header_size = 0, nheader = 0;
    eframe* header;
    i_nals = 0;

    /* Camera delivers YUYV 4:2:2 at 1280x800; x264 wants planar I420 at
     * the (downscaled) output resolution. */
    sws = sws_getContext( width, height, AV_PIX_FMT_YUYV422, out_width,
            out_height, AV_PIX_FMT_YUV420P,
            SWS_FAST_BILINEAR, NULL, NULL, NULL );
    if ( !sws )
    {
        perror( "sws_getContext() failed" );
        exit( 1 );
    }

    x264_param_default_preset( &x264Param, "veryfast", "zerolatency" );
    x264Param.i_csp = X264_CSP_I420;
    x264Param.i_threads = 1;
    x264Param.i_width = out_width;
    x264Param.i_height = out_height;
    x264Param.i_fps_num = fps;
    x264Param.i_fps_den = 1;   /* fps_num is meaningless without a denominator */

    if ( x264_picture_alloc( &pic_in, x264Param.i_csp, x264Param.i_width,
            x264Param.i_height ) < 0 )
    {
        perror( "x264_picture_alloc() failed" );
        exit( 1 );
    }

    encoder = x264_encoder_open( &x264Param );
    if ( !encoder )
    {
        perror( "Cannot open the encoder" );
        exit( 1 );
    }

    r = x264_encoder_headers( encoder, &nals, &nheader );
    if ( r < 0 )
    {
        perror( "x264_encoder_headers() failed" );
        exit( 1 );
    }

    /* x264 lays the SPS/PPS/SEI payloads out contiguously, so one memcpy
     * starting at nals[0].p_payload captures all three. */
    header_size = nals[0].i_payload + nals[1].i_payload + nals[2].i_payload;

    header = ( eframe * ) malloc( sizeof(eframe) );
    if ( !header )
    {
        perror( "malloc() failed" );
        exit( 1 );
    }
    header->data_size = header_size;
    header->data = ( uint8_t* ) malloc( header_size );
    if ( !header->data )
    {
        perror( "malloc() failed" );
        exit( 1 );
    }
    memcpy( header->data, nals[0].p_payload, header_size );
    /* decode_img( header );  -- header delivery intentionally disabled,
     * as in the original code */
    free( header->data );
    free( header );
}

void encode_img( unsigned char* picBuf )
{
    int bytes_filled, h = 0;
    AVPicture pic_raw;
    int frame_size;
    static int pts = 0;
    eframe *frame;

    frame = ( eframe * ) malloc( sizeof(eframe) );
    bytes_filled = avpicture_fill( &pic_raw, picBuf, AV_PIX_FMT_YUYV422, 1280,
            800 );
    if ( !bytes_filled )
    {
        perror( "Cannot fill the raw input buffer" );
        exit( 1 );
    }
    h = sws_scale( sws, pic_raw.data, pic_raw.linesize, 0, 800,
            pic_in.img.plane, pic_in.img.i_stride );

    if ( h != out_height )
    {
        perror( "scale failed" );
        exit( 1 );
    }

    pic_in.i_pts = pts;
    frame_size = x264_encoder_encode( encoder, &nals, &i_nals, &pic_in,
            &pic_out );
    if ( frame_size )
    {
        ++pts;
        //nals->p_payload, frame_size, 1, ofs)

        frame->data = ( uint8_t* ) malloc( sizeof(uint8_t) * frame_size );
        memcpy( frame->data, nals->p_payload, frame_size );
        ;
        frame->data_size = frame_size;
        frame->nal_type = nals->i_type;

        printf( "Encoder: NAL sent (type=%02x, size=%d)\n", frame->nal_type,
                frame->data_size );

        //outputNals(i_nals,nals);
        decode_img( frame );
        free( frame );

    }

    while ( x264_encoder_delayed_frames( encoder ) )
    {

        frame_size = x264_encoder_encode( encoder, &nals, &i_nals, NULL,
                &pic_out );

        if ( frame_size )
        {
            ++pts;
            frame->data = ( uint8_t* ) malloc( sizeof(uint8_t) * frame_size );
            memcpy( frame->data, nals->p_payload, frame_size );
            ;
            frame->data_size = frame_size;
            frame->nal_type = nals->i_type;

            printf( "DELAYED FRAMES--Encoder: NAL sent (type=%02x, size=%d)\n",
                    frame->nal_type, frame->data_size );

            //outputNals(i_nals,nals);
            decode_img( frame );
            free( frame );

        }

    }

}

/* Initialise SDL and the libavcodec H.264 decoder.  Exits on any failure.
 * Fixes vs. original: codec-context options (flags2, compliance, b-frames)
 * are now set BEFORE avcodec_open2() — settings applied after opening are
 * ignored by libavcodec; the context allocation is also checked. */
void decoder_init()
{
    if ( SDL_Init( SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER ) )
    {
        fprintf( stderr, "Could not initialize SDL - %s\n", SDL_GetError() );
        exit( 1 );
    }

    // Register all formats and codecs
    av_register_all();

    pCodec = avcodec_find_decoder( AV_CODEC_ID_H264 );
    if ( pCodec == NULL )
    {
        fprintf( stderr, "Unsupported codec!\n" );
        exit( 1 ); // Codec not found
    }

    pCodecCtx = avcodec_alloc_context3( pCodec );
    if ( pCodecCtx == NULL )
    {
        fprintf( stderr, "Could not allocate codec context\n" );
        exit( 1 );
    }

    /* Configure before opening — avcodec_open2() snapshots these. */
    pCodecCtx->flags2 = CODEC_FLAG2_CHUNKS; //allows slicing
    pCodecCtx->strict_std_compliance = FF_COMPLIANCE_NORMAL;
    pCodecCtx->has_b_frames = 1;
    pCodecCtx->debug = 1;

    /// Open codec
    if ( avcodec_open2( pCodecCtx, pCodec, NULL ) < 0 )
    {
        fprintf( stderr, "Could not open codec\n" );
        exit( 1 ); // Could not open codec
    }
}

/* Decode one eframe's NAL payload and, when a full picture is produced,
 * convert it to planar 4:2:0 and display it on the SDL overlay.
 * Fixes vs. original:
 *  - the SDL overlay (and the window mode-set inside create_display) is now
 *    created once and reused, instead of once per decoded frame (leak);
 *  - the swscale conversion uses the decoder's actual geometry and pixel
 *    format instead of a hard-coded 640x480 NV21 on both sides — the
 *    format mismatch is the likely cause of the corrupted half-images;
 *  - int64 timestamps are printed with a matching format. */
void decode_img( eframe* img )
{
    AVFrame *pFrame;
    SDL_Rect rect;
    int frameFinished = 0;
    static int i = 0;                 /* running decoded-frame counter */
    static SDL_Overlay *bmp = NULL;   /* created lazily, reused across calls */
    struct SwsContext * img_convert_ctx;
    int swsheight;
    AVPacket avpkt;

    /// Allocate video frame
    pFrame = avcodec_alloc_frame();
    if ( !pFrame )
    {
        fprintf( stderr, "Could not allocate frame\n" );
        return;
    }

    if ( bmp == NULL )
        bmp = create_display( pCodecCtx );

    av_init_packet( &avpkt );
    avpkt.data = ( uint8_t * ) img->data;
    avpkt.size = img->data_size;

    avcodec_decode_video2( pCodecCtx, pFrame, &frameFinished, &avpkt );

    if ( frameFinished )
    {
        printf( "\rFrame [%d]: pts=%lld, pkt_pts=%lld, pkt_dts=%lld\n", i,
                ( long long ) pFrame->pts, ( long long ) pFrame->pkt_pts,
                ( long long ) pFrame->pkt_dts );
        SDL_LockYUVOverlay( bmp );

        AVPicture pict;
        /* SDL's YV12 overlay stores planes as Y, V, U — hence the U/V swap. */
        pict.data[0] = bmp->pixels[0];
        pict.data[1] = bmp->pixels[2];
        pict.data[2] = bmp->pixels[1];

        pict.linesize[0] = bmp->pitches[0];
        pict.linesize[1] = bmp->pitches[2];
        pict.linesize[2] = bmp->pitches[1];

        /* Convert from the decoder's real output (width/height/pix_fmt are
         * filled in by libavcodec once the SPS is parsed) into planar 4:2:0
         * at the overlay resolution. */
        img_convert_ctx = sws_getCachedContext( NULL, pCodecCtx->width,
                pCodecCtx->height, pCodecCtx->pix_fmt, 1280, 800,
                AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL );

        swsheight = sws_scale( img_convert_ctx,
                ( ( AVPicture* ) pFrame )->data,
                ( ( AVPicture* ) pFrame )->linesize, 0, pCodecCtx->height,
                pict.data, pict.linesize );

        printf( "sws height = %d", swsheight );
        SDL_UnlockYUVOverlay( bmp );
        rect.x = 0;
        rect.y = 0;
        rect.w = 1280;
        rect.h = 800;
        SDL_DisplayYUVOverlay( bmp, &rect );

        sws_freeContext( img_convert_ctx );

        usleep( 40000 );   /* crude ~25 fps display pacing */
    }

    ++i;
    printf( "\n" );

    /// Free the frame
    av_free( pFrame );
}

/* Full-path driver:
 *  1. attach to the "Cam_1" shared-memory channel,
 *  2. initialise decoder and encoder,
 *  3. loop forever: pull a raw frame, encode it, decode it, display it.
 * Fixes vs. original: removed the unused `eframe *eframe;` local (it also
 * shadowed the type name); the frame copy into the fixed staging buffer is
 * now bounded by the buffer's size. */
int main( int argc, char *argv[] )
{
    CHANNEL_S* chan;
    ChannelMemoryPTR* ptr;
    char* framePtr;
    int consumerId;
    char name[LINE_SIZE];

    (void) argc;
    (void) argv;

    ptr = new ChannelMemoryPTR();
    ChannelManager* myManager = ptr->getManager();
    if ( myManager == NULL )
    {
        printf( "Reader Error ptr->getManager() failed\n" );
        return -1;
    }

    strcpy( name, "Cam_1" );
    myManager->initChannelLockingRC( name, consumerId, ptr, 1440 );
    chan = myManager->getChannel( name );

    if ( chan == NULL )
    {
        printf( "Reader Error myManager->getChannel(name) failed\n" );
        return -1;
    }

    av_log_set_level( AV_LOG_DEBUG );
    decoder_init();
    encoder_init();

    while ( 1 )
    {
        framePtr = myManager->getNewFrame( chan, consumerId );
        /* getNewFrame() reports "no frame" as NULL or (char *)-1 */
        if ( framePtr != NULL && framePtr != ( char * ) -1 )
        {
            /* never copy more than the staging buffer can hold */
            size_t n = ( size_t ) chan->myFrameSize;
            if ( n > sizeof(buf) )
                n = sizeof(buf);
            memcpy( buf, framePtr, n );

            encode_img( ( unsigned char* ) buf );
        }
    }
    return 0;   /* unreachable */
}



/*
 * NOTE(review): the mailing-list reply below was accidentally pasted into
 * this source file and broke compilation.  It is preserved here inside a
 * comment for reference; it should probably be removed entirely.
 *
 * Sent from my iPhone
 *
 * > On Jun 8, 2016, at 08:35, Luca Barbato <[email protected]> wrote:
 * >
 * >> On 07/06/16 21:41, Kiara Forman wrote:
 * >> Ok the formats both happen to be AV_PIX_FMT_YUV420P.
 * >>
 * >> with AV_PIX_FMT_YUV420P im seeing 2 left half images purple and green
 * >>
 * >> with AV_PIX_FMT_NV12 and AV_PIX_FMT_NV21 im seeing 2 left half images blue
 * >> and green
 * >
 * > If avplay renders the video correctly I'm afraid there is something else
 * > overlooked, could you please paste the code you are using?
 * >
 * > lu
 * > _______________________________________________
 * > libav-api mailing list
 * > [email protected]
 * > https://lists.libav.org/mailman/listinfo/libav-api
 * _______________________________________________
 * libav-api mailing list
 * [email protected]
 * https://lists.libav.org/mailman/listinfo/libav-api
 *
 * Reply via email to
 */