I have an array that marks which frames are keyframes, and I want to
create a video from those keyframes. As a first step, I am trying to
save the keyframes into a directory on the sdcard called "keyframes".
I am testing with a video that has 217 keyframes, and I think I am
using av_seek_frame() incorrectly, because my code always saves the
same last frame 217 times under different names (frame1, frame2, ...,
frame217).
Can someone help me? Thank you.
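
For reference, here is the pattern I think has to follow
av_seek_frame() (a rough, untested sketch; the flush call, the decode
loop, and the ts variable are my guesses): seeking only repositions the
demuxer, so after the seek the codec has to be flushed and packets
decoded until a fresh frame comes out.

    // rough sketch, untested: seek, flush, then decode until a frame arrives
    // (ts is the seek timestamp; see the note after the code about its units)
    av_seek_frame(pFormatCtx, videoStream, ts, AVSEEK_FLAG_BACKWARD);
    avcodec_flush_buffers(pCodecCtx);   // drop frames buffered before the seek

    AVPacket pkt;
    int got = 0;
    while(!got && av_read_frame(pFormatCtx, &pkt) >= 0) {
        if(pkt.stream_index == videoStream)
            avcodec_decode_video2(pCodecCtx, pFrame, &got, &pkt);
        av_free_packet(&pkt);
    }
    // only now does pFrame hold the frame at (or near) ts; convert it
    // with sws_scale() and then call SaveFrame()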

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame) {
log_message("saving frame.");

  FILE *pFile;
  char szFilename[32];
  int  y;
  // Open file
  sprintf(szFilename, "/sdcard/keyframes/frame%d.ppm", iFrame+1);
  pFile=fopen(szFilename, "wb");
  if(pFile==NULL)
    return;

  // Write header
  fprintf(pFile, "P6\n%d %d\n255\n", width, height);

  // Write pixel data
  for(y=0; y<height; y++)
    fwrite(pFrame->data[0]+y*pFrame->linesize[0], 1, width*3, pFile);

  // Close file
  fclose(pFile);
}



jint Java_com_test_Test_takePics(JNIEnv* env, jobject javaThis)  {

   const char path[256] = "/sdcard/keyframes/";
   // POSIX mkdir() takes a mode argument (needs <sys/stat.h>)
   if (mkdir(path, 0777) != 0) log_message("mkdir /sdcard/keyframes/ failed (it may already exist)");

    log_message("Fonka girdim!\n");

    AVFormatContext *pFormatCtx;
    int             i, videoStream, j;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVFrame         *pFrameRGB;
    AVPacket        packet;
    int             frameFinished;
    int             numBytes;
    uint8_t         *buffer;
    unsigned char r, g, b;
    int framecount;

    char* filename = "/sdcard/do-beer-not-drugs.3gp";
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(av_open_input_file(&pFormatCtx, filename, NULL, 0, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, filename, 0);

    framecount = pFormatCtx->streams[0]->nb_frames;
    hist = malloc(framecount * sizeof(int*));

    for (j = 0; j < framecount; ++j) {
        hist[j] = malloc(sizeof(int) * 64); // one 64-bin histogram per frame
    }
    for (i = 0; i < framecount; i++) {
        for (j = 0; j < 64; j++) {
            hist[i][j] = 0;
        }
    }

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
        {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL)
        return -1; // Codec not found

    // Open codec
    if(avcodec_open(pCodecCtx, pCodec)<0)
        return -1; // Could not open codec

    // Hack to correct wrong frame rates that seem to be generated by some codecs
    if(pCodecCtx->time_base.num>1000 && pCodecCtx->time_base.den==1)
        pCodecCtx->time_base.den=1000;

    // Allocate video frame
    pFrame=avcodec_alloc_frame();

    // Allocate an AVFrame structure
    pFrameRGB=avcodec_alloc_frame();
    if(pFrameRGB==NULL)
        return -1;

    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
        pCodecCtx->height);

    buffer=malloc(numBytes);

    // Assign appropriate parts of buffer to image planes in pFrameRGB
    avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
        pCodecCtx->width, pCodecCtx->height);

    // Read all frames, building a 64-bin colour histogram for each one
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0)
    {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream)
        {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                &packet);

            // Did we get a video frame?
            if(frameFinished)
            {

                static struct SwsContext *img_convert_ctx;

                // Convert the decoded frame to RGB24
                if(img_convert_ctx == NULL) {
                    int w = pCodecCtx->width;
                    int h = pCodecCtx->height;

                    img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt,
                                                     w, h, PIX_FMT_RGB24,
                                                     SWS_BICUBIC,
                                                     NULL, NULL, NULL);
                    if(img_convert_ctx == NULL) {
                        fprintf(stderr, "Cannot initialize the conversion context!\n");
                        exit(1);
                    }
                }
                int ret = sws_scale(img_convert_ctx, pFrame->data,
                                    pFrame->linesize, 0, pCodecCtx->height,
                                    pFrameRGB->data, pFrameRGB->linesize);

                // (assumes pFrameRGB rows are tightly packed, i.e. linesize == width*3)
                for (j = 0; j < 3*pCodecCtx->height*pCodecCtx->width - 3; j += 3) {
                    // one pixel is 3 bytes: R, G, B
                    r = (unsigned char) pFrameRGB->data[0][j];
                    g = (unsigned char) pFrameRGB->data[0][j+1];
                    b = (unsigned char) pFrameRGB->data[0][j+2];

                    // keep the top two bits of each channel and pack them
                    // into a 6-bit value (0..63), the 64-bin histogram index
                    r = (unsigned char) ((r >> 2) & 0x30);
                    g = (unsigned char) ((g >> 4) & 0x0C);
                    b = (unsigned char) ((b >> 6) & 0x03);

                    unsigned char h = (unsigned char)(r|g|b);
                    hist[i][h]++;
                }
                i++;
            }
        }


        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }

    framecount = i;
    int keyframelength = select_keyFrames(framecount);

    for(i=1; i<keyframelength+1; i++){
        int64_t timeStamp = (int64_t)((pFormatCtx->duration) / framecount)
                            * keyFrames[i] / 100;
        av_seek_frame(pFormatCtx, 0, timeStamp, AVSEEK_FLAG_ANY);
        SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
    }
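    // I suspect the problem is in the loop above: nothing is decoded
    // between av_seek_frame() and SaveFrame(), so pFrameRGB still holds
    // the last frame converted in the read loop, which would explain why
    // the same image is saved 217 times.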


    free(keyFrames);
    // Free the RGB image
    free(buffer);
    av_free(pFrameRGB);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);


    return 0;
}
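
One more thing I am not sure about: pFormatCtx->duration is in
AV_TIME_BASE units, but av_seek_frame() with a specific stream index
expects the timestamp in that stream's time_base, so the timeStamp
calculation above may be wrong as well. I guess it needs a rescale,
something like:

    int64_t ts = av_rescale_q(timeStamp, AV_TIME_BASE_Q,
                              pFormatCtx->streams[videoStream]->time_base);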
