H.264 format problems when capturing video on the iPhone camera with AVCaptureSession and FFmpeg (CMSampleBufferRef). Please advise.

My goal is to stream H.264/AAC over MPEG2-TS from an iPhone device to a server. My current source builds successfully against FFmpeg + libx264. I am aware of the GNU license; this is for a demo program. My questions:
1. Does the data conversion from CMSampleBufferRef to AVPicture actually succeed?
 avpicture_fill((AVPicture*)pFrame, rawPixelBase, PIX_FMT_RGB32, width, height);
 pFrame's linesize and data are not null, but its pts is a garbage value such as -9233123123, and the same goes for outpic. Because of this I suspect the 'non-strictly-monotonic PTS' message.
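(For reference, here is a minimal sketch of what I understand step 1 should look like, assuming the capture session is configured for kCVPixelFormatType_32BGRA; the names are illustrative, not my actual code:)

    // Sketch only: wrap a BGRA CVPixelBuffer in an AVPicture without copying.
    // avpicture_fill() assumes tightly packed rows, but a CVPixelBuffer may pad
    // each row, so data/linesize are set by hand from the buffer's own stride.
    CVPixelBufferLockBaseAddress(pixelBuffer, 0);
    AVPicture pic;
    memset(&pic, 0, sizeof(pic));
    pic.data[0]     = (uint8_t *)CVPixelBufferGetBaseAddress(pixelBuffer);
    pic.linesize[0] = (int)CVPixelBufferGetBytesPerRow(pixelBuffer);
    // ... convert with sws_scale() using PIX_FMT_BGRA as the source format ...
    CVPixelBufferUnlockBaseAddress(pixelBuffer, 0);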
2. This log output repeats:
encoding frame (size= 0)
encoding frame = "". A return value of 0 from 'avcodec_encode_video' means success, but it is always 0.
I don't know what to do...
2011-06-01 15:15:14.199 AVCam[1993:7303] pFrame = avcodec_alloc_frame(); 
2011-06-01 15:15:14.207 AVCam[1993:7303] avpicture_fill = 1228800
Video encoding
2011-06-01 15:15:14.215 AVCam[1993:7303] codec = 5841844
[libx264 @ 0x1441e00] using cpu capabilities: ARMv6 NEON
[libx264 @ 0x1441e00] profile Constrained Baseline, level 2.0
[libx264 @ 0x1441e00] non-strictly-monotonic PTS
encoding frame (size=    0)
encoding frame 
[libx264 @ 0x1441e00] final ratefactor: 26.74
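(For reference: from what I have read, a return of 0 from avcodec_encode_video() with libx264 does not necessarily mean failure; the encoder buffers frames internally and emits them later, and at end of stream the buffered frames are drained by calling it with a NULL frame. A minimal flush sketch against this old API, reusing the same c/outbuf/outbuf_size variables as my code below:)

    /* Sketch only: drain frames that libx264 has buffered internally by
       calling the encoder with a NULL frame until no more bytes come out. */
    int flush_size;
    do {
        flush_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
        if (flush_size > 0) {
            /* write flush_size bytes from outbuf to the output here */
        }
    } while (flush_size > 0);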
3. I have to guess that the 'non-strictly-monotonic PTS' message is the cause of all these problems. What does 'non-strictly-monotonic PTS' mean?
~~~~~~~~~~ Here is the source ~~~~~~~~~~
- (void)        captureOutput:(AVCaptureOutput *)captureOutput 
        didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer 
               fromConnection:(AVCaptureConnection *)connection
{

    if( !CMSampleBufferDataIsReady(sampleBuffer) )
    {
        NSLog( @"sample buffer is not ready. Skipping sample" );
        return;
    }


    if( [isRecordingNow isEqualToString:@"YES"] )
    {
        lastSampleTime = CMSampleBufferGetPresentationTimeStamp(sampleBuffer);
        if( videoWriter.status != AVAssetWriterStatusWriting  )
        {
            [videoWriter startWriting];
            [videoWriter startSessionAtSourceTime:lastSampleTime];
        }

        if( captureOutput == videooutput )
        {
            [self newVideoSample:sampleBuffer];

            CVImageBufferRef pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer); 
            CVPixelBufferLockBaseAddress(pixelBuffer, 0); 

            // access the data 
            int width = CVPixelBufferGetWidth(pixelBuffer); 
            int height = CVPixelBufferGetHeight(pixelBuffer); 
            unsigned char *rawPixelBase = (unsigned char *)CVPixelBufferGetBaseAddress(pixelBuffer); 

            AVFrame *pFrame; 
            pFrame = avcodec_alloc_frame(); 
            pFrame->quality = 0;

            NSLog(@"pFrame = avcodec_alloc_frame(); ");

//          int bytesPerRow = CVPixelBufferGetBytesPerRowOfPlane(pixelBuffer, 0);

//          int bytesSize = height * bytesPerRow ;  

//          unsigned char *pixel = (unsigned char*)malloc(bytesSize);

//          unsigned char *rowBase = CVPixelBufferGetBaseAddressOfPlane(pixelBuffer, 0);

//          memcpy (pixel, rowBase, bytesSize);


            int avpicture_fillNum = avpicture_fill((AVPicture*)pFrame, rawPixelBase, PIX_FMT_RGB32, width, height);//PIX_FMT_RGB32//PIX_FMT_RGB8
            //NSLog(@"rawPixelBase = %i , rawPixelBase -s = %s",rawPixelBase, rawPixelBase); 
            NSLog(@"avpicture_fill = %i",avpicture_fillNum);
            //NSLog(@"width = %i,height = %i",width, height);



            // Do something with the raw pixels here 

            CVPixelBufferUnlockBaseAddress(pixelBuffer, 0); 

            //avcodec_init();
            //avdevice_register_all();
            av_register_all();





            AVCodec *codec;
            AVCodecContext *c= NULL;
            int  out_size, size, outbuf_size;
            //FILE *f;
            uint8_t *outbuf;

            printf("Video encoding\n");

            /* find the mpeg video encoder */
            codec = avcodec_find_encoder(CODEC_ID_H264);//avcodec_find_encoder_by_name("libx264");
            NSLog(@"codec = %i",codec);
            if (!codec) {
                fprintf(stderr, "codec not found\n");
                exit(1);
            }

            c= avcodec_alloc_context();

            /* put sample parameters */
            c->bit_rate = 400000;
            c->bit_rate_tolerance = 10;
            c->me_method = 2;
            /* resolution must be a multiple of two */
            c->width = 352;//width;//352;
            c->height = 288;//height;//288;
            /* frames per second */
            c->time_base= (AVRational){1,25};
            c->gop_size = 10;//25; /* emit one intra frame every ten frames */
            //c->max_b_frames=1;
            c->pix_fmt = PIX_FMT_YUV420P;

            c ->me_range = 16;
            c ->max_qdiff = 4;
            c ->qmin = 10;
            c ->qmax = 51;
            c ->qcompress = 0.6f;

            /* open it */
            if (avcodec_open(c, codec) < 0) {
                fprintf(stderr, "could not open codec\n");
                exit(1);
            }


            /* alloc image and output buffer */
            outbuf_size = 100000;
            outbuf = malloc(outbuf_size);
            size = c->width * c->height;

            AVFrame* outpic = avcodec_alloc_frame();
            int nbytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);

            //create buffer for the output image
            uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

#pragma mark -  

            fflush(stdout);

//          int numBytes = avpicture_get_size(PIX_FMT_YUV420P, c->width, c->height);
//          uint8_t *buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
//          
//          //UIImage *image = [UIImage imageNamed:[NSString stringWithFormat:@"10%d", i]];
//          CGImageRef newCgImage = [self imageFromSampleBuffer:sampleBuffer];//[image CGImage];
//          
//          CGDataProviderRef dataProvider = CGImageGetDataProvider(newCgImage);
//          CFDataRef bitmapData = CGDataProviderCopyData(dataProvider);
//          buffer = (uint8_t *)CFDataGetBytePtr(bitmapData);   
//          
//          avpicture_fill((AVPicture*)pFrame, buffer, PIX_FMT_RGB8, c->width, c->height);
            avpicture_fill((AVPicture*)outpic, outbuffer, PIX_FMT_YUV420P, c->width, c->height);

            struct SwsContext* fooContext = sws_getContext(c->width, c->height, 
                                                           PIX_FMT_RGB8, 
                                                           c->width, c->height, 
                                                           PIX_FMT_YUV420P, 
                                                           SWS_FAST_BILINEAR, NULL, NULL, NULL);
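            // NB: the source format declared here (PIX_FMT_RGB8) does not match
            // the PIX_FMT_RGB32 that avpicture_fill() was given earlier; the two
            // must agree, or sws_scale() reads the pixel data incorrectly.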

            //perform the conversion
            sws_scale(fooContext, pFrame->data, pFrame->linesize, 0, c->height, outpic->data, outpic->linesize);
            // Here is where I try to convert to YUV

            /* encode the image */

            out_size = avcodec_encode_video(c, outbuf, outbuf_size, outpic);
            printf("encoding frame (size=%5d)\n", out_size);
            printf("encoding frame %s\n", outbuf);
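            // NB: out_size == 0 is not a failure here; libx264 buffers frames
            // internally (lookahead), so early calls can legitimately produce
            // no output. Printing outbuf with %s is also unreliable: it is
            // binary H.264 data, not a NUL-terminated string.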


            //fwrite(outbuf, 1, out_size, f);

            //              free(buffer);
            //              buffer = NULL;      



            /* add sequence end code to have a real mpeg file */
//          outbuf[0] = 0x00;
//          outbuf[1] = 0x00;
//          outbuf[2] = 0x01;
//          outbuf[3] = 0xb7;
            //fwrite(outbuf, 1, 4, f);
            //fclose(f);
            free(outbuf);

            avcodec_close(c);
            av_free(c);
            av_free(pFrame);
            printf("\n");
        }
    }
}
Answer:
This is because you are initializing the AVCodecContext in every iteration of "captureOutput:". The AVCodecContext continuously holds information and encoder state across the frames as they arrive. So you should do all the initialization once per session, or only when the height, width, or other parameters change. This will also save you processing time. The messages you are receiving are perfectly valid; they simply inform you that the codec was opened and what was negotiated on the codec side.
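A minimal sketch of that structure against the same old FFmpeg API used in the question (illustrative names, not a drop-in implementation): the encoder context is created once per recording session, reused for every frame, and each submitted frame gets a strictly increasing pts, which is what the 'non-strictly-monotonic PTS' warning is complaining about.

    #include <libavcodec/avcodec.h>

    static AVCodecContext *enc_ctx = NULL;
    static int64_t frame_count = 0;

    /* Call once when recording starts, not per frame. */
    static int encoder_session_start(int width, int height)
    {
        avcodec_register_all();                     /* once per process */
        AVCodec *codec = avcodec_find_encoder(CODEC_ID_H264);
        if (!codec) return -1;
        enc_ctx = avcodec_alloc_context();
        enc_ctx->bit_rate  = 400000;
        enc_ctx->width     = width;                 /* multiple of two */
        enc_ctx->height    = height;
        enc_ctx->time_base = (AVRational){1, 25};
        enc_ctx->gop_size  = 10;
        enc_ctx->pix_fmt   = PIX_FMT_YUV420P;
        frame_count = 0;
        return avcodec_open(enc_ctx, codec);
    }

    /* Call from captureOutput:... for every frame; the context is reused
       and the pts increases by one per frame, so it is strictly monotonic. */
    static int encoder_encode_frame(AVFrame *yuv, uint8_t *outbuf, int outbuf_size)
    {
        yuv->pts = frame_count++;
        return avcodec_encode_video(enc_ctx, outbuf, outbuf_size, yuv);
    }

    /* Call once when recording stops. */
    static void encoder_session_stop(void)
    {
        avcodec_close(enc_ctx);
        av_free(enc_ctx);
        enc_ctx = NULL;
    }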
