摄像头编码h264 编码avcodec_encode_video2失败,求指点
#define WIDTH 320
#define HEIGHT 240
void CFileTransferH264Dlg::OnBnClickedButton1()
{
// TODO: 在此添加控件通知处理程序代码
//打开第一个摄像头
cap = cvCreateCameraCapture(0);
//cvNamedWindow("myWindow", CV_WINDOW_AUTOSIZE);
if (!cap)
{
fprintf(stderr, "Can not open camera1.\n");
exit(-1);
}
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_WIDTH, WIDTH);
cvSetCaptureProperty(cap, CV_CAP_PROP_FRAME_HEIGHT, HEIGHT);
// m_pFrame = cvQueryFrame(cap);
// while (true)
// {
// m_pFrame = cvQueryFrame(cap);
// //展示当前帧的图片
// cvShowImage("myWindow", m_pFrame);
// char c = cvWaitKey(33);
// if (c == 27)
// {
// break;
// }
// }
// //cvReleaseCapture(&cap);//释放视频
// cvDestroyWindow("myWindow");//销毁窗口
pFrame = cvQueryFrame(cap);
// pAvCodec = NULL;
// pAvCodecContext = NULL;
// pAvFrame = NULL;
avcodec_register_all();
pAvCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
pAvCodecContext = avcodec_alloc_context3(pAvCodec);
pAvCodecContext->bit_rate = 3000000;//400000;// put sample parameters
/* resolution must be a multiple of two */
pAvCodecContext->width = pFrame->width;
pAvCodecContext->height = pFrame->height;
/* frames per second */
pAvCodecContext->time_base.num = 1;
pAvCodecContext->time_base.den = 25;
pAvCodecContext->gop_size = 10; /* emit one intra frame every ten frames */
pAvCodecContext->max_b_frames = 1;//设置B帧最大数,
pAvCodecContext->thread_count = 1;
//pAvCodecContext->thread_type = FF_THREAD_FRAME;
pAvCodecContext->pix_fmt = AV_PIX_FMT_YUV420P;
av_opt_set(pAvCodecContext->priv_data, "preset", "slow", 0);
av_opt_set(pAvCodecContext->priv_data, "tune", "zerolatency", 0);
/* open it */
if (avcodec_open2(pAvCodecContext, pAvCodec, NULL) < 0) {
fprintf(stderr, "Could not open codec\n");
return ;
}
pAvFrame = av_frame_alloc();//分配一个AVFrame并设置默认值
if (!pAvFrame)
{
fprintf(stderr, "Could not allocate video frame\n");
exit(1);
}
pAvFrame->format = pAvCodecContext->pix_fmt;
pAvFrame->width = pAvCodecContext->width;
pAvFrame->height = pAvCodecContext->height;
int ret = av_image_alloc(pAvFrame->data, pAvFrame->linesize, pAvCodecContext->width, pAvCodecContext->height, pAvCodecContext->pix_fmt, pFrame->align);
if (ret < 0) {
fprintf(stderr, "Could not allocate raw picture buffer\n");
return ;
}
CvCapture* pCapture;
IplImage * dst = 0;
CvSize dst_cvsize; //目标图像尺寸
dst_cvsize.width = 320; //pAvCodecContext->width
dst_cvsize.height = 240;
//dst = cvCreateImage(dst_cvsize, IPL_DEPTH_8U, 3); //构造目标图象
dst = cvCreateImage(dst_cvsize, pFrame->depth, pFrame->nChannels); //构造目标图象
cvResize(pFrame, dst, CV_INTER_CUBIC);
data_y = (unsigned char *)malloc(sizeof(char) * (dst->width * dst->height));
data_u = (unsigned char *)malloc(sizeof(char) * (dst->width * dst->height / 4));
data_v = (unsigned char *)malloc(sizeof(char) * (dst->width * dst->height / 4));
//转YUV
convertBGR2YUV420(dst, data_y, data_u, data_v);
//开始编码
pAvFrame->data[0] = data_y;
pAvFrame->data[1] = data_u;
pAvFrame->data[2] = data_v;
/*pAvFrame->linesize[0] = (dst->width * dst->height) / dst->height;
pAvFrame->linesize[1] = (dst->width * dst->height / 4) / (dst->height * 2);
pAvFrame->linesize[2] = (dst->width * dst->height / 4) / (dst->height * 2);*/
AVPacket nAvPkt;
av_init_packet(&nAvPkt);
nAvPkt.data = NULL; // packet data will be allocated by the encoder
nAvPkt.size = 0;
int got_output =1;
pAvFrame-》pts++;
ret = avcodec_encode_video2(pAvCodecContext, &nAvPkt, pAvFrame, &got_output); //函数返回0代表编码成功。
if (ret < 0)
{
fprintf(stderr, "Error encoding frame\n");
return ;
}
if (got_output)
{
//nSize += nAvPkt.size;
printf(" Frame length is %d ", nAvPkt.size);
printf(" Frame data is %s ", nAvPkt.data);
}
av_free_packet(&nAvPkt);
cvReleaseImage(&dst);
}
void CFileTransferH264Dlg::convertBGR2YUV420(IplImage *in, unsigned char* out_y, unsigned char* out_u, unsigned char* out_v)
{
int idx_in = 0;
int idx_out = 0;
int idx_out_y = 0;
int idx_out_u = 0;
int idx_out_v = 0;
IplImage *tmp = NULL;
int i = 0;
int j = 0;
// first, convert the input image into YCbCr
tmp = cvCreateImage(cvSize(in->width, in->height), 8, 3);
cvCvtColor(in, tmp, CV_RGB2YCrCb);
/* * widthStep = channel number * width
* if width%4 == 0 * for example, width = 352, width%4 == 0, widthStep = 3 * 352 = 1056 */;
for (j = 0; j < in->height; j += 1)
{
idx_in = j * in->widthStep;
for (i = 0; i < in->widthStep; i += 12)
{
// We use the chroma sample here, and put it into the out buffer
// take the luminance sample
out_y[idx_out_y] = tmp->imageData[idx_in + i + 0]; // Y
idx_out_y++;
out_y[idx_out_y] = tmp->imageData[idx_in + i + 3]; // Y
idx_out_y++;
out_y[idx_out_y] = tmp->imageData[idx_in + i + 6]; // Y
idx_out_y++;
out_y[idx_out_y] = tmp->imageData[idx_in + i + 9]; // Y
idx_out_y++;
if ((j % 2) == 0)
{
// take the blue-difference and red-difference chroma components sample
out_u[idx_out_u++] = tmp->imageData[idx_in + i + 1]; // Cr U
out_u[idx_out_u++] = tmp->imageData[idx_in + i + 7]; // Cr U
out_v[idx_out_v++] = tmp->imageData[idx_in + i + 2]; // Cb V
out_v[idx_out_v++] = tmp->imageData[idx_in + i + 8]; // Cb V
}
}
}
cvReleaseImage(&tmp);
}