用ffmpeg制作推流工具,实现推流系统声音和桌面到rtmp服务器

灰熊问题的最优解 2016-12-12 04:43:28
加精
能正常推流,但是画面比声音快了几秒钟,不懂怎么设置音视频同步,各位大大能帮忙看一下问题吗?

视频信息:
// Look up the H.264 encoder for the outgoing desktop-video stream.
pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!pCodec){
printf("Can not find output video encoder! (没有找到合适的编码器!)\n");
return -1;
}
m_pCodecCtx = avcodec_alloc_context3(pCodec);
// x264 consumes planar YUV 4:2:0; captured frames are converted with sws_scale
// in the encode loop before being handed to the encoder.
m_pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
// Mirror the capture dimensions so the encoder implies no scaling.
m_pCodecCtx->width = m_pIfmt_ctx->streams[m_nVideoIndex]->codec->width;
m_pCodecCtx->height = m_pIfmt_ctx->streams[m_nVideoIndex]->codec->height;
// Fixed 25 fps encoder clock (time_base = 1/25).
// NOTE(review): the encode loop stamps PTS from a frame *counter* at the input's
// nominal r_frame_rate. If the desktop capture does not actually deliver frames
// at that rate, the synthesized video clock drifts away from the sample-accurate
// audio clock — a likely cause of the reported "video ahead of audio" desync.
m_pCodecCtx->time_base.num = 1;
m_pCodecCtx->time_base.den = 25;
m_pCodecCtx->bit_rate = 300000; // 300 kbit/s — quite low for full-desktop H.264
m_pCodecCtx->gop_size = 250; // keyframe interval: 250 frames (~10 s at 25 fps)
/* Some formats want stream headers to be separate. */
if (m_pOfmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
m_pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER; // NOTE: AV_CODEC_FLAG_GLOBAL_HEADER in newer FFmpeg

// Full x264 quantizer range: 10 (near-best quality) .. 51 (worst).
m_pCodecCtx->qmin = 10;
m_pCodecCtx->qmax = 51;
// No B-frames: keeps dts == pts and minimizes latency for live streaming.
m_pCodecCtx->max_b_frames = 0;


音频信息:
// Look up the AAC encoder for the outgoing audio stream.
pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!pCodec_a){
Write_Log(LOG_INFO, "Can not find output audio encoder! (没有找到合适的编码器!)\n");
return -1;
}
m_pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
// Fixed stereo output layout.
m_pCodecCtx_a->channels = 2;
m_pCodecCtx_a->channel_layout = AV_CH_LAYOUT_STEREO;//av_get_default_channel_layout(2);
// Fixed 44.1 kHz output rate; captured audio is resampled to it with swr_convert
// in the encode loop (the commented-out form would instead follow the input rate).
m_pCodecCtx_a->sample_rate = 44100;//m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec->sample_rate;
// Take the encoder's first advertised sample format (for FFmpeg's native AAC
// encoder this is planar float, AV_SAMPLE_FMT_FLTP).
m_pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
m_pCodecCtx_a->bit_rate = 64000; // 64 kbit/s
// Audio time base is 1/sample_rate, i.e. PTS counts individual samples —
// this is the sample-accurate clock the video side should be synced against.
m_pCodecCtx_a->time_base.num = 1;
m_pCodecCtx_a->time_base.den = m_pCodecCtx_a->sample_rate;
/** Allow the use of the experimental AAC encoder */
m_pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
/* Some formats want stream headers to be separate. */
if (m_pOfmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
m_pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER; // NOTE: AV_CODEC_FLAG_GLOBAL_HEADER in newer FFmpeg


视频编码部分:
// Video encode loop: read a captured frame, decode it, convert to YUV420P,
// encode with H.264, stamp timestamps, pace against the audio clock, push to RTMP.
int nResult = 0;
int nFramecnt = 0;              // encoded-frame counter; drives the synthesized video PTS
int nDecGotFrame, nEncGotFrame;
AVFrame *pframe;

while (true)
{
    if (m_bStopThread)
        return;

    if (m_bPauseThread)
    {
        Sleep(10);
        continue;
    }

    // One stack-allocated read packet per iteration.
    // BUG FIX: the original code av_malloc'd the AVPacket and never freed the
    // struct (a leak every iteration), and also called av_free_packet() on the
    // freshly initialized packet *before* av_read_frame() — misplaced.
    AVPacket decPkt;
    av_init_packet(&decPkt);
    decPkt.data = NULL;
    decPkt.size = 0;

    if ((nResult = av_read_frame(m_pIfmt_ctx, &decPkt)) >= 0)
    {
        pframe = av_frame_alloc();
        if (!pframe) {
            av_free_packet(&decPkt);
            return;
        }
        nResult = avcodec_decode_video2(m_pIfmt_ctx->streams[decPkt.stream_index]->codec, pframe,
                                        &nDecGotFrame, &decPkt);
        if (nResult < 0) {
            av_frame_free(&pframe);
            av_free_packet(&decPkt);
            Write_Log(LOG_ERROR, "VideoEncoderThread : Decoding video failed.");
            return;
        }
        if (nDecGotFrame)
        {
            // Convert the captured frame into the encoder's YUV420P buffer.
            sws_scale(pImgConvertCtx, (const uint8_t* const*)pframe->data, pframe->linesize,
                      0, m_pCodecCtx->height, m_pFrameYUV->data, m_pFrameYUV->linesize);
            m_pFrameYUV->width = pframe->width;
            m_pFrameYUV->height = pframe->height;
            m_pFrameYUV->format = AV_PIX_FMT_YUV420P;

            // The encoded packet stays heap-allocated: PushData() appears to take
            // ownership of the pointer (it is never freed here after a push) —
            // TODO confirm against the RtmpPush implementation.
            AVPacket* pEncPkt = (AVPacket *)av_malloc(sizeof(AVPacket));
            av_init_packet(pEncPkt);
            pEncPkt->data = NULL;
            pEncPkt->size = 0;
            nResult = avcodec_encode_video2(m_pCodecCtx, pEncPkt, m_pFrameYUV, &nEncGotFrame);
            av_frame_free(&pframe);     // BUG FIX: original leaked pframe on the error path below
            if (nResult < 0)
            {
                Write_Log(LOG_ERROR, "VideoEncoderThread : Encoding video failed.");
                av_free(pEncPkt);       // BUG FIX: packet struct was leaked on this path
                av_free_packet(&decPkt);
                return;
            }
            if (nEncGotFrame == 1)
            {
                nFramecnt++;
                pEncPkt->stream_index = m_nVideoStIndex;

                // --- Timestamping ---
                // PTS is synthesized as nFramecnt * (1/r_frame_rate) in AV_TIME_BASE
                // (microsecond) units, then rescaled to the stream time base.
                // NOTE(review): this assumes the capture really delivers frames at
                // exactly r_frame_rate. If it stalls or runs fast, the video clock
                // drifts from the sample-accurate audio clock — the classic cause of
                // "video runs ahead of audio". Consider stamping from wall-clock time
                // (av_gettime() - m_nStartTime) instead of the frame counter.
                AVRational r_framerate1 = m_pIfmt_ctx->streams[m_nVideoIndex]->r_frame_rate;//{ 50, 2 };
                // Duration between 2 frames in AV_TIME_BASE units.
                int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
                pEncPkt->pts = av_rescale_q(nFramecnt*calc_duration, g_avTimeBase, m_avTimeBase);
                // BUG FIX: pts is int64_t — "%d" garbles/truncates large values.
                Write_Log(LOG_INFO, "Video EncoderThread : pts %lld.", (long long)pEncPkt->pts);
                pEncPkt->dts = pEncPkt->pts;    // no B-frames, so dts == pts
                pEncPkt->duration = av_rescale_q(calc_duration, g_avTimeBase, m_avTimeBase);
                pEncPkt->pos = -1;

                m_nVideoNextPts = nFramecnt*calc_duration; // next video PTS in AV_TIME_BASE units
                m_pRtmpPush->PushPtsData(VIDEO_TYPE, m_nVideoNextPts);

                // Pacing: sleep only while video is ahead of the wall clock AND
                // sleeping would not let it fall behind the audio thread's clock.
                int64_t pts_time = av_rescale_q(pEncPkt->pts, m_avTimeBase, g_avTimeBase);
                int64_t now_time = av_gettime() - m_nStartTime;
                if ((pts_time > now_time) && ((m_nVideoNextPts + pts_time - now_time) < m_pRtmpPush->m_nAudioNextPts))
                {
                    av_usleep(pts_time - now_time);
                    Write_Log(LOG_INFO, "Video EncoderThread : Delay %lld.", (long long)(pts_time - now_time));
                }
                m_pRtmpPush->PushData(pEncPkt); // ownership transferred (assumed)
            }
            else
            {
                av_free(pEncPkt);   // BUG FIX: empty packet struct was leaked when the encoder buffered the frame
            }
        }
        else
        {
            av_frame_free(&pframe);
        }
        av_free_packet(&decPkt);
    }
    Sleep(10);
}


音频编码部分:
// Audio encode loop: read captured audio, decode, resample into a FIFO, then
// encode fixed-size AAC frames, stamp sample-accurate timestamps, and push.
int nDecGotFrame_a = 0;
int nEncGotFrame_a = 0;

while (true)
{
    if (m_bStopThread)
    {
        return;
    }
    if (m_bPauseThread)
    {
        Sleep(10);
        continue;
    }

    // Fill the FIFO until at least one full encoder frame of samples is buffered.
    const int output_frame_size = m_pCodecCtx_a->frame_size;
    while (av_audio_fifo_size(m_avFifo) < output_frame_size)
    {
        AVFrame *pInputFrame = av_frame_alloc();
        if (!pInputFrame)
        {
            return;
        }

        AVPacket input_packet;
        av_init_packet(&input_packet);
        input_packet.data = NULL;
        input_packet.size = 0;

        if ((av_read_frame(m_pIfmt_ctx_a, &input_packet)) < 0)
        {
            av_frame_free(&pInputFrame);
            return;
        }

        if ((avcodec_decode_audio4(m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec, pInputFrame,
            &nDecGotFrame_a, &input_packet)) < 0) {
            Write_Log(LOG_ERROR, "AudioEncoderThread : Could not decode audio frame.");
            av_frame_free(&pInputFrame);
            return;
        }
        av_packet_unref(&input_packet);
        if (nDecGotFrame_a)
        {
            // Allocate a temporary buffer for the resampled samples.
            // NOTE(review): m_pConvertedInputSamples is assumed to be a uint8_t**
            // plane-pointer array owned by the class — that is what the
            // av_samples_alloc() and av_audio_fifo_write() calls imply; confirm.
            // BUG FIX: the original failure path freed via a wrong extra level of
            // indirection ((*m)[0]) and called free() on a pointer that had not
            // been (re)allocated — nothing to free when av_samples_alloc fails.
            if ((av_samples_alloc(m_pConvertedInputSamples, NULL,
                m_pCodecCtx_a->channels,
                pInputFrame->nb_samples,
                m_pCodecCtx_a->sample_fmt, 0)) < 0) {
                Write_Log(LOG_ERROR, "AudioEncoderThread : Could not allocate converted input samples.");
                av_frame_free(&pInputFrame);
                return;
            }

            if ((swr_convert(m_pAudioConvertCtx,
                m_pConvertedInputSamples, pInputFrame->nb_samples,
                (const uint8_t**)pInputFrame->extended_data, pInputFrame->nb_samples)) < 0) {
                Write_Log(LOG_ERROR, "AudioEncoderThread : Could not convert input samples.");
                av_freep(&m_pConvertedInputSamples[0]); // BUG FIX: buffer was leaked here
                av_frame_free(&pInputFrame);
                return;
            }

            if ((av_audio_fifo_realloc(m_avFifo, av_audio_fifo_size(m_avFifo) + pInputFrame->nb_samples)) < 0) {
                Write_Log(LOG_ERROR, "AudioEncoderThread : Could not reallocate m_avFifo.");
                av_freep(&m_pConvertedInputSamples[0]); // BUG FIX: buffer was leaked here
                av_frame_free(&pInputFrame);
                return;
            }

            if (av_audio_fifo_write(m_avFifo, (void **)m_pConvertedInputSamples,
                pInputFrame->nb_samples) < pInputFrame->nb_samples) {
                Write_Log(LOG_ERROR, "AudioEncoderThread : Could not write data to m_avFifo.");
                av_freep(&m_pConvertedInputSamples[0]); // BUG FIX: buffer was leaked here
                av_frame_free(&pInputFrame);
                return;
            }
            // BUG FIX: the converted-sample buffer was never released on the
            // success path — this leaked one allocation per input packet.
            // av_samples_alloc packs all planes into one buffer, so freeing
            // plane 0 releases everything.
            av_freep(&m_pConvertedInputSamples[0]);
        }
        av_frame_free(&pInputFrame);
    }

    if (av_audio_fifo_size(m_avFifo) >= output_frame_size)
    {
        AVFrame *output_frame = av_frame_alloc();
        if (!output_frame)
        {
            av_audio_fifo_free(m_avFifo);
            return;
        }

        // Pull exactly one encoder frame (or the remainder) from the FIFO.
        const int frame_size = FFMIN(av_audio_fifo_size(m_avFifo),
                                     m_pCodecCtx_a->frame_size);

        output_frame->nb_samples = frame_size;
        output_frame->channel_layout = m_pCodecCtx_a->channel_layout;
        output_frame->format = m_pCodecCtx_a->sample_fmt;
        output_frame->sample_rate = m_pCodecCtx_a->sample_rate;

        if ((av_frame_get_buffer(output_frame, 0)) < 0) {
            Write_Log(LOG_ERROR, "AudioEncoderThread : Could not allocate output frame samples.");
            av_frame_free(&output_frame);
            return;
        }

        if (av_audio_fifo_read(m_avFifo, (void **)output_frame->data, frame_size) < frame_size)
        {
            Write_Log(LOG_ERROR, "AudioEncoderThread : Could not read data from m_avFifo.");
            av_frame_free(&output_frame);
            return;
        }

        // Heap-allocated because PushData() appears to take ownership of the
        // pointer (it is never freed here after a push) — TODO confirm.
        AVPacket* output_packet = (AVPacket *)av_malloc(sizeof(AVPacket));
        av_init_packet(output_packet);
        output_packet->data = NULL;
        output_packet->size = 0;

        // Running sample count: the sample-accurate audio clock.
        // (Removed the original's redundant NULL check — output_frame was
        // already verified right after allocation.)
        m_nb_samples += output_frame->nb_samples;

        if ((avcodec_encode_audio2(m_pCodecCtx_a, output_packet,
            output_frame, &nEncGotFrame_a)) < 0) {
            Write_Log(LOG_ERROR, "AudioEncoderThread : Could not encode frame\n");
            av_frame_free(&output_frame);
            av_packet_unref(output_packet);
            av_free(output_packet); // BUG FIX: packet struct itself was leaked
            return;
        }

        if (nEncGotFrame_a) {

            // NOTE(review): stream index is hard-coded to 1; it should match the
            // index assigned to the audio stream on the output context (the video
            // thread uses m_nVideoStIndex) — confirm.
            output_packet->stream_index = 1;

            // Microseconds per audio sample, expressed in AV_TIME_BASE units.
            AVRational r_framerate1 = { m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec->sample_rate, 1 };
            int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));

            output_packet->pts = av_rescale_q(m_nb_samples*calc_duration, g_avTimeBase_a, m_avTimeBase);//ms
            // BUG FIX: pts is int64_t — "%d" garbles/truncates large values.
            Write_Log(LOG_INFO, "Audio EncoderThread : pts %lld.", (long long)output_packet->pts);
            output_packet->dts = output_packet->pts;
            output_packet->duration = output_frame->nb_samples;

            m_nAudioNextPts = m_nb_samples*calc_duration;
            m_pRtmpPush->PushPtsData(AUDIO_TYPE, m_nAudioNextPts);

            // Pacing mirror of the video thread: sleep only while audio is ahead
            // of the wall clock AND would not fall behind the video clock.
            int64_t pts_time = av_rescale_q(output_packet->pts, m_avTimeBase, g_avTimeBase_a);
            int64_t now_time = av_gettime() - m_nStartTime;
            if ((pts_time > now_time) && ((m_nAudioNextPts + pts_time - now_time) < m_pRtmpPush->m_nVideoNextPts))
            {
                av_usleep(pts_time - now_time);
                Write_Log(LOG_INFO, "Audio EncoderThread : Delay %lld.", (long long)(pts_time - now_time));
            }
            m_pRtmpPush->PushData(output_packet); // ownership transferred (assumed)
        }
        else
        {
            av_free(output_packet); // BUG FIX: leaked whenever the encoder buffered the frame
        }

        av_frame_free(&output_frame);
    }
    Sleep(10);
}
...全文
10770 15 打赏 收藏 转发到动态 举报
写回复
用AI写文章
15 条回复
切换为时间正序
请发表友善的回复…
发表回复
Airborne_76 2018-04-10
  • 打赏
  • 举报
回复
能给份完整代码学习下嘛,网上这方面资料很少 qweasdz76@live.com
赵4老师 2017-05-16
  • 打赏
  • 举报
回复
搜“RTP”
qq_22646649 2017-05-15
  • 打赏
  • 举报
回复
版主能给份代码学习下吗?谢谢!1952590621@qq.com
洋洋六号 2017-03-09
  • 打赏
  • 举报
回复
推荐看一下OBS源码,那个已经实现,你可以参考一下。
hi5681148 2017-02-24
  • 打赏
  • 举报
回复
版主能给份代码学习下吗?谢谢!313342885@qq.com
GaryCV 2016-12-28
  • 打赏
  • 举报
回复
厉害厉害,参考
快跑稻草人 2016-12-26
  • 打赏
  • 举报
回复
求一份代码,调试调试929680880@qq.com
  • 打赏
  • 举报
回复
引用 5 楼 sanshui 的回复:
简单的同步机制可以使用两个变量记录已发送的视音频帧数,例如vout,aout 视频线程处理完一帧后vout++;音频线程处理一次后aout++; 在视频线程处理开始处比较判断vout和aout的值,如果vout比aout大太多,则循环等待; 同样在音频线程处理开始处也比较判断vout和aout的值,如果aout比vout大太多也循环等待。 注意如果变量vout和aout不是类成员变量的话,需要加 volatile 修饰
因为音视频pts计算方式不一样,有可能发三次音频才发一次视频,也有可能发三次视频才发一次音频,这个同步机制也不太适用在这里
sanshui 2016-12-13
  • 打赏
  • 举报
回复
简单的同步机制可以使用两个变量记录已发送的视音频帧数,例如vout,aout 视频线程处理完一帧后vout++;音频线程处理一次后aout++; 在视频线程处理开始处比较判断vout和aout的值,如果vout比aout大太多,则循环等待; 同样在音频线程处理开始处也比较判断vout和aout的值,如果aout比vout大太多也循环等待。 注意如果变量vout和aout不是类成员变量的话,需要加 volatile 修饰
  • 打赏
  • 举报
回复
谢谢楼上版主大大
qq_35504202 2016-12-13
  • 打赏
  • 举报
回复
liahileleleeeee
line_us 2016-12-13
  • 打赏
  • 举报
回复
看看大牛怎么解决
shiter 2016-12-12
  • 打赏
  • 举报
回复
http://www.cnblogs.com/haibindev/archive/2011/12/29/2305712.html
shiter 2016-12-12
  • 打赏
  • 举报
回复
推荐一下,希望大牛给予关注 我之前看的是这个人的博客
  • 打赏
  • 举报
回复
有大大来给我解答解答吗?

5,530

社区成员

发帖
与我相关
我的任务
社区描述
C/C++ 模式及实现
社区管理员
  • 模式及实现社区
加入社区
  • 近7日
  • 近30日
  • 至今
社区公告
暂无公告

试试用AI创作助手写篇文章吧