5,530
社区成员
发帖
与我相关
我的任务
分享
// ---- Video encoder (H.264) setup ------------------------------------------
pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
if (!pCodec){
	// Consistency fix: route through Write_Log (like the audio path) instead of
	// printf, and log this fatal lookup failure at ERROR level.
	Write_Log(LOG_ERROR, "Can not find output video encoder! (没有找到合适的编码器!)\n");
	return -1;
}
m_pCodecCtx = avcodec_alloc_context3(pCodec);
if (!m_pCodecCtx){
	// avcodec_alloc_context3 can return NULL on OOM; the original dereferenced it blindly.
	Write_Log(LOG_ERROR, "Can not allocate video encoder context!\n");
	return -1;
}
m_pCodecCtx->pix_fmt = AV_PIX_FMT_YUV420P;
// Encode at the input stream's native resolution.
m_pCodecCtx->width = m_pIfmt_ctx->streams[m_nVideoIndex]->codec->width;
m_pCodecCtx->height = m_pIfmt_ctx->streams[m_nVideoIndex]->codec->height;
m_pCodecCtx->time_base.num = 1;
m_pCodecCtx->time_base.den = 25;        // fixed 25 fps output timebase
m_pCodecCtx->bit_rate = 300000;         // 300 kbit/s target
m_pCodecCtx->gop_size = 250;            // keyframe interval (~10 s at 25 fps)
/* Some formats want stream headers to be separate. */
if (m_pOfmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	m_pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;
m_pCodecCtx->qmin = 10;                 // quantizer range for rate control
m_pCodecCtx->qmax = 51;
//Optional Param
m_pCodecCtx->max_b_frames = 0;          // no B-frames: lower latency for live push
// ---- Audio encoder (AAC) setup --------------------------------------------
pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
if (!pCodec_a){
	// Fatal failure: log at ERROR level (was LOG_INFO), consistent with the
	// other error paths in this file.
	Write_Log(LOG_ERROR, "Can not find output audio encoder! (没有找到合适的编码器!)\n");
	return -1;
}
m_pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
if (!m_pCodecCtx_a){
	// avcodec_alloc_context3 can return NULL on OOM; the original dereferenced it blindly.
	Write_Log(LOG_ERROR, "Can not allocate audio encoder context!\n");
	return -1;
}
m_pCodecCtx_a->channels = 2;
m_pCodecCtx_a->channel_layout = AV_CH_LAYOUT_STEREO;//av_get_default_channel_layout(2);
m_pCodecCtx_a->sample_rate = 44100;//m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec->sample_rate;
m_pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];   // encoder's first (preferred) sample format
m_pCodecCtx_a->bit_rate = 64000;                        // 64 kbit/s target
m_pCodecCtx_a->time_base.num = 1;
m_pCodecCtx_a->time_base.den = m_pCodecCtx_a->sample_rate;  // one tick per sample
/** Allow the use of the experimental AAC encoder */
m_pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
/* Some formats want stream headers to be separate. */
if (m_pOfmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	m_pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
// ---- Video encode/push loop ------------------------------------------------
// Reads packets from the video input, decodes, rescales to YUV420P, re-encodes
// as H.264 and hands the timestamped packets to the RTMP pusher.  Runs until
// m_bStopThread is set; m_bPauseThread suspends work without exiting.
int nResult = 0;
int nFramecnt = 0;              // frames encoded so far; drives synthetic PTS
int nDecGotFrame, nEncGotFrame;
AVFrame *pframe;
AVPacket decPkt;                // stack packet: the old heap AVPacket struct leaked every iteration
while (true)
{
	if (m_bStopThread)
		return;
	if (m_bPauseThread)
	{
		Sleep(10);
		continue;
	}
	av_init_packet(&decPkt);
	decPkt.data = NULL;         // av_read_frame() fills data/size
	decPkt.size = 0;
	// NOTE(review): the original called av_free_packet() on the still-empty
	// packet *before* av_read_frame(); that no-op call has been removed.
	if ((nResult = av_read_frame(m_pIfmt_ctx, &decPkt)) >= 0)
	{
		pframe = av_frame_alloc();
		if (!pframe) {
			av_free_packet(&decPkt);
			return;
		}
		nResult = avcodec_decode_video2(m_pIfmt_ctx->streams[decPkt.stream_index]->codec, pframe,
			&nDecGotFrame, &decPkt);
		if (nResult < 0) {
			av_frame_free(&pframe);
			av_free_packet(&decPkt);
			Write_Log(LOG_ERROR, "VideoEncoderThread : Decoding video failed.");
			return;
		}
		if (nDecGotFrame)
		{
			// Convert the decoded frame into the encoder's YUV420P buffer.
			sws_scale(pImgConvertCtx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, m_pCodecCtx->height, m_pFrameYUV->data, m_pFrameYUV->linesize);
			m_pFrameYUV->width = pframe->width;
			m_pFrameYUV->height = pframe->height;
			m_pFrameYUV->format = AV_PIX_FMT_YUV420P;
			AVPacket* pEncPkt = (AVPacket *)av_malloc(sizeof(AVPacket));
			if (!pEncPkt) {
				av_frame_free(&pframe);
				av_free_packet(&decPkt);
				return;
			}
			av_init_packet(pEncPkt);
			pEncPkt->data = NULL;
			pEncPkt->size = 0;
			nResult = avcodec_encode_video2(m_pCodecCtx, pEncPkt, m_pFrameYUV, &nEncGotFrame);
			av_frame_free(&pframe);   // decoded frame is consumed either way
			if (nResult < 0)
			{
				Write_Log(LOG_ERROR, "VideoEncoderThread : Encoding video failed.");
				av_free(pEncPkt);     // pEncPkt (and pframe) leaked on this path in the original
				av_free_packet(&decPkt);
				return;
			}
			if (nEncGotFrame == 1)
			{
				nFramecnt++;
				pEncPkt->stream_index = m_nVideoStIndex;
				// Input frame rate, used to synthesize monotonically increasing PTS.
				AVRational r_framerate1 = m_pIfmt_ctx->streams[m_nVideoIndex]->r_frame_rate;//{ 50, 2 };
				// Duration between 2 frames in AV_TIME_BASE units (microseconds).
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
				pEncPkt->pts = av_rescale_q(nFramecnt*calc_duration, g_avTimeBase, m_avTimeBase);
				// %lld: pts is int64_t — "%d" in the original log call was undefined behavior.
				Write_Log(LOG_INFO, "Video EncoderThread : pts %lld.", (long long)pEncPkt->pts);
				pEncPkt->dts = pEncPkt->pts;
				pEncPkt->duration = av_rescale_q(calc_duration, g_avTimeBase, m_avTimeBase);
				pEncPkt->pos = -1;
				m_nVideoNextPts = nFramecnt*calc_duration; // general (AV_TIME_BASE) timebase
				m_pRtmpPush->PushPtsData(VIDEO_TYPE, m_nVideoNextPts);
				// Throttle: sleep until this packet's wall-clock time, but only
				// while video has not fallen behind the audio thread's progress.
				int64_t pts_time = av_rescale_q(pEncPkt->pts, m_avTimeBase, g_avTimeBase);
				int64_t now_time = av_gettime() - m_nStartTime;
				if ((pts_time > now_time) && ((m_nVideoNextPts + pts_time - now_time) < m_pRtmpPush->m_nAudioNextPts))
				{
					av_usleep(pts_time - now_time);
					Write_Log(LOG_INFO, "Video EncoderThread : Delay %lld.", (long long)(pts_time - now_time));
				}
				// NOTE(review): assumes PushData() takes ownership of pEncPkt — confirm in RtmpPush.
				m_pRtmpPush->PushData(pEncPkt);
			}
			else
			{
				// Encoder buffered the frame and produced no packet: release it
				// (the original leaked pEncPkt here).
				av_free_packet(pEncPkt);
				av_free(pEncPkt);
			}
		}
		else
		{
			av_frame_free(&pframe);
		}
		av_free_packet(&decPkt);
	}
	Sleep(10);
}
// ---- Audio encode/push loop ------------------------------------------------
// Decodes input audio, resamples it into the encoder's sample format, buffers
// samples in an AVAudioFifo until a full AAC frame is available, then encodes
// and hands the timestamped packets to the RTMP pusher.
int nDecGotFrame_a = 0;
int nEncGotFrame_a = 0;
while (true)
{
	if (m_bStopThread)
	{
		return;
	}
	if (m_bPauseThread)
	{
		Sleep(10);
		continue;
	}
	const int output_frame_size = m_pCodecCtx_a->frame_size;
	// Fill the FIFO until at least one full encoder frame of samples is buffered.
	while (av_audio_fifo_size(m_avFifo) < output_frame_size)
	{
		AVFrame *pInputFrame = av_frame_alloc();
		if (!pInputFrame)
		{
			return;
		}
		AVPacket input_packet;
		av_init_packet(&input_packet);
		input_packet.data = NULL;
		input_packet.size = 0;
		if ((av_read_frame(m_pIfmt_ctx_a, &input_packet)) < 0)
		{
			av_frame_free(&pInputFrame);
			return;
		}
		if ((avcodec_decode_audio4(m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec, pInputFrame,
			&nDecGotFrame_a, &input_packet)) < 0) {
			Write_Log(LOG_ERROR, "AudioEncoderThread : Could not decode audio frame.");
			av_packet_unref(&input_packet);   // was leaked on this path in the original
			av_frame_free(&pInputFrame);
			return;
		}
		av_packet_unref(&input_packet);
		if (nDecGotFrame_a)
		{
			if ((av_samples_alloc(m_pConvertedInputSamples, NULL,
				m_pCodecCtx_a->channels,
				pInputFrame->nb_samples,
				m_pCodecCtx_a->sample_fmt, 0)) < 0) {
				Write_Log(LOG_ERROR, "AudioEncoderThread : Could not allocate converted input samples.");
				// Nothing was allocated on failure: the original's av_freep/free
				// here released memory that was never allocated — removed.
				av_frame_free(&pInputFrame);
				return;
			}
			if ((swr_convert(m_pAudioConvertCtx,
				m_pConvertedInputSamples, pInputFrame->nb_samples,
				(const uint8_t**)pInputFrame->extended_data, pInputFrame->nb_samples)) < 0) {
				Write_Log(LOG_ERROR, "AudioEncoderThread : Could not convert input samples.");
				av_freep(&m_pConvertedInputSamples[0]);
				av_frame_free(&pInputFrame);
				return;
			}
			if ((av_audio_fifo_realloc(m_avFifo, av_audio_fifo_size(m_avFifo) + pInputFrame->nb_samples)) < 0) {
				Write_Log(LOG_ERROR, "AudioEncoderThread : Could not reallocate m_avFifo.");
				av_freep(&m_pConvertedInputSamples[0]);
				av_frame_free(&pInputFrame);
				return;
			}
			if (av_audio_fifo_write(m_avFifo, (void **)m_pConvertedInputSamples,
				pInputFrame->nb_samples) < pInputFrame->nb_samples) {
				Write_Log(LOG_ERROR, "AudioEncoderThread : Could not write data to m_avFifo.");
				av_freep(&m_pConvertedInputSamples[0]);
				av_frame_free(&pInputFrame);
				return;
			}
			// av_samples_alloc makes a single allocation anchored at [0]; release it
			// now that the FIFO holds a copy (the original leaked it every frame).
			av_freep(&m_pConvertedInputSamples[0]);
		}
		av_frame_free(&pInputFrame);
	}
	if (av_audio_fifo_size(m_avFifo) >= output_frame_size)
	{
		AVFrame *output_frame = av_frame_alloc();
		if (!output_frame)
		{
			// NOTE(review): the original freed m_avFifo here, leaving a dangling
			// member no other path clears — the FIFO's owner should release it.
			return;
		}
		// Drain exactly one encoder frame's worth of samples.
		const int frame_size = FFMIN(av_audio_fifo_size(m_avFifo),
			m_pCodecCtx_a->frame_size);
		output_frame->nb_samples = frame_size;
		output_frame->channel_layout = m_pCodecCtx_a->channel_layout;
		output_frame->format = m_pCodecCtx_a->sample_fmt;
		output_frame->sample_rate = m_pCodecCtx_a->sample_rate;
		if ((av_frame_get_buffer(output_frame, 0)) < 0) {
			Write_Log(LOG_ERROR, "AudioEncoderThread : Could not allocate output frame samples.");
			av_frame_free(&output_frame);
			return;
		}
		if (av_audio_fifo_read(m_avFifo, (void **)output_frame->data, frame_size) < frame_size)
		{
			Write_Log(LOG_ERROR, "AudioEncoderThread : Could not read data from m_avFifo.");
			av_frame_free(&output_frame);
			return;
		}
		AVPacket* output_packet = (AVPacket *)av_malloc(sizeof(AVPacket));
		if (!output_packet) {
			av_frame_free(&output_frame);
			return;
		}
		av_init_packet(output_packet);
		output_packet->data = NULL;
		output_packet->size = 0;
		// output_frame is known non-NULL here; the original's redundant check removed.
		m_nb_samples += output_frame->nb_samples;   // running total of samples sent to the encoder
		if ((avcodec_encode_audio2(m_pCodecCtx_a, output_packet,
			output_frame, &nEncGotFrame_a)) < 0) {
			Write_Log(LOG_ERROR, "AudioEncoderThread : Could not encode frame\n");
			av_frame_free(&output_frame);
			av_packet_unref(output_packet);
			av_free(output_packet);   // the packet struct itself leaked here in the original
			return;
		}
		if (nEncGotFrame_a) {
			// NOTE(review): hard-coded output stream index 1, while the video thread
			// uses m_nVideoStIndex — confirm the audio stream really is index 1.
			output_packet->stream_index = 1;
			AVRational r_framerate1 = { m_pIfmt_ctx_a->streams[m_nAudioIndex]->codec->sample_rate, 1 };
			// Duration of one sample in AV_TIME_BASE units (microseconds).
			int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));
			output_packet->pts = av_rescale_q(m_nb_samples*calc_duration, g_avTimeBase_a, m_avTimeBase);//ms
			// %lld: pts is int64_t — "%d" in the original log call was undefined behavior.
			Write_Log(LOG_INFO, "Audio EncoderThread : pts %lld.", (long long)output_packet->pts);
			output_packet->dts = output_packet->pts;
			output_packet->duration = output_frame->nb_samples;
			m_nAudioNextPts = m_nb_samples*calc_duration;
			m_pRtmpPush->PushPtsData(AUDIO_TYPE, m_nAudioNextPts);
			// Throttle: sleep until this packet's wall-clock time, but only while
			// audio has not fallen behind the video thread's progress.
			int64_t pts_time = av_rescale_q(output_packet->pts, m_avTimeBase, g_avTimeBase_a);
			int64_t now_time = av_gettime() - m_nStartTime;
			if ((pts_time > now_time) && ((m_nAudioNextPts + pts_time - now_time) < m_pRtmpPush->m_nVideoNextPts))
			{
				av_usleep(pts_time - now_time);
				Write_Log(LOG_INFO, "Audio EncoderThread : Delay %lld.", (long long)(pts_time - now_time));
			}
			// NOTE(review): assumes PushData() takes ownership of output_packet — confirm in RtmpPush.
			m_pRtmpPush->PushData(output_packet);
		}
		else {
			// Encoder buffered the samples and produced no packet: release the
			// struct (the original leaked output_packet here).
			av_free(output_packet);
		}
		av_frame_free(&output_frame);
	}
	Sleep(10);
}