如何在ffmpeg中使用滤镜技术?

柳长街 2013-06-21 03:03:08
如何在ffmpeg中使用滤镜技术?

我希望在ffmpeg中使用滤镜技术,在进行h.264编码之后,在进行滤镜处理,叠加字符,如何做啊?
本人现在已经做了一部分工作,但是到最后配置滤镜的时候出错了,就是avfilter_graph_config函数不知道哪里有问题?总是返回-22,请高手指教!

本人的qq:94643913,欢迎联系本人。

以下是本人的部分代码:
//解码一个avpacket
static int get_video_frame(VideoState *is, AVFrame *frame, int64_t *pts, AVPacket *pkt)
{
int got_picture, i; int ret = 1;
//if (packet_queue_get(&is->videoq, pkt, 1) < 0)
// return -1;
if ((i = av_read_frame(is->ic, pkt)) < 0)//如果读到文件尾部就退出
return 0;

i=avcodec_decode_video2(is->video_st->codec, frame, &got_picture, pkt);

if (got_picture==0)
;//return 0;

*pts = frame->pkt_dts;
if (*pts == AV_NOPTS_VALUE)
*pts = 0;

/*if (((is->av_sync_type == AV_SYNC_AUDIO_MASTER && is->audio_st) ||
is->av_sync_type == AV_SYNC_EXTERNAL_CLOCK) &&
(framedrop>0 || (framedrop && is->audio_st)))
{
//SDL_LockMutex(is->pictq_mutex);
if (is->frame_last_pts != AV_NOPTS_VALUE && *pts)
{
double clockdiff = get_video_clock(is) - get_master_clock(is);
double dpts = av_q2d(is->video_st->time_base) * *pts;
double ptsdiff = dpts - is->frame_last_pts;

if (fabs(clockdiff) < AV_NOSYNC_THRESHOLD &&
ptsdiff > 0 && ptsdiff < AV_NOSYNC_THRESHOLD &&
clockdiff + ptsdiff - is->frame_last_filter_delay < 0)
{
is->frame_last_dropped_pos = pkt->pos;
is->frame_last_dropped_pts = dpts;
is->frame_drops_early++;
ret = 0;
}
}
}*/
if (ret)
is->frame_last_returned_time = av_gettime() / 1000000.0;

return ret;
}

static int input_get_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterContext *ctx =(AVFilterContext *) codec->opaque;
AVFilterBufferRef *ref;
int perms = AV_PERM_WRITE;
int i, w, h, stride[AV_NUM_DATA_POINTERS];
unsigned edge;
int pixel_size;

av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);

if (codec->codec->capabilities & CODEC_CAP_NEG_LINESIZES)
perms |= AV_PERM_NEG_LINESIZES;

if (pic->buffer_hints & FF_BUFFER_HINTS_VALID)
{
if (pic->buffer_hints & FF_BUFFER_HINTS_READABLE) perms |= AV_PERM_READ;
if (pic->buffer_hints & FF_BUFFER_HINTS_PRESERVE) perms |= AV_PERM_PRESERVE;
if (pic->buffer_hints & FF_BUFFER_HINTS_REUSABLE) perms |= AV_PERM_REUSE2;
}
if (pic->reference) perms |= AV_PERM_READ | AV_PERM_PRESERVE;

w = codec->width;
h = codec->height;

if(av_image_check_size(w, h, 0, codec) || codec->pix_fmt<0)
return -1;

avcodec_align_dimensions2(codec, &w, &h, stride);
edge = codec->flags & CODEC_FLAG_EMU_EDGE ? 0 : avcodec_get_edge_width();
w += edge << 1;
h += edge << 1;

if (codec->pix_fmt != ctx->outputs[0]->format)
{
av_log(codec, AV_LOG_ERROR, "Pixel format mismatches %d %d\n", codec->pix_fmt, ctx->outputs[0]->format);
return -1;
}
if (!(ref = avfilter_get_video_buffer(ctx->outputs[0], perms, w, h)))
return -1;

//pixel_size = av_pix_fmt_descriptors[ref->format].comp[0].step_minus1 + 1;
pixel_size = av_PixFmtDescriptors[ref->format].comp[0].step_minus1 + 1;

ref->video->w = codec->width;
ref->video->h = codec->height;

for (i = 0; i < 4; i ++)
{
//unsigned hshift =(i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_w : 0;
//unsigned vshift =(i == 1 || i == 2) ? av_pix_fmt_descriptors[ref->format].log2_chroma_h : 0;
unsigned hshift =(i == 1 || i == 2) ? av_PixFmtDescriptors[ref->format].log2_chroma_w : 0;
unsigned vshift =(i == 1 || i == 2) ? av_PixFmtDescriptors[ref->format].log2_chroma_h : 0;

pic->base[i] = ref->data[i];
if (ref->data[i])
ref->data[i] += ((edge * pixel_size) >> hshift) + ((edge * ref->linesize[i]) >> vshift);

pic->data[i] = ref->data[i];
pic->linesize[i] = ref->linesize[i];
}
pic->opaque = ref;
pic->type = FF_BUFFER_TYPE_USER;
pic->reordered_opaque = codec->reordered_opaque;
pic->width = codec->width;
pic->height = codec->height;
pic->format = codec->pix_fmt;
pic->sample_aspect_ratio = codec->sample_aspect_ratio;
if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
else pic->pkt_pts = AV_NOPTS_VALUE;
return 0;
}

static void input_release_buffer(AVCodecContext *codec, AVFrame *pic)
{
memset(pic->data, 0, sizeof(pic->data));
avfilter_unref_buffer((AVFilterBufferRef *)pic->opaque);
}

static int input_reget_buffer(AVCodecContext *codec, AVFrame *pic)
{
AVFilterBufferRef *ref =(AVFilterBufferRef *) pic->opaque;

if (pic->data[0] == NULL)
{
pic->buffer_hints |= FF_BUFFER_HINTS_READABLE;
return codec->get_buffer(codec, pic);
}

if ((codec->width != ref->video->w) || (codec->height != ref->video->h) ||
(codec->pix_fmt != ref->format))
{
av_log(codec, AV_LOG_ERROR, "Picture properties changed.\n");
return -1;
}
pic->reordered_opaque = codec->reordered_opaque;

if (codec->pkt) pic->pkt_pts = codec->pkt->pts;
else pic->pkt_pts = AV_NOPTS_VALUE;
return 0;
}

static int input_init(AVFilterContext *ctx, const char *args, void *opaque)
{
FilterPriv *priv =(FilterPriv *)ctx->priv;
AVCodecContext *codec;
if (!opaque) return -1;

priv->is =(VideoState *) opaque;
codec = priv->is->video_st->codec;
codec->opaque = ctx;

if (codec->codec->capabilities & CODEC_CAP_DR1) //解码能力
{
//av_assert0(codec->flags & CODEC_FLAG_EMU_EDGE);
priv->use_dr1 = 1;
codec->get_buffer = input_get_buffer;
codec->release_buffer = input_release_buffer;
codec->reget_buffer = input_reget_buffer;
codec->thread_safe_callbacks = 1;
}
priv->frame = avcodec_alloc_frame();
return 0;
}

static void input_uninit(AVFilterContext *ctx)
{
FilterPriv *priv =(FilterPriv *) ctx->priv;
av_free(priv->frame);
}

static int input_request_frame(AVFilterLink *link)
{
FilterPriv *priv =(FilterPriv *) link->src->priv;
AVFilterBufferRef *picref;
int64_t pts = 0;
AVPacket pkt;
int ret;

//循环,解码一个avpacket
while (!(ret = get_video_frame(priv->is, priv->frame, &pts, &pkt)))
av_free_packet(&pkt);

if (ret < 0)
return -1;

if (priv->use_dr1 && priv->frame->opaque)
{
picref = avfilter_ref_buffer((AVFilterBufferRef *)priv->frame->opaque, ~0);
}
else
{
picref = avfilter_get_video_buffer(link, AV_PERM_WRITE, priv->frame->width, priv->frame->height);

av_image_copy(picref->data, picref->linesize,
(const uint8_t **)(void **)priv->frame->data, priv->frame->linesize,
(FFmpegPixFmt::PixelFormat)picref->format, priv->frame->width, priv->frame->height);
}
av_free_packet(&pkt);

avfilter_copy_frame_props(picref, priv->frame);
picref->video->sample_aspect_ratio = av_guess_sample_aspect_ratio(priv->is->ic, priv->is->video_st, priv->frame);
picref->pts = pts;

avfilter_start_frame(link, picref);
avfilter_draw_slice(link, 0, picref->video->h, 1);
avfilter_end_frame(link);
return 0;
}

static int input_query_formats(AVFilterContext *ctx)
{
FilterPriv *priv =(FilterPriv *) ctx->priv;

enum FFmpegPixFmt::PixelFormat pix_fmts[] ={(FFmpegPixFmt::PixelFormat)priv->is->video_st->codec->pix_fmt, PIX_FMT_NONE};

avfilter_set_common_pixel_formats(ctx, avfilter_make_format_list((int *)pix_fmts));
return 0;
}

static int input_config_props(AVFilterLink *link)
{
FilterPriv *priv =(FilterPriv *)link->src->priv;
AVStream *s = priv->is->video_st;

link->w = s->codec->width;
link->h = s->codec->height;
link->sample_aspect_ratio = s->sample_aspect_ratio.num ? s->sample_aspect_ratio : s->codec->sample_aspect_ratio;
link->time_base = s->time_base;

return 0;
}

AVFilterPad inPad={NULL};

AVFilterPad outPad={"default",AVMEDIA_TYPE_VIDEO,NULL,NULL,NULL,NULL,NULL,NULL,NULL,NULL,
NULL,input_request_frame,input_config_props
};

static AVFilter input_filter=
{
"ffplay_input",sizeof(FilterPriv),
input_init,input_uninit,
input_query_formats, &inPad, &outPad,"test zjr inputFilter"
};

//配置视频帧滤镜函数
static int configure_video_filters(AVFilterGraph *graph, VideoState *is, const char *vfilters)
{
static const enum FFmpegPixFmt::PixelFormat pix_fmts[]={PIX_FMT_YUV420P, PIX_FMT_NONE };
char sws_flags_str[128];
int ret=0;
char *pPixelFmt;
AVBufferSinkParams *buffersink_params = av_buffersink_params_alloc();

//定义三个滤镜上下文对象
AVFilterContext *filt_src = NULL, *filt_out = NULL, *filt_format;;

sprintf(sws_flags_str, "flags=%d", SWS_BICUBIC);
graph->scale_sws_opts = av_strdup(sws_flags_str);

//第一个参数是生成的filter(是一个source),
//第二个参数是一个AVFilter结构的实例,此实例必须由调用者自己实现,才能将帧送到graph中.
//第三个参数是要创建的fitler的名字,第四个 参数是不知道什么用,
//第五个参数是user data(调用者的私有数据),第六个参数是graph的指针
ret = avfilter_graph_create_filter(&filt_src, &input_filter, "src", NULL, is, graph);

ret = avfilter_graph_create_filter(&filt_out, avfilter_get_by_name("buffersink"),
"out", NULL, (char*)pix_fmts, graph);//

av_freep(&buffersink_params);

ret = avfilter_graph_create_filter(&filt_format, avfilter_get_by_name("format"),
"format", "yuv420p", NULL, graph);

if (ret < 0)
return ret;

//连接source和sink
//第一个参数是接在前面的filter,第二个参数是前fitler的要连接的pad的序号,第三个参数是后面的filter,
//第四个参数是后filter的要连接的pad.
if ((ret = avfilter_link(filt_format, 0, filt_out, 0)) < 0)
return ret;

if (vfilters)
{
AVFilterInOut *outputs = avfilter_inout_alloc();
AVFilterInOut *inputs = avfilter_inout_alloc();

outputs->name = av_strdup("in");
outputs->filter_ctx = filt_src;
outputs->pad_idx = 0;
outputs->next = NULL;

inputs->name = av_strdup("out");
inputs->filter_ctx = filt_format;
inputs->pad_idx = 0;
inputs->next = NULL;

if ((ret = avfilter_graph_parse(graph, vfilters, &inputs, &outputs, NULL)) < 0)
return ret;
}
else
{
if ((ret = avfilter_link(filt_src, 0, filt_format, 0)) < 0)
return ret;
}

//对graph做最后的检查
ret = avfilter_graph_config(graph, NULL);
if (ret< 0)
;//return ret;

is->out_video_filter = filt_out;

return ret;
}
...全文
325 2 打赏 收藏 转发到动态 举报
AI 作业
写回复
用AI写文章
2 条回复
切换为时间正序
请发表友善的回复…
发表回复
huxiaomin111 2013-09-12
  • 打赏
  • 举报
回复
怎么解决的啊,我也返回-22
柳长街 2013-06-28
  • 打赏
  • 举报
回复
本人已经解决了,太不容易了! 有需要整个示例程序的,请跟我联系!!我的qq:94643913

2,553

社区成员

发帖
与我相关
我的任务
社区描述
专题开发/技术/项目 多媒体/流媒体开发
社区管理员
  • 多媒体/流媒体开发社区
加入社区
  • 近7日
  • 近30日
  • 至今
社区公告
暂无公告

试试用AI创作助手写篇文章吧