
Analyzing FFmpeg Source Code from the Ground Up - av_read_frame

What av_read_frame does

/**
 * Return the next frame of a stream.
 * This function returns what is stored in the file, and does not validate
 * that what is there are valid frames for the decoder. It will split what is
 * stored in the file into frames and return one for each call. It will not
 * omit invalid data between valid frames so as to give the decoder the maximum
 * information possible for decoding.
 *
 * On success, the returned packet is reference-counted (pkt->buf is set) and
 * valid indefinitely. The packet must be freed with av_packet_unref() when
 * it is no longer needed. For video, the packet contains exactly one frame.
 * For audio, it contains an integer number of frames if each frame has
 * a known fixed size (e.g. PCM or ADPCM data). If the audio frames have
 * a variable size (e.g. MPEG audio), then it contains one frame.
 *
 * pkt->pts, pkt->dts and pkt->duration are always set to correct
 * values in AVStream.time_base units (and guessed if the format cannot
 * provide them). pkt->pts can be AV_NOPTS_VALUE if the video format
 * has B-frames, so it is better to rely on pkt->dts if you do not
 * decompress the payload.
 *
 * @return 0 if OK, < 0 on error or end of file. On error, pkt will be blank
 *         (as if it came from av_packet_alloc()).
 *
 * @note pkt will be initialized, so it may be uninitialized, but it must not
 *       contain data that needs to be freed.
 */
int av_read_frame(AVFormatContext *s, AVPacket *pkt);

The comment above is fairly elaborate; simply put, av_read_frame reads one AVPacket from an AVFormatContext.
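
As a quick illustration of typical usage, here is a minimal demux loop (a sketch with error handling trimmed; "input.mp4" is just a placeholder file name):

#include <stdio.h>
#include <inttypes.h>
#include <libavformat/avformat.h>

int main(void)
{
    AVFormatContext *fmt = NULL;
    AVPacket *pkt = av_packet_alloc();

    if (avformat_open_input(&fmt, "input.mp4", NULL, NULL) < 0)
        return 1;
    avformat_find_stream_info(fmt, NULL);

    // Each call returns one demuxed AVPacket until EOF or an error.
    while (av_read_frame(fmt, pkt) >= 0) {
        printf("stream=%d pts=%" PRId64 " size=%d\n",
               pkt->stream_index, pkt->pts, pkt->size);
        av_packet_unref(pkt);   // the packet is reference-counted, release it
    }

    av_packet_free(&pkt);
    avformat_close_input(&fmt);
    return 0;
}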

Let's also look at the supplementary note from 雷神 (Lei Xiaohua), the well-known FFmpeg blogger:

av_read_packet() reads one packet from the container. Note that what it returns always contains a whole number of frames, never half a frame. Taking a TS stream as an example, it reads a complete PES packet (a complete PES packet contains several video or audio ES packets). After the read, av_parser_parse2() extracts one video frame (or several audio frames) from it and returns. On the next loop iteration, if the data from the previous read has not been fully consumed, st = s->cur_st will not be NULL, so the code enters the av_parser_parse2() path again instead of the av_read_packet() path below. This guarantees that if one read contains N video frames (taking video as an example), calling av_read_frame() N times will not trigger another read; it keeps returning data from the first read until everything has been parsed.
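
To make the parser part concrete, here is a rough sketch of how av_parser_parse2() splits an unframed buffer into complete frames. It assumes an H.264 elementary stream; buf/buf_size stand for raw bytes already read from the input and are placeholders:

#include <libavcodec/avcodec.h>

// Feed raw, unframed bytes to a parser and count the complete frames it splits out.
static int count_frames(const uint8_t *buf, int buf_size)
{
    AVCodecParserContext *parser = av_parser_init(AV_CODEC_ID_H264);
    const AVCodec *codec  = avcodec_find_decoder(AV_CODEC_ID_H264);
    AVCodecContext *avctx = avcodec_alloc_context3(codec);
    int frames = 0;

    if (!parser || !avctx)
        return -1;

    while (buf_size > 0) {
        uint8_t *frame_data;
        int frame_size;
        // Consumes some input; once a whole frame has been assembled it is
        // returned in frame_data/frame_size, otherwise frame_size stays 0.
        int used = av_parser_parse2(parser, avctx, &frame_data, &frame_size,
                                    buf, buf_size,
                                    AV_NOPTS_VALUE, AV_NOPTS_VALUE, 0);
        buf      += used;
        buf_size -= used;
        if (frame_size > 0)
            frames++;   // roughly what av_read_frame() hands back as one AVPacket for raw streams
    }

    avcodec_free_context(&avctx);
    av_parser_close(parser);
    return frames;
}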

av_read_frame source code

It reads one AVPacket: if there is one in the internal packet buffer, it is taken from the buffer; otherwise read_frame_internal() is called to read it from the stream.

int av_read_frame(AVFormatContext *s, AVPacket *pkt)
{
    // AVFMT_FLAG_GENPTS means pts should be generated for packets during demuxing.
    // FFmpeg does not set it by default; ffplay can enable this flag via the -genpts option.
    // Most containers set pts automatically while demuxing; this flag is really meant for
    // formats that have no notion of timestamps in the container (e.g. a raw H.264 stream?).
    const int genpts = s->flags & AVFMT_FLAG_GENPTS;
    int eof = 0;
    int ret;
    AVStream *st;

    // Usually we take this path, because AVFMT_FLAG_GENPTS is not set.
    if (!genpts) {
        // Read from the buffer if it has data, otherwise read a new packet.
        // AVPackets read during avformat_find_stream_info are stored in this buffer.
        // read_frame_internal is what actually reads an AVPacket from the stream.
        ret = s->internal->packet_buffer
              ? avpriv_packet_list_get(&s->internal->packet_buffer,
                                       &s->internal->packet_buffer_end, pkt)
              : read_frame_internal(s, pkt);
        if (ret < 0)
            return ret;
        goto return_packet;
    }

    // From here on, pts is (re)generated.
    for (;;) {
        PacketList *pktl = s->internal->packet_buffer;

        if (pktl) {
            AVPacket *next_pkt = &pktl->pkt;

            // dts must not be AV_NOPTS_VALUE; pts is derived from dts.
            if (next_pkt->dts != AV_NOPTS_VALUE) {
                int wrap_bits = s->streams[next_pkt->stream_index]->pts_wrap_bits;
                // last dts seen for this stream. if any of packets following
                // current one had no dts, we will set this to AV_NOPTS_VALUE.
                int64_t last_dts = next_pkt->dts;
                av_assert2(wrap_bits <= 64);
                while (pktl && next_pkt->pts == AV_NOPTS_VALUE) {
                    // Derive pts from the dts of two consecutive AVPackets.
                    if (pktl->pkt.stream_index == next_pkt->stream_index &&
                        av_compare_mod(next_pkt->dts, pktl->pkt.dts, 2ULL << (wrap_bits - 1)) < 0) {
                        if (av_compare_mod(pktl->pkt.pts, pktl->pkt.dts, 2ULL << (wrap_bits - 1))) {
                            // not B-frame
                            next_pkt->pts = pktl->pkt.dts;
                        }
                        if (last_dts != AV_NOPTS_VALUE) {
                            // Once last dts was set to AV_NOPTS_VALUE, we don't change it.
                            last_dts = pktl->pkt.dts;
                        }
                    }
                    pktl = pktl->next;
                }
                // Add the duration at the tail.
                if (eof && next_pkt->pts == AV_NOPTS_VALUE && last_dts != AV_NOPTS_VALUE) {
                    // Fixing the last reference frame had none pts issue (For MXF etc).
                    // We only do this when
                    // 1. eof.
                    // 2. we are not able to resolve a pts value for current packet.
                    // 3. the packets for this stream at the end of the files had valid dts.
                    next_pkt->pts = last_dts + next_pkt->duration;
                }
                pktl = s->internal->packet_buffer;
            }

            /* read packet from packet buffer, if there is data */
            st = s->streams[next_pkt->stream_index];
            // pts has been resolved (not AV_NOPTS_VALUE), so use this AVPacket.
            if (!(next_pkt->pts == AV_NOPTS_VALUE && st->discard < AVDISCARD_ALL &&
                  next_pkt->dts != AV_NOPTS_VALUE && !eof)) {
                ret = avpriv_packet_list_get(&s->internal->packet_buffer,
                                             &s->internal->packet_buffer_end, pkt);
                goto return_packet;
            }
        }

        // Nothing usable in the buffer, keep reading.
        ret = read_frame_internal(s, pkt);
        if (ret < 0) {
            if (pktl && ret != AVERROR(EAGAIN)) {
                eof = 1;
                continue;
            } else
                return ret;
        }

        // Put it into the buffer so the next loop iteration can use it.
        ret = avpriv_packet_list_put(&s->internal->packet_buffer,
                                     &s->internal->packet_buffer_end,
                                     pkt, NULL, 0);
        if (ret < 0) {
            av_packet_unref(pkt);
            return ret;
        }
    }

return_packet:
    st = s->streams[pkt->stream_index];
    if ((s->iformat->flags & AVFMT_GENERIC_INDEX) && pkt->flags & AV_PKT_FLAG_KEY) {
        ff_reduce_index(s, st->index);
        av_add_index_entry(st, pkt->pos, pkt->dts, 0, 0, AVINDEX_KEYFRAME);
    }

    if (is_relative(pkt->dts))
        pkt->dts -= RELATIVE_TS_BASE;
    if (is_relative(pkt->pts))
        pkt->pts -= RELATIVE_TS_BASE;

    return ret;
}
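
The AVFMT_FLAG_GENPTS comment above can be tied back to user code: the flag is set on the AVFormatContext before opening the input, which is roughly what ffplay's -genpts option does. A minimal sketch ("input.ts" is a placeholder):

AVFormatContext *fmt = avformat_alloc_context();

// Ask the demuxing layer to generate missing pts values from dts; this matters
// mainly for formats without container-level timestamps.
fmt->flags |= AVFMT_FLAG_GENPTS;

if (avformat_open_input(&fmt, "input.ts", NULL, NULL) < 0) {
    // handle error; on failure avformat_open_input frees fmt and sets it to NULL
}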

read_frame_internal source code

It reads one AVPacket; after reading, it checks whether the stream needs parsing, parses the data if so, and writes the parsed result back into the AVPacket.

static int read_frame_internal(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, got_packet = 0;
    AVDictionary *metadata = NULL;

    // Packets may also end up in s->internal->parse_queue.
    while (!got_packet && !s->internal->parse_queue) {
        AVStream *st;

        /* read next packet */
        // Read one packet; this is the key call here.
        ret = ff_read_packet(s, pkt);
        if (ret < 0) {
            if (ret == AVERROR(EAGAIN))
                return ret;
            /* flush the parsers */
            // If a parser is needed, run it; after parsing, the AVPackets are put
            // into s->internal->parse_queue.
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->parser && st->need_parsing)
                    parse_packet(s, pkt, st->index, 1);
            }
            /* all remaining packets are now in parse_queue =>
             * really terminate parsing */
            break;
        }
        ret = 0;
        st  = s->streams[pkt->stream_index];

        st->event_flags |= AVSTREAM_EVENT_FLAG_NEW_PACKETS;

        /* update context if required */
        // Update the AVCodecContext stored in the stream; formats such as HLS and WebM may need this.
        if (st->internal->need_context_update) {
            if (avcodec_is_open(st->internal->avctx)) {
                av_log(s, AV_LOG_DEBUG, "Demuxer context update while decoder is open, closing and trying to re-open\n");
                avcodec_close(st->internal->avctx);
                st->internal->info->found_decoder = 0;
            }

            /* close parser, because it depends on the codec */
            if (st->parser && st->internal->avctx->codec_id != st->codecpar->codec_id) {
                av_parser_close(st->parser);
                st->parser = NULL;
            }

            ret = avcodec_parameters_to_context(st->internal->avctx, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }

#if FF_API_LAVF_AVCTX
FF_DISABLE_DEPRECATION_WARNINGS
            /* update deprecated public codec context */
            ret = avcodec_parameters_to_context(st->codec, st->codecpar);
            if (ret < 0) {
                av_packet_unref(pkt);
                return ret;
            }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

            st->internal->need_context_update = 0;
        }

        // Sanity-check pts vs. dts and log.
        if (pkt->pts != AV_NOPTS_VALUE &&
            pkt->dts != AV_NOPTS_VALUE &&
            pkt->pts < pkt->dts) {
            av_log(s, AV_LOG_WARNING,
                   "Invalid timestamps stream=%d, pts=%s, dts=%s, size=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size);
        }
        if (s->debug & FF_FDEBUG_TS)
            av_log(s, AV_LOG_DEBUG,
                   "ff_read_packet stream=%d, pts=%s, dts=%s, size=%d, duration=%"PRId64", flags=%d\n",
                   pkt->stream_index,
                   av_ts2str(pkt->pts),
                   av_ts2str(pkt->dts),
                   pkt->size, pkt->duration, pkt->flags);

        if (st->need_parsing && !st->parser && !(s->flags & AVFMT_FLAG_NOPARSE)) {
            st->parser = av_parser_init(st->codecpar->codec_id);
            if (!st->parser) {
                av_log(s, AV_LOG_VERBOSE, "parser not found for codec "
                       "%s, packets or times may be invalid.\n",
                       avcodec_get_name(st->codecpar->codec_id));
                /* no parser available: just output the raw packets */
                st->need_parsing = AVSTREAM_PARSE_NONE;
            } else if (st->need_parsing == AVSTREAM_PARSE_HEADERS)
                st->parser->flags |= PARSER_FLAG_COMPLETE_FRAMES;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_ONCE)
                st->parser->flags |= PARSER_FLAG_ONCE;
            else if (st->need_parsing == AVSTREAM_PARSE_FULL_RAW)
                st->parser->flags |= PARSER_FLAG_USE_CODEC_TS;
        }

        // When no parsing is needed, the packet stays in pkt rather than being moved
        // to parse_queue, so got_packet = 1.
        if (!st->need_parsing || !st->parser) {
            /* no parsing needed: we just output the packet as is */
            compute_pkt_fields(s, st, NULL, pkt, AV_NOPTS_VALUE, AV_NOPTS_VALUE);
            if ((s->iformat->flags & AVFMT_GENERIC_INDEX) &&
                (pkt->flags & AV_PKT_FLAG_KEY) && pkt->dts != AV_NOPTS_VALUE) {
                ff_reduce_index(s, st->index);
                av_add_index_entry(st, pkt->pos, pkt->dts,
                                   0, 0, AVINDEX_KEYFRAME);
            }
            got_packet = 1;
        } else if (st->discard < AVDISCARD_ALL) {
            if ((ret = parse_packet(s, pkt, pkt->stream_index, 0)) < 0)
                return ret;
            st->codecpar->sample_rate = st->internal->avctx->sample_rate;
            st->codecpar->bit_rate = st->internal->avctx->bit_rate;
            st->codecpar->channels = st->internal->avctx->channels;
            st->codecpar->channel_layout = st->internal->avctx->channel_layout;
            st->codecpar->codec_id = st->internal->avctx->codec_id;
        } else {
            /* free packet */
            av_packet_unref(pkt);
        }
        if (pkt->flags & AV_PKT_FLAG_KEY)
            st->internal->skip_to_keyframe = 0;
        if (st->internal->skip_to_keyframe) {
            av_packet_unref(pkt);
            got_packet = 0;
        }
    }

    // If nothing was produced above but parse_queue is non-empty, the packet was moved
    // into parse_queue, so fetch it from there.
    if (!got_packet && s->internal->parse_queue)
        ret = avpriv_packet_list_get(&s->internal->parse_queue, &s->internal->parse_queue_end, pkt);

    if (ret >= 0) {
        AVStream *st = s->streams[pkt->stream_index];
        int discard_padding = 0;
        if (st->internal->first_discard_sample && pkt->pts != AV_NOPTS_VALUE) {
            int64_t pts = pkt->pts - (is_relative(pkt->pts) ? RELATIVE_TS_BASE : 0);
            int64_t sample = ts_to_samples(st, pts);
            int duration = ts_to_samples(st, pkt->duration);
            int64_t end_sample = sample + duration;
            if (duration > 0 && end_sample >= st->internal->first_discard_sample &&
                sample < st->internal->last_discard_sample)
                discard_padding = FFMIN(end_sample - st->internal->first_discard_sample, duration);
        }
        if (st->internal->start_skip_samples && (pkt->pts == 0 || pkt->pts == RELATIVE_TS_BASE))
            st->internal->skip_samples = st->internal->start_skip_samples;
        if (st->internal->skip_samples || discard_padding) {
            uint8_t *p = av_packet_new_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, 10);
            if (p) {
                AV_WL32(p, st->internal->skip_samples);
                AV_WL32(p + 4, discard_padding);
                av_log(s, AV_LOG_DEBUG, "demuxer injecting skip %d / discard %d\n", st->internal->skip_samples, discard_padding);
            }
            st->internal->skip_samples = 0;
        }

        if (st->internal->inject_global_side_data) {
            for (i = 0; i < st->nb_side_data; i++) {
                AVPacketSideData *src_sd = &st->side_data[i];
                uint8_t *dst_data;

                if (av_packet_get_side_data(pkt, src_sd->type, NULL))
                    continue;

                dst_data = av_packet_new_side_data(pkt, src_sd->type, src_sd->size);
                if (!dst_data) {
                    av_log(s, AV_LOG_WARNING, "Could not inject global side data\n");
                    continue;
                }

                memcpy(dst_data, src_sd->data, src_sd->size);
            }
            st->internal->inject_global_side_data = 0;
        }
    }

    av_opt_get_dict_val(s, "metadata", AV_OPT_SEARCH_CHILDREN, &metadata);
    if (metadata) {
        s->event_flags |= AVFMT_EVENT_FLAG_METADATA_UPDATED;
        av_dict_copy(&s->metadata, metadata, 0);
        av_dict_free(&metadata);
        av_opt_set_dict_val(s, "metadata", NULL, AV_OPT_SEARCH_CHILDREN);
    }

#if FF_API_LAVF_AVCTX
    update_stream_avctx(s);
#endif

    if (s->debug & FF_FDEBUG_TS)
        av_log(s, AV_LOG_DEBUG,
               "read_frame_internal stream=%d, pts=%s, dts=%s, "
               "size=%d, duration=%"PRId64", flags=%d\n",
               pkt->stream_index,
               av_ts2str(pkt->pts),
               av_ts2str(pkt->dts),
               pkt->size, pkt->duration, pkt->flags);

    /* A demuxer might have returned EOF because of an IO error, let's
     * propagate this back to the user. */
    if (ret == AVERROR_EOF && s->pb && s->pb->error < 0 && s->pb->error != AVERROR(EAGAIN))
        ret = s->pb->error;

    return ret;
}
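
The skip/discard branch above injects AV_PKT_DATA_SKIP_SAMPLES side data, whose first 8 bytes are the number of leading samples to skip and trailing samples to discard, both little-endian. A consumer could read it back roughly like this (a sketch; note that the size parameter of av_packet_get_side_data() is size_t in FFmpeg 5.x but int in 4.x):

#include <stdio.h>
#include <inttypes.h>
#include <libavcodec/avcodec.h>
#include <libavutil/intreadwrite.h>

static void report_skip_samples(const AVPacket *pkt)
{
    size_t size;   // use int when building against FFmpeg 4.x
    uint8_t *sd = av_packet_get_side_data(pkt, AV_PKT_DATA_SKIP_SAMPLES, &size);
    if (sd && size >= 8) {
        uint32_t skip_start = AV_RL32(sd);      // samples to drop at the packet start
        uint32_t skip_end   = AV_RL32(sd + 4);  // samples to drop at the packet end
        printf("skip %" PRIu32 " leading / %" PRIu32 " trailing samples\n",
               skip_start, skip_end);
    }
}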

ff_read_packet source code

It reads one AVPacket either from raw_packet_buffer or from the AVInputFormat: if raw_packet_buffer has one, it is taken from there; otherwise it has to be read via the corresponding AVInputFormat.

int ff_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, i, err;
    AVStream *st;

#if FF_API_INIT_PACKET
FF_DISABLE_DEPRECATION_WARNINGS
    pkt->data = NULL;
    pkt->size = 0;
    av_init_packet(pkt);
FF_ENABLE_DEPRECATION_WARNINGS
#else
    av_packet_unref(pkt);
#endif

    for (;;) {
        PacketList *pktl = s->internal->raw_packet_buffer;
        const AVPacket *pkt1;

        // Are there packets cached in memory from probing?
        if (pktl) {
            st = s->streams[pktl->pkt.stream_index];
            if (s->internal->raw_packet_buffer_remaining_size <= 0)
                if ((err = probe_codec(s, st, NULL)) < 0)
                    return err;
            if (st->internal->request_probe <= 0) {
                avpriv_packet_list_get(&s->internal->raw_packet_buffer,
                                       &s->internal->raw_packet_buffer_end, pkt);
                s->internal->raw_packet_buffer_remaining_size += pkt->size;
                return 0;
            }
        }

        // Read an AVPacket via the corresponding AVInputFormat; for MP4 this reads one sample.
        ret = s->iformat->read_packet(s, pkt);
        if (ret < 0) {
            av_packet_unref(pkt);

            /* Some demuxers return FFERROR_REDO when they consume
               data and discard it (ignored streams, junk, extradata).
               We must re-call the demuxer to get the real packet. */
            if (ret == FFERROR_REDO)
                continue;
            if (!pktl || ret == AVERROR(EAGAIN))
                return ret;
            for (i = 0; i < s->nb_streams; i++) {
                st = s->streams[i];
                if (st->probe_packets || st->internal->request_probe > 0)
                    if ((err = probe_codec(s, st, NULL)) < 0)
                        return err;
                av_assert0(st->internal->request_probe <= 0);
            }
            continue;
        }

        // Make sure the packet data is reference-counted (backed by pkt->buf).
        err = av_packet_make_refcounted(pkt);
        if (err < 0) {
            av_packet_unref(pkt);
            return err;
        }

        if (pkt->flags & AV_PKT_FLAG_CORRUPT) {
            av_log(s, AV_LOG_WARNING,
                   "Packet corrupt (stream = %d, dts = %s)",
                   pkt->stream_index, av_ts2str(pkt->dts));
            if (s->flags & AVFMT_FLAG_DISCARD_CORRUPT) {
                av_log(s, AV_LOG_WARNING, ", dropping it.\n");
                av_packet_unref(pkt);
                continue;
            }
            av_log(s, AV_LOG_WARNING, ".\n");
        }

        av_assert0(pkt->stream_index < (unsigned)s->nb_streams &&
                   "Invalid stream index.\n");

        st = s->streams[pkt->stream_index];

        // Update pts_wrap_reference and pts_wrap_behavior in the AVProgram.
        // (MP4 does not use AVProgram, though.)
        if (update_wrap_reference(s, st, pkt->stream_index, pkt) && st->internal->pts_wrap_behavior == AV_PTS_WRAP_SUB_OFFSET) {
            // correct first time stamps to negative values
            if (!is_relative(st->first_dts))
                st->first_dts = wrap_timestamp(st, st->first_dts);
            if (!is_relative(st->start_time))
                st->start_time = wrap_timestamp(st, st->start_time);
            if (!is_relative(st->cur_dts))
                st->cur_dts = wrap_timestamp(st, st->cur_dts);
        }

        // If wrapping applies, recompute pts and dts according to
        // pts_wrap_reference and pts_wrap_behavior.
        pkt->dts = wrap_timestamp(st, pkt->dts);
        pkt->pts = wrap_timestamp(st, pkt->pts);

        force_codec_ids(s, st);

        /* TODO: audio: time filter; video: frame reordering (pts != dts) */
        if (s->use_wallclock_as_timestamps)
            pkt->dts = pkt->pts = av_rescale_q(av_gettime(), AV_TIME_BASE_Q, st->time_base);

        // If we are not using a packet from raw_packet_buffer, return here.
        if (!pktl && st->internal->request_probe <= 0)
            return ret;

        err = avpriv_packet_list_put(&s->internal->raw_packet_buffer,
                                     &s->internal->raw_packet_buffer_end,
                                     pkt, NULL, 0);
        if (err < 0) {
            av_packet_unref(pkt);
            return err;
        }
        pkt1 = &s->internal->raw_packet_buffer_end->pkt;
        s->internal->raw_packet_buffer_remaining_size -= pkt1->size;

        if ((err = probe_codec(s, st, pkt1)) < 0)
            return err;
    }
}
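
One detail worth calling out is the use_wallclock_as_timestamps branch near the end: it replaces pts/dts with the current wall-clock time, which is mainly useful for live sources whose timestamps are unreliable. The field is exposed as an AVOption, so it can be enabled when opening the input. A sketch (the rtsp:// URL is a placeholder):

AVFormatContext *fmt = NULL;
AVDictionary *opts = NULL;

// Maps to AVFormatContext.use_wallclock_as_timestamps, checked in ff_read_packet().
av_dict_set(&opts, "use_wallclock_as_timestamps", "1", 0);

if (avformat_open_input(&fmt, "rtsp://example/stream", NULL, &opts) < 0) {
    // handle error
}
av_dict_free(&opts);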