摘要:基本上就是對一個數據幀的描述。我理解的是一個未解碼的壓縮數據幀。
read_thread這個最關鍵的讀取線程中,逐步跟蹤,可以明確stream_component_open---> decoder_start---> video_thread--->ffplay_video_thread。這個調用過程,在解碼開始后的異步解碼線程中,調用的是ffplay_video_thread。具體可見續1。這個函數是解碼處理視頻的核心:
/* Video decoding thread (started from decoder_start via stream_component_open).
 * Pulls decoded frames with get_video_frame(), optionally runs them through an
 * avfilter graph (rebuilt whenever frame size/format/serial/filter changes),
 * then pushes each frame into the picture queue for the display thread.
 * Returns 0 on exit, AVERROR(ENOMEM) if initial allocation fails. */
static int ffplay_video_thread(void *arg) {
    FFPlayer *ffp = arg;
    VideoState *is = ffp->is;
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = is->video_st->time_base;                                   /* timestamps are in the stream's time base */
    AVRational frame_rate = av_guess_frame_rate(is->ic, is->video_st, NULL);
#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    /* Cached properties of the last configured filter graph; any mismatch
     * with the incoming frame forces a graph rebuild below. */
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = -2;                                       /* -2: never matches a real pix_fmt, forces first configure */
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
#else
    /* No filter support: report rotation once so the UI can rotate the view. */
    ffp_notify_msg2(ffp, FFP_MSG_VIDEO_ROTATION_CHANGED, ffp_get_video_rotate_degrees(ffp));
#endif
    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }
    for (;;) {
        ret = get_video_frame(ffp, frame);
        if (ret < 0)
            goto the_end;                                                      /* abort or fatal decode error */
        if (!ret)
            continue;                                                          /* no frame this round (e.g. dropped), try again */
#if CONFIG_AVFILTER
        /* Rebuild the filter graph when the frame's geometry/format, the
         * packet serial (seek), or the selected filter chain has changed. */
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec.pkt_serial
            || ffp->vf_changed
            || last_vfilter_idx != is->vfilter_idx) {
            SDL_LockMutex(ffp->vf_mutex);
            ffp->vf_changed = 0;
            av_log(NULL, AV_LOG_DEBUG, "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d ", last_w, last_h, (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial, frame->width, frame->height, (const char *)av_x_if_null(av_get_pix_fmt_name(frame->format), "none"), is->viddec.pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(ffp, graph, is, ffp->vfilters_list ? ffp->vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                // FIXME: post error
                SDL_UnlockMutex(ffp->vf_mutex);
                goto the_end;
            }
            filt_in = is->in_video_filter;
            filt_out = is->out_video_filter;
            /* Remember the configuration we just built against. */
            last_w = frame->width;
            last_h = frame->height;
            last_format = frame->format;
            last_serial = is->viddec.pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
            SDL_UnlockMutex(ffp->vf_mutex);
        }
        ret = av_buffersrc_add_frame(filt_in, frame);                          /* feed decoded frame into the graph */
        if (ret < 0)
            goto the_end;
        while (ret >= 0) {                                                     /* drain all frames the graph produces */
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;
            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec.finished = is->viddec.pkt_serial;               /* mark decoder finished for this serial */
                ret = 0;
                break;
            }
            /* Measure filter latency; discard outliers (> 1/10 of nosync threshold). */
            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;                               /* the graph may have changed the time base */
#endif
            /* duration = 1/frame_rate seconds; pts converted to seconds (NAN if unknown). */
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(ffp, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec.pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif
        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}
前面的初始化過程暫不分析,直接看for(;;)開始的這個循環,1.get_video_frame讀取一幀;2.av_buffersrc_add_frame添加幀到濾鏡緩沖中;3.queue_picture將解碼后的yuv格式幀放入圖像隊列,之后由顯示線程調用SDL進行渲染。大體是這3個步驟。
雖然前文已有介紹get_video_frame,但是太粗略了,這次仔細進去看下:
static int get_video_frame(FFPlayer *ffp, AVFrame *frame) { VideoState *is = ffp->is; int got_picture; ffp_video_statistic_l(ffp); if ((got_picture = decoder_decode_frame(ffp, &is->viddec, frame, NULL)) < 0) return -1; if (got_picture) { double dpts = NAN; if (frame->pts != AV_NOPTS_VALUE) dpts = av_q2d(is->video_st->time_base) * frame->pts; frame->sample_aspect_ratio = av_guess_sample_aspect_ratio(is->ic, is->video_st, frame); if (ffp->framedrop>0 || (ffp->framedrop && get_master_sync_type(is) != AV_SYNC_VIDEO_MASTER)) { if (frame->pts != AV_NOPTS_VALUE) { double diff = dpts - get_master_clock(is); if (!isnan(diff) && fabs(diff) < AV_NOSYNC_THRESHOLD && diff - is->frame_last_filter_delay < 0 && is->viddec.pkt_serial == is->vidclk.serial && is->videoq.nb_packets) { is->frame_drops_early++; is->continuous_frame_drops_early++; if (is->continuous_frame_drops_early > ffp->framedrop) { is->continuous_frame_drops_early = 0; } else { av_frame_unref(frame); got_picture = 0; } } } } } return got_picture; }
decoder_decode_frame毫無疑問是個關鍵,解碼frame:
/* Generic decode loop shared by video/audio/subtitle decoders.
 * Pulls packets from d->queue (handling flush markers and serial changes),
 * feeds them to the appropriate avcodec_decode_* function, and loops until a
 * frame is produced or the decoder reaches end of stream for this serial.
 * Returns <0 on abort, 0 when finished with no frame, >0 when a frame/subtitle
 * was decoded into `frame`/`sub`. */
static int decoder_decode_frame(FFPlayer *ffp, Decoder *d, AVFrame *frame, AVSubtitle *sub) {
    int got_frame = 0;
    do {
        int ret = -1;
        if (d->queue->abort_request)
            return -1;
        /* Need a fresh packet if none is pending, or the pending one belongs
         * to an outdated serial (a seek happened). */
        if (!d->packet_pending || d->queue->serial != d->pkt_serial) {
            AVPacket pkt;
            do {
                if (d->queue->nb_packets == 0)
                    SDL_CondSignal(d->empty_queue_cond);   /* wake read_thread to refill the queue */
                if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0)
                    return -1;
                if (pkt.data == flush_pkt.data) {
                    /* flush marker (e.g. after seek): reset codec state and pts prediction */
                    avcodec_flush_buffers(d->avctx);
                    d->finished = 0;
                    d->next_pts = d->start_pts;
                    d->next_pts_tb = d->start_pts_tb;
                }
            } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial); /* skip markers and stale-serial packets */
            av_packet_unref(&d->pkt);
            d->pkt_temp = d->pkt = pkt;   /* pkt_temp is the consumable cursor into the packet */
            d->packet_pending = 1;
        }
        switch (d->avctx->codec_type) {
            case AVMEDIA_TYPE_VIDEO: {
                ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    /* record decode fps statistics */
                    ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]");
                    if (ffp->decoder_reorder_pts == -1) {
                        /* default: best-effort timestamp chosen by FFmpeg */
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                    } else if (!ffp->decoder_reorder_pts) {
                        frame->pts = frame->pkt_dts;       /* explicit: use decode timestamps */
                    }
                }
                }
                break;
            case AVMEDIA_TYPE_AUDIO:
                ret = avcodec_decode_audio4(d->avctx, frame, &got_frame, &d->pkt_temp);
                if (got_frame) {
                    AVRational tb = (AVRational){1, frame->sample_rate};   /* per-sample time base */
                    if (frame->pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(frame->pts, av_codec_get_pkt_timebase(d->avctx), tb);
                    else if (d->next_pts != AV_NOPTS_VALUE)
                        frame->pts = av_rescale_q(d->next_pts, d->next_pts_tb, tb); /* predict from previous frame */
                    if (frame->pts != AV_NOPTS_VALUE) {
                        /* next frame's pts follows immediately after this one's samples */
                        d->next_pts = frame->pts + frame->nb_samples;
                        d->next_pts_tb = tb;
                    }
                }
                break;
            case AVMEDIA_TYPE_SUBTITLE:
                ret = avcodec_decode_subtitle2(d->avctx, sub, &got_frame, &d->pkt_temp);
                break;
            default:
                break;
        }
        if (ret < 0) {
            d->packet_pending = 0;                         /* decode error: discard the packet */
        } else {
            d->pkt_temp.dts = d->pkt_temp.pts = AV_NOPTS_VALUE; /* timestamps only apply to the first call */
            if (d->pkt_temp.data) {
                /* Advance past the consumed bytes; non-audio codecs always
                 * consume the whole packet in one call. */
                if (d->avctx->codec_type != AVMEDIA_TYPE_AUDIO)
                    ret = d->pkt_temp.size;
                d->pkt_temp.data += ret;
                d->pkt_temp.size -= ret;
                if (d->pkt_temp.size <= 0)
                    d->packet_pending = 0;
            } else {
                /* NULL-data (drain) packet produced nothing: decoder is done for this serial */
                if (!got_frame) {
                    d->packet_pending = 0;
                    d->finished = d->pkt_serial;
                }
            }
        }
    } while (!got_frame && !d->finished);
    return got_frame;
}
一個大循環(一直到沒有幀或者結尾為止)里面套著一個小循環和一個switch case的判斷,以及末尾的一些狀態更新。先來看小循環:
AVPacket pkt; do { if (d->queue->nb_packets == 0) SDL_CondSignal(d->empty_queue_cond); if (packet_queue_get_or_buffering(ffp, d->queue, &pkt, &d->pkt_serial, &d->finished) < 0) return -1; if (pkt.data == flush_pkt.data) { avcodec_flush_buffers(d->avctx); d->finished = 0; d->next_pts = d->start_pts; d->next_pts_tb = d->start_pts_tb; } } while (pkt.data == flush_pkt.data || d->queue->serial != d->pkt_serial); av_packet_unref(&d->pkt); d->pkt_temp = d->pkt = pkt; d->packet_pending = 1;
這里看到一個關鍵的數據結構AVPacket,表示的是音視頻的一個數據幀:
typedef struct AVPacket { /** * A reference to the reference-counted buffer where the packet data is * stored. * May be NULL, then the packet data is not reference-counted. */ AVBufferRef *buf; /** * Presentation timestamp in AVStream->time_base units; the time at which * the decompressed packet will be presented to the user. * Can be AV_NOPTS_VALUE if it is not stored in the file. * pts MUST be larger or equal to dts as presentation cannot happen before * decompression, unless one wants to view hex dumps. Some formats misuse * the terms dts and pts/cts to mean something different. Such timestamps * must be converted to true pts/dts before they are stored in AVPacket. */ int64_t pts; /** * Decompression timestamp in AVStream->time_base units; the time at which * the packet is decompressed. * Can be AV_NOPTS_VALUE if it is not stored in the file. */ int64_t dts; uint8_t *data; int size; int stream_index; /** * A combination of AV_PKT_FLAG values */ int flags; /** * Additional packet data that can be provided by the container. * Packet can contain several types of side information. */ AVPacketSideData *side_data; int side_data_elems; /** * Duration of this packet in AVStream->time_base units, 0 if unknown. * Equals next_pts - this_pts in presentation order. */ int64_t duration; int64_t pos; ///< byte position in stream, -1 if unknown #if FF_API_CONVERGENCE_DURATION /** * @deprecated Same as the duration field, but as int64_t. This was required * for Matroska subtitles, whose duration values could overflow when the * duration field was still an int. */ attribute_deprecated int64_t convergence_duration; #endif } AVPacket;
可以看到有顯示和解碼的時間戳dts pts,有在網絡流中的位置pos,實際數據指針data,大小size,所屬流的索引stream_index。基本上就是對一個數據幀的描述。我理解的是一個未解碼的壓縮數據幀。
回到小循環里看,packet_queue_get_or_buffering,讀取一個壓縮數據幀:
static int packet_queue_get_or_buffering(FFPlayer *ffp, PacketQueue *q, AVPacket *pkt, int *serial, int *finished) { assert(finished); if (!ffp->packet_buffering) return packet_queue_get(q, pkt, 1, serial); while (1) { int new_packet = packet_queue_get(q, pkt, 0, serial); if (new_packet < 0) return -1; else if (new_packet == 0) { if (q->is_buffer_indicator && !*finished) ffp_toggle_buffering(ffp, 1); new_packet = packet_queue_get(q, pkt, 1, serial); if (new_packet < 0) return -1; } if (*finished == *serial) { av_packet_unref(pkt); continue; } else break; } return 1; }
packet_queue_get是從隊列中獲取一個pkt,但是他的參數不同調用的含義并不相同:
/* return < 0 if aborted, 0 if no packet and > 0 if packet. */ static int packet_queue_get(PacketQueue *q, AVPacket *pkt, int block, int *serial) { MyAVPacketList *pkt1; int ret; SDL_LockMutex(q->mutex); for (;;) { if (q->abort_request) { ret = -1; break; } pkt1 = q->first_pkt; if (pkt1) { q->first_pkt = pkt1->next; if (!q->first_pkt) q->last_pkt = NULL; q->nb_packets--; q->size -= pkt1->pkt.size + sizeof(*pkt1); q->duration -= pkt1->pkt.duration; *pkt = pkt1->pkt; if (serial) *serial = pkt1->serial; #ifdef FFP_MERGE av_free(pkt1); #else pkt1->next = q->recycle_pkt; q->recycle_pkt = pkt1; #endif ret = 1; break; } else if (!block) { ret = 0; break; } else { SDL_CondWait(q->cond, q->mutex); } } SDL_UnlockMutex(q->mutex); return ret; }
又是個循環,如果被終止了,直接返回-1。讀取隊列(其實是個鏈表)中的第一個pkt,然后將其出隊,下一個成為第一個。如果沒讀到有2種情況,根據參數block(是否阻塞),非阻塞直接返回0,阻塞線程等待條件喚醒,條件符合喚醒后繼續執行循環,從頭開始讀取。
好吧,回來看packet_queue_get_or_buffering,開頭就是一個判斷,如果沒有開啟緩存功能(ffp->packet_buffering為0),直接按照阻塞方式讀取pkt并返回,不做任何緩沖狀態的切換。下面的while(1)開始是處理開啟了緩存功能的情況。首先進行非阻塞讀取,如果被終止,直接返回-1,否則如果沒有pkt,ffp_toggle_buffering打開緩沖提示,然后再阻塞讀取。那麼這個ffp_toggle_buffering在干什麼呢?往下跟蹤2層,是ffp_toggle_buffering_l函數:
void ffp_toggle_buffering_l(FFPlayer *ffp, int buffering_on) { if (!ffp->packet_buffering) return; VideoState *is = ffp->is; if (buffering_on && !is->buffering_on) { av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: start "); is->buffering_on = 1; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_START); } else if (!buffering_on && is->buffering_on){ av_log(ffp, AV_LOG_DEBUG, "ffp_toggle_buffering_l: end "); is->buffering_on = 0; stream_update_pause_l(ffp); ffp_notify_msg1(ffp, FFP_MSG_BUFFERING_END); } }
無論什么情況,大體都會走stream_update_pause_l,然后進行消息通知,好吧,看看stream_update_pause_l,往下走2層是stream_toggle_pause_l:
static void stream_toggle_pause_l(FFPlayer *ffp, int pause_on) { VideoState *is = ffp->is; if (is->paused && !pause_on) { is->frame_timer += av_gettime_relative() / 1000000.0 - is->vidclk.last_updated; #ifdef FFP_MERGE if (is->read_pause_return != AVERROR(ENOSYS)) { is->vidclk.paused = 0; } #endif set_clock(&is->vidclk, get_clock(&is->vidclk), is->vidclk.serial); } else { } set_clock(&is->extclk, get_clock(&is->extclk), is->extclk.serial); is->paused = is->audclk.paused = is->vidclk.paused = is->extclk.paused = pause_on; SDL_AoutPauseAudio(ffp->aout, pause_on); }
這不是暫停與恢復的調用嗎。好吧,咱們回顧一下,也就是說,讀取pkt的過程,會先讀取緩存,如果有直接返回,如果緩存讀取到的是0,也就是沒內容,那麼要阻塞在這里,同時暫停播放,那麼也即是咱們在看視頻的時候出現的緩沖等待的情況了。
回到decoder_decode_frame的小循環里。小循環的意思是循環讀取pkt:遇到全局的flush_pkt時,調用avcodec_flush_buffers清空解碼器內部緩衝并重置pts狀態;同時丟棄serial與當前隊列serial不一致的舊包(seek之后遺留的數據),直到拿到一個屬于當前播放段的真實數據包為止。也就是說flush_pkt起的是"沖刷標記"的作用。
往下繼續看小循環之后的switch case,以video的case為例:
case AVMEDIA_TYPE_VIDEO: { ret = avcodec_decode_video2(d->avctx, frame, &got_frame, &d->pkt_temp); if (got_frame) { ffp->stat.vdps = SDL_SpeedSamplerAdd(&ffp->vdps_sampler, FFP_SHOW_VDPS_AVCODEC, "vdps[avcodec]"); if (ffp->decoder_reorder_pts == -1) { frame->pts = av_frame_get_best_effort_timestamp(frame); } else if (!ffp->decoder_reorder_pts) { frame->pts = frame->pkt_dts; } } } break;
這里調用avcodec_decode_video2解碼,傳遞進入剛才的pkt,如果got_frame非0(解出了一幀),則通過SDL_SpeedSamplerAdd更新解碼幀率的統計數據,并且根據decoder_reorder_pts的設置更新frame的pts(默認取best effort timestamp)。真正的顯示要等到后面queue_picture入隊之后由顯示線程完成。
解碼的過程后續有機會再分析。現在還是有個疑問,flush_pkt到底是個什么?我上面的猜測不知道對不對。繼續找找線索吧。在ffp_global_init中:
av_init_packet(&flush_pkt); flush_pkt.data = (uint8_t *)&flush_pkt;
初始化清空,并且將他的data賦值為自己的地址。有點奇怪,繼續找:
static void packet_queue_start(PacketQueue *q) { SDL_LockMutex(q->mutex); q->abort_request = 0; packet_queue_put_private(q, &flush_pkt); SDL_UnlockMutex(q->mutex); }
在啟動隊列的時候就加入了這個特殊的pkt。結合前面小循環里的判斷可以確認:flush_pkt是一個沖刷標記——解碼線程讀到它時會調用avcodec_flush_buffers清空解碼器緩衝(隊列啟動和seek之后都會放入這個標記),然后繼續循環讀取下一個真正的數據包,而不是"讀到隊列沒東西為止"。
文章版權歸作者所有,未經允許請勿轉載,若此文章存在違規行為,您可以聯系管理員刪除。
轉載請注明本文地址:http://specialneedsforspecialkids.com/yun/66752.html
摘要:分別為音頻視頻和字母進行相關處理。向下跟蹤兩層,會發現,核心函數是。至此解碼算完了。整個過程真是粗略分析啊,對自己也很抱歉,暫時先這樣吧。 上文中說到在read_thread線程中有個關鍵函數:avformat_open_input(utils.c),應當是讀取視頻文件的,這個函數屬于ffmpeg層。這回進入到其中去看下: int avformat_open_input(AVForma...
摘要:我們下面先從讀取線程入手。無論這個循環前后干了什么,都是要走這一步,讀取數據幀。從開始,我理解的是計算出當前數據幀的時間戳后再計算出播放的起始時間到當前時間,然后看這個時間戳是否在此范圍內。 ijkplayer現在比較流行,因為工作關系,接觸了他,現在做個簡單的分析記錄吧。我這里直接跳過java層代碼,進入c層,因為大多數的工作都是通過jni調用到c層來完成的,java層的內容并不是主...
摘要:下面是,讀取頭信息頭信息。猜測網絡部分至少在一開始就應當初始化好的,因此在的過程里面找,在中找到了。就先暫時分析到此吧。 這章要簡單分析下ijkplayer是如何從文件或網絡讀取數據源的。還是read_thread函數中的關鍵點avformat_open_input函數: int avformat_open_input(AVFormatContext **ps, const char ...
摘要:初始化的過程上一篇其實并未完全分析完,這回接著來。層的函數中,最后還有的調用,走的是層的。結構體如下的和,以及,其余是狀態及的內容。整個過程是個異步的過程,并不阻塞。至于的東西,都是在層創建并填充的。 初始化的過程上一篇其實并未完全分析完,這回接著來。java層的initPlayer函數中,最后還有native_setup的調用,走的是c層的IjkMediaPlayer_native_...
閱讀 1309·2021-09-27 13:56
閱讀 2338·2019-08-26 10:35
閱讀 3497·2019-08-23 15:53
閱讀 1848·2019-08-23 14:42
閱讀 1233·2019-08-23 14:33
閱讀 3562·2019-08-23 12:36
閱讀 1947·2019-08-22 18:46
閱讀 996·2019-08-22 14:06