
Optimizing FFmpeg's avformat_find_stream_info function

Published: 2020-07-07 16:23:19  Source: Web  Reads: 5281  Author: fengyuzaitu  Category: Programming Languages

Background
    In ordinary scenarios where on-demand startup speed is not critical, you can cap how much data is probed and for how long, for example:
    pFormatContext->probesize = 500 * 1024;
    pFormatContext->max_analyze_duration = 5 * AV_TIME_BASE; // AV_TIME_BASE is FFmpeg's internal time base and represents 1 second
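
For context, a minimal sketch of where these two fields sit in a typical open sequence (the UDP URL and the bare-bones error handling are assumptions, not part of the original code):

AVFormatContext *pFormatContext = avformat_alloc_context();
// Cap the amount of data and time spent probing; set these before
// avformat_open_input()/avformat_find_stream_info() so they take effect.
pFormatContext->probesize = 500 * 1024;
pFormatContext->max_analyze_duration = 5 * AV_TIME_BASE;

if (avformat_open_input(&pFormatContext, "udp://239.0.0.1:1234", NULL, NULL) < 0)
    return -1;
if (avformat_find_stream_info(pFormatContext, NULL) < 0)
    return -1;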

Drawbacks

Capping probesize and max_analyze_duration this way does shorten the probing time, but at the cost of reliability: sometimes the stream information cannot be detected at all and playback fails. This typically happens when packets are lost on the network (for example, video carried over UDP) or when the network is complex and spans several segments.


Is the probed data reused for decoding and display?

The AVFMT_FLAG_NOBUFFER flag

By default, the packets read during probing are buffered and later reused for decoding. If you do not want the probed data to be kept for decoding/display, set the flag as follows:
pAVFormatContext->flags |= AVFMT_FLAG_NOBUFFER;
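
For clarity, the usual bit-flag idiom (a small sketch, nothing FFmpeg-specific beyond the flag name):

// Set AVFMT_FLAG_NOBUFFER so probed packets are not buffered for later decoding.
pAVFormatContext->flags |= AVFMT_FLAG_NOBUFFER;
// Clear it again if probed packets should be kept and fed to the decoder.
pAVFormatContext->flags &= ~AVFMT_FLAG_NOBUFFER;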

Code walkthrough
Inside int avformat_find_stream_info(AVFormatContext *ic, AVDictionary **options):
        if (ic->flags & AVFMT_FLAG_NOBUFFER)
            free_packet_buffer(&ic->packet_buffer, &ic->packet_buffer_end);

Problem: since this flag was not set, the I-frame data fed into the function should have been kept in the packet buffer, yet the decoder clearly reports that the PPS/SPS are missing. Where did the injected stream data actually go?


Scenario
    The on-demand start time must not exceed 1 second, and the camera's video parameters can be fixed in advance. Only the video stream is considered for now (audio will be added later); the input format is known to be video: H264 1920*1080 25fps.

Implementation plan
    This article uses FFmpeg 3.4. The referenced blog post used the much older 2.2.0, in which some functions have since been deprecated or changed, but the basic idea is unchanged:
manually specify the codec parameters instead of letting avformat_find_stream_info probe the stream format.


AVStream* CDecoder::CreateStream(AVFormatContext* pFormatContext, int nCodecType)
{
    // Create a new stream on the context and tag it with the requested media type.
    AVStream *st = avformat_new_stream(pFormatContext, NULL);
    if (!st)
        return NULL;
    st->codecpar->codec_type = (AVMediaType)nCodecType;
    return st;
}



int CDecoder::GetVideoExtraData(AVFormatContext* pFormatContext, int nVideoIndex)
{
    int type, size, flags, pos, stream_type;
    int ret = -1;
    int64_t dts;
    bool got_extradata = false;

    if (!pFormatContext || nVideoIndex < 0 || nVideoIndex > 2)
        return ret;

    // Walk the FLV tags by hand; each iteration skips the 4-byte PreviousTagSize field.
    for (;; avio_skip(pFormatContext->pb, 4)) {
        pos  = avio_tell(pFormatContext->pb);
        type = avio_r8(pFormatContext->pb);    // tag type
        size = avio_rb24(pFormatContext->pb);  // data size
        dts  = avio_rb24(pFormatContext->pb);  // timestamp, lower 24 bits
        dts |= avio_r8(pFormatContext->pb) << 24;
        avio_skip(pFormatContext->pb, 3);      // stream id, always 0

        if (0 == size)
            break;

        if (FLV_TAG_TYPE_AUDIO == type || FLV_TAG_TYPE_META == type) {
            /* Audio or metadata tags: skip them. */
            avio_seek(pFormatContext->pb, size, SEEK_CUR);
        }
        else if (type == FLV_TAG_TYPE_VIDEO) {
            /* First video tag: read the SPS/PPS info from it, then break. */
            size -= 5;
            pFormatContext->streams[nVideoIndex]->codecpar->extradata = (uint8_t*)av_malloc(size + FF_INPUT_BUFFER_PADDING_SIZE);
            if (NULL == pFormatContext->streams[nVideoIndex]->codecpar->extradata)
                break;
            memset(pFormatContext->streams[nVideoIndex]->codecpar->extradata, 0, size + FF_INPUT_BUFFER_PADDING_SIZE);
            memcpy(pFormatContext->streams[nVideoIndex]->codecpar->extradata, pFormatContext->pb->buf_ptr + 5, size);
            pFormatContext->streams[nVideoIndex]->codecpar->extradata_size = size;
            ret = 0;
            got_extradata = true;
        }
        else {
            /* Unknown tag type: something is wrong. */
            break;
        }

        if (got_extradata)
            break;
    }

    return ret;
}


int CDecoder::InitDecode(AVFormatContext *pFormatContext)
{
    int video_index = -1;
    int audio_index = -1;
    int ret = -1;

    if (!pFormatContext)
        return ret;

    /*
    Get the video stream index; if there is no video stream, create it.
    Likewise for audio.
    */
    if (0 == pFormatContext->nb_streams) {
        CreateStream(pFormatContext, AVMEDIA_TYPE_VIDEO);
        CreateStream(pFormatContext, AVMEDIA_TYPE_AUDIO);
        video_index = 0;
        audio_index = 1;
    }
    else if (1 == pFormatContext->nb_streams) {
        if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[0]->codecpar->codec_type) {
            CreateStream(pFormatContext, AVMEDIA_TYPE_AUDIO);
            video_index = 0;
            audio_index = 1;
        }
        else if (AVMEDIA_TYPE_AUDIO == pFormatContext->streams[0]->codecpar->codec_type) {
            CreateStream(pFormatContext, AVMEDIA_TYPE_VIDEO);
            video_index = 1;
            audio_index = 0;
        }
    }
    else if (2 == pFormatContext->nb_streams) {
        if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[0]->codecpar->codec_type) {
            video_index = 0;
            audio_index = 1;
        }
        else if (AVMEDIA_TYPE_VIDEO == pFormatContext->streams[1]->codecpar->codec_type) {
            video_index = 1;
            audio_index = 0;
        }
    }

    /* Error: no video stream found. */
    if (video_index != 0 && video_index != 1)
        return ret;

    // Initialize the audio codec parameters (AAC).
    pFormatContext->streams[audio_index]->codecpar->codec_id = AV_CODEC_ID_AAC;
    pFormatContext->streams[audio_index]->codecpar->sample_rate = 44100;
    pFormatContext->streams[audio_index]->codecpar->bits_per_coded_sample = 16;
    pFormatContext->streams[audio_index]->codecpar->channels = 2;
    pFormatContext->streams[audio_index]->codecpar->channel_layout = 3;      // AV_CH_LAYOUT_STEREO
    pFormatContext->streams[audio_index]->pts_wrap_bits = 32;
    pFormatContext->streams[audio_index]->time_base.den = 1000;
    pFormatContext->streams[audio_index]->time_base.num = 1;

    // Initialize the video codec parameters (H.264).
    pFormatContext->streams[video_index]->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    pFormatContext->streams[video_index]->codecpar->codec_id = AV_CODEC_ID_H264;
    pFormatContext->streams[video_index]->codecpar->format = 12;             // AV_PIX_FMT_YUVJ420P
    pFormatContext->streams[video_index]->codecpar->bits_per_raw_sample = 8;
    pFormatContext->streams[video_index]->codecpar->profile = 66;            // Baseline profile
    pFormatContext->streams[video_index]->codecpar->level = 42;              // level 4.2
    pFormatContext->streams[video_index]->codecpar->width = 1920;
    pFormatContext->streams[video_index]->codecpar->height = 1080;
    pFormatContext->streams[video_index]->codecpar->sample_aspect_ratio.num = 0;
    pFormatContext->streams[video_index]->codecpar->sample_aspect_ratio.den = 1;

    pFormatContext->streams[video_index]->pts_wrap_bits = 64;
    pFormatContext->streams[video_index]->time_base.den = 1200000;
    pFormatContext->streams[video_index]->time_base.num = 1;
    pFormatContext->streams[video_index]->avg_frame_rate.den = 1;
    pFormatContext->streams[video_index]->avg_frame_rate.num = 25;
    /* Needs to be adapted: different sources have different frame rates. 'r_frame_rate' is new in FFmpeg 2.3.3. */
    pFormatContext->streams[video_index]->r_frame_rate.num = 25;
    pFormatContext->streams[video_index]->r_frame_rate.den = 1;
    /* H.264 needs the SPS/PPS for decoding, so read it from the first video tag. */
    ret = GetVideoExtraData(pFormatContext, video_index);

    /* Update the AVFormatContext: keep only the video stream for now. */
    pFormatContext->nb_streams = 1;
    /* Empty the I/O buffer. */
    pFormatContext->pb->buf_ptr = pFormatContext->pb->buf_end;

    return ret;
}


Previous approach

AVDictionary* pOptions = NULL;
pFormatCtx->probesize = 200 * 1024;
pFormatCtx->max_analyze_duration = 3 * AV_TIME_BASE;

// Retrieve stream information
if (avformat_find_stream_info(pFormatCtx, &pOptions) < 0)
{
    return -1; // Couldn't find stream information
}


New approach

InitDecode(pFormatCtx);
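
As a rough sketch of the resulting call sequence (assuming this runs inside a CDecoder method, with the ReadStreamData callback, m_pAvioBuf and BUF_SIZE from the snippets later in this article; error handling abbreviated):

AVFormatContext* pFormatCtx = avformat_alloc_context();
// Custom I/O: FFmpeg pulls the camera data through ReadStreamData().
pFormatCtx->pb = avio_alloc_context(m_pAvioBuf, BUF_SIZE, 0, this,
                                    ReadStreamData, NULL, NULL);

if (avformat_open_input(&pFormatCtx, "", NULL, NULL) < 0)
    return -1;

// Manual stream setup instead of avformat_find_stream_info().
if (InitDecode(pFormatCtx) < 0)
    return -1;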


Caveat

The solution above allocates the extradata with av_malloc but never frees it explicitly. Whether it gets released together with pFormatCtx, so that it does not leak, is worth checking; in testing so far execution never actually reached that point.
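
For reference, the extradata is attached to streams[i]->codecpar, and the streams (including codecpar and its extradata) should be released when the context itself is closed:

// Closing the input frees the streams, their codecpar and the attached extradata.
avformat_close_input(&pFormatCtx);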


Optimization result

In testing, this cuts roughly 1200 milliseconds off the startup time.


Test results
    When probing an elementary stream (ES), avformat_open_input returns very quickly; a program stream (PS) is the exception. With av_log_set_callback configured to write the log to a file, calling avformat_open_input to probe the PS input format prints the following:
Probing mp3 score:1 size:2048
Probing mp3 score:1 size:4096
Probing mp3 score:1 size:8192
Probing mp3 score:1 size:16384
Probing h264 score:51 size:32768
Format h264 probed with size=32768 and score=51
Input #0, h264, from '':
  Duration: N/A, bitrate: N/A
    Stream #0:0, 0, 1/1200000: Video: h264 (Baseline), yuvj420p, 1920x1080, 25 fps, 0.04 tbr, 1200k tbn
deprecated pixel format used, make sure you did set range correctly
non-existing PPS 0 referenced
non-existing PPS 0 referenced
nal_unit_type: 1, nal_ref_idc: 3
non-existing PPS 0 referenced
non-existing PPS 0 referenced
decode_slice_header error
non-existing PPS 0 referenced
non-existing PPS 0 referenced
non-existing PPS 0 referenced
no frame!

Tracing the source shows that the "Probing h264 score:51 size:32768" line comes from av_probe_input_format3, which reports
name         h264
long_name    raw H.264 video
raw_codec_id 28
This means that if the AVInputFormat structure is specified explicitly, the time spent probing the stream format can be saved.
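
A small sketch of that idea (the URL is a placeholder): look the demuxer up by name and pass it to avformat_open_input so the container-format probing step is skipped entirely.

AVInputFormat *pInputFormat = av_find_input_format("h264"); // raw H.264 ES demuxer
AVFormatContext *pFormatCtx = NULL;

// With the format given explicitly, av_probe_input_format3() is not needed.
if (avformat_open_input(&pFormatCtx, "udp://239.0.0.1:1234", pInputFormat, NULL) < 0)
    return -1;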

Latency optimization for a known H.264 ES stream
Referring to http://blog.csdn.net/rain_2011_kai/article/details/7746805: if the codec ID and the frame width and height of the sender's video are already known, can FFmpeg's stream-probing interfaces simply be skipped? avformat_open_input plus avformat_find_stream_info take more than 500 milliseconds.

Manually specifying the decode format: little improvement
Debugger inspection of the AVIOContext at this point (Visual Studio watch values):
+buffer  0x00000000002cbdc0  <invalid characters in string>  unsigned char *
+buf_end 0x00000000002d3626  <invalid characters in string>  unsigned char *

pos               262349
buffer            2932160
buf_end           2962982
buffer_size       35840
buf_end - buffer  30822

Where the value of pos comes from is worth investigating. Note that
pFormatContext->pb->pos = pFormatContext->pb->buf_end;
does not even compile against the current headers, because pos is a 64-bit integer while buf_end is a byte pointer.
The relationship between the two is still not obvious from the numbers above, and even with the decode format specified manually the improvement is not significant.


One frame of data is also read here:

m_pVideoc->io_ctx = avio_alloc_context(m_pAvioBuf, BUF_SIZE, 0, this, ReadStreamData, NULL, NULL);

BUF_SIZE is set to 4*1024; whether this actually makes a difference has not been verified.
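
For completeness, a sketch of the read callback that avio_alloc_context expects; the body is hypothetical (PopStreamData stands in for whatever queue the camera data is pushed into):

// FFmpeg calls this whenever the demuxer needs more bytes; return the number
// of bytes copied into buf, or an AVERROR code such as AVERROR_EOF.
static int ReadStreamData(void* opaque, uint8_t* buf, int buf_size)
{
    CDecoder* pDecoder = static_cast<CDecoder*>(opaque);
    int nRead = pDecoder->PopStreamData(buf, buf_size); // hypothetical helper
    return nRead > 0 ? nRead : AVERROR_EOF;
}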




References

http://blog.csdn.net/STN_LCD/article/details/74935760

https://jiya.io/archives/vlc_optimize_1.html


Printing stream details with av_dump_format

Function description
        The stream format is normally probed with avformat_find_stream_info, which fills pFormatContext->streams with the correct audio/video format information. av_dump_format can then print that format information via av_log to a file or the console, which is convenient for seeing what the input actually looks like; removing the call has no effect on the rest of the program.
/**
 * Print detailed information about the input or output format, such as
 * duration, bitrate, streams, container, programs, metadata, side data,
 * codec and time base.
 *
 * @param ic        the context to analyze
 * @param index     index of the stream to dump information about
 * @param url       the URL to print, such as source or destination file
 * @param is_output Select whether the specified context is an input(0) or output(1)
 */
void av_dump_format(AVFormatContext *ic,
                    int index,
                    const char *url,
                    int is_output);

Calling code
        av_dump_format(pFormatContext, 0, "", 0);
Note: the last parameter is 0 for an input context and 1 for an output context.
With av_log_set_callback directing the log to a file, the output looks like this:
Input #0, h264, from '':
  Duration: N/A, bitrate: N/A
    Stream #0:0, 0, 1/1200000: Video: h264 (Baseline), yuvj420p, 1920x1080, 25 fps, 0.04 tbr, 1200k tbn
No total duration is printed here because the input is raw H.264 data, which carries no duration information.

Source code walkthrough
void av_dump_format(AVFormatContext *ic, int index, const char *url, int is_output)
{
    int i;
    uint8_t *printed = ic->nb_streams ? av_mallocz(ic->nb_streams) : NULL;
    if (ic->nb_streams && !printed)
        return;

    // is_output == 0 means the context is an input
    av_log(NULL, AV_LOG_INFO, "%s #%d, %s, %s '%s':\n",
           is_output ? "Output" : "Input",
           index,
           is_output ? ic->oformat->name : ic->iformat->name,
           is_output ? "to" : "from", url);
    dump_metadata(NULL, ic->metadata, "  ");

    // For input contexts, print the total duration
    if (!is_output) {
        av_log(NULL, AV_LOG_INFO, "  Duration: ");
        // If the duration field is valid (a raw H.264 stream has no usable
        // dts/pts, so its duration stays AV_NOPTS_VALUE), format it as hh:mm:ss
        if (ic->duration != AV_NOPTS_VALUE) {
            int hours, mins, secs, us;
            int64_t duration = ic->duration + (ic->duration <= INT64_MAX - 5000 ? 5000 : 0);
            secs  = duration / AV_TIME_BASE;
            us    = duration % AV_TIME_BASE;
            mins  = secs / 60;
            secs %= 60;
            hours = mins / 60;
            mins %= 60;
            av_log(NULL, AV_LOG_INFO, "%02d:%02d:%02d.%02d", hours, mins, secs,
                   (100 * us) / AV_TIME_BASE);
        } else {
            av_log(NULL, AV_LOG_INFO, "N/A");
        }
        // Print the start time if it is valid
        if (ic->start_time != AV_NOPTS_VALUE) {
            int secs, us;
            av_log(NULL, AV_LOG_INFO, ", start: ");
            secs = llabs(ic->start_time / AV_TIME_BASE);
            us   = llabs(ic->start_time % AV_TIME_BASE);
            av_log(NULL, AV_LOG_INFO, "%s%d.%06d",
                   ic->start_time >= 0 ? "" : "-",
                   secs,
                   (int) av_rescale(us, 1000000, AV_TIME_BASE));
        }
        av_log(NULL, AV_LOG_INFO, ", bitrate: ");
        if (ic->bit_rate)
            av_log(NULL, AV_LOG_INFO, "%"PRId64" kb/s", ic->bit_rate / 1000);
        else
            av_log(NULL, AV_LOG_INFO, "N/A");
        av_log(NULL, AV_LOG_INFO, "\n");
    }

    for (i = 0; i < ic->nb_chapters; i++) {
        AVChapter *ch = ic->chapters[i];
        av_log(NULL, AV_LOG_INFO, "    Chapter #%d:%d: ", index, i);
        av_log(NULL, AV_LOG_INFO,
               "start %f, ", ch->start * av_q2d(ch->time_base));
        av_log(NULL, AV_LOG_INFO,
               "end %f\n", ch->end * av_q2d(ch->time_base));
        dump_metadata(NULL, ch->metadata, "    ");
    }

    if (ic->nb_programs) {
        int j, k, total = 0;
        for (j = 0; j < ic->nb_programs; j++) {
            AVDictionaryEntry *name = av_dict_get(ic->programs[j]->metadata,
                                                  "name", NULL, 0);
            av_log(NULL, AV_LOG_INFO, "  Program %d %s\n", ic->programs[j]->id,
                   name ? name->value : "");
            dump_metadata(NULL, ic->programs[j]->metadata, "    ");
            for (k = 0; k < ic->programs[j]->nb_stream_indexes; k++) {
                dump_stream_format(ic, ic->programs[j]->stream_index[k],
                                   index, is_output);
                printed[ic->programs[j]->stream_index[k]] = 1;
            }
            total += ic->programs[j]->nb_stream_indexes;
        }
        if (total < ic->nb_streams)
            av_log(NULL, AV_LOG_INFO, "  No Program\n");
    }

    for (i = 0; i < ic->nb_streams; i++)
        if (!printed[i])
            dump_stream_format(ic, i, index, is_output);
    av_free(printed);
}
