Converting Images into a Music Video
Why convert images into a video?
Here is the background: 任性动图, the GIF-making app I built, is basically complete as far as creating and editing GIF animations goes, and now I want to add a feature that generates a video.
I actually added this feature once before, but the generated videos were far too large, so I removed it.
Now that the GIF side is basically done, I am looking into it again.
After a bit of research, there are three main approaches:
1. Use MFC's own libraries directly, with no third-party DLLs
This approach can turn images into a video and add WAV-format music to it.
Pros: no extra DLLs.
Cons: it can only produce AVI, which takes a lot of disk space, and only WAV music can be added.
A video generated this way can be converted with Format Factory (格式工厂) afterwards, which saves a lot of space.
The idea: first generate an AVI from the images. WAV music is hard to come by, so I also searched for MP3-to-WAV conversion code and got it working, but the WAV files produced that way either played no sound at all once added to the AVI, or only a short fragment, so I dropped that idea.
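I never pinned down why those converted WAV files went silent. One guess is that they were not plain PCM inside, which is the only layout the AVI code further below can handle. If you want to pre-check a WAV before feeding it in, a rough sketch along those lines could look like this (CheckPcmWav is just an illustrative helper made up for this post, not part of 任性动图):

#include <windows.h>
#include <mmsystem.h>   // WAVEFORMATEX, WAVE_FORMAT_PCM
#include <cstdio>
#include <cstring>

// Rough sanity check (made-up helper): is this a canonical PCM WAV, i.e.
// "RIFF" + "WAVE" + a "fmt " chunk whose wFormatTag is WAVE_FORMAT_PCM?
// The AddFrame() parser below assumes exactly this simple layout.
bool CheckPcmWav(const wchar_t* path)
{
    FILE* fp = NULL;
    if (_wfopen_s(&fp, path, L"rb") != 0 || fp == NULL)
        return false;

    char riff[4] = {0}, wave[4] = {0}, fmt[4] = {0};
    DWORD riffSize = 0, fmtSize = 0;
    WAVEFORMATEX wf = {0};

    bool ok = fread(riff, 1, 4, fp) == 4 && memcmp(riff, "RIFF", 4) == 0
           && fread(&riffSize, 4, 1, fp) == 1
           && fread(wave, 1, 4, fp) == 4 && memcmp(wave, "WAVE", 4) == 0
           && fread(fmt,  1, 4, fp) == 4 && memcmp(fmt,  "fmt ", 4) == 0
           && fread(&fmtSize, 4, 1, fp) == 1
           && fread(&wf, (fmtSize < sizeof(wf) ? fmtSize : sizeof(wf)), 1, fp) == 1
           && wf.wFormatTag == WAVE_FORMAT_PCM;   // compressed or extensible formats would need extra handling

    fclose(fp);
    return ok;
}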
Even so, after weighing the options several times I still ended up choosing this approach.
The concrete plan:
Pick the images and a WAV music file, and combine them into an AVI video.
As it happens, I found a piece of source code online, but it has two problems on the audio side:
1. The music cannot loop.
2. At frame rates above 10 there is noise, sometimes severe.
At first I assumed looping the music was just a matter of tweaking some parameter, but I could not find anything about it anywhere. Digging further into the code, I realized the audio is also written into the AVI file chunk by chunk, so looping simply means detecting when the music has been fully written and then seeking back to its beginning to write it again.
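Stripped of everything else, the loop-back idea boils down to this (a simplified sketch that reuses the member names of the full code further below; the real code computes the final chunk from pEndAudioData instead of pAudioData):

// Simplified sketch of the loop-back logic, not a verbatim excerpt.
long nStep = pWaveFormat->nSamplesPerSec * pWaveFormat->nBlockAlign / nRate;   // 1/nRate seconds of audio per video frame
if (nAudioDataLeft > nStep) {
    AVIStreamWrite(*pAudioStream, nAudioFrameCount++, 1, pAudioData, nStep, 0, 0, 0);
    pAudioData     += nStep;
    nAudioDataLeft -= nStep;
} else {
    // this pass of the music is finished: write the tail, then rewind to the start of the PCM data
    AVIStreamWrite(*pAudioStream, nAudioFrameCount++, 1, pAudioData, nAudioDataLeft, 0, 0, 0);
    pAudioData     = (BYTE*)pWaveData;
    nAudioDataLeft = *pWaveDataSize;
}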
The modified code is shown further below, after a note about the music format. It implements the looping, but at frame rates above 10 there is still noise. I have not figured out where the problem lies; if you know, please tell me.
Because only WAV-format music can be added, I also looked into MP3-to-WAV conversion code. I did not find a good encapsulation; it was all loose C code and felt too messy, and it is not my focus anyway, so I simply downloaded a small MP3-to-WAV converter and integrated it into 任性动图 as a plug-in. If you cannot find WAV music, just convert an MP3 with that little tool and use the resulting WAV as the background audio.
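How the converter is wired in does not matter to the AVI code itself. For reference, launching an external tool like that and waiting for it can be as simple as the sketch below; the executable name and arguments are placeholders, not the actual plug-in:

#include <windows.h>
#include <shellapi.h>

// Hypothetical example: run an external MP3-to-WAV converter and wait for it to finish.
// The executable name and arguments here are placeholders, not the real plug-in.
bool RunExternalConverter(const wchar_t* exePath, const wchar_t* args)
{
    SHELLEXECUTEINFOW sei = { sizeof(sei) };
    sei.fMask = SEE_MASK_NOCLOSEPROCESS;          // we want a process handle to wait on
    sei.lpVerb = L"open";
    sei.lpFile = exePath;                         // e.g. L"plugins\\mp3towav.exe"
    sei.lpParameters = args;                      // e.g. L"input.mp3 output.wav"
    sei.nShow = SW_SHOWNORMAL;

    if (!ShellExecuteExW(&sei) || sei.hProcess == NULL)
        return false;

    WaitForSingleObject(sei.hProcess, INFINITE);  // block until the converter exits
    CloseHandle(sei.hProcess);
    return true;
}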
/**
 * bmpFileName : BMP file full name
 * time        : picture delay time
 */
bool CAVIFile::AddFrame(CString bmpFileName, int time, bool isNewWave)
{
    void* pBmpFileData = 0;
    HRESULT hr = AVIERR_OK;
    BITMAPFILEHEADER BmpFile1Header;
    BITMAPINFO* pBmp1Info = 0;
    DWORD nBmp1DataSize = 0;
    void* pBmp1Data = 0;
    FILE* pBmpFile1 = 0;

    _wfopen_s(&pBmpFile1, bmpFileName, _T("rb"));
    fread(&BmpFile1Header, sizeof(BmpFile1Header), 1, pBmpFile1);      // read the BITMAPFILEHEADER
    pBmpFileData = new BYTE[BmpFile1Header.bfSize];
    memcpy(pBmpFileData, &BmpFile1Header, sizeof(BmpFile1Header));     // copy the file header into pBmpFileData
    // read the rest of the BMP into pBmpFileData
    fread(((BYTE*)pBmpFileData) + sizeof(BmpFile1Header), BmpFile1Header.bfSize - sizeof(BmpFile1Header), 1, pBmpFile1);
    fclose(pBmpFile1);
    pBmp1Info = (BITMAPINFO*)(((BYTE*)pBmpFileData) + sizeof(BmpFile1Header)); // BITMAPINFO pointer, used by AVIStreamSetFormat
    nBmp1DataSize = BmpFile1Header.bfSize - BmpFile1Header.bfOffBits;
    pBmp1Data = ((BYTE*)pBmpFileData) + BmpFile1Header.bfOffBits;      // pointer to the pixel data, used when writing frames

    // read the wave file
    DWORD* pWaveFormatSize = 0;
    WAVEFORMATEX* pWaveFormat = 0;
    DWORD* pWaveDataSize = 0;
    DWORD dwTemp = 0;
    if (!musicPath.IsEmpty()) {
        if (pWaveFileData == NULL) {
            FILE* pWaveFile = NULL;
            _wfopen_s(&pWaveFile, musicPath, _T("rb"));
            fread(&dwTemp, sizeof(dwTemp), 1, pWaveFile);   // "RIFF"
            fread(&dwTemp, sizeof(dwTemp), 1, pWaveFile);   // RIFF chunk size
            pWaveFileData = new BYTE[dwTemp];
            fread(pWaveFileData, dwTemp, 1, pWaveFile);
            fclose(pWaveFile);
        }
        pWaveFormatSize = (DWORD*)(((BYTE*)pWaveFileData) + 8);                 // size of the "fmt " chunk
        pWaveFormat = (WAVEFORMATEX*)(((BYTE*)pWaveFormatSize) + 4);            // WAVEFORMATEX
        pWaveDataSize = (DWORD*)(((BYTE*)pWaveFormat) + *pWaveFormatSize + 4);  // size of the "data" chunk
        pWaveData = ((BYTE*)pWaveDataSize) + 4;                                 // start of the PCM samples

        // loop playback: if the read pointer has run past the end, rewind to the start of the PCM data
        if (pEndAudioData < pAudioData) {
            pEndAudioData = (BYTE*)pWaveFileData + dwTemp - 1;
            pAudioData = (BYTE*)pWaveData;
            nAudioDataLeft = *pWaveDataSize;
        }
    }

    if (nFrames == 0) {
        hr = AVIFileOpen(&pAviFile, FName, OF_WRITE | OF_CREATE, NULL);
        if (hr != AVIERR_OK) {
            return false;
        }
        VideoStreamInfo.fccType = streamtypeVIDEO;
        VideoStreamInfo.fccHandler = 0;
        VideoStreamInfo.dwFlags = 0;
        VideoStreamInfo.dwCaps = 0;
        VideoStreamInfo.wPriority = 0;
        VideoStreamInfo.wLanguage = 0;
        VideoStreamInfo.dwScale = 1;
        VideoStreamInfo.dwRate = nRate;
        VideoStreamInfo.dwStart = 0;
        VideoStreamInfo.dwLength = 2;
        VideoStreamInfo.dwInitialFrames = 0;
        VideoStreamInfo.dwSuggestedBufferSize = 0x100000;
        VideoStreamInfo.dwQuality = -1;
        VideoStreamInfo.dwSampleSize = 0;
        VideoStreamInfo.rcFrame.left = 0;
        VideoStreamInfo.rcFrame.top = 0;
        VideoStreamInfo.rcFrame.right = 320;
        VideoStreamInfo.rcFrame.bottom = 240;
        VideoStreamInfo.dwEditCount = 0;
        VideoStreamInfo.dwFormatChangeCount = 0;
        CString szTestVideoName = _T("Test Video Stream");
        wcsncpy_s(VideoStreamInfo.szName, sizeof(VideoStreamInfo.szName) / sizeof(WCHAR),
                  szTestVideoName, sizeof(VideoStreamInfo.szName) / sizeof(WCHAR) - 1);

        // create the video stream and set the video format
        hr = AVIFileCreateStream(pAviFile, &pVideoStream, &VideoStreamInfo);
        if (hr != AVIERR_OK) {
            return false;
        }
        hr = AVIStreamSetFormat(pVideoStream, 0, pBmp1Info, sizeof(BITMAPINFO));
        if (hr != AVIERR_OK) {
            return false;
        }

        // create the audio stream and set the audio format
        if (!musicPath.IsEmpty()) {
            pAudioStream = new PAVISTREAM[sizeof(PAVISTREAM)];
            AudioStreamInfo = new AVISTREAMINFO[sizeof(AVISTREAMINFO)];
            // set AudioStreamInfo
            AudioStreamInfo->fccType = streamtypeAUDIO;
            AudioStreamInfo->fccHandler = WAVE_FORMAT_PCM;
            AudioStreamInfo->dwFlags = 0;
            AudioStreamInfo->dwCaps = 0;
            AudioStreamInfo->wPriority = 0;
            AudioStreamInfo->wLanguage = 0;
            AudioStreamInfo->dwScale = 1;
            AudioStreamInfo->dwRate = pWaveFormat->nSamplesPerSec;
            AudioStreamInfo->dwStart = 0;
            AudioStreamInfo->dwLength = 2;
            AudioStreamInfo->dwInitialFrames = 0;
            AudioStreamInfo->dwSuggestedBufferSize = 0x100000;
            AudioStreamInfo->dwQuality = -1;
            AudioStreamInfo->dwSampleSize = pWaveFormat->nBlockAlign;
            AudioStreamInfo->rcFrame.left = 0;
            AudioStreamInfo->rcFrame.top = 0;
            AudioStreamInfo->rcFrame.right = 0;
            AudioStreamInfo->rcFrame.bottom = 0;
            AudioStreamInfo->dwEditCount = 0;
            AudioStreamInfo->dwFormatChangeCount = 0;
            CString szTestAudioName = _T("Test Audio Stream");
            wcsncpy_s(AudioStreamInfo->szName, sizeof(AudioStreamInfo->szName) / sizeof(WCHAR),
                      szTestAudioName, sizeof(AudioStreamInfo->szName) / sizeof(WCHAR) - 1);

            hr = AVIFileCreateStream(pAviFile, pAudioStream, AudioStreamInfo);
            if (hr != AVIERR_OK) {
                return false;
            }
            hr = AVIStreamSetFormat(*pAudioStream, 0, pWaveFormat, *pWaveFormatSize);
            if (hr != AVIERR_OK) {
                return false;
            }
            pAudioData = (BYTE*)pWaveData;
            nAudioDataLeft = *pWaveDataSize;
            pEndAudioData = (BYTE*)pWaveFileData + dwTemp;
        }
    }
    //else if (isNewWave) {
    //    if (!musicPath.IsEmpty()) {
    //        pAudioData = (BYTE*)pWaveData;
    //        nAudioDataLeft = *pWaveDataSize;
    //        pEndAudioData = (BYTE*)pWaveFileData + dwTemp - 1;
    //        //nAudioFrameCount = 0;
    //    }
    //}

    for (int i = 0; i < time * 3; i++) {
        // write video: the first copy of a picture is a key frame; the repeats are written as zero-length samples
        if (i == 0) {
            hr = AVIStreamWrite(pVideoStream, nVideoFrameCount++, 1, pBmp1Data, nBmp1DataSize, AVIIF_KEYFRAME, 0, 0);
            if (hr != AVIERR_OK) {
                return false;
            }
        }
        else {
            hr = AVIStreamWrite(pVideoStream, nVideoFrameCount++, 1, pBmp1Data, 0, 0, 0, 0);
            if (hr != AVIERR_OK) {
                return false;
            }
        }

        //double time = 1.0 / nRate;
        // when nRate > 10, the audio sometimes turns into noise
        {
            // write audio
            if (!musicPath.IsEmpty()) {
                long nStep = pWaveFormat->nSamplesPerSec * pWaveFormat->nBlockAlign / (nRate);
                if (nAudioDataLeft > nStep) {
                    hr = AVIStreamWrite(*pAudioStream, nAudioFrameCount++, 1, pAudioData, nStep, 0, 0, 0);
                }
                else {
                    // pEndAudioData - nAudioDataLeft: the tail of the current pass of the music.
                    // Using pAudioData as the argument here made part of the audio come out as noise.
                    hr = AVIStreamWrite(*pAudioStream, nAudioFrameCount++, 1, pEndAudioData - nAudioDataLeft, nAudioDataLeft, 0, 0, 0);
                }
                if (hr != AVIERR_OK) {
                    break;
                }
                if (nAudioDataLeft < nStep) {
                    // the music has been written once in full: rewind so it loops
                    pAudioData = (BYTE*)pWaveData;
                    nAudioDataLeft = *pWaveDataSize;
                }
                else {
                    pAudioData += nStep;
                    nAudioDataLeft -= nStep;
                }
            }
        }
    }

    delete[] (BYTE*)pBmpFileData;   // release the BMP buffer read at the top of the function
    nFrames++;
    return true;
}
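For completeness, calling code would look roughly like the following. The setters are stand-ins for however CAVIFile actually exposes FName, nRate and musicPath, so treat this purely as a usage sketch:

// Hypothetical usage of CAVIFile; the setter names are placeholders, and
// AVIFileInit/AVIFileRelease are assumed to be handled inside the class.
std::vector<CString> bmpFiles;   // one BMP per GIF frame, filled elsewhere
std::vector<int>     delays;     // matching per-frame delays, filled elsewhere

CAVIFile avi;
avi.SetFileName(_T("D:\\out\\result.avi"));   // FName
avi.SetRate(10);                              // nRate: above 10 the noise problem shows up
avi.SetMusic(_T("D:\\music\\bg.wav"));        // musicPath: must be a plain PCM WAV

for (size_t i = 0; i < bmpFiles.size(); i++) {
    if (!avi.AddFrame(bmpFiles[i], delays[i], false))
        break;
}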
2. OpenCV
This is the simplest way to generate a video, provided the environment is already set up.
Cons: it only produces the video; music cannot be added.
void make_video(string dir, string videoPath, int frameRate)
{
    vector<string> fileset;
    Size frameSize;
    VideoWriter writer;
    findFile(dir, fileset);                       // collect the image file names under dir
    vector<string>::iterator it = fileset.begin();
    string filename;
    while (it != fileset.end())
    {
        cout << "---------------------------------------------" << endl;
        filename = dir + *it;
        cout << filename << endl;
        it++;
        Mat frame;
        frame = imread(filename);                 // read the image
        if (!writer.isOpened())
        {
            frameSize.width = frame.cols;
            frameSize.height = frame.rows;
            if (!writer.open(videoPath, CV_FOURCC('M', 'J', 'P', 'G') /*CV_FOURCC('D', 'I', 'V', 'X')*/, frameRate, frameSize, true))
            {
                cout << "open writer error..." << endl;
                return;
            }
        }
        // write the frame into the video
        writer.write(frame);
        // show it
        // imshow("video", frame);
        waitKey(frameRate);
    }
    return;
}
The OpenCV build I compiled takes more than 4 GB of disk space. That said, the VideoWriter class is very well encapsulated: write the frames in one by one and you get a video. The CV_FOURCC('M', 'J', 'P', 'G') parameter does need tuning, though. I only got a video out after setting it to -1 and choosing the video format manually, and I still have not found out why the other FOURCC values fail.
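That fallback corresponds to a call like the one below; with the VFW backend on Windows, passing -1 as the FOURCC makes OpenCV pop up a codec-selection dialog so you can pick the format by hand:

// Variant of the open call above: let Windows ask which codec to use instead of hard-coding a FOURCC.
if (!writer.open(videoPath, -1, frameRate, frameSize, true)) {
    cout << "open writer error..." << endl;
    return;
}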
3. FFmpeg
The professional audio/video library.
Cons: the DLLs are on the large side and there is no nice high-level wrapper. What I want to do is simple, but doing it with FFmpeg is quite a hassle if you are not familiar with the library.
Pros: the generated videos take little space.
Below is the image-to-video source code I found online, together with my improvements. The changes mainly fix memory leaks and let you set the actual number of images stored in the video: a vector container records the addresses of the allocated image buffers so they can be freed accurately, and the image data that actually gets written is set from it.
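As a side note on the memory handling: an alternative to remembering the raw new[] addresses is to let the containers own the buffers, for example like this (just a design sketch, not what the code below does):

#include <vector>
#include <windows.h>   // BYTE

// Design-note sketch: the containers own the image buffers, so everything is freed
// automatically when imgBuffers goes out of scope.
std::vector<std::vector<BYTE>> imgBuffers;   // one owned copy of each BMP file
std::vector<BYTE*>             imgPixelPtrs; // pointer to the pixel data inside each buffer (headers skipped)

void AddImage(const BYTE* fileData, int fileLen, int pixelOffset /* = bfOffBits */)
{
    imgBuffers.push_back(std::vector<BYTE>(fileData, fileData + fileLen));
    // the inner buffer's address stays stable even if imgBuffers itself reallocates
    imgPixelPtrs.push_back(imgBuffers.back().data() + pixelOffset);
}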
#define _AFXDLL
#include <afx.h>      // CFile / CString
#include <stdio.h>
#ifdef __cplusplus
#include <vector>
using namespace std;
// the STL header goes before extern "C": it must not sit inside the C block, because C has no templates
extern "C" {
#endif
#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>
#include <libswscale/swscale.h>

void main()
{
    BYTE* pData = NULL;
    std::vector<BYTE*> imgPDataVec;   // addresses of the pure pixel data, with the picture headers stripped
    std::vector<BYTE*> pOriDataVec;   // start addresses of the allocated buffers, kept so they can be freed accurately
    int nWidth = 0;
    int nHeight = 0;
    int nDataLen = 0;
    int nLen;
    CFile file;
    CString csFileName;

    for (int fileI = 1; fileI <= 5; fileI++) {
        csFileName.Format(_T("D:\\pic\\%d.bmp"), fileI);
        file.Open(csFileName, CFile::modeRead | CFile::typeBinary);
        nLen = file.GetLength();
        pData = new BYTE[nLen];
        pOriDataVec.push_back(pData);
        file.Read(pData, nLen);
        file.Close();

        BITMAPFILEHEADER bmpFHeader;
        BITMAPINFOHEADER bmpIHeader;
        memcpy(&bmpFHeader, pData, sizeof(BITMAPFILEHEADER));
        int nHeadLen = bmpFHeader.bfOffBits - sizeof(BITMAPFILEHEADER);
        memcpy(&bmpIHeader, pData + sizeof(BITMAPFILEHEADER), nHeadLen);
        nWidth = bmpIHeader.biWidth;    // e.g. 464
        nHeight = bmpIHeader.biHeight;  // e.g. 362
        pData += bmpFHeader.bfOffBits;  // skip the headers: pData now points at the pixel data
        nDataLen = nLen - bmpFHeader.bfOffBits;
        imgPDataVec.push_back(pData);
    }

    av_register_all();
    avcodec_register_all();

    AVFrame* pRGBFrame = new AVFrame[1];   // RGB frame
    AVFrame* pYUVFrame = new AVFrame[1];   // YUV frame
    AVCodecContext* pContext = NULL;
    AVCodec* pCodecH264 = NULL;            // H264 encoder
    uint8_t* pYuv_buff = NULL;

    // find the H264 encoder
    pCodecH264 = avcodec_find_encoder(CODEC_ID_H264);
    if (!pCodecH264) {
        fprintf(stderr, "h264 codec not found\n");
        getchar();
        exit(1);
    }

    pContext = avcodec_alloc_context3(pCodecH264);
    pContext->bit_rate = 3000000;          // put sample parameters
    pContext->width = nWidth;
    pContext->height = nHeight;
    // frames per second
    AVRational rate;
    rate.num = 1;
    rate.den = 1;
    pContext->time_base = rate;            // (AVRational){1,25};
    pContext->gop_size = 10;               // emit one intra frame every ten frames
    pContext->max_b_frames = 1;
    pContext->thread_count = 1;
    pContext->pix_fmt = PIX_FMT_YUV420P;
    //av_opt_set(pContext->priv_data, /*"preset"*/"libvpx-1080p.ffpreset", /*"slow"*/NULL, 0);

    // open the encoder
    if (avcodec_open2(pContext, pCodecH264, NULL) < 0) {
        printf("avcodec_open2 failed\n");
        TRACE("avcodec_open2 failed\n");
        getchar();
    }

    int size = pContext->width * pContext->height;
    pYuv_buff = (uint8_t*)malloc((size * 3) / 2);      // size for YUV 420

    // buffer used to fill the RGB frame with the picture data
    uint8_t* rgb_buff = new uint8_t[nDataLen];

    // encoding buffer: if outbuf_size is too small, encoding fails and image quality also suffers
    int outbuf_size = 900000;
    uint8_t* pOutbuf = (uint8_t*)malloc(outbuf_size);

    int u_size = 0;
    FILE* f = NULL;
    char* filename = "myData.h264";
    f = fopen(filename, "wb");
    if (!f) {
        TRACE("could not open %s\n", filename);
        getchar();
        exit(1);
    }

    // init the SwsContext (BGR24 -> YUV420P)
    SwsContext* scxt = sws_getContext(pContext->width, pContext->height, PIX_FMT_BGR24,
                                      pContext->width, pContext->height, PIX_FMT_YUV420P,
                                      SWS_POINT, NULL, NULL, NULL);

    AVPacket avpkt;
    //AVFrame *pTFrame = new AVFrame;
    int nNum = 0;
    for (int i = 0; i < 250; i++) {     // upper bound only; the loop really ends when nNum reaches the image count below
        // fill the RGB frame with the pixel data of the current picture
        memcpy(rgb_buff, imgPDataVec.at(i % imgPDataVec.size()), nDataLen);
        avpicture_fill((AVPicture*)pRGBFrame, (uint8_t*)rgb_buff, PIX_FMT_BGR24, nWidth, nHeight);
        avpicture_fill((AVPicture*)pYUVFrame, (uint8_t*)pYuv_buff, PIX_FMT_YUV420P, nWidth, nHeight);

        // BMPs are stored bottom-up, so walk the source planes from the last row with negative strides
        pRGBFrame->data[0] += pRGBFrame->linesize[0] * (nHeight - 1);
        pRGBFrame->linesize[0] *= -1;
        pRGBFrame->data[1] += pRGBFrame->linesize[1] * (nHeight / 2 - 1);
        pRGBFrame->linesize[1] *= -1;
        pRGBFrame->data[2] += pRGBFrame->linesize[2] * (nHeight / 2 - 1);
        pRGBFrame->linesize[2] *= -1;

        // convert RGB to YUV
        sws_scale(scxt, pRGBFrame->data, pRGBFrame->linesize, 0, pContext->height, pYUVFrame->data, pYUVFrame->linesize);

        static int got_packet_ptr = 0;
        av_init_packet(&avpkt);
        avpkt.data = pOutbuf;
        avpkt.size = outbuf_size;
        u_size = avcodec_encode_video2(pContext, &avpkt, pYUVFrame, &got_packet_ptr);
        // avcodec_encode_video2 does not produce output on every call, because the encoder delays frames;
        // sometimes a call only updates internal state
        pYUVFrame->pts++;
        if (u_size == 0) {
            if (avpkt.data != NULL)               // only now has an actual packet been produced
                nNum++;
            if (nNum == imgPDataVec.size() + 1)   // stop once the wanted number of pictures has really been written
                break;
            fwrite(avpkt.data, 1, avpkt.size, f);
        }
    }
    fclose(f);

    delete[] pRGBFrame;
    delete[] pYUVFrame;
    delete[] rgb_buff;
    for (int i = 0; i < pOriDataVec.size(); i++) {
        delete[] pOriDataVec.at(i);
    }
    free(pYuv_buff);
    free(pOutbuf);
    avcodec_close(pContext);
    av_free(pContext);
}
#ifdef __cplusplus
}
#endif
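One note on the delayed frames mentioned in the comments above: instead of counting real packets with nNum, the usual pattern with this old avcodec_encode_video2 API is to flush the encoder after the input loop by passing NULL frames until no more packets come out. A sketch of that flush pass, reusing the variables above (it would have to run before the fclose(f) call):

// Flush the frames still buffered inside the encoder (same old avcodec_encode_video2 API).
int got_packet = 1;
while (got_packet) {
    av_init_packet(&avpkt);
    avpkt.data = pOutbuf;
    avpkt.size = outbuf_size;
    if (avcodec_encode_video2(pContext, &avpkt, NULL, &got_packet) < 0)   // NULL frame means "no more input"
        break;
    if (got_packet)
        fwrite(avpkt.data, 1, avpkt.size, f);
}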
At the same time, I also looked into 雷神 (Lei Xiaohua)'s audio/video muxing code:
#include <stdio.h>

extern "C" {
#include <libavformat/avformat.h>
}

// '1' = run the corresponding bitstream filter (not needed for raw H.264 + MP3 input)
#define USE_H264BSF 0
#define USE_AACBSF 0

int main(int argc, char* argv[])
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "myData.h264";//"cuc_ieschool.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "2.mp3";
    const char *out_filename = "4.mp4";//Output file URL

    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    printf("==========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("=====================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    int nVStreams = ifmt_ctx_v->nb_streams;
    for (i = 0; i < nVStreams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    int nAStreams = ifmt_ctx_a->nb_streams;
    for (i = 0; i < nAStreams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }

    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base,
                          cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    if (pkt.stream_index == videoindex_v) {
                        //FIX:No PTS (Example: Raw H.264)
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_v = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break;
            }
        } else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];
                    if (pkt.stream_index == audioindex_a) {
                        //FIX:No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            } else {
                break;
            }
        }

        //FIX:Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

        //Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
It can produce a video, and with the official sample inputs the output is correct, but after switching to my own images the colors came out wrong, the frame count was off, and the picture itself was skewed. Using this would require substantial rework, and the DLLs it needs add up to more than 18 MB, which is too large, so I ruled this option out as well.
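Purely as a guess for anyone who wants to dig into that distortion: BMP scanlines are padded to 4-byte multiples and stored bottom-up, so if the pixel block from the BMP is handed to sws_scale as tightly packed data, the picture skews whenever nWidth * 3 is not a multiple of 4. A hypothetical check, reusing names from the image-to-H264 code above:

// Hypothetical sketch, not a confirmed fix: describe the real BMP layout to sws_scale.
int bmpStride = ((nWidth * 3 + 3) / 4) * 4;                       // padded stride; equals nWidth*3 only when that is a multiple of 4
const uint8_t* srcData[1] = { imgPDataVec.at(i) + bmpStride * (nHeight - 1) };   // start at the top row of the image
int srcLinesize[1] = { -bmpStride };                              // negative stride walks the bottom-up BMP top-down
sws_scale(scxt, srcData, srcLinesize, 0, nHeight, pYUVFrame->data, pYUVFrame->linesize);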
To sum up, I went with the first approach. To see the result, you can download my software, 任性动图, and try it out.