@@ -17,16 +17,6 @@ cmpc::CMpegDecoder::CMpegDecoder(void)
     videoPath.clear();
     _str_codec.clear();
 
-    for (auto i = std::begin(video_dst_data); i < std::end(video_dst_data); i++) {
-        *i = nullptr;
-    }
-
-    for (auto i = std::begin(video_dst_linesize); i < std::end(video_dst_linesize); i++) {
-        *i = 0;
-    }
-
-    video_dst_bufsize = 0;
-
     /* Enable or disable frame reference counting. You are not supposed to support
      * both paths in your application but pick the one most appropriate to your
      * needs. Look for the use of refcount in this example to see what are the
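
The refcount flag this comment talks about is normally handed to the decoder when the codec is opened. A minimal sketch of that step in the style of FFmpeg's demuxing_decoding.c example; PCodec and opts are assumed names not present in this diff, and the "refcounted_frames" option only has an effect on older FFmpeg releases:

    AVDictionary* opts = nullptr;
    av_dict_set(&opts, "refcounted_frames", refcount ? "1" : "0", 0);
    if (avcodec_open2(PCodecCtx, PCodec, &opts) < 0) {   // attach the option while opening the decoder
        cerr << " Failed to open codec" << endl;
    }
    av_dict_free(&opts);
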
@@ -59,34 +49,18 @@ void cmpc::CMpegDecoder::clear(void) {
         sws_freeContext(PswsCtx);
         PswsCtx = nullptr;
     }
-    // PswsCtx = nullptr;
-    // cout << "SWS Freed!" << endl;
     if (RGBbuffer) {
         av_free(RGBbuffer);
         RGBbuffer = nullptr;
     }
-    // cout << "Buffer Freed!" << endl;
     if (PCodecCtx) {
         avcodec_free_context(&PCodecCtx);
         PCodecCtx = nullptr;
     }
-    // cout << "Condec Contex Freed!" << endl;
     if (PFormatCtx) {
         avformat_close_input(&PFormatCtx);
         PFormatCtx = nullptr;
     }
-    // cout << "Format Contex Freed!" << endl;
-    if (video_dst_data[0])
-        av_freep(&video_dst_data[0]);
-    // cout << "DST Buffer Freed!" << endl;
-    for (auto i = std::begin(video_dst_data); i < std::end(video_dst_data); i++) {
-        *i = nullptr;
-    }
-    for (auto i = std::begin(video_dst_linesize); i < std::end(video_dst_linesize); i++) {
-        *i = 0;
-    }
-
-    video_dst_bufsize = 0;
     refcount = 1;
 }
 
@@ -120,14 +94,6 @@ cmpc::CMpegDecoder::CMpegDecoder(CMpegDecoder &&ref) noexcept
     ref.PCodecCtx = nullptr;
     ref.PVideoStream = nullptr;
     ref.PswsCtx = nullptr;
-    for (auto i = std::begin(video_dst_data), j = std::begin(ref.video_dst_data); \
-        i < std::end(video_dst_data), j = std::end(ref.video_dst_data); i++, j++) {
-        *i = *j;
-    }
-    for (auto i = std::begin(video_dst_linesize), j = std::begin(ref.video_dst_linesize); \
-        i < std::end(video_dst_linesize), j = std::end(ref.video_dst_linesize); i++, j++) {
-        *i = *j;
-    }
 }
 
 cmpc::CMpegDecoder& cmpc::CMpegDecoder::operator=(CMpegDecoder &&ref) noexcept {
@@ -155,14 +121,6 @@ cmpc::CMpegDecoder& cmpc::CMpegDecoder::operator=(CMpegDecoder &&ref) noexcept {
         ref.PVideoStream = nullptr;
         ref.RGBbuffer = nullptr;
         ref.PswsCtx = nullptr;
-        for (auto i = std::begin(video_dst_data), j = std::begin(ref.video_dst_data); \
-            i < std::end(video_dst_data), j = std::end(ref.video_dst_data); i++, j++) {
-            *i = *j;
-        }
-        for (auto i = std::begin(video_dst_linesize), j = std::begin(ref.video_dst_linesize); \
-            i < std::end(video_dst_linesize), j = std::end(ref.video_dst_linesize); i++, j++) {
-            *i = *j;
-        }
         refcount = ref.refcount;
     }
     return *this;
@@ -268,14 +226,6 @@ bool cmpc::CMpegDecoder::FFmpegSetup() { // open the video file at the specified path,
         PPixelFormat = PCodecCtx->pix_fmt;
         _duration = static_cast<double>(PVideoStream->duration) / static_cast<double>(time_base.den) * static_cast<double>(time_base.num);
         _predictFrameNum = av_rescale(static_cast<int64_t>(_duration * 0xFFFF), frame_base.num, frame_base.den) / 0xFFFF;
-        ret = av_image_alloc(video_dst_data, video_dst_linesize,
-            width, height, PPixelFormat, 1);  // use the source pixel format and keep it unchanged
-        if (ret < 0) {
-            cerr << " Could not allocate raw video buffer" << endl;
-            clear();
-            return false;
-        }
-        video_dst_bufsize = ret;
     }
 
     /* dump input information to stderr */
@@ -292,13 +242,11 @@ bool cmpc::CMpegDecoder::FFmpegSetup() { // open the video file at the specified path,
     if (widthDst > 0 && heightDst > 0) {
         PswsCtx = sws_getContext(width, height, PPixelFormat, widthDst, heightDst, AV_PIX_FMT_RGB24, SCALE_FLAGS, nullptr, nullptr, nullptr);
         auto numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, widthDst, heightDst, 1);
-        video_dst_bufsize = numBytes;
         RGBbuffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
     }
     else {
         PswsCtx = sws_getContext(width, height, PPixelFormat, width, height, AV_PIX_FMT_RGB24, SCALE_FLAGS, nullptr, nullptr, nullptr);
         auto numBytes = av_image_get_buffer_size(AV_PIX_FMT_RGB24, width, height, 1);
-        video_dst_bufsize = numBytes;
         RGBbuffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
     }
     return true;
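
The RGBbuffer allocated above only becomes useful once it is bound to the data/linesize planes of the AVFrame that sws_scale() writes into. That binding is not part of this diff; a minimal sketch of it, assuming the destination frame is the frameRGB used later in _SaveFrame() and taking the rescaling branch (the other branch is identical with width x height):

    AVFrame* frameRGB = av_frame_alloc();
    av_image_fill_arrays(frameRGB->data, frameRGB->linesize, RGBbuffer,
                         AV_PIX_FMT_RGB24, widthDst, heightDst, 1);
    // sws_scale() can now convert decoded frames straight into RGBbuffer.
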
@@ -427,22 +375,15 @@ int cmpc::CMpegDecoder::_SaveFrame(PyObject *PyFrameList, AVFrame *&frame, AVFra
 
             /* copy decoded frame to destination buffer:
              * this is required since rawvideo expects non aligned data */
-            /* av_image_copy(video_dst_data, video_dst_linesize,
-                (const uint8_t **)frame->data, frame->linesize,
-                PPixelFormat, width, height);*/
 
             sws_scale(PswsCtx, frame->data, frame->linesize, 0, height, frameRGB->data, frameRGB->linesize);
-
-            // cout << "Complete Conv ";
 
             /* write to rawvideo file */
             if (widthDst>0 && heightDst>0)
                 OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, widthDst, heightDst);
             else
                 OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, width, height);
             PyList_Append(PyFrameList, OneFrame);
-            // cout << "[" << width << "-" << height << ", " << width*height << ", " << video_dst_bufsize << "]" << endl;
-            // cout << "PTS = " << frameRGB->pts << ", coded Fnum = " << frameRGB->coded_picture_number << endl;
             processed = true;
         }
     }
@@ -523,8 +464,6 @@ int cmpc::CMpegDecoder::_SaveFrameForGOP(PyObject *PyFrameList, AVFrame *&frame,
 
             sws_scale(PswsCtx, frame->data, frame->linesize, 0, height, frameRGB->data, frameRGB->linesize);
 
-            // cout << "Complete Conv ";
-
             /* write to rawvideo file */
             if (widthDst>0 && heightDst>0)
                 OneFrame = _SaveFrame_castToPyFrameArray(frameRGB->data, widthDst, heightDst);
@@ -768,7 +707,6 @@ bool cmpc::CMpegDecoder::ExtractFrame(PyObject* PyFrameList, int64_t framePos, i
     else {
         framePos_TimeBase = __FrameToPts(framePos);
     }
-    // cout << framePos_TimeBase << endl;
     if (av_seek_frame(PFormatCtx, PVideoStreamIDX, framePos_TimeBase, AVSEEK_FLAG_BACKWARD) < 0) {
         cerr << " AV seek frame fail!" << endl;
         av_seek_frame(PFormatCtx, -1, 0, AVSEEK_FLAG_BACKWARD);
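
After a backward seek the decoder typically still holds frames queued before the jump. A minimal sketch of the seek pattern used here, plus an avcodec_flush_buffers() step that is an assumption and not visible in this hunk:

    int64_t target = __FrameToPts(framePos);                      // frame index -> stream time base
    if (av_seek_frame(PFormatCtx, PVideoStreamIDX, target, AVSEEK_FLAG_BACKWARD) < 0) {
        av_seek_frame(PFormatCtx, -1, 0, AVSEEK_FLAG_BACKWARD);   // fall back to the start of the file
    }
    avcodec_flush_buffers(PCodecCtx);                             // drop frames decoded before the seek
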
@@ -783,7 +721,6 @@ bool cmpc::CMpegDecoder::ExtractFrame(PyObject* PyFrameList, int64_t framePos, i
     }
 
     while (av_read_frame(PFormatCtx, &pkt) >= 0) {
-        // cout << "[Test - " << pkt.size << " ]" << endl;
         AVPacket orig_pkt = pkt;
         frameProcessed = false;
         do {
@@ -807,23 +744,13 @@ bool cmpc::CMpegDecoder::ExtractFrame(PyObject* PyFrameList, int64_t framePos, i
         _SaveFrame(PyFrameList, frame, frameRGB, pkt, got_frame, framePos_TimeBase, frameProcessed, 1);
     } while (got_frame);
 
-    // cout << "Demuxing succeeded." << endl;
-
     if (PVideoStream && count>0 && (__dumpControl > 0)) {
         cout << " Succeed in convert frames into Python_List" << endl;
     }
 
-    // av_free(RGBbuffer);
-    // RGBbuffer = nullptr;
-    // cout << "Free Buffer" << endl;
-    // sws_freeContext(PswsCtx);
-    // cout << "Free ctx" << endl;
-    // PswsCtx = nullptr;
     av_frame_free(&frameRGB);
     av_frame_free(&frame);
 
-    // cout << "End Process" << endl;
-
     return true;
 }
 
@@ -925,7 +852,7 @@ cmpc::CMpegEncoder::CMpegEncoder(CMpegEncoder &&ref) noexcept:
     codecName.assign(ref.codecName);
 }
 
-cmpc::CMpegEncoder& cmpc::CMpegEncoder::operator=(CMpegEncoder &&ref) noexcept {
+cmpc::CMpegEncoder& cmpc::CMpegEncoder::operator=(CMpegEncoder &&ref) noexcept {
     videoPath.assign(ref.videoPath);
     codecName.assign(ref.codecName);
     bitRate = ref.bitRate;
@@ -981,8 +908,6 @@ void cmpc::CMpegEncoder::__log_packet(){
     AVRational *time_base = &PFormatCtx->streams[Ppacket->stream_index]->time_base;
     cout << " pts:" << av_ts2str(Ppacket->pts) << " pts_time:" << av_ts2timestr(Ppacket->pts, time_base)
          << " dts:" << av_ts2str(Ppacket->dts) << " dts_time:" << av_ts2timestr(Ppacket->dts, time_base) << endl;
-    //   << " duration:" << av_ts2str(Ppacket->duration) << " duration_time:"
-    //   << av_ts2timestr(Ppacket->duration, time_base) << " stream_index:" << Ppacket->stream_index << endl;
 }
 
 int cmpc::CMpegEncoder::__write_frame(){
@@ -1012,8 +937,6 @@ bool cmpc::CMpegEncoder::__add_stream(AVCodec **codec){
         codec_id = (*codec)->id;
         PFormatCtx->oformat->video_codec = codec_id;
     }
-    // auto codec_id = PFormatCtx->oformat->video_codec;
-    // *codec = avcodec_find_encoder(codec_id);
     if (!(*codec)) {
         cerr << " Could not find encoder for '" << avcodec_get_name(codec_id) << "'" << endl;
         return false;
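
The removed fallback suggests the encoder is now chosen from the user-supplied codec name rather than from the muxer default. A minimal sketch of such a lookup; only the variable names come from the diff, the branch structure is an assumption:

    if (codecName.empty())
        *codec = avcodec_find_encoder(PFormatCtx->oformat->video_codec);   // muxer's default video codec
    else
        *codec = avcodec_find_encoder_by_name(codecName.c_str());          // user-selected encoder
    auto codec_id = (*codec) ? (*codec)->id : PFormatCtx->oformat->video_codec;
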
@@ -1044,11 +967,13 @@ bool cmpc::CMpegEncoder::__add_stream(AVCodec **codec){
      * of which frame timestamps are represented. For fixed-fps content,
      * timebase should be 1/framerate and timestamp increments should be
      * identical to 1. */
-    PStreamContex.st->time_base = timeBase;
+    PStreamContex.st->time_base.den = 0;
+    PStreamContex.st->time_base.num = 0;
     // av_stream_set_r_frame_rate(PStreamContex.st, frameRate);
     // cout << "(" << frameRate.num << ", " << frameRate.den << ")" << endl;
     // PStreamContex.st->r_frame_rate
-    c->time_base = PStreamContex.st->time_base;
+    c->time_base = timeBase;
+
     // PStreamContex.st->frame
     c->framerate = frameRate;
 
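
With the stream time base zeroed here, avformat_write_header() lets the muxer pick its own value, which will usually differ from c->time_base. The usual consequence is that packet timestamps must be rescaled before muxing; a minimal sketch of that step (presumably what __write_frame() does; the call sequence is an assumption, only the member names appear in this diff, and c is the encoder context configured above):

    av_packet_rescale_ts(Ppacket, c->time_base, PStreamContex.st->time_base);
    Ppacket->stream_index = PStreamContex.st->index;
    if (av_interleaved_write_frame(PFormatCtx, Ppacket) < 0)
        cerr << " Error while writing an output packet" << endl;
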
@@ -1204,11 +1129,8 @@ AVFrame *cmpc::CMpegEncoder::__get_video_frame(PyArrayObject* PyFrame) {
         }
     }
 
-    // PStreamContex.next_frame++;
-    // PStreamContex.frame->pts = __FrameToPts(PStreamContex.next_frame);
     PStreamContex.frame->pts = PStreamContex.next_frame;
     PStreamContex.next_frame++;
-    // PStreamContex.frame->pts = PStreamContex.next_pts++;
     return PStreamContex.frame;
 }
 
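
The bare frame counter is a valid pts only because c->time_base was set to timeBase (nominally 1/framerate) above, so one tick equals one frame. A short sketch of the same idea with an explicit writability check, which is commonly needed when the frame is reused across calls; the check is an assumption, not part of this diff:

    if (av_frame_make_writable(PStreamContex.frame) < 0)      // the encoder may still reference the frame
        return nullptr;
    PStreamContex.frame->pts = PStreamContex.next_frame++;    // one tick per frame when time_base == 1/framerate
    return PStreamContex.frame;
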
@@ -1439,14 +1361,6 @@ void cmpc::CMpegEncoder::setParameter(string keyword, void *ptr) {
         int *ref = reinterpret_cast<int *>(ptr);
         MaxBFrame = *ref;
     }
-    /* else if (keyword.compare("timeBase") == 0) {
-        PyObject *ref = reinterpret_cast<PyObject *>(ptr);
-        auto refObj = PyTuple_GetItem(ref, 0);
-        int num = static_cast<int>(PyLong_AsLong(refObj));
-        refObj = PyTuple_GetItem(ref, 1);
-        int den = static_cast<int>(PyLong_AsLong(refObj));
-        timeBase = _setAVRational(num, den);
-    }*/
     else if (keyword.compare("frameRate") == 0) {
         PyObject *ref = reinterpret_cast<PyObject *>(ptr);
         auto refObj = PyTuple_GetItem(ref, 0);
@@ -1476,7 +1390,7 @@ bool cmpc::CMpegEncoder::FFmpegSetup() {
     /* allocate the output media context */
     // auto getFormat = av_guess_format(codecName.c_str(), nullptr, nullptr);
     avformat_alloc_output_context2(&PFormatCtx, nullptr, nullptr, videoPath.c_str());
-    PFormatCtx->avoid_negative_ts = AVFMT_AVOID_NEG_TS_MAKE_ZERO;
+    PFormatCtx->avoid_negative_ts = AVFMT_AVOID_NEG_TS_AUTO;
     if (!PFormatCtx) {
         cout << " Could not select the encoder automatically: using MPEG." << endl;
         // cout << "Could not deduce output format from file extension: using MPEG." << endl;
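
Note that avoid_negative_ts is assigned before the !PFormatCtx check, so the write dereferences a null pointer whenever format deduction fails. A minimal sketch of a safer ordering, using the "mpeg" fallback that the message above refers to (the fallback call is an assumption modeled on FFmpeg's muxing example):

    avformat_alloc_output_context2(&PFormatCtx, nullptr, nullptr, videoPath.c_str());
    if (!PFormatCtx) {
        cout << " Could not select the encoder automatically: using MPEG." << endl;
        avformat_alloc_output_context2(&PFormatCtx, nullptr, "mpeg", videoPath.c_str());
    }
    if (!PFormatCtx)
        return false;
    PFormatCtx->avoid_negative_ts = AVFMT_AVOID_NEG_TS_AUTO;   // only touch the context once it exists
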