#include "../include/FFmpegReader.h"

#define ENABLE_VAAPI 0

#if USE_HW_ACCEL
#pragma message "You are compiling with experimental hardware decode"
#else
#pragma message "You are compiling only with software decode"
#endif

#if USE_HW_ACCEL
#define MAX_SUPPORTED_WIDTH 1950
#define MAX_SUPPORTED_HEIGHT 1100

#if defined(__linux__)
#include "libavutil/hwcontext_vaapi.h"

typedef struct VAAPIDecodeContext {
	VAProfile va_profile;
	VAEntrypoint va_entrypoint;
	VAConfigID va_config;
	VAContextID va_context;

#if FF_API_STRUCT_VAAPI_CONTEXT
	// (deprecated) legacy struct vaapi_context support
	int have_old_context;
	struct vaapi_context *old_context;
	AVBufferRef *device_ref;
#endif

	AVHWDeviceContext *device;
	AVVAAPIDeviceContext *hwctx;

	AVHWFramesContext *frames;
	AVVAAPIFramesContext *hwfc;

	enum AVPixelFormat surface_format;
	int surface_count;
} VAAPIDecodeContext;
#endif // defined(__linux__)
#endif // USE_HW_ACCEL
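// Note: VAAPIDecodeContext mirrors a private FFmpeg struct (libavcodec's
// vaapi_decode.h is not installed as a public header). It is re-declared here
// only so the code below can read va_config out of pCodecCtx->priv_data when
// querying surface constraints. A minimal sketch of that cast (the same cast
// appears in the ENABLE_VAAPI block further down):
//
//   VAAPIDecodeContext *vactx = (VAAPIDecodeContext *) pCodecCtx->priv_data;
//   ((AVVAAPIHWConfig *) hwconfig)->config_id = vactx->va_config;
//
// This layout must match the FFmpeg build being linked, which is why the
// whole feature is kept behind ENABLE_VAAPI (off by default).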
using namespace openshot;

FFmpegReader::FFmpegReader(std::string path)
		: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
		  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
		  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
		  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
		  packet(NULL) {

	// Open and Close the reader, to populate its attributes (such as height, width, etc...)
	Open();
	Close();
}

FFmpegReader::FFmpegReader(std::string path, bool inspect_reader)
		: last_frame(0), is_seeking(0), seeking_pts(0), seeking_frame(0), seek_count(0),
		  audio_pts_offset(99999), video_pts_offset(99999), path(path), is_video_seek(true), check_interlace(false),
		  check_fps(false), enable_seek(true), is_open(false), seek_audio_frame_found(0), seek_video_frame_found(0),
		  prev_samples(0), prev_pts(0), pts_total(0), pts_counter(0), is_duration_known(false), largest_frame_processed(0),
		  current_video_frame(0), has_missing_frames(false), num_packets_since_video_frame(0), num_checks_since_final(0),
		  packet(NULL) {

	// Only open and close the reader if requested (to populate its attributes)
	if (inspect_reader) {
		Open();
		Close();
	}
}
bool AudioLocation::is_near(AudioLocation location, int samples_per_frame, int64_t amount) {
	// Is the other location even close to this one?
	if (abs(location.frame - frame) >= 2)
		// More than 1 frame away: too far to be considered near
		return false;

	// Compute the distance (in samples) between the two locations
	int64_t diff = samples_per_frame * (location.frame - frame) + location.sample_start - sample_start;
	if (abs(diff) <= amount)
		// close
		return true;

	// not close
	return false;
}
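// Worked example for is_near(): at 44100 Hz and 30 fps, samples_per_frame is
// 1470. Comparing a location at (frame 10, sample 1460) against a new packet
// at (frame 11, sample 5) gives diff = 1470 * (11 - 10) + 5 - 1460 = 15, so
// with amount = 1470 the two locations count as contiguous audio (no gap).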
#if USE_HW_ACCEL
// Pick the hardware pixel format offered by the decoder (FFmpeg get_format callback)
static enum AVPixelFormat get_hw_dec_format(AVCodecContext *ctx, const enum AVPixelFormat *pix_fmts)
{
	const enum AVPixelFormat *p;

	for (p = pix_fmts; *p != AV_PIX_FMT_NONE; p++) {
		switch (*p) {
#if defined(__linux__)
			// Linux
			case AV_PIX_FMT_VAAPI:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VAAPI;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VAAPI;
				return *p;
			case AV_PIX_FMT_VDPAU:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VDPAU;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VDPAU;
				return *p;
#endif
#if defined(_WIN32)
			// Windows
			case AV_PIX_FMT_DXVA2_VLD:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_DXVA2_VLD;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_DXVA2;
				return *p;
			case AV_PIX_FMT_D3D11:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_D3D11;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_D3D11VA;
				return *p;
#endif
#if defined(__APPLE__)
			// macOS
			case AV_PIX_FMT_VIDEOTOOLBOX:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_VIDEOTOOLBOX;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
				return *p;
#endif
			// Cross-platform
			case AV_PIX_FMT_CUDA:
				hw_de_av_pix_fmt_global = AV_PIX_FMT_CUDA;
				hw_de_av_device_type_global = AV_HWDEVICE_TYPE_CUDA;
				return *p;
			default:
				break;
		}
	}

	return AV_PIX_FMT_NONE;
}
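// How the callback above is used: FFmpeg invokes AVCodecContext::get_format
// during avcodec_open2() with the list of pixel formats the decoder can emit
// (hardware formats are typically listed when a hw_device_ctx is attached),
// and the callback picks one. A minimal sketch of wiring it up outside this
// reader (a standalone decode loop; 'ctx' and 'codec' are illustrative):
//
//   AVCodecContext *ctx = avcodec_alloc_context3(codec);
//   ctx->get_format = get_hw_dec_format;      // called during avcodec_open2()
//   av_hwdevice_ctx_create(&ctx->hw_device_ctx, AV_HWDEVICE_TYPE_VAAPI,
//                          NULL, NULL, 0);    // default VAAPI device
//   avcodec_open2(ctx, codec, NULL);
//
// Returning AV_PIX_FMT_NONE tells FFmpeg that none of the offered formats is
// usable, rather than silently accepting a software format.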
// Determine if this codec is on the hardware-decode allow-list
int FFmpegReader::IsHardwareDecodeSupported(int codecid)
{
	int ret;
	switch (codecid) {
		case AV_CODEC_ID_H264:
		case AV_CODEC_ID_MPEG2VIDEO:
		case AV_CODEC_ID_VC1:
		case AV_CODEC_ID_WMV1:
		case AV_CODEC_ID_WMV2:
		case AV_CODEC_ID_WMV3:
			ret = 1;
			break;
		default:
			ret = 0;
			break;
	}
	return ret;
}
#endif // USE_HW_ACCEL
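// The allow-list above is deliberately conservative: these codec IDs have
// mature hardware decoders on the backends used below (VAAPI/VDPAU,
// DXVA2/D3D11VA, VideoToolbox). Codecs such as HEVC or VP9 may decode in
// hardware too, but were presumably left out until tested; extending support
// is a one-line change per codec ID (e.g. adding case AV_CODEC_ID_HEVC:).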
// Open video file
void FFmpegReader::Open() {
	// Open the reader if it is not already open
	if (!is_open) {
		// Open the media file (or throw an exception)
		pFormatCtx = NULL;
		if (avformat_open_input(&pFormatCtx, path.c_str(), NULL, NULL) != 0)
			throw InvalidFile("File could not be opened.", path);

		// Retrieve stream information
		if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
			throw NoStreamsFound("No streams found in file.", path);

		videoStream = -1;
		audioStream = -1;
		// Loop through each stream, and identify the first video and audio stream
		for (unsigned int i = 0; i < pFormatCtx->nb_streams; i++) {
			// Is this a video stream?
			if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
				videoStream = i;
			}
			// Is this an audio stream?
			if (AV_GET_CODEC_TYPE(pFormatCtx->streams[i]) == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
				audioStream = i;
			}
		}
		if (videoStream == -1 && audioStream == -1)
			throw NoStreamsFound("No video or audio streams found in this file.", path);

		// Is there a video stream?
		if (videoStream != -1) {
			// Set the stream index
			info.video_stream_index = videoStream;

			// Set the codec and codec context pointers
			pStream = pFormatCtx->streams[videoStream];

			// Find the codec ID from the stream
			AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(pStream);
			// Get the codec and codec context from the stream
			AVCodec *pCodec = avcodec_find_decoder(codecId);
			AVDictionary *opts = NULL;
			int retry_decode_open = 2;
			// If hardware accel is selected, but the hardware cannot handle the
			// file, this loop repeats once with software decoding
			do {
				pCodecCtx = AV_GET_CODEC_CONTEXT(pStream, pCodec);
#if USE_HW_ACCEL
				if (hw_de_on && (retry_decode_open==2)) {
					// No decision between hardware and software decode has been made yet
					hw_de_supported = IsHardwareDecodeSupported(pCodecCtx->codec_id);
				}
#else
				retry_decode_open = 0;
#endif

				if (pCodec == NULL) {
					throw InvalidCodec("A valid video codec could not be found for this file.", path);
				}

				// Init options
				av_dict_set(&opts, "strict", "experimental", 0);
#if USE_HW_ACCEL
				if (hw_de_on && hw_de_supported) {
					// Open hardware acceleration
					int i_decoder_hw = 0;
					char adapter[256];
					char *adapter_ptr = NULL;
					int adapter_num = openshot::Settings::Instance()->HW_DE_DEVICE_SET;
					fprintf(stderr, "Hardware decoding device number: %d\n", adapter_num);

					// Set the hardware pix format callback
					pCodecCtx->get_format = get_hw_dec_format;

					if (adapter_num < 3 && adapter_num >=0) {
#if defined(__linux__)
						snprintf(adapter, sizeof(adapter), "/dev/dri/renderD%d", adapter_num+128);
						adapter_ptr = adapter;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 1:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
								break;
							case 2:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
								break;
							case 6:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VDPAU;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VAAPI;
								break;
						}
#elif defined(_WIN32)
						adapter_ptr = NULL;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 2:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_CUDA;
								break;
							case 3:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
								break;
							case 4:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_D3D11VA;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_DXVA2;
								break;
						}
#elif defined(__APPLE__)
						adapter_ptr = NULL;
						i_decoder_hw = openshot::Settings::Instance()->HARDWARE_DECODER;
						switch (i_decoder_hw) {
							case 5:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
								break;
							case 7:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_QSV;
								break;
							default:
								hw_de_av_device_type = AV_HWDEVICE_TYPE_VIDEOTOOLBOX;
								break;
						}
#endif
					} else {
						adapter_ptr = NULL;  // Just to be sure
					}
					// Check that the device node exists (and is writable, on Linux)
#if defined(__linux__)
					if( adapter_ptr != NULL && access( adapter_ptr, W_OK ) == 0 ) {
#elif defined(_WIN32)
					if( adapter_ptr != NULL ) {
#elif defined(__APPLE__)
					if( adapter_ptr != NULL ) {
#endif
						ZmqLogger::Instance()->AppendDebugMethod("Decode device present, using device");
					} else {
						adapter_ptr = NULL;  // Force the default device
						ZmqLogger::Instance()->AppendDebugMethod("Decode device not present, using default");
					}

					hw_device_ctx = NULL;
					// The first hardware initialisation: create the device context
					if (av_hwdevice_ctx_create(&hw_device_ctx, hw_de_av_device_type, adapter_ptr, NULL, 0) >= 0) {
						if (!(pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx))) {
							throw InvalidCodec("Hardware device reference create failed.", path);
						}
					} else {
						throw InvalidCodec("Hardware device create failed.", path);
					}
				}
#endif // USE_HW_ACCEL

				// Open the video codec
				if (avcodec_open2(pCodecCtx, pCodec, &opts) < 0)
					throw InvalidCodec("A video codec was found, but could not be opened.", path);
#if USE_HW_ACCEL
				if (hw_de_on && hw_de_supported) {
					// Hardware decoding is requested: verify the frame size against
					// what the driver reports it can actually handle
					AVHWFramesConstraints *constraints = NULL;
					void *hwconfig = NULL;
					hwconfig = av_hwdevice_hwconfig_alloc(hw_device_ctx);

#if ENABLE_VAAPI
					// Needs va_config from FFmpeg's private VAAPI decode context
					// (see the struct mirrored at the top of this file)
					((AVVAAPIHWConfig *)hwconfig)->config_id = ((VAAPIDecodeContext *)(pCodecCtx->priv_data))->va_config;
					constraints = av_hwdevice_get_hwframe_constraints(hw_device_ctx, hwconfig);
#endif // ENABLE_VAAPI

					if (constraints) {
						// Is the coded frame size outside the supported range?
						if (pCodecCtx->coded_width < constraints->min_width ||
								pCodecCtx->coded_height < constraints->min_height ||
								pCodecCtx->coded_width > constraints->max_width ||
								pCodecCtx->coded_height > constraints->max_height) {
							ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n");
							hw_de_supported = 0;
							retry_decode_open = 1;	// retry the loop in software mode
							AV_FREE_CONTEXT(pCodecCtx);
							if (hw_device_ctx) {
								av_buffer_unref(&hw_device_ctx);
								hw_device_ctx = NULL;
							}
						}
						else {
							// All is just peachy: hardware decoding can be used
							ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Min width :", constraints->min_width, "Min Height :", constraints->min_height, "MaxWidth :", constraints->max_width, "MaxHeight :", constraints->max_height, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							retry_decode_open = 0;
						}
						av_hwframe_constraints_free(&constraints);
						if (hwconfig) {
							av_freep(&hwconfig);
						}
					}
					else {
						// No constraints could be queried: fall back to the configured limits
						int max_w = openshot::Settings::Instance()->DE_LIMIT_WIDTH_MAX;
						int max_h = openshot::Settings::Instance()->DE_LIMIT_HEIGHT_MAX;

						// Is the coded frame size invalid or larger than the limits?
						if (pCodecCtx->coded_width < 0 ||
								pCodecCtx->coded_height < 0 ||
								pCodecCtx->coded_width > max_w ||
								pCodecCtx->coded_height > max_h ) {
							ZmqLogger::Instance()->AppendDebugMethod("DIMENSIONS ARE TOO LARGE for hardware acceleration\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							hw_de_supported = 0;
							retry_decode_open = 1;	// retry the loop in software mode
							AV_FREE_CONTEXT(pCodecCtx);
							if (hw_device_ctx) {
								av_buffer_unref(&hw_device_ctx);
								hw_device_ctx = NULL;
							}
						}
						else {
							ZmqLogger::Instance()->AppendDebugMethod("\nDecode hardware acceleration is used\n", "Max Width :", max_w, "Max Height :", max_h, "Frame width :", pCodecCtx->coded_width, "Frame height :", pCodecCtx->coded_height);
							retry_decode_open = 0;
						}
					}
				}
				else {
					// Default: software decoding
					retry_decode_open = 0;
				}
#endif // USE_HW_ACCEL
			} while (retry_decode_open); // retry with software decoding if the hardware path failed

			// ... (update reader info with the video stream's properties)
		}
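		// For reference, the minimal FFmpeg call sequence for hardware decode
		// used by the loop above (a sketch, error handling omitted):
		//
		//   1. pick a device type:      av_hwdevice_find_type_by_name("vaapi")
		//   2. create a device context: av_hwdevice_ctx_create(&hw_device_ctx, type,
		//                                                      "/dev/dri/renderD128", NULL, 0)
		//   3. hand it to the decoder:  pCodecCtx->hw_device_ctx = av_buffer_ref(hw_device_ctx)
		//   4. open the decoder:        avcodec_open2(pCodecCtx, pCodec, &opts)
		//
		// Steps 2-4 are exactly what the do/while implements; retry_decode_open
		// reruns the loop once with hw_de_supported = 0, so the same file opens
		// with plain software decoding when the hardware path fails.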
		// Is there an audio stream?
		if (audioStream != -1) {
			// Set the stream index
			info.audio_stream_index = audioStream;

			// Get a pointer to the codec context for the audio stream
			aStream = pFormatCtx->streams[audioStream];

			// Find the codec ID from the stream
			AVCodecID codecId = AV_FIND_DECODER_CODEC_ID(aStream);

			// Get the codec and codec context from the stream
			AVCodec *aCodec = avcodec_find_decoder(codecId);
			aCodecCtx = AV_GET_CODEC_CONTEXT(aStream, aCodec);

			if (aCodec == NULL) {
				throw InvalidCodec("A valid audio codec could not be found for this file.", path);
			}

			// Init options
			AVDictionary *opts = NULL;
			av_dict_set(&opts, "strict", "experimental", 0);

			// Open the audio codec
			if (avcodec_open2(aCodecCtx, aCodec, &opts) < 0)
				throw InvalidCodec("An audio codec was found, but could not be opened.", path);

			// ... (update reader info with the audio stream's properties)
		}
		// Add the container's format metadata (if any)
		AVDictionaryEntry *tag = NULL;
		while ((tag = av_dict_get(pFormatCtx->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
			QString str_key = tag->key;
			QString str_value = tag->value;
			info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
		}

		// Init the previous audio location to "unset"
		previous_packet_location.frame = -1;
		previous_packet_location.sample_start = 0;

		// Mark the reader as "open"
		is_open = true;
	}
}
// Close the file and free all codecs, caches and tracking state
void FFmpegReader::Close() {
	// Close all objects, if the reader is 'open'
	if (is_open) {
		// Mark as "closed"
		is_open = false;

		if (packet) {
			// Remove the last packet
			RemoveAVPacket(packet);
			packet = NULL;
		}

		// Close the video codec
		if (info.has_video) {
			avcodec_flush_buffers(pCodecCtx);
			AV_FREE_CONTEXT(pCodecCtx);
#if USE_HW_ACCEL
			if (hw_de_on && hw_device_ctx) {
				av_buffer_unref(&hw_device_ctx);
				hw_device_ctx = NULL;
			}
#endif
		}

		// Close the audio codec
		if (info.has_audio) {
			avcodec_flush_buffers(aCodecCtx);
			AV_FREE_CONTEXT(aCodecCtx);
		}

		// Clear caches
		working_cache.Clear();
		missing_frames.Clear();
		final_cache.Clear();

		// Clear the processed / processing lists
		{
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			processed_video_frames.clear();
			processed_audio_frames.clear();
			processing_video_frames.clear();
			processing_audio_frames.clear();
			missing_audio_frames.clear();
			missing_video_frames.clear();
			missing_audio_frames_source.clear();
			missing_video_frames_source.clear();
			checked_frames.clear();
		}

		// Close the video file
		avformat_close_input(&pFormatCtx);
		av_freep(&pFormatCtx);

		// Reset the tracking variables
		last_frame = 0;
		largest_frame_processed = 0;
		seek_audio_frame_found = 0;
		seek_video_frame_found = 0;
		current_video_frame = 0;
		has_missing_frames = false;

		last_video_frame.reset();
	}
}
// Update the reader info with the audio stream's properties
void FFmpegReader::UpdateAudioInfo() {
	// Set values of the FileInfo struct
	info.has_audio = true;
	info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
	// ... (sample rate, channels, channel layout, bit rate, timebase, etc.)

	// If the audio stream is longer than the currently known duration, use it instead
	if (aStream->duration > 0.0f && aStream->duration > info.duration)
		info.duration = aStream->duration * info.audio_timebase.ToDouble();

	// ... (audio-only defaults for fps, video length, etc.)

	// Add audio metadata (if any is found)
	AVDictionaryEntry *tag = NULL;
	while ((tag = av_dict_get(aStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
		QString str_key = tag->key;
		QString str_value = tag->value;
		info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
	}
}
// Update the reader info with the video stream's properties
void FFmpegReader::UpdateVideoInfo() {
	// Set values of the FileInfo struct
	info.has_video = true;
	info.file_size = pFormatCtx->pb ? avio_size(pFormatCtx->pb) : -1;
	// ... (width, height, codec name, fps, bit rate, etc.)

	// Set the pixel (sample) aspect ratio, if the stream defines one
	if (pStream->sample_aspect_ratio.num != 0) {
		info.pixel_ratio.num = pStream->sample_aspect_ratio.num;
		info.pixel_ratio.den = pStream->sample_aspect_ratio.den;
	}
	// ... (display ratio, pixel format, etc.)

	// Detect interlacing from the stream's field order (only check once)
	if (!check_interlace) {
		check_interlace = true;
		AVFieldOrder field_order = AV_GET_CODEC_ATTRIBUTES(pStream, pCodecCtx)->field_order;
		switch(field_order) {
			case AV_FIELD_PROGRESSIVE:
				info.interlaced_frame = false;
				break;
			// ... (TT/BB/TB/BT cases mark the stream as interlaced)
			case AV_FIELD_UNKNOWN:
				// Field order unknown: check again on a later frame
				check_interlace = false;
				break;
		}
	}

	// Determine if the duration is actually known
	if (pStream->duration == AV_NOPTS_VALUE && pFormatCtx->duration == AV_NOPTS_VALUE)
		// Neither the stream nor the container reported a duration
		is_duration_known = false;
	else
		is_duration_known = true;
	// ... (calculate video_length from duration and fps)

	// Add video metadata (if any is found)
	AVDictionaryEntry *tag = NULL;
	while ((tag = av_dict_get(pStream->metadata, "", tag, AV_DICT_IGNORE_SUFFIX))) {
		QString str_key = tag->key;
		QString str_value = tag->value;
		info.metadata[str_key.toStdString()] = str_value.trimmed().toStdString();
	}
}
// Get an openshot::Frame object for a specific frame number of this reader
std::shared_ptr<Frame> FFmpegReader::GetFrame(int64_t requested_frame) {
	// Check for an open reader (or throw an exception)
	if (!is_open)
		throw ReaderClosed("The FFmpegReader is closed. Call Open() before calling this method.", path);

	// Adjust for a requested frame that is too small or too large
	if (requested_frame < 1)
		requested_frame = 1;
	if (requested_frame > info.video_length && is_duration_known)
		requested_frame = info.video_length;
	if (info.has_video && info.video_length == 0)
		// Invalid duration of the video file
		throw InvalidFile("Could not detect the duration of the video or audio stream.", path);

	// Check the final cache for this frame, and return it if found
	std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
	if (frame)
		return frame;

#pragma omp critical (ReadStream)
	{
		// Check the cache again (due to locking)
		frame = final_cache.GetFrame(requested_frame);
		if (!frame) {
			// Frame is not in the cache: read it from the file

			// Has this reader processed any frames yet?
			if (last_frame == 0 && requested_frame != 1)
				// Always read frame 1 first (to prime the decoder)
				frame = ReadStream(1);

			// Determine if we should seek, or just continue reading packets
			int64_t diff = requested_frame - last_frame;
			if (diff >= 1 && diff <= 20) {
				// Close enough to the last read frame: just keep reading packets
				frame = ReadStream(requested_frame);
			} else {
				// More than 20 frames away (or behind): seek first, then read
				if (enable_seek)
					Seek(requested_frame);
				frame = ReadStream(requested_frame);
			}
		}
	}
	return frame;
}
// Read the stream until we find the requested frame
std::shared_ptr<Frame> FFmpegReader::ReadStream(int64_t requested_frame) {
	// Allocate the tracking flags
	bool end_of_stream = false;
	bool check_seek = false;
	bool frame_finished = false;
	int packet_error = -1;

	// Minimum number of packets to process (and cache to fill) before returning
	int packets_processed = 0;
	int minimum_packets = OPEN_MP_NUM_PROCESSORS;
	int max_packets = 4096;

	// Allow nested OpenMP sections (a task is spawned for each video packet)
	omp_set_nested(true);

	#pragma omp parallel
	{
		#pragma omp single
		{
			// Loop through the stream until the requested frame is found
			while (true) {
				// Get the next packet into the member variable 'packet'
				packet_error = GetNextPacket();

				int processing_video_frames_size = 0;
				int processing_audio_frames_size = 0;
				{
					const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
					processing_video_frames_size = processing_video_frames.size();
					processing_audio_frames_size = processing_audio_frames.size();
				}

				// Throttle: wait while too many frames are still being processed
				while (processing_video_frames_size + processing_audio_frames_size >= minimum_packets) {
					std::this_thread::sleep_for(std::chrono::milliseconds(3));
					const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
					processing_video_frames_size = processing_video_frames.size();
					processing_audio_frames_size = processing_audio_frames.size();
				}

				// Was a packet found?
				if (packet_error < 0) {
					// No more packets to be found
					end_of_stream = true;
					break;
				}

				// Debug output
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (GetNextPacket)", "requested_frame", requested_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "minimum_packets", minimum_packets, "packets_processed", packets_processed, "is_seeking", is_seeking);

				// Video packet?
				if (info.has_video && packet->stream_index == videoStream) {
					// Reset this counter, since we have a video packet
					num_packets_since_video_frame = 0;

					// Check the status of a seek (if any)
					if (is_seeking) {
						#pragma omp critical (openshot_seek)
						check_seek = CheckSeek(true);
					} else
						check_seek = false;

					if (check_seek)
						// Jump to the next iteration of this loop
						continue;
					// Get and decode the AVFrame (if a whole frame has arrived)
					frame_finished = GetAVFrame();

					// Did we get a video frame?
					if (frame_finished) {
						// Update the PTS / frame offset (if any)
						UpdatePTSOffset(true);

						// Process the video packet (spawns an OpenMP task per frame)
						ProcessVideoPacket(requested_frame);
					}
				}
				// Audio packet?
				else if (info.has_audio && packet->stream_index == audioStream) {
					// Track the # of packets since the last video packet
					num_packets_since_video_frame++;

					// Check the status of a seek (if any)
					if (is_seeking) {
						#pragma omp critical (openshot_seek)
						check_seek = CheckSeek(false);
					} else
						check_seek = false;

					if (check_seek)
						// Jump to the next iteration of this loop
						continue;

					// Update the PTS / frame offset (if any)
					UpdatePTSOffset(false);

					// Determine the related video frame and starting sample # from the audio PTS
					AudioLocation location = GetAudioPTSLocation(packet->pts);

					// Process the audio packet
					ProcessAudioPacket(requested_frame, location.frame, location.sample_start);
				}

				// Check if working frames are 'finished', and move them to the final cache
				if (!is_seeking)
					CheckWorkingFrames(false, requested_frame);
				// Check if the requested 'final' frame is now available
				bool is_cache_found = (final_cache.GetFrame(requested_frame) != NULL);

				// Increment the packet counter, and break once enough packets are processed
				packets_processed++;
				if ((is_cache_found && packets_processed >= minimum_packets) || packets_processed > max_packets)
					break;

			} // end while
		} // end omp single
	} // end omp parallel

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ReadStream (Completed)", "packets_processed", packets_processed, "end_of_stream", end_of_stream, "largest_frame_processed", largest_frame_processed, "Working Cache Count", working_cache.Count());

	// End of stream: mark any remaining working frames as 'finished'
	if (end_of_stream)
		CheckWorkingFrames(end_of_stream, requested_frame);

	// Return the requested frame (if it was found in the final cache)
	std::shared_ptr<Frame> frame = final_cache.GetFrame(requested_frame);
	if (frame)
		return frame;

	// Check if the largest processed frame is still cached
	frame = final_cache.GetFrame(largest_frame_processed);
	if (frame)
		// Return the largest processed frame (assuming it was the last frame in the file)
		return frame;

	// The largest processed frame is no longer in cache: return a blank frame
	std::shared_ptr<Frame> f = CreateFrame(largest_frame_processed);
	f->AddColor(info.width, info.height, "#000");
	return f;
}
// Get the next packet (if any)
int FFmpegReader::GetNextPacket() {
	int found_packet = 0;
	AVPacket *next_packet;
	#pragma omp critical(getnextpacket)
	{
		next_packet = new AVPacket();
		found_packet = av_read_frame(pFormatCtx, next_packet);

		if (packet) {
			// Remove the previous packet before storing the next one
			RemoveAVPacket(packet);
			packet = NULL;
		}
		if (found_packet >= 0) {
			// Update the current packet pointer
			packet = next_packet;
		}
	}
	// Return whether a packet was found (or a negative error number)
	return found_packet;
}
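// Packet lifecycle note: 'packet' is a member pointer, so exactly one packet
// is alive at a time; GetNextPacket() frees the previous one before storing
// the next. With FFmpeg's newer API the same loop could use av_packet_alloc()
// and av_packet_free() instead of new/delete + AV_FREE_PACKET (a sketch, not
// what this file does):
//
//   AVPacket *pkt = av_packet_alloc();
//   while (av_read_frame(pFormatCtx, pkt) >= 0) {
//       /* ... dispatch by pkt->stream_index ... */
//       av_packet_unref(pkt);   // release the payload, keep the struct
//   }
//   av_packet_free(&pkt);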
// Get an AVFrame (if any)
bool FFmpegReader::GetAVFrame() {
	int frameFinished = 0;	// 0 = no frame decoded yet (fixed: was -1, which made the first-frame check below unreachable)
	int ret = 0;

	// Decode the video frame
	AVFrame *next_frame = AV_ALLOCATE_FRAME();

#if IS_FFMPEG_3_2
	#pragma omp critical (packet_cache)
	{
		// Send the compressed packet to the decoder
		ret = avcodec_send_packet(pCodecCtx, packet);
		if (ret < 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Packet not sent)");
		}
		else {
			AVFrame *next_frame2;
#if USE_HW_ACCEL
			if (hw_de_on && hw_de_supported) {
				next_frame2 = AV_ALLOCATE_FRAME();
			}
			else
#endif
			{
				next_frame2 = next_frame;
			}
			pFrame = AV_ALLOCATE_FRAME();
			while (ret >= 0) {
				// Receive decoded frames (there may be several per packet)
				ret = avcodec_receive_frame(pCodecCtx, next_frame2);
				if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
					break;
				}
#if USE_HW_ACCEL
				if (hw_de_on && hw_de_supported) {
					int err;
					if (next_frame2->format == hw_de_av_pix_fmt) {
						// Download the decoded surface from GPU memory into next_frame
						next_frame->format = AV_PIX_FMT_YUV420P;
						if ((err = av_hwframe_transfer_data(next_frame, next_frame2, 0)) < 0) {
							ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to transfer data to output frame)");
						}
						if ((err = av_frame_copy_props(next_frame, next_frame2)) < 0) {
							ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAVFrame (Failed to copy props to output frame)");
						}
					}
				}
				else
#endif
				{
					next_frame = next_frame2;
				}

				// Use only the first decoded frame (matching the old avcodec_decode_video2 behavior)
				if (frameFinished == 0) {
					frameFinished = 1;
					av_image_alloc(pFrame->data, pFrame->linesize, info.width, info.height, (AVPixelFormat)(pStream->codecpar->format), 1);
					av_image_copy(pFrame->data, pFrame->linesize, (const uint8_t **)next_frame->data, next_frame->linesize,
					              (AVPixelFormat)(pStream->codecpar->format), info.width, info.height);
				}
			}
#if USE_HW_ACCEL
			if (hw_de_on && hw_de_supported) {
				AV_FREE_FRAME(&next_frame2);
			}
#endif
		}
	}
#else
	// FFmpeg < 3.2: legacy decode API
	avcodec_decode_video2(pCodecCtx, next_frame, &frameFinished, packet);

	// Did we get a video frame?
	if (frameFinished) {
		pFrame = AV_ALLOCATE_FRAME();
		av_picture_copy((AVPicture *) pFrame, (AVPicture *) next_frame, pCodecCtx->pix_fmt,
						info.width, info.height);
	}
#endif

	// Deallocate the temporary frame
	AV_FREE_FRAME(&next_frame);

	return frameFinished;
}
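// The decode model above follows FFmpeg 3.2+ send/receive semantics: one
// avcodec_send_packet() may yield zero or more avcodec_receive_frame() calls,
// so frames are drained in a loop until EAGAIN (decoder wants more input) or
// AVERROR_EOF (decoder fully flushed). Skeleton of the contract (a sketch):
//
//   ret = avcodec_send_packet(ctx, pkt);          // feed compressed data
//   while (ret >= 0) {
//       ret = avcodec_receive_frame(ctx, frm);    // pull decoded frames
//       if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
//           break;                                // need more input / done
//       /* ... consume frm ... */
//   }
//
// In the hardware path, av_hwframe_transfer_data() downloads the GPU surface
// (e.g. AV_PIX_FMT_VAAPI) into a software frame before it can be used here.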
// Check the current seek position, and determine if we seeked too far (or not far enough)
bool FFmpegReader::CheckSeek(bool is_video) {
	// Are we seeking for a specific frame?
	if (is_seeking) {
		// Determine if both an audio and video packet have been decoded since
		// the seek happened. If not, there is not enough info to check yet.
		if ((is_video_seek && !seek_video_frame_found) || (!is_video_seek && !seek_audio_frame_found))
			return false;

		// Determine the max seeked frame
		int64_t max_seeked_frame = seek_audio_frame_found;
		if (seek_video_frame_found > max_seeked_frame)
			max_seeked_frame = seek_video_frame_found;

		// If the seek landed on (or past) the target frame, it was not far enough back
		if (max_seeked_frame >= seeking_frame) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Too far, seek again)", "is_video_seek", is_video_seek, "max_seeked_frame", max_seeked_frame, "seeking_frame", seeking_frame, "seeking_pts", seeking_pts, "seek_video_frame_found", seek_video_frame_found, "seek_audio_frame_found", seek_audio_frame_found);

			// Seek again, further back this time (the step grows with each attempt)
			Seek(seeking_frame - (10 * seek_count * seek_count));
		} else {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckSeek (Successful)", "is_video_seek", is_video_seek, "current_pts", packet->pts, "seeking_pts", seeking_pts, "seeking_frame", seeking_frame, "seek_video_frame_found", seek_video_frame_found, "seek_audio_frame_found", seek_audio_frame_found);

			// The seek worked: process frames normally from here on
			is_seeking = false;
			seeking_frame = 0;
			seeking_pts = -1;
		}
	}

	return is_seeking;
}
// Process a video packet
void FFmpegReader::ProcessVideoPacket(int64_t requested_frame) {
	// Calculate the current frame #
	int64_t current_frame = ConvertVideoPTStoFrame(GetVideoPTS());

	// Track the 1st video frame found after a seek
	if (!seek_video_frame_found && is_seeking)
		seek_video_frame_found = current_frame;

	// Are we close enough to decode the frame? And is this frame # valid?
	if ((current_frame < (requested_frame - 20)) || (current_frame == -1)) {
		// Remove the frame and skip it
		RemoveAVFrame(pFrame);
		return;
	}

	// Copy some things to local variables (for use inside the OpenMP task below)
	int64_t video_length = info.video_length;
	int width = info.width;
	int height = info.height;
	AVPixelFormat pix_fmt = AV_GET_CODEC_PIXEL_FORMAT(pStream, pCodecCtx);
	AVFrame *my_frame = pFrame;
	pFrame = NULL;

	// Add this frame # to the processing map (it is being processed in a background task)
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_video_frames[current_frame] = current_frame;
	}

	#pragma omp task firstprivate(current_frame, my_frame, height, width, video_length, pix_fmt)
	{
		// Create variables for the conversion
		AVFrame *pFrameRGB = NULL;	// the converted (RGBA) frame
		uint8_t *buffer = NULL;		// the raw RGBA pixel buffer

		// Allocate an AVFrame structure
		pFrameRGB = AV_ALLOCATE_FRAME();
		if (pFrameRGB == NULL)
			throw OutOfBoundsFrame("Convert Image Broke!", current_frame, video_length);
		// Determine the max size of this source image (based on the timeline's size and
		// the parent clip's scaling mode). This keeps the cached images as small as
		// possible without losing quality.
		int max_width = openshot::Settings::Instance()->MAX_WIDTH;
		if (max_width <= 0)
			max_width = info.width;
		int max_height = openshot::Settings::Instance()->MAX_HEIGHT;
		if (max_height <= 0)
			max_height = info.height;

		Clip *parent = (Clip *) GetClip();
		if (parent) {
			if (parent->scale == SCALE_FIT || parent->scale == SCALE_STRETCH) {
				// Best-fit or stretch scaling (based on the max timeline size * the scaling keyframes)
				float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
				float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
				max_width = std::max(float(max_width), max_width * max_scale_x);
				max_height = std::max(float(max_height), max_height * max_scale_y);
			}
			else if (parent->scale == SCALE_CROP) {
				// Cropping scale mode (based on the max timeline size * the cropped size * the scaling keyframes)
				float max_scale_x = parent->scale_x.GetMaxPoint().co.Y;
				float max_scale_y = parent->scale_y.GetMaxPoint().co.Y;
				QSize width_size(max_width * max_scale_x,
								 round(max_width / (float(info.width) / float(info.height))));
				QSize height_size(round(max_height / (float(info.height) / float(info.width))),
								  max_height * max_scale_y);

				// Respect the aspect ratio
				if (width_size.width() >= max_width && width_size.height() >= max_height) {
					max_width = std::max(max_width, width_size.width());
					max_height = std::max(max_height, width_size.height());
				} else {
					max_width = std::max(max_width, height_size.width());
					max_height = std::max(max_height, height_size.height());
				}
			}
		}
		// Determine if the image needs to be scaled (for performance reasons)
		int original_height = height;
		if (max_width != 0 && max_height != 0 && max_width < width && max_height < height) {
			// Override the width and height (but maintain the aspect ratio)
			float ratio = float(width) / float(height);
			int possible_width = round(max_height * ratio);
			int possible_height = round(max_width / ratio);

			if (possible_width <= max_width) {
				// Use the calculated width and the max height
				width = possible_width;
				height = max_height;
			} else {
				// Use the max width and the calculated height
				width = max_width;
				height = possible_height;
			}
		}

		// Determine the required buffer size, and allocate it
		int numBytes = AV_GET_IMAGE_SIZE(PIX_FMT_RGBA, width, height);
		#pragma omp critical (video_buffer)
		buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

		// Assign the appropriate parts of the buffer to the image planes in pFrameRGB
		AV_COPY_PICTURE_DATA(pFrameRGB, buffer, PIX_FMT_RGBA, width, height);

		// Choose the scaling mode
		int scale_mode = SWS_FAST_BILINEAR;
		if (openshot::Settings::Instance()->HIGH_QUALITY_SCALING) {
			scale_mode = SWS_BICUBIC;
		}
		SwsContext *img_convert_ctx = sws_getContext(info.width, info.height, pix_fmt, width, height,
													 PIX_FMT_RGBA, scale_mode, NULL, NULL, NULL);

		// Resize and colorspace-convert to RGBA in one pass
		sws_scale(img_convert_ctx, my_frame->data, my_frame->linesize, 0,
				  original_height, pFrameRGB->data, pFrameRGB->linesize);
		// Create or get the existing frame object
		std::shared_ptr<Frame> f = CreateFrame(current_frame);

		// Add the image data to the frame
		f->AddImage(width, height, 4, QImage::Format_RGBA8888, buffer);

		// Update the working cache
		working_cache.Add(f);

		// Keep track of the last video frame
		#pragma omp critical (video_buffer)
		last_video_frame = f;

		// Free the RGB image, source frame and conversion context
		AV_FREE_FRAME(&pFrameRGB);
		RemoveAVFrame(my_frame);
		sws_freeContext(img_convert_ctx);

		// Remove the video frame from the processing list
		{
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			processing_video_frames.erase(current_frame);
			processed_video_frames[current_frame] = current_frame;
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessVideoPacket (After)", "requested_frame", requested_frame, "current_frame", current_frame, "f->number", f->number);
	} // end omp task
}
// Process an audio packet
void FFmpegReader::ProcessAudioPacket(int64_t requested_frame, int64_t target_frame, int starting_sample) {
	// Track the 1st audio packet after a successful seek
	if (!seek_audio_frame_found && is_seeking)
		seek_audio_frame_found = target_frame;

	// Are we close enough to decode the frame's audio?
	if (target_frame < (requested_frame - 20)) {
		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Skipped)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample);

		// Skip this frame
		return;
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Before)", "requested_frame", requested_frame, "target_frame", target_frame, "starting_sample", starting_sample);
	// Init an AVFrame to hold the decoded audio samples
	int frame_finished = 0;
	AVFrame *audio_frame = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_frame);

	int packet_samples = 0;
	int data_size = 0;

#if IS_FFMPEG_3_2
	#pragma omp critical (ProcessAudioPacket)
	{
		int ret = 0;
		frame_finished = 1;
		while ((packet->size > 0 || (!packet->data && frame_finished)) && ret >= 0) {
			frame_finished = 0;
			ret = avcodec_send_packet(aCodecCtx, packet);
			if (ret < 0 && ret != AVERROR(EINVAL) && ret != AVERROR_EOF) {
				// Fatal send error: signal a flush and stop
				avcodec_send_packet(aCodecCtx, NULL);
				break;
			}
			if (ret >= 0)
				packet->size = 0;
			ret = avcodec_receive_frame(aCodecCtx, audio_frame);
			if (ret >= 0)
				frame_finished = 1;
			if (ret == AVERROR(EINVAL) || ret == AVERROR_EOF) {
				avcodec_flush_buffers(aCodecCtx);
				ret = 0;
			}
			if (ret >= 0)
				ret = frame_finished;
		}
		if (!packet->data && !frame_finished)
			ret = -1;
	}
#else
	int used = avcodec_decode_audio4(aCodecCtx, audio_frame, &frame_finished, packet);
#endif
	if (frame_finished) {
		// Determine how many samples were decoded
		int plane_size = -1;
		data_size = av_samples_get_buffer_size(&plane_size,
											   AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels,
											   audio_frame->nb_samples,
											   (AVSampleFormat)(AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx)), 1);

		// Calculate the total number of samples (across all channels)
		packet_samples = audio_frame->nb_samples * AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channels;
	}

	// Estimate the # of samples per frame and this packet's end location (to prevent gaps at the next timestamp)
	int pts_remaining_samples = packet_samples / info.channels;

	// Adjust the packet's PTS by the stream offset
	int64_t adjusted_pts = packet->pts + audio_pts_offset;
	double audio_seconds = double(adjusted_pts) * info.audio_timebase.ToDouble();
	double sample_seconds = double(pts_total) / info.sample_rate;

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info A)", "pts_counter", pts_counter, "PTS", adjusted_pts, "Offset", audio_pts_offset, "PTS Diff", adjusted_pts - prev_pts, "Samples", pts_remaining_samples, "Sample PTS ratio", float(adjusted_pts - prev_pts) / pts_remaining_samples);
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (Decode Info B)", "Sample Diff", pts_remaining_samples - prev_samples - prev_pts, "Total", pts_total, "PTS Seconds", audio_seconds, "Sample Seconds", sample_seconds, "Seconds Diff", audio_seconds - sample_seconds, "raw samples", packet_samples);

	// Keep track of the PTS and sample counts, to detect gaps
	prev_pts = adjusted_pts;
	pts_total += pts_remaining_samples;
	pts_counter++;
	prev_samples = pts_remaining_samples;
	// Add this audio frame to the list of processing audio frames
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.frame, previous_packet_location.frame));
	}

	while (pts_remaining_samples) {
		// Get the samples per frame (for this frame number)
		int samples_per_frame = Frame::GetSamplesPerFrame(previous_packet_location.frame, info.fps, info.sample_rate, info.channels);

		// Calculate the # of samples that land in this frame
		int samples = samples_per_frame - previous_packet_location.sample_start;
		if (samples > pts_remaining_samples)
			samples = pts_remaining_samples;

		// Decrement the remaining samples
		pts_remaining_samples -= samples;

		if (pts_remaining_samples > 0) {
			// The remaining samples spill into the next frame
			previous_packet_location.frame++;
			previous_packet_location.sample_start = 0;

			// Add the next frame to the list of processing audio frames
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			processing_audio_frames.insert(std::pair<int, int>(previous_packet_location.frame, previous_packet_location.frame));
		} else {
			// Increment the sample start
			previous_packet_location.sample_start += samples;
		}
	}
	// Allocate the interleaved audio buffer
	int16_t *audio_buf = new int16_t[AVCODEC_MAX_AUDIO_FRAME_SIZE + MY_INPUT_BUFFER_PADDING_SIZE];

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (ReSample)", "packet_samples", packet_samples, "info.channels", info.channels, "info.sample_rate", info.sample_rate, "aCodecCtx->sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), "AV_SAMPLE_FMT_S16", AV_SAMPLE_FMT_S16);

	// Create the output frame (and allocate interleaved S16 sample buffers)
	AVFrame *audio_converted = AV_ALLOCATE_FRAME();
	AV_RESET_FRAME(audio_converted);
	audio_converted->nb_samples = audio_frame->nb_samples;
	av_samples_alloc(audio_converted->data, audio_converted->linesize, info.channels, audio_frame->nb_samples, AV_SAMPLE_FMT_S16, 0);

	// Set up the resample context (convert the decoded sample format to interleaved S16)
	SWRCONTEXT *avr = SWR_ALLOC();
	av_opt_set_int(avr, "in_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
	av_opt_set_int(avr, "out_channel_layout", AV_GET_CODEC_ATTRIBUTES(aStream, aCodecCtx)->channel_layout, 0);
	av_opt_set_int(avr, "in_sample_fmt", AV_GET_SAMPLE_FORMAT(aStream, aCodecCtx), 0);
	av_opt_set_int(avr, "out_sample_fmt", AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int(avr, "in_sample_rate", info.sample_rate, 0);
	av_opt_set_int(avr, "out_sample_rate", info.sample_rate, 0);
	av_opt_set_int(avr, "in_channels", info.channels, 0);
	av_opt_set_int(avr, "out_channels", info.channels, 0);
	SWR_INIT(avr);

	// Convert the audio samples
	int nb_samples = SWR_CONVERT(avr,							// audio resample context
								 audio_converted->data,			// output data pointers
								 audio_converted->linesize[0],	// output plane size, in bytes (0 if unknown)
								 audio_converted->nb_samples,	// max # of samples the output buffer can hold
								 audio_frame->data,				// input data pointers
								 audio_frame->linesize[0],		// input plane size, in bytes (0 if unknown)
								 audio_frame->nb_samples);		// # of input samples to convert

	// Copy the converted samples into the interleaved buffer
	memcpy(audio_buf, audio_converted->data[0], audio_converted->nb_samples * av_get_bytes_per_sample(AV_SAMPLE_FMT_S16) * info.channels);

	// Deallocate the resample context and the converted frame
	SWR_CLOSE(avr);
	SWR_FREE(&avr);
	av_free(audio_converted->data[0]);
	AV_FREE_FRAME(&audio_converted);
	// Loop through all audio channels, and de-interleave the samples
	int64_t starting_frame_number = -1;
	bool partial_frame = true;
	for (int channel_filter = 0; channel_filter < info.channels; channel_filter++) {
		// Array of floats (to hold the samples for a single channel)
		starting_frame_number = target_frame;
		int channel_buffer_size = packet_samples / info.channels;
		float *channel_buffer = new float[channel_buffer_size];

		// Init the buffer array
		for (int z = 0; z < channel_buffer_size; z++)
			channel_buffer[z] = 0.0f;

		// Loop through all samples, keeping only the ones for this channel.
		// Channel data is stored interleaved (left, right, left, right, ...)
		int channel = 0;
		int position = 0;
		for (int sample = 0; sample < packet_samples; sample++) {
			// Only add samples for the current channel
			if (channel_filter == channel) {
				// Add the sample (convert from (-32768..32767) to (-1.0..1.0))
				channel_buffer[position] = audio_buf[sample] * (1.0f / (1 << 15));
				position++;
			}

			// Increment the channel # (wrapping back to the first channel)
			if ((channel + 1) < info.channels)
				channel++;
			else
				channel = 0;
		}
		// Loop through the samples for this channel, and add them to the correct frames
		int start = starting_sample;
		int remaining_samples = channel_buffer_size;
		float *iterate_channel_buffer = channel_buffer;	// pointer to the current position
		while (remaining_samples > 0) {
			// Get the samples per frame (for this frame number)
			int samples_per_frame = Frame::GetSamplesPerFrame(starting_frame_number, info.fps, info.sample_rate, info.channels);

			// Calculate the # of samples to add to this frame
			int samples = samples_per_frame - start;
			if (samples > remaining_samples)
				samples = remaining_samples;

			// Create or get the existing frame object
			std::shared_ptr<Frame> f = CreateFrame(starting_frame_number);

			// Determine if this frame was only partially filled in
			if (samples_per_frame == start + samples)
				partial_frame = false;
			else
				partial_frame = true;

			// Add the samples for the current channel to the frame
			f->AddAudio(true, channel_filter, start, iterate_channel_buffer, samples, 0.98f);

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (f->AddAudio)", "frame", starting_frame_number, "start", start, "samples", samples, "channel", channel_filter, "partial_frame", partial_frame, "samples_per_frame", samples_per_frame);

			// Add or update the working cache
			working_cache.Add(f);

			// Decrement the remaining samples
			remaining_samples -= samples;

			// Advance the buffer pointer to the next set of samples
			if (remaining_samples > 0)
				iterate_channel_buffer += samples;

			// Increment the frame number, and reset the starting sample #
			starting_frame_number++;
			start = 0;
		}

		// Clear the channel buffer
		delete[] channel_buffer;
		channel_buffer = NULL;
		iterate_channel_buffer = NULL;
	}

	// Clean up the interleaved buffer
	delete[] audio_buf;
	audio_buf = NULL;
	// Remove audio frames from the processing list
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

		// Mark all fully-filled frames as completed
		for (int64_t f = target_frame; f < starting_frame_number; f++) {
			// Remove one instance of this frame # from the processing list. NOTE: a frame #
			// can appear multiple times when several threads process it at once, so only a
			// single instance is removed here.
			processing_audio_frames.erase(processing_audio_frames.find(f));

			// Check if this frame is also being processed by another thread
			if (processing_audio_frames.count(f) == 0)
				// No other thread is processing it: mark the audio as processed (final)
				processed_audio_frames[f] = f;
		}

		if (target_frame == starting_frame_number) {
			// This typically never happens, but just in case: remove the currently processing number
			processing_audio_frames.erase(processing_audio_frames.find(target_frame));
		}
	}

	// Free the decoded audio frame
	AV_FREE_FRAME(&audio_frame);

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ProcessAudioPacket (After)", "requested_frame", requested_frame, "starting_frame", target_frame, "end_frame", starting_frame_number - 1);
}
// Seek to a specific frame. This is not always frame accurate; it's more of an estimation on many codecs.
void FFmpegReader::Seek(int64_t requested_frame) {
	// Adjust for a requested frame that is too small or too large
	if (requested_frame < 1)
		requested_frame = 1;
	if (requested_frame > info.video_length)
		requested_frame = info.video_length;

	int processing_video_frames_size = 0;
	int processing_audio_frames_size = 0;
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_video_frames_size = processing_video_frames.size();
		processing_audio_frames_size = processing_audio_frames.size();
	}

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::Seek", "requested_frame", requested_frame, "seek_count", seek_count, "last_frame", last_frame, "processing_video_frames_size", processing_video_frames_size, "processing_audio_frames_size", processing_audio_frames_size, "video_pts_offset", video_pts_offset);

	// Wait for any in-flight frames to finish processing
	while (processing_video_frames_size + processing_audio_frames_size > 0) {
		std::this_thread::sleep_for(std::chrono::milliseconds(3));
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_video_frames_size = processing_video_frames.size();
		processing_audio_frames_size = processing_audio_frames.size();
	}

	// Clear the working cache (since we are seeking to another location in the file)
	working_cache.Clear();
	missing_frames.Clear();

	// Clear the processed / processing lists
	{
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		processing_audio_frames.clear();
		processing_video_frames.clear();
		processed_video_frames.clear();
		processed_audio_frames.clear();
		missing_audio_frames.clear();
		missing_video_frames.clear();
		missing_audio_frames_source.clear();
		missing_video_frames_source.clear();
		checked_frames.clear();
	}

	// Reset the tracking variables
	last_frame = 0;
	current_video_frame = 0;
	largest_frame_processed = 0;
	num_checks_since_final = 0;
	num_packets_since_video_frame = 0;
	has_missing_frames = false;

	// Increment the seek count
	seek_count++;
	// ... (buffer_amount = number of frames to over-seek, giving the decoder warm-up room)

	// If seeking near frame 1, close and re-open the file instead (this is more reliable than seeking)
	if (requested_frame - buffer_amount < 20) {
		// Close and re-open the file (basically, seeking to frame 1)
		Close();
		Open();

		// Not actually seeking, so clear these flags
		is_seeking = false;
		if (seek_count == 1) {
			// Don't redefine this on multiple seek attempts for a specific frame
			seeking_pts = ConvertFrameToVideoPTS(1);
		}
		seek_audio_frame_found = 0;	// used to detect which frames to throw away after a seek
		seek_video_frame_found = 0;	// used to detect which frames to throw away after a seek
	}
	else {
		// Seek to the nearest key-frame (aka i-frame)
		bool seek_worked = false;
		int64_t seek_target = 0;

		// Seek the video stream (if any): only seek to the closest keyframe
		if (!seek_worked && is_video_seek) {
			seek_target = ConvertFrameToVideoPTS(requested_frame - buffer_amount);
			if (av_seek_frame(pFormatCtx, info.video_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking video stream\n", pFormatCtx->AV_FILENAME);
			} else {
				// VIDEO SEEK
				is_video_seek = true;
				seek_worked = true;
			}
		}

		// Seek the audio stream (if the video seek did not work, and an audio stream exists)
		if (!seek_worked && info.has_audio) {
			seek_target = ConvertFrameToAudioPTS(requested_frame - buffer_amount);
			if (av_seek_frame(pFormatCtx, info.audio_stream_index, seek_target, AVSEEK_FLAG_BACKWARD) < 0) {
				fprintf(stderr, "%s: error while seeking audio stream\n", pFormatCtx->AV_FILENAME);
			} else {
				// AUDIO SEEK
				is_video_seek = false;
				seek_worked = true;
			}
		}

		// Was the seek successful?
		if (seek_worked) {
			// Flush the audio decoder buffers (if any)
			if (info.has_audio)
				avcodec_flush_buffers(aCodecCtx);

			// Flush the video decoder buffers (if any)
			if (info.has_video)
				avcodec_flush_buffers(pCodecCtx);

			// Reset the previous audio location to "unset"
			previous_packet_location.frame = -1;
			previous_packet_location.sample_start = 0;

			// Init the seek flags
			is_seeking = true;
			if (seek_count == 1) {
				// Don't redefine these on multiple seek attempts for a specific frame
				seeking_pts = seek_target;
				seeking_frame = requested_frame;
			}
			seek_audio_frame_found = 0;	// used to detect which frames to throw away after a seek
			seek_video_frame_found = 0;	// used to detect which frames to throw away after a seek
		}
		else {
			// The seek failed: close and re-open the file, which resets to frame 1
			// ...
		}
	}
}
// Get the PTS of the current video packet
int64_t FFmpegReader::GetVideoPTS() {
	int64_t current_pts = 0;
	if (packet->dts != AV_NOPTS_VALUE)
		current_pts = packet->dts;

	// Return the current PTS
	return current_pts;
}
// Update the PTS offset (if it has not been set yet)
void FFmpegReader::UpdatePTSOffset(bool is_video) {
	if (is_video) {
		// VIDEO PACKET
		if (video_pts_offset == 99999)	// Has the offset been set yet?
			// Find the difference between the PTS and the frame number
			video_pts_offset = 0 - GetVideoPTS();
	} else {
		// AUDIO PACKET
		if (audio_pts_offset == 99999)	// Has the offset been set yet?
			// Find the difference between the PTS and the frame number
			audio_pts_offset = 0 - packet->pts;
	}
}
// Convert a PTS into a frame number
int64_t FFmpegReader::ConvertVideoPTStoFrame(int64_t pts) {
	// Apply the PTS offset
	pts = pts + video_pts_offset;
	int64_t previous_video_frame = current_video_frame;

	// Get the video packet start time (in seconds)
	double video_seconds = double(pts) * info.video_timebase.ToDouble();

	// Multiply by the FPS to get the (1-based) video frame number
	int64_t frame = round(video_seconds * info.fps.ToDouble()) + 1;

	// Keep track of the expected video frame #
	if (current_video_frame == 0)
		current_video_frame = frame;
	else {
		// Sometimes frames are duplicated due to identical (or similar) timestamps
		if (frame == previous_video_frame) {
			// Return -1 (a duplicate frame)
			frame = -1;
		} else {
			// Increment the expected frame #
			current_video_frame++;
		}

		if (current_video_frame < frame)
			// Debug output: this stream has missing frames
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (detected missing frame)", "calculated frame", frame, "previous_video_frame", previous_video_frame, "current_video_frame", current_video_frame);

		// Sometimes frames are missing due to varying timestamps: track each one
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
		while (current_video_frame < frame) {
			if (!missing_video_frames.count(current_video_frame)) {
				ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::ConvertVideoPTStoFrame (tracking missing frame)", "current_video_frame", current_video_frame, "previous_video_frame", previous_video_frame);
				missing_video_frames.insert(std::pair<int64_t, int64_t>(current_video_frame, previous_video_frame));
				missing_video_frames_source.insert(std::pair<int64_t, int64_t>(previous_video_frame, current_video_frame));
			}

			// Mark this reader as containing missing frames
			has_missing_frames = true;

			// Increment the current frame #
			current_video_frame++;
		}
	}

	// Return the frame #
	return frame;
}
// Convert a frame number into a video PTS
int64_t FFmpegReader::ConvertFrameToVideoPTS(int64_t frame_number) {
	// Get the frame's start time (in seconds)
	double seconds = double(frame_number - 1) / info.fps.ToDouble();

	// Calculate the # of video timebase units at this timestamp
	int64_t video_pts = round(seconds / info.video_timebase.ToDouble());

	// Apply the PTS offset (in the opposite direction)
	return video_pts - video_pts_offset;
}

// Convert a frame number into an audio PTS
int64_t FFmpegReader::ConvertFrameToAudioPTS(int64_t frame_number) {
	// Get the frame's start time (in seconds)
	double seconds = double(frame_number - 1) / info.fps.ToDouble();

	// Calculate the # of audio timebase units at this timestamp
	int64_t audio_pts = round(seconds / info.audio_timebase.ToDouble());

	// Apply the PTS offset (in the opposite direction)
	return audio_pts - audio_pts_offset;
}
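// Worked example of the frame <-> PTS math: with fps = 30/1 and a video
// timebase of 1/15360, frame 91 starts at (91 - 1) / 30 = 3.0 seconds, so
// video_pts = round(3.0 / (1/15360)) = 46080, minus any stream start offset
// (video_pts_offset). ConvertVideoPTStoFrame() is the exact inverse:
// round(46080 * (1/15360) * 30) + 1 = 91.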
// Get the audio location (frame # and sample #) that corresponds to a PTS
AudioLocation FFmpegReader::GetAudioPTSLocation(int64_t pts) {
	// Apply the PTS offset
	pts = pts + audio_pts_offset;

	// Get the audio packet start time (in seconds)
	double audio_seconds = double(pts) * info.audio_timebase.ToDouble();

	// Multiply by the FPS to get the (1-based) video frame number, as a double
	double frame = (audio_seconds * info.fps.ToDouble()) + 1;

	// Frame # as a whole number (no more decimals)
	int64_t whole_frame = int64_t(frame);

	// Remove the whole number, keeping only the fractional part of the frame
	double sample_start_percentage = frame - double(whole_frame);

	// Get the samples per frame, and calculate the sample # to start on
	int samples_per_frame = Frame::GetSamplesPerFrame(whole_frame, info.fps, info.sample_rate, info.channels);
	int sample_start = round(double(samples_per_frame) * sample_start_percentage);

	// Protect against broken (i.e. negative) timestamps
	if (whole_frame < 1)
		whole_frame = 1;
	if (sample_start < 0)
		sample_start = 0;

	// Prepare the final audio packet location
	AudioLocation location = {whole_frame, sample_start};

	// Compare to the previous audio packet (and fix small gaps)
	if (previous_packet_location.frame != -1) {
		if (location.is_near(previous_packet_location, samples_per_frame, samples_per_frame)) {
			int64_t orig_frame = location.frame;
			int orig_start = location.sample_start;

			// Snap to the previous location, to prevent gaps in the audio
			location.sample_start = previous_packet_location.sample_start;
			location.frame = previous_packet_location.frame;

			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (Audio Gap Detected)", "Source Frame", orig_frame, "Source Audio Sample", orig_start, "Target Frame", location.frame, "Target Audio Sample", location.sample_start, "pts", pts);
		} else {
			// The gap is too big to be a rounding difference: mark the in-between frames as missing
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			for (int64_t audio_frame = previous_packet_location.frame; audio_frame < location.frame; audio_frame++) {
				if (!missing_audio_frames.count(audio_frame)) {
					ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::GetAudioPTSLocation (tracking missing frame)", "missing_audio_frame", audio_frame, "previous_audio_frame", previous_packet_location.frame, "new location frame", location.frame);
					missing_audio_frames.insert(std::pair<int64_t, int64_t>(audio_frame, previous_packet_location.frame - 1));
				}
			}
		}
	}

	// Remember this location for the next packet
	previous_packet_location = location;

	// Return the associated video frame and starting sample #
	return location;
}
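// Worked example for GetAudioPTSLocation(): with an audio timebase of
// 1/44100, fps = 30/1 and 44100 samples/second, a pts of 66150 is
// 66150 / 44100 = 1.5 seconds, i.e. frame = 1.5 * 30 + 1 = 46.0, giving
// whole_frame = 46 and sample_start = round(1470 * 0.0) = 0. A pts of 66885
// (1.51666... s) instead lands on frame 46.5, i.e. sample 735 of frame 46.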
// Create a new Frame (or return an existing one) and add it to the working queue
std::shared_ptr<Frame> FFmpegReader::CreateFrame(int64_t requested_frame) {
	// Check the working cache
	std::shared_ptr<Frame> output = working_cache.GetFrame(requested_frame);

	if (!output) {
		// Lock
		const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

		// Check the working cache again (due to locking)
		output = working_cache.GetFrame(requested_frame);
		if (output) return output;

		// Create a new blank frame
		output = std::make_shared<Frame>(requested_frame, info.width, info.height, "#000000",
										 Frame::GetSamplesPerFrame(requested_frame, info.fps, info.sample_rate, info.channels),
										 info.channels);
		// ... (copy the pixel ratio, sample rate, etc. onto the frame)
		working_cache.Add(output);

		// Set the largest processed frame # (if this one is larger)
		if (requested_frame > largest_frame_processed)
			largest_frame_processed = requested_frame;
	}

	// Return the frame
	return output;
}
// Determine if a frame is partial due to a seek (and should be discarded)
bool FFmpegReader::IsPartialFrame(int64_t requested_frame) {
	// Sometimes a seek produces partial frames, and we need to remove them
	bool seek_trash = false;
	int64_t max_seeked_frame = seek_audio_frame_found; // determine the max seeked frame
	if (seek_video_frame_found > max_seeked_frame) {
		max_seeked_frame = seek_video_frame_found;
	}
	if ((info.has_audio && seek_audio_frame_found && max_seeked_frame >= requested_frame) ||
			(info.has_video && seek_video_frame_found && max_seeked_frame >= requested_frame)) {
		seek_trash = true;
	}

	return seek_trash;
}
// Check if a frame is missing, and attempt to replace its image (or silence its audio)
bool FFmpegReader::CheckMissingFrame(int64_t requested_frame) {
	// Lock
	const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

	// Increment the check count for this frame (or init it to 1)
	++checked_frames[requested_frame];

	// Debug output
	ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame", "requested_frame", requested_frame, "has_missing_frames", has_missing_frames, "missing_video_frames.size()", missing_video_frames.size(), "checked_count", checked_frames[requested_frame]);

	// Missing frames happen when frame #'s are skipped due to invalid or missing timestamps
	std::map<int64_t, int64_t>::iterator itr;
	bool found_missing_frame = false;

	// Special MP3 handling: some MP3s contain only a single video frame (the ID3 tag
	// artwork). If a frame has been checked many times and only its audio is done,
	// reuse that artwork as its image.
	if (checked_frames[requested_frame] > 8 && !missing_video_frames.count(requested_frame) &&
			!processing_audio_frames.count(requested_frame) && processed_audio_frames.count(requested_frame) &&
			last_frame && last_video_frame && last_video_frame->has_image_data &&
			aCodecId == AV_CODEC_ID_MP3 && (vCodecId == AV_CODEC_ID_MJPEGB || vCodecId == AV_CODEC_ID_MJPEG)) {
		missing_video_frames.insert(std::pair<int64_t, int64_t>(requested_frame, last_video_frame->number));
		missing_video_frames_source.insert(std::pair<int64_t, int64_t>(last_video_frame->number, requested_frame));
		missing_frames.Add(last_video_frame);
	}

	// Check if the requested video frame is a known missing frame
	if (missing_video_frames.count(requested_frame)) {
		int64_t missing_source_frame = missing_video_frames.find(requested_frame)->second;

		// Increment the check count for the source frame (or init it to 1)
		++checked_frames[missing_source_frame];

		// Get the source frame of this missing frame (if it's available in the missing cache)
		std::shared_ptr<Frame> parent_frame = missing_frames.GetFrame(missing_source_frame);
		if (parent_frame == NULL) {
			parent_frame = final_cache.GetFrame(missing_source_frame);
			if (parent_frame != NULL) {
				// Add the final frame to the missing cache
				missing_frames.Add(parent_frame);
			}
		}

		// Create a blank missing frame
		std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame (Is Previous Video Frame Final)", "requested_frame", requested_frame, "missing_frame->number", missing_frame->number, "missing_source_frame", missing_source_frame);

		// If a source frame was found, copy its image to the missing frame (else just wait a bit longer)
		if (parent_frame != NULL) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame (AddImage from Previous Video Frame)", "requested_frame", requested_frame, "missing_frame->number", missing_frame->number, "missing_source_frame", missing_source_frame);

			// Copy the image, and mark this frame's video as processed
			std::shared_ptr<QImage> parent_image = parent_frame->GetImage();
			if (parent_image) {
				missing_frame->AddImage(std::shared_ptr<QImage>(new QImage(*parent_image)));
				processed_video_frames[missing_frame->number] = missing_frame->number;
			}
		}
	}

	// Check if the requested audio frame is a known missing frame
	if (missing_audio_frames.count(requested_frame)) {
		// Create a blank missing frame
		std::shared_ptr<Frame> missing_frame = CreateFrame(requested_frame);

		// Get the samples per frame (for this frame number)
		int samples_per_frame = Frame::GetSamplesPerFrame(missing_frame->number, info.fps, info.sample_rate, info.channels);

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckMissingFrame (Add Silence for Missing Audio Frame)", "requested_frame", requested_frame, "missing_frame->number", missing_frame->number, "samples_per_frame", samples_per_frame);

		// Add silence, and mark this frame's audio as processed
		missing_frame->AddAudioSilence(samples_per_frame);
		processed_audio_frames[missing_frame->number] = missing_frame->number;
	}

	return found_missing_frame;
}
// Check the working queue, and move finished frames to the final cache
void FFmpegReader::CheckWorkingFrames(bool end_of_stream, int64_t requested_frame) {
	bool checked_count_tripped = false;
	int max_checked_count = 80;

	// Check if the requested frame is 'missing', and attempt to replace it
	CheckMissingFrame(requested_frame);

	// Loop through all working queue frames (smallest frame # first)
	while (true) {
		// Get the front frame of the working cache
		std::shared_ptr<Frame> f(working_cache.GetSmallestFrame());

		// Was a frame found?
		if (!f)
			// No frames found
			break;

		// Remove frames which are far too old to still be needed
		if (f->number < (requested_frame - (OPEN_MP_NUM_PROCESSORS * 2))) {
			working_cache.Remove(f->number);
		}

		// Check if this frame is 'missing'
		CheckMissingFrame(f->number);

		// Init the # of times this frame has been checked so far
		int checked_count = 0;
		int checked_frames_size = 0;

		bool is_video_ready = false;
		bool is_audio_ready = false;
		{ // limit the scope of the lock
			const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
			is_video_ready = processed_video_frames.count(f->number);
			is_audio_ready = processed_audio_frames.count(f->number);

			// Get the check count for this frame
			checked_frames_size = checked_frames.size();
			if (!checked_count_tripped || f->number >= requested_frame)
				checked_count = checked_frames[f->number];
			else
				// Force the checked count over the threshold
				checked_count = max_checked_count;
		}

		if (previous_packet_location.frame == f->number && !end_of_stream)
			is_audio_ready = false; // don't finalize the most recently processed audio frame
		bool is_seek_trash = IsPartialFrame(f->number);

		// Adjust for video-only or audio-only files
		if (!info.has_video) is_video_ready = true;
		if (!info.has_audio) is_audio_ready = true;
		// Make final any frame that gets stuck (for whatever reason)
		if (checked_count >= max_checked_count && (!is_video_ready || !is_audio_ready)) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (exceeded checked_count)", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

			// Trigger checked-count-tripped mode (clear out all frames before the requested frame)
			checked_count_tripped = true;

			if (info.has_video && !is_video_ready && last_video_frame) {
				// Copy the image from the last video frame
				f->AddImage(std::shared_ptr<QImage>(new QImage(*last_video_frame->GetImage())));
				is_video_ready = true;
			}

			if (info.has_audio && !is_audio_ready) {
				// Mark the audio as processed as-is
				is_audio_ready = true;
			}
		}

		// Debug output
		ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames", "requested_frame", requested_frame, "frame_number", f->number, "is_video_ready", is_video_ready, "is_audio_ready", is_audio_ready, "checked_count", checked_count, "checked_frames_size", checked_frames_size);

		// Check if this working frame is final
		if ((!end_of_stream && is_video_ready && is_audio_ready) || end_of_stream || is_seek_trash) {
			// Debug output
			ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (mark frame as final)", "requested_frame", requested_frame, "f->number", f->number, "is_seek_trash", is_seek_trash, "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count(), "end_of_stream", end_of_stream);

			if (!is_seek_trash) {
				// Add a missing image if needed (end_of_stream sometimes produces audio-only frames)
				if (info.has_video && !is_video_ready && last_video_frame)
					// Copy the image from the last video frame
					f->AddImage(std::shared_ptr<QImage>(new QImage(*last_video_frame->GetImage())));

				// Reset the counter since the last 'final' frame
				num_checks_since_final = 0;

				// Move the frame to the final cache
				final_cache.Add(f);

				{
					const GenericScopedLock<CriticalSection> lock(processingCriticalSection);

					// Add to the missing cache (if another frame depends on this one)
					if (missing_video_frames_source.count(f->number)) {
						// Debug output
						ZmqLogger::Instance()->AppendDebugMethod("FFmpegReader::CheckWorkingFrames (add frame to missing cache)", "f->number", f->number, "is_seek_trash", is_seek_trash, "Missing Cache Count", missing_frames.Count(), "Working Cache Count", working_cache.Count(), "Final Cache Count", final_cache.Count());
						missing_frames.Add(f);
					}

					// Remove from the 'checked' count map
					checked_frames.erase(f->number);
				}

				// Remove the frame from the working cache
				working_cache.Remove(f->number);

				// Update the last frame processed
				last_frame = f->number;
			} else {
				// Seek trash: delete the frame from the working cache, and never add it to the final cache
				working_cache.Remove(f->number);
			}
		} else {
			// This frame is not ready yet: stop looping
			break;
		}
	}
}
// Check for the correct frames per second, by scanning the first few seconds of video packets
void FFmpegReader::CheckFPS() {
	check_fps = true;

	int first_second_counter = 0;
	int second_second_counter = 0;
	int third_second_counter = 0;
	int forth_second_counter = 0;
	int fifth_second_counter = 0;
	int frames_detected = 0;
	int64_t pts = 0;

	// Loop through the stream
	while (true) {
		// Get the next packet (if any)
		if (GetNextPacket() < 0)
			// Break the loop when no more packets are found
			break;

		// Video packet?
		if (packet->stream_index == videoStream) {
			// Was a full frame decoded?
			if (GetAVFrame()) {
				// Update the PTS / frame offset (if any)
				UpdatePTSOffset(true);

				// Get the PTS of this packet
				pts = GetVideoPTS();

				// Remove the pFrame (only the timestamp was needed)
				RemoveAVFrame(pFrame);

				// Apply the PTS offset
				pts += video_pts_offset;

				// Get the video packet start time (in seconds)
				double video_seconds = double(pts) * info.video_timebase.ToDouble();

				// Increment the counter for the matching second
				if (video_seconds <= 1.0)
					first_second_counter++;
				else if (video_seconds > 1.0 && video_seconds <= 2.0)
					second_second_counter++;
				else if (video_seconds > 2.0 && video_seconds <= 3.0)
					third_second_counter++;
				else if (video_seconds > 3.0 && video_seconds <= 4.0)
					forth_second_counter++;
				else if (video_seconds > 4.0 && video_seconds <= 5.0)
					fifth_second_counter++;
				else
					// Past the 5th second: stop scanning
					break;

				// Increment the frame counter
				frames_detected++;
			}
		}
	}

	// Double check that all the counters are greater than zero (or give up)
	if (second_second_counter != 0 && third_second_counter != 0 && forth_second_counter != 0 && fifth_second_counter != 0) {
		// Calculate the average FPS over seconds 2-5
		int sum_fps = second_second_counter + third_second_counter + forth_second_counter + fifth_second_counter;
		int avg_fps = round(sum_fps / 4.0f);

		// Update the FPS (and the matching timebase)
		info.fps = Fraction(avg_fps, 1);
		// ...
	}
	else if (second_second_counter != 0 && third_second_counter != 0) {
		// Fall back to a single full second of packets
		int sum_fps = second_second_counter;
		// ... (update the FPS and timebase from sum_fps)
	}
	// ... (else: leave the detected FPS unchanged)
}
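// Example of the averaging in CheckFPS(): if the packet counts for seconds
// 2-5 are 30, 29, 31 and 30, then sum_fps = 120 and avg_fps =
// round(120 / 4.0) = 30, which becomes the detected frame rate. The first
// second is ignored because containers often start with a partial second;
// the two-counter fallback handles files shorter than five seconds.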
// Remove an AVFrame (and deallocate its memory)
void FFmpegReader::RemoveAVFrame(AVFrame *remove_frame) {
	// Remove the frame (if it exists)
	if (remove_frame) {
		// Free its memory
		#pragma omp critical (packet_cache)
		{
			av_freep(&remove_frame->data[0]);
			// ...
		}
	}
}

// Remove an AVPacket (and deallocate its memory)
void FFmpegReader::RemoveAVPacket(AVPacket *remove_packet) {
	// Deallocate the packet's payload
	AV_FREE_PACKET(remove_packet);

	// Delete the object itself
	delete remove_packet;
}
// Get the smallest video frame # that is still being processed
int64_t FFmpegReader::GetSmallestVideoFrame() {
	// Loop through the frame numbers
	std::map<int64_t, int64_t>::iterator itr;
	int64_t smallest_frame = -1;
	const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
	for (itr = processing_video_frames.begin(); itr != processing_video_frames.end(); ++itr) {
		if (itr->first < smallest_frame || smallest_frame == -1)
			smallest_frame = itr->first;
	}

	// Return the frame number
	return smallest_frame;
}

// Get the smallest audio frame # that is still being processed
int64_t FFmpegReader::GetSmallestAudioFrame() {
	// Loop through the frame numbers
	std::map<int64_t, int64_t>::iterator itr;
	int64_t smallest_frame = -1;
	const GenericScopedLock<CriticalSection> lock(processingCriticalSection);
	for (itr = processing_audio_frames.begin(); itr != processing_audio_frames.end(); ++itr) {
		if (itr->first < smallest_frame || smallest_frame == -1)
			smallest_frame = itr->first;
	}

	// Return the frame number
	return smallest_frame;
}
// Generate a Json::Value for this object
Json::Value FFmpegReader::JsonValue() {
	// Create the root json object (starting from the parent class properties)
	Json::Value root = ReaderBase::JsonValue();
	root["type"] = "FFmpegReader";
	root["path"] = path;

	// Return the JsonValue
	return root;
}

// Load a JSON string into this object
void FFmpegReader::SetJson(std::string value) {
	// Parse the JSON string into JSON objects
	try {
		Json::Value root;
		Json::CharReaderBuilder rbuilder;
		Json::CharReader *reader(rbuilder.newCharReader());

		std::string errors;
		bool success = reader->parse(value.c_str(), value.c_str() + value.size(), &root, &errors);
		delete reader;

		if (!success)
			// Raise an exception
			throw InvalidJSON("JSON could not be parsed (or is invalid)");

		// Set all values that match
		SetJsonValue(root);
	}
	catch (const std::exception& e) {
		// Error parsing JSON (or missing keys)
		throw InvalidJSON("JSON is invalid (missing keys or invalid data types)");
	}
}

// Load a Json::Value into this object
void FFmpegReader::SetJsonValue(Json::Value root) {
	// Set the parent data
	ReaderBase::SetJsonValue(root);

	// Set data from Json (if the key is found)
	if (!root["path"].isNull())
		path = root["path"].asString();

	// Re-open the path, and re-init everything (if needed)
	if (is_open) {
		Close();
		Open();
	}
}