24#include <unordered_map>
32 is_open(false), auto_map_clips(true), managed_cache(true),
path(
""), max_time(0.0)
77 info.width, info.height, info.fps, info.sample_rate,
78 info.channels, info.channel_layout) {}
82 is_open(false), auto_map_clips(true), managed_cache(true),
path(projectPath), max_time(0.0) {
101 QFileInfo filePath(QString::fromStdString(path));
102 if (!filePath.exists()) {
103 throw InvalidFile(
"File could not be opened.", path);
109 if (!openshotPath.exists()) {
112 QDir openshotTransPath(openshotPath.filePath(
"transitions"));
113 if (!openshotTransPath.exists()) {
114 throw InvalidFile(
"PATH_OPENSHOT_INSTALL/transitions could not be found.", openshotTransPath.path().toStdString());
118 QString asset_name = filePath.baseName().left(30) +
"_assets";
119 QDir asset_folder(filePath.dir().filePath(asset_name));
120 if (!asset_folder.exists()) {
122 asset_folder.mkpath(
".");
126 QFile projectFile(QString::fromStdString(path));
127 projectFile.open(QFile::ReadOnly);
128 QString projectContents = QString::fromUtf8(projectFile.readAll());
131 if (convert_absolute_paths) {
135 QRegularExpression allPathsRegex(QStringLiteral(
"\"(image|path)\":.*?\"(.*?)\""));
136 std::vector<QRegularExpressionMatch> matchedPositions;
137 QRegularExpressionMatchIterator i = allPathsRegex.globalMatch(projectContents);
138 while (i.hasNext()) {
139 QRegularExpressionMatch match = i.next();
140 if (match.hasMatch()) {
142 matchedPositions.push_back(match);
147 std::vector<QRegularExpressionMatch>::reverse_iterator itr;
148 for (itr = matchedPositions.rbegin(); itr != matchedPositions.rend(); itr++) {
149 QRegularExpressionMatch match = *itr;
150 QString relativeKey = match.captured(1);
151 QString relativePath = match.captured(2);
152 QString absolutePath =
"";
155 if (relativePath.startsWith(
"@assets")) {
156 absolutePath = QFileInfo(asset_folder.absoluteFilePath(relativePath.replace(
"@assets",
"."))).canonicalFilePath();
157 }
else if (relativePath.startsWith(
"@transitions")) {
158 absolutePath = QFileInfo(openshotTransPath.absoluteFilePath(relativePath.replace(
"@transitions",
"."))).canonicalFilePath();
160 absolutePath = QFileInfo(filePath.absoluteDir().absoluteFilePath(relativePath)).canonicalFilePath();
164 if (!absolutePath.isEmpty()) {
165 projectContents.replace(match.capturedStart(0), match.capturedLength(0),
"\"" + relativeKey +
"\": \"" + absolutePath +
"\"");
169 matchedPositions.clear();
173 SetJson(projectContents.toStdString());
177 float calculated_duration = 0.0;
178 for (
auto clip : clips)
181 if (clip_last_frame > calculated_duration)
182 calculated_duration = clip_last_frame;
183 if (
clip->Reader() &&
clip->Reader()->info.has_audio)
185 if (
clip->Reader() &&
clip->Reader()->info.has_video)
217 if (managed_cache && final_cache) {
227 auto iterator = tracked_objects.find(trackedObject->Id());
229 if (iterator != tracked_objects.end()){
231 iterator->second = trackedObject;
235 tracked_objects[trackedObject->Id()] = trackedObject;
245 auto iterator = tracked_objects.find(
id);
247 if (iterator != tracked_objects.end()){
249 std::shared_ptr<openshot::TrackedObjectBase> trackedObject = iterator->second;
250 return trackedObject;
262 std::list<std::string> trackedObjects_ids;
265 for (
auto const& it: tracked_objects){
267 trackedObjects_ids.push_back(it.first);
270 return trackedObjects_ids;
278 Json::Value trackedObjectJson;
281 auto iterator = tracked_objects.find(
id);
283 if (iterator != tracked_objects.end())
286 std::shared_ptr<TrackedObjectBBox> trackedObject = std::static_pointer_cast<TrackedObjectBBox>(iterator->second);
289 if (trackedObject->ExactlyContains(frame_number)){
290 BBox box = trackedObject->GetBox(frame_number);
291 float x1 = box.
cx - (box.
width/2);
293 float x2 = box.
cx + (box.
width/2);
295 float rotation = box.
angle;
297 trackedObjectJson[
"x1"] = x1;
298 trackedObjectJson[
"y1"] = y1;
299 trackedObjectJson[
"x2"] = x2;
300 trackedObjectJson[
"y2"] = y2;
301 trackedObjectJson[
"rotation"] = rotation;
304 BBox box = trackedObject->BoxVec.begin()->second;
305 float x1 = box.
cx - (box.
width/2);
307 float x2 = box.
cx + (box.
width/2);
309 float rotation = box.
angle;
311 trackedObjectJson[
"x1"] = x1;
312 trackedObjectJson[
"y1"] = y1;
313 trackedObjectJson[
"x2"] = x2;
314 trackedObjectJson[
"y2"] = y2;
315 trackedObjectJson[
"rotation"] = rotation;
321 trackedObjectJson[
"x1"] = 0;
322 trackedObjectJson[
"y1"] = 0;
323 trackedObjectJson[
"x2"] = 0;
324 trackedObjectJson[
"y2"] = 0;
325 trackedObjectJson[
"rotation"] = 0;
328 return trackedObjectJson.toStyledString();
336 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
342 if (
clip->Reader() &&
clip->Reader()->GetCache())
343 clip->Reader()->GetCache()->Clear();
346 if (auto_map_clips) {
348 apply_mapper_to_clip(
clip);
352 clips.push_back(
clip);
362 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
368 effects.push_back(effect);
378 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
380 effects.remove(effect);
383 if (allocated_effects.count(effect)) {
384 allocated_effects.erase(effect);
397 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
402 if (allocated_clips.count(
clip)) {
403 allocated_clips.erase(
clip);
416 for (
const auto&
clip : clips) {
428 for (
const auto& effect : effects) {
429 if (effect->Id() == id) {
439 for (
const auto&
clip : clips) {
440 const auto e =
clip->GetEffect(
id);
452 std::list<EffectBase*> timelineEffectsList;
455 for (
const auto&
clip : clips) {
458 std::list<EffectBase*> clipEffectsList =
clip->Effects();
461 timelineEffectsList.insert(timelineEffectsList.end(), clipEffectsList.begin(), clipEffectsList.end());
464 return timelineEffectsList;
478 return static_cast<int64_t
>(std::ceil(t * fps));
486 return static_cast<int64_t
>(std::floor(t * fps)) + 1;
496void Timeline::apply_mapper_to_clip(
Clip* clip)
500 if (
clip->Reader()->Name() ==
"FrameMapper")
513 allocated_frame_mappers.insert(mapper);
518 clip->Reader(clip_reader);
528 for (
auto clip : clips)
531 apply_mapper_to_clip(
clip);
536double Timeline::calculate_time(int64_t number,
Fraction rate)
539 double raw_fps = rate.
ToFloat();
542 return double(number - 1) / raw_fps;
550 "Timeline::apply_effects",
551 "frame->number", frame->number,
552 "timeline_frame_number", timeline_frame_number,
556 for (
auto effect : effects)
560 int64_t effect_start_position =
static_cast<int64_t
>(std::llround(effect->Position() * fpsD)) + 1;
561 int64_t effect_end_position =
static_cast<int64_t
>(std::llround((effect->Position() + effect->Duration()) * fpsD));
563 bool does_effect_intersect = (effect_start_position <= timeline_frame_number && effect_end_position >= timeline_frame_number && effect->Layer() == layer);
566 if (does_effect_intersect)
569 int64_t effect_start_frame =
static_cast<int64_t
>(std::llround(effect->Start() * fpsD)) + 1;
570 int64_t effect_frame_number = timeline_frame_number - effect_start_position + effect_start_frame;
580 "Timeline::apply_effects (Process Effect)",
581 "effect_frame_number", effect_frame_number,
582 "does_effect_intersect", does_effect_intersect);
585 frame = effect->GetFrame(frame, effect_frame_number);
595std::shared_ptr<Frame> Timeline::GetOrCreateFrame(std::shared_ptr<Frame> background_frame,
Clip* clip, int64_t number,
openshot::TimelineInfoStruct* options)
597 std::shared_ptr<Frame> new_frame;
605 "Timeline::GetOrCreateFrame (from reader)",
607 "samples_in_frame", samples_in_frame);
610 new_frame = std::shared_ptr<Frame>(
clip->
GetFrame(background_frame, number, options));
623 "Timeline::GetOrCreateFrame (create blank)",
625 "samples_in_frame", samples_in_frame);
632void Timeline::add_layer(std::shared_ptr<Frame> new_frame,
Clip* source_clip, int64_t clip_frame_number,
bool is_top_clip,
float max_volume)
640 std::shared_ptr<Frame> source_frame;
641 source_frame = GetOrCreateFrame(new_frame, source_clip, clip_frame_number, &options);
649 "Timeline::add_layer",
650 "new_frame->number", new_frame->number,
651 "clip_frame_number", clip_frame_number);
654 if (source_clip->
Reader()->info.has_audio) {
657 "Timeline::add_layer (Copy Audio)",
658 "source_clip->Reader()->info.has_audio", source_clip->
Reader()->info.has_audio,
659 "source_frame->GetAudioChannelsCount()", source_frame->GetAudioChannelsCount(),
661 "clip_frame_number", clip_frame_number);
666 if (new_frame->GetAudioSamplesCount() != source_frame->GetAudioSamplesCount()){
670 for (
int channel = 0; channel < source_frame->GetAudioChannelsCount(); channel++)
673 float previous_volume = source_clip->
volume.
GetValue(clip_frame_number - 1);
681 previous_volume = previous_volume / max_volume;
682 volume = volume / max_volume;
686 previous_volume = previous_volume * 0.77;
687 volume = volume * 0.77;
691 if (channel_filter != -1 && channel_filter != channel)
695 if (previous_volume == 0.0 && volume == 0.0)
699 if (channel_mapping == -1)
700 channel_mapping = channel;
703 if (!isEqual(previous_volume, 1.0) || !isEqual(volume, 1.0))
704 source_frame->ApplyGainRamp(channel_mapping, 0, source_frame->GetAudioSamplesCount(), previous_volume, volume);
708 new_frame->AddAudio(
false, channel_mapping, 0, source_frame->GetAudioSamples(channel), source_frame->GetAudioSamplesCount(), 1.0);
714 "Timeline::add_layer (No Audio Copied - Wrong # of Channels)",
715 "source_clip->Reader()->info.has_audio",
716 source_clip->
Reader()->info.has_audio,
717 "source_frame->GetAudioChannelsCount()",
718 source_frame->GetAudioChannelsCount(),
720 "clip_frame_number", clip_frame_number);
725 "Timeline::add_layer (Transform: Composite Image Layer: Completed)",
726 "source_frame->number", source_frame->number,
727 "new_frame->GetImage()->width()", new_frame->GetWidth(),
728 "new_frame->GetImage()->height()", new_frame->GetHeight());
732void Timeline::update_open_clips(
Clip *clip,
bool does_clip_intersect)
735 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
738 "Timeline::update_open_clips (before)",
739 "does_clip_intersect", does_clip_intersect,
740 "closing_clips.size()", closing_clips.size(),
741 "open_clips.size()", open_clips.size());
744 bool clip_found = open_clips.count(
clip);
746 if (clip_found && !does_clip_intersect)
749 open_clips.erase(
clip);
754 else if (!clip_found && does_clip_intersect)
770 "Timeline::update_open_clips (after)",
771 "does_clip_intersect", does_clip_intersect,
772 "clip_found", clip_found,
773 "closing_clips.size()", closing_clips.size(),
774 "open_clips.size()", open_clips.size());
778void Timeline::calculate_max_duration() {
779 double last_clip = 0.0;
780 double last_effect = 0.0;
781 double first_clip = std::numeric_limits<double>::max();
782 double first_effect = std::numeric_limits<double>::max();
785 if (!clips.empty()) {
787 const auto max_clip = std::max_element(
789 last_clip = (*max_clip)->Position() + (*max_clip)->Duration();
792 const auto min_clip = std::min_element(
794 return lhs->Position() < rhs->Position();
796 first_clip = (*min_clip)->Position();
800 if (!effects.empty()) {
802 const auto max_effect = std::max_element(
804 last_effect = (*max_effect)->Position() + (*max_effect)->Duration();
807 const auto min_effect = std::min_element(
809 return lhs->Position() < rhs->Position();
811 first_effect = (*min_effect)->Position();
815 max_time = std::max(last_clip, last_effect);
816 min_time = std::min(first_clip, first_effect);
819 if (clips.empty() && effects.empty()) {
826void Timeline::sort_clips()
829 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
833 "Timeline::SortClips",
834 "clips.size()", clips.size());
840 calculate_max_duration();
844void Timeline::sort_effects()
847 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
853 calculate_max_duration();
862 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
865 for (
auto clip : clips)
867 update_open_clips(
clip,
false);
870 bool allocated = allocated_clips.count(
clip);
877 allocated_clips.clear();
880 for (
auto effect : effects)
883 bool allocated = allocated_effects.count(effect);
890 allocated_effects.clear();
893 for (
auto mapper : allocated_frame_mappers)
899 allocated_frame_mappers.clear();
908 const std::lock_guard<std::recursive_mutex> guard(
getFrameMutex);
911 for (
auto clip : clips)
914 update_open_clips(
clip,
false);
931bool Timeline::isEqual(
double a,
double b)
933 return fabs(a - b) < 0.000001;
940 if (requested_frame < 1)
944 std::shared_ptr<Frame> frame;
945 frame = final_cache->
GetFrame(requested_frame);
949 "Timeline::GetFrame (Cached frame found)",
950 "requested_frame", requested_frame);
958 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
961 std::shared_ptr<Frame> frame;
962 frame = final_cache->
GetFrame(requested_frame);
966 "Timeline::GetFrame (Cached frame found on 2nd check)",
967 "requested_frame", requested_frame);
974 std::vector<Clip *> nearby_clips;
975 nearby_clips = find_intersecting_clips(requested_frame, 1,
true);
979 "Timeline::GetFrame (processing frame)",
980 "requested_frame", requested_frame,
981 "omp_get_thread_num()", omp_get_thread_num());
988 new_frame->AddAudioSilence(samples_in_frame);
994 "Timeline::GetFrame (Adding solid color)",
995 "requested_frame", requested_frame,
1007 "Timeline::GetFrame (Loop through clips)",
1008 "requested_frame", requested_frame,
1009 "clips.size()", clips.size(),
1010 "nearby_clips.size()", nearby_clips.size());
1017 int64_t start_frame;
1018 int64_t frame_number;
1021 std::vector<ClipInfo> clip_infos;
1022 clip_infos.reserve(nearby_clips.size());
1025 for (
auto clip : nearby_clips) {
1026 int64_t start_pos =
static_cast<int64_t
>(std::llround(
clip->
Position() * fpsD)) + 1;
1028 bool intersects = (start_pos <= requested_frame && end_pos >= requested_frame);
1029 int64_t start_frame =
static_cast<int64_t
>(std::llround(
clip->
Start() * fpsD)) + 1;
1030 int64_t frame_number = requested_frame - start_pos + start_frame;
1031 clip_infos.push_back({
clip, start_pos, end_pos, start_frame, frame_number, intersects});
1035 std::unordered_map<int, int64_t> top_start_for_layer;
1036 std::unordered_map<int, Clip*> top_clip_for_layer;
1037 for (
const auto& ci : clip_infos) {
1038 if (!ci.intersects)
continue;
1039 const int layer = ci.clip->Layer();
1040 auto it = top_start_for_layer.find(layer);
1041 if (it == top_start_for_layer.end() || ci.start_pos > it->second) {
1042 top_start_for_layer[layer] = ci.start_pos;
1043 top_clip_for_layer[layer] = ci.clip;
1048 float max_volume_sum = 0.0f;
1049 for (
const auto& ci : clip_infos) {
1050 if (!ci.intersects)
continue;
1051 if (ci.clip->Reader() && ci.clip->Reader()->info.has_audio &&
1052 ci.clip->has_audio.GetInt(ci.frame_number) != 0) {
1053 max_volume_sum +=
static_cast<float>(ci.clip->volume.GetValue(ci.frame_number));
1058 for (
const auto& ci : clip_infos) {
1061 "Timeline::GetFrame (Does clip intersect)",
1062 "requested_frame", requested_frame,
1063 "clip->Position()", ci.clip->Position(),
1064 "clip->Duration()", ci.clip->Duration(),
1065 "does_clip_intersect", ci.intersects);
1068 if (ci.intersects) {
1070 bool is_top_clip =
false;
1071 const int layer = ci.clip->Layer();
1072 auto top_it = top_clip_for_layer.find(layer);
1073 if (top_it != top_clip_for_layer.end())
1074 is_top_clip = (top_it->second == ci.clip);
1077 int64_t clip_frame_number = ci.frame_number;
1081 "Timeline::GetFrame (Calculate clip's frame #)",
1082 "clip->Position()", ci.clip->Position(),
1083 "clip->Start()", ci.clip->Start(),
1085 "clip_frame_number", clip_frame_number);
1088 add_layer(new_frame, ci.clip, clip_frame_number, is_top_clip, max_volume_sum);
1093 "Timeline::GetFrame (clip does not intersect)",
1094 "requested_frame", requested_frame,
1095 "does_clip_intersect", ci.intersects);
1102 "Timeline::GetFrame (Add frame to cache)",
1103 "requested_frame", requested_frame,
1108 new_frame->SetFrameNumber(requested_frame);
1111 final_cache->
Add(new_frame);
1121std::vector<Clip*> Timeline::find_intersecting_clips(int64_t requested_frame,
int number_of_frames,
bool include)
1124 std::vector<Clip*> matching_clips;
1127 const int64_t min_requested_frame = requested_frame;
1128 const int64_t max_requested_frame = requested_frame + (number_of_frames - 1);
1131 matching_clips.reserve(clips.size());
1133 for (
auto clip : clips)
1136 int64_t clip_start_position =
static_cast<int64_t
>(std::llround(
clip->
Position() * fpsD)) + 1;
1139 bool does_clip_intersect =
1140 (clip_start_position <= min_requested_frame || clip_start_position <= max_requested_frame) &&
1141 (clip_end_position >= min_requested_frame || clip_end_position >= max_requested_frame);
1145 "Timeline::find_intersecting_clips (Is clip near or intersecting)",
1146 "requested_frame", requested_frame,
1147 "min_requested_frame", min_requested_frame,
1148 "max_requested_frame", max_requested_frame,
1150 "does_clip_intersect", does_clip_intersect);
1153 update_open_clips(
clip, does_clip_intersect);
1156 if (does_clip_intersect && include)
1158 matching_clips.push_back(
clip);
1160 else if (!does_clip_intersect && !include)
1162 matching_clips.push_back(
clip);
1167 return matching_clips;
1173 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1176 if (managed_cache && final_cache) {
1179 managed_cache =
false;
1183 final_cache = new_cache;
1198 root[
"type"] =
"Timeline";
1203 root[
"path"] = path;
1206 root[
"clips"] = Json::Value(Json::arrayValue);
1209 for (
const auto existing_clip : clips)
1211 root[
"clips"].append(existing_clip->JsonValue());
1215 root[
"effects"] = Json::Value(Json::arrayValue);
1218 for (
const auto existing_effect: effects)
1220 root[
"effects"].append(existing_effect->JsonValue());
1231 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1240 catch (
const std::exception& e)
1243 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1251 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1254 bool was_open = is_open;
1261 if (!root[
"path"].isNull())
1262 path = root[
"path"].asString();
1264 if (!root[
"clips"].isNull()) {
1269 for (
const Json::Value existing_clip : root[
"clips"]) {
1271 if (existing_clip.isNull()) {
1279 allocated_clips.insert(c);
1296 if (!root[
"effects"].isNull()) {
1301 for (
const Json::Value existing_effect :root[
"effects"]) {
1303 if (existing_effect.isNull()) {
1310 if (!existing_effect[
"type"].isNull()) {
1312 if ( (e =
EffectInfo().CreateEffect(existing_effect[
"type"].asString())) ) {
1315 allocated_effects.insert(e);
1327 if (!root[
"duration"].isNull()) {
1350 const std::lock_guard<std::recursive_mutex> lock(
getFrameMutex);
1357 for (
const Json::Value change : root) {
1358 std::string change_key = change[
"key"][(uint)0].asString();
1361 if (change_key ==
"clips")
1363 apply_json_to_clips(change);
1365 else if (change_key ==
"effects")
1367 apply_json_to_effects(change);
1371 apply_json_to_timeline(change);
1375 catch (
const std::exception& e)
1378 throw InvalidJSON(
"JSON is invalid (missing keys or invalid data types)");
1383void Timeline::apply_json_to_clips(Json::Value change) {
1386 std::string change_type = change[
"type"].asString();
1387 std::string clip_id =
"";
1388 Clip *existing_clip = NULL;
1391 for (
auto key_part : change[
"key"]) {
1393 if (key_part.isObject()) {
1395 if (!key_part[
"id"].isNull()) {
1397 clip_id = key_part[
"id"].asString();
1400 for (
auto c : clips)
1402 if (c->Id() == clip_id) {
1414 if (existing_clip && change[
"key"].size() == 4 && change[
"key"][2] ==
"effects")
1417 Json::Value key_part = change[
"key"][3];
1419 if (key_part.isObject()) {
1421 if (!key_part[
"id"].isNull())
1424 std::string effect_id = key_part[
"id"].asString();
1427 std::list<EffectBase*> effect_list = existing_clip->
Effects();
1428 for (
auto e : effect_list)
1430 if (e->Id() == effect_id) {
1432 apply_json_to_effects(change, e);
1437 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1447 if (change_type ==
"insert") {
1453 allocated_clips.insert(
clip);
1461 }
else if (change_type ==
"update") {
1464 if (existing_clip) {
1477 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1478 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1481 if (existing_clip->
Reader() && existing_clip->
Reader()->GetCache()) {
1482 existing_clip->
Reader()->GetCache()->Remove(old_starting_frame - 8, old_ending_frame + 8);
1483 existing_clip->
Reader()->GetCache()->Remove(new_starting_frame - 8, new_ending_frame + 8);
1487 if (auto_map_clips) {
1488 apply_mapper_to_clip(existing_clip);
1492 }
else if (change_type ==
"delete") {
1495 if (existing_clip) {
1502 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1512void Timeline::apply_json_to_effects(Json::Value change) {
1515 std::string change_type = change[
"type"].asString();
1519 for (
auto key_part : change[
"key"]) {
1521 if (key_part.isObject()) {
1523 if (!key_part[
"id"].isNull())
1526 std::string effect_id = key_part[
"id"].asString();
1529 for (
auto e : effects)
1531 if (e->Id() == effect_id) {
1532 existing_effect = e;
1542 if (existing_effect || change_type ==
"insert") {
1544 apply_json_to_effects(change, existing_effect);
1549void Timeline::apply_json_to_effects(Json::Value change,
EffectBase* existing_effect) {
1552 std::string change_type = change[
"type"].asString();
1555 if (!change[
"value"].isArray() && !change[
"value"][
"position"].isNull()) {
1556 int64_t new_starting_frame = (change[
"value"][
"position"].asDouble() *
info.
fps.
ToDouble()) + 1;
1557 int64_t new_ending_frame = ((change[
"value"][
"position"].asDouble() + change[
"value"][
"end"].asDouble() - change[
"value"][
"start"].asDouble()) *
info.
fps.
ToDouble()) + 1;
1558 final_cache->
Remove(new_starting_frame - 8, new_ending_frame + 8);
1562 if (change_type ==
"insert") {
1565 std::string effect_type = change[
"value"][
"type"].asString();
1574 allocated_effects.insert(e);
1583 }
else if (change_type ==
"update") {
1586 if (existing_effect) {
1591 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1597 }
else if (change_type ==
"delete") {
1600 if (existing_effect) {
1605 final_cache->
Remove(old_starting_frame - 8, old_ending_frame + 8);
1618void Timeline::apply_json_to_timeline(Json::Value change) {
1619 bool cache_dirty =
true;
1622 std::string change_type = change[
"type"].asString();
1623 std::string root_key = change[
"key"][(uint)0].asString();
1624 std::string sub_key =
"";
1625 if (change[
"key"].size() >= 2)
1626 sub_key = change[
"key"][(uint)1].asString();
1629 if (change_type ==
"insert" || change_type ==
"update") {
1633 if (root_key ==
"color")
1636 else if (root_key ==
"viewport_scale")
1639 else if (root_key ==
"viewport_x")
1642 else if (root_key ==
"viewport_y")
1645 else if (root_key ==
"duration") {
1651 cache_dirty =
false;
1653 else if (root_key ==
"width") {
1658 else if (root_key ==
"height") {
1663 else if (root_key ==
"fps" && sub_key ==
"" && change[
"value"].isObject()) {
1665 if (!change[
"value"][
"num"].isNull())
1666 info.
fps.
num = change[
"value"][
"num"].asInt();
1667 if (!change[
"value"][
"den"].isNull())
1668 info.
fps.
den = change[
"value"][
"den"].asInt();
1670 else if (root_key ==
"fps" && sub_key ==
"num")
1673 else if (root_key ==
"fps" && sub_key ==
"den")
1676 else if (root_key ==
"display_ratio" && sub_key ==
"" && change[
"value"].isObject()) {
1678 if (!change[
"value"][
"num"].isNull())
1680 if (!change[
"value"][
"den"].isNull())
1683 else if (root_key ==
"display_ratio" && sub_key ==
"num")
1686 else if (root_key ==
"display_ratio" && sub_key ==
"den")
1689 else if (root_key ==
"pixel_ratio" && sub_key ==
"" && change[
"value"].isObject()) {
1691 if (!change[
"value"][
"num"].isNull())
1693 if (!change[
"value"][
"den"].isNull())
1696 else if (root_key ==
"pixel_ratio" && sub_key ==
"num")
1699 else if (root_key ==
"pixel_ratio" && sub_key ==
"den")
1703 else if (root_key ==
"sample_rate")
1706 else if (root_key ==
"channels")
1709 else if (root_key ==
"channel_layout")
1714 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1717 }
else if (change[
"type"].asString() ==
"delete") {
1721 if (root_key ==
"color") {
1727 else if (root_key ==
"viewport_scale")
1729 else if (root_key ==
"viewport_x")
1731 else if (root_key ==
"viewport_y")
1735 throw InvalidJSONKey(
"JSON change key is invalid", change.toStyledString());
1750 final_cache->
Clear();
1755 for (
const auto clip : clips) {
1757 if (
clip->Reader()) {
1758 if (
auto rc =
clip->Reader()->GetCache())
1762 if (deep &&
clip->Reader()->Name() ==
"FrameMapper") {
1764 if (nested_reader->
Reader()) {
1772 if (
auto cc =
clip->GetCache())
1788 display_ratio_size.scale(proposed_size, Qt::KeepAspectRatio);
Header file for CacheBase class.
Header file for CacheDisk class.
Header file for CacheMemory class.
Header file for CrashHandler class.
Header file for all Exception classes.
Header file for the FrameMapper class.
Header file for Timeline class.
All cache managers in libopenshot are based on this CacheBase class.
virtual void Clear()=0
Clear the cache of all frames.
virtual void Remove(int64_t frame_number)=0
Remove a specific frame.
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
Get a frame from the cache.
virtual void Add(std::shared_ptr< openshot::Frame > frame)=0
Add a Frame to the cache.
void SetMaxBytesFromInfo(int64_t number_of_frames, int width, int height, int sample_rate, int channels)
Set maximum bytes to a different amount based on a ReaderInfo struct.
This class is a memory-based cache manager for Frame objects.
float Start() const
Get start position (in seconds) of clip (trim start of video)
float Duration() const
Get the length of this clip (in seconds)
virtual std::shared_ptr< openshot::Frame > GetFrame(int64_t frame_number)=0
This method is required for all derived classes of ClipBase, and returns a new openshot::Frame object...
std::string Id() const
Get the Id of this clip object.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
float Position() const
Get position on timeline (in seconds)
virtual openshot::TimelineBase * ParentTimeline()
Get the associated Timeline pointer (if any)
This class represents a clip (used to arrange readers on the timeline)
openshot::VolumeMixType mixing
What strategy should be followed when mixing audio with other clips.
openshot::Keyframe channel_filter
A number representing an audio channel to filter (clears all other channels)
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Keyframe has_audio
An optional override to determine if this clip has audio (-1=undefined, 0=no, 1=yes)
openshot::TimelineBase * ParentTimeline() override
Get the associated Timeline pointer (if any)
std::list< openshot::EffectBase * > Effects()
Return the list of effects on the timeline.
openshot::Keyframe volume
Curve representing the volume (0 to 1)
openshot::Keyframe channel_mapping
A number representing an audio channel to output (only works when filtering a channel)
void Reader(openshot::ReaderBase *new_reader)
Set the current reader.
This class represents a color (used on the timeline and clips)
std::string GetColorHex(int64_t frame_number)
Get the HEX value of a color at a specific frame.
openshot::Keyframe blue
Curve representing the blue value (0 - 255)
openshot::Keyframe red
Curve representing the red value (0 - 255)
openshot::Keyframe green
Curve representing the green value (0 - 255)
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
Json::Value JsonValue() const
Generate Json::Value for this object.
static CrashHandler * Instance()
This abstract class is the base class, used by all effects in libopenshot.
virtual void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
This class returns a listing of all effects supported by libopenshot.
EffectBase * CreateEffect(std::string effect_type)
Create an instance of an effect (factory style)
This class represents a fraction.
int num
Numerator for the fraction.
float ToFloat()
Return this fraction as a float (i.e. 1/2 = 0.5)
double ToDouble() const
Return this fraction as a double (i.e. 1/2 = 0.5)
void Reduce()
Reduce this fraction (i.e. 640/480 = 4/3)
Fraction Reciprocal() const
Return the reciprocal as a Fraction.
int den
Denominator for the fraction.
This class creates a mapping between 2 different frame rates, applying a specific pull-down technique...
void ChangeMapping(Fraction target_fps, PulldownType pulldown, int target_sample_rate, int target_channels, ChannelLayout target_channel_layout)
Change frame rate or audio mapping details.
ReaderBase * Reader()
Get the current reader.
void Close() override
Close the openshot::FrameMapper and internal reader.
int GetSamplesPerFrame(openshot::Fraction fps, int sample_rate, int channels)
Calculate the # of samples per video frame (for the current frame number)
Exception for files that can not be found or opened.
Exception for missing JSON Change key.
Exception for invalid JSON.
A Keyframe is a collection of Point instances, which is used to vary a number or property over time.
int GetInt(int64_t index) const
Get the rounded INT value at a specific index.
void SetJsonValue(const Json::Value root)
Load Json::Value into this object.
double GetValue(int64_t index) const
Get the value at a specific index.
Json::Value JsonValue() const
Generate Json::Value for this object.
int64_t GetCount() const
Get the number of points (i.e. # of points)
Exception for frames that are out of bounds.
This abstract class is the base class, used by all readers in libopenshot.
openshot::ReaderInfo info
Information about the current media file.
virtual void SetJsonValue(const Json::Value root)=0
Load Json::Value into this object.
virtual Json::Value JsonValue() const =0
Generate Json::Value for this object.
std::recursive_mutex getFrameMutex
Mutex for multiple threads.
virtual openshot::CacheBase * GetCache()=0
Get the cache object used by this reader (note: not all readers use cache)
openshot::ClipBase * clip
Pointer to the parent clip instance (if any)
Exception when a reader is closed, and a frame is requested.
This class contains settings used by libopenshot (and can be safely toggled at any point)
std::string PATH_OPENSHOT_INSTALL
static Settings * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
int preview_height
Optional preview height of timeline image. If your preview window is smaller than the timeline,...
int preview_width
Optional preview width of timeline image. If your preview window is smaller than the timeline,...
This class represents a timeline.
void AddTrackedObject(std::shared_ptr< openshot::TrackedObjectBase > trackedObject)
Add to the tracked_objects map a pointer to a tracked object (TrackedObjectBBox)
Json::Value JsonValue() const override
Generate Json::Value for this object.
openshot::Keyframe viewport_scale
Curve representing the scale of the viewport (0 to 100)
void ApplyJsonDiff(std::string value)
Apply a special formatted JSON object, which represents a change to the timeline (add,...
openshot::EffectBase * GetClipEffect(const std::string &id)
Look up a clip effect by ID.
void AddClip(openshot::Clip *clip)
Add an openshot::Clip to the timeline.
std::list< openshot::EffectBase * > ClipEffects() const
Return the list of effects on all clips.
std::list< std::string > GetTrackedObjectsIds() const
Return the ID's of the tracked objects as a list of strings.
std::string Json() const override
Generate JSON string of this object.
int64_t GetMaxFrame()
Look up the end frame number of the latest element on the timeline.
double GetMinTime()
Look up the position/start time of the first timeline element.
std::shared_ptr< openshot::Frame > GetFrame(int64_t requested_frame) override
void ApplyMapperToClips()
Apply the timeline's framerate and samplerate to all clips.
openshot::Color color
Background color of timeline canvas.
std::string GetTrackedObjectValues(std::string id, int64_t frame_number) const
Return the trackedObject's properties as a JSON string.
Timeline(int width, int height, openshot::Fraction fps, int sample_rate, int channels, openshot::ChannelLayout channel_layout)
Constructor for the timeline (which configures the default frame properties)
std::shared_ptr< openshot::TrackedObjectBase > GetTrackedObject(std::string id) const
Return tracked object pointer by its id.
int64_t GetMinFrame()
Look up the start frame number of the first element on the timeline (first frame is 1)
openshot::EffectBase * GetEffect(const std::string &id)
Look up a timeline effect by ID.
void SetJsonValue(const Json::Value root) override
Load Json::Value into this object.
openshot::Clip * GetClip(const std::string &id)
Look up a single clip by ID.
void ClearAllCache(bool deep=false)
void AddEffect(openshot::EffectBase *effect)
Add an effect to the timeline.
void SetCache(openshot::CacheBase *new_cache)
void Clear()
Clear all clips, effects, and frame mappers from timeline (and free memory)
openshot::Keyframe viewport_x
Curve representing the x coordinate for the viewport.
void RemoveClip(openshot::Clip *clip)
Remove an openshot::Clip from the timeline.
void SetMaxSize(int width, int height)
double GetMaxTime()
Look up the end time of the latest timeline element.
void RemoveEffect(openshot::EffectBase *effect)
Remove an effect from the timeline.
std::shared_ptr< openshot::Frame > apply_effects(std::shared_ptr< openshot::Frame > frame, int64_t timeline_frame_number, int layer, TimelineInfoStruct *options)
Apply global/timeline effects to the source frame (if any)
void Open() override
Open the reader (and start consuming resources)
void SetJson(const std::string value) override
Load JSON string into this object.
openshot::Keyframe viewport_y
Curve representing the y coordinate for the viewport.
void Close() override
Close the timeline reader (and any resources it was consuming)
void AppendDebugMethod(std::string method_name, std::string arg1_name="", float arg1_value=-1.0, std::string arg2_name="", float arg2_value=-1.0, std::string arg3_name="", float arg3_value=-1.0, std::string arg4_name="", float arg4_value=-1.0, std::string arg5_name="", float arg5_value=-1.0, std::string arg6_name="", float arg6_value=-1.0)
Append debug information.
static ZmqLogger * Instance()
Create or get an instance of this logger singleton (invoke the class with this method)
This namespace is the default namespace for all code in the openshot library.
@ PULLDOWN_NONE
Do not apply pull-down techniques, just repeat or skip entire frames.
ChannelLayout
This enumeration determines the audio channel layout (such as stereo, mono, 5 point surround,...
@ VOLUME_MIX_AVERAGE
Evenly divide the overlapping clips volume keyframes, so that the sum does not exceed 100%.
@ VOLUME_MIX_REDUCE
Reduce volume by about 25%, and then mix (louder, but could cause pops if the sum exceeds 100%)
const Json::Value stringToJson(const std::string value)
This struct holds the information of a bounding-box.
float cy
y-coordinate of the bounding box center
float height
bounding box height
float cx
x-coordinate of the bounding box center
float width
bounding box width
float angle
bounding box rotation angle [degrees]
Like CompareClipEndFrames, but for effects.
This struct contains info about a media file, such as height, width, frames per second,...
float duration
Length of time (in seconds)
int width
The width of the video (in pixels)
int channels
The number of audio channels used in the audio stream.
openshot::Fraction fps
Frames per second, as a fraction (i.e. 24/1 = 24 fps)
openshot::Fraction display_ratio
The ratio of width to height of the video stream (i.e. 640x480 has a ratio of 4/3)
int height
The height of the video (in pixels)
int64_t video_length
The number of frames in the video stream.
std::string acodec
The name of the audio codec used to encode / decode the video stream.
std::string vcodec
The name of the video codec used to encode / decode the video stream.
openshot::Fraction pixel_ratio
The pixel ratio of the video stream as a fraction (i.e. some pixels are not square)
openshot::ChannelLayout channel_layout
The channel layout (mono, stereo, 5 point surround, etc...)
bool has_video
Determines if this file has a video stream.
bool has_audio
Determines if this file has an audio stream.
openshot::Fraction video_timebase
The video timebase determines how long each frame stays on the screen.
int sample_rate
The number of audio samples per second (44100 is a common sample rate)
This struct contains info about the current Timeline clip instance.
bool is_before_clip_keyframes
Is this before clip keyframes are applied.
bool is_top_clip
Is clip on top (if overlapping another clip)