Improved audio workflow for multi stream clips

Related to #382
parent 35b18d07
Pipeline #19657 passed in 10 minutes and 10 seconds
......@@ -501,7 +501,7 @@ signals:
/** @brief A clip was updated, request panel update. */
void refreshPanel(const QString &id);
/** @brief Upon selection, activate timeline target tracks. */
void setupTargets(bool hasVideo, QList <int> audioStreams);
void setupTargets(bool hasVideo, QMap <int, QString> audioStreams);
/** @brief A drag event ended, inform timeline. */
void processDragEnd();
};
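A minimal sketch (not part of the patch) of a slot bound to the new signature: the map now carries the MLT stream index as key and a human-readable description as value, instead of a bare index list. The slot and helper names (onSetupTargets, addTargetTrack) are placeholders, not Kdenlive API.

void onSetupTargets(bool hasVideo, QMap<int, QString> audioStreams)
{
    Q_UNUSED(hasVideo)
    QMapIterator<int, QString> it(audioStreams);
    while (it.hasNext()) {
        it.next();
        // it.key() is the MLT stream index, it.value() a label such as "1|Stereo 48kHz "
        addTargetTrack(it.key(), it.value());
    }
}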
......
......@@ -70,6 +70,7 @@ along with this program. If not, see <http://www.gnu.org/licenses/>.
#pragma GCC diagnostic ignored "-Wshadow"
#pragma GCC diagnostic ignored "-Wpedantic"
#include <rttr/registration>
#pragma GCC diagnostic pop
RTTR_REGISTRATION
{
......@@ -164,7 +165,6 @@ ProjectClip::~ProjectClip()
m_requestedThumbs.clear();
m_thumbMutex.unlock();
m_thumbThread.waitForFinished();
audioFrameCache.clear();
}
void ProjectClip::connectEffectStack()
......@@ -198,13 +198,13 @@ QString ProjectClip::getXmlProperty(const QDomElement &producer, const QString &
return value;
}
void ProjectClip::updateAudioThumbnail(const QVector<uint8_t> audioLevels)
void ProjectClip::updateAudioThumbnail()
{
if (!KdenliveSettings::audiothumbnails()) {
return;
}
audioFrameCache = audioLevels;
m_audioThumbCreated = true;
audioThumbReady();
updateTimelineClips({TimelineModel::ReloadThumbRole});
}
......@@ -568,14 +568,18 @@ void ProjectClip::createDisabledMasterProducer()
}
}
std::shared_ptr<Mlt::Producer> ProjectClip::getTimelineProducer(int trackId, int clipId, PlaylistState::ClipState state, double speed)
std::shared_ptr<Mlt::Producer> ProjectClip::getTimelineProducer(int trackId, int clipId, PlaylistState::ClipState state, int audioStream, double speed)
{
if (!m_masterProducer) {
return nullptr;
}
if (qFuzzyCompare(speed, 1.0)) {
// we are requesting a normal speed producer
if (trackId == -1 ||
bool byPassTrackProducer = false;
if (trackId == -1 && (state != PlaylistState::AudioOnly || audioStream == m_masterProducer->get_int("audio_index"))) {
byPassTrackProducer = true;
}
if (byPassTrackProducer ||
(state == PlaylistState::VideoOnly && (m_clipType == ClipType::Color || m_clipType == ClipType::Image || m_clipType == ClipType::Text|| m_clipType == ClipType::TextTemplate || m_clipType == ClipType::Qml))) {
// Temporary copy, return clone of master
int duration = m_masterProducer->time_to_frames(m_masterProducer->get("kdenlive:duration"));
......@@ -587,10 +591,20 @@ std::shared_ptr<Mlt::Producer> ProjectClip::getTimelineProducer(int trackId, int
}
if (state == PlaylistState::AudioOnly) {
// We need to get an audio producer, if none exists
if (audioStream > -1) {
if (trackId >= 0) {
trackId += 100 * audioStream;
} else {
trackId -= 100 * audioStream;
}
}
if (m_audioProducers.count(trackId) == 0) {
m_audioProducers[trackId] = cloneProducer(true);
m_audioProducers[trackId]->set("set.test_audio", 0);
m_audioProducers[trackId]->set("set.test_image", 1);
if (audioStream > -1) {
m_audioProducers[trackId]->set("audio_index", audioStream);
}
m_effectStack->addService(m_audioProducers[trackId]);
}
return std::shared_ptr<Mlt::Producer>(m_audioProducers[trackId]->cut());
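The trackId offset above acts as a composite key: each (track, audio stream) pair gets its own entry in m_audioProducers, so two timeline clips using different streams of the same source never share an MLT audio producer. A standalone sketch of the same scheme (illustration only, not in the patch):

int audioProducerKey(int trackId, int audioStream)
{
    // mirrors the logic above: offset the track id by 100 per stream,
    // keeping the sign so bin (-1) and timeline (>= 0) requests stay distinct
    if (audioStream > -1) {
        return trackId >= 0 ? trackId + 100 * audioStream : trackId - 100 * audioStream;
    }
    return trackId;
}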
......@@ -665,8 +679,7 @@ std::shared_ptr<Mlt::Producer> ProjectClip::getTimelineProducer(int trackId, int
return std::shared_ptr<Mlt::Producer>(warpProducer->cut());
}
std::pair<std::shared_ptr<Mlt::Producer>, bool> ProjectClip::giveMasterAndGetTimelineProducer(int clipId, std::shared_ptr<Mlt::Producer> master,
PlaylistState::ClipState state)
std::pair<std::shared_ptr<Mlt::Producer>, bool> ProjectClip::giveMasterAndGetTimelineProducer(int clipId, std::shared_ptr<Mlt::Producer> master, PlaylistState::ClipState state, int tid)
{
int in = master->get_in();
int out = master->get_out();
......@@ -681,10 +694,9 @@ std::pair<std::shared_ptr<Mlt::Producer>, bool> ProjectClip::giveMasterAndGetTim
}
if (master->parent().get_int("_loaded") == 1) {
// we already have a clip that shares the same master
if (state != PlaylistState::Disabled || timeWarp) {
// In that case, we must create copies
std::shared_ptr<Mlt::Producer> prod(getTimelineProducer(-1, clipId, state, speed)->cut(in, out));
std::shared_ptr<Mlt::Producer> prod(getTimelineProducer(tid, clipId, state, master->parent().get_int("audio_index"), speed)->cut(in, out));
return {prod, false};
}
if (state == PlaylistState::Disabled) {
......@@ -704,16 +716,21 @@ std::pair<std::shared_ptr<Mlt::Producer>, bool> ProjectClip::giveMasterAndGetTim
return {master, true};
}
if (state == PlaylistState::AudioOnly) {
m_audioProducers[clipId] = std::make_shared<Mlt::Producer>(&master->parent());
m_effectStack->loadService(m_audioProducers[clipId]);
int producerId = tid;
int audioStream = master->parent().get_int("audio_index");
if (audioStream > -1) {
producerId += 100 * audioStream;
}
m_audioProducers[tid] = std::make_shared<Mlt::Producer>(&master->parent());
m_effectStack->loadService(m_audioProducers[tid]);
return {master, true};
}
if (state == PlaylistState::VideoOnly) {
// good, we found a master video producer, and we didn't have any
if (m_clipType != ClipType::Color && m_clipType != ClipType::Image && m_clipType != ClipType::Text) {
// Color, image and text clips always use master producer in timeline
m_videoProducers[clipId] = std::make_shared<Mlt::Producer>(&master->parent());
m_effectStack->loadService(m_videoProducers[clipId]);
m_videoProducers[tid] = std::make_shared<Mlt::Producer>(&master->parent());
m_effectStack->loadService(m_videoProducers[tid]);
}
return {master, true};
}
......@@ -733,7 +750,7 @@ std::pair<std::shared_ptr<Mlt::Producer>, bool> ProjectClip::giveMasterAndGetTim
if (QString::fromUtf8(master->parent().get("mlt_service")) == QLatin1String("timewarp")) {
speed = master->get_double("warp_speed");
}
return {getTimelineProducer(-1, clipId, state, speed), false};
return {getTimelineProducer(-1, clipId, state, master->get_int("audio_index"), speed), false};
}
// we have a problem
return {std::shared_ptr<Mlt::Producer>(ClipController::mediaUnavailable->cut()), false};
......@@ -1082,9 +1099,9 @@ void ProjectClip::setProperties(const QMap<QString, QString> &properties, bool r
}
} else {
if (audioStreamChanged) {
discardAudioThumb();
refreshAudioInfo();
audioThumbReady();
pCore->bin()->reloadMonitorStreamIfActive(clipId());
pCore->jobManager()->startJob<AudioThumbJob>({clipId()}, -1, QString());
refreshPanel = true;
}
}
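For context (an assumption, not shown in this hunk): audioStreamChanged is the path taken when the producer's audio_index property is edited, roughly:

QMap<QString, QString> props;
props.insert(QStringLiteral("audio_index"), QStringLiteral("2")); // switch the producer to stream index 2
clip->setProperties(props);
// the branch above then refreshes the audio info, reloads the monitor stream if
// it is showing this clip, and queues a new AudioThumbJob for the selected stream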
......@@ -1223,18 +1240,40 @@ int ProjectClip::audioChannels() const
void ProjectClip::discardAudioThumb()
{
QString audioThumbPath = getAudioThumbPath();
if (!m_audioInfo) {
return;
}
QString audioThumbPath = getAudioThumbPath(audioInfo()->ffmpeg_audio_index());
if (!audioThumbPath.isEmpty()) {
QFile::remove(audioThumbPath);
}
audioFrameCache.clear();
qCDebug(KDENLIVE_LOG) << "//////////////////// DISCARD AUIIO THUMBNS";
qCDebug(KDENLIVE_LOG) << "//////////////////// DISCARD AUDIO THUMBS";
m_audioThumbCreated = false;
refreshAudioInfo();
pCore->jobManager()->discardJobs(clipId(), AbstractClipJob::AUDIOTHUMBJOB);
}
const QString ProjectClip::getAudioThumbPath(bool miniThumb)
int ProjectClip::getAudioStreamFfmpegIndex(int mltStream)
{
if (!m_masterProducer) {
return -1;
}
int streams = m_masterProducer->get_int("meta.media.nb_streams");
QList<int> audioStreams;
for (int i = 0; i < streams; ++i) {
QByteArray propertyName = QStringLiteral("meta.media.%1.stream.type").arg(i).toLocal8Bit();
QString type = m_masterProducer->get(propertyName.data());
if (type == QLatin1String("audio")) {
audioStreams << i;
}
}
if (audioStreams.count() > 1 && mltStream < audioStreams.count()) {
return audioStreams.indexOf(mltStream);
}
return -1;
}
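The helper above converts MLT's absolute stream index (the audio_index property counts every stream in the container) into the ordinal FFmpeg expects in an a:N stream specifier (which counts audio streams only). A usage sketch, with width and height standing in for the real thumbnail size (illustration, not part of the patch):

int ffIndex = getAudioStreamFfmpegIndex(m_masterProducer->get_int("audio_index"));
const QString filter = ffIndex >= 0
    ? QString("[a:%1]showwavespic=s=%2x%3").arg(ffIndex).arg(width).arg(height)
    : QString("[a]showwavespic=s=%1x%2").arg(width).arg(height); // single audio stream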
const QString ProjectClip::getAudioThumbPath(int stream, bool miniThumb)
{
if (audioInfo() == nullptr && !miniThumb) {
return QString();
......@@ -1249,14 +1288,11 @@ const QString ProjectClip::getAudioThumbPath(bool miniThumb)
return QString();
}
QString audioPath = thumbFolder.absoluteFilePath(clipHash);
audioPath.append(QLatin1Char('_') + QString::number(stream));
if (miniThumb) {
audioPath.append(QStringLiteral(".png"));
return audioPath;
}
int audioStream = audioInfo()->ffmpeg_audio_index();
if (audioStream > 0) {
audioPath.append(QLatin1Char('_') + QString::number(audioInfo()->audio_index()));
}
int roundedFps = (int)pCore->getCurrentFps();
audioPath.append(QStringLiteral("_%1_audio.png").arg(roundedFps));
return audioPath;
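With the stream number appended, cached waveform files become per stream. For example (illustrative values), a clip hash of a1b2c3, stream 1 and a 25 fps project would give:

a1b2c3_1_25_audio.png  (audio levels image)
a1b2c3_1.png           (mini thumb used by the monitor overlay)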
......@@ -1489,3 +1525,31 @@ void ProjectClip::setRating(uint rating)
setProducerProperty(QStringLiteral("kdenlive:rating"), (int) rating);
pCore->currentDoc()->setModified(true);
}
QVector <uint8_t> ProjectClip::audioFrameCache(int stream)
{
QVector <uint8_t> audioLevels;
if (stream == -1) {
if (m_audioInfo) {
stream = m_audioInfo->ffmpeg_audio_index();
} else {
return audioLevels;
}
}
// convert cached image
const QString cachePath = getAudioThumbPath(stream);
// checking for cached thumbs
QImage image(cachePath);
int channels = m_audioInfo->channels();
if (!image.isNull()) {
int n = image.width() * image.height();
for (int i = 0; i < n; i++) {
QRgb p = image.pixel(i / channels, i % channels);
audioLevels << qRed(p);
audioLevels << qGreen(p);
audioLevels << qBlue(p);
audioLevels << qAlpha(p);
}
}
return audioLevels;
}
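A usage sketch (hypothetical call site, not part of the patch): per-stream levels are now decoded on demand from the cached image rather than kept in a member, and an empty vector means the waveform for that stream has not been generated yet.

QVector<uint8_t> levels = clip->audioFrameCache(1); // stream 1; -1 falls back to the clip's active stream
if (levels.isEmpty()) {
    // no cached waveform for this stream yet; an AudioThumbJob will create it and
    // signal updateAudioThumbnail() when done
}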
......@@ -171,7 +171,6 @@ public:
/** Cache for every audio Frame with 10 Bytes */
/** format is frame -> channel ->bytes */
QVector<uint8_t> audioFrameCache;
bool audioThumbCreated() const;
void setWaitingStatus(const QString &id);
......@@ -188,7 +187,7 @@ public:
/** @brief Delete cached audio thumb - needs to be recreated */
void discardAudioThumb();
/** @brief Get path for this clip's audio thumbnail */
const QString getAudioThumbPath(bool miniThumb = false);
const QString getAudioThumbPath(int stream, bool miniThumb = false);
/** @brief Returns true if this producer has audio and can be split on the timeline*/
bool isSplittable() const;
......@@ -201,7 +200,7 @@ public:
/** @brief This function returns a cut to the master producer associated to the timeline clip with given ID.
Each clip must have a different master producer (see comment of the class)
*/
std::shared_ptr<Mlt::Producer> getTimelineProducer(int trackId, int clipId, PlaylistState::ClipState st, double speed = 1.0);
std::shared_ptr<Mlt::Producer> getTimelineProducer(int trackId, int clipId, PlaylistState::ClipState st, int audioStream = -1, double speed = 1.0);
/* @brief This function should only be used at loading. It takes a producer that was read from mlt, and checks whether the master producer is already in
use. If yes, then we must create a new one, because of the mixing bug. In any case, we return a cut of the master that can be used in the timeline. The
......@@ -209,8 +208,7 @@ public:
- if true, then the returned cut still possibly has effect on it. You need to rebuild the effectStack based on this
- if false, then the returned cut doesn't have effects anymore (it's a fresh one), so you need to reload effects from the old producer
*/
std::pair<std::shared_ptr<Mlt::Producer>, bool> giveMasterAndGetTimelineProducer(int clipId, std::shared_ptr<Mlt::Producer> master,
PlaylistState::ClipState state);
std::pair<std::shared_ptr<Mlt::Producer>, bool> giveMasterAndGetTimelineProducer(int clipId, std::shared_ptr<Mlt::Producer> master, PlaylistState::ClipState state, int tid);
std::shared_ptr<Mlt::Producer> cloneProducer(bool removeEffects = false);
static std::shared_ptr<Mlt::Producer> cloneProducer(const std::shared_ptr<Mlt::Producer> &producer);
......@@ -225,6 +223,12 @@ public:
/** @brief Display Bin thumbnail given a percent
*/
void getThumbFromPercent(int percent);
/** @brief Return audio cache for a stream
*/
QVector <uint8_t> audioFrameCache(int stream = -1);
/** @brief Return FFmpeg's audio stream index for an MLT audio stream index
*/
int getAudioStreamFfmpegIndex(int mltStream);
protected:
friend class ClipModel;
......@@ -250,7 +254,7 @@ protected:
public slots:
/* @brief Store the audio thumbnails once computed. Note that the parameter is a value and not a reference, feel free to use it as a sink (use std::move to
* avoid copy). */
void updateAudioThumbnail(const QVector<uint8_t> audioLevels);
void updateAudioThumbnail();
/** @brief Delete the proxy file */
void deleteProxy();
......
......@@ -390,16 +390,16 @@ std::shared_ptr<ProjectClip> ProjectItemModel::getClipByBinID(const QString &bin
return nullptr;
}
const QVector<uint8_t> ProjectItemModel::getAudioLevelsByBinID(const QString &binId)
const QVector<uint8_t> ProjectItemModel::getAudioLevelsByBinID(const QString &binId, int stream)
{
READ_LOCK();
if (binId.contains(QLatin1Char('_'))) {
return getAudioLevelsByBinID(binId.section(QLatin1Char('_'), 0, 0));
return getAudioLevelsByBinID(binId.section(QLatin1Char('_'), 0, 0), stream);
}
for (const auto &clip : m_allItems) {
auto c = std::static_pointer_cast<AbstractProjectItem>(clip.second.lock());
if (c->itemType() == AbstractProjectItem::ClipItem && c->clipId() == binId) {
return std::static_pointer_cast<ProjectClip>(c)->audioFrameCache;
return std::static_pointer_cast<ProjectClip>(c)->audioFrameCache(stream);
}
}
return QVector<uint8_t>();
......
......@@ -68,7 +68,7 @@ public:
/** @brief Returns a clip from the hierarchy, given its id */
std::shared_ptr<ProjectClip> getClipByBinID(const QString &binId);
/** @brief Returns audio levels for a clip from its id */
const QVector <uint8_t>getAudioLevelsByBinID(const QString &binId);
const QVector <uint8_t>getAudioLevelsByBinID(const QString &binId, int stream);
/** @brief Returns a list of clips using the given url */
QStringList getClipByUrl(const QFileInfo &url) const;
......
......@@ -118,13 +118,21 @@ bool AudioThumbJob::computeWithFFMPEG()
filePath = m_prod->get("resource");
}
m_ffmpegProcess.reset(new QProcess);
if (!m_thumbInCache) {
QString thumbPath = m_binClip->getAudioThumbPath(m_audioStream, true);
int audioStreamIndex = m_binClip->getAudioStreamFfmpegIndex(m_audioStream);
if (!QFile::exists(thumbPath)) {
// Generate thumbnail used in monitor overlay
QStringList args;
args << QStringLiteral("-hide_banner") << QStringLiteral("-y")<< QStringLiteral("-i") << QUrl::fromLocalFile(filePath).toLocalFile() << QStringLiteral("-filter_complex:a");
args << QString("showwavespic=s=%1x%2:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(m_thumbSize.width()).arg(m_thumbSize.height());
args << QStringLiteral("-hide_banner") << QStringLiteral("-y")<< QStringLiteral("-i") << QUrl::fromLocalFile(filePath).toLocalFile() << QString("-filter_complex");
if (audioStreamIndex >= 0) {
args << QString("[a:%1]showwavespic=s=%2x%3:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(m_binClip->getAudioStreamFfmpegIndex(m_audioStream)).arg(m_thumbSize.width()).arg(m_thumbSize.height());
} else {
// Only 1 audio stream in clip
args << QString("[a]showwavespic=s=%2x%3:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(m_thumbSize.width()).arg(m_thumbSize.height());
}
args << QStringLiteral("-frames:v") << QStringLiteral("1");
args << m_binClip->getAudioThumbPath(true);
args << thumbPath;
qDebug()<<"=== FFARGS: "<<args;
connect(m_ffmpegProcess.get(), &QProcess::readyReadStandardOutput, this, &AudioThumbJob::updateFfmpegProgress, Qt::UniqueConnection);
connect(this, &AudioThumbJob::jobCanceled, [&] () {
if (m_ffmpegProcess) {
......@@ -137,14 +145,9 @@ bool AudioThumbJob::computeWithFFMPEG()
m_ffmpegProcess->start(KdenliveSettings::ffmpegpath(), args);
m_ffmpegProcess->waitForFinished(-1);
if (m_ffmpegProcess->exitStatus() != QProcess::CrashExit) {
m_thumbInCache = true;
if (m_dataInCache || !KdenliveSettings::audiothumbnails()) {
m_binClip->audioThumbReady();
m_done = true;
return true;
} else {
// Next Processing step can be long, already display audio thumb in monitor
m_binClip->audioThumbReady();
}
}
}
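For a clip whose selected stream maps to FFmpeg audio stream 1, the overlay-thumbnail pass above would roughly run (paths and size are illustrative):

ffmpeg -hide_banner -y -i clip.mkv -filter_complex "[a:1]showwavespic=s=640x100:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd" -frames:v 1 a1b2c3_1.png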
......@@ -170,17 +173,20 @@ bool AudioThumbJob::computeWithFFMPEG()
args << QStringLiteral("/dev/stdout");
#endif
bool isFFmpeg = KdenliveSettings::ffmpegpath().contains(QLatin1String("ffmpeg"));
args << QStringLiteral("-filter_complex:a");
args << QStringLiteral("-filter_complex");
if (m_channels == 1) {
//TODO: this does not correctly generate the stream data
args << QStringLiteral("aformat=channel_layouts=mono,%1=100").arg(isFFmpeg ? "aresample=async" : "sample_rates");
args << QStringLiteral("-map") << QStringLiteral("0:a%1").arg(m_audioStream > 0 ? ":" + QString::number(m_audioStream) : QString())
<< QStringLiteral("-c:a") << QStringLiteral("pcm_s16le") << QStringLiteral("-frames:v")
if (audioStreamIndex >= 0) {
args << QStringLiteral("[a:%1]aformat=channel_layouts=mono,%2=100").arg(audioStreamIndex).arg(isFFmpeg ? "aresample=async" : "sample_rates");
} else {
args << QStringLiteral("[a]aformat=channel_layouts=mono,%2=100").arg(audioStreamIndex).arg(isFFmpeg ? "aresample=async" : "sample_rates");
}
/*args << QStringLiteral("-map") << QStringLiteral("0:a%1").arg(m_audioStream > 0 ? ":" + QString::number(audioStreamIndex) : QString())*/
args << QStringLiteral("-c:a") << QStringLiteral("pcm_s16le") << QStringLiteral("-frames:v")
<< QStringLiteral("1") << QStringLiteral("-y") << QStringLiteral("-f") << QStringLiteral("data")
<< channelFiles[0]->fileName();
} else {
QString aformat = QStringLiteral("[0:a%1]%2=100,channelsplit=channel_layout=%3")
.arg(m_audioStream > 0 ? ":" + QString::number(m_audioStream) : QString())
.arg(audioStreamIndex >= 0 ? ":" + QString::number(audioStreamIndex) : QString())
.arg(isFFmpeg ? "aresample=async" : "aformat=sample_rates")
.arg(m_channels > 2 ? "5.1" : "stereo");
for (int i = 0; i < m_channels; ++i) {
......@@ -300,7 +306,6 @@ bool AudioThumbJob::startJob()
return true;
}
m_dataInCache = false;
m_thumbInCache = false;
m_binClip = pCore->projectItemModel()->getClipByBinID(m_clipId);
if (m_binClip == nullptr) {
// Clip was deleted
......@@ -322,70 +327,58 @@ bool AudioThumbJob::startJob()
m_channels = m_channels <= 0 ? 2 : m_channels;
m_lengthInFrames = m_prod->get_length();
m_audioStream = m_binClip->audioInfo()->ffmpeg_audio_index();
QMap <int, QString> streams = m_binClip->audioInfo()->streams();
if ((m_prod == nullptr) || !m_prod->is_valid()) {
m_errorMessage.append(i18n("Audio thumbs: cannot open project file %1", m_binClip->url()));
m_done = true;
m_successful = false;
return false;
}
m_cachePath = m_binClip->getAudioThumbPath();
QMapIterator<int, QString> st(streams);
while (st.hasNext()) {
st.next();
int stream = st.key();
// Generate one thumb per stream
m_audioStream = stream;
m_cachePath = m_binClip->getAudioThumbPath(stream);
// checking for cached thumbs
QImage image(m_cachePath);
if (!image.isNull()) {
// convert cached image
int n = image.width() * image.height();
for (int i = 0; i < n; i++) {
QRgb p = image.pixel(i / m_channels, i % m_channels);
m_audioLevels << (uint8_t)qRed(p);
m_audioLevels << (uint8_t)qGreen(p);
m_audioLevels << (uint8_t)qBlue(p);
m_audioLevels << (uint8_t)qAlpha(p);
// checking for cached thumbs
QImage image(m_cachePath);
if (!image.isNull()) {
// Audio cache already exists
continue;
}
}
if (!m_audioLevels.isEmpty()) {
m_dataInCache = true;
}
// Check audio thumbnail image
if (ThumbnailCache::get()->hasThumbnail(m_clipId, -1, false)) {
m_thumbInCache = true;
}
if (m_thumbInCache && (m_dataInCache || !KdenliveSettings::audiothumbnails())) {
m_done = true;
m_successful = true;
return true;
}
m_done = false;
bool ok = m_binClip->clipType() == ClipType::Playlist ? (KdenliveSettings::audiothumbnails() ? false : true) : computeWithFFMPEG();
ok = ok ? ok : computeWithMlt();
Q_ASSERT(ok == m_done);
bool ok = m_binClip->clipType() == ClipType::Playlist ? (KdenliveSettings::audiothumbnails() ? false : true) : computeWithFFMPEG();
ok = ok ? ok : computeWithMlt();
Q_ASSERT(ok == m_done);
if (ok && m_done && !m_dataInCache && !m_audioLevels.isEmpty()) {
// Put into an image for caching.
int count = m_audioLevels.size();
image = QImage((int)lrint((count + 3) / 4.0 / m_channels), m_channels, QImage::Format_ARGB32);
int n = image.width() * image.height();
for (int i = 0; i < n; i++) {
QRgb p;
if ((4 * i + 3) < count) {
p = qRgba(m_audioLevels.at(4 * i), m_audioLevels.at(4 * i + 1), m_audioLevels.at(4 * i + 2),
if (ok && m_done && !m_audioLevels.isEmpty()) {
// Put into an image for caching.
int count = m_audioLevels.size();
image = QImage((int)lrint((count + 3) / 4.0 / m_channels), m_channels, QImage::Format_ARGB32);
int n = image.width() * image.height();
for (int i = 0; i < n; i++) {
QRgb p;
if ((4 * i + 3) < count) {
p = qRgba(m_audioLevels.at(4 * i), m_audioLevels.at(4 * i + 1), m_audioLevels.at(4 * i + 2),
m_audioLevels.at(4 * i + 3));
} else {
int last = m_audioLevels.last();
int r = (4 * i + 0) < count ? m_audioLevels.at(4 * i + 0) : last;
int g = (4 * i + 1) < count ? m_audioLevels.at(4 * i + 1) : last;
int b = (4 * i + 2) < count ? m_audioLevels.at(4 * i + 2) : last;
int a = last;
p = qRgba(r, g, b, a);
} else {
int last = m_audioLevels.last();
int r = (4 * i + 0) < count ? m_audioLevels.at(4 * i + 0) : last;
int g = (4 * i + 1) < count ? m_audioLevels.at(4 * i + 1) : last;
int b = (4 * i + 2) < count ? m_audioLevels.at(4 * i + 2) : last;
int a = last;
p = qRgba(r, g, b, a);
}
image.setPixel(i / m_channels, i % m_channels, p);
}
image.setPixel(i / m_channels, i % m_channels, p);
image.save(m_cachePath);
}
image.save(m_cachePath);
m_successful = true;
return true;
} else if (ok && m_thumbInCache && (m_done || !KdenliveSettings::audiothumbnails())) {
m_audioLevels.clear();
}
if (m_done || !KdenliveSettings::audiothumbnails()) {
m_successful = true;
return true;
}
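The caching step above is the inverse of ProjectClip::audioFrameCache(): four consecutive level bytes are packed into one ARGB pixel, with one image row per channel, so the per-stream PNG can be read straight back into a levels vector. A condensed sketch of the layout (illustration only):

// columns = ceil(count / 4 / channels), rows = channels
// pixel(i / channels, i % channels) == qRgba(levels[4*i], levels[4*i+1], levels[4*i+2], levels[4*i+3])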
......@@ -406,7 +399,6 @@ bool AudioThumbJob::commitResult(Fun &undo, Fun &redo)
if (!m_successful) {
return false;
}
QVector <uint8_t>old = m_binClip->audioFrameCache;
QImage oldImage;
QImage result;
if (m_binClip->clipType() == ClipType::Audio) {
......@@ -415,17 +407,15 @@ bool AudioThumbJob::commitResult(Fun &undo, Fun &redo)
}
// note that the image is moved into lambda, it won't be available from this class anymore
auto operation = [clip = m_binClip, audio = std::move(m_audioLevels), image = std::move(result)]() {
if (!audio.isEmpty()) {
clip->updateAudioThumbnail(audio);
}
auto operation = [clip = m_binClip, image = std::move(result)]() {
clip->updateAudioThumbnail();
if (!image.isNull() && clip->clipType() == ClipType::Audio) {
clip->setThumbnail(image);
}
return true;
};
auto reverse = [clip = m_binClip, audio = std::move(old), image = std::move(oldImage)]() {
clip->updateAudioThumbnail(audio);
auto reverse = [clip = m_binClip, image = std::move(oldImage)]() {
clip->updateAudioThumbnail();
if (!image.isNull() && clip->clipType() == ClipType::Audio) {
clip->setThumbnail(image);
}
......
......@@ -24,12 +24,42 @@ AudioStreamInfo::AudioStreamInfo(const std::shared_ptr<Mlt::Producer> &producer,
{
// Fetch audio streams
int streams = producer->get_int("meta.media.nb_streams");
int streamIndex = 1;
for (int ix = 0; ix < streams; ix++) {
char property[200];
snprintf(property, sizeof(property), "meta.media.%d.stream.type", ix);
QString type = producer->get(property);
if (type == QLatin1String("audio")) {
m_audioStreams << ix;
memset(property, 0, 200);
snprintf(property, sizeof(property), "meta.media.%d.codec.channels", ix);
int chan = producer->get_int(property);
QString channelDescription = QString("%1|").arg(streamIndex++);
switch (chan) {
case 1:
channelDescription.append(i18n("Mono "));
break;
case 2:
channelDescription.append(i18n("Stereo "));
break;
default:
channelDescription.append(i18n("%1 channels ", chan));
break;
}
// Frequency
memset(property, 0, 200);
snprintf(property, sizeof(property), "meta.media.%d.codec.sample_rate", ix);
QString frequency(producer->get(property));
if (frequency.endsWith(QLatin1String("000"))) {
frequency.chop(3);
frequency.append(i18n("kHz "));
} else {
frequency.append(i18n("Hz "));
}
channelDescription.append(frequency);
memset(property, 0, 200);