Fix confusion in audio stream names/thumbnails

parent b11bf778
Pipeline #19684 passed in 9 minutes and 27 seconds
@@ -1255,19 +1255,11 @@ void ProjectClip::discardAudioThumb()
 int ProjectClip::getAudioStreamFfmpegIndex(int mltStream)
 {
-    if (!m_masterProducer) {
+    if (!m_masterProducer || !audioInfo()) {
         return -1;
     }
-    int streams = m_masterProducer->get_int("meta.media.nb_streams");
-    QList<int> audioStreams;
-    for (int i = 0; i < streams; ++i) {
-        QByteArray propertyName = QStringLiteral("meta.media.%1.stream.type").arg(i).toLocal8Bit();
-        QString type = m_masterProducer->get(propertyName.data());
-        if (type == QLatin1String("audio")) {
-            audioStreams << i;
-        }
-    }
-    if (audioStreams.count() > 1 && mltStream < audioStreams.count()) {
+    QList<int> audioStreams = audioInfo()->streams().keys();
+    if (audioStreams.contains(mltStream)) {
         return audioStreams.indexOf(mltStream);
     }
     return -1;
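The rewrite also pins down the index confusion the commit title refers to: MLT's audio_index is a container-wide stream number, while ffmpeg's "a:N" selector counts audio streams only, so the two coincide only by accident. A minimal sketch of the translation this function performs, assuming a hypothetical container with one video and two audio streams:

    // Hypothetical layout: stream 0 = video, streams 1 and 2 = audio.
    // audioInfo()->streams().keys() would then yield the MLT indexes {1, 2}.
    QList<int> audioStreams {1, 2};
    int mltStream = 2;                                 // second audio track
    int ffmpegIndex = audioStreams.indexOf(mltStream); // 1, i.e. selector "a:1"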
@@ -124,8 +124,8 @@ bool AudioThumbJob::computeWithFFMPEG()
     // Generate thumbnail used in monitor overlay
     QStringList args;
     args << QStringLiteral("-hide_banner") << QStringLiteral("-y") << QStringLiteral("-i") << QUrl::fromLocalFile(filePath).toLocalFile() << QString("-filter_complex");
-    if (audioStreamIndex >= 0) {
-        args << QString("[a:%1]showwavespic=s=%2x%3:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(m_binClip->getAudioStreamFfmpegIndex(m_audioStream)).arg(m_thumbSize.width()).arg(m_thumbSize.height());
+    if (m_audioStream >= 0) {
+        args << QString("[a:%1]showwavespic=s=%2x%3:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(audioStreamIndex).arg(m_thumbSize.width()).arg(m_thumbSize.height());
     } else {
         // Only 1 audio stream in clip
         args << QString("[a]showwavespic=s=%2x%3:split_channels=1:scale=cbrt:colors=0xffdddd|0xddffdd").arg(m_thumbSize.width()).arg(m_thumbSize.height());
@@ -144,6 +144,7 @@ bool AudioThumbJob::computeWithFFMPEG()
     });
     m_ffmpegProcess->start(KdenliveSettings::ffmpegpath(), args);
     m_ffmpegProcess->waitForFinished(-1);
+    disconnect(m_ffmpegProcess.get(), &QProcess::readyReadStandardOutput, this, &AudioThumbJob::updateFfmpegProgress);
     if (m_ffmpegProcess->exitStatus() != QProcess::CrashExit) {
         if (m_dataInCache || !KdenliveSettings::audiothumbnails()) {
             m_done = true;
@@ -175,17 +176,17 @@ bool AudioThumbJob::computeWithFFMPEG()
     bool isFFmpeg = KdenliveSettings::ffmpegpath().contains(QLatin1String("ffmpeg"));
     args << QStringLiteral("-filter_complex");
     if (m_channels == 1) {
-        if (audioStreamIndex >= 0) {
+        if (m_audioStream >= 0) {
             args << QStringLiteral("[a:%1]aformat=channel_layouts=mono,%2=100").arg(audioStreamIndex).arg(isFFmpeg ? "aresample=async" : "sample_rates");
         } else {
-            args << QStringLiteral("[a]aformat=channel_layouts=mono,%2=100").arg(audioStreamIndex).arg(isFFmpeg ? "aresample=async" : "sample_rates");
+            args << QStringLiteral("[a]aformat=channel_layouts=mono,%1=100").arg(isFFmpeg ? "aresample=async" : "sample_rates");
         }
         /*args << QStringLiteral("-map") << QStringLiteral("0:a%1").arg(m_audioStream > 0 ? ":" + QString::number(audioStreamIndex) : QString())*/
         args << QStringLiteral("-c:a") << QStringLiteral("pcm_s16le") << QStringLiteral("-frames:v")
              << QStringLiteral("1") << QStringLiteral("-y") << QStringLiteral("-f") << QStringLiteral("data")
              << channelFiles[0]->fileName();
     } else {
-        QString aformat = QStringLiteral("[0:a%1]%2=100,channelsplit=channel_layout=%3")
+        QString aformat = QStringLiteral("[a%1]%2=100,channelsplit=channel_layout=%3")
                               .arg(audioStreamIndex >= 0 ? ":" + QString::number(audioStreamIndex) : QString())
                               .arg(isFFmpeg ? "aresample=async" : "aformat=sample_rates")
                               .arg(m_channels > 2 ? "5.1" : "stereo");
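Two fixes land in this hunk. In the mono else-branch, the old format string only contained "%2", so Qt's arg() filled that placeholder with audioStreamIndex (negative in this branch) and the second arg() had nothing left to replace, yielding a broken filter spec along the lines of "[a]aformat=channel_layouts=mono,-1=100"; the corrected "%1" version substitutes the filter name directly. In the multichannel branch, the input label drops its "0:" prefix so it matches the "[a:N]" selector style used by the thumbnail pass. With a hypothetical audioStreamIndex of 1 and a stereo clip, the resulting graph head would be:

    [a:1]aresample=async=100,channelsplit=channel_layout=stereo       (ffmpeg)
    [a:1]aformat=sample_rates=100,channelsplit=channel_layout=stereo  (other binaries)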
@@ -210,6 +211,7 @@ bool AudioThumbJob::computeWithFFMPEG()
     });
     m_ffmpegProcess->start(KdenliveSettings::ffmpegpath(), args);
     m_ffmpegProcess->waitForFinished(-1);
+    disconnect(m_ffmpegProcess.get(), &QProcess::readyReadStandardOutput, this, &AudioThumbJob::updateFfmpegProgress);
     if (m_ffmpegProcess->exitStatus() != QProcess::CrashExit) {
         int dataSize = 0;
         std::vector<const qint16 *> rawChannels;
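Both ffmpeg passes now tear down the progress handler once the blocking wait returns, so a late readyReadStandardOutput emission can no longer drive updateFfmpegProgress for a job that has already moved past the progress stage. The assumed pairing (the connect side sits outside the visible hunks) would look like:

    // Sketch, assuming the connection is made just before each pass starts:
    connect(m_ffmpegProcess.get(), &QProcess::readyReadStandardOutput, this, &AudioThumbJob::updateFfmpegProgress);
    m_ffmpegProcess->start(KdenliveSettings::ffmpegpath(), args);
    m_ffmpegProcess->waitForFinished(-1);
    disconnect(m_ffmpegProcess.get(), &QProcess::readyReadStandardOutput, this, &AudioThumbJob::updateFfmpegProgress);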
@@ -566,6 +566,14 @@ int ClipModel::audioStream() const
     return -m_producer->parent().get_int("audio_index");
 }
 
+int ClipModel::audioStreamIndex() const
+{
+    READ_LOCK();
+    QVariantList list;
+    QList<int> streams = pCore->projectItemModel()->getClipByBinID(m_binClipId)->audioStreams().keys();
+    return streams.indexOf(m_producer->parent().get_int("audio_index")) + 1;
+}
+
 int ClipModel::fadeIn() const
 {
     return m_effectStack->getFadePosition(true);
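audioStreamIndex() deliberately returns a 1-based position rather than a raw stream number, and that position is what the timeline label displays. A worked example under assumed stream numbers:

    // Hypothetical bin clip with audio streams at MLT indexes {1, 5};
    // the timeline producer's "audio_index" is 5.
    // streams.indexOf(5) == 1, so audioStreamIndex() returns 2:
    // "second audio stream", independent of container numbering.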
@@ -194,7 +194,10 @@ protected:
     /** @brief Returns the number of audio channels for this clip */
     int audioChannels() const;
     /** @brief Returns the active audio stream for this clip (or -1 if we only have 1 stream) */
     int audioStream() const;
+    /** @brief Returns the 1-based position of the active audio stream among the bin clip's audio streams */
+    int audioStreamIndex() const;
     bool audioEnabled() const;
     bool isAudioOnly() const;
@@ -212,6 +212,7 @@ QHash<int, QByteArray> TimelineItemModel::roleNames() const
     roles[IsAudioRole] = "audio";
     roles[AudioChannelsRole] = "audioChannels";
     roles[AudioStreamRole] = "audioStream";
+    roles[AudioStreamIndexRole] = "audioStreamIndex";
     roles[IsCompositeRole] = "composite";
     roles[IsLockedRole] = "locked";
     roles[FadeInRole] = "fadeIn";
@@ -290,6 +291,8 @@ QVariant TimelineItemModel::data(const QModelIndex &index, int role) const
         return clip->audioChannels();
     case AudioStreamRole:
         return clip->audioStream();
+    case AudioStreamIndexRole:
+        return clip->audioStreamIndex();
     case HasAudio:
         return clip->audioEnabled();
     case IsAudioRole:
@@ -141,6 +141,7 @@ public:
        AudioLevelsRole, /// clip only
        AudioChannelsRole, /// clip only
        AudioStreamRole, /// clip only
+       AudioStreamIndexRole, /// clip only
        IsCompositeRole, /// track only
        IsLockedRole, /// track only
        HeightRole, /// track only
@@ -41,6 +41,7 @@ Rectangle {
     property bool isAudio: false
     property int audioChannels
     property int audioStream: -1
+    property int aStreamIndex: 0
     property bool showKeyframes: false
     property bool isGrabbed: false
     property bool grouped: false
property bool grouped: false
@@ -583,7 +584,7 @@ Rectangle {
     Text {
         // Clip name text
         id: label
-        property string clipNameString: (clipRoot.isAudio && clipRoot.audioStream > -1) ? ((clipRoot.audioStream > 10000 ? 'Merged' : clipRoot.audioStream) + '|' + clipName ) : clipName
+        property string clipNameString: (clipRoot.isAudio && clipRoot.audioStream > -1) ? ((clipRoot.audioStream > 10000 ? 'Merged' : clipRoot.aStreamIndex) + '|' + clipName ) : clipName
         text: (clipRoot.speed != 1.0 ? ('[' + Math.round(clipRoot.speed*100) + '%] ') : '') + clipNameString
         font: miniFont
         anchors {
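The visible effect of the new role: the caption prefix is now the stream's ordinal position instead of its raw MLT index, while the 'Merged' marker for virtual merged streams (indexes above 10000) is unchanged. For an illustrative container whose audio streams sit at indexes 2 and 3 behind a video and a data stream, clips that previously read '2|clip.mkv' and '3|clip.mkv' now read '1|clip.mkv' and '2|clip.mkv'.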
@@ -235,6 +235,7 @@ Item{
            item.itemType = model.clipType
            item.audioChannels = model.audioChannels
            item.audioStream = model.audioStream
+            item.aStreamIndex = model.audioStreamIndex
            console.log('loaded clip with Astream: ', model.audioStream)
            // Speed change triggers a new clip insert so no binding necessary
            item.speed = model.speed