Commit 3f267699 authored by Jean-Baptiste Mardelle's avatar Jean-Baptiste Mardelle

* Fix audio thumbs normalization (https://phabricator.kde.org/D26355)

* Use uint8_t to store audio thumbs (saves memory)
* Fix disappearing audio thumbs on some zoom levels
* Improve target colors (related to !71)
parent 2a03208c
Pipeline #12941 passed with stage
in 21 minutes and 19 seconds
......@@ -197,7 +197,7 @@ QString ProjectClip::getXmlProperty(const QDomElement &producer, const QString &
return value;
}
void ProjectClip::updateAudioThumbnail(const QVector<double> audioLevels)
void ProjectClip::updateAudioThumbnail(const QVector<uint8_t> audioLevels)
{
audioFrameCache = audioLevels;
m_audioThumbCreated = true;
......
......@@ -171,7 +171,7 @@ public:
/** Cache for every audio Frame with 10 Bytes */
/** format is frame -> channel ->bytes */
QVector<double> audioFrameCache;
QVector<uint8_t> audioFrameCache;
bool audioThumbCreated() const;
void setWaitingStatus(const QString &id);
......@@ -247,7 +247,7 @@ protected:
public slots:
/* @brief Store the audio thumbnails once computed. Note that the parameter is a value and not a reference, feel free to use it as a sink (use std::move to
 * avoid copy). */
void updateAudioThumbnail(const QVector<double> audioLevels);
void updateAudioThumbnail(const QVector<uint8_t> audioLevels);
/** @brief Delete the proxy file */
void deleteProxy();
......
......@@ -384,7 +384,7 @@ std::shared_ptr<ProjectClip> ProjectItemModel::getClipByBinID(const QString &bin
return nullptr;
}
const QVector<double> ProjectItemModel::getAudioLevelsByBinID(const QString &binId)
const QVector<uint8_t> ProjectItemModel::getAudioLevelsByBinID(const QString &binId)
{
READ_LOCK();
if (binId.contains(QLatin1Char('_'))) {
......@@ -396,7 +396,7 @@ const QVector<double> ProjectItemModel::getAudioLevelsByBinID(const QString &bin
return std::static_pointer_cast<ProjectClip>(c)->audioFrameCache;
}
}
return QVector<double>();
return QVector<uint8_t>();
}
bool ProjectItemModel::hasClip(const QString &binId)
......
......@@ -68,7 +68,7 @@ public:
/** @brief Returns a clip from the hierarchy, given its id */
std::shared_ptr<ProjectClip> getClipByBinID(const QString &binId);
/** @brief Returns audio levels for a clip from its id */
const QVector <double>getAudioLevelsByBinID(const QString &binId);
const QVector <uint8_t>getAudioLevelsByBinID(const QString &binId);
/** @brief Returns a list of clips using the given url */
QStringList getClipByUrl(const QFileInfo &url) const;
......
......@@ -79,7 +79,8 @@ bool AudioThumbJob::computeWithMlt()
for (int i = 0; i < m_channels; i++) {
keys << "meta.media.audio_level." + QString::number(i);
}
double maxLevel = 1;
QVector <double> mltLevels;
for (int z = 0; z < m_lengthInFrames; ++z) {
int val = (int)(100.0 * z / m_lengthInFrames);
if (last_val != val) {
......@@ -91,15 +92,21 @@ bool AudioThumbJob::computeWithMlt()
int samples = mlt_sample_calculator(float(framesPerSecond), m_frequency, z);
mltFrame->get_audio(audioFormat, m_frequency, m_channels, samples);
for (int channel = 0; channel < m_channels; ++channel) {
double level = 256 * qMin(mltFrame->get_double(keys.at(channel).toUtf8().constData()) * 0.9, 1.0);
m_audioLevels << level;
double lev = mltFrame->get_double(keys.at(channel).toUtf8().constData());
mltLevels << lev;
maxLevel = qMax(lev, maxLevel);
}
} else if (!m_audioLevels.isEmpty()) {
} else if (!mltLevels.isEmpty()) {
for (int channel = 0; channel < m_channels; channel++) {
m_audioLevels << m_audioLevels.last();
mltLevels << mltLevels.last();
}
}
}
// Normalize
for (double &v : mltLevels) {
m_audioLevels << 255 * v / maxLevel;
}
m_done = true;
return true;
}
......@@ -170,7 +177,7 @@ bool AudioThumbJob::computeWithFFMPEG()
} else {
QString aformat = QStringLiteral("[0:a%1]%2=100,channelsplit=channel_layout=%3")
.arg(m_audioStream > 0 ? ":" + QString::number(m_audioStream) : QString())
.arg(isFFmpeg ? "aresample=async" : "aformat=sample_rates=")
.arg(isFFmpeg ? "aresample=async" : "aformat=sample_rates")
.arg(m_channels > 2 ? "5.1" : "stereo");
for (int i = 0; i < m_channels; ++i) {
aformat.append(QStringLiteral("[0:%1]").arg(i));
......@@ -220,7 +227,8 @@ bool AudioThumbJob::computeWithFFMPEG()
} else if (offset > 250) {
intraOffset = offset / 10;
}
double factor = 800.0 / 32768;
long maxLevel = 1;
QVector <long> ffmpegLevels;
for (int i = 0; i < m_lengthInFrames; i++) {
channelsData.resize((size_t)rawChannels.size());
std::fill(channelsData.begin(), channelsData.end(), 0);
......@@ -229,14 +237,15 @@ bool AudioThumbJob::computeWithFFMPEG()
for (int j = 0; j < (int)offset && (pos + j < dataSize); j += intraOffset) {
steps++;
for (size_t k = 0; k < rawChannels.size(); k++) {
channelsData[k] += abs(rawChannels[k][pos + j]);
channelsData[k] += abs(rawChannels[k][pos + j]);
}
}
for (long &k : channelsData) {
if (steps != 0) {
k /= steps;
}
m_audioLevels << (int)((double)k * factor);
maxLevel = qMax(k, maxLevel);
ffmpegLevels << k;
}
int p = 80 + (i * 20 / m_lengthInFrames);
if (p != progress) {
......@@ -244,6 +253,9 @@ bool AudioThumbJob::computeWithFFMPEG()
progress = p;
}
}
for (long &v : ffmpegLevels) {
m_audioLevels << 255 * (double) v / maxLevel;
}
m_done = true;
return true;
}
......@@ -378,7 +390,7 @@ bool AudioThumbJob::commitResult(Fun &undo, Fun &redo)
if (!m_successful) {
return false;
}
QVector <double>old = m_binClip->audioFrameCache;
QVector <uint8_t>old = m_binClip->audioFrameCache;
QImage oldImage = m_binClip->thumbnail(m_thumbSize.width(), m_thumbSize.height()).toImage();
QImage result = ThumbnailCache::get()->getAudioThumbnail(m_clipId);
......
......@@ -72,6 +72,6 @@ private:
bool m_done{false}, m_successful{false};
int m_channels, m_frequency, m_lengthInFrames, m_audioStream;
QVector <double>m_audioLevels;
QVector <uint8_t>m_audioLevels;
std::unique_ptr<QProcess> m_ffmpegProcess;
};
......@@ -26,7 +26,7 @@ import com.enums 1.0
Rectangle {
id: clipRoot
property real timeScale: 1.0
property real timeScale: 1
property string clipName: ''
property string clipResource: ''
property string mltService: ''
......@@ -70,10 +70,10 @@ Rectangle {
property color borderColor: 'black'
property bool forceReloadThumb
property bool isComposition: false
property bool hideClipViews
property bool hideClipViews: false
property var groupTrimData
property int scrollStart: scrollView.flickableItem.contentX - clipRoot.modelStart * timeline.scaleFactor
width : clipDuration * timeScale;
property int scrollStart: scrollView.flickableItem.contentX - (clipRoot.modelStart * timeline.scaleFactor)
width : clipDuration * timeScale
opacity: dragProxyArea.drag.active && dragProxy.draggedItem == clipId ? 0.8 : 1.0
signal trimmingIn(var clip, real newDuration, var mouse, bool shiftTrim, bool controlTrim)
......@@ -83,7 +83,7 @@ Rectangle {
signal trimmedOut(var clip, bool shiftTrim, bool controlTrim)
onScrollStartChanged: {
clipRoot.hideClipViews = scrollStart > width || scrollStart + scrollView.viewport.width < 0
clipRoot.hideClipViews = scrollStart > (clipDuration * timeline.scaleFactor) || scrollStart + scrollView.viewport.width < 0
}
onIsGrabbedChanged: {
......@@ -143,6 +143,7 @@ Rectangle {
onModelStartChanged: {
x = modelStart * timeScale;
}
onFakePositionChanged: {
x = fakePosition * timeScale;
}
......
......@@ -43,7 +43,7 @@ Row {
channels: clipRoot.audioChannels
binId: clipRoot.binId
isFirstChunk: index == 0
showItem: waveform.visible && (index * waveform.maxWidth < clipRoot.scrollStart + scrollView.viewport.width) && (index * waveform.maxWidth + width > clipRoot.scrollStart)
showItem: waveform.visible && (index * waveform.maxWidth < (clipRoot.scrollStart + scrollView.viewport.width)) && ((index * waveform.maxWidth + width) > clipRoot.scrollStart)
format: timeline.audioThumbFormat
waveInPoint: clipRoot.speed < 0 ? (Math.round(clipRoot.outPoint - (index * waveform.maxWidth / clipRoot.timeScale) * Math.abs(clipRoot.speed)) * channels) : (Math.round(clipRoot.inPoint + (index * waveform.maxWidth / clipRoot.timeScale) * clipRoot.speed) * channels)
waveOutPoint: clipRoot.speed < 0 ? (waveInPoint - Math.ceil(width / clipRoot.timeScale * Math.abs(clipRoot.speed)) * channels) : (waveInPoint + Math.round(width / clipRoot.timeScale * clipRoot.speed) * channels)
......
......@@ -28,7 +28,7 @@ import 'Timeline.js' as Logic
Item {
id: compositionRoot
property real timeScale: 1.0
property real timeScale: 1
property string clipName: ''
property string clipResource: ''
property string mltService: ''
......
......@@ -272,7 +272,7 @@ Column{
if (controlTrim) {
newDuration = controller.requestItemSpeedChange(clip.clipId, newDuration, false, root.snapping)
speedController.x = clip.x + clip.width - newDuration * trackRoot.timeScale
speedController.width = newDuration * trackRoot.timeScale
speedController.width = newDuration * root.timeScale
speedController.lastValidDuration = newDuration
speedController.speedText = (100 * clip.originalDuration * clip.speed / speedController.lastValidDuration).toFixed(2) + '%'
speedController.visible = true
......
......@@ -103,7 +103,7 @@ Rectangle {
}
ColumnLayout {
id: targetColumn
width: trackTagLabel.width * .3
width: trackTagLabel.width * .5
height: trackHeadRoot.height
Item {
width: parent.width
......@@ -161,7 +161,7 @@ Rectangle {
when: (trackHeadRoot.isAudio && trackHeadRoot.trackId == timeline.audioTarget) || (!trackHeadRoot.isAudio && trackHeadRoot.trackId == timeline.videoTarget)
PropertyChanges {
target: trackTarget
color: 'green'
color: timeline.targetColor
}
},
State {
......@@ -199,11 +199,11 @@ Rectangle {
anchors.topMargin: 0
RowLayout {
spacing: 0
Layout.leftMargin: 2
Layout.leftMargin: 1
ToolButton {
id: expandButton
implicitHeight: root.baseUnit * 2
implicitWidth: root.baseUnit * 2
implicitHeight: trackHeadRoot.iconSize
implicitWidth: trackHeadRoot.iconSize
iconName: trackHeadRoot.collapsed ? 'arrow-right' : 'arrow-down'
onClicked: {
trackHeadRoot.myTrackHeight = trackHeadRoot.collapsed ? Math.max(collapsedHeight * 1.5, controller.getTrackProperty(trackId, "kdenlive:trackheight")) : collapsedHeight
......@@ -216,16 +216,16 @@ Rectangle {
Layout.topMargin: 1
Rectangle {
id: trackLed
color: Qt.darker(trackHeadRoot.color, 0.45)
color: Qt.darker(trackHeadRoot.color, 0.55)
anchors.fill: parent
width: height
border.width: 0
radius: 2
Text {
id: trackTagLabel
text: trackHeadRoot.trackTag
anchors.fill: parent
font.pointSize: root.fontUnit
color: activePalette.text
verticalAlignment: Text.AlignVCenter
horizontalAlignment: Text.AlignHCenter
}
......@@ -267,7 +267,11 @@ Rectangle {
when: trackHeadRoot.isActive
PropertyChanges {
target: trackLed
color: 'yellow'
color: timeline.targetColor
}
PropertyChanges {
target: trackTagLabel
color: timeline.targetTextColor
}
},
State {
......@@ -275,7 +279,7 @@ Rectangle {
when: !trackHeadRoot.isLocked && !trackHeadRoot.isActive
PropertyChanges {
target: trackLed
color: Qt.darker(trackHeadRoot.color, 0.45)
color: Qt.darker(trackHeadRoot.color, 0.55)
}
}
]
......
......@@ -156,9 +156,9 @@ public:
continue;
}
lastIdx = idx;
level = m_audioLevels.at(idx) / 256;
level = m_audioLevels.at(idx) / 255.;
for (int j = 1; j < m_channels; j++) {
level = qMax(level, m_audioLevels.at(idx + j) / 256);
level = qMax(level, m_audioLevels.at(idx + j) / 255.);
}
path.lineTo(i, height() - level * height());
}
......@@ -197,7 +197,7 @@ public:
lastIdx = idx;
idx += channel;
if (idx >= m_audioLevels.length() || idx < 0) break;
level = m_audioLevels.at(idx) * channelHeight / 256;
level = m_audioLevels.at(idx) * channelHeight / 255.;
channelPaths[channel].lineTo(i, y - level);
}
if (m_firstChunk && m_channels > 1 && m_channels < 7) {
......@@ -220,7 +220,7 @@ signals:
void audioChannelsChanged();
private:
QVector<double> m_audioLevels;
QVector<uint8_t> m_audioLevels;
int m_inPoint;
int m_outPoint;
QString m_binId;
......
......@@ -2555,6 +2555,22 @@ QColor TimelineController::videoColor() const
return scheme.foreground(KColorScheme::LinkText).color();
}
QColor TimelineController::targetColor() const
{
KColorScheme scheme(QApplication::palette().currentColorGroup());
QColor base = scheme.foreground(KColorScheme::PositiveText).color();
QColor high = QApplication::palette().highlightedText().color();
double factor = 0.3;
QColor res = QColor(qBound(0, base.red() + (int)(factor*(high.red() - 128)), 255), qBound(0, base.green() + (int)(factor*(high.green() - 128)), 255), qBound(0, base.blue() + (int)(factor*(high.blue() - 128)), 255), 255);
return res;
}
QColor TimelineController::targetTextColor() const
{
KColorScheme scheme(QApplication::palette().currentColorGroup());
return scheme.background(KColorScheme::PositiveBackground).color();
}
QColor TimelineController::audioColor() const
{
KColorScheme scheme(QApplication::palette().currentColorGroup());
......
......@@ -72,6 +72,8 @@ class TimelineController : public QObject
Q_PROPERTY(bool autoScroll READ autoScroll NOTIFY autoScrollChanged)
Q_PROPERTY(QColor videoColor READ videoColor NOTIFY colorsChanged)
Q_PROPERTY(QColor audioColor READ audioColor NOTIFY colorsChanged)
Q_PROPERTY(QColor targetColor READ targetColor NOTIFY colorsChanged)
Q_PROPERTY(QColor targetTextColor READ targetTextColor NOTIFY colorsChanged)
Q_PROPERTY(QColor lockedColor READ lockedColor NOTIFY colorsChanged)
Q_PROPERTY(QColor selectionColor READ selectionColor NOTIFY colorsChanged)
Q_PROPERTY(QColor groupColor READ groupColor NOTIFY colorsChanged)
......@@ -153,6 +155,8 @@ public:
Q_INVOKABLE int activeTrack() const { return m_activeTrack; }
Q_INVOKABLE QColor videoColor() const;
Q_INVOKABLE QColor audioColor() const;
Q_INVOKABLE QColor targetColor() const;
Q_INVOKABLE QColor targetTextColor() const;
Q_INVOKABLE QColor lockedColor() const;
Q_INVOKABLE QColor selectionColor() const;
Q_INVOKABLE QColor groupColor() const;
......
......@@ -127,6 +127,7 @@ void TimelineWidget::setModel(const std::shared_ptr<TimelineItemModel> &model, M
// Create a unique id for this timeline to prevent thumbnails
// leaking from one project to another because of qml's image caching
rootContext()->setContextProperty("documentId", QUuid::createUuid());
rootContext()->setContextProperty("miniFont", QFontDatabase::systemFont(QFontDatabase::SmallestReadableFont));
rootContext()->setContextProperty("transitionModel", sortedItems(KdenliveSettings::favorite_transitions(), true)); // m_transitionProxyModel.get());
// rootContext()->setContextProperty("effectModel", m_effectsProxyModel.get());
rootContext()->setContextProperty("effectModel", sortedItems(KdenliveSettings::favorite_effects(), false));
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment