Commit 6e57fc99 authored by Vincent PINON's avatar Vincent PINON
Browse files

krazy fix: use Qt int types

git grep -l '\<int[0-9]\+_t\>' | grep -v kiss_fft \
| xargs sed -i 's/\<int\([0-9]\+\)_t\>/qint\1/'
parent dfaaed1f
......@@ -55,11 +55,11 @@ void AudioCorrelation::slotProcessChild(AudioEnvelope *envelope)
const int sizeSub = envelope->envelopeSize();
AudioCorrelationInfo *info = new AudioCorrelationInfo(sizeMain, sizeSub);
int64_t *correlation = info->correlationVector();
qint64 *correlation = info->correlationVector();
const int64_t *envMain = m_mainTrackEnvelope->envelope();
const int64_t *envSub = envelope->envelope();
int64_t max = 0;
const qint64 *envMain = m_mainTrackEnvelope->envelope();
const qint64 *envSub = envelope->envelope();
qint64 max = 0;
if (sizeSub > 200) {
FFTCorrelation::correlate(envMain, sizeMain,
......@@ -103,18 +103,18 @@ AudioCorrelationInfo const* AudioCorrelation::info(int childIndex) const
}
void AudioCorrelation::correlate(const int64_t *envMain, int sizeMain,
const int64_t *envSub, int sizeSub,
int64_t *correlation,
int64_t *out_max)
void AudioCorrelation::correlate(const qint64 *envMain, int sizeMain,
const qint64 *envSub, int sizeSub,
qint64 *correlation,
qint64 *out_max)
{
Q_ASSERT(correlation != NULL);
int64_t const* left;
int64_t const* right;
qint64 const* left;
qint64 const* right;
int size;
int64_t sum;
int64_t max = 0;
qint64 sum;
qint64 max = 0;
/*
......
......@@ -44,10 +44,10 @@ public:
Correlates the two vectors envMain and envSub.
\c correlation must be a pre-allocated vector of size sizeMain+sizeSub+1.
*/
static void correlate(const int64_t *envMain, int sizeMain,
const int64_t *envSub, int sizeSub,
int64_t *correlation,
int64_t *out_max = NULL);
static void correlate(const qint64 *envMain, int sizeMain,
const qint64 *envSub, int sizeSub,
qint64 *correlation,
qint64 *out_max = NULL);
private:
AudioEnvelope *m_mainTrackEnvelope;
......
......@@ -16,7 +16,7 @@ AudioCorrelationInfo::AudioCorrelationInfo(int mainSize, int subSize) :
m_subSize(subSize),
m_max(-1)
{
m_correlationVector = new int64_t[m_mainSize+m_subSize+1];
m_correlationVector = new qint64[m_mainSize+m_subSize+1];
}
AudioCorrelationInfo::~AudioCorrelationInfo()
......@@ -29,16 +29,16 @@ int AudioCorrelationInfo::size() const
return m_mainSize+m_subSize+1;
}
void AudioCorrelationInfo::setMax(int64_t max)
void AudioCorrelationInfo::setMax(qint64 max)
{
m_max = max;
}
int64_t AudioCorrelationInfo::max() const
qint64 AudioCorrelationInfo::max() const
{
if (m_max <= 0) {
int width = size();
int64_t max = 0;
qint64 max = 0;
for (int i = 0; i < width; ++i) {
if (m_correlationVector[i] > max) {
max = m_correlationVector[i];
......@@ -52,7 +52,7 @@ int64_t AudioCorrelationInfo::max() const
int AudioCorrelationInfo::maxIndex() const
{
int64_t max = 0;
qint64 max = 0;
int index = 0;
int width = size();
......@@ -66,7 +66,7 @@ int AudioCorrelationInfo::maxIndex() const
return index;
}
int64_t* AudioCorrelationInfo::correlationVector()
qint64* AudioCorrelationInfo::correlationVector()
{
return m_correlationVector;
}
......@@ -74,7 +74,7 @@ int64_t* AudioCorrelationInfo::correlationVector()
QImage AudioCorrelationInfo::toImage(int height) const
{
int width = size();
int64_t maxVal = max();
qint64 maxVal = max();
QImage img(width, height, QImage::Format_ARGB32);
img.fill(qRgb(255,255,255));
......
......@@ -27,15 +27,15 @@ public:
~AudioCorrelationInfo();
int size() const;
int64_t* correlationVector();
int64_t const* correlationVector() const;
qint64* correlationVector();
qint64 const* correlationVector() const;
/**
Returns the maximum value in the correlation vector.
If it has not been set before with setMax(), it will be calculated.
*/
int64_t max() const;
void setMax(int64_t max); ///< Can be set to avoid calculating the max again in this function
qint64 max() const;
void setMax(qint64 max); ///< Can be set to avoid calculating the max again in this function
/**
Returns the index of the largest value in the correlation vector
......@@ -48,8 +48,8 @@ private:
int m_mainSize;
int m_subSize;
int64_t *m_correlationVector;
int64_t m_max;
qint64 *m_correlationVector;
qint64 m_max;
};
......
......@@ -59,7 +59,7 @@ AudioEnvelope::~AudioEnvelope()
const int64_t *AudioEnvelope::envelope()
const qint64 *AudioEnvelope::envelope()
{
if (m_envelope == NULL) {
loadEnvelope();
......@@ -70,7 +70,7 @@ int AudioEnvelope::envelopeSize() const
{
return m_envelopeSize;
}
int64_t AudioEnvelope::maxValue() const
qint64 AudioEnvelope::maxValue() const
{
return m_envelopeMax;
}
......@@ -88,7 +88,7 @@ void AudioEnvelope::loadEnvelope()
mlt_audio_format format_s16 = mlt_audio_s16;
int channels = 1;
m_envelope = new int64_t[m_envelopeSize];
m_envelope = new qint64[m_envelopeSize];
m_envelopeMax = 0;
m_envelopeMean = 0;
......@@ -99,12 +99,12 @@ void AudioEnvelope::loadEnvelope()
m_producer->set_speed(1.0); // This is necessary, otherwise we don't get any new frames in the 2nd run.
for (int i = 0; i < m_envelopeSize; ++i) {
Mlt::Frame *frame = m_producer->get_frame(i);
int64_t position = mlt_frame_get_position(frame->get_frame());
qint64 position = mlt_frame_get_position(frame->get_frame());
int samples = mlt_sample_calculator(m_producer->get_fps(), samplingRate, position);
int16_t *data = static_cast<int16_t*>(frame->get_audio(format_s16, samplingRate, channels, samples));
qint16 *data = static_cast<int16_t*>(frame->get_audio(format_s16, samplingRate, channels, samples));
int64_t sum = 0;
qint64 sum = 0;
for (int k = 0; k < samples; ++k) {
sum += fabs(data[k]);
}
......@@ -130,7 +130,7 @@ void AudioEnvelope::loadEnvelope()
<< t.elapsed() << " ms.";
}
int64_t AudioEnvelope::loadStdDev()
qint64 AudioEnvelope::loadStdDev()
{
if (m_envelopeStdDevCalculated) {
qDebug() << "Standard deviation already calculated, not re-calculating.";
......@@ -173,7 +173,7 @@ void AudioEnvelope::slotProcessEnveloppe()
if (!m_envelopeIsNormalized) {
m_envelopeMax = 0;
int64_t newMean = 0;
qint64 newMean = 0;
for (int i = 0; i < m_envelopeSize; ++i) {
m_envelope[i] -= m_envelopeMean;
......
......@@ -35,12 +35,12 @@ public:
virtual ~AudioEnvelope();
/// Returns the envelope, calculates it if necessary.
int64_t const* envelope();
qint64 const* envelope();
int envelopeSize() const;
int64_t maxValue() const;
qint64 maxValue() const;
void loadEnvelope();
int64_t loadStdDev();
qint64 loadStdDev();
void normalizeEnvelope(bool clampTo0 = false);
QImage drawEnvelope();
......@@ -51,7 +51,7 @@ public:
int startPos() const;
private:
int64_t *m_envelope;
qint64 *m_envelope;
Mlt::Producer *m_producer;
AudioInfo *m_info;
QFutureWatcher<void> m_watcher;
......@@ -63,9 +63,9 @@ private:
int m_startpos;
int m_envelopeSize;
int64_t m_envelopeMax;
int64_t m_envelopeMean;
int64_t m_envelopeStdDev;
qint64 m_envelopeMax;
qint64 m_envelopeMean;
qint64 m_envelopeStdDev;
bool m_envelopeStdDevCalculated;
bool m_envelopeIsNormalized;
......
......@@ -19,9 +19,9 @@ extern "C"
#include <QTime>
#include <algorithm>
void FFTCorrelation::correlate(const int64_t *left, const int leftSize,
const int64_t *right, const int rightSize,
int64_t *out_correlated)
void FFTCorrelation::correlate(const qint64 *left, const int leftSize,
const qint64 *right, const int rightSize,
qint64 *out_correlated)
{
float correlatedFloat[leftSize+rightSize+1];
correlate(left, leftSize, right, rightSize, correlatedFloat);
......@@ -34,8 +34,8 @@ void FFTCorrelation::correlate(const int64_t *left, const int leftSize,
}
}
void FFTCorrelation::correlate(const int64_t *left, const int leftSize,
const int64_t *right, const int rightSize,
void FFTCorrelation::correlate(const qint64 *left, const int leftSize,
const qint64 *right, const int rightSize,
float *out_correlated)
{
QTime t;
......@@ -44,12 +44,12 @@ void FFTCorrelation::correlate(const int64_t *left, const int leftSize,
float leftF[leftSize];
float rightF[rightSize];
// First the int64_t values need to be normalized to floats
// First the qint64 values need to be normalized to floats
// Dividing by the max value is maybe not the best solution, but the
// maximum value after correlation should not be larger than the longest
// vector since each value should be at most 1
int64_t maxLeft = 1;
int64_t maxRight = 1;
qint64 maxLeft = 1;
qint64 maxRight = 1;
for (int i = 0; i < leftSize; ++i) {
if (labs(left[i]) > maxLeft) {
maxLeft = labs(left[i]);
......
......@@ -37,13 +37,13 @@ public:
\c out_correlated must be a pre-allocated vector of size
\c leftSize + \c rightSize.
*/
static void correlate(const int64_t *left, const int leftSize,
const int64_t *right, const int rightSize,
static void correlate(const qint64 *left, const int leftSize,
const qint64 *right, const int rightSize,
float *out_correlated);
static void correlate(const int64_t *left, const int leftSize,
const int64_t *right, const int rightSize,
int64_t *out_correlated);
static void correlate(const qint64 *left, const int leftSize,
const qint64 *right, const int rightSize,
qint64 *out_correlated);
};
#endif // FFTCORRELATION_H
......@@ -111,7 +111,7 @@ const QVector<float> FFTTools::window(const WindowType windowType, const int siz
return QVector<float>();
}
void FFTTools::fftNormalized(const QVector<int16_t> audioFrame, const uint channel, const uint numChannels, float *freqSpectrum,
void FFTTools::fftNormalized(const QVector<qint16> audioFrame, const uint channel, const uint numChannels, float *freqSpectrum,
const WindowType windowType, const uint windowSize, const float param)
{
#ifdef DEBUG_FFTTOOLS
......
......@@ -53,7 +53,7 @@ public:
* freqSpectrum has to be of size windowSize/2
For windowType and param see the FFTTools::window() function above.
*/
void fftNormalized(const QVector<int16_t> audioFrame, const uint channel, const uint numChannels, float *freqSpectrum,
void fftNormalized(const QVector<qint16> audioFrame, const uint channel, const uint numChannels, float *freqSpectrum,
const WindowType windowType, const uint windowSize, const float param = 0);
......
......@@ -125,7 +125,7 @@ namespace Mlt
class Producer;
};
Q_DECLARE_METATYPE(QVector<int16_t>)
Q_DECLARE_METATYPE(QVector<qint16>)
EffectsList MainWindow::videoEffects;
......@@ -160,7 +160,7 @@ MainWindow::MainWindow(const QString &MltPath, const KUrl & Url, const QString &
m_stopmotion(NULL),
m_mainClip(NULL)
{
qRegisterMetaType<QVector<int16_t> > ();
qRegisterMetaType<QVector<qint16> > ();
qRegisterMetaType<stringMap> ("stringMap");
qRegisterMetaType<audioByteArray> ("audioByteArray");
......
......@@ -289,7 +289,7 @@ void MltDeviceCapture::showAudio(Mlt::Frame& frame)
int freq = 0;
int num_channels = 0;
int samples = 0;
int16_t* data = (int16_t*)frame.get_audio(audio_format, freq, num_channels, samples);
qint16* data = (int16_t*)frame.get_audio(audio_format, freq, num_channels, samples);
if (!data) {
return;
......@@ -297,8 +297,8 @@ void MltDeviceCapture::showAudio(Mlt::Frame& frame)
// Data format: [ c00 c10 c01 c11 c02 c12 c03 c13 ... c0{samples-1} c1{samples-1} for 2 channels.
// So the vector is of size samples*channels.
QVector<int16_t> sampleVector(samples*num_channels);
memcpy(sampleVector.data(), data, samples*num_channels*sizeof(int16_t));
QVector<qint16> sampleVector(samples*num_channels);
memcpy(sampleVector.data(), data, samples*num_channels*sizeof(qint16));
if (samples > 0) {
emit audioSamplesSignal(sampleVector, freq, num_channels, samples);
}
......
......@@ -73,7 +73,7 @@ signals:
void frameUpdated(const QImage &);
/** @brief This signal contains the audio of the current frame. */
void audioSamplesSignal(const QVector<int16_t>&,int,int,int);
void audioSamplesSignal(const QVector<qint16>&,int,int,int);
};
class AbstractMonitor : public QWidget
......
......@@ -1919,7 +1919,7 @@ void Render::showAudio(Mlt::Frame& frame)
int freq = 48000;
int num_channels = 2;
int samples = 0;
int16_t* data = (int16_t*)frame.get_audio(audio_format, freq, num_channels, samples);
qint16* data = (int16_t*)frame.get_audio(audio_format, freq, num_channels, samples);
if (!data) {
return;
......@@ -1927,8 +1927,8 @@ void Render::showAudio(Mlt::Frame& frame)
// Data format: [ c00 c10 c01 c11 c02 c12 c03 c13 ... c0{samples-1} c1{samples-1} for 2 channels.
// So the vector is of size samples*channels.
QVector<int16_t> sampleVector(samples*num_channels);
memcpy(sampleVector.data(), data, samples*num_channels*sizeof(int16_t));
QVector<qint16> sampleVector(samples*num_channels);
memcpy(sampleVector.data(), data, samples*num_channels*sizeof(qint16));
if (samples > 0) {
emit audioSamplesSignal(sampleVector, freq, num_channels, samples);
......
......@@ -30,7 +30,7 @@ AbstractAudioScopeWidget::AbstractAudioScopeWidget(bool trackMouse, QWidget *par
{
}
void AbstractAudioScopeWidget::slotReceiveAudio(const QVector<int16_t>& sampleData, int freq, int num_channels, int num_samples)
void AbstractAudioScopeWidget::slotReceiveAudio(const QVector<qint16>& sampleData, int freq, int num_channels, int num_samples)
{
#ifdef DEBUG_AASW
qDebug() << "Received audio for " << widgetName() << '.';
......
......@@ -32,7 +32,7 @@ public:
virtual ~AbstractAudioScopeWidget();
public slots:
void slotReceiveAudio(const QVector<int16_t> &sampleData, int freq, int num_channels, int num_samples);
void slotReceiveAudio(const QVector<qint16> &sampleData, int freq, int num_channels, int num_samples);
protected:
/** @brief This is just a wrapper function, subclasses can use renderAudioScope. */
......@@ -43,7 +43,7 @@ protected:
when calculation has finished, to allow multi-threading.
accelerationFactor hints how much faster than usual the calculation should be accomplished, if possible. */
virtual QImage renderAudioScope(uint accelerationFactor,
const QVector<int16_t> &audioFrame, const int freq, const int num_channels, const int num_samples,
const QVector<qint16> &audioFrame, const int freq, const int num_channels, const int num_samples,
const int newData) = 0;
int m_freq;
......@@ -51,7 +51,7 @@ protected:
int m_nSamples;
private:
QVector<int16_t> m_audioFrame;
QVector<qint16> m_audioFrame;
QAtomicInt m_newData;
};
......
......@@ -41,7 +41,7 @@ AudioSignal::~AudioSignal()
{
}
QImage AudioSignal::renderAudioScope(uint, const QVector<int16_t> &audioFrame,
QImage AudioSignal::renderAudioScope(uint, const QVector<qint16> &audioFrame,
const int, const int num_channels, const int samples, const int)
{
QTime start = QTime::currentTime();
......@@ -148,7 +148,7 @@ QRect AudioSignal::scopeRect() { return QRect(0, 0, width(), height()); }
QImage AudioSignal::renderHUD(uint) { return QImage(); }
QImage AudioSignal::renderBackground(uint) { return QImage(); }
void AudioSignal::slotReceiveAudio(QVector<int16_t> data, int, int num_channels, int samples)
void AudioSignal::slotReceiveAudio(QVector<qint16> data, int, int num_channels, int samples)
{
int num_samples = samples > 200 ? 200 : samples;
......
......@@ -42,7 +42,7 @@ public:
QRect scopeRect();
QImage renderHUD(uint accelerationFactor);
QImage renderBackground(uint accelerationFactor);
QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> &audioFrame, const int, const int num_channels, const int samples, const int);
QImage renderAudioScope(uint accelerationFactor, const QVector<qint16> &audioFrame, const int, const int num_channels, const int samples, const int);
QString widgetName() const { return "audioSignal"; }
bool isHUDDependingOnInput() const { return false; }
......@@ -57,7 +57,7 @@ private:
public slots:
void showAudio(const QByteArray &);
void slotReceiveAudio(QVector<int16_t>,int,int,int);
void slotReceiveAudio(QVector<qint16>,int,int,int);
private slots:
void slotNoAudioTimeout();
......
......@@ -172,7 +172,7 @@ QImage AudioSpectrum::renderBackground(uint)
return QImage();
}
QImage AudioSpectrum::renderAudioScope(uint, const QVector<int16_t> &audioFrame, const int freq, const int num_channels,
QImage AudioSpectrum::renderAudioScope(uint, const QVector<qint16> &audioFrame, const int freq, const int num_channels,
const int num_samples, const int)
{
if (
......@@ -192,8 +192,8 @@ QImage AudioSpectrum::renderAudioScope(uint, const QVector<int16_t> &audioFrame,
for (int i = 0; i < audioFrame.size(); ++i) {
if (
audioFrame[i] == std::numeric_limits<int16_t>::max()
|| audioFrame[i] == std::numeric_limits<int16_t>::min()) {
audioFrame[i] == std::numeric_limits<qint16>::max()
|| audioFrame[i] == std::numeric_limits<qint16>::min()) {
overmodulateCount++;
if (overmodulateCount > 3) {
overmodulated = true;
......
......@@ -50,7 +50,7 @@ protected:
///// Implemented methods /////
QRect scopeRect();
QImage renderHUD(uint accelerationFactor);
QImage renderAudioScope(uint accelerationFactor, const QVector<int16_t> &audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
QImage renderAudioScope(uint accelerationFactor, const QVector<qint16> &audioFrame, const int freq, const int num_channels, const int num_samples, const int newData);
QImage renderBackground(uint accelerationFactor);
bool isHUDDependingOnInput() const;
bool isScopeDependingOnInput() const;
......
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment