Commit 0ff58df2 authored by Andrey Kamakin

Changed QSBR to clean more often.

Added sanity checks.

Ref T8874
parent 08742c85
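The gist of the change: the map's QSBR garbage collector now keeps two action queues instead of one and is ticked on every map operation, even while a table migration is running, rather than being skipped. A minimal usage sketch of the revised interface (the `QSBR` members match the qsbr.h diff below; `Node` and `example` are illustrative stand-ins, not Krita code):

```cpp
#include "qsbr.h"

struct Node {
    void destroy() { delete this; }
};

void example(QSBR &gc, bool migrationInProcess)
{
    // Ordinary reclamation goes to the pending queue and runs on the
    // next update() cycle.
    gc.enqueue(&Node::destroy, new Node());

    // Migration-related reclamation is parked in the deferred queue and
    // is promoted to pending only once no migration is running.
    gc.enqueue(&Node::destroy, new Node(), /* migration = */ true);

    // Callers now tick the GC unconditionally, passing the migration
    // state, instead of skipping the call during a migration.
    gc.update(migrationInProcess);

    // On shutdown both queues are drained.
    gc.flush();
}
```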
@@ -12,7 +12,7 @@
#define CONCURRENTMAP_H
#include "leapfrog.h"
#include "tiles3/kis_lockless_stack.h"
#include "qsbr.h"
template <typename K, typename V, class KT = DefaultKeyTraits<K>, class VT = DefaultValueTraits<V> >
class ConcurrentMap
@@ -57,7 +57,6 @@ public:
// There are no racing calls to this function.
typename Details::Table* oldRoot = m_root.loadNonatomic();
m_root.store(migration->m_destination, Release);
- Q_ASSERT(oldRoot == migration->getSources()[0].table);
// Caller will GC the TableMigration and the source table.
}
@@ -151,9 +150,6 @@ public:
Value exchangeValue(Value desired)
{
- Q_ASSERT(desired != Value(ValueTraits::NullValue));
- Q_ASSERT(desired != Value(ValueTraits::Redirect));
- Q_ASSERT(m_cell); // Cell must have been found or inserted
for (;;) {
Value oldValue = m_value;
if (m_cell->value.compareExchangeStrong(m_value, desired, ConsumeRelease)) {
@@ -209,7 +205,6 @@ public:
Value eraseValue()
{
- Q_ASSERT(m_cell); // Cell must have been found or inserted
for (;;) {
if (m_value == Value(ValueTraits::NullValue)) {
return Value(m_value);
@@ -217,7 +212,6 @@ public:
if (m_cell->value.compareExchangeStrong(m_value, Value(ValueTraits::NullValue), Consume)) {
// Exchange was successful and a non-NULL value was erased and returned by reference in m_value.
- Q_ASSERT(m_value != Value(ValueTraits::NullValue)); // Implied by the test at the start of the loop.
Value result = m_value;
m_value = Value(ValueTraits::NullValue); // Leave the mutator in a valid state
return result;
@@ -331,8 +325,6 @@ public:
void next()
{
- Q_ASSERT(m_table);
- Q_ASSERT(isValid() || m_idx == -1); // Either the Iterator is already valid, or we've just started iterating.
while (++m_idx <= m_table->sizeMask) {
// Index still inside range of table.
typename Details::CellGroup* group = m_table->getCellGroups() + (m_idx >> 2);
@@ -342,7 +334,6 @@ public:
if (m_hash != KeyTraits::NullHash) {
// Cell has been reserved.
m_value = cell->value.load(Relaxed);
- Q_ASSERT(m_value != Value(ValueTraits::Redirect));
if (m_value != Value(ValueTraits::NullValue))
return; // Yield this cell.
}
@@ -359,14 +350,12 @@ public:
Key getKey() const
{
- Q_ASSERT(isValid());
// Since we've forbidden concurrent inserts (for now), nonatomic would suffice here, but let's plan ahead:
return KeyTraits::dehash(m_hash);
}
Value getValue() const
{
- Q_ASSERT(isValid());
return m_value;
}
};
...
@@ -13,7 +13,9 @@
#include "map_traits.h"
#include "simple_job_coordinator.h"
#include "qsbr.h"
#include "kis_assert.h"
#define SANITY_CHECK
template <class Map>
struct Leapfrog {
@@ -57,8 +59,10 @@ struct Leapfrog {
static Table* create(quint64 tableSize)
{
- Q_ASSERT(isPowerOf2(tableSize));
- Q_ASSERT(tableSize >= 4);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(isPowerOf2(tableSize));
+ KIS_ASSERT_RECOVER_NOOP(tableSize >= 4);
+ #endif // SANITY_CHECK
quint64 numGroups = tableSize >> 2;
Table* table = (Table*) std::malloc(sizeof(Table) + sizeof(CellGroup) * numGroups);
new (table) Table(tableSize - 1);
@@ -152,8 +156,10 @@ struct Leapfrog {
static Cell* find(Hash hash, Table* table)
{
- Q_ASSERT(table);
- Q_ASSERT(hash != KeyTraits::NullHash);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(table);
+ KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
+ #endif // SANITY_CHECK
quint64 sizeMask = table->sizeMask;
// Optimistically check hashed cell even though it might belong to another bucket
quint64 idx = hash & sizeMask;
@@ -188,8 +194,10 @@ struct Leapfrog {
enum InsertResult { InsertResult_AlreadyFound, InsertResult_InsertedNew, InsertResult_Overflow };
static InsertResult insertOrFind(Hash hash, Table* table, Cell*& cell, quint64& overflowIdx)
{
- Q_ASSERT(table);
- Q_ASSERT(hash != KeyTraits::NullHash);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(table);
+ KIS_ASSERT_RECOVER_NOOP(hash != KeyTraits::NullHash);
+ #endif // SANITY_CHECK
quint64 sizeMask = table->sizeMask;
quint64 idx = quint64(hash);
@@ -237,7 +245,9 @@ struct Leapfrog {
} while (probeHash == KeyTraits::NullHash);
}
- Q_ASSERT(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in same bucket can be linked
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(((probeHash ^ hash) & sizeMask) == 0); // Only hashes in same bucket can be linked
+ #endif // SANITY_CHECK
if (probeHash == hash) {
return InsertResult_AlreadyFound;
}
@@ -245,7 +255,9 @@ struct Leapfrog {
// Reached the end of the link chain for this bucket.
// Switch to linear probing until we reserve a new cell or find a late-arriving cell in the same bucket.
quint64 prevLinkIdx = idx;
- Q_ASSERT(qint64(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range.
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(qint64(maxIdx - idx) >= 0); // Nobody would have linked an idx that's out of range.
+ #endif // SANITY_CHECK
quint64 linearProbesRemaining = qMin(maxIdx - idx, quint64(LinearSearchLimit));
while (linearProbesRemaining-- > 0) {
@@ -258,7 +270,9 @@ struct Leapfrog {
// It's an empty cell. Try to reserve it.
if (cell->hash.compareExchangeStrong(probeHash, hash, Relaxed)) {
// Success. We've reserved the cell. Link it to previous cell in same bucket.
- Q_ASSERT(probeDelta == 0);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(probeDelta == 0);
+ #endif // SANITY_CHECK
quint8 desiredDelta = idx - prevLinkIdx;
prevLink->store(desiredDelta, Relaxed);
return InsertResult_InsertedNew;
@@ -386,16 +400,20 @@ bool Leapfrog<Map>::TableMigration::migrateRange(Table* srcTable, quint64 startI
// We've got a key/value pair to migrate.
// Reserve a destination cell in the destination.
- Q_ASSERT(srcHash != KeyTraits::NullHash);
- Q_ASSERT(srcValue != Value(ValueTraits::NullValue));
- Q_ASSERT(srcValue != Value(ValueTraits::Redirect));
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(srcHash != KeyTraits::NullHash);
+ KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::NullValue));
+ KIS_ASSERT_RECOVER_NOOP(srcValue != Value(ValueTraits::Redirect));
+ #endif // SANITY_CHECK
Cell* dstCell;
quint64 overflowIdx;
InsertResult result = insertOrFind(srcHash, m_destination, dstCell, overflowIdx);
// During migration, a hash can only exist in one place among all the source tables,
// and it is only migrated by one thread. Therefore, the hash will never already exist
// in the destination table:
- Q_ASSERT(result != InsertResult_AlreadyFound);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(result != InsertResult_AlreadyFound);
+ #endif // SANITY_CHECK
if (result == InsertResult_Overflow) {
// Destination overflow.
// This can happen for several reasons. For example, the source table could have
@@ -411,7 +429,9 @@ bool Leapfrog<Map>::TableMigration::migrateRange(Table* srcTable, quint64 startI
dstCell->value.store(srcValue, Relaxed);
// Try to place a Redirect marker in srcValue.
Value doubleCheckedSrcValue = srcCell->value.compareExchange(srcValue, Value(ValueTraits::Redirect), Relaxed);
- Q_ASSERT(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(doubleCheckedSrcValue != Value(ValueTraits::Redirect)); // Only one thread can redirect a cell at a time.
+ #endif // SANITY_CHECK
if (doubleCheckedSrcValue == srcValue) {
// No racing writes to the src. We've successfully placed the Redirect marker.
// srcValue was non-NULL when we decided to migrate it, but it may have changed to NULL
@@ -446,7 +466,9 @@ void Leapfrog<Map>::TableMigration::run()
}
} while (!m_workerStatus.compareExchangeWeak(probeStatus, probeStatus + 2, Relaxed, Relaxed));
// # of workers has been incremented, and the end flag is clear.
- Q_ASSERT((probeStatus & 1) == 0);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP((probeStatus & 1) == 0);
+ #endif // SANITY_CHECK
// Iterate over all source tables.
for (quint64 s = 0; s < m_numSources; s++) {
@@ -480,7 +502,9 @@ void Leapfrog<Map>::TableMigration::run()
}
qint64 prevRemaining = m_unitsRemaining.fetchSub(1, Relaxed);
- Q_ASSERT(prevRemaining > 0);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(prevRemaining > 0);
+ #endif // SANITY_CHECK
if (prevRemaining == 1) {
// *** SUCCESSFUL MIGRATION ***
// That was the last chunk to migrate.
@@ -500,7 +524,9 @@ endMigration:
// We're the very last worker thread.
// Perform the appropriate post-migration step depending on whether the migration succeeded or failed.
- Q_ASSERT(probeStatus == 3);
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(probeStatus == 3);
+ #endif // SANITY_CHECK
bool overflowed = m_overflowed.loadNonatomic(); // No racing writes at this point
if (!overflowed) {
// The migration succeeded. This is the most likely outcome. Publish the new subtree.
@@ -541,7 +567,7 @@ endMigration:
}
// We're done with this TableMigration. Queue it for GC.
- m_map.getGC().enqueue(&TableMigration::destroy, this);
+ m_map.getGC().enqueue(&TableMigration::destroy, this, true);
}
#endif // LEAPFROG_H
@@ -39,13 +39,14 @@ private:
};
QMutex m_mutex;
- QVector<Action> m_actions;
+ QVector<Action> m_pendingActions;
+ QVector<Action> m_deferedActions;
std::atomic_flag m_isProcessing = ATOMIC_FLAG_INIT;
public:
template <class T>
- void enqueue(void (T::*pmf)(), T* target)
+ void enqueue(void (T::*pmf)(), T* target, bool migration = false)
{
struct Closure {
void (T::*pmf)();
@@ -62,15 +63,25 @@ public:
while (m_isProcessing.test_and_set(std::memory_order_acquire)) {
}
- m_actions.append(Action(Closure::thunk, &closure, sizeof(closure)));
+ if (migration) {
+ m_deferedActions.append(Action(Closure::thunk, &closure, sizeof(closure)));
+ } else {
+ m_pendingActions.append(Action(Closure::thunk, &closure, sizeof(closure)));
+ }
m_isProcessing.clear(std::memory_order_release);
}
- void update()
+ void update(bool migration)
{
if (!m_isProcessing.test_and_set(std::memory_order_acquire)) {
QVector<Action> actions;
- actions.swap(m_actions);
+ actions.swap(m_pendingActions);
+ if (!migration) {
+ m_pendingActions.swap(m_deferedActions);
+ }
m_isProcessing.clear(std::memory_order_release);
for (auto &action : actions) {
@@ -78,6 +89,21 @@ public:
}
}
}
+ void flush()
+ {
+ if (!m_isProcessing.test_and_set(std::memory_order_acquire)) {
+ for (auto &action : m_pendingActions) {
+ action();
+ }
+ for (auto &action : m_deferedActions) {
+ action();
+ }
+ m_isProcessing.clear(std::memory_order_release);
+ }
+ }
};
#endif // QSBR_H
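Why two queues: update() executes only the pending queue, and promotes the deferred queue to pending only when no migration is in progress, so an action enqueued with migration = true (e.g. a TableMigration's own cleanup) survives at least one extra update() cycle before running. A worked trace under that reading of the code above (hypothetical objects `a` and `m`):

```cpp
// gc.enqueue(&A::destroy, a);          pending = {a}   deferred = {}
// gc.enqueue(&M::destroy, m, true);    pending = {a}   deferred = {m}
// gc.update(false);   // runs {a};     pending = {m}   deferred = {}
// gc.update(false);   // runs {m};     pending = {}    deferred = {}
// An update(true) in between would run pending actions but leave the
// deferred queue untouched until the migration has finished.
```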
@@ -15,8 +15,11 @@
#include <QWaitCondition>
#include <QMutexLocker>
#include "kis_assert.h"
#include "atomic.h"
+ #define SANITY_CHECK
class SimpleJobCoordinator
{
public:
@@ -83,7 +86,9 @@ public:
void runOne(Job* job)
{
- Q_ASSERT(job != (Job*) m_job.load(Relaxed));
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(job != (Job*) m_job.load(Relaxed));
+ #endif // SANITY_CHECK
storeRelease(job);
job->run();
}
...
@@ -6,6 +6,21 @@
#include "3rdparty/lock_free_map/concurrent_map.h"
#include "kis_tile.h"
+ #define SANITY_CHECK
+ /**
+ * This is a template for a hash table that stores tiles (or some other
+ * objects resembling tiles). Actually, this object should only have
+ * col()/row() methods and be able to answer setNext()/next() requests to
+ * be stored here. It is used in KisTiledDataManager and
+ * KisMementoManager.
+ *
+ * How to use:
+ * 1) each hash must be unique, otherwise tiles would overwrite each other
+ * 2) the 0 key is reserved, so it can't be used
+ * 3) col and row must be less than 0x7FFF to guarantee uniqueness of the hash for each pair
+ */
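On rules 2) and 3) above: the remapping in calculateHash() below keeps key 0 free by sending the (0,0) tile to the sentinel pair (0x7FFF, 0x7FFF), which rule 3 makes unreachable for callers. A self-contained sketch, assuming the hash packs the two coordinates into one 32-bit word (the actual packing expression is elided from this diff):

```cpp
#include <QtGlobal>

// Illustrative stand-in for calculateHash(); the real packing line is
// truncated in the hunk below, so (row << 16) | col is an assumption.
inline quint32 exampleHash(qint32 col, qint32 row)
{
    Q_ASSERT(row < 0x7FFF && col < 0x7FFF);  // rule 3
    if (col == 0 && row == 0) {              // rule 2: key 0 is reserved,
        col = 0x7FFF;                        // so remap the origin tile to
        row = 0x7FFF;                        // the sentinel pair instead
    }
    return (quint32(row) << 16) | quint32(col);
}
```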
template <class T>
class KisTileHashTableIteratorTraits2;
@@ -96,6 +111,10 @@ private:
inline quint32 calculateHash(qint32 col, qint32 row)
{
+ #ifdef SANITY_CHECK
+ KIS_ASSERT_RECOVER_NOOP(row < 0x7FFF && col < 0x7FFF);
+ #endif // SANITY_CHECK
if (col == 0 && row == 0) {
col = 0x7FFF;
row = 0x7FFF;
@@ -115,9 +134,7 @@ private:
m_numTiles.fetchAndAddRelaxed(1);
}
- if (!m_map.migrationInProcess()) {
- m_map.getGC().update();
- }
+ m_map.getGC().update(m_map.migrationInProcess());
}
inline bool erase(quint32 key)
@@ -131,21 +148,14 @@ private:
m_map.getGC().enqueue(&MemoryReclaimer::destroy, new MemoryReclaimer(result));
}
- if (!m_map.migrationInProcess()) {
- m_map.getGC().update();
- }
+ m_map.getGC().update(m_map.migrationInProcess());
return wasDeleted;
}
inline TileTypeSP get(quint32 key)
{
TileTypeSP result = m_map.get(key);
- if (!m_map.migrationInProcess()) {
- m_map.getGC().update();
- }
+ m_map.getGC().update(m_map.migrationInProcess());
return result;
}
@@ -227,7 +237,7 @@ template <class T>
KisTileHashTableTraits2<T>::~KisTileHashTableTraits2()
{
clear();
- m_map.getGC().update();
+ m_map.getGC().flush();
setDefaultTileData(0);
}
@@ -273,10 +283,7 @@ typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getT
tile = mutator.getValue();
}
- if (!m_map.migrationInProcess()) {
- m_map.getGC().update();
- }
+ m_map.getGC().update(m_map.migrationInProcess());
return tile;
}
@@ -292,10 +299,7 @@ typename KisTileHashTableTraits2<T>::TileTypeSP KisTileHashTableTraits2<T>::getR
tile = new TileType(col, row, m_defaultTileData, 0);
}
- if (!m_map.migrationInProcess()) {
- m_map.getGC().update();
- }
+ m_map.getGC().update(m_map.migrationInProcess());
return tile;
}
...