
Commit 1aace13b authored by David Faure

TransactionSequence: cancel all subjobs on rollback-from-user.

Summary:
This replaces commit f14a6e99, which wasn't the right fix for the
same problem.

Test Plan:
Press F5 in a large folder in KMail, then press the Abort button in the middle
of the sync. Aborting used to simply leave the sync blocked, while now it's
possible to resume it afterwards.

Reviewers: dvratil, vkrause

Reviewed By: dvratil

Subscribers: kde-pim

Differential Revision: https://phabricator.kde.org/D19488
parent 8ff596c4
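
For context, the scenario this commit fixes can be sketched from the caller's side. The snippet below is illustrative and not part of the commit; it only uses the Akonadi::ItemSync calls exercised by the new testUserCancel() test further down (setTransactionMode(), setFullSyncItems(), rollback(), exec()), and the include paths are assumptions that may vary between Akonadi versions.

#include <AkonadiCore/Collection>
#include <AkonadiCore/Item>
#include <AkonadiCore/ItemSync>
#include <QTimer>

using namespace Akonadi;

// Start a full sync of `col` and cancel it shortly afterwards, much like a user
// pressing "Abort" in KMail. Before this commit the sync could stay blocked;
// with the fix the job finishes with an error and a new sync can be started.
bool syncThenCancel(const Collection &col, const Item::List &remoteItems)
{
    ItemSync *syncer = new ItemSync(col);
    syncer->setTransactionMode(ItemSync::SingleTransaction);
    syncer->setFullSyncItems(remoteItems);

    QTimer::singleShot(10, syncer, &ItemSync::rollback);   // simulate the user's Abort

    const bool ok = syncer->exec();   // returns false; errorString() reports the cancellation
    return ok;
}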
@@ -61,6 +61,18 @@ private:
return fetch->items();
}
void createItems(const Collection &col, int itemCount)
{
for (int i = 0; i < itemCount; ++i) {
Item item(QStringLiteral("application/octet-stream"));
item.setRemoteId(QStringLiteral("rid") + QString::number(i));
item.setGid(QStringLiteral("gid") + QString::number(i));
item.setPayload<QByteArray>("payload1");
ItemCreateJob *job = new ItemCreateJob(item, col);
AKVERIFYEXEC(job);
}
}
private Q_SLOTS:
void initTestCase()
{
@@ -467,7 +479,7 @@ private Q_SLOTS:
}
/*
* This test verifies that ItemSync doesn't prematurely emit it's result if a job inside a transaction fails.
* This test verifies that ItemSync doesn't prematurely emit its result if a job inside a transaction fails.
* ItemSync is supposed to continue the sync but simply ignoring all delivered data.
*/
void testFailingJob()
@@ -519,11 +531,11 @@ private Q_SLOTS:
}
/*
* This test verifies that ItemSync doesn't prematurly emit it's result if a job inside a transaction fails, due to a duplicate.
* This test verifies that ItemSync doesn't prematurely emit its result if a job inside a transaction fails, due to a duplicate.
* This case used to break the TransactionSequence.
* ItemSync is supposed to continue the sync but simply ignoring all delivered data.
*/
void testFailingDueToDuplicateJob()
void testFailingDueToDuplicateItem()
{
const Collection col = Collection(collectionIdFromPath(QStringLiteral("res1/foo")));
QVERIFY(col.isValid());
@@ -579,6 +591,7 @@ private Q_SLOTS:
void testFullSyncManyItems()
{
// Given a collection with 1000 items
const Collection col = Collection(collectionIdFromPath(QStringLiteral("res2/foo2")));
QVERIFY(col.isValid());
@@ -588,22 +601,11 @@ private Q_SLOTS:
QVERIFY(addedSpy.isValid());
const int itemCount = 1000;
for (int i = 0; i < itemCount; ++i) {
Item item(QStringLiteral("application/octet-stream"));
item.setRemoteId(QStringLiteral("rid") + QString::number(i));
item.setGid(QStringLiteral("gid") + QString::number(i));
item.setPayload<QByteArray>("payload1");
ItemCreateJob *job = new ItemCreateJob(item, col);
AKVERIFYEXEC(job);
}
createItems(col, itemCount);
QTRY_COMPARE(addedSpy.count(), itemCount);
addedSpy.clear();
const Item::List origItems = fetchItems(col);
//Since the item sync affects the knut resource we ensure we actually managed to load all items
//This needs to be adjusted should the testdataset change
QCOMPARE(origItems.size(), itemCount);
QSignalSpy deletedSpy(&monitor, SIGNAL(itemRemoved(Akonadi::Item)));
@@ -617,6 +619,7 @@ private Q_SLOTS:
QSignalSpy transactionSpy(syncer, SIGNAL(transactionCommitted()));
QVERIFY(transactionSpy.isValid());
syncer->setFullSyncItems(origItems);
AKVERIFYEXEC(syncer);
QCOMPARE(transactionSpy.count(), 1);
}
@@ -632,6 +635,40 @@ private Q_SLOTS:
ItemDeleteJob *job = new ItemDeleteJob(resultItems);
AKVERIFYEXEC(job);
}
void testUserCancel()
{
// Given a collection with 100 items
const Collection col = Collection(collectionIdFromPath(QStringLiteral("res2/foo2")));
QVERIFY(col.isValid());
const Item::List itemsToDelete = fetchItems(col);
if (!itemsToDelete.isEmpty()) {
ItemDeleteJob *deleteJob = new ItemDeleteJob(itemsToDelete);
AKVERIFYEXEC(deleteJob);
}
const int itemCount = 100;
createItems(col, itemCount);
const Item::List origItems = fetchItems(col);
QCOMPARE(origItems.size(), itemCount);
// and an ItemSync running
ItemSync *syncer = new ItemSync(col);
syncer->setTransactionMode(ItemSync::SingleTransaction);
syncer->setFullSyncItems(origItems);
// When the user cancels the ItemSync
QTimer::singleShot(10, syncer, &ItemSync::rollback);
// Then the itemsync should finish at some point, and not crash
QVERIFY(!syncer->exec());
QCOMPARE(syncer->errorString(), QStringLiteral("User canceled operation."));
// Cleanup
ItemDeleteJob *job = new ItemDeleteJob(origItems);
AKVERIFYEXEC(job);
}
};
QTEST_AKONADIMAIN(ItemsyncTest)
@@ -165,6 +165,14 @@ void ItemSyncPrivate::checkDone()
}
}
mProcessingBatch = false;
if (q->error() == Job::UserCanceled && mTransactionJobs == 0 && !mFinished) {
qCDebug(AKONADICORE_LOG) << "ItemSync of collection" << mSyncCollection.id() << "finished due to user cancelling";
mFinished = true;
q->emitResult();
return;
}
if (!mRemoteItemQueue.isEmpty()) {
execute();
//We don't have enough items, request more
@@ -363,9 +371,14 @@ void ItemSyncPrivate::execute()
//process the current batch of items
void ItemSyncPrivate::processBatch()
{
Q_Q(ItemSync);
if (mCurrentBatchRemoteItems.isEmpty() && !mDeliveryDone) {
return;
}
if (q->error() == Job::UserCanceled) {
checkDone();
return;
}
//request a transaction, there are items that require processing
requestTransaction();
@@ -517,7 +530,7 @@ void ItemSync::rollback()
if (d->mCurrentTransaction) {
d->mCurrentTransaction->rollback();
}
d->mDeliveryDone = true; // user wont deliver more data
d->mDeliveryDone = true; // user won't deliver more data
d->execute(); // end this in an ordered way, since we have an error set no real change will be done
}
@@ -135,15 +135,16 @@ void TransactionSequence::slotResult(KJob *job)
connect(job, &TransactionCommitJob::result, [d](KJob *job) { d->commitResult(job);});
}
}
} else if (job->error() == KJob::KilledJobError) {
Job::slotResult(job);
} else {
setError(job->error());
setErrorText(job->errorText());
removeSubjob(job);
// cancel all subjobs in case someone else is listening (such as ItemSync), but without notifying ourselves again
// cancel all subjobs in case someone else is listening (such as ItemSync)
foreach (KJob *job, subjobs()) {
disconnect(job, &KJob::result, this, &TransactionSequence::slotResult);
job->kill(EmitResult);
job->kill(KJob::EmitResult);
}
clearSubjobs();
@@ -165,6 +166,8 @@ void TransactionSequence::commit()
if (d->mState == TransactionSequencePrivate::Running) {
d->mState = TransactionSequencePrivate::WaitingForSubjobs;
} else if (d->mState == TransactionSequencePrivate::RollingBack) {
return;
} else {
// we never got any subjobs, that means we never started a transaction
// so we can just quit here
@@ -231,7 +234,16 @@ void TransactionSequence::rollback()
return;
}
// TODO cancel not yet executed sub-jobs here
const auto jobList = subjobs();
for (KJob *job : jobList) {
// Killing the current subjob means forcibly closing the akonadiserver socket
// (with a bit of delay since it happens in a secondary thread)
// which means the next job gets disconnected
// and the itemsync finishes with error "Cannot connect to the Akonadi service.", not ideal
if (job != d->mCurrentSubJob) {
job->kill(KJob::EmitResult);
}
}
d->mState = TransactionSequencePrivate::RollingBack;
TransactionRollbackJob *job = new TransactionRollbackJob(this);
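
The comment in the rollback() hunk above carries the central design decision: subjobs that have not started talking to the Akonadi server are killed right away (with EmitResult, so listeners such as ItemSync are notified), while the currently running subjob is left alone because killing it would forcibly close the server socket and break the jobs that follow; the TransactionRollbackJob then undoes whatever was already written. A minimal sketch of that pattern in plain KJob/KCompositeJob terms follows; MyTransactionSequence, mCurrentSubJob and startRollbackJob() are hypothetical names used for illustration, not the classes touched by this commit.

#include <KCompositeJob>

// Illustrative only: the shape of the rollback-from-user logic.
class MyTransactionSequence : public KCompositeJob
{
public:
    using KCompositeJob::KCompositeJob;
    void start() override {}                       // subjobs are started elsewhere
    void rollbackFromUser();

private:
    void startRollbackJob() { /* enqueue a job that undoes the committed work */ }
    KJob *mCurrentSubJob = nullptr;                // the subjob currently talking to the server
};

void MyTransactionSequence::rollbackFromUser()
{
    setError(KJob::UserDefinedError);              // mark the composite job as failed
    setErrorText(QStringLiteral("User canceled operation."));

    const QList<KJob *> jobs = subjobs();          // provided by KCompositeJob
    for (KJob *subjob : jobs) {
        // Skip the subjob that is currently executing: killing it would tear
        // down the connection that the remaining work still depends on.
        if (subjob != mCurrentSubJob) {
            subjob->kill(KJob::EmitResult);        // listeners still receive result()
        }
    }

    startRollbackJob();                            // asynchronously undo what was already written
}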