From 23265d70bb00b4c869b2cea3d7a59075cc3acabc Mon Sep 17 00:00:00 2001 From: CentOS Sources Date: Mar 06 2018 18:38:17 +0000 Subject: import libvirt-3.2.0-14.el7_4.9 --- diff --git a/SOURCES/libvirt-RHEL-qemu-Report-full-stats-for-completed-migration.patch b/SOURCES/libvirt-RHEL-qemu-Report-full-stats-for-completed-migration.patch new file mode 100644 index 0000000..b9f4830 --- /dev/null +++ b/SOURCES/libvirt-RHEL-qemu-Report-full-stats-for-completed-migration.patch @@ -0,0 +1,57 @@ +From c92e3fa956308c0b8be42a412b6f6ea2b9ff27ce Mon Sep 17 00:00:00 2001 +Message-Id: +From: Jiri Denemark +Date: Fri, 19 Jan 2018 13:51:51 +0100 +Subject: [PATCH] RHEL: qemu: Report full stats for completed migration + +RHEL-only: caused by improper adjustment of a backported patch + +When backporting patches (specifically upstream commit 3f2d6d829e, which +was backported to 7.4.z as commit 547c0e17e6) for BZ 1530130 I +intentionally skipped several refactoring commits because they were not +trivial and resolving conflicts could cause more problems than skipping +them. The commits were done upstream to support reporting statistics +from NBD migration. + +The refactors moved qemuMigrationFetchJobStatus (hidden in +qemuMigrationUpdateJobStatus in RHEL-7.4) out of +qemuMigrationCheckJobStatus. Because of this, the upstream version of +qemuMigrationCheckJobStatus didn't have to do anything special for the +QEMU_DOMAIN_JOB_STATUS_COMPLETED switch branch. But downstream called +qemuMigrationUpdateJobStatus in this branch and thus +QEMU_DOMAIN_JOB_STATUS_COMPLETED should have been replaced with +QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED to make sure the migration +statistics are properly fetched from QEMU once migration completes. + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_migration.c | 4 ++-- + 1 file changed, 2 insertions(+), 2 deletions(-) + +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 2b1e8dd9fd..ab5398f5ea 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1452,7 +1452,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + qemuMigrationJobName(vm), _("canceled by client")); + return -1; + +- case QEMU_DOMAIN_JOB_STATUS_COMPLETED: ++ case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + /* Fetch statistics of a completed migration */ + if (events && updateJobStats && + qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0) +@@ -1461,7 +1461,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: + case QEMU_DOMAIN_JOB_STATUS_MIGRATING: +- case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: ++ case QEMU_DOMAIN_JOB_STATUS_COMPLETED: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + break; + } +-- +2.16.0 + diff --git a/SOURCES/libvirt-cpu_x86-Copy-CPU-signature-from-ancestor.patch b/SOURCES/libvirt-cpu_x86-Copy-CPU-signature-from-ancestor.patch new file mode 100644 index 0000000..77bda28 --- /dev/null +++ b/SOURCES/libvirt-cpu_x86-Copy-CPU-signature-from-ancestor.patch @@ -0,0 +1,47 @@ +From e8639315f3f6116b3f589657bad57a15f119ddbc Mon Sep 17 00:00:00 2001 +Message-Id: +From: Jiri Denemark +Date: Thu, 11 Jan 2018 14:33:07 +0100 +Subject: [PATCH] cpu_x86: Copy CPU signature from ancestor + +When specifying a new CPU model in cpu_map.xml as an extension to an +existing model, we forgot to copy the signature (family + model) from +the original CPU model. 
+ +We don't use this way of specifying CPU models, but it's still supported +and it becomes useful when someone wants to quickly hack up a CPU model +for testing or when creating additional variants of existing models to +help with fixing some Spectre issues. + +Signed-off-by: Jiri Denemark +Reviewed-by: Pavel Hrdina +(cherry picked from commit b427cf4831d0ea7aac9dd1a3aa7682478356a483) + +https://bugzilla.redhat.com/show_bug.cgi?id=1533418 + +The new -IBRS and -IBPB CPU models were defined via inheritance from +their original models in RHEL. Thus when the host CPU matches the +signature of the original model from cpu_map.xml, libvirt will detect +the host CPU as the original model + the new feature rather than +reporting it as the -IBRS/-IBPB variant. + +Signed-off-by: Jiri Denemark +--- + src/cpu/cpu_x86.c | 1 + + 1 file changed, 1 insertion(+) + +diff --git a/src/cpu/cpu_x86.c b/src/cpu/cpu_x86.c +index efef7f235d..41aaa61c35 100644 +--- a/src/cpu/cpu_x86.c ++++ b/src/cpu/cpu_x86.c +@@ -1209,6 +1209,7 @@ x86ModelParse(xmlXPathContextPtr ctxt, + VIR_FREE(name); + + model->vendor = ancestor->vendor; ++ model->signature = ancestor->signature; + if (x86DataCopy(&model->data, &ancestor->data) < 0) + goto error; + } +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-Fix-crash-in-offline-migration.patch b/SOURCES/libvirt-qemu-Fix-crash-in-offline-migration.patch new file mode 100644 index 0000000..11da599 --- /dev/null +++ b/SOURCES/libvirt-qemu-Fix-crash-in-offline-migration.patch @@ -0,0 +1,45 @@ +From 3a47e083b2e118cb338442721a73ed8506e4c0ed Mon Sep 17 00:00:00 2001 +Message-Id: <3a47e083b2e118cb338442721a73ed8506e4c0ed@dist-git> +From: Jiri Denemark +Date: Fri, 19 Jan 2018 10:32:44 +0100 +Subject: [PATCH] qemu: Fix crash in offline migration + +When migrating a shutoff domain (i.e., offline migration), we have no +statistics to report and thus jobInfo will be NULL in +qemuMigrationFinish. + +Broken by me in v3.10.0-183-ge8784e7868. 
+ +https://bugzilla.redhat.com/show_bug.cgi?id=1536351 + +Signed-off-by: Jiri Denemark +Reviewed-by: Pavel Hrdina +(cherry picked from commit bcc5710708ec90146b71bdb00d8705cb4a9e0088) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_migration.c | 6 ++++-- + 1 file changed, 4 insertions(+), 2 deletions(-) + +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 120b79415e..2b1e8dd9fd 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -5372,8 +5372,10 @@ qemuMigrationFinish(virQEMUDriverPtr driver, + } + + if (dom) { +- VIR_STEAL_PTR(priv->job.completed, jobInfo); +- priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ if (jobInfo) { ++ VIR_STEAL_PTR(priv->job.completed, jobInfo); ++ priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ } + + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_STATS) < 0) +-- +2.16.0 + diff --git a/SOURCES/libvirt-qemu-Fix-type-of-a-completed-job.patch b/SOURCES/libvirt-qemu-Fix-type-of-a-completed-job.patch new file mode 100644 index 0000000..1f5cd14 --- /dev/null +++ b/SOURCES/libvirt-qemu-Fix-type-of-a-completed-job.patch @@ -0,0 +1,61 @@ +From 684d747a6c5ea03dc05e6324ff8cba42b5873a15 Mon Sep 17 00:00:00 2001 +Message-Id: <684d747a6c5ea03dc05e6324ff8cba42b5873a15@dist-git> +From: Jiri Denemark +Date: Thu, 11 Jan 2018 20:47:50 +0100 +Subject: [PATCH] qemu: Fix type of a completed job + +Libvirt 3.7.0 and earlier reported a migration job as completed +immediately after QEMU finished sending migration data, at which point +migration was not really complete yet. Commit v3.7.0-29-g3f2d6d829e +fixed this, but caused a regression in reporting statistics for +completed jobs, which started reporting the job as still running. This +happened because the completed job statistics including the job status +are copied from the running job before we finally mark it as completed. + +Let's make sure QEMU_DOMAIN_JOB_STATUS_COMPLETED is always set in the +completed job info even when the job has not finished yet. 
+ +https://bugzilla.redhat.com/show_bug.cgi?id=1523036 + +Signed-off-by: Jiri Denemark +Reviewed-by: Pavel Hrdina +(cherry picked from commit e8784e7868d44a2ce796b376cf78f0f6c61c114a) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_migration.c | 9 ++++++--- + 1 file changed, 6 insertions(+), 3 deletions(-) + +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index aabd0913ad..120b79415e 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1590,8 +1590,10 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + + qemuDomainJobInfoUpdateDowntime(jobInfo); + VIR_FREE(priv->job.completed); +- if (VIR_ALLOC(priv->job.completed) == 0) ++ if (VIR_ALLOC(priv->job.completed) == 0) { + *priv->job.completed = *jobInfo; ++ priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ } + + if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) +@@ -5370,8 +5372,9 @@ qemuMigrationFinish(virQEMUDriverPtr driver, + } + + if (dom) { +- priv->job.completed = jobInfo; +- jobInfo = NULL; ++ VIR_STEAL_PTR(priv->job.completed, jobInfo); ++ priv->job.completed->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ + if (qemuMigrationBakeCookie(mig, driver, vm, cookieout, cookieoutlen, + QEMU_MIGRATION_COOKIE_STATS) < 0) + VIR_WARN("Unable to encode migration cookie"); +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-drop-code-for-VIR_DOMAIN_JOB_BOUNDED-and-timeRemaining.patch b/SOURCES/libvirt-qemu-drop-code-for-VIR_DOMAIN_JOB_BOUNDED-and-timeRemaining.patch new file mode 100644 index 0000000..8eb685c --- /dev/null +++ b/SOURCES/libvirt-qemu-drop-code-for-VIR_DOMAIN_JOB_BOUNDED-and-timeRemaining.patch @@ -0,0 +1,99 @@ +From 5e9305c83e7acf31e849af2db6b82c7651114425 Mon Sep 17 00:00:00 2001 +Message-Id: <5e9305c83e7acf31e849af2db6b82c7651114425@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:19 +0300 +Subject: [PATCH] qemu: drop code for VIR_DOMAIN_JOB_BOUNDED and timeRemaining + +qemu driver does not have VIR_DOMAIN_JOB_BOUNDED jobs and +timeRemaining is always 0. 
+ +Signed-off-by: Jiri Denemark +(cherry picked from commit 16bf7619b8377b21e8cb2af33b53949ff77755c6) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 7 ------- + src/qemu/qemu_domain.h | 1 - + src/qemu/qemu_driver.c | 3 +-- + src/qemu/qemu_migration_cookie.c | 5 ----- + 4 files changed, 1 insertion(+), 15 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index ade06f07db..3c2368368f 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -395,7 +395,6 @@ qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + { + info->type = jobInfo->type; + info->timeElapsed = jobInfo->timeElapsed; +- info->timeRemaining = jobInfo->timeRemaining; + + info->memTotal = jobInfo->stats.ram_total; + info->memRemaining = jobInfo->stats.ram_remaining; +@@ -440,12 +439,6 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + jobInfo->timeElapsed - jobInfo->timeDelta) < 0) + goto error; + +- if (jobInfo->type == VIR_DOMAIN_JOB_BOUNDED && +- virTypedParamsAddULLong(&par, &npar, &maxpar, +- VIR_DOMAIN_JOB_TIME_REMAINING, +- jobInfo->timeRemaining) < 0) +- goto error; +- + if (stats->downtime_set && + virTypedParamsAddULLong(&par, &npar, &maxpar, + VIR_DOMAIN_JOB_DOWNTIME, +diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h +index 68458ad9ae..7002407218 100644 +--- a/src/qemu/qemu_domain.h ++++ b/src/qemu/qemu_domain.h +@@ -112,7 +112,6 @@ struct _qemuDomainJobInfo { + info from the source (migrations only). */ + /* Computed values */ + unsigned long long timeElapsed; +- unsigned long long timeRemaining; + long long timeDelta; /* delta = received - sent, i.e., the difference + between the source and the destination time plus + the time between the end of Perform phase on the +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 501f19fbcc..1c60d533e8 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -12971,8 +12971,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + } + *jobInfo = *info; + +- if (jobInfo->type == VIR_DOMAIN_JOB_BOUNDED || +- jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) { ++ if (jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) { + if (fetch) + ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE, + jobInfo); +diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c +index af0ac03418..5f8595fdba 100644 +--- a/src/qemu/qemu_migration_cookie.c ++++ b/src/qemu/qemu_migration_cookie.c +@@ -612,9 +612,6 @@ qemuMigrationCookieStatisticsXMLFormat(virBufferPtr buf, + virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", + VIR_DOMAIN_JOB_TIME_ELAPSED, + jobInfo->timeElapsed); +- virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", +- VIR_DOMAIN_JOB_TIME_REMAINING, +- jobInfo->timeRemaining); + if (stats->downtime_set) + virBufferAsprintf(buf, "<%1$s>%2$llu</%1$s>\n", + VIR_DOMAIN_JOB_DOWNTIME, +@@ -987,8 +984,6 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) + + virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_ELAPSED "[1])", + ctxt, &jobInfo->timeElapsed); +- virXPathULongLong("string(./" VIR_DOMAIN_JOB_TIME_REMAINING "[1])", +- ctxt, &jobInfo->timeRemaining); + + if (virXPathULongLong("string(./" VIR_DOMAIN_JOB_DOWNTIME "[1])", + ctxt, &stats->downtime) == 0) +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-introduce-QEMU_DOMAIN_JOB_STATUS_POSTCOPY.patch b/SOURCES/libvirt-qemu-introduce-QEMU_DOMAIN_JOB_STATUS_POSTCOPY.patch new file mode 100644 index 0000000..2db5463 --- /dev/null +++ 
b/SOURCES/libvirt-qemu-introduce-QEMU_DOMAIN_JOB_STATUS_POSTCOPY.patch @@ -0,0 +1,171 @@ +From 626d23419335db221e7b52a5bc07ad4f74391199 Mon Sep 17 00:00:00 2001 +Message-Id: <626d23419335db221e7b52a5bc07ad4f74391199@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:21 +0300 +Subject: [PATCH] qemu: introduce QEMU_DOMAIN_JOB_STATUS_POSTCOPY + +Let's introduce QEMU_DOMAIN_JOB_STATUS_POSTCOPY state for job.current->status +instead of checking job.current->stats.status. The latter can be changed +when fetching migration statistics. Moving the state function out of the variable +and leaving only the store function seems more manageable. + +This patch removes all state-checking usage of stats except for +qemuDomainGetJobStatsInternal. This place will be handled separately. + +Signed-off-by: Jiri Denemark +(cherry picked from commit 09f57f9aaca6d2703567d5cda45d47fc06131ae0) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 1 + + src/qemu/qemu_domain.h | 1 + + src/qemu/qemu_driver.c | 5 +++-- + src/qemu/qemu_migration.c | 18 +++++++++++------- + src/qemu/qemu_process.c | 4 ++-- + 5 files changed, 18 insertions(+), 11 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index c9b3e13892..329ec6e2ce 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -397,6 +397,7 @@ qemuDomainJobStatusToType(qemuDomainJobStatus status) + break; + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: ++ case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + return VIR_DOMAIN_JOB_UNBOUNDED; + + case QEMU_DOMAIN_JOB_STATUS_COMPLETED: +diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h +index e4e672f872..c8b3873b9f 100644 +--- a/src/qemu/qemu_domain.h ++++ b/src/qemu/qemu_domain.h +@@ -102,6 +102,7 @@ VIR_ENUM_DECL(qemuDomainAsyncJob) + typedef enum { + QEMU_DOMAIN_JOB_STATUS_NONE = 0, + QEMU_DOMAIN_JOB_STATUS_ACTIVE, ++ QEMU_DOMAIN_JOB_STATUS_POSTCOPY, + QEMU_DOMAIN_JOB_STATUS_COMPLETED, + QEMU_DOMAIN_JOB_STATUS_FAILED, + QEMU_DOMAIN_JOB_STATUS_CANCELED, +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index a271f60abf..ecba87a121 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -12971,7 +12971,8 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + } + *jobInfo = *info; + +- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) { ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + if (fetch) + ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE, + jobInfo); +@@ -13105,7 +13106,7 @@ static int qemuDomainAbortJob(virDomainPtr dom) + } + + if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT && +- (priv->job.current->stats.status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY || ++ (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY || + (virDomainObjGetState(vm, &reason) == VIR_DOMAIN_PAUSED && + reason == VIR_DOMAIN_PAUSED_POSTCOPY))) { + virReportError(VIR_ERR_OPERATION_INVALID, "%s", +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 500f467f25..1726349f94 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1333,6 +1333,10 @@ static void + qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) + { + switch ((qemuMonitorMigrationStatus) jobInfo->stats.status) { ++ case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_POSTCOPY; ++ break; ++ + case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: + jobInfo->status = 
QEMU_DOMAIN_JOB_STATUS_COMPLETED; + break; +@@ -1351,7 +1355,6 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) + + case QEMU_MONITOR_MIGRATION_STATUS_SETUP: + case QEMU_MONITOR_MIGRATION_STATUS_ACTIVE: +- case QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY: + case QEMU_MONITOR_MIGRATION_STATUS_CANCELLING: + case QEMU_MONITOR_MIGRATION_STATUS_LAST: + break; +@@ -1457,6 +1460,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + break; + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: ++ case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + break; + } + return 0; +@@ -1514,8 +1518,7 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + * will continue waiting until the migrate state changes to completed. + */ + if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY && +- jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && +- jobInfo->stats.status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) { ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + VIR_DEBUG("Migration switched to post-copy"); + if (updateStats && + qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0) +@@ -1529,7 +1532,8 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + return 0; + + error: +- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) { ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + /* The migration was aborted by us rather than QEMU itself. */ + jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -2; +@@ -3824,7 +3828,7 @@ qemuMigrationRun(virQEMUDriverPtr driver, + else if (rc == -1) + goto cleanup; + +- if (priv->job.current->stats.status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) ++ if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + inPostCopy = true; + + /* When migration completed, QEMU will have paused the CPUs for us. 
+@@ -3876,7 +3880,7 @@ qemuMigrationRun(virQEMUDriverPtr driver, + ignore_value(virTimeMillisNow(&priv->job.completed->sent)); + } + +- if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && !inPostCopy) ++ if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) + priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + + cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK | +@@ -5249,7 +5253,7 @@ qemuMigrationFinish(virQEMUDriverPtr driver, + goto endjob; + } + +- if (priv->job.current->stats.status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) ++ if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) + inPostCopy = true; + + if (!(flags & VIR_MIGRATE_PAUSED)) { +diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c +index 3235cebad0..25de367afc 100644 +--- a/src/qemu/qemu_process.c ++++ b/src/qemu/qemu_process.c +@@ -723,8 +723,8 @@ qemuProcessHandleStop(qemuMonitorPtr mon ATTRIBUTE_UNUSED, + } + + if (priv->job.asyncJob == QEMU_ASYNC_JOB_MIGRATION_OUT) { +- if (priv->job.current->stats.status == +- QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) { ++ if (priv->job.current->status == ++ QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + reason = VIR_DOMAIN_PAUSED_POSTCOPY; + detail = VIR_DOMAIN_EVENT_SUSPENDED_POSTCOPY; + } else { +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-introduce-migrating-job-status.patch b/SOURCES/libvirt-qemu-introduce-migrating-job-status.patch new file mode 100644 index 0000000..eeb09f8 --- /dev/null +++ b/SOURCES/libvirt-qemu-introduce-migrating-job-status.patch @@ -0,0 +1,117 @@ +From 59dfc370693ee7631f7b538660cfd30f213b0a86 Mon Sep 17 00:00:00 2001 +Message-Id: <59dfc370693ee7631f7b538660cfd30f213b0a86@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:28 +0300 +Subject: [PATCH] qemu: introduce migrating job status + +Instead of checking stats.status let's set the status to migrating +as soon as the migrate command is sent (waiting for completion +is a good place too). 
+ +Signed-off-by: Jiri Denemark +(cherry picked from commit 5a274d4fdc0015909fa95e6667922bb249a95bd5) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Conflicts: + src/qemu/qemu_migration.c + - commit e87d4b9e2f is not backported + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 1 + + src/qemu/qemu_domain.h | 1 + + src/qemu/qemu_driver.c | 4 +++- + src/qemu/qemu_migration.c | 9 +++++++-- + 4 files changed, 12 insertions(+), 3 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index 4b1ead7d0a..e81d682c00 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -397,6 +397,7 @@ qemuDomainJobStatusToType(qemuDomainJobStatus status) + break; + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: ++ case QEMU_DOMAIN_JOB_STATUS_MIGRATING: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + return VIR_DOMAIN_JOB_UNBOUNDED; + +diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h +index c8b3873b9f..0cf54d99ae 100644 +--- a/src/qemu/qemu_domain.h ++++ b/src/qemu/qemu_domain.h +@@ -102,6 +102,7 @@ VIR_ENUM_DECL(qemuDomainAsyncJob) + typedef enum { + QEMU_DOMAIN_JOB_STATUS_NONE = 0, + QEMU_DOMAIN_JOB_STATUS_ACTIVE, ++ QEMU_DOMAIN_JOB_STATUS_MIGRATING, + QEMU_DOMAIN_JOB_STATUS_POSTCOPY, + QEMU_DOMAIN_JOB_STATUS_COMPLETED, + QEMU_DOMAIN_JOB_STATUS_FAILED, +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 8972897a28..931f3d344d 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -12934,7 +12934,8 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + fetch = false; + + /* Do not ask QEMU if migration is not even running yet */ +- if (!priv->job.current || !priv->job.current->stats.status) ++ if (!priv->job.current || ++ priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) + fetch = false; + + if (fetch) { +@@ -12970,6 +12971,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + *jobInfo = *info; + + if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + if (fetch) + ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE, +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 9f7aad6680..c8cd7ebfa3 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1460,6 +1460,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + break; + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: ++ case QEMU_DOMAIN_JOB_STATUS_MIGRATING: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + break; + } +@@ -1532,7 +1533,8 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + return 0; + + error: +- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || ++ /* state can not be active at this point */ ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + /* The migration was aborted by us rather than QEMU itself. 
*/ + jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -2; + } +@@ -1563,6 +1565,8 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + + flags |= QEMU_MIGRATION_COMPLETED_UPDATE_STATS; + ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_MIGRATING; ++ + while ((rv = qemuMigrationCompleted(driver, vm, asyncJob, + dconn, flags)) != 1) { + if (rv < 0) +@@ -3879,7 +3883,8 @@ qemuMigrationRun(virQEMUDriverPtr driver, + ignore_value(virTimeMillisNow(&priv->job.completed->sent)); + } + +- if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) ++ if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || ++ priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING) + priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + + cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK | +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-introduce-qemu-domain-job-status.patch b/SOURCES/libvirt-qemu-introduce-qemu-domain-job-status.patch new file mode 100644 index 0000000..1a31ee5 --- /dev/null +++ b/SOURCES/libvirt-qemu-introduce-qemu-domain-job-status.patch @@ -0,0 +1,339 @@ +From c95e8e44a4b4c07b79cafafe20018c5d2d37bfbd Mon Sep 17 00:00:00 2001 +Message-Id: +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:20 +0300 +Subject: [PATCH] qemu: introduce qemu domain job status + +This patch simply switches the code from using VIR_DOMAIN_JOB_* to the +newly introduced QEMU_DOMAIN_JOB_STATUS_*. Later this gives us the freedom +to introduce states for the postcopy and mirroring phases. + +Signed-off-by: Jiri Denemark +(cherry picked from commit 751a1c7f0ade5c9e5ea858bbaf97e39950f821e6) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 27 ++++++++++++++++++++-- + src/qemu/qemu_domain.h | 10 +++++++- + src/qemu/qemu_driver.c | 10 ++++---- + src/qemu/qemu_migration.c | 50 +++++++++++++++++++--------------------- + src/qemu/qemu_migration_cookie.c | 2 +- + src/qemu/qemu_process.c | 2 +- + 6 files changed, 65 insertions(+), 36 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index 3c2368368f..c9b3e13892 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -389,11 +389,34 @@ qemuDomainJobInfoUpdateDowntime(qemuDomainJobInfoPtr jobInfo) + return 0; + } + ++static virDomainJobType ++qemuDomainJobStatusToType(qemuDomainJobStatus status) ++{ ++ switch (status) { ++ case QEMU_DOMAIN_JOB_STATUS_NONE: ++ break; ++ ++ case QEMU_DOMAIN_JOB_STATUS_ACTIVE: ++ return VIR_DOMAIN_JOB_UNBOUNDED; ++ ++ case QEMU_DOMAIN_JOB_STATUS_COMPLETED: ++ return VIR_DOMAIN_JOB_COMPLETED; ++ ++ case QEMU_DOMAIN_JOB_STATUS_FAILED: ++ return VIR_DOMAIN_JOB_FAILED; ++ ++ case QEMU_DOMAIN_JOB_STATUS_CANCELED: ++ return VIR_DOMAIN_JOB_CANCELLED; ++ } ++ ++ return VIR_DOMAIN_JOB_NONE; ++} ++ + int + qemuDomainJobInfoToInfo(qemuDomainJobInfoPtr jobInfo, + virDomainJobInfoPtr info) + { +- info->type = jobInfo->type; ++ info->type = qemuDomainJobStatusToType(jobInfo->status); + info->timeElapsed = jobInfo->timeElapsed; + + info->memTotal = jobInfo->stats.ram_total; +@@ -553,7 +576,7 @@ qemuDomainJobInfoToParams(qemuDomainJobInfoPtr jobInfo, + stats->cpu_throttle_percentage) < 0) + goto error; + +- *type = jobInfo->type; ++ *type = qemuDomainJobStatusToType(jobInfo->status); + *params = par; + *nparams = npar; + return 0; +diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h +index 7002407218..e4e672f872 100644 +--- a/src/qemu/qemu_domain.h ++++ b/src/qemu/qemu_domain.h +@@ -99,10 +99,18 @@ typedef enum { + } qemuDomainAsyncJob; + 
VIR_ENUM_DECL(qemuDomainAsyncJob) + ++typedef enum { ++ QEMU_DOMAIN_JOB_STATUS_NONE = 0, ++ QEMU_DOMAIN_JOB_STATUS_ACTIVE, ++ QEMU_DOMAIN_JOB_STATUS_COMPLETED, ++ QEMU_DOMAIN_JOB_STATUS_FAILED, ++ QEMU_DOMAIN_JOB_STATUS_CANCELED, ++} qemuDomainJobStatus; ++ + typedef struct _qemuDomainJobInfo qemuDomainJobInfo; + typedef qemuDomainJobInfo *qemuDomainJobInfoPtr; + struct _qemuDomainJobInfo { +- virDomainJobType type; ++ qemuDomainJobStatus status; + virDomainJobOperation operation; + unsigned long long started; /* When the async job started */ + unsigned long long stopped; /* When the domain's CPUs were stopped */ +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 1c60d533e8..a271f60abf 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -3299,7 +3299,7 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom, + goto endjob; + } + +- priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + + /* Pause */ + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { +@@ -12965,13 +12965,13 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + info = priv->job.current; + + if (!info) { +- jobInfo->type = VIR_DOMAIN_JOB_NONE; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE; + ret = 0; + goto cleanup; + } + *jobInfo = *info; + +- if (jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) { ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) { + if (fetch) + ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE, + jobInfo); +@@ -13006,7 +13006,7 @@ qemuDomainGetJobInfo(virDomainPtr dom, + if (qemuDomainGetJobStatsInternal(driver, vm, false, &jobInfo) < 0) + goto cleanup; + +- if (jobInfo.type == VIR_DOMAIN_JOB_NONE) { ++ if (jobInfo.status == QEMU_DOMAIN_JOB_STATUS_NONE) { + memset(info, 0, sizeof(*info)); + info->type = VIR_DOMAIN_JOB_NONE; + ret = 0; +@@ -13047,7 +13047,7 @@ qemuDomainGetJobStats(virDomainPtr dom, + if (qemuDomainGetJobStatsInternal(driver, vm, completed, &jobInfo) < 0) + goto cleanup; + +- if (jobInfo.type == VIR_DOMAIN_JOB_NONE) { ++ if (jobInfo.status == QEMU_DOMAIN_JOB_STATUS_NONE) { + *type = VIR_DOMAIN_JOB_NONE; + *params = NULL; + *nparams = 0; +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 60722cbf6c..500f467f25 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -965,7 +965,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver, + goto cleanup; + + if (priv->job.abortJob) { +- priv->job.current->type = VIR_DOMAIN_JOB_CANCELLED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), + qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + _("canceled by client")); +@@ -1334,19 +1334,19 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) + { + switch ((qemuMonitorMigrationStatus) jobInfo->stats.status) { + case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: +- jobInfo->type = VIR_DOMAIN_JOB_COMPLETED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: +- jobInfo->type = VIR_DOMAIN_JOB_NONE; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_NONE; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_ERROR: +- jobInfo->type = VIR_DOMAIN_JOB_FAILED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_CANCELLED: +- jobInfo->type = VIR_DOMAIN_JOB_CANCELLED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + break; + + case 
QEMU_MONITOR_MIGRATION_STATUS_SETUP: +@@ -1433,32 +1433,30 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + else if (qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0) + return -1; + +- switch (jobInfo->type) { +- case VIR_DOMAIN_JOB_NONE: ++ switch (jobInfo->status) { ++ case QEMU_DOMAIN_JOB_STATUS_NONE: + virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), + qemuMigrationJobName(vm), _("is not active")); + return -1; + +- case VIR_DOMAIN_JOB_FAILED: ++ case QEMU_DOMAIN_JOB_STATUS_FAILED: + virReportError(VIR_ERR_OPERATION_FAILED, _("%s: %s"), + qemuMigrationJobName(vm), _("unexpectedly failed")); + return -1; + +- case VIR_DOMAIN_JOB_CANCELLED: ++ case QEMU_DOMAIN_JOB_STATUS_CANCELED: + virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), + qemuMigrationJobName(vm), _("canceled by client")); + return -1; + +- case VIR_DOMAIN_JOB_COMPLETED: ++ case QEMU_DOMAIN_JOB_STATUS_COMPLETED: + /* Fetch statistics of a completed migration */ + if (events && updateJobStats && + qemuMigrationUpdateJobStatus(driver, vm, asyncJob) < 0) + return -1; + break; + +- case VIR_DOMAIN_JOB_BOUNDED: +- case VIR_DOMAIN_JOB_UNBOUNDED: +- case VIR_DOMAIN_JOB_LAST: ++ case QEMU_DOMAIN_JOB_STATUS_ACTIVE: + break; + } + return 0; +@@ -1516,7 +1514,7 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + * will continue waiting until the migrate state changes to completed. + */ + if (flags & QEMU_MIGRATION_COMPLETED_POSTCOPY && +- jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED && ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && + jobInfo->stats.status == QEMU_MONITOR_MIGRATION_STATUS_POSTCOPY) { + VIR_DEBUG("Migration switched to post-copy"); + if (updateStats && +@@ -1525,18 +1523,18 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + return 1; + } + +- if (jobInfo->type == VIR_DOMAIN_JOB_COMPLETED) ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED) + return 1; + else + return 0; + + error: +- if (jobInfo->type == VIR_DOMAIN_JOB_UNBOUNDED) { ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE) { + /* The migration was aborted by us rather than QEMU itself. 
*/ +- jobInfo->type = VIR_DOMAIN_JOB_FAILED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -2; +- } else if (jobInfo->type == VIR_DOMAIN_JOB_COMPLETED) { +- jobInfo->type = VIR_DOMAIN_JOB_FAILED; ++ } else if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED) { ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -1; + } else { + return -1; +@@ -1561,7 +1559,7 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + + flags |= QEMU_MIGRATION_COMPLETED_UPDATE_STATS; + +- jobInfo->type = VIR_DOMAIN_JOB_UNBOUNDED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + while ((rv = qemuMigrationCompleted(driver, vm, asyncJob, + dconn, flags)) != 1) { + if (rv < 0) +@@ -1569,7 +1567,7 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + + if (events) { + if (virDomainObjWait(vm) < 0) { +- jobInfo->type = VIR_DOMAIN_JOB_FAILED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -2; + } + } else { +@@ -3744,7 +3742,7 @@ qemuMigrationRun(virQEMUDriverPtr driver, + * as this is a critical section so we are guaranteed + * priv->job.abortJob will not change */ + ignore_value(qemuDomainObjExitMonitor(driver, vm)); +- priv->job.current->type = VIR_DOMAIN_JOB_CANCELLED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_CANCELED; + virReportError(VIR_ERR_OPERATION_ABORTED, _("%s: %s"), + qemuDomainAsyncJobTypeToString(priv->job.asyncJob), + _("canceled by client")); +@@ -3878,8 +3876,8 @@ qemuMigrationRun(virQEMUDriverPtr driver, + ignore_value(virTimeMillisNow(&priv->job.completed->sent)); + } + +- if (priv->job.current->type == VIR_DOMAIN_JOB_UNBOUNDED && !inPostCopy) +- priv->job.current->type = VIR_DOMAIN_JOB_FAILED; ++ if (priv->job.current->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE && !inPostCopy) ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + + cookieFlags |= QEMU_MIGRATION_COOKIE_NETWORK | + QEMU_MIGRATION_COOKIE_STATS; +@@ -3921,7 +3919,7 @@ qemuMigrationRun(virQEMUDriverPtr driver, + goto cleanup; + + cancelPostCopy: +- priv->job.current->type = VIR_DOMAIN_JOB_FAILED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + if (inPostCopy) + goto cancel; + else +@@ -5640,7 +5638,7 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, + return -1; + + qemuDomainObjSetAsyncJobMask(vm, mask); +- priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + + return 0; + } +diff --git a/src/qemu/qemu_migration_cookie.c b/src/qemu/qemu_migration_cookie.c +index 5f8595fdba..4914c77ef0 100644 +--- a/src/qemu/qemu_migration_cookie.c ++++ b/src/qemu/qemu_migration_cookie.c +@@ -974,7 +974,7 @@ qemuMigrationCookieStatisticsXMLParse(xmlXPathContextPtr ctxt) + goto cleanup; + + stats = &jobInfo->stats; +- jobInfo->type = VIR_DOMAIN_JOB_COMPLETED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; + + virXPathULongLong("string(./started[1])", ctxt, &jobInfo->started); + virXPathULongLong("string(./stopped[1])", ctxt, &jobInfo->stopped); +diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c +index 6ce33c0134..3235cebad0 100644 +--- a/src/qemu/qemu_process.c ++++ b/src/qemu/qemu_process.c +@@ -3950,7 +3950,7 @@ qemuProcessBeginJob(virQEMUDriverPtr driver, + return -1; + + qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); +- priv->job.current->type = VIR_DOMAIN_JOB_UNBOUNDED; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + + return 0; + } +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-migration-don-t-expose-incomplete-job-as-complete.patch 
b/SOURCES/libvirt-qemu-migration-don-t-expose-incomplete-job-as-complete.patch new file mode 100644 index 0000000..7e87b62 --- /dev/null +++ b/SOURCES/libvirt-qemu-migration-don-t-expose-incomplete-job-as-complete.patch @@ -0,0 +1,126 @@ +From 547c0e17e665d7a1051d8670b2f9e44783dbaf58 Mon Sep 17 00:00:00 2001 +Message-Id: <547c0e17e665d7a1051d8670b2f9e44783dbaf58@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:31 +0300 +Subject: [PATCH] qemu: migration: don't expose incomplete job as complete + +In case of real migration (not migrating to a file on save, dump, etc.) +migration info is not complete at the time qemu finishes migration +in normal (non-postcopy) mode. We need to update disk stats, +downtime info, etc. Thus let's not expose this job status as +completed. + +To achieve this let's set the status to 'qemu completed' after +qemu reports migration is finished. It is not visible as a completed +job to clients. Cookie code on the confirm phase will finally turn the +job into completed. As we don't need to do anything more when +migrating to a file, the status is set to 'completed' as before +in this case. + +Signed-off-by: Jiri Denemark +(cherry picked from commit 3f2d6d829eb8de0348fcbd58d654b29d5c5bebc2) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 1 + + src/qemu/qemu_domain.h | 1 + + src/qemu/qemu_driver.c | 1 + + src/qemu/qemu_migration.c | 13 +++++++++---- + 4 files changed, 12 insertions(+), 4 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index e81d682c00..9a80e409b9 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -398,6 +398,7 @@ qemuDomainJobStatusToType(qemuDomainJobStatus status) + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: + case QEMU_DOMAIN_JOB_STATUS_MIGRATING: ++ case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + return VIR_DOMAIN_JOB_UNBOUNDED; + +diff --git a/src/qemu/qemu_domain.h b/src/qemu/qemu_domain.h +index 0cf54d99ae..cc458e1224 100644 +--- a/src/qemu/qemu_domain.h ++++ b/src/qemu/qemu_domain.h +@@ -103,6 +103,7 @@ typedef enum { + QEMU_DOMAIN_JOB_STATUS_NONE = 0, + QEMU_DOMAIN_JOB_STATUS_ACTIVE, + QEMU_DOMAIN_JOB_STATUS_MIGRATING, ++ QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED, + QEMU_DOMAIN_JOB_STATUS_POSTCOPY, + QEMU_DOMAIN_JOB_STATUS_COMPLETED, + QEMU_DOMAIN_JOB_STATUS_FAILED, +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 931f3d344d..d2dd704868 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -12972,6 +12972,7 @@ qemuDomainGetJobStatsInternal(virQEMUDriverPtr driver, + + if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_ACTIVE || + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED || + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + if (fetch) + ret = qemuMigrationFetchJobStatus(driver, vm, QEMU_ASYNC_JOB_NONE, +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index c8cd7ebfa3..aabd0913ad 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1338,7 +1338,7 @@ qemuMigrationUpdateJobType(qemuDomainJobInfoPtr jobInfo) + break; + + case QEMU_MONITOR_MIGRATION_STATUS_COMPLETED: +- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED; + break; + + case QEMU_MONITOR_MIGRATION_STATUS_INACTIVE: +@@ -1461,6 +1461,7 @@ qemuMigrationCheckJobStatus(virQEMUDriverPtr driver, + + case QEMU_DOMAIN_JOB_STATUS_ACTIVE: + case 
QEMU_DOMAIN_JOB_STATUS_MIGRATING: ++ case QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED: + case QEMU_DOMAIN_JOB_STATUS_POSTCOPY: + break; + } +@@ -1527,19 +1528,19 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + return 1; + } + +- if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED) ++ if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) + return 1; + else + return 0; + + error: +- /* state can not be active at this point */ ++ /* state can not be active or completed at this point */ + if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_MIGRATING || + jobInfo->status == QEMU_DOMAIN_JOB_STATUS_POSTCOPY) { + /* The migration was aborted by us rather than QEMU itself. */ + jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -2; +- } else if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_COMPLETED) { ++ } else if (jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) { + jobInfo->status = QEMU_DOMAIN_JOB_STATUS_FAILED; + return -1; + } else { + return -1; +@@ -1592,6 +1593,10 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + if (VIR_ALLOC(priv->job.completed) == 0) + *priv->job.completed = *jobInfo; + ++ if (asyncJob != QEMU_ASYNC_JOB_MIGRATION_OUT && ++ jobInfo->status == QEMU_DOMAIN_JOB_STATUS_QEMU_COMPLETED) ++ jobInfo->status = QEMU_DOMAIN_JOB_STATUS_COMPLETED; ++ + return 0; + } + +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-migration-fix-race-on-cancelling-drive-mirror.patch b/SOURCES/libvirt-qemu-migration-fix-race-on-cancelling-drive-mirror.patch new file mode 100644 index 0000000..c1c5b82 --- /dev/null +++ b/SOURCES/libvirt-qemu-migration-fix-race-on-cancelling-drive-mirror.patch @@ -0,0 +1,61 @@ +From 48fc7e172ed82fbe8b377c11cff2c9d47fbb37c8 Mon Sep 17 00:00:00 2001 +Message-Id: <48fc7e172ed82fbe8b377c11cff2c9d47fbb37c8@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 7 Apr 2017 14:06:25 +0300 +Subject: [PATCH] qemu: migration: fix race on cancelling drive mirror + +Commit 0feebab2 added a call to qemuBlockNodeNamesDetect for a completed job +when updating block jobs. This affects the drive mirror cancelling logic, as +this function drops the vm lock. Now we have to recheck all disks +before the disk with the completed block job before going +to wait for block job events. + +Signed-off-by: Jiri Denemark +(cherry picked from commit bc82d1eaf66bfdfde80a64e7feedb60c6d1d2505) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530129 + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_migration.c | 15 +++++++++++++++ + 1 file changed, 15 insertions(+) + +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index d2b691bd2b..60722cbf6c 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -654,9 +654,11 @@ qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, + { + size_t i; + size_t active = 0; ++ size_t completed = 0; + int status; + bool failed = false; + ++ retry: + for (i = 0; i < vm->def->ndisks; i++) { + virDomainDiskDefPtr disk = vm->def->disks[i]; + qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); +@@ -683,6 +685,19 @@ qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, + default: + active++; + } ++ ++ if (status == VIR_DOMAIN_BLOCK_JOB_COMPLETED) ++ completed++; ++ } ++ ++ /* Updating completed block job drops the lock thus we have to recheck ++ * block jobs for disks that reside before the disk(s) with completed ++ * block job. 
++ */ ++ if (completed > 0) { ++ completed = 0; ++ active = 0; ++ goto retry; + } + + if (failed) { +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-start-all-async-job-with-job-status-active.patch b/SOURCES/libvirt-qemu-start-all-async-job-with-job-status-active.patch new file mode 100644 index 0000000..5f7bbfc --- /dev/null +++ b/SOURCES/libvirt-qemu-start-all-async-job-with-job-status-active.patch @@ -0,0 +1,107 @@ +From 2726e0de1158ab7788f16cd91df53a48b882a716 Mon Sep 17 00:00:00 2001 +Message-Id: <2726e0de1158ab7788f16cd91df53a48b882a716@dist-git> +From: Nikolay Shirokovskiy +Date: Fri, 1 Sep 2017 09:49:27 +0300 +Subject: [PATCH] qemu: start all async job with job status active + +Setting the status to none has little value - getting the job status +will not return even the elapsed time. + +After this patch getting job stats stays correct in the sense that +it will not fetch migration stats because it consults +stats.status before doing the fetch. + +Signed-off-by: Jiri Denemark +(cherry picked from commit b6868c3cdd711e012c6cb1ec0e0a3cac4ea92a33) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530130 + +Conflicts: + src/qemu/qemu_migration.c + - commit e87d4b9e2f is not backported + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_domain.c | 1 + + src/qemu/qemu_driver.c | 2 -- + src/qemu/qemu_migration.c | 4 ---- + src/qemu/qemu_process.c | 4 ---- + 4 files changed, 1 insertion(+), 10 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index 329ec6e2ce..4b1ead7d0a 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -3811,6 +3811,7 @@ qemuDomainObjBeginJobInternal(virQEMUDriverPtr driver, + qemuDomainObjResetAsyncJob(priv); + if (VIR_ALLOC(priv->job.current) < 0) + goto cleanup; ++ priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + priv->job.asyncJob = asyncJob; + priv->job.asyncOwner = virThreadSelfID(); + priv->job.asyncOwnerAPI = virThreadJobGet(); +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index ecba87a121..8972897a28 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -3299,8 +3299,6 @@ qemuDomainSaveInternal(virQEMUDriverPtr driver, virDomainPtr dom, + goto endjob; + } + +- priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; +- + /* Pause */ + if (virDomainObjGetState(vm, NULL) == VIR_DOMAIN_RUNNING) { + was_running = true; +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 1726349f94..9f7aad6680 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -1563,7 +1563,6 @@ qemuMigrationWaitForCompletion(virQEMUDriverPtr driver, + + flags |= QEMU_MIGRATION_COMPLETED_UPDATE_STATS; + +- jobInfo->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; + while ((rv = qemuMigrationCompleted(driver, vm, asyncJob, + dconn, flags)) != 1) { + if (rv < 0) +@@ -5624,7 +5623,6 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, + virDomainObjPtr vm, + qemuDomainAsyncJob job) + { +- qemuDomainObjPrivatePtr priv = vm->privateData; + virDomainJobOperation op; + unsigned long long mask; + +@@ -5642,8 +5640,6 @@ qemuMigrationJobStart(virQEMUDriverPtr driver, + return -1; + + qemuDomainObjSetAsyncJobMask(vm, mask); +- priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; +- + return 0; + } + +diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c +index 25de367afc..1633652fdc 100644 +--- a/src/qemu/qemu_process.c ++++ b/src/qemu/qemu_process.c +@@ -3943,15 +3943,11 @@ qemuProcessBeginJob(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virDomainJobOperation operation) + { +- 
qemuDomainObjPrivatePtr priv = vm->privateData; +- + if (qemuDomainObjBeginAsyncJob(driver, vm, QEMU_ASYNC_JOB_START, + operation) < 0) + return -1; + + qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE); +- priv->job.current->status = QEMU_DOMAIN_JOB_STATUS_ACTIVE; +- + return 0; + } + +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemu-take-current-async-job-into-account-in-qemuBlockNodeNamesDetect.patch b/SOURCES/libvirt-qemu-take-current-async-job-into-account-in-qemuBlockNodeNamesDetect.patch new file mode 100644 index 0000000..f286c50 --- /dev/null +++ b/SOURCES/libvirt-qemu-take-current-async-job-into-account-in-qemuBlockNodeNamesDetect.patch @@ -0,0 +1,340 @@ +From b40b5b2f59acd26c7238d9bfc48364d06e44a97f Mon Sep 17 00:00:00 2001 +Message-Id: +From: Nikolay Shirokovskiy +Date: Fri, 7 Apr 2017 14:06:24 +0300 +Subject: [PATCH] qemu: take current async job into account in + qemuBlockNodeNamesDetect + +Because it can be called during migration out (namely on cancelling +blockjobs). + +Signed-off-by: Jiri Denemark +(cherry picked from commit dd8e40790be7c88b476a93e2fa63fe086caa0cf8) + +https://bugzilla.redhat.com/show_bug.cgi?id=1530129 + +Conflicts: + src/qemu/qemu_process.c + - context + +Signed-off-by: Jiri Denemark +--- + src/qemu/qemu_block.c | 6 ++++-- + src/qemu/qemu_block.h | 4 +++- + src/qemu/qemu_blockjob.c | 9 ++++++--- + src/qemu/qemu_blockjob.h | 4 ++++ + src/qemu/qemu_driver.c | 11 ++++++----- + src/qemu/qemu_migration.c | 28 ++++++++++++++++------------ + src/qemu/qemu_process.c | 2 +- + 7 files changed, 40 insertions(+), 24 deletions(-) + +diff --git a/src/qemu/qemu_block.c b/src/qemu/qemu_block.c +index 586d56809d..29b5c4756e 100644 +--- a/src/qemu/qemu_block.c ++++ b/src/qemu/qemu_block.c +@@ -336,7 +336,8 @@ qemuBlockDiskDetectNodes(virDomainDiskDefPtr disk, + + int + qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, +- virDomainObjPtr vm) ++ virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob) + { + qemuDomainObjPrivatePtr priv = vm->privateData; + virHashTablePtr disktable = NULL; +@@ -350,7 +351,8 @@ qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, + if (!virQEMUCapsGet(priv->qemuCaps, QEMU_CAPS_QUERY_NAMED_BLOCK_NODES)) + return 0; + +- qemuDomainObjEnterMonitor(driver, vm); ++ if (qemuDomainObjEnterMonitorAsync(driver, vm, asyncJob) < 0) ++ return -1; + + disktable = qemuMonitorGetBlockInfo(qemuDomainGetMonitor(vm)); + data = qemuMonitorQueryNamedBlockNodes(qemuDomainGetMonitor(vm)); +diff --git a/src/qemu/qemu_block.h b/src/qemu/qemu_block.h +index 9d6a246435..2af15a65a0 100644 +--- a/src/qemu/qemu_block.h ++++ b/src/qemu/qemu_block.h +@@ -22,6 +22,7 @@ + # include "internal.h" + + # include "qemu_conf.h" ++# include "qemu_domain.h" + + # include "virhash.h" + # include "virjson.h" +@@ -46,7 +47,8 @@ qemuBlockNodeNameGetBackingChain(virJSONValuePtr data); + + int + qemuBlockNodeNamesDetect(virQEMUDriverPtr driver, +- virDomainObjPtr vm); ++ virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob); + + virHashTablePtr + qemuBlockGetNodeData(virJSONValuePtr data); +diff --git a/src/qemu/qemu_blockjob.c b/src/qemu/qemu_blockjob.c +index 0601e68da8..415768ddce 100644 +--- a/src/qemu/qemu_blockjob.c ++++ b/src/qemu/qemu_blockjob.c +@@ -55,13 +55,14 @@ VIR_LOG_INIT("qemu.qemu_blockjob"); + int + qemuBlockJobUpdate(virQEMUDriverPtr driver, + virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob, + virDomainDiskDefPtr disk) + { + qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); + int status = diskPriv->blockJobStatus; + + if (status != -1) { +- 
qemuBlockJobEventProcess(driver, vm, disk, ++ qemuBlockJobEventProcess(driver, vm, disk, asyncJob, + diskPriv->blockJobType, + diskPriv->blockJobStatus); + diskPriv->blockJobStatus = -1; +@@ -87,6 +88,7 @@ void + qemuBlockJobEventProcess(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virDomainDiskDefPtr disk, ++ qemuDomainAsyncJob asyncJob, + int type, + int status) + { +@@ -167,7 +169,7 @@ qemuBlockJobEventProcess(virQEMUDriverPtr driver, + disk->mirrorJob = VIR_DOMAIN_BLOCK_JOB_TYPE_UNKNOWN; + ignore_value(qemuDomainDetermineDiskChain(driver, vm, disk, + true, true)); +- ignore_value(qemuBlockNodeNamesDetect(driver, vm)); ++ ignore_value(qemuBlockNodeNamesDetect(driver, vm, asyncJob)); + diskPriv->blockjob = false; + break; + +@@ -247,9 +249,10 @@ qemuBlockJobSyncBegin(virDomainDiskDefPtr disk) + void + qemuBlockJobSyncEnd(virQEMUDriverPtr driver, + virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob, + virDomainDiskDefPtr disk) + { + VIR_DEBUG("disk=%s", disk->dst); +- qemuBlockJobUpdate(driver, vm, disk); ++ qemuBlockJobUpdate(driver, vm, asyncJob, disk); + QEMU_DOMAIN_DISK_PRIVATE(disk)->blockJobSync = false; + } +diff --git a/src/qemu/qemu_blockjob.h b/src/qemu/qemu_blockjob.h +index 775ce95ec0..47aa4c1755 100644 +--- a/src/qemu/qemu_blockjob.h ++++ b/src/qemu/qemu_blockjob.h +@@ -24,19 +24,23 @@ + + # include "internal.h" + # include "qemu_conf.h" ++# include "qemu_domain.h" + + int qemuBlockJobUpdate(virQEMUDriverPtr driver, + virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob, + virDomainDiskDefPtr disk); + void qemuBlockJobEventProcess(virQEMUDriverPtr driver, + virDomainObjPtr vm, + virDomainDiskDefPtr disk, ++ qemuDomainAsyncJob asyncJob, + int type, + int status); + + void qemuBlockJobSyncBegin(virDomainDiskDefPtr disk); + void qemuBlockJobSyncEnd(virQEMUDriverPtr driver, + virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob, + virDomainDiskDefPtr disk); + + #endif /* __QEMU_BLOCKJOB_H__ */ +diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c +index 0943d222b4..501f19fbcc 100644 +--- a/src/qemu/qemu_driver.c ++++ b/src/qemu/qemu_driver.c +@@ -4676,7 +4676,7 @@ processBlockJobEvent(virQEMUDriverPtr driver, + } + + if ((disk = qemuProcessFindDomainDiskByAlias(vm, diskAlias))) +- qemuBlockJobEventProcess(driver, vm, disk, type, status); ++ qemuBlockJobEventProcess(driver, vm, disk, QEMU_ASYNC_JOB_NONE, type, status); + + endjob: + qemuDomainObjEndJob(driver, vm); +@@ -16499,24 +16499,25 @@ qemuDomainBlockJobAbort(virDomainPtr dom, + * event to pull and let qemuBlockJobEventProcess() handle + * the rest as usual */ + qemuBlockJobEventProcess(driver, vm, disk, ++ QEMU_ASYNC_JOB_NONE, + VIR_DOMAIN_BLOCK_JOB_TYPE_PULL, + VIR_DOMAIN_BLOCK_JOB_CANCELED); + } else { + qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); +- qemuBlockJobUpdate(driver, vm, disk); ++ qemuBlockJobUpdate(driver, vm, QEMU_ASYNC_JOB_NONE, disk); + while (diskPriv->blockjob) { + if (virDomainObjWait(vm) < 0) { + ret = -1; + goto endjob; + } +- qemuBlockJobUpdate(driver, vm, disk); ++ qemuBlockJobUpdate(driver, vm, QEMU_ASYNC_JOB_NONE, disk); + } + } + } + + endjob: + if (disk) +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, QEMU_ASYNC_JOB_NONE, disk); + qemuDomainObjEndJob(driver, vm); + + cleanup: +@@ -20675,7 +20676,7 @@ qemuDomainSetBlockThreshold(virDomainPtr dom, + goto endjob; + + if (!src->nodebacking && +- qemuBlockNodeNamesDetect(driver, vm) < 0) ++ qemuBlockNodeNamesDetect(driver, vm, QEMU_ASYNC_JOB_NONE) < 0) + goto endjob; + + if 
(!src->nodebacking) { +diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c +index 5eed933a3c..d2b691bd2b 100644 +--- a/src/qemu/qemu_migration.c ++++ b/src/qemu/qemu_migration.c +@@ -600,7 +600,8 @@ qemuMigrationStopNBDServer(virQEMUDriverPtr driver, + */ + static int + qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver, +- virDomainObjPtr vm) ++ virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob) + { + size_t i; + size_t notReady = 0; +@@ -613,7 +614,7 @@ qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver, + if (!diskPriv->migrating) + continue; + +- status = qemuBlockJobUpdate(driver, vm, disk); ++ status = qemuBlockJobUpdate(driver, vm, asyncJob, disk); + if (status == VIR_DOMAIN_BLOCK_JOB_FAILED) { + virReportError(VIR_ERR_OPERATION_FAILED, + _("migration of disk %s failed"), +@@ -648,6 +649,7 @@ qemuMigrationDriveMirrorReady(virQEMUDriverPtr driver, + static int + qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, + virDomainObjPtr vm, ++ qemuDomainAsyncJob asyncJob, + bool check) + { + size_t i; +@@ -662,7 +664,7 @@ qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, + if (!diskPriv->migrating) + continue; + +- status = qemuBlockJobUpdate(driver, vm, disk); ++ status = qemuBlockJobUpdate(driver, vm, asyncJob, disk); + switch (status) { + case VIR_DOMAIN_BLOCK_JOB_FAILED: + if (check) { +@@ -674,7 +676,7 @@ qemuMigrationDriveMirrorCancelled(virQEMUDriverPtr driver, + /* fallthrough */ + case VIR_DOMAIN_BLOCK_JOB_CANCELED: + case VIR_DOMAIN_BLOCK_JOB_COMPLETED: +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, asyncJob, disk); + diskPriv->migrating = false; + break; + +@@ -722,7 +724,7 @@ qemuMigrationCancelOneDriveMirror(virQEMUDriverPtr driver, + int status; + int rv; + +- status = qemuBlockJobUpdate(driver, vm, disk); ++ status = qemuBlockJobUpdate(driver, vm, asyncJob, disk); + switch (status) { + case VIR_DOMAIN_BLOCK_JOB_FAILED: + case VIR_DOMAIN_BLOCK_JOB_CANCELED: +@@ -799,12 +801,13 @@ qemuMigrationCancelDriveMirror(virQEMUDriverPtr driver, + err = virSaveLastError(); + failed = true; + } +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, asyncJob, disk); + diskPriv->migrating = false; + } + } + +- while ((rv = qemuMigrationDriveMirrorCancelled(driver, vm, check)) != 1) { ++ while ((rv = qemuMigrationDriveMirrorCancelled(driver, vm, asyncJob, ++ check)) != 1) { + if (check && !failed && + dconn && virConnectIsAlive(dconn) <= 0) { + virReportError(VIR_ERR_OPERATION_FAILED, "%s", +@@ -930,7 +933,7 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver, + VIR_FREE(nbd_dest); + + if (qemuDomainObjExitMonitor(driver, vm) < 0 || mon_ret < 0) { +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, QEMU_ASYNC_JOB_MIGRATION_OUT, disk); + goto cleanup; + } + diskPriv->migrating = true; +@@ -941,7 +944,8 @@ qemuMigrationDriveMirror(virQEMUDriverPtr driver, + } + } + +- while ((rv = qemuMigrationDriveMirrorReady(driver, vm)) != 1) { ++ while ((rv = qemuMigrationDriveMirrorReady(driver, vm, ++ QEMU_ASYNC_JOB_MIGRATION_OUT)) != 1) { + if (rv < 0) + goto cleanup; + +@@ -1475,7 +1479,7 @@ qemuMigrationCompleted(virQEMUDriverPtr driver, + goto error; + + if (flags & QEMU_MIGRATION_COMPLETED_CHECK_STORAGE && +- qemuMigrationDriveMirrorReady(driver, vm) < 0) ++ qemuMigrationDriveMirrorReady(driver, vm, asyncJob) < 0) + goto error; + + if (flags & QEMU_MIGRATION_COMPLETED_ABORT_ON_ERROR && +@@ -5567,7 +5571,7 @@ qemuMigrationCancel(virQEMUDriverPtr driver, + VIR_DEBUG("Drive mirror on disk 
%s is still running", disk->dst); + } else { + VIR_DEBUG("Drive mirror on disk %s is gone", disk->dst); +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, QEMU_ASYNC_JOB_NONE, disk); + diskPriv->migrating = false; + } + } +@@ -5589,7 +5593,7 @@ qemuMigrationCancel(virQEMUDriverPtr driver, + qemuDomainDiskPrivatePtr diskPriv = QEMU_DOMAIN_DISK_PRIVATE(disk); + + if (diskPriv->migrating) { +- qemuBlockJobSyncEnd(driver, vm, disk); ++ qemuBlockJobSyncEnd(driver, vm, QEMU_ASYNC_JOB_NONE, disk); + diskPriv->migrating = false; + } + } +diff --git a/src/qemu/qemu_process.c b/src/qemu/qemu_process.c +index 5802a553cf..6ce33c0134 100644 +--- a/src/qemu/qemu_process.c ++++ b/src/qemu/qemu_process.c +@@ -6899,7 +6899,7 @@ qemuProcessReconnect(void *opaque) + if (qemuProcessRefreshDisks(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + goto error; + +- if (qemuBlockNodeNamesDetect(driver, obj) < 0) ++ if (qemuBlockNodeNamesDetect(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) + goto error; + + if (qemuRefreshVirtioChannelState(driver, obj, QEMU_ASYNC_JOB_NONE) < 0) +-- +2.15.1 + diff --git a/SOURCES/libvirt-qemuDomainAttachDeviceMknodHelper-Remove-symlink-before-creating-it.patch b/SOURCES/libvirt-qemuDomainAttachDeviceMknodHelper-Remove-symlink-before-creating-it.patch new file mode 100644 index 0000000..2c3815a --- /dev/null +++ b/SOURCES/libvirt-qemuDomainAttachDeviceMknodHelper-Remove-symlink-before-creating-it.patch @@ -0,0 +1,68 @@ +From f30ba86ea92bb2a77b9620cea318bc3ebe0bf2a5 Mon Sep 17 00:00:00 2001 +Message-Id: +From: Michal Privoznik +Date: Tue, 9 Jan 2018 09:34:24 +0100 +Subject: [PATCH] qemuDomainAttachDeviceMknodHelper: Remove symlink before + creating it + +RHEL-7.5: https://bugzilla.redhat.com/show_bug.cgi?id=1528502 +RHEL-7.4.z: https://bugzilla.redhat.com/show_bug.cgi?id=1532183 + +So imagine you have a /dev/blah symlink which points to /dev/sda. +You attach /dev/blah as a disk to your domain. Libvirt correctly +creates the /dev/blah -> /dev/sda symlink in the qemu namespace. +However, then you detach the disk, change the symlink so that it +points to /dev/sdb and try to attach the disk again. This time, +however, the attach fails (well, qemu attaches the wrong disk) +because the code assumes that symlinks don't change. Well, they +do. + +This is inspired by a test fix written by Eduardo Habkost. + +Signed-off-by: Michal Privoznik +Reviewed-by: Andrea Bolognani +(cherry picked from commit db98e7f67ea0d7699410f514f01947cef5128a6c) +Signed-off-by: Michal Privoznik +Signed-off-by: Jiri Denemark +Reviewed-by: Andrea Bolognani +--- + src/qemu/qemu_domain.c | 22 ++++++++++++++++------ + 1 file changed, 16 insertions(+), 6 deletions(-) + +diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c +index dd70bd6367..ade06f07db 100644 +--- a/src/qemu/qemu_domain.c ++++ b/src/qemu/qemu_domain.c +@@ -8470,13 +8470,23 @@ qemuDomainAttachDeviceMknodHelper(pid_t pid ATTRIBUTE_UNUSED, + + if (isLink) { + VIR_DEBUG("Creating symlink %s -> %s", data->file, data->target); ++ ++ /* First, unlink the symlink target. Symlinks change and ++ * therefore we have no guarantees that pre-existing ++ * symlink is still valid. 
*/ ++ if (unlink(data->file) < 0 && ++ errno != ENOENT) { ++ virReportSystemError(errno, ++ _("Unable to remove symlink %s"), ++ data->file); ++ goto cleanup; ++ } ++ + if (symlink(data->target, data->file) < 0) { +- if (errno != EEXIST) { +- virReportSystemError(errno, +- _("Unable to create symlink %s"), +- data->target); +- goto cleanup; +- } ++ virReportSystemError(errno, ++ _("Unable to create symlink %s (pointing to %s)"), ++ data->file, data->target); ++ goto cleanup; + } else { + delDevice = true; + } +-- +2.15.1 + diff --git a/SPECS/libvirt.spec b/SPECS/libvirt.spec index 128b841..56ce958 100644 --- a/SPECS/libvirt.spec +++ b/SPECS/libvirt.spec @@ -228,7 +228,7 @@ Summary: Library providing a simple virtualization API Name: libvirt Version: 3.2.0 -Release: 14%{?dist}.7%{?extra_release} +Release: 14%{?dist}.9%{?extra_release} License: LGPLv2+ Group: Development/Libraries BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-root @@ -498,6 +498,19 @@ Patch255: libvirt-conf-include-x86-microcode-version-in-virsh-capabiltiies.patch Patch256: libvirt-qemu-capabilities-force-update-if-the-microcode-version-does-not-match.patch Patch257: libvirt-cpu-add-CPU-features-and-model-for-indirect-branch-prediction-protection.patch Patch258: libvirt-qemu-Properly-store-microcode-version-in-QEMU-caps-cache.patch +Patch259: libvirt-qemuDomainAttachDeviceMknodHelper-Remove-symlink-before-creating-it.patch +Patch260: libvirt-cpu_x86-Copy-CPU-signature-from-ancestor.patch +Patch261: libvirt-qemu-take-current-async-job-into-account-in-qemuBlockNodeNamesDetect.patch +Patch262: libvirt-qemu-migration-fix-race-on-cancelling-drive-mirror.patch +Patch263: libvirt-qemu-drop-code-for-VIR_DOMAIN_JOB_BOUNDED-and-timeRemaining.patch +Patch264: libvirt-qemu-introduce-qemu-domain-job-status.patch +Patch265: libvirt-qemu-introduce-QEMU_DOMAIN_JOB_STATUS_POSTCOPY.patch +Patch266: libvirt-qemu-start-all-async-job-with-job-status-active.patch +Patch267: libvirt-qemu-introduce-migrating-job-status.patch +Patch268: libvirt-qemu-migration-don-t-expose-incomplete-job-as-complete.patch +Patch269: libvirt-qemu-Fix-type-of-a-completed-job.patch +Patch270: libvirt-qemu-Fix-crash-in-offline-migration.patch +Patch271: libvirt-RHEL-qemu-Report-full-stats-for-completed-migration.patch Requires: libvirt-daemon = %{version}-%{release} Requires: libvirt-daemon-config-network = %{version}-%{release} @@ -2346,6 +2359,23 @@ exit 0 %changelog +* Fri Jan 19 2018 Jiri Denemark - 3.2.0-14.el7_4.9 +- qemu: Fix crash in offline migration (rhbz#1530130) +- RHEL: qemu: Report full stats for completed migration (rhbz#1530130) + +* Tue Jan 16 2018 Jiri Denemark - 3.2.0-14.el7_4.8 +- qemuDomainAttachDeviceMknodHelper: Remove symlink before creating it (rhbz#1532183) +- cpu_x86: Copy CPU signature from ancestor (rhbz#1533418) +- qemu: take current async job into account in qemuBlockNodeNamesDetect (rhbz#1530129) +- qemu: migration: fix race on cancelling drive mirror (rhbz#1530129) +- qemu: drop code for VIR_DOMAIN_JOB_BOUNDED and timeRemaining (rhbz#1530130) +- qemu: introduce qemu domain job status (rhbz#1530130) +- qemu: introduce QEMU_DOMAIN_JOB_STATUS_POSTCOPY (rhbz#1530130) +- qemu: start all async job with job status active (rhbz#1530130) +- qemu: introduce migrating job status (rhbz#1530130) +- qemu: migration: don't expose incomplete job as complete (rhbz#1530130) +- qemu: Fix type of a completed job (rhbz#1530130) + * Tue Dec 19 2017 Jiri Denemark - 3.2.0-14.el7_4.7 - qemu: Properly store microcode version in QEMU caps cache 
(CVE-2017-5715)