Google
  Web www.spinics.net

[PATCH 09/19] qemu: Implement migration job phases

[Date Prev][Date Next][Thread Prev][Thread Next][Date Index][Thread Index]


This patch introduces several helper methods to deal with jobs and
phases during migration in a simpler manner.
---
 src/qemu/MIGRATION.txt    |   55 +++++++++++++++++++++++++++
 src/qemu/qemu_domain.c    |    5 ++
 src/qemu/qemu_migration.c |   91 +++++++++++++++++++++++++++++++++++++++++++++
 src/qemu/qemu_migration.h |   36 ++++++++++++++++++
 4 files changed, 187 insertions(+), 0 deletions(-)
 create mode 100644 src/qemu/MIGRATION.txt

diff --git a/src/qemu/MIGRATION.txt b/src/qemu/MIGRATION.txt
new file mode 100644
index 0000000..6c32998
--- /dev/null
+++ b/src/qemu/MIGRATION.txt
@@ -0,0 +1,55 @@
+    QEMU Migration Locking Rules
+    ============================
+
+Migration is a complicated beast which may span across several APIs on both
+source and destination side and we need to keep the domain we are migrating in
+a consistent state during the whole process.
+
+To prevent anyone from changing the domain in the middle of migration we need to
+keep MIGRATION_OUT job active during migration from Begin to Confirm on the
+source side and MIGRATION_IN job has to be active from Prepare to Finish on
+the destination side.
+
+For this purpose we introduce several helper methods to deal with locking
+primitives (described in THREADS.txt) in the right way:
+
+* qemuMigrationJobStart
+
+* qemuMigrationJobContinue
+
+* qemuMigrationJobStartPhase
+
+* qemuMigrationJobSetPhase
+
+* qemuMigrationJobFinish
+
+The sequence of calling qemuMigrationJob* helper methods is as follows:
+
+- The first API of a migration protocol (Prepare or Perform/Begin depending on
+  migration type and version) has to start migration job and keep it active:
+
+      qemuMigrationJobStart(driver, vm, QEMU_JOB_MIGRATION_{IN,OUT});
+      qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
+      ...do work...
+      qemuMigrationJobContinue(vm);
+
+- All subsequent phases except for the last one have to keep the job active:
+
+      if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+          return;
+      qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
+      ...do work...
+      qemuMigrationJobContinue(vm);
+
+- The last migration phase finally finishes the migration job:
+
+      if (!qemuMigrationJobIsActive(vm, QEMU_JOB_MIGRATION_{IN,OUT}))
+          return;
+      qemuMigrationJobStartPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
+      ...do work...
+      qemuMigrationJobFinish(driver, vm);
+
+While migration job is running (i.e., after qemuMigrationJobStart* but before
+qemuMigrationJob{Continue,Finish}), migration phase can be advanced using
+
+      qemuMigrationJobSetPhase(driver, vm, QEMU_MIGRATION_PHASE_*);
diff --git a/src/qemu/qemu_domain.c b/src/qemu/qemu_domain.c
index d0dd764..39cbf0e 100644
--- a/src/qemu/qemu_domain.c
+++ b/src/qemu/qemu_domain.c
@@ -26,6 +26,7 @@
 #include "qemu_domain.h"
 #include "qemu_command.h"
 #include "qemu_capabilities.h"
+#include "qemu_migration.h"
 #include "memory.h"
 #include "logging.h"
 #include "virterror_internal.h"
@@ -72,6 +73,8 @@ qemuDomainAsyncJobPhaseToString(enum qemuDomainAsyncJob job,
     switch (job) {
     case QEMU_ASYNC_JOB_MIGRATION_OUT:
     case QEMU_ASYNC_JOB_MIGRATION_IN:
+        return qemuMigrationJobPhaseTypeToString(phase);
+
     case QEMU_ASYNC_JOB_SAVE:
     case QEMU_ASYNC_JOB_DUMP:
     case QEMU_ASYNC_JOB_NONE:
@@ -92,6 +95,8 @@ qemuDomainAsyncJobPhaseFromString(enum qemuDomainAsyncJob job,
     switch (job) {
     case QEMU_ASYNC_JOB_MIGRATION_OUT:
     case QEMU_ASYNC_JOB_MIGRATION_IN:
+        return qemuMigrationJobPhaseTypeFromString(phase);
+
     case QEMU_ASYNC_JOB_SAVE:
     case QEMU_ASYNC_JOB_DUMP:
     case QEMU_ASYNC_JOB_NONE:
diff --git a/src/qemu/qemu_migration.c b/src/qemu/qemu_migration.c
index e595596..33aa89b 100644
--- a/src/qemu/qemu_migration.c
+++ b/src/qemu/qemu_migration.c
@@ -46,6 +46,19 @@
 
 #define VIR_FROM_THIS VIR_FROM_QEMU
 
+/* String names for enum qemuMigrationJobPhase, in declaration order.
+ * These strings are persisted (via qemuDomainAsyncJobPhaseToString) in the
+ * domain status XML and parsed back with the FromString counterpart, so
+ * they must match the enum exactly and never be renamed once released. */
+VIR_ENUM_IMPL(qemuMigrationJobPhase, QEMU_MIGRATION_PHASE_LAST,
+              "none",
+              "perform2",
+              "begin3",
+              "perform3",
+              "perform3_done",
+              "confirm3_cancelled",
+              "confirm3",
+              "prepare",
+              "finish2",
+              "finish3",
+);
+
 enum qemuMigrationCookieFlags {
     QEMU_MIGRATION_COOKIE_FLAG_GRAPHICS,
     QEMU_MIGRATION_COOKIE_FLAG_LOCKSTATE,
@@ -2749,3 +2762,81 @@ cleanup:
     }
     return ret;
 }
+
+/*
+ * Start a migration async job (QEMU_ASYNC_JOB_MIGRATION_{IN,OUT}) on @vm.
+ *
+ * On success the async job is active and the allowed concurrent job mask is
+ * set: incoming migration allows no other jobs at all, outgoing migration
+ * allows the default mask.  The job info type is marked UNBOUNDED since
+ * migration has no predictable completion time.
+ *
+ * Returns 0 on success, -1 if the async job could not be started.
+ */
+int
+qemuMigrationJobStart(struct qemud_driver *driver,
+                      virDomainObjPtr vm,
+                      enum qemuDomainAsyncJob job)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    if (qemuDomainObjBeginAsyncJobWithDriver(driver, vm, job) < 0)
+        return -1;
+
+    /* Incoming migration must not be disturbed by any other job. */
+    if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+        qemuDomainObjSetAsyncJobMask(vm, QEMU_JOB_NONE);
+    else
+        qemuDomainObjSetAsyncJobMask(vm, DEFAULT_JOB_MASK);
+
+    priv->job.info.type = VIR_DOMAIN_JOB_UNBOUNDED;
+
+    return 0;
+}
+
+/*
+ * Advance the phase of the currently running migration job on @vm.
+ *
+ * The migration protocol only ever moves forward; an attempt to set an
+ * earlier phase is logged and ignored rather than corrupting job state.
+ * Caller must hold the active migration job (see qemuMigrationJobStart).
+ */
+void
+qemuMigrationJobSetPhase(struct qemud_driver *driver,
+                         virDomainObjPtr vm,
+                         enum qemuMigrationJobPhase phase)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    /* Refuse to go backwards; this would indicate a broken caller. */
+    if (phase < priv->job.phase) {
+        VIR_ERROR(_("migration protocol going backwards %s => %s"),
+                  qemuMigrationJobPhaseTypeToString(priv->job.phase),
+                  qemuMigrationJobPhaseTypeToString(phase));
+        return;
+    }
+
+    qemuDomainObjSetJobPhase(driver, vm, phase);
+}
+
+/*
+ * Re-enter an already active migration job in a new phase: takes an extra
+ * reference on @vm (paired with qemuMigrationJobContinue/Finish dropping it)
+ * and advances the phase.
+ *
+ * NOTE(review): virDomainObjRef's return value is ignored here — presumably
+ * the caller already holds a reference so it cannot fail; confirm against
+ * the locking rules in THREADS.txt.
+ */
+void
+qemuMigrationJobStartPhase(struct qemud_driver *driver,
+                           virDomainObjPtr vm,
+                           enum qemuMigrationJobPhase phase)
+{
+    virDomainObjRef(vm);
+    qemuMigrationJobSetPhase(driver, vm, phase);
+}
+
+/*
+ * Release the per-phase reference on @vm while keeping the migration job
+ * active for the next phase/API call.  Returns the result of
+ * virDomainObjUnref (the remaining reference count per libvirt convention —
+ * TODO confirm exact semantics of the return value).
+ */
+int
+qemuMigrationJobContinue(virDomainObjPtr vm)
+{
+    return virDomainObjUnref(vm);
+}
+
+/*
+ * Check that the migration job @job (MIGRATION_IN or MIGRATION_OUT) is the
+ * async job currently active on @vm.
+ *
+ * Returns true if so; otherwise reports VIR_ERR_OPERATION_INVALID with a
+ * direction-specific message and returns false.
+ */
+bool
+qemuMigrationJobIsActive(virDomainObjPtr vm,
+                         enum qemuDomainAsyncJob job)
+{
+    qemuDomainObjPrivatePtr priv = vm->privateData;
+
+    if (priv->job.asyncJob != job) {
+        const char *msg;
+
+        /* Pick a message matching the expected migration direction. */
+        if (job == QEMU_ASYNC_JOB_MIGRATION_IN)
+            msg = _("domain '%s' is not processing incoming migration");
+        else
+            msg = _("domain '%s' is not being migrated");
+
+        qemuReportError(VIR_ERR_OPERATION_INVALID, msg, vm->def->name);
+        return false;
+    }
+    return true;
+}
+
+/*
+ * End the migration async job on @vm (the last migration phase calls this
+ * instead of qemuMigrationJobContinue).  Returns the result of
+ * qemuDomainObjEndAsyncJob.
+ */
+int
+qemuMigrationJobFinish(struct qemud_driver *driver, virDomainObjPtr vm)
+{
+    return qemuDomainObjEndAsyncJob(driver, vm);
+}
diff --git a/src/qemu/qemu_migration.h b/src/qemu/qemu_migration.h
index d3a3743..4342173 100644
--- a/src/qemu/qemu_migration.h
+++ b/src/qemu/qemu_migration.h
@@ -23,7 +23,43 @@
 # define __QEMU_MIGRATION_H__
 
 # include "qemu_conf.h"
+# include "qemu_domain.h"
 
+/* Phases of the migration job, covering both migration protocol v2
+ * (PERFORM2/FINISH2) and v3 (BEGIN3..FINISH3).  Order matters: phases may
+ * only advance (see qemuMigrationJobSetPhase), and the order also defines
+ * the strings emitted by qemuMigrationJobPhaseTypeToString. */
+enum qemuMigrationJobPhase {
+    QEMU_MIGRATION_PHASE_NONE = 0,
+    QEMU_MIGRATION_PHASE_PERFORM2,
+    QEMU_MIGRATION_PHASE_BEGIN3,
+    QEMU_MIGRATION_PHASE_PERFORM3,
+    QEMU_MIGRATION_PHASE_PERFORM3_DONE,
+    QEMU_MIGRATION_PHASE_CONFIRM3_CANCELLED,
+    QEMU_MIGRATION_PHASE_CONFIRM3,
+    QEMU_MIGRATION_PHASE_PREPARE,
+    QEMU_MIGRATION_PHASE_FINISH2,
+    QEMU_MIGRATION_PHASE_FINISH3,
+
+    QEMU_MIGRATION_PHASE_LAST
+};
+/* Declares qemuMigrationJobPhaseTypeToString/FromString converters. */
+VIR_ENUM_DECL(qemuMigrationJobPhase)
+
+/* Job lifecycle helpers; see src/qemu/MIGRATION.txt for the required
+ * calling sequence across migration APIs. */
+int qemuMigrationJobStart(struct qemud_driver *driver,
+                          virDomainObjPtr vm,
+                          enum qemuDomainAsyncJob job)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
+void qemuMigrationJobSetPhase(struct qemud_driver *driver,
+                              virDomainObjPtr vm,
+                              enum qemuMigrationJobPhase phase)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+void qemuMigrationJobStartPhase(struct qemud_driver *driver,
+                                virDomainObjPtr vm,
+                                enum qemuMigrationJobPhase phase)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2);
+int qemuMigrationJobContinue(virDomainObjPtr obj)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_RETURN_CHECK;
+bool qemuMigrationJobIsActive(virDomainObjPtr vm,
+                              enum qemuDomainAsyncJob job)
+    ATTRIBUTE_NONNULL(1);
+int qemuMigrationJobFinish(struct qemud_driver *driver, virDomainObjPtr obj)
+    ATTRIBUTE_NONNULL(1) ATTRIBUTE_NONNULL(2) ATTRIBUTE_RETURN_CHECK;
 
 bool qemuMigrationIsAllowed(virDomainDefPtr def)
     ATTRIBUTE_NONNULL(1);
-- 
1.7.6

--
libvir-list mailing list
libvir-list@xxxxxxxxxx
https://www.redhat.com/mailman/listinfo/libvir-list


[Virt Tools]     [Libvirt Users]     [Fedora Users]     [Fedora Legacy]     [Fedora Maintainers]     [Fedora Desktop]     [Fedora SELinux]     [Big List of Linux Books]     [Yosemite News]     [Yosemite Photos]     [KDE Users]     [Fedora Tools]

Powered by Linux

Google
  Web www.spinics.net