mrc0mmand / rpms / libguestfs

Forked from rpms/libguestfs 3 years ago
Clone

Blame SOURCES/0034-v2v-o-rhv-upload-improve-lookup-of-specified-resourc.patch

498672
From 07348a7d9a7533b11513706a91d8eb8b91ce8518 Mon Sep 17 00:00:00 2001
10436e
From: Pino Toscano <ptoscano@redhat.com>
10436e
Date: Thu, 12 Sep 2019 15:21:26 +0200
10436e
Subject: [PATCH] v2v: -o rhv-upload: improve lookup of specified resources
10436e
 (RHBZ#1612653)
10436e
10436e
Improve the way the precheck script checks for the specified resources:
10436e
- look directly for a data center with the specified storage domain
10436e
- get the storage domain object from the storage domains attached to the
10436e
  data center found
10436e
- similarly, look for the specified cluster among the ones attached to
10436e
  the data center found
10436e
When everything is found, return the UUID of the storage domain, and of
10436e
the cluster back to virt-v2v, which will store them.
10436e
10436e
Similarly, rework the createvm script to directly get the requested
10436e
cluster, instead of looking for it once again.  Also, since the UUID of
10436e
the storage domain is available in virt-v2v already, use it directly
10436e
instead of using a placeholder.
10436e
10436e
This should fix a number of issues:
10436e
- nonexistent/unattached storage domains are rejected outright
10436e
- the cluster is rejected if not part of the same data center as the
10436e
  selected storage domain
10436e
- renaming the specified storage domain during the data copying will not
10436e
  cause the conversion to fail (which will still use the specified
10436e
  storage domain, no matter the new name)
10436e
10436e
Based on the hints by Daniel Erez in RHBZ#1612653.
10436e
10436e
(cherry picked from commit c49aa4fe01aac82d4776dd2a3524ce16e6deed06)
10436e
---
10436e
 v2v/output_rhv_upload.ml   | 24 +++++++++++++++++++-----
10436e
 v2v/rhv-upload-createvm.py | 11 ++++-------
10436e
 v2v/rhv-upload-precheck.py | 30 ++++++++++++++++++++++++------
10436e
 3 files changed, 47 insertions(+), 18 deletions(-)
10436e
10436e
diff --git a/v2v/output_rhv_upload.ml b/v2v/output_rhv_upload.ml
10436e
index fd6f2e3e6..19bdfcf05 100644
10436e
--- a/v2v/output_rhv_upload.ml
10436e
+++ b/v2v/output_rhv_upload.ml
10436e
@@ -227,6 +227,11 @@ See also the virt-v2v-output-rhv(1) manual.")
10436e
 object
10436e
   inherit output
10436e
 
10436e
+  (* The storage domain UUID. *)
10436e
+  val mutable rhv_storagedomain_uuid = None
10436e
+  (* The cluster UUID. *)
10436e
+  val mutable rhv_cluster_uuid = None
10436e
+
10436e
   method precheck () =
10436e
     Python_script.error_unless_python_interpreter_found ();
10436e
     error_unless_ovirtsdk4_module_available ();
10436e
@@ -242,6 +247,10 @@ object
10436e
     let json = JSON_parser.json_parser_tree_parse_file precheck_fn in
10436e
     debug "precheck output parsed as: %s"
10436e
           (JSON.string_of_doc ~fmt:JSON.Indented ["", json]);
10436e
+    rhv_storagedomain_uuid <-
10436e
+       Some (JSON_parser.object_get_string "rhv_storagedomain_uuid" json);
10436e
+    rhv_cluster_uuid <-
10436e
+       Some (JSON_parser.object_get_string "rhv_cluster_uuid" json);
10436e
     if have_selinux then
10436e
       error_unless_nbdkit_compiled_with_selinux ()
10436e
 
10436e
@@ -388,11 +397,11 @@ If the messages above are not sufficient to diagnose the problem then add the 
10436e
           diskid
10436e
       ) targets in
10436e
 
10436e
-    (* We don't have the storage domain UUID, but instead we write
10436e
-     * in a magic value which the Python code (which can get it)
10436e
-     * will substitute.
10436e
-     *)
10436e
-    let sd_uuid = "@SD_UUID@" in
10436e
+    (* The storage domain UUID. *)
10436e
+    let sd_uuid =
10436e
+      match rhv_storagedomain_uuid with
10436e
+      | None -> assert false
10436e
+      | Some uuid -> uuid in
10436e
 
10436e
     (* The volume and VM UUIDs are made up. *)
10436e
     let vol_uuids = List.map (fun _ -> uuidgen ()) targets
10436e
@@ -406,6 +415,11 @@ If the messages above are not sufficient to diagnose the problem then add the 
10436e
                             OVirt in
10436e
     let ovf = DOM.doc_to_string ovf in
10436e
 
10436e
+    let json_params =
10436e
+      match rhv_cluster_uuid with
10436e
+      | None -> assert false
10436e
+      | Some uuid -> ("rhv_cluster_uuid", JSON.String uuid) :: json_params in
10436e
+
10436e
     let ovf_file = tmpdir // "vm.ovf" in
10436e
     with_open_out ovf_file (fun chan -> output_string chan ovf);
10436e
     if Python_script.run_command createvm_script json_params [ovf_file] <> 0
10436e
diff --git a/v2v/rhv-upload-createvm.py b/v2v/rhv-upload-createvm.py
10436e
index 1d0e8c95d..ed57a9b20 100644
10436e
--- a/v2v/rhv-upload-createvm.py
10436e
+++ b/v2v/rhv-upload-createvm.py
10436e
@@ -65,17 +65,14 @@ connection = sdk.Connection(
10436e
 
10436e
 system_service = connection.system_service()
10436e
 
10436e
-# Get the storage domain UUID and substitute it into the OVF doc.
10436e
-sds_service = system_service.storage_domains_service()
10436e
-sd = sds_service.list(search=("name=%s" % params['output_storage']))[0]
10436e
-sd_uuid = sd.id
10436e
-
10436e
-ovf = ovf.replace("@SD_UUID@", sd_uuid)
10436e
+# Get the cluster.
10436e
+cluster = system_service.clusters_service().cluster_service(params['rhv_cluster_uuid'])
10436e
+cluster = cluster.get()
10436e
 
10436e
 vms_service = system_service.vms_service()
10436e
 vm = vms_service.add(
10436e
     types.Vm(
10436e
-        cluster=types.Cluster(name = params['rhv_cluster']),
10436e
+        cluster=cluster,
10436e
         initialization=types.Initialization(
10436e
             configuration = types.Configuration(
10436e
                 type = types.ConfigurationType.OVA,
10436e
diff --git a/v2v/rhv-upload-precheck.py b/v2v/rhv-upload-precheck.py
10436e
index de8a66c05..725a8dc9e 100644
10436e
--- a/v2v/rhv-upload-precheck.py
10436e
+++ b/v2v/rhv-upload-precheck.py
10436e
@@ -60,18 +60,36 @@ connection = sdk.Connection(
10436e
 
10436e
 system_service = connection.system_service()
10436e
 
10436e
-# Check whether the specified cluster exists.
10436e
-clusters_service = system_service.clusters_service()
10436e
-clusters = clusters_service.list(
10436e
-    search='name=%s' % params['rhv_cluster'],
10436e
+# Check whether there is a datacenter for the specified storage.
10436e
+data_centers = system_service.data_centers_service().list(
10436e
+    search='storage.name=%s' % params['output_storage'],
10436e
     case_sensitive=True,
10436e
 )
10436e
+if len(data_centers) == 0:
10436e
+    # The storage domain is not attached to a datacenter
10436e
+    # (shouldn't happen, would fail on disk creation).
10436e
+    raise RuntimeError("The storage domain ‘%s’ is not attached to a DC" %
10436e
+                       (params['output_storage']))
10436e
+datacenter = data_centers[0]
10436e
+
10436e
+# Get the storage domain.
10436e
+storage_domains = connection.follow_link(datacenter.storage_domains)
10436e
+storage_domain = [sd for sd in storage_domains if sd.name == params['output_storage']][0]
10436e
+
10436e
+# Get the cluster.
10436e
+clusters = connection.follow_link(datacenter.clusters)
10436e
+clusters = [cluster for cluster in clusters if cluster.name == params['rhv_cluster']]
10436e
 if len(clusters) == 0:
10436e
-    raise RuntimeError("The cluster ‘%s’ does not exist" %
10436e
-                       (params['rhv_cluster']))
10436e
+    raise RuntimeError("The cluster ‘%s’ is not part of the DC ‘%s’, "
10436e
+                       "where the storage domain ‘%s’ is" %
10436e
+                       (params['rhv_cluster'], datacenter.name,
10436e
+                        params['output_storage']))
10436e
+cluster = clusters[0]
10436e
 
10436e
 # Otherwise everything is OK, print a JSON with the results.
10436e
 results = {
10436e
+  "rhv_storagedomain_uuid": storage_domain.id,
10436e
+  "rhv_cluster_uuid": cluster.id,
10436e
 }
10436e
 
10436e
 json.dump(results, sys.stdout)
10436e
-- 
498672
2.18.4
10436e