From 17e9b949ec3876e74bcaa217810afbd46f297a65 Mon Sep 17 00:00:00 2001
Message-Id: <17e9b949ec3876e74bcaa217810afbd46f297a65@dist-git>
From: Michal Privoznik
Date: Wed, 7 Oct 2020 18:45:39 +0200
Subject: [PATCH] conf: Parse and format HMAT
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
To cite the ACPI specification:
Heterogeneous Memory Attribute Table describes the memory
attributes, such as memory side cache attributes and bandwidth
and latency details, related to the System Physical Address
(SPA) Memory Ranges. The software is expected to use this
information as hint for optimization.
According to our upstream discussion [1] this is exposed under <numa/>:
as a <cache/> element under each NUMA <cell/>, and as <latency/> or
<bandwidth/> elements under numa/interconnects.
1: https://www.redhat.com/archives/libvir-list/2020-January/msg00422.html
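For illustration, a condensed sketch of the resulting guest XML, taken from
the documentation example added below (node IDs and values are merely
illustrative):

  <numa>
    <cell id='3' cpus='0-3' memory='2097152' unit='KiB'>
      <cache level='1' associativity='direct' policy='writeback'>
        <size value='10' unit='KiB'/>
        <line value='8' unit='B'/>
      </cache>
    </cell>
    <interconnects>
      <latency initiator='0' target='0' type='access' value='5'/>
      <bandwidth initiator='0' target='0' type='access' value='204800' unit='KiB'/>
    </interconnects>
  </numa>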
Signed-off-by: Michal Privoznik
Reviewed-by: Daniel Henrique Barboza
(cherry picked from commit a89bbbac86383a10be0cec5a93feb7ed820871eb)
Resolves: https://bugzilla.redhat.com/show_bug.cgi?id=1749518
Conflicts:
- src/conf/numa_conf.c: Context, because we're not using
VIR_XPATH_NODE_AUTORESTORE() everywhere in the old code.
Signed-off-by: Michal Privoznik
Message-Id:
Reviewed-by: Ján Tomko
---
docs/formatdomain.html.in | 107 +++++++
docs/schemas/cputypes.rng | 110 ++++++-
src/conf/numa_conf.c | 349 ++++++++++++++++++++-
src/conf/numa_conf.h | 33 ++
src/libvirt_private.syms | 6 +
tests/qemuxml2argvdata/numatune-hmat.xml | 52 +++
tests/qemuxml2xmloutdata/numatune-hmat.xml | 1 +
tests/qemuxml2xmltest.c | 1 +
8 files changed, 644 insertions(+), 15 deletions(-)
create mode 100644 tests/qemuxml2argvdata/numatune-hmat.xml
create mode 120000 tests/qemuxml2xmloutdata/numatune-hmat.xml
diff --git a/docs/formatdomain.html.in b/docs/formatdomain.html.in
index 4b8d312596..bec753e37f 100644
--- a/docs/formatdomain.html.in
+++ b/docs/formatdomain.html.in
@@ -1874,6 +1874,113 @@
using 10 for local and 20 for remote distances.
+
+
+
+...
+<cpu>
+ ...
+ <numa>
+ <cell id='0' cpus='0-3' memory='512000' unit='KiB' discard='yes'/>
+ <cell id='1' cpus='4-7' memory='512000' unit='KiB' memAccess='shared'/>
+ <cell id='3' cpus='0-3' memory='2097152' unit='KiB'>
+ <cache level='1' associativity='direct' policy='writeback'>
+ <size value='10' unit='KiB'/>
+ <line value='8' unit='B'/>
+ </cache>
+ </cell>
+ <interconnects>
+ <latency initiator='0' target='0' type='access' value='5'/>
+ <latency initiator='0' target='0' cache='1' type='access' value='10'/>
+ <bandwidth initiator='0' target='0' type='access' value='204800' unit='KiB'/>
+ </interconnects>
+ </numa>
+ ...
+</cpu>
+...
+
+
+ Since 6.6.0 the cell element can have a cache child element which
+ describes memory side cache for memory proximity domains. The cache
+ element has a level attribute describing the cache level and thus the
+ element can be repeated multiple times to describe different levels of
+ the cache.
+
+
+
+ The cache element then has the following mandatory attributes:
+
+ level
+   Level of the cache this description refers to.
+
+ associativity
+   Describes the cache associativity (accepted values are none, direct
+   and full).
+
+ policy
+   Describes the cache write policy (accepted values are none, writeback
+   and writethrough).
+
+
+
+
+ The cache element then has two mandatory child elements: size and line,
+ which describe the cache size and cache line size. Both elements accept
+ two attributes, value and unit, which set the value of the corresponding
+ cache attribute.
+
+
+
+ The NUMA description has an optional interconnects element that
+ describes the normalized memory read/write latency, read/write
+ bandwidth between Initiator Proximity Domains (Processor or I/O)
+ and Target Proximity Domains (Memory).
+
+
+
+ The interconnects element can have zero or more latency child elements
+ to describe latency between two memory nodes and zero or more bandwidth
+ child elements to describe bandwidth between two memory nodes. Both
+ these have the following mandatory attributes:
+
+
+
+ initiator
+   Refers to the source NUMA node.
+
+ target
+   Refers to the target NUMA node.
+
+ type
+   The type of the access. Accepted values: access, read, write.
+
+ value
+   The actual value. For latency this is the delay in nanoseconds, for
+   bandwidth this value is in kibibytes per second. Use the additional
+   unit attribute to change the units.
+
+
+
+ To describe latency from one NUMA node to a cache of another NUMA node,
+ the latency element has an optional cache attribute which, in
+ combination with the target attribute, creates a full reference to a
+ distant NUMA node's cache level. For instance, target='0' cache='1'
+ refers to the first level cache of NUMA node 0.
+
+
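An illustrative sketch of such a cross-node cache reference, using the
attribute names documented above (the values are arbitrary):

  <latency initiator='0' target='1' cache='1' type='access' value='10'/>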
diff --git a/docs/schemas/cputypes.rng b/docs/schemas/cputypes.rng
index a1682a1003..ba30dbf9ff 100644
--- a/docs/schemas/cputypes.rng
+++ b/docs/schemas/cputypes.rng
@@ -102,9 +102,14 @@
-
-
-
+
+
+
+
+
+
+
+
@@ -148,6 +153,9 @@
+
+
+
@@ -162,6 +170,102 @@
+
+
+
+
+
+
+
+ none
+ direct
+ full
+
+
+
+
+ none
+ writeback
+ writethrough
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ access
+ read
+ write
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ access
+ read
+ write
+
+
+
+
+
+
+
+
+
+
+
+
+
+
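The RelaxNG additions in this hunk lost their markup during extraction and
cannot be reproduced verbatim. As a rough sketch only — the define names
numaCacheElement and numaInterconnects and the datatype choices are
assumptions, while the element names, attribute names and accepted values
come from the surviving value lists and the documentation hunk — the
additions have approximately this shape:

  <define name="numaCacheElement">
    <element name="cache">
      <attribute name="level"><data type="unsignedInt"/></attribute>
      <attribute name="associativity">
        <choice>
          <value>none</value>
          <value>direct</value>
          <value>full</value>
        </choice>
      </attribute>
      <attribute name="policy">
        <choice>
          <value>none</value>
          <value>writeback</value>
          <value>writethrough</value>
        </choice>
      </attribute>
      <element name="size">
        <attribute name="value"><data type="unsignedInt"/></attribute>
        <attribute name="unit"><text/></attribute>
      </element>
      <element name="line">
        <attribute name="value"><data type="unsignedInt"/></attribute>
        <attribute name="unit"><text/></attribute>
      </element>
    </element>
  </define>

  <define name="numaInterconnects">
    <element name="interconnects">
      <zeroOrMore>
        <element name="latency">
          <attribute name="initiator"><data type="unsignedInt"/></attribute>
          <attribute name="target"><data type="unsignedInt"/></attribute>
          <optional>
            <attribute name="cache"><data type="unsignedInt"/></attribute>
          </optional>
          <attribute name="type">
            <choice>
              <value>access</value>
              <value>read</value>
              <value>write</value>
            </choice>
          </attribute>
          <attribute name="value"><data type="unsignedInt"/></attribute>
        </element>
      </zeroOrMore>
      <zeroOrMore>
        <element name="bandwidth">
          <attribute name="initiator"><data type="unsignedInt"/></attribute>
          <attribute name="target"><data type="unsignedInt"/></attribute>
          <attribute name="type">
            <choice>
              <value>access</value>
              <value>read</value>
              <value>write</value>
            </choice>
          </attribute>
          <attribute name="value"><data type="unsignedInt"/></attribute>
          <optional>
            <attribute name="unit"><text/></attribute>
          </optional>
        </element>
      </zeroOrMore>
    </element>
  </define>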
diff --git a/src/conf/numa_conf.c b/src/conf/numa_conf.c
index a805336d16..5c764190c3 100644
--- a/src/conf/numa_conf.c
+++ b/src/conf/numa_conf.c
@@ -59,9 +59,37 @@ VIR_ENUM_IMPL(virDomainMemoryAccess,
"private",
);
+VIR_ENUM_IMPL(virDomainCacheAssociativity,
+ VIR_DOMAIN_CACHE_ASSOCIATIVITY_LAST,
+ "none",
+ "direct",
+ "full",
+);
+
+VIR_ENUM_IMPL(virDomainCachePolicy,
+ VIR_DOMAIN_CACHE_POLICY_LAST,
+ "none",
+ "writeback",
+ "writethrough",
+);
+
+VIR_ENUM_IMPL(virDomainMemoryLatency,
+ VIR_DOMAIN_MEMORY_LATENCY_LAST,
+ "none",
+ "access",
+ "read",
+ "write"
+);
+
typedef struct _virDomainNumaDistance virDomainNumaDistance;
typedef virDomainNumaDistance *virDomainNumaDistancePtr;
+typedef struct _virDomainNumaCache virDomainNumaCache;
+typedef virDomainNumaCache *virDomainNumaCachePtr;
+
+typedef struct _virDomainNumaInterconnect virDomainNumaInterconnect;
+typedef virDomainNumaInterconnect *virDomainNumaInterconnectPtr;
+
typedef struct _virDomainNumaNode virDomainNumaNode;
typedef virDomainNumaNode *virDomainNumaNodePtr;
@@ -86,9 +114,30 @@ struct _virDomainNuma {
unsigned int cellid;
} *distances; /* remote node distances */
size_t ndistances;
+
+ struct _virDomainNumaCache {
+ unsigned int level; /* cache level */
+ unsigned int size; /* cache size */
+ unsigned int line; /* line size, !!! in bytes !!! */
+ virDomainCacheAssociativity associativity; /* cache associativity */
+ virDomainCachePolicy policy; /* cache policy */
+ } *caches;
+ size_t ncaches;
} *mem_nodes; /* guest node configuration */
size_t nmem_nodes;
+ struct _virDomainNumaInterconnect {
+ virDomainNumaInterconnectType type; /* whether structure describes latency
+ or bandwidth */
+ unsigned int initiator; /* the initiator NUMA node */
+ unsigned int target; /* the target NUMA node */
+ unsigned int cache; /* the target cache on @target; if 0 then the
+ memory on @target */
+ virDomainMemoryLatency accessType; /* what type of access is defined */
+ unsigned long value; /* value itself */
+ } *interconnects;
+ size_t ninterconnects;
+
/* Future NUMA tuning related stuff should go here. */
};
@@ -368,9 +417,13 @@ virDomainNumaFree(virDomainNumaPtr numa)
if (numa->mem_nodes[i].ndistances > 0)
VIR_FREE(numa->mem_nodes[i].distances);
+
+ VIR_FREE(numa->mem_nodes[i].caches);
}
VIR_FREE(numa->mem_nodes);
+ VIR_FREE(numa->interconnects);
+
VIR_FREE(numa);
}
@@ -841,6 +894,97 @@ virDomainNumaDefNodeDistanceParseXML(virDomainNumaPtr def,
return ret;
}
+
+static int
+virDomainNumaDefNodeCacheParseXML(virDomainNumaPtr def,
+ xmlXPathContextPtr ctxt,
+ unsigned int cur_cell)
+{
+ g_autofree xmlNodePtr *nodes = NULL;
+ int n;
+ size_t i;
+
+ if ((n = virXPathNodeSet("./cache", ctxt, &nodes)) < 0)
+ return -1;
+
+ def->mem_nodes[cur_cell].caches = g_new0(virDomainNumaCache, n);
+
+ for (i = 0; i < n; i++) {
+ VIR_XPATH_NODE_AUTORESTORE(ctxt);
+ virDomainNumaCachePtr cache = &def->mem_nodes[cur_cell].caches[i];
+ g_autofree char *tmp = NULL;
+ unsigned int level;
+ int associativity;
+ int policy;
+ unsigned long long size;
+ unsigned long long line;
+
+ if (!(tmp = virXMLPropString(nodes[i], "level"))) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Missing 'level' attribute in cache "
+ "element for NUMA node %d"),
+ cur_cell);
+ return -1;
+ }
+
+ if (virStrToLong_uip(tmp, NULL, 10, &level) < 0 ||
+ level == 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Invalid 'level' attribute in cache "
+ "element for NUMA node %d"),
+ cur_cell);
+ return -1;
+ }
+ VIR_FREE(tmp);
+
+ if (!(tmp = virXMLPropString(nodes[i], "associativity"))) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Missing 'associativity' attribute in cache "
+ "element for NUMA node %d"),
+ cur_cell);
+ return -1;
+ }
+
+ if ((associativity = virDomainCacheAssociativityTypeFromString(tmp)) < 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Invalid cache associativity '%s'"),
+ tmp);
+ return -1;
+ }
+ VIR_FREE(tmp);
+
+ if (!(tmp = virXMLPropString(nodes[i], "policy"))) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Missing 'policy' attribute in cache "
+ "element for NUMA node %d"),
+ cur_cell);
+ return -1;
+ }
+
+ if ((policy = virDomainCachePolicyTypeFromString(tmp)) < 0) {
+ virReportError(VIR_ERR_XML_ERROR,
+ _("Invalid cache policy '%s'"),
+ tmp);
+ return -1;
+ }
+ VIR_FREE(tmp);
+
+ ctxt->node = nodes[i];
+ if (virDomainParseMemory("./size/@value", "./size/unit",
+ ctxt, &size, true, false) < 0)
+ return -1;
+
+ if (virParseScaledValue("./line/@value", "./line/unit",
+ ctxt, &line, 1, ULLONG_MAX, true) < 0)
+ return -1;
+
+ *cache = (virDomainNumaCache){level, size, line, associativity, policy};
+ def->mem_nodes[cur_cell].ncaches++;
+ }
+
+ return 0;
+}
+
+
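For reference, a sketch of the per-cell input this parser consumes, taken
from the documentation example in this patch (values are illustrative):

  <cell id='3' cpus='0-3' memory='2097152' unit='KiB'>
    <cache level='1' associativity='direct' policy='writeback'>
      <size value='10' unit='KiB'/>
      <line value='8' unit='B'/>
    </cache>
  </cell>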
int
virDomainNumaDefParseXML(virDomainNumaPtr def,
xmlXPathContextPtr ctxt)
@@ -867,6 +1011,7 @@ virDomainNumaDefParseXML(virDomainNumaPtr def,
def->nmem_nodes = n;
for (i = 0; i < n; i++) {
+ VIR_XPATH_NODE_AUTORESTORE(ctxt);
int rc;
unsigned int cur_cell = i;
@@ -953,7 +1098,109 @@ virDomainNumaDefParseXML(virDomainNumaPtr def,
/* Parse NUMA distances info */
if (virDomainNumaDefNodeDistanceParseXML(def, ctxt, cur_cell) < 0)
+ goto cleanup;
+
+ /* Parse cache info */
+ if (virDomainNumaDefNodeCacheParseXML(def, ctxt, cur_cell) < 0)
+ goto cleanup;
+ }
+
+ VIR_FREE(nodes);
+ if ((n = virXPathNodeSet("./cpu/numa[1]/interconnects[1]/latency|"
+ "./cpu/numa[1]/interconnects[1]/bandwidth", ctxt, &nodes)) < 0)
+ goto cleanup;
+
+ def->interconnects = g_new0(virDomainNumaInterconnect, n);
+ for (i = 0; i < n; i++) {
+ virDomainNumaInterconnectType type;
+ unsigned int initiator;
+ unsigned int target;
+ unsigned int cache = 0;
+ int accessType;
+ unsigned long long value;
+
+ if (virXMLNodeNameEqual(nodes[i], "latency")) {
+ type = VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_LATENCY;
+
+ if (!(tmp = virXMLPropString(nodes[i], "value"))) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Missing 'value' attribute in NUMA interconnects"));
goto cleanup;
+ }
+
+ if (virStrToLong_ullp(tmp, NULL, 10, &value) < 0) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Invalid 'value' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+ VIR_FREE(tmp);
+ } else if (virXMLNodeNameEqual(nodes[i], "bandwidth")) {
+ VIR_XPATH_NODE_AUTORESTORE(ctxt);
+ type = VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_BANDWIDTH;
+
+ ctxt->node = nodes[i];
+
+ if (virDomainParseMemory("./@value", "./@unit", ctxt, &value, true, false) < 0)
+ goto cleanup;
+ } else {
+ /* Ignore yet unknown child elements. */
+ continue;
+ }
+
+ if (!(tmp = virXMLPropString(nodes[i], "initiator"))) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Missing 'initiator' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+
+ if (virStrToLong_uip(tmp, NULL, 10, &initiator) < 0) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Invalid 'initiator' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+ VIR_FREE(tmp);
+
+ if (!(tmp = virXMLPropString(nodes[i], "target"))) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Missing 'target' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+
+ if (virStrToLong_uip(tmp, NULL, 10, &target) < 0) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Invalid 'target' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+ VIR_FREE(tmp);
+
+
+ /* cache attribute is optional */
+ if ((tmp = virXMLPropString(nodes[i], "cache"))) {
+ if (virStrToLong_uip(tmp, NULL, 10, &cache) < 0 ||
+ cache == 0) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Invalid 'cache' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+ }
+ VIR_FREE(tmp);
+
+ if (!(tmp = virXMLPropString(nodes[i], "type"))) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Missing 'type' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+
+ if ((accessType = virDomainMemoryLatencyTypeFromString(tmp)) <= 0) {
+ virReportError(VIR_ERR_XML_ERROR, "%s",
+ _("Invalid 'type' attribute in NUMA interconnects"));
+ goto cleanup;
+ }
+ VIR_FREE(tmp);
+
+ def->interconnects[i] = (virDomainNumaInterconnect) {type, initiator, target,
+ cache, accessType, value};
+ def->ninterconnects++;
}
ret = 0;
@@ -983,6 +1230,7 @@ virDomainNumaDefFormatXML(virBufferPtr buf,
for (i = 0; i < ncells; i++) {
virBitmapPtr cpumask = virDomainNumaGetNodeCpumask(def, i);
int ndistances;
+ size_t ncaches;
memAccess = virDomainNumaGetNodeMemoryAccessMode(def, i);
discard = virDomainNumaGetNodeDiscard(def, i);
@@ -1009,30 +1257,107 @@ virDomainNumaDefFormatXML(virBufferPtr buf,
virTristateBoolTypeToString(discard));
ndistances = def->mem_nodes[i].ndistances;
- if (ndistances == 0) {
+ ncaches = def->mem_nodes[i].ncaches;
+ if (ndistances == 0 && ncaches == 0) {
virBufferAddLit(buf, "/>\n");
} else {
size_t j;
- virDomainNumaDistancePtr distances = def->mem_nodes[i].distances;
virBufferAddLit(buf, ">\n");
virBufferAdjustIndent(buf, 2);
- virBufferAddLit(buf, "<distances>\n");
- virBufferAdjustIndent(buf, 2);
- for (j = 0; j < ndistances; j++) {
- if (distances[j].value) {
- virBufferAsprintf(buf, "<sibling id='%zu' value='%u'/>\n", j, distances[j].value);
+
+ if (ndistances) {
+ virDomainNumaDistancePtr distances = def->mem_nodes[i].distances;
+
+ virBufferAddLit(buf, "<distances>\n");
+ virBufferAdjustIndent(buf, 2);
+ for (j = 0; j < ndistances; j++) {
+ if (distances[j].value) {
+ virBufferAsprintf(buf, "<sibling id='%zu' value='%u'/>\n", j, distances[j].value);
+ }
}
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "</distances>\n");
+ }
+
+ for (j = 0; j < ncaches; j++) {
+ virDomainNumaCachePtr cache = &def->mem_nodes[i].caches[j];
+
+ virBufferAsprintf(buf, "<cache level='%u'", cache->level);
+ if (cache->associativity) {
+ virBufferAsprintf(buf, " associativity='%s'",
+ virDomainCacheAssociativityTypeToString(cache->associativity));
+ }
+
+ if (cache->policy) {
+ virBufferAsprintf(buf, " policy='%s'",
+ virDomainCachePolicyTypeToString(cache->policy));
+ }
+ virBufferAddLit(buf, ">\n");
+
+ virBufferAdjustIndent(buf, 2);
+ virBufferAsprintf(buf,
+ " \n",
+ cache->size);
+
+ if (cache->line) {
+ virBufferAsprintf(buf,
+ " \n",
+ cache->line);
+ }
+
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "</cache>\n");
}
- virBufferAdjustIndent(buf, -2);
- virBufferAddLit(buf, "</distances>\n");
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</cell>\n");
}
}
+
+ if (def->ninterconnects) {
+ virBufferAddLit(buf, "<interconnects>\n");
+ virBufferAdjustIndent(buf, 2);
+ }
+
+ for (i = 0; i < def->ninterconnects; i++) {
+ virDomainNumaInterconnectPtr l = &def->interconnects[i];
+
+ switch (l->type) {
+ case VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_LATENCY:
+ virBufferAddLit(buf, "<latency");
+ break;
+ case VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_BANDWIDTH:
+ virBufferAddLit(buf, "<bandwidth");
+ break;
+ }
+
+ virBufferAsprintf(buf, " initiator='%u' target='%u'", l->initiator, l->target);
+
+ if (l->cache > 0) {
+ virBufferAsprintf(buf,
+ " cache='%u'",
+ l->cache);
+ }
+
+ virBufferAsprintf(buf,
+ " type='%s' value='%lu'",
+ virDomainMemoryLatencyTypeToString(l->accessType),
+ l->value);
+
+ if (l->type == VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_BANDWIDTH)
+ virBufferAddLit(buf, " unit='KiB'");
+ virBufferAddLit(buf, "/>\n");
+ }
+
+ if (def->ninterconnects) {
+ virBufferAdjustIndent(buf, -2);
+ virBufferAddLit(buf, "</interconnects>\n");
+ }
+
virBufferAdjustIndent(buf, -2);
virBufferAddLit(buf, "</numa>\n");
diff --git a/src/conf/numa_conf.h b/src/conf/numa_conf.h
index 6808439a7c..5043c5a6d4 100644
--- a/src/conf/numa_conf.h
+++ b/src/conf/numa_conf.h
@@ -52,6 +52,39 @@ typedef enum {
} virDomainMemoryAccess;
VIR_ENUM_DECL(virDomainMemoryAccess);
+typedef enum {
+ VIR_DOMAIN_CACHE_ASSOCIATIVITY_NONE, /* No associativity */
+ VIR_DOMAIN_CACHE_ASSOCIATIVITY_DIRECT, /* Direct mapped cache */
+ VIR_DOMAIN_CACHE_ASSOCIATIVITY_FULL, /* Fully associative cache */
+
+ VIR_DOMAIN_CACHE_ASSOCIATIVITY_LAST
+} virDomainCacheAssociativity;
+VIR_ENUM_DECL(virDomainCacheAssociativity);
+
+typedef enum {
+ VIR_DOMAIN_CACHE_POLICY_NONE, /* No policy */
+ VIR_DOMAIN_CACHE_POLICY_WRITEBACK, /* Write-back policy */
+ VIR_DOMAIN_CACHE_POLICY_WRITETHROUGH, /* Write-through policy */
+
+ VIR_DOMAIN_CACHE_POLICY_LAST
+} virDomainCachePolicy;
+VIR_ENUM_DECL(virDomainCachePolicy);
+
+typedef enum {
+ VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_LATENCY,
+ VIR_DOMAIN_NUMA_INTERCONNECT_TYPE_BANDWIDTH,
+} virDomainNumaInterconnectType;
+
+typedef enum {
+ VIR_DOMAIN_MEMORY_LATENCY_NONE = 0, /* No memory latency defined */
+ VIR_DOMAIN_MEMORY_LATENCY_ACCESS, /* Access latency */
+ VIR_DOMAIN_MEMORY_LATENCY_READ, /* Read latency */
+ VIR_DOMAIN_MEMORY_LATENCY_WRITE, /* Write latency */
+
+ VIR_DOMAIN_MEMORY_LATENCY_LAST
+} virDomainMemoryLatency;
+VIR_ENUM_DECL(virDomainMemoryLatency);
+
virDomainNumaPtr virDomainNumaNew(void);
void virDomainNumaFree(virDomainNumaPtr numa);
diff --git a/src/libvirt_private.syms b/src/libvirt_private.syms
index acb25eb8c8..de95e3b116 100644
--- a/src/libvirt_private.syms
+++ b/src/libvirt_private.syms
@@ -808,8 +808,14 @@ virNodeDeviceDeleteVport;
virNodeDeviceGetParentName;
# conf/numa_conf.h
+virDomainCacheAssociativityTypeFromString;
+virDomainCacheAssociativityTypeToString;
+virDomainCachePolicyTypeFromString;
+virDomainCachePolicyTypeToString;
virDomainMemoryAccessTypeFromString;
virDomainMemoryAccessTypeToString;
+virDomainMemoryLatencyTypeFromString;
+virDomainMemoryLatencyTypeToString;
virDomainNumaCheckABIStability;
virDomainNumaEquals;
virDomainNumaFree;
diff --git a/tests/qemuxml2argvdata/numatune-hmat.xml b/tests/qemuxml2argvdata/numatune-hmat.xml
new file mode 100644
index 0000000000..83f0b56c9b
--- /dev/null
+++ b/tests/qemuxml2argvdata/numatune-hmat.xml
@@ -0,0 +1,52 @@
+
+ QEMUGuest
+ c7a5fdb2-cdaf-9455-926a-d65c16db1809
+ 8388608
+ 8388608
+ 12
+
+ hvm
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ |
+ |
+ |
+ |
+ |
+ |
+
+
+
+
+
+
+
+
+ destroy
+ restart
+ restart
+
+ /usr/bin/qemu-system-x86_64
+
+
+
+
+
+
+
+
+
+
+
diff --git a/tests/qemuxml2xmloutdata/numatune-hmat.xml b/tests/qemuxml2xmloutdata/numatune-hmat.xml
new file mode 120000
index 0000000000..6903a80ab1
--- /dev/null
+++ b/tests/qemuxml2xmloutdata/numatune-hmat.xml
@@ -0,0 +1 @@
+../qemuxml2argvdata/numatune-hmat.xml
\ No newline at end of file
diff --git a/tests/qemuxml2xmltest.c b/tests/qemuxml2xmltest.c
index 1ddeba30f0..de1d720e1d 100644
--- a/tests/qemuxml2xmltest.c
+++ b/tests/qemuxml2xmltest.c
@@ -1106,6 +1106,7 @@ mymain(void)
DO_TEST("numatune-memnode-no-memory", QEMU_CAPS_OBJECT_MEMORY_FILE);
DO_TEST("numatune-distances", QEMU_CAPS_NUMA, QEMU_CAPS_NUMA_DIST);
DO_TEST("numatune-no-vcpu", QEMU_CAPS_NUMA);
+ DO_TEST("numatune-hmat", NONE);
DO_TEST("bios-nvram", NONE);
DO_TEST("bios-nvram-os-interleave", NONE);
--
2.29.2