|
|
b38b0f |
From 4da5757f8ad715c203e2ef9320c49432e8259ee8 Mon Sep 17 00:00:00 2001
|
|
|
b38b0f |
From: David Gibson <dgibson@redhat.com>
|
|
|
b38b0f |
Date: Thu, 30 May 2019 04:37:23 +0100
|
|
|
b38b0f |
Subject: [PATCH 2/8] spapr: Fix ibm,max-associativity-domains property number
|
|
|
b38b0f |
of nodes
|
|
|
b38b0f |
|
|
|
b38b0f |
RH-Author: David Gibson <dgibson@redhat.com>
|
|
|
b38b0f |
Message-id: <20190530043728.32575-2-dgibson@redhat.com>
|
|
|
b38b0f |
Patchwork-id: 88418
|
|
|
b38b0f |
O-Subject: [RHEL-8.1 qemu-kvm PATCH 1/6] spapr: Fix ibm,max-associativity-domains property number of nodes
|
|
|
b38b0f |
Bugzilla: 1710662
|
|
|
b38b0f |
RH-Acked-by: Laurent Vivier <lvivier@redhat.com>
|
|
|
b38b0f |
RH-Acked-by: Auger Eric <eric.auger@redhat.com>
|
|
|
b38b0f |
RH-Acked-by: Cornelia Huck <cohuck@redhat.com>
|
|
|
b38b0f |
|
|
|
b38b0f |
From: Serhii Popovych <spopovyc@redhat.com>
|
|
|
b38b0f |
|
|
|
b38b0f |
Laurent Vivier reported off by one with maximum number of NUMA nodes
|
|
|
b38b0f |
provided by qemu-kvm being less by one than required according to
|
|
|
b38b0f |
description of "ibm,max-associativity-domains" property in LoPAPR.
|
|
|
b38b0f |
|
|
|
b38b0f |
It appears that I incorrectly treated LoPAPR description of this
|
|
|
b38b0f |
property assuming it provides last valid domain (NUMA node here)
|
|
|
b38b0f |
instead of maximum number of domains.
|
|
|
b38b0f |
|
|
|
b38b0f |
### Before hot-add
|
|
|
b38b0f |
|
|
|
b38b0f |
(qemu) info numa
|
|
|
b38b0f |
3 nodes
|
|
|
b38b0f |
node 0 cpus: 0
|
|
|
b38b0f |
node 0 size: 0 MB
|
|
|
b38b0f |
node 0 plugged: 0 MB
|
|
|
b38b0f |
node 1 cpus:
|
|
|
b38b0f |
node 1 size: 1024 MB
|
|
|
b38b0f |
node 1 plugged: 0 MB
|
|
|
b38b0f |
node 2 cpus:
|
|
|
b38b0f |
node 2 size: 0 MB
|
|
|
b38b0f |
node 2 plugged: 0 MB
|
|
|
b38b0f |
|
|
|
b38b0f |
$ numactl -H
|
|
|
b38b0f |
available: 2 nodes (0-1)
|
|
|
b38b0f |
node 0 cpus: 0
|
|
|
b38b0f |
node 0 size: 0 MB
|
|
|
b38b0f |
node 0 free: 0 MB
|
|
|
b38b0f |
node 1 cpus:
|
|
|
b38b0f |
node 1 size: 999 MB
|
|
|
b38b0f |
node 1 free: 658 MB
|
|
|
b38b0f |
node distances:
|
|
|
b38b0f |
node 0 1
|
|
|
b38b0f |
0: 10 40
|
|
|
b38b0f |
1: 40 10
|
|
|
b38b0f |
|
|
|
b38b0f |
### Hot-add
|
|
|
b38b0f |
|
|
|
b38b0f |
(qemu) object_add memory-backend-ram,id=mem0,size=1G
|
|
|
b38b0f |
(qemu) device_add pc-dimm,id=dimm1,memdev=mem0,node=2
|
|
|
b38b0f |
(qemu) [ 87.704898] pseries-hotplug-mem: Attempting to hot-add 4 ...
|
|
|
b38b0f |
<there is no "Initmem setup node 2 [mem 0xHEX-0xHEX]">
|
|
|
b38b0f |
[ 87.705128] lpar: Attempting to resize HPT to shift 21
|
|
|
b38b0f |
... <HPT resize messages>
|
|
|
b38b0f |
|
|
|
b38b0f |
### After hot-add
|
|
|
b38b0f |
|
|
|
b38b0f |
(qemu) info numa
|
|
|
b38b0f |
3 nodes
|
|
|
b38b0f |
node 0 cpus: 0
|
|
|
b38b0f |
node 0 size: 0 MB
|
|
|
b38b0f |
node 0 plugged: 0 MB
|
|
|
b38b0f |
node 1 cpus:
|
|
|
b38b0f |
node 1 size: 1024 MB
|
|
|
b38b0f |
node 1 plugged: 0 MB
|
|
|
b38b0f |
node 2 cpus:
|
|
|
b38b0f |
node 2 size: 1024 MB
|
|
|
b38b0f |
node 2 plugged: 1024 MB
|
|
|
b38b0f |
|
|
|
b38b0f |
$ numactl -H
|
|
|
b38b0f |
available: 2 nodes (0-1)
|
|
|
b38b0f |
^^^^^^^^^^^^^^^^^^^^^^^^
|
|
|
b38b0f |
Still only two nodes (and memory hot-added to node 0 below)
|
|
|
b38b0f |
node 0 cpus: 0
|
|
|
b38b0f |
node 0 size: 1024 MB
|
|
|
b38b0f |
node 0 free: 1021 MB
|
|
|
b38b0f |
node 1 cpus:
|
|
|
b38b0f |
node 1 size: 999 MB
|
|
|
b38b0f |
node 1 free: 658 MB
|
|
|
b38b0f |
node distances:
|
|
|
b38b0f |
node 0 1
|
|
|
b38b0f |
0: 10 40
|
|
|
b38b0f |
1: 40 10
|
|
|
b38b0f |
|
|
|
b38b0f |
After fix applied numactl(8) reports 3 nodes available and memory
|
|
|
b38b0f |
plugged into node 2 as expected.
|
|
|
b38b0f |
|
|
|
b38b0f |
From David Gibson:
|
|
|
b38b0f |
------------------
|
|
|
b38b0f |
Qemu makes a distinction between "non NUMA" (nb_numa_nodes == 0) and
|
|
|
b38b0f |
"NUMA with one node" (nb_numa_nodes == 1). But from a PAPR guest's
|
|
|
b38b0f |
point of view these are equivalent. I don't want to present two
|
|
|
b38b0f |
different cases to the guest when we don't need to, so even though the
|
|
|
b38b0f |
guest can handle it, I'd prefer we put a '1' here for both the
|
|
|
b38b0f |
nb_numa_nodes == 0 and nb_numa_nodes == 1 case.
|
|
|
b38b0f |
|
|
|
b38b0f |
This consolidates everything discussed previously on mailing list.
|
|
|
b38b0f |
|
|
|
b38b0f |
Fixes: da9f80fbad21 ("spapr: Add ibm,max-associativity-domains property")
|
|
|
b38b0f |
Reported-by: Laurent Vivier <lvivier@redhat.com>
|
|
|
b38b0f |
Signed-off-by: Serhii Popovych <spopovyc@redhat.com>
|
|
|
b38b0f |
|
|
|
b38b0f |
Signed-off-by: David Gibson <david@gibson.dropbear.id.au>
|
|
|
b38b0f |
Reviewed-by: Greg Kurz <groug@kaod.org>
|
|
|
b38b0f |
Reviewed-by: Laurent Vivier <lvivier@redhat.com>
|
|
|
b38b0f |
(cherry picked from commit 3908a24fcb83913079d315de0ca6d598e8616dbb)
|
|
|
b38b0f |
|
|
|
b38b0f |
Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1710662
|
|
|
b38b0f |
|
|
|
b38b0f |
Signed-off-by: David Gibson <dgibson@redhat.com>
|
|
|
b38b0f |
Signed-off-by: Danilo C. L. de Paula <ddepaula@redhat.com>
|
|
|
b38b0f |
---
|
|
|
b38b0f |
hw/ppc/spapr.c | 2 +-
|
|
|
b38b0f |
1 file changed, 1 insertion(+), 1 deletion(-)
|
|
|
b38b0f |
|
|
|
b38b0f |
diff --git a/hw/ppc/spapr.c b/hw/ppc/spapr.c
|
|
|
b38b0f |
index ea72782..b57c0be 100644
|
|
|
b38b0f |
--- a/hw/ppc/spapr.c
|
|
|
b38b0f |
+++ b/hw/ppc/spapr.c
|
|
|
b38b0f |
@@ -915,7 +915,7 @@ static void spapr_dt_rtas(sPAPRMachineState *spapr, void *fdt)
|
|
|
b38b0f |
cpu_to_be32(0),
|
|
|
b38b0f |
cpu_to_be32(0),
|
|
|
b38b0f |
cpu_to_be32(0),
|
|
|
b38b0f |
- cpu_to_be32(nb_numa_nodes ? nb_numa_nodes - 1 : 0),
|
|
|
b38b0f |
+ cpu_to_be32(nb_numa_nodes ? nb_numa_nodes : 1),
|
|
|
b38b0f |
};
|
|
|
b38b0f |
|
|
|
b38b0f |
_FDT(rtas = fdt_add_subnode(fdt, 0, "rtas"));
|
|
|
b38b0f |
--
|
|
|
b38b0f |
1.8.3.1
|
|
|
b38b0f |
|