From 98205e71073c36234048b60feedc1ce142902572 Mon Sep 17 00:00:00 2001
From: Eduardo Habkost
Date: Tue, 3 Jul 2018 17:23:51 +0200
Subject: [PATCH 06/89] i386: Clean up cache CPUID code

RH-Author: Eduardo Habkost
Message-id: <20180703172356.21038-6-ehabkost@redhat.com>
Patchwork-id: 81215
O-Subject: [RHEL-7.6 qemu-kvm-rhev PATCH v3 05/10] i386: Clean up cache CPUID code
Bugzilla: 1481253
RH-Acked-by: Laurent Vivier
RH-Acked-by: Igor Mammedov
RH-Acked-by: Miroslav Rezanina

Always initialize CPUCaches structs with cache information, even
if legacy_cache=true.  Use different CPUCaches struct for
CPUID[2], CPUID[4], and the AMD CPUID leaves.

This will simplify a lot the logic inside cpu_x86_cpuid().

Signed-off-by: Eduardo Habkost
Signed-off-by: Babu Moger
Message-Id: <1527176614-26271-2-git-send-email-babu.moger@amd.com>
Signed-off-by: Eduardo Habkost
(cherry picked from commit a9f27ea9adc8c695197bd08f2e938ef7b4183f07)
Signed-off-by: Eduardo Habkost
Signed-off-by: Miroslav Rezanina
---
 target/i386/cpu.c | 117 +++++++++++++++++++++++++++---------------------------
 target/i386/cpu.h |  14 ++++---
 2 files changed, 67 insertions(+), 64 deletions(-)

diff --git a/target/i386/cpu.c b/target/i386/cpu.c
index 3426130..41d0b72 100644
--- a/target/i386/cpu.c
+++ b/target/i386/cpu.c
@@ -1113,7 +1113,7 @@ struct X86CPUDefinition {
 };
 
 static CPUCaches epyc_cache_info = {
-    .l1d_cache = {
+    .l1d_cache = &(CPUCacheInfo) {
         .type = DCACHE,
         .level = 1,
         .size = 32 * KiB,
@@ -1125,7 +1125,7 @@ static CPUCaches epyc_cache_info = {
         .self_init = 1,
         .no_invd_sharing = true,
     },
-    .l1i_cache = {
+    .l1i_cache = &(CPUCacheInfo) {
         .type = ICACHE,
         .level = 1,
         .size = 64 * KiB,
@@ -1137,7 +1137,7 @@ static CPUCaches epyc_cache_info = {
         .self_init = 1,
         .no_invd_sharing = true,
     },
-    .l2_cache = {
+    .l2_cache = &(CPUCacheInfo) {
         .type = UNIFIED_CACHE,
         .level = 2,
         .size = 512 * KiB,
@@ -1147,7 +1147,7 @@ static CPUCaches epyc_cache_info = {
         .sets = 1024,
         .lines_per_tag = 1,
     },
-    .l3_cache = {
+    .l3_cache = &(CPUCacheInfo) {
         .type = UNIFIED_CACHE,
         .level = 3,
         .size = 8 * MiB,
@@ -3325,9 +3325,8 @@ static void x86_cpu_load_def(X86CPU *cpu, X86CPUDefinition *def, Error **errp)
         env->features[w] = def->features[w];
     }
 
-    /* Store Cache information from the X86CPUDefinition if available */
-    env->cache_info = def->cache_info;
-    cpu->legacy_cache = def->cache_info ? 0 : 1;
+    /* legacy-cache defaults to 'off' if CPU model provides cache info */
+    cpu->legacy_cache = !def->cache_info;
 
     /* Special cases not set in the X86CPUDefinition structs: */
     /* TODO: in-kernel irqchip for hvf */
@@ -3678,21 +3677,11 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
         if (!cpu->enable_l3_cache) {
             *ecx = 0;
         } else {
-            if (env->cache_info && !cpu->legacy_cache) {
-                *ecx = cpuid2_cache_descriptor(&env->cache_info->l3_cache);
-            } else {
-                *ecx = cpuid2_cache_descriptor(&legacy_l3_cache);
-            }
-        }
-        if (env->cache_info && !cpu->legacy_cache) {
-            *edx = (cpuid2_cache_descriptor(&env->cache_info->l1d_cache) << 16) |
-                   (cpuid2_cache_descriptor(&env->cache_info->l1i_cache) << 8) |
-                   (cpuid2_cache_descriptor(&env->cache_info->l2_cache));
-        } else {
-            *edx = (cpuid2_cache_descriptor(&legacy_l1d_cache) << 16) |
-                   (cpuid2_cache_descriptor(&legacy_l1i_cache) << 8) |
-                   (cpuid2_cache_descriptor(&legacy_l2_cache_cpuid2));
+            *ecx = cpuid2_cache_descriptor(env->cache_info_cpuid2.l3_cache);
         }
+        *edx = (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1d_cache) << 16) |
+               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l1i_cache) << 8) |
+               (cpuid2_cache_descriptor(env->cache_info_cpuid2.l2_cache));
         break;
     case 4:
         /* cache info: needed for Core compatibility */
@@ -3705,35 +3694,27 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
             }
         } else {
             *eax = 0;
-            CPUCacheInfo *l1d, *l1i, *l2, *l3;
-            if (env->cache_info && !cpu->legacy_cache) {
-                l1d = &env->cache_info->l1d_cache;
-                l1i = &env->cache_info->l1i_cache;
-                l2 = &env->cache_info->l2_cache;
-                l3 = &env->cache_info->l3_cache;
-            } else {
-                l1d = &legacy_l1d_cache;
-                l1i = &legacy_l1i_cache;
-                l2 = &legacy_l2_cache;
-                l3 = &legacy_l3_cache;
-            }
             switch (count) {
             case 0: /* L1 dcache info */
-                encode_cache_cpuid4(l1d, 1, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l1d_cache,
+                                    1, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
            case 1: /* L1 icache info */
-                encode_cache_cpuid4(l1i, 1, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l1i_cache,
+                                    1, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
            case 2: /* L2 cache info */
-                encode_cache_cpuid4(l2, cs->nr_threads, cs->nr_cores,
+                encode_cache_cpuid4(env->cache_info_cpuid4.l2_cache,
+                                    cs->nr_threads, cs->nr_cores,
                                     eax, ebx, ecx, edx);
                 break;
            case 3: /* L3 cache info */
                 pkg_offset = apicid_pkg_offset(cs->nr_cores, cs->nr_threads);
                 if (cpu->enable_l3_cache) {
-                    encode_cache_cpuid4(l3, (1 << pkg_offset), cs->nr_cores,
+                    encode_cache_cpuid4(env->cache_info_cpuid4.l3_cache,
+                                        (1 << pkg_offset), cs->nr_cores,
                                         eax, ebx, ecx, edx);
                     break;
                 }
@@ -3946,13 +3927,8 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (L1_ITLB_2M_ASSOC << 8) | (L1_ITLB_2M_ENTRIES);
         *ebx = (L1_DTLB_4K_ASSOC << 24) | (L1_DTLB_4K_ENTRIES << 16) | \
                (L1_ITLB_4K_ASSOC << 8) | (L1_ITLB_4K_ENTRIES);
-        if (env->cache_info && !cpu->legacy_cache) {
-            *ecx = encode_cache_cpuid80000005(&env->cache_info->l1d_cache);
-            *edx = encode_cache_cpuid80000005(&env->cache_info->l1i_cache);
-        } else {
-            *ecx = encode_cache_cpuid80000005(&legacy_l1d_cache_amd);
-            *edx = encode_cache_cpuid80000005(&legacy_l1i_cache_amd);
-        }
+        *ecx = encode_cache_cpuid80000005(env->cache_info_amd.l1d_cache);
+        *edx = encode_cache_cpuid80000005(env->cache_info_amd.l1i_cache);
         break;
     case 0x80000006:
         /* cache info (L2 cache) */
@@ -3968,17 +3944,10 @@ void cpu_x86_cpuid(CPUX86State *env, uint32_t index, uint32_t count,
                (L2_DTLB_4K_ENTRIES << 16) | \
                (AMD_ENC_ASSOC(L2_ITLB_4K_ASSOC) << 12) | \
                (L2_ITLB_4K_ENTRIES);
-        if (env->cache_info && !cpu->legacy_cache) {
-            encode_cache_cpuid80000006(&env->cache_info->l2_cache,
-                                       cpu->enable_l3_cache ?
-                                       &env->cache_info->l3_cache : NULL,
-                                       ecx, edx);
-        } else {
-            encode_cache_cpuid80000006(&legacy_l2_cache_amd,
-                                       cpu->enable_l3_cache ?
-                                       &legacy_l3_cache : NULL,
-                                       ecx, edx);
-        }
+        encode_cache_cpuid80000006(env->cache_info_amd.l2_cache,
+                                   cpu->enable_l3_cache ?
+                                   env->cache_info_amd.l3_cache : NULL,
+                                   ecx, edx);
         break;
     case 0x80000007:
         *eax = 0;
@@ -4675,6 +4644,37 @@ static void x86_cpu_realizefn(DeviceState *dev, Error **errp)
             cpu->phys_bits = 32;
         }
     }
+
+    /* Cache information initialization */
+    if (!cpu->legacy_cache) {
+        if (!xcc->cpu_def || !xcc->cpu_def->cache_info) {
+            char *name = x86_cpu_class_get_model_name(xcc);
+            error_setg(errp,
+                       "CPU model '%s' doesn't support legacy-cache=off", name);
+            g_free(name);
+            return;
+        }
+        env->cache_info_cpuid2 = env->cache_info_cpuid4 = env->cache_info_amd =
+            *xcc->cpu_def->cache_info;
+    } else {
+        /* Build legacy cache information */
+        env->cache_info_cpuid2.l1d_cache = &legacy_l1d_cache;
+        env->cache_info_cpuid2.l1i_cache = &legacy_l1i_cache;
+        env->cache_info_cpuid2.l2_cache = &legacy_l2_cache_cpuid2;
+        env->cache_info_cpuid2.l3_cache = &legacy_l3_cache;
+
+        env->cache_info_cpuid4.l1d_cache = &legacy_l1d_cache;
+        env->cache_info_cpuid4.l1i_cache = &legacy_l1i_cache;
+        env->cache_info_cpuid4.l2_cache = &legacy_l2_cache;
+        env->cache_info_cpuid4.l3_cache = &legacy_l3_cache;
+
+        env->cache_info_amd.l1d_cache = &legacy_l1d_cache_amd;
+        env->cache_info_amd.l1i_cache = &legacy_l1i_cache_amd;
+        env->cache_info_amd.l2_cache = &legacy_l2_cache_amd;
+        env->cache_info_amd.l3_cache = &legacy_l3_cache;
+    }
+
+
     cpu_exec_realizefn(cs, &local_err);
     if (local_err != NULL) {
         error_propagate(errp, local_err);
@@ -5159,11 +5159,10 @@ static Property x86_cpu_properties[] = {
     DEFINE_PROP_BOOL("vmware-cpuid-freq", X86CPU, vmware_cpuid_freq, true),
     DEFINE_PROP_BOOL("tcg-cpuid", X86CPU, expose_tcg, true),
     /*
-     * lecacy_cache defaults to CPU model being chosen. This is set in
-     * x86_cpu_load_def based on cache_info which is initialized in
-     * builtin_x86_defs
+     * lecacy_cache defaults to true unless the CPU model provides its
+     * own cache information (see x86_cpu_load_def()).
      */
-    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, false),
+    DEFINE_PROP_BOOL("legacy-cache", X86CPU, legacy_cache, true),
 
     /*
      * From "Requirements for Implementing the Microsoft
diff --git a/target/i386/cpu.h b/target/i386/cpu.h
index b01b0c1..c80c461 100644
--- a/target/i386/cpu.h
+++ b/target/i386/cpu.h
@@ -1099,10 +1099,10 @@ typedef struct CPUCacheInfo {
 
 
 typedef struct CPUCaches {
-        CPUCacheInfo l1d_cache;
-        CPUCacheInfo l1i_cache;
-        CPUCacheInfo l2_cache;
-        CPUCacheInfo l3_cache;
+        CPUCacheInfo *l1d_cache;
+        CPUCacheInfo *l1i_cache;
+        CPUCacheInfo *l2_cache;
+        CPUCacheInfo *l3_cache;
 } CPUCaches;
 
 typedef struct CPUX86State {
@@ -1291,7 +1291,11 @@ typedef struct CPUX86State {
     /* Features that were explicitly enabled/disabled */
     FeatureWordArray user_features;
     uint32_t cpuid_model[12];
-    CPUCaches *cache_info;
+    /* Cache information for CPUID. When legacy-cache=on, the cache data
+     * on each CPUID leaf will be different, because we keep compatibility
+     * with old QEMU versions.
+     */
+    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;
 
     /* MTRRs */
     uint64_t mtrr_fixed[11];
-- 
1.8.3.1
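
The standalone sketch below illustrates the data layout this patch moves to: one
CPUCaches struct of CPUCacheInfo pointers per CPUID flavor (leaf 2, leaf 4, and the
AMD leaves), either pointed at per-flavor legacy tables or filled verbatim from a
CPU-model-provided table. It is not QEMU code; the field names, table names
(legacy_l1d, model_cache_info) and sizes are simplified assumptions for illustration
only.

/*
 * Minimal standalone sketch of the layout described above -- NOT QEMU code.
 * Fields, table names and sizes are simplified, illustrative assumptions.
 */
#include <stdbool.h>
#include <stdio.h>

typedef struct CPUCacheInfo {
    int level;
    int size_kb;
} CPUCacheInfo;

/* One struct of pointers per CPUID flavor, as in the patch. */
typedef struct CPUCaches {
    CPUCacheInfo *l1d_cache;
    CPUCacheInfo *l1i_cache;
    CPUCacheInfo *l2_cache;
    CPUCacheInfo *l3_cache;
} CPUCaches;

/* Stand-ins for the legacy_* tables kept for old machine types. */
static CPUCacheInfo legacy_l1d = { 1, 32 };
static CPUCacheInfo legacy_l1i = { 1, 32 };
static CPUCacheInfo legacy_l2  = { 2, 4096 };
static CPUCacheInfo legacy_l3  = { 3, 16384 };

/* Stand-in for a CPU-model-provided table (the role epyc_cache_info plays). */
static CPUCaches model_cache_info = {
    .l1d_cache = &(CPUCacheInfo) { 1, 32 },
    .l1i_cache = &(CPUCacheInfo) { 1, 64 },
    .l2_cache  = &(CPUCacheInfo) { 2, 512 },
    .l3_cache  = &(CPUCacheInfo) { 3, 8192 },
};

int main(void)
{
    bool legacy_cache = true;   /* mimics the legacy-cache property */
    CPUCaches cache_info_cpuid2, cache_info_cpuid4, cache_info_amd;

    if (!legacy_cache) {
        /* Model-provided info is reused verbatim for every CPUID flavor. */
        cache_info_cpuid2 = cache_info_cpuid4 = cache_info_amd =
            model_cache_info;
    } else {
        /* Each flavor may point at different legacy data for compatibility. */
        cache_info_cpuid2 = (CPUCaches) { &legacy_l1d, &legacy_l1i,
                                          &legacy_l2, &legacy_l3 };
        cache_info_cpuid4 = cache_info_cpuid2;
        cache_info_amd = cache_info_cpuid2;
    }

    /* Leaf-encoding code can now read its own struct without re-testing flags. */
    printf("CPUID[4] L2 size: %d KiB\n", cache_info_cpuid4.l2_cache->size_kb);
    return 0;
}

Built this way, each CPUID leaf only dereferences its own struct, which is the
simplification the commit message refers to.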