diff --git a/SOURCES/create-diff-object-ignore-altrinstr-aux-backport.patch b/SOURCES/create-diff-object-ignore-altrinstr-aux-backport.patch
new file mode 100644
index 0000000..1d3cb4a
--- /dev/null
+++ b/SOURCES/create-diff-object-ignore-altrinstr-aux-backport.patch
@@ -0,0 +1,78 @@
+From 46b54db1427ac7a58f986ae3506ce23c068d94e0 Mon Sep 17 00:00:00 2001
+From: Josh Poimboeuf
+Date: Tue, 23 Jun 2020 22:20:33 -0500
+Subject: [PATCH] create-diff-object: change rela_equal() to return bool
+
+Change rela_equal's return value to bool to make its return semantics
+more clear.
+
+Signed-off-by: Josh Poimboeuf
+
+create-diff-object: Ignore changes to .altinstr_aux
+
+On x86, .altinstr_aux is used to store temporary code which allows
+static_cpu_has() to work before apply_alternatives() has run. This code
+is completely inert for modules, because apply_alternatives() runs
+during module init, before the module is fully formed. Any changed
+references to it (i.e. changed addend) can be ignored. As long as
+they're both references to .altinstr_aux, they can be considered equal,
+even if the addends differ.
+
+Signed-off-by: Josh Poimboeuf
+---
+ kpatch-build/create-diff-object.c | 21 +++++++++++++++++----
+ 1 file changed, 17 insertions(+), 4 deletions(-)
+
+diff --git a/kpatch-build/create-diff-object.c b/kpatch-build/create-diff-object.c
+index aedd07d..b7332b1 100644
+--- a/kpatch-build/create-diff-object.c
++++ b/kpatch-build/create-diff-object.c
+@@ -327,14 +327,27 @@ static int kpatch_mangled_strcmp(char *s1, char *s2)
+ 	return 1;
+ }
+ 
+-static int rela_equal(struct rela *rela1, struct rela *rela2)
++static bool rela_equal(struct rela *rela1, struct rela *rela2)
+ {
+ 	struct rela *rela_toc1, *rela_toc2;
+ 	unsigned long toc_data1 = 0, toc_data2 = 0; /* = 0 to prevent gcc warning */
+ 
+ 	if (rela1->type != rela2->type ||
+ 	    rela1->offset != rela2->offset)
+-		return 0;
++		return false;
++
++	/*
++	 * On x86, .altinstr_aux is used to store temporary code which allows
++	 * static_cpu_has() to work before apply_alternatives() has run. This
++	 * code is completely inert for modules, because apply_alternatives()
++	 * runs during module init, before the module is fully formed. Any
++	 * changed references to it (i.e. changed addend) can be ignored. As
++	 * long as they're both references to .altinstr_aux, they can be
++	 * considered equal, even if the addends differ.
++	 */
++	if (!strcmp(rela1->sym->name, ".altinstr_aux") &&
++	    !strcmp(rela2->sym->name, ".altinstr_aux"))
++		return true;
+ 
+ 	/*
+ 	 * With -mcmodel=large on ppc64le, GCC might generate entries in the .toc
+@@ -393,13 +406,13 @@ static int rela_equal(struct rela *rela1, struct rela *rela2)
+ 		return toc_data1 == toc_data2;
+ 
+ 	if (!rela_toc1 || !rela_toc2)
+-		return 0;
++		return false;
+ 
+ 	if (rela_toc1->string)
+ 		return rela_toc2->string && !strcmp(rela_toc1->string, rela_toc2->string);
+ 
+ 	if (rela_toc1->addend != rela_toc2->addend)
+-		return 0;
++		return false;
+ 
+ 	return !kpatch_mangled_strcmp(rela_toc1->sym->name, rela_toc2->sym->name);
+ }
+-- 
+2.21.3
+
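The effect of the new special case can be restated as a stand-alone sketch (the struct layouts below are simplified stand-ins for create-diff-object.c's types, for illustration only, not the shipped code):

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Minimal stand-ins for create-diff-object's types (illustration only). */
struct symbol { char *name; };
struct rela { int type; long offset; long addend; struct symbol *sym; };

/* The special case added above: two relocations that both reference
 * .altinstr_aux compare equal even when their addends differ, because
 * that section's code is never executed by a loaded module. */
static bool both_ref_altinstr_aux(struct rela *r1, struct rela *r2)
{
	return !strcmp(r1->sym->name, ".altinstr_aux") &&
	       !strcmp(r2->sym->name, ".altinstr_aux");
}

int main(void)
{
	struct symbol s = { ".altinstr_aux" };
	struct rela a = { 2, 0x10, 0x20, &s }, b = { 2, 0x10, 0x28, &s };

	/* Same type and offset, different addend: still considered equal,
	 * so the containing function is not flagged as changed. */
	printf("equal: %d\n", both_ref_altinstr_aux(&a, &b));
	return 0;
}

Without this backport, a changed addend into .altinstr_aux would make kpatch-build treat an otherwise identical function as modified and pull it into the live patch needlessly.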
diff --git a/SOURCES/kpatch-adapted-patch-for-CVE-2020-10767-and-CVE-2020-10768.patch b/SOURCES/kpatch-adapted-patch-for-CVE-2020-10767-and-CVE-2020-10768.patch
new file mode 100644
index 0000000..d0ec677
--- /dev/null
+++ b/SOURCES/kpatch-adapted-patch-for-CVE-2020-10767-and-CVE-2020-10768.patch
@@ -0,0 +1,369 @@
+From 7a3d1bf135b9ef3d9c26c5e48e9200efe932b555 Mon Sep 17 00:00:00 2001
+From: Artem Savkov
+Date: Fri, 19 Jun 2020 11:30:09 +0200
+Subject: [KPATCH RHEL-8.2 v4] kpatch adapted patch for CVE-2020-10767 and CVE-2020-10768
+
+Kernels:
+4.18.0-193.el8
+4.18.0-193.1.2.el8_2
+4.18.0-193.6.3.el8_2
+
+Changes since last build:
+arches: x86_64
+bugs.o: changed function: arch_prctl_spec_ctrl_get
+bugs.o: changed function: arch_prctl_spec_ctrl_set
+bugs.o: changed function: arch_seccomp_spec_mitigate
+bugs.o: changed function: ib_prctl_set.part.1
+bugs.o: new function: kpatch_cve_2020_10767_pre_patch_callback
+---------------------------
+
+Kernels:
+4.18.0-193.el8
+4.18.0-193.1.2.el8_2
+4.18.0-193.6.3.el8_2
+
+Modifications:
+ - Dropped SPECTRE_V2_USER_STRICT_PREFERRED support as it is not a part
+   of the CVE fix and was only ported for the patches to apply cleanly.
+ - Added a pre-patch callback that initializes spectre_v2_user_ibpb
+   from scratch, parsing the command line and checking cpu capabilities.
+ - spectre_v2_user_stibp is not renamed and is left as spectre_v2_user
+   to limit the footprint of the patch.
+
+Testing: reproducer provided by security team
+
+commit eb2ce6c5ce9269a32474955bb0934359801c83fa
+Author: Waiman Long
+Date: Tue Jun 16 19:01:42 2020 -0400
+
+    [x86] x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches
+
+    Message-id: <20200616190142.5674-6-longman@redhat.com>
+    Patchwork-id: 320367
+    Patchwork-instance: patchwork
+    O-Subject: [RHEL8.2.z PATCH 5/5] x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
+    Bugzilla: 1847396
+    Z-Bugzilla: 1847395
+    CVE: CVE-2020-10768
+    RH-Acked-by: Rafael Aquini
+    RH-Acked-by: Phil Auld
+
+    Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1847395
+    CVE: CVE-2020-10768
+
+    commit 4d8df8cbb9156b0a0ab3f802b80cb5db57acc0bf
+    Author: Anthony Steinhauser
+    Date: Sun, 7 Jun 2020 05:44:19 -0700
+
+        x86/speculation: PR_SPEC_FORCE_DISABLE enforcement for indirect branches.
+
+        Currently, it is possible to enable indirect branch speculation even after
+        it was force-disabled using the PR_SPEC_FORCE_DISABLE option. Moreover, the
+        PR_GET_SPECULATION_CTRL command afterwards gives an incorrect result
+        (force-disabled when it is in fact enabled). This is also inconsistent
+        with STIBP and the documentation, which clearly states that
+        PR_SPEC_FORCE_DISABLE cannot be undone.
+
+        Fix this by actually enforcing force-disabled indirect branch
+        speculation. PR_SPEC_ENABLE called after PR_SPEC_FORCE_DISABLE now fails
+        with -EPERM as described in the documentation.
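The enforced semantics can be observed from user space. Below is a minimal sketch (a hypothetical test program, not part of the shipped patch; the PR_SPEC_* constants come from <linux/prctl.h> on kernels that carry the fix):

/* spec_force_disable_test.c: after PR_SPEC_FORCE_DISABLE, a later
 * PR_SPEC_ENABLE must fail with EPERM on a fixed kernel. */
#include <errno.h>
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/* Irreversibly disable indirect branch speculation for this task. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_FORCE_DISABLE, 0, 0))
		perror("PR_SPEC_FORCE_DISABLE");

	/* On a fixed kernel this fails with EPERM; before the fix it could
	 * silently re-enable speculation. */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH,
		  PR_SPEC_ENABLE, 0, 0) && errno == EPERM)
		printf("force-disable enforced (EPERM)\n");

	/* PR_GET_SPECULATION_CTRL should now report the force-disabled state. */
	printf("ib state: 0x%x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
	return 0;
}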
+
+        Fixes: 9137bb27e60e ("x86/speculation: Add prctl() control for indirect branch speculation")
+        Signed-off-by: Anthony Steinhauser
+        Signed-off-by: Thomas Gleixner
+        Cc: stable@vger.kernel.org
+
+    Signed-off-by: Waiman Long
+    Signed-off-by: Bruno Meneguele
+
+commit b7620b4e8bfb015bf494d47152751b8f07b5b215
+Author: Waiman Long
+Date: Tue Jun 16 19:01:40 2020 -0400
+
+    [x86] x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS
+
+    Message-id: <20200616190142.5674-4-longman@redhat.com>
+    Patchwork-id: 320366
+    Patchwork-instance: patchwork
+    O-Subject: [RHEL8.2.z PATCH 3/5] x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
+    Bugzilla: 1847379
+    Z-Bugzilla: 1847378
+    CVE: CVE-2020-10767
+    RH-Acked-by: Rafael Aquini
+    RH-Acked-by: Phil Auld
+
+    Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1847378
+    CVE: CVE-2020-10767
+    Conflicts: There is a minor fuzz in bugs.c due to missing the upstream
+        commit e4f358916d52 ("x86, modpost: Replace last remnants
+        of RETPOLINE with CONFIG_RETPOLINE"). However, backporting
+        this commit at this stage would cause unexpected retpoline
+        warnings when loading 3rd party modules.
+
+    commit 21998a351512eba4ed5969006f0c55882d995ada
+    Author: Anthony Steinhauser
+    Date: Tue, 19 May 2020 06:40:42 -0700
+
+        x86/speculation: Avoid force-disabling IBPB based on STIBP and enhanced IBRS.
+
+        When STIBP is unavailable or enhanced IBRS is available, Linux
+        force-disables the IBPB mitigation of Spectre-BTB even when simultaneous
+        multithreading is disabled. While attempts to enable IBPB using
+        prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, ...) fail with
+        EPERM, the seccomp syscall (or its prctl(PR_SET_SECCOMP, ...) equivalent),
+        which is used e.g. by Chromium or OpenSSH, succeeds with no errors, but
+        the application remains silently vulnerable to cross-process Spectre v2
+        attacks (classical BTB poisoning). At the same time the sysfs reporting
+        (/sys/devices/system/cpu/vulnerabilities/spectre_v2) displays that IBPB is
+        conditionally enabled when in fact it is unconditionally disabled.
+
+        STIBP is useful only when SMT is enabled. When SMT is disabled and STIBP is
+        unavailable, it makes no sense to also force-disable IBPB, because IBPB
+        protects against cross-process Spectre-BTB attacks regardless of the SMT
+        state. At the same time, since missing STIBP was only observed on AMD CPUs,
+        and AMD does not recommend using STIBP but recommends using IBPB instead,
+        disabling IBPB because of missing STIBP goes directly against AMD's advice:
+        https://developer.amd.com/wp-content/resources/Architecture_Guidelines_Update_Indirect_Branch_Control.pdf
+
+        Similarly, enhanced IBRS is designed to protect against cross-core BTB
+        poisoning and BTB-poisoning attacks from user space against the kernel (and
+        BTB-poisoning attacks from guest against hypervisor); it is not designed
+        to prevent cross-process (or cross-VM) BTB poisoning between processes (or
+        VMs) running on the same core. Therefore, even with enhanced IBRS it is
+        necessary to flush the BTB during context-switches, so there is no reason
+        to force-disable IBPB when enhanced IBRS is available.
+
+        Enable the prctl control of IBPB even when STIBP is unavailable or enhanced
+        IBRS is available.
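A quick way to see what a running kernel reports is sketched below (illustrative only, not part of the patch; the exact sysfs strings vary by CPU, microcode, and command line):

/* ibpb_state.c: report what the kernel exposes for Spectre v2 and
 * whether the per-task IBPB prctl is honoured. */
#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	char line[256];
	FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/spectre_v2", "r");

	if (f && fgets(line, sizeof(line), f))
		printf("spectre_v2: %s", line); /* e.g. "... IBPB: conditional ..." */
	if (f)
		fclose(f);

	/* Before the fix, sysfs could claim conditional IBPB while the prctl
	 * state was in fact unconditionally disabled. */
	printf("prctl ib state: 0x%x\n",
	       prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_INDIRECT_BRANCH, 0, 0, 0));
	return 0;
}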
+
+        Fixes: 7cc765a67d8e ("x86/speculation: Enable prctl mode for spectre_v2_user")
+        Signed-off-by: Anthony Steinhauser
+        Signed-off-by: Thomas Gleixner
+        Cc: stable@vger.kernel.org
+
+    Signed-off-by: Waiman Long
+    Signed-off-by: Bruno Meneguele
+
+Signed-off-by: Artem Savkov
+Acked-by: Julien Thierry
+Acked-by: Joe Lawrence
+---
+ arch/x86/kernel/cpu/bugs.c | 158 +++++++++++++++++++++++++++++++++----
+ 1 file changed, 144 insertions(+), 14 deletions(-)
+
+diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
+index 0316c7a04457..654c9a779caf 100644
+--- a/arch/x86/kernel/cpu/bugs.c
++++ b/arch/x86/kernel/cpu/bugs.c
+@@ -589,6 +589,9 @@ static enum spectre_v2_mitigation spectre_v2_enabled __ro_after_init =
+ static enum spectre_v2_user_mitigation spectre_v2_user __ro_after_init =
+ 	SPECTRE_V2_USER_NONE;
+ 
++static enum spectre_v2_user_mitigation spectre_v2_user_ibpb =
++	SPECTRE_V2_USER_NONE;
++
+ #ifdef RETPOLINE
+ static bool spectre_v2_bad_module;
+ 
+@@ -1291,13 +1294,17 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ {
+ 	switch (ctrl) {
+ 	case PR_SPEC_ENABLE:
+-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++		    spectre_v2_user == SPECTRE_V2_USER_NONE)
+ 			return 0;
+ 		/*
+ 		 * Indirect branch speculation is always disabled in strict
+-		 * mode.
++		 * mode. It can neither be enabled if it was force-disabled
++		 * by a previous prctl call.
+ 		 */
+-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++		    spectre_v2_user == SPECTRE_V2_USER_STRICT ||
++		    task_spec_ib_force_disable(task))
+ 			return -EPERM;
+ 		task_clear_spec_ib_disable(task);
+ 		task_update_spec_tif(task);
+@@ -1308,9 +1315,11 @@ static int ib_prctl_set(struct task_struct *task, unsigned long ctrl)
+ 		 * Indirect branch speculation is always allowed when
+ 		 * mitigation is force disabled.
+ 		 */
+-		if (spectre_v2_user == SPECTRE_V2_USER_NONE)
++		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++		    spectre_v2_user == SPECTRE_V2_USER_NONE)
+ 			return -EPERM;
+-		if (spectre_v2_user == SPECTRE_V2_USER_STRICT)
++		if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++		    spectre_v2_user == SPECTRE_V2_USER_STRICT)
+ 			return 0;
+ 		task_set_spec_ib_disable(task);
+ 		if (ctrl == PR_SPEC_FORCE_DISABLE)
+@@ -1341,7 +1350,8 @@ void arch_seccomp_spec_mitigate(struct task_struct *task)
+ {
+ 	if (ssb_mode == SPEC_STORE_BYPASS_SECCOMP)
+ 		ssb_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+-	if (spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
++	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++	    spectre_v2_user == SPECTRE_V2_USER_SECCOMP)
+ 		ib_prctl_set(task, PR_SPEC_FORCE_DISABLE);
+ }
+ #endif
+@@ -1372,21 +1382,23 @@ static int ib_prctl_get(struct task_struct *task)
+ 	if (!boot_cpu_has_bug(X86_BUG_SPECTRE_V2))
+ 		return PR_SPEC_NOT_AFFECTED;
+ 
+-	switch (spectre_v2_user) {
+-	case SPECTRE_V2_USER_NONE:
++	if (spectre_v2_user_ibpb == SPECTRE_V2_USER_NONE &&
++	    spectre_v2_user == SPECTRE_V2_USER_NONE)
+ 		return PR_SPEC_ENABLE;
+-	case SPECTRE_V2_USER_PRCTL:
+-	case SPECTRE_V2_USER_SECCOMP:
++	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_STRICT ||
++		 spectre_v2_user == SPECTRE_V2_USER_STRICT)
++		return PR_SPEC_DISABLE;
++	else if (spectre_v2_user_ibpb == SPECTRE_V2_USER_PRCTL ||
++		 spectre_v2_user_ibpb == SPECTRE_V2_USER_SECCOMP ||
++		 spectre_v2_user == SPECTRE_V2_USER_PRCTL ||
++		 spectre_v2_user == SPECTRE_V2_USER_SECCOMP) {
+ 		if (task_spec_ib_force_disable(task))
+ 			return PR_SPEC_PRCTL | PR_SPEC_FORCE_DISABLE;
+ 		if (task_spec_ib_disable(task))
+ 			return PR_SPEC_PRCTL | PR_SPEC_DISABLE;
+ 		return PR_SPEC_PRCTL | PR_SPEC_ENABLE;
+-	case SPECTRE_V2_USER_STRICT:
+-		return PR_SPEC_DISABLE;
+-	default:
++	} else
+ 		return PR_SPEC_NOT_AFFECTED;
+-	}
+ }
+ 
+ int arch_prctl_spec_ctrl_get(struct task_struct *task, unsigned long which)
+@@ -1755,3 +1767,121 @@ ssize_t cpu_show_srbds(struct device *dev, struct device_attribute *attr, char *
+ 	return cpu_show_common(dev, attr, buf, X86_BUG_SRBDS);
+ }
+ #endif
++
++static inline bool kpatch_cve_2020_10767_match_option(const char *arg, int arglen, const char *opt)
++{
++	int len = strlen(opt);
++
++	return len == arglen && !strncmp(arg, opt, len);
++}
++
++static enum spectre_v2_mitigation_cmd kpatch_cve_2020_10767_spectre_v2_parse_cmdline(void)
++{
++	enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
++	char arg[20];
++	int ret, i;
++
++	if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
++	    cpu_mitigations_off())
++		return SPECTRE_V2_CMD_NONE;
++
++	ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
++	if (ret < 0)
++		return SPECTRE_V2_CMD_AUTO;
++
++	for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
++		if (!kpatch_cve_2020_10767_match_option(arg, ret, mitigation_options[i].option))
++			continue;
++		cmd = mitigation_options[i].cmd;
++		break;
++	}
++
++	if (i >= ARRAY_SIZE(mitigation_options)) {
++		return SPECTRE_V2_CMD_AUTO;
++	}
++
++	if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||
++	     cmd == SPECTRE_V2_CMD_RETPOLINE_AMD ||
++	     cmd == SPECTRE_V2_CMD_RETPOLINE_GENERIC) &&
++	    !IS_ENABLED(CONFIG_RETPOLINE)) {
++		return SPECTRE_V2_CMD_AUTO;
++	}
++
++	if (cmd == SPECTRE_V2_CMD_RETPOLINE_AMD &&
++	    boot_cpu_data.x86_vendor != X86_VENDOR_AMD) {
++		return SPECTRE_V2_CMD_AUTO;
++	}
++
++	return cmd;
++}
++
++static enum spectre_v2_user_cmd
++kpatch_cve_2020_10767_spectre_v2_parse_user_cmdline(enum spectre_v2_mitigation_cmd v2_cmd)
++{
++	char arg[20];
++	int ret, i;
++
++	switch (v2_cmd) {
++	case SPECTRE_V2_CMD_NONE:
++		return SPECTRE_V2_USER_CMD_NONE;
++	case SPECTRE_V2_CMD_FORCE:
++		return SPECTRE_V2_USER_CMD_FORCE;
++	default:
++		break;
++	}
++
++	ret = cmdline_find_option(boot_command_line, "spectre_v2_user",
++				  arg, sizeof(arg));
++	if (ret < 0)
++		return SPECTRE_V2_USER_CMD_AUTO;
++
++	for (i = 0; i < ARRAY_SIZE(v2_user_options); i++) {
++		if (kpatch_cve_2020_10767_match_option(arg, ret, v2_user_options[i].option)) {
++			return v2_user_options[i].cmd;
++		}
++	}
++
++	return SPECTRE_V2_USER_CMD_AUTO;
++}
++
++static void kpatch_cve_2020_10767_spectre_v2_user_select_mitigation(void)
++{
++	enum spectre_v2_mitigation_cmd v2_cmd = kpatch_cve_2020_10767_spectre_v2_parse_cmdline();
++	enum spectre_v2_user_mitigation mode = SPECTRE_V2_USER_NONE;
++	enum spectre_v2_user_cmd cmd;
++
++	if (!boot_cpu_has(X86_FEATURE_IBPB))
++		return;
++
++	cmd = kpatch_cve_2020_10767_spectre_v2_parse_user_cmdline(v2_cmd);
++	switch (cmd) {
++	case SPECTRE_V2_USER_CMD_NONE:
++		return;
++	case SPECTRE_V2_USER_CMD_FORCE:
++		mode = SPECTRE_V2_USER_STRICT;
++		break;
++	case SPECTRE_V2_USER_CMD_PRCTL:
++	case SPECTRE_V2_USER_CMD_PRCTL_IBPB:
++		mode = SPECTRE_V2_USER_PRCTL;
++		break;
++	case SPECTRE_V2_USER_CMD_AUTO:
++	case SPECTRE_V2_USER_CMD_SECCOMP:
++	case SPECTRE_V2_USER_CMD_SECCOMP_IBPB:
++		if (IS_ENABLED(CONFIG_SECCOMP))
++			mode = SPECTRE_V2_USER_SECCOMP;
++		else
++			mode = SPECTRE_V2_USER_PRCTL;
++		break;
++	}
++
++	spectre_v2_user_ibpb = mode;
++}
++
++#include "kpatch-macros.h"
++
++static int kpatch_cve_2020_10767_pre_patch_callback(struct klp_object *obj)
++{
++	kpatch_cve_2020_10767_spectre_v2_user_select_mitigation();
++	return 0;
++}
++KPATCH_PRE_PATCH_CALLBACK(kpatch_cve_2020_10767_pre_patch_callback);
+-- 
+2.21.3
+
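For readers unfamiliar with the mechanism used at the end of the patch above: kpatch-macros.h wires such hooks into the generated livepatch module, and the general shape of a pre-patch hook looks like this (a generic sketch built against kernel headers, not the shipped callback):

/* Generic shape of a kpatch pre-patch hook (sketch; builds as part of a
 * kpatch patch source, where kpatch-macros.h is on the include path).
 * The callback runs once per patched object before any function is
 * redirected, so it is the right place to initialize state the new code
 * depends on. Here the CVE fix uses it to compute spectre_v2_user_ibpb
 * before the patched ib_prctl_set()/ib_prctl_get() ever read it. */
#include <linux/livepatch.h>
#include "kpatch-macros.h"

static int my_pre_patch_callback(struct klp_object *obj)
{
	/* Derive any global state the patched functions rely on.
	 * Returning non-zero aborts loading the live patch. */
	return 0;
}
KPATCH_PRE_PATCH_CALLBACK(my_pre_patch_callback);

This matters because a live patch cannot rerun boot-time init code: spectre_v2_user_select_mitigation() ran long ago, so the new spectre_v2_user_ibpb variable must be populated by the callback at patch-load time instead.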
diff --git a/SOURCES/x86-speculation-Prevent-rogue-cross-process-SSBD-shu.patch b/SOURCES/x86-speculation-Prevent-rogue-cross-process-SSBD-shu.patch
new file mode 100644
index 0000000..c4adb7f
--- /dev/null
+++ b/SOURCES/x86-speculation-Prevent-rogue-cross-process-SSBD-shu.patch
@@ -0,0 +1,131 @@
+From 9479630a09a7484c5b35a5c5eb89e6971d528d9e Mon Sep 17 00:00:00 2001
+From: Julien Thierry
+Date: Fri, 19 Jun 2020 08:13:25 +0100
+Subject: [PATCH] x86/speculation: Prevent rogue cross-process SSBD shutdown
+
+Kernels:
+4.18.0-193.el8
+4.18.0-193.1.2.el8_2
+4.18.0-193.6.3.el8_2
+
+Changes since last build:
+arches: x86_64
+process.o: changed function: __switch_to_xtra
+process.o: changed function: speculation_ctrl_update
+---------------------------
+
+Modifications:
+switch_to_cond_stibp jump label fix
+
+commit e771c60266443edd15f3ebd0866a1ef6f070ebdc
+Author: Waiman Long
+Date: Thu Jun 18 13:40:39 2020 +0100
+
+    x86/speculation: Prevent rogue cross-process SSBD shutdown
+
+    Bugzilla: https://bugzilla.redhat.com/show_bug.cgi?id=1847357
+    CVE: CVE-2020-10766
+
+    commit dbbe2ad02e9df26e372f38cc3e70dab9222c832e
+    Author: Anthony Steinhauser
+    Date: Sun, 5 Jan 2020 12:19:43 -0800
+
+        x86/speculation: Prevent rogue cross-process SSBD shutdown
+
+        On context switch the changes of TIF_SSBD and TIF_SPEC_IB are evaluated
+        to adjust the mitigations accordingly. This is optimized to avoid the
+        expensive MSR write if not needed.
+
+        This optimization is buggy and allows an attacker to shut down the SSBD
+        protection of a victim process.
+
+        The update logic reads the cached base value for the speculation control
+        MSR, which has neither the SSBD nor the STIBP bit set. It then ORs the
+        SSBD bit in only when TIF_SSBD differs and requests the MSR update.
+
+        That means if TIF_SSBD of the previous and next task are the same, the
+        base value is not updated, even if TIF_SSBD is set, and the MSR write is
+        not requested.
+
+        Subsequently, if the TIF_SPEC_IB bit differs, the STIBP bit is updated
+        in the base value and the MSR is written with a wrong SSBD value.
+
+        This was introduced when the per task/process conditional STIBP
+        switching was added on top of the existing SSBD switching.
+
+        It is exploitable if the attacker creates a process which enforces SSBD
+        and has the contrary value of STIBP than the victim process (i.e. if the
+        victim process enforces STIBP, the attacker process must not enforce it;
+        if the victim process does not enforce STIBP, the attacker process must
+        enforce it) and schedules it on the same core as the victim process. If
+        the victim runs after the attacker, the victim becomes vulnerable to
+        Spectre V4.
+
+        To fix this, update the MSR value independent of the TIF_SSBD difference
+        and dependent on the SSBD mitigation method available. This ensures that
+        a subsequent STIBP-initiated MSR write has the correct state of SSBD.
+
+        [ tglx: Handle X86_FEATURE_VIRT_SSBD and X86_FEATURE_LS_CFG_SSBD correctly
+          and massaged changelog ]
+
+        Fixes: 5bfbe3ad5840 ("x86/speculation: Prepare for per task indirect branch speculation control")
+        Signed-off-by: Anthony Steinhauser
+        Signed-off-by: Thomas Gleixner
+        Cc: stable@vger.kernel.org
+
+    Signed-off-by: Waiman Long
+
+Signed-off-by: Julien Thierry
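The gating flaw is easy to model in ordinary user-space C. The sketch below uses stand-in constants (not the kernel's TIF_* or MSR bit values) to show how the old logic drops the SSBD bit when only the STIBP state changes:

/* ssbd_model.c: sketch of the old vs. new MSR update gating. Task A
 * (SSBD only) switches to task B (SSBD + STIBP): TIF_SSBD does not
 * differ, so the old logic never ORs the SSBD bit into 'msr', yet the
 * TIF_SPEC_IB difference triggers a write, clearing SSBD for task B. */
#include <stdio.h>

#define TIF_SSBD    (1u << 0)   /* stand-ins, not the kernel's values */
#define TIF_SPEC_IB (1u << 1)
#define SSBD_BIT    (1u << 2)
#define STIBP_BIT   (1u << 3)

static unsigned int update(unsigned int tifp, unsigned int tifn, int fixed)
{
	unsigned int tif_diff = tifp ^ tifn, msr = 0;
	int updmsr = 0;

	if (fixed) {                         /* new: value independent of diff */
		updmsr |= !!(tif_diff & TIF_SSBD);
		msr |= (tifn & TIF_SSBD) ? SSBD_BIT : 0;
	} else if (tif_diff & TIF_SSBD) {    /* old: value set only on change */
		msr |= (tifn & TIF_SSBD) ? SSBD_BIT : 0;
		updmsr = 1;
	}
	updmsr |= !!(tif_diff & TIF_SPEC_IB);
	msr |= (tifn & TIF_SPEC_IB) ? STIBP_BIT : 0;
	return updmsr ? msr : 0;             /* 0 means no MSR write issued */
}

int main(void)
{
	unsigned int a = TIF_SSBD, b = TIF_SSBD | TIF_SPEC_IB;

	printf("old write: 0x%x (SSBD lost)\n", update(a, b, 0));
	printf("new write: 0x%x (SSBD kept)\n", update(a, b, 1));
	return 0;
}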
+---
+ arch/x86/kernel/process.c | 30 +++++++++++-------------------
+ 1 file changed, 11 insertions(+), 19 deletions(-)
+
+diff --git a/arch/x86/kernel/process.c b/arch/x86/kernel/process.c
+index e5c5b1d724ab..9410134a38a8 100644
+--- a/arch/x86/kernel/process.c
++++ b/arch/x86/kernel/process.c
+@@ -431,30 +431,22 @@ static __always_inline void __speculation_ctrl_update(unsigned long tifp,
+ 
+ 	lockdep_assert_irqs_disabled();
+ 
+-	/*
+-	 * If TIF_SSBD is different, select the proper mitigation
+-	 * method. Note that if SSBD mitigation is disabled or permanentely
+-	 * enabled this branch can't be taken because nothing can set
+-	 * TIF_SSBD.
+-	 */
+-	if (tif_diff & _TIF_SSBD) {
+-		if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++	/* Handle change of TIF_SSBD depending on the mitigation method. */
++	if (static_cpu_has(X86_FEATURE_VIRT_SSBD)) {
++		if (tif_diff & _TIF_SSBD)
+ 			amd_set_ssb_virt_state(tifn);
+-		} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++	} else if (static_cpu_has(X86_FEATURE_LS_CFG_SSBD)) {
++		if (tif_diff & _TIF_SSBD)
+ 			amd_set_core_ssb_state(tifn);
+-		} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
+-			   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
+-			msr |= ssbd_tif_to_spec_ctrl(tifn);
+-			updmsr = true;
+-		}
++	} else if (static_cpu_has(X86_FEATURE_SPEC_CTRL_SSBD) ||
++		   static_cpu_has(X86_FEATURE_AMD_SSBD)) {
++		updmsr |= !!(tif_diff & _TIF_SSBD);
++		msr |= ssbd_tif_to_spec_ctrl(tifn);
+ 	}
+ 
+-	/*
+-	 * Only evaluate TIF_SPEC_IB if conditional STIBP is enabled,
+-	 * otherwise avoid the MSR write.
+-	 */
++	/* Only evaluate TIF_SPEC_IB if conditional STIBP is enabled. */
+ 	if (IS_ENABLED(CONFIG_SMP) &&
+-	    static_branch_unlikely(&switch_to_cond_stibp)) {
++	    static_key_enabled(&switch_to_cond_stibp)) {
+ 		updmsr |= !!(tif_diff & _TIF_SPEC_IB);
+ 		msr |= stibp_tif_to_spec_ctrl(tifn);
+ 	}
+-- 
+2.21.3
+
diff --git a/SPECS/kpatch-patch.spec b/SPECS/kpatch-patch.spec
index 8d2e197..d62dba1 100644
--- a/SPECS/kpatch-patch.spec
+++ b/SPECS/kpatch-patch.spec
@@ -6,7 +6,7 @@
 %define kernel_ver 4.18.0-193.el8
 %define kpatch_ver 0.9.1
 %define rpm_ver 1
-%define rpm_rel 3
+%define rpm_rel 5
 
 %if !%{empty_package}
 # Patch sources below. DO NOT REMOVE THIS LINE.
@@ -16,6 +16,13 @@
 Source100: v2-netlabel-cope-with-NULL-catmap.patch
 #
 # https://bugzilla.redhat.com/1835531
 Source101: block-block-bfq-fix-use-after-free-in-bfq_idle_slice.patch
+#
+# https://bugzilla.redhat.com/1847371
+Source102: x86-speculation-Prevent-rogue-cross-process-SSBD-shu.patch
+#
+# https://bugzilla.redhat.com/1847404
+# https://bugzilla.redhat.com/1847402
+Source103: kpatch-adapted-patch-for-CVE-2020-10767-and-CVE-2020-10768.patch
 # End of patch sources. DO NOT REMOVE THIS LINE.
 %endif
@@ -35,7 +42,7 @@
 Summary: Live kernel patching module for kernel-%{kernel_ver_arch}
 Group: System Environment/Kernel
 License: GPLv2
-ExclusiveArch: x86_64 ppc64le
+ExclusiveArch: x86_64
 
 Conflicts: %{name} < %{version}-%{release}
@@ -71,7 +78,7 @@
 Source: https://github.com/dynup/kpatch/archive/v%{kpatch_ver}.tar.gz
 Source10: kernel-%{kernel_ver}.src.rpm
 
 # kpatch-build patches
-# Patch1: ZZZ.patch
+Patch1: create-diff-object-ignore-altrinstr-aux-backport.patch
 
 %global _dupsign_opts --keyname=rhelkpatch1
@@ -155,6 +162,12 @@
 It is only a method to subscribe to the kpatch stream for kernel-%{kernel_ver}.
 %endif
 
 %changelog
+* Fri Jun 26 2020 Artem Savkov [1-5.el8]
+- Indirect Branch Prediction Barrier is force-disabled when STIBP is unavailable or enhanced IBRS is available. Indirect branch speculation can be enabled after it was force-disabled by the PR_SPEC_FORCE_DISABLE prctl command. [1847404] [1847402] {CVE-2020-10767} {CVE-2020-10768}
+
+* Thu Jun 25 2020 Julien Thierry [1-4.el8]
+- Prevent rogue cross-process SSBD shutdown [1847371] {CVE-2020-10766}
+
 * Mon May 25 2020 Artem Savkov [1-3.el8]
 - use-after-free in block/bfq-iosched.c related to bfq_idle_slice_timer_body [1835531] {CVE-2020-12657}