Merge tag 'x86_urgent_for_v6.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
Pull x86 fixes from Borislav Petkov:
- Convert the SSB mitigation to the attack vector controls framework, a
conversion which was forgotten at the time
- Prevent the CPUID topology hierarchy detection on AMD from
overwriting the correct initial APIC ID
- Fix the AMD microcode loader to handle the case of a machine shipping
without microcode in the BIOS
- Correct the Pentium 4 model range which has a constant TSC
* tag 'x86_urgent_for_v6.17_rc4' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
x86/bugs: Add attack vector controls for SSB
x86/cpu/topology: Use initial APIC ID from XTOPOLOGY leaf on AMD/HYGON
x86/microcode/AMD: Handle the case of no BIOS microcode
x86/cpu/intel: Fix the constant_tsc model check for Pentium 4
diff --git a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
index 6dd0800..5964901 100644
--- a/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
+++ b/Documentation/admin-guide/hw-vuln/attack_vector_controls.rst
@@ -215,7 +215,7 @@
Spectre_v2_user X X * (Note 1)
SRBDS X X X X
SRSO X X X X
-SSB (Note 4)
+SSB X
TAA X X X X * (Note 2)
TSA X X X X
=============== ============== ============ ============= ============== ============ ========
@@ -229,9 +229,6 @@
3 -- Disables SMT if cross-thread mitigations are fully enabled, the CPU is
vulnerable, and STIBP is not supported
- 4 -- Speculative store bypass is always enabled by default (no kernel
- mitigation applied) unless overridden with spec_store_bypass_disable option
-
When an attack-vector is disabled, all mitigations for the vulnerabilities
listed in the above table are disabled, unless mitigation is required for a
different enabled attack-vector or a mitigation is explicitly selected via a
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 49ef1b8..af838b8 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -416,6 +416,10 @@ static bool __init should_mitigate_vuln(unsigned int bug)
cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER) ||
cpu_attack_vector_mitigated(CPU_MITIGATE_GUEST_GUEST) ||
(smt_mitigations != SMT_MITIGATIONS_OFF);
+
+ case X86_BUG_SPEC_STORE_BYPASS:
+ return cpu_attack_vector_mitigated(CPU_MITIGATE_USER_USER);
+
default:
WARN(1, "Unknown bug %x\n", bug);
return false;
@@ -2710,6 +2714,11 @@ static void __init ssb_select_mitigation(void)
ssb_mode = SPEC_STORE_BYPASS_DISABLE;
break;
case SPEC_STORE_BYPASS_CMD_AUTO:
+ if (should_mitigate_vuln(X86_BUG_SPEC_STORE_BYPASS))
+ ssb_mode = SPEC_STORE_BYPASS_PRCTL;
+ else
+ ssb_mode = SPEC_STORE_BYPASS_NONE;
+ break;
case SPEC_STORE_BYPASS_CMD_PRCTL:
ssb_mode = SPEC_STORE_BYPASS_PRCTL;
break;
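
With attack vector controls in place, the AUTO command now selects the prctl-based mode only when the user-to-user attack vector is being mitigated; otherwise SSB is left unmitigated. Under the prctl mode, individual tasks opt into the mitigation themselves. The snippet below is not part of the patch, just a minimal userspace sketch of that opt-in using the documented PR_GET_SPECULATION_CTRL / PR_SET_SPECULATION_CTRL interface; error handling is deliberately thin.

#include <stdio.h>
#include <sys/prctl.h>
#include <linux/prctl.h>

int main(void)
{
	/*
	 * Query the current SSB state for this task; the return value is a
	 * bitmask such as PR_SPEC_PRCTL | PR_SPEC_ENABLE, or -1 on error.
	 */
	long state = prctl(PR_GET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS, 0, 0, 0);

	if (state < 0)
		perror("PR_GET_SPECULATION_CTRL");

	/* Opt this task into the SSB mitigation (disable the store bypass). */
	if (prctl(PR_SET_SPECULATION_CTRL, PR_SPEC_STORE_BYPASS,
		  PR_SPEC_DISABLE, 0, 0) < 0)
		perror("PR_SET_SPECULATION_CTRL");

	printf("previous SSB state: 0x%lx\n", state);
	return 0;
}
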
diff --git a/arch/x86/kernel/cpu/intel.c b/arch/x86/kernel/cpu/intel.c
index 076eaa4..98ae4c3 100644
--- a/arch/x86/kernel/cpu/intel.c
+++ b/arch/x86/kernel/cpu/intel.c
@@ -262,7 +262,7 @@ static void early_init_intel(struct cpuinfo_x86 *c)
if (c->x86_power & (1 << 8)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
set_cpu_cap(c, X86_FEATURE_NONSTOP_TSC);
- } else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_WILLAMETTE) ||
+ } else if ((c->x86_vfm >= INTEL_P4_PRESCOTT && c->x86_vfm <= INTEL_P4_CEDARMILL) ||
(c->x86_vfm >= INTEL_CORE_YONAH && c->x86_vfm <= INTEL_IVYBRIDGE)) {
set_cpu_cap(c, X86_FEATURE_CONSTANT_TSC);
}
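
The x86_vfm value packs vendor, family and model into a single integer with the model in the low bits, so a numeric range check like the one above is a model-range check within one vendor/family. The old upper bound, INTEL_P4_WILLAMETTE (family 15, model 1), sits numerically below INTEL_P4_PRESCOTT (family 15, model 3), so the branch could never match; INTEL_P4_CEDARMILL (family 15, model 6) makes the range cover Prescott through Cedar Mill. A standalone sketch of that ordering follows; the VFM() macro and bit positions are illustrative stand-ins for the kernel's VFM_MAKE()/IFM() helpers, not the real header.

#include <stdio.h>

/*
 * Illustrative packing only: model in the low byte, family above it,
 * vendor above that, so comparisons order by model within one family.
 */
#define VFM(vendor, family, model)	(((vendor) << 16) | ((family) << 8) | (model))

#define P4_WILLAMETTE	VFM(0, 15, 1)	/* family 15, model 1 */
#define P4_PRESCOTT	VFM(0, 15, 3)	/* family 15, model 3 */
#define P4_CEDARMILL	VFM(0, 15, 6)	/* family 15, model 6 */

int main(void)
{
	unsigned int vfm = P4_PRESCOTT;

	/* Old check: the upper bound is numerically below the lower bound. */
	printf("old range matches: %d\n",
	       vfm >= P4_PRESCOTT && vfm <= P4_WILLAMETTE);

	/* Fixed check: Prescott (model 3) through Cedar Mill (model 6). */
	printf("new range matches: %d\n",
	       vfm >= P4_PRESCOTT && vfm <= P4_CEDARMILL);

	return 0;
}
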
diff --git a/arch/x86/kernel/cpu/microcode/amd.c b/arch/x86/kernel/cpu/microcode/amd.c
index 097e393..514f633 100644
--- a/arch/x86/kernel/cpu/microcode/amd.c
+++ b/arch/x86/kernel/cpu/microcode/amd.c
@@ -171,8 +171,28 @@ static int cmp_id(const void *key, const void *elem)
return 1;
}
+static u32 cpuid_to_ucode_rev(unsigned int val)
+{
+ union zen_patch_rev p = {};
+ union cpuid_1_eax c;
+
+ c.full = val;
+
+ p.stepping = c.stepping;
+ p.model = c.model;
+ p.ext_model = c.ext_model;
+ p.ext_fam = c.ext_fam;
+
+ return p.ucode_rev;
+}
+
static bool need_sha_check(u32 cur_rev)
{
+ if (!cur_rev) {
+ cur_rev = cpuid_to_ucode_rev(bsp_cpuid_1_eax);
+ pr_info_once("No current revision, generating the lowest one: 0x%x\n", cur_rev);
+ }
+
switch (cur_rev >> 8) {
case 0x80012: return cur_rev <= 0x800126f; break;
case 0x80082: return cur_rev <= 0x800820f; break;
@@ -749,8 +769,6 @@ static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equi
n.equiv_cpu = equiv_cpu;
n.patch_id = uci->cpu_sig.rev;
- WARN_ON_ONCE(!n.patch_id);
-
list_for_each_entry(p, &microcode_cache, plist)
if (patch_cpus_equivalent(p, &n, false))
return p;
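
need_sha_check() now synthesizes the lowest possible revision from CPUID Fn0000_0001_EAX when the BIOS left the current revision at 0. The sketch below only decodes the architectural EAX fields that cpuid_to_ucode_rev() starts from; the zen_patch_rev packing itself is a kernel-internal union and is not reproduced here, and the sample EAX value is merely an example in the Zen 3 range, not a real BSP reading.

#include <stdio.h>

struct cpuid_1_eax_fields {
	unsigned int stepping, model, family, ext_model, ext_fam;
};

static struct cpuid_1_eax_fields decode_cpuid_1_eax(unsigned int eax)
{
	struct cpuid_1_eax_fields f = {
		.stepping  = eax & 0xf,			/* bits  3:0  */
		.model     = (eax >> 4) & 0xf,		/* bits  7:4  */
		.family    = (eax >> 8) & 0xf,		/* bits 11:8  */
		.ext_model = (eax >> 16) & 0xf,		/* bits 19:16 */
		.ext_fam   = (eax >> 20) & 0xff,	/* bits 27:20 */
	};

	return f;
}

int main(void)
{
	struct cpuid_1_eax_fields f = decode_cpuid_1_eax(0x00a20f12);

	printf("fam 0x%x ext_fam 0x%x model 0x%x ext_model 0x%x stepping 0x%x\n",
	       f.family, f.ext_fam, f.model, f.ext_model, f.stepping);

	return 0;
}
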
diff --git a/arch/x86/kernel/cpu/topology_amd.c b/arch/x86/kernel/cpu/topology_amd.c
index 843b165..827dd0d 100644
--- a/arch/x86/kernel/cpu/topology_amd.c
+++ b/arch/x86/kernel/cpu/topology_amd.c
@@ -81,20 +81,25 @@ static bool parse_8000_001e(struct topo_scan *tscan, bool has_topoext)
cpuid_leaf(0x8000001e, &leaf);
- tscan->c->topo.initial_apicid = leaf.ext_apic_id;
-
/*
- * If leaf 0xb is available, then the domain shifts are set
- * already and nothing to do here. Only valid for family >= 0x17.
+ * If leaf 0xb/0x26 is available, then the APIC ID and the domain
+ * shifts are set already.
*/
- if (!has_topoext && tscan->c->x86 >= 0x17) {
- /*
- * Leaf 0x80000008 set the CORE domain shift already.
- * Update the SMT domain, but do not propagate it.
- */
- unsigned int nthreads = leaf.core_nthreads + 1;
+ if (!has_topoext) {
+ tscan->c->topo.initial_apicid = leaf.ext_apic_id;
- topology_update_dom(tscan, TOPO_SMT_DOMAIN, get_count_order(nthreads), nthreads);
+ /*
+ * Leaf 0x80000008 sets the CORE domain shift but not the
+ * SMT domain shift. On CPUs with family >= 0x17, there
+ * might be hyperthreads.
+ */
+ if (tscan->c->x86 >= 0x17) {
+ /* Update the SMT domain, but do not propagate it. */
+ unsigned int nthreads = leaf.core_nthreads + 1;
+
+ topology_update_dom(tscan, TOPO_SMT_DOMAIN,
+ get_count_order(nthreads), nthreads);
+ }
}
store_node(tscan, leaf.nnodes_per_socket + 1, leaf.node_id);
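
When the extended topology leaf is absent, leaf 0x8000001e now supplies the initial APIC ID, and on family 0x17 and later the SMT domain shift is derived from the per-core thread count. A rough standalone illustration of that derivation follows; count_order() is only a stand-in for the kernel's get_count_order() helper.

#include <stdio.h>

/*
 * Stand-in for get_count_order(): the number of APIC ID bits needed to
 * cover n entries, i.e. the order of the next power of two >= n.
 */
static unsigned int count_order(unsigned int n)
{
	unsigned int order = 0;

	while ((1u << order) < n)
		order++;

	return order;
}

int main(void)
{
	unsigned int core_nthreads = 1;			/* leaf field: threads per core - 1 */
	unsigned int nthreads = core_nthreads + 1;	/* two threads per core (SMT) */

	/* One APIC ID bit is consumed by the SMT domain in this case. */
	printf("SMT domain shift: %u\n", count_order(nthreads));

	return 0;
}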