Commit 0c47d892 authored by Henrik Riomar, committed by Leonardo Arena

main/xen: XSA-297

CVE-2018-12126
CVE-2018-12127
CVE-2018-12130
CVE-2019-11091
Signed-off-by: Leonardo Arena <rnalrd@alpinelinux.org>
parent 9f65692a
......@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.9.4
pkgrel=0
pkgrel=1
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64 armhf aarch64"
......@@ -137,6 +137,11 @@ options="!strip"
# - CVE-2018-19965 XSA-279
# - CVE-2018-19966 XSA-280
# - CVE-2018-19967 XSA-282
# 4.9.4-r1:
# - CVE-2018-12126 XSA-297
# - CVE-2018-12127 XSA-297
# - CVE-2018-12130 XSA-297
# - CVE-2019-11091 XSA-297
case "$CARCH" in
x86*)
......@@ -201,6 +206,15 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
xenqemu-xattr-size-max.patch
xsa297-4.9-1.patch
xsa297-4.9-2.patch
xsa297-4.9-3.patch
xsa297-4.9-4.patch
xsa297-4.9-5.patch
xsa297-4.9-6.patch
xsa297-4.9-7.patch
xsa297-4.9-8.patch
xenstored.initd
xenstored.confd
xenconsoled.initd
......@@ -453,6 +467,14 @@ a3197d9c2455983554610031702ea95dc31f1b375b8c1291207d33c9e6114c6928417b4c8138cb53
e76816c6ad0e91dc5f81947f266da3429b20e6d976c3e8c41202c6179532eec878a3f0913921ef3ac853c5dbad8082da3c9cd53b65081910516feb492577b7fc xen-fd-is-file.c
69dfa60628ca838678862383528654ecbdf4269cbb5c9cfb6b84d976202a8dea85d711aa65a52fa1b477fb0b30604ca70cf1337192d6fb9388a08bbe7fe56077 xenstore_client_transaction_fix.patch
2094ea964fa610b2bf72fd2c7ede7e954899a75c0f5b08030cf1d74460fb759ade84866176e32f8fe29c921dfdc6dafd2b31e23ab9b0a3874d3dceeabdd1913b xenqemu-xattr-size-max.patch
182a835b58332742cfa2cf59c1c4a2286daa44d64a28e88fd434137ef4414d5a07968b24f49ab710e6424ad73f50e75626de2e8b5e9b9f22976a5b78dfd92d39 xsa297-4.9-1.patch
11395236b949e0157a52cfc7336acbe95d4f383244d4e10662dedc9d710ffa5812dd972b98e09de2e3f0e96bf33a7124e0cb4d4adfccc7146560d143335d6cfb xsa297-4.9-2.patch
5b1d983b1bc602481da6df48ff547c2d0cb44f2b1e13d2f7d1af5e721d6557c46e9cc1463c3ed7755c33fc74c9329972e10f7a5d4af43acecb1c8d8ec95e8c9d xsa297-4.9-3.patch
92e403bccb9bb43a46029469d60615308e66585ae3f9f55f86cc16d23cec2f94b188c56a7d8aa8039f7610199bce8b9d83928737a67f6cf0a7f1ab9ee64e11be xsa297-4.9-4.patch
de773e10d1c9fa223f89c030a2095cf476852c5339329a972780908d916a25c3687e0ac6eaf491f14d93679589c9c7d4ba287855d27fef2a82cca5313e0a9597 xsa297-4.9-5.patch
f785ec915c3c62400dcaa6818fd12686f1adc3575a8c5646b716bd763b239ffa08359af7d7a05575d33b3c42d0a295556f6dfe9f6ca1a43319f17cedf5f81c60 xsa297-4.9-6.patch
d149809f16f5959c496cd8b027a7bc9774e1a90ea66ce0ffc93cd23aec671b8e0c50ff7d37776b9f3f1d4b93c6216b31907413e354018298441233acf0ba4e8f xsa297-4.9-7.patch
b329d9c8b2c0e8a5fbdcf2b9952b02c7958e2c9fceb4dd2a9636bb3355f3209c6f1b7adcb20a1dd7c86dc7ca74558641dda8891e7458251ca2ff2634dae76096 xsa297-4.9-8.patch
52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50 xenstored.initd
093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0 xenstored.confd
3c86ed48fbee0af4051c65c4a3893f131fa66e47bf083caf20c9b6aa4b63fdead8832f84a58d0e27964bc49ec8397251b34e5be5c212c139f556916dc8da9523 xenconsoled.initd
......
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/tsx: Implement controls for RTM force-abort mode
The CPUID bit and MSR are deliberately not exposed to guests, because they
won't exist on newer processors. As vPMU isn't security supported, the
misbehaviour of PCR3 isn't expected to impact production deployments.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
master commit: 6be613f29b4205349275d24367bd4c82fb2960dd
master date: 2019-03-12 17:05:21 +0000
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 6009450..10ed971 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1885,7 +1885,7 @@ Use Virtual Processor ID support if available. This prevents the need for TLB
flushes on VM entry and exit, increasing performance.
### vpmu
-> `= ( <boolean> | { bts | ipc | arch [, ...] } )`
+> `= ( <boolean> | { bts | ipc | arch | rtm-abort=<bool> [, ...] } )`
> Default: `off`
@@ -1911,6 +1911,21 @@ in the Pre-Defined Architectural Performance Events table from the Intel 64
and IA-32 Architectures Software Developer's Manual, Volume 3B, System
Programming Guide, Part 2.
+vpmu=rtm-abort controls a trade-off between working Restricted Transactional
+Memory, and working performance counters.
+
+All processors released to date (Q1 2019) supporting Transactional Memory
+Extensions suffer an erratum which has been addressed in microcode.
+
+Processors based on the Skylake microarchitecture with up-to-date
+microcode internally use performance counter 3 to work around the erratum.
+A consequence is that the counter gets reprogrammed whenever an `XBEGIN`
+instruction is executed.
+
+An alternative mode exists where PCR3 behaves as before, at the cost of
+`XBEGIN` unconditionally aborting. Enabling `rtm-abort` mode will
+activate this alternative mode.
+
If a boolean is not used, combinations of flags are allowed, comma separated.
For example, vpmu=arch,bts.
diff --git a/tools/misc/xen-cpuid.c b/tools/misc/xen-cpuid.c
index d6e60be..702c072 100644
--- a/tools/misc/xen-cpuid.c
+++ b/tools/misc/xen-cpuid.c
@@ -157,7 +157,11 @@ static const char *str_7d0[32] =
[ 2] = "avx512_4vnniw", [ 3] = "avx512_4fmaps",
- [4 ... 25] = "REZ",
+ [4 ... 11] = "REZ",
+
+ [12] = "REZ", [13] = "tsx-force-abort",
+
+ [14 ... 25] = "REZ",
[26] = "ibrsb", [27] = "stibp",
[28] = "l1d_flush", [29] = "arch_caps",
diff --git a/xen/arch/x86/cpu/intel.c b/xen/arch/x86/cpu/intel.c
index a7c0d49..449273c 100644
--- a/xen/arch/x86/cpu/intel.c
+++ b/xen/arch/x86/cpu/intel.c
@@ -356,6 +356,9 @@ static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
if (c->x86 == 6 && cpu_has_clflush &&
(c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
__set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
+
+ if (cpu_has_tsx_force_abort && opt_rtm_abort)
+ wrmsrl(MSR_TSX_FORCE_ABORT, TSX_FORCE_ABORT_RTM);
}
diff --git a/xen/arch/x86/cpu/vpmu.c b/xen/arch/x86/cpu/vpmu.c
index 40da7e3..8b7a7a9 100644
--- a/xen/arch/x86/cpu/vpmu.c
+++ b/xen/arch/x86/cpu/vpmu.c
@@ -53,6 +53,7 @@ CHECK_pmu_params;
static unsigned int __read_mostly opt_vpmu_enabled;
unsigned int __read_mostly vpmu_mode = XENPMU_MODE_OFF;
unsigned int __read_mostly vpmu_features = 0;
+bool __read_mostly opt_rtm_abort;
static void parse_vpmu_params(char *s);
custom_param("vpmu", parse_vpmu_params);
@@ -63,6 +64,8 @@ static DEFINE_PER_CPU(struct vcpu *, last_vcpu);
static int parse_vpmu_param(char *s, unsigned int len)
{
+ int val;
+
if ( !*s || !len )
return 0;
if ( !strncmp(s, "bts", len) )
@@ -71,6 +74,8 @@ static int parse_vpmu_param(char *s, unsigned int len)
vpmu_features |= XENPMU_FEATURE_IPC_ONLY;
else if ( !strncmp(s, "arch", len) )
vpmu_features |= XENPMU_FEATURE_ARCH_ONLY;
+ else if ( (val = parse_boolean("rtm-abort", s, s + len)) >= 0 )
+ opt_rtm_abort = val;
else
return 1;
return 0;
@@ -97,6 +102,10 @@ static void __init parse_vpmu_params(char *s)
break;
p = sep + 1;
}
+
+ if ( !vpmu_features ) /* rtm-abort doesn't imply vpmu=1 */
+ break;
+
/* fall through */
case 1:
/* Default VPMU mode */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 0cbb0f5..346f1cf 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3425,6 +3425,8 @@ int hvm_msr_read_intercept(unsigned int msr, uint64_t *msr_content)
case MSR_PRED_CMD:
case MSR_FLUSH_CMD:
/* Write-only */
+ case MSR_TSX_FORCE_ABORT:
+ /* Not offered to guests. */
goto gp_fault;
case MSR_SPEC_CTRL:
@@ -3647,6 +3649,8 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
case MSR_ARCH_CAPABILITIES:
/* Read-only */
+ case MSR_TSX_FORCE_ABORT:
+ /* Not offered to guests. */
goto gp_fault;
case MSR_AMD64_NB_CFG:
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 2f9f75f..8635f70 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2640,6 +2640,8 @@ static int priv_op_read_msr(unsigned int reg, uint64_t *val,
case MSR_PRED_CMD:
case MSR_FLUSH_CMD:
/* Write-only */
+ case MSR_TSX_FORCE_ABORT:
+ /* Not offered to guests. */
break;
case MSR_SPEC_CTRL:
@@ -2861,6 +2863,8 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
case MSR_INTEL_PLATFORM_INFO:
case MSR_ARCH_CAPABILITIES:
/* The MSR is read-only. */
+ case MSR_TSX_FORCE_ABORT:
+ /* Not offered to guests. */
break;
case MSR_SPEC_CTRL:
diff --git a/xen/include/asm-x86/cpufeature.h b/xen/include/asm-x86/cpufeature.h
index 5043231..b10d8ef 100644
--- a/xen/include/asm-x86/cpufeature.h
+++ b/xen/include/asm-x86/cpufeature.h
@@ -100,6 +100,9 @@
/* CPUID level 0x80000007.edx */
#define cpu_has_itsc boot_cpu_has(X86_FEATURE_ITSC)
+/* CPUID level 0x00000007:0.edx */
+#define cpu_has_tsx_force_abort boot_cpu_has(X86_FEATURE_TSX_FORCE_ABORT)
+
/* Synthesized. */
#define cpu_has_arch_perfmon boot_cpu_has(X86_FEATURE_ARCH_PERFMON)
#define cpu_has_cpuid_faulting boot_cpu_has(X86_FEATURE_CPUID_FAULTING)
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 17722d2..8dec40e 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -53,6 +53,9 @@
#define MSR_FLUSH_CMD 0x0000010b
#define FLUSH_CMD_L1D (_AC(1, ULL) << 0)
+#define MSR_TSX_FORCE_ABORT 0x0000010f
+#define TSX_FORCE_ABORT_RTM (_AC(1, ULL) << 0)
+
/* Intel MSRs. Some also available on other CPUs */
#define MSR_IA32_PERFCTR0 0x000000c1
#define MSR_IA32_A_PERFCTR0 0x000004c1
diff --git a/xen/include/asm-x86/vpmu.h b/xen/include/asm-x86/vpmu.h
index 5e778ab..1287b9f 100644
--- a/xen/include/asm-x86/vpmu.h
+++ b/xen/include/asm-x86/vpmu.h
@@ -125,6 +125,7 @@ static inline int vpmu_do_rdmsr(unsigned int msr, uint64_t *msr_content)
extern unsigned int vpmu_mode;
extern unsigned int vpmu_features;
+extern bool opt_rtm_abort;
/* Context switch */
static inline void vpmu_switch_from(struct vcpu *prev)
diff --git a/xen/include/public/arch-x86/cpufeatureset.h b/xen/include/public/arch-x86/cpufeatureset.h
index e1a2c4e..33b515e 100644
--- a/xen/include/public/arch-x86/cpufeatureset.h
+++ b/xen/include/public/arch-x86/cpufeatureset.h
@@ -241,6 +241,7 @@ XEN_CPUFEATURE(IBPB, 8*32+12) /*A IBPB support only (no IBRS, used by
/* Intel-defined CPU features, CPUID level 0x00000007:0.edx, word 9 */
XEN_CPUFEATURE(AVX512_4VNNIW, 9*32+ 2) /*A AVX512 Neural Network Instructions */
XEN_CPUFEATURE(AVX512_4FMAPS, 9*32+ 3) /*A AVX512 Multiply Accumulation Single Precision */
+XEN_CPUFEATURE(TSX_FORCE_ABORT, 9*32+13) /* MSR_TSX_FORCE_ABORT.RTM_ABORT */
XEN_CPUFEATURE(IBRSB, 9*32+26) /*A IBRS and IBPB support (used by Intel) */
XEN_CPUFEATURE(STIBP, 9*32+27) /*A STIBP */
XEN_CPUFEATURE(L1D_FLUSH, 9*32+28) /*S MSR_FLUSH_CMD and L1D flush. */
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/spec-ctrl: Reposition the XPTI command line parsing logic
It has ended up in the middle of the mitigation calculation logic. Move it to
be beside the other command line parsing.
No functional change.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 25da6a2..9665ec5 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -206,6 +206,73 @@ static int __init parse_spec_ctrl(char *s)
}
custom_param("spec-ctrl", parse_spec_ctrl);
+int8_t __read_mostly opt_xpti_hwdom = -1;
+int8_t __read_mostly opt_xpti_domu = -1;
+
+static __init void xpti_init_default(uint64_t caps)
+{
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ caps = ARCH_CAPABILITIES_RDCL_NO;
+
+ if ( caps & ARCH_CAPABILITIES_RDCL_NO )
+ {
+ if ( opt_xpti_hwdom < 0 )
+ opt_xpti_hwdom = 0;
+ if ( opt_xpti_domu < 0 )
+ opt_xpti_domu = 0;
+ }
+ else
+ {
+ if ( opt_xpti_hwdom < 0 )
+ opt_xpti_hwdom = 1;
+ if ( opt_xpti_domu < 0 )
+ opt_xpti_domu = 1;
+ }
+}
+
+static __init int parse_xpti(char *s)
+{
+ char *ss;
+ int val, rc = 0;
+
+ /* Interpret 'xpti' alone in its positive boolean form. */
+ if ( *s == '\0' )
+ opt_xpti_hwdom = opt_xpti_domu = 1;
+
+ do {
+ ss = strchr(s, ',');
+ if ( ss )
+ *ss = '\0';
+
+ switch ( parse_bool(s) )
+ {
+ case 0:
+ opt_xpti_hwdom = opt_xpti_domu = 0;
+ break;
+
+ case 1:
+ opt_xpti_hwdom = opt_xpti_domu = 1;
+ break;
+
+ default:
+ if ( !strcmp(s, "default") )
+ opt_xpti_hwdom = opt_xpti_domu = -1;
+ else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
+ opt_xpti_hwdom = val;
+ else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
+ opt_xpti_domu = val;
+ else if ( *s )
+ rc = -EINVAL;
+ break;
+ }
+
+ s = ss + 1;
+ } while ( ss );
+
+ return rc;
+}
+custom_param("xpti", parse_xpti);
+
int8_t __read_mostly opt_pv_l1tf_hwdom = -1;
int8_t __read_mostly opt_pv_l1tf_domu = -1;
@@ -639,73 +706,6 @@ static __init void l1tf_calculations(uint64_t caps)
: (3ul << (paddr_bits - 2))));
}
-int8_t __read_mostly opt_xpti_hwdom = -1;
-int8_t __read_mostly opt_xpti_domu = -1;
-
-static __init void xpti_init_default(uint64_t caps)
-{
- if ( boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
- caps = ARCH_CAPABILITIES_RDCL_NO;
-
- if ( caps & ARCH_CAPABILITIES_RDCL_NO )
- {
- if ( opt_xpti_hwdom < 0 )
- opt_xpti_hwdom = 0;
- if ( opt_xpti_domu < 0 )
- opt_xpti_domu = 0;
- }
- else
- {
- if ( opt_xpti_hwdom < 0 )
- opt_xpti_hwdom = 1;
- if ( opt_xpti_domu < 0 )
- opt_xpti_domu = 1;
- }
-}
-
-static __init int parse_xpti(char *s)
-{
- char *ss;
- int val, rc = 0;
-
- /* Interpret 'xpti' alone in its positive boolean form. */
- if ( *s == '\0' )
- opt_xpti_hwdom = opt_xpti_domu = 1;
-
- do {
- ss = strchr(s, ',');
- if ( ss )
- *ss = '\0';
-
- switch ( parse_bool(s) )
- {
- case 0:
- opt_xpti_hwdom = opt_xpti_domu = 0;
- break;
-
- case 1:
- opt_xpti_hwdom = opt_xpti_domu = 1;
- break;
-
- default:
- if ( !strcmp(s, "default") )
- opt_xpti_hwdom = opt_xpti_domu = -1;
- else if ( (val = parse_boolean("dom0", s, ss)) >= 0 )
- opt_xpti_hwdom = val;
- else if ( (val = parse_boolean("domu", s, ss)) >= 0 )
- opt_xpti_domu = val;
- else if ( *s )
- rc = -EINVAL;
- break;
- }
-
- s = ss + 1;
- } while ( ss );
-
- return rc;
-}
-custom_param("xpti", parse_xpti);
-
void __init init_speculation_mitigations(void)
{
enum ind_thunk thunk = THUNK_DEFAULT;
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/msr: Definitions for MSR_INTEL_CORE_THREAD_COUNT
This is a model specific register which details the current configuration
cores and threads in the package. Because of how Hyperthread and Core
configuration works in firmware, the MSR is de-facto constant and
will remain unchanged until the next system reset.
It is a read only MSR (so unilaterally reject writes), but for now retain its
leaky-on-read properties. Further CPUID/MSR work is required before we can
start virtualising a consistent topology to the guest, and retaining the old
behaviour is the safest course of action.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 346f1cf..0164ae5 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -3647,6 +3647,7 @@ int hvm_msr_write_intercept(unsigned int msr, uint64_t msr_content,
wrmsrl(MSR_FLUSH_CMD, msr_content);
break;
+ case MSR_INTEL_CORE_THREAD_COUNT:
case MSR_ARCH_CAPABILITIES:
/* Read-only */
case MSR_TSX_FORCE_ABORT:
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 8635f70..f3c8705 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2860,6 +2860,7 @@ static int priv_op_write_msr(unsigned int reg, uint64_t val,
wrmsrl(reg, val);
return X86EMUL_OKAY;
+ case MSR_INTEL_CORE_THREAD_COUNT:
case MSR_INTEL_PLATFORM_INFO:
case MSR_ARCH_CAPABILITIES:
/* The MSR is read-only. */
diff --git a/xen/include/asm-x86/msr-index.h b/xen/include/asm-x86/msr-index.h
index 8dec40e..bc72476 100644
--- a/xen/include/asm-x86/msr-index.h
+++ b/xen/include/asm-x86/msr-index.h
@@ -34,6 +34,10 @@
#define EFER_KNOWN_MASK (EFER_SCE | EFER_LME | EFER_LMA | EFER_NX | \
EFER_SVME | EFER_LMSLE | EFER_FFXSE)
+#define MSR_INTEL_CORE_THREAD_COUNT 0x00000035
+#define MSR_CTC_THREAD_MASK 0x0000ffff
+#define MSR_CTC_CORE_MASK 0xffff0000
+
/* Speculation Controls. */
#define MSR_SPEC_CTRL 0x00000048
#define SPEC_CTRL_IBRS (_AC(1, ULL) << 0)
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/boot: Detect the firmware SMT setting correctly on Intel hardware
While boot_cpu_data.x86_num_siblings is an accurate value to use on AMD
hardware, it isn't on Intel when the user has disabled Hyperthreading in the
firmware. As a result, a user which has chosen to disable HT still gets
nagged on L1TF-vulnerable hardware when they haven't chosen an explicit
smt=<bool> setting.
Make use of the largely-undocumented MSR_INTEL_CORE_THREAD_COUNT which in
practice exists since Nehalem, when booting on real hardware. Fall back to
using the ACPI table APIC IDs.
While adjusting this logic, fix a latent bug in amd_get_topology(). The
thread count field in CPUID.0x8000001e.ebx is documented as 8 bits wide,
rather than 2 bits wide.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/arch/x86/cpu/amd.c b/xen/arch/x86/cpu/amd.c
index cb2abda..c61beed 100644
--- a/xen/arch/x86/cpu/amd.c
+++ b/xen/arch/x86/cpu/amd.c
@@ -501,7 +501,7 @@ static void amd_get_topology(struct cpuinfo_x86 *c)
u32 eax, ebx, ecx, edx;
cpuid(0x8000001e, &eax, &ebx, &ecx, &edx);
- c->x86_num_siblings = ((ebx >> 8) & 0x3) + 1;
+ c->x86_num_siblings = ((ebx >> 8) & 0xff) + 1;
if (c->x86 < 0x17)
c->compute_unit_id = ebx & 0xFF;
diff --git a/xen/arch/x86/spec_ctrl.c b/xen/arch/x86/spec_ctrl.c
index 9665ec5..2a7267c 100644
--- a/xen/arch/x86/spec_ctrl.c
+++ b/xen/arch/x86/spec_ctrl.c
@@ -400,6 +400,45 @@ static void __init print_details(enum ind_thunk thunk, uint64_t caps)
opt_pv_l1tf_domu ? "enabled" : "disabled");
}
+static bool __init check_smt_enabled(void)
+{
+ uint64_t val;
+ unsigned int cpu;
+
+ /*
+ * x86_num_siblings defaults to 1 in the absence of other information, and
+ * is adjusted based on other topology information found in CPUID leaves.
+ *
+ * On AMD hardware, it will be the current SMT configuration. On Intel
+ * hardware, it will represent the maximum capability, rather than the
+ * current configuration.
+ */
+ if ( boot_cpu_data.x86_num_siblings < 2 )
+ return false;
+
+ /*
+ * Intel Nehalem and later hardware does have an MSR which reports the
+ * current count of cores/threads in the package.
+ *
+ * At the time of writing, it is almost completely undocumented, so isn't
+ * virtualised reliably.
+ */
+ if ( boot_cpu_data.x86_vendor == X86_VENDOR_INTEL && !cpu_has_hypervisor &&
+ !rdmsr_safe(MSR_INTEL_CORE_THREAD_COUNT, val) )
+ return (MASK_EXTR(val, MSR_CTC_CORE_MASK) !=
+ MASK_EXTR(val, MSR_CTC_THREAD_MASK));
+
+ /*
+ * Search over the CPUs reported in the ACPI tables. Any whose APIC ID
+ * has a non-zero thread id component indicates that SMT is active.
+ */
+ for_each_present_cpu ( cpu )
+ if ( x86_cpu_to_apicid[cpu] & (boot_cpu_data.x86_num_siblings - 1) )
+ return true;
+
+ return false;
+}
+
/* Calculate whether Retpoline is known-safe on this CPU. */
static bool __init retpoline_safe(uint64_t caps)
{
@@ -709,12 +748,14 @@ static __init void l1tf_calculations(uint64_t caps)
void __init init_speculation_mitigations(void)
{
enum ind_thunk thunk = THUNK_DEFAULT;
- bool use_spec_ctrl = false, ibrs = false;
+ bool use_spec_ctrl = false, ibrs = false, hw_smt_enabled;
uint64_t caps = 0;
if ( boot_cpu_has(X86_FEATURE_ARCH_CAPS) )
rdmsrl(MSR_ARCH_CAPABILITIES, caps);
+ hw_smt_enabled = check_smt_enabled();
+
/*
* Has the user specified any custom BTI mitigations? If so, follow their
* instructions exactly and disable all heuristics.
@@ -887,8 +928,7 @@ void __init init_speculation_mitigations(void)
* However, if we are on affected hardware, with HT enabled, and the user
* hasn't explicitly chosen whether to use HT or not, nag them to do so.
*/
- if ( opt_smt == -1 && cpu_has_bug_l1tf &&
- boot_cpu_data.x86_num_siblings > 1 )
+ if ( opt_smt == -1 && cpu_has_bug_l1tf && hw_smt_enabled )
warning_add(
"Booted on L1TF-vulnerable hardware with SMT/Hyperthreading\n"
"enabled. Please assess your configuration and choose an\n"
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/spec-ctrl: Misc non-functional cleanup
* Identify BTI in the spec_ctrl_{enter,exit}_idle() comments, as other
mitigations will shortly appear.
* Use alternative_input() and cover the lack of memory clobber with a further
barrier.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/include/asm-x86/spec_ctrl.h b/xen/include/asm-x86/spec_ctrl.h
index c846354..4983071 100644
--- a/xen/include/asm-x86/spec_ctrl.h
+++ b/xen/include/asm-x86/spec_ctrl.h
@@ -61,6 +61,8 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
uint32_t val = 0;
/*
+ * Branch Target Injection:
+ *
* Latch the new shadow value, then enable shadowing, then update the MSR.
* There are no SMP issues here; only local processor ordering concerns.
*/
@@ -68,8 +70,9 @@ static always_inline void spec_ctrl_enter_idle(struct cpu_info *info)
barrier();
info->spec_ctrl_flags |= SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
+ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
+ barrier();
}
/* WARNING! `ret`, `call *`, `jmp *` not safe before this call. */
@@ -78,13 +81,16 @@ static always_inline void spec_ctrl_exit_idle(struct cpu_info *info)
uint32_t val = info->xen_spec_ctrl;
/*
+ * Branch Target Injection:
+ *
* Disable shadowing before updating the MSR. There are no SMP issues
* here; only local processor ordering concerns.
*/
info->spec_ctrl_flags &= ~SCF_use_shadow;
barrier();
- asm volatile ( ALTERNATIVE(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE)
- :: "a" (val), "c" (MSR_SPEC_CTRL), "d" (0) : "memory" );
+ alternative_input(ASM_NOP3, "wrmsr", X86_FEATURE_SC_MSR_IDLE,
+ "a" (val), "c" (MSR_SPEC_CTRL), "d" (0));
+ barrier();
}
#endif /* !__X86_SPEC_CTRL_H__ */
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86/spec-ctrl: CPUID/MSR definitions for Microarchitectural Data
Sampling
The MD_CLEAR feature can be automatically offered to guests. No
infrastructure is needed in Xen to support the guest making use of it.
This is part of XSA-297, CVE-2018-12126, CVE-2018-12127, CVE-2018-12130, CVE-2019-11091.
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 10ed971..a74a995 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown