Commit 40a3ee6c authored by Leonardo Arena's avatar Leonardo Arena

main/xen: security fixes (CVE-2016-3158, CVE-2016-3159, CVE-2016-3960). Fixes #5489

parent a934752a
......@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.6.1
pkgrel=0
pkgrel=1
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64"
......@@ -47,6 +47,8 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
xsa170.patch
xsa172.patch
xsa173-4.6.patch
qemu-coroutine-gthread.patch
qemu-xen_paths.patch
......@@ -248,6 +250,8 @@ debc62758716a169df9f62e6ab2bc634 zlib-1.2.3.tar.gz
48be8e53712d8656549fcdf1a96ffdec xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
21448f920d1643580e261ac3650d1ef9 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
e0fd8934b37592a6a3e6ab107a2ab41a xsa170.patch
b14d9a4247ae654579cb757c9b0e949a xsa172.patch
a29812dc4cf1d8013d650496cb107fd0 xsa173-4.6.patch
de1a3db370b87cfb0bddb51796b50315 qemu-coroutine-gthread.patch
08bfdf8caff5d631f53660bf3fd4edaf qemu-xen_paths.patch
e449bb3359b490804ffc7b0ae08d62a0 hotplug-vif-vtrill.patch
......@@ -285,6 +289,8 @@ e52467fcec73bcc86d3e96d06f8ca8085ae56a83d2c42a30c16bc3dc630d8f8a xsa155-xen-000
eae34c8ccc096ad93a74190506b3d55020a88afb0cc504a3a514590e9fd746fd xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
42780265014085a4221ad32b026214693d751789eb5219e2e83862c0006c66f4 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
77b4b14b2c93da5f68e724cf74e1616f7df2e78305f66d164b3de2d980221a9a xsa170.patch
f18282fcb794b8772bc3af51d56860050071bd62a5a909b8f2fc2018e2958154 xsa172.patch
6dbc34e3e2d4415967c4406e0f8392a9395bff74da115ae20f26bd112b19017c xsa173-4.6.patch
3941f99b49c7e8dafc9fae8aad2136a14c6d84533cd542cc5f1040a41ef7c6fe qemu-coroutine-gthread.patch
e4e5e838e259a3116978aabbcebc1865a895179a7fcbf4bad195c83e9b4c0f98 qemu-xen_paths.patch
dd1e784bc455eb62cb85b3fa24bfc34f575ceaab9597ef6a2f1ee7ff7b3cae0a hotplug-vif-vtrill.patch
......@@ -322,6 +328,8 @@ fde4c58acb857bd4eec807a78bee356a02358174e8c52a66555a6ad9cf5670b43391429ff973e74d
d64d7e0dd96e31fa45d9d9b0cad9c543484709d699d9ab2efe1992f9375e8e0d67b0164e9ea8d3e75998388964f2fbfd96b5520a4acf13804dcf8c3472e37791 xsa155-xen-0002-blktap2-Use-RING_COPY_REQUEST.patch
cad6b571ccca123e2a797cf82669ad0fe2e1ec99b7a68396beb3a2279e2cf87d8f0cf75e22dcd98238dd5031b2c7e9cb86d02ecaa82ae973fba6d26b2acfb514 xsa155-xen-0003-libvchan-Read-prod-cons-only-once.patch
09a6defca0f32319dddf4325fb0105a468517a7150c8a8ea287677b4a55f09bf776f5aa673bae22a0708537cf075d5e2143a24aa1b08629ef911a7cdfd8376f0 xsa170.patch
8636f74b270b0ccf56ea6bab4c90d0ee909e5d2891987b4572df4a0906e2230e046aad0c99add6c1d70f7023cc6d99bcfd2947c953f600074a6ed7c176a5d3dc xsa172.patch
d56d7403163fb7eeb2b5c44027c150f9edd1c4df86b38e3834b4b2cb58db94472fe0030c0ec667e41faed00bd6540fab10a4d909c82280d075482d06f8ac4cfb xsa173-4.6.patch
c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562 qemu-coroutine-gthread.patch
1936ab39a1867957fa640eb81c4070214ca4856a2743ba7e49c0cd017917071a9680d015f002c57fa7b9600dbadd29dcea5887f50e6c133305df2669a7a933f3 qemu-xen_paths.patch
f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2446729adfdb321e01468e377793f6563a67d68b8b0f7ffe3 hotplug-vif-vtrill.patch
......
x86: fix information leak on AMD CPUs
The fix for XSA-52 was wrong, and so was the change synchronizing that
new behavior to the FXRSTOR logic: AMD's manuals explicitly state that
writes to the ES bit are ignored, and it instead gets calculated from
the exception and mask bits (it gets set whenever there is an unmasked
exception, and cleared otherwise). Hence we need to follow that model
in our workaround.
This is XSA-172.
The first hunk (xen/arch/x86/i387.c:fpu_fxrstor) is CVE-2016-3159.
The second hunk (xen/arch/x86/xstate.c:xrstor) is CVE-2016-3158.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -49,7 +49,7 @@ static inline void fpu_fxrstor(struct vc
* sometimes new user value. Both should be ok. Use the FPU saved
* data block as a safe address because it should be in L1.
*/
- if ( !(fpu_ctxt->fsw & 0x0080) &&
+ if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
{
asm volatile ( "fnclex\n\t"
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -344,7 +344,7 @@ void xrstor(struct vcpu *v, uint64_t mas
* data block as a safe address because it should be in L1.
*/
if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
- !(ptr->fpu_sse.fsw & 0x0080) &&
+ !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
asm volatile ( "fnclex\n\t" /* clear exceptions */
"ffree %%st(7)\n\t" /* clear stack tag */
commit 54a4651cb4e744960fb375ed99909d7dfb943caf
Author: Tim Deegan <tim@xen.org>
Date: Wed Mar 16 16:51:27 2016 +0000
x86: limit GFNs to 32 bits for shadowed superpages.
Superpage shadows store the shadowed GFN in the backpointer field,
which for non-BIGMEM builds is 32 bits wide. Shadowing a superpage
mapping of a guest-physical address above 2^44 would lead to the GFN
being truncated there, and a crash when we come to remove the shadow
from the hash table.
Track the valid width of a GFN for each guest, including reporting it
through CPUID, and enforce it in the shadow pagetables. Set the
maximum width to 32 for guests where this truncation could occur.
This is XSA-173.
Signed-off-by: Tim Deegan <tim@xen.org>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reported-by: Ling Liu <liuling-it@360.cn>
diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
index 35ef21b..528c283 100644
--- a/xen/arch/x86/cpu/common.c
+++ b/xen/arch/x86/cpu/common.c
@@ -38,6 +38,7 @@ integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
const struct cpu_dev *__read_mostly cpu_devs[X86_VENDOR_NUM] = {};
unsigned int paddr_bits __read_mostly = 36;
+unsigned int hap_paddr_bits __read_mostly = 36;
/*
* Default host IA32_CR_PAT value to cover all memory types.
@@ -211,7 +212,7 @@ static void __init early_cpu_detect(void)
static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
{
- u32 tfms, capability, excap, ebx;
+ u32 tfms, capability, excap, ebx, eax;
/* Get vendor name */
cpuid(0x00000000, &c->cpuid_level,
@@ -248,8 +249,11 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
}
if ( c->extended_cpuid_level >= 0x80000004 )
get_model_name(c); /* Default name */
- if ( c->extended_cpuid_level >= 0x80000008 )
- paddr_bits = cpuid_eax(0x80000008) & 0xff;
+ if ( c->extended_cpuid_level >= 0x80000008 ) {
+ eax = cpuid_eax(0x80000008);
+ paddr_bits = eax & 0xff;
+ hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
+ }
}
/* Might lift BIOS max_leaf=3 limit. */
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index e200aab..0b4d9f0 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -4567,8 +4567,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
break;
case 0x80000008:
- count = cpuid_eax(0x80000008);
- count = (count >> 16) & 0xff ?: count & 0xff;
+ count = d->arch.paging.gfn_bits + PAGE_SHIFT;
if ( (*eax & 0xff) > count )
*eax = (*eax & ~0xff) | count;
diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
index 773454d..06543d3 100644
--- a/xen/arch/x86/mm/guest_walk.c
+++ b/xen/arch/x86/mm/guest_walk.c
@@ -93,6 +93,12 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
struct page_info *page;
void *map;
+ if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
+ {
+ *rc = _PAGE_INVALID_BIT;
+ return NULL;
+ }
+
/* Translate the gfn, unsharing if shared */
page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
q);
@@ -326,20 +332,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
flags &= ~_PAGE_PAT;
if ( gfn_x(start) & GUEST_L2_GFN_MASK & ~0x1 )
- {
-#if GUEST_PAGING_LEVELS == 2
- /*
- * Note that _PAGE_INVALID_BITS is zero in this case, yielding a
- * no-op here.
- *
- * Architecturally, the walk should fail if bit 21 is set (others
- * aren't being checked at least in PSE36 mode), but we'll ignore
- * this here in order to avoid specifying a non-natural, non-zero
- * _PAGE_INVALID_BITS value just for that case.
- */
-#endif
rc |= _PAGE_INVALID_BITS;
- }
+
/* Increment the pfn by the right number of 4k pages.
* Mask out PAT and invalid bits. */
start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
@@ -422,5 +416,11 @@ set_ad:
put_page(mfn_to_page(mfn_x(gw->l1mfn)));
}
+ /* If this guest has a restricted physical address space then the
+ * target GFN must fit within it. */
+ if ( !(rc & _PAGE_PRESENT)
+ && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
+ rc |= _PAGE_INVALID_BITS;
+
return rc;
}
diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
index 6eb2167..f3475c6 100644
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -448,6 +448,8 @@ void hap_domain_init(struct domain *d)
{
INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
+ d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
+
/* Use HAP logdirty mechanism. */
paging_log_dirty_init(d, hap_enable_log_dirty,
hap_disable_log_dirty,
diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
index bad8360..98d0d2c 100644
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -51,6 +51,16 @@ int shadow_domain_init(struct domain *d, unsigned int domcr_flags)
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+ d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
+#ifndef CONFIG_BIGMEM
+ /*
+ * Shadowed superpages store GFNs in 32-bit page_info fields.
+ * Note that we cannot use guest_supports_superpages() here.
+ */
+ if ( !is_pv_domain(d) || opt_allow_superpage )
+ d->arch.paging.gfn_bits = 32;
+#endif
+
/* Use shadow pagetables for log-dirty support */
paging_log_dirty_init(d, sh_enable_log_dirty,
sh_disable_log_dirty, sh_clean_dirty_bitmap);
diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
index 43c9488..71477fe 100644
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -525,7 +525,8 @@ _sh_propagate(struct vcpu *v,
ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
/* Check there's something for the shadows to map to */
- if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
+ if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
+ || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
{
*sp = shadow_l1e_empty();
goto done;
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index c6c6e71..74c3a52 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -193,6 +193,9 @@ struct paging_domain {
/* log dirty support */
struct log_dirty_domain log_dirty;
+ /* Number of valid bits in a gfn. */
+ unsigned int gfn_bits;
+
/* preemption handling */
struct {
const struct domain *dom;
diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
index f8a0d76..b5db401 100644
--- a/xen/include/asm-x86/guest_pt.h
+++ b/xen/include/asm-x86/guest_pt.h
@@ -210,15 +210,17 @@ guest_supports_nx(struct vcpu *v)
}
-/* Some bits are invalid in any pagetable entry. */
-#if GUEST_PAGING_LEVELS == 2
-#define _PAGE_INVALID_BITS (0)
-#elif GUEST_PAGING_LEVELS == 3
-#define _PAGE_INVALID_BITS \
- get_pte_flags(((1ull<<63) - 1) & ~((1ull<<paddr_bits) - 1))
-#else /* GUEST_PAGING_LEVELS == 4 */
+/*
+ * Some bits are invalid in any pagetable entry.
+ * Normal flags values get represented in 24-bit values (see
+ * get_pte_flags() and put_pte_flags()), so set bit 24 in
+ * addition to be able to flag out of range frame numbers.
+ */
+#if GUEST_PAGING_LEVELS == 3
#define _PAGE_INVALID_BITS \
- get_pte_flags(((1ull<<52) - 1) & ~((1ull<<paddr_bits) - 1))
+ (_PAGE_INVALID_BIT | get_pte_flags(((1ull << 63) - 1) & ~(PAGE_SIZE - 1)))
+#else /* 2-level and 4-level */
+#define _PAGE_INVALID_BITS _PAGE_INVALID_BIT
#endif
diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
index f507f5e..a200470 100644
--- a/xen/include/asm-x86/processor.h
+++ b/xen/include/asm-x86/processor.h
@@ -212,6 +212,8 @@ extern u32 cpuid_ext_features;
/* Maximum width of physical addresses supported by the hardware */
extern unsigned int paddr_bits;
+/* Max physical address width supported within HAP guests */
+extern unsigned int hap_paddr_bits;
extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id table[]);
diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
index 19ab4d0..eb5e2fd 100644
--- a/xen/include/asm-x86/x86_64/page.h
+++ b/xen/include/asm-x86/x86_64/page.h
@@ -141,6 +141,12 @@ typedef l4_pgentry_t root_pgentry_t;
#define _PAGE_GNTTAB (1U<<22)
/*
+ * Bit 24 of a 24-bit flag mask! This is not any bit of a real pte,
+ * and is only used for signalling in variables that contain flags.
+ */
+#define _PAGE_INVALID_BIT (1U<<24)
+
+/*
* Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
* This is needed to distinguish between user and kernel PTEs since _PAGE_USER
* is asserted for both.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment