Commit eac7d0a6 authored by Leonardo Arena

main/xen: security fixes

CVE-2017-15596, XSA-235, CVE-2017-15588, CVE-2017-15589, CVE-2017-15590, XSA-238
CVE-2017-15593, CVE-2017-15592, CVE-2017-15594, CVE-2017-15595, CVE-2017-15597

Fixes #8063
parent 9e642dbb
From: Jan Beulich <jbeulich@suse.com>
Subject: x86: don't allow MSI pIRQ mapping on unowned device
MSI setup should be permitted only for existing devices owned by the
respective guest (the operation may still be carried out by the domain
controlling that guest).
This is part of XSA-237.
Reported-by: HW42 <hw42@ipsumj.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1964,7 +1964,10 @@ int map_domain_pirq(
if ( !cpu_has_apic )
goto done;
- pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
+ pdev = pci_get_pdev_by_domain(d, msi->seg, msi->bus, msi->devfn);
+ if ( !pdev )
+ goto done;
+
ret = pci_enable_msi(msi, &msi_desc);
if ( ret )
{
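As an aside (not part of the upstream patch), a minimal standalone C sketch of the ownership-check pattern the hunk above introduces: the device is looked up by owning domain, and MSI setup is refused when the lookup fails. The types and the lookup helper are illustrative stand-ins for the real Xen structures and pci_get_pdev_by_domain().

```c
/* Standalone sketch of the ownership check; types and helper are
 * illustrative stand-ins, not the real Xen ones. */
#include <stdio.h>
#include <stddef.h>

struct domain  { int domain_id; };
struct pci_dev { int seg, bus, devfn; const struct domain *owner; };

/* Hypothetical per-domain lookup: only return a device that @d owns. */
static const struct pci_dev *
lookup_pdev_by_domain(const struct domain *d, const struct pci_dev *devs,
                      size_t n, int seg, int bus, int devfn)
{
    for ( size_t i = 0; i < n; i++ )
        if ( devs[i].seg == seg && devs[i].bus == bus &&
             devs[i].devfn == devfn && devs[i].owner == d )
            return &devs[i];
    return NULL; /* nonexistent or unowned: caller must refuse MSI setup */
}

int main(void)
{
    struct domain d0 = { 0 }, du = { 7 };
    struct pci_dev devs[] = { { 0, 0x03, 0x00, &d0 } };

    /* An unprivileged domain asking about a device it does not own. */
    printf("%s\n", lookup_pdev_by_domain(&du, devs, 1, 0, 0x03, 0x00)
                   ? "mapping allowed" : "mapping refused");
    return 0;
}
```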
From a48d47febc1340f27d6c716545692641a09b414c Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@arm.com>
Date: Thu, 21 Sep 2017 14:13:08 +0100
Subject: [PATCH 1/2] xen/page_alloc: Cover memory unreserved after boot in
first_valid_mfn
On Arm, some regions (e.g. initramfs, Dom0 kernel...) are marked as
reserved until the hardware domain is built and they are copied into its
memory. Therefore, they will not be added to the boot allocator via
init_boot_pages.
Instead, init_xenheap_pages will be called once the regions are no longer
used.
Update first_valid_mfn in both init_heap_pages and init_boot_pages (where
the update already exists) to cover all the cases.
Signed-off-by: Julien Grall <julien.grall@arm.com>
[Adjusted comment, added locking around first_valid_mfn update]
Signed-off-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
---
xen/common/page_alloc.c | 10 ++++++++++
1 file changed, 10 insertions(+)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 0b9f6cc6df..fbe5a8af39 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -1700,6 +1700,16 @@ static void init_heap_pages(
{
unsigned long i;
+ /*
+ * Some pages may not go through the boot allocator (e.g reserved
+ * memory at boot but released just after --- kernel, initramfs,
+ * etc.).
+ * Update first_valid_mfn to ensure those regions are covered.
+ */
+ spin_lock(&heap_lock);
+ first_valid_mfn = min_t(unsigned long, page_to_mfn(pg), first_valid_mfn);
+ spin_unlock(&heap_lock);
+
for ( i = 0; i < nr_pages; i++ )
{
unsigned int nid = phys_to_nid(page_to_maddr(pg+i));
--
2.11.0
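For illustration only (not upstream code), a minimal standalone sketch of the invariant this patch maintains: any start MFN seen by either init path lowers first_valid_mfn, under a lock. A pthread mutex stands in for Xen's heap_lock, and MFNs are plain integers.

```c
/* Standalone sketch: first_valid_mfn must cover pages released late
 * (kernel, initramfs, ...) as well as pages seen at boot. */
#include <pthread.h>
#include <stdio.h>

static unsigned long first_valid_mfn = ~0UL;
static pthread_mutex_t heap_lock = PTHREAD_MUTEX_INITIALIZER;

static void note_pages(unsigned long start_mfn)
{
    pthread_mutex_lock(&heap_lock);
    if ( start_mfn < first_valid_mfn )
        first_valid_mfn = start_mfn;
    pthread_mutex_unlock(&heap_lock);
}

int main(void)
{
    note_pages(0x80000);  /* regular RAM seen by init_boot_pages        */
    note_pages(0x40000);  /* region unreserved later, freed via the
                             xenheap path into init_heap_pages          */
    printf("first_valid_mfn = %#lx\n", first_valid_mfn);
    return 0;
}
```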
From: Jan Beulich <jbeulich@suse.com>
Subject: x86: enforce proper privilege when (un)mapping pIRQ-s
(Un)mapping of IRQs, just like other RESOURCE__ADD* / RESOURCE__REMOVE*
actions (in FLASK terms), should be XSM_DM_PRIV rather than XSM_TARGET.
This in turn requires bypassing the XSM check in physdev_unmap_pirq()
for the HVM emuirq case, just as is done in physdev_map_pirq().
The primary goal, security-wise, however, is to no longer allow HVM
guests, by specifying their own domain ID instead of DOMID_SELF, to
enter code paths intended only for PV guests and the control domains of
HVM guests.
This is part of XSA-237.
Reported-by: HW42 <hw42@ipsumj.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -110,7 +110,7 @@ int physdev_map_pirq(domid_t domid, int
if ( d == NULL )
return -ESRCH;
- ret = xsm_map_domain_pirq(XSM_TARGET, d);
+ ret = xsm_map_domain_pirq(XSM_DM_PRIV, d);
if ( ret )
goto free_domain;
@@ -255,13 +255,14 @@ int physdev_map_pirq(domid_t domid, int
int physdev_unmap_pirq(domid_t domid, int pirq)
{
struct domain *d;
- int ret;
+ int ret = 0;
d = rcu_lock_domain_by_any_id(domid);
if ( d == NULL )
return -ESRCH;
- ret = xsm_unmap_domain_pirq(XSM_TARGET, d);
+ if ( domid != DOMID_SELF || !is_hvm_domain(d) )
+ ret = xsm_unmap_domain_pirq(XSM_DM_PRIV, d);
if ( ret )
goto free_domain;
--- a/xen/include/xsm/dummy.h
+++ b/xen/include/xsm/dummy.h
@@ -453,7 +453,7 @@ static XSM_INLINE char *xsm_show_irq_sid
static XSM_INLINE int xsm_map_domain_pirq(XSM_DEFAULT_ARG struct domain *d)
{
- XSM_ASSERT_ACTION(XSM_TARGET);
+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
return xsm_default_action(action, current->domain, d);
}
@@ -465,7 +465,7 @@ static XSM_INLINE int xsm_map_domain_irq
static XSM_INLINE int xsm_unmap_domain_pirq(XSM_DEFAULT_ARG struct domain *d)
{
- XSM_ASSERT_ACTION(XSM_TARGET);
+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
return xsm_default_action(action, current->domain, d);
}
From 9a4b34729f1bb92eea1e1efe52e6face9f0b17ae Mon Sep 17 00:00:00 2001
From: George Dunlap <george.dunlap@citrix.com>
Date: Fri, 22 Sep 2017 11:46:55 +0100
Subject: [PATCH 2/2] x86/mm: Disable PV linear pagetables by default
Allowing pagetables to point to other pagetables of the same level
(often called 'linear pagetables') has been included in Xen since its
inception. But it is not used by the most common PV guests (Linux,
NetBSD, minios), and has been the source of a number of subtle
reference-counting bugs.
Add a command-line option to control whether PV linear pagetables are
allowed (disabled by default).
Reported-by: Jann Horn <jannh@google.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
---
Changes since v2:
- s/_/-/; in command-line option
- Added __read_mostly
---
docs/misc/xen-command-line.markdown | 15 +++++++++++++++
xen/arch/x86/mm.c | 9 +++++++++
2 files changed, 24 insertions(+)
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 73f5265fc6..061aff5edc 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1280,6 +1280,21 @@ The following resources are available:
CDP, one COS will corespond two CBMs other than one with CAT, due to the
sum of CBMs is fixed, that means actual `cos_max` in use will automatically
reduce to half when CDP is enabled.
+
+### pv-linear-pt
+> `= <boolean>`
+
+> Default: `false`
+
+Allow PV guests to have pagetable entries pointing to other pagetables
+of the same level (i.e., allowing L2 PTEs to point to other L2 pages).
+This technique is often called "linear pagetables", and is sometimes
+used to allow operating systems a simple way to consistently map the
+current process's pagetables into its own virtual address space.
+
+None of the most common PV operating systems (Linux, NetBSD, MiniOS)
+use this technique, but there may be custom operating systems which
+do.
### reboot
> `= t[riple] | k[bd] | a[cpi] | p[ci] | P[ower] | e[fi] | n[o] [, [w]arm | [c]old]`
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e81a461b91..f748d4a221 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -799,6 +799,9 @@ static void dec_linear_uses(struct page_info *pg)
* frame if it is mapped by a different root table. This is sufficient and
* also necessary to allow validation of a root table mapping itself.
*/
+static bool_t __read_mostly pv_linear_pt_enable = 0;
+boolean_param("pv-linear-pt", pv_linear_pt_enable);
+
#define define_get_linear_pagetable(level) \
static int \
get_##level##_linear_pagetable( \
@@ -808,6 +811,12 @@ get_##level##_linear_pagetable( \
struct page_info *page; \
unsigned long pfn; \
\
+ if ( !pv_linear_pt_enable ) \
+ { \
+ MEM_LOG("Attempt to create linear p.t. (feature disabled)"); \
+ return 0; \
+ } \
+ \
if ( (level##e_get_flags(pde) & _PAGE_RW) ) \
{ \
MEM_LOG("Attempt to create linear p.t. with write perms"); \
--
2.14.1
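Purely illustrative (not part of the patch): a tiny standalone sketch of the gating pattern above, where a boot-time boolean that defaults to off makes the validation path refuse linear pagetable entries before any reference counting happens. The parse_cmdline() helper is a made-up stand-in for Xen's boolean_param() hook.

```c
/* Standalone sketch of a default-off boot option gating a feature. */
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

static bool pv_linear_pt_enable;      /* default: false, as in the patch */

/* Hypothetical stand-in for the real command-line parsing. */
static void parse_cmdline(const char *cmdline)
{
    if ( strstr(cmdline, "pv-linear-pt=true") )
        pv_linear_pt_enable = true;
}

static int get_linear_pagetable(void)
{
    if ( !pv_linear_pt_enable )
    {
        fprintf(stderr, "Attempt to create linear p.t. (feature disabled)\n");
        return 0;                 /* validation fails, mapping is refused */
    }
    /* ... reference-counting checks would follow here ... */
    return 1;
}

int main(void)
{
    parse_cmdline("dom0_mem=512M");        /* option absent => disabled */
    printf("linear pt allowed: %d\n", get_linear_pagetable());
    return 0;
}
```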
From cbfcf039d0e0b6f4c4cb3de612f7bf788a0c47cd Mon Sep 17 00:00:00 2001
From: Julien Grall <julien.grall@arm.com>
Date: Mon, 18 Sep 2017 14:24:08 +0100
Subject: [PATCH 2/2] xen/arm: Correctly report the memory region in the dummy
NUMA helpers
NUMA is currently not supported on Arm. Because common code is
NUMA-aware, dummy helpers are instead provided to expose a single node.
Those helpers are used, for instance, to determine the region to scrub.
However, the memory region is not reported correctly: the frametable may
not be at the beginning of memory and there might be multiple memory
banks. This leads to parts of the memory not being scrubbed.
The memory information can be found using:
* first_valid_mfn as the start of the memory
* max_page - first_valid_mfn as the spanned pages
Note that first_valid_mfn is now exported. The prototype has been added
to asm-arm/numa.h rather than a common header because I would expect the
variable to become static again once NUMA is fully supported on Arm.
Signed-off-by: Julien Grall <julien.grall@arm.com>
---
xen/common/page_alloc.c | 6 +++++-
xen/include/asm-arm/numa.h | 10 ++++++++--
2 files changed, 13 insertions(+), 3 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index fbe5a8af39..472c6fe329 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -192,7 +192,11 @@ PAGE_LIST_HEAD(page_broken_list);
* BOOT-TIME ALLOCATOR
*/
-static unsigned long __initdata first_valid_mfn = ~0UL;
+/*
+ * first_valid_mfn is exported because it is use in ARM specific NUMA
+ * helpers. See comment in asm-arm/numa.h.
+ */
+unsigned long first_valid_mfn = ~0UL;
static struct bootmem_region {
unsigned long s, e; /* MFNs @s through @e-1 inclusive are free */
diff --git a/xen/include/asm-arm/numa.h b/xen/include/asm-arm/numa.h
index a2c1a3476d..3e7384da9e 100644
--- a/xen/include/asm-arm/numa.h
+++ b/xen/include/asm-arm/numa.h
@@ -12,9 +12,15 @@ static inline __attribute__((pure)) nodeid_t phys_to_nid(paddr_t addr)
return 0;
}
+/*
+ * TODO: make first_valid_mfn static when NUMA is supported on Arm, this
+ * is required because the dummy helpers is using it.
+ */
+extern unsigned long first_valid_mfn;
+
/* XXX: implement NUMA support */
-#define node_spanned_pages(nid) (total_pages)
-#define node_start_pfn(nid) (pdx_to_pfn(frametable_base_pdx))
+#define node_spanned_pages(nid) (max_page - first_valid_mfn)
+#define node_start_pfn(nid) (first_valid_mfn)
#define __node_distance(a, b) (20)
static inline unsigned int arch_get_dma_bitsize(void)
--
2.11.0
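Illustrative only: a standalone sketch of the corrected dummy-node reporting, with made-up MFN values. The single node now starts at first_valid_mfn and spans up to max_page, which is the range the scrubbing code walks.

```c
/* Standalone sketch of the dummy single-node range after the fix. */
#include <stdio.h>

static unsigned long first_valid_mfn = 0x40000; /* lowest usable MFN    */
static unsigned long max_page        = 0xc0000; /* one past highest MFN */

#define node_start_pfn(nid)      (first_valid_mfn)
#define node_spanned_pages(nid)  (max_page - first_valid_mfn)

int main(void)
{
    /* The scrubbing code walks exactly this range for node 0. */
    printf("node 0: start %#lx, spans %#lx pages\n",
           node_start_pfn(0), node_spanned_pages(0));
    return 0;
}
```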
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/MSI: disallow redundant enabling
At the moment, Xen attempts to allow redundant enabling of MSI by
having pci_enable_msi() return 0 and point to the existing MSI
descriptor when the MSI descriptor already exists.
Unfortunately, if subsequent errors are encountered, the cleanup
paths assume pci_enable_msi() had done full initialization, and
hence undo everything that was assumed to be done by that
function without also undoing other setup that would normally
occur only after that function was called (in map_domain_pirq()
itself).
Rather than try to make the redundant enabling case work properly, just
forbid it entirely by having pci_enable_msi() return -EEXIST when MSI
is already set up.
This is part of XSA-237.
Reported-by: HW42 <hw42@ipsumj.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
--- a/xen/arch/x86/msi.c
+++ b/xen/arch/x86/msi.c
@@ -1050,11 +1050,10 @@ static int __pci_enable_msi(struct msi_i
old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
if ( old_desc )
{
- printk(XENLOG_WARNING "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n",
+ printk(XENLOG_ERR "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n",
msi->irq, msi->seg, msi->bus,
PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
- *desc = old_desc;
- return 0;
+ return -EEXIST;
}
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
@@ -1118,11 +1117,10 @@ static int __pci_enable_msix(struct msi_
old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSIX);
if ( old_desc )
{
- printk(XENLOG_WARNING "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n",
+ printk(XENLOG_ERR "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n",
msi->irq, msi->seg, msi->bus,
PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
- *desc = old_desc;
- return 0;
+ return -EEXIST;
}
old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
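Not part of the patch: a standalone sketch of the behavioural change, where a second enable request for the same IRQ now fails with -EEXIST instead of silently handing back the existing descriptor. The descriptor table here is a made-up stand-in.

```c
/* Standalone sketch: refuse redundant enabling with -EEXIST. */
#include <errno.h>
#include <stdio.h>

struct msi_desc { int irq; int in_use; };

static struct msi_desc table[4];

static int enable_msi(int irq, struct msi_desc **desc)
{
    struct msi_desc *old = &table[irq];

    if ( old->in_use )
    {
        fprintf(stderr, "irq %d already mapped to MSI\n", irq);
        return -EEXIST;            /* was: *desc = old; return 0; */
    }
    old->irq = irq;
    old->in_use = 1;
    *desc = old;
    return 0;
}

int main(void)
{
    struct msi_desc *d;
    printf("first enable:  %d\n", enable_msi(2, &d));  /* 0        */
    printf("second enable: %d\n", enable_msi(2, &d));  /* -EEXIST  */
    return 0;
}
```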
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/IRQ: conditionally preserve irq <-> pirq mapping on map error paths
Mappings that had been set up before should not be torn down when
handling unrelated errors.
This is part of XSA-237.
Reported-by: HW42 <hw42@ipsumj.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -1252,7 +1252,8 @@ static int prepare_domain_irq_pirq(struc
return -ENOMEM;
}
*pinfo = info;
- return 0;
+
+ return !!err;
}
static void set_domain_irq_pirq(struct domain *d, int irq, struct pirq *pirq)
@@ -1295,7 +1296,10 @@ int init_domain_irq_mapping(struct domai
continue;
err = prepare_domain_irq_pirq(d, i, i, &info);
if ( err )
+ {
+ ASSERT(err < 0);
break;
+ }
set_domain_irq_pirq(d, i, info);
}
@@ -1903,6 +1907,7 @@ int map_domain_pirq(
struct pirq *info;
struct irq_desc *desc;
unsigned long flags;
+ DECLARE_BITMAP(prepared, MAX_MSI_IRQS) = {};
ASSERT(spin_is_locked(&d->event_lock));
@@ -1946,8 +1951,10 @@ int map_domain_pirq(
}
ret = prepare_domain_irq_pirq(d, irq, pirq, &info);
- if ( ret )
+ if ( ret < 0 )
goto revoke;
+ if ( !ret )
+ __set_bit(0, prepared);
desc = irq_to_desc(irq);
@@ -2019,8 +2026,10 @@ int map_domain_pirq(
irq = create_irq(NUMA_NO_NODE);
ret = irq >= 0 ? prepare_domain_irq_pirq(d, irq, pirq + nr, &info)
: irq;
- if ( ret )
+ if ( ret < 0 )
break;
+ if ( !ret )
+ __set_bit(nr, prepared);
msi_desc[nr].irq = irq;
if ( irq_permit_access(d, irq) != 0 )
@@ -2053,15 +2062,15 @@ int map_domain_pirq(
desc->msi_desc = NULL;
spin_unlock_irqrestore(&desc->lock, flags);
}
- while ( nr-- )
+ while ( nr )
{
if ( irq >= 0 && irq_deny_access(d, irq) )
printk(XENLOG_G_ERR
"dom%d: could not revoke access to IRQ%d (pirq %d)\n",
d->domain_id, irq, pirq);
- if ( info )
+ if ( info && test_bit(nr, prepared) )
cleanup_domain_irq_pirq(d, irq, info);
- info = pirq_info(d, pirq + nr);
+ info = pirq_info(d, pirq + --nr);
irq = info->arch.irq;
}
msi_desc->irq = -1;
@@ -2077,12 +2086,14 @@ int map_domain_pirq(
spin_lock_irqsave(&desc->lock, flags);
set_domain_irq_pirq(d, irq, info);
spin_unlock_irqrestore(&desc->lock, flags);
+ ret = 0;
}
done:
if ( ret )
{
- cleanup_domain_irq_pirq(d, irq, info);
+ if ( test_bit(0, prepared) )
+ cleanup_domain_irq_pirq(d, irq, info);
revoke:
if ( irq_deny_access(d, irq) )
printk(XENLOG_G_ERR
--- a/xen/arch/x86/physdev.c
+++ b/xen/arch/x86/physdev.c
@@ -185,7 +185,7 @@ int physdev_map_pirq(domid_t domid, int
}
else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
{
- if ( msi->entry_nr <= 0 || msi->entry_nr > 32 )
+ if ( msi->entry_nr <= 0 || msi->entry_nr > MAX_MSI_IRQS )
ret = -EDOM;
else if ( msi->entry_nr != 1 && !iommu_intremap )
ret = -EOPNOTSUPP;
--- a/xen/include/asm-x86/msi.h
+++ b/xen/include/asm-x86/msi.h
@@ -55,6 +55,8 @@
/* MAX fixed pages reserved for mapping MSIX tables. */
#define FIX_MSIX_MAX_PAGES 512
+#define MAX_MSI_IRQS 32 /* limited by MSI capability struct properties */
+
struct msi_info {
u16 seg;
u8 bus;
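Illustrative only: a standalone sketch of the error-path discipline the patch introduces, where a small "prepared" record ensures the unwind loop only tears down entries that this call actually set up, leaving pre-existing mappings alone. The data structures are made-up stand-ins for the real pirq bookkeeping.

```c
/* Standalone sketch: undo only what this call prepared. */
#include <stdio.h>

#define MAX_MSI_IRQS 32

struct pirq_entry { int prepared_before; };

static void cleanup(int nr) { printf("cleaning up entry %d\n", nr); }

static int map_pirqs(struct pirq_entry *e, int count, int fail_at)
{
    unsigned char prepared[MAX_MSI_IRQS] = { 0 };
    int nr;

    for ( nr = 0; nr < count; nr++ )
    {
        if ( nr == fail_at )
            goto error;                  /* unrelated failure mid-way  */
        if ( !e[nr].prepared_before )
            prepared[nr] = 1;            /* only we set this entry up  */
    }
    return 0;

 error:
    while ( nr-- )
        if ( prepared[nr] )              /* skip pre-existing mappings */
            cleanup(nr);
    return -1;
}

int main(void)
{
    struct pirq_entry e[4] = { { 1 }, { 0 }, { 0 }, { 0 } };
    return map_pirqs(e, 4, 3) ? 1 : 0;   /* entry 0 is left untouched  */
}
```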
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/FLASK: fix unmap-domain-IRQ XSM hook
The caller and the FLASK implementation of xsm_unmap_domain_irq()
disagreed about what the "data" argument points to in the MSI case.
Change both sides to pass/take a PCI device.
This is part of XSA-237.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/irq.c
+++ b/xen/arch/x86/irq.c
@@ -2144,7 +2144,8 @@ int unmap_domain_pirq(struct domain *d,
nr = msi_desc->msi.nvec;
}
- ret = xsm_unmap_domain_irq(XSM_HOOK, d, irq, msi_desc);
+ ret = xsm_unmap_domain_irq(XSM_HOOK, d, irq,
+ msi_desc ? msi_desc->dev : NULL);
if ( ret )
goto done;
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -915,8 +915,8 @@ static int flask_unmap_domain_msi (struc
u32 *sid, struct avc_audit_data *ad)
{
#ifdef CONFIG_HAS_PCI
- struct msi_info *msi = data;
- u32 machine_bdf = (msi->seg << 16) | (msi->bus << 8) | msi->devfn;
+ const struct pci_dev *pdev = data;
+ u32 machine_bdf = (pdev->seg << 16) | (pdev->bus << 8) | pdev->devfn;
AVC_AUDIT_DATA_INIT(ad, DEV);
ad->device = machine_bdf;
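Not part of the patch: a tiny standalone sketch of the seg/bus/devfn packing used by the fixed hook, with example values; the fields simply come from the struct pci_dev instead of the struct msi_info.

```c
/* Standalone sketch of the machine_bdf packing FLASK audits against. */
#include <stdint.h>
#include <stdio.h>

static uint32_t machine_bdf(uint16_t seg, uint8_t bus, uint8_t devfn)
{
    return ((uint32_t)seg << 16) | ((uint32_t)bus << 8) | devfn;
}

int main(void)
{
    /* 0000:03:00.1 -> devfn = (slot 0 << 3) | function 1 */
    printf("machine_bdf = %#x\n", (unsigned)machine_bdf(0x0000, 0x03, 0x01));
    return 0;
}
```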
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.7.3
pkgrel=2
pkgrel=3
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64 armhf"
@@ -85,6 +85,18 @@ makedepends="$depends_dev autoconf automake libtool "
# 4.7.3-r2:
# - CVE-2017-17044 XSA-246
# - CVE-2017-17045 XSA-247
# 4.7.3-r3:
# - CVE-2017-15596 XSA-235
# - CVE-2017-15597 XSA-236
# - CVE-2017-15590 XSA-237
# - XSA-238
# - CVE-2017-15589 XSA-239
# - CVE-2017-15595 XSA-240
# - CVE-2017-15588 XSA-241
# - CVE-2017-15593 XSA-242
# - CVE-2017-15592 XSA-243
# - CVE-2017-15594 XSA-244
# - CVE-2017-17046 XSA-245
case "$CARCH" in
x86*)
@@ -137,6 +149,22 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/$pkgname-$pkgver.ta
xsa232.patch
xsa233.patch
xsa234-4.6.patch
xsa235-4.7.patch
xsa236-4.9.patch
0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
0003-x86-MSI-disallow-redundant-enabling.patch
0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
xsa238.patch
xsa239.patch
0001-x86-limit-linear-page-table-use-to-a-single-level.patch
0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
xsa241-4.8.patch
xsa242-4.9.patch
xsa243-4.7-1.patch
xsa243-2.patch
xsa244-4.7.patch
xsa246-4.7.patch
0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
@@ -380,6 +408,22 @@ f66e7149f5a579ead59cc416d23bdefb xsa228-4.8.patch
d582d6a402935ea1aa2f6d9435ffef52 xsa232.patch
2f027cddb9401ca25add6ae229cb52c6 xsa233.patch
c07c078bb0b94783741c998d3afdabd8 xsa234-4.6.patch
fd6fb62075d0fc4ba9a14bbbc010a41f xsa235-4.7.patch
f8797c74a1186f610835bd8bc5daaddb xsa236-4.9.patch
e96050a4d241ed8dc0c8a39beb6f3284 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
24a503341fca703f851182bdf2bbbb3c 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
9f849dab12ad214be8eb6682a0873856 0003-x86-MSI-disallow-redundant-enabling.patch
f3689ebfc50bb463c35a2556e39f762a 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
5d119f231b38e61b2178cac637dc42d3 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
1d7afbe5d47d87aebb2b4022d0be338e xsa238.patch
5c564209bd30cade592c8e3f39edc279 xsa239.patch
4642495f2b5a1feeb42d014a04042246 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
0a50531d1ce5e29e01bdcc468cb4c597 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
c4e34874e6169cf3b68b0f1508e282a2 xsa241-4.8.patch
e25e98c3e699d90ad617f6f7e0106a5c xsa242-4.9.patch
f7d66f6e40dec159b073000baf8a2b84 xsa243-4.7-1.patch
93ffcf8e416b426f1b0088e61f538a3a xsa243-2.patch
1c81aebd57755934d0b88c082fe9020a xsa244-4.7.patch
ed39b07fbee1f07d767921732806578b xsa246-4.7.patch
5681e5bba8270223bf7ccd0beaf3ad12 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
54a258e7b548fa656e7babc73fca394d 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
@@ -437,6 +481,22 @@ ce29b56a0480f4835b37835b351e704d204bb0ccd22325f487127aa2776cc2cf xsa231-4.7.pat
5068a78293daa58557c30c95141b775becfb650de6a5eda0d82a4a321ced551c xsa232.patch
f721cc49ba692b2f36299b631451f51d7340b8b4732f74c98f01cb7a80d8662b xsa233.patch
3df4ce173196111c1ff849039ea4927c0b4bd632b08a501fb26f64e31b951fba xsa234-4.6.patch
f30848eee71e66687b421b87be1d8e3f454c0eb395422546c62a689153d1e31c xsa235-4.7.patch
b6fe5604af26e93184f30127ebbb644f127ecc7116b093c161ca3044b44d2fe9 xsa236-4.9.patch
1ae6aefb86ba0c48a45ecc14ff56ea0bc3d9d354937668bcacadaed1225017a8 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
bf2ca9cb99ee64d7db77d628cec1a84684c360fd36de433cbc78fbcde8095319 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
494a79332fc5f854f0dc7606669201717a41e5b89b44db2fb30607a326930bfb 0003-x86-MSI-disallow-redundant-enabling.patch
9a38899afd728d504382954de28657aa82af7da352eb4e45a5e615bd646834c5 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
fef5c77f19e2c6229912f1fd19cbcb41c1ce554ff53be22198b2f34ea7a27314 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
85d3f9713bef1bc86c682857dbd7388a1d1f20089363ddfc4cb9ecbd88eaffec xsa238.patch
087a8b3cf7ecbdbde593033c127cbcf6c37f532bf33d90f72c19e493970a799c xsa239.patch
5a9b8e0a4e55482826e6cf820ea01fbf6674fecf19b101f4578396e3fa98a8fd 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
acf9744b853c1c6ac071bcf5776d8d9463781bbdf658c2fecc59ee338c094750 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
443a5b0818045ada44fad0370ac01af0c96181be5a4078ae3b2575799e4a4e5b xsa241-4.8.patch
5e66b6b1d1cd400905d3abd3478144539c3afa24f5a744a11809d9c5eb517b98 xsa242-4.9.patch
465ba9e3293591a3c84c122ffd73474fe96483f5e21565440d5fbc207fa4c4a9 xsa243-4.7-1.patch
013cff90312305b7f4ce6818a25760bcfca61bfadd860b694afa04d56e60c563 xsa243-2.patch
4d8cf754f760ef05488e9fb25a7ebd9a7e46f3742e91eee1a8385fd1e611ea8c xsa244-4.7.patch
b41550688e88a2a7a22349a07168f3a3ddf6fad8b3389fa27de44ae6731b6a8b xsa246-4.7.patch
d149342e4d40dfb550f8af6d05cd20a34889d64fb33f967fe77cf89b4ea8504a 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
3c8a7bfdb408af0224cf6f5471b0fd9dd1a9a1ded7207e427b02268ca2906aa6 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
@@ -494,6 +554,22 @@ c1c05c2ec68486a3721ae9c305a4f7a01a1c38a62f468ba97be22ee583b5690b92fa1cb3c8a4ea65
fb742225a4f3dbf2a574c4a6e3ef61a5da0c91aaeed77a2247023bdefcd4e0b6c08f1c9ffb42eaac3d38739c401443c3cf7aebb507b1d779c415b6cbffabbc10 xsa232.patch
a322ac6c5ac2f858a59096108032fd42974eaaeeebd8f4966119149665f32bed281e333e743136e79add2e6f3844d88b6a3e4d5a685c2808702fd3a9e6396cd4 xsa233.patch
9f578606c3ffbbf3eb3dda82130336e155a502c2065841856e04f6935cf77b3da59d1ff7e6583c6425ccdefd673ad2b07ca3b3ad15aa6ca9765ac3a28d784f2c xsa234-4.6.patch
2048fd831c4b5e36db7eee0a32c804714b8e5f02e49317d4c26b564932158f9d16688667f20c89bc61a1d91c0f9d32fff0d172bcff819b46d6e3f22097b2e7bb xsa235-4.7.patch
a951c3d29a6b05b42021bd49419becff51123a245256659240a3af5701bbf51e7d3c1a79835a7cc9a5fdf7c1c6aa330a35a586cb56d69d847c256642f0fc8e55 xsa236-4.9.patch
a7491ef0c0b770f16198a36c02cfee7f6c55a3aaad1fc05dd7532ce3bd2cc84981253a8703244f79c552d3a8e21531891f911e91ddc7035fe3ddf0928b5577b3 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
5f9ea6e66eb3a507497ad80956c690f6e45a6743f235aafc5a428df292a18b9d614915163726227851d32a22d9789450deff974fb861b9278504c6eb7b9b222e 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
50607fca2e02eed322927e0288c77e7a6c541794fa2c70c78ada0c2fa762b5ad0f3b5108ecb9f01d8826f89dab492d56c502236c70234e6ba741e94a39356ea3 0003-x86-MSI-disallow-redundant-enabling.patch
c29b4747ded7ac070f325ce00355a682e8ac742257d25d8b9493c6cbf7a24060c09fb311a3ba5443b733513103f1afe87b57edf44a7274be60b648f230a7d6a2 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
0a367c1839f4cb19e3b2fd22b782d32fe97de8f991c99e4881162c06c9a608bebcd6d4bf6d44af9cd55fd45981125e13727bd368a646ed59d4e8b6a535c776e1 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
b154c0925bbceab40e8f3b689e2d1fb321b42c685fdcb6bd29b0411ccd856731480a2fbb8025c633f9edf34cec938e5d8888cc71e8158212c078bb595d07a29d xsa238.patch
8b09cd12c7adfef69a02a2965cda22ef6499fd42c8a84a20a6af231f422a6e8a0e597501c327532e1580c1067ee4bf35579e3cf98dee9302ed34ba87f74bf6d2 xsa239.patch
66071fa4ff14ce604d2b67fe37bb06dbbabb38b86d51c14c5d0700b26f9049b202e8a17bdac4440ab7281625e8b12a47f23bdb0f30f93f66cac2152b0c7be5d7 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
58b6e1308e0ce8ab19a814f49e1472d332af8245328599e8446cbe3e192025f2ef017572bef02ab4b1e8b1074618a816eed81d1e456f3d5f9e12caa80e143409 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
7716b76365b96ee7f80ea3c74ab450e184935babd708ff98b90c2d27d318beb4f9ba3534595a9fa06ec0bce561f62922d0cac0f0e9bb4801dcdfb6d3b7b5ea9b xsa241-4.8.patch
86aa763949ca36a36120a40eafbdf3a8e8bc04acd32ee6bc1e3ae90b189b86b9b166b81a9e0a4f86a7eb1fcc8723ae8ba6bd0f84fa9732e7e4e1ccea45d0b7c1 xsa242-4.9.patch
825f45f1b7ea75ed821c48c7fb989562fc884835919ec498369eca903c92ee83d389469c171beea84d637342221ae17782bb1741c1bfcaf17a597f4db2b90b47 xsa243-4.7-1.patch
8aaf0599259b1ff34171684467089da4a26af8fe67eedf22066955b34b2460c45abdf0f19a5a5e3dd3231b944674c62b9d3112ad7d765afc4bdbcdcfbad226e1 xsa243-2.patch
8ab78d48c6fce2bb34416bba0a72904a7baa64de912143647ff984eb3c428f7f9c98d3a4e8de0e101ebb643d3c0bffd6043f5ce4c2b4f943da102164551e23e6 xsa244-4.7.patch
082480ba79f4bf400d2b1a28a1a85e373a5681c02c0a470801d88b319cc5c21e739590fdf6468371edcc4745308128f0ce4f83ee4e94ba8e06bb7df211b80511 xsa246-4.7.patch
4fb72d13a22fc55660247182012e3541aeee39258b70dc9faf2f47351a15234c57d1626ec2336c3c463b80a22d6fc54e593e8e7c12b70d2ee69f3d1a2f83b7c9 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
f59cbda14300a62f3dc21c41609d7f13c99e46565863ecd887e5dff561d533c58cb07aff6d6b342c68e64d4b6f28643f86d090dc2b28c7092d995525cf8542a3 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
From: Jan Beulich <jbeulich@suse.com>
Subject: xen/disk: don't leak stack data via response ring
Rather than constructing a local structure instance on the stack, fill
the fields directly on the shared ring, just like other (Linux)
backends do. Build on the fact that all response structure flavors are
actually identical (the old code did make this assumption too).
This is XSA-216.
Reported-by: Anthony Perard <anthony.perard@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Acked-by: Anthony PERARD <anthony.perard@citrix.com>
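Before the diff, an illustrative standalone sketch (not QEMU code) of the leak class this change closes: copying a whole stack-local response struct onto the shared ring also copies its padding bytes, whose contents are not guaranteed to be cleared, while writing the named fields in place never touches ring padding. The struct layout below is a simplified stand-in.

```c
/* Standalone sketch: stack-struct copy vs. in-place field writes. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct resp {
    uint64_t id;
    uint8_t  operation;
    /* 1 byte of internal and 4 bytes of trailing padding on LP64 */
    int16_t  status;
};

/* Stand-in for one guest-visible slot on the shared ring. */
static struct resp ring_slot;

static void respond_via_stack_copy(void)
{
    struct resp r;              /* padding holds whatever was on the stack */
    r.id = 1;
    r.operation = 0;
    r.status = 0;
    memcpy(&ring_slot, &r, sizeof(r));  /* padding bytes go out to the ring */
}

static void respond_in_place(void)
{
    ring_slot.id = 1;           /* only the named fields are written;  */
    ring_slot.operation = 0;    /* ring padding is never overwritten   */
    ring_slot.status = 0;       /* with backend stack contents         */
}

int main(void)
{
    respond_via_stack_copy();
    respond_in_place();
    printf("sizeof(struct resp) = %zu, payload = %zu bytes\n",
           sizeof(struct resp),
           sizeof(uint64_t) + sizeof(uint8_t) + sizeof(int16_t));
    return 0;
}
```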
--- a/hw/block/xen_blkif.h
+++ b/hw/block/xen_blkif.h
@@ -12,9 +12,6 @@
struct blkif_common_request {
char dummy;
};
-struct blkif_common_response {
- char dummy;
-};
/* i386 protocol version */
#pragma pack(push, 4)
@@ -26,13 +23,7 @@ struct blkif_x86_32_request {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
-struct blkif_x86_32_response {
- uint64_t id; /* copied from request */
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
typedef struct blkif_x86_32_request blkif_x86_32_request_t;
-typedef struct blkif_x86_32_response blkif_x86_32_response_t;
#pragma pack(pop)
/* x86_64 protocol version */
@@ -44,17 +35,14 @@ struct blkif_x86_64_request {
blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
};
-struct blkif_x86_64_response {
- uint64_t __attribute__((__aligned__(8))) id;
- uint8_t operation; /* copied from request */
- int16_t status; /* BLKIF_RSP_??? */
-};
typedef struct blkif_x86_64_request blkif_x86_64_request_t;
-typedef struct blkif_x86_64_response blkif_x86_64_response_t;
-DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
-DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
-DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
+ struct blkif_response);
+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
+ struct blkif_response QEMU_PACKED);
+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
+ struct blkif_response);
union blkif_back_rings {
blkif_back_ring_t native;
--- a/hw/block/xen_disk.c
+++ b/hw/block/xen_disk.c
@@ -614,31 +614,30 @@ static int blk_send_response_one(struct
struct XenBlkDev *blkdev = ioreq->blkdev;
int send_notify = 0;
int have_requests = 0;
- blkif_response_t resp;
- void *dst;
-