Commit d04eadc3 authored by Ariadne Conill
Browse files

main/xen: add mitigations for XSA-378 through XSA-383

parent 0cb39112
......@@ -2,7 +2,7 @@
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=xen
pkgver=4.13.3
pkgrel=1
pkgrel=2
pkgdesc="Xen hypervisor"
url="https://www.xenproject.org/"
arch="x86_64 armhf aarch64" # enable armv7 when builds with gcc8
......@@ -219,6 +219,14 @@ options="!strip"
# - CVE-2021-28692 XSA-373
# - CVE-2021-0089 XSA-375
# - CVE-2021-28690 XSA-377
# 4.13.3-r2:
# - CVE-2021-28694 XSA-378
# - CVE-2021-28695 XSA-378
# - CVE-2021-28696 XSA-378
# - CVE-2021-28697 XSA-379
# - CVE-2021-28698 XSA-380
# - CVE-2021-28699 XSA-382
# - CVE-2021-28700 XSA-383
case "$CARCH" in
x86*)
......@@ -279,6 +287,8 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
xenqemu-xattr-size-max.patch
stubdom-hack.patch
0001-xen-arm-Create-dom0less-domUs-earlier.patch
0002-xen-arm-Boot-modules-should-always-be-scrubbed-if-bo.patch
......@@ -292,6 +302,28 @@ source="https://downloads.xenproject.org/release/$pkgname/$pkgver/$pkgname-$pkgv
xsa377.patch
xen.git-e06d0c113e0067b86186db94aabae9c91aa09f35.patch
xsa378-4.13-0a.patch
xsa378-4.13-0b.patch
xsa378-4.13-0c.patch
xsa378-4.13-1.patch
xsa378-4.13-2.patch
xsa378-4.13-3.patch
xsa378-4.13-4.patch
xsa378-4.13-5.patch
xsa378-4.13-6.patch
xsa378-4.13-7.patch
xsa378-4.13-8.patch
xsa379-4.14.patch
xsa380-4.13-1.patch
xsa380-4.13-2.patch
xsa382.patch
xsa383.patch
hotplug-Linux-iscsi-block-handle-lun-1.patch
xenstored.initd
......@@ -364,7 +396,7 @@ prepare() {
update_config_sub || return 1
msg "Autoreconf..."
autoreconf || return 1
autoreconf --install
unset CFLAGS
unset LDFLAGS
......@@ -547,6 +579,7 @@ f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2
79cb1b6b81b17cb87a064dfe3548949dfb80f64f203cac11ef327102b7a25794549ce2d9c019ebf05f752214da8e05065e9219d069e679c0ae5bee3d090c685e xen-hotplug-lockfd.patch
e76816c6ad0e91dc5f81947f266da3429b20e6d976c3e8c41202c6179532eec878a3f0913921ef3ac853c5dbad8082da3c9cd53b65081910516feb492577b7fc xen-fd-is-file.c
2094ea964fa610b2bf72fd2c7ede7e954899a75c0f5b08030cf1d74460fb759ade84866176e32f8fe29c921dfdc6dafd2b31e23ab9b0a3874d3dceeabdd1913b xenqemu-xattr-size-max.patch
6c28470dab368ce94d94db9e66954e4d915394ea730f6d4abb198ae122dbd7412453d6d8054f0a348d43d7f807fb13294363162f8b19f47311e802ffa9a40a90 stubdom-hack.patch
57bae240ac94fd35e8a2a39a06fdc4178a1cf0782832a77fd768ca3c773d8b27d76692703ac481733874e5a0198ef20d7319ea504c6b7836d4edd0a198adede1 0001-xen-arm-Create-dom0less-domUs-earlier.patch
2b47e612c23c8bb65a2432f93a877f592b75b8de2ae97d5a22ed37588594a38b740f5c3e0694dd7ceff5f949e24ff38113e543038d5ae22e8c1dc142c3e8d1b3 0002-xen-arm-Boot-modules-should-always-be-scrubbed-if-bo.patch
7010225962e7c22d6aa2e14d10e5091b3876a76f195e9725e7f175b108f933ea9ad5a080663d27279ccd20e2d4e344620ec414e17437d971a8f3cb9420520696 xsa373-4.13-1.patch
......@@ -556,6 +589,23 @@ bb04c86c57058b674237d6d81b8a5a600e39e6c2144ae72b7312ee7e72d4305c5fa4b8d5194a0aec
8fb77d16b60efa4307c0008c8773a9d5341f1b0577c6de46fe6e5630a7243c7b2eb55089a1ce778e4ed03ebf29fad69042746121b50cb953016e95a60549a728 xsa373-4.13-5.patch
9e354ab79cc182ca71c1d60be18b207c0254f35cf89f5020791d98a081bafc0a84ae7320ceb9c6215ccc4846e2daa258f72f577268bda84f5c7153e0bc03cabb xsa375-4.13.patch
9c104793facd9d595a1cbca21034d700e7e25398cad1440131258a349cd60d6145e5847e9c4bd066a5d63a63aceb8995456126a51b6d3ca872cd90717ebc2dbe xsa377.patch
6640e6c1373b1a90a5d0ff8a7f21bf98afa35cfb668b68ce53d83f4b6f4e912c7dec818025c753204301b3e2113fa69d17be0b785ed4cd458fe890c121d6ae2f xen.git-e06d0c113e0067b86186db94aabae9c91aa09f35.patch
b8feb4579e422c6ebe16e9f66bedf710151e745c84c75574f985f9a4ca655510955b05da195b584dff4af320e65093d75ac4e5a300989cd225037bf986cf0f73 xsa378-4.13-0a.patch
4f80e767621cd2b0d2c5d1b650e5d85753bfd97aba972398ca5991e6da093e2f7c1fa3eae8b8dec27826b2a90a4970b4eb3926cf76aa88d1d13a34041698a322 xsa378-4.13-0b.patch
d59ad3bf524261e58a7abb495e9723142e5c1ece0d2d0022167abec182dfc3cd77d3572ca29381c2f6eaa21ebfbde603daf4cbd2d6e33a3c5d6eaafa46353f5f xsa378-4.13-0c.patch
d143906731257c5cb551daf73dfe3b711d6f36eeaf2078c3ea95f438d750248e36fcd015a53e02a426ce850fbe933ee5844301dc405a2b2a36f341cbc7a1da75 xsa378-4.13-1.patch
8429fac01762219afa06b7b3cb5e53e9363f28f4e9cdabeed7523161a47188b8a86f8d80d1a9d78c5e5677bec6685ce436cc47d06b67e0cd4e816e726618dc0f xsa378-4.13-2.patch
69281c4f5d06cc085f06d107dd430b61506c7a3fb03025604b716388f1c944427736c2adabe10d6cdb7f40d53df1f0722ece2f4a7666f898bbcd362cdee25b79 xsa378-4.13-3.patch
c35b4397fd4999331262a2f2439f3d4ff80d3efbd479f192fc644cff31b5f1e968cfd785191158262db4cfcbaf7c862c5d26b78ec5e53b95cc3bad48d21d3a32 xsa378-4.13-4.patch
2ab2a8e934d0a40484350da17d7f65f0ae34b80574af48ebb23e66d386f51a8e8e66f3b9b1a2093bc8cf96f379b54c8a7cd874b4b3a5efcfcc0b43ecc3546a75 xsa378-4.13-5.patch
c27c9bead789347824b41450c78bd1287f04996cdc0342ae7b057b20d1ee98d10443a08fddaa8102dffad30900cebbd0927311298fbecfee2d6689908c49f7af xsa378-4.13-6.patch
9632bda9f463f0fb7a0470a01aca67c8b622e7820e9bd905d5cd15eb57d4ad075be1f6a4ccac4fe6de3a81591871a899a10b7535fafcc4245c9f5647ac905924 xsa378-4.13-7.patch
0b880a5cee5e50563252448ec5f2e4da0c70bd9d710b871d5319787ef37121521d66e32987cf24a52a2f78785f8b92367d929c3752ffff4ca06448a99d611925 xsa378-4.13-8.patch
3305caa9bde065962203b8c6b766d67c0fb19048116d5785a7734fad8a9cab87fde71b31192bdcc5e13277d3530c2789f52c88d1d7491638a6b8edcb5241839d xsa379-4.14.patch
e8284587f60e15cb34db381924e7833e634e9a9320fc206f6ecf411ee5c586a2f1efba0972dc4ba089b888402f9ff919c9b0493ddd8f996f226ba0d063d5efc0 xsa380-4.13-1.patch
277952033c67001883ed88fb8440c2b44c521e5b869bf6efa171f568ccfc13400ef341780bd6eba587ec484e455cbc57ba5e1186faa48691942a70790ee4f7cb xsa380-4.13-2.patch
6c5e3388fcfb0dcae30d5f315bf95d263c82519d2cbf2a8a88d280b5b0b1c1ed4cce7a1a85fabbf57c785ad9dc23e8e5e4773c631c00e036aada604ff8e7fa03 xsa382.patch
d5106df26e6c4512d88ea6748c403117a2b61cb40f6d6c08a76f160352b79f94dd67cbb3419a33f2c6cfc7bbd644baed0498e366a6bf00d8031df728a47f36ea xsa383.patch
8c9cfc6afca325df1d8026e21ed03fa8cd2c7e1a21a56cc1968301c5ab634bfe849951899e75d328951d7a41273d1e49a2448edbadec0029ed410c43c0549812 hotplug-Linux-iscsi-block-handle-lun-1.patch
52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50 xenstored.initd
093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0 xenstored.confd
......
--- xen-4.15.0.orig/stubdom/Makefile
+++ xen-4.15.0/stubdom/Makefile
@@ -186,7 +186,7 @@
rm $@ -rf || :
mv gmp-$(GMP_VERSION) $@
#patch -d $@ -p0 < gmp.patch
- cd $@; CPPFLAGS="-isystem $(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf/include $(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" CC=$(CC) $(GMPEXT) ./configure --disable-shared --enable-static --disable-fft --without-readline --prefix=$(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf --libdir=$(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf/lib --build=`gcc -dumpmachine` --host=$(GNU_TARGET_ARCH)-xen-elf
+ cd $@; CPPFLAGS="-isystem $(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf/include $(TARGET_CPPFLAGS)" CFLAGS="$(TARGET_CFLAGS)" CC=$(CC) $(GMPEXT) ./configure --disable-shared --enable-static --disable-fft --without-readline --prefix=$(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf --libdir=$(CROSS_PREFIX)/$(GNU_TARGET_ARCH)-xen-elf/lib --host=$(GNU_TARGET_ARCH)-xen-elf
sed -i 's/#define HAVE_OBSTACK_VPRINTF 1/\/\/#define HAVE_OBSTACK_VPRINTF 1/' $@/config.h
touch $@
From e06d0c113e0067b86186db94aabae9c91aa09f35 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Thu, 15 Jul 2021 09:32:21 +0200
Subject: [PATCH] VT-d: adjust domid map updating when unmapping context
When an earlier error occurred, cleaning up the domid mapping data is
wrong, as references likely still exist. The only exception to this is
when the actual unmapping worked, but some flush failed (supposedly
impossible after XSA-373). The guest will get crashed in such a case
though, so add fallback cleanup to domain destruction to cover this
case. This in turn makes it desirable to silence the dprintk() in
domain_iommu_domid().
Note that no error will be returned anymore when the lookup fails - in
the common case lookup failure would already have caused
domain_context_unmap_one() to fail, yet even from a more general
perspective it doesn't look right to fail domain_context_unmap() in such
a case when this was the last device, but not when any earlier unmap was
otherwise successful.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Kevin Tian <kevin.tian@intel.com>
master commit: 32655880057ce2829f962d46916ea6cec60f98d3
master date: 2021-06-24 16:29:13 +0200
---
xen/drivers/passthrough/vtd/iommu.c | 39 ++++++++++++++++++-----------
1 file changed, 24 insertions(+), 15 deletions(-)
diff --git a/xen/drivers/passthrough/vtd/iommu.c b/xen/drivers/passthrough/vtd/iommu.c
index 7d1813a615..e4c0e4368e 100644
--- a/xen/drivers/passthrough/vtd/iommu.c
+++ b/xen/drivers/passthrough/vtd/iommu.c
@@ -79,9 +79,11 @@ static int domain_iommu_domid(struct domain *d,
i = find_next_bit(iommu->domid_bitmap, nr_dom, i+1);
}
- dprintk(XENLOG_ERR VTDPREFIX,
- "Cannot get valid iommu domid: domid=%d iommu->index=%d\n",
- d->domain_id, iommu->index);
+ if ( !d->is_dying )
+ dprintk(XENLOG_ERR VTDPREFIX,
+ "Cannot get valid iommu %u domid: %pd\n",
+ iommu->index, d);
+
return -1;
}
@@ -146,6 +148,17 @@ static int context_get_domain_id(struct context_entry *context,
return domid;
}
+static void cleanup_domid_map(struct domain *domain, struct vtd_iommu *iommu)
+{
+ int iommu_domid = domain_iommu_domid(domain, iommu);
+
+ if ( iommu_domid >= 0 )
+ {
+ clear_bit(iommu_domid, iommu->domid_bitmap);
+ iommu->domid_map[iommu_domid] = 0;
+ }
+}
+
static int iommus_incoherent;
static void sync_cache(const void *addr, unsigned int size)
@@ -1753,6 +1766,9 @@ static int domain_context_unmap(struct domain *domain, u8 devfn,
goto out;
}
+ if ( ret )
+ goto out;
+
/*
* if no other devices under the same iommu owned by this domain,
* clear iommu in iommu_bitmap and clear domain_id in domid_bitmp
@@ -1772,19 +1788,8 @@ static int domain_context_unmap(struct domain *domain, u8 devfn,
if ( found == 0 )
{
- int iommu_domid;
-
clear_bit(iommu->index, &dom_iommu(domain)->arch.iommu_bitmap);
-
- iommu_domid = domain_iommu_domid(domain, iommu);
- if ( iommu_domid == -1 )
- {
- ret = -EINVAL;
- goto out;
- }
-
- clear_bit(iommu_domid, iommu->domid_bitmap);
- iommu->domid_map[iommu_domid] = 0;
+ cleanup_domid_map(domain, iommu);
}
out:
@@ -1795,6 +1800,7 @@ static void iommu_domain_teardown(struct domain *d)
{
struct domain_iommu *hd = dom_iommu(d);
struct mapped_rmrr *mrmrr, *tmp;
+ const struct acpi_drhd_unit *drhd;
if ( list_empty(&acpi_drhd_units) )
return;
@@ -1814,6 +1820,9 @@ static void iommu_domain_teardown(struct domain *d)
iommu_free_pagetable(hd->arch.pgd_maddr, agaw_to_level(hd->arch.agaw));
hd->arch.pgd_maddr = 0;
spin_unlock(&hd->arch.mapping_lock);
+
+ for_each_drhd_unit ( drhd )
+ cleanup_domid_map(d, drhd->iommu);
}
static int __must_check intel_iommu_map_page(struct domain *d, dfn_t dfn,
--
2.30.2
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/p2m: fix PoD accounting in guest_physmap_add_entry()
The initial observation was that the mfn_valid() check comes too late:
Neither mfn_add() nor mfn_to_page() (let alone de-referencing the
result of the latter) are valid for MFNs failing this check. Move it up
and - noticing that there's no caller doing so - also add an assertion
that this should never produce "false" here.
In turn this would have meant that the "else" to that if() could now go
away, which didn't seem right at all. And indeed, considering callers
like memory_exchange() or various grant table functions, the PoD
accounting should have been outside of that if() from the very
beginning.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: aea270e3f7c0db696c88a0e94b1ece7abd339c84
master date: 2020-02-21 17:14:38 +0100
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -881,6 +881,12 @@ guest_physmap_add_entry(struct domain *d
if ( p2m_is_foreign(t) )
return -EINVAL;
+ if ( !mfn_valid(mfn) )
+ {
+ ASSERT_UNREACHABLE();
+ return -EINVAL;
+ }
+
p2m_lock(p2m);
P2M_DEBUG("adding gfn=%#lx mfn=%#lx\n", gfn_x(gfn), mfn_x(mfn));
@@ -981,12 +987,13 @@ guest_physmap_add_entry(struct domain *d
}
/* Now, actually do the two-way mapping */
- if ( mfn_valid(mfn) )
+ rc = p2m_set_entry(p2m, gfn, mfn, page_order, t, p2m->default_access);
+ if ( rc == 0 )
{
- rc = p2m_set_entry(p2m, gfn, mfn, page_order, t,
- p2m->default_access);
- if ( rc )
- goto out; /* Failed to update p2m, bail without updating m2p. */
+ pod_lock(p2m);
+ p2m->pod.entry_count -= pod_count;
+ BUG_ON(p2m->pod.entry_count < 0);
+ pod_unlock(p2m);
if ( !p2m_is_grant(t) )
{
@@ -995,22 +1002,7 @@ guest_physmap_add_entry(struct domain *d
gfn_x(gfn_add(gfn, i)));
}
}
- else
- {
- gdprintk(XENLOG_WARNING, "Adding bad mfn to p2m map (%#lx -> %#lx)\n",
- gfn_x(gfn), mfn_x(mfn));
- rc = p2m_set_entry(p2m, gfn, INVALID_MFN, page_order,
- p2m_invalid, p2m->default_access);
- if ( rc == 0 )
- {
- pod_lock(p2m);
- p2m->pod.entry_count -= pod_count;
- BUG_ON(p2m->pod.entry_count < 0);
- pod_unlock(p2m);
- }
- }
-out:
p2m_unlock(p2m);
return rc;
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/p2m: don't ignore p2m_remove_page()'s return value
It's not very nice to return from guest_physmap_add_entry() after
perhaps already having made some changes to the P2M, but this is pre-
existing practice in the function, and imo better than ignoring errors.
Take the liberty and replace an mfn_add() instance with a local variable
already holding the result (as proven by the check immediately ahead).
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: a6b051a87a586347969bfbaa6925ac0f0c845413
master date: 2020-04-03 10:56:10 +0200
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -773,8 +773,7 @@ void p2m_final_teardown(struct domain *d
p2m_teardown_hostp2m(d);
}
-
-static int
+static int __must_check
p2m_remove_page(struct p2m_domain *p2m, unsigned long gfn_l, unsigned long mfn,
unsigned int page_order)
{
@@ -979,9 +978,9 @@ guest_physmap_add_entry(struct domain *d
ASSERT(mfn_valid(omfn));
P2M_DEBUG("old gfn=%#lx -> mfn %#lx\n",
gfn_x(ogfn) , mfn_x(omfn));
- if ( mfn_eq(omfn, mfn_add(mfn, i)) )
- p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(mfn_add(mfn, i)),
- 0);
+ if ( mfn_eq(omfn, mfn_add(mfn, i)) &&
+ (rc = p2m_remove_page(p2m, gfn_x(ogfn), mfn_x(omfn), 0)) )
+ goto out;
}
}
}
@@ -1003,6 +1002,7 @@ guest_physmap_add_entry(struct domain *d
}
}
+ out:
p2m_unlock(p2m);
return rc;
@@ -2690,9 +2690,9 @@ int p2m_change_altp2m_gfn(struct domain
if ( gfn_eq(new_gfn, INVALID_GFN) )
{
mfn = ap2m->get_entry(ap2m, old_gfn, &t, &a, 0, NULL, NULL);
- if ( mfn_valid(mfn) )
- p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K);
- rc = 0;
+ rc = mfn_valid(mfn)
+ ? p2m_remove_page(ap2m, gfn_x(old_gfn), mfn_x(mfn), PAGE_ORDER_4K)
+ : 0;
goto out;
}
From: Jan Beulich <jbeulich@suse.com>
Subject: x86/p2m: don't assert that the passed in MFN matches for a remove
guest_physmap_remove_page() gets handed an MFN from the outside, yet
takes the necessary lock to prevent further changes to the GFN <-> MFN
mapping itself. While some callers, in particular guest_remove_page()
(by way of having called get_gfn_query()), hold the GFN lock already,
various others (most notably perhaps the 2nd instance in
xenmem_add_to_physmap_one()) don't. While it also is an option to fix
all the callers, deal with the issue in p2m_remove_page() instead:
Replace the ASSERT() by a conditional and split the loop into two, such
that all checking gets done before any modification would occur.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: c65ea16dbcafbe4fe21693b18f8c2a3c5d14600e
master date: 2020-04-03 10:56:55 +0200
--- a/xen/arch/x86/mm/p2m.c
+++ b/xen/arch/x86/mm/p2m.c
@@ -779,7 +779,6 @@ p2m_remove_page(struct p2m_domain *p2m,
{
unsigned long i;
gfn_t gfn = _gfn(gfn_l);
- mfn_t mfn_return;
p2m_type_t t;
p2m_access_t a;
@@ -790,15 +789,26 @@ p2m_remove_page(struct p2m_domain *p2m,
ASSERT(gfn_locked_by_me(p2m, gfn));
P2M_DEBUG("removing gfn=%#lx mfn=%#lx\n", gfn_l, mfn);
+ for ( i = 0; i < (1UL << page_order); )
+ {
+ unsigned int cur_order;
+ mfn_t mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
+ &cur_order, NULL);
+
+ if ( p2m_is_valid(t) &&
+ (!mfn_valid(_mfn(mfn)) || mfn + i != mfn_x(mfn_return)) )
+ return -EILSEQ;
+
+ i += (1UL << cur_order) - ((gfn_l + i) & ((1UL << cur_order) - 1));
+ }
+
if ( mfn_valid(_mfn(mfn)) )
{
for ( i = 0; i < (1UL << page_order); i++ )
{
- mfn_return = p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0,
- NULL, NULL);
+ p2m->get_entry(p2m, gfn_add(gfn, i), &t, &a, 0, NULL, NULL);
if ( !p2m_is_grant(t) && !p2m_is_shared(t) && !p2m_is_foreign(t) )
set_gpfn_from_mfn(mfn+i, INVALID_M2P_ENTRY);
- ASSERT( !p2m_is_valid(t) || mfn + i == mfn_x(mfn_return) );
}
}
return p2m_set_entry(p2m, gfn, INVALID_MFN, page_order, p2m_invalid,
From: Jan Beulich <jbeulich@suse.com>
Subject: AMD/IOMMU: correct global exclusion range extending
Besides unity mapping regions, the AMD IOMMU spec also provides for
exclusion ranges (areas of memory not to be subject to DMA translation)
to be specified by firmware in the ACPI tables. The spec does not put
any constraints on the number of such regions.
Blindly assuming all addresses between any two such ranges should also
be excluded can't be right. Since hardware has room for just a single
such range (comprised of the Exclusion Base Register and the Exclusion
Range Limit Register), combine only adjacent or overlapping regions (for
now; this may require further adjustment in case table entries aren't
sorted by address) with matching exclusion_allow_all settings. This
requires bubbling up error indicators, such that IOMMU init can be
failed when concatenation wasn't possible.
Furthermore, since the exclusion range specified in IOMMU registers
implies R/W access, reject requests asking for less permissions (this
will be brought closer to the spec by a subsequent change).
This is part of XSA-378 / CVE-2021-28695.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Paul Durrant <paul@xen.org>
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -117,12 +117,21 @@ static struct amd_iommu * __init find_io
return NULL;
}
-static void __init reserve_iommu_exclusion_range(
- struct amd_iommu *iommu, uint64_t base, uint64_t limit)
+static int __init reserve_iommu_exclusion_range(
+ struct amd_iommu *iommu, uint64_t base, uint64_t limit,
+ bool all, bool iw, bool ir)
{
+ if ( !ir || !iw )
+ return -EPERM;
+
/* need to extend exclusion range? */
if ( iommu->exclusion_enable )
{
+ if ( iommu->exclusion_limit + PAGE_SIZE < base ||
+ limit + PAGE_SIZE < iommu->exclusion_base ||
+ iommu->exclusion_allow_all != all )
+ return -EBUSY;
+
if ( iommu->exclusion_base < base )
base = iommu->exclusion_base;
if ( iommu->exclusion_limit > limit )
@@ -130,16 +139,11 @@ static void __init reserve_iommu_exclusi
}
iommu->exclusion_enable = IOMMU_CONTROL_ENABLED;
+ iommu->exclusion_allow_all = all;
iommu->exclusion_base = base;
iommu->exclusion_limit = limit;
-}
-static void __init reserve_iommu_exclusion_range_all(
- struct amd_iommu *iommu,
- unsigned long base, unsigned long limit)
-{
- reserve_iommu_exclusion_range(iommu, base, limit);
- iommu->exclusion_allow_all = IOMMU_CONTROL_ENABLED;
+ return 0;
}
static void __init reserve_unity_map_for_device(
@@ -177,6 +181,7 @@ static int __init register_exclusion_ran
unsigned long range_top, iommu_top, length;
struct amd_iommu *iommu;
unsigned int bdf;
+ int rc = 0;
/* is part of exclusion range inside of IOMMU virtual address space? */
/* note: 'limit' parameter is assumed to be page-aligned */
@@ -198,10 +203,15 @@ static int __init register_exclusion_ran
if ( limit >= iommu_top )
{
for_each_amd_iommu( iommu )
- reserve_iommu_exclusion_range_all(iommu, base, limit);
+ {
+ rc = reserve_iommu_exclusion_range(iommu, base, limit,
+ true /* all */, iw, ir);
+ if ( rc )
+ break;
+ }
}
- return 0;
+ return rc;
}
static int __init register_exclusion_range_for_device(
@@ -212,6 +222,7 @@ static int __init register_exclusion_ran
unsigned long range_top, iommu_top, length;
struct amd_iommu *iommu;
u16 req;
+ int rc = 0;
iommu = find_iommu_for_device(seg, bdf);
if ( !iommu )
@@ -241,12 +252,13 @@ static int __init register_exclusion_ran
/* register IOMMU exclusion range settings for device */
if ( limit >= iommu_top )
{
- reserve_iommu_exclusion_range(iommu, base, limit);
+ rc = reserve_iommu_exclusion_range(iommu, base, limit,
+ false /* all */, iw, ir);
ivrs_mappings[bdf].dte_allow_exclusion = true;
ivrs_mappings[req].dte_allow_exclusion = true;
}
- return 0;
+ return rc;
}
static int __init register_exclusion_range_for_iommu_devices(
@@ -256,6 +268,7 @@ static int __init register_exclusion_ran
unsigned long range_top, iommu_top, length;
unsigned int bdf;
u16 req;
+ int rc = 0;
/* is part of exclusion range inside of IOMMU virtual address space? */
/* note: 'limit' parameter is assumed to be page-aligned */
@@ -286,8 +299,10 @@ static int __init register_exclusion_ran
/* register IOMMU exclusion range settings */
if ( limit >= iommu_top )
- reserve_iommu_exclusion_range_all(iommu, base, limit);
- return 0;
+ rc = reserve_iommu_exclusion_range(iommu, base, limit,
+ true /* all */, iw, ir);
+
+ return rc;
}
static int __init parse_ivmd_device_select(
From: Jan Beulich <jbeulich@suse.com>
Subject: AMD/IOMMU: correct device unity map handling
Blindly assuming all addresses between any two such ranges, specified by
firmware in the ACPI tables, should also be unity-mapped can't be right.
Nor can it be correct to merge ranges with differing permissions. Track
ranges individually; don't merge at all, but check for overlaps instead.
This requires bubbling up error indicators, such that IOMMU init can be
failed when allocation of a new tracking struct wasn't possible, or an
overlap was detected.
At this occasion also stop ignoring
amd_iommu_reserve_domain_unity_map()'s return value.
This is part of XSA-378 / CVE-2021-28695.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Paul Durrant <paul@xen.org>
--- a/xen/drivers/passthrough/amd/iommu_acpi.c
+++ b/xen/drivers/passthrough/amd/iommu_acpi.c
@@ -146,32 +146,48 @@ static int __init reserve_iommu_exclusio
return 0;
}
-static void __init reserve_unity_map_for_device(
- u16 seg, u16 bdf, unsigned long base,
- unsigned long length, u8 iw, u8 ir)
+static int __init reserve_unity_map_for_device(
+ uint16_t seg, uint16_t bdf, unsigned long base,
+ unsigned long length, bool iw, bool ir)
{
struct ivrs_mappings *ivrs_mappings = get_ivrs_mappings(seg);
- unsigned long old_top, new_top;
+ struct ivrs_unity_map *unity_map = ivrs_mappings[bdf].unity_map;
- /* need to extend unity-mapped range? */
- if ( ivrs_mappings[bdf].unity_map_enable )
+ /* Check for overlaps. */
+ for ( ; unity_map; unity_map = unity_map->next )
{
- old_top = ivrs_mappings[bdf].addr_range_start +
- ivrs_mappings[bdf].addr_range_length;
- new_top = base + length;
- if ( old_top > new_top )
- new_top = old_top;
- if ( ivrs_mappings[bdf].addr_range_start < base )
- base = ivrs_mappings[bdf].addr_range_start;
- length = new_top - base;
- }
-
- /* extend r/w permissioms and keep aggregate */
- ivrs_mappings[bdf].write_permission = iw;
- ivrs_mappings[bdf].read_permission = ir;
- ivrs_mappings[bdf].unity_map_enable = true;
- ivrs_mappings[bdf].addr_range_start = base;
- ivrs_mappings[bdf].addr_range_length = length;
+ /*
+ * Exact matches are okay. This can in particular happen when
+ * register_exclusion_range_for_device() calls here twice for the