Commit de881742 authored by Daniel Néri's avatar Daniel Néri Committed by Leo

main/xen: fix XSA-364/CVE-2021-26933

On ARM, the cache may not be cleaned for newly allocated scrubbed
pages.
parent 3cb6ae6d
......@@ -2,7 +2,7 @@
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=xen
pkgver=4.14.1
pkgrel=2
pkgrel=3
pkgdesc="Xen hypervisor"
url="https://www.xenproject.org/"
arch="x86_64 armhf aarch64" # enable armv7 when builds with gcc8
......@@ -237,6 +237,8 @@ options="!strip"
# - CVE-2020-29571 XSA-359
# 4.14.1-r2:
# - CVE-????-????? XSA-360
# 4.14.1-r3:
# - CVE-2021-26933 XSA-364
case "$CARCH" in
......@@ -296,6 +298,7 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/xen-$pkgver.tar.gz
hotplug-Linux-iscsi-block-handle-lun-1.patch
xsa360-4.14.patch
xsa364.patch
qemu-xen-time64.patch
gcc10-etherboot-enum.patch
......@@ -543,6 +546,7 @@ f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2
2094ea964fa610b2bf72fd2c7ede7e954899a75c0f5b08030cf1d74460fb759ade84866176e32f8fe29c921dfdc6dafd2b31e23ab9b0a3874d3dceeabdd1913b xenqemu-xattr-size-max.patch
8c9cfc6afca325df1d8026e21ed03fa8cd2c7e1a21a56cc1968301c5ab634bfe849951899e75d328951d7a41273d1e49a2448edbadec0029ed410c43c0549812 hotplug-Linux-iscsi-block-handle-lun-1.patch
f39ae56876f61ed224073985dda83037e75e6f2ab0cd0b0f920186812c6d1e6ec52494b3fd0f25cd9d6606d85061a6555cb952719d084cf8e256ef93080b75f9 xsa360-4.14.patch
aea8b37ae5c772c4928f8b644dadc59891e7d0e0d50461c66ca106b391fd984e2b8def089d01954f88675ddc93c114d4d27c0ccb7384ff9a1807f081f33805e5 xsa364.patch
231b5d0abf6420722534bf48b4f263bdf70dd258f5f34b344f230b4e166edb3ebaf769592f40653ea5836b4431ef951ebcf1995f09e2beb4a591edd3b024a652 qemu-xen-time64.patch
e72ae17cb80c78412996845b996e442cdc21ee4b840c8b7ebacca101619b3d47104bf6b6330520aecf0d7ccf2699826b4f2a649c729b21d5ac81b37f7fc505fc gcc10-etherboot-enum.patch
52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50 xenstored.initd
......
From dadb5b4b21c904ce59024c686eb1c55be8f46c52 Mon Sep 17 00:00:00 2001
From: Julien Grall <jgrall@amazon.com>
Date: Thu, 21 Jan 2021 10:16:08 +0000
Subject: [PATCH] xen/page_alloc: Only flush the page to RAM once we know they
are scrubbed
At the moment, each page is flushed to RAM just after the allocator
has found some free pages. However, this happens before checking
whether the page was scrubbed.
As a consequence, on Arm, a guest may be able to access the old content
of the scrubbed pages if it has cache disabled (default at boot) and
the content didn't reach the Point of Coherency.
The flush is now moved to after we know the content of the page will not
change. This also has the benefit of reducing the amount of work done
while the heap_lock is held.
This is XSA-364.
Fixes: 307c3be3ccb2 ("mm: Don't scrub pages while holding heap lock in alloc_heap_pages()")
Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
---
xen/common/page_alloc.c | 14 +++++++++-----
1 file changed, 9 insertions(+), 5 deletions(-)
diff --git a/xen/common/page_alloc.c b/xen/common/page_alloc.c
index 02ac1fa613e7..1744e6faa5c4 100644
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -924,6 +924,7 @@ static struct page_info *alloc_heap_pages(
bool need_tlbflush = false;
uint32_t tlbflush_timestamp = 0;
unsigned int dirty_cnt = 0;
+ mfn_t mfn;
/* Make sure there are enough bits in memflags for nodeID. */
BUILD_BUG_ON((_MEMF_bits - _MEMF_node) < (8 * sizeof(nodeid_t)));
@@ -1022,11 +1023,6 @@ static struct page_info *alloc_heap_pages(
pg[i].u.inuse.type_info = 0;
page_set_owner(&pg[i], NULL);
- /* Ensure cache and RAM are consistent for platforms where the
- * guest can control its own visibility of/through the cache.
- */
- flush_page_to_ram(mfn_x(page_to_mfn(&pg[i])),
- !(memflags & MEMF_no_icache_flush));
}
spin_unlock(&heap_lock);
@@ -1062,6 +1058,14 @@ static struct page_info *alloc_heap_pages(
if ( need_tlbflush )
filtered_flush_tlb_mask(tlbflush_timestamp);
+ /*
+ * Ensure cache and RAM are consistent for platforms where the guest
+ * can control its own visibility of/through the cache.
+ */
+ mfn = page_to_mfn(pg);
+ for ( i = 0; i < (1U << order); i++ )
+ flush_page_to_ram(mfn_x(mfn) + i, !(memflags & MEMF_no_icache_flush));
+
return pg;
}
--
2.17.1
Markdown is supported
0% or .
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment