Commit 031a7eaf authored by Henrik Riomar's avatar Henrik Riomar

main/xen: security upgrade to 4.13.1

parent 9303d97a
Pipeline #20110 passed with stages
in 8 minutes and 51 seconds
# Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
# Maintainer: Natanael Copa <ncopa@alpinelinux.org>
pkgname=xen
pkgver=4.13.0
pkgrel=4
pkgver=4.13.1
pkgrel=0
pkgdesc="Xen hypervisor"
url="https://www.xenproject.org/"
arch="x86_64 armhf aarch64" # enable armv7 when builds with gcc8
......@@ -166,6 +166,8 @@ options="!strip"
# - CVE-2020-11739 XSA-314
# - CVE-2020-11743 XSA-316
# - CVE-2020-11742 XSA-318
# 4.13.1-r0:
# - CVE-????-????? XSA-312
case "$CARCH" in
x86*)
......@@ -229,12 +231,6 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/xen-$pkgver.tar.gz
drop-test.py.patch
py3-compat.patch
xsa313-1.patch
xsa313-2.patch
xsa314-4.13.patch
xsa316-xen.patch
xsa318.patch
xenstored.initd
xenstored.confd
xenconsoled.initd
......@@ -458,7 +454,7 @@ EOF
}
sha512sums="5b2ded9a2fe3f7ddf40eed1fa9858baead06233a01eb6099cc45b3c78b6c3823acfe7b731910733e87125dfa49d08c53f74c215fb1b320a92b44b87a0a105225 xen-4.13.0.tar.gz
sha512sums="b56d20704155d98d803496cba83eb928e0f986a750831cd5600fc88d0ae772fe1456571654375054043d2da8daca255cc98385ebf08b1b1a75ecf7f4b7a0ee90 xen-4.13.1.tar.gz
2e0b0fd23e6f10742a5517981e5171c6e88b0a93c83da701b296f5c0861d72c19782daab589a7eac3f9032152a0fc7eff7f5362db8fccc4859564a9aa82329cf gmp-4.3.2.tar.bz2
c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a3628bd00ba4d14a54742bc04848110eb3ae8ca25dbfbaabadb grub-0.97.tar.gz
1465b58279af1647f909450e394fe002ca165f0ff4a0254bfa9fe0e64316f50facdde2729d79a4e632565b4500cf4d6c74192ac0dd3bc9fe09129bbd67ba089d lwip-1.3.0.tar.gz
......@@ -480,11 +476,6 @@ e76816c6ad0e91dc5f81947f266da3429b20e6d976c3e8c41202c6179532eec878a3f0913921ef3a
8c9cfc6afca325df1d8026e21ed03fa8cd2c7e1a21a56cc1968301c5ab634bfe849951899e75d328951d7a41273d1e49a2448edbadec0029ed410c43c0549812 hotplug-Linux-iscsi-block-handle-lun-1.patch
61f66bab603778fb41bfe8e85320c15f2bf3e5d8583e077b56a93784dbdb9b2c7c5e55ce18f06b87501429086f8410d102d3ed5f2a77d54bcfa328bc07681f4d drop-test.py.patch
8cb12dbfc05a53898a97d47d71ab6b8a6f81c5e5579fd765b37303faea95c645cb8dedc05e3d064bdf070e93814e00bf8939767acc1127513375bab0fe2f4436 py3-compat.patch
a5443da59c75a786ecd0c5ad5df4c84de8b0f7ac92bc11d840d1fb4c2c33653f7e883640c2081ba594fb1ca92a61f5c970b821a5f2d37c6e666bc2e7da6c8e8f xsa313-1.patch
afc34c39e14b3b3d7bcd5b9bb7d2e6eaeb52fdc8733845cafd0b200c764ebd5a79f540cd818143f99bf084d1a33e50ad1614e5e98af6582412975bd73a5c48dd xsa313-2.patch
6e319c3856ed4a4d96705a258c2654c89a7d645d8b16c03dd257c57d320ee220ffa675eeef615c5bbcf4d5d25b66ceb8b77f57df59da757a3a554a316db074b6 xsa314-4.13.patch
cd6ac97375742bacd55f51062849ba5dcef6026f673d3fb6ab73723befbf52570ea08765af44d636df65b7c16a9dce2fe6c9b6c47b671872ffb83c8121a181df xsa316-xen.patch
66e178a859844a3839333b19934ede5db1d83d8b84bfcce70c51a46077287811a92a8ad2ad60663a88162112d65a867815605202a2c9ca44ba32251b42f0ca23 xsa318.patch
52c43beb2596d645934d0f909f2d21f7587b6898ed5e5e7046799a8ed6d58f7a09c5809e1634fa26152f3fd4f3e7cfa07da7076f01b4a20cc8f5df8b9cb77e50 xenstored.initd
093f7fbd43faf0a16a226486a0776bade5dc1681d281c5946a3191c32d74f9699c6bf5d0ab8de9d1195a2461165d1660788e92a3156c9b3c7054d7b2d52d7ff0 xenstored.confd
3c86ed48fbee0af4051c65c4a3893f131fa66e47bf083caf20c9b6aa4b63fdead8832f84a58d0e27964bc49ec8397251b34e5be5c212c139f556916dc8da9523 xenconsoled.initd
......
From: Jan Beulich <jbeulich@suse.com>
Subject: xenoprof: clear buffer intended to be shared with guests
alloc_xenheap_pages() making use of MEMF_no_scrub is fine for Xen
internally used allocations, but buffers allocated to be shared with
(unprivileged) guests need to be zapped of their prior content.
This is part of XSA-313.
Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wl@xen.org>
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -253,6 +253,9 @@ static int alloc_xenoprof_struct(
return -ENOMEM;
}
+ for ( i = 0; i < npages; ++i )
+ clear_page(d->xenoprof->rawbuf + i * PAGE_SIZE);
+
d->xenoprof->npages = npages;
d->xenoprof->nbuf = nvcpu;
d->xenoprof->bufsize = bufsize;
From: Jan Beulich <jbeulich@suse.com>
Subject: xenoprof: limit consumption of shared buffer data
Since a shared buffer can be written to by the guest, we may only read
the head and tail pointers from there (all other fields should only ever
be written to). Furthermore, for any particular operation the two values
must be read exactly once, with both checks and consumption happening
with the thus read values. (The backtrace related xenoprof_buf_space()
use in xenoprof_log_event() is an exception: The values used there get
re-checked by every subsequent xenoprof_add_sample().)
Since that code needed touching, also fix the double increment of the
lost samples count in case the backtrace related xenoprof_add_sample()
invocation in xenoprof_log_event() fails.
Where code is being touched anyway, add const as appropriate, but take
the opportunity to entirely drop the now unused domain parameter of
xenoprof_buf_space().
This is part of XSA-313.
Reported-by: Ilja Van Sprundel <ivansprundel@ioactive.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: George Dunlap <george.dunlap@citrix.com>
Reviewed-by: Wei Liu <wl@xen.org>
--- a/xen/common/xenoprof.c
+++ b/xen/common/xenoprof.c
@@ -479,25 +479,22 @@ static int add_passive_list(XEN_GUEST_HA
/* Get space in the buffer */
-static int xenoprof_buf_space(struct domain *d, xenoprof_buf_t * buf, int size)
+static int xenoprof_buf_space(int head, int tail, int size)
{
- int head, tail;
-
- head = xenoprof_buf(d, buf, event_head);
- tail = xenoprof_buf(d, buf, event_tail);
-
return ((tail > head) ? 0 : size) + tail - head - 1;
}
/* Check for space and add a sample. Return 1 if successful, 0 otherwise. */
-static int xenoprof_add_sample(struct domain *d, xenoprof_buf_t *buf,
+static int xenoprof_add_sample(const struct domain *d,
+ const struct xenoprof_vcpu *v,
uint64_t eip, int mode, int event)
{
+ xenoprof_buf_t *buf = v->buffer;
int head, tail, size;
head = xenoprof_buf(d, buf, event_head);
tail = xenoprof_buf(d, buf, event_tail);
- size = xenoprof_buf(d, buf, event_size);
+ size = v->event_size;
/* make sure indexes in shared buffer are sane */
if ( (head < 0) || (head >= size) || (tail < 0) || (tail >= size) )
@@ -506,7 +503,7 @@ static int xenoprof_add_sample(struct do
return 0;
}
- if ( xenoprof_buf_space(d, buf, size) > 0 )
+ if ( xenoprof_buf_space(head, tail, size) > 0 )
{
xenoprof_buf(d, buf, event_log[head].eip) = eip;
xenoprof_buf(d, buf, event_log[head].mode) = mode;
@@ -530,7 +527,6 @@ static int xenoprof_add_sample(struct do
int xenoprof_add_trace(struct vcpu *vcpu, uint64_t pc, int mode)
{
struct domain *d = vcpu->domain;
- xenoprof_buf_t *buf = d->xenoprof->vcpu[vcpu->vcpu_id].buffer;
/* Do not accidentally write an escape code due to a broken frame. */
if ( pc == XENOPROF_ESCAPE_CODE )
@@ -539,7 +535,8 @@ int xenoprof_add_trace(struct vcpu *vcpu
return 0;
}
- return xenoprof_add_sample(d, buf, pc, mode, 0);
+ return xenoprof_add_sample(d, &d->xenoprof->vcpu[vcpu->vcpu_id],
+ pc, mode, 0);
}
void xenoprof_log_event(struct vcpu *vcpu, const struct cpu_user_regs *regs,
@@ -570,17 +567,22 @@ void xenoprof_log_event(struct vcpu *vcp
/* Provide backtrace if requested. */
if ( backtrace_depth > 0 )
{
- if ( (xenoprof_buf_space(d, buf, v->event_size) < 2) ||
- !xenoprof_add_sample(d, buf, XENOPROF_ESCAPE_CODE, mode,
- XENOPROF_TRACE_BEGIN) )
+ if ( xenoprof_buf_space(xenoprof_buf(d, buf, event_head),
+ xenoprof_buf(d, buf, event_tail),
+ v->event_size) < 2 )
{
xenoprof_buf(d, buf, lost_samples)++;
lost_samples++;
return;
}
+
+ /* xenoprof_add_sample() will increment lost_samples on failure */
+ if ( !xenoprof_add_sample(d, v, XENOPROF_ESCAPE_CODE, mode,
+ XENOPROF_TRACE_BEGIN) )
+ return;
}
- if ( xenoprof_add_sample(d, buf, pc, mode, event) )
+ if ( xenoprof_add_sample(d, v, pc, mode, event) )
{
if ( is_active(vcpu->domain) )
active_samples++;
--- a/xen/include/xen/xenoprof.h
+++ b/xen/include/xen/xenoprof.h
@@ -61,12 +61,12 @@ struct xenoprof {
#ifndef CONFIG_COMPAT
#define XENOPROF_COMPAT(x) 0
-#define xenoprof_buf(d, b, field) ((b)->field)
+#define xenoprof_buf(d, b, field) ACCESS_ONCE((b)->field)
#else
#define XENOPROF_COMPAT(x) ((x)->is_compat)
-#define xenoprof_buf(d, b, field) (*(!(d)->xenoprof->is_compat ? \
- &(b)->native.field : \
- &(b)->compat.field))
+#define xenoprof_buf(d, b, field) ACCESS_ONCE(*(!(d)->xenoprof->is_compat \
+ ? &(b)->native.field \
+ : &(b)->compat.field))
#endif
struct domain;
From ab49f005f7d01d4004d76f2e295d31aca7d4f93a Mon Sep 17 00:00:00 2001
From: Julien Grall <jgrall@amazon.com>
Date: Thu, 20 Feb 2020 20:54:40 +0000
Subject: [PATCH] xen/rwlock: Add missing memory barrier in the unlock path of
rwlock
The rwlock unlock paths are using atomic_sub() to release the lock.
However the implementation of atomic_sub() rightfully doesn't contain a
memory barrier. On Arm, this means a processor is allowed to re-order
the memory access with the preceding access.
In other words, the unlock may be seen by another processor before all
the memory accesses within the "critical" section.
The rwlock paths already contain barriers indirectly, but they are not
very useful without the counterpart in the unlock paths.
The memory barriers are not necessary on x86 because loads/stores are
not re-ordered with lock instructions.
So add arch_lock_release_barrier() in the unlock paths that will only
add memory barrier on Arm.
Take the opportunity to document each lock paths explaining why a
barrier is not necessary.
This is XSA-314.
Signed-off-by: Julien Grall <jgrall@amazon.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
---
xen/include/xen/rwlock.h | 29 ++++++++++++++++++++++++++++-
1 file changed, 28 insertions(+), 1 deletion(-)
diff --git a/xen/include/xen/rwlock.h b/xen/include/xen/rwlock.h
index 3dfea1ac2a..516486306f 100644
--- a/xen/include/xen/rwlock.h
+++ b/xen/include/xen/rwlock.h
@@ -48,6 +48,10 @@ static inline int _read_trylock(rwlock_t *lock)
if ( likely(!(cnts & _QW_WMASK)) )
{
cnts = (u32)atomic_add_return(_QR_BIAS, &lock->cnts);
+ /*
+ * atomic_add_return() is a full barrier so no need for an
+ * arch_lock_acquire_barrier().
+ */
if ( likely(!(cnts & _QW_WMASK)) )
return 1;
atomic_sub(_QR_BIAS, &lock->cnts);
@@ -64,11 +68,19 @@ static inline void _read_lock(rwlock_t *lock)
u32 cnts;
cnts = atomic_add_return(_QR_BIAS, &lock->cnts);
+ /*
+ * atomic_add_return() is a full barrier so no need for an
+ * arch_lock_acquire_barrier().
+ */
if ( likely(!(cnts & _QW_WMASK)) )
return;
/* The slowpath will decrement the reader count, if necessary. */
queue_read_lock_slowpath(lock);
+ /*
+ * queue_read_lock_slowpath() is using spinlock and therefore is a
+ * full barrier. So no need for an arch_lock_acquire_barrier().
+ */
}
static inline void _read_lock_irq(rwlock_t *lock)
@@ -92,6 +104,7 @@ static inline unsigned long _read_lock_irqsave(rwlock_t *lock)
*/
static inline void _read_unlock(rwlock_t *lock)
{
+ arch_lock_release_barrier();
/*
* Atomically decrement the reader count
*/
@@ -121,11 +134,20 @@ static inline int _rw_is_locked(rwlock_t *lock)
*/
static inline void _write_lock(rwlock_t *lock)
{
- /* Optimize for the unfair lock case where the fair flag is 0. */
+ /*
+ * Optimize for the unfair lock case where the fair flag is 0.
+ *
+ * atomic_cmpxchg() is a full barrier so no need for an
+ * arch_lock_acquire_barrier().
+ */
if ( atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0 )
return;
queue_write_lock_slowpath(lock);
+ /*
+ * queue_write_lock_slowpath() is using spinlock and therefore is a
+ * full barrier. So no need for an arch_lock_acquire_barrier().
+ */
}
static inline void _write_lock_irq(rwlock_t *lock)
@@ -157,11 +179,16 @@ static inline int _write_trylock(rwlock_t *lock)
if ( unlikely(cnts) )
return 0;
+ /*
+ * atomic_cmpxchg() is a full barrier so no need for an
+ * arch_lock_acquire_barrier().
+ */
return likely(atomic_cmpxchg(&lock->cnts, 0, _QW_LOCKED) == 0);
}
static inline void _write_unlock(rwlock_t *lock)
{
+ arch_lock_release_barrier();
/*
* If the writer field is atomic, it can be cleared directly.
* Otherwise, an atomic subtraction will be used to clear it.
--
2.17.1
From: Ross Lagerwall <ross.lagerwall@citrix.com>
Subject: xen/gnttab: Fix error path in map_grant_ref()
Part of XSA-295 (c/s 863e74eb2cffb) inadvertently re-positioned the brackets,
changing the logic. If the _set_status() call fails, the grant_map hypercall
would fail with a status of 1 (rc != GNTST_okay) instead of the expected
negative GNTST_* error.
This error path can be taken due to bad guest state, and causes net/blk-back
in Linux to crash.
This is XSA-316.
Signed-off-by: Ross Lagerwall <ross.lagerwall@citrix.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Julien Grall <jgrall@amazon.com>
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 9fd6e60416..4b5344dc21 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1031,7 +1031,7 @@ map_grant_ref(
{
if ( (rc = _set_status(shah, status, rd, rgt->gt_version, act,
op->flags & GNTMAP_readonly, 1,
- ld->domain_id) != GNTST_okay) )
+ ld->domain_id)) != GNTST_okay )
goto act_release_out;
if ( !act->pin )
From: Jan Beulich <jbeulich@suse.com>
Subject: gnttab: fix GNTTABOP_copy continuation handling
The XSA-226 fix was flawed - the backwards transformation on rc was done
too early, causing a continuation to not get invoked when the need for
preemption was determined at the very first iteration of the request.
This in particular means that all of the status fields of the individual
operations would be left untouched, i.e. set to whatever the caller may
or may not have initialized them to.
This is part of XSA-318.
Reported-by: Pawel Wieczorkiewicz <wipawel@amazon.de>
Tested-by: Pawel Wieczorkiewicz <wipawel@amazon.de>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Juergen Gross <jgross@suse.com>
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3576,8 +3576,7 @@ do_grant_table_op(
rc = gnttab_copy(copy, count);
if ( rc > 0 )
{
- rc = count - rc;
- guest_handle_add_offset(copy, rc);
+ guest_handle_add_offset(copy, count - rc);
uop = guest_handle_cast(copy, void);
}
break;
@@ -3644,6 +3643,9 @@ do_grant_table_op(
out:
if ( rc > 0 || opaque_out != 0 )
{
+ /* Adjust rc, see gnttab_copy() for why this is needed. */
+ if ( cmd == GNTTABOP_copy )
+ rc = count - rc;
ASSERT(rc < count);
ASSERT((opaque_out & GNTTABOP_CMD_MASK) == 0);
rc = hypercall_create_continuation(__HYPERVISOR_grant_table_op, "ihi",
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment