Commit b5fc2777 authored by Leonardo Arena

main/libvirt: security fix (CVE-2013-6458 CVE-2014-1447)

Fixes #2637
parent 90d8ac40
@@ -2,7 +2,7 @@
 pkgname=libvirt
 pkgver=1.1.4
 _ver="${pkgver/_rc/-rc}"
-pkgrel=0
+pkgrel=1
 pkgdesc="A virtualization API for several hypervisor and container systems"
 url="http://libvirt.org/"
 arch="all"
@@ -26,6 +26,12 @@ source="http://libvirt.org/sources/$pkgname-$pkgver.tar.gz
 libvirt.confd
 libvirt.initd
 uclibc-physmem.patch
+CVE-2013-6458_1.patch
+CVE-2013-6458_2.patch
+CVE-2013-6458_3.patch
+CVE-2013-6458_4.patch
+CVE-2014-1447_1.patch
+CVE-2014-1447_2.patch
 "
 if [ "$CARCH" = "x86_64" ]; then
@@ -171,12 +177,30 @@ _common_drivers() {
 md5sums="0b21e97ad5d3c27d18806896e16ef82b libvirt-1.1.4.tar.gz
 1c84a7baeafe0a7f4e9d7ae5180311b7 libvirt.confd
 d897df38c7e7fa1a297aa551108633c9 libvirt.initd
-df9cbfaf8a6e520a4822914a300add4d uclibc-physmem.patch"
+df9cbfaf8a6e520a4822914a300add4d uclibc-physmem.patch
+c553f1262e82ceaf65fb326c18a38fd3 CVE-2013-6458_1.patch
+42c528e2609a6161fe9ca407a34a1a0b CVE-2013-6458_2.patch
+2f6d80c57f1e3d47d58dc10eee073f4b CVE-2013-6458_3.patch
+6ea1ef3d176cbea184470eb1cf90d312 CVE-2013-6458_4.patch
+478b29fb900f1029c788df778c34d76f CVE-2014-1447_1.patch
+a8c3311052ceaf1ace02015fe4b6fa8c CVE-2014-1447_2.patch"
 sha256sums="5910f5cf607a50c606d959e6b1e5d6e67966b7aa7324982afd7f85d9c24bd98f libvirt-1.1.4.tar.gz
 851ab3f9678f0fa9c3ee03f7fc7bd00c4ee86d5f0777eecf9eb1ffe3243adfd1 libvirt.confd
 e9fad203434ffaa6afe524e42a9fb6594edad61cb02b1ca60a68d1a7fe0c31ab libvirt.initd
-807005a8669b7396c9af43ddb2534bb0f073f1e97a5c8b1d9eefc1949f3c2df8 uclibc-physmem.patch"
+807005a8669b7396c9af43ddb2534bb0f073f1e97a5c8b1d9eefc1949f3c2df8 uclibc-physmem.patch
+3a2a0fc3ec7e78677f6c76c4178c05c76e056e21642cb52580aaa2c785aeb6f3 CVE-2013-6458_1.patch
+4f290a3016edc07c0870747035e1bf8bf7d72d0072ced3979659cddadc9ea41a CVE-2013-6458_2.patch
+8c8c7b4e41c46593064192e35fa5391c25d99166f18f000a55c5baf74e031e36 CVE-2013-6458_3.patch
+26a999baa8640f9c03be0fa7842004421211b7be1309a3769695ea0a670369a0 CVE-2013-6458_4.patch
+2d565d00b0090829e3c80aa8cf49f33457da2c286438a8db3609bbf9b831604d CVE-2014-1447_1.patch
+5ff1b38de2dccb8c8ecd7a80de21b45e2ea8276f3a7f50634433714b636df45c CVE-2014-1447_2.patch"
 sha512sums="64455585b8d7e5c0f701e9803ae00d277ee27f9bec92b9890e487cd0298f5f8ff9075d0b2bc81be67e8d8e03bfe58eb2e4fd6292439acd9d22a3f3e7f5b87efc libvirt-1.1.4.tar.gz
 9aba6ab73219a635c64a340ee8887356e644445c9128734cbce73f5d54778378da2f10a190365ad88a7db8bc95b1fb17f0c6ca41fc41bb786c09e1afe84d65dc libvirt.confd
 f48c97f93ef4509a86eda6200b3aae5b2c0c6263403bde933b770fd62240dca27bc439bd29b440ea6a47c8337f8b4511230ed915cb5ff54d9a1cf311863f6fa1 libvirt.initd
-4c885e72dcb11f8523a267917315d4874812eee289fb00075334c1728d0da9bd0e5db6c52d6e3c39bd3fe66d5ccadf9e26ec9dcaa855397e211b9bd1173ac72d uclibc-physmem.patch"
+4c885e72dcb11f8523a267917315d4874812eee289fb00075334c1728d0da9bd0e5db6c52d6e3c39bd3fe66d5ccadf9e26ec9dcaa855397e211b9bd1173ac72d uclibc-physmem.patch
+d6584f20beab27329a873c61ffacc15592b1c382e363eb7975926707b89973f6594650a69e8f5986fde42b14d3bb4396f1668c6393e7dc1187113bad8ed01aa2 CVE-2013-6458_1.patch
+ff84d2c7ab2f4b67a4fc22c3159dc4620bec05c4b47289f7ea9a235253873c9d99ab9f1f8a29918e6bba23c4311546d2a5ab80c34566e6fd943828fd962ba98f CVE-2013-6458_2.patch
+e2fc3481efa2c0260bffd4df23c6dddf6024e088d606bf1e307d91b5b25f3a1ae6addc8be869eb83d5717ecffeebb2bfe975420e6d1fbb51474c9022058f76e6 CVE-2013-6458_3.patch
+9d8e734d60c179133c16729ff6535c1b761e7c447863d884cd9b20266bb15a2c3371b6e90206d8bae1cb7e00c464f863d9023748bf210f4624ce0f3769a9c928 CVE-2013-6458_4.patch
+36c8da3138db811d6fcc21aef2744f8a0a7c753f21f66b054fae01de81bafcf5cd7bce6b3cc0e647b7e701c009db2ab13369f508d7f4cb5960d75100fe5bd529 CVE-2014-1447_1.patch
+188e23b6e08609eb4e0a6c4fffd0cf5743df06520c06473c197585f9e50c76f052da01fb0d033cd1f585704ce74af017b4d96716469c4fa2c44e5e8a7d08968b CVE-2014-1447_2.patch"
From 3b56425938e2f97208d5918263efa0d6439e4ecd Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Fri, 20 Dec 2013 15:41:04 +0100
Subject: [PATCH] qemu: Fix job usage in virDomainGetBlockIoTune
CVE-2013-6458
Every API that is going to begin a job should do that before fetching
data from vm->def.
---
src/qemu/qemu_driver.c | 11 +++++------
1 files changed, 5 insertions(+), 6 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 6f0de70..1949abe 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -15064,12 +15064,6 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
goto cleanup;
}
- device = qemuDiskPathToAlias(vm, disk, NULL);
-
- if (!device) {
- goto cleanup;
- }
-
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
goto cleanup;
@@ -15077,6 +15071,11 @@ qemuDomainGetBlockIoTune(virDomainPtr dom,
&persistentDef) < 0)
goto endjob;
+ device = qemuDiskPathToAlias(vm, disk, NULL);
+ if (!device) {
+ goto endjob;
+ }
+
if (flags & VIR_DOMAIN_AFFECT_LIVE) {
priv = vm->privateData;
qemuDomainObjEnterMonitor(driver, vm);
--
1.7.1
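
All four CVE-2013-6458 patches in this series implement the rule stated in the commit message above: acquire the per-domain job before reading anything out of vm->def, because the domain lock is dropped while waiting for the job and a concurrent detach can free the disk that was looked up earlier. The following is a minimal standalone sketch of that ordering, not libvirt code; the names (struct domain, find_disk, the job mutex) are hypothetical stand-ins for the real qemu driver objects and calls.

/* Sketch only: a pthread mutex stands in for the qemu driver job. */
#include <pthread.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>

struct disk { char alias[32]; };

struct domain {
    pthread_mutex_t job;   /* stand-in for qemuDomainObjBeginJob/EndJob */
    struct disk *disks;    /* stand-in for vm->def->disks */
    size_t ndisks;
};

static struct disk *find_disk(struct domain *dom, const char *alias)
{
    for (size_t i = 0; i < dom->ndisks; i++)
        if (strcmp(dom->disks[i].alias, alias) == 0)
            return &dom->disks[i];
    return NULL;
}

/* Correct ordering: take the job first, then look up and use the disk
 * while the job is still held, so no concurrent detach can free it. */
static int get_block_stats(struct domain *dom, const char *alias)
{
    int ret = -1;
    struct disk *disk;

    if (pthread_mutex_lock(&dom->job) != 0)          /* "BeginJob" */
        return -1;

    disk = find_disk(dom, alias);                    /* safe under the job */
    if (!disk)
        goto endjob;

    printf("querying stats for %s\n", disk->alias);  /* monitor call here */
    ret = 0;

endjob:
    pthread_mutex_unlock(&dom->job);                 /* "EndJob" */
    return ret;
}

int main(void)
{
    struct disk disks[] = { { "virtio-disk0" } };
    struct domain dom = { PTHREAD_MUTEX_INITIALIZER, disks, 1 };
    return get_block_stats(&dom, "virtio-disk0") == 0 ? 0 : 1;
}

The buggy order did the lookup before the wait, so the pointer could refer to freed memory by the time the job was granted; moving the lookup after the job acquisition is exactly what the hunks above and below do.
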
From f93d2caa070f6197ab50d372d286018b0ba6bbd8 Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Fri, 20 Dec 2013 15:04:09 +0100
Subject: [PATCH] qemu: Fix job usage in qemuDomainBlockJobImpl
CVE-2013-6458
Every API that is going to begin a job should do that before fetching
data from vm->def.
---
src/qemu/qemu_driver.c | 22 +++++++++++-----------
1 files changed, 11 insertions(+), 11 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 1e9cd28..0ed5b6b 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -14249,16 +14249,25 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
goto cleanup;
}
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
+ goto cleanup;
+
+ if (!virDomainObjIsActive(vm)) {
+ virReportError(VIR_ERR_OPERATION_INVALID, "%s",
+ _("domain is not running"));
+ goto endjob;
+ }
+
device = qemuDiskPathToAlias(vm, path, &idx);
if (!device)
- goto cleanup;
+ goto endjob;
disk = vm->def->disks[idx];
if (mode == BLOCK_JOB_PULL && disk->mirror) {
virReportError(VIR_ERR_BLOCK_COPY_ACTIVE,
_("disk '%s' already in active block copy job"),
disk->dst);
- goto cleanup;
+ goto endjob;
}
if (mode == BLOCK_JOB_ABORT &&
(flags & VIR_DOMAIN_BLOCK_JOB_ABORT_PIVOT) &&
@@ -14266,15 +14275,6 @@ qemuDomainBlockJobImpl(virDomainObjPtr vm,
virReportError(VIR_ERR_OPERATION_INVALID,
_("pivot of disk '%s' requires an active copy job"),
disk->dst);
- goto cleanup;
- }
-
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_MODIFY) < 0)
- goto cleanup;
-
- if (!virDomainObjIsActive(vm)) {
- virReportError(VIR_ERR_OPERATION_INVALID, "%s",
- _("domain is not running"));
goto endjob;
}
--
1.7.1
From b799259583bd65c0b2f5042e6c3ff19637ade881 Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Fri, 20 Dec 2013 14:50:02 +0100
Subject: [PATCH] qemu: Avoid using stale data in virDomainGetBlockInfo
CVE-2013-6458
Generally, every API that is going to begin a job should do that before
fetching data from vm->def. However, qemuDomainGetBlockInfo does not
know whether it will have to start a job or not before checking vm->def.
To avoid using disk alias that might have been freed while we were
waiting for a job, we use its copy. In case the disk was removed in the
meantime, we will fail with "cannot find statistics for device '...'"
error message.
---
src/qemu/qemu_driver.c | 17 ++++++++++++-----
1 files changed, 12 insertions(+), 5 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index eff3ac4..1e9cd28 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9788,10 +9788,12 @@ cleanup:
}
-static int qemuDomainGetBlockInfo(virDomainPtr dom,
- const char *path,
- virDomainBlockInfoPtr info,
- unsigned int flags) {
+static int
+qemuDomainGetBlockInfo(virDomainPtr dom,
+ const char *path,
+ virDomainBlockInfoPtr info,
+ unsigned int flags)
+{
virQEMUDriverPtr driver = dom->conn->privateData;
virDomainObjPtr vm;
int ret = -1;
@@ -9803,6 +9805,7 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
int idx;
int format;
virQEMUDriverConfigPtr cfg = NULL;
+ char *alias = NULL;
virCheckFlags(0, -1);
@@ -9909,13 +9912,16 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
virDomainObjIsActive(vm)) {
qemuDomainObjPrivatePtr priv = vm->privateData;
+ if (VIR_STRDUP(alias, disk->info.alias) < 0)
+ goto cleanup;
+
if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
goto cleanup;
if (virDomainObjIsActive(vm)) {
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockExtent(priv->mon,
- disk->info.alias,
+ alias,
&info->allocation);
qemuDomainObjExitMonitor(driver, vm);
} else {
@@ -9929,6 +9935,7 @@ static int qemuDomainGetBlockInfo(virDomainPtr dom,
}
cleanup:
+ VIR_FREE(alias);
virStorageFileFreeMetadata(meta);
VIR_FORCE_CLOSE(fd);
if (vm)
--
1.7.1
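
qemuDomainGetBlockInfo (the patch above) is the one exception: it cannot take the job before inspecting vm->def, so it copies the only thing it still needs, the disk alias, before waiting, and uses the copy afterwards. Below is a small standalone illustration of that copy-before-wait idea, with hypothetical stand-in functions (lookup_alias, begin_job, query_extent) rather than real libvirt APIs.

#define _POSIX_C_SOURCE 200809L   /* for strdup */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Stand-in for reading the alias out of vm->def (it may be freed later). */
static const char *lookup_alias(void) { return "virtio-disk0"; }

/* Stand-ins for the job calls; begin_job may block and the domain
 * definition can change while we wait. */
static void begin_job(void) { }
static void end_job(void) { }

static int query_extent(const char *alias)
{
    printf("query-block for %s\n", alias);
    return 0;
}

int main(void)
{
    int ret = EXIT_FAILURE;
    char *alias = strdup(lookup_alias());   /* copy before waiting */
    if (!alias)
        return EXIT_FAILURE;

    begin_job();
    /* The original string may be gone by now; the copy stays valid, and
     * a disk removed in the meantime simply makes the query fail. */
    if (query_extent(alias) == 0)
        ret = EXIT_SUCCESS;
    end_job();

    free(alias);
    return ret;
}
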
From db86da5ca2109e4006c286a09b6c75bfe10676ad Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Thu, 19 Dec 2013 22:10:04 +0100
Subject: [PATCH] qemu: Do not access stale data in virDomainBlockStats
CVE-2013-6458
https://bugzilla.redhat.com/show_bug.cgi?id=1043069
When virDomainDetachDeviceFlags is called concurrently to
virDomainBlockStats: libvirtd may crash because qemuDomainBlockStats
finds a disk in vm->def before getting a job on a domain and uses the
disk pointer after getting the job. However, the domain in unlocked
while waiting on a job condition and thus data behind the disk pointer
may disappear. This happens when thread 1 runs
virDomainDetachDeviceFlags and enters monitor to actually remove the
disk. Then another thread starts running virDomainBlockStats, finds the
disk in vm->def, and while it's waiting on the job condition (owned by
the first thread), the first thread finishes the disk removal. When the
second thread gets the job, the memory pointed to by the disk pointer is
already gone.
That said, every API that is going to begin a job should do that before
fetching data from vm->def.
---
src/qemu/qemu_driver.c | 17 ++++++-----------
1 files changed, 6 insertions(+), 11 deletions(-)
diff --git a/src/qemu/qemu_driver.c b/src/qemu/qemu_driver.c
index 004ec88..eff3ac4 100644
--- a/src/qemu/qemu_driver.c
+++ b/src/qemu/qemu_driver.c
@@ -9028,34 +9028,29 @@ qemuDomainBlockStats(virDomainPtr dom,
if (virDomainBlockStatsEnsureACL(dom->conn, vm->def) < 0)
goto cleanup;
+ if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
+ goto cleanup;
+
if (!virDomainObjIsActive(vm)) {
virReportError(VIR_ERR_OPERATION_INVALID,
"%s", _("domain is not running"));
- goto cleanup;
+ goto endjob;
}
if ((idx = virDomainDiskIndexByName(vm->def, path, false)) < 0) {
virReportError(VIR_ERR_INVALID_ARG,
_("invalid path: %s"), path);
- goto cleanup;
+ goto endjob;
}
disk = vm->def->disks[idx];
if (!disk->info.alias) {
virReportError(VIR_ERR_INTERNAL_ERROR,
_("missing disk device alias name for %s"), disk->dst);
- goto cleanup;
+ goto endjob;
}
priv = vm->privateData;
- if (qemuDomainObjBeginJob(driver, vm, QEMU_JOB_QUERY) < 0)
- goto cleanup;
-
- if (!virDomainObjIsActive(vm)) {
- virReportError(VIR_ERR_OPERATION_INVALID,
- "%s", _("domain is not running"));
- goto endjob;
- }
qemuDomainObjEnterMonitor(driver, vm);
ret = qemuMonitorGetBlockStatsInfo(priv->mon,
--
1.7.1
From 173c2914734eb5c32df6d35a82bf503e12261bcf Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Thu, 9 Jan 2014 22:26:40 +0100
Subject: [PATCH] Don't crash if a connection closes early
https://bugzilla.redhat.com/show_bug.cgi?id=1047577
When a client closes its connection to libvirtd early during
virConnectOpen, more specifically just after making
REMOTE_PROC_CONNECT_SUPPORTS_FEATURE call to check if
VIR_DRV_FEATURE_PROGRAM_KEEPALIVE is supported without even waiting for
the result, libvirtd may crash due to a race in keep-alive
initialization. Once receiving the REMOTE_PROC_CONNECT_SUPPORTS_FEATURE
call, the daemon's event loop delegates it to a worker thread. In case
the event loop detects EOF on the connection and calls
virNetServerClientClose before the worker thread starts to handle
REMOTE_PROC_CONNECT_SUPPORTS_FEATURE call, client->keepalive will be
disposed by the time virNetServerClientStartKeepAlive gets called from
remoteDispatchConnectSupportsFeature. Because the flow is common for
both authenticated and read-only connections, even unprivileged clients
may cause the daemon to crash.
To avoid the crash, virNetServerClientStartKeepAlive needs to check if
the connection is still open before starting keep-alive protocol.
Every libvirt release since 0.9.8 is affected by this bug.
---
src/rpc/virnetserverclient.c | 15 ++++++++++++++-
1 files changed, 14 insertions(+), 1 deletions(-)
diff --git a/src/rpc/virnetserverclient.c b/src/rpc/virnetserverclient.c
index 8aebeb0..7830b7f 100644
--- a/src/rpc/virnetserverclient.c
+++ b/src/rpc/virnetserverclient.c
@@ -1539,9 +1539,22 @@ cleanup:
int
virNetServerClientStartKeepAlive(virNetServerClientPtr client)
{
- int ret;
+ int ret = -1;
+
virObjectLock(client);
+
+ /* The connection might have been closed before we got here and thus the
+ * keepalive object could have been removed too.
+ */
+ if (!client->sock) {
+ virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
+ _("connection not open"));
+ goto cleanup;
+ }
+
ret = virKeepAliveStart(client->keepalive, 0, 0);
+
+cleanup:
virObjectUnlock(client);
return ret;
}
--
1.7.1
From 066c8ef6c18bc1faf8b3e10787b39796a7a06cc0 Mon Sep 17 00:00:00 2001
From: Jiri Denemark <jdenemar@redhat.com>
Date: Mon, 13 Jan 2014 15:46:24 +0100
Subject: [PATCH] Really don't crash if a connection closes early
https://bugzilla.redhat.com/show_bug.cgi?id=1047577
When writing commit 173c291, I missed the fact that virNetServerClientClose
unlocks the client object before actually clearing client->sock and thus
it is possible to hit a window when client->keepalive is NULL while
client->sock is not NULL. I was thinking client->sock == NULL was a
better check for a closed connection but apparently we have to go with
client->keepalive == NULL to actually fix the crash.
Signed-off-by: Jiri Denemark <jdenemar@redhat.com>
---
src/rpc/virnetserverclient.c | 2 +-
1 files changed, 1 insertions(+), 1 deletions(-)
diff --git a/src/rpc/virnetserverclient.c b/src/rpc/virnetserverclient.c
index 7830b7f..52b4941 100644
--- a/src/rpc/virnetserverclient.c
+++ b/src/rpc/virnetserverclient.c
@@ -1546,7 +1546,7 @@ virNetServerClientStartKeepAlive(virNetServerClientPtr client)
/* The connection might have been closed before we got here and thus the
* keepalive object could have been removed too.
*/
- if (!client->sock) {
+ if (!client->keepalive) {
virReportError(VIR_ERR_INTERNAL_ERROR, "%s",
_("connection not open"));
goto cleanup;
--
1.7.1
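
The two CVE-2014-1447 patches above close a use-after-free between connection teardown and keep-alive start-up: under the client lock, refuse to start keep-alive when the connection is already gone, and test client->keepalive rather than client->sock because the socket is only cleared after the lock has been dropped. Here is a standalone sketch of that guarded start, again with hypothetical types and names rather than the real virNetServerClient structures.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

struct keepalive { int interval; };

struct client {
    pthread_mutex_t lock;
    struct keepalive *keepalive;   /* NULL once the connection is closed */
};

static int client_start_keepalive(struct client *c)
{
    int ret = -1;

    pthread_mutex_lock(&c->lock);

    /* The connection might have been closed before we got here and the
     * keepalive object disposed of with it. */
    if (!c->keepalive) {
        fprintf(stderr, "connection not open\n");
        goto cleanup;
    }

    c->keepalive->interval = 5;    /* stand-in for virKeepAliveStart() */
    ret = 0;

cleanup:
    pthread_mutex_unlock(&c->lock);
    return ret;
}

int main(void)
{
    /* A client whose connection closed early must fail gracefully. */
    struct client closed = { PTHREAD_MUTEX_INITIALIZER, NULL };
    if (client_start_keepalive(&closed) == 0)
        return EXIT_FAILURE;

    struct keepalive ka = { 0 };
    struct client active = { PTHREAD_MUTEX_INITIALIZER, &ka };
    return client_start_keepalive(&active) == 0 ? EXIT_SUCCESS : EXIT_FAILURE;
}
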