Commit 54ac0229 authored by Linux User, committed by Ariadne Conill

main/xen: XSA-254 XPTI

Add Xen page-table isolation (XPTI) for Xen 4.7.3

More info: http://xenbits.xen.org/xsa/xsa254/README.pti
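For background before the patches themselves: XPTI mitigates Meltdown (CVE-2017-5754, see the README.pti linked above) by running 64-bit PV guests on a per-CPU shadow of the top-level (L4) page table that contains the guest's mappings plus only the small set of Xen mappings the entry/exit stubs need; the full Xen page tables are loaded only after entering the hypervisor. The sketch below is a deliberately simplified illustration of that idea, not Xen's actual algorithm; the function and parameter names are invented for illustration.

    /* Illustrative only: build a per-CPU shadow L4 that keeps the guest's
     * slots and the entry-stub slot, and leaves every other Xen slot
     * unmapped, so Meltdown-style speculative reads performed while the
     * guest runs have nothing hypervisor-owned to hit. */
    typedef unsigned long l4_pgentry_t;

    #define L4_ENTRIES 512

    static void sync_shadow_l4(const l4_pgentry_t *guest_l4,
                               l4_pgentry_t *shadow_l4,
                               unsigned int first_xen_slot, /* assumed layout */
                               unsigned int stub_slot)      /* assumed layout */
    {
        unsigned int i;

        for ( i = 0; i < L4_ENTRIES; i++ )
        {
            if ( i < first_xen_slot || i == stub_slot )
                shadow_l4[i] = guest_l4[i];  /* keep: guest half + stubs */
            else
                shadow_l4[i] = 0;            /* drop: Xen's own mappings */
        }
    }

In the real series this copying happens in assembly on the exit-to-guest path (visible in patch 4's restore_all_guest hunks as the rep movsq sequence), and the shadow lives in the per-CPU root_pgt allocated by setup_cpu_root_pgt().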
From 5a013cb9851d1deeaeaa8564f292940a99dbb1d1 Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 17 Jan 2018 17:22:34 +0100
Subject: [PATCH 1/4] x86/entry: Remove support for partial cpu_user_regs
 frames

Save all GPRs on entry to Xen.

The entry_int82() path is via a DPL1 gate, only usable by 32bit PV
guests, so it can get away with only saving the 32bit registers. All
other entrypoints can be reached from 32 or 64bit contexts.

This is part of XSA-254.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Acked-by: Jan Beulich <jbeulich@suse.com>
master commit: f9eb74789af77e985ae653193f3622263499f674
master date: 2018-01-05 19:57:07 +0000
(cherry picked from commit 0e6c6fc449000d97f9fa87ed1fbe23f0cf21406b)
---
tools/tests/x86_emulator/x86_emulate.c | 1 -
xen/arch/x86/domain.c | 1 -
xen/arch/x86/traps.c | 2 -
xen/arch/x86/x86_64/compat/entry.S | 7 ++-
xen/arch/x86/x86_64/entry.S | 12 ++--
xen/arch/x86/x86_64/traps.c | 13 ++--
xen/arch/x86/x86_emulate.c | 1 -
xen/arch/x86/x86_emulate/x86_emulate.c | 8 +--
xen/common/wait.c | 1 -
xen/include/asm-x86/asm_defns.h | 107 +++------------------------------
10 files changed, 26 insertions(+), 127 deletions(-)
diff --git a/tools/tests/x86_emulator/x86_emulate.c b/tools/tests/x86_emulator/x86_emulate.c
index 10e3f61baa..c12527a50b 100644
--- a/tools/tests/x86_emulator/x86_emulate.c
+++ b/tools/tests/x86_emulator/x86_emulate.c
@@ -24,7 +24,6 @@ typedef bool bool_t;
#endif
#define cpu_has_amd_erratum(nr) 0
-#define mark_regs_dirty(r) ((void)(r))
#define __packed __attribute__((packed))
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 452748dd5b..c9328f804e 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -148,7 +148,6 @@ static void noreturn continue_idle_domain(struct vcpu *v)
static void noreturn continue_nonidle_domain(struct vcpu *v)
{
check_wakeup_from_wait();
- mark_regs_dirty(guest_cpu_user_regs());
reset_stack_and_jump(ret_from_intr);
}
diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
index 90b6071796..1ec0d48cce 100644
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2456,7 +2456,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
goto fail;
if ( admin_io_okay(port, op_bytes, currd) )
{
- mark_regs_dirty(regs);
io_emul(regs);
}
else
@@ -2486,7 +2485,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
goto fail;
if ( admin_io_okay(port, op_bytes, currd) )
{
- mark_regs_dirty(regs);
io_emul(regs);
if ( (op_bytes == 1) && pv_post_outb_hook )
pv_post_outb_hook(port, regs->eax);
diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
index 794bb44266..7ee01597a3 100644
--- a/xen/arch/x86/x86_64/compat/entry.S
+++ b/xen/arch/x86/x86_64/compat/entry.S
@@ -15,7 +15,8 @@
ENTRY(compat_hypercall)
ASM_CLAC
pushq $0
- SAVE_VOLATILE type=TRAP_syscall compat=1
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
CR4_PV32_RESTORE
cmpb $0,untrusted_msi(%rip)
@@ -127,7 +128,6 @@ compat_test_guest_events:
/* %rbx: struct vcpu */
compat_process_softirqs:
sti
- andl $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
call do_softirq
jmp compat_test_all_events
@@ -268,7 +268,8 @@ ENTRY(cstar_enter)
pushq $FLAT_USER_CS32
pushq %rcx
pushq $0
- SAVE_VOLATILE TRAP_syscall
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL
GET_CURRENT(bx)
movq VCPU_domain(%rbx),%rcx
cmpb $0,DOMAIN_is_32bit_pv(%rcx)
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index 708d9b9402..cebb1e4f4f 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -97,7 +97,8 @@ ENTRY(lstar_enter)
pushq $FLAT_KERNEL_CS64
pushq %rcx
pushq $0
- SAVE_VOLATILE TRAP_syscall
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL
GET_CURRENT(bx)
testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
jz switch_to_kernel
@@ -192,7 +193,6 @@ test_guest_events:
/* %rbx: struct vcpu */
process_softirqs:
sti
- SAVE_PRESERVED
call do_softirq
jmp test_all_events
@@ -246,7 +246,8 @@ GLOBAL(sysenter_eflags_saved)
pushq $3 /* ring 3 null cs */
pushq $0 /* null rip */
pushq $0
- SAVE_VOLATILE TRAP_syscall
+ movl $TRAP_syscall, 4(%rsp)
+ SAVE_ALL
GET_CURRENT(bx)
cmpb $0,VCPU_sysenter_disables_events(%rbx)
movq VCPU_sysenter_addr(%rbx),%rax
@@ -263,7 +264,6 @@ UNLIKELY_END(sysenter_nt_set)
leal (,%rcx,TBF_INTERRUPT),%ecx
UNLIKELY_START(z, sysenter_gpf)
movq VCPU_trap_ctxt(%rbx),%rsi
- SAVE_PRESERVED
movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
movl %eax,TRAPBOUNCE_error_code(%rdx)
movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
@@ -281,7 +281,8 @@ UNLIKELY_END(sysenter_gpf)
ENTRY(int80_direct_trap)
ASM_CLAC
pushq $0
- SAVE_VOLATILE 0x80
+ movl $0x80, 4(%rsp)
+ SAVE_ALL
cmpb $0,untrusted_msi(%rip)
UNLIKELY_START(ne, msi_check)
@@ -309,7 +310,6 @@ int80_slow_path:
* IDT entry with DPL==0.
*/
movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp)
- SAVE_PRESERVED
movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
/* A GPF wouldn't have incremented the instruction pointer. */
subq $2,UREGS_rip(%rsp)
diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
index 5b71537a9d..3161dcc18b 100644
--- a/xen/arch/x86/x86_64/traps.c
+++ b/xen/arch/x86/x86_64/traps.c
@@ -65,15 +65,10 @@ static void _show_registers(
regs->rbp, regs->rsp, regs->r8);
printk("r9: %016lx r10: %016lx r11: %016lx\n",
regs->r9, regs->r10, regs->r11);
- if ( !(regs->entry_vector & TRAP_regs_partial) )
- {
- printk("r12: %016lx r13: %016lx r14: %016lx\n",
- regs->r12, regs->r13, regs->r14);
- printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
- regs->r15, crs[0], crs[4]);
- }
- else
- printk("cr0: %016lx cr4: %016lx\n", crs[0], crs[4]);
+ printk("r12: %016lx r13: %016lx r14: %016lx\n",
+ regs->r12, regs->r13, regs->r14);
+ printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
+ regs->r15, crs[0], crs[4]);
printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]);
printk("ds: %04x es: %04x fs: %04x gs: %04x "
"ss: %04x cs: %04x\n",
diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
index 28132b5dbc..43730026c2 100644
--- a/xen/arch/x86/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate.c
@@ -11,7 +11,6 @@
#include <xen/domain_page.h>
#include <asm/x86_emulate.h>
-#include <asm/asm_defns.h> /* mark_regs_dirty() */
#include <asm/processor.h> /* current_cpu_info */
#include <asm/amd.h> /* cpu_has_amd_erratum() */
diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
index 5db017b1b0..81e8bc6ace 100644
--- a/xen/arch/x86/x86_emulate/x86_emulate.c
+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
@@ -1424,10 +1424,10 @@ decode_register(
case 9: p = &regs->r9; break;
case 10: p = &regs->r10; break;
case 11: p = &regs->r11; break;
- case 12: mark_regs_dirty(regs); p = &regs->r12; break;
- case 13: mark_regs_dirty(regs); p = &regs->r13; break;
- case 14: mark_regs_dirty(regs); p = &regs->r14; break;
- case 15: mark_regs_dirty(regs); p = &regs->r15; break;
+ case 12: p = &regs->r12; break;
+ case 13: p = &regs->r13; break;
+ case 14: p = &regs->r14; break;
+ case 15: p = &regs->r15; break;
#endif
default: BUG(); p = NULL; break;
}
diff --git a/xen/common/wait.c b/xen/common/wait.c
index 4ac98c07fe..398f653174 100644
--- a/xen/common/wait.c
+++ b/xen/common/wait.c
@@ -128,7 +128,6 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
unsigned long dummy;
u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
- cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
ASSERT(wqv->esp == 0);
/* Save current VCPU affinity; force wakeup on *this* CPU only. */
diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
index 279d70298f..6e5c079ad8 100644
--- a/xen/include/asm-x86/asm_defns.h
+++ b/xen/include/asm-x86/asm_defns.h
@@ -17,15 +17,6 @@
void ret_from_intr(void);
#endif
-#ifdef CONFIG_FRAME_POINTER
-/* Indicate special exception stack frame by inverting the frame pointer. */
-#define SETUP_EXCEPTION_FRAME_POINTER(offs) \
- leaq offs(%rsp),%rbp; \
- notq %rbp
-#else
-#define SETUP_EXCEPTION_FRAME_POINTER(offs)
-#endif
-
#ifndef NDEBUG
#define ASSERT_INTERRUPT_STATUS(x, msg) \
pushf; \
@@ -42,31 +33,6 @@ void ret_from_intr(void);
#define ASSERT_INTERRUPTS_DISABLED \
ASSERT_INTERRUPT_STATUS(z, "INTERRUPTS DISABLED")
-/*
- * This flag is set in an exception frame when registers R12-R15 did not get
- * saved.
- */
-#define _TRAP_regs_partial 16
-#define TRAP_regs_partial (1 << _TRAP_regs_partial)
-/*
- * This flag gets set in an exception frame when registers R12-R15 possibly
- * get modified from their originally saved values and hence need to be
- * restored even if the normal call flow would restore register values.
- *
- * The flag being set implies _TRAP_regs_partial to be unset. Restoring
- * R12-R15 thus is
- * - required when this flag is set,
- * - safe when _TRAP_regs_partial is unset.
- */
-#define _TRAP_regs_dirty 17
-#define TRAP_regs_dirty (1 << _TRAP_regs_dirty)
-
-#define mark_regs_dirty(r) ({ \
- struct cpu_user_regs *r__ = (r); \
- ASSERT(!((r__)->entry_vector & TRAP_regs_partial)); \
- r__->entry_vector |= TRAP_regs_dirty; \
-})
-
#ifdef __ASSEMBLY__
# define _ASM_EX(p) p-.
#else
@@ -236,7 +202,7 @@ static always_inline void stac(void)
#endif
#ifdef __ASSEMBLY__
-.macro SAVE_ALL op
+.macro SAVE_ALL op, compat=0
.ifeqs "\op", "CLAC"
ASM_CLAC
.else
@@ -255,40 +221,6 @@ static always_inline void stac(void)
movq %rdx,UREGS_rdx(%rsp)
movq %rcx,UREGS_rcx(%rsp)
movq %rax,UREGS_rax(%rsp)
- movq %r8,UREGS_r8(%rsp)
- movq %r9,UREGS_r9(%rsp)
- movq %r10,UREGS_r10(%rsp)
- movq %r11,UREGS_r11(%rsp)
- movq %rbx,UREGS_rbx(%rsp)
- movq %rbp,UREGS_rbp(%rsp)
- SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
- movq %r12,UREGS_r12(%rsp)
- movq %r13,UREGS_r13(%rsp)
- movq %r14,UREGS_r14(%rsp)
- movq %r15,UREGS_r15(%rsp)
-.endm
-
-/*
- * Save all registers not preserved by C code or used in entry/exit code. Mark
- * the frame as partial.
- *
- * @type: exception type
- * @compat: R8-R15 don't need saving, and the frame nevertheless is complete
- */
-.macro SAVE_VOLATILE type compat=0
-.if \compat
- movl $\type,UREGS_entry_vector-UREGS_error_code(%rsp)
-.else
- movl $\type|TRAP_regs_partial,\
- UREGS_entry_vector-UREGS_error_code(%rsp)
-.endif
- addq $-(UREGS_error_code-UREGS_r15),%rsp
- cld
- movq %rdi,UREGS_rdi(%rsp)
- movq %rsi,UREGS_rsi(%rsp)
- movq %rdx,UREGS_rdx(%rsp)
- movq %rcx,UREGS_rcx(%rsp)
- movq %rax,UREGS_rax(%rsp)
.if !\compat
movq %r8,UREGS_r8(%rsp)
movq %r9,UREGS_r9(%rsp)
@@ -297,20 +229,17 @@ static always_inline void stac(void)
.endif
movq %rbx,UREGS_rbx(%rsp)
movq %rbp,UREGS_rbp(%rsp)
- SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
-.endm
-
-/*
- * Complete a frame potentially only partially saved.
- */
-.macro SAVE_PRESERVED
- btrl $_TRAP_regs_partial,UREGS_entry_vector(%rsp)
- jnc 987f
+#ifdef CONFIG_FRAME_POINTER
+/* Indicate special exception stack frame by inverting the frame pointer. */
+ leaq UREGS_rbp(%rsp), %rbp
+ notq %rbp
+#endif
+.if !\compat
movq %r12,UREGS_r12(%rsp)
movq %r13,UREGS_r13(%rsp)
movq %r14,UREGS_r14(%rsp)
movq %r15,UREGS_r15(%rsp)
-987:
+.endif
.endm
#define LOAD_ONE_REG(reg, compat) \
@@ -351,33 +280,13 @@ static always_inline void stac(void)
* @compat: R8-R15 don't need reloading
*/
.macro RESTORE_ALL adj=0 compat=0
-.if !\compat
- testl $TRAP_regs_dirty,UREGS_entry_vector(%rsp)
-.endif
LOAD_C_CLOBBERED \compat
.if !\compat
- jz 987f
movq UREGS_r15(%rsp),%r15
movq UREGS_r14(%rsp),%r14
movq UREGS_r13(%rsp),%r13
movq UREGS_r12(%rsp),%r12
-#ifndef NDEBUG
- .subsection 1
-987: testl $TRAP_regs_partial,UREGS_entry_vector(%rsp)
- jnz 987f
- cmpq UREGS_r15(%rsp),%r15
- jne 789f
- cmpq UREGS_r14(%rsp),%r14
- jne 789f
- cmpq UREGS_r13(%rsp),%r13
- jne 789f
- cmpq UREGS_r12(%rsp),%r12
- je 987f
-789: BUG /* Corruption of partial register state. */
- .subsection 0
-#endif
.endif
-987:
LOAD_ONE_REG(bp, \compat)
LOAD_ONE_REG(bx, \compat)
subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
--
2.11.3
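To make clear what patch 1 just removed: before XSA-254, most entry paths used SAVE_VOLATILE to save only the registers the C ABI clobbers and flagged the frame as partial via a bit in entry_vector; any code wanting to read or write R12-R15 in such a frame first had to complete it (SAVE_PRESERVED) and mark it dirty. A minimal C rendering of the removed invariant, reconstructed from the deleted asm_defns.h hunks above (the struct is simplified to the one relevant field):

    #include <assert.h>

    #define _TRAP_regs_partial 16    /* set: R12-R15 were never saved       */
    #define TRAP_regs_partial  (1u << _TRAP_regs_partial)
    #define _TRAP_regs_dirty   17    /* set: R12-R15 modified, must restore */
    #define TRAP_regs_dirty    (1u << _TRAP_regs_dirty)

    struct cpu_user_regs { unsigned int entry_vector; /* ... GPRs ... */ };

    /* Reconstructed from the removed mark_regs_dirty(): callers had to
     * prove the frame was already complete before dirtying R12-R15. */
    static void mark_regs_dirty(struct cpu_user_regs *r)
    {
        assert(!(r->entry_vector & TRAP_regs_partial));
        r->entry_vector |= TRAP_regs_dirty;
    }

With every frame now saved in full by SAVE_ALL, the flags, mark_regs_dirty(), and the NDEBUG consistency check in RESTORE_ALL all become dead weight and go away - which is what lets the later XPTI patches treat cpu_user_regs as uniformly complete.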
From 828e290b7dfb90c266ccf53d75ac2b68dc206647 Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 17 Jan 2018 17:23:37 +0100
Subject: [PATCH 2/4] x86/mm: Always set _PAGE_ACCESSED on L4e updates

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
master commit: bd61fe94bee0556bc2f64999a4a8315b93f90f21
master date: 2018-01-15 13:53:16 +0000
(cherry picked from commit 9b76908e6e074d7efbeafe6bad066ecc5f3c3c43)
---
xen/arch/x86/mm.c | 14 +++++++++++++-
1 file changed, 13 insertions(+), 1 deletion(-)
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 0426b6e00d..8b611022db 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -1187,11 +1187,23 @@ get_page_from_l4e(
_PAGE_USER|_PAGE_RW); \
} while ( 0 )
+/*
+ * When shadowing an L4 behind the guests back (e.g. for per-pcpu
+ * purposes), we cannot efficiently sync access bit updates from hardware
+ * (on the shadow tables) back into the guest view.
+ *
+ * We therefore unconditionally set _PAGE_ACCESSED even in the guests
+ * view. This will appear to the guest as a CPU which proactively pulls
+ * all valid L4e's into its TLB, which is compatible with the x86 ABI.
+ *
+ * At the time of writing, all PV guests set the access bit anyway, so
+ * this is no actual change in their behaviour.
+ */
#define adjust_guest_l4e(pl4e, d) \
do { \
if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) && \
likely(!is_pv_32bit_domain(d)) ) \
- l4e_add_flags((pl4e), _PAGE_USER); \
+ l4e_add_flags((pl4e), _PAGE_USER | _PAGE_ACCESSED); \
} while ( 0 )
#define unadjust_guest_l3e(pl3e, d) \
--
2.11.3
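Why patch 2 belongs in the series: once the XPTI patches point hardware at a per-CPU shadow of the guest's L4, the CPU sets the Accessed bit in the shadow entry rather than in the guest's own table, and there is no cheap way to propagate it back. Pre-setting _PAGE_ACCESSED when the L4e is validated makes the question moot. Here is a small sketch of the flag logic - the bit positions are the architectural x86 ones, and the function is a simplified stand-in for Xen's adjust_guest_l4e():

    /* Architectural x86 page-table flag bits (valid for L4 entries). */
    #define _PAGE_PRESENT   (1ul << 0)
    #define _PAGE_RW        (1ul << 1)
    #define _PAGE_USER      (1ul << 2)
    #define _PAGE_ACCESSED  (1ul << 5)

    typedef unsigned long l4_pgentry_t;

    /* Simplified: pre-set bits the CPU would otherwise set lazily.  With
     * Accessed already 1 in the guest's view, nothing ever needs syncing
     * back from the shadow L4. */
    static l4_pgentry_t adjust_l4e(l4_pgentry_t e)
    {
        if ( e & _PAGE_PRESENT )
            e |= _PAGE_USER | _PAGE_ACCESSED;
        return e;
    }

As the comment added by the hunk notes, this is ABI-compatible: to the guest it merely looks like a CPU that proactively pulls valid L4es into its TLB, and contemporary PV guests set the bit themselves anyway.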
From 72428e7318bf6368883622142344dbebd895c161 Mon Sep 17 00:00:00 2001
From: Jan Beulich <jbeulich@suse.com>
Date: Wed, 17 Jan 2018 17:24:59 +0100
Subject: [PATCH 4/4] x86: allow Meltdown band-aid to be disabled

First of all, we don't need it on AMD systems. Additionally, allow its
use to be controlled by a command line option. For best backportability,
this intentionally doesn't use alternative instruction patching to
achieve the intended effect - while we likely want that, it will be a
later follow-up.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
master commit: e871e80c38547d9faefc6604532ba3e985e65873
master date: 2018-01-16 17:50:59 +0100
(cherry picked from commit e19d0af4ee2ae9e42a85db639fd6848e72f5658b)
---
docs/misc/xen-command-line.markdown | 12 ++++++++++++
xen/arch/x86/domain.c | 7 +++++--
xen/arch/x86/mm.c | 2 +-
xen/arch/x86/smpboot.c | 17 ++++++++++++++---
xen/arch/x86/x86_64/entry.S | 2 ++
5 files changed, 34 insertions(+), 6 deletions(-)
diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
index 73f5265fc6..ee9aa7b8d5 100644
--- a/docs/misc/xen-command-line.markdown
+++ b/docs/misc/xen-command-line.markdown
@@ -1602,6 +1602,18 @@ In the case that x2apic is in use, this option switches between physical and
clustered mode. The default, given no hint from the **FADT**, is cluster
mode.
+### xpti
+> `= <boolean>`
+
+> Default: `false` on AMD hardware
+> Default: `true` everywhere else
+
+Override default selection of whether to isolate 64-bit PV guest page
+tables.
+
+** WARNING: Not yet a complete isolation implementation, but better than
+nothing. **
+
### xsave
> `= <boolean>`
diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index 512b77a5d0..534bf0161f 100644
--- a/xen/arch/x86/domain.c
+++ b/xen/arch/x86/domain.c
@@ -1926,12 +1926,15 @@ static void paravirt_ctxt_switch_from(struct vcpu *v)
static void paravirt_ctxt_switch_to(struct vcpu *v)
{
+ root_pgentry_t *root_pgt = this_cpu(root_pgt);
unsigned long cr4;
switch_kernel_stack(v);
- this_cpu(root_pgt)[root_table_offset(PERDOMAIN_VIRT_START)] =
- l4e_from_page(v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
+ if ( root_pgt )
+ root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] =
+ l4e_from_page(v->domain->arch.perdomain_l3_pg,
+ __PAGE_HYPERVISOR_RW);
cr4 = pv_guest_cr4_to_real_cr4(v);
if ( unlikely(cr4 != read_cr4()) )
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index 22cd8550fc..b5eac345af 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -3846,7 +3846,7 @@ long do_mmu_update(
rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
if ( !rc )
- sync_guest = 1;
+ sync_guest = !!this_cpu(root_pgt);
break;
case PGT_writable_page:
perfc_incr(writable_mmu_updates);
diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
index dc212710cd..bed2758625 100644
--- a/xen/arch/x86/smpboot.c
+++ b/xen/arch/x86/smpboot.c
@@ -320,7 +320,7 @@ void start_secondary(void *unused)
spin_debug_disable();
get_cpu_info()->xen_cr3 = 0;
- get_cpu_info()->pv_cr3 = __pa(this_cpu(root_pgt));
+ get_cpu_info()->pv_cr3 = this_cpu(root_pgt) ? __pa(this_cpu(root_pgt)) : 0;
load_system_tables();
@@ -729,14 +729,20 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt)
return 0;
}
+static __read_mostly int8_t opt_xpti = -1;
+boolean_param("xpti", opt_xpti);
DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
static int setup_cpu_root_pgt(unsigned int cpu)
{
- root_pgentry_t *rpt = alloc_xen_pagetable();
+ root_pgentry_t *rpt;
unsigned int off;
int rc;
+ if ( !opt_xpti )
+ return 0;
+
+ rpt = alloc_xen_pagetable();
if ( !rpt )
return -ENOMEM;
@@ -974,10 +980,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
stack_base[0] = stack_start;
+ if ( opt_xpti < 0 )
+ opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
+
rc = setup_cpu_root_pgt(0);
if ( rc )
panic("Error %d setting up PV root page table\n", rc);
- get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
+ if ( per_cpu(root_pgt, 0) )
+ get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
set_nr_sockets();
@@ -1045,6 +1055,7 @@ void __init smp_prepare_boot_cpu(void)
cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
get_cpu_info()->xen_cr3 = 0;
+ get_cpu_info()->pv_cr3 = 0;
}
static void
diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
index d63e734bb3..2a569952e3 100644
--- a/xen/arch/x86/x86_64/entry.S
+++ b/xen/arch/x86/x86_64/entry.S
@@ -45,6 +45,7 @@ restore_all_guest:
movabs $DIRECTMAP_VIRT_START, %rcx
mov %rdi, %rax
and %rsi, %rdi
+ jz .Lrag_keep_cr3
and %r9, %rsi
add %rcx, %rdi
add %rcx, %rsi
@@ -61,6 +62,7 @@ restore_all_guest:
rep movsq
mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
write_cr3 rax, rdi, rsi
+.Lrag_keep_cr3:
RESTORE_ALL
testw $TRAP_syscall,4(%rsp)
--
2.11.3
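The opt_xpti plumbing in patch 4 follows a common Xen idiom for overridable defaults: a signed option initialised to -1 ("no explicit choice"), set to 0 or 1 by boolean_param() if "xpti" appears on the hypervisor command line, and resolved to a vendor-dependent default during smp_prepare_cpus(). A standalone sketch of that resolution order - the string matching stands in for Xen's real command-line parser, and the vendor constant's value is illustrative:

    #include <stdint.h>
    #include <string.h>

    #define X86_VENDOR_AMD 2            /* placeholder value */

    /* -1: unset; 0: "xpti=false"/"no-xpti"; 1: "xpti"/"xpti=true" */
    static int8_t opt_xpti = -1;

    static void parse_cmdline(const char *cmdline)
    {
        if ( strstr(cmdline, "no-xpti") || strstr(cmdline, "xpti=false") )
            opt_xpti = 0;
        else if ( strstr(cmdline, "xpti") )
            opt_xpti = 1;
    }

    static void resolve_default(unsigned int vendor)
    {
        /* Applied only if the admin expressed no preference: AMD CPUs
         * are not vulnerable to Meltdown, so default the band-aid off. */
        if ( opt_xpti < 0 )
            opt_xpti = (vendor != X86_VENDOR_AMD);
    }

With xpti=false the hunks above make setup_cpu_root_pgt() an early return, leave pv_cr3 zero, gate sync_guest on a non-NULL root_pgt, and take the new .Lrag_keep_cr3 shortcut in restore_all_guest, so the exit path collapses back to its pre-XPTI behaviour.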
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.7.3
-pkgrel=3
+pkgrel=4
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64 armhf"
@@ -97,6 +97,9 @@ makedepends="$depends_dev autoconf automake libtool "
# - CVE-2017-15592 XSA-243
# - CVE-2017-15594 XSA-244
# - CVE-2017-17046 XSA-245
+# 4.7.3-r4:
+# - XSA-254 XPTI
case "$CARCH" in
x86*)
@@ -169,6 +172,11 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/$pkgname-$pkgver.ta
0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch