aboutsummaryrefslogtreecommitdiffstats
path: root/main/xen
diff options
context:
space:
mode:
authorLeonardo Arena <rnalrd@alpinelinux.org>2016-12-20 10:20:32 +0000
committerLeonardo Arena <rnalrd@alpinelinux.org>2016-12-20 10:20:36 +0000
commit3b5fa3b170637b8149c63d415d3a42c638b8b71a (patch)
tree3e62f8b4221c67aac5d4bbaaffbb99ca72e2dcfb /main/xen
parentcf24cc64fbe2e718b0bee91cc486ca9071a87ddf (diff)
downloadaports-3b5fa3b170637b8149c63d415d3a42c638b8b71a.tar.bz2
aports-3b5fa3b170637b8149c63d415d3a42c638b8b71a.tar.xz
main/xen: security fixes. Fixes #6540
Diffstat (limited to 'main/xen')
-rw-r--r--main/xen/APKBUILD8
-rw-r--r--main/xen/xsa200-4.7.patch55
-rw-r--r--main/xen/xsa201-1.patch87
-rw-r--r--main/xen/xsa201-2.patch199
-rw-r--r--main/xen/xsa201-3-4.7.patch47
-rw-r--r--main/xen/xsa201-4.patch130
6 files changed, 525 insertions, 1 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index f067ac030d..206739fc27 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.7.1
-pkgrel=1
+pkgrel=2
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64 armhf"
@@ -37,6 +37,12 @@ makedepends="$depends_dev autoconf automake libtool "
# - CVE-2016-9381 XSA-197
# - CVE-2016-9379 XSA-198
# - CVE-2016-9380 XSA-198
+# 4.7.1-r2:
+# - CVE-2016-9932 XSA-200
+# - CVE-2016-9815 XSA-201
+# - CVE-2016-9816 XSA-201
+# - CVE-2016-9817 XSA-201
+# - CVE-2016-9818 XSA-201
case "$CARCH" in
x86*)
diff --git a/main/xen/xsa200-4.7.patch b/main/xen/xsa200-4.7.patch
new file mode 100644
index 0000000000..69608f6fc3
--- /dev/null
+++ b/main/xen/xsa200-4.7.patch
@@ -0,0 +1,55 @@
+From: Jan Beulich <jbeulich@suse.com>
+Subject: x86emul: CMPXCHG8B ignores operand size prefix
+
+Otherwise besides mis-handling the instruction, the comparison failure
+case would result in uninitialized stack data being handed back to the
+guest in rDX:rAX (32 bits leaked for 32-bit guests, 96 bits for 64-bit
+ones).
+
+This is XSA-200.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/tools/tests/x86_emulator/test_x86_emulator.c
++++ b/tools/tests/x86_emulator/test_x86_emulator.c
+@@ -435,6 +435,24 @@ int main(int argc, char **argv)
+ goto fail;
+ printf("okay\n");
+
++ printf("%-40s", "Testing cmpxchg8b (%edi) [opsize]...");
++ instr[0] = 0x66; instr[1] = 0x0f; instr[2] = 0xc7; instr[3] = 0x0f;
++ res[0] = 0x12345678;
++ res[1] = 0x87654321;
++ regs.eflags = 0x200;
++ regs.eip = (unsigned long)&instr[0];
++ regs.edi = (unsigned long)res;
++ rc = x86_emulate(&ctxt, &emulops);
++ if ( (rc != X86EMUL_OKAY) ||
++ (res[0] != 0x12345678) ||
++ (res[1] != 0x87654321) ||
++ (regs.eax != 0x12345678) ||
++ (regs.edx != 0x87654321) ||
++ ((regs.eflags&0x240) != 0x200) ||
++ (regs.eip != (unsigned long)&instr[4]) )
++ goto fail;
++ printf("okay\n");
++
+ printf("%-40s", "Testing movsxbd (%%eax),%%ecx...");
+ instr[0] = 0x0f; instr[1] = 0xbe; instr[2] = 0x08;
+ regs.eflags = 0x200;
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -4775,8 +4775,12 @@ x86_emulate(
+ generate_exception_if((modrm_reg & 7) != 1, EXC_UD, -1);
+ generate_exception_if(ea.type != OP_MEM, EXC_UD, -1);
+ if ( op_bytes == 8 )
++ {
+ host_and_vcpu_must_have(cx16);
+- op_bytes *= 2;
++ op_bytes = 16;
++ }
++ else
++ op_bytes = 8;
+
+ /* Get actual old value. */
+ if ( (rc = ops->read(ea.mem.seg, ea.mem.off, old, op_bytes,
diff --git a/main/xen/xsa201-1.patch b/main/xen/xsa201-1.patch
new file mode 100644
index 0000000000..50983b852f
--- /dev/null
+++ b/main/xen/xsa201-1.patch
@@ -0,0 +1,87 @@
+From: Wei Chen <Wei.Chen@arm.com>
+Subject: arm64: handle guest-generated EL1 asynchronous abort
+
+In current code, when the hypervisor receives an asynchronous abort
+from a guest, the hypervisor will do panic, the host will be down.
+We have to prevent such security issue, so, in this patch we crash
+the guest, when the hypervisor receives an asynchronous abort from
+the guest.
+
+This is CVE-2016-9815, part of XSA-201.
+
+Signed-off-by: Wei Chen <Wei.Chen@arm.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Reviewed-by: Steve Capper <steve.capper@arm.com>
+Reviewed-by: Julien Grall <Julien.Grall@arm.com>
+
+--- a/xen/arch/arm/arm64/entry.S
++++ b/xen/arch/arm/arm64/entry.S
+@@ -204,9 +204,12 @@ guest_fiq_invalid:
+ entry hyp=0, compat=0
+ invalid BAD_FIQ
+
+-guest_error_invalid:
++guest_error:
+ entry hyp=0, compat=0
+- invalid BAD_ERROR
++ msr daifclr, #2
++ mov x0, sp
++ bl do_trap_guest_error
++ exit hyp=0, compat=0
+
+ guest_sync_compat:
+ entry hyp=0, compat=1
+@@ -225,9 +228,12 @@ guest_fiq_invalid_compat:
+ entry hyp=0, compat=1
+ invalid BAD_FIQ
+
+-guest_error_invalid_compat:
++guest_error_compat:
+ entry hyp=0, compat=1
+- invalid BAD_ERROR
++ msr daifclr, #2
++ mov x0, sp
++ bl do_trap_guest_error
++ exit hyp=0, compat=1
+
+ ENTRY(return_to_new_vcpu32)
+ exit hyp=0, compat=1
+@@ -286,12 +292,12 @@ ENTRY(hyp_traps_vector)
+ ventry guest_sync // Synchronous 64-bit EL0/EL1
+ ventry guest_irq // IRQ 64-bit EL0/EL1
+ ventry guest_fiq_invalid // FIQ 64-bit EL0/EL1
+- ventry guest_error_invalid // Error 64-bit EL0/EL1
++ ventry guest_error // Error 64-bit EL0/EL1
+
+ ventry guest_sync_compat // Synchronous 32-bit EL0/EL1
+ ventry guest_irq_compat // IRQ 32-bit EL0/EL1
+ ventry guest_fiq_invalid_compat // FIQ 32-bit EL0/EL1
+- ventry guest_error_invalid_compat // Error 32-bit EL0/EL1
++ ventry guest_error_compat // Error 32-bit EL0/EL1
+
+ /*
+ * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
+--- a/xen/arch/arm/traps.c
++++ b/xen/arch/arm/traps.c
+@@ -2723,6 +2723,21 @@ asmlinkage void do_trap_hypervisor(struct cpu_user_regs *regs)
+ }
+ }
+
++asmlinkage void do_trap_guest_error(struct cpu_user_regs *regs)
++{
++ enter_hypervisor_head(regs);
++
++ /*
++ * Currently, to ensure hypervisor safety, when we received a
++ * guest-generated vSerror/vAbort, we just crash the guest to protect
++ * the hypervisor. In future we can better handle this by injecting
++ * a vSerror/vAbort to the guest.
++ */
++ gdprintk(XENLOG_WARNING, "Guest(Dom-%u) will be crashed by vSError\n",
++ current->domain->domain_id);
++ domain_crash_synchronous();
++}
++
+ asmlinkage void do_trap_irq(struct cpu_user_regs *regs)
+ {
+ enter_hypervisor_head(regs);
diff --git a/main/xen/xsa201-2.patch b/main/xen/xsa201-2.patch
new file mode 100644
index 0000000000..9bd1f8f89d
--- /dev/null
+++ b/main/xen/xsa201-2.patch
@@ -0,0 +1,199 @@
+From: Wei Chen <Wei.Chen@arm.com>
+Subject: arm64: handle async aborts delivered while at EL2
+
+If EL1 generates an asynchronous abort and then traps into EL2
+(by HVC or IRQ) before the abort has been delivered, the hypervisor
+could not catch it, because the PSTATE.A bit is masked all the time
+in hypervisor. So this asynchronous abort may be slipped to next
+running guest with PSTATE.A bit unmasked.
+
+In order to avoid this, it is necessary to take the abort at EL2, by
+clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit
+to open a window to catch guest-generated asynchronous abort in all
+EL1 -> EL2 switch paths. If we caught such asynchronous abort in
+checking window, the hyp_error exception will be triggered and the
+abort source guest will be crashed.
+
+This is CVE-2016-9816, part of XSA-201.
+
+Signed-off-by: Wei Chen <Wei.Chen@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+
+--- a/xen/arch/arm/arm64/entry.S
++++ b/xen/arch/arm/arm64/entry.S
+@@ -173,6 +173,43 @@ hyp_error_invalid:
+ entry hyp=1
+ invalid BAD_ERROR
+
++hyp_error:
++ /*
++ * Only two possibilities:
++ * 1) Either we come from the exit path, having just unmasked
++ * PSTATE.A: change the return code to an EL2 fault, and
++ * carry on, as we're already in a sane state to handle it.
++ * 2) Or we come from anywhere else, and that's a bug: we panic.
++ */
++ entry hyp=1
++ msr daifclr, #2
++
++ /*
++ * The ELR_EL2 may be modified by an interrupt, so we have to use the
++ * saved value in cpu_user_regs to check whether we come from 1) or
++ * not.
++ */
++ ldr x0, [sp, #UREGS_PC]
++ adr x1, abort_guest_exit_start
++ cmp x0, x1
++ adr x1, abort_guest_exit_end
++ ccmp x0, x1, #4, ne
++ mov x0, sp
++ mov x1, #BAD_ERROR
++
++ /*
++ * Not equal, the exception come from 2). It's a bug, we have to
++ * panic the hypervisor.
++ */
++ b.ne do_bad_mode
++
++ /*
++ * Otherwise, the exception come from 1). It happened because of
++ * the guest. Crash this guest.
++ */
++ bl do_trap_guest_error
++ exit hyp=1
++
+ /* Traps taken in Current EL with SP_ELx */
+ hyp_sync:
+ entry hyp=1
+@@ -189,15 +226,29 @@ hyp_irq:
+
+ guest_sync:
+ entry hyp=0, compat=0
++ bl check_pending_vserror
++ /*
++ * If x0 is Non-zero, a vSError took place, the initial exception
++ * doesn't have any significance to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ msr daifclr, #2
+ mov x0, sp
+ bl do_trap_hypervisor
++1:
+ exit hyp=0, compat=0
+
+ guest_irq:
+ entry hyp=0, compat=0
++ bl check_pending_vserror
++ /*
++ * If x0 is Non-zero, a vSError took place, the initial exception
++ * doesn't have any significance to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ mov x0, sp
+ bl do_trap_irq
++1:
+ exit hyp=0, compat=0
+
+ guest_fiq_invalid:
+@@ -213,15 +264,29 @@ guest_error:
+
+ guest_sync_compat:
+ entry hyp=0, compat=1
++ bl check_pending_vserror
++ /*
++ * If x0 is Non-zero, a vSError took place, the initial exception
++ * doesn't have any significance to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ msr daifclr, #2
+ mov x0, sp
+ bl do_trap_hypervisor
++1:
+ exit hyp=0, compat=1
+
+ guest_irq_compat:
+ entry hyp=0, compat=1
++ bl check_pending_vserror
++ /*
++ * If x0 is Non-zero, a vSError took place, the initial exception
++ * doesn't have any significance to be handled. Exit ASAP
++ */
++ cbnz x0, 1f
+ mov x0, sp
+ bl do_trap_irq
++1:
+ exit hyp=0, compat=1
+
+ guest_fiq_invalid_compat:
+@@ -270,6 +335,62 @@ return_from_trap:
+ eret
+
+ /*
++ * This function is used to check pending virtual SError in the gap of
++ * EL1 -> EL2 world switch.
++ * The x0 register will be used to indicate the results of detection.
++ * x0 -- Non-zero indicates a pending virtual SError took place.
++ * x0 -- Zero indicates no pending virtual SError took place.
++ */
++check_pending_vserror:
++ /*
++ * Save elr_el2 to check whether the pending SError exception takes
++ * place while we are doing this sync exception.
++ */
++ mrs x0, elr_el2
++
++ /* Synchronize against in-flight ld/st */
++ dsb sy
++
++ /*
++ * Unmask PSTATE asynchronous abort bit. If there is a pending
++ * SError, the EL2 error exception will happen after PSTATE.A
++ * is cleared.
++ */
++ msr daifclr, #4
++
++ /*
++ * This is our single instruction exception window. A pending
++ * SError is guaranteed to occur at the earliest when we unmask
++ * it, and at the latest just after the ISB.
++ *
++ * If a pending SError occurs, the program will jump to EL2 error
++ * exception handler, and the elr_el2 will be set to
++ * abort_guest_exit_start or abort_guest_exit_end.
++ */
++abort_guest_exit_start:
++
++ isb
++
++abort_guest_exit_end:
++ /* Mask PSTATE asynchronous abort bit, close the checking window. */
++ msr daifset, #4
++
++ /*
++ * Compare elr_el2 and the saved value to check whether we are
++ * returning from a valid exception caused by pending SError.
++ */
++ mrs x1, elr_el2
++ cmp x0, x1
++
++ /*
++ * Not equal, the pending SError exception took place, set
++ * x0 to non-zero.
++ */
++ cset x0, ne
++
++ ret
++
++/*
+ * Exception vectors.
+ */
+ .macro ventry label
+@@ -287,7 +408,7 @@ ENTRY(hyp_traps_vector)
+ ventry hyp_sync // Synchronous EL2h
+ ventry hyp_irq // IRQ EL2h
+ ventry hyp_fiq_invalid // FIQ EL2h
+- ventry hyp_error_invalid // Error EL2h
++ ventry hyp_error // Error EL2h
+
+ ventry guest_sync // Synchronous 64-bit EL0/EL1
+ ventry guest_irq // IRQ 64-bit EL0/EL1
diff --git a/main/xen/xsa201-3-4.7.patch b/main/xen/xsa201-3-4.7.patch
new file mode 100644
index 0000000000..af7fc3703e
--- /dev/null
+++ b/main/xen/xsa201-3-4.7.patch
@@ -0,0 +1,47 @@
+From: Wei Chen <Wei.Chen@arm.com>
+Subject: arm: crash the guest when it traps on external abort
+
+If we spot a data or prefetch abort bearing the ESR_EL2.EA bit set, we
+know that this is an external abort, and that should crash the guest.
+
+This is CVE-2016-9817, part of XSA-201.
+
+Signed-off-by: Wei Chen <Wei.Chen@arm.com>
+Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
+Reviewed-by: Steve Capper <steve.capper@arm.com>
+Reviewed-by: Julien Grall <Julien.Grall@arm.com>
+
+--- a/xen/arch/arm/traps.c
++++ b/xen/arch/arm/traps.c
+@@ -2383,6 +2383,15 @@ static void do_trap_instr_abort_guest(struct cpu_user_regs *regs,
+ int rc;
+ register_t gva = READ_SYSREG(FAR_EL2);
+
++ /*
++ * If this bit has been set, it means that this instruction abort is caused
++ * by a guest external abort. Currently we crash the guest to protect the
++ * hypervisor. In future one can better handle this by injecting a virtual
++ * abort to the guest.
++ */
++ if ( hsr.iabt.eat )
++ domain_crash_synchronous();
++
+ switch ( hsr.iabt.ifsc & 0x3f )
+ {
+ case FSC_FLT_PERM ... FSC_FLT_PERM + 3:
+@@ -2437,6 +2446,15 @@ static void do_trap_data_abort_guest(struct cpu_user_regs *regs,
+ return;
+ }
+
++ /*
++ * If this bit has been set, it means that this data abort is caused
++ * by a guest external abort. Currently we crash the guest to protect the
++ * hypervisor. In future one can better handle this by injecting a virtual
++ * abort to the guest.
++ */
++ if ( dabt.eat )
++ domain_crash_synchronous();
++
+ info.dabt = dabt;
+ #ifdef CONFIG_ARM_32
+ info.gva = READ_CP32(HDFAR);
diff --git a/main/xen/xsa201-4.patch b/main/xen/xsa201-4.patch
new file mode 100644
index 0000000000..8060a5be13
--- /dev/null
+++ b/main/xen/xsa201-4.patch
@@ -0,0 +1,130 @@
+From: Wei Chen <Wei.Chen@arm.com>
+Subject: arm32: handle async aborts delivered while at HYP
+
+If guest generates an asynchronous abort and then traps into HYP
+(by HVC or IRQ) before the abort has been delivered, the hypervisor
+could not catch it, because the PSTATE.A bit is masked all the time
+in hypervisor. So this asynchronous abort may be slipped to next
+running guest with PSTATE.A bit unmasked.
+
+In order to avoid this, it is necessary to take the abort at HYP, by
+clearing the PSTATE.A bit. In this patch, we unmask the PSTATE.A bit
+to open a window to catch guest-generated asynchronous abort in all
+Guest -> HYP switch paths. If we caught such asynchronous abort in
+checking window, the HYP data abort exception will be triggered and
+the abort source guest will be crashed.
+
+This is CVE-2016-9818, part of XSA-201.
+
+Signed-off-by: Wei Chen <Wei.Chen@arm.com>
+Reviewed-by: Julien Grall <julien.grall@arm.com>
+
+--- a/xen/arch/arm/arm32/entry.S
++++ b/xen/arch/arm/arm32/entry.S
+@@ -42,6 +42,61 @@ save_guest_regs:
+ SAVE_BANKED(fiq)
+ SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
+ SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);
++ /*
++ * Start to check pending virtual abort in the gap of Guest -> HYP
++ * world switch.
++ *
++ * Save ELR_hyp to check whether the pending virtual abort exception
++ * takes place while we are doing this trap exception.
++ */
++ mrs r1, ELR_hyp
++
++ /*
++ * Force loads and stores to complete before unmasking asynchronous
++ * aborts and forcing the delivery of the exception.
++ */
++ dsb sy
++
++ /*
++ * Unmask asynchronous abort bit. If there is a pending asynchronous
++ * abort, the data_abort exception will happen after A bit is cleared.
++ */
++ cpsie a
++
++ /*
++ * This is our single instruction exception window. A pending
++ * asynchronous abort is guaranteed to occur at the earliest when we
++ * unmask it, and at the latest just after the ISB.
++ *
++ * If a pending abort occurs, the program will jump to data_abort
++ * exception handler, and the ELR_hyp will be set to
++ * abort_guest_exit_start or abort_guest_exit_end.
++ */
++ .global abort_guest_exit_start
++abort_guest_exit_start:
++
++ isb
++
++ .global abort_guest_exit_end
++abort_guest_exit_end:
++ /* Mask CPSR asynchronous abort bit, close the checking window. */
++ cpsid a
++
++ /*
++ * Compare ELR_hyp and the saved value to check whether we are
++ * returning from a valid exception caused by pending virtual
++ * abort.
++ */
++ mrs r2, ELR_hyp
++ cmp r1, r2
++
++ /*
++ * Not equal, the pending virtual abort exception took place, the
++ * initial exception does not have any significance to be handled.
++ * Exit ASAP.
++ */
++ bne return_from_trap
++
+ mov pc, lr
+
+ #define DEFINE_TRAP_ENTRY(trap) \
+--- a/xen/arch/arm/arm32/traps.c
++++ b/xen/arch/arm/arm32/traps.c
+@@ -63,7 +63,10 @@ asmlinkage void do_trap_prefetch_abort(struct cpu_user_regs *regs)
+
+ asmlinkage void do_trap_data_abort(struct cpu_user_regs *regs)
+ {
+- do_unexpected_trap("Data Abort", regs);
++ if ( VABORT_GEN_BY_GUEST(regs) )
++ do_trap_guest_error(regs);
++ else
++ do_unexpected_trap("Data Abort", regs);
+ }
+
+ /*
+--- a/xen/include/asm-arm/arm32/processor.h
++++ b/xen/include/asm-arm/arm32/processor.h
+@@ -55,6 +55,17 @@ struct cpu_user_regs
+
+ uint32_t pad1; /* Doubleword-align the user half of the frame */
+ };
++
++/* Functions for pending virtual abort checking window. */
++void abort_guest_exit_start(void);
++void abort_guest_exit_end(void);
++
++#define VABORT_GEN_BY_GUEST(r) \
++( \
++ ( (unsigned long)abort_guest_exit_start == (r)->pc ) || \
++ ( (unsigned long)abort_guest_exit_end == (r)->pc ) \
++)
++
+ #endif
+
+ /* Layout as used in assembly, with src/dest registers mixed in */
+--- a/xen/include/asm-arm/processor.h
++++ b/xen/include/asm-arm/processor.h
+@@ -690,6 +690,8 @@ void vcpu_regs_user_to_hyp(struct vcpu *vcpu,
+ int call_smc(register_t function_id, register_t arg0, register_t arg1,
+ register_t arg2);
+
++void do_trap_guest_error(struct cpu_user_regs *regs);
++
+ #endif /* __ASSEMBLY__ */
+ #endif /* __ASM_ARM_PROCESSOR_H */
+ /*