author     Roger Pau Monne <roger.pau@citrix.com>       2012-06-12 13:52:42 +0100
committer  Natanael Copa <ncopa@alpinelinux.org>        2012-06-12 12:59:52 +0000
commit     be9fdf040952f1d584775c20c39632a62de34c48
tree       0597a3147c99bdd0b513af324385199324d4b73a
parent     5ea246f3ae270c02ead13231f79923cc1b5a411a
download   aports-be9fdf040952f1d584775c20c39632a62de34c48.tar.bz2
           aports-be9fdf040952f1d584775c20c39632a62de34c48.tar.xz
This should be backported to stable.
Signed-off-by: Roger Pau Monne <roger.pau@citrix.com>
-rw-r--r--  main/xen/APKBUILD                    8
-rw-r--r--  main/xen/xsa7-xsa8-xen-4.1.patch   122
-rw-r--r--  main/xen/xsa9-xen-4.1.patch         46
3 files changed, 174 insertions, 2 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index dda50521c3..28dc252098 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.1.2
-pkgrel=9
+pkgrel=10
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86 x86_64"
@@ -25,7 +25,9 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	librt.patch
 	busybox-sed.patch
 	e1000.patch
-
+	xsa7-xsa8-xen-4.1.patch
+	xsa9-xen-4.1.patch
+
 	xenstored.initd
 	xenstored.confd
 	xenconsoled.initd
@@ -127,6 +129,8 @@ a7500c42804abdf68e051dc667e65f93  pygrub_alpine.patch
 fa06495a175571f4aa3b6cb88937953e  librt.patch
 1bea3543ddc712330527b62fd9ff6520  busybox-sed.patch
 c31163a3cd6cf58b4e9cac0e96812d65  e1000.patch
+912bdedbec1359b6215dbf51e0608b7a  xsa7-xsa8-xen-4.1.patch
+6bebaad2cfb7eed0bc7c166048e523e3  xsa9-xen-4.1.patch
 6e5739dad7e2bd1b625e55ddc6c782b7  xenstored.initd
 b017ccdd5e1c27bbf1513e3569d4ff07  xenstored.confd
 ed262f15fb880badb53575539468646c  xenconsoled.initd
diff --git a/main/xen/xsa7-xsa8-xen-4.1.patch b/main/xen/xsa7-xsa8-xen-4.1.patch
new file mode 100644
index 0000000000..b92ceb24fa
--- /dev/null
+++ b/main/xen/xsa7-xsa8-xen-4.1.patch
@@ -0,0 +1,122 @@
+diff -r 35248be669e7 xen/arch/x86/x86_64/asm-offsets.c
+--- a/xen/arch/x86/x86_64/asm-offsets.c	Mon May 14 16:59:12 2012 +0100
++++ b/xen/arch/x86/x86_64/asm-offsets.c	Thu May 24 11:12:33 2012 +0100
+@@ -90,6 +90,8 @@ void __dummy__(void)
+            arch.guest_context.trap_ctxt[TRAP_gp_fault].address);
+     OFFSET(VCPU_gp_fault_sel, struct vcpu,
+            arch.guest_context.trap_ctxt[TRAP_gp_fault].cs);
++    OFFSET(VCPU_gp_fault_flags, struct vcpu,
++           arch.guest_context.trap_ctxt[TRAP_gp_fault].flags);
+     OFFSET(VCPU_kernel_sp, struct vcpu, arch.guest_context.kernel_sp);
+     OFFSET(VCPU_kernel_ss, struct vcpu, arch.guest_context.kernel_ss);
+     OFFSET(VCPU_guest_context_flags, struct vcpu, arch.guest_context.flags);
+diff -r 35248be669e7 xen/arch/x86/x86_64/compat/entry.S
+--- a/xen/arch/x86/x86_64/compat/entry.S	Mon May 14 16:59:12 2012 +0100
++++ b/xen/arch/x86/x86_64/compat/entry.S	Thu May 24 11:12:33 2012 +0100
+@@ -214,6 +214,7 @@ 1: call compat_create_bounce_frame
+ ENTRY(compat_post_handle_exception)
+         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
+         jz compat_test_all_events
++.Lcompat_bounce_exception:
+         call compat_create_bounce_frame
+         movb $0,TRAPBOUNCE_flags(%rdx)
+         jmp compat_test_all_events
+@@ -226,19 +227,20 @@ ENTRY(compat_syscall)
+         leaq VCPU_trap_bounce(%rbx),%rdx
+         testl $~3,%esi
+         leal (,%rcx,TBF_INTERRUPT),%ecx
+-        jz 2f
+-1:      movq %rax,TRAPBOUNCE_eip(%rdx)
++UNLIKELY_START(z, compat_syscall_gpf)
++        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
++        subl $2,UREGS_rip(%rsp)
++        movl $0,TRAPBOUNCE_error_code(%rdx)
++        movl VCPU_gp_fault_addr(%rbx),%eax
++        movzwl VCPU_gp_fault_sel(%rbx),%esi
++        testb $4,VCPU_gp_fault_flags(%rbx)
++        setnz %cl
++        leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
++UNLIKELY_END(compat_syscall_gpf)
++        movq %rax,TRAPBOUNCE_eip(%rdx)
+         movw %si,TRAPBOUNCE_cs(%rdx)
+         movb %cl,TRAPBOUNCE_flags(%rdx)
+-        call compat_create_bounce_frame
+-        jmp compat_test_all_events
+-2:      movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+-        subl $2,UREGS_rip(%rsp)
+-        movq VCPU_gp_fault_addr(%rbx),%rax
+-        movzwl VCPU_gp_fault_sel(%rbx),%esi
+-        movb $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
+-        movl $0,TRAPBOUNCE_error_code(%rdx)
+-        jmp 1b
++        jmp .Lcompat_bounce_exception
+ 
+ ENTRY(compat_sysenter)
+         cmpl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+diff -r 35248be669e7 xen/arch/x86/x86_64/entry.S
+--- a/xen/arch/x86/x86_64/entry.S	Mon May 14 16:59:12 2012 +0100
++++ b/xen/arch/x86/x86_64/entry.S	Thu May 24 11:12:33 2012 +0100
+@@ -40,6 +40,13 @@ restore_all_guest:
+         testw $TRAP_syscall,4(%rsp)
+         jz iret_exit_to_guest
+ 
++        /* Don't use SYSRET path if the return address is not canonical. */
++        movq 8(%rsp),%rcx
++        sarq $47,%rcx
++        incl %ecx
++        cmpl $1,%ecx
++        ja .Lforce_iret
++
+         addq $8,%rsp
+         popq %rcx # RIP
+         popq %r11 # CS
+@@ -50,6 +57,10 @@
+         sysretq
+ 1:      sysretl
+ 
++.Lforce_iret:
++        /* Mimic SYSRET behavior. */
++        movq 8(%rsp),%rcx # RIP
++        movq 24(%rsp),%r11 # RFLAGS
+         ALIGN
+ /* No special register assumptions. */
+ iret_exit_to_guest:
+@@ -278,19 +289,21 @@ sysenter_eflags_saved:
+         leaq VCPU_trap_bounce(%rbx),%rdx
+         testq %rax,%rax
+         leal (,%rcx,TBF_INTERRUPT),%ecx
+-        jz 2f
+-1:      movq VCPU_domain(%rbx),%rdi
++UNLIKELY_START(z, sysenter_gpf)
++        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
++        subq $2,UREGS_rip(%rsp)
++        movl %eax,TRAPBOUNCE_error_code(%rdx)
++        movq VCPU_gp_fault_addr(%rbx),%rax
++        testb $4,VCPU_gp_fault_flags(%rbx)
++        setnz %cl
++        leal TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE(,%rcx,TBF_INTERRUPT),%ecx
++UNLIKELY_END(sysenter_gpf)
++        movq VCPU_domain(%rbx),%rdi
+         movq %rax,TRAPBOUNCE_eip(%rdx)
+         movb %cl,TRAPBOUNCE_flags(%rdx)
+         testb $1,DOMAIN_is_32bit_pv(%rdi)
+         jnz compat_sysenter
+-        call create_bounce_frame
+-        jmp test_all_events
+-2:      movl %eax,TRAPBOUNCE_error_code(%rdx)
+-        movq VCPU_gp_fault_addr(%rbx),%rax
+-        movb $(TBF_EXCEPTION|TBF_EXCEPTION_ERRCODE|TBF_INTERRUPT),%cl
+-        movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
+-        jmp 1b
++        jmp .Lbounce_exception
+ 
+ ENTRY(int80_direct_trap)
+         pushq $0
+@@ -482,6 +495,7 @@ 1: movq %rsp,%rdi
+         jnz compat_post_handle_exception
+         testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
+         jz test_all_events
++.Lbounce_exception:
+         call create_bounce_frame
+         movb $0,TRAPBOUNCE_flags(%rdx)
+         jmp test_all_events
diff --git a/main/xen/xsa9-xen-4.1.patch b/main/xen/xsa9-xen-4.1.patch
new file mode 100644
index 0000000000..a0ded0c825
--- /dev/null
+++ b/main/xen/xsa9-xen-4.1.patch
@@ -0,0 +1,46 @@
+x86-64: detect processors subject to AMD erratum #121 and refuse to boot
+
+Processors with this erratum are subject to a DoS attack by unprivileged
+guest users.
+
+This is XSA-9 / CVE-2006-0744.
+
+Signed-off-by: Jan Beulich <JBeulich@suse.com>
+Signed-off-by: Ian Campbell <ian.campbell@citrix.com>
+
+--- a/xen/arch/x86/cpu/amd.c
++++ b/xen/arch/x86/cpu/amd.c
+@@ -32,6 +32,9 @@
+ static char opt_famrev[14];
+ string_param("cpuid_mask_cpu", opt_famrev);
+ 
++static int opt_allow_unsafe;
++boolean_param("allow_unsafe", opt_allow_unsafe);
++
+ static inline void wrmsr_amd(unsigned int index, unsigned int lo,
+ 		unsigned int hi)
+ {
+@@ -620,6 +623,11 @@ static void __devinit init_amd(struct cp
+ 	clear_bit(X86_FEATURE_MCE, c->x86_capability);
+ 
+ #ifdef __x86_64__
++	if (cpu_has_amd_erratum(c, AMD_ERRATUM_121) && !opt_allow_unsafe)
++		panic("Xen will not boot on this CPU for security reasons.\n"
++		      "Pass \"allow_unsafe\" if you're trusting all your"
++		      " (PV) guest kernels.\n");
++
+ 	/* AMD CPUs do not support SYSENTER outside of legacy mode. */
+ 	clear_bit(X86_FEATURE_SEP, c->x86_capability);
+ 
+--- a/xen/include/asm-x86/amd.h
++++ b/xen/include/asm-x86/amd.h
+@@ -127,6 +127,9 @@
+ #define AMD_MODEL_RANGE_START(range) (((range) >> 12) & 0xfff)
+ #define AMD_MODEL_RANGE_END(range)   ((range) & 0xfff)
+ 
++#define AMD_ERRATUM_121 \
++    AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x3f, 0xf))
++
+ #define AMD_ERRATUM_170 \
+     AMD_LEGACY_ERRATUM(AMD_MODEL_RANGE(0x0f, 0x0, 0x0, 0x67, 0xf))
+ 
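
Note on how the two new patch files take effect: the APKBUILD hunk above only lists them in source= and records their md5sums. abuild fetches and verifies every source entry, and the package's prepare step applies the *.patch entries before the build. The sketch below illustrates that common Alpine convention; it is a hypothetical excerpt, not the actual prepare() from main/xen/APKBUILD, whose build logic is unchanged by this commit and not shown in the diff.

    # Illustrative only: typical APKBUILD prepare() loop that applies every
    # *.patch listed in $source; the real main/xen/APKBUILD body is not part
    # of this diff.
    prepare() {
        cd "$srcdir/$pkgname-$pkgver"
        for i in $source; do
            case $i in
            *.patch)
                msg "Applying $i"
                patch -p1 -i "$srcdir"/$i || return 1
                ;;
            esac
        done
    }

After adding new entries to source=, the md5sums block is normally regenerated with abuild checksum, which produces hash lines like the two added in the hunk above.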