author     Leonardo Arena <rnalrd@alpinelinux.org>    2016-05-09 13:02:15 +0000
committer  Leonardo Arena <rnalrd@alpinelinux.org>    2016-05-09 13:02:15 +0000
commit     2e04022d1a3fe8b9e3ff2e830cfeca39b4b610aa (patch)
tree       a28318ac96293b6d66c0519f719e58b472ef893a
parent     ae07363ba5d06022ffa7d161ab322fae828b7600 (diff)
download   aports-2e04022d1a3fe8b9e3ff2e830cfeca39b4b610aa.tar.bz2
           aports-2e04022d1a3fe8b9e3ff2e830cfeca39b4b610aa.tar.xz
-rw-r--r--  main/xen/APKBUILD          |  10
-rw-r--r--  main/xen/xsa172.patch      |  39
-rw-r--r--  main/xen/xsa173-4.4.patch  | 251

3 files changed, 299 insertions, 1 deletion
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 9c1120918c..09b8113ecc 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.4.2
-pkgrel=5
+pkgrel=6
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64"
@@ -38,6 +38,8 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xsa167-4.4.patch
 	xsa168.patch
 	xsa170-4.5.patch
+	xsa172.patch
+	xsa173-4.4.patch
 
 	qemu-coroutine-gthread.patch
 	qemu-xen-musl-openpty.patch
@@ -252,6 +254,8 @@ ff35b1f11bcddbd651445406bbc88a76  xsa136.patch
 1832af4e78d994617c18bd7df58e6409  xsa167-4.4.patch
 b837726ce186fa61cfe7238b225b0335  xsa168.patch
 2c2adc771c0ac13671bdcdd314602fcd  xsa170-4.5.patch
+b14d9a4247ae654579cb757c9b0e949a  xsa172.patch
+6de4080f47887c2e28911d4dd2f05fee  xsa173-4.4.patch
 de1a3db370b87cfb0bddb51796b50315  qemu-coroutine-gthread.patch
 dd8603eaab5857816843bfc37647d569  qemu-xen-musl-openpty.patch
 c4d2d95ae3e5f538b7145becb3c6098e  qemu-xen_paths.patch
@@ -294,6 +298,8 @@ dd15e301f2757e0c7975bdccfe49ddf41c730bc124dd90166e0844d332eeedad  xsa165-4.5.pat
 194c1ce89292f4cbb9980baa703095bcbeb5849abf46d193e07a98a0d8301f78  xsa167-4.4.patch
 c95198a66485d6e538d113ce2b84630d77c15f597113c38fadd6bf1e24e4c8ec  xsa168.patch
 1df068fb439c7edc1e86dfa9ea3b9ae99b58cdc3ac874b96cdf63b26ef9a6b98  xsa170-4.5.patch
+f18282fcb794b8772bc3af51d56860050071bd62a5a909b8f2fc2018e2958154  xsa172.patch
+35e02b8d4c2841ad951dd967b4f11aa7911fe5d52be2cb605b174e8c2e9214ca  xsa173-4.4.patch
 3941f99b49c7e8dafc9fae8aad2136a14c6d84533cd542cc5f1040a41ef7c6fe  qemu-coroutine-gthread.patch
 fe76c7c8faf686060b20491bfed4a13ce37b1bc3dcdbf33d242e388cee14c7c1  qemu-xen-musl-openpty.patch
 a6ccc0ed0dab8465188f92ceb3c983f10d65cd93bb2c8bab4e4155ef13536f5d  qemu-xen_paths.patch
@@ -336,6 +342,8 @@ f12e15fe6e67cc61ba462993f69e5c247ae58652fe84ecd5a2f48379a9734cafcb22c83c3398aeca
 61c41491a6086a96bcdf830c7330d71561b6229cd71672075d00ef011e84e3e00b15a1ff650a8ec36a3936e970593c2ce0f986810cecca1d2f1973e1378c4e22  xsa167-4.4.patch
 c55ee924b21edf54ce3c873d952a20f32f851661a13514528d42d2ef36767cfa9e31b1a42a4e0f40ff1011c692c406155fcc59be0c43fd44973cd0a5acee2ac7  xsa168.patch
 1b6f700bd9239ec7fad8e9c6d3f3c83b051dd5bc374c92d131211688985f8d8389bc2c24aa784fe1647524af4bdebd50943187218e7cf842154b16b8d4fcd9db  xsa170-4.5.patch
+8636f74b270b0ccf56ea6bab4c90d0ee909e5d2891987b4572df4a0906e2230e046aad0c99add6c1d70f7023cc6d99bcfd2947c953f600074a6ed7c176a5d3dc  xsa172.patch
+0f916156724995abe68bdd66a704c2e80d5b9c7883fd406cbd67e47e626e5c309418d4117588388bdd626b0876c0f7664733815c492c29e0c9d3a46460a01124  xsa173-4.4.patch
 c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562  qemu-coroutine-gthread.patch
 a8b7378516172389450834985e8558d7a86d7cd808154bdc846bb98325e40fc4e87b1fc6d725297f4bef6eb54ebcbcbfa4d9d0363d83f635755795fb0726e006  qemu-xen-musl-openpty.patch
 1f19cf495142dfc9f1786af6d4f7d497a482119fa2f1c10d4f9174994d38562719bc5190820dd444c32da0fb9af78fadac8dc8958437c26d6ca385f2409794e8  qemu-xen_paths.patch
diff --git a/main/xen/xsa172.patch b/main/xen/xsa172.patch
new file mode 100644
index 0000000000..8b1d01fa84
--- /dev/null
+++ b/main/xen/xsa172.patch
@@ -0,0 +1,39 @@
+x86: fix information leak on AMD CPUs
+
+The fix for XSA-52 was wrong, and so was the change synchronizing that
+new behavior to the FXRSTOR logic: AMD's manuals explicitly state that
+writes to the ES bit are ignored, and it instead gets calculated from
+the exception and mask bits (it gets set whenever there is an unmasked
+exception, and cleared otherwise). Hence we need to follow that model
+in our workaround.
+
+This is XSA-172.
+
+The first hunk (xen/arch/x86/i387.c:fpu_fxrstor) is CVE-2016-3159.
+The second hunk (xen/arch/x86/xstate.c:xrstor) is CVE-2016-3158.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/arch/x86/i387.c
++++ b/xen/arch/x86/i387.c
+@@ -49,7 +49,7 @@ static inline void fpu_fxrstor(struct vc
+      * sometimes new user value. Both should be ok. Use the FPU saved
+      * data block as a safe address because it should be in L1.
+      */
+-    if ( !(fpu_ctxt->fsw & 0x0080) &&
++    if ( !(fpu_ctxt->fsw & ~fpu_ctxt->fcw & 0x003f) &&
+          boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+     {
+         asm volatile ( "fnclex\n\t"
+--- a/xen/arch/x86/xstate.c
++++ b/xen/arch/x86/xstate.c
+@@ -344,7 +344,7 @@ void xrstor(struct vcpu *v, uint64_t mas
+      * data block as a safe address because it should be in L1.
+      */
+     if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
+-         !(ptr->fpu_sse.fsw & 0x0080) &&
++         !(ptr->fpu_sse.fsw & ~ptr->fpu_sse.fcw & 0x003f) &&
+          boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+         asm volatile ( "fnclex\n\t"         /* clear exceptions */
+                        "ffree %%st(7)\n\t"  /* clear stack tag */
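The functional change in both hunks of xsa172.patch is the same: instead of testing the saved ES bit (FSW bit 7, mask 0x0080), the workaround now tests for a pending unmasked exception — any of the six x87 exception flags (FSW bits 0-5) set while the corresponding FCW mask bit is clear — matching how AMD hardware derives ES. A minimal standalone sketch of that predicate (the helper and test values are illustrative, not Xen code):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* FSW bits 0-5 are the x87 exception flags (IE/DE/ZE/OE/UE/PE); the same
 * bit positions in FCW are the corresponding mask bits.  AMD CPUs ignore
 * the saved ES bit on FXRSTOR/XRSTOR and recompute it as "any unmasked
 * exception pending", which is what this predicate expresses. */
static bool unmasked_x87_exception(uint16_t fsw, uint16_t fcw)
{
    return (fsw & ~fcw & 0x003f) != 0;
}

int main(void)
{
    /* PE flagged but masked: no unmasked exception, ES stays clear. */
    printf("%d\n", unmasked_x87_exception(0x0020, 0x003f));  /* prints 0 */
    /* IE flagged and unmasked: hardware would set ES. */
    printf("%d\n", unmasked_x87_exception(0x0001, 0x003e));  /* prints 1 */
    return 0;
}
```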
diff --git a/main/xen/xsa173-4.4.patch b/main/xen/xsa173-4.4.patch
new file mode 100644
index 0000000000..3c9e10645e
--- /dev/null
+++ b/main/xen/xsa173-4.4.patch
@@ -0,0 +1,251 @@
+commit 5893f9ea852f428e7120a4f3184a20deeb145d91
+Author: Tim Deegan <tim@xen.org>
+Date:   Wed Mar 16 17:05:25 2016 +0000
+
+    x86: limit GFNs to 32 bits for shadowed superpages.
+
+    Superpage shadows store the shadowed GFN in the backpointer field,
+    which for non-BIGMEM builds is 32 bits wide.  Shadowing a superpage
+    mapping of a guest-physical address above 2^44 would lead to the GFN
+    being truncated there, and a crash when we come to remove the shadow
+    from the hash table.
+
+    Track the valid width of a GFN for each guest, including reporting it
+    through CPUID, and enforce it in the shadow pagetables.  Set the
+    maximum width to 32 for guests where this truncation could occur.
+
+    This is XSA-173.
+
+    Signed-off-by: Tim Deegan <tim@xen.org>
+    Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+Reported-by: Ling Liu <liuling-it@360.cn>
+diff --git a/xen/arch/x86/cpu/common.c b/xen/arch/x86/cpu/common.c
+index 4221826..f436f91 100644
+--- a/xen/arch/x86/cpu/common.c
++++ b/xen/arch/x86/cpu/common.c
+@@ -37,6 +37,7 @@ integer_param("cpuid_mask_ext_edx", opt_cpuid_mask_ext_edx);
+ struct cpu_dev * cpu_devs[X86_VENDOR_NUM] = {};
+ 
+ unsigned int paddr_bits __read_mostly = 36;
++unsigned int hap_paddr_bits __read_mostly = 36;
+ 
+ /*
+  * Default host IA32_CR_PAT value to cover all memory types.
+@@ -195,7 +196,7 @@ static void __init early_cpu_detect(void)
+ 
+ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+ {
+-	u32 tfms, xlvl, capability, excap, ebx;
++	u32 tfms, xlvl, capability, excap, eax, ebx;
+ 
+ 	/* Get vendor name */
+ 	cpuid(0x00000000, &c->cpuid_level,
+@@ -230,8 +231,11 @@ static void __cpuinit generic_identify(struct cpuinfo_x86 *c)
+ 		}
+ 		if ( xlvl >= 0x80000004 )
+ 			get_model_name(c); /* Default name */
+-		if ( xlvl >= 0x80000008 )
+-			paddr_bits = cpuid_eax(0x80000008) & 0xff;
++		if ( xlvl >= 0x80000008 ) {
++			eax = cpuid_eax(0x80000008);
++			paddr_bits = eax & 0xff;
++			hap_paddr_bits = ((eax >> 16) & 0xff) ?: paddr_bits;
++		}
+ 	}
+ 
+ 	/* Might lift BIOS max_leaf=3 limit. */
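The hunk above decodes CPUID leaf 0x80000008: EAX[7:0] gives the host physical address width, EAX[23:16] the width available to guests (zero when the field is unsupported), and the GNU `?:` extension falls back to the host value in the zero case. A hedged standalone equivalent (assumes GCC or Clang on x86-64; `read_cpuid_eax()` is a local helper, not Xen's `cpuid_eax()`):

```c
#include <cpuid.h>   /* GCC/Clang CPUID helpers */
#include <stdio.h>

static unsigned int read_cpuid_eax(unsigned int leaf)
{
    unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;
    __get_cpuid(leaf, &eax, &ebx, &ecx, &edx);
    return eax;
}

int main(void)
{
    unsigned int eax = read_cpuid_eax(0x80000008);
    unsigned int paddr_bits = eax & 0xff;         /* EAX[7:0]: host width */
    unsigned int guest_bits = (eax >> 16) & 0xff; /* EAX[23:16]: guest width */

    /* The patch's "x ?: y" is GNU shorthand for "x ? x : y". */
    unsigned int hap_paddr_bits = guest_bits ? guest_bits : paddr_bits;

    printf("paddr_bits=%u hap_paddr_bits=%u\n", paddr_bits, hap_paddr_bits);
    return 0;
}
```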
+diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
+index f3f6c61..a4bfb90 100644
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -2966,8 +2966,7 @@ void hvm_cpuid(unsigned int input, unsigned int *eax, unsigned int *ebx,
+         break;
+ 
+     case 0x80000008:
+-        count = cpuid_eax(0x80000008);
+-        count = (count >> 16) & 0xff ?: count & 0xff;
++        count = d->arch.paging.gfn_bits + PAGE_SHIFT;
+         if ( (*eax & 0xff) > count )
+             *eax = (*eax & ~0xff) | count;
+ 
+diff --git a/xen/arch/x86/mm/guest_walk.c b/xen/arch/x86/mm/guest_walk.c
+index 70460b6..09511f0 100644
+--- a/xen/arch/x86/mm/guest_walk.c
++++ b/xen/arch/x86/mm/guest_walk.c
+@@ -94,6 +94,12 @@ void *map_domain_gfn(struct p2m_domain *p2m, gfn_t gfn, mfn_t *mfn,
+     struct page_info *page;
+     void *map;
+ 
++    if ( gfn_x(gfn) >> p2m->domain->arch.paging.gfn_bits )
++    {
++        *rc = _PAGE_INVALID_BIT;
++        return NULL;
++    }
++
+     /* Translate the gfn, unsharing if shared */
+     page = get_page_from_gfn_p2m(p2m->domain, p2m, gfn_x(gfn), p2mt, NULL,
+                                  q);
+@@ -294,20 +300,8 @@ guest_walk_tables(struct vcpu *v, struct p2m_domain *p2m,
+         flags &= ~_PAGE_PAT;
+ 
+         if ( gfn_x(start) & GUEST_L2_GFN_MASK & ~0x1 )
+-        {
+-#if GUEST_PAGING_LEVELS == 2
+-            /*
+-             * Note that _PAGE_INVALID_BITS is zero in this case, yielding a
+-             * no-op here.
+-             *
+-             * Architecturally, the walk should fail if bit 21 is set (others
+-             * aren't being checked at least in PSE36 mode), but we'll ignore
+-             * this here in order to avoid specifying a non-natural, non-zero
+-             * _PAGE_INVALID_BITS value just for that case.
+-             */
+-#endif
+             rc |= _PAGE_INVALID_BITS;
+-        }
++
+         /* Increment the pfn by the right number of 4k pages.
+          * Mask out PAT and invalid bits. */
+         start = _gfn((gfn_x(start) & ~GUEST_L2_GFN_MASK) +
+@@ -390,5 +384,11 @@ set_ad:
+         put_page(mfn_to_page(mfn_x(gw->l1mfn)));
+     }
+ 
++    /* If this guest has a restricted physical address space then the
++     * target GFN must fit within it. */
++    if ( !(rc & _PAGE_PRESENT)
++         && gfn_x(guest_l1e_get_gfn(gw->l1e)) >> d->arch.paging.gfn_bits )
++        rc |= _PAGE_INVALID_BITS;
++
+     return rc;
+ }
+diff --git a/xen/arch/x86/mm/hap/hap.c b/xen/arch/x86/mm/hap/hap.c
+index c06369b..ccc4174 100644
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -428,6 +428,7 @@ static void hap_destroy_monitor_table(struct vcpu* v, mfn_t mmfn)
+ void hap_domain_init(struct domain *d)
+ {
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.hap.freelist);
++    d->arch.paging.gfn_bits = hap_paddr_bits - PAGE_SHIFT;
+ }
+ 
+ /* return 0 for success, -errno for failure */
+diff --git a/xen/arch/x86/mm/shadow/common.c b/xen/arch/x86/mm/shadow/common.c
+index 90ba4d6..06a04ad 100644
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -48,6 +48,16 @@ void shadow_domain_init(struct domain *d, unsigned int domcr_flags)
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.freelist);
+     INIT_PAGE_LIST_HEAD(&d->arch.paging.shadow.pinned_shadows);
+ 
++    d->arch.paging.gfn_bits = paddr_bits - PAGE_SHIFT;
++#ifndef CONFIG_BIGMEM
++    /*
++     * Shadowed superpages store GFNs in 32-bit page_info fields.
++     * Note that we cannot use guest_supports_superpages() here.
++     */
++    if ( !is_pv_domain(d) || opt_allow_superpage )
++        d->arch.paging.gfn_bits = 32;
++#endif
++
+     /* Use shadow pagetables for log-dirty support */
+     paging_log_dirty_init(d, shadow_enable_log_dirty,
+                           shadow_disable_log_dirty, shadow_clean_dirty_bitmap);
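Every check added above has the same shape: `gfn >> gfn_bits` is non-zero exactly when the GFN has a set bit at or above `gfn_bits`, i.e. when it cannot be represented — for shadowed superpages, in the 32-bit `page_info` backpointer field. A standalone illustration of the truncation scenario from the commit message (values and names are illustrative):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12  /* 4 KiB pages, as on x86 */

/* Non-zero high bits mean the GFN does not fit in gfn_bits bits. */
static bool gfn_out_of_range(uint64_t gfn, unsigned int gfn_bits)
{
    return (gfn >> gfn_bits) != 0;
}

int main(void)
{
    unsigned int gfn_bits = 32;        /* shadow + non-BIGMEM case above */
    uint64_t gpa = 1ULL << 44;         /* guest-physical address at 2^44 */
    uint64_t gfn = gpa >> PAGE_SHIFT;  /* = 2^32 */

    /* Without the check, storing this GFN in a 32-bit backpointer
     * silently truncates it to 0, corrupting the shadow hash table. */
    printf("out of range: %d\n", gfn_out_of_range(gfn, gfn_bits)); /* 1 */
    printf("truncated to: %u\n", (uint32_t)gfn);                   /* 0 */
    return 0;
}
```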
+diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
+index 0f499bf..6c88d4e 100644
+--- a/xen/arch/x86/mm/shadow/multi.c
++++ b/xen/arch/x86/mm/shadow/multi.c
+@@ -527,7 +527,8 @@ _sh_propagate(struct vcpu *v,
+     ASSERT(GUEST_PAGING_LEVELS > 3 || level != 3);
+ 
+     /* Check there's something for the shadows to map to */
+-    if ( !p2m_is_valid(p2mt) && !p2m_is_grant(p2mt) )
++    if ( (!p2m_is_valid(p2mt) && !p2m_is_grant(p2mt))
++         || gfn_x(target_gfn) >> d->arch.paging.gfn_bits )
+     {
+         *sp = shadow_l1e_empty();
+         goto done;
+diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
+index 7dfbbcb..a03fc2e 100644
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -187,6 +187,9 @@ struct paging_domain {
+     /* log dirty support */
+     struct log_dirty_domain log_dirty;
+ 
++    /* Number of valid bits in a gfn. */
++    unsigned int gfn_bits;
++
+     /* preemption handling */
+     struct {
+         const struct domain *dom;
+diff --git a/xen/include/asm-x86/guest_pt.h b/xen/include/asm-x86/guest_pt.h
+index d2a8250..d95f835 100644
+--- a/xen/include/asm-x86/guest_pt.h
++++ b/xen/include/asm-x86/guest_pt.h
+@@ -220,15 +220,17 @@ guest_supports_nx(struct vcpu *v)
+ }
+ 
+ 
+-/* Some bits are invalid in any pagetable entry. */
+-#if GUEST_PAGING_LEVELS == 2
+-#define _PAGE_INVALID_BITS (0)
+-#elif GUEST_PAGING_LEVELS == 3
+-#define _PAGE_INVALID_BITS \
+-    get_pte_flags(((1ull<<63) - 1) & ~((1ull<<paddr_bits) - 1))
+-#else /* GUEST_PAGING_LEVELS == 4 */
++/*
++ * Some bits are invalid in any pagetable entry.
++ * Normal flags values get represented in 24-bit values (see
++ * get_pte_flags() and put_pte_flags()), so set bit 24 in
++ * addition to be able to flag out of range frame numbers.
++ */
++#if GUEST_PAGING_LEVELS == 3
+ #define _PAGE_INVALID_BITS \
+-    get_pte_flags(((1ull<<52) - 1) & ~((1ull<<paddr_bits) - 1))
++    (_PAGE_INVALID_BIT | get_pte_flags(((1ull << 63) - 1) & ~(PAGE_SIZE - 1)))
++#else /* 2-level and 4-level */
++#define _PAGE_INVALID_BITS _PAGE_INVALID_BIT
+ #endif
+ 
+ 
+diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
+index ec3da9b..8182afd 100644
+--- a/xen/include/asm-x86/processor.h
++++ b/xen/include/asm-x86/processor.h
+@@ -194,6 +194,8 @@ extern bool_t opt_cpu_info;
+ 
+ /* Maximum width of physical addresses supported by the hardware */
+ extern unsigned int paddr_bits;
++/* Max physical address width supported within HAP guests */
++extern unsigned int hap_paddr_bits;
+ 
+ extern void identify_cpu(struct cpuinfo_x86 *);
+ extern void setup_clear_cpu_cap(unsigned int);
+diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
+index c193c88..a48c650 100644
+--- a/xen/include/asm-x86/x86_64/page.h
++++ b/xen/include/asm-x86/x86_64/page.h
+@@ -166,6 +166,7 @@ typedef l4_pgentry_t root_pgentry_t;
+ 
+ #define USER_MAPPINGS_ARE_GLOBAL
+ #ifdef USER_MAPPINGS_ARE_GLOBAL
++
+ /*
+  * Bit 12 of a 24-bit flag mask. This corresponds to bit 52 of a pte.
+  * This is needed to distinguish between user and kernel PTEs since _PAGE_USER
+@@ -176,6 +177,12 @@ typedef l4_pgentry_t root_pgentry_t;
+ #define _PAGE_GUEST_KERNEL 0
+ #endif
+ 
++/*
++ * Bit 24 of a 24-bit flag mask!  This is not any bit of a real pte,
++ * and is only used for signalling in variables that contain flags.
++ */
++#define _PAGE_INVALID_BIT (1U<<24)
++
+ #endif /* __X86_64_PAGE_H__ */
+ 
+ /*
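A closing note on why bit 24 is safe to use: Xen folds the meaningful PTE flag bits (bits 0-11 and 52-63) into a 24-bit software value via get_pte_flags()/put_pte_flags(), so bit 24 of that value can never collide with a real flag. The sketch below mimics that packing; the macro bodies follow the scheme described above but should be read as illustrative, not as the verbatim Xen macros:

```c
#include <stdint.h>
#include <stdio.h>

typedef uint64_t intpte_t;

/* Fold PTE bits 0-11 and 52-63 into a 24-bit software flags value. */
#define get_pte_flags(x) \
    (((unsigned int)((x) >> 40) & ~0xFFFu) | ((unsigned int)(x) & 0xFFFu))
/* Inverse: spread a 24-bit flags value back onto the PTE bit positions. */
#define put_pte_flags(x) \
    ((((intpte_t)(x) & ~0xFFFu) << 40) | ((x) & 0xFFFu))

#define _PAGE_INVALID_BIT (1U << 24)  /* bit 24: outside any real flag */

int main(void)
{
    intpte_t pte = (1ULL << 63) | 0x067;  /* NX plus common low flag bits */
    unsigned int flags = get_pte_flags(pte);

    /* Real flags occupy bits 0-23, so the marker never overlaps them. */
    printf("flags=%#x marker overlaps: %d\n",
           flags, (flags & _PAGE_INVALID_BIT) != 0);           /* 0 */
    printf("round-trip ok: %d\n", put_pte_flags(flags) == pte); /* 1 */
    return 0;
}
```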