author     Natanael Copa <ncopa@alpinelinux.org>  2014-08-25 11:10:07 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2014-08-25 11:10:07 +0000
commit     deffb3e9dbc9198afd2ea6c8e648a32a574a0056 (patch)
tree       bb889b39db8dc54389a241f36aad795e92a3d96c /main/xen
parent     bcf7b52774f1b0a3e405a207c3c4a5342b951f40 (diff)
download   aports-deffb3e9dbc9198afd2ea6c8e648a32a574a0056.tar.bz2
           aports-deffb3e9dbc9198afd2ea6c8e648a32a574a0056.tar.xz
main/xen: security fix for XSA-97 (CVE-2014-5146,CVE-2014-5149)
ref #3290
Diffstat (limited to 'main/xen')
-rw-r--r--  main/xen/APKBUILD              6
-rw-r--r--  main/xen/xsa97-hap-4_3.patch  485
2 files changed, 490 insertions, 1 deletion
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 88cdd1bad2..e9c1c8aa8b 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
 # Maintainer: William Pitcock <nenolod@dereferenced.org>
 pkgname=xen
 pkgver=4.3.2
-pkgrel=3
+pkgrel=4
 pkgdesc="Xen hypervisor"
 url="http://www.xen.org/"
 arch="x86_64"
@@ -24,6 +24,7 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
 	xsa41.patch
 	xsa41b.patch
 	xsa41c.patch
+	xsa97-hap-4_3.patch
 
 	fix-pod2man-choking.patch
@@ -211,6 +212,7 @@ md5sums="83e0e13678383e4fbcaa69ce6064b187  xen-4.3.2.tar.gz
 8ad8942000b8a4be4917599cad9209cf  xsa41.patch
 ed7d0399c6ca6aeee479da5d8f807fe0  xsa41b.patch
 2f3dd7bdc59d104370066d6582725575  xsa41c.patch
+8b0feffc89e3f34d835d60ad62688b30  xsa97-hap-4_3.patch
 4c5455d1adc09752a835e241097fbc39  fix-pod2man-choking.patch
 a4097e06a7e000ed00f4607db014d277  qemu-xen-websocket.patch
 35bdea1d4e3ae2565edc7e40906efdd5  qemu-xen-tls-websockets.patch
@@ -242,6 +244,7 @@ a0c225d716d343fe041b63e3940900c5b3573ed3bcfc5b7c2d52ea2861c3fc28  docs-Fix-gener
 93452beba88a8da8e89b8bfa743074a358ba1d9052151c608e21c4d62f8c4867  xsa41.patch
 896a07f57310c9bea9bc2a305166cf796282c381cb7839be49105b1726a860b5  xsa41b.patch
 683dd96a0a8899f794070c8c09643dfeeb39f92da531955cba961b45f6075914  xsa41c.patch
+cfab6521221a5058a0dfbb6d59c3c4cd0e7f4239bb6cbee2723de22c33caafda  xsa97-hap-4_3.patch
 fcb5b9ff0bc4b4d39fed9b88891491b91628aa449914cfea321abe5da24c1da2  fix-pod2man-choking.patch
 e9f6c482fc449e0b540657a8988ad31f2e680b8933e50e6486687a52f6a9ed04  qemu-xen-websocket.patch
 435dd428d83acdfde58888532a1cece1e9075b2a2460fe3f6cd33c7d400f2715  qemu-xen-tls-websockets.patch
@@ -273,6 +276,7 @@ sha512sums="ec94d849b56ec590b89022075ce43768d8ef44b7be9580ce032509b44c085f0f6649
 94672a4d37db4e370370157cac9507ee1a75832f4be779fba148c1faa0b18f26ed57126eee6256ccd5d218463325a730266b53139554f4865adedb7659154c16  xsa41.patch
 bda9105793f2327e1317991762120d0668af0e964076b18c9fdbfd509984b2e88d85df95702c46b2e00d5350e8113f6aa7b34b19064d19abbeb4d43f0c431d38  xsa41b.patch
 36b60478660ff7748328f5ab9adff13286eee1a1bad06e42fdf7e6aafe105103988525725aacd660cf5b2a184a9e2d6b3818655203c1fa07e07dcebdf23f35d9  xsa41c.patch
+acfd1058632d42bef061a9586565d184c0010d74870a25bc9b0a0bf40dda8abfd882056b8340dec45355efd9326d05f92a933f5d5c1c58e97597a8e88c61c639  xsa97-hap-4_3.patch
 2e95ad43bb66f928fe1e8caf474a3211571f75f79ea32aaa3eddb3aed9963444bd131006b67e682395af0d79118b2634bf808404693b813a94662d2a9d665ac2  fix-pod2man-choking.patch
 45f1da45f3ff937d0a626e37c130d76f5b97f49a57ddeb11ef2a8e850c04c32c819a3dfcef501eb3784db5fe7b39c88230063e56aa6e5197fd9c7b7d424fff77  qemu-xen-websocket.patch
 11eaccc346440ff285552f204d491e3b31bda1665c3219ecae3061b5d55db9dec885af0c031fa19c67e87bbe238002b1911bbd5bfea2f2ba0d61e6b3d0c952c9  qemu-xen-tls-websockets.patch
diff --git a/main/xen/xsa97-hap-4_3.patch b/main/xen/xsa97-hap-4_3.patch
new file mode 100644
index 0000000000..6d7c1d36eb
--- /dev/null
+++ b/main/xen/xsa97-hap-4_3.patch
@@ -0,0 +1,485 @@
+x86/paging: make log-dirty operations preemptible
+
+Both the freeing and the inspection of the bitmap get done in (nested)
+loops which - besides having a rather high iteration count in general,
+albeit that would be covered by XSA-77 - have the number of non-trivial
+iterations they need to perform (indirectly) controllable by both the
+guest they are for and any domain controlling the guest (including the
+one running qemu for it).
+
+This is XSA-97.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -1867,7 +1867,9 @@ int domain_relinquish_resources(struct d
+     pci_release_devices(d);
+ 
+     /* Tear down paging-assistance stuff. */
+-    paging_teardown(d);
++    ret = paging_teardown(d);
++    if ( ret )
++        return ret;
+ 
+     /* Drop the in-use references to page-table bases. */
+     for_each_vcpu ( d, v )
+--- a/xen/arch/x86/domctl.c
++++ b/xen/arch/x86/domctl.c
+@@ -61,6 +61,9 @@ long arch_do_domctl(
+         ret = paging_domctl(d,
+                             &domctl->u.shadow_op,
+                             guest_handle_cast(u_domctl, void));
++        if ( ret == -EAGAIN )
++            return hypercall_create_continuation(__HYPERVISOR_domctl,
++                                                 "h", u_domctl);
+         copyback = 1;
+     }
+     break;
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -565,8 +565,7 @@ int hap_domctl(struct domain *d, xen_dom
+         paging_unlock(d);
+         if ( preempted )
+             /* Not finished. Set up to re-run the call. */
+-            rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
+-                                               u_domctl);
++            rc = -EAGAIN;
+         else
+             /* Finished. Return the new allocation */
+             sc->mb = hap_get_allocation(d);
+--- a/xen/arch/x86/mm/paging.c
++++ b/xen/arch/x86/mm/paging.c
+@@ -26,6 +26,7 @@
+ #include <asm/shadow.h>
+ #include <asm/p2m.h>
+ #include <asm/hap.h>
++#include <asm/event.h>
+ #include <asm/hvm/nestedhvm.h>
+ #include <xen/numa.h>
+ #include <xsm/xsm.h>
+@@ -116,26 +117,46 @@ static void paging_free_log_dirty_page(s
+     d->arch.paging.free_page(d, mfn_to_page(mfn));
+ }
+ 
+-void paging_free_log_dirty_bitmap(struct domain *d)
++static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
+ {
+     mfn_t *l4, *l3, *l2;
+     int i4, i3, i2;
+ 
++    paging_lock(d);
++
+     if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+-        return;
++    {
++        paging_unlock(d);
++        return 0;
++    }
+ 
+-    paging_lock(d);
++    if ( !d->arch.paging.preempt.vcpu )
++    {
++        memset(&d->arch.paging.preempt.log_dirty, 0,
++               sizeof(d->arch.paging.preempt.log_dirty));
++        ASSERT(rc <= 0);
++        d->arch.paging.preempt.log_dirty.done = -rc;
++    }
++    else if ( d->arch.paging.preempt.vcpu != current ||
++              d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
++    {
++        paging_unlock(d);
++        return -EBUSY;
++    }
+ 
+     l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
++    i4 = d->arch.paging.preempt.log_dirty.i4;
++    i3 = d->arch.paging.preempt.log_dirty.i3;
++    rc = 0;
+ 
+-    for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
++    for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
+     {
+         if ( !mfn_valid(l4[i4]) )
+             continue;
+ 
+         l3 = map_domain_page(mfn_x(l4[i4]));
+ 
+-        for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
++        for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+         {
+             if ( !mfn_valid(l3[i3]) )
+                 continue;
+@@ -148,20 +169,54 @@ void paging_free_log_dirty_bitmap(struct
+ 
+             unmap_domain_page(l2);
+             paging_free_log_dirty_page(d, l3[i3]);
++            l3[i3] = _mfn(INVALID_MFN);
++
++            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++            {
++                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++                d->arch.paging.preempt.log_dirty.i4 = i4;
++                rc = -EAGAIN;
++                break;
++            }
+         }
+ 
+         unmap_domain_page(l3);
++        if ( rc )
++            break;
+         paging_free_log_dirty_page(d, l4[i4]);
++        l4[i4] = _mfn(INVALID_MFN);
++
++        if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++        {
++            d->arch.paging.preempt.log_dirty.i3 = 0;
++            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++            rc = -EAGAIN;
++            break;
++        }
+     }
+ 
+     unmap_domain_page(l4);
+-    paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+-    d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+ 
+-    ASSERT(d->arch.paging.log_dirty.allocs == 0);
+-    d->arch.paging.log_dirty.failed_allocs = 0;
++    if ( !rc )
++    {
++        paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
++        d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
++
++        ASSERT(d->arch.paging.log_dirty.allocs == 0);
++        d->arch.paging.log_dirty.failed_allocs = 0;
++
++        rc = -d->arch.paging.preempt.log_dirty.done;
++        d->arch.paging.preempt.vcpu = NULL;
++    }
++    else
++    {
++        d->arch.paging.preempt.vcpu = current;
++        d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
++    }
+ 
+     paging_unlock(d);
++
++    return rc;
+ }
+ 
+ int paging_log_dirty_enable(struct domain *d)
+@@ -178,15 +233,25 @@ int paging_log_dirty_enable(struct domai
+     return ret;
+ }
+ 
+-int paging_log_dirty_disable(struct domain *d)
++static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
+ {
+-    int ret;
++    int ret = 1;
++
++    if ( !resuming )
++    {
++        domain_pause(d);
++        /* Safe because the domain is paused. */
++        ret = d->arch.paging.log_dirty.disable_log_dirty(d);
++        ASSERT(ret <= 0);
++    }
+ 
+-    domain_pause(d);
+-    /* Safe because the domain is paused. */
+-    ret = d->arch.paging.log_dirty.disable_log_dirty(d);
+     if ( !paging_mode_log_dirty(d) )
+-        paging_free_log_dirty_bitmap(d);
++    {
++        ret = paging_free_log_dirty_bitmap(d, ret);
++        if ( ret == -EAGAIN )
++            return ret;
++    }
++
+     domain_unpause(d);
+ 
+     return ret;
+@@ -326,7 +391,9 @@ int paging_mfn_is_dirty(struct domain *d
+ 
+ /* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
+  * clear the bitmap and stats as well. */
+-int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
++static int paging_log_dirty_op(struct domain *d,
++                               struct xen_domctl_shadow_op *sc,
++                               bool_t resuming)
+ {
+     int rv = 0, clean = 0, peek = 1;
+     unsigned long pages = 0;
+@@ -334,9 +401,22 @@ int paging_log_dirty_op(struct domain *d
+     unsigned long *l1 = NULL;
+     int i4, i3, i2;
+ 
+-    domain_pause(d);
++    if ( !resuming )
++        domain_pause(d);
+     paging_lock(d);
+ 
++    if ( !d->arch.paging.preempt.vcpu )
++        memset(&d->arch.paging.preempt.log_dirty, 0,
++               sizeof(d->arch.paging.preempt.log_dirty));
++    else if ( d->arch.paging.preempt.vcpu != current ||
++              d->arch.paging.preempt.op != sc->op )
++    {
++        paging_unlock(d);
++        ASSERT(!resuming);
++        domain_unpause(d);
++        return -EBUSY;
++    }
++
+     clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
+ 
+     PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
+@@ -365,17 +445,15 @@ int paging_log_dirty_op(struct domain *d
+         goto out;
+     }
+ 
+-    pages = 0;
+     l4 = paging_map_log_dirty_bitmap(d);
++    i4 = d->arch.paging.preempt.log_dirty.i4;
++    i3 = d->arch.paging.preempt.log_dirty.i3;
++    pages = d->arch.paging.preempt.log_dirty.done;
+ 
+-    for ( i4 = 0;
+-          (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+-          i4++ )
++    for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
+     {
+         l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+-        for ( i3 = 0;
+-              (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+-              i3++ )
++        for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+         {
+             l2 = ((l3 && mfn_valid(l3[i3])) ?
+                   map_domain_page(mfn_x(l3[i3])) : NULL);
+@@ -410,18 +488,51 @@ int paging_log_dirty_op(struct domain *d
+             }
+             if ( l2 )
+                 unmap_domain_page(l2);
++
++            if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++            {
++                d->arch.paging.preempt.log_dirty.i4 = i4;
++                d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++                rv = -EAGAIN;
++                break;
++            }
+         }
+         if ( l3 )
+             unmap_domain_page(l3);
++
++        if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
++             hypercall_preempt_check() )
++        {
++            d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++            d->arch.paging.preempt.log_dirty.i3 = 0;
++            rv = -EAGAIN;
++        }
++        if ( rv )
++            break;
+     }
+     if ( l4 )
+         unmap_domain_page(l4);
+ 
+-    if ( pages < sc->pages )
+-        sc->pages = pages;
++    if ( !rv )
++        d->arch.paging.preempt.vcpu = NULL;
++    else
++    {
++        d->arch.paging.preempt.vcpu = current;
++        d->arch.paging.preempt.op = sc->op;
++        d->arch.paging.preempt.log_dirty.done = pages;
++    }
+ 
+     paging_unlock(d);
+ 
++    if ( rv )
++    {
++        /* Never leave the domain paused for other errors. */
++        ASSERT(rv == -EAGAIN);
++        return rv;
++    }
++
++    if ( pages < sc->pages )
++        sc->pages = pages;
+     if ( clean )
+     {
+         /* We need to further call clean_dirty_bitmap() functions of specific
+@@ -432,6 +543,7 @@ int paging_log_dirty_op(struct domain *d
+     return rv;
+ 
+  out:
++    d->arch.paging.preempt.vcpu = NULL;
+     paging_unlock(d);
+     domain_unpause(d);
+ 
+@@ -498,12 +610,6 @@ void paging_log_dirty_init(struct domain
+     d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
+ }
+ 
+-/* This function fress log dirty bitmap resources. */
+-static void paging_log_dirty_teardown(struct domain*d)
+-{
+-    paging_free_log_dirty_bitmap(d);
+-}
+-
+ /************************************************/
+ /*           CODE FOR PAGING SUPPORT            */
+ /************************************************/
+@@ -547,6 +653,7 @@ void paging_vcpu_init(struct vcpu *v)
+ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
+                   XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+ {
++    bool_t resuming = 0;
+     int rc;
+ 
+     if ( unlikely(d == current->domain) )
+@@ -569,6 +676,20 @@ int paging_domctl(struct domain *d, xen_
+         return -EINVAL;
+     }
+ 
++    if ( d->arch.paging.preempt.vcpu )
++    {
++        if ( d->arch.paging.preempt.vcpu != current ||
++             d->arch.paging.preempt.op != sc->op )
++        {
++            printk(XENLOG_G_DEBUG
++                   "d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x\n",
++                   current->domain->domain_id, current->vcpu_id,
++                   sc->op, d->domain_id, d->arch.paging.preempt.op);
++            return -EBUSY;
++        }
++        resuming = 1;
++    }
++
+     rc = xsm_shadow_control(XSM_HOOK, d, sc->op);
+     if ( rc )
+         return rc;
+@@ -594,13 +714,13 @@ int paging_domctl(struct domain *d, xen_
+ 
+     case XEN_DOMCTL_SHADOW_OP_OFF:
+         if ( paging_mode_log_dirty(d) )
+-            if ( (rc = paging_log_dirty_disable(d)) != 0 )
++            if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+                 return rc;
+         break;
+ 
+     case XEN_DOMCTL_SHADOW_OP_CLEAN:
+     case XEN_DOMCTL_SHADOW_OP_PEEK:
+-        return paging_log_dirty_op(d, sc);
++        return paging_log_dirty_op(d, sc, resuming);
+     }
+ 
+     /* Here, dispatch domctl to the appropriate paging code */
+@@ -611,18 +731,24 @@ int paging_domctl(struct domain *d, xen_
+ }
+ 
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d)
++int paging_teardown(struct domain *d)
+ {
++    int rc;
++
+     if ( hap_enabled(d) )
+         hap_teardown(d);
+     else
+         shadow_teardown(d);
+ 
+     /* clean up log dirty resources. */
+-    paging_log_dirty_teardown(d);
++    rc = paging_free_log_dirty_bitmap(d, 0);
++    if ( rc == -EAGAIN )
++        return rc;
+ 
+     /* Move populate-on-demand cache back to domain_list for destruction */
+     p2m_pod_empty_cache(d);
++
++    return rc;
+ }
+ 
+ /* Call once all of the references to the domain have gone away */
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -3706,8 +3706,7 @@ int shadow_domctl(struct domain *d,
+         paging_unlock(d);
+         if ( preempted )
+             /* Not finished. Set up to re-run the call. */
+-            rc = hypercall_create_continuation(
+-                __HYPERVISOR_domctl, "h", u_domctl);
++            rc = -EAGAIN;
+         else
+             /* Finished. Return the new allocation */
+             sc->mb = shadow_get_allocation(d);
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -527,7 +527,6 @@ int domain_kill(struct domain *d)
+         rc = domain_relinquish_resources(d);
+         if ( rc != 0 )
+         {
+-            BUG_ON(rc != -EAGAIN);
+             break;
+         }
+         if ( sched_move_domain(d, cpupool0) )
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -186,6 +186,20 @@ struct paging_domain {
+     struct hap_domain    hap;
+     /* log dirty support */
+     struct log_dirty_domain log_dirty;
++
++    /* preemption handling */
++    struct {
++        struct vcpu *vcpu;
++        unsigned int op;
++        union {
++            struct {
++                unsigned long done:PADDR_BITS - PAGE_SHIFT;
++                unsigned long i4:PAGETABLE_ORDER;
++                unsigned long i3:PAGETABLE_ORDER;
++            } log_dirty;
++        };
++    } preempt;
++
+     /* alloc/free pages from the pool for paging-assistance structures
+      * (used by p2m and log-dirty code for their tries) */
+     struct page_info * (*alloc_page)(struct domain *d);
+--- a/xen/include/asm-x86/paging.h
++++ b/xen/include/asm-x86/paging.h
+@@ -133,9 +133,6 @@ struct paging_mode {
+ /*****************************************************************************
+  * Log dirty code */
+ 
+-/* free log dirty bitmap resource */
+-void paging_free_log_dirty_bitmap(struct domain *d);
+-
+ /* get the dirty bitmap for a specific range of pfns */
+ void paging_log_dirty_range(struct domain *d,
+                             unsigned long begin_pfn,
+@@ -145,9 +142,6 @@ void paging_log_dirty_range(struct domai
+ /* enable log dirty */
+ int paging_log_dirty_enable(struct domain *d);
+ 
+-/* disable log dirty */
+-int paging_log_dirty_disable(struct domain *d);
+-
+ /* log dirty initialization */
+ void paging_log_dirty_init(struct domain *d,
+                            int (*enable_log_dirty)(struct domain *d),
+@@ -206,7 +200,7 @@ int paging_domctl(struct domain *d, xen_
+                   XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+ 
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d);
++int paging_teardown(struct domain *d);
+ 
+ /* Call once all of the references to the domain have gone away */
+ void paging_final_teardown(struct domain *d);
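The heart of the patch is the resume pattern visible in the paging.c hunks above: the log-dirty trie walk records its position (the i4/i3 indices plus a done count) in per-domain state, bails out with -EAGAIN whenever hypercall_preempt_check() fires, and arch_do_domctl() turns that -EAGAIN into a hypercall continuation so the operation picks up where it stopped instead of holding the CPU for an attacker-controlled number of iterations. The stand-alone C sketch below models only that pattern under simplified assumptions; preempt_state, preempt_check(), process_bitmap(), ENTRIES, and EAGAIN_RC are hypothetical stand-ins, not Xen's API.

/* Minimal stand-alone model of the preemptible nested-loop pattern used by
 * the XSA-97 patch. Hypothetical names, plain C -- not Xen's actual code. */
#include <stdio.h>
#include <string.h>

#define ENTRIES 8          /* stands in for LOGDIRTY_NODE_ENTRIES (512) */
#define EAGAIN_RC (-11)    /* stands in for -EAGAIN */

struct preempt_state {
    int in_use;            /* stands in for d->arch.paging.preempt.vcpu */
    unsigned int i4, i3;   /* resume point saved across continuations */
    unsigned long done;    /* work already accounted for */
};

/* Model of hypercall_preempt_check(): pretend other work becomes pending
 * after every few units of work, so the walk must yield. */
static int preempt_check(unsigned long work)
{
    return (work % 5) == 4;
}

/* Walk an ENTRIES x ENTRIES trie, yielding with EAGAIN_RC when preempted. */
static int process_bitmap(struct preempt_state *st)
{
    unsigned int i4 = st->in_use ? st->i4 : 0;
    unsigned int i3 = st->in_use ? st->i3 : 0;

    for ( ; i4 < ENTRIES; i4++, i3 = 0 )
        for ( ; i3 < ENTRIES; i3++ )
        {
            st->done++;                    /* one unit of real work */
            if ( i3 < ENTRIES - 1 && preempt_check(st->done) )
            {
                /* Save the resume point and ask to be re-invoked. */
                st->in_use = 1;
                st->i4 = i4;
                st->i3 = i3 + 1;
                return EAGAIN_RC;
            }
        }

    st->in_use = 0;                        /* finished: clear the state */
    return 0;
}

int main(void)
{
    struct preempt_state st;
    int rc, continuations = 0;

    memset(&st, 0, sizeof(st));
    /* Model of the domctl dispatcher: re-issue the call on EAGAIN_RC, the
     * way hypercall_create_continuation() re-enters the hypercall. */
    while ( (rc = process_bitmap(&st)) == EAGAIN_RC )
        continuations++;

    printf("processed %lu entries across %d continuations, rc=%d\n",
           st.done, continuations, rc);
    return 0;
}

Each re-invocation of process_bitmap() stands in for the re-entered domctl; in the real patch the saved vcpu pointer additionally prevents a different vCPU from resuming someone else's half-finished operation, which is what the -EBUSY paths above enforce.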