author     Natanael Copa <ncopa@alpinelinux.org>  2014-08-25 11:35:27 +0000
committer  Natanael Copa <ncopa@alpinelinux.org>  2014-08-25 11:54:02 +0000
commit     9a7a36301a933b7cb457c5d81ecc31c4667d2668 (patch)
tree       e788adc2407b4e79b2cfa165bd1172c95ed7d781 /main/xen
parent     dca967dba133304c05d02e621aefb8790ce37135 (diff)
download   aports-9a7a36301a933b7cb457c5d81ecc31c4667d2668.tar.bz2
           aports-9a7a36301a933b7cb457c5d81ecc31c4667d2668.tar.xz
main/xen: upgrade to 4.3.2 and fix XSA-97 (CVE-2014-5146, CVE-2014-5149)
fixes #3293
Diffstat (limited to 'main/xen')
-rw-r--r--  main/xen/APKBUILD                  |  22
-rw-r--r--  main/xen/xsa73-4_3-unstable.patch  | 105
-rw-r--r--  main/xen/xsa75-4.3-unstable.patch  |  55
-rw-r--r--  main/xen/xsa97-hap-4_3.patch       | 485
4 files changed, 494 insertions, 173 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 1c792ca622..73cab5b960 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -2,8 +2,8 @@
# Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
-pkgver=4.3.1
-pkgrel=1
+pkgver=4.3.2
+pkgrel=0
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64"
@@ -24,8 +24,7 @@ source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.g
xsa41b.patch
xsa41c.patch
- xsa73-4_3-unstable.patch
- xsa75-4.3-unstable.patch
+ xsa97-hap-4_3.patch
fix-pod2man-choking.patch
@@ -186,15 +185,14 @@ xend() {
-exec mv '{}' "$subpkgdir"/"$sitepackages"/xen \;
}
-md5sums="7616b8704e1ab89c81f011f0e3703bc8 xen-4.3.1.tar.gz
+md5sums="83e0e13678383e4fbcaa69ce6064b187 xen-4.3.2.tar.gz
2dc5ddf47c53ea168729975046c3c1f9 librt.patch
1ccde6b36a6f9542a16d998204dc9a22 qemu-xen_paths.patch
6dcff640268d514fa9164b4c812cc52d docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
8ad8942000b8a4be4917599cad9209cf xsa41.patch
ed7d0399c6ca6aeee479da5d8f807fe0 xsa41b.patch
2f3dd7bdc59d104370066d6582725575 xsa41c.patch
-5005efdb8bf44ccc2ce869611b507c83 xsa73-4_3-unstable.patch
-94b925ecbea7c4d879203776dc1903db xsa75-4.3-unstable.patch
+8b0feffc89e3f34d835d60ad62688b30 xsa97-hap-4_3.patch
4c5455d1adc09752a835e241097fbc39 fix-pod2man-choking.patch
a4097e06a7e000ed00f4607db014d277 qemu-xen-websocket.patch
35bdea1d4e3ae2565edc7e40906efdd5 qemu-xen-tls-websockets.patch
@@ -214,15 +212,14 @@ fa8c72b42e0479d521a353386d8543ef xendomains.initd
9df68ac65dc3f372f5d61183abdc83ff xen-consoles.logrotate
6a2f777c16678d84039acf670d86fff6 xenqemu.confd
f9afbf39e2b5a7d9dde60ebbd249ea7d xenqemu.initd"
-sha256sums="3b5b7cc508b1739753585b5c25635471cdcef680e8770a78bf6ef9333d26a9fd xen-4.3.1.tar.gz
+sha256sums="17611d95f955302560ff72d97c08933b4e62bc2e8ffb71400fc54e388746ff69 xen-4.3.2.tar.gz
12bf32f9937b09283f2df4955b50d6739768f66137a7d991f661f45cf77cb53b librt.patch
9440ca31a6911201f02694e93faafb5ca9b17de18b7f15b53ceac39a03411b4a qemu-xen_paths.patch
a0c225d716d343fe041b63e3940900c5b3573ed3bcfc5b7c2d52ea2861c3fc28 docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
93452beba88a8da8e89b8bfa743074a358ba1d9052151c608e21c4d62f8c4867 xsa41.patch
896a07f57310c9bea9bc2a305166cf796282c381cb7839be49105b1726a860b5 xsa41b.patch
683dd96a0a8899f794070c8c09643dfeeb39f92da531955cba961b45f6075914 xsa41c.patch
-48411cd6b15e4e4fa3c4335298179a4b1094c5e1ae8dc7582bbfb9439d97037b xsa73-4_3-unstable.patch
-91936421279fd2fa5321d9ed5a2b71fe76bc0e1348e67126e8b9cde0cb1d32b2 xsa75-4.3-unstable.patch
+cfab6521221a5058a0dfbb6d59c3c4cd0e7f4239bb6cbee2723de22c33caafda xsa97-hap-4_3.patch
fcb5b9ff0bc4b4d39fed9b88891491b91628aa449914cfea321abe5da24c1da2 fix-pod2man-choking.patch
e9f6c482fc449e0b540657a8988ad31f2e680b8933e50e6486687a52f6a9ed04 qemu-xen-websocket.patch
435dd428d83acdfde58888532a1cece1e9075b2a2460fe3f6cd33c7d400f2715 qemu-xen-tls-websockets.patch
@@ -242,15 +239,14 @@ a50a4485e84bcc098ad021556cd2aa7947c228f0a546ab942e880787ced57be3 xend.initd
0da87a4b9094f934e3de937e8ef8d3afc752e76793aa3d730182d0241e118b19 xen-consoles.logrotate
4cfcddcade5d055422ab4543e8caa6e5c5eee7625c41880a9000b7a87c7c424e xenqemu.confd
bf17808a79c57a9efc38b9f14cc87f556b2bb7ecfdec5763d9cf686255a47fce xenqemu.initd"
-sha512sums="f5250ad5ad3defc5dc1207eb6208a3928128ef57ac4162018bd92b750dc1df1eaaf37835528aca33a0f9e04c82d5f8c4ba79c03a1780d2b72cbb90cc26f77275 xen-4.3.1.tar.gz
+sha512sums="ec94d849b56ec590b89022075ce43768d8ef44b7be9580ce032509b44c085f0f66495845607a18cd3dea6b89c69bc2a18012705556f59288cd8653c3e5eca302 xen-4.3.2.tar.gz
74e3cfc51e367fc445cb3d8149f0c8830e94719a266daf04d2cd0889864591860c4c8842de2bc78070e4c5be7d14dfbb8b236c511d5faeddc2ad97177c1d3764 librt.patch
425149aea57a6deae9f488cea867f125983998dc6e8c63893fb3b9caf0ea34214251dd98ad74db823f5168631c44c49b988b6fe9c11b76bd493ddf51bc0baaa2 qemu-xen_paths.patch
477d3d08bd4fcdfbc54abea1a18acb6a41d298c366cd01c954f474515cb862d0dd59217c0dfca5460a725a8bc036de42132f522c3eefdffcc4fd511f016b783f docs-Fix-generating-qemu-doc.html-with-texinfo-5.patch
94672a4d37db4e370370157cac9507ee1a75832f4be779fba148c1faa0b18f26ed57126eee6256ccd5d218463325a730266b53139554f4865adedb7659154c16 xsa41.patch
bda9105793f2327e1317991762120d0668af0e964076b18c9fdbfd509984b2e88d85df95702c46b2e00d5350e8113f6aa7b34b19064d19abbeb4d43f0c431d38 xsa41b.patch
36b60478660ff7748328f5ab9adff13286eee1a1bad06e42fdf7e6aafe105103988525725aacd660cf5b2a184a9e2d6b3818655203c1fa07e07dcebdf23f35d9 xsa41c.patch
-8eb555bc589bc4848f640dd93bdfaf0d0a61667e26667ff2ff89ab60c8c5a777982647e8c440be7510620281bac8d9bb3281afcae36e974f09bd70184ba6ba9a xsa73-4_3-unstable.patch
-8406618c8b2398aba740713df253763a2b4e102c46ae59a8189acf6069845288fbcb5e8dafe72212b96714a18787c69106255d257b4445d69853dcfec1dc02af xsa75-4.3-unstable.patch
+acfd1058632d42bef061a9586565d184c0010d74870a25bc9b0a0bf40dda8abfd882056b8340dec45355efd9326d05f92a933f5d5c1c58e97597a8e88c61c639 xsa97-hap-4_3.patch
2e95ad43bb66f928fe1e8caf474a3211571f75f79ea32aaa3eddb3aed9963444bd131006b67e682395af0d79118b2634bf808404693b813a94662d2a9d665ac2 fix-pod2man-choking.patch
45f1da45f3ff937d0a626e37c130d76f5b97f49a57ddeb11ef2a8e850c04c32c819a3dfcef501eb3784db5fe7b39c88230063e56aa6e5197fd9c7b7d424fff77 qemu-xen-websocket.patch
11eaccc346440ff285552f204d491e3b31bda1665c3219ecae3061b5d55db9dec885af0c031fa19c67e87bbe238002b1911bbd5bfea2f2ba0d61e6b3d0c952c9 qemu-xen-tls-websockets.patch
diff --git a/main/xen/xsa73-4_3-unstable.patch b/main/xen/xsa73-4_3-unstable.patch
deleted file mode 100644
index efa64f9b17..0000000000
--- a/main/xen/xsa73-4_3-unstable.patch
+++ /dev/null
@@ -1,105 +0,0 @@
-From 068bfa76bbd52430e65853375e1d5db99d193e2f Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Thu, 31 Oct 2013 20:49:00 +0000
-Subject: [PATCH] gnttab: correct locking order reversal
-
-Coverity ID 1087189
-
-Correct a lock order reversal between a domains page allocation and grant
-table locks.
-
-This is XSA-73.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
-Consolidate error handling.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Keir Fraser <keir@xen.org>
-Tested-by: Matthew Daley <mattjd@gmail.com>
----
- xen/common/grant_table.c | 52 +++++++++++++++++++++++++++++++++++++++-------
- 1 file changed, 44 insertions(+), 8 deletions(-)
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index f42bc7a..48df928 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -1517,6 +1517,8 @@ gnttab_transfer(
-
- for ( i = 0; i < count; i++ )
- {
-+ bool_t okay;
-+
- if (i && hypercall_preempt_check())
- return i;
-
-@@ -1625,16 +1627,18 @@ gnttab_transfer(
- * pages when it is dying.
- */
- if ( unlikely(e->is_dying) ||
-- unlikely(e->tot_pages >= e->max_pages) ||
-- unlikely(!gnttab_prepare_for_transfer(e, d, gop.ref)) )
-+ unlikely(e->tot_pages >= e->max_pages) )
- {
-- if ( !e->is_dying )
-- gdprintk(XENLOG_INFO, "gnttab_transfer: "
-- "Transferee has no reservation "
-- "headroom (%d,%d) or provided a bad grant ref (%08x) "
-- "or is dying (%d)\n",
-- e->tot_pages, e->max_pages, gop.ref, e->is_dying);
- spin_unlock(&e->page_alloc_lock);
-+
-+ if ( e->is_dying )
-+ gdprintk(XENLOG_INFO, "gnttab_transfer: "
-+ "Transferee (d%d) is dying\n", e->domain_id);
-+ else
-+ gdprintk(XENLOG_INFO, "gnttab_transfer: "
-+ "Transferee (d%d) has no headroom (tot %u, max %u)\n",
-+ e->domain_id, e->tot_pages, e->max_pages);
-+
- rcu_unlock_domain(e);
- put_gfn(d, gop.mfn);
- page->count_info &= ~(PGC_count_mask|PGC_allocated);
-@@ -1646,6 +1650,38 @@ gnttab_transfer(
- /* Okay, add the page to 'e'. */
- if ( unlikely(domain_adjust_tot_pages(e, 1) == 1) )
- get_knownalive_domain(e);
-+
-+ /*
-+ * We must drop the lock to avoid a possible deadlock in
-+ * gnttab_prepare_for_transfer. We have reserved a page in e so can
-+ * safely drop the lock and re-acquire it later to add page to the
-+ * pagelist.
-+ */
-+ spin_unlock(&e->page_alloc_lock);
-+ okay = gnttab_prepare_for_transfer(e, d, gop.ref);
-+ spin_lock(&e->page_alloc_lock);
-+
-+ if ( unlikely(!okay) || unlikely(e->is_dying) )
-+ {
-+ bool_t drop_dom_ref = (domain_adjust_tot_pages(e, -1) == 0);
-+
-+ spin_unlock(&e->page_alloc_lock);
-+
-+ if ( okay /* i.e. e->is_dying due to the surrounding if() */ )
-+ gdprintk(XENLOG_INFO, "gnttab_transfer: "
-+ "Transferee (d%d) is now dying\n", e->domain_id);
-+
-+ if ( drop_dom_ref )
-+ put_domain(e);
-+ rcu_unlock_domain(e);
-+
-+ put_gfn(d, gop.mfn);
-+ page->count_info &= ~(PGC_count_mask|PGC_allocated);
-+ free_domheap_page(page);
-+ gop.status = GNTST_general_error;
-+ goto copyback;
-+ }
-+
- page_list_add_tail(page, &e->page_list);
- page_set_owner(page, e);
-
---
-1.7.10.4
-
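The fix stored in the deleted xsa73 patch above hinges on one locking pattern: instead of calling gnttab_prepare_for_transfer() (which takes the grant-table lock) while still holding the page-allocation lock, it drops the page-allocation lock, does the grant-table work, then re-acquires the lock and re-validates state that may have changed in between. Below is a minimal user-space sketch of that pattern, using pthreads and illustrative names rather than Xen's actual locks and helpers:

/*
 * Hypothetical sketch of the XSA-73 locking pattern, not Xen code.
 * Elsewhere the order is grant_table_lock -> page_alloc_lock, so taking
 * grant_table_lock while holding page_alloc_lock would risk deadlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t page_alloc_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t grant_table_lock = PTHREAD_MUTEX_INITIALIZER;
static bool domain_dying;               /* may flip while we are unlocked */

static bool prepare_for_transfer(void)  /* needs grant_table_lock */
{
    pthread_mutex_lock(&grant_table_lock);
    bool ok = true;                     /* grant-ref validation would go here */
    pthread_mutex_unlock(&grant_table_lock);
    return ok;
}

static int transfer_page(void)
{
    pthread_mutex_lock(&page_alloc_lock);
    /* ... reserve a page for the transferee while holding the lock ... */

    /* Drop the lock so the grant-table lock is never nested inside it. */
    pthread_mutex_unlock(&page_alloc_lock);
    bool okay = prepare_for_transfer();
    pthread_mutex_lock(&page_alloc_lock);

    /* The world may have changed while unlocked: re-validate. */
    if ( !okay || domain_dying )
    {
        /* ... undo the reservation ... */
        pthread_mutex_unlock(&page_alloc_lock);
        return -1;
    }

    /* ... append the page to the domain's page list ... */
    pthread_mutex_unlock(&page_alloc_lock);
    return 0;
}

int main(void)
{
    printf("transfer_page() -> %d\n", transfer_page());
    return 0;
}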
diff --git a/main/xen/xsa75-4.3-unstable.patch b/main/xen/xsa75-4.3-unstable.patch
deleted file mode 100644
index b3724c35d4..0000000000
--- a/main/xen/xsa75-4.3-unstable.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-nested VMX: VMLAUNCH/VMRESUME emulation must check permission first thing
-
-Otherwise uninitialized data may be used, leading to crashes.
-
-This is XSA-75.
-
-Reported-and-tested-by: Jeff Zimmerman <Jeff_Zimmerman@McAfee.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-and-tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/hvm/vmx/vvmx.c
-+++ b/xen/arch/x86/hvm/vmx/vvmx.c
-@@ -1509,15 +1509,10 @@ static void clear_vvmcs_launched(struct
- }
- }
-
--int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
-+static int nvmx_vmresume(struct vcpu *v, struct cpu_user_regs *regs)
- {
- struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
- struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
-- int rc;
--
-- rc = vmx_inst_check_privilege(regs, 0);
-- if ( rc != X86EMUL_OKAY )
-- return rc;
-
- /* check VMCS is valid and IO BITMAP is set */
- if ( (nvcpu->nv_vvmcxaddr != VMCX_EADDR) &&
-@@ -1536,6 +1531,10 @@ int nvmx_handle_vmresume(struct cpu_user
- struct vcpu *v = current;
- struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-+ int rc = vmx_inst_check_privilege(regs, 0);
-+
-+ if ( rc != X86EMUL_OKAY )
-+ return rc;
-
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
- {
-@@ -1555,10 +1554,13 @@ int nvmx_handle_vmresume(struct cpu_user
- int nvmx_handle_vmlaunch(struct cpu_user_regs *regs)
- {
- bool_t launched;
-- int rc;
- struct vcpu *v = current;
- struct nestedvcpu *nvcpu = &vcpu_nestedhvm(v);
- struct nestedvmx *nvmx = &vcpu_2_nvmx(v);
-+ int rc = vmx_inst_check_privilege(regs, 0);
-+
-+ if ( rc != X86EMUL_OKAY )
-+ return rc;
-
- if ( vcpu_nestedhvm(v).nv_vvmcxaddr == VMCX_EADDR )
- {
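The xsa75 change just removed is purely an ordering fix: the privilege check moves out of nvmx_vmresume() to the very top of the nvmx_handle_vmlaunch()/nvmx_handle_vmresume() entry points, so no nested-VMX state is read before the caller is validated. A stand-alone sketch of that shape, with hypothetical names rather than the real vvmx.c:

/* Sketch of the XSA-75 reordering: fail before touching any other state. */
#include <errno.h>
#include <stdio.h>

struct regs { int cpl; };               /* stand-in for cpu_user_regs */

static int inst_check_privilege(const struct regs *r)
{
    return r->cpl == 0 ? 0 : -EPERM;    /* ring 0 only */
}

static int handle_vmlaunch(const struct regs *r)
{
    int rc = inst_check_privilege(r);   /* first thing, as in the fix */
    if ( rc != 0 )
        return rc;

    /* ... only now inspect vcpu/VMCS state, which is initialized ... */
    return 0;
}

int main(void)
{
    struct regs guest_user = { .cpl = 3 }, guest_kernel = { .cpl = 0 };
    printf("user: %d, kernel: %d\n",
           handle_vmlaunch(&guest_user), handle_vmlaunch(&guest_kernel));
    return 0;
}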
diff --git a/main/xen/xsa97-hap-4_3.patch b/main/xen/xsa97-hap-4_3.patch
new file mode 100644
index 0000000000..6d7c1d36eb
--- /dev/null
+++ b/main/xen/xsa97-hap-4_3.patch
@@ -0,0 +1,485 @@
+x86/paging: make log-dirty operations preemptible
+
+Both the freeing and the inspection of the bitmap get done in (nested)
+loops which - besides having a rather high iteration count in general,
+albeit that would be covered by XSA-77 - have the number of non-trivial
+iterations they need to perform (indirectly) controllable by both the
+guest they are for and any domain controlling the guest (including the
+one running qemu for it).
+
+This is XSA-97.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -1867,7 +1867,9 @@ int domain_relinquish_resources(struct d
+ pci_release_devices(d);
+
+ /* Tear down paging-assistance stuff. */
+- paging_teardown(d);
++ ret = paging_teardown(d);
++ if ( ret )
++ return ret;
+
+ /* Drop the in-use references to page-table bases. */
+ for_each_vcpu ( d, v )
+--- a/xen/arch/x86/domctl.c
++++ b/xen/arch/x86/domctl.c
+@@ -61,6 +61,9 @@ long arch_do_domctl(
+ ret = paging_domctl(d,
+ &domctl->u.shadow_op,
+ guest_handle_cast(u_domctl, void));
++ if ( ret == -EAGAIN )
++ return hypercall_create_continuation(__HYPERVISOR_domctl,
++ "h", u_domctl);
+ copyback = 1;
+ }
+ break;
+--- a/xen/arch/x86/mm/hap/hap.c
++++ b/xen/arch/x86/mm/hap/hap.c
+@@ -565,8 +565,7 @@ int hap_domctl(struct domain *d, xen_dom
+ paging_unlock(d);
+ if ( preempted )
+ /* Not finished. Set up to re-run the call. */
+- rc = hypercall_create_continuation(__HYPERVISOR_domctl, "h",
+- u_domctl);
++ rc = -EAGAIN;
+ else
+ /* Finished. Return the new allocation */
+ sc->mb = hap_get_allocation(d);
+--- a/xen/arch/x86/mm/paging.c
++++ b/xen/arch/x86/mm/paging.c
+@@ -26,6 +26,7 @@
+ #include <asm/shadow.h>
+ #include <asm/p2m.h>
+ #include <asm/hap.h>
++#include <asm/event.h>
+ #include <asm/hvm/nestedhvm.h>
+ #include <xen/numa.h>
+ #include <xsm/xsm.h>
+@@ -116,26 +117,46 @@ static void paging_free_log_dirty_page(s
+ d->arch.paging.free_page(d, mfn_to_page(mfn));
+ }
+
+-void paging_free_log_dirty_bitmap(struct domain *d)
++static int paging_free_log_dirty_bitmap(struct domain *d, int rc)
+ {
+ mfn_t *l4, *l3, *l2;
+ int i4, i3, i2;
+
++ paging_lock(d);
++
+ if ( !mfn_valid(d->arch.paging.log_dirty.top) )
+- return;
++ {
++ paging_unlock(d);
++ return 0;
++ }
+
+- paging_lock(d);
++ if ( !d->arch.paging.preempt.vcpu )
++ {
++ memset(&d->arch.paging.preempt.log_dirty, 0,
++ sizeof(d->arch.paging.preempt.log_dirty));
++ ASSERT(rc <= 0);
++ d->arch.paging.preempt.log_dirty.done = -rc;
++ }
++ else if ( d->arch.paging.preempt.vcpu != current ||
++ d->arch.paging.preempt.op != XEN_DOMCTL_SHADOW_OP_OFF )
++ {
++ paging_unlock(d);
++ return -EBUSY;
++ }
+
+ l4 = map_domain_page(mfn_x(d->arch.paging.log_dirty.top));
++ i4 = d->arch.paging.preempt.log_dirty.i4;
++ i3 = d->arch.paging.preempt.log_dirty.i3;
++ rc = 0;
+
+- for ( i4 = 0; i4 < LOGDIRTY_NODE_ENTRIES; i4++ )
++ for ( ; i4 < LOGDIRTY_NODE_ENTRIES; i4++, i3 = 0 )
+ {
+ if ( !mfn_valid(l4[i4]) )
+ continue;
+
+ l3 = map_domain_page(mfn_x(l4[i4]));
+
+- for ( i3 = 0; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
++ for ( ; i3 < LOGDIRTY_NODE_ENTRIES; i3++ )
+ {
+ if ( !mfn_valid(l3[i3]) )
+ continue;
+@@ -148,20 +169,54 @@ void paging_free_log_dirty_bitmap(struct
+
+ unmap_domain_page(l2);
+ paging_free_log_dirty_page(d, l3[i3]);
++ l3[i3] = _mfn(INVALID_MFN);
++
++ if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++ {
++ d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++ d->arch.paging.preempt.log_dirty.i4 = i4;
++ rc = -EAGAIN;
++ break;
++ }
+ }
+
+ unmap_domain_page(l3);
++ if ( rc )
++ break;
+ paging_free_log_dirty_page(d, l4[i4]);
++ l4[i4] = _mfn(INVALID_MFN);
++
++ if ( i4 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++ {
++ d->arch.paging.preempt.log_dirty.i3 = 0;
++ d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++ rc = -EAGAIN;
++ break;
++ }
+ }
+
+ unmap_domain_page(l4);
+- paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
+- d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
+
+- ASSERT(d->arch.paging.log_dirty.allocs == 0);
+- d->arch.paging.log_dirty.failed_allocs = 0;
++ if ( !rc )
++ {
++ paging_free_log_dirty_page(d, d->arch.paging.log_dirty.top);
++ d->arch.paging.log_dirty.top = _mfn(INVALID_MFN);
++
++ ASSERT(d->arch.paging.log_dirty.allocs == 0);
++ d->arch.paging.log_dirty.failed_allocs = 0;
++
++ rc = -d->arch.paging.preempt.log_dirty.done;
++ d->arch.paging.preempt.vcpu = NULL;
++ }
++ else
++ {
++ d->arch.paging.preempt.vcpu = current;
++ d->arch.paging.preempt.op = XEN_DOMCTL_SHADOW_OP_OFF;
++ }
+
+ paging_unlock(d);
++
++ return rc;
+ }
+
+ int paging_log_dirty_enable(struct domain *d)
+@@ -178,15 +233,25 @@ int paging_log_dirty_enable(struct domai
+ return ret;
+ }
+
+-int paging_log_dirty_disable(struct domain *d)
++static int paging_log_dirty_disable(struct domain *d, bool_t resuming)
+ {
+- int ret;
++ int ret = 1;
++
++ if ( !resuming )
++ {
++ domain_pause(d);
++ /* Safe because the domain is paused. */
++ ret = d->arch.paging.log_dirty.disable_log_dirty(d);
++ ASSERT(ret <= 0);
++ }
+
+- domain_pause(d);
+- /* Safe because the domain is paused. */
+- ret = d->arch.paging.log_dirty.disable_log_dirty(d);
+ if ( !paging_mode_log_dirty(d) )
+- paging_free_log_dirty_bitmap(d);
++ {
++ ret = paging_free_log_dirty_bitmap(d, ret);
++ if ( ret == -EAGAIN )
++ return ret;
++ }
++
+ domain_unpause(d);
+
+ return ret;
+@@ -326,7 +391,9 @@ int paging_mfn_is_dirty(struct domain *d
+
+ /* Read a domain's log-dirty bitmap and stats. If the operation is a CLEAN,
+ * clear the bitmap and stats as well. */
+-int paging_log_dirty_op(struct domain *d, struct xen_domctl_shadow_op *sc)
++static int paging_log_dirty_op(struct domain *d,
++ struct xen_domctl_shadow_op *sc,
++ bool_t resuming)
+ {
+ int rv = 0, clean = 0, peek = 1;
+ unsigned long pages = 0;
+@@ -334,9 +401,22 @@ int paging_log_dirty_op(struct domain *d
+ unsigned long *l1 = NULL;
+ int i4, i3, i2;
+
+- domain_pause(d);
++ if ( !resuming )
++ domain_pause(d);
+ paging_lock(d);
+
++ if ( !d->arch.paging.preempt.vcpu )
++ memset(&d->arch.paging.preempt.log_dirty, 0,
++ sizeof(d->arch.paging.preempt.log_dirty));
++ else if ( d->arch.paging.preempt.vcpu != current ||
++ d->arch.paging.preempt.op != sc->op )
++ {
++ paging_unlock(d);
++ ASSERT(!resuming);
++ domain_unpause(d);
++ return -EBUSY;
++ }
++
+ clean = (sc->op == XEN_DOMCTL_SHADOW_OP_CLEAN);
+
+ PAGING_DEBUG(LOGDIRTY, "log-dirty %s: dom %u faults=%u dirty=%u\n",
+@@ -365,17 +445,15 @@ int paging_log_dirty_op(struct domain *d
+ goto out;
+ }
+
+- pages = 0;
+ l4 = paging_map_log_dirty_bitmap(d);
++ i4 = d->arch.paging.preempt.log_dirty.i4;
++ i3 = d->arch.paging.preempt.log_dirty.i3;
++ pages = d->arch.paging.preempt.log_dirty.done;
+
+- for ( i4 = 0;
+- (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES);
+- i4++ )
++ for ( ; (pages < sc->pages) && (i4 < LOGDIRTY_NODE_ENTRIES); i4++, i3 = 0 )
+ {
+ l3 = (l4 && mfn_valid(l4[i4])) ? map_domain_page(mfn_x(l4[i4])) : NULL;
+- for ( i3 = 0;
+- (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES);
+- i3++ )
++ for ( ; (pages < sc->pages) && (i3 < LOGDIRTY_NODE_ENTRIES); i3++ )
+ {
+ l2 = ((l3 && mfn_valid(l3[i3])) ?
+ map_domain_page(mfn_x(l3[i3])) : NULL);
+@@ -410,18 +488,51 @@ int paging_log_dirty_op(struct domain *d
+ }
+ if ( l2 )
+ unmap_domain_page(l2);
++
++ if ( i3 < LOGDIRTY_NODE_ENTRIES - 1 && hypercall_preempt_check() )
++ {
++ d->arch.paging.preempt.log_dirty.i4 = i4;
++ d->arch.paging.preempt.log_dirty.i3 = i3 + 1;
++ rv = -EAGAIN;
++ break;
++ }
+ }
+ if ( l3 )
+ unmap_domain_page(l3);
++
++ if ( !rv && i4 < LOGDIRTY_NODE_ENTRIES - 1 &&
++ hypercall_preempt_check() )
++ {
++ d->arch.paging.preempt.log_dirty.i4 = i4 + 1;
++ d->arch.paging.preempt.log_dirty.i3 = 0;
++ rv = -EAGAIN;
++ }
++ if ( rv )
++ break;
+ }
+ if ( l4 )
+ unmap_domain_page(l4);
+
+- if ( pages < sc->pages )
+- sc->pages = pages;
++ if ( !rv )
++ d->arch.paging.preempt.vcpu = NULL;
++ else
++ {
++ d->arch.paging.preempt.vcpu = current;
++ d->arch.paging.preempt.op = sc->op;
++ d->arch.paging.preempt.log_dirty.done = pages;
++ }
+
+ paging_unlock(d);
+
++ if ( rv )
++ {
++ /* Never leave the domain paused for other errors. */
++ ASSERT(rv == -EAGAIN);
++ return rv;
++ }
++
++ if ( pages < sc->pages )
++ sc->pages = pages;
+ if ( clean )
+ {
+ /* We need to further call clean_dirty_bitmap() functions of specific
+@@ -432,6 +543,7 @@ int paging_log_dirty_op(struct domain *d
+ return rv;
+
+ out:
++ d->arch.paging.preempt.vcpu = NULL;
+ paging_unlock(d);
+ domain_unpause(d);
+
+@@ -498,12 +610,6 @@ void paging_log_dirty_init(struct domain
+ d->arch.paging.log_dirty.clean_dirty_bitmap = clean_dirty_bitmap;
+ }
+
+-/* This function fress log dirty bitmap resources. */
+-static void paging_log_dirty_teardown(struct domain*d)
+-{
+- paging_free_log_dirty_bitmap(d);
+-}
+-
+ /************************************************/
+ /* CODE FOR PAGING SUPPORT */
+ /************************************************/
+@@ -547,6 +653,7 @@ void paging_vcpu_init(struct vcpu *v)
+ int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
+ XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+ {
++ bool_t resuming = 0;
+ int rc;
+
+ if ( unlikely(d == current->domain) )
+@@ -569,6 +676,20 @@ int paging_domctl(struct domain *d, xen_
+ return -EINVAL;
+ }
+
++ if ( d->arch.paging.preempt.vcpu )
++ {
++ if ( d->arch.paging.preempt.vcpu != current ||
++ d->arch.paging.preempt.op != sc->op )
++ {
++ printk(XENLOG_G_DEBUG
++ "d%d:v%d: Paging op %#x on Dom%u with unfinished prior op %#x\n",
++ current->domain->domain_id, current->vcpu_id,
++ sc->op, d->domain_id, d->arch.paging.preempt.op);
++ return -EBUSY;
++ }
++ resuming = 1;
++ }
++
+ rc = xsm_shadow_control(XSM_HOOK, d, sc->op);
+ if ( rc )
+ return rc;
+@@ -594,13 +714,13 @@ int paging_domctl(struct domain *d, xen_
+
+ case XEN_DOMCTL_SHADOW_OP_OFF:
+ if ( paging_mode_log_dirty(d) )
+- if ( (rc = paging_log_dirty_disable(d)) != 0 )
++ if ( (rc = paging_log_dirty_disable(d, resuming)) != 0 )
+ return rc;
+ break;
+
+ case XEN_DOMCTL_SHADOW_OP_CLEAN:
+ case XEN_DOMCTL_SHADOW_OP_PEEK:
+- return paging_log_dirty_op(d, sc);
++ return paging_log_dirty_op(d, sc, resuming);
+ }
+
+ /* Here, dispatch domctl to the appropriate paging code */
+@@ -611,18 +731,24 @@ int paging_domctl(struct domain *d, xen_
+ }
+
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d)
++int paging_teardown(struct domain *d)
+ {
++ int rc;
++
+ if ( hap_enabled(d) )
+ hap_teardown(d);
+ else
+ shadow_teardown(d);
+
+ /* clean up log dirty resources. */
+- paging_log_dirty_teardown(d);
++ rc = paging_free_log_dirty_bitmap(d, 0);
++ if ( rc == -EAGAIN )
++ return rc;
+
+ /* Move populate-on-demand cache back to domain_list for destruction */
+ p2m_pod_empty_cache(d);
++
++ return rc;
+ }
+
+ /* Call once all of the references to the domain have gone away */
+--- a/xen/arch/x86/mm/shadow/common.c
++++ b/xen/arch/x86/mm/shadow/common.c
+@@ -3706,8 +3706,7 @@ int shadow_domctl(struct domain *d,
+ paging_unlock(d);
+ if ( preempted )
+ /* Not finished. Set up to re-run the call. */
+- rc = hypercall_create_continuation(
+- __HYPERVISOR_domctl, "h", u_domctl);
++ rc = -EAGAIN;
+ else
+ /* Finished. Return the new allocation */
+ sc->mb = shadow_get_allocation(d);
+--- a/xen/common/domain.c
++++ b/xen/common/domain.c
+@@ -527,7 +527,6 @@ int domain_kill(struct domain *d)
+ rc = domain_relinquish_resources(d);
+ if ( rc != 0 )
+ {
+- BUG_ON(rc != -EAGAIN);
+ break;
+ }
+ if ( sched_move_domain(d, cpupool0) )
+--- a/xen/include/asm-x86/domain.h
++++ b/xen/include/asm-x86/domain.h
+@@ -186,6 +186,20 @@ struct paging_domain {
+ struct hap_domain hap;
+ /* log dirty support */
+ struct log_dirty_domain log_dirty;
++
++ /* preemption handling */
++ struct {
++ struct vcpu *vcpu;
++ unsigned int op;
++ union {
++ struct {
++ unsigned long done:PADDR_BITS - PAGE_SHIFT;
++ unsigned long i4:PAGETABLE_ORDER;
++ unsigned long i3:PAGETABLE_ORDER;
++ } log_dirty;
++ };
++ } preempt;
++
+ /* alloc/free pages from the pool for paging-assistance structures
+ * (used by p2m and log-dirty code for their tries) */
+ struct page_info * (*alloc_page)(struct domain *d);
+--- a/xen/include/asm-x86/paging.h
++++ b/xen/include/asm-x86/paging.h
+@@ -133,9 +133,6 @@ struct paging_mode {
+ /*****************************************************************************
+ * Log dirty code */
+
+-/* free log dirty bitmap resource */
+-void paging_free_log_dirty_bitmap(struct domain *d);
+-
+ /* get the dirty bitmap for a specific range of pfns */
+ void paging_log_dirty_range(struct domain *d,
+ unsigned long begin_pfn,
+@@ -145,9 +142,6 @@ void paging_log_dirty_range(struct domai
+ /* enable log dirty */
+ int paging_log_dirty_enable(struct domain *d);
+
+-/* disable log dirty */
+-int paging_log_dirty_disable(struct domain *d);
+-
+ /* log dirty initialization */
+ void paging_log_dirty_init(struct domain *d,
+ int (*enable_log_dirty)(struct domain *d),
+@@ -206,7 +200,7 @@ int paging_domctl(struct domain *d, xen_
+ XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+
+ /* Call when destroying a domain */
+-void paging_teardown(struct domain *d);
++int paging_teardown(struct domain *d);
+
+ /* Call once all of the references to the domain have gone away */
+ void paging_final_teardown(struct domain *d);
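The new xsa97 patch applies one pattern throughout: every long walk over the log-dirty trie records its position (the i4/i3 indices and a done count) in d->arch.paging.preempt, returns -EAGAIN when hypercall_preempt_check() fires, and is re-entered via hypercall_create_continuation() until it completes. A minimal user-space sketch of that resumable-walk pattern follows; all names (preempt_state, preempt_check, clean_bitmap) are illustrative, not Xen's:

/* Hypothetical sketch of a preemptible, resumable bitmap walk. */
#include <errno.h>
#include <stdio.h>

#define PAGES 10000UL

struct preempt_state {          /* like d->arch.paging.preempt */
    int in_progress;
    unsigned long cursor;       /* like the saved i4/i3 indices */
    unsigned long done;
};

static unsigned char bitmap[PAGES];

/* Stand-in for hypercall_preempt_check(): fire every 1000 pages. */
static int preempt_check(unsigned long done)
{
    return done > 0 && done % 1000 == 0;
}

static int clean_bitmap(struct preempt_state *ps)
{
    unsigned long i = ps->in_progress ? ps->cursor : 0;

    for ( ; i < PAGES; i++ )
    {
        bitmap[i] = 0;                          /* the per-page work */
        ps->done++;
        if ( i < PAGES - 1 && preempt_check(ps->done) )
        {
            ps->cursor = i + 1;                 /* resume point */
            ps->in_progress = 1;
            return -EAGAIN;                     /* caller re-issues the op */
        }
    }
    ps->in_progress = 0;
    return 0;
}

int main(void)
{
    struct preempt_state ps = { 0, 0, 0 };
    int rc, calls = 0;

    do {                        /* like the continuation re-entering domctl */
        rc = clean_bitmap(&ps);
        calls++;
    } while ( rc == -EAGAIN );

    printf("done in %d calls, %lu pages\n", calls, ps.done);
    return 0;
}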