author    Natanael Copa <ncopa@alpinelinux.org>  2015-01-23 09:06:40 +0000
committer Natanael Copa <ncopa@alpinelinux.org>  2015-01-23 09:11:02 +0000
commit    621b3e6ae3cef5a89353cb0868372c2b94ffa454 (patch)
tree      cac7bb05c5a82d191af7795cb9cc97c6a65a71b3 /main/xen
parent    bc7a651405864891312f3556d8f87c6bcb822c7b (diff)
download  aports-621b3e6ae3cef5a89353cb0868372c2b94ffa454.tar.bz2
          aports-621b3e6ae3cef5a89353cb0868372c2b94ffa454.tar.xz
main/xen: various sec fixes (xsa109 - xsa116)
ref #3704

XSA-116 CVE-2015-0361 xen crash due to use after free on hvm guest teardown
XSA-114 CVE-2014-9065 CVE-2014-9066 p2m lock starvation
XSA-113 CVE-2014-9030 Guest effectable page reference leak in MMU_MACHPHYS_UPDATE handling
XSA-112 CVE-2014-8867 Insufficient bounding of "REP MOVS" to MMIO emulated inside the hypervisor
XSA-111 CVE-2014-8866 Excessive checking in compatibility mode hypercall argument translation
XSA-110 CVE-2014-8595 Missing privilege level checks in x86 emulation of far branches
XSA-109 CVE-2014-8594 Insufficient restrictions on certain MMU update hypercalls
Diffstat (limited to 'main/xen')
-rw-r--r--  main/xen/APKBUILD         |  32
-rw-r--r--  main/xen/xsa109.patch     |  25
-rw-r--r--  main/xen/xsa110.patch     | 156
-rw-r--r--  main/xen/xsa111.patch     | 122
-rw-r--r--  main/xen/xsa112.patch     |  88
-rw-r--r--  main/xen/xsa113.patch     |  45
-rw-r--r--  main/xen/xsa114-4.4.patch | 498
-rw-r--r--  main/xen/xsa116.patch     |  33
8 files changed, 997 insertions, 2 deletions
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 7a95eead2c..47d341ef59 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -3,7 +3,7 @@
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
pkgver=4.4.1
-pkgrel=6
+pkgrel=7
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64"
@@ -12,13 +12,20 @@ depends="syslinux bash iproute2 logrotate"
depends_dev="openssl-dev python-dev e2fsprogs-dev gettext zlib-dev ncurses-dev
libiconv-dev dev86 texinfo perl iasl pciutils-dev glib-dev yajl-dev
spice-dev gnutls-dev curl-dev libaio-dev lzo-dev xz-dev util-linux-dev
- e2fsprogs-dev"
+ e2fsprogs-dev linux-headers"
makedepends="$depends_dev autoconf automake libtool"
install=""
subpackages="$pkgname-doc $pkgname-dev $pkgname-libs $pkgname-hypervisor
py-$pkgname:_py"
source="http://bits.xensource.com/oss-xen/release/$pkgver/$pkgname-$pkgver.tar.gz
xsa108.patch
+ xsa109.patch
+ xsa110.patch
+ xsa111.patch
+ xsa112.patch
+ xsa113.patch
+ xsa114-4.4.patch
+ xsa116.patch
qemu-coroutine-gthread.patch
qemu-xen-musl-openpty.patch
@@ -214,6 +221,13 @@ _py() {
md5sums="1868433f393503000877d15cd4e93d15 xen-4.4.1.tar.gz
1f66f6c52941309c825f60e1bf144987 xsa108.patch
+c8bd3d689db98dddee53f6ad97010ada xsa109.patch
+e8d2d4ca0c48570c8b0da7505e2e0d34 xsa110.patch
+887c8ca8b57d07e22dfd18890493dd47 xsa111.patch
+e0f14ff509c91b324e367ee35f024b85 xsa112.patch
+8c802cd95e29ecb085a8c436d3539c36 xsa113.patch
+7566238066a655770dfba9fe30e3a347 xsa114-4.4.patch
+6f0ed43665d54dada7a8ff10ec53563c xsa116.patch
de1a3db370b87cfb0bddb51796b50315 qemu-coroutine-gthread.patch
dd8603eaab5857816843bfc37647d569 qemu-xen-musl-openpty.patch
c4d2d95ae3e5f538b7145becb3c6098e qemu-xen_paths.patch
@@ -237,6 +251,13 @@ dcdd1de2c29e469e834a02ede4f47806 xendomains.confd
f9afbf39e2b5a7d9dde60ebbd249ea7d xenqemu.initd"
sha256sums="55b49d3c4575d7791275125ff87c0f86f1d1e0f7f2718b6fd1c4f88a9bc7ea25 xen-4.4.1.tar.gz
cf7ecf4b4680c09e8b1f03980d8350a0e1e7eb03060031788f972e0d4d47203e xsa108.patch
+729b87c2b9979fbda47c96e934db6fcfaeb10e07b4cfd66bb1e9f746a908576b xsa109.patch
+eac4691848dcd093903e0a0f5fd7ab15be15d0f10b98575379911e91e5dcbd70 xsa110.patch
+3c418f065cd452c225af34c3cccf9bdbc37efb6c6a5fc5940fd83ad8620510d3 xsa111.patch
+cc39a4cdcb52929ed36ab696807d2405aa552177a6f029d8a1a52041ca1ed519 xsa112.patch
+a0f2b792a6b4648151f85fe13961b0bf309a568ed03e1b1d4ea01e4eabf1b18e xsa113.patch
+b35ed8710693163cc33772c36e4c17dc76e25a0b2025fff4a5aa3b46c459938a xsa114-4.4.patch
+84b5a7bb2386e3d95d9d836a4a2504870723694ddaf537f1b59db75b7c63e9bd xsa116.patch
3941f99b49c7e8dafc9fae8aad2136a14c6d84533cd542cc5f1040a41ef7c6fe qemu-coroutine-gthread.patch
fe76c7c8faf686060b20491bfed4a13ce37b1bc3dcdbf33d242e388cee14c7c1 qemu-xen-musl-openpty.patch
a6ccc0ed0dab8465188f92ceb3c983f10d65cd93bb2c8bab4e4155ef13536f5d qemu-xen_paths.patch
@@ -260,6 +281,13 @@ a3ab3779c589e1a24a38a802afc238cb451f6a3779a7d6041b3f1517027ea3da xendomains.ini
bf17808a79c57a9efc38b9f14cc87f556b2bb7ecfdec5763d9cf686255a47fce xenqemu.initd"
sha512sums="bcd577014f4e8cb37b934f17a4dfb6f12e72e865a9e553cc435fdbe6665c733a4d20a812bf126727eca0946188a6abbd9419579757d7e03a38059f3656371c1c xen-4.4.1.tar.gz
f511a13ee4223ea2fa9d109fea1802b462f178d3be7de630aeba6eb40ef5d17c7db9d3b99ea414c5794d92d181a60c0bd2061f51987c6deb3a9071f5626fd049 xsa108.patch
+0b3675cfdd7f026802b7ff6c604f22c785ddbaad09faa48782d21526aaecb8d7a9ff30e908eec847c9bd5fea83d80e5903202368a748134732c9b9ecda003416 xsa109.patch
+0975ea1abf34d8d75396bed2ba65f9b146f8b6cace2e367772226d65f8fea9af57e965684c25f79db934467a99397073685f99b1435d7ce113ecfc6241cb20f3 xsa110.patch
+bfb12408bf9f5449fbe11bb24f1f4b13c6dafbc755b6696658235ad2387933bc449e2012c217a5fbb937cc2d0ef2895f52a201646ff1a8b61360199c1456d821 xsa111.patch
+d9d08039c0127007ea0db792d2b1375ac9f94d91982324cc945afd97dd3d14049195f5dceea37969442e36e49fa008053e75255dd4cbffc7d7fd265080f6ca4d xsa112.patch
+be8223e778eb529d10a752f507c0dfaef0a607191924b400979dc5fd4c1f2806e39ec49c84fb299d5d06505ffe2d4b4268551db6e909a2520f70f70bb40bb3cb xsa113.patch
+c8ed45c7a6bb9bc7cfe08aae06e36c6a88ce79c3c33ad6f707fea88b5fb70e9eb1c1ee98534b7e49ca6e52cdea56b0893d6c839874057b05ae815c2c94b7ce8f xsa114-4.4.patch
+8d0d6b01e4836195f0c57c5102fdb933c742cb941e0c565adff0dfd4ad660618dfd6b5c2c7bf367e73645a560d097d2677511d52399d19100e26e55d25aaacd1 xsa116.patch
c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562 qemu-coroutine-gthread.patch
a8b7378516172389450834985e8558d7a86d7cd808154bdc846bb98325e40fc4e87b1fc6d725297f4bef6eb54ebcbcbfa4d9d0363d83f635755795fb0726e006 qemu-xen-musl-openpty.patch
1f19cf495142dfc9f1786af6d4f7d497a482119fa2f1c10d4f9174994d38562719bc5190820dd444c32da0fb9af78fadac8dc8958437c26d6ca385f2409794e8 qemu-xen_paths.patch
diff --git a/main/xen/xsa109.patch b/main/xen/xsa109.patch
new file mode 100644
index 0000000000..0028b2fdc0
--- /dev/null
+++ b/main/xen/xsa109.patch
@@ -0,0 +1,25 @@
+x86: don't allow page table updates on non-PV page tables in do_mmu_update()
+
+paging_write_guest_entry() and paging_cmpxchg_guest_entry() aren't
+consistently supported for non-PV guests (they'd deref NULL for PVH or
+non-HAP HVM ones). Don't allow respective MMU_* operations on the
+page tables of such domains.
+
+This is XSA-109.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Acked-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -3493,6 +3493,10 @@ long do_mmu_update(
+ {
+ p2m_type_t p2mt;
+
++ rc = -EOPNOTSUPP;
++ if ( unlikely(paging_mode_refcounts(pt_owner)) )
++ break;
++
+ xsm_needed |= XSM_MMU_NORMAL_UPDATE;
+ if ( get_pte_flags(req.val) & _PAGE_PRESENT )
+ {
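The XSA-109 fix is a single guard: paging_write_guest_entry() and paging_cmpxchg_guest_entry() are only wired up for PV page tables, so do_mmu_update() now refuses page-table update operations against domains in refcounted paging mode before the NULL hooks can be reached. A minimal standalone sketch of the pattern, with illustrative stand-ins rather than the real Xen structures:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for Xen's struct domain and its predicate. */
struct domain { bool refcounted_paging; };

static bool paging_mode_refcounts(const struct domain *d)
{
    return d->refcounted_paging;   /* true for HVM/PVH-style domains */
}

static long mmu_update_pt(struct domain *pt_owner)
{
    /* The fix: bail out before the PV-only write hooks (NULL for
     * such domains) can be dereferenced. */
    if (paging_mode_refcounts(pt_owner))
        return -EOPNOTSUPP;

    /* ... validate and apply the PV page-table update here ... */
    return 0;
}

int main(void)
{
    struct domain pv = { false }, hvm = { true };
    printf("pv=%ld hvm=%ld\n", mmu_update_pt(&pv), mmu_update_pt(&hvm));
    return 0;
}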
diff --git a/main/xen/xsa110.patch b/main/xen/xsa110.patch
new file mode 100644
index 0000000000..3e7479ffd5
--- /dev/null
+++ b/main/xen/xsa110.patch
@@ -0,0 +1,156 @@
+x86emul: enforce privilege level restrictions when loading CS
+
+Privilege level checks were basically missing for the CS case, the
+only check that was done (RPL == DPL for nonconforming segments)
+was solely covering a single special case (return to non-conforming
+segment).
+
+Additionally in long mode the L bit set requires the D bit to be clear,
+as was recently pointed out for KVM by Nadav Amit
+<namit@cs.technion.ac.il>.
+
+Finally we also need to force the loaded selector's RPL to CPL (at
+least as long as lret/retf emulation doesn't support privilege level
+changes).
+
+This is XSA-110.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/x86_emulate/x86_emulate.c
++++ b/xen/arch/x86/x86_emulate/x86_emulate.c
+@@ -1119,7 +1119,7 @@ realmode_load_seg(
+ static int
+ protmode_load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1185,9 +1185,23 @@ protmode_load_seg(
+ /* Code segment? */
+ if ( !(desc.b & (1u<<11)) )
+ goto raise_exn;
+- /* Non-conforming segment: check DPL against RPL. */
+- if ( ((desc.b & (6u<<9)) != (6u<<9)) && (dpl != rpl) )
++ if ( is_ret
++ ? /*
++ * Really rpl < cpl, but our sole caller doesn't handle
++ * privilege level changes.
++ */
++ rpl != cpl || (desc.b & (1 << 10) ? dpl > rpl : dpl != rpl)
++ : desc.b & (1 << 10)
++ /* Conforming segment: check DPL against CPL. */
++ ? dpl > cpl
++ /* Non-conforming segment: check RPL and DPL against CPL. */
++ : rpl > cpl || dpl != cpl )
+ goto raise_exn;
++ /* 64-bit code segments (L bit set) must have D bit clear. */
++ if ( in_longmode(ctxt, ops) &&
++ (desc.b & (1 << 21)) && (desc.b & (1 << 22)) )
++ goto raise_exn;
++ sel = (sel ^ rpl) | cpl;
+ break;
+ case x86_seg_ss:
+ /* Writable data segment? */
+@@ -1252,7 +1266,7 @@ protmode_load_seg(
+ static int
+ load_seg(
+ enum x86_segment seg,
+- uint16_t sel,
++ uint16_t sel, bool_t is_ret,
+ struct x86_emulate_ctxt *ctxt,
+ const struct x86_emulate_ops *ops)
+ {
+@@ -1261,7 +1275,7 @@ load_seg(
+ return X86EMUL_UNHANDLEABLE;
+
+ if ( in_protmode(ctxt, ops) )
+- return protmode_load_seg(seg, sel, ctxt, ops);
++ return protmode_load_seg(seg, sel, is_ret, ctxt, ops);
+
+ return realmode_load_seg(seg, sel, ctxt, ops);
+ }
+@@ -2003,7 +2017,7 @@ x86_emulate(
+ if ( (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes),
+ &dst.val, op_bytes, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(src.val, (uint16_t)dst.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(src.val, dst.val, 0, ctxt, ops)) != 0 )
+ return rc;
+ break;
+
+@@ -2357,7 +2371,7 @@ x86_emulate(
+ enum x86_segment seg = decode_segment(modrm_reg);
+ generate_exception_if(seg == decode_segment_failed, EXC_UD, -1);
+ generate_exception_if(seg == x86_seg_cs, EXC_UD, -1);
+- if ( (rc = load_seg(seg, (uint16_t)src.val, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(seg, src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ if ( seg == x86_seg_ss )
+ ctxt->retire.flags.mov_ss = 1;
+@@ -2438,7 +2452,7 @@ x86_emulate(
+ &_regs.eip, op_bytes, ctxt)) )
+ goto done;
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -2662,7 +2676,7 @@ x86_emulate(
+ if ( (rc = read_ulong(src.mem.seg, src.mem.off + src.bytes,
+ &sel, 2, ctxt, ops)) != 0 )
+ goto done;
+- if ( (rc = load_seg(dst.val, (uint16_t)sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(dst.val, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ dst.val = src.val;
+ break;
+@@ -2736,7 +2750,7 @@ x86_emulate(
+ &dst.val, op_bytes, ctxt, ops)) ||
+ (rc = read_ulong(x86_seg_ss, sp_post_inc(op_bytes + offset),
+ &src.val, op_bytes, ctxt, ops)) ||
+- (rc = load_seg(x86_seg_cs, (uint16_t)src.val, ctxt, ops)) )
++ (rc = load_seg(x86_seg_cs, src.val, 1, ctxt, ops)) )
+ goto done;
+ _regs.eip = dst.val;
+ break;
+@@ -2785,7 +2799,7 @@ x86_emulate(
+ _regs.eflags &= mask;
+ _regs.eflags |= (uint32_t)(eflags & ~mask) | 0x02;
+ _regs.eip = eip;
+- if ( (rc = load_seg(x86_seg_cs, (uint16_t)cs, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, cs, 1, ctxt, ops)) != 0 )
+ goto done;
+ break;
+ }
+@@ -3415,7 +3429,7 @@ x86_emulate(
+ generate_exception_if(mode_64bit(), EXC_UD, -1);
+ eip = insn_fetch_bytes(op_bytes);
+ sel = insn_fetch_type(uint16_t);
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = eip;
+ break;
+@@ -3714,7 +3728,7 @@ x86_emulate(
+ goto done;
+ }
+
+- if ( (rc = load_seg(x86_seg_cs, sel, ctxt, ops)) != 0 )
++ if ( (rc = load_seg(x86_seg_cs, sel, 0, ctxt, ops)) != 0 )
+ goto done;
+ _regs.eip = src.val;
+
+@@ -3781,7 +3795,7 @@ x86_emulate(
+ generate_exception_if(!in_protmode(ctxt, ops), EXC_UD, -1);
+ generate_exception_if(!mode_ring0(), EXC_GP, 0);
+ if ( (rc = load_seg((modrm_reg & 1) ? x86_seg_tr : x86_seg_ldtr,
+- src.val, ctxt, ops)) != 0 )
++ src.val, 0, ctxt, ops)) != 0 )
+ goto done;
+ break;
+
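The heart of XSA-110 is the ternary chain in protmode_load_seg(). Unfolded, the rules it enforces are: on a far return the selector's RPL must equal CPL (the sole caller cannot handle privilege changes) with DPL matching accordingly; otherwise a conforming code segment needs DPL <= CPL and a non-conforming one needs RPL <= CPL and DPL == CPL; finally the loaded selector's RPL is forced to CPL. A compilable restatement of that check, using the bit layout from the patch but not the emulator's actual interface:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* desc_b is the high 32 bits of the segment descriptor:
 * bit 10 = conforming, bits 13-14 = DPL (as used in the patch). */
static bool cs_load_ok(uint32_t desc_b, unsigned int rpl, unsigned int cpl,
                       bool is_ret)
{
    unsigned int dpl = (desc_b >> 13) & 3;
    bool conforming = desc_b & (1u << 10);

    if (is_ret)
        /* lret/retf: really rpl >= cpl, but privilege-level changes
         * are unsupported, so require equality. */
        return rpl == cpl && (conforming ? dpl <= rpl : dpl == rpl);

    return conforming ? dpl <= cpl                   /* conforming     */
                      : rpl <= cpl && dpl == cpl;    /* non-conforming */
}

static uint16_t force_rpl(uint16_t sel, unsigned int cpl)
{
    /* Equivalent to the patch's sel = (sel ^ rpl) | cpl, since
     * rpl is sel's low two bits: clear RPL, substitute CPL. */
    return (uint16_t)((sel & ~3u) | cpl);
}

int main(void)
{
    /* A ring-3 guest may not load a non-conforming ring-0 segment. */
    printf("%d\n", cs_load_ok(0u << 13, 3, 3, false));       /* 0 */
    printf("%#x\n", (unsigned)force_rpl(0x0010, 3));         /* 0x13 */
    return 0;
}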
diff --git a/main/xen/xsa111.patch b/main/xen/xsa111.patch
new file mode 100644
index 0000000000..1761316210
--- /dev/null
+++ b/main/xen/xsa111.patch
@@ -0,0 +1,122 @@
+x86: limit checks in hypercall_xlat_continuation() to actual arguments
+
+HVM/PVH guests can otherwise trigger the final BUG_ON() in that
+function by entering 64-bit mode, setting the high halves of affected
+registers to non-zero values, leaving 64-bit mode, and issuing a
+hypercall that might get preempted and hence become subject to
+continuation argument translation (HYPERVISOR_memory_op being the only
+one possible for HVM, PVH also having the option of using
+HYPERVISOR_mmuext_op). This issue got introduced when HVM code was
+switched to use compat_memory_op() - neither that nor
+hypercall_xlat_continuation() were originally intended to be used by
+other than PV guests (which can't enter 64-bit mode and hence have no
+way to alter the high halves of 64-bit registers).
+
+This is XSA-111.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/domain.c
++++ b/xen/arch/x86/domain.c
+@@ -1750,7 +1750,8 @@ unsigned long hypercall_create_continuat
+ return op;
+ }
+
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...)
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...)
+ {
+ int rc = 0;
+ struct mc_state *mcs = &current->mc_state;
+@@ -1759,7 +1760,10 @@ int hypercall_xlat_continuation(unsigned
+ unsigned long nval = 0;
+ va_list args;
+
+- BUG_ON(id && *id > 5);
++ ASSERT(nr <= ARRAY_SIZE(mcs->call.args));
++ ASSERT(!(mask >> nr));
++
++ BUG_ON(id && *id >= nr);
+ BUG_ON(id && (mask & (1U << *id)));
+
+ va_start(args, mask);
+@@ -1772,7 +1776,7 @@ int hypercall_xlat_continuation(unsigned
+ return 0;
+ }
+
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ if ( mask & 1 )
+ {
+@@ -1800,7 +1804,7 @@ int hypercall_xlat_continuation(unsigned
+ else
+ {
+ regs = guest_cpu_user_regs();
+- for ( i = 0; i < 6; ++i, mask >>= 1 )
++ for ( i = 0; i < nr; ++i, mask >>= 1 )
+ {
+ unsigned long *reg;
+
+--- a/xen/arch/x86/x86_64/compat/mm.c
++++ b/xen/arch/x86/x86_64/compat/mm.c
+@@ -118,7 +118,7 @@ int compat_arch_memory_op(unsigned long
+ break;
+
+ if ( rc == __HYPERVISOR_memory_op )
+- hypercall_xlat_continuation(NULL, 0x2, nat, arg);
++ hypercall_xlat_continuation(NULL, 2, 0x2, nat, arg);
+
+ XLAT_pod_target(&cmp, nat);
+
+@@ -354,7 +354,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
+ left = 1;
+ if ( arg1 != MMU_UPDATE_PREEMPTED )
+ {
+- BUG_ON(!hypercall_xlat_continuation(&left, 0x01, nat_ops,
++ BUG_ON(!hypercall_xlat_continuation(&left, 4, 0x01, nat_ops,
+ cmp_uops));
+ if ( !test_bit(_MCSF_in_multicall, &mcs->flags) )
+ regs->_ecx += count - i;
+@@ -362,7 +362,7 @@ int compat_mmuext_op(XEN_GUEST_HANDLE_PA
+ mcs->compat_call.args[1] += count - i;
+ }
+ else
+- BUG_ON(hypercall_xlat_continuation(&left, 0));
++ BUG_ON(hypercall_xlat_continuation(&left, 4, 0));
+ BUG_ON(left != arg1);
+ }
+ else
+--- a/xen/common/compat/memory.c
++++ b/xen/common/compat/memory.c
+@@ -282,7 +282,7 @@ int compat_memory_op(unsigned int cmd, X
+ break;
+
+ cmd = 0;
+- if ( hypercall_xlat_continuation(&cmd, 0x02, nat.hnd, compat) )
++ if ( hypercall_xlat_continuation(&cmd, 2, 0x02, nat.hnd, compat) )
+ {
+ BUG_ON(rc != __HYPERVISOR_memory_op);
+ BUG_ON((cmd & MEMOP_CMD_MASK) != op);
+--- a/xen/include/xen/compat.h
++++ b/xen/include/xen/compat.h
+@@ -195,6 +195,8 @@ static inline int name(k xen_ ## n *x, k
+ * This option is useful for extracting the "op" argument or similar from the
+ * hypercall to enable further xlat processing.
+ *
++ * nr: Total number of arguments the hypercall has.
++ *
+ * mask: Specifies which of the hypercall arguments require compat translation.
+ * bit 0 indicates that the 0'th argument requires translation, bit 1 indicates
+ * that the first argument requires translation and so on. Native and compat
+@@ -214,7 +216,8 @@ static inline int name(k xen_ ## n *x, k
+ *
+ * Return: Number of arguments which were actually translated.
+ */
+-int hypercall_xlat_continuation(unsigned int *id, unsigned int mask, ...);
++int hypercall_xlat_continuation(unsigned int *id, unsigned int nr,
++ unsigned int mask, ...);
+
+ /* In-place translation functons: */
+ struct start_info;
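The shape of the XSA-111 fix: hypercall_xlat_continuation() used to walk a hard-coded six argument slots, so stale high register halves that a 64-bit-capable HVM/PVH guest controls could trip the final BUG_ON(); now each caller passes the hypercall's actual argument count, and the loops and the sanity checks on id and mask are bounded by it. A toy version of the bounded walk, illustrative rather than Xen's real calling convention:

#include <assert.h>
#include <stdarg.h>
#include <stdint.h>
#include <stdio.h>

#define MAX_HCALL_ARGS 6

/* Translate the first nr argument slots selected by mask, replacing
 * each with the truncated 32-bit value supplied in the varargs. */
static int xlat_continuation(unsigned long args[MAX_HCALL_ARGS],
                             unsigned int nr, unsigned int mask, ...)
{
    va_list ap;
    unsigned int i;
    int done = 0;

    assert(nr <= MAX_HCALL_ARGS);
    assert(!(mask >> nr));        /* mask must not select args beyond nr */

    va_start(ap, mask);
    for (i = 0; i < nr; ++i, mask >>= 1)    /* was: i < 6 */
        if (mask & 1) {
            args[i] = (uint32_t)va_arg(ap, unsigned long);
            ++done;
        }
    va_end(ap);
    return done;
}

int main(void)
{
    unsigned long args[MAX_HCALL_ARGS] = { 0 };
    /* Two-argument hypercall: only slot 1 needs translation. */
    printf("%d %#lx\n", xlat_continuation(args, 2, 0x2, 0x12345678UL),
           args[1]);
    return 0;
}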
diff --git a/main/xen/xsa112.patch b/main/xen/xsa112.patch
new file mode 100644
index 0000000000..a3da6267f8
--- /dev/null
+++ b/main/xen/xsa112.patch
@@ -0,0 +1,88 @@
+x86/HVM: confine internally handled MMIO to solitary regions
+
+While it is generally wrong to cross region boundaries when dealing
+with MMIO accesses of repeated string instructions (currently only
+MOVS) as that would do things a guest doesn't expect (leaving aside
+that none of these regions would normally be accessed with repeated
+string instructions in the first place), this is even more of a problem
+for all virtual MSI-X page accesses (both msixtbl_{read,write}() can be
+made dereference NULL "entry" pointers this way) as well as undersized
+(1- or 2-byte) LAPIC writes (causing vlapic_read_aligned() to access
+space beyond the one memory page set up for holding LAPIC register
+values).
+
+Since those functions validly assume to be called only with addresses
+their respective checking functions indicated to be okay, it is generic
+code that needs to be fixed to clip the repetition count.
+
+To be on the safe side (and consistent), also do the same for buffered
+I/O intercepts, even if their only client (stdvga) doesn't put the
+hypervisor at risk (i.e. "only" guest misbehavior would result).
+
+This is CVE-2014-8867 / XSA-112.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/hvm/intercept.c
++++ b/xen/arch/x86/hvm/intercept.c
+@@ -181,11 +181,24 @@ int hvm_mmio_intercept(ioreq_t *p)
+ int i;
+
+ for ( i = 0; i < HVM_MMIO_HANDLER_NR; i++ )
+- if ( hvm_mmio_handlers[i]->check_handler(v, p->addr) )
++ {
++ hvm_mmio_check_t check_handler =
++ hvm_mmio_handlers[i]->check_handler;
++
++ if ( check_handler(v, p->addr) )
++ {
++ if ( unlikely(p->count > 1) &&
++ !check_handler(v, unlikely(p->df)
++ ? p->addr - (p->count - 1L) * p->size
++ : p->addr + (p->count - 1L) * p->size) )
++ p->count = 1;
++
+ return hvm_mmio_access(
+ v, p,
+ hvm_mmio_handlers[i]->read_handler,
+ hvm_mmio_handlers[i]->write_handler);
++ }
++ }
+
+ return X86EMUL_UNHANDLEABLE;
+ }
+@@ -342,6 +355,13 @@ int hvm_io_intercept(ioreq_t *p, int typ
+ if ( type == HVM_PORTIO )
+ return process_portio_intercept(
+ handler->hdl_list[i].action.portio, p);
++
++ if ( unlikely(p->count > 1) &&
++ (unlikely(p->df)
++ ? p->addr - (p->count - 1L) * p->size < addr
++ : p->addr + p->count * 1L * p->size - 1 >= addr + size) )
++ p->count = 1;
++
+ return handler->hdl_list[i].action.mmio(p);
+ }
+ }
+--- a/xen/arch/x86/hvm/vmsi.c
++++ b/xen/arch/x86/hvm/vmsi.c
+@@ -226,6 +226,8 @@ static int msixtbl_read(
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
+
+ if ( offset != PCI_MSIX_ENTRY_VECTOR_CTRL_OFFSET )
+@@ -268,6 +270,8 @@ static int msixtbl_write(struct vcpu *v,
+ rcu_read_lock(&msixtbl_rcu_lock);
+
+ entry = msixtbl_find_entry(v, address);
++ if ( !entry )
++ goto out;
+ nr_entry = (address - entry->gtable) / PCI_MSIX_ENTRY_SIZE;
+
+ offset = address & (PCI_MSIX_ENTRY_SIZE - 1);
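Both intercept.c hunks in XSA-112 apply the same clipping rule: before handing a repeated access to an internal MMIO handler, compute where its last element lands (backwards if DF is set), and if that falls outside the region the first element matched, cut the repeat count to one so the remainder re-enters emulation and is re-checked. The rule in isolation, as a sketch whose field names follow the ioreq fields in the patch:

#include <stdint.h>
#include <stdio.h>

/* Clip count so every element of the access stays within
 * [base, base + size); df = decrementing (backwards) string op. */
static void clip_rep_count(uint64_t addr, uint64_t bytes, int df,
                           uint64_t base, uint64_t size, uint64_t *count)
{
    if (*count > 1 &&
        (df ? addr - (*count - 1) * bytes < base
            : addr + *count * bytes - 1 >= base + size))
        *count = 1;   /* handle one element; the rest is retried */
}

int main(void)
{
    uint64_t count = 16;
    /* 16 x 4-byte reads starting 8 bytes before the end of a 4K region. */
    clip_rep_count(0x1000 - 8, 4, 0, 0x0, 0x1000, &count);
    printf("count clipped to %llu\n", (unsigned long long)count);  /* 1 */
    return 0;
}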
diff --git a/main/xen/xsa113.patch b/main/xen/xsa113.patch
new file mode 100644
index 0000000000..adc8bba064
--- /dev/null
+++ b/main/xen/xsa113.patch
@@ -0,0 +1,45 @@
+x86/mm: fix a reference counting error in MMU_MACHPHYS_UPDATE
+
+Any domain which can pass the XSM check against a translated guest can cause a
+page reference to be leaked.
+
+While shuffling the order of checks, drop the quite-pointless MEM_LOG(). This
+brings the check in line with similar checks in the vicinity.
+
+Discovered while reviewing the XSA-109/110 followup series.
+
+This is XSA-113.
+
+Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Tim Deegan <tim@xen.org>
+
+--- a/xen/arch/x86/mm.c
++++ b/xen/arch/x86/mm.c
+@@ -3619,6 +3619,12 @@ long do_mmu_update(
+
+ case MMU_MACHPHYS_UPDATE:
+
++ if ( unlikely(paging_mode_translate(pg_owner)) )
++ {
++ rc = -EINVAL;
++ break;
++ }
++
+ mfn = req.ptr >> PAGE_SHIFT;
+ gpfn = req.val;
+
+@@ -3638,13 +3644,6 @@ long do_mmu_update(
+ break;
+ }
+
+- if ( unlikely(paging_mode_translate(pg_owner)) )
+- {
+- MEM_LOG("Mach-phys update on auto-translate guest");
+- rc = -EINVAL;
+- break;
+- }
+-
+ set_gpfn_from_mfn(mfn, gpfn);
+
+ paging_mark_dirty(pg_owner, mfn);
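What the XSA-113 reordering buys: in the lines elided between the two hunks, the MMU_MACHPHYS_UPDATE path takes a reference on the target page, and the old translated-guest check then exited without dropping it, leaking a reference for any domain passing the XSM check. Moving the check ahead of the acquisition makes the reject path reference-neutral. A toy model of the fixed ordering, with all names illustrative:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

struct domain { bool translated; int page_refs; };

static bool paging_mode_translate(struct domain *d) { return d->translated; }
static bool get_page(struct domain *d) { d->page_refs++; return true; }
static void put_page(struct domain *d) { d->page_refs--; }

static int machphys_update(struct domain *pg_owner)
{
    /* The fix: reject auto-translated guests before any reference is
     * taken, so the error exit cannot leak one. */
    if (paging_mode_translate(pg_owner))
        return -EINVAL;

    if (!get_page(pg_owner))     /* stands in for get_page_from_pagenr() */
        return -EINVAL;

    /* ... set_gpfn_from_mfn(), paging_mark_dirty() ... */

    put_page(pg_owner);
    return 0;
}

int main(void)
{
    struct domain d = { .translated = true, .page_refs = 0 };
    machphys_update(&d);
    printf("refs after rejected update: %d\n", d.page_refs);  /* 0: no leak */
    return 0;
}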
diff --git a/main/xen/xsa114-4.4.patch b/main/xen/xsa114-4.4.patch
new file mode 100644
index 0000000000..a640747031
--- /dev/null
+++ b/main/xen/xsa114-4.4.patch
@@ -0,0 +1,498 @@
+switch to write-biased r/w locks
+
+This is to improve fairness: A permanent flow of read acquires can
+otherwise lock out eventual writers indefinitely.
+
+This is XSA-114 / CVE-2014-9065.
+
+Signed-off-by: Keir Fraser <keir@xen.org>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
+--- a/xen/common/spinlock.c
++++ b/xen/common/spinlock.c
+@@ -271,112 +271,151 @@ void _spin_unlock_recursive(spinlock_t *
+
+ void _read_lock(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+- {
+- while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
++ do {
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
+ cpu_relax();
+- }
++ } while ( cmpxchg(&lock->lock, x, x+1) != x );
+ preempt_disable();
+ }
+
+ void _read_lock_irq(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ ASSERT(local_irq_is_enabled());
+ local_irq_disable();
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+- {
+- local_irq_enable();
+- while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+- cpu_relax();
+- local_irq_disable();
+- }
++ do {
++ if ( (x = lock->lock) & RW_WRITE_FLAG )
++ {
++ local_irq_enable();
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
++ cpu_relax();
++ local_irq_disable();
++ }
++ } while ( cmpxchg(&lock->lock, x, x+1) != x );
+ preempt_disable();
+ }
+
+ unsigned long _read_lock_irqsave(rwlock_t *lock)
+ {
++ uint32_t x;
+ unsigned long flags;
++
+ local_irq_save(flags);
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_read_trylock(&lock->raw)) )
+- {
+- local_irq_restore(flags);
+- while ( likely(_raw_rw_is_write_locked(&lock->raw)) )
+- cpu_relax();
+- local_irq_save(flags);
+- }
++ do {
++ if ( (x = lock->lock) & RW_WRITE_FLAG )
++ {
++ local_irq_restore(flags);
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
++ cpu_relax();
++ local_irq_save(flags);
++ }
++ } while ( cmpxchg(&lock->lock, x, x+1) != x );
+ preempt_disable();
+ return flags;
+ }
+
+ int _read_trylock(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ check_lock(&lock->debug);
+- if ( !_raw_read_trylock(&lock->raw) )
+- return 0;
++ do {
++ if ( (x = lock->lock) & RW_WRITE_FLAG )
++ return 0;
++ } while ( cmpxchg(&lock->lock, x, x+1) != x );
+ preempt_disable();
+ return 1;
+ }
+
+ void _read_unlock(rwlock_t *lock)
+ {
++ uint32_t x, y;
++
+ preempt_enable();
+- _raw_read_unlock(&lock->raw);
++ x = lock->lock;
++ while ( (y = cmpxchg(&lock->lock, x, x-1)) != x )
++ x = y;
+ }
+
+ void _read_unlock_irq(rwlock_t *lock)
+ {
+- preempt_enable();
+- _raw_read_unlock(&lock->raw);
++ _read_unlock(lock);
+ local_irq_enable();
+ }
+
+ void _read_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ {
+- preempt_enable();
+- _raw_read_unlock(&lock->raw);
++ _read_unlock(lock);
+ local_irq_restore(flags);
+ }
+
+ void _write_lock(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_write_trylock(&lock->raw)) )
+- {
+- while ( likely(_raw_rw_is_locked(&lock->raw)) )
++ do {
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
+ cpu_relax();
++ } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
++ while ( x != 0 )
++ {
++ cpu_relax();
++ x = lock->lock & ~RW_WRITE_FLAG;
+ }
+ preempt_disable();
+ }
+
+ void _write_lock_irq(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ ASSERT(local_irq_is_enabled());
+ local_irq_disable();
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_write_trylock(&lock->raw)) )
++ do {
++ if ( (x = lock->lock) & RW_WRITE_FLAG )
++ {
++ local_irq_enable();
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
++ cpu_relax();
++ local_irq_disable();
++ }
++ } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
++ while ( x != 0 )
+ {
+- local_irq_enable();
+- while ( likely(_raw_rw_is_locked(&lock->raw)) )
+- cpu_relax();
+- local_irq_disable();
++ cpu_relax();
++ x = lock->lock & ~RW_WRITE_FLAG;
+ }
+ preempt_disable();
+ }
+
+ unsigned long _write_lock_irqsave(rwlock_t *lock)
+ {
++ uint32_t x;
+ unsigned long flags;
++
+ local_irq_save(flags);
+ check_lock(&lock->debug);
+- while ( unlikely(!_raw_write_trylock(&lock->raw)) )
++ do {
++ if ( (x = lock->lock) & RW_WRITE_FLAG )
++ {
++ local_irq_restore(flags);
++ while ( (x = lock->lock) & RW_WRITE_FLAG )
++ cpu_relax();
++ local_irq_save(flags);
++ }
++ } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
++ while ( x != 0 )
+ {
+- local_irq_restore(flags);
+- while ( likely(_raw_rw_is_locked(&lock->raw)) )
+- cpu_relax();
+- local_irq_save(flags);
++ cpu_relax();
++ x = lock->lock & ~RW_WRITE_FLAG;
+ }
+ preempt_disable();
+ return flags;
+@@ -384,9 +423,13 @@ unsigned long _write_lock_irqsave(rwlock
+
+ int _write_trylock(rwlock_t *lock)
+ {
++ uint32_t x;
++
+ check_lock(&lock->debug);
+- if ( !_raw_write_trylock(&lock->raw) )
+- return 0;
++ do {
++ if ( (x = lock->lock) != 0 )
++ return 0;
++ } while ( cmpxchg(&lock->lock, x, x|RW_WRITE_FLAG) != x );
+ preempt_disable();
+ return 1;
+ }
+@@ -394,33 +437,32 @@ int _write_trylock(rwlock_t *lock)
+ void _write_unlock(rwlock_t *lock)
+ {
+ preempt_enable();
+- _raw_write_unlock(&lock->raw);
++ if ( cmpxchg(&lock->lock, RW_WRITE_FLAG, 0) != RW_WRITE_FLAG )
++ BUG();
+ }
+
+ void _write_unlock_irq(rwlock_t *lock)
+ {
+- preempt_enable();
+- _raw_write_unlock(&lock->raw);
++ _write_unlock(lock);
+ local_irq_enable();
+ }
+
+ void _write_unlock_irqrestore(rwlock_t *lock, unsigned long flags)
+ {
+- preempt_enable();
+- _raw_write_unlock(&lock->raw);
++ _write_unlock(lock);
+ local_irq_restore(flags);
+ }
+
+ int _rw_is_locked(rwlock_t *lock)
+ {
+ check_lock(&lock->debug);
+- return _raw_rw_is_locked(&lock->raw);
++ return (lock->lock != 0); /* anyone in critical section? */
+ }
+
+ int _rw_is_write_locked(rwlock_t *lock)
+ {
+ check_lock(&lock->debug);
+- return _raw_rw_is_write_locked(&lock->raw);
++ return (lock->lock == RW_WRITE_FLAG); /* writer in critical section? */
+ }
+
+ #ifdef LOCK_PROFILE
+--- a/xen/include/asm-arm/arm32/spinlock.h
++++ b/xen/include/asm-arm/arm32/spinlock.h
+@@ -55,84 +55,6 @@ static always_inline int _raw_spin_trylo
+ }
+ }
+
+-typedef struct {
+- volatile unsigned int lock;
+-} raw_rwlock_t;
+-
+-#define _RAW_RW_LOCK_UNLOCKED { 0 }
+-
+-static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+-{
+- unsigned long tmp, tmp2 = 1;
+-
+- __asm__ __volatile__(
+-"1: ldrex %0, [%2]\n"
+-" adds %0, %0, #1\n"
+-" strexpl %1, %0, [%2]\n"
+- : "=&r" (tmp), "+r" (tmp2)
+- : "r" (&rw->lock)
+- : "cc");
+-
+- smp_mb();
+- return tmp2 == 0;
+-}
+-
+-static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+-{
+- unsigned long tmp;
+-
+- __asm__ __volatile__(
+-"1: ldrex %0, [%1]\n"
+-" teq %0, #0\n"
+-" strexeq %0, %2, [%1]"
+- : "=&r" (tmp)
+- : "r" (&rw->lock), "r" (0x80000000)
+- : "cc");
+-
+- if (tmp == 0) {
+- smp_mb();
+- return 1;
+- } else {
+- return 0;
+- }
+-}
+-
+-static inline void _raw_read_unlock(raw_rwlock_t *rw)
+-{
+- unsigned long tmp, tmp2;
+-
+- smp_mb();
+-
+- __asm__ __volatile__(
+-"1: ldrex %0, [%2]\n"
+-" sub %0, %0, #1\n"
+-" strex %1, %0, [%2]\n"
+-" teq %1, #0\n"
+-" bne 1b"
+- : "=&r" (tmp), "=&r" (tmp2)
+- : "r" (&rw->lock)
+- : "cc");
+-
+- if (tmp == 0)
+- dsb_sev();
+-}
+-
+-static inline void _raw_write_unlock(raw_rwlock_t *rw)
+-{
+- smp_mb();
+-
+- __asm__ __volatile__(
+- "str %1, [%0]\n"
+- :
+- : "r" (&rw->lock), "r" (0)
+- : "cc");
+-
+- dsb_sev();
+-}
+-
+-#define _raw_rw_is_locked(x) ((x)->lock != 0)
+-#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+-
+ #endif /* __ASM_SPINLOCK_H */
+ /*
+ * Local variables:
+--- a/xen/include/asm-arm/arm64/spinlock.h
++++ b/xen/include/asm-arm/arm64/spinlock.h
+@@ -52,69 +52,6 @@ static always_inline int _raw_spin_trylo
+ return !tmp;
+ }
+
+-typedef struct {
+- volatile unsigned int lock;
+-} raw_rwlock_t;
+-
+-#define _RAW_RW_LOCK_UNLOCKED { 0 }
+-
+-static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+-{
+- unsigned int tmp, tmp2 = 1;
+-
+- asm volatile(
+- " ldaxr %w0, %2\n"
+- " add %w0, %w0, #1\n"
+- " tbnz %w0, #31, 1f\n"
+- " stxr %w1, %w0, %2\n"
+- "1:\n"
+- : "=&r" (tmp), "+r" (tmp2), "+Q" (rw->lock)
+- :
+- : "cc", "memory");
+-
+- return !tmp2;
+-}
+-
+-static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+-{
+- unsigned int tmp;
+-
+- asm volatile(
+- " ldaxr %w0, %1\n"
+- " cbnz %w0, 1f\n"
+- " stxr %w0, %w2, %1\n"
+- "1:\n"
+- : "=&r" (tmp), "+Q" (rw->lock)
+- : "r" (0x80000000)
+- : "cc", "memory");
+-
+- return !tmp;
+-}
+-
+-static inline void _raw_read_unlock(raw_rwlock_t *rw)
+-{
+- unsigned int tmp, tmp2;
+-
+- asm volatile(
+- " 1: ldxr %w0, %2\n"
+- " sub %w0, %w0, #1\n"
+- " stlxr %w1, %w0, %2\n"
+- " cbnz %w1, 1b\n"
+- : "=&r" (tmp), "=&r" (tmp2), "+Q" (rw->lock)
+- :
+- : "cc", "memory");
+-}
+-
+-static inline void _raw_write_unlock(raw_rwlock_t *rw)
+-{
+- asm volatile(
+- " stlr %w1, %0\n"
+- : "=Q" (rw->lock) : "r" (0) : "memory");
+-}
+-
+-#define _raw_rw_is_locked(x) ((x)->lock != 0)
+-#define _raw_rw_is_write_locked(x) ((x)->lock == 0x80000000)
+-
+ #endif /* __ASM_SPINLOCK_H */
+ /*
+ * Local variables:
+--- a/xen/include/asm-x86/spinlock.h
++++ b/xen/include/asm-x86/spinlock.h
+@@ -31,58 +31,4 @@ static always_inline int _raw_spin_trylo
+ return (oldval > 0);
+ }
+
+-typedef struct {
+- volatile int lock;
+-} raw_rwlock_t;
+-
+-#define RW_WRITE_BIAS 0x7fffffff
+-#define _RAW_RW_LOCK_UNLOCKED /*(raw_rwlock_t)*/ { 0 }
+-
+-static always_inline int _raw_read_trylock(raw_rwlock_t *rw)
+-{
+- int acquired;
+-
+- asm volatile (
+- " lock; decl %0 \n"
+- " jns 2f \n"
+-#ifdef __clang__ /* clang's builtin assember can't do .subsection */
+- "1: .pushsection .fixup,\"ax\"\n"
+-#else
+- "1: .subsection 1 \n"
+-#endif
+- "2: lock; incl %0 \n"
+- " decl %1 \n"
+- " jmp 1b \n"
+-#ifdef __clang__
+- " .popsection \n"
+-#else
+- " .subsection 0 \n"
+-#endif
+- : "=m" (rw->lock), "=r" (acquired) : "1" (1) : "memory" );
+-
+- return acquired;
+-}
+-
+-static always_inline int _raw_write_trylock(raw_rwlock_t *rw)
+-{
+- return (cmpxchg(&rw->lock, 0, RW_WRITE_BIAS) == 0);
+-}
+-
+-static always_inline void _raw_read_unlock(raw_rwlock_t *rw)
+-{
+- asm volatile (
+- "lock ; incl %0"
+- : "=m" ((rw)->lock) : : "memory" );
+-}
+-
+-static always_inline void _raw_write_unlock(raw_rwlock_t *rw)
+-{
+- asm volatile (
+- "lock ; subl %1,%0"
+- : "=m" ((rw)->lock) : "i" (RW_WRITE_BIAS) : "memory" );
+-}
+-
+-#define _raw_rw_is_locked(x) ((x)->lock != 0)
+-#define _raw_rw_is_write_locked(x) ((x)->lock > 0)
+-
+ #endif /* __ASM_SPINLOCK_H */
+--- a/xen/include/xen/spinlock.h
++++ b/xen/include/xen/spinlock.h
+@@ -141,11 +141,13 @@ typedef struct spinlock {
+ #define spin_lock_init(l) (*(l) = (spinlock_t)SPIN_LOCK_UNLOCKED)
+
+ typedef struct {
+- raw_rwlock_t raw;
++ volatile uint32_t lock;
+ struct lock_debug debug;
+ } rwlock_t;
+
+-#define RW_LOCK_UNLOCKED { _RAW_RW_LOCK_UNLOCKED, _LOCK_DEBUG }
++#define RW_WRITE_FLAG (1u<<31)
++
++#define RW_LOCK_UNLOCKED { 0, _LOCK_DEBUG }
+ #define DEFINE_RWLOCK(l) rwlock_t l = RW_LOCK_UNLOCKED
+ #define rwlock_init(l) (*(l) = (rwlock_t)RW_LOCK_UNLOCKED)
+
diff --git a/main/xen/xsa116.patch b/main/xen/xsa116.patch
new file mode 100644
index 0000000000..816a9bd84e
--- /dev/null
+++ b/main/xen/xsa116.patch
@@ -0,0 +1,33 @@
+x86/HVM: prevent use-after-free when destroying a domain
+
+hvm_domain_relinquish_resources() can free certain domain resources
+which can still be accessed, e.g. by HVMOP_set_param, while the domain
+is being cleaned up.
+
+Signed-off-by: Mihai Donțu <mdontu@bitdefender.com>
+Tested-by: Răzvan Cojocaru <rcojocaru@bitdefender.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Reviewed-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/hvm/hvm.c
++++ b/xen/arch/x86/hvm/hvm.c
+@@ -1487,9 +1487,6 @@ int hvm_domain_initialise(struct domain
+
+ void hvm_domain_relinquish_resources(struct domain *d)
+ {
+- xfree(d->arch.hvm_domain.io_handler);
+- xfree(d->arch.hvm_domain.params);
+-
+ if ( is_pvh_domain(d) )
+ return;
+
+@@ -1511,6 +1508,9 @@ void hvm_domain_relinquish_resources(str
+
+ void hvm_domain_destroy(struct domain *d)
+ {
++ xfree(d->arch.hvm_domain.io_handler);
++ xfree(d->arch.hvm_domain.params);
++
+ hvm_destroy_cacheattr_region_list(d);
+
+ if ( is_pvh_domain(d) )
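The XSA-116 fix is purely a matter of teardown phases: hvm_domain_relinquish_resources() runs while the dying domain is still reachable by hypercalls such as HVMOP_set_param, so freeing io_handler and params there left a use-after-free window; hvm_domain_destroy() runs only after the last reference is gone, so the frees move there. The two-phase shape reduced to a sketch, with illustrative names and plain malloc/free standing in for xalloc/xfree:

#include <stdio.h>
#include <stdlib.h>

struct hvm_domain { void *io_handler; void *params; };

/* Phase 1: runs while the domain is still reachable (a racing
 * HVMOP_set_param may dereference params), so nothing those paths
 * use may be freed here. */
static void hvm_domain_relinquish_resources(struct hvm_domain *d)
{
    (void)d;   /* io_handler/params deliberately left alone */
}

/* Phase 2: runs once no further references to the domain exist,
 * making it the safe place for the frees. */
static void hvm_domain_destroy(struct hvm_domain *d)
{
    free(d->io_handler);  d->io_handler = NULL;
    free(d->params);      d->params = NULL;
}

int main(void)
{
    struct hvm_domain d = { malloc(64), malloc(64) };
    hvm_domain_relinquish_resources(&d);
    /* between the phases d.params is still valid */
    hvm_domain_destroy(&d);
    printf("teardown complete\n");
    return 0;
}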