author     Henrik Riomar <henrik.riomar@gmail.com>  2018-04-04 13:35:27 +0200
committer  Leonardo Arena <rnalrd@alpinelinux.org>  2018-07-11 10:25:37 +0000
commit     afa60b4355e66c59078ac08cf7997c5f9c4d9f48 (patch)
tree       d78d4d23529ae02a17229e10f87b53c0007dcf93 /main
parent     26d5c4761fea87a71fbecad5e960975778b82e2a (diff)
main/xen: upgrade to 4.7.6
While at it, drop unused patches and patch files.

rombios-no-pie.patch: not needed due to upstream commit
b704b1a09b ("rombios: prevent building with PIC/PIE")
Diffstat (limited to 'main')
-rw-r--r--  main/xen/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch | 176
-rw-r--r--  main/xen/0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch | 27
-rw-r--r--  main/xen/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch | 402
-rw-r--r--  main/xen/0001-x86-limit-linear-page-table-use-to-a-single-level.patch | 494
-rw-r--r--  main/xen/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch | 109
-rw-r--r--  main/xen/0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch | 66
-rw-r--r--  main/xen/0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch | 47
-rw-r--r--  main/xen/0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch | 82
-rw-r--r--  main/xen/0003-x86-MSI-disallow-redundant-enabling.patch | 55
-rw-r--r--  main/xen/0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch | 760
-rw-r--r--  main/xen/0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch | 124
-rw-r--r--  main/xen/0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch | 165
-rw-r--r--  main/xen/0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch | 37
-rw-r--r--  main/xen/APKBUILD | 220
-rw-r--r--  main/xen/rombios-no-pie.patch | 26
-rw-r--r--  main/xen/xsa215.patch | 37
-rw-r--r--  main/xen/xsa216-qemuu-4.7.patch | 111
-rw-r--r--  main/xen/xsa217.patch | 41
-rw-r--r--  main/xen/xsa226-4.7.patch | 133
-rw-r--r--  main/xen/xsa227.patch | 52
-rw-r--r--  main/xen/xsa228-4.8.patch | 198
-rw-r--r--  main/xen/xsa228.patch | 198
-rw-r--r--  main/xen/xsa229.patch | 59
-rw-r--r--  main/xen/xsa230.patch | 38
-rw-r--r--  main/xen/xsa231-4.7.patch | 108
-rw-r--r--  main/xen/xsa232.patch | 23
-rw-r--r--  main/xen/xsa233.patch | 52
-rw-r--r--  main/xen/xsa234-4.6.patch | 185
-rw-r--r--  main/xen/xsa235-4.7.patch | 49
-rw-r--r--  main/xen/xsa236-4.9.patch | 66
-rw-r--r--  main/xen/xsa238.patch | 45
-rw-r--r--  main/xen/xsa239.patch | 46
-rw-r--r--  main/xen/xsa241-4.8.patch | 120
-rw-r--r--  main/xen/xsa242-4.9.patch | 43
-rw-r--r--  main/xen/xsa243-2.patch | 54
-rw-r--r--  main/xen/xsa243-4.7-1.patch | 93
-rw-r--r--  main/xen/xsa244-4.7.patch | 51
-rw-r--r--  main/xen/xsa246-4.7.patch | 74
-rw-r--r--  main/xen/xsa248-4.8.patch | 162
-rw-r--r--  main/xen/xsa249.patch | 42
-rw-r--r--  main/xen/xsa250.patch | 67
-rw-r--r--  main/xen/xsa251-4.8.patch | 21
-rw-r--r--  main/xen/xsa252-4.7.patch | 25
-rw-r--r--  main/xen/xsa255-4.7-1.patch | 126
-rw-r--r--  main/xen/xsa255-4.7-2.patch | 187
-rw-r--r--  main/xen/xsa258-4.8.patch | 106
-rw-r--r--  main/xen/xsa259.patch | 29
-rw-r--r--  main/xen/xsa260-1.patch | 71
-rw-r--r--  main/xen/xsa260-2.patch | 110
-rw-r--r--  main/xen/xsa260-3.patch | 138
-rw-r--r--  main/xen/xsa260-4.patch | 72
-rw-r--r--  main/xen/xsa261-4.7.patch | 264
-rw-r--r--  main/xen/xsa262-4.9.patch | 76
53 files changed, 17 insertions(+), 6145 deletions(-)
diff --git a/main/xen/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch b/main/xen/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
deleted file mode 100644
index bba280c926..0000000000
--- a/main/xen/0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
+++ /dev/null
@@ -1,176 +0,0 @@
-From f345ca185e0c042ed12bf929a9e93efaf33397bb Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 10 Nov 2017 16:53:54 +0000
-Subject: [PATCH 1/2] p2m: Always check to see if removing a p2m entry actually
- worked
-
-The PoD zero-check functions speculatively remove memory from the p2m,
-then check to see if it's completely zeroed, before putting it in the
-cache.
-
-Unfortunately, the p2m_set_entry() calls may fail if the underlying
-pagetable structure needs to change and the domain has exhausted its
-p2m memory pool: for instance, if we're removing a 2MiB region out of
-a 1GiB entry (in the p2m_pod_zero_check_superpage() case), or a 4k
-region out of a 2MiB or larger entry (in the p2m_pod_zero_check()
-case); and the return value is not checked.
-
-The underlying mfn will then be added into the PoD cache, and at some
-point mapped into another location in the p2m. If the guest
-afterwards balloons out this memory, it will be freed to the hypervisor
-and potentially reused by another domain, in spite of the fact that
-the original domain still has writable mappings to it.
-
-There are several places where p2m_set_entry() shouldn't be able to
-fail, as it is guaranteed to write an entry of the same order that
-succeeded before. Add a backstop of crashing the domain just in case,
-and an ASSERT_UNREACHABLE() to flag up the broken assumption on debug
-builds.
-
-While we're here, use PAGE_ORDER_2M rather than a magic constant.
-
-This is part of XSA-247.
-
-Reported-by: George Dunlap <george.dunlap.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
-v4:
-- Removed some trailing whitespace
-v3:
-- Reformat reset clause to be more compact
-- Make sure to set map[i] = NULL when unmapping in case we need to bail
-v2:
-- Crash a domain if a p2m_set_entry we think cannot fail fails anyway.
----
- xen/arch/x86/mm/p2m-pod.c | 77 +++++++++++++++++++++++++++++++++++++----------
- 1 file changed, 61 insertions(+), 16 deletions(-)
-
-diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
-index 87082cf65f..5ec8a37949 100644
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -754,8 +754,10 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
- }
-
- /* Try to remove the page, restoring old mapping if it fails. */
-- p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn, _mfn(INVALID_MFN), PAGE_ORDER_2M,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ goto out;
-+
- p2m_tlb_flush_sync(p2m);
-
- /* Make none of the MFNs are used elsewhere... for example, mapped
-@@ -812,9 +814,18 @@ p2m_pod_zero_check_superpage(struct p2m_domain *p2m, unsigned long gfn)
- ret = SUPERPAGE_PAGES;
-
- out_reset:
-- if ( reset )
-- p2m_set_entry(p2m, gfn, mfn0, 9, type0, p2m->default_access);
--
-+ /*
-+ * This p2m_set_entry() call shouldn't be able to fail, since the same order
-+ * on the same gfn succeeded above. If that turns out to be false, crashing
-+ * the domain should be the safest way of making sure we don't leak memory.
-+ */
-+ if ( reset && p2m_set_entry(p2m, gfn, mfn0, PAGE_ORDER_2M,
-+ type0, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ }
-+
- out:
- gfn_unlock(p2m, gfn, SUPERPAGE_ORDER);
- return ret;
-@@ -871,19 +882,30 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
- }
-
- /* Try to remove the page, restoring old mapping if it fails. */
-- p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfns[i], _mfn(INVALID_MFN), PAGE_ORDER_4K,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ goto skip;
-
- /* See if the page was successfully unmapped. (Allow one refcount
- * for being allocated to a domain.) */
- if ( (mfn_to_page(mfns[i])->count_info & PGC_count_mask) > 1 )
- {
-+ /*
-+ * If the previous p2m_set_entry call succeeded, this one shouldn't
-+ * be able to fail. If it does, crashing the domain should be safe.
-+ */
-+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-+ types[i], p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unmap;
-+ }
-+
-+ skip:
- unmap_domain_page(map[i]);
- map[i] = NULL;
-
-- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-- types[i], p2m->default_access);
--
- continue;
- }
- }
-@@ -902,12 +924,25 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
-
- unmap_domain_page(map[i]);
-
-- /* See comment in p2m_pod_zero_check_superpage() re gnttab
-- * check timing. */
-- if ( j < PAGE_SIZE/sizeof(*map[i]) )
-+ map[i] = NULL;
-+
-+ /*
-+ * See comment in p2m_pod_zero_check_superpage() re gnttab
-+ * check timing.
-+ */
-+ if ( j < (PAGE_SIZE / sizeof(*map[i])) )
- {
-- p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-- types[i], p2m->default_access);
-+ /*
-+ * If the previous p2m_set_entry call succeeded, this one shouldn't
-+ * be able to fail. If it does, crashing the domain should be safe.
-+ */
-+ if ( p2m_set_entry(p2m, gfns[i], mfns[i], PAGE_ORDER_4K,
-+ types[i], p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unmap;
-+ }
- }
- else
- {
-@@ -931,7 +966,17 @@ p2m_pod_zero_check(struct p2m_domain *p2m, unsigned long *gfns, int count)
- p2m->pod.entry_count++;
- }
- }
--
-+
-+ return;
-+
-+out_unmap:
-+ /*
-+ * Something went wrong, probably crashing the domain. Unmap
-+ * everything and return.
-+ */
-+ for ( i = 0; i < count; i++ )
-+ if ( map[i] )
-+ unmap_domain_page(map[i]);
- }
-
- #define POD_SWEEP_LIMIT 1024
---
-2.15.0
-
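The patch above establishes a backstop idiom: a rollback that is believed unable to fail is still checked, and a failure escalates to crashing the domain rather than leaking a page with live writable mappings. A standalone C sketch of that idiom follows; every name in it is a simplified stand-in for the Xen helpers, not the actual hypervisor code.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>

/* Hypothetical stand-in for p2m_set_entry(): returns 0 on success. */
static int rollback_entry(void)
{
    return 0;
}

/* Hypothetical stand-in for Xen's domain_crash(). */
static void crash_domain(const char *why)
{
    fprintf(stderr, "crashing domain: %s\n", why);
    abort();
}

int main(void)
{
    /*
     * The rollback rewrites an entry of the same order that succeeded
     * moments earlier, so it "cannot fail".  If it fails anyway, the
     * safest remedy is to crash the domain instead of letting a page
     * with live writable mappings reach the free pool.
     */
    if ( rollback_entry() )
    {
        assert(!"same-order rollback failed");  /* loud on debug builds */
        crash_domain("p2m rollback failed");    /* backstop on release builds */
    }
    return 0;
}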
diff --git a/main/xen/0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch b/main/xen/0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
deleted file mode 100644
index b3edeb54fe..0000000000
--- a/main/xen/0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
+++ /dev/null
@@ -1,27 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: don't allow MSI pIRQ mapping on unowned device
-
-MSI setup should be permitted only for existing devices owned by the
-respective guest (the operation may still be carried out by the domain
-controlling that guest).
-
-This is part of XSA-237.
-
-Reported-by: HW42 <hw42@ipsumj.de>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -1964,7 +1964,10 @@ int map_domain_pirq(
- if ( !cpu_has_apic )
- goto done;
-
-- pdev = pci_get_pdev(msi->seg, msi->bus, msi->devfn);
-+ pdev = pci_get_pdev_by_domain(d, msi->seg, msi->bus, msi->devfn);
-+ if ( !pdev )
-+ goto done;
-+
- ret = pci_enable_msi(msi, &msi_desc);
- if ( ret )
- {
diff --git a/main/xen/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch b/main/xen/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
deleted file mode 100644
index ce922a7cb2..0000000000
--- a/main/xen/0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
+++ /dev/null
@@ -1,402 +0,0 @@
-From 5a013cb9851d1deeaeaa8564f292940a99dbb1d1 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Wed, 17 Jan 2018 17:22:34 +0100
-Subject: [PATCH 1/4] x86/entry: Remove support for partial cpu_user_regs
- frames
-
-Save all GPRs on entry to Xen.
-
-The entry_int82() path is via a DPL1 gate, only usable by 32bit PV guests, so
-can get away with only saving the 32bit registers. All other entrypoints can
-be reached from 32 or 64bit contexts.
-
-This is part of XSA-254.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Wei Liu <wei.liu2@citrix.com>
-Acked-by: Jan Beulich <jbeulich@suse.com>
-master commit: f9eb74789af77e985ae653193f3622263499f674
-master date: 2018-01-05 19:57:07 +0000
-
-(cherry picked from commit 0e6c6fc449000d97f9fa87ed1fbe23f0cf21406b)
----
- tools/tests/x86_emulator/x86_emulate.c | 1 -
- xen/arch/x86/domain.c | 1 -
- xen/arch/x86/traps.c | 2 -
- xen/arch/x86/x86_64/compat/entry.S | 7 ++-
- xen/arch/x86/x86_64/entry.S | 12 ++--
- xen/arch/x86/x86_64/traps.c | 13 ++--
- xen/arch/x86/x86_emulate.c | 1 -
- xen/arch/x86/x86_emulate/x86_emulate.c | 8 +--
- xen/common/wait.c | 1 -
- xen/include/asm-x86/asm_defns.h | 107 +++------------------------------
- 10 files changed, 26 insertions(+), 127 deletions(-)
-
-diff --git a/tools/tests/x86_emulator/x86_emulate.c b/tools/tests/x86_emulator/x86_emulate.c
-index 10e3f61baa..c12527a50b 100644
---- a/tools/tests/x86_emulator/x86_emulate.c
-+++ b/tools/tests/x86_emulator/x86_emulate.c
-@@ -24,7 +24,6 @@ typedef bool bool_t;
- #endif
-
- #define cpu_has_amd_erratum(nr) 0
--#define mark_regs_dirty(r) ((void)(r))
-
- #define __packed __attribute__((packed))
-
-diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
-index 452748dd5b..c9328f804e 100644
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -148,7 +148,6 @@ static void noreturn continue_idle_domain(struct vcpu *v)
- static void noreturn continue_nonidle_domain(struct vcpu *v)
- {
- check_wakeup_from_wait();
-- mark_regs_dirty(guest_cpu_user_regs());
- reset_stack_and_jump(ret_from_intr);
- }
-
-diff --git a/xen/arch/x86/traps.c b/xen/arch/x86/traps.c
-index 90b6071796..1ec0d48cce 100644
---- a/xen/arch/x86/traps.c
-+++ b/xen/arch/x86/traps.c
-@@ -2456,7 +2456,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
- goto fail;
- if ( admin_io_okay(port, op_bytes, currd) )
- {
-- mark_regs_dirty(regs);
- io_emul(regs);
- }
- else
-@@ -2486,7 +2485,6 @@ static int emulate_privileged_op(struct cpu_user_regs *regs)
- goto fail;
- if ( admin_io_okay(port, op_bytes, currd) )
- {
-- mark_regs_dirty(regs);
- io_emul(regs);
- if ( (op_bytes == 1) && pv_post_outb_hook )
- pv_post_outb_hook(port, regs->eax);
-diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
-index 794bb44266..7ee01597a3 100644
---- a/xen/arch/x86/x86_64/compat/entry.S
-+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -15,7 +15,8 @@
- ENTRY(compat_hypercall)
- ASM_CLAC
- pushq $0
-- SAVE_VOLATILE type=TRAP_syscall compat=1
-+ movl $TRAP_syscall, 4(%rsp)
-+ SAVE_ALL compat=1 /* DPL1 gate, restricted to 32bit PV guests only. */
- CR4_PV32_RESTORE
-
- cmpb $0,untrusted_msi(%rip)
-@@ -127,7 +128,6 @@ compat_test_guest_events:
- /* %rbx: struct vcpu */
- compat_process_softirqs:
- sti
-- andl $~TRAP_regs_partial,UREGS_entry_vector(%rsp)
- call do_softirq
- jmp compat_test_all_events
-
-@@ -268,7 +268,8 @@ ENTRY(cstar_enter)
- pushq $FLAT_USER_CS32
- pushq %rcx
- pushq $0
-- SAVE_VOLATILE TRAP_syscall
-+ movl $TRAP_syscall, 4(%rsp)
-+ SAVE_ALL
- GET_CURRENT(bx)
- movq VCPU_domain(%rbx),%rcx
- cmpb $0,DOMAIN_is_32bit_pv(%rcx)
-diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index 708d9b9402..cebb1e4f4f 100644
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -97,7 +97,8 @@ ENTRY(lstar_enter)
- pushq $FLAT_KERNEL_CS64
- pushq %rcx
- pushq $0
-- SAVE_VOLATILE TRAP_syscall
-+ movl $TRAP_syscall, 4(%rsp)
-+ SAVE_ALL
- GET_CURRENT(bx)
- testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
- jz switch_to_kernel
-@@ -192,7 +193,6 @@ test_guest_events:
- /* %rbx: struct vcpu */
- process_softirqs:
- sti
-- SAVE_PRESERVED
- call do_softirq
- jmp test_all_events
-
-@@ -246,7 +246,8 @@ GLOBAL(sysenter_eflags_saved)
- pushq $3 /* ring 3 null cs */
- pushq $0 /* null rip */
- pushq $0
-- SAVE_VOLATILE TRAP_syscall
-+ movl $TRAP_syscall, 4(%rsp)
-+ SAVE_ALL
- GET_CURRENT(bx)
- cmpb $0,VCPU_sysenter_disables_events(%rbx)
- movq VCPU_sysenter_addr(%rbx),%rax
-@@ -263,7 +264,6 @@ UNLIKELY_END(sysenter_nt_set)
- leal (,%rcx,TBF_INTERRUPT),%ecx
- UNLIKELY_START(z, sysenter_gpf)
- movq VCPU_trap_ctxt(%rbx),%rsi
-- SAVE_PRESERVED
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
- movl %eax,TRAPBOUNCE_error_code(%rdx)
- movq TRAP_gp_fault * TRAPINFO_sizeof + TRAPINFO_eip(%rsi),%rax
-@@ -281,7 +281,8 @@ UNLIKELY_END(sysenter_gpf)
- ENTRY(int80_direct_trap)
- ASM_CLAC
- pushq $0
-- SAVE_VOLATILE 0x80
-+ movl $0x80, 4(%rsp)
-+ SAVE_ALL
-
- cmpb $0,untrusted_msi(%rip)
- UNLIKELY_START(ne, msi_check)
-@@ -309,7 +310,6 @@ int80_slow_path:
- * IDT entry with DPL==0.
- */
- movl $((0x80 << 3) | X86_XEC_IDT),UREGS_error_code(%rsp)
-- SAVE_PRESERVED
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
- /* A GPF wouldn't have incremented the instruction pointer. */
- subq $2,UREGS_rip(%rsp)
-diff --git a/xen/arch/x86/x86_64/traps.c b/xen/arch/x86/x86_64/traps.c
-index 5b71537a9d..3161dcc18b 100644
---- a/xen/arch/x86/x86_64/traps.c
-+++ b/xen/arch/x86/x86_64/traps.c
-@@ -65,15 +65,10 @@ static void _show_registers(
- regs->rbp, regs->rsp, regs->r8);
- printk("r9: %016lx r10: %016lx r11: %016lx\n",
- regs->r9, regs->r10, regs->r11);
-- if ( !(regs->entry_vector & TRAP_regs_partial) )
-- {
-- printk("r12: %016lx r13: %016lx r14: %016lx\n",
-- regs->r12, regs->r13, regs->r14);
-- printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
-- regs->r15, crs[0], crs[4]);
-- }
-- else
-- printk("cr0: %016lx cr4: %016lx\n", crs[0], crs[4]);
-+ printk("r12: %016lx r13: %016lx r14: %016lx\n",
-+ regs->r12, regs->r13, regs->r14);
-+ printk("r15: %016lx cr0: %016lx cr4: %016lx\n",
-+ regs->r15, crs[0], crs[4]);
- printk("cr3: %016lx cr2: %016lx\n", crs[3], crs[2]);
- printk("ds: %04x es: %04x fs: %04x gs: %04x "
- "ss: %04x cs: %04x\n",
-diff --git a/xen/arch/x86/x86_emulate.c b/xen/arch/x86/x86_emulate.c
-index 28132b5dbc..43730026c2 100644
---- a/xen/arch/x86/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate.c
-@@ -11,7 +11,6 @@
-
- #include <xen/domain_page.h>
- #include <asm/x86_emulate.h>
--#include <asm/asm_defns.h> /* mark_regs_dirty() */
- #include <asm/processor.h> /* current_cpu_info */
- #include <asm/amd.h> /* cpu_has_amd_erratum() */
-
-diff --git a/xen/arch/x86/x86_emulate/x86_emulate.c b/xen/arch/x86/x86_emulate/x86_emulate.c
-index 5db017b1b0..81e8bc6ace 100644
---- a/xen/arch/x86/x86_emulate/x86_emulate.c
-+++ b/xen/arch/x86/x86_emulate/x86_emulate.c
-@@ -1424,10 +1424,10 @@ decode_register(
- case 9: p = &regs->r9; break;
- case 10: p = &regs->r10; break;
- case 11: p = &regs->r11; break;
-- case 12: mark_regs_dirty(regs); p = &regs->r12; break;
-- case 13: mark_regs_dirty(regs); p = &regs->r13; break;
-- case 14: mark_regs_dirty(regs); p = &regs->r14; break;
-- case 15: mark_regs_dirty(regs); p = &regs->r15; break;
-+ case 12: p = &regs->r12; break;
-+ case 13: p = &regs->r13; break;
-+ case 14: p = &regs->r14; break;
-+ case 15: p = &regs->r15; break;
- #endif
- default: BUG(); p = NULL; break;
- }
-diff --git a/xen/common/wait.c b/xen/common/wait.c
-index 4ac98c07fe..398f653174 100644
---- a/xen/common/wait.c
-+++ b/xen/common/wait.c
-@@ -128,7 +128,6 @@ static void __prepare_to_wait(struct waitqueue_vcpu *wqv)
- unsigned long dummy;
- u32 entry_vector = cpu_info->guest_cpu_user_regs.entry_vector;
-
-- cpu_info->guest_cpu_user_regs.entry_vector &= ~TRAP_regs_partial;
- ASSERT(wqv->esp == 0);
-
- /* Save current VCPU affinity; force wakeup on *this* CPU only. */
-diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
-index 279d70298f..6e5c079ad8 100644
---- a/xen/include/asm-x86/asm_defns.h
-+++ b/xen/include/asm-x86/asm_defns.h
-@@ -17,15 +17,6 @@
- void ret_from_intr(void);
- #endif
-
--#ifdef CONFIG_FRAME_POINTER
--/* Indicate special exception stack frame by inverting the frame pointer. */
--#define SETUP_EXCEPTION_FRAME_POINTER(offs) \
-- leaq offs(%rsp),%rbp; \
-- notq %rbp
--#else
--#define SETUP_EXCEPTION_FRAME_POINTER(offs)
--#endif
--
- #ifndef NDEBUG
- #define ASSERT_INTERRUPT_STATUS(x, msg) \
- pushf; \
-@@ -42,31 +33,6 @@ void ret_from_intr(void);
- #define ASSERT_INTERRUPTS_DISABLED \
- ASSERT_INTERRUPT_STATUS(z, "INTERRUPTS DISABLED")
-
--/*
-- * This flag is set in an exception frame when registers R12-R15 did not get
-- * saved.
-- */
--#define _TRAP_regs_partial 16
--#define TRAP_regs_partial (1 << _TRAP_regs_partial)
--/*
-- * This flag gets set in an exception frame when registers R12-R15 possibly
-- * get modified from their originally saved values and hence need to be
-- * restored even if the normal call flow would restore register values.
-- *
-- * The flag being set implies _TRAP_regs_partial to be unset. Restoring
-- * R12-R15 thus is
-- * - required when this flag is set,
-- * - safe when _TRAP_regs_partial is unset.
-- */
--#define _TRAP_regs_dirty 17
--#define TRAP_regs_dirty (1 << _TRAP_regs_dirty)
--
--#define mark_regs_dirty(r) ({ \
-- struct cpu_user_regs *r__ = (r); \
-- ASSERT(!((r__)->entry_vector & TRAP_regs_partial)); \
-- r__->entry_vector |= TRAP_regs_dirty; \
--})
--
- #ifdef __ASSEMBLY__
- # define _ASM_EX(p) p-.
- #else
-@@ -236,7 +202,7 @@ static always_inline void stac(void)
- #endif
-
- #ifdef __ASSEMBLY__
--.macro SAVE_ALL op
-+.macro SAVE_ALL op, compat=0
- .ifeqs "\op", "CLAC"
- ASM_CLAC
- .else
-@@ -255,40 +221,6 @@ static always_inline void stac(void)
- movq %rdx,UREGS_rdx(%rsp)
- movq %rcx,UREGS_rcx(%rsp)
- movq %rax,UREGS_rax(%rsp)
-- movq %r8,UREGS_r8(%rsp)
-- movq %r9,UREGS_r9(%rsp)
-- movq %r10,UREGS_r10(%rsp)
-- movq %r11,UREGS_r11(%rsp)
-- movq %rbx,UREGS_rbx(%rsp)
-- movq %rbp,UREGS_rbp(%rsp)
-- SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
-- movq %r12,UREGS_r12(%rsp)
-- movq %r13,UREGS_r13(%rsp)
-- movq %r14,UREGS_r14(%rsp)
-- movq %r15,UREGS_r15(%rsp)
--.endm
--
--/*
-- * Save all registers not preserved by C code or used in entry/exit code. Mark
-- * the frame as partial.
-- *
-- * @type: exception type
-- * @compat: R8-R15 don't need saving, and the frame nevertheless is complete
-- */
--.macro SAVE_VOLATILE type compat=0
--.if \compat
-- movl $\type,UREGS_entry_vector-UREGS_error_code(%rsp)
--.else
-- movl $\type|TRAP_regs_partial,\
-- UREGS_entry_vector-UREGS_error_code(%rsp)
--.endif
-- addq $-(UREGS_error_code-UREGS_r15),%rsp
-- cld
-- movq %rdi,UREGS_rdi(%rsp)
-- movq %rsi,UREGS_rsi(%rsp)
-- movq %rdx,UREGS_rdx(%rsp)
-- movq %rcx,UREGS_rcx(%rsp)
-- movq %rax,UREGS_rax(%rsp)
- .if !\compat
- movq %r8,UREGS_r8(%rsp)
- movq %r9,UREGS_r9(%rsp)
-@@ -297,20 +229,17 @@ static always_inline void stac(void)
- .endif
- movq %rbx,UREGS_rbx(%rsp)
- movq %rbp,UREGS_rbp(%rsp)
-- SETUP_EXCEPTION_FRAME_POINTER(UREGS_rbp)
--.endm
--
--/*
-- * Complete a frame potentially only partially saved.
-- */
--.macro SAVE_PRESERVED
-- btrl $_TRAP_regs_partial,UREGS_entry_vector(%rsp)
-- jnc 987f
-+#ifdef CONFIG_FRAME_POINTER
-+/* Indicate special exception stack frame by inverting the frame pointer. */
-+ leaq UREGS_rbp(%rsp), %rbp
-+ notq %rbp
-+#endif
-+.if !\compat
- movq %r12,UREGS_r12(%rsp)
- movq %r13,UREGS_r13(%rsp)
- movq %r14,UREGS_r14(%rsp)
- movq %r15,UREGS_r15(%rsp)
--987:
-+.endif
- .endm
-
- #define LOAD_ONE_REG(reg, compat) \
-@@ -351,33 +280,13 @@ static always_inline void stac(void)
- * @compat: R8-R15 don't need reloading
- */
- .macro RESTORE_ALL adj=0 compat=0
--.if !\compat
-- testl $TRAP_regs_dirty,UREGS_entry_vector(%rsp)
--.endif
- LOAD_C_CLOBBERED \compat
- .if !\compat
-- jz 987f
- movq UREGS_r15(%rsp),%r15
- movq UREGS_r14(%rsp),%r14
- movq UREGS_r13(%rsp),%r13
- movq UREGS_r12(%rsp),%r12
--#ifndef NDEBUG
-- .subsection 1
--987: testl $TRAP_regs_partial,UREGS_entry_vector(%rsp)
-- jnz 987f
-- cmpq UREGS_r15(%rsp),%r15
-- jne 789f
-- cmpq UREGS_r14(%rsp),%r14
-- jne 789f
-- cmpq UREGS_r13(%rsp),%r13
-- jne 789f
-- cmpq UREGS_r12(%rsp),%r12
-- je 987f
--789: BUG /* Corruption of partial register state. */
-- .subsection 0
--#endif
- .endif
--987:
- LOAD_ONE_REG(bp, \compat)
- LOAD_ONE_REG(bx, \compat)
- subq $-(UREGS_error_code-UREGS_r15+\adj), %rsp
---
-2.11.3
-
diff --git a/main/xen/0001-x86-limit-linear-page-table-use-to-a-single-level.patch b/main/xen/0001-x86-limit-linear-page-table-use-to-a-single-level.patch
deleted file mode 100644
index 9310f01ba4..0000000000
--- a/main/xen/0001-x86-limit-linear-page-table-use-to-a-single-level.patch
+++ /dev/null
@@ -1,494 +0,0 @@
-From ea7513a3e3f28cfec59dda6e128b6b4968685762 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Thu, 28 Sep 2017 15:17:27 +0100
-Subject: [PATCH 1/2] x86: limit linear page table use to a single level
-
-That's the only way that they're meant to be used. Without such a
-restriction arbitrarily long chains of same-level page tables can be
-built, tearing down of which may then cause arbitrarily deep recursion,
-causing a stack overflow. To facilitate this restriction, a counter is
-being introduced to track both the number of same-level entries in a
-page table as well as the number of uses of a page table in another
-same-level one (counting into positive and negative direction
-respectively, utilizing the fact that both counts can't be non-zero at
-the same time).
-
-Note that the added accounting introduces a restriction on the number
-of times a page can be used in other same-level page tables - more than
-32k of such uses are no longer possible.
-
-Note also that some put_page_and_type[_preemptible]() calls are
-replaced with open-coded equivalents. This seemed preferable to
-adding "parent_table" to the matrix of functions.
-
-Note further that cross-domain same-level page table references are no
-longer permitted (they probably never should have been).
-
-This is XSA-240.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
----
- xen/arch/x86/domain.c | 1 +
- xen/arch/x86/mm.c | 171 ++++++++++++++++++++++++++++++++++++++-----
- xen/include/asm-x86/domain.h | 2 +
- xen/include/asm-x86/mm.h | 25 +++++--
- 4 files changed, 175 insertions(+), 24 deletions(-)
-
-diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
-index 452748dd5b..44ed2ccd0a 100644
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -1237,6 +1237,7 @@ int arch_set_info_guest(
- case -EINTR:
- rc = -ERESTART;
- case -ERESTART:
-+ v->arch.old_guest_ptpg = NULL;
- v->arch.old_guest_table =
- pagetable_get_page(v->arch.guest_table);
- v->arch.guest_table = pagetable_null();
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index e97ecccd93..e81a461b91 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -732,6 +732,61 @@ static void put_data_page(
- put_page(page);
- }
-
-+static bool_t inc_linear_entries(struct page_info *pg)
-+{
-+ typeof(pg->linear_pt_count) nc = read_atomic(&pg->linear_pt_count), oc;
-+
-+ do {
-+ /*
-+ * The check below checks for the "linear use" count being non-zero
-+ * as well as overflow. Signed integer overflow is undefined behavior
-+ * according to the C spec. However, as long as linear_pt_count is
-+ * smaller in size than 'int', the arithmetic operation of the
-+ * increment below won't overflow; rather the result will be truncated
-+ * when stored. Ensure that this is always true.
-+ */
-+ BUILD_BUG_ON(sizeof(nc) >= sizeof(int));
-+ oc = nc++;
-+ if ( nc <= 0 )
-+ return 0;
-+ nc = cmpxchg(&pg->linear_pt_count, oc, nc);
-+ } while ( oc != nc );
-+
-+ return 1;
-+}
-+
-+static void dec_linear_entries(struct page_info *pg)
-+{
-+ typeof(pg->linear_pt_count) oc;
-+
-+ oc = arch_fetch_and_add(&pg->linear_pt_count, -1);
-+ ASSERT(oc > 0);
-+}
-+
-+static bool_t inc_linear_uses(struct page_info *pg)
-+{
-+ typeof(pg->linear_pt_count) nc = read_atomic(&pg->linear_pt_count), oc;
-+
-+ do {
-+ /* See the respective comment in inc_linear_entries(). */
-+ BUILD_BUG_ON(sizeof(nc) >= sizeof(int));
-+ oc = nc--;
-+ if ( nc >= 0 )
-+ return 0;
-+ nc = cmpxchg(&pg->linear_pt_count, oc, nc);
-+ } while ( oc != nc );
-+
-+ return 1;
-+}
-+
-+static void dec_linear_uses(struct page_info *pg)
-+{
-+ typeof(pg->linear_pt_count) oc;
-+
-+ oc = arch_fetch_and_add(&pg->linear_pt_count, 1);
-+ ASSERT(oc < 0);
-+}
-+
- /*
- * We allow root tables to map each other (a.k.a. linear page tables). It
- * needs some special care with reference counts and access permissions:
-@@ -761,15 +816,35 @@ get_##level##_linear_pagetable( \
- \
- if ( (pfn = level##e_get_pfn(pde)) != pde_pfn ) \
- { \
-+ struct page_info *ptpg = mfn_to_page(pde_pfn); \
-+ \
-+ /* Make sure the page table belongs to the correct domain. */ \
-+ if ( unlikely(page_get_owner(ptpg) != d) ) \
-+ return 0; \
-+ \
- /* Make sure the mapped frame belongs to the correct domain. */ \
- if ( unlikely(!get_page_from_pagenr(pfn, d)) ) \
- return 0; \
- \
- /* \
-- * Ensure that the mapped frame is an already-validated page table. \
-+ * Ensure that the mapped frame is an already-validated page table \
-+ * and is not itself having linear entries, as well as that the \
-+ * containing page table is not itself in use as a linear page table \
-+ * elsewhere. \
- * If so, atomically increment the count (checking for overflow). \
- */ \
- page = mfn_to_page(pfn); \
-+ if ( !inc_linear_entries(ptpg) ) \
-+ { \
-+ put_page(page); \
-+ return 0; \
-+ } \
-+ if ( !inc_linear_uses(page) ) \
-+ { \
-+ dec_linear_entries(ptpg); \
-+ put_page(page); \
-+ return 0; \
-+ } \
- y = page->u.inuse.type_info; \
- do { \
- x = y; \
-@@ -777,6 +852,8 @@ get_##level##_linear_pagetable( \
- unlikely((x & (PGT_type_mask|PGT_validated)) != \
- (PGT_##level##_page_table|PGT_validated)) ) \
- { \
-+ dec_linear_uses(page); \
-+ dec_linear_entries(ptpg); \
- put_page(page); \
- return 0; \
- } \
-@@ -1201,6 +1278,9 @@ get_page_from_l4e(
- l3e_remove_flags((pl3e), _PAGE_USER|_PAGE_RW|_PAGE_ACCESSED); \
- } while ( 0 )
-
-+static int _put_page_type(struct page_info *page, bool_t preemptible,
-+ struct page_info *ptpg);
-+
- void put_page_from_l1e(l1_pgentry_t l1e, struct domain *l1e_owner)
- {
- unsigned long pfn = l1e_get_pfn(l1e);
-@@ -1270,17 +1350,22 @@ static int put_page_from_l2e(l2_pgentry_t l2e, unsigned long pfn)
- if ( l2e_get_flags(l2e) & _PAGE_PSE )
- put_superpage(l2e_get_pfn(l2e));
- else
-- put_page_and_type(l2e_get_page(l2e));
-+ {
-+ struct page_info *pg = l2e_get_page(l2e);
-+ int rc = _put_page_type(pg, 0, mfn_to_page(pfn));
-+
-+ ASSERT(!rc);
-+ put_page(pg);
-+ }
-
- return 0;
- }
-
--static int __put_page_type(struct page_info *, int preemptible);
--
- static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
- int partial, bool_t defer)
- {
- struct page_info *pg;
-+ int rc;
-
- if ( !(l3e_get_flags(l3e) & _PAGE_PRESENT) || (l3e_get_pfn(l3e) == pfn) )
- return 1;
-@@ -1303,21 +1388,28 @@ static int put_page_from_l3e(l3_pgentry_t l3e, unsigned long pfn,
- if ( unlikely(partial > 0) )
- {
- ASSERT(!defer);
-- return __put_page_type(pg, 1);
-+ return _put_page_type(pg, 1, mfn_to_page(pfn));
- }
-
- if ( defer )
- {
-+ current->arch.old_guest_ptpg = mfn_to_page(pfn);
- current->arch.old_guest_table = pg;
- return 0;
- }
-
-- return put_page_and_type_preemptible(pg);
-+ rc = _put_page_type(pg, 1, mfn_to_page(pfn));
-+ if ( likely(!rc) )
-+ put_page(pg);
-+
-+ return rc;
- }
-
- static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
- int partial, bool_t defer)
- {
-+ int rc = 1;
-+
- if ( (l4e_get_flags(l4e) & _PAGE_PRESENT) &&
- (l4e_get_pfn(l4e) != pfn) )
- {
-@@ -1326,18 +1418,22 @@ static int put_page_from_l4e(l4_pgentry_t l4e, unsigned long pfn,
- if ( unlikely(partial > 0) )
- {
- ASSERT(!defer);
-- return __put_page_type(pg, 1);
-+ return _put_page_type(pg, 1, mfn_to_page(pfn));
- }
-
- if ( defer )
- {
-+ current->arch.old_guest_ptpg = mfn_to_page(pfn);
- current->arch.old_guest_table = pg;
- return 0;
- }
-
-- return put_page_and_type_preemptible(pg);
-+ rc = _put_page_type(pg, 1, mfn_to_page(pfn));
-+ if ( likely(!rc) )
-+ put_page(pg);
- }
-- return 1;
-+
-+ return rc;
- }
-
- static int alloc_l1_table(struct page_info *page)
-@@ -1535,6 +1631,7 @@ static int alloc_l3_table(struct page_info *page)
- {
- page->nr_validated_ptes = i;
- page->partial_pte = 0;
-+ current->arch.old_guest_ptpg = NULL;
- current->arch.old_guest_table = page;
- }
- while ( i-- > 0 )
-@@ -1627,6 +1724,7 @@ static int alloc_l4_table(struct page_info *page)
- {
- if ( current->arch.old_guest_table )
- page->nr_validated_ptes++;
-+ current->arch.old_guest_ptpg = NULL;
- current->arch.old_guest_table = page;
- }
- }
-@@ -2369,14 +2467,20 @@ int free_page_type(struct page_info *pag
- }
-
-
--static int __put_final_page_type(
-- struct page_info *page, unsigned long type, int preemptible)
-+static int _put_final_page_type(struct page_info *page, unsigned long type,
-+ bool_t preemptible, struct page_info *ptpg)
- {
- int rc = free_page_type(page, type, preemptible);
-
- /* No need for atomic update of type_info here: noone else updates it. */
- if ( rc == 0 )
- {
-+ if ( ptpg && PGT_type_equal(type, ptpg->u.inuse.type_info) )
-+ {
-+ dec_linear_uses(page);
-+ dec_linear_entries(ptpg);
-+ }
-+ ASSERT(!page->linear_pt_count || page_get_owner(page)->is_dying);
- /*
- * Record TLB information for flush later. We do not stamp page tables
- * when running in shadow mode:
-@@ -2412,8 +2516,8 @@ static int __put_final_page_type(
- }
-
-
--static int __put_page_type(struct page_info *page,
-- int preemptible)
-+static int _put_page_type(struct page_info *page, bool_t preemptible,
-+ struct page_info *ptpg)
- {
- unsigned long nx, x, y = page->u.inuse.type_info;
- int rc = 0;
-@@ -2440,12 +2544,28 @@ static int __put_page_type(struct page_info *page,
- x, nx)) != x) )
- continue;
- /* We cleared the 'valid bit' so we do the clean up. */
-- rc = __put_final_page_type(page, x, preemptible);
-+ rc = _put_final_page_type(page, x, preemptible, ptpg);
-+ ptpg = NULL;
- if ( x & PGT_partial )
- put_page(page);
- break;
- }
-
-+ if ( ptpg && PGT_type_equal(x, ptpg->u.inuse.type_info) )
-+ {
-+ /*
-+ * page_set_tlbflush_timestamp() accesses the same union
-+ * linear_pt_count lives in. Unvalidated page table pages,
-+ * however, should occur during domain destruction only
-+ * anyway. Updating of linear_pt_count luckily is not
-+ * necessary anymore for a dying domain.
-+ */
-+ ASSERT(page_get_owner(page)->is_dying);
-+ ASSERT(page->linear_pt_count < 0);
-+ ASSERT(ptpg->linear_pt_count > 0);
-+ ptpg = NULL;
-+ }
-+
- /*
- * Record TLB information for flush later. We do not stamp page
- * tables when running in shadow mode:
-@@ -2465,6 +2585,13 @@ static int __put_page_type(struct page_info *page,
- return -EINTR;
- }
-
-+ if ( ptpg && PGT_type_equal(x, ptpg->u.inuse.type_info) )
-+ {
-+ ASSERT(!rc);
-+ dec_linear_uses(page);
-+ dec_linear_entries(ptpg);
-+ }
-+
- return rc;
- }
-
-@@ -2599,6 +2726,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
- page->nr_validated_ptes = 0;
- page->partial_pte = 0;
- }
-+ page->linear_pt_count = 0;
- rc = alloc_page_type(page, type, preemptible);
- }
-
-@@ -2610,7 +2738,7 @@ static int __get_page_type(struct page_info *page, unsigned long type,
-
- void put_page_type(struct page_info *page)
- {
-- int rc = __put_page_type(page, 0);
-+ int rc = _put_page_type(page, 0, NULL);
- ASSERT(rc == 0);
- (void)rc;
- }
-@@ -2626,7 +2754,7 @@ int get_page_type(struct page_info *page, unsigned long type)
-
- int put_page_type_preemptible(struct page_info *page)
- {
-- return __put_page_type(page, 1);
-+ return _put_page_type(page, 1, NULL);
- }
-
- int get_page_type_preemptible(struct page_info *page, unsigned long type)
-@@ -2832,11 +2960,14 @@ int put_old_guest_table(struct vcpu *v)
- if ( !v->arch.old_guest_table )
- return 0;
-
-- switch ( rc = put_page_and_type_preemptible(v->arch.old_guest_table) )
-+ switch ( rc = _put_page_type(v->arch.old_guest_table, 1,
-+ v->arch.old_guest_ptpg) )
- {
- case -EINTR:
- case -ERESTART:
- return -ERESTART;
-+ case 0:
-+ put_page(v->arch.old_guest_table);
- }
-
- v->arch.old_guest_table = NULL;
-@@ -2993,6 +3124,7 @@ int new_guest_cr3(unsigned long mfn)
- rc = -ERESTART;
- /* fallthrough */
- case -ERESTART:
-+ curr->arch.old_guest_ptpg = NULL;
- curr->arch.old_guest_table = page;
- break;
- default:
-@@ -3260,7 +3392,10 @@ long do_mmuext_op(
- if ( type == PGT_l1_page_table )
- put_page_and_type(page);
- else
-+ {
-+ curr->arch.old_guest_ptpg = NULL;
- curr->arch.old_guest_table = page;
-+ }
- }
- }
-
-@@ -3293,6 +3428,7 @@ long do_mmuext_op(
- {
- case -EINTR:
- case -ERESTART:
-+ curr->arch.old_guest_ptpg = NULL;
- curr->arch.old_guest_table = page;
- rc = 0;
- break;
-@@ -3371,6 +3507,7 @@ long do_mmuext_op(
- rc = -ERESTART;
- /* fallthrough */
- case -ERESTART:
-+ curr->arch.old_guest_ptpg = NULL;
- curr->arch.old_guest_table = page;
- break;
- default:
-diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
-index 165e533ab3..5ef761be8b 100644
---- a/xen/include/asm-x86/domain.h
-+++ b/xen/include/asm-x86/domain.h
-@@ -529,6 +529,8 @@ struct arch_vcpu
- pagetable_t guest_table_user; /* (MFN) x86/64 user-space pagetable */
- pagetable_t guest_table; /* (MFN) guest notion of cr3 */
- struct page_info *old_guest_table; /* partially destructed pagetable */
-+ struct page_info *old_guest_ptpg; /* containing page table of the */
-+ /* former, if any */
- /* guest_table holds a ref to the page, and also a type-count unless
- * shadow refcounts are in use */
- pagetable_t shadow_table[4]; /* (MFN) shadow(s) of guest */
-diff --git a/xen/include/asm-x86/mm.h b/xen/include/asm-x86/mm.h
-index a30e76db1e..905c7971f2 100644
---- a/xen/include/asm-x86/mm.h
-+++ b/xen/include/asm-x86/mm.h
-@@ -125,11 +125,11 @@ struct page_info
- u32 tlbflush_timestamp;
-
- /*
-- * When PGT_partial is true then this field is valid and indicates
-- * that PTEs in the range [0, @nr_validated_ptes) have been validated.
-- * An extra page reference must be acquired (or not dropped) whenever
-- * PGT_partial gets set, and it must be dropped when the flag gets
-- * cleared. This is so that a get() leaving a page in partially
-+ * When PGT_partial is true then the first two fields are valid and
-+ * indicate that PTEs in the range [0, @nr_validated_ptes) have been
-+ * validated. An extra page reference must be acquired (or not dropped)
-+ * whenever PGT_partial gets set, and it must be dropped when the flag
-+ * gets cleared. This is so that a get() leaving a page in partially
- * validated state (where the caller would drop the reference acquired
- * due to the getting of the type [apparently] failing [-ERESTART])
- * would not accidentally result in a page left with zero general
-@@ -153,10 +153,18 @@ struct page_info
- * put_page_from_lNe() (due to the apparent failure), and hence it
- * must be dropped when the put operation is resumed (and completes),
- * but it must not be acquired if picking up the page for validation.
-+ *
-+ * The 3rd field, @linear_pt_count, indicates
-+ * - by a positive value, how many same-level page table entries a page
-+ * table has,
-+ * - by a negative value, in how many same-level page tables a page is
-+ * in use.
- */
- struct {
-- u16 nr_validated_ptes;
-- s8 partial_pte;
-+ u16 nr_validated_ptes:PAGETABLE_ORDER + 1;
-+ u16 :16 - PAGETABLE_ORDER - 1 - 2;
-+ s16 partial_pte:2;
-+ s16 linear_pt_count;
- };
-
- /*
-@@ -207,6 +215,9 @@ struct page_info
- #define PGT_count_width PG_shift(9)
- #define PGT_count_mask ((1UL<<PGT_count_width)-1)
-
-+/* Are the 'type mask' bits identical? */
-+#define PGT_type_equal(x, y) (!(((x) ^ (y)) & PGT_type_mask))
-+
- /* Cleared when the owning guest 'frees' this page. */
- #define _PGC_allocated PG_shift(1)
- #define PGC_allocated PG_mask(1, 1)
---
-2.14.1
-
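The dual-purpose reference count described in the XSA-240 patch above (positive for the number of same-level entries a page table holds, negative for the number of same-level page tables it is used in) can be sketched in standalone C11. The single global field and the simplified names below are assumptions for illustration, not the real per-page Xen structure.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

/*
 * One atomic 16-bit field acts as two mutually exclusive counters:
 * positive = how many same-level entries this page table contains,
 * negative = in how many same-level page tables this page is in use.
 * The two counts can never be non-zero at the same time, and each
 * direction refuses to move past roughly 32k.
 */
static _Atomic int16_t linear_pt_count;

static bool inc_linear_entries(void)
{
    int16_t oc = atomic_load(&linear_pt_count);

    do {
        if ( oc < 0 || oc == INT16_MAX )   /* used as an entry, or full */
            return false;
    } while ( !atomic_compare_exchange_weak(&linear_pt_count, &oc, oc + 1) );

    return true;
}

static bool inc_linear_uses(void)
{
    int16_t oc = atomic_load(&linear_pt_count);

    do {
        if ( oc > 0 || oc == INT16_MIN )   /* has entries, or full */
            return false;
    } while ( !atomic_compare_exchange_weak(&linear_pt_count, &oc, oc - 1) );

    return true;
}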
diff --git a/main/xen/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch b/main/xen/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
deleted file mode 100644
index e72d7511b3..0000000000
--- a/main/xen/0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
+++ /dev/null
@@ -1,109 +0,0 @@
-From 01feeda5363dd8d2fea8395c2c435203751c8ba5 Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 10 Nov 2017 16:53:55 +0000
-Subject: [PATCH 2/2] p2m: Check return value of p2m_set_entry() when
- decreasing reservation
-
-If the entire range specified to p2m_pod_decrease_reservation() is marked
-populate-on-demand, then it will make a single p2m_set_entry() call,
-reducing its PoD entry count.
-
-Unfortunately, in the right circumstances, this p2m_set_entry() call
-may fail. In that case, repeated calls to decrease_reservation() may
-cause p2m->pod.entry_count to fall below zero, potentially tripping
-over BUG_ON()s to the contrary.
-
-Instead, check to see if the entry succeeded, and return false if not.
-The caller will then call guest_remove_page() on the gfns, which will
-return -EINVAL upon finding no valid memory there to return.
-
-Unfortunately if the order > 0, the entry may have partially changed.
-A domain_crash() is probably the safest thing in that case.
-
-Other p2m_set_entry() calls in the same function should be fine,
-because they are writing the entry at its current order. Nonetheless,
-check the return value and crash if our assumption turns out to be
-wrong.
-
-This is part of XSA-247.
-
-Reported-by: George Dunlap <george.dunlap.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
-v2: Crash the domain if we're not sure it's safe (or if we think it
-can't happen)
----
- xen/arch/x86/mm/p2m-pod.c | 42 +++++++++++++++++++++++++++++++++---------
- 1 file changed, 33 insertions(+), 9 deletions(-)
-
-diff --git a/xen/arch/x86/mm/p2m-pod.c b/xen/arch/x86/mm/p2m-pod.c
-index 5ec8a37949..91d309647e 100644
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -557,11 +557,23 @@ p2m_pod_decrease_reservation(struct domain *d,
-
- if ( !nonpod )
- {
-- /* All PoD: Mark the whole region invalid and tell caller
-- * we're done. */
-- p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
-- p2m->default_access);
-- p2m->pod.entry_count-=(1<<order);
-+ /*
-+ * All PoD: Mark the whole region invalid and tell caller
-+ * we're done.
-+ */
-+ if ( p2m_set_entry(p2m, gpfn, _mfn(INVALID_MFN), order, p2m_invalid,
-+ p2m->default_access) )
-+ {
-+ /*
-+ * If this fails, we can't tell how much of the range was changed.
-+ * Best to crash the domain unless we're sure a partial change is
-+ * impossible.
-+ */
-+ if ( order != 0 )
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
-+ p2m->pod.entry_count -= 1UL << order;
- BUG_ON(p2m->pod.entry_count < 0);
- ret = 1;
- goto out_entry_check;
-@@ -602,8 +614,14 @@ p2m_pod_decrease_reservation(struct domain *d,
- n = 1UL << cur_order;
- if ( t == p2m_populate_on_demand )
- {
-- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
-- p2m_invalid, p2m->default_access);
-+ /* This shouldn't be able to fail */
-+ if ( p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
-+ p2m_invalid, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
- p2m->pod.entry_count -= n;
- BUG_ON(p2m->pod.entry_count < 0);
- pod -= n;
-@@ -624,8 +642,14 @@ p2m_pod_decrease_reservation(struct domain *d,
-
- page = mfn_to_page(mfn);
-
-- p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
-- p2m_invalid, p2m->default_access);
-+ /* This shouldn't be able to fail */
-+ if ( p2m_set_entry(p2m, gpfn + i, _mfn(INVALID_MFN), cur_order,
-+ p2m_invalid, p2m->default_access) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ domain_crash(d);
-+ goto out_unlock;
-+ }
- p2m_tlb_flush_sync(p2m);
- for ( j = 0; j < n; ++j )
- set_gpfn_from_mfn(mfn_x(mfn), INVALID_M2P_ENTRY);
---
-2.15.0
-
diff --git a/main/xen/0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch b/main/xen/0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
deleted file mode 100644
index fa7b95038c..0000000000
--- a/main/xen/0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: enforce proper privilege when (un)mapping pIRQ-s
-
-(Un)mapping of IRQs, just like other RESOURCE__ADD* / RESOURCE__REMOVE*
-actions (in FLASK terms) should be XSM_DM_PRIV rather than XSM_TARGET.
-This in turn requires bypassing the XSM check in physdev_unmap_pirq()
-for the HVM emuirq case just like is being done in physdev_map_pirq().
-The primary goal security wise, however, is to no longer allow HVM
-guests, by specifying their own domain ID instead of DOMID_SELF, to
-enter code paths intended for PV guest and the control domains of HVM
-guests only.
-
-This is part of XSA-237.
-
-Reported-by: HW42 <hw42@ipsumj.de>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/physdev.c
-+++ b/xen/arch/x86/physdev.c
-@@ -110,7 +110,7 @@ int physdev_map_pirq(domid_t domid, int
- if ( d == NULL )
- return -ESRCH;
-
-- ret = xsm_map_domain_pirq(XSM_TARGET, d);
-+ ret = xsm_map_domain_pirq(XSM_DM_PRIV, d);
- if ( ret )
- goto free_domain;
-
-@@ -255,13 +255,14 @@ int physdev_map_pirq(domid_t domid, int
- int physdev_unmap_pirq(domid_t domid, int pirq)
- {
- struct domain *d;
-- int ret;
-+ int ret = 0;
-
- d = rcu_lock_domain_by_any_id(domid);
- if ( d == NULL )
- return -ESRCH;
-
-- ret = xsm_unmap_domain_pirq(XSM_TARGET, d);
-+ if ( domid != DOMID_SELF || !is_hvm_domain(d) )
-+ ret = xsm_unmap_domain_pirq(XSM_DM_PRIV, d);
- if ( ret )
- goto free_domain;
-
---- a/xen/include/xsm/dummy.h
-+++ b/xen/include/xsm/dummy.h
-@@ -453,7 +453,7 @@ static XSM_INLINE char *xsm_show_irq_sid
-
- static XSM_INLINE int xsm_map_domain_pirq(XSM_DEFAULT_ARG struct domain *d)
- {
-- XSM_ASSERT_ACTION(XSM_TARGET);
-+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
- }
-
-@@ -465,7 +465,7 @@ static XSM_INLINE int xsm_map_domain_irq
-
- static XSM_INLINE int xsm_unmap_domain_pirq(XSM_DEFAULT_ARG struct domain *d)
- {
-- XSM_ASSERT_ACTION(XSM_TARGET);
-+ XSM_ASSERT_ACTION(XSM_DM_PRIV);
- return xsm_default_action(action, current->domain, d);
- }
-
diff --git a/main/xen/0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch b/main/xen/0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
deleted file mode 100644
index 0b0d00c273..0000000000
--- a/main/xen/0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
+++ /dev/null
@@ -1,47 +0,0 @@
-From 828e290b7dfb90c266ccf53d75ac2b68dc206647 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Wed, 17 Jan 2018 17:23:37 +0100
-Subject: [PATCH 2/4] x86/mm: Always set _PAGE_ACCESSED on L4e updates
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-master commit: bd61fe94bee0556bc2f64999a4a8315b93f90f21
-master date: 2018-01-15 13:53:16 +0000
-
-(cherry picked from commit 9b76908e6e074d7efbeafe6bad066ecc5f3c3c43)
----
- xen/arch/x86/mm.c | 14 +++++++++++++-
- 1 file changed, 13 insertions(+), 1 deletion(-)
-
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index 0426b6e00d..8b611022db 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -1187,11 +1187,23 @@ get_page_from_l4e(
- _PAGE_USER|_PAGE_RW); \
- } while ( 0 )
-
-+/*
-+ * When shadowing an L4 behind the guests back (e.g. for per-pcpu
-+ * purposes), we cannot efficiently sync access bit updates from hardware
-+ * (on the shadow tables) back into the guest view.
-+ *
-+ * We therefore unconditionally set _PAGE_ACCESSED even in the guests
-+ * view. This will appear to the guest as a CPU which proactively pulls
-+ * all valid L4e's into its TLB, which is compatible with the x86 ABI.
-+ *
-+ * At the time of writing, all PV guests set the access bit anyway, so
-+ * this is no actual change in their behaviour.
-+ */
- #define adjust_guest_l4e(pl4e, d) \
- do { \
- if ( likely(l4e_get_flags((pl4e)) & _PAGE_PRESENT) && \
- likely(!is_pv_32bit_domain(d)) ) \
-- l4e_add_flags((pl4e), _PAGE_USER); \
-+ l4e_add_flags((pl4e), _PAGE_USER | _PAGE_ACCESSED); \
- } while ( 0 )
-
- #define unadjust_guest_l3e(pl3e, d) \
---
-2.11.3
-
diff --git a/main/xen/0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch b/main/xen/0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
deleted file mode 100644
index a5c06af6dd..0000000000
--- a/main/xen/0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
+++ /dev/null
@@ -1,82 +0,0 @@
-From 9a4b34729f1bb92eea1e1efe52e6face9f0b17ae Mon Sep 17 00:00:00 2001
-From: George Dunlap <george.dunlap@citrix.com>
-Date: Fri, 22 Sep 2017 11:46:55 +0100
-Subject: [PATCH 2/2] x86/mm: Disable PV linear pagetables by default
-
-Allowing pagetables to point to other pagetables of the same level
-(often called 'linear pagetables') has been included in Xen since its
-inception. But it is not used by the most common PV guests (Linux,
-NetBSD, minios), and has been the source of a number of subtle
-reference-counting bugs.
-
-Add a command-line option to control whether PV linear pagetables are
-allowed (disabled by default).
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
----
-Changes since v2:
-- s/_/-/; in command-line option
-- Added __read_mostly
----
- docs/misc/xen-command-line.markdown | 15 +++++++++++++++
- xen/arch/x86/mm.c | 9 +++++++++
- 2 files changed, 24 insertions(+)
-
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index 73f5265fc6..061aff5edc 100644
---- a/docs/misc/xen-command-line.markdown
-+++ b/docs/misc/xen-command-line.markdown
-@@ -1280,6 +1280,21 @@ The following resources are available:
- CDP, one COS will corespond two CBMs other than one with CAT, due to the
- sum of CBMs is fixed, that means actual `cos_max` in use will automatically
- reduce to half when CDP is enabled.
-+
-+### pv-linear-pt
-+> `= <boolean>`
-+
-+> Default: `false`
-+
-+Allow PV guests to have pagetable entries pointing to other pagetables
-+of the same level (i.e., allowing L2 PTEs to point to other L2 pages).
-+This technique is often called "linear pagetables", and is sometimes
-+used to allow operating systems a simple way to consistently map the
-+current process's pagetables into its own virtual address space.
-+
-+None of the most common PV operating systems (Linux, NetBSD, MiniOS)
-+use this technique, but there may be custom operating systems which
-+do.
-
- ### reboot
- > `= t[riple] | k[bd] | a[cpi] | p[ci] | P[ower] | e[fi] | n[o] [, [w]arm | [c]old]`
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index e81a461b91..f748d4a221 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -799,6 +799,9 @@ static void dec_linear_uses(struct page_info *pg)
- * frame if it is mapped by a different root table. This is sufficient and
- * also necessary to allow validation of a root table mapping itself.
- */
-+static bool_t __read_mostly pv_linear_pt_enable = 0;
-+boolean_param("pv-linear-pt", pv_linear_pt_enable);
-+
- #define define_get_linear_pagetable(level) \
- static int \
- get_##level##_linear_pagetable( \
-@@ -808,6 +811,12 @@ get_##level##_linear_pagetable( \
- struct page_info *page; \
- unsigned long pfn; \
- \
-+ if ( !pv_linear_pt_enable ) \
-+ { \
-+ MEM_LOG("Attempt to create linear p.t. (feature disabled)"); \
-+ return 0; \
-+ } \
-+ \
- if ( (level##e_get_flags(pde) & _PAGE_RW) ) \
- { \
- MEM_LOG("Attempt to create linear p.t. with write perms"); \
---
-2.14.1
-
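The patch above gates a rarely used feature behind an off-by-default command-line switch. Below is a minimal standalone sketch of the same opt-in pattern; the argv parsing and names stand in for Xen's boolean_param() machinery and are assumptions, not Xen code.

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Off by default: risky legacy features should be opt-in. */
static bool pv_linear_pt_enable = false;

/* Hypothetical stand-in for boolean_param("pv-linear-pt", ...). */
static void parse_cmdline(int argc, char **argv)
{
    for ( int i = 1; i < argc; i++ )
        if ( strcmp(argv[i], "pv-linear-pt") == 0 )
            pv_linear_pt_enable = true;
}

/* Simplified shape of get_<level>_linear_pagetable(): 0 means refusal. */
static int get_linear_pagetable(void)
{
    if ( !pv_linear_pt_enable )
    {
        fprintf(stderr, "Attempt to create linear p.t. (feature disabled)\n");
        return 0;   /* reject before any feature-specific validation */
    }
    /* ... write-permission and type checks would follow here ... */
    return 1;
}

int main(int argc, char **argv)
{
    parse_cmdline(argc, argv);
    return get_linear_pagetable() ? 0 : 1;
}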
diff --git a/main/xen/0003-x86-MSI-disallow-redundant-enabling.patch b/main/xen/0003-x86-MSI-disallow-redundant-enabling.patch
deleted file mode 100644
index 5c69c48265..0000000000
--- a/main/xen/0003-x86-MSI-disallow-redundant-enabling.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/MSI: disallow redundant enabling
-
-At the moment, Xen attempts to allow redundant enabling of MSI by
-having pci_enable_msi() return 0, and point to the existing MSI
-descriptor, when the msi already exists.
-
-Unfortunately, if subsequent errors are encountered, the cleanup
-paths assume pci_enable_msi() had done full initialization, and
-hence undo everything that was assumed to be done by that
-function without also undoing other setup that would normally
-occur only after that function was called (in map_domain_pirq()
-itself).
-
-Rather than try to make the redundant enabling case work properly, just
-forbid it entirely by having pci_enable_msi() return -EEXIST when MSI
-is already set up.
-
-This is part of XSA-237.
-
-Reported-by: HW42 <hw42@ipsumj.de>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/msi.c
-+++ b/xen/arch/x86/msi.c
-@@ -1050,11 +1050,10 @@ static int __pci_enable_msi(struct msi_i
- old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSI);
- if ( old_desc )
- {
-- printk(XENLOG_WARNING "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n",
-+ printk(XENLOG_ERR "irq %d already mapped to MSI on %04x:%02x:%02x.%u\n",
- msi->irq, msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
-- *desc = old_desc;
-- return 0;
-+ return -EEXIST;
- }
-
- old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSIX);
-@@ -1118,11 +1117,10 @@ static int __pci_enable_msix(struct msi_
- old_desc = find_msi_entry(pdev, msi->irq, PCI_CAP_ID_MSIX);
- if ( old_desc )
- {
-- printk(XENLOG_WARNING "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n",
-+ printk(XENLOG_ERR "irq %d already mapped to MSI-X on %04x:%02x:%02x.%u\n",
- msi->irq, msi->seg, msi->bus,
- PCI_SLOT(msi->devfn), PCI_FUNC(msi->devfn));
-- *desc = old_desc;
-- return 0;
-+ return -EEXIST;
- }
-
- old_desc = find_msi_entry(pdev, -1, PCI_CAP_ID_MSI);
diff --git a/main/xen/0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch b/main/xen/0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
deleted file mode 100644
index 12af08f144..0000000000
--- a/main/xen/0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
+++ /dev/null
@@ -1,760 +0,0 @@
-From 50c9c86f4b02bbebbacfa812dd25f8e9176633b2 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Wed, 17 Jan 2018 17:24:12 +0100
-Subject: [PATCH 3/4] x86: Meltdown band-aid against malicious 64-bit PV guests
-
-This is a very simplistic change limiting the amount of memory a running
-64-bit PV guest has mapped (and hence available for attacking): Only the
-mappings of stack, IDT, and TSS are being cloned from the direct map
-into per-CPU page tables. Guest controlled parts of the page tables are
-being copied into those per-CPU page tables upon entry into the guest.
-Cross-vCPU synchronization of top level page table entry changes is
-being effected by forcing other active vCPU-s of the guest into the
-hypervisor.
-
-The change to context_switch() isn't strictly necessary, but there's no
-reason to keep switching page tables once a PV guest is being scheduled
-out.
-
-This isn't providing full isolation yet, but it should be covering all
-pieces of information exposure of which would otherwise require an XSA.
-
-There is certainly much room for improvement, especially of performance,
-here - first and foremost suppressing all the negative effects on AMD
-systems. But in the interest of backportability (including to really old
-hypervisors, which may not even have alternative patching) any such is
-being left out here.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-master commit: 5784de3e2067ed73efc2fe42e62831e8ae7f46c4
-master date: 2018-01-16 17:49:03 +0100
-
-(cherry picked from commit e19517a3355acaaa2ff83018bc41e7fd044161e5)
----
- xen/arch/x86/domain.c | 5 +
- xen/arch/x86/mm.c | 19 +++-
- xen/arch/x86/smpboot.c | 198 +++++++++++++++++++++++++++++++++++++
- xen/arch/x86/x86_64/asm-offsets.c | 2 +
- xen/arch/x86/x86_64/compat/entry.S | 11 +++
- xen/arch/x86/x86_64/entry.S | 149 +++++++++++++++++++++++++++-
- xen/include/asm-x86/asm_defns.h | 30 ++++++
- xen/include/asm-x86/current.h | 12 +++
- xen/include/asm-x86/processor.h | 1 +
- xen/include/asm-x86/x86_64/page.h | 5 +-
- 10 files changed, 425 insertions(+), 7 deletions(-)
-
-diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
-index c9328f804e..512b77a5d0 100644
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -1930,6 +1930,9 @@ static void paravirt_ctxt_switch_to(struct vcpu *v)
-
- switch_kernel_stack(v);
-
-+ this_cpu(root_pgt)[root_table_offset(PERDOMAIN_VIRT_START)] =
-+ l4e_from_page(v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-+
- cr4 = pv_guest_cr4_to_real_cr4(v);
- if ( unlikely(cr4 != read_cr4()) )
- write_cr4(cr4);
-@@ -2077,6 +2080,8 @@ void context_switch(struct vcpu *prev, struct vcpu *next)
-
- ASSERT(local_irq_is_enabled());
-
-+ get_cpu_info()->xen_cr3 = 0;
-+
- cpumask_copy(&dirty_mask, next->vcpu_dirty_cpumask);
- /* Allow at most one CPU at a time to be dirty. */
- ASSERT(cpumask_weight(&dirty_mask) <= 1);
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index 8b611022db..22cd8550fc 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -3659,6 +3659,7 @@ long do_mmu_update(
- struct vcpu *curr = current, *v = curr;
- struct domain *d = v->domain, *pt_owner = d, *pg_owner;
- struct domain_mmap_cache mapcache;
-+ bool_t sync_guest = 0;
- uint32_t xsm_needed = 0;
- uint32_t xsm_checked = 0;
- int rc = put_old_guest_table(curr);
-@@ -3844,7 +3845,9 @@ long do_mmu_update(
- case PGT_l4_page_table:
- rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
- cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
-- break;
-+ if ( !rc )
-+ sync_guest = 1;
-+ break;
- case PGT_writable_page:
- perfc_incr(writable_mmu_updates);
- if ( paging_write_guest_entry(v, va, req.val, _mfn(mfn)) )
-@@ -3946,6 +3949,20 @@ long do_mmu_update(
-
- domain_mmap_cache_destroy(&mapcache);
-
-+ if ( sync_guest )
-+ {
-+ /*
-+ * Force other vCPU-s of the affected guest to pick up L4 entry
-+ * changes (if any). Issue a flush IPI with empty operation mask to
-+ * facilitate this (including ourselves waiting for the IPI to
-+ * actually have arrived). Utilize the fact that FLUSH_VA_VALID is
-+ * meaningless without FLUSH_CACHE, but will allow to pass the no-op
-+ * check in flush_area_mask().
-+ */
-+ flush_area_mask(pt_owner->domain_dirty_cpumask,
-+ ZERO_BLOCK_PTR, FLUSH_VA_VALID);
-+ }
-+
- perfc_add(num_page_updates, i);
-
- out:
-diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
-index 1c02681fb6..dc212710cd 100644
---- a/xen/arch/x86/smpboot.c
-+++ b/xen/arch/x86/smpboot.c
-@@ -319,6 +319,9 @@ void start_secondary(void *unused)
- */
- spin_debug_disable();
-
-+ get_cpu_info()->xen_cr3 = 0;
-+ get_cpu_info()->pv_cr3 = __pa(this_cpu(root_pgt));
-+
- load_system_tables();
-
- /* Full exception support from here on in. */
-@@ -628,6 +631,187 @@ void cpu_exit_clear(unsigned int cpu)
- set_cpu_state(CPU_STATE_DEAD);
- }
-
-+static int clone_mapping(const void *ptr, root_pgentry_t *rpt)
-+{
-+ unsigned long linear = (unsigned long)ptr, pfn;
-+ unsigned int flags;
-+ l3_pgentry_t *pl3e = l4e_to_l3e(idle_pg_table[root_table_offset(linear)]) +
-+ l3_table_offset(linear);
-+ l2_pgentry_t *pl2e;
-+ l1_pgentry_t *pl1e;
-+
-+ if ( linear < DIRECTMAP_VIRT_START )
-+ return 0;
-+
-+ flags = l3e_get_flags(*pl3e);
-+ ASSERT(flags & _PAGE_PRESENT);
-+ if ( flags & _PAGE_PSE )
-+ {
-+ pfn = (l3e_get_pfn(*pl3e) & ~((1UL << (2 * PAGETABLE_ORDER)) - 1)) |
-+ (PFN_DOWN(linear) & ((1UL << (2 * PAGETABLE_ORDER)) - 1));
-+ flags &= ~_PAGE_PSE;
-+ }
-+ else
-+ {
-+ pl2e = l3e_to_l2e(*pl3e) + l2_table_offset(linear);
-+ flags = l2e_get_flags(*pl2e);
-+ ASSERT(flags & _PAGE_PRESENT);
-+ if ( flags & _PAGE_PSE )
-+ {
-+ pfn = (l2e_get_pfn(*pl2e) & ~((1UL << PAGETABLE_ORDER) - 1)) |
-+ (PFN_DOWN(linear) & ((1UL << PAGETABLE_ORDER) - 1));
-+ flags &= ~_PAGE_PSE;
-+ }
-+ else
-+ {
-+ pl1e = l2e_to_l1e(*pl2e) + l1_table_offset(linear);
-+ flags = l1e_get_flags(*pl1e);
-+ if ( !(flags & _PAGE_PRESENT) )
-+ return 0;
-+ pfn = l1e_get_pfn(*pl1e);
-+ }
-+ }
-+
-+ if ( !(root_get_flags(rpt[root_table_offset(linear)]) & _PAGE_PRESENT) )
-+ {
-+ pl3e = alloc_xen_pagetable();
-+ if ( !pl3e )
-+ return -ENOMEM;
-+ clear_page(pl3e);
-+ l4e_write(&rpt[root_table_offset(linear)],
-+ l4e_from_paddr(__pa(pl3e), __PAGE_HYPERVISOR));
-+ }
-+ else
-+ pl3e = l4e_to_l3e(rpt[root_table_offset(linear)]);
-+
-+ pl3e += l3_table_offset(linear);
-+
-+ if ( !(l3e_get_flags(*pl3e) & _PAGE_PRESENT) )
-+ {
-+ pl2e = alloc_xen_pagetable();
-+ if ( !pl2e )
-+ return -ENOMEM;
-+ clear_page(pl2e);
-+ l3e_write(pl3e, l3e_from_paddr(__pa(pl2e), __PAGE_HYPERVISOR));
-+ }
-+ else
-+ {
-+ ASSERT(!(l3e_get_flags(*pl3e) & _PAGE_PSE));
-+ pl2e = l3e_to_l2e(*pl3e);
-+ }
-+
-+ pl2e += l2_table_offset(linear);
-+
-+ if ( !(l2e_get_flags(*pl2e) & _PAGE_PRESENT) )
-+ {
-+ pl1e = alloc_xen_pagetable();
-+ if ( !pl1e )
-+ return -ENOMEM;
-+ clear_page(pl1e);
-+ l2e_write(pl2e, l2e_from_paddr(__pa(pl1e), __PAGE_HYPERVISOR));
-+ }
-+ else
-+ {
-+ ASSERT(!(l2e_get_flags(*pl2e) & _PAGE_PSE));
-+ pl1e = l2e_to_l1e(*pl2e);
-+ }
-+
-+ pl1e += l1_table_offset(linear);
-+
-+ if ( l1e_get_flags(*pl1e) & _PAGE_PRESENT )
-+ {
-+ ASSERT(l1e_get_pfn(*pl1e) == pfn);
-+ ASSERT(l1e_get_flags(*pl1e) == flags);
-+ }
-+ else
-+ l1e_write(pl1e, l1e_from_pfn(pfn, flags));
-+
-+ return 0;
-+}
-+
-+DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
-+
-+static int setup_cpu_root_pgt(unsigned int cpu)
-+{
-+ root_pgentry_t *rpt = alloc_xen_pagetable();
-+ unsigned int off;
-+ int rc;
-+
-+ if ( !rpt )
-+ return -ENOMEM;
-+
-+ clear_page(rpt);
-+ per_cpu(root_pgt, cpu) = rpt;
-+
-+ rpt[root_table_offset(RO_MPT_VIRT_START)] =
-+ idle_pg_table[root_table_offset(RO_MPT_VIRT_START)];
-+ /* SH_LINEAR_PT inserted together with guest mappings. */
-+ /* PERDOMAIN inserted during context switch. */
-+ rpt[root_table_offset(XEN_VIRT_START)] =
-+ idle_pg_table[root_table_offset(XEN_VIRT_START)];
-+
-+ /* Install direct map page table entries for stack, IDT, and TSS. */
-+ for ( off = rc = 0; !rc && off < STACK_SIZE; off += PAGE_SIZE )
-+ rc = clone_mapping(__va(__pa(stack_base[cpu])) + off, rpt);
-+
-+ if ( !rc )
-+ rc = clone_mapping(idt_tables[cpu], rpt);
-+ if ( !rc )
-+ rc = clone_mapping(&per_cpu(init_tss, cpu), rpt);
-+
-+ return rc;
-+}
-+
-+static void cleanup_cpu_root_pgt(unsigned int cpu)
-+{
-+ root_pgentry_t *rpt = per_cpu(root_pgt, cpu);
-+ unsigned int r;
-+
-+ if ( !rpt )
-+ return;
-+
-+ per_cpu(root_pgt, cpu) = NULL;
-+
-+ for ( r = root_table_offset(DIRECTMAP_VIRT_START);
-+ r < root_table_offset(HYPERVISOR_VIRT_END); ++r )
-+ {
-+ l3_pgentry_t *l3t;
-+ unsigned int i3;
-+
-+ if ( !(root_get_flags(rpt[r]) & _PAGE_PRESENT) )
-+ continue;
-+
-+ l3t = l4e_to_l3e(rpt[r]);
-+
-+ for ( i3 = 0; i3 < L3_PAGETABLE_ENTRIES; ++i3 )
-+ {
-+ l2_pgentry_t *l2t;
-+ unsigned int i2;
-+
-+ if ( !(l3e_get_flags(l3t[i3]) & _PAGE_PRESENT) )
-+ continue;
-+
-+ ASSERT(!(l3e_get_flags(l3t[i3]) & _PAGE_PSE));
-+ l2t = l3e_to_l2e(l3t[i3]);
-+
-+ for ( i2 = 0; i2 < L2_PAGETABLE_ENTRIES; ++i2 )
-+ {
-+ if ( !(l2e_get_flags(l2t[i2]) & _PAGE_PRESENT) )
-+ continue;
-+
-+ ASSERT(!(l2e_get_flags(l2t[i2]) & _PAGE_PSE));
-+ free_xen_pagetable(l2e_to_l1e(l2t[i2]));
-+ }
-+
-+ free_xen_pagetable(l2t);
-+ }
-+
-+ free_xen_pagetable(l3t);
-+ }
-+
-+ free_xen_pagetable(rpt);
-+}
-+
- static void cpu_smpboot_free(unsigned int cpu)
- {
- unsigned int order, socket = cpu_to_socket(cpu);
-@@ -664,6 +848,8 @@ static void cpu_smpboot_free(unsigned int cpu)
- free_domheap_page(mfn_to_page(mfn));
- }
-
-+ cleanup_cpu_root_pgt(cpu);
-+
- order = get_order_from_pages(NR_RESERVED_GDT_PAGES);
- free_xenheap_pages(per_cpu(gdt_table, cpu), order);
-
-@@ -716,6 +902,9 @@ static int cpu_smpboot_alloc(unsigned int cpu)
- goto oom;
- memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
-
-+ if ( setup_cpu_root_pgt(cpu) )
-+ goto oom;
-+
- for ( stub_page = 0, i = cpu & ~(STUBS_PER_PAGE - 1);
- i < nr_cpu_ids && i <= (cpu | (STUBS_PER_PAGE - 1)); ++i )
- if ( cpu_online(i) && cpu_to_node(i) == node )
-@@ -770,6 +959,8 @@ static struct notifier_block cpu_smpboot_nfb = {
-
- void __init smp_prepare_cpus(unsigned int max_cpus)
- {
-+ int rc;
-+
- register_cpu_notifier(&cpu_smpboot_nfb);
-
- mtrr_aps_sync_begin();
-@@ -783,6 +974,11 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
-
- stack_base[0] = stack_start;
-
-+ rc = setup_cpu_root_pgt(0);
-+ if ( rc )
-+ panic("Error %d setting up PV root page table\n", rc);
-+ get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
-+
- set_nr_sockets();
-
- socket_cpumask = xzalloc_array(cpumask_t *, nr_sockets);
-@@ -847,6 +1043,8 @@ void __init smp_prepare_boot_cpu(void)
- {
- cpumask_set_cpu(smp_processor_id(), &cpu_online_map);
- cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
-+
-+ get_cpu_info()->xen_cr3 = 0;
- }
-
- static void
-diff --git a/xen/arch/x86/x86_64/asm-offsets.c b/xen/arch/x86/x86_64/asm-offsets.c
-index a3ae7a475f..4f2ba28520 100644
---- a/xen/arch/x86/x86_64/asm-offsets.c
-+++ b/xen/arch/x86/x86_64/asm-offsets.c
-@@ -137,6 +137,8 @@ void __dummy__(void)
- OFFSET(CPUINFO_processor_id, struct cpu_info, processor_id);
- OFFSET(CPUINFO_current_vcpu, struct cpu_info, current_vcpu);
- OFFSET(CPUINFO_cr4, struct cpu_info, cr4);
-+ OFFSET(CPUINFO_xen_cr3, struct cpu_info, xen_cr3);
-+ OFFSET(CPUINFO_pv_cr3, struct cpu_info, pv_cr3);
- DEFINE(CPUINFO_sizeof, sizeof(struct cpu_info));
- BLANK();
-
-diff --git a/xen/arch/x86/x86_64/compat/entry.S b/xen/arch/x86/x86_64/compat/entry.S
-index 7ee01597a3..f7e53fb3cb 100644
---- a/xen/arch/x86/x86_64/compat/entry.S
-+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -270,6 +270,17 @@ ENTRY(cstar_enter)
- pushq $0
- movl $TRAP_syscall, 4(%rsp)
- SAVE_ALL
-+
-+ GET_STACK_END(bx)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-+ neg %rcx
-+ jz .Lcstar_cr3_okay
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+ neg %rcx
-+ write_cr3 rcx, rdi, rsi
-+ movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+.Lcstar_cr3_okay:
-+
- GET_CURRENT(bx)
- movq VCPU_domain(%rbx),%rcx
- cmpb $0,DOMAIN_is_32bit_pv(%rcx)
-diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index cebb1e4f4f..d63e734bb3 100644
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -36,6 +36,32 @@ ENTRY(switch_to_kernel)
- /* %rbx: struct vcpu, interrupts disabled */
- restore_all_guest:
- ASSERT_INTERRUPTS_DISABLED
-+
-+ /* Copy guest mappings and switch to per-CPU root page table. */
-+ mov %cr3, %r9
-+ GET_STACK_END(dx)
-+ mov STACK_CPUINFO_FIELD(pv_cr3)(%rdx), %rdi
-+ movabs $PADDR_MASK & PAGE_MASK, %rsi
-+ movabs $DIRECTMAP_VIRT_START, %rcx
-+ mov %rdi, %rax
-+ and %rsi, %rdi
-+ and %r9, %rsi
-+ add %rcx, %rdi
-+ add %rcx, %rsi
-+ mov $ROOT_PAGETABLE_FIRST_XEN_SLOT, %ecx
-+ mov root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rsi), %r8
-+ mov %r8, root_table_offset(SH_LINEAR_PT_VIRT_START)*8(%rdi)
-+ rep movsq
-+ mov $ROOT_PAGETABLE_ENTRIES - \
-+ ROOT_PAGETABLE_LAST_XEN_SLOT - 1, %ecx
-+ sub $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
-+ ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rsi
-+ sub $(ROOT_PAGETABLE_FIRST_XEN_SLOT - \
-+ ROOT_PAGETABLE_LAST_XEN_SLOT - 1) * 8, %rdi
-+ rep movsq
-+ mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
-+ write_cr3 rax, rdi, rsi
-+
- RESTORE_ALL
- testw $TRAP_syscall,4(%rsp)
- jz iret_exit_to_guest
-@@ -70,6 +96,22 @@ iret_exit_to_guest:
- ALIGN
- /* No special register assumptions. */
- restore_all_xen:
-+ /*
-+ * Check whether we need to switch to the per-CPU page tables, in
-+ * case we return to late PV exit code (from an NMI or #MC).
-+ */
-+ GET_STACK_END(ax)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rax), %rdx
-+ mov STACK_CPUINFO_FIELD(pv_cr3)(%rax), %rax
-+ test %rdx, %rdx
-+ /*
-+ * Ideally the condition would be "nsz", but such doesn't exist,
-+ * so "g" will have to do.
-+ */
-+UNLIKELY_START(g, exit_cr3)
-+ write_cr3 rax, rdi, rsi
-+UNLIKELY_END(exit_cr3)
-+
- RESTORE_ALL adj=8
- iretq
-
-@@ -99,7 +141,18 @@ ENTRY(lstar_enter)
- pushq $0
- movl $TRAP_syscall, 4(%rsp)
- SAVE_ALL
-- GET_CURRENT(bx)
-+
-+ GET_STACK_END(bx)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-+ neg %rcx
-+ jz .Llstar_cr3_okay
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+ neg %rcx
-+ write_cr3 rcx, r11, r12
-+ movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+.Llstar_cr3_okay:
-+
-+ __GET_CURRENT(bx)
- testb $TF_kernel_mode,VCPU_thread_flags(%rbx)
- jz switch_to_kernel
-
-@@ -248,7 +301,18 @@ GLOBAL(sysenter_eflags_saved)
- pushq $0
- movl $TRAP_syscall, 4(%rsp)
- SAVE_ALL
-- GET_CURRENT(bx)
-+
-+ GET_STACK_END(bx)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-+ neg %rcx
-+ jz .Lsyse_cr3_okay
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+ neg %rcx
-+ write_cr3 rcx, rdi, rsi
-+ movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+.Lsyse_cr3_okay:
-+
-+ __GET_CURRENT(bx)
- cmpb $0,VCPU_sysenter_disables_events(%rbx)
- movq VCPU_sysenter_addr(%rbx),%rax
- setne %cl
-@@ -284,13 +348,23 @@ ENTRY(int80_direct_trap)
- movl $0x80, 4(%rsp)
- SAVE_ALL
-
-+ GET_STACK_END(bx)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rcx
-+ neg %rcx
-+ jz .Lint80_cr3_okay
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+ neg %rcx
-+ write_cr3 rcx, rdi, rsi
-+ movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%rbx)
-+.Lint80_cr3_okay:
-+
- cmpb $0,untrusted_msi(%rip)
- UNLIKELY_START(ne, msi_check)
- movl $0x80,%edi
- call check_for_unexpected_msi
- UNLIKELY_END(msi_check)
-
-- GET_CURRENT(bx)
-+ __GET_CURRENT(bx)
-
- /* Check that the callback is non-null. */
- leaq VCPU_int80_bounce(%rbx),%rdx
-@@ -441,9 +515,27 @@ ENTRY(dom_crash_sync_extable)
-
- ENTRY(common_interrupt)
- SAVE_ALL CLAC
-+
-+ GET_STACK_END(14)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
-+ mov %rcx, %r15
-+ neg %rcx
-+ jz .Lintr_cr3_okay
-+ jns .Lintr_cr3_load
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+ neg %rcx
-+.Lintr_cr3_load:
-+ write_cr3 rcx, rdi, rsi
-+ xor %ecx, %ecx
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+ testb $3, UREGS_cs(%rsp)
-+ cmovnz %rcx, %r15
-+.Lintr_cr3_okay:
-+
- CR4_PV32_RESTORE
- movq %rsp,%rdi
- callq do_IRQ
-+ mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- jmp ret_from_intr
-
- /* No special register assumptions. */
-@@ -461,6 +553,23 @@ ENTRY(page_fault)
- /* No special register assumptions. */
- GLOBAL(handle_exception)
- SAVE_ALL CLAC
-+
-+ GET_STACK_END(14)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
-+ mov %rcx, %r15
-+ neg %rcx
-+ jz .Lxcpt_cr3_okay
-+ jns .Lxcpt_cr3_load
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+ neg %rcx
-+.Lxcpt_cr3_load:
-+ write_cr3 rcx, rdi, rsi
-+ xor %ecx, %ecx
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+ testb $3, UREGS_cs(%rsp)
-+ cmovnz %rcx, %r15
-+.Lxcpt_cr3_okay:
-+
- handle_exception_saved:
- GET_CURRENT(bx)
- testb $X86_EFLAGS_IF>>8,UREGS_eflags+1(%rsp)
-@@ -525,6 +634,7 @@ handle_exception_saved:
- leaq exception_table(%rip),%rdx
- PERFC_INCR(exceptions, %rax, %rbx)
- callq *(%rdx,%rax,8)
-+ mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- testb $3,UREGS_cs(%rsp)
- jz restore_all_xen
- leaq VCPU_trap_bounce(%rbx),%rdx
-@@ -557,6 +667,7 @@ exception_with_ints_disabled:
- rep; movsq # make room for ec/ev
- 1: movq UREGS_error_code(%rsp),%rax # ec/ev
- movq %rax,UREGS_kernel_sizeof(%rsp)
-+ mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- jmp restore_all_xen # return to fixup code
-
- /* No special register assumptions. */
-@@ -634,6 +745,17 @@ ENTRY(double_fault)
- movl $TRAP_double_fault,4(%rsp)
- /* Set AC to reduce chance of further SMAP faults */
- SAVE_ALL STAC
-+
-+ GET_STACK_END(bx)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%rbx), %rbx
-+ test %rbx, %rbx
-+ jz .Ldblf_cr3_okay
-+ jns .Ldblf_cr3_load
-+ neg %rbx
-+.Ldblf_cr3_load:
-+ write_cr3 rbx, rdi, rsi
-+.Ldblf_cr3_okay:
-+
- movq %rsp,%rdi
- call do_double_fault
- BUG /* do_double_fault() shouldn't return. */
-@@ -652,10 +774,28 @@ ENTRY(nmi)
- movl $TRAP_nmi,4(%rsp)
- handle_ist_exception:
- SAVE_ALL CLAC
-+
-+ GET_STACK_END(14)
-+ mov STACK_CPUINFO_FIELD(xen_cr3)(%r14), %rcx
-+ mov %rcx, %r15
-+ neg %rcx
-+ jz .List_cr3_okay
-+ jns .List_cr3_load
-+ mov %rcx, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+ neg %rcx
-+.List_cr3_load:
-+ write_cr3 rcx, rdi, rsi
-+ movq $0, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
-+.List_cr3_okay:
-+
- CR4_PV32_RESTORE
- testb $3,UREGS_cs(%rsp)
- jz 1f
-- /* Interrupted guest context. Copy the context to stack bottom. */
-+ /*
-+ * Interrupted guest context. Clear the restore value for xen_cr3
-+ * and copy the context to stack bottom.
-+ */
-+ xor %r15, %r15
- GET_CPUINFO_FIELD(guest_cpu_user_regs,di)
- movq %rsp,%rsi
- movl $UREGS_kernel_sizeof/8,%ecx
-@@ -665,6 +805,7 @@ handle_ist_exception:
- movzbl UREGS_entry_vector(%rsp),%eax
- leaq exception_table(%rip),%rdx
- callq *(%rdx,%rax,8)
-+ mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- cmpb $TRAP_nmi,UREGS_entry_vector(%rsp)
- jne ret_from_intr
-
-diff --git a/xen/include/asm-x86/asm_defns.h b/xen/include/asm-x86/asm_defns.h
-index 6e5c079ad8..6cfdaa1aa0 100644
---- a/xen/include/asm-x86/asm_defns.h
-+++ b/xen/include/asm-x86/asm_defns.h
-@@ -93,9 +93,30 @@ void ret_from_intr(void);
- UNLIKELY_DONE(mp, tag); \
- __UNLIKELY_END(tag)
-
-+ .equ .Lrax, 0
-+ .equ .Lrcx, 1
-+ .equ .Lrdx, 2
-+ .equ .Lrbx, 3
-+ .equ .Lrsp, 4
-+ .equ .Lrbp, 5
-+ .equ .Lrsi, 6
-+ .equ .Lrdi, 7
-+ .equ .Lr8, 8
-+ .equ .Lr9, 9
-+ .equ .Lr10, 10
-+ .equ .Lr11, 11
-+ .equ .Lr12, 12
-+ .equ .Lr13, 13
-+ .equ .Lr14, 14
-+ .equ .Lr15, 15
-+
- #define STACK_CPUINFO_FIELD(field) (1 - CPUINFO_sizeof + CPUINFO_##field)
- #define GET_STACK_END(reg) \
-+ .if .Lr##reg > 8; \
-+ movq $STACK_SIZE-1, %r##reg; \
-+ .else; \
- movl $STACK_SIZE-1, %e##reg; \
-+ .endif; \
- orq %rsp, %r##reg
-
- #define GET_CPUINFO_FIELD(field, reg) \
-@@ -177,6 +198,15 @@ void ret_from_intr(void);
- #define ASM_STAC ASM_AC(STAC)
- #define ASM_CLAC ASM_AC(CLAC)
-
-+.macro write_cr3 val:req, tmp1:req, tmp2:req
-+ mov %cr4, %\tmp1
-+ mov %\tmp1, %\tmp2
-+ and $~X86_CR4_PGE, %\tmp1
-+ mov %\tmp1, %cr4
-+ mov %\val, %cr3
-+ mov %\tmp2, %cr4
-+.endm
-+
- #define CR4_PV32_RESTORE \
- 667: ASM_NOP5; \
- .pushsection .altinstr_replacement, "ax"; \
-diff --git a/xen/include/asm-x86/current.h b/xen/include/asm-x86/current.h
-index e6587e684c..397fa4c38f 100644
---- a/xen/include/asm-x86/current.h
-+++ b/xen/include/asm-x86/current.h
-@@ -42,6 +42,18 @@ struct cpu_info {
- struct vcpu *current_vcpu;
- unsigned long per_cpu_offset;
- unsigned long cr4;
-+ /*
-+ * Of the two following fields the latter is being set to the CR3 value
-+ * to be used on the given pCPU for loading whenever 64-bit PV guest
-+ * context is being entered. The value never changes once set.
-+ * The former is the value to restore when re-entering Xen, if any. IOW
-+ * its value being zero means there's nothing to restore. However, its
-+ * value can also be negative, indicating to the exit-to-Xen code that
-+ * restoring is not necessary, but allowing any nested entry code paths
-+ * to still know the value to put back into CR3.
-+ */
-+ unsigned long xen_cr3;
-+ unsigned long pv_cr3;
- /* get_stack_bottom() must be 16-byte aligned */
- };
-
-diff --git a/xen/include/asm-x86/processor.h b/xen/include/asm-x86/processor.h
-index ccd406a3fe..9906f38f2d 100644
---- a/xen/include/asm-x86/processor.h
-+++ b/xen/include/asm-x86/processor.h
-@@ -517,6 +517,7 @@ extern idt_entry_t idt_table[];
- extern idt_entry_t *idt_tables[];
-
- DECLARE_PER_CPU(struct tss_struct, init_tss);
-+DECLARE_PER_CPU(root_pgentry_t *, root_pgt);
-
- extern void init_int80_direct_trap(struct vcpu *v);
-
-diff --git a/xen/include/asm-x86/x86_64/page.h b/xen/include/asm-x86/x86_64/page.h
-index 589f22552e..afc77c3237 100644
---- a/xen/include/asm-x86/x86_64/page.h
-+++ b/xen/include/asm-x86/x86_64/page.h
-@@ -25,8 +25,8 @@
- /* These are architectural limits. Current CPUs support only 40-bit phys. */
- #define PADDR_BITS 52
- #define VADDR_BITS 48
--#define PADDR_MASK ((1UL << PADDR_BITS)-1)
--#define VADDR_MASK ((1UL << VADDR_BITS)-1)
-+#define PADDR_MASK ((_AC(1,UL) << PADDR_BITS) - 1)
-+#define VADDR_MASK ((_AC(1,UL) << VADDR_BITS) - 1)
-
- #define is_canonical_address(x) (((long)(x) >> 47) == ((long)(x) >> 63))
-
-@@ -117,6 +117,7 @@ typedef l4_pgentry_t root_pgentry_t;
- : (((_s) < ROOT_PAGETABLE_FIRST_XEN_SLOT) || \
- ((_s) > ROOT_PAGETABLE_LAST_XEN_SLOT)))
-
-+#define root_table_offset l4_table_offset
- #define root_get_pfn l4e_get_pfn
- #define root_get_flags l4e_get_flags
- #define root_get_intpte l4e_get_intpte
---
-2.11.3
-
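
The write_cr3 assembler macro added by the patch above deliberately toggles CR4.PGE around the CR3 load: a plain CR3 write leaves _PAGE_GLOBAL translations in the TLB, which would defeat the isolation, so PGE is cleared first to flush global entries as well. As a rough C sketch (not Xen source; it assumes Xen's read_cr4()/write_cr4() helpers and the X86_CR4_PGE constant):

    /* Sketch only: C rendition of the write_cr3 macro above. */
    static inline void write_cr3_full_flush(unsigned long val)
    {
        unsigned long cr4 = read_cr4();

        write_cr4(cr4 & ~X86_CR4_PGE);  /* clearing PGE flushes global TLB entries */
        asm volatile ( "mov %0, %%cr3" :: "r" (val) : "memory" );
        write_cr4(cr4);                 /* restore the caller's CR4 */
    }

Avoiding newer instructions keeps the sequence usable on old hardware, in keeping with the patch's backportability aim.
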
diff --git a/main/xen/0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch b/main/xen/0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
deleted file mode 100644
index acbbccc72e..0000000000
--- a/main/xen/0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
+++ /dev/null
@@ -1,124 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/IRQ: conditionally preserve irq <-> pirq mapping on map error paths
-
-Mappings that had been set up before should not be torn down when
-handling unrelated errors.
-
-This is part of XSA-237.
-
-Reported-by: HW42 <hw42@ipsumj.de>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -1252,7 +1252,8 @@ static int prepare_domain_irq_pirq(struc
- return -ENOMEM;
- }
- *pinfo = info;
-- return 0;
-+
-+ return !!err;
- }
-
- static void set_domain_irq_pirq(struct domain *d, int irq, struct pirq *pirq)
-@@ -1295,7 +1296,10 @@ int init_domain_irq_mapping(struct domai
- continue;
- err = prepare_domain_irq_pirq(d, i, i, &info);
- if ( err )
-+ {
-+ ASSERT(err < 0);
- break;
-+ }
- set_domain_irq_pirq(d, i, info);
- }
-
-@@ -1903,6 +1907,7 @@ int map_domain_pirq(
- struct pirq *info;
- struct irq_desc *desc;
- unsigned long flags;
-+ DECLARE_BITMAP(prepared, MAX_MSI_IRQS) = {};
-
- ASSERT(spin_is_locked(&d->event_lock));
-
-@@ -1946,8 +1951,10 @@ int map_domain_pirq(
- }
-
- ret = prepare_domain_irq_pirq(d, irq, pirq, &info);
-- if ( ret )
-+ if ( ret < 0 )
- goto revoke;
-+ if ( !ret )
-+ __set_bit(0, prepared);
-
- desc = irq_to_desc(irq);
-
-@@ -2019,8 +2026,10 @@ int map_domain_pirq(
- irq = create_irq(NUMA_NO_NODE);
- ret = irq >= 0 ? prepare_domain_irq_pirq(d, irq, pirq + nr, &info)
- : irq;
-- if ( ret )
-+ if ( ret < 0 )
- break;
-+ if ( !ret )
-+ __set_bit(nr, prepared);
- msi_desc[nr].irq = irq;
-
- if ( irq_permit_access(d, irq) != 0 )
-@@ -2053,15 +2062,15 @@ int map_domain_pirq(
- desc->msi_desc = NULL;
- spin_unlock_irqrestore(&desc->lock, flags);
- }
-- while ( nr-- )
-+ while ( nr )
- {
- if ( irq >= 0 && irq_deny_access(d, irq) )
- printk(XENLOG_G_ERR
- "dom%d: could not revoke access to IRQ%d (pirq %d)\n",
- d->domain_id, irq, pirq);
-- if ( info )
-+ if ( info && test_bit(nr, prepared) )
- cleanup_domain_irq_pirq(d, irq, info);
-- info = pirq_info(d, pirq + nr);
-+ info = pirq_info(d, pirq + --nr);
- irq = info->arch.irq;
- }
- msi_desc->irq = -1;
-@@ -2077,12 +2086,14 @@ int map_domain_pirq(
- spin_lock_irqsave(&desc->lock, flags);
- set_domain_irq_pirq(d, irq, info);
- spin_unlock_irqrestore(&desc->lock, flags);
-+ ret = 0;
- }
-
- done:
- if ( ret )
- {
-- cleanup_domain_irq_pirq(d, irq, info);
-+ if ( test_bit(0, prepared) )
-+ cleanup_domain_irq_pirq(d, irq, info);
- revoke:
- if ( irq_deny_access(d, irq) )
- printk(XENLOG_G_ERR
---- a/xen/arch/x86/physdev.c
-+++ b/xen/arch/x86/physdev.c
-@@ -185,7 +185,7 @@ int physdev_map_pirq(domid_t domid, int
- }
- else if ( type == MAP_PIRQ_TYPE_MULTI_MSI )
- {
-- if ( msi->entry_nr <= 0 || msi->entry_nr > 32 )
-+ if ( msi->entry_nr <= 0 || msi->entry_nr > MAX_MSI_IRQS )
- ret = -EDOM;
- else if ( msi->entry_nr != 1 && !iommu_intremap )
- ret = -EOPNOTSUPP;
---- a/xen/include/asm-x86/msi.h
-+++ b/xen/include/asm-x86/msi.h
-@@ -55,6 +55,8 @@
- /* MAX fixed pages reserved for mapping MSIX tables. */
- #define FIX_MSIX_MAX_PAGES 512
-
-+#define MAX_MSI_IRQS 32 /* limited by MSI capability struct properties */
-+
- struct msi_info {
- u16 seg;
- u8 bus;
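
The heart of the fix above is a changed calling convention: prepare_domain_irq_pirq() now returns a negative value on error, 0 when it set up a new mapping, and a positive value when a usable mapping already existed, and the error paths consult a "prepared" bitmap so that only mappings created by the current call get torn down. A condensed sketch of that pattern, with prepare()/cleanup() as hypothetical stand-ins for the real functions:

    #define MAX_MSI_IRQS 32        /* from the msi.h hunk above */

    int prepare(unsigned int nr);  /* hypothetical: < 0, 0, or > 0 as described */
    void cleanup(unsigned int nr); /* hypothetical */

    static int map_pirqs(unsigned int count)
    {
        DECLARE_BITMAP(prepared, MAX_MSI_IRQS) = {};
        unsigned int nr;
        int rc = 0;

        for ( nr = 0; nr < count; ++nr )
        {
            rc = prepare(nr);
            if ( rc < 0 )
                break;
            if ( rc == 0 )              /* created here, so ... */
                __set_bit(nr, prepared);
            rc = 0;
        }

        if ( rc )                       /* ... unwind only our own work */
            while ( nr-- )
                if ( test_bit(nr, prepared) )
                    cleanup(nr);

        return rc;
    }

Pre-existing mappings thus survive an unrelated failure instead of being torn out from under the domain.
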
diff --git a/main/xen/0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch b/main/xen/0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
deleted file mode 100644
index 405c4f78ad..0000000000
--- a/main/xen/0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
+++ /dev/null
@@ -1,165 +0,0 @@
-From 72428e7318bf6368883622142344dbebd895c161 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Wed, 17 Jan 2018 17:24:59 +0100
-Subject: [PATCH 4/4] x86: allow Meltdown band-aid to be disabled
-
-First of all we don't need it on AMD systems. Additionally allow its use
-to be controlled by command line option. For best backportability, this
-intentionally doesn't use alternative instruction patching to achieve
-the intended effect - while we likely want it, this will be later
-follow-up.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-master commit: e871e80c38547d9faefc6604532ba3e985e65873
-master date: 2018-01-16 17:50:59 +0100
-
-(cherry picked from commit e19d0af4ee2ae9e42a85db639fd6848e72f5658b)
----
- docs/misc/xen-command-line.markdown | 12 ++++++++++++
- xen/arch/x86/domain.c | 7 +++++--
- xen/arch/x86/mm.c | 2 +-
- xen/arch/x86/smpboot.c | 17 ++++++++++++++---
- xen/arch/x86/x86_64/entry.S | 2 ++
- 5 files changed, 34 insertions(+), 6 deletions(-)
-
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index 73f5265fc6..ee9aa7b8d5 100644
---- a/docs/misc/xen-command-line.markdown
-+++ b/docs/misc/xen-command-line.markdown
-@@ -1602,6 +1602,18 @@ In the case that x2apic is in use, this option switches between physical and
- clustered mode. The default, given no hint from the **FADT**, is cluster
- mode.
-
-+### xpti
-+> `= <boolean>`
-+
-+> Default: `false` on AMD hardware
-+> Default: `true` everywhere else
-+
-+Override default selection of whether to isolate 64-bit PV guest page
-+tables.
-+
-+** WARNING: Not yet a complete isolation implementation, but better than
-+nothing. **
-+
- ### xsave
- > `= <boolean>`
-
-diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
-index 512b77a5d0..534bf0161f 100644
---- a/xen/arch/x86/domain.c
-+++ b/xen/arch/x86/domain.c
-@@ -1926,12 +1926,15 @@ static void paravirt_ctxt_switch_from(struct vcpu *v)
-
- static void paravirt_ctxt_switch_to(struct vcpu *v)
- {
-+ root_pgentry_t *root_pgt = this_cpu(root_pgt);
- unsigned long cr4;
-
- switch_kernel_stack(v);
-
-- this_cpu(root_pgt)[root_table_offset(PERDOMAIN_VIRT_START)] =
-- l4e_from_page(v->domain->arch.perdomain_l3_pg, __PAGE_HYPERVISOR_RW);
-+ if ( root_pgt )
-+ root_pgt[root_table_offset(PERDOMAIN_VIRT_START)] =
-+ l4e_from_page(v->domain->arch.perdomain_l3_pg,
-+ __PAGE_HYPERVISOR_RW);
-
- cr4 = pv_guest_cr4_to_real_cr4(v);
- if ( unlikely(cr4 != read_cr4()) )
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index 22cd8550fc..b5eac345af 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -3846,7 +3846,7 @@ long do_mmu_update(
- rc = mod_l4_entry(va, l4e_from_intpte(req.val), mfn,
- cmd == MMU_PT_UPDATE_PRESERVE_AD, v);
- if ( !rc )
-- sync_guest = 1;
-+ sync_guest = !!this_cpu(root_pgt);
- break;
- case PGT_writable_page:
- perfc_incr(writable_mmu_updates);
-diff --git a/xen/arch/x86/smpboot.c b/xen/arch/x86/smpboot.c
-index dc212710cd..bed2758625 100644
---- a/xen/arch/x86/smpboot.c
-+++ b/xen/arch/x86/smpboot.c
-@@ -320,7 +320,7 @@ void start_secondary(void *unused)
- spin_debug_disable();
-
- get_cpu_info()->xen_cr3 = 0;
-- get_cpu_info()->pv_cr3 = __pa(this_cpu(root_pgt));
-+ get_cpu_info()->pv_cr3 = this_cpu(root_pgt) ? __pa(this_cpu(root_pgt)) : 0;
-
- load_system_tables();
-
-@@ -729,14 +729,20 @@ static int clone_mapping(const void *ptr, root_pgentry_t *rpt)
- return 0;
- }
-
-+static __read_mostly int8_t opt_xpti = -1;
-+boolean_param("xpti", opt_xpti);
- DEFINE_PER_CPU(root_pgentry_t *, root_pgt);
-
- static int setup_cpu_root_pgt(unsigned int cpu)
- {
-- root_pgentry_t *rpt = alloc_xen_pagetable();
-+ root_pgentry_t *rpt;
- unsigned int off;
- int rc;
-
-+ if ( !opt_xpti )
-+ return 0;
-+
-+ rpt = alloc_xen_pagetable();
- if ( !rpt )
- return -ENOMEM;
-
-@@ -974,10 +980,14 @@ void __init smp_prepare_cpus(unsigned int max_cpus)
-
- stack_base[0] = stack_start;
-
-+ if ( opt_xpti < 0 )
-+ opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
-+
- rc = setup_cpu_root_pgt(0);
- if ( rc )
- panic("Error %d setting up PV root page table\n", rc);
-- get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
-+ if ( per_cpu(root_pgt, 0) )
-+ get_cpu_info()->pv_cr3 = __pa(per_cpu(root_pgt, 0));
-
- set_nr_sockets();
-
-@@ -1045,6 +1055,7 @@ void __init smp_prepare_boot_cpu(void)
- cpumask_set_cpu(smp_processor_id(), &cpu_present_map);
-
- get_cpu_info()->xen_cr3 = 0;
-+ get_cpu_info()->pv_cr3 = 0;
- }
-
- static void
-diff --git a/xen/arch/x86/x86_64/entry.S b/xen/arch/x86/x86_64/entry.S
-index d63e734bb3..2a569952e3 100644
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -45,6 +45,7 @@ restore_all_guest:
- movabs $DIRECTMAP_VIRT_START, %rcx
- mov %rdi, %rax
- and %rsi, %rdi
-+ jz .Lrag_keep_cr3
- and %r9, %rsi
- add %rcx, %rdi
- add %rcx, %rsi
-@@ -61,6 +62,7 @@ restore_all_guest:
- rep movsq
- mov %r9, STACK_CPUINFO_FIELD(xen_cr3)(%rdx)
- write_cr3 rax, rdi, rsi
-+.Lrag_keep_cr3:
-
- RESTORE_ALL
- testw $TRAP_syscall,4(%rsp)
---
-2.11.3
-
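
Stripped of the plumbing, the patch above reduces the decision to a three-valued flag resolved once at boot. A sketch using the patch's own names (boot_cpu_data and X86_VENDOR_AMD are existing Xen symbols; the wrapper function is hypothetical):

    /* -1 = auto (no "xpti=" given), 0 = off, 1 = on. */
    static int8_t opt_xpti = -1;

    static void resolve_xpti_default(void)
    {
        if ( opt_xpti < 0 )
            opt_xpti = boot_cpu_data.x86_vendor != X86_VENDOR_AMD;
    }

With xpti=false on the hypervisor command line, setup_cpu_root_pgt() bails out early, pv_cr3 stays 0, and the .Lrag_keep_cr3 short-circuit added to restore_all_guest leaves CR3 untouched on guest entry.
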
diff --git a/main/xen/0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch b/main/xen/0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
deleted file mode 100644
index 622c6c7f25..0000000000
--- a/main/xen/0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/FLASK: fix unmap-domain-IRQ XSM hook
-
-The caller and the FLASK implementation of xsm_unmap_domain_irq()
-disagreed about what the "data" argument points to in the MSI case:
-Change both sides to pass/take a PCI device.
-
-This is part of XSA-237.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/irq.c
-+++ b/xen/arch/x86/irq.c
-@@ -2144,7 +2144,8 @@ int unmap_domain_pirq(struct domain *d,
- nr = msi_desc->msi.nvec;
- }
-
-- ret = xsm_unmap_domain_irq(XSM_HOOK, d, irq, msi_desc);
-+ ret = xsm_unmap_domain_irq(XSM_HOOK, d, irq,
-+ msi_desc ? msi_desc->dev : NULL);
- if ( ret )
- goto done;
-
---- a/xen/xsm/flask/hooks.c
-+++ b/xen/xsm/flask/hooks.c
-@@ -915,8 +915,8 @@ static int flask_unmap_domain_msi (struc
- u32 *sid, struct avc_audit_data *ad)
- {
- #ifdef CONFIG_HAS_PCI
-- struct msi_info *msi = data;
-- u32 machine_bdf = (msi->seg << 16) | (msi->bus << 8) | msi->devfn;
-+ const struct pci_dev *pdev = data;
-+ u32 machine_bdf = (pdev->seg << 16) | (pdev->bus << 8) | pdev->devfn;
-
- AVC_AUDIT_DATA_INIT(ad, DEV);
- ad->device = machine_bdf;
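
Both sides now pass a struct pci_dev, from which FLASK packs the 32-bit segment/bus/devfn identifier it audits. A worked example of that packing (the device address below is hypothetical):

    #include <stdint.h>

    /* Mirrors the machine_bdf computation in the hunk above. */
    static uint32_t machine_bdf(uint16_t seg, uint8_t bus, uint8_t devfn)
    {
        return ((uint32_t)seg << 16) | ((uint32_t)bus << 8) | devfn;
    }

    /* For PCI device 0000:03:00.1: devfn = (slot << 3) | func = 0x01,
     * so machine_bdf(0, 3, 0x01) == 0x00000301. */
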
diff --git a/main/xen/APKBUILD b/main/xen/APKBUILD
index 58b98cb716..d21b6bc4d2 100644
--- a/main/xen/APKBUILD
+++ b/main/xen/APKBUILD
@@ -2,8 +2,8 @@
# Contributor: Roger Pau Monne <roger.pau@entel.upc.edu>
# Maintainer: William Pitcock <nenolod@dereferenced.org>
pkgname=xen
-pkgver=4.7.3
-pkgrel=9
+pkgver=4.7.6
+pkgrel=0
pkgdesc="Xen hypervisor"
url="http://www.xen.org/"
arch="x86_64 armhf"
@@ -89,7 +89,7 @@ makedepends="$depends_dev autoconf automake libtool "
# - CVE-2017-15596 XSA-235
# - CVE-2017-15597 XSA-236
# - CVE-2017-15590 XSA-237
-# - XSA-238
+# - CVE-2017-15591 XSA-238
# - CVE-2017-15589 XSA-239
# - CVE-2017-15595 XSA-240
# - CVE-2017-15588 XSA-241
@@ -98,21 +98,29 @@ makedepends="$depends_dev autoconf automake libtool "
# - CVE-2017-15594 XSA-244
# - CVE-2017-17046 XSA-245
# 4.7.3-r4:
-# - XSA-254 XPTI
+# - CVE-2017-5753
+# - CVE-2017-5715
+# - CVE-2017-5754 XSA-254
# 4.7.3-r5:
# - CVE-2017-17566 XSA-248
# - CVE-2017-17563 XSA-249
# - CVE-2017-17564 XSA-250
# - CVE-2017-17565 XSA-251
# 4.7.3-r7:
-# - CVE-2018-7540, XSA-252
-# - CVE-2018-7541, XSA-255
+# - CVE-2018-7540 XSA-252
+# - CVE-2018-7541 XSA-255
# 4.7.3-r9:
# - CVE-2018-10472 XSA-258
# - CVE-2018-10471 XSA-259
# - CVE-2018-8897 XSA-260
# - CVE-2018-10982 XSA-261
# - CVE-2018-10981 XSA-262
+# 4.7.6-r0:
+# - CVE-2018-3639 XSA-263
+# - CVE-2018-12891 XSA-264
+# - CVE-2018-12893 XSA-265
+# - CVE-2018-12892 XSA-266
+# - CVE-2018-3665 XSA-267
case "$CARCH" in
x86*)
@@ -156,61 +164,11 @@ source="https://downloads.xenproject.org/release/xen/$pkgver/$pkgname-$pkgver.ta
http://xenbits.xen.org/xen-extfiles/zlib-$_ZLIB_VERSION.tar.gz
http://xenbits.xen.org/xen-extfiles/ipxe-git-$_IPXE_GIT_TAG.tar.gz
- xsa215.patch
- xsa226-4.7.patch
- xsa227.patch
- xsa228-4.8.patch
- xsa230.patch
- xsa231-4.7.patch
- xsa232.patch
- xsa233.patch
- xsa234-4.6.patch
- xsa235-4.7.patch
- xsa236-4.9.patch
- 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
- 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
- 0003-x86-MSI-disallow-redundant-enabling.patch
- 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
- 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
- xsa238.patch
- xsa239.patch
- 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
- 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
- xsa241-4.8.patch
- xsa242-4.9.patch
- xsa243-4.7-1.patch
- xsa243-2.patch
- xsa244-4.7.patch
- xsa246-4.7.patch
- 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
- 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
- xsa248-4.8.patch
- xsa249.patch
- xsa250.patch
- xsa251-4.8.patch
- xsa252-4.7.patch
- xsa255-4.7-1.patch
- xsa255-4.7-2.patch
-
- 0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
- 0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
- 0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
- 0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
-
- xsa258-4.8.patch
- xsa259.patch
- xsa260-1.patch
- xsa260-2.patch
- xsa260-3.patch
- xsa260-4.patch
- xsa261-4.7.patch
- xsa262-4.9.patch
qemu-coroutine-gthread.patch
qemu-xen_paths.patch
hotplug-vif-vtrill.patch
- rombios-no-pie.patch
0001-ipxe-dont-clobber-ebp.patch
musl-support.patch
@@ -426,7 +384,7 @@ hypervisor() {
mv "$pkgdir"/boot "$subpkgdir"/
}
-md5sums="d0bab56ef9c067bce3218a9f7a4b6f9c xen-4.7.3.tar.gz
+md5sums="85924a96df2497a5a2343a1a05ed3da1 xen-4.7.6.tar.gz
dd60683d7057917e34630b4a787932e8 gmp-4.3.2.tar.bz2
cd3f3eb54446be6003156158d51f4884 grub-0.97.tar.gz
36cc57650cffda9a0269493be2a169bb lwip-1.3.0.tar.gz
@@ -436,57 +394,9 @@ cec05e7785497c5e19da2f114b934ffd pciutils-2.2.9.tar.bz2
e26becb8a6a2b6695f6b3e8097593db8 tpm_emulator-0.7.4.tar.gz
debc62758716a169df9f62e6ab2bc634 zlib-1.2.3.tar.gz
7496268cebf47d5c9ccb0696e3b26065 ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-e5847b6c87c60de11ba7a128d7babe10 xsa215.patch
-7e9cd75651d13afebf35463ba810f1b6 xsa226-4.7.patch
-24e95e93fa29899b7b37fb9595b1d615 xsa227.patch
-f66e7149f5a579ead59cc416d23bdefb xsa228-4.8.patch
-9230643cea68c0dea122b3631078059a xsa230.patch
-0144a1d4b2c989231f36a7828c52261f xsa231-4.7.patch
-d582d6a402935ea1aa2f6d9435ffef52 xsa232.patch
-2f027cddb9401ca25add6ae229cb52c6 xsa233.patch
-c07c078bb0b94783741c998d3afdabd8 xsa234-4.6.patch
-fd6fb62075d0fc4ba9a14bbbc010a41f xsa235-4.7.patch
-f8797c74a1186f610835bd8bc5daaddb xsa236-4.9.patch
-e96050a4d241ed8dc0c8a39beb6f3284 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
-24a503341fca703f851182bdf2bbbb3c 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
-9f849dab12ad214be8eb6682a0873856 0003-x86-MSI-disallow-redundant-enabling.patch
-f3689ebfc50bb463c35a2556e39f762a 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
-5d119f231b38e61b2178cac637dc42d3 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
-1d7afbe5d47d87aebb2b4022d0be338e xsa238.patch
-5c564209bd30cade592c8e3f39edc279 xsa239.patch
-4642495f2b5a1feeb42d014a04042246 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
-0a50531d1ce5e29e01bdcc468cb4c597 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
-c4e34874e6169cf3b68b0f1508e282a2 xsa241-4.8.patch
-e25e98c3e699d90ad617f6f7e0106a5c xsa242-4.9.patch
-f7d66f6e40dec159b073000baf8a2b84 xsa243-4.7-1.patch
-93ffcf8e416b426f1b0088e61f538a3a xsa243-2.patch
-1c81aebd57755934d0b88c082fe9020a xsa244-4.7.patch
-ed39b07fbee1f07d767921732806578b xsa246-4.7.patch
-5681e5bba8270223bf7ccd0beaf3ad12 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
-54a258e7b548fa656e7babc73fca394d 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
-389e8bf7400c826a957e82d1e80cd086 xsa248-4.8.patch
-bf2dee471f8b9d235005f62f8db581c1 xsa249.patch
-06cdce66d0fb6ae33e0fa05af8c7fc71 xsa250.patch
-486252987844bc59c4fb468cd485fdf3 xsa251-4.8.patch
-efeb810fffa753885ac657fd6b97abf7 xsa252-4.7.patch
-2adecabe0bff20c185be3893880940d3 xsa255-4.7-1.patch
-87544228dea9b96611239d7ff0f48223 xsa255-4.7-2.patch
-1d20f4010215c17329bfaa6fe015aa91 0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
-0b606cb7490fdb1816294e4748440f8a 0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
-d2017505b704d00b3d8bb91ff9c857ae 0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
-7b507a4b0f6fc62862ab45a46a1f2903 0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
-8305aad22ea230e1f921a6d6c6263743 xsa258-4.8.patch
-25aed044d315e02c1822b8802fb30ca1 xsa259.patch
-d6f0691c797496291ae5f9101d5d3d31 xsa260-1.patch
-331ec87a892835cee94f4bbec00d3f5b xsa260-2.patch
-45130a2d6c47901e75a614a500cc7f7d xsa260-3.patch
-07eeb2e08e445d9a33305d47aef1c099 xsa260-4.patch
-9db85300961126d32e1c3540c78bb4ff xsa261-4.7.patch
-71a30d34a4d5e8fb7fcb1f183e398099 xsa262-4.9.patch
de1a3db370b87cfb0bddb51796b50315 qemu-coroutine-gthread.patch
08bfdf8caff5d631f53660bf3fd4edaf qemu-xen_paths.patch
e449bb3359b490804ffc7b0ae08d62a0 hotplug-vif-vtrill.patch
-5fab5487fe92fa29302db9ccb04af564 rombios-no-pie.patch
3a04998db5cc3c5c86f3b46e97e9cd82 0001-ipxe-dont-clobber-ebp.patch
0984e3000de17a6d14b8014a3ced46a4 musl-support.patch
513456607a2adfaa0baf1e3ae5124b23 musl-hvmloader-fix-stdint.patch
@@ -518,7 +428,7 @@ dcdd1de2c29e469e834a02ede4f47806 xendomains.confd
9df68ac65dc3f372f5d61183abdc83ff xen-consoles.logrotate
6a2f777c16678d84039acf670d86fff6 xenqemu.confd
e1c9e1c83a5cc49224608a48060bd677 xenqemu.initd"
-sha256sums="5b5385b476e59e4cf31ecc6dd605df38814b83432b8e8d917f18c8edfdfb708f xen-4.7.3.tar.gz
+sha256sums="3ae4ddb4faa6c10dd020f12cfd868aef84efefa42e58e708cf4f4818e30aa788 xen-4.7.6.tar.gz
936162c0312886c21581002b79932829aa048cfaf9937c6265aeaa14f1cd1775 gmp-4.3.2.tar.bz2
4e1d15d12dbd3e9208111d6b806ad5a9857ca8850c47877d36575b904559260b grub-0.97.tar.gz
772e4d550e07826665ed0528c071dd5404ef7dbe1825a38c8adbc2a00bca948f lwip-1.3.0.tar.gz
@@ -528,57 +438,9 @@ f60ae61cfbd5da1d849d0beaa21f593c38dac9359f0b3ddc612f447408265b24 pciutils-2.2.9
4e48ea0d83dd9441cc1af04ab18cd6c961b9fa54d5cbf2c2feee038988dea459 tpm_emulator-0.7.4.tar.gz
1795c7d067a43174113fdf03447532f373e1c6c57c08d61d9e4e9be5e244b05e zlib-1.2.3.tar.gz
632ce8c193ccacc3012bd354bdb733a4be126f7c098e111930aa41dad537405c ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-5be4ff661dd22890b0120f86beee3ec809e2a29f833db8c48bd70ce98e9691ee xsa215.patch
-fffcc0a4428723e6aea391ff4f1d27326b5a3763d2308cbde64e6a786502c702 xsa226-4.7.patch
-9923a47e5f86949800887596f098954a08ef73a01d74b1dbe16cab2e6b1fabb2 xsa227.patch
-5a7416f15ac9cd7cace354b6102ff58199fe0581f65a36a36869650c71784e48 xsa228-4.8.patch
-77a73f1c32d083e315ef0b1bbb119cb8840ceb5ada790cad76cbfb9116f725cc xsa230.patch
-ce29b56a0480f4835b37835b351e704d204bb0ccd22325f487127aa2776cc2cf xsa231-4.7.patch
-5068a78293daa58557c30c95141b775becfb650de6a5eda0d82a4a321ced551c xsa232.patch
-f721cc49ba692b2f36299b631451f51d7340b8b4732f74c98f01cb7a80d8662b xsa233.patch
-3df4ce173196111c1ff849039ea4927c0b4bd632b08a501fb26f64e31b951fba xsa234-4.6.patch
-f30848eee71e66687b421b87be1d8e3f454c0eb395422546c62a689153d1e31c xsa235-4.7.patch
-b6fe5604af26e93184f30127ebbb644f127ecc7116b093c161ca3044b44d2fe9 xsa236-4.9.patch
-1ae6aefb86ba0c48a45ecc14ff56ea0bc3d9d354937668bcacadaed1225017a8 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
-bf2ca9cb99ee64d7db77d628cec1a84684c360fd36de433cbc78fbcde8095319 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
-494a79332fc5f854f0dc7606669201717a41e5b89b44db2fb30607a326930bfb 0003-x86-MSI-disallow-redundant-enabling.patch
-9a38899afd728d504382954de28657aa82af7da352eb4e45a5e615bd646834c5 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
-fef5c77f19e2c6229912f1fd19cbcb41c1ce554ff53be22198b2f34ea7a27314 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
-85d3f9713bef1bc86c682857dbd7388a1d1f20089363ddfc4cb9ecbd88eaffec xsa238.patch
-087a8b3cf7ecbdbde593033c127cbcf6c37f532bf33d90f72c19e493970a799c xsa239.patch
-5a9b8e0a4e55482826e6cf820ea01fbf6674fecf19b101f4578396e3fa98a8fd 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
-acf9744b853c1c6ac071bcf5776d8d9463781bbdf658c2fecc59ee338c094750 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
-443a5b0818045ada44fad0370ac01af0c96181be5a4078ae3b2575799e4a4e5b xsa241-4.8.patch
-5e66b6b1d1cd400905d3abd3478144539c3afa24f5a744a11809d9c5eb517b98 xsa242-4.9.patch
-465ba9e3293591a3c84c122ffd73474fe96483f5e21565440d5fbc207fa4c4a9 xsa243-4.7-1.patch
-013cff90312305b7f4ce6818a25760bcfca61bfadd860b694afa04d56e60c563 xsa243-2.patch
-4d8cf754f760ef05488e9fb25a7ebd9a7e46f3742e91eee1a8385fd1e611ea8c xsa244-4.7.patch
-b41550688e88a2a7a22349a07168f3a3ddf6fad8b3389fa27de44ae6731b6a8b xsa246-4.7.patch
-d149342e4d40dfb550f8af6d05cd20a34889d64fb33f967fe77cf89b4ea8504a 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
-3c8a7bfdb408af0224cf6f5471b0fd9dd1a9a1ded7207e427b02268ca2906aa6 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
-3bbd9fd92e5ffab1ddd7ff804bfbab09c1c654af3aa7f80f742f321da120b715 xsa248-4.8.patch
-e99066b0171d4757c6a66e1223aabe01e990de2d0dc50416936e064e6e750d00 xsa249.patch
-adf4d8242dbddb4ec52fe1effc1f8b233d33d8d6a59c1bb677dcc6e2ed2bf711 xsa250.patch
-f8cecf013a3628038e0a4566778852a560b25a1ce2f3872a989087ab2fc9a913 xsa251-4.8.patch
-14f37eb6b7a9fb19b258ca3c0e2da71dbc4240e6273137d5eb4003b122101aa6 xsa252-4.7.patch
-9bfc4a33a0faeb36aec8449ea940cef52d523cc3d13529b4eeaae64bf5a7b644 xsa255-4.7-1.patch
-6d95ceb54298de7863dc7133c0f3adf85f7da9b8d326146ff46e641194a47fc0 xsa255-4.7-2.patch
-b10a98962ab23d9fe8203288e04071cde8bc92aad71b1afb366b0e9f1812930f 0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
-db48f9f7ffbd7767083af19f19ca184f386ddc5736b661ad99c1776ced2f2fa0 0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
-0cd7c0e6211cafd8428e2b10a8ea2057a6f785ae3f86e4005c9ebafc887c6d63 0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
-2322c76a57baadbfd1b9bc39c3af5bf29e7ba9b537d4fd3e11a6462ef8c89ec0 0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
-ebba2f1f084249cd1e1c2f59e338412161884c31c83dbba03fc1e10bf4ba57a1 xsa258-4.8.patch
-ff2efb5eb2502ded988d0aa15351030a15494a9e2223eafbb88377a8e4d39dcb xsa259.patch
-0c2552a36737975f4f46d7054b49fd018b68c302cef3b39b27c2f17cc60eb531 xsa260-1.patch
-a92ef233a83923d6a18d51528ff28630ae3f1134ee76f2347397e22da9c84c24 xsa260-2.patch
-8469af8ba5b6722738b27c328eccc1d341af49c2e2bb23fe7b327a3349267b0e xsa260-3.patch
-0327c2ef7984a4aa000849c68a01181fdb01962637e78629c6fb34bb95414a74 xsa260-4.patch
-98fb28bac871aae7c2f897a5506a2b03f340bf122a3a7f65aa65f3b3c9a525b4 xsa261-4.7.patch
-ec2b6ba9ed1d5e97fed4b54767160a75fe19d67e4519f716739bebdb78816191 xsa262-4.9.patch
3941f99b49c7e8dafc9fae8aad2136a14c6d84533cd542cc5f1040a41ef7c6fe qemu-coroutine-gthread.patch
e4e5e838e259a3116978aabbcebc1865a895179a7fcbf4bad195c83e9b4c0f98 qemu-xen_paths.patch
dd1e784bc455eb62cb85b3fa24bfc34f575ceaab9597ef6a2f1ee7ff7b3cae0a hotplug-vif-vtrill.patch
-74cb62a4614dd042ea9169112fb677bfef751a760aae34c7e73391fa857a8429 rombios-no-pie.patch
ac8bbd0b864c7de278fd9b68392b71863581ec21622c2e9b87e501e492e414d3 0001-ipxe-dont-clobber-ebp.patch
2fea4ceec8872f5560023fa135e3ff03d6deee4299e53d3a33ec59c31779b2c5 musl-support.patch
479b9605e85c865be6117b6d1993124dbbb7da7f95d0e896e4c0fe5cdfeb74d3 musl-hvmloader-fix-stdint.patch
@@ -610,7 +472,7 @@ d13719093a2c3824525f36ac91ac3c9bd1154e5ba0974e5441e4a2ab5e883521 xenconsoled.in
0da87a4b9094f934e3de937e8ef8d3afc752e76793aa3d730182d0241e118b19 xen-consoles.logrotate
4cfcddcade5d055422ab4543e8caa6e5c5eee7625c41880a9000b7a87c7c424e xenqemu.confd
c92bbb1166edd61141fdf678116974209c4422daf373cdd5bc438aa4adb25b8d xenqemu.initd"
-sha512sums="df596bef7f0f0e7f35246fb025913721dae5aa91da27f1b47a49256e9faa8f7dcb80e8c4679345c19614093c02059b6ced54fc51e6900a372b0e76b4ad827126 xen-4.7.3.tar.gz
+sha512sums="53cc659908b9f229c8e352a3027212a1b3e8c8f5cb0a48d32f9e5fc6418fc2e23987394df4f465781c050179b763a31e6e397412915240e2ad5f42be3e41f7a2 xen-4.7.6.tar.gz
2e0b0fd23e6f10742a5517981e5171c6e88b0a93c83da701b296f5c0861d72c19782daab589a7eac3f9032152a0fc7eff7f5362db8fccc4859564a9aa82329cf gmp-4.3.2.tar.bz2
c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a3628bd00ba4d14a54742bc04848110eb3ae8ca25dbfbaabadb grub-0.97.tar.gz
1465b58279af1647f909450e394fe002ca165f0ff4a0254bfa9fe0e64316f50facdde2729d79a4e632565b4500cf4d6c74192ac0dd3bc9fe09129bbd67ba089d lwip-1.3.0.tar.gz
@@ -620,57 +482,9 @@ c2bc9ffc8583aeae71cee9ddcc4418969768d4e3764d47307da54f93981c0109fb07d84b061b3a36
4928b5b82f57645be9408362706ff2c4d9baa635b21b0d41b1c82930e8c60a759b1ea4fa74d7e6c7cae1b7692d006aa5cb72df0c3b88bf049779aa2b566f9d35 tpm_emulator-0.7.4.tar.gz
021b958fcd0d346c4ba761bcf0cc40f3522de6186cf5a0a6ea34a70504ce9622b1c2626fce40675bc8282cf5f5ade18473656abc38050f72f5d6480507a2106e zlib-1.2.3.tar.gz
c5cb1cdff40d2d71fd3e692a9d0efadf2aa17290daf5195391a1c81ddd9dfc913a8e44d5be2b12be85b2a5565ea31631c99c7053564f2fb2225c80ea0bb0e4a4 ipxe-git-9a93db3f0947484e30e753bbd61a10b17336e20e.tar.gz
-3e6a2589cc6ff4b8f15ae1aaac5d71b601bfb88e88bbc0b28047a3afd62a10a1bf5cd13bcb919fec687f155c4cd3fe50e50868601896fa34dde65e6d7a3b6e2b xsa215.patch
-640fb5acd478daf33446933c572389740e91e23c26dfe19ec123e76cb29f514deb12e438a134f20d3e393330d534ee863f257ba650917b256c430a5c8e0f8379 xsa226-4.7.patch
-7d66494e833d46f8a213af0f2b107a12617d5e8b45c3b07daee229c75bd6aad98284bc0e19f15706d044b58273cc7f0c193ef8553faa22fadeae349689e763c8 xsa227.patch
-e7c6f248979e23a681aad07357baace71bee56d64c0897e7b49d4c2aaff6e4784a0e649d407a388662ed57c157bd4024cca7155e9fba2a494539d51345bdba90 xsa228-4.8.patch
-df174a1675f74b73e78bc3cb1c9f16536199dfd1922c0cc545a807e92bc24941a816891838258e118f477109548487251a7eaccb2d1dd9b6994c8c76fc5b058f xsa230.patch
-c1c05c2ec68486a3721ae9c305a4f7a01a1c38a62f468ba97be22ee583b5690b92fa1cb3c8a4ea657429483d844ee8dd66f96d6f602cabeaeb50db4a459317b4 xsa231-4.7.patch
-fb742225a4f3dbf2a574c4a6e3ef61a5da0c91aaeed77a2247023bdefcd4e0b6c08f1c9ffb42eaac3d38739c401443c3cf7aebb507b1d779c415b6cbffabbc10 xsa232.patch
-a322ac6c5ac2f858a59096108032fd42974eaaeeebd8f4966119149665f32bed281e333e743136e79add2e6f3844d88b6a3e4d5a685c2808702fd3a9e6396cd4 xsa233.patch
-9f578606c3ffbbf3eb3dda82130336e155a502c2065841856e04f6935cf77b3da59d1ff7e6583c6425ccdefd673ad2b07ca3b3ad15aa6ca9765ac3a28d784f2c xsa234-4.6.patch
-2048fd831c4b5e36db7eee0a32c804714b8e5f02e49317d4c26b564932158f9d16688667f20c89bc61a1d91c0f9d32fff0d172bcff819b46d6e3f22097b2e7bb xsa235-4.7.patch
-a951c3d29a6b05b42021bd49419becff51123a245256659240a3af5701bbf51e7d3c1a79835a7cc9a5fdf7c1c6aa330a35a586cb56d69d847c256642f0fc8e55 xsa236-4.9.patch
-a7491ef0c0b770f16198a36c02cfee7f6c55a3aaad1fc05dd7532ce3bd2cc84981253a8703244f79c552d3a8e21531891f911e91ddc7035fe3ddf0928b5577b3 0001-x86-dont-allow-MSI-pIRQ-mapping-on-unowned-device.patch
-5f9ea6e66eb3a507497ad80956c690f6e45a6743f235aafc5a428df292a18b9d614915163726227851d32a22d9789450deff974fb861b9278504c6eb7b9b222e 0002-x86-enforce-proper-privilege-when-mapping-pIRQ-s.patch
-50607fca2e02eed322927e0288c77e7a6c541794fa2c70c78ada0c2fa762b5ad0f3b5108ecb9f01d8826f89dab492d56c502236c70234e6ba741e94a39356ea3 0003-x86-MSI-disallow-redundant-enabling.patch
-c29b4747ded7ac070f325ce00355a682e8ac742257d25d8b9493c6cbf7a24060c09fb311a3ba5443b733513103f1afe87b57edf44a7274be60b648f230a7d6a2 0004-x86-IRQ-conditionally-preserve-irq-pirq-mapping-on-error.patch
-0a367c1839f4cb19e3b2fd22b782d32fe97de8f991c99e4881162c06c9a608bebcd6d4bf6d44af9cd55fd45981125e13727bd368a646ed59d4e8b6a535c776e1 0005-x86-FLASK-fix-unmap-domain-IRQ-XSM-hook.patch
-b154c0925bbceab40e8f3b689e2d1fb321b42c685fdcb6bd29b0411ccd856731480a2fbb8025c633f9edf34cec938e5d8888cc71e8158212c078bb595d07a29d xsa238.patch
-8b09cd12c7adfef69a02a2965cda22ef6499fd42c8a84a20a6af231f422a6e8a0e597501c327532e1580c1067ee4bf35579e3cf98dee9302ed34ba87f74bf6d2 xsa239.patch
-66071fa4ff14ce604d2b67fe37bb06dbbabb38b86d51c14c5d0700b26f9049b202e8a17bdac4440ab7281625e8b12a47f23bdb0f30f93f66cac2152b0c7be5d7 0001-x86-limit-linear-page-table-use-to-a-single-level.patch
-58b6e1308e0ce8ab19a814f49e1472d332af8245328599e8446cbe3e192025f2ef017572bef02ab4b1e8b1074618a816eed81d1e456f3d5f9e12caa80e143409 0002-x86-mm-Disable-PV-linear-pagetables-by-default.patch
-7716b76365b96ee7f80ea3c74ab450e184935babd708ff98b90c2d27d318beb4f9ba3534595a9fa06ec0bce561f62922d0cac0f0e9bb4801dcdfb6d3b7b5ea9b xsa241-4.8.patch
-86aa763949ca36a36120a40eafbdf3a8e8bc04acd32ee6bc1e3ae90b189b86b9b166b81a9e0a4f86a7eb1fcc8723ae8ba6bd0f84fa9732e7e4e1ccea45d0b7c1 xsa242-4.9.patch
-825f45f1b7ea75ed821c48c7fb989562fc884835919ec498369eca903c92ee83d389469c171beea84d637342221ae17782bb1741c1bfcaf17a597f4db2b90b47 xsa243-4.7-1.patch
-8aaf0599259b1ff34171684467089da4a26af8fe67eedf22066955b34b2460c45abdf0f19a5a5e3dd3231b944674c62b9d3112ad7d765afc4bdbcdcfbad226e1 xsa243-2.patch
-8ab78d48c6fce2bb34416bba0a72904a7baa64de912143647ff984eb3c428f7f9c98d3a4e8de0e101ebb643d3c0bffd6043f5ce4c2b4f943da102164551e23e6 xsa244-4.7.patch
-082480ba79f4bf400d2b1a28a1a85e373a5681c02c0a470801d88b319cc5c21e739590fdf6468371edcc4745308128f0ce4f83ee4e94ba8e06bb7df211b80511 xsa246-4.7.patch
-4fb72d13a22fc55660247182012e3541aeee39258b70dc9faf2f47351a15234c57d1626ec2336c3c463b80a22d6fc54e593e8e7c12b70d2ee69f3d1a2f83b7c9 0001-p2m-Always-check-to-see-if-removing-a-p2m-entry-actu.patch
-f59cbda14300a62f3dc21c41609d7f13c99e46565863ecd887e5dff561d533c58cb07aff6d6b342c68e64d4b6f28643f86d090dc2b28c7092d995525cf8542a3 0002-p2m-Check-return-value-of-p2m_set_entry-when-decreas.patch
-3c891f57c6403212ec3c75166502b4f00eedbab1207eab45ccd3a53bfdb4e1ca9aa4c46a505faaf95607275909bedb23d067e07a2cfb1279ff9010f2883791e2 xsa248-4.8.patch
-05a2e954bab1877500eb5ed3a8c49edb27411ed3ec9dbfb2115b7804a3b03c6d45c9f08a7ed96ff2b586346f321142065a8c5a5d996468496b373637b6ee31b9 xsa249.patch
-b3030f09ddb4f9e4a356519c7b74d393e8db085278a1e616788c81d19988699a6efdd8568277c25514f3298ca92e5a09e3cd08b0a308a4d2ddb55374a8445657 xsa250.patch
-22ac1c788e5c4c6b03e4d6c04ef97819fda4d5fb22015aa3a79d2f9a7dbac050f0b516401c0392c237576087306a810155a2dcdc6918d3de46f1ceb06b0b8a25 xsa251-4.8.patch
-66004b80a85c94d8cc9de002cc2d9c2a80ab2bdb83e86a370975fb2728b6122e44210645f627e5a12c16de3623ed22f0c3e3624a0112a03bf6c8c95de034f93a xsa252-4.7.patch
-3e96ff8befc82fef31856938bcc1f16497a8fc7116f167b9556f595417b9fbb50769ca880ac2ab6f3d7394b683e3dac81a9fb620536a6dfb12748aacfa6b6202 xsa255-4.7-1.patch
-676458bfa452930834da429bc58a37a7f383f34bd9fb793064848143f9e92f16690a130f7f8933326970194c6796a193fb08e839f95e2b2ef0906d958a3216f1 xsa255-4.7-2.patch
-7b5e04e6b1c15b233c4d70ec55769731fe026fabc2f2d3f8e0101e2630c00cfdcaeb480991f5cd5375c5f85961d2951cfd87a53096010425825425c2469fcc4d 0001-x86-entry-Remove-support-for-partial-cpu_user_regs-f.patch
-4aee749ec6fe18ec119a90caae5034fbe649437be2ba9999b6f78c584454db93de57557029db28bca53c73d880dd10e828e429dc6b1e2ac5b47d8bb8caeb4729 0002-x86-mm-Always-set-_PAGE_ACCESSED-on-L4e-updates.patch
-4e3cae2026aec223e0e9d0200704c5964f4ea5e8b46e782d074233b44ac13b0908e6059a1e432376a90e84375a70b8babe1ace0f85fe7ad97a2b70de3caa4446 0003-x86-Meltdown-band-aid-against-malicious-64-bit-PV-gu.patch
-6338e85e772c0cc1e6c776152de99a9e28cf8c459d13950752d48bd1f1454d5eda8c78c7f31ffd07312a97904b8cf7d4c7f8f98f2f346a8cf4644f9bd4bf1725 0004-x86-allow-Meltdown-band-aid-to-be-disabled.patch
-10a1895ed8aedb1dcfd47fbf4d1bd0d77103e15537234c98a6571e6e6add91ac87e921ace1463e15bc583b83be6b557698a5ae65918278c1f6d15e92f24d476d xsa258-4.8.patch
-023b34174d059d1e22a070e9f48d0601b0a6e735acef5b74af7f8613d3267e66773e662790a8f2a5b7fda32b017bf654a059e4fecd94a82ff8502900e4b24b25 xsa259.patch
-ab3678feff91c15952c8e60bee34983ebc31734424734fc9a38f1e775e08cf314952777691c81efc5b2b01766a3b90f2091ab68897297576a19368b44f3f8b65 xsa260-1.patch
-cb31d6d295a9cb3a2265f1721e874609e5b0fcb6884e274a3f55f87493543bd8f758e55cc8928034bc1ce787d46fae251da48f83d2dbf893281bd2cecc69abe2 xsa260-2.patch
-079ba89096870aab3c6082c42e3aed6486111d32901914eff45c69403c7235334dab2278caa01f484671e65ab9acca0bca1bffbabcb783d81cddfc3d3cfccef2 xsa260-3.patch
-28438390039e78274ceda8fe8b828d1b0801b5db6b1251854c0070f6cd26977b92773758e3d24129ab6f41a216d2a9b818ef6dd6ea4b4e9e8dd44f639955afa3 xsa260-4.patch
-c76d4e3d30147fda07e63a935c93395c7b36b95b35e25cbca16569d5053be22e418e35f86381d0a6f2b0007884fcd15dd763e09061d62dfb5abe99ea77de9491 xsa261-4.7.patch
-05dc19a710290b49ba6dbd1181dd0e69fc9ad524870c008813d22c8b0c2d6af573034e7a44b0b75dd2d112989caf1447b0d6b42d007f1bffc3442828a88ac8f3 xsa262-4.9.patch
c3c46f232f0bd9f767b232af7e8ce910a6166b126bd5427bb8dc325aeb2c634b956de3fc225cab5af72649070c8205cc8e1cab7689fc266c204f525086f1a562 qemu-coroutine-gthread.patch
1936ab39a1867957fa640eb81c4070214ca4856a2743ba7e49c0cd017917071a9680d015f002c57fa7b9600dbadd29dcea5887f50e6c133305df2669a7a933f3 qemu-xen_paths.patch
f095ea373f36381491ad36f0662fb4f53665031973721256b23166e596318581da7cbb0146d0beb2446729adfdb321e01468e377793f6563a67d68b8b0f7ffe3 hotplug-vif-vtrill.patch
-71d0ebcda62259a1bf056807363015f2370f12daa5774f16150da42cba66bb5b65ec82f1f806fe147346560aa4d0e78bc5b5d8ae9f7e82d0aabae9d63fc876f6 rombios-no-pie.patch
a6455988477a29d856924651db5e14f96d835413b956278d2291cbb8e5877d7bf6f462890f607ecf1c7b4003997295d0ba7852e110fc20df3a3edf1845e778ba 0001-ipxe-dont-clobber-ebp.patch
76bd60768b296752ca11195bb03a57584686461da45255cb540977111a73c42b5b92362fd46d97bfd20487c96971dd5aed7eae7d8bf1aad7d5199adb875d4962 musl-support.patch
08cf7fac825dd3da5f33856abf6692da00d8928ab73050b3ae0a643ddb97c8ae323238a80152fd31595ac1c31678d559232264258c189e2c05ecaf33e295f13e musl-hvmloader-fix-stdint.patch
diff --git a/main/xen/rombios-no-pie.patch b/main/xen/rombios-no-pie.patch
deleted file mode 100644
index 3e98bb497d..0000000000
--- a/main/xen/rombios-no-pie.patch
+++ /dev/null
@@ -1,26 +0,0 @@
-diff --git a/tools/firmware/rombios/32bit/Makefile b/tools/firmware/rombios/32bit/Makefile
-index 396906c..07168eb 100644
---- a/tools/firmware/rombios/32bit/Makefile
-+++ b/tools/firmware/rombios/32bit/Makefile
-@@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/firmware/Rules.mk
-
- TARGET = 32bitbios_flat.h
-
--CFLAGS += $(CFLAGS_xeninclude) -I..
-+CFLAGS += $(CFLAGS_xeninclude) -I.. -fno-pie
-
- SUBDIRS = tcgbios
-
-diff --git a/tools/firmware/rombios/32bit/tcgbios/Makefile b/tools/firmware/rombios/32bit/tcgbios/Makefile
-index f6f2649..104496a 100644
---- a/tools/firmware/rombios/32bit/tcgbios/Makefile
-+++ b/tools/firmware/rombios/32bit/tcgbios/Makefile
-@@ -3,7 +3,7 @@ include $(XEN_ROOT)/tools/firmware/Rules.mk
-
- TARGET = tcgbiosext.o
-
--CFLAGS += $(CFLAGS_xeninclude) -I.. -I../..
-+CFLAGS += $(CFLAGS_xeninclude) -I.. -I../.. -fno-pie
-
- .PHONY: all
- all: $(TARGET)
diff --git a/main/xen/xsa215.patch b/main/xen/xsa215.patch
deleted file mode 100644
index f18a1cd01f..0000000000
--- a/main/xen/xsa215.patch
+++ /dev/null
@@ -1,37 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: correct create_bounce_frame
-
-We may push up to 96 bytes on the guest (kernel) stack, so we should
-also cover as much in the early range check. Note that this is the
-simplest possible patch, which has the theoretical potential of
-breaking a guest: We only really push 96 bytes when invoking the
-failsafe callback, ordinary exceptions only have 56 or 64 bytes pushed
-(without / with error code respectively). There is, however, no PV OS
-known to place a kernel stack there.
-
-This is XSA-215.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -347,7 +347,7 @@ int80_slow_path:
- jmp handle_exception_saved
-
- /* CREATE A BASIC EXCEPTION FRAME ON GUEST OS STACK: */
--/* { RCX, R11, [DS-GS,] [CR2,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
-+/* { RCX, R11, [DS-GS,] [ERRCODE,] RIP, CS, RFLAGS, RSP, SS } */
- /* %rdx: trap_bounce, %rbx: struct vcpu */
- /* On return only %rbx and %rdx are guaranteed non-clobbered. */
- create_bounce_frame:
-@@ -367,7 +367,7 @@ create_bounce_frame:
- 2: andq $~0xf,%rsi # Stack frames are 16-byte aligned.
- movq $HYPERVISOR_VIRT_START,%rax
- cmpq %rax,%rsi
-- movq $HYPERVISOR_VIRT_END+60,%rax
-+ movq $HYPERVISOR_VIRT_END+12*8,%rax
- sbb %ecx,%ecx # In +ve address space? Then okay.
- cmpq %rax,%rsi
- adc %ecx,%ecx # Above Xen private area? Then okay.
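
The arithmetic behind the new constant: the largest frame, pushed when invoking the failsafe callback, holds 12 quadwords, whereas ordinary exceptions push only 7 or 8 (the 56 or 64 bytes the description cites), so the old +60 bound fell short. As a self-contained check (sketch, not Xen source):

    enum {
        /* rcx, r11, ds, es, fs, gs, errcode, rip, cs, rflags, rsp, ss */
        BOUNCE_FRAME_MAX_QWORDS = 12,
        BOUNCE_FRAME_MAX_BYTES  = BOUNCE_FRAME_MAX_QWORDS * 8, /* == 96 */
    };
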
diff --git a/main/xen/xsa216-qemuu-4.7.patch b/main/xen/xsa216-qemuu-4.7.patch
deleted file mode 100644
index 9bf66fab8f..0000000000
--- a/main/xen/xsa216-qemuu-4.7.patch
+++ /dev/null
@@ -1,111 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: xen/disk: don't leak stack data via response ring
-
-Rather than constructing a local structure instance on the stack, fill
-the fields directly on the shared ring, just like other (Linux)
-backends do. Build on the fact that all response structure flavors are
-actually identical (the old code did make this assumption too).
-
-This is XSA-216.
-
-Reported-by: Anthony Perard <anthony.perard@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-Acked-by: Anthony PERARD <anthony.perard@citrix.com>
-
---- a/hw/block/xen_blkif.h
-+++ b/hw/block/xen_blkif.h
-@@ -12,9 +12,6 @@
- struct blkif_common_request {
- char dummy;
- };
--struct blkif_common_response {
-- char dummy;
--};
-
- /* i386 protocol version */
- #pragma pack(push, 4)
-@@ -26,13 +23,7 @@ struct blkif_x86_32_request {
- blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- };
--struct blkif_x86_32_response {
-- uint64_t id; /* copied from request */
-- uint8_t operation; /* copied from request */
-- int16_t status; /* BLKIF_RSP_??? */
--};
- typedef struct blkif_x86_32_request blkif_x86_32_request_t;
--typedef struct blkif_x86_32_response blkif_x86_32_response_t;
- #pragma pack(pop)
-
- /* x86_64 protocol version */
-@@ -44,17 +35,14 @@ struct blkif_x86_64_request {
- blkif_sector_t sector_number;/* start sector idx on disk (r/w only) */
- struct blkif_request_segment seg[BLKIF_MAX_SEGMENTS_PER_REQUEST];
- };
--struct blkif_x86_64_response {
-- uint64_t __attribute__((__aligned__(8))) id;
-- uint8_t operation; /* copied from request */
-- int16_t status; /* BLKIF_RSP_??? */
--};
- typedef struct blkif_x86_64_request blkif_x86_64_request_t;
--typedef struct blkif_x86_64_response blkif_x86_64_response_t;
-
--DEFINE_RING_TYPES(blkif_common, struct blkif_common_request, struct blkif_common_response);
--DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request, struct blkif_x86_32_response);
--DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request, struct blkif_x86_64_response);
-+DEFINE_RING_TYPES(blkif_common, struct blkif_common_request,
-+ struct blkif_response);
-+DEFINE_RING_TYPES(blkif_x86_32, struct blkif_x86_32_request,
-+ struct blkif_response QEMU_PACKED);
-+DEFINE_RING_TYPES(blkif_x86_64, struct blkif_x86_64_request,
-+ struct blkif_response);
-
- union blkif_back_rings {
- blkif_back_ring_t native;
---- a/hw/block/xen_disk.c
-+++ b/hw/block/xen_disk.c
-@@ -614,31 +614,30 @@ static int blk_send_response_one(struct
- struct XenBlkDev *blkdev = ioreq->blkdev;
- int send_notify = 0;
- int have_requests = 0;
-- blkif_response_t resp;
-- void *dst;
--
-- resp.id = ioreq->req.id;
-- resp.operation = ioreq->req.operation;
-- resp.status = ioreq->status;
-+ blkif_response_t *resp;
-
- /* Place on the response ring for the relevant domain. */
- switch (blkdev->protocol) {
- case BLKIF_PROTOCOL_NATIVE:
-- dst = RING_GET_RESPONSE(&blkdev->rings.native, blkdev->rings.native.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.native,
-+ blkdev->rings.native.rsp_prod_pvt);
- break;
- case BLKIF_PROTOCOL_X86_32:
-- dst = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
-- blkdev->rings.x86_32_part.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.x86_32_part,
-+ blkdev->rings.x86_32_part.rsp_prod_pvt);
- break;
- case BLKIF_PROTOCOL_X86_64:
-- dst = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
-- blkdev->rings.x86_64_part.rsp_prod_pvt);
-+ resp = RING_GET_RESPONSE(&blkdev->rings.x86_64_part,
-+ blkdev->rings.x86_64_part.rsp_prod_pvt);
- break;
- default:
-- dst = NULL;
- return 0;
- }
-- memcpy(dst, &resp, sizeof(resp));
-+
-+ resp->id = ioreq->req.id;
-+ resp->operation = ioreq->req.operation;
-+ resp->status = ioreq->status;
-+
- blkdev->rings.common.rsp_prod_pvt++;
-
- RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&blkdev->rings.common, send_notify);
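
The leak the XSA-216 patch closes comes from struct padding: memcpy()ing a stack-local structure to shared memory ships the padding bytes, which hold whatever was on the hypervisor stack. A reduced sketch of the before/after pattern (illustrative layout, not the real ABI):

    #include <stdint.h>
    #include <string.h>

    /* Illustrative response layout with padding holes. */
    struct resp {
        uint64_t id;
        uint8_t  operation;   /* 1 padding byte follows... */
        int16_t  status;      /* ...and 4 more pad the tail to 16 bytes */
    };

    /* Pre-fix pattern: the memcpy ships the uninitialized padding --
     * stale hypervisor stack bytes -- into guest-visible memory. */
    void respond_leaky(struct resp *ring_slot, uint64_t id, uint8_t op, int16_t st)
    {
        struct resp r;        /* padding holds whatever was on the stack */
        r.id = id; r.operation = op; r.status = st;
        memcpy(ring_slot, &r, sizeof(r));
    }

    /* Post-fix pattern: assign the fields directly in the shared slot. */
    void respond_safe(struct resp *ring_slot, uint64_t id, uint8_t op, int16_t st)
    {
        ring_slot->id = id;
        ring_slot->operation = op;
        ring_slot->status = st;   /* no local copy, nothing stale to leak */
    }

A memset() of the local copy would also work, but writing in place removes the copy entirely, matching the other backends the patch description mentions.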
diff --git a/main/xen/xsa217.patch b/main/xen/xsa217.patch
deleted file mode 100644
index 1d4eb01f23..0000000000
--- a/main/xen/xsa217.patch
+++ /dev/null
@@ -1,41 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/mm: disallow page stealing from HVM domains
-
-The operation's success can't be controlled by the guest, as the device
-model may have an active mapping of the page. If we nevertheless
-permitted this operation, we'd have to add further TLB flushing to
-prevent scenarios like
-
-"Domains A (HVM), B (PV), C (PV); B->target==A
- Steps:
- 1. B maps page X from A as writable
- 2. B unmaps page X without a TLB flush
- 3. A sends page X to C via GNTTABOP_transfer
- 4. C maps page X as pagetable (potentially causing a TLB flush in C,
- but not in B)
-
- At this point, X would be mapped as a pagetable in C while being
- writable through a stale TLB entry in B."
-
-A similar scenario could be constructed for A using XENMEM_exchange and
-some arbitrary PV domain C then having this page allocated.
-
-This is XSA-217.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Acked-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -4449,6 +4449,9 @@ int steal_page(
- bool_t drop_dom_ref = 0;
- const struct domain *owner = dom_xen;
-
-+ if ( paging_mode_external(d) )
-+ return -1;
-+
- spin_lock(&d->page_alloc_lock);
-
- if ( is_xen_heap_page(page) || ((owner = page_get_owner(page)) != d) )
diff --git a/main/xen/xsa226-4.7.patch b/main/xen/xsa226-4.7.patch
deleted file mode 100644
index e45e73a864..0000000000
--- a/main/xen/xsa226-4.7.patch
+++ /dev/null
@@ -1,133 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: grant_table: Default to v1, and disallow transitive grants
-
-The reference counting and locking discipline for transitive grants is broken.
-Their use is therefore declared out of security support.
-
-This is XSA-226.
-
-Transitive grants are expected to be unconditionally available with grant
-table v2. Hiding transitive grants alone is an ABI breakage for the guest.
-Modern versions of Linux and the Windows PV drivers use grant table v1, but
-older versions did use v2.
-
-In principle, disabling gnttab v2 entirely is the safer way to cause guests to
-avoid using transitive grants. However, some older guests which defaulted to
-using gnttab v2 don't tolerate falling back from v2 to v1 over migrate.
-
-This patch introduces a new command line option to control grant table
-behaviour. One suboption allows a choice of the maximum grant table version
-Xen will allow the guest to use, and defaults to v2. A different suboption
-independently controls whether transitive grants can be used.
-
-The default case is:
-
- gnttab=max_ver:2
-
-To disable gnttab v2 entirely, use:
-
- gnttab=max_ver:1
-
-To allow gnttab v2 and transitive grants, use:
-
- gnttab=max_ver:2,transitive
-
-Reported-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-diff --git a/docs/misc/xen-command-line.markdown b/docs/misc/xen-command-line.markdown
-index 73f5265..b792abf 100644
---- a/docs/misc/xen-command-line.markdown
-+++ b/docs/misc/xen-command-line.markdown
-@@ -758,6 +758,22 @@ Controls EPT related features.
-
- Specify which console gdbstub should use. See **console**.
-
-+### gnttab
-+> `= List of [ max_ver:<integer>, transitive ]`
-+
-+> Default: `gnttab=max_ver:2,no-transitive`
-+
-+Control various aspects of the grant table behaviour available to guests.
-+
-+* `max_ver` Select the maximum grant table version to offer to guests. Valid
-+version are 1 and 2.
-+* `transitive` Permit or disallow the use of transitive grants. Note that the
-+use of grant table v2 without transitive grants is an ABI breakage from the
-+guests point of view.
-+
-+*Warning:*
-+Due to XSA-226, the use of transitive grants is outside of security support.
-+
- ### gnttab\_max\_frames
- > `= <integer>`
-
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index f06b664..109c552 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -50,6 +50,42 @@ integer_param("gnttab_max_nr_frames", max_nr_grant_frames);
- unsigned int __read_mostly max_grant_frames;
- integer_param("gnttab_max_frames", max_grant_frames);
-
-+static unsigned int __read_mostly opt_gnttab_max_version = 2;
-+static bool_t __read_mostly opt_transitive_grants;
-+
-+static void __init parse_gnttab(char *s)
-+{
-+ char *ss;
-+
-+ do {
-+ ss = strchr(s, ',');
-+ if ( ss )
-+ *ss = '\0';
-+
-+ if ( !strncmp(s, "max_ver:", 8) )
-+ {
-+ long ver = simple_strtol(s + 8, NULL, 10);
-+
-+ if ( ver >= 1 && ver <= 2 )
-+ opt_gnttab_max_version = ver;
-+ }
-+ else
-+ {
-+ bool_t val = !!strncmp(s, "no-", 3);
-+
-+ if ( !val )
-+ s += 3;
-+
-+ if ( !strcmp(s, "transitive") )
-+ opt_transitive_grants = val;
-+ }
-+
-+ s = ss + 1;
-+ } while ( ss );
-+}
-+
-+custom_param("gnttab", parse_gnttab);
-+
- /* The maximum number of grant mappings is defined as a multiplier of the
- * maximum number of grant table entries. This defines the multiplier used.
- * Pretty arbitrary. [POLICY]
-@@ -2188,6 +2224,10 @@ __acquire_grant_for_copy(
- }
- else if ( (shah->flags & GTF_type_mask) == GTF_transitive )
- {
-+ if ( !opt_transitive_grants )
-+ PIN_FAIL(unlock_out_clear, GNTST_general_error,
-+ "transitive grant disallowed by policy\n");
-+
- if ( !allow_transitive )
- PIN_FAIL(unlock_out_clear, GNTST_general_error,
- "transitive grant when transitivity not allowed\n");
-@@ -3156,7 +3196,10 @@ do_grant_table_op(
- }
- case GNTTABOP_set_version:
- {
-- rc = gnttab_set_version(guest_handle_cast(uop, gnttab_set_version_t));
-+ if ( opt_gnttab_max_version == 1 )
-+ rc = -ENOSYS; /* Behave as before set_version was introduced. */
-+ else
-+ rc = gnttab_set_version(guest_handle_cast(uop, gnttab_set_version_t));
- break;
- }
- case GNTTABOP_get_status_frames:
diff --git a/main/xen/xsa227.patch b/main/xen/xsa227.patch
deleted file mode 100644
index 86aa41e2d4..0000000000
--- a/main/xen/xsa227.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From fa7268b94f8a0a7792ee12d5b8e23a60e52a3a84 Mon Sep 17 00:00:00 2001
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Date: Tue, 20 Jun 2017 19:18:54 +0100
-Subject: [PATCH] x86/grant: Disallow misaligned PTEs
-
-Pagetable entries must be aligned to function correctly. Disallow attempts
-from the guest to have a grant PTE created at a misaligned address, which
-would result in corruption of the L1 table with largely-guest-controlled
-values.
-
-This is XSA-227
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/mm.c | 13 +++++++++++++
- 1 file changed, 13 insertions(+)
-
-diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
-index 97b3b4b..00f517a 100644
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -3763,6 +3763,9 @@ static int create_grant_pte_mapping(
- l1_pgentry_t ol1e;
- struct domain *d = v->domain;
-
-+ if ( !IS_ALIGNED(pte_addr, sizeof(nl1e)) )
-+ return GNTST_general_error;
-+
- adjust_guest_l1e(nl1e, d);
-
- gmfn = pte_addr >> PAGE_SHIFT;
-@@ -3819,6 +3822,16 @@ static int destroy_grant_pte_mapping(
- struct page_info *page;
- l1_pgentry_t ol1e;
-
-+ /*
-+ * addr comes from Xen's active_entry tracking so isn't guest controlled,
-+ * but it had still better be PTE-aligned.
-+ */
-+ if ( !IS_ALIGNED(addr, sizeof(ol1e)) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ return GNTST_general_error;
-+ }
-+
- gmfn = addr >> PAGE_SHIFT;
- page = get_page_from_gfn(d, gmfn, NULL, P2M_ALLOC);
-
---
-2.1.4
-
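
Both XSA-227 checks hinge on IS_ALIGNED(). As commonly defined for power-of-two alignments (an assumed form; Xen's own macro may differ), a PTE address that would straddle two entries fails immediately:

    #include <assert.h>
    #include <stdint.h>

    /* Common power-of-two alignment test (assumed, not Xen's source). */
    #define IS_ALIGNED(x, a) (((x) & ((uint64_t)(a) - 1)) == 0)

    int main(void)
    {
        assert( IS_ALIGNED(UINT64_C(0x1000), sizeof(uint64_t))); /* a PTE slot */
        assert(!IS_ALIGNED(UINT64_C(0x1004), sizeof(uint64_t))); /* straddles two */
        return 0;
    }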
diff --git a/main/xen/xsa228-4.8.patch b/main/xen/xsa228-4.8.patch
deleted file mode 100644
index 57e6661cdb..0000000000
--- a/main/xen/xsa228-4.8.patch
+++ /dev/null
@@ -1,198 +0,0 @@
-From cb91f4c43bd4158daa6561c73921a6455176f278 Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Mon, 31 Jul 2017 15:17:56 +0100
-Subject: [PATCH] gnttab: split maptrack lock to make it fulfill its purpose
- again
-
-The way the lock is currently being used in get_maptrack_handle(), it
-protects only the maptrack limit: The function acts on current's list
-only, so races on list accesses are impossible even without the lock.
-
-Otoh list access races are possible between __get_maptrack_handle() and
-put_maptrack_handle(), due to the invocation of the former for other
-than current from steal_maptrack_handle(). Introduce a per-vCPU lock
-for list accesses to become race free again. This lock will be
-uncontended except when it becomes necessary to take the steal path,
-i.e. in the common case there should be no meaningful performance
-impact.
-
-When in get_maptrack_handle adds a stolen entry to a fresh, empty,
-freelist, we think that there is probably no concurrency. However,
-this is not a fast path and adding the locking there makes the code
-clearly correct.
-
-Also, while we are here: the stolen maptrack_entry's tail pointer was
-not properly set. Set it.
-
-This is XSA-228.
-
-Reported-by: Ian Jackson <ian.jackson@eu.citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
----
- docs/misc/grant-tables.txt | 7 ++++++-
- xen/common/grant_table.c | 30 ++++++++++++++++++++++++------
- xen/include/xen/grant_table.h | 2 +-
- xen/include/xen/sched.h | 1 +
- 4 files changed, 32 insertions(+), 8 deletions(-)
-
-diff --git a/docs/misc/grant-tables.txt b/docs/misc/grant-tables.txt
-index 417ce2d..64da5cf 100644
---- a/docs/misc/grant-tables.txt
-+++ b/docs/misc/grant-tables.txt
-@@ -87,7 +87,8 @@ is complete.
- inconsistent grant table state such as current
- version, partially initialized active table pages,
- etc.
-- grant_table->maptrack_lock : spinlock used to protect the maptrack free list
-+ grant_table->maptrack_lock : spinlock used to protect the maptrack limit
-+ v->maptrack_freelist_lock : spinlock used to protect the maptrack free list
- active_grant_entry->lock : spinlock used to serialize modifications to
- active entries
-
-@@ -102,6 +103,10 @@ is complete.
- The maptrack free list is protected by its own spinlock. The maptrack
- lock may be locked while holding the grant table lock.
-
-+ The maptrack_freelist_lock is an innermost lock. It may be locked
-+ while holding other locks, but no other locks may be acquired within
-+ it.
-+
- Active entries are obtained by calling active_entry_acquire(gt, ref).
- This function returns a pointer to the active entry after locking its
- spinlock. The caller must hold the grant table read lock before
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index f9654f1..593121c 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -304,11 +304,16 @@ __get_maptrack_handle(
- {
- unsigned int head, next, prev_head;
-
-+ spin_lock(&v->maptrack_freelist_lock);
-+
- do {
- /* No maptrack pages allocated for this VCPU yet? */
- head = read_atomic(&v->maptrack_head);
- if ( unlikely(head == MAPTRACK_TAIL) )
-+ {
-+ spin_unlock(&v->maptrack_freelist_lock);
- return -1;
-+ }
-
- /*
- * Always keep one entry in the free list to make it easier to
-@@ -316,12 +321,17 @@ __get_maptrack_handle(
- */
- next = read_atomic(&maptrack_entry(t, head).ref);
- if ( unlikely(next == MAPTRACK_TAIL) )
-+ {
-+ spin_unlock(&v->maptrack_freelist_lock);
- return -1;
-+ }
-
- prev_head = head;
- head = cmpxchg(&v->maptrack_head, prev_head, next);
- } while ( head != prev_head );
-
-+ spin_unlock(&v->maptrack_freelist_lock);
-+
- return head;
- }
-
-@@ -380,6 +390,8 @@ put_maptrack_handle(
- /* 2. Add entry to the tail of the list on the original VCPU. */
- v = currd->vcpu[maptrack_entry(t, handle).vcpu];
-
-+ spin_lock(&v->maptrack_freelist_lock);
-+
- cur_tail = read_atomic(&v->maptrack_tail);
- do {
- prev_tail = cur_tail;
-@@ -388,6 +400,8 @@ put_maptrack_handle(
-
- /* 3. Update the old tail entry to point to the new entry. */
- write_atomic(&maptrack_entry(t, prev_tail).ref, handle);
-+
-+ spin_unlock(&v->maptrack_freelist_lock);
- }
-
- static inline int
-@@ -411,10 +425,6 @@ get_maptrack_handle(
- */
- if ( nr_maptrack_frames(lgt) >= max_maptrack_frames )
- {
-- /*
-- * Can drop the lock since no other VCPU can be adding a new
-- * frame once they've run out.
-- */
- spin_unlock(&lgt->maptrack_lock);
-
- /*
-@@ -426,8 +436,12 @@ get_maptrack_handle(
- handle = steal_maptrack_handle(lgt, curr);
- if ( handle == -1 )
- return -1;
-+ spin_lock(&curr->maptrack_freelist_lock);
-+ maptrack_entry(lgt, handle).ref = MAPTRACK_TAIL;
- curr->maptrack_tail = handle;
-- write_atomic(&curr->maptrack_head, handle);
-+ if ( curr->maptrack_head == MAPTRACK_TAIL )
-+ write_atomic(&curr->maptrack_head, handle);
-+ spin_unlock(&curr->maptrack_freelist_lock);
- }
- return steal_maptrack_handle(lgt, curr);
- }
-@@ -460,12 +474,15 @@ get_maptrack_handle(
- smp_wmb();
- lgt->maptrack_limit += MAPTRACK_PER_PAGE;
-
-+ spin_unlock(&lgt->maptrack_lock);
-+ spin_lock(&curr->maptrack_freelist_lock);
-+
- do {
- new_mt[i - 1].ref = read_atomic(&curr->maptrack_head);
- head = cmpxchg(&curr->maptrack_head, new_mt[i - 1].ref, handle + 1);
- } while ( head != new_mt[i - 1].ref );
-
-- spin_unlock(&lgt->maptrack_lock);
-+ spin_unlock(&curr->maptrack_freelist_lock);
-
- return handle;
- }
-@@ -3474,6 +3491,7 @@ grant_table_destroy(
-
- void grant_table_init_vcpu(struct vcpu *v)
- {
-+ spin_lock_init(&v->maptrack_freelist_lock);
- v->maptrack_head = MAPTRACK_TAIL;
- v->maptrack_tail = MAPTRACK_TAIL;
- }
-diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
-index 4e77899..100f2b3 100644
---- a/xen/include/xen/grant_table.h
-+++ b/xen/include/xen/grant_table.h
-@@ -78,7 +78,7 @@ struct grant_table {
- /* Mapping tracking table per vcpu. */
- struct grant_mapping **maptrack;
- unsigned int maptrack_limit;
-- /* Lock protecting the maptrack page list, head, and limit */
-+ /* Lock protecting the maptrack limit */
- spinlock_t maptrack_lock;
- /* The defined versions are 1 and 2. Set to 0 if we don't know
- what version to use yet. */
-diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
-index 1fbda87..ff0f38f 100644
---- a/xen/include/xen/sched.h
-+++ b/xen/include/xen/sched.h
-@@ -223,6 +223,7 @@ struct vcpu
- int controller_pause_count;
-
- /* Maptrack */
-+ spinlock_t maptrack_freelist_lock;
- unsigned int maptrack_head;
- unsigned int maptrack_tail;
-
---
-2.1.4
-
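
The lock-ordering rule the XSA-228 patch documents -- maptrack_freelist_lock is innermost -- is easiest to see in a reduced model. This sketch serializes every list access under the per-vCPU lock; the real code keeps lock-free cmpxchg fast paths and takes the lock only around the list surgery (names and the pthread shim are illustrative):

    #include <pthread.h>

    #define MAPTRACK_TAIL (~0u)

    /* Reduced model of the post-fix per-vCPU free list; the mutex
     * stands in for Xen's spinlock. */
    struct vcpu_maptrack {
        pthread_mutex_t freelist_lock;  /* innermost: take no other lock inside */
        unsigned int head;
    };

    /* Pop one handle, or MAPTRACK_TAIL if empty. next_of[] holds each
     * entry's successor, as the maptrack table's ref field does. */
    unsigned int maptrack_pop(struct vcpu_maptrack *v, const unsigned int *next_of)
    {
        unsigned int h;

        pthread_mutex_lock(&v->freelist_lock);
        h = v->head;
        if (h != MAPTRACK_TAIL)
            v->head = next_of[h];       /* unlink under the lock */
        pthread_mutex_unlock(&v->freelist_lock);

        return h;
    }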
diff --git a/main/xen/xsa228.patch b/main/xen/xsa228.patch
deleted file mode 100644
index 65add3a588..0000000000
--- a/main/xen/xsa228.patch
+++ /dev/null
@@ -1,198 +0,0 @@
-From 9a52c78eb4ff7836bf7ac9ecd918b289cead1f3f Mon Sep 17 00:00:00 2001
-From: Jan Beulich <jbeulich@suse.com>
-Date: Mon, 31 Jul 2017 15:17:56 +0100
-Subject: [PATCH] gnttab: split maptrack lock to make it fulfill its purpose
- again
-
-The way the lock is currently being used in get_maptrack_handle(), it
-protects only the maptrack limit: The function acts on current's list
-only, so races on list accesses are impossible even without the lock.
-
-Otoh list access races are possible between __get_maptrack_handle() and
-put_maptrack_handle(), due to the invocation of the former for other
-than current from steal_maptrack_handle(). Introduce a per-vCPU lock
-for list accesses to become race free again. This lock will be
-uncontended except when it becomes necessary to take the steal path,
-i.e. in the common case there should be no meaningful performance
-impact.
-
-When in get_maptrack_handle adds a stolen entry to a fresh, empty,
-freelist, we think that there is probably no concurrency. However,
-this is not a fast path and adding the locking there makes the code
-clearly correct.
-
-Also, while we are here: the stolen maptrack_entry's tail pointer was
-not properly set. Set it.
-
-This is XSA-228.
-
-Reported-by: Ian Jackson <ian.jackson@eu.citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Signed-off-by: Ian Jackson <Ian.Jackson@eu.citrix.com>
----
- docs/misc/grant-tables.txt | 7 ++++++-
- xen/common/grant_table.c | 30 ++++++++++++++++++++++++------
- xen/include/xen/grant_table.h | 2 +-
- xen/include/xen/sched.h | 1 +
- 4 files changed, 32 insertions(+), 8 deletions(-)
-
-diff --git a/docs/misc/grant-tables.txt b/docs/misc/grant-tables.txt
-index 417ce2d..64da5cf 100644
---- a/docs/misc/grant-tables.txt
-+++ b/docs/misc/grant-tables.txt
-@@ -87,7 +87,8 @@ is complete.
- inconsistent grant table state such as current
- version, partially initialized active table pages,
- etc.
-- grant_table->maptrack_lock : spinlock used to protect the maptrack free list
-+ grant_table->maptrack_lock : spinlock used to protect the maptrack limit
-+ v->maptrack_freelist_lock : spinlock used to protect the maptrack free list
- active_grant_entry->lock : spinlock used to serialize modifications to
- active entries
-
-@@ -102,6 +103,10 @@ is complete.
- The maptrack free list is protected by its own spinlock. The maptrack
- lock may be locked while holding the grant table lock.
-
-+ The maptrack_freelist_lock is an innermost lock. It may be locked
-+ while holding other locks, but no other locks may be acquired within
-+ it.
-+
- Active entries are obtained by calling active_entry_acquire(gt, ref).
- This function returns a pointer to the active entry after locking its
- spinlock. The caller must hold the grant table read lock before
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index ae34547..ee33bd8 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -304,11 +304,16 @@ __get_maptrack_handle(
- {
- unsigned int head, next, prev_head;
-
-+ spin_lock(&v->maptrack_freelist_lock);
-+
- do {
- /* No maptrack pages allocated for this VCPU yet? */
- head = read_atomic(&v->maptrack_head);
- if ( unlikely(head == MAPTRACK_TAIL) )
-+ {
-+ spin_unlock(&v->maptrack_freelist_lock);
- return -1;
-+ }
-
- /*
- * Always keep one entry in the free list to make it easier to
-@@ -316,12 +321,17 @@ __get_maptrack_handle(
- */
- next = read_atomic(&maptrack_entry(t, head).ref);
- if ( unlikely(next == MAPTRACK_TAIL) )
-+ {
-+ spin_unlock(&v->maptrack_freelist_lock);
- return -1;
-+ }
-
- prev_head = head;
- head = cmpxchg(&v->maptrack_head, prev_head, next);
- } while ( head != prev_head );
-
-+ spin_unlock(&v->maptrack_freelist_lock);
-+
- return head;
- }
-
-@@ -380,6 +390,8 @@ put_maptrack_handle(
- /* 2. Add entry to the tail of the list on the original VCPU. */
- v = currd->vcpu[maptrack_entry(t, handle).vcpu];
-
-+ spin_lock(&v->maptrack_freelist_lock);
-+
- cur_tail = read_atomic(&v->maptrack_tail);
- do {
- prev_tail = cur_tail;
-@@ -388,6 +400,8 @@ put_maptrack_handle(
-
- /* 3. Update the old tail entry to point to the new entry. */
- write_atomic(&maptrack_entry(t, prev_tail).ref, handle);
-+
-+ spin_unlock(&v->maptrack_freelist_lock);
- }
-
- static inline int
-@@ -411,10 +425,6 @@ get_maptrack_handle(
- */
- if ( nr_maptrack_frames(lgt) >= max_maptrack_frames )
- {
-- /*
-- * Can drop the lock since no other VCPU can be adding a new
-- * frame once they've run out.
-- */
- spin_unlock(&lgt->maptrack_lock);
-
- /*
-@@ -426,8 +436,12 @@ get_maptrack_handle(
- handle = steal_maptrack_handle(lgt, curr);
- if ( handle == -1 )
- return -1;
-+ spin_lock(&curr->maptrack_freelist_lock);
-+ maptrack_entry(lgt, handle).ref = MAPTRACK_TAIL;
- curr->maptrack_tail = handle;
-- write_atomic(&curr->maptrack_head, handle);
-+ if ( curr->maptrack_head == MAPTRACK_TAIL )
-+ write_atomic(&curr->maptrack_head, handle);
-+ spin_unlock(&curr->maptrack_freelist_lock);
- }
- return steal_maptrack_handle(lgt, curr);
- }
-@@ -460,12 +474,15 @@ get_maptrack_handle(
- smp_wmb();
- lgt->maptrack_limit += MAPTRACK_PER_PAGE;
-
-+ spin_unlock(&lgt->maptrack_lock);
-+ spin_lock(&curr->maptrack_freelist_lock);
-+
- do {
- new_mt[i - 1].ref = read_atomic(&curr->maptrack_head);
- head = cmpxchg(&curr->maptrack_head, new_mt[i - 1].ref, handle + 1);
- } while ( head != new_mt[i - 1].ref );
-
-- spin_unlock(&lgt->maptrack_lock);
-+ spin_unlock(&curr->maptrack_freelist_lock);
-
- return handle;
- }
-@@ -3475,6 +3492,7 @@ grant_table_destroy(
-
- void grant_table_init_vcpu(struct vcpu *v)
- {
-+ spin_lock_init(&v->maptrack_freelist_lock);
- v->maptrack_head = MAPTRACK_TAIL;
- v->maptrack_tail = MAPTRACK_TAIL;
- }
-diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
-index 4e77899..100f2b3 100644
---- a/xen/include/xen/grant_table.h
-+++ b/xen/include/xen/grant_table.h
-@@ -78,7 +78,7 @@ struct grant_table {
- /* Mapping tracking table per vcpu. */
- struct grant_mapping **maptrack;
- unsigned int maptrack_limit;
-- /* Lock protecting the maptrack page list, head, and limit */
-+ /* Lock protecting the maptrack limit */
- spinlock_t maptrack_lock;
- /* The defined versions are 1 and 2. Set to 0 if we don't know
- what version to use yet. */
-diff --git a/xen/include/xen/sched.h b/xen/include/xen/sched.h
-index 6673b27..8690f29 100644
---- a/xen/include/xen/sched.h
-+++ b/xen/include/xen/sched.h
-@@ -230,6 +230,7 @@ struct vcpu
- int controller_pause_count;
-
- /* Grant table map tracking. */
-+ spinlock_t maptrack_freelist_lock;
- unsigned int maptrack_head;
- unsigned int maptrack_tail;
-
---
-2.1.4
-
diff --git a/main/xen/xsa229.patch b/main/xen/xsa229.patch
deleted file mode 100644
index 47e9538436..0000000000
--- a/main/xen/xsa229.patch
+++ /dev/null
@@ -1,59 +0,0 @@
-From 84882133e793299f685991e20a9631acfd0a5608 Mon Sep 17 00:00:00 2001
-From: Roger Pau Monne <roger.pau@citrix.com>
-Date: Tue, 18 Jul 2017 15:01:00 +0100
-Subject: xen: fix bio vec merging
-MIME-Version: 1.0
-Content-Type: text/plain; charset=UTF-8
-Content-Transfer-Encoding: 8bit
-
-The current test for bio vec merging is not fully accurate and can be
-tricked into merging bios when certain grant combinations are used.
-The result of these malicious bio merges is a bio that extends past
-the memory page used by any of the originating bios.
-
-Take into account the following scenario, where a guest creates two
-grant references that point to the same mfn, ie: grant 1 -> mfn A,
-grant 2 -> mfn A.
-
-These references are then used in a PV block request, and mapped by
-the backend domain, thus obtaining two different pfns that point to
-the same mfn, pfn B -> mfn A, pfn C -> mfn A.
-
-If those grants happen to be used in two consecutive sectors of a disk
-IO operation becoming two different bios in the backend domain, the
-checks in xen_biovec_phys_mergeable will succeed, because bfn1 == bfn2
-(they both point to the same mfn). However due to the bio merging,
-the backend domain will end up with a bio that expands past mfn A into
-mfn A + 1.
-
-Fix this by making sure the check in xen_biovec_phys_mergeable takes
-into account the offset and the length of the bio, this basically
-replicates whats done in __BIOVEC_PHYS_MERGEABLE using mfns (bus
-addresses). While there also remove the usage of
-__BIOVEC_PHYS_MERGEABLE, since that's already checked by the callers
-of xen_biovec_phys_mergeable.
-
-Reported-by: "Jan H. Schönherr" <jschoenh@amazon.de>
-Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
-Reviewed-by: Juergen Gross <jgross@suse.com>
----
- drivers/xen/biomerge.c | 3 +--
- 1 file changed, 1 insertion(+), 2 deletions(-)
-
-diff --git a/drivers/xen/biomerge.c b/drivers/xen/biomerge.c
-index 4da69dbf7dca..1bdd02a6d6ac 100644
---- a/drivers/xen/biomerge.c
-+++ b/drivers/xen/biomerge.c
-@@ -10,8 +10,7 @@ bool xen_biovec_phys_mergeable(const struct bio_vec *vec1,
- unsigned long bfn1 = pfn_to_bfn(page_to_pfn(vec1->bv_page));
- unsigned long bfn2 = pfn_to_bfn(page_to_pfn(vec2->bv_page));
-
-- return __BIOVEC_PHYS_MERGEABLE(vec1, vec2) &&
-- ((bfn1 == bfn2) || ((bfn1+1) == bfn2));
-+ return bfn1 + PFN_DOWN(vec1->bv_offset + vec1->bv_len) == bfn2;
- #else
- /*
- * XXX: Add support for merging bio_vec when using different page
---
-2.11.0 (Apple Git-81)
-
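
The corrected XSA-229 predicate reads as: the first segment's data must end exactly at the frame where the second begins, in bus-address space. A sketch with illustrative stand-in types:

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_SHIFT 12
    #define PFN_DOWN(x) ((x) >> PAGE_SHIFT)

    struct seg {              /* illustrative stand-in for a bio_vec */
        uint64_t bfn;         /* bus frame number the page maps to */
        uint32_t offset, len; /* data position within that frame */
    };

    /* s2 may follow s1 only if s1's data ends exactly at the frame
     * where s2 begins, in bus-address space. */
    static bool bus_mergeable(const struct seg *s1, const struct seg *s2)
    {
        return s1->bfn + PFN_DOWN(s1->offset + s1->len) == s2->bfn;
    }

With two grants aliasing one mfn, bfn1 == bfn2; a full-page first segment (offset 0, len 4096) then requires bfn1 + 1 == bfn2, so the merge that the old equality test accepted is refused.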
diff --git a/main/xen/xsa230.patch b/main/xen/xsa230.patch
deleted file mode 100644
index c3b50c8aaa..0000000000
--- a/main/xen/xsa230.patch
+++ /dev/null
@@ -1,38 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: gnttab: correct pin status fixup for copy
-
-Regardless of copy operations only setting GNTPIN_hst*, GNTPIN_dev*
-also need to be taken into account when deciding whether to clear
-_GTF_{read,writ}ing. At least for consistency with code elsewhere the
-read part better doesn't use any mask at all.
-
-This is XSA-230.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
-index ae34547..9c9d33c 100644
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -2107,10 +2107,10 @@ __release_grant_for_copy(
- static void __fixup_status_for_copy_pin(const struct active_grant_entry *act,
- uint16_t *status)
- {
-- if ( !(act->pin & GNTPIN_hstw_mask) )
-+ if ( !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) )
- gnttab_clear_flag(_GTF_writing, status);
-
-- if ( !(act->pin & GNTPIN_hstr_mask) )
-+ if ( !act->pin )
- gnttab_clear_flag(_GTF_reading, status);
- }
-
-@@ -2318,7 +2318,7 @@ __acquire_grant_for_copy(
-
- unlock_out_clear:
- if ( !(readonly) &&
-- !(act->pin & GNTPIN_hstw_mask) )
-+ !(act->pin & (GNTPIN_hstw_mask | GNTPIN_devw_mask)) )
- gnttab_clear_flag(_GTF_writing, status);
-
- if ( !act->pin )
diff --git a/main/xen/xsa231-4.7.patch b/main/xen/xsa231-4.7.patch
deleted file mode 100644
index 4474949a37..0000000000
--- a/main/xen/xsa231-4.7.patch
+++ /dev/null
@@ -1,108 +0,0 @@
-From: George Dunlap <george.dunlap@citrix.com>
-Subject: xen/mm: make sure node is less than MAX_NUMNODES
-
-The output of MEMF_get_node(memflags) can be as large as nodeid_t can
-hold (currently 255). This is then used as an index to arrays of size
-MAX_NUMNODE, which is 64 on x86 and 1 on ARM, can be passed in by an
-untrusted guest (via memory_exchange and increase_reservation) and is
-not currently bounds-checked.
-
-Check the value in page_alloc.c before using it, and also check the
-value in the hypercall call sites and return -EINVAL if appropriate.
-Don't permit domains other than the hardware or control domain to
-allocate node-constrained memory.
-
-This is XSA-231.
-
-Reported-by: Matthew Daley <mattd@bugfuzz.com>
-Signed-off-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -390,6 +390,31 @@ static void decrease_reservation(struct
- a->nr_done = i;
- }
-
-+static bool_t propagate_node(unsigned int xmf, unsigned int *memflags)
-+{
-+ const struct domain *currd = current->domain;
-+
-+ BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
-+ BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
-+
-+ if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
-+ return 1;
-+
-+ if ( is_hardware_domain(currd) || is_control_domain(currd) )
-+ {
-+ if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
-+ return 0;
-+
-+ *memflags |= MEMF_node(XENMEMF_get_node(xmf));
-+ if ( xmf & XENMEMF_exact_node_request )
-+ *memflags |= MEMF_exact_node;
-+ }
-+ else if ( xmf & XENMEMF_exact_node_request )
-+ return 0;
-+
-+ return 1;
-+}
-+
- static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
- {
- struct xen_memory_exchange exch;
-@@ -462,6 +487,12 @@ static long memory_exchange(XEN_GUEST_HA
- }
- }
-
-+ if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
-+ {
-+ rc = -EINVAL;
-+ goto fail_early;
-+ }
-+
- d = rcu_lock_domain_by_any_id(exch.in.domid);
- if ( d == NULL )
- {
-@@ -480,7 +511,6 @@ static long memory_exchange(XEN_GUEST_HA
- d,
- XENMEMF_get_address_bits(exch.out.mem_flags) ? :
- (BITS_PER_LONG+PAGE_SHIFT)));
-- memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
-
- for ( i = (exch.nr_exchanged >> in_chunk_order);
- i < (exch.in.nr_extents >> in_chunk_order);
-@@ -834,12 +864,8 @@ static int construct_memop_from_reservat
- }
- read_unlock(&d->vnuma_rwlock);
- }
-- else
-- {
-- a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
-- if ( r->mem_flags & XENMEMF_exact_node_request )
-- a->memflags |= MEMF_exact_node;
-- }
-+ else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
-+ return -EINVAL;
-
- return 0;
- }
---- a/xen/common/page_alloc.c
-+++ b/xen/common/page_alloc.c
-@@ -711,9 +711,13 @@ static struct page_info *alloc_heap_page
- if ( node >= MAX_NUMNODES )
- node = cpu_to_node(smp_processor_id());
- }
-+ else if ( unlikely(node >= MAX_NUMNODES) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ return NULL;
-+ }
- first_node = node;
-
-- ASSERT(node < MAX_NUMNODES);
- ASSERT(zone_lo <= zone_hi);
- ASSERT(zone_hi < NR_ZONES);
-
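
Reduced to its core, the XSA-231 fix is an ordinary bounds check on an attacker-supplied index before it ever reaches a MAX_NUMNODES-sized array. A sketch with illustrative constants (nodeid_t tops out at 255, per the description above; privilege checks omitted):

    #include <stdbool.h>

    #define MAX_NUMNODES 64      /* x86 value cited above */
    #define NUMA_NO_NODE 0xffu   /* nodeid_t is 8 bits wide */

    static bool node_ok(unsigned int node)
    {
        return node == NUMA_NO_NODE || node < MAX_NUMNODES;
    }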
diff --git a/main/xen/xsa232.patch b/main/xen/xsa232.patch
deleted file mode 100644
index 9e5f35c7d6..0000000000
--- a/main/xen/xsa232.patch
+++ /dev/null
@@ -1,23 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: grant_table: fix GNTTABOP_cache_flush handling
-
-Don't fall over a NULL grant_table pointer when the owner of the domain
-is a system domain (DOMID_{XEN,IO} etc).
-
-This is XSA-232.
-
-Reported-by: Matthew Daley <mattd@bugfuzz.com>
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -3053,7 +3053,7 @@ static int cache_flush(gnttab_cache_flus
-
- page = mfn_to_page(mfn);
- owner = page_get_owner_and_reference(page);
-- if ( !owner )
-+ if ( !owner || !owner->grant_table )
- {
- rcu_unlock_domain(d);
- return -EPERM;
diff --git a/main/xen/xsa233.patch b/main/xen/xsa233.patch
deleted file mode 100644
index 6013c52b41..0000000000
--- a/main/xen/xsa233.patch
+++ /dev/null
@@ -1,52 +0,0 @@
-From: Juergen Gross <jgross@suse.com>
-Subject: tools/xenstore: dont unlink connection object twice
-
-A connection object of a domain with associated stubdom has two
-parents: the domain and the stubdom. When cleaning up the list of
-active domains in domain_cleanup() make sure not to unlink the
-connection twice from the same domain. This could happen when the
-domain and its stubdom are being destroyed at the same time leading
-to the domain loop being entered twice.
-
-Additionally don't use talloc_free() in this case as it will remove
-a random parent link, leading eventually to a memory leak. Use
-talloc_unlink() instead specifying the context from which the
-connection object should be removed.
-
-This is XSA-233.
-
-Reported-by: Eric Chanudet <chanudete@ainfosec.com>
-Signed-off-by: Juergen Gross <jgross@suse.com>
-Reviewed-by: Ian Jackson <ian.jackson@eu.citrix.com>
-
---- a/tools/xenstore/xenstored_domain.c
-+++ b/tools/xenstore/xenstored_domain.c
-@@ -221,10 +221,11 @@ static int destroy_domain(void *_domain)
- static void domain_cleanup(void)
- {
- xc_dominfo_t dominfo;
-- struct domain *domain, *tmp;
-+ struct domain *domain;
- int notify = 0;
-
-- list_for_each_entry_safe(domain, tmp, &domains, list) {
-+ again:
-+ list_for_each_entry(domain, &domains, list) {
- if (xc_domain_getinfo(*xc_handle, domain->domid, 1,
- &dominfo) == 1 &&
- dominfo.domid == domain->domid) {
-@@ -236,8 +237,12 @@ static void domain_cleanup(void)
- if (!dominfo.dying)
- continue;
- }
-- talloc_free(domain->conn);
-- notify = 0; /* destroy_domain() fires the watch */
-+ if (domain->conn) {
-+ talloc_unlink(talloc_autofree_context(), domain->conn);
-+ domain->conn = NULL;
-+ notify = 0; /* destroy_domain() fires the watch */
-+ goto again;
-+ }
- }
-
- if (notify)
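
The talloc discipline the XSA-233 patch relies on, in a reduced userspace sketch (assuming libtalloc's stock API; as the description above notes, talloc_free() on a multi-parent object drops an unspecified parent link, which is exactly why the fix names the context explicitly):

    #include <talloc.h>   /* libtalloc */

    void two_parent_example(void)
    {
        void *parent_a = talloc_new(NULL);
        void *parent_b = talloc_new(NULL);
        char *obj = talloc_strdup(parent_a, "connection");

        talloc_reference(parent_b, obj);  /* obj now has two parents */

        /* talloc_free(obj) here would drop an unspecified parent link
         * (the failure mode described above); name the context instead: */
        talloc_unlink(parent_a, obj);     /* parent_b still holds obj */

        talloc_free(parent_b);            /* releases obj with it */
        talloc_free(parent_a);
    }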
diff --git a/main/xen/xsa234-4.6.patch b/main/xen/xsa234-4.6.patch
deleted file mode 100644
index 6bd95f2d3a..0000000000
--- a/main/xen/xsa234-4.6.patch
+++ /dev/null
@@ -1,185 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: gnttab: also validate PTE permissions upon destroy/replace
-
-In order for PTE handling to match up with the reference counting done
-by common code, presence and writability of grant mapping PTEs must
-also be taken into account; validating just the frame number is not
-enough. This is in particular relevant if a guest fiddles with grant
-PTEs via non-grant hypercalls.
-
-Note that the flags being passed to replace_grant_host_mapping()
-already happen to be those of the existing mapping, so no new function
-parameter is needed.
-
-This is XSA-234.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -3930,7 +3930,8 @@ static int create_grant_pte_mapping(
- }
-
- static int destroy_grant_pte_mapping(
-- uint64_t addr, unsigned long frame, struct domain *d)
-+ uint64_t addr, unsigned long frame, unsigned int grant_pte_flags,
-+ struct domain *d)
- {
- int rc = GNTST_okay;
- void *va;
-@@ -3976,16 +3977,27 @@ static int destroy_grant_pte_mapping(
-
- ol1e = *(l1_pgentry_t *)va;
-
-- /* Check that the virtual address supplied is actually mapped to frame. */
-- if ( unlikely(l1e_get_pfn(ol1e) != frame) )
-+ /*
-+ * Check that the PTE supplied actually maps frame (with appropriate
-+ * permissions).
-+ */
-+ if ( unlikely(l1e_get_pfn(ol1e) != frame) ||
-+ unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
-+ (_PAGE_PRESENT | _PAGE_RW)) )
- {
- page_unlock(page);
-- MEM_LOG("PTE entry %lx for address %"PRIx64" doesn't match frame %lx",
-- (unsigned long)l1e_get_intpte(ol1e), addr, frame);
-+ MEM_LOG("PTE %"PRIpte" at %"PRIx64" doesn't match grant (%"PRIpte")",
-+ l1e_get_intpte(ol1e), addr,
-+ l1e_get_intpte(l1e_from_pfn(frame, grant_pte_flags)));
- rc = GNTST_general_error;
- goto failed;
- }
-
-+ if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
-+ ~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) )
-+ MEM_LOG("PTE flags %x at %"PRIx64" don't match grant (%x)\n",
-+ l1e_get_flags(ol1e), addr, grant_pte_flags);
-+
- /* Delete pagetable entry. */
- if ( unlikely(!UPDATE_ENTRY
- (l1,
-@@ -3994,7 +4006,7 @@ static int destroy_grant_pte_mapping(
- 0)) )
- {
- page_unlock(page);
-- MEM_LOG("Cannot delete PTE entry at %p", va);
-+ MEM_LOG("Cannot delete PTE entry at %"PRIx64, addr);
- rc = GNTST_general_error;
- goto failed;
- }
-@@ -4062,7 +4074,8 @@ static int create_grant_va_mapping(
- }
-
- static int replace_grant_va_mapping(
-- unsigned long addr, unsigned long frame, l1_pgentry_t nl1e, struct vcpu *v)
-+ unsigned long addr, unsigned long frame, unsigned int grant_pte_flags,
-+ l1_pgentry_t nl1e, struct vcpu *v)
- {
- l1_pgentry_t *pl1e, ol1e;
- unsigned long gl1mfn;
-@@ -4098,19 +4111,30 @@ static int replace_grant_va_mapping(
-
- ol1e = *pl1e;
-
-- /* Check that the virtual address supplied is actually mapped to frame. */
-- if ( unlikely(l1e_get_pfn(ol1e) != frame) )
-- {
-- MEM_LOG("PTE entry %lx for address %lx doesn't match frame %lx",
-- l1e_get_pfn(ol1e), addr, frame);
-+ /*
-+ * Check that the virtual address supplied is actually mapped to frame
-+ * (with appropriate permissions).
-+ */
-+ if ( unlikely(l1e_get_pfn(ol1e) != frame) ||
-+ unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
-+ (_PAGE_PRESENT | _PAGE_RW)) )
-+ {
-+ MEM_LOG("PTE %"PRIpte" for %lx doesn't match grant (%"PRIpte")",
-+ l1e_get_intpte(ol1e), addr,
-+ l1e_get_intpte(l1e_from_pfn(frame, grant_pte_flags)));
- rc = GNTST_general_error;
- goto unlock_and_out;
- }
-
-+ if ( unlikely((l1e_get_flags(ol1e) ^ grant_pte_flags) &
-+ ~(_PAGE_AVAIL | PAGE_CACHE_ATTRS)) )
-+ MEM_LOG("PTE flags %x for %"PRIx64" don't match grant (%x)",
-+ l1e_get_flags(ol1e), addr, grant_pte_flags);
-+
- /* Delete pagetable entry. */
- if ( unlikely(!UPDATE_ENTRY(l1, pl1e, ol1e, nl1e, gl1mfn, v, 0)) )
- {
-- MEM_LOG("Cannot delete PTE entry at %p", (unsigned long *)pl1e);
-+ MEM_LOG("Cannot delete PTE entry for %"PRIx64, addr);
- rc = GNTST_general_error;
- goto unlock_and_out;
- }
-@@ -4124,9 +4148,11 @@ static int replace_grant_va_mapping(
- }
-
- static int destroy_grant_va_mapping(
-- unsigned long addr, unsigned long frame, struct vcpu *v)
-+ unsigned long addr, unsigned long frame, unsigned int grant_pte_flags,
-+ struct vcpu *v)
- {
-- return replace_grant_va_mapping(addr, frame, l1e_empty(), v);
-+ return replace_grant_va_mapping(addr, frame, grant_pte_flags,
-+ l1e_empty(), v);
- }
-
- static int create_grant_p2m_mapping(uint64_t addr, unsigned long frame,
-@@ -4219,21 +4245,40 @@ int replace_grant_host_mapping(
- unsigned long gl1mfn;
- struct page_info *l1pg;
- int rc;
-+ unsigned int grant_pte_flags;
-
- if ( paging_mode_external(current->domain) )
- return replace_grant_p2m_mapping(addr, frame, new_addr, flags);
-
-+ grant_pte_flags =
-+ _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_GNTTAB | _PAGE_NX;
-+
-+ if ( flags & GNTMAP_application_map )
-+ grant_pte_flags |= _PAGE_USER;
-+ if ( !(flags & GNTMAP_readonly) )
-+ grant_pte_flags |= _PAGE_RW;
-+ /*
-+ * On top of the explicit settings done by create_grant_host_mapping()
-+ * also open-code relevant parts of adjust_guest_l1e(). Don't mirror
-+ * available and cachability flags, though.
-+ */
-+ if ( !is_pv_32bit_domain(curr->domain) )
-+ grant_pte_flags |= (grant_pte_flags & _PAGE_USER)
-+ ? _PAGE_GLOBAL
-+ : _PAGE_GUEST_KERNEL | _PAGE_USER;
-+
- if ( flags & GNTMAP_contains_pte )
- {
- if ( !new_addr )
-- return destroy_grant_pte_mapping(addr, frame, curr->domain);
-+ return destroy_grant_pte_mapping(addr, frame, grant_pte_flags,
-+ curr->domain);
-
- MEM_LOG("Unsupported grant table operation");
- return GNTST_general_error;
- }
-
- if ( !new_addr )
-- return destroy_grant_va_mapping(addr, frame, curr);
-+ return destroy_grant_va_mapping(addr, frame, grant_pte_flags, curr);
-
- pl1e = guest_map_l1e(curr, new_addr, &gl1mfn);
- if ( !pl1e )
-@@ -4281,7 +4326,7 @@ int replace_grant_host_mapping(
- put_page(l1pg);
- guest_unmap_l1e(curr, pl1e);
-
-- rc = replace_grant_va_mapping(addr, frame, ol1e, curr);
-+ rc = replace_grant_va_mapping(addr, frame, grant_pte_flags, ol1e, curr);
- if ( rc && !paging_mode_refcounts(curr->domain) )
- put_page_from_l1e(ol1e, curr->domain);
-
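
The repeated `(l1e_get_flags(ol1e) ^ grant_pte_flags) & mask` tests in the XSA-234 patch use a standard XOR idiom: bits set in a ^ b are exactly the bits on which the two flag words disagree, so masking the XOR selects just the flags that must match. A minimal sketch (bit values illustrative, not Xen's):

    #include <stdbool.h>
    #include <stdint.h>

    #define PAGE_PRESENT 0x001u
    #define PAGE_RW      0x002u

    /* True when the PTE agrees with the grant on presence and writability. */
    static bool grant_flags_match(uint32_t pte_flags, uint32_t grant_flags)
    {
        return ((pte_flags ^ grant_flags) & (PAGE_PRESENT | PAGE_RW)) == 0;
    }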
diff --git a/main/xen/xsa235-4.7.patch b/main/xen/xsa235-4.7.patch
deleted file mode 100644
index 54f3aa512d..0000000000
--- a/main/xen/xsa235-4.7.patch
+++ /dev/null
@@ -1,49 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: arm/mm: release grant lock on xenmem_add_to_physmap_one() error paths
-
-Commit 55021ff9ab ("xen/arm: add_to_physmap_one: Avoid to map mfn 0 if
-an error occurs") introduced error paths not releasing the grant table
-lock. Replace them by a suitable check after the lock was dropped.
-
-This is XSA-235.
-
-Reported-by: Wei Liu <wei.liu2@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Julien Grall <julien.grall@arm.com>
-
---- a/xen/arch/arm/mm.c
-+++ b/xen/arch/arm/mm.c
-@@ -1081,7 +1081,7 @@ int xenmem_add_to_physmap_one(
- if ( idx < nr_status_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->status[idx]);
- else
-- return -EINVAL;
-+ mfn = INVALID_MFN;
- }
- else
- {
-@@ -1092,14 +1092,21 @@ int xenmem_add_to_physmap_one(
- if ( idx < nr_grant_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
- else
-- return -EINVAL;
-+ mfn = INVALID_MFN;
- }
-
-- d->arch.grant_table_gpfn[idx] = gpfn;
-+ if ( mfn != INVALID_MFN )
-+ {
-+ d->arch.grant_table_gpfn[idx] = gpfn;
-
-- t = p2m_ram_rw;
-+ t = p2m_ram_rw;
-+ }
-
- grant_write_unlock(d->grant_table);
-+
-+ if ( mfn == INVALID_MFN )
-+ return -EINVAL;
-+
- break;
- case XENMAPSPACE_shared_info:
- if ( idx != 0 )
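
The shape of the XSA-235 fix is a general lock-hygiene pattern: never return from inside the critical section; record failure in a sentinel, unlock at a single point, then convert the sentinel into an error. A sketch under that reading (pthread mutex standing in for the grant lock):

    #include <pthread.h>
    #include <stdint.h>

    #define INVALID_MFN UINT64_MAX

    static int lookup_frame(pthread_mutex_t *lock, unsigned int idx,
                            unsigned int limit, const uint64_t *table,
                            uint64_t *out)
    {
        uint64_t mfn = INVALID_MFN;     /* sentinel doubles as the error flag */

        pthread_mutex_lock(lock);
        if (idx < limit)
            mfn = table[idx];
        pthread_mutex_unlock(lock);     /* every path releases the lock */

        if (mfn == INVALID_MFN)
            return -1;                  /* -EINVAL in the real code */
        *out = mfn;
        return 0;
    }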
diff --git a/main/xen/xsa236-4.9.patch b/main/xen/xsa236-4.9.patch
deleted file mode 100644
index 203025dbae..0000000000
--- a/main/xen/xsa236-4.9.patch
+++ /dev/null
@@ -1,66 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: gnttab: fix pin count / page reference race
-
-Dropping page references before decrementing pin counts is a bad idea
-if assumptions are being made that a non-zero pin count implies a valid
-page. Fix the order of operations in gnttab_copy_release_buf(), but at
-the same time also remove the assertion that was found to trigger:
-map_grant_ref() also has the potential of causing a race here, and
-changing the order of operations there would likely be quite a bit more
-involved.
-
-This is XSA-236.
-
-Reported-by: Pawel Wieczorkiewicz <wipawel@amazon.de>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -2330,9 +2330,20 @@ __acquire_grant_for_copy(
- td = page_get_owner_and_reference(*page);
- /*
- * act->pin being non-zero should guarantee the page to have a
-- * non-zero refcount and hence a valid owner.
-+ * non-zero refcount and hence a valid owner (matching the one on
-+ * record), with one exception: If the owning domain is dying we
-+ * had better not make implications from pin count (map_grant_ref()
-+ * updates pin counts before obtaining page references, for
-+ * example).
- */
-- ASSERT(td);
-+ if ( td != rd || rd->is_dying )
-+ {
-+ if ( td )
-+ put_page(*page);
-+ *page = NULL;
-+ rc = GNTST_bad_domain;
-+ goto unlock_out_clear;
-+ }
- }
-
- act->pin += readonly ? GNTPIN_hstr_inc : GNTPIN_hstw_inc;
-@@ -2451,6 +2462,11 @@ static void gnttab_copy_release_buf(stru
- unmap_domain_page(buf->virt);
- buf->virt = NULL;
- }
-+ if ( buf->have_grant )
-+ {
-+ __release_grant_for_copy(buf->domain, buf->ptr.u.ref, buf->read_only);
-+ buf->have_grant = 0;
-+ }
- if ( buf->have_type )
- {
- put_page_type(buf->page);
-@@ -2461,11 +2477,6 @@ static void gnttab_copy_release_buf(stru
- put_page(buf->page);
- buf->page = NULL;
- }
-- if ( buf->have_grant )
-- {
-- __release_grant_for_copy(buf->domain, buf->ptr.u.ref, buf->read_only);
-- buf->have_grant = 0;
-- }
- }
-
- static int gnttab_copy_claim_buf(const struct gnttab_copy *op,
diff --git a/main/xen/xsa238.patch b/main/xen/xsa238.patch
deleted file mode 100644
index 0d7d48fef8..0000000000
--- a/main/xen/xsa238.patch
+++ /dev/null
@@ -1,45 +0,0 @@
-From cdc2887076b19b39fab9faec495082586f3113df Mon Sep 17 00:00:00 2001
-From: XenProject Security Team <security@xenproject.org>
-Date: Tue, 5 Sep 2017 13:41:37 +0200
-Subject: x86/ioreq server: correctly handle bogus
- XEN_DMOP_{,un}map_io_range_to_ioreq_server arguments
-
-Misbehaving device model can pass incorrect XEN_DMOP_map/
-unmap_io_range_to_ioreq_server arguments, namely end < start when
-specifying address range. When this happens we hit ASSERT(s <= e) in
-rangeset_contains_range()/rangeset_overlaps_range() with debug builds.
-Production builds will not trap right away but may misbehave later
-while handling such bogus ranges.
-
-This is XSA-238.
-
-Signed-off-by: Vitaly Kuznetsov <vkuznets@redhat.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/hvm/ioreq.c | 6 ++++++
- 1 file changed, 6 insertions(+)
-
-diff --git a/xen/arch/x86/hvm/ioreq.c b/xen/arch/x86/hvm/ioreq.c
-index b2a8b0e986..8c8bf1f0ec 100644
---- a/xen/arch/x86/hvm/ioreq.c
-+++ b/xen/arch/x86/hvm/ioreq.c
-@@ -820,6 +820,9 @@ int hvm_map_io_range_to_ioreq_server(struct domain *d, ioservid_t id,
- struct hvm_ioreq_server *s;
- int rc;
-
-+ if ( start > end )
-+ return -EINVAL;
-+
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
-
- rc = -ENOENT;
-@@ -872,6 +875,9 @@ int hvm_unmap_io_range_from_ioreq_server(struct domain *d, ioservid_t id,
- struct hvm_ioreq_server *s;
- int rc;
-
-+ if ( start > end )
-+ return -EINVAL;
-+
- spin_lock_recursive(&d->arch.hvm_domain.ioreq_server.lock);
-
- rc = -ENOENT;
diff --git a/main/xen/xsa239.patch b/main/xen/xsa239.patch
deleted file mode 100644
index 5daecb5e47..0000000000
--- a/main/xen/xsa239.patch
+++ /dev/null
@@ -1,46 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/HVM: prefill partially used variable on emulation paths
-
-Certain handlers ignore the access size (vioapic_write() being the
-example this was found with), perhaps leading to subsequent reads
-seeing data that wasn't actually written by the guest. For
-consistency and extra safety also do this on the read path of
-hvm_process_io_intercept(), even if this doesn't directly affect what
-guests get to see, as we've supposedly already dealt with read handlers
-leaving data completely unitialized.
-
-This is XSA-239.
-
-Reported-by: Roger Pau Monné <roger.pau@citrix.com>
-Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/hvm/emulate.c
-+++ b/xen/arch/x86/hvm/emulate.c
-@@ -129,7 +129,7 @@ static int hvmemul_do_io(
- .count = *reps,
- .dir = dir,
- .df = df,
-- .data = data,
-+ .data = data_is_addr ? data : 0,
- .data_is_ptr = data_is_addr, /* ioreq_t field name is misleading */
- .state = STATE_IOREQ_READY,
- };
---- a/xen/arch/x86/hvm/intercept.c
-+++ b/xen/arch/x86/hvm/intercept.c
-@@ -127,6 +127,7 @@ int hvm_process_io_intercept(const struc
- addr = (p->type == IOREQ_TYPE_COPY) ?
- p->addr + step * i :
- p->addr;
-+ data = 0;
- rc = ops->read(handler, addr, p->size, &data);
- if ( rc != X86EMUL_OKAY )
- break;
-@@ -161,6 +162,7 @@ int hvm_process_io_intercept(const struc
- {
- if ( p->data_is_ptr )
- {
-+ data = 0;
- switch ( hvm_copy_from_guest_phys(&data, p->data + step * i,
- p->size) )
- {
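
The `data = 0;` lines the XSA-239 patch adds follow one rule: if a callee may honour fewer than `size` bytes, the destination must be prefilled, or stale bytes from an earlier iteration (or the stack) survive into what the guest later reads back. A sketch of the pattern with an assumed handler signature:

    #include <stdint.h>

    typedef int (*read_fn)(uint64_t addr, unsigned int size, uint64_t *val);

    static int intercept_read(read_fn op, uint64_t addr, unsigned int size,
                              uint64_t *out)
    {
        uint64_t data = 0;               /* the prefill from the patch above */
        int rc = op(addr, size, &data);  /* may write fewer than size bytes */

        if (rc == 0)
            *out = data;
        return rc;
    }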
diff --git a/main/xen/xsa241-4.8.patch b/main/xen/xsa241-4.8.patch
deleted file mode 100644
index e2370af761..0000000000
--- a/main/xen/xsa241-4.8.patch
+++ /dev/null
@@ -1,120 +0,0 @@
-x86: don't store possibly stale TLB flush time stamp
-
-While the timing window is extremely narrow, it is theoretically
-possible for an update to the TLB flush clock and a subsequent flush
-IPI to happen between the read and write parts of the update of the
-per-page stamp. Exclude this possibility by disabling interrupts
-across the update, preventing the IPI to be serviced in the middle.
-
-This is XSA-241.
-
-Reported-by: Jann Horn <jannh@google.com>
-Suggested-by: George Dunlap <george.dunlap@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/arm/smp.c
-+++ b/xen/arch/arm/smp.c
-@@ -1,4 +1,5 @@
- #include <xen/config.h>
-+#include <xen/mm.h>
- #include <asm/system.h>
- #include <asm/smp.h>
- #include <asm/cpregs.h>
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -2524,7 +2524,7 @@ static int _put_final_page_type(struct p
- */
- if ( !(shadow_mode_enabled(page_get_owner(page)) &&
- (page->count_info & PGC_page_table)) )
-- page->tlbflush_timestamp = tlbflush_current_time();
-+ page_set_tlbflush_timestamp(page);
- wmb();
- page->u.inuse.type_info--;
- }
-@@ -2534,7 +2534,7 @@ static int _put_final_page_type(struct p
- (PGT_count_mask|PGT_validated|PGT_partial)) == 1);
- if ( !(shadow_mode_enabled(page_get_owner(page)) &&
- (page->count_info & PGC_page_table)) )
-- page->tlbflush_timestamp = tlbflush_current_time();
-+ page_set_tlbflush_timestamp(page);
- wmb();
- page->u.inuse.type_info |= PGT_validated;
- }
-@@ -2588,7 +2588,7 @@ static int _put_page_type(struct page_in
- if ( ptpg && PGT_type_equal(x, ptpg->u.inuse.type_info) )
- {
- /*
-- * page_set_tlbflush_timestamp() accesses the same union
-+ * set_tlbflush_timestamp() accesses the same union
- * linear_pt_count lives in. Unvalidated page table pages,
- * however, should occur during domain destruction only
- * anyway. Updating of linear_pt_count luckily is not
-@@ -2609,7 +2609,7 @@ static int _put_page_type(struct page_in
- */
- if ( !(shadow_mode_enabled(page_get_owner(page)) &&
- (page->count_info & PGC_page_table)) )
-- page->tlbflush_timestamp = tlbflush_current_time();
-+ page_set_tlbflush_timestamp(page);
- }
-
- if ( likely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) == x) )
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1464,7 +1464,7 @@ void shadow_free(struct domain *d, mfn_t
- * TLBs when we reuse the page. Because the destructors leave the
- * contents of the pages in place, we can delay TLB flushes until
- * just before the allocator hands the page out again. */
-- sp->tlbflush_timestamp = tlbflush_current_time();
-+ page_set_tlbflush_timestamp(sp);
- perfc_decr(shadow_alloc_count);
- page_list_add_tail(sp, &d->arch.paging.shadow.freelist);
- sp = next;
---- a/xen/common/page_alloc.c
-+++ b/xen/common/page_alloc.c
-@@ -960,7 +960,7 @@ static void free_heap_pages(
- /* If a page has no owner it will need no safety TLB flush. */
- pg[i].u.free.need_tlbflush = (page_get_owner(&pg[i]) != NULL);
- if ( pg[i].u.free.need_tlbflush )
-- pg[i].tlbflush_timestamp = tlbflush_current_time();
-+ page_set_tlbflush_timestamp(&pg[i]);
-
- /* This page is not a guest frame any more. */
- page_set_owner(&pg[i], NULL); /* set_gpfn_from_mfn snoops pg owner */
---- a/xen/include/asm-arm/flushtlb.h
-+++ b/xen/include/asm-arm/flushtlb.h
-@@ -12,6 +12,11 @@ static inline void tlbflush_filter(cpuma
-
- #define tlbflush_current_time() (0)
-
-+static inline void page_set_tlbflush_timestamp(struct page_info *page)
-+{
-+ page->tlbflush_timestamp = tlbflush_current_time();
-+}
-+
- #if defined(CONFIG_ARM_32)
- # include <asm/arm32/flushtlb.h>
- #elif defined(CONFIG_ARM_64)
---- a/xen/include/asm-x86/flushtlb.h
-+++ b/xen/include/asm-x86/flushtlb.h
-@@ -23,6 +23,20 @@ DECLARE_PER_CPU(u32, tlbflush_time);
-
- #define tlbflush_current_time() tlbflush_clock
-
-+static inline void page_set_tlbflush_timestamp(struct page_info *page)
-+{
-+ /*
-+ * Prevent storing a stale time stamp, which could happen if an update
-+ * to tlbflush_clock plus a subsequent flush IPI happen between the
-+ * reading of tlbflush_clock and the writing of the struct page_info
-+ * field.
-+ */
-+ ASSERT(local_irq_is_enabled());
-+ local_irq_disable();
-+ page->tlbflush_timestamp = tlbflush_current_time();
-+ local_irq_enable();
-+}
-+
- /*
- * @cpu_stamp is the timestamp at last TLB flush for the CPU we are testing.
- * @lastuse_stamp is a timestamp taken when the PFN we are testing was last
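
The XSA-241 race closes because masking the asynchronous event makes the read-then-write pair atomic with respect to it. A loose userspace analogue, using signal masking where the hypervisor uses local_irq_disable() (illustrative only; the flush IPI is not a signal):

    #include <signal.h>

    volatile unsigned long clock_now;   /* advanced by an async handler */
    volatile unsigned long stamp;

    void set_stamp(void)
    {
        sigset_t set, old;

        sigemptyset(&set);
        sigaddset(&set, SIGUSR1);       /* stands in for the flush IPI */
        sigprocmask(SIG_BLOCK, &set, &old);
        stamp = clock_now;              /* read and write, no handler between */
        sigprocmask(SIG_SETMASK, &old, NULL);
    }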
diff --git a/main/xen/xsa242-4.9.patch b/main/xen/xsa242-4.9.patch
deleted file mode 100644
index 8adfa61fd7..0000000000
--- a/main/xen/xsa242-4.9.patch
+++ /dev/null
@@ -1,43 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: don't allow page_unlock() to drop the last type reference
-
-Only _put_page_type() does the necessary cleanup, and hence not all
-domain pages can be released during guest cleanup (leaving around
-zombie domains) if we get this wrong.
-
-This is XSA-242.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/mm.c
-+++ b/xen/arch/x86/mm.c
-@@ -1923,7 +1923,11 @@ void page_unlock(struct page_info *page)
-
- do {
- x = y;
-+ ASSERT((x & PGT_count_mask) && (x & PGT_locked));
-+
- nx = x - (1 | PGT_locked);
-+ /* We must not drop the last reference here. */
-+ ASSERT(nx & PGT_count_mask);
- } while ( (y = cmpxchg(&page->u.inuse.type_info, x, nx)) != x );
- }
-
-@@ -2611,6 +2615,17 @@ static int _put_page_type(struct page_in
- (page->count_info & PGC_page_table)) )
- page_set_tlbflush_timestamp(page);
- }
-+ else if ( unlikely((nx & (PGT_locked | PGT_count_mask)) ==
-+ (PGT_locked | 1)) )
-+ {
-+ /*
-+ * We must not drop the second to last reference when the page is
-+ * locked, as page_unlock() doesn't do any cleanup of the type.
-+ */
-+ cpu_relax();
-+ y = page->u.inuse.type_info;
-+ continue;
-+ }
-
- if ( likely((y = cmpxchg(&page->u.inuse.type_info, x, nx)) == x) )
- break;
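
The page_unlock() path above is a classic compare-exchange retry loop with an added floor: the new value must keep at least one type reference, since only _put_page_type() performs the cleanup. A C11 sketch (bit layout illustrative, not Xen's):

    #include <assert.h>
    #include <stdatomic.h>

    #define LOCKED_BIT  0x80000000u
    #define COUNT_MASK  0x7fffffffu

    void page_unlock_sketch(_Atomic unsigned int *type_info)
    {
        unsigned int x = atomic_load(type_info), nx;

        do {
            assert((x & COUNT_MASK) && (x & LOCKED_BIT)); /* held and counted */
            nx = x - (1 | LOCKED_BIT);   /* drop the lock bit and one count */
            assert(nx & COUNT_MASK);     /* never drop the last reference */
        } while (!atomic_compare_exchange_weak(type_info, &x, nx));
    }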
diff --git a/main/xen/xsa243-2.patch b/main/xen/xsa243-2.patch
deleted file mode 100644
index 1aca5d3dbd..0000000000
--- a/main/xen/xsa243-2.patch
+++ /dev/null
@@ -1,54 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/shadow: correct SH_LINEAR mapping detection in sh_guess_wrmap()
-
-The fix for XSA-243 / CVE-2017-15592 (c/s bf2b4eadcf379) introduced a change
-in behaviour for sh_guest_wrmap(), where it had to cope with no shadow linear
-mapping being present.
-
-As the name suggests, guest_vtable is a mapping of the guests pagetable, not
-Xen's pagetable, meaning that it isn't the pagetable we need to check for the
-shadow linear slot in.
-
-The practical upshot is that a shadow HVM vcpu which switches into 4-level
-paging mode, with an L4 pagetable that contains a mapping which aliases Xen's
-SH_LINEAR_PT_VIRT_START will fool the safety check for whether a SHADOW_LINEAR
-mapping is present. As the check passes (when it should have failed), Xen
-subsequently falls over the missing mapping with a pagefault such as:
-
- (XEN) Pagetable walk from ffff8140a0503880:
- (XEN) L4[0x102] = 000000046c218063 ffffffffffffffff
- (XEN) L3[0x102] = 000000046c218063 ffffffffffffffff
- (XEN) L2[0x102] = 000000046c218063 ffffffffffffffff
- (XEN) L1[0x103] = 0000000000000000 ffffffffffffffff
-
-This is part of XSA-243.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -4350,11 +4350,18 @@ static int sh_guess_wrmap(struct vcpu *v
-
- /* Carefully look in the shadow linear map for the l1e we expect */
- #if SHADOW_PAGING_LEVELS >= 4
-- /* Is a shadow linear map is installed in the first place? */
-- sl4p = v->arch.paging.shadow.guest_vtable;
-- sl4p += shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
-- if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
-- return 0;
-+ /*
-+ * Non-external guests (i.e. PV) have a SHADOW_LINEAR mapping from the
-+ * moment their shadows are created. External guests (i.e. HVM) may not,
-+ * but always have a regular linear mapping, which we can use to observe
-+ * whether a SHADOW_LINEAR mapping is present.
-+ */
-+ if ( paging_mode_external(d) )
-+ {
-+ sl4p = __linear_l4_table + l4_linear_offset(SH_LINEAR_PT_VIRT_START);
-+ if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
-+ return 0;
-+ }
- sl4p = sh_linear_l4_table(v) + shadow_l4_linear_offset(vaddr);
- if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
- return 0;
diff --git a/main/xen/xsa243-4.7-1.patch b/main/xen/xsa243-4.7-1.patch
deleted file mode 100644
index 9752737e27..0000000000
--- a/main/xen/xsa243-4.7-1.patch
+++ /dev/null
@@ -1,93 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/shadow: Don't create self-linear shadow mappings for 4-level translated guests
-
-When initially creating a monitor table for 4-level translated guests, don't
-install a shadow-linear mapping. This mapping is actually self-linear, and
-trips up the writeable heuristic logic into following Xen's mappings, not the
-guests' shadows it was expecting to follow.
-
-A consequence of this is that sh_guess_wrmap() needs to cope with there being
-no shadow-linear mapping present, which in practice occurs once each time a
-vcpu switches to 4-level paging from a different paging mode.
-
-An appropriate shadow-linear slot will be inserted into the monitor table
-either while constructing lower level monitor tables, or by sh_update_cr3().
-
-While fixing this, clarify the safety of the other mappings. Despite
-appearing unsafe, it is correct to create a guest-linear mapping for
-translated domains; this is self-linear and doesn't point into the translated
-domain. Drop a dead clause for translate != external guests.
-
-This is XSA-243.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Acked-by: Tim Deegan <tim@xen.org>
-
-diff --git a/xen/arch/x86/mm/shadow/multi.c b/xen/arch/x86/mm/shadow/multi.c
-index 428be37..c83932f 100644
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -1472,26 +1472,38 @@ void sh_install_xen_entries_in_l4(struct domain *d, mfn_t gl4mfn, mfn_t sl4mfn)
- sl4e[shadow_l4_table_offset(RO_MPT_VIRT_START)] = shadow_l4e_empty();
- }
-
-- /* Shadow linear mapping for 4-level shadows. N.B. for 3-level
-- * shadows on 64-bit xen, this linear mapping is later replaced by the
-- * monitor pagetable structure, which is built in make_monitor_table
-- * and maintained by sh_update_linear_entries. */
-- sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-- shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);
--
-- /* Self linear mapping. */
-- if ( shadow_mode_translate(d) && !shadow_mode_external(d) )
-+ /*
-+ * Linear mapping slots:
-+ *
-+ * Calling this function with gl4mfn == sl4mfn is used to construct a
-+ * monitor table for translated domains. In this case, gl4mfn forms the
-+ * self-linear mapping (i.e. not pointing into the translated domain), and
-+ * the shadow-linear slot is skipped. The shadow-linear slot is either
-+ * filled when constructing lower level monitor tables, or via
-+ * sh_update_cr3() for 4-level guests.
-+ *
-+ * Calling this function with gl4mfn != sl4mfn is used for non-translated
-+ * guests, where the shadow-linear slot is actually self-linear, and the
-+ * guest-linear slot points into the guests view of its pagetables.
-+ */
-+ if ( shadow_mode_translate(d) )
- {
-- // linear tables may not be used with translated PV guests
-- sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-+ ASSERT(mfn_x(gl4mfn) == mfn_x(sl4mfn));
-+
-+ sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
- shadow_l4e_empty();
- }
- else
- {
-- sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-- shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
-+ ASSERT(mfn_x(gl4mfn) != mfn_x(sl4mfn));
-+
-+ sl4e[shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START)] =
-+ shadow_l4e_from_mfn(sl4mfn, __PAGE_HYPERVISOR);
- }
-
-+ sl4e[shadow_l4_table_offset(LINEAR_PT_VIRT_START)] =
-+ shadow_l4e_from_mfn(gl4mfn, __PAGE_HYPERVISOR);
-+
- unmap_domain_page(sl4e);
- }
- #endif
-@@ -4293,6 +4305,11 @@ static int sh_guess_wrmap(struct vcpu *v, unsigned long vaddr, mfn_t gmfn)
-
- /* Carefully look in the shadow linear map for the l1e we expect */
- #if SHADOW_PAGING_LEVELS >= 4
-+    /* Is a shadow linear map installed in the first place? */
-+ sl4p = v->arch.paging.shadow.guest_vtable;
-+ sl4p += shadow_l4_table_offset(SH_LINEAR_PT_VIRT_START);
-+ if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
-+ return 0;
- sl4p = sh_linear_l4_table(v) + shadow_l4_linear_offset(vaddr);
- if ( !(shadow_l4e_get_flags(*sl4p) & _PAGE_PRESENT) )
- return 0;
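
For readers unfamiliar with the linear-mapping trick this patch reasons about: pointing one L4 slot back at the L4 table itself makes every page-table entry addressable at a fixed virtual address. A hedged standalone sketch, with SELF_SLOT as an invented slot number (Xen's actual LINEAR_PT/SH_LINEAR_PT slots differ), assuming standard 4-level x86-64 paging:

    #include <stdint.h>
    #include <stdio.h>

    /* Invented self-referencing L4 slot; Xen's real slots differ. */
    #define SELF_SLOT 0x101ULL

    /* Sign-extend a 48-bit address to canonical 64-bit form. */
    static uint64_t canonical(uint64_t va)
    {
        return (va & (1ULL << 47)) ? (va | 0xffff000000000000ULL) : va;
    }

    /*
     * With L4[SELF_SLOT] pointing at the L4 table itself, one trip
     * through that slot shortens the 4-level walk for 'va' by one
     * level, landing on the L1 entry (PTE) that maps 'va'.
     */
    static uint64_t pte_vaddr(uint64_t va)
    {
        uint64_t idx = (va & ((1ULL << 48) - 1)) >> 12; /* bits 47:12 */

        return canonical((SELF_SLOT << 39) | (idx << 3));
    }

    int main(void)
    {
        uint64_t va = 0xffff82d080000000ULL; /* arbitrary example */

        printf("PTE mapping %#llx lives at %#llx\n",
               (unsigned long long)va, (unsigned long long)pte_vaddr(va));
        return 0;
    }

The writeable heuristic the patch fixes walks exactly such addresses, which is why a self-linear slot in the wrong place makes it follow Xen's tables instead of the shadows.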
diff --git a/main/xen/xsa244-4.7.patch b/main/xen/xsa244-4.7.patch
deleted file mode 100644
index 4c5e1e0619..0000000000
--- a/main/xen/xsa244-4.7.patch
+++ /dev/null
@@ -1,51 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/cpu: fix IST handling during PCPU bringup
-
-Clear IST references in newly allocated IDTs. Nothing good will come of
-having them set before the TSS is suitably constructed (although the chances
-of the CPU surviving such an IST interrupt/exception are extremely slim).
-
-Uniformly set the IST references after the TSS is in place. This fixes an
-issue on AMD hardware, where onlining a PCPU while PCPU0 is in HVM context
-will cause IST_NONE to be copied into the new IDT, making that PCPU vulnerable
-to privilege escalation from PV guests until it subsequently schedules an HVM
-guest.
-
-This is XSA-244.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/cpu/common.c
-+++ b/xen/arch/x86/cpu/common.c
-@@ -617,6 +617,7 @@ void __init early_cpu_init(void)
- * - Sets up TSS with stack pointers, including ISTs
- * - Inserts TSS selector into regular and compat GDTs
- * - Loads GDT, IDT, TR then null LDT
-+ * - Sets up IST references in the IDT
- */
- void load_system_tables(void)
- {
-@@ -663,6 +664,10 @@ void load_system_tables(void)
- asm volatile ("lidt %0" : : "m" (idtr) );
- asm volatile ("ltr %w0" : : "rm" (TSS_ENTRY << 3) );
- asm volatile ("lldt %w0" : : "rm" (0) );
-+
-+ set_ist(&idt_tables[cpu][TRAP_double_fault], IST_DF);
-+ set_ist(&idt_tables[cpu][TRAP_nmi], IST_NMI);
-+ set_ist(&idt_tables[cpu][TRAP_machine_check], IST_MCE);
- }
-
- /*
---- a/xen/arch/x86/smpboot.c
-+++ b/xen/arch/x86/smpboot.c
-@@ -715,6 +715,9 @@ static int cpu_smpboot_alloc(unsigned in
- if ( idt_tables[cpu] == NULL )
- goto oom;
- memcpy(idt_tables[cpu], idt_table, IDT_ENTRIES * sizeof(idt_entry_t));
-+ set_ist(&idt_tables[cpu][TRAP_double_fault], IST_NONE);
-+ set_ist(&idt_tables[cpu][TRAP_nmi], IST_NONE);
-+ set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
-
- for ( stub_page = 0, i = cpu & ~(STUBS_PER_PAGE - 1);
- i < nr_cpu_ids && i <= (cpu | (STUBS_PER_PAGE - 1)); ++i )
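
The ordering the patch establishes - copy the IDT with IST references cleared, then wire them up only once the TSS exists - can be sketched in isolation. The idt_entry_t layout and set_ist() below are simplified assumptions, not the real Xen definitions:

    #include <stdint.h>
    #include <stdio.h>

    #define IST_NONE 0u
    #define IST_DF   1u
    #define IST_NMI  2u
    #define IST_MCE  3u

    /* Simplified 16-byte IDT entry; on real hardware the 3-bit IST
     * index sits in bits 32-34 of the low quadword (layout assumed). */
    typedef struct { uint64_t lo, hi; } idt_entry_t;

    static void set_ist(idt_entry_t *e, unsigned int ist)
    {
        e->lo = (e->lo & ~(7ULL << 32)) | ((uint64_t)ist << 32);
    }

    int main(void)
    {
        idt_entry_t idt[256] = { { 0, 0 } };
        unsigned int i;

        /* 1. Copy the template IDT, clearing IST references: until
         *    this CPU's TSS is loaded, IST fields must be IST_NONE. */
        for ( i = 0; i < 256; i++ )
            set_ist(&idt[i], IST_NONE);

        /* 2. ...construct the TSS and load it (ltr) here... */

        /* 3. Only now wire the critical vectors to their IST stacks. */
        set_ist(&idt[8],  IST_DF);   /* #DF */
        set_ist(&idt[2],  IST_NMI);  /* NMI */
        set_ist(&idt[18], IST_MCE);  /* #MC */

        printf("vector 8 IST = %u\n", (unsigned)((idt[8].lo >> 32) & 7));
        return 0;
    }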
diff --git a/main/xen/xsa246-4.7.patch b/main/xen/xsa246-4.7.patch
deleted file mode 100644
index bb58d6e7c8..0000000000
--- a/main/xen/xsa246-4.7.patch
+++ /dev/null
@@ -1,74 +0,0 @@
-From: Julien Grall <julien.grall@linaro.org>
-Subject: x86/pod: prevent infinite loop when shattering large pages
-
-When populating pages, the PoD may need to split large ones using
-p2m_set_entry and request the caller to retry (see ept_get_entry for
-instance).
-
-p2m_set_entry may fail to shatter if it is not possible to allocate
-memory for the new page table. However, the error is not propagated,
-causing the callers to retry the PoD indefinitely.
-
-Prevent the infinite loop by returning false when it is not possible to
-shatter the large mapping.
-
-This is XSA-246.
-
-Signed-off-by: Julien Grall <julien.grall@linaro.org>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/mm/p2m-pod.c
-+++ b/xen/arch/x86/mm/p2m-pod.c
-@@ -1073,9 +1073,8 @@ p2m_pod_demand_populate(struct p2m_domai
- * NOTE: In a fine-grained p2m locking scenario this operation
- * may need to promote its locking from gfn->1g superpage
- */
-- p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
-- p2m_populate_on_demand, p2m->default_access);
-- return 0;
-+ return p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_2M,
-+ p2m_populate_on_demand, p2m->default_access);
- }
-
- /* Only reclaim if we're in actual need of more cache. */
-@@ -1106,8 +1105,12 @@ p2m_pod_demand_populate(struct p2m_domai
-
- gfn_aligned = (gfn >> order) << order;
-
-- p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-- p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn_aligned, mfn, order, p2m_ram_rw,
-+ p2m->default_access) )
-+ {
-+ p2m_pod_cache_add(p2m, p, order);
-+ goto out_fail;
-+ }
-
- for( i = 0; i < (1UL << order); i++ )
- {
-@@ -1152,13 +1155,18 @@ remap_and_retry:
- BUG_ON(order != PAGE_ORDER_2M);
- pod_unlock(p2m);
-
-- /* Remap this 2-meg region in singleton chunks */
-- /* NOTE: In a p2m fine-grained lock scenario this might
-- * need promoting the gfn lock from gfn->2M superpage */
-+ /*
-+ * Remap this 2-meg region in singleton chunks. See the comment on the
-+ * 1G page splitting path above for why a single call suffices.
-+ *
-+ * NOTE: In a p2m fine-grained lock scenario this might
-+ * need promoting the gfn lock from gfn->2M superpage.
-+ */
- gfn_aligned = (gfn>>order)<<order;
-- for(i=0; i<(1<<order); i++)
-- p2m_set_entry(p2m, gfn_aligned + i, _mfn(INVALID_MFN), PAGE_ORDER_4K,
-- p2m_populate_on_demand, p2m->default_access);
-+ if ( p2m_set_entry(p2m, gfn_aligned, _mfn(INVALID_MFN), PAGE_ORDER_4K,
-+ p2m_populate_on_demand, p2m->default_access) )
-+ return -1;
-+
- if ( tb_init_done )
- {
- struct {
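
The bug class here is swallowing an error from a fallible split and retrying forever. A tiny self-contained sketch of the fixed shape, with shatter() and demand_populate() as invented stand-ins for p2m_set_entry() and the PoD populate path:

    #include <errno.h>
    #include <stdio.h>

    /* Invented stand-in for p2m_set_entry(): splitting a superpage
     * succeeds while memory for new page tables remains. */
    static int shatter(int *budget)
    {
        return (*budget)-- > 0 ? 0 : -ENOMEM;
    }

    /* The XSA-246 shape: propagate a shatter failure instead of
     * swallowing it, so the caller's retry loop can terminate. */
    static int demand_populate(int *budget)
    {
        return shatter(budget);
    }

    int main(void)
    {
        int budget = 2, rc;

        while ( (rc = demand_populate(&budget)) == 0 )
            printf("split one level, caller retries the walk\n");

        printf("stopped with rc=%d instead of looping forever\n", rc);
        return 0;
    }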
diff --git a/main/xen/xsa248-4.8.patch b/main/xen/xsa248-4.8.patch
deleted file mode 100644
index d15297e78d..0000000000
--- a/main/xen/xsa248-4.8.patch
+++ /dev/null
@@ -1,162 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/mm: don't wrongly set page ownership
-
-PV domains can obtain mappings of any pages owned by the correct domain,
-including ones that aren't actually assigned as "normal" RAM, but used
-by Xen internally. At the moment such "internal" pages marked as owned
-by a guest include pages used to track logdirty bits, as well as p2m
-pages and the "unpaged pagetable" for HVM guests. Since the PV memory
-management and shadow code conflict in their use of struct page_info
-fields, and since shadow code is being used for log-dirty handling for
-PV domains, pages coming from the shadow pool must, for PV domains, not
-have the domain set as their owner.
-
-While the change could be done conditionally for just the PV case in
-shadow code, do it unconditionally (and for consistency also for HAP),
-just to be on the safe side.
-
-There's one special case though for shadow code: The page table used for
-running a HVM guest in unpaged mode is subject to get_page() (in
-set_shadow_status()) and hence must have its owner set.
-
-This is XSA-248.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
---- a/xen/arch/x86/mm/hap/hap.c
-+++ b/xen/arch/x86/mm/hap/hap.c
-@@ -283,8 +283,7 @@ static struct page_info *hap_alloc_p2m_p
- {
- d->arch.paging.hap.total_pages--;
- d->arch.paging.hap.p2m_pages++;
-- page_set_owner(pg, d);
-- pg->count_info |= 1;
-+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
- }
- else if ( !d->arch.paging.p2m_alloc_failed )
- {
-@@ -299,21 +298,23 @@ static struct page_info *hap_alloc_p2m_p
-
- static void hap_free_p2m_page(struct domain *d, struct page_info *pg)
- {
-+ struct domain *owner = page_get_owner(pg);
-+
- /* This is called both from the p2m code (which never holds the
- * paging lock) and the log-dirty code (which always does). */
- paging_lock_recursive(d);
-
-- ASSERT(page_get_owner(pg) == d);
-- /* Should have just the one ref we gave it in alloc_p2m_page() */
-- if ( (pg->count_info & PGC_count_mask) != 1 ) {
-- HAP_ERROR("Odd p2m page %p count c=%#lx t=%"PRtype_info"\n",
-- pg, pg->count_info, pg->u.inuse.type_info);
-+ /* Should still have no owner and count zero. */
-+ if ( owner || (pg->count_info & PGC_count_mask) )
-+ {
-+ HAP_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(pg)),
-+ owner ? owner->domain_id : DOMID_INVALID,
-+ pg->count_info, pg->u.inuse.type_info);
- WARN();
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- }
-- pg->count_info &= ~PGC_count_mask;
-- /* Free should not decrement domain's total allocation, since
-- * these pages were allocated without an owner. */
-- page_set_owner(pg, NULL);
- d->arch.paging.hap.p2m_pages--;
- d->arch.paging.hap.total_pages++;
- hap_free(d, page_to_mfn(pg));
---- a/xen/arch/x86/mm/shadow/common.c
-+++ b/xen/arch/x86/mm/shadow/common.c
-@@ -1573,32 +1573,29 @@ shadow_alloc_p2m_page(struct domain *d)
- pg = mfn_to_page(shadow_alloc(d, SH_type_p2m_table, 0));
- d->arch.paging.shadow.p2m_pages++;
- d->arch.paging.shadow.total_pages--;
-+ ASSERT(!page_get_owner(pg) && !(pg->count_info & PGC_count_mask));
-
- paging_unlock(d);
-
-- /* Unlike shadow pages, mark p2m pages as owned by the domain.
-- * Marking the domain as the owner would normally allow the guest to
-- * create mappings of these pages, but these p2m pages will never be
-- * in the domain's guest-physical address space, and so that is not
-- * believed to be a concern. */
-- page_set_owner(pg, d);
-- pg->count_info |= 1;
- return pg;
- }
-
- static void
- shadow_free_p2m_page(struct domain *d, struct page_info *pg)
- {
-- ASSERT(page_get_owner(pg) == d);
-- /* Should have just the one ref we gave it in alloc_p2m_page() */
-- if ( (pg->count_info & PGC_count_mask) != 1 )
-+ struct domain *owner = page_get_owner(pg);
-+
-+ /* Should still have no owner and count zero. */
-+ if ( owner || (pg->count_info & PGC_count_mask) )
- {
-- SHADOW_ERROR("Odd p2m page count c=%#lx t=%"PRtype_info"\n",
-+ SHADOW_ERROR("d%d: Odd p2m page %"PRI_mfn" d=%d c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(pg)),
-+ owner ? owner->domain_id : DOMID_INVALID,
- pg->count_info, pg->u.inuse.type_info);
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- }
-- pg->count_info &= ~PGC_count_mask;
- pg->u.sh.type = SH_type_p2m_table; /* p2m code reuses type-info */
-- page_set_owner(pg, NULL);
-
- /* This is called both from the p2m code (which never holds the
- * paging lock) and the log-dirty code (which always does). */
-@@ -3216,7 +3213,9 @@ int shadow_enable(struct domain *d, u32
- | _PAGE_PRESENT | _PAGE_RW | _PAGE_USER
- | _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_PSE);
- unmap_domain_page(e);
-+ pg->count_info = 1;
- pg->u.inuse.type_info = PGT_l2_page_table | 1 | PGT_validated;
-+ page_set_owner(pg, d);
- }
-
- paging_lock(d);
-@@ -3254,7 +3253,11 @@ int shadow_enable(struct domain *d, u32
- if ( rv != 0 && !pagetable_is_null(p2m_get_pagetable(p2m)) )
- p2m_teardown(p2m);
- if ( rv != 0 && pg != NULL )
-+ {
-+ pg->count_info &= ~PGC_count_mask;
-+ page_set_owner(pg, NULL);
- shadow_free_p2m_page(d, pg);
-+ }
- domain_unpause(d);
- return rv;
- }
-@@ -3363,7 +3366,22 @@ out:
-
- /* Must be called outside the lock */
- if ( unpaged_pagetable )
-+ {
-+ if ( page_get_owner(unpaged_pagetable) == d &&
-+ (unpaged_pagetable->count_info & PGC_count_mask) == 1 )
-+ {
-+ unpaged_pagetable->count_info &= ~PGC_count_mask;
-+ page_set_owner(unpaged_pagetable, NULL);
-+ }
-+ /* Complain here in cases where shadow_free_p2m_page() won't. */
-+ else if ( !page_get_owner(unpaged_pagetable) &&
-+ !(unpaged_pagetable->count_info & PGC_count_mask) )
-+ SHADOW_ERROR("d%d: Odd unpaged pt %"PRI_mfn" c=%lx t=%"PRtype_info"\n",
-+ d->domain_id, mfn_x(page_to_mfn(unpaged_pagetable)),
-+ unpaged_pagetable->count_info,
-+ unpaged_pagetable->u.inuse.type_info);
- shadow_free_p2m_page(d, unpaged_pagetable);
-+ }
- }
-
- void shadow_final_teardown(struct domain *d)
diff --git a/main/xen/xsa249.patch b/main/xen/xsa249.patch
deleted file mode 100644
index ecfa4305e5..0000000000
--- a/main/xen/xsa249.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/shadow: fix refcount overflow check
-
-Commit c385d27079 ("x86 shadow: for multi-page shadows, explicitly track
-the first page") reduced the refcount width to 25, without adjusting the
-overflow check. Eliminate the disconnect by using a manifest constant.
-
-Interestingly, up to commit 047782fa01 ("Out-of-sync L1 shadows: OOS
-snapshot") the refcount was 27 bits wide, yet the check was already
-using 26.
-
-This is XSA-249.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
----
-v2: Simplify expression back to the style it was.
-
---- a/xen/arch/x86/mm/shadow/private.h
-+++ b/xen/arch/x86/mm/shadow/private.h
-@@ -529,7 +529,7 @@ static inline int sh_get_ref(struct doma
- x = sp->u.sh.count;
- nx = x + 1;
-
-- if ( unlikely(nx >= 1U<<26) )
-+ if ( unlikely(nx >= (1U << PAGE_SH_REFCOUNT_WIDTH)) )
- {
- SHADOW_PRINTK("shadow ref overflow, gmfn=%lx smfn=%lx\n",
- __backpointer(sp), mfn_x(smfn));
---- a/xen/include/asm-x86/mm.h
-+++ b/xen/include/asm-x86/mm.h
-@@ -82,7 +82,8 @@ struct page_info
- unsigned long type:5; /* What kind of shadow is this? */
- unsigned long pinned:1; /* Is the shadow pinned? */
- unsigned long head:1; /* Is this the first page of the shadow? */
-- unsigned long count:25; /* Reference count */
-+#define PAGE_SH_REFCOUNT_WIDTH 25
-+ unsigned long count:PAGE_SH_REFCOUNT_WIDTH; /* Reference count */
- } sh;
-
- /* Page is on a free list: ((count_info & PGC_count_mask) == 0). */
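
The fix's manifest-constant idiom - one macro feeding both the bitfield width and the overflow test - is easy to demonstrate standalone. The struct below is a simplified stand-in for Xen's shadow page_info:

    #include <assert.h>
    #include <stdio.h>

    /* One constant feeds both the field width and the overflow
     * check, so the two can no longer drift apart. */
    #define PAGE_SH_REFCOUNT_WIDTH 25

    struct sh_page {                /* simplified shadow page_info */
        unsigned int type:5;
        unsigned int pinned:1;
        unsigned int head:1;
        unsigned int count:PAGE_SH_REFCOUNT_WIDTH;
    };

    static int sh_get_ref(struct sh_page *sp)
    {
        unsigned int nx = sp->count + 1;

        if ( nx >= (1u << PAGE_SH_REFCOUNT_WIDTH) )
            return 0;               /* would overflow: refuse */

        sp->count = nx;
        return 1;
    }

    int main(void)
    {
        struct sh_page sp = { .count = (1u << PAGE_SH_REFCOUNT_WIDTH) - 1 };

        assert(!sh_get_ref(&sp));   /* saturated count is rejected */
        printf("overflow refused at count %u\n", (unsigned)sp.count);
        return 0;
    }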
diff --git a/main/xen/xsa250.patch b/main/xen/xsa250.patch
deleted file mode 100644
index 26aeb33fed..0000000000
--- a/main/xen/xsa250.patch
+++ /dev/null
@@ -1,67 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/shadow: fix ref-counting error handling
-
-The old-Linux handling in shadow_set_l4e() mistakenly ORed together the
-results of sh_get_ref() and sh_pin(). As the latter failing is not a
-correctness problem, simply ignore its return value.
-
-In sh_set_toplevel_shadow() a failing sh_get_ref() must not be
-accompanied by installing the entry, despite the domain being crashed.
-
-This is XSA-250.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Tim Deegan <tim@xen.org>
-
---- a/xen/arch/x86/mm/shadow/multi.c
-+++ b/xen/arch/x86/mm/shadow/multi.c
-@@ -923,7 +923,7 @@ static int shadow_set_l4e(struct domain
- shadow_l4e_t new_sl4e,
- mfn_t sl4mfn)
- {
-- int flags = 0, ok;
-+ int flags = 0;
- shadow_l4e_t old_sl4e;
- paddr_t paddr;
- ASSERT(sl4e != NULL);
-@@ -938,15 +938,16 @@ static int shadow_set_l4e(struct domain
- {
- /* About to install a new reference */
- mfn_t sl3mfn = shadow_l4e_get_mfn(new_sl4e);
-- ok = sh_get_ref(d, sl3mfn, paddr);
-- /* Are we pinning l3 shadows to handle wierd linux behaviour? */
-- if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-- ok |= sh_pin(d, sl3mfn);
-- if ( !ok )
-+
-+ if ( !sh_get_ref(d, sl3mfn, paddr) )
- {
- domain_crash(d);
- return SHADOW_SET_ERROR;
- }
-+
-+ /* Are we pinning l3 shadows to handle weird Linux behaviour? */
-+ if ( sh_type_is_pinnable(d, SH_type_l3_64_shadow) )
-+ sh_pin(d, sl3mfn);
- }
-
- /* Write the new entry */
-@@ -3965,14 +3966,15 @@ sh_set_toplevel_shadow(struct vcpu *v,
-
- /* Take a ref to this page: it will be released in sh_detach_old_tables()
- * or the next call to set_toplevel_shadow() */
-- if ( !sh_get_ref(d, smfn, 0) )
-+ if ( sh_get_ref(d, smfn, 0) )
-+ new_entry = pagetable_from_mfn(smfn);
-+ else
- {
- SHADOW_ERROR("can't install %#lx as toplevel shadow\n", mfn_x(smfn));
- domain_crash(d);
-+ new_entry = pagetable_null();
- }
-
-- new_entry = pagetable_from_mfn(smfn);
--
- install_new_entry:
- /* Done. Install it */
- SHADOW_PRINTK("%u/%u [%u] gmfn %#"PRI_mfn" smfn %#"PRI_mfn"\n",
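
The error-handling shape being fixed - OR-ing a mandatory call's result with a best-effort one - can be shown in a few lines. get_ref() and pin() below are invented stand-ins for sh_get_ref() and sh_pin():

    #include <stdio.h>

    static int get_ref(void) { return 0; } /* mandatory; fails here      */
    static int pin(void)     { return 1; } /* best effort; failure is OK */

    int main(void)
    {
        /* Buggy pre-XSA-250 shape: a successful best-effort pin()
         * masks the failed mandatory get_ref(). */
        int ok = get_ref();
        ok |= pin();
        printf("buggy check sees ok=%d despite a missing reference\n", ok);

        /* Fixed shape: test the mandatory call alone. */
        if ( !get_ref() )
        {
            printf("fixed check correctly fails\n");
            return 1;
        }
        pin(); /* result deliberately ignored */
        return 0;
    }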
diff --git a/main/xen/xsa251-4.8.patch b/main/xen/xsa251-4.8.patch
deleted file mode 100644
index fffe54d0e1..0000000000
--- a/main/xen/xsa251-4.8.patch
+++ /dev/null
@@ -1,21 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/paging: don't unconditionally BUG() on finding SHARED_M2P_ENTRY
-
-PV guests can fully control the values written into the P2M.
-
-This is XSA-251.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/mm/paging.c
-+++ b/xen/arch/x86/mm/paging.c
-@@ -276,7 +276,7 @@ void paging_mark_pfn_dirty(struct domain
- return;
-
- /* Shared MFNs should NEVER be marked dirty */
-- BUG_ON(SHARED_M2P(pfn));
-+ BUG_ON(paging_mode_translate(d) && SHARED_M2P(pfn));
-
- /*
- * Values with the MSB set denote MFNs that aren't really part of the
diff --git a/main/xen/xsa252-4.7.patch b/main/xen/xsa252-4.7.patch
deleted file mode 100644
index 65ad8fc5df..0000000000
--- a/main/xen/xsa252-4.7.patch
+++ /dev/null
@@ -1,25 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: memory: don't implicitly unpin for decrease-reservation
-
-It very likely was a mistake (copy-and-paste from domain cleanup code)
-to implicitly unpin here: The caller should really unpin itself before
-(or after, if they so wish) requesting the page to be removed.
-
-This is XSA-252.
-
-Reported-by: Jann Horn <jannh@google.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/common/memory.c
-+++ b/xen/common/memory.c
-@@ -320,9 +320,6 @@ int guest_remove_page(struct domain *d,
-
- rc = guest_physmap_remove_page(d, gmfn, mfn, 0);
-
-- if ( !rc && test_and_clear_bit(_PGT_pinned, &page->u.inuse.type_info) )
-- put_page_and_type(page);
--
- /*
- * With the lack of an IOMMU on some platforms, domains with DMA-capable
- * device must retrieve the same pfn when the hypercall populate_physmap
diff --git a/main/xen/xsa255-4.7-1.patch b/main/xen/xsa255-4.7-1.patch
deleted file mode 100644
index 72b2c41756..0000000000
--- a/main/xen/xsa255-4.7-1.patch
+++ /dev/null
@@ -1,126 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: gnttab/ARM: don't corrupt shared GFN array
-
-... by writing status GFNs to it. Introduce a second array instead.
-Also implement gnttab_status_gmfn() properly now that the information is
-suitably being tracked.
-
-While touching it anyway, remove a misguided (but luckily benign) upper
-bound check from gnttab_shared_gmfn(): We should never access beyond the
-bounds of that array.
-
-This is part of XSA-255.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/arm/domain.c
-+++ b/xen/arch/arm/domain.c
-@@ -438,19 +438,37 @@ void startup_cpu_idle_loop(void)
- struct domain *alloc_domain_struct(void)
- {
- struct domain *d;
-+ unsigned int i, max_status_frames;
-+
- BUILD_BUG_ON(sizeof(*d) > PAGE_SIZE);
- d = alloc_xenheap_pages(0, 0);
- if ( d == NULL )
- return NULL;
-
- clear_page(d);
-- d->arch.grant_table_gpfn = xzalloc_array(xen_pfn_t, max_grant_frames);
-+
-+ d->arch.grant_shared_gfn = xmalloc_array(gfn_t, max_grant_frames);
-+ max_status_frames = grant_to_status_frames(max_grant_frames);
-+ d->arch.grant_status_gfn = xmalloc_array(gfn_t, max_status_frames);
-+ if ( !d->arch.grant_shared_gfn || !d->arch.grant_status_gfn )
-+ {
-+ free_domain_struct(d);
-+ return NULL;
-+ }
-+
-+ for ( i = 0; i < max_grant_frames; ++i )
-+ d->arch.grant_shared_gfn[i] = _gfn(INVALID_GFN);
-+
-+ for ( i = 0; i < max_status_frames; ++i )
-+ d->arch.grant_status_gfn[i] = _gfn(INVALID_GFN);
-+
- return d;
- }
-
- void free_domain_struct(struct domain *d)
- {
-- xfree(d->arch.grant_table_gpfn);
-+ xfree(d->arch.grant_shared_gfn);
-+ xfree(d->arch.grant_status_gfn);
- free_xenheap_page(d);
- }
-
---- a/xen/arch/arm/mm.c
-+++ b/xen/arch/arm/mm.c
-@@ -1065,6 +1065,7 @@ int xenmem_add_to_physmap_one(
- int rc;
- p2m_type_t t;
- struct page_info *page = NULL;
-+ bool_t status = 0;
-
- switch ( space )
- {
-@@ -1082,6 +1083,7 @@ int xenmem_add_to_physmap_one(
- mfn = virt_to_mfn(d->grant_table->status[idx]);
- else
- mfn = INVALID_MFN;
-+ status = 1;
- }
- else
- {
-@@ -1097,7 +1099,10 @@ int xenmem_add_to_physmap_one(
-
- if ( mfn != INVALID_MFN )
- {
-- d->arch.grant_table_gpfn[idx] = gpfn;
-+ if ( status )
-+ d->arch.grant_status_gfn[idx] = _gfn(gpfn);
-+ else
-+ d->arch.grant_shared_gfn[idx] = _gfn(gpfn);
-
- t = p2m_ram_rw;
- }
---- a/xen/include/asm-arm/domain.h
-+++ b/xen/include/asm-arm/domain.h
-@@ -51,7 +51,8 @@ struct arch_domain
- uint64_t vttbr;
-
- struct hvm_domain hvm_domain;
-- xen_pfn_t *grant_table_gpfn;
-+ gfn_t *grant_shared_gfn;
-+ gfn_t *grant_status_gfn;
-
- struct vmmio vmmio;
-
---- a/xen/include/asm-arm/grant_table.h
-+++ b/xen/include/asm-arm/grant_table.h
-@@ -14,7 +14,6 @@ int replace_grant_host_mapping(unsigned
- unsigned long new_gpaddr, unsigned int flags);
- void gnttab_mark_dirty(struct domain *d, unsigned long l);
- #define gnttab_create_status_page(d, t, i) do {} while (0)
--#define gnttab_status_gmfn(d, t, i) (0)
- #define gnttab_release_host_mappings(domain) 1
- static inline int replace_grant_supported(void)
- {
-@@ -29,8 +28,12 @@ static inline int replace_grant_supporte
- } while ( 0 )
-
- #define gnttab_shared_gmfn(d, t, i) \
-- ( ((i >= nr_grant_frames(d->grant_table)) && \
-- (i < max_grant_frames)) ? 0 : (d->arch.grant_table_gpfn[i]))
-+ gfn_x(((i) >= nr_grant_frames(t)) ? _gfn(INVALID_GFN) \
-+ : (d)->arch.grant_shared_gfn[i])
-+
-+#define gnttab_status_gmfn(d, t, i) \
-+ gfn_x(((i) >= nr_status_frames(t)) ? _gfn(INVALID_GFN) \
-+ : (d)->arch.grant_status_gfn[i])
-
- #define gnttab_need_iommu_mapping(d) \
- (is_domain_direct_mapped(d) && need_iommu(d))
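
The allocation pattern the patch introduces (two separate arrays, both poisoned with INVALID_GFN, allocated all-or-nothing) looks roughly like this sketch; the struct and sizes are assumptions, not Xen's real types:

    #include <stdlib.h>

    #define INVALID_GFN (~0ul)

    struct gfn_tracking {           /* simplified arch_domain fields */
        unsigned long *shared_gfn;
        unsigned long *status_gfn;
    };

    /* Allocate both arrays up front, all-or-nothing, and poison every
     * slot with INVALID_GFN so "never mapped" is distinguishable. */
    static int alloc_gfn_arrays(struct gfn_tracking *t,
                                size_t shared_frames, size_t status_frames)
    {
        size_t i;

        t->shared_gfn = malloc(shared_frames * sizeof(*t->shared_gfn));
        t->status_gfn = malloc(status_frames * sizeof(*t->status_gfn));
        if ( !t->shared_gfn || !t->status_gfn )
        {
            free(t->shared_gfn);
            free(t->status_gfn);
            return -1;
        }

        for ( i = 0; i < shared_frames; i++ )
            t->shared_gfn[i] = INVALID_GFN;
        for ( i = 0; i < status_frames; i++ )
            t->status_gfn[i] = INVALID_GFN;

        return 0;
    }

    int main(void)
    {
        struct gfn_tracking t;

        return alloc_gfn_arrays(&t, 32, 8);
    }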
diff --git a/main/xen/xsa255-4.7-2.patch b/main/xen/xsa255-4.7-2.patch
deleted file mode 100644
index 57a6907c43..0000000000
--- a/main/xen/xsa255-4.7-2.patch
+++ /dev/null
@@ -1,187 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: gnttab: don't blindly free status pages upon version change
-
-There may still be active mappings, which would trigger the respective
-BUG_ON(). Split the loop into one dealing with the page attributes and
-a second (run only once the first has fully passed) freeing the pages. Return an
-error if any pages still have pending references.
-
-This is part of XSA-255.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Stefano Stabellini <sstabellini@kernel.org>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/arm/mm.c
-+++ b/xen/arch/arm/mm.c
-@@ -1097,12 +1097,23 @@ int xenmem_add_to_physmap_one(
- mfn = INVALID_MFN;
- }
-
-+ if ( mfn != INVALID_MFN &&
-+ gfn_x(gnttab_get_frame_gfn(d, status, idx)) != INVALID_GFN )
-+ {
-+ rc = guest_physmap_remove_page(d,
-+ gfn_x(gnttab_get_frame_gfn(d, status,
-+ idx)),
-+ mfn, 0);
-+ if ( rc )
-+ {
-+ grant_write_unlock(d->grant_table);
-+ return rc;
-+ }
-+ }
-+
- if ( mfn != INVALID_MFN )
- {
-- if ( status )
-- d->arch.grant_status_gfn[idx] = _gfn(gpfn);
-- else
-- d->arch.grant_shared_gfn[idx] = _gfn(gpfn);
-+ gnttab_set_frame_gfn(d, status, idx, _gfn(gpfn));
-
- t = p2m_ram_rw;
- }
---- a/xen/common/grant_table.c
-+++ b/xen/common/grant_table.c
-@@ -1516,23 +1516,74 @@ status_alloc_failed:
- return -ENOMEM;
- }
-
--static void
-+static int
- gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
- {
-- int i;
-+ unsigned int i;
-
- for ( i = 0; i < nr_status_frames(gt); i++ )
- {
- struct page_info *pg = virt_to_page(gt->status[i]);
-+ gfn_t gfn = gnttab_get_frame_gfn(d, 1, i);
-+
-+ /*
-+ * For translated domains, recovering from failure after partial
-+ * changes were made is more complicated than it seems worth
-+ * implementing at this time. Hence respective error paths below
-+ * crash the domain in such a case.
-+ */
-+ if ( paging_mode_translate(d) )
-+ {
-+ int rc = gfn_x(gfn) == INVALID_GFN
-+ ? 0
-+ : guest_physmap_remove_page(d, gfn_x(gfn),
-+ page_to_mfn(pg), 0);
-+
-+ if ( rc )
-+ {
-+ gprintk(XENLOG_ERR,
-+ "Could not remove status frame %u (GFN %#lx) from P2M\n",
-+ i, gfn_x(gfn));
-+ domain_crash(d);
-+ return rc;
-+ }
-+ gnttab_set_frame_gfn(d, 1, i, _gfn(INVALID_GFN));
-+ }
-
- BUG_ON(page_get_owner(pg) != d);
- if ( test_and_clear_bit(_PGC_allocated, &pg->count_info) )
- put_page(pg);
-- BUG_ON(pg->count_info & ~PGC_xen_heap);
-+
-+ if ( pg->count_info & ~PGC_xen_heap )
-+ {
-+ if ( paging_mode_translate(d) )
-+ {
-+ gprintk(XENLOG_ERR,
-+ "Wrong page state %#lx of status frame %u (GFN %#lx)\n",
-+ pg->count_info, i, gfn_x(gfn));
-+ domain_crash(d);
-+ }
-+ else
-+ {
-+ if ( get_page(pg, d) )
-+ set_bit(_PGC_allocated, &pg->count_info);
-+ while ( i-- )
-+ gnttab_create_status_page(d, gt, i);
-+ }
-+ return -EBUSY;
-+ }
-+
-+ page_set_owner(pg, NULL);
-+ }
-+
-+ for ( i = 0; i < nr_status_frames(gt); i++ )
-+ {
- free_xenheap_page(gt->status[i]);
- gt->status[i] = NULL;
- }
- gt->nr_status_frames = 0;
-+
-+ return 0;
- }
-
- /*
-@@ -2773,8 +2824,9 @@ gnttab_set_version(XEN_GUEST_HANDLE_PARA
- break;
- }
-
-- if ( op.version < 2 && gt->gt_version == 2 )
-- gnttab_unpopulate_status_frames(currd, gt);
-+ if ( op.version < 2 && gt->gt_version == 2 &&
-+ (res = gnttab_unpopulate_status_frames(currd, gt)) != 0 )
-+ goto out_unlock;
-
- /* Make sure there's no crud left over from the old version. */
- for ( i = 0; i < nr_grant_frames(gt); i++ )
---- a/xen/include/asm-arm/grant_table.h
-+++ b/xen/include/asm-arm/grant_table.h
-@@ -20,6 +20,17 @@ static inline int replace_grant_supporte
- return 1;
- }
-
-+#define gnttab_set_frame_gfn(d, st, idx, gfn) \
-+ do { \
-+ ((st) ? (d)->arch.grant_status_gfn \
-+ : (d)->arch.grant_shared_gfn)[idx] = (gfn); \
-+ } while ( 0 )
-+
-+#define gnttab_get_frame_gfn(d, st, idx) ({ \
-+ _gfn((st) ? gnttab_status_gmfn(d, (d)->grant_table, idx) \
-+ : gnttab_shared_gmfn(d, (d)->grant_table, idx)); \
-+})
-+
- #define gnttab_create_shared_page(d, t, i) \
- do { \
- share_xen_page_with_guest( \
---- a/xen/include/asm-x86/grant_table.h
-+++ b/xen/include/asm-x86/grant_table.h
-@@ -18,6 +18,14 @@ int create_grant_host_mapping(uint64_t a
- int replace_grant_host_mapping(
- uint64_t addr, unsigned long frame, uint64_t new_addr, unsigned int flags);
-
-+#define gnttab_set_frame_gfn(d, st, idx, gfn) do {} while ( 0 )
-+#define gnttab_get_frame_gfn(d, st, idx) ({ \
-+ unsigned long mfn_ = (st) ? gnttab_status_mfn((d)->grant_table, idx) \
-+ : gnttab_shared_mfn((d)->grant_table, idx); \
-+ unsigned long gpfn_ = get_gpfn_from_mfn(mfn_); \
-+ _gfn(VALID_M2P(gpfn_) ? gpfn_ : INVALID_GFN); \
-+})
-+
- #define gnttab_create_shared_page(d, t, i) \
- do { \
- share_xen_page_with_guest( \
-@@ -33,11 +41,11 @@ int replace_grant_host_mapping(
- } while ( 0 )
-
-
--#define gnttab_shared_mfn(d, t, i) \
-+#define gnttab_shared_mfn(t, i) \
- ((virt_to_maddr((t)->shared_raw[i]) >> PAGE_SHIFT))
-
- #define gnttab_shared_gmfn(d, t, i) \
-- (mfn_to_gmfn(d, gnttab_shared_mfn(d, t, i)))
-+ (mfn_to_gmfn(d, gnttab_shared_mfn(t, i)))
-
-
- #define gnttab_status_mfn(t, i) \
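
The two-phase teardown the patch introduces - validate everything first, free only once nothing can fail - is a generally useful shape. A minimal sketch with invented per-frame refcounts standing in for grant-table page state:

    #include <errno.h>
    #include <stdio.h>

    #define NFRAMES 4

    /* Invented per-frame reference counts; frame 2 is still mapped. */
    static int refs[NFRAMES] = { 0, 0, 1, 0 };
    static int freed[NFRAMES];

    /* Pass 1 may fail and leaves everything recoverable; pass 2
     * (actually freeing) runs only once pass 1 fully succeeded. */
    static int unpopulate(void)
    {
        int i;

        for ( i = 0; i < NFRAMES; i++ )
            if ( refs[i] )
                return -EBUSY;      /* nothing freed yet: safe to bail */

        for ( i = 0; i < NFRAMES; i++ )
            freed[i] = 1;           /* point of no return */

        return 0;
    }

    int main(void)
    {
        printf("first attempt: %d\n", unpopulate());   /* -EBUSY */
        refs[2] = 0;                                   /* guest unmaps */
        printf("second attempt: %d, frame0 freed=%d\n",
               unpopulate(), freed[0]);                /* 0, 1 */
        return 0;
    }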
diff --git a/main/xen/xsa258-4.8.patch b/main/xen/xsa258-4.8.patch
deleted file mode 100644
index 2c6ba32ce0..0000000000
--- a/main/xen/xsa258-4.8.patch
+++ /dev/null
@@ -1,106 +0,0 @@
-From 437c3b3ad337c43056903e4824448428d3b5a956 Mon Sep 17 00:00:00 2001
-From: Anthony PERARD <anthony.perard@citrix.com>
-Date: Thu, 8 Mar 2018 18:16:41 +0000
-Subject: [PATCH] libxl: Specify format of inserted cdrom
-
-Without this extra parameter on the QMP command, QEMU will guess the
-format of the new file.
-
-This is XSA-258.
-
-Reported-by: Anthony PERARD <anthony.perard@citrix.com>
-Signed-off-by: Anthony PERARD <anthony.perard@citrix.com>
-Acked-by: Ian Jackson <ian.jackson@eu.citrix.com>
----
- tools/libxl/libxl_device.c | 12 ++++++++++++
- tools/libxl/libxl_dm.c | 16 ++--------------
- tools/libxl/libxl_internal.h | 1 +
- tools/libxl/libxl_qmp.c | 2 ++
- 4 files changed, 17 insertions(+), 14 deletions(-)
-
-diff --git a/tools/libxl/libxl_device.c b/tools/libxl/libxl_device.c
-index 3e7a1026c4..cd2a980f18 100644
---- a/tools/libxl/libxl_device.c
-+++ b/tools/libxl/libxl_device.c
-@@ -425,6 +425,18 @@ char *libxl__device_disk_string_of_backend(libxl_disk_backend backend)
- }
- }
-
-+const char *libxl__qemu_disk_format_string(libxl_disk_format format)
-+{
-+ switch (format) {
-+ case LIBXL_DISK_FORMAT_QCOW: return "qcow";
-+ case LIBXL_DISK_FORMAT_QCOW2: return "qcow2";
-+ case LIBXL_DISK_FORMAT_VHD: return "vpc";
-+ case LIBXL_DISK_FORMAT_RAW: return "raw";
-+ case LIBXL_DISK_FORMAT_EMPTY: return NULL;
-+ default: return NULL;
-+ }
-+}
-+
- int libxl__device_physdisk_major_minor(const char *physpath, int *major, int *minor)
- {
- struct stat buf;
-diff --git a/tools/libxl/libxl_dm.c b/tools/libxl/libxl_dm.c
-index ad366a8cd3..b6bc407795 100644
---- a/tools/libxl/libxl_dm.c
-+++ b/tools/libxl/libxl_dm.c
-@@ -669,18 +669,6 @@ static int libxl__build_device_model_args_old(libxl__gc *gc,
- return 0;
- }
-
--static const char *qemu_disk_format_string(libxl_disk_format format)
--{
-- switch (format) {
-- case LIBXL_DISK_FORMAT_QCOW: return "qcow";
-- case LIBXL_DISK_FORMAT_QCOW2: return "qcow2";
-- case LIBXL_DISK_FORMAT_VHD: return "vpc";
-- case LIBXL_DISK_FORMAT_RAW: return "raw";
-- case LIBXL_DISK_FORMAT_EMPTY: return NULL;
-- default: return NULL;
-- }
--}
--
- static char *dm_spice_options(libxl__gc *gc,
- const libxl_spice_info *spice)
- {
-@@ -1342,9 +1330,9 @@ static int libxl__build_device_model_args_new(libxl__gc *gc,
- * always raw
- */
- if (disks[i].backend == LIBXL_DISK_BACKEND_QDISK)
-- format = qemu_disk_format_string(disks[i].format);
-+ format = libxl__qemu_disk_format_string(disks[i].format);
- else
-- format = qemu_disk_format_string(LIBXL_DISK_FORMAT_RAW);
-+ format = libxl__qemu_disk_format_string(LIBXL_DISK_FORMAT_RAW);
-
- if (disks[i].format == LIBXL_DISK_FORMAT_EMPTY) {
- if (!disks[i].is_cdrom) {
-diff --git a/tools/libxl/libxl_internal.h b/tools/libxl/libxl_internal.h
-index 8366fee25f..c32a40576a 100644
---- a/tools/libxl/libxl_internal.h
-+++ b/tools/libxl/libxl_internal.h
-@@ -1170,6 +1170,7 @@ _hidden int libxl__domain_pvcontrol_write(libxl__gc *gc, xs_transaction_t t,
- /* from xl_device */
- _hidden char *libxl__device_disk_string_of_backend(libxl_disk_backend backend);
- _hidden char *libxl__device_disk_string_of_format(libxl_disk_format format);
-+_hidden const char *libxl__qemu_disk_format_string(libxl_disk_format format);
- _hidden int libxl__device_disk_set_backend(libxl__gc*, libxl_device_disk*);
-
- _hidden int libxl__device_physdisk_major_minor(const char *physpath, int *major, int *minor);
-diff --git a/tools/libxl/libxl_qmp.c b/tools/libxl/libxl_qmp.c
-index f8addf9ba6..6fc5454a6e 100644
---- a/tools/libxl/libxl_qmp.c
-+++ b/tools/libxl/libxl_qmp.c
-@@ -982,6 +982,8 @@ int libxl__qmp_insert_cdrom(libxl__gc *gc, int domid,
- return qmp_run_command(gc, domid, "eject", args, NULL, NULL);
- } else {
- qmp_parameters_add_string(gc, &args, "target", disk->pdev_path);
-+ qmp_parameters_add_string(gc, &args, "arg",
-+ libxl__qemu_disk_format_string(disk->format));
- return qmp_run_command(gc, domid, "change", args, NULL, NULL);
- }
- }
---
-2.16.2
-
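
The substance of the fix is passing an explicit format string on the QMP "change" command rather than letting QEMU probe a guest-controlled image. A standalone sketch of the enum-to-string mapping (mirroring the patch's table; the printed command line is illustrative only, not real QMP syntax):

    #include <stdio.h>

    typedef enum {
        DISK_FORMAT_QCOW, DISK_FORMAT_QCOW2, DISK_FORMAT_VHD,
        DISK_FORMAT_RAW, DISK_FORMAT_EMPTY,
    } disk_format;

    /* Same mapping the patch exports as libxl__qemu_disk_format_string(). */
    static const char *qemu_disk_format_string(disk_format f)
    {
        switch (f) {
        case DISK_FORMAT_QCOW:  return "qcow";
        case DISK_FORMAT_QCOW2: return "qcow2";
        case DISK_FORMAT_VHD:   return "vpc";
        case DISK_FORMAT_RAW:   return "raw";
        default:                return NULL;
        }
    }

    int main(void)
    {
        /* Name the format explicitly on the QMP "change" command;
         * letting QEMU probe a guest-writable image is the bug. */
        printf("change target=disk.img arg=%s\n",
               qemu_disk_format_string(DISK_FORMAT_RAW));
        return 0;
    }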
diff --git a/main/xen/xsa259.patch b/main/xen/xsa259.patch
deleted file mode 100644
index 7848fa87e9..0000000000
--- a/main/xen/xsa259.patch
+++ /dev/null
@@ -1,29 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86: fix slow int80 path after XPTI additions
-
-For the int80 slow path to jump to handle_exception_saved, %r14 needs to
-be set up suitably for XPTI purposes. This is because of the difference
-in nature between the int80 path (which is synchronous WRT guest
-actions) and the exception path which is potentially asynchronous.
-
-This is XSA-259.
-
-Reported-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
-
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -396,6 +396,12 @@ int80_slow_path:
- movl $TRAP_gp_fault,UREGS_entry_vector(%rsp)
- /* A GPF wouldn't have incremented the instruction pointer. */
- subq $2,UREGS_rip(%rsp)
-+ /*
-+ * While we've cleared xen_cr3 above already, normal exception handling
-+ * code has logic to restore the original value from %r15. Therefore we
-+ * need to set up %r14 here, while %r15 is required to still be zero.
-+ */
-+ GET_STACK_END(14)
- jmp handle_exception_saved
-
- /* create_bounce_frame & helpers don't need to be in .text.entry */
diff --git a/main/xen/xsa260-1.patch b/main/xen/xsa260-1.patch
deleted file mode 100644
index 386ae0335c..0000000000
--- a/main/xen/xsa260-1.patch
+++ /dev/null
@@ -1,71 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/traps: Fix %dr6 handing in #DB handler
-
-Most bits in %dr6 accumulate, rather than being set directly based on the
-current source of #DB. Have the handler follow the manuals' guidance, which
-avoids leaking hypervisor debugging activities into guest context.
-
-This is part of XSA-260 / CVE-2018-8897.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/traps.c
-+++ b/xen/arch/x86/traps.c
-@@ -3842,10 +3842,35 @@ static void ler_enable(void)
-
- void do_debug(struct cpu_user_regs *regs)
- {
-+ unsigned long dr6;
- struct vcpu *v = current;
-
-+ /* Stash dr6 as early as possible. */
-+ dr6 = read_debugreg(6);
-+
- DEBUGGER_trap_entry(TRAP_debug, regs);
-
-+ /*
-+ * At the time of writing (March 2018), on the subject of %dr6:
-+ *
-+ * The Intel manual says:
-+ * Certain debug exceptions may clear bits 0-3. The remaining contents
-+ * of the DR6 register are never cleared by the processor. To avoid
-+ * confusion in identifying debug exceptions, debug handlers should
-+ * clear the register (except bit 16, which they should set) before
-+ * returning to the interrupted task.
-+ *
-+ * The AMD manual says:
-+ * Bits 15:13 of the DR6 register are not cleared by the processor and
-+ * must be cleared by software after the contents have been read.
-+ *
-+ * Some bits are reserved set, some are reserved clear, and some bits
-+ * which were previously reserved set are reused and cleared by hardware.
-+ * For future compatibility, reset to the default value, which will allow
-+ * us to spot any bit being changed by hardware to its non-default value.
-+ */
-+ write_debugreg(6, X86_DR6_DEFAULT);
-+
- if ( !guest_mode(regs) )
- {
- if ( regs->eflags & X86_EFLAGS_TF )
-@@ -3878,7 +3903,8 @@ void do_debug(struct cpu_user_regs *regs
- }
-
- /* Save debug status register where guest OS can peek at it */
-- v->arch.debugreg[6] = read_debugreg(6);
-+ v->arch.debugreg[6] |= (dr6 & ~X86_DR6_DEFAULT);
-+ v->arch.debugreg[6] &= (dr6 | ~X86_DR6_DEFAULT);
-
- ler_enable();
- do_guest_trap(TRAP_debug, regs, 0);
---- a/xen/include/asm-x86/debugreg.h
-+++ b/xen/include/asm-x86/debugreg.h
-@@ -24,6 +24,8 @@
- #define DR_STATUS_RESERVED_ZERO (~0xffffeffful) /* Reserved, read as zero */
- #define DR_STATUS_RESERVED_ONE 0xffff0ff0ul /* Reserved, read as one */
-
-+#define X86_DR6_DEFAULT 0xffff0ff0ul /* Default %dr6 value. */
-+
- /* Now define a bunch of things for manipulating the control register.
- The top two bytes of the control register consist of 4 fields of 4
- bits - each field corresponds to one of the four debug registers,
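
The two-line merge the patch performs on the guest's saved %dr6 is worth unpacking: bits that are 0 in the default value accumulate into the guest view (the OR), while bits that are 1 in the default are propagated as clears (the AND). A runnable sketch using the same X86_DR6_DEFAULT constant:

    #include <stdio.h>

    #define X86_DR6_DEFAULT 0xffff0ff0ul /* default %dr6 value */

    int main(void)
    {
        unsigned long guest_dr6 = X86_DR6_DEFAULT;  /* guest's view */
        unsigned long hw_dr6 = X86_DR6_DEFAULT | 1; /* B0 reported  */

        /* Default-clear bits (e.g. B0-B3) accumulate into the guest
         * view; default-set bits are propagated as clears. */
        guest_dr6 |= (hw_dr6 & ~X86_DR6_DEFAULT);
        guest_dr6 &= (hw_dr6 | ~X86_DR6_DEFAULT);

        printf("guest %%dr6 = %#lx\n", guest_dr6);  /* 0xffff0ff1 */
        return 0;
    }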
diff --git a/main/xen/xsa260-2.patch b/main/xen/xsa260-2.patch
deleted file mode 100644
index 81ebea165c..0000000000
--- a/main/xen/xsa260-2.patch
+++ /dev/null
@@ -1,110 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/pv: Move exception injection into {,compat_}test_all_events()
-
-This allows paths to jump straight to {,compat_}test_all_events() and have
-injection of pending exceptions happen automatically, rather than requiring
-all calling paths to handle exceptions themselves.
-
-The normal exception path is simplified as a result, and
-compat_post_handle_exception() is removed entirely.
-
-This is part of XSA-260 / CVE-2018-8897.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/x86_64/compat/entry.S
-+++ b/xen/arch/x86/x86_64/compat/entry.S
-@@ -107,6 +107,12 @@ ENTRY(compat_test_all_events)
- leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
- cmpl $0,(%rcx,%rax,1)
- jne compat_process_softirqs
-+
-+ /* Inject exception if pending. */
-+ lea VCPU_trap_bounce(%rbx), %rdx
-+ testb $TBF_EXCEPTION, TRAPBOUNCE_flags(%rdx)
-+ jnz .Lcompat_process_trapbounce
-+
- testb $1,VCPU_mce_pending(%rbx)
- jnz compat_process_mce
- .Lcompat_test_guest_nmi:
-@@ -136,6 +142,15 @@ compat_process_softirqs:
- call do_softirq
- jmp compat_test_all_events
-
-+ ALIGN
-+/* %rbx: struct vcpu, %rdx: struct trap_bounce */
-+.Lcompat_process_trapbounce:
-+ sti
-+.Lcompat_bounce_exception:
-+ call compat_create_bounce_frame
-+ movb $0, TRAPBOUNCE_flags(%rdx)
-+ jmp compat_test_all_events
-+
- ALIGN
- /* %rbx: struct vcpu */
- compat_process_mce:
-@@ -260,15 +275,6 @@ ENTRY(cr4_pv32_restore)
- xor %eax, %eax
- ret
-
--/* %rdx: trap_bounce, %rbx: struct vcpu */
--ENTRY(compat_post_handle_exception)
-- testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
-- jz compat_test_all_events
--.Lcompat_bounce_exception:
-- call compat_create_bounce_frame
-- movb $0,TRAPBOUNCE_flags(%rdx)
-- jmp compat_test_all_events
--
- /* See lstar_enter for entry register state. */
- ENTRY(cstar_enter)
- /* sti could live here when we don't switch page tables below. */
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -253,6 +253,12 @@ test_all_events:
- leaq irq_stat+IRQSTAT_softirq_pending(%rip),%rcx
- cmpl $0,(%rcx,%rax,1)
- jne process_softirqs
-+
-+ /* Inject exception if pending. */
-+ lea VCPU_trap_bounce(%rbx), %rdx
-+ testb $TBF_EXCEPTION, TRAPBOUNCE_flags(%rdx)
-+ jnz .Lprocess_trapbounce
-+
- testb $1,VCPU_mce_pending(%rbx)
- jnz process_mce
- .Ltest_guest_nmi:
-@@ -281,6 +287,15 @@ process_softirqs:
- jmp test_all_events
-
- ALIGN
-+/* %rbx: struct vcpu, %rdx struct trap_bounce */
-+.Lprocess_trapbounce:
-+ sti
-+.Lbounce_exception:
-+ call create_bounce_frame
-+ movb $0, TRAPBOUNCE_flags(%rdx)
-+ jmp test_all_events
-+
-+ ALIGN
- /* %rbx: struct vcpu */
- process_mce:
- testb $1 << VCPU_TRAP_MCE,VCPU_async_exception_mask(%rbx)
-@@ -698,15 +713,9 @@ handle_exception_saved:
- mov %r15, STACK_CPUINFO_FIELD(xen_cr3)(%r14)
- testb $3,UREGS_cs(%rsp)
- jz restore_all_xen
-- leaq VCPU_trap_bounce(%rbx),%rdx
- movq VCPU_domain(%rbx),%rax
- testb $1,DOMAIN_is_32bit_pv(%rax)
-- jnz compat_post_handle_exception
-- testb $TBF_EXCEPTION,TRAPBOUNCE_flags(%rdx)
-- jz test_all_events
--.Lbounce_exception:
-- call create_bounce_frame
-- movb $0,TRAPBOUNCE_flags(%rdx)
-+ jnz compat_test_all_events
- jmp test_all_events
-
- /* No special register assumptions. */
diff --git a/main/xen/xsa260-3.patch b/main/xen/xsa260-3.patch
deleted file mode 100644
index 6efdd511ea..0000000000
--- a/main/xen/xsa260-3.patch
+++ /dev/null
@@ -1,138 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/traps: Use an Interrupt Stack Table for #DB
-
-PV guests can use architectural corner cases to cause #DB to be raised after
-transitioning into supervisor mode.
-
-Use an interrupt stack table for #DB to prevent the exception being taken with
-a guest controlled stack pointer.
-
-This is part of XSA-260 / CVE-2018-8897.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/cpu/common.c
-+++ b/xen/arch/x86/cpu/common.c
-@@ -672,6 +672,7 @@ void load_system_tables(void)
- tss->ist[IST_MCE - 1] = stack_top + IST_MCE * PAGE_SIZE;
- tss->ist[IST_DF - 1] = stack_top + IST_DF * PAGE_SIZE;
- tss->ist[IST_NMI - 1] = stack_top + IST_NMI * PAGE_SIZE;
-+ tss->ist[IST_DB - 1] = stack_top + IST_DB * PAGE_SIZE;
-
- _set_tssldt_desc(
- gdt + TSS_ENTRY,
-@@ -692,6 +693,7 @@ void load_system_tables(void)
- set_ist(&idt_tables[cpu][TRAP_double_fault], IST_DF);
- set_ist(&idt_tables[cpu][TRAP_nmi], IST_NMI);
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_MCE);
-+ set_ist(&idt_tables[cpu][TRAP_debug], IST_DB);
- }
-
- /*
---- a/xen/arch/x86/hvm/svm/svm.c
-+++ b/xen/arch/x86/hvm/svm/svm.c
-@@ -1045,6 +1045,7 @@ static void svm_ctxt_switch_from(struct
- set_ist(&idt_tables[cpu][TRAP_double_fault], IST_DF);
- set_ist(&idt_tables[cpu][TRAP_nmi], IST_NMI);
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_MCE);
-+ set_ist(&idt_tables[cpu][TRAP_debug], IST_DB);
- }
-
- static void svm_ctxt_switch_to(struct vcpu *v)
-@@ -1069,6 +1070,7 @@ static void svm_ctxt_switch_to(struct vc
- set_ist(&idt_tables[cpu][TRAP_double_fault], IST_NONE);
- set_ist(&idt_tables[cpu][TRAP_nmi], IST_NONE);
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
-+ set_ist(&idt_tables[cpu][TRAP_debug], IST_NONE);
-
- svm_restore_dr(v);
-
---- a/xen/arch/x86/smpboot.c
-+++ b/xen/arch/x86/smpboot.c
-@@ -962,6 +962,7 @@ static int cpu_smpboot_alloc(unsigned in
- set_ist(&idt_tables[cpu][TRAP_double_fault], IST_NONE);
- set_ist(&idt_tables[cpu][TRAP_nmi], IST_NONE);
- set_ist(&idt_tables[cpu][TRAP_machine_check], IST_NONE);
-+ set_ist(&idt_tables[cpu][TRAP_debug], IST_NONE);
-
- for ( stub_page = 0, i = cpu & ~(STUBS_PER_PAGE - 1);
- i < nr_cpu_ids && i <= (cpu | (STUBS_PER_PAGE - 1)); ++i )
---- a/xen/arch/x86/traps.c
-+++ b/xen/arch/x86/traps.c
-@@ -259,13 +259,13 @@ static void show_guest_stack(struct vcpu
- /*
- * Notes for get_stack_trace_bottom() and get_stack_dump_bottom()
- *
-- * Stack pages 0, 1 and 2:
-+ * Stack pages 0 - 3:
- * These are all 1-page IST stacks. Each of these stacks have an exception
- * frame and saved register state at the top. The interesting bound for a
- * trace is the word adjacent to this, while the bound for a dump is the
- * very top, including the exception frame.
- *
-- * Stack pages 3, 4 and 5:
-+ * Stack pages 4 and 5:
- * None of these are particularly interesting. With MEMORY_GUARD, page 5 is
- * explicitly not present, so attempting to dump or trace it is
- * counterproductive. Without MEMORY_GUARD, it is possible for a call chain
-@@ -286,12 +286,12 @@ unsigned long get_stack_trace_bottom(uns
- {
- switch ( get_stack_page(sp) )
- {
-- case 0 ... 2:
-+ case 0 ... 3:
- return ROUNDUP(sp, PAGE_SIZE) -
- offsetof(struct cpu_user_regs, es) - sizeof(unsigned long);
-
- #ifndef MEMORY_GUARD
-- case 3 ... 5:
-+ case 4 ... 5:
- #endif
- case 6 ... 7:
- return ROUNDUP(sp, STACK_SIZE) -
-@@ -306,11 +306,11 @@ unsigned long get_stack_dump_bottom(unsi
- {
- switch ( get_stack_page(sp) )
- {
-- case 0 ... 2:
-+ case 0 ... 3:
- return ROUNDUP(sp, PAGE_SIZE) - sizeof(unsigned long);
-
- #ifndef MEMORY_GUARD
-- case 3 ... 5:
-+ case 4 ... 5:
- #endif
- case 6 ... 7:
- return ROUNDUP(sp, STACK_SIZE) - sizeof(unsigned long);
-@@ -4022,6 +4022,7 @@ void __init init_idt_traps(void)
- set_ist(&idt_table[TRAP_double_fault], IST_DF);
- set_ist(&idt_table[TRAP_nmi], IST_NMI);
- set_ist(&idt_table[TRAP_machine_check], IST_MCE);
-+ set_ist(&idt_table[TRAP_debug], IST_DB);
-
- /* CPU0 uses the master IDT. */
- idt_tables[0] = idt_table;
---- a/xen/arch/x86/x86_64/entry.S
-+++ b/xen/arch/x86/x86_64/entry.S
-@@ -769,7 +769,7 @@ ENTRY(device_not_available)
- ENTRY(debug)
- pushq $0
- movl $TRAP_debug,4(%rsp)
-- jmp handle_exception
-+ jmp handle_ist_exception
-
- ENTRY(int3)
- pushq $0
---- a/xen/include/asm-x86/processor.h
-+++ b/xen/include/asm-x86/processor.h
-@@ -498,7 +498,8 @@ struct __packed __cacheline_aligned tss_
- #define IST_DF 1UL
- #define IST_NMI 2UL
- #define IST_MCE 3UL
--#define IST_MAX 3UL
-+#define IST_DB 4UL
-+#define IST_MAX 4UL
-
- /* Set the interrupt stack table used by a particular interrupt
- * descriptor table entry. */
diff --git a/main/xen/xsa260-4.patch b/main/xen/xsa260-4.patch
deleted file mode 100644
index c2b77afc89..0000000000
--- a/main/xen/xsa260-4.patch
+++ /dev/null
@@ -1,72 +0,0 @@
-From: Andrew Cooper <andrew.cooper3@citrix.com>
-Subject: x86/traps: Fix handling of #DB exceptions in hypervisor context
-
-The WARN_ON() can be triggered by guest activities, and emits a full stack
-trace without rate limiting. Swap it out for a ratelimited printk with just
-enough information to work out what is going on.
-
-Not all #DB exceptions are traps, so blindly continuing is not a safe action
-to take. We don't let PV guests select these settings in the real %dr7 to
-begin with, but for added safety against unexpected situations, detect the
-fault cases and crash in an obvious manner.
-
-This is part of XSA-260 / CVE-2018-8897.
-
-Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
-Reviewed-by: Jan Beulich <jbeulich@suse.com>
-
---- a/xen/arch/x86/traps.c
-+++ b/xen/arch/x86/traps.c
-@@ -3889,16 +3889,44 @@ void do_debug(struct cpu_user_regs *regs
- regs->eflags &= ~X86_EFLAGS_TF;
- }
- }
-- else
-+
-+ /*
-+ * Check for fault conditions. General Detect, and instruction
-+ * breakpoints are faults rather than traps, at which point attempting
-+ * to ignore and continue will result in a livelock.
-+ */
-+ if ( dr6 & DR_GENERAL_DETECT )
-+ {
-+ printk(XENLOG_ERR "Hit General Detect in Xen context\n");
-+ fatal_trap(regs);
-+ }
-+
-+ if ( dr6 & (DR_TRAP3 | DR_TRAP2 | DR_TRAP1 | DR_TRAP0) )
- {
-- /*
-- * We ignore watchpoints when they trigger within Xen. This may
-- * happen when a buffer is passed to us which previously had a
-- * watchpoint set on it. No need to bump EIP; the only faulting
-- * trap is an instruction breakpoint, which can't happen to us.
-- */
-- WARN_ON(!search_exception_table(regs->eip));
-+ unsigned int bp, dr7 = read_debugreg(7) >> DR_CONTROL_SHIFT;
-+
-+ for ( bp = 0; bp < 4; ++bp )
-+ {
-+ if ( (dr6 & (1u << bp)) && /* Breakpoint triggered? */
-+ ((dr7 & (3u << (bp * DR_CONTROL_SIZE))) == 0) /* Insn? */ )
-+ {
-+ printk(XENLOG_ERR
-+ "Hit instruction breakpoint in Xen context\n");
-+ fatal_trap(regs);
-+ }
-+ }
- }
-+
-+ /*
-+ * Whatever caused this #DB should be a trap. Note it and continue.
-+ * Guests can trigger this in certain corner cases, so ensure the
-+ * message is ratelimited.
-+ */
-+ gprintk(XENLOG_WARNING,
-+ "Hit #DB in Xen context: %04x:%p [%ps], stk %04x:%p, dr6 %lx\n",
-+ regs->cs, _p(regs->rip), _p(regs->rip),
-+ regs->ss, _p(regs->rsp), dr6);
-+
- goto out;
- }
-
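
The fault-detection loop added here decodes %dr6 against %dr7's R/W fields: a triggered breakpoint whose R/W bits are 00 is an instruction (execute) breakpoint, i.e. a fault rather than a trap. A standalone sketch of that decode, using the standard DR_CONTROL_* layout:

    #include <stdio.h>

    #define DR_CONTROL_SHIFT 16
    #define DR_CONTROL_SIZE  4

    /* Return 1 if any breakpoint reported in dr6 has R/W bits of 00
     * in dr7, i.e. an instruction breakpoint - a fault, not a trap. */
    static int hit_insn_breakpoint(unsigned long dr6, unsigned long dr7)
    {
        unsigned int bp, ctrl = dr7 >> DR_CONTROL_SHIFT;

        for ( bp = 0; bp < 4; ++bp )
            if ( (dr6 & (1u << bp)) &&                      /* hit?     */
                 !(ctrl & (3u << (bp * DR_CONTROL_SIZE))) ) /* execute? */
                return 1;

        return 0;
    }

    int main(void)
    {
        /* DR1 reported in %dr6; all %dr7 R/W fields are 00 (execute). */
        printf("fault condition: %d\n",
               hit_insn_breakpoint(1ul << 1, 0ul)); /* 1 */
        return 0;
    }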
diff --git a/main/xen/xsa261-4.7.patch b/main/xen/xsa261-4.7.patch
deleted file mode 100644
index eaa93f1cee..0000000000
--- a/main/xen/xsa261-4.7.patch
+++ /dev/null
@@ -1,264 +0,0 @@
-From 0ce5019f87abe20d1e714f8d983418ecfb0ad5df Mon Sep 17 00:00:00 2001
-From: Xen Project Security Team <security@xenproject.org>
-Date: Mon, 23 Apr 2018 16:56:47 +0100
-Subject: [PATCH] x86/vpt: add support for IO-APIC routed interrupts
-
-And modify the HPET code to make use of it. Currently HPET interrupts
-are always treated as ISA and thus injected through the vPIC. This is
-wrong because HPET interrupts when not in legacy mode should be
-injected from the IO-APIC.
-
-To make things worse, the supported interrupt routing values are set
-to [20..23], which clearly falls outside of the ISA range, thus
-leading to an ASSERT in debug builds or memory corruption in non-debug
-builds because the interrupt injection code will write out of the
-bounds of the arch.hvm_domain.vpic array.
-
-Since the HPET interrupt source can change between ISA and IO-APIC,
-always destroy the timer before changing the mode, or else Xen risks
-changing it while the timer is active.
-
-Note that vpt interrupt injection is racy in the sense that the
-vIO-APIC RTE entry can be written by the guest in between the call to
-pt_irq_masked and hvm_ioapic_assert, or the call to pt_update_irq and
-pt_intr_post. Those are not deemed to be security issues, but rather
-quirks of the current implementation. In the worse case the guest
-might lose interrupts or get multiple interrupt vectors injected for
-the same timer source.
-
-This is part of XSA-261.
-
-Address actual and potential compiler warnings. Fix formatting.
-
-Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
----
- xen/arch/x86/hvm/hpet.c | 7 ++++
- xen/arch/x86/hvm/irq.c | 19 ++++++++++
- xen/arch/x86/hvm/vpt.c | 85 ++++++++++++++++++++++++++++++-------------
- xen/include/asm-x86/hvm/irq.h | 3 ++
- xen/include/asm-x86/hvm/vpt.h | 1 +
- 5 files changed, 89 insertions(+), 26 deletions(-)
-
-diff --git a/xen/arch/x86/hvm/hpet.c b/xen/arch/x86/hvm/hpet.c
-index f7aed7f..2837709 100644
---- a/xen/arch/x86/hvm/hpet.c
-+++ b/xen/arch/x86/hvm/hpet.c
-@@ -264,13 +264,20 @@ static void hpet_set_timer(HPETState *h, unsigned int tn,
- diff = (timer_is_32bit(h, tn) && (-diff > HPET_TINY_TIME_SPAN))
- ? (uint32_t)diff : 0;
-
-+ destroy_periodic_time(&h->pt[tn]);
- if ( (tn <= 1) && (h->hpet.config & HPET_CFG_LEGACY) )
-+ {
- /* if LegacyReplacementRoute bit is set, HPET specification requires
- timer0 be routed to IRQ0 in NON-APIC or IRQ2 in the I/O APIC,
- timer1 be routed to IRQ8 in NON-APIC or IRQ8 in the I/O APIC. */
- irq = (tn == 0) ? 0 : 8;
-+ h->pt[tn].source = PTSRC_isa;
-+ }
- else
-+ {
- irq = timer_int_route(h, tn);
-+ h->pt[tn].source = PTSRC_ioapic;
-+ }
-
- /*
- * diff is the time from now when the timer should fire, for a periodic
-diff --git a/xen/arch/x86/hvm/irq.c b/xen/arch/x86/hvm/irq.c
-index be1d4e8..2d1990b 100644
---- a/xen/arch/x86/hvm/irq.c
-+++ b/xen/arch/x86/hvm/irq.c
-@@ -42,6 +42,25 @@ static void assert_gsi(struct domain *d, unsigned ioapic_gsi)
- vioapic_irq_positive_edge(d, ioapic_gsi);
- }
-
-+int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool_t level)
-+{
-+ int vector;
-+
-+ if ( gsi >= VIOAPIC_NUM_PINS )
-+ {
-+ ASSERT_UNREACHABLE();
-+ return -1;
-+ }
-+
-+ spin_lock(&d->arch.hvm_domain.irq_lock);
-+ if ( !level || d->arch.hvm_domain.irq.gsi_assert_count[gsi]++ == 0 )
-+ assert_gsi(d, gsi);
-+ vector = domain_vioapic(d)->redirtbl[gsi].fields.vector;
-+ spin_unlock(&d->arch.hvm_domain.irq_lock);
-+
-+ return vector;
-+}
-+
- static void assert_irq(struct domain *d, unsigned ioapic_gsi, unsigned pic_irq)
- {
- assert_gsi(d, ioapic_gsi);
-diff --git a/xen/arch/x86/hvm/vpt.c b/xen/arch/x86/hvm/vpt.c
-index 358ec57..f0fe31c 100644
---- a/xen/arch/x86/hvm/vpt.c
-+++ b/xen/arch/x86/hvm/vpt.c
-@@ -97,22 +97,36 @@ static int pt_irq_vector(struct periodic_time *pt, enum hvm_intsrc src)
- static int pt_irq_masked(struct periodic_time *pt)
- {
- struct vcpu *v = pt->vcpu;
-- unsigned int gsi, isa_irq;
-- uint8_t pic_imr;
-+ unsigned int gsi = pt->irq;
-
-- if ( pt->source == PTSRC_lapic )
-+ switch ( pt->source )
-+ {
-+ case PTSRC_lapic:
- {
- struct vlapic *vlapic = vcpu_vlapic(v);
-+
- return (!vlapic_enabled(vlapic) ||
- (vlapic_get_reg(vlapic, APIC_LVTT) & APIC_LVT_MASKED));
- }
-
-- isa_irq = pt->irq;
-- gsi = hvm_isa_irq_to_gsi(isa_irq);
-- pic_imr = v->domain->arch.hvm_domain.vpic[isa_irq >> 3].imr;
-+ case PTSRC_isa:
-+ {
-+ uint8_t pic_imr = v->domain->arch.hvm_domain.vpic[pt->irq >> 3].imr;
-
-- return (((pic_imr & (1 << (isa_irq & 7))) || !vlapic_accept_pic_intr(v)) &&
-- domain_vioapic(v->domain)->redirtbl[gsi].fields.mask);
-+ /* Check if the interrupt is unmasked in the PIC. */
-+ if ( !(pic_imr & (1 << (pt->irq & 7))) && vlapic_accept_pic_intr(v) )
-+ return 0;
-+
-+ gsi = hvm_isa_irq_to_gsi(pt->irq);
-+ }
-+
-+ /* Fallthrough to check if the interrupt is masked on the IO APIC. */
-+ case PTSRC_ioapic:
-+ return domain_vioapic(v->domain)->redirtbl[gsi].fields.mask;
-+ }
-+
-+ ASSERT_UNREACHABLE();
-+ return 1;
- }
-
- static void pt_lock(struct periodic_time *pt)
-@@ -233,7 +247,7 @@ int pt_update_irq(struct vcpu *v)
- struct list_head *head = &v->arch.hvm_vcpu.tm_list;
- struct periodic_time *pt, *temp, *earliest_pt;
- uint64_t max_lag;
-- int irq, is_lapic;
-+ int irq, pt_vector = -1;
-
- spin_lock(&v->arch.hvm_vcpu.tm_lock);
-
-@@ -269,29 +283,42 @@ int pt_update_irq(struct vcpu *v)
-
- earliest_pt->irq_issued = 1;
- irq = earliest_pt->irq;
-- is_lapic = (earliest_pt->source == PTSRC_lapic);
-
- spin_unlock(&v->arch.hvm_vcpu.tm_lock);
-
-- if ( is_lapic )
-- vlapic_set_irq(vcpu_vlapic(v), irq, 0);
-- else
-+ switch ( earliest_pt->source )
- {
-+ case PTSRC_lapic:
-+ /*
-+ * If periodic timer interrupt is handled by lapic, its vector in
-+ * IRR is returned and used to set eoi_exit_bitmap for virtual
-+ * interrupt delivery case. Otherwise return -1 to do nothing.
-+ */
-+ vlapic_set_irq(vcpu_vlapic(v), irq, 0);
-+ pt_vector = irq;
-+ break;
-+
-+ case PTSRC_isa:
- hvm_isa_irq_deassert(v->domain, irq);
- hvm_isa_irq_assert(v->domain, irq);
-+
-+ if ( platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
-+ v->domain->arch.hvm_domain.vpic[irq >> 3].int_output )
-+ return -1;
-+
-+ pt_vector = pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
-+ break;
-+
-+ case PTSRC_ioapic:
-+ /*
-+ * NB: At the moment IO-APIC routed interrupts generated by vpt devices
-+ * (HPET) are edge-triggered.
-+ */
-+ pt_vector = hvm_ioapic_assert(v->domain, irq, 0);
-+ break;
- }
-
-- /*
-- * If periodic timer interrut is handled by lapic, its vector in
-- * IRR is returned and used to set eoi_exit_bitmap for virtual
-- * interrupt delivery case. Otherwise return -1 to do nothing.
-- */
-- if ( !is_lapic &&
-- platform_legacy_irq(irq) && vlapic_accept_pic_intr(v) &&
-- (&v->domain->arch.hvm_domain)->vpic[irq >> 3].int_output )
-- return -1;
-- else
-- return pt_irq_vector(earliest_pt, hvm_intsrc_lapic);
-+ return pt_vector;
- }
-
- static struct periodic_time *is_pt_irq(
-@@ -386,7 +413,13 @@ void create_periodic_time(
- struct vcpu *v, struct periodic_time *pt, uint64_t delta,
- uint64_t period, uint8_t irq, time_cb *cb, void *data)
- {
-- ASSERT(pt->source != 0);
-+ if ( !pt->source ||
-+ (pt->irq >= NR_ISAIRQS && pt->source == PTSRC_isa) ||
-+ (pt->irq >= VIOAPIC_NUM_PINS && pt->source == PTSRC_ioapic) )
-+ {
-+ ASSERT_UNREACHABLE();
-+ return;
-+ }
-
- destroy_periodic_time(pt);
-
-@@ -466,7 +499,7 @@ static void pt_adjust_vcpu(struct periodic_time *pt, struct vcpu *v)
- {
- int on_list;
-
-- ASSERT(pt->source == PTSRC_isa);
-+ ASSERT(pt->source == PTSRC_isa || pt->source == PTSRC_ioapic);
-
- if ( pt->vcpu == NULL )
- return;
-diff --git a/xen/include/asm-x86/hvm/irq.h b/xen/include/asm-x86/hvm/irq.h
-index 73b8fb0..c60036b 100644
---- a/xen/include/asm-x86/hvm/irq.h
-+++ b/xen/include/asm-x86/hvm/irq.h
-@@ -103,4 +103,7 @@ struct hvm_intack hvm_vcpu_has_pending_irq(struct vcpu *v);
- struct hvm_intack hvm_vcpu_ack_pending_irq(struct vcpu *v,
- struct hvm_intack intack);
-
-+/* Assert an IO APIC pin. */
-+int hvm_ioapic_assert(struct domain *d, unsigned int gsi, bool_t level);
-+
- #endif /* __ASM_X86_HVM_IRQ_H__ */
-diff --git a/xen/include/asm-x86/hvm/vpt.h b/xen/include/asm-x86/hvm/vpt.h
-index a27bea4..616f040 100644
---- a/xen/include/asm-x86/hvm/vpt.h
-+++ b/xen/include/asm-x86/hvm/vpt.h
-@@ -45,6 +45,7 @@ struct periodic_time {
- bool_t warned_timeout_too_short;
- #define PTSRC_isa 1 /* ISA time source */
- #define PTSRC_lapic 2 /* LAPIC time source */
-+#define PTSRC_ioapic 3 /* IOAPIC time source */
- u8 source; /* PTSRC_ */
- u8 irq;
- struct vcpu *vcpu; /* vcpu timer interrupt delivers to */
---
-2.1.4
-
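
For illustration only, a standalone C sketch of the time-source dispatch that the pt_irq_masked() hunk above introduces. This is not Xen code: the toy_domain type, the isa_irq_to_gsi() helper, and all constants are simplified assumptions (the real code also consults vlapic_accept_pic_intr()). The point it models is the deliberate fallthrough: PTSRC_isa first checks deliverability through the PIC, then translates its IRQ to a GSI and falls into the PTSRC_ioapic case, so both routes end in the same IO-APIC mask-bit test.

#include <stdbool.h>
#include <stdio.h>

enum pt_source { PTSRC_isa = 1, PTSRC_lapic = 2, PTSRC_ioapic = 3 };

/* Toy stand-in for a domain's virtual PIC and IO-APIC state. */
struct toy_domain {
    unsigned char pic_imr[2];   /* 8259A master/slave interrupt mask regs */
    bool ioapic_mask[48];       /* per-GSI mask bit in the redirection table */
    bool lapic_timer_masked;    /* LVTT mask bit */
};

static unsigned int isa_irq_to_gsi(unsigned int irq)
{
    return irq ? irq : 2;       /* ISA IRQ0 is routed to GSI2, rest 1:1 */
}

static bool toy_pt_irq_masked(const struct toy_domain *d,
                              enum pt_source src, unsigned int irq)
{
    unsigned int gsi = irq;

    switch ( src )
    {
    case PTSRC_lapic:
        return d->lapic_timer_masked;

    case PTSRC_isa:
        /* Deliverable through the PIC?  Then it is not masked. */
        if ( !(d->pic_imr[irq >> 3] & (1 << (irq & 7))) )
            return false;
        gsi = isa_irq_to_gsi(irq);
        /* fall through: the ISA IRQ may still reach the IO-APIC route */
    case PTSRC_ioapic:
        return d->ioapic_mask[gsi];
    }

    return true;                /* unknown source: treat as masked */
}

int main(void)
{
    struct toy_domain d = { .pic_imr = { 0xff, 0xff } }; /* PIC fully masked */

    d.ioapic_mask[8] = true;    /* mask GSI8 on the IO-APIC */

    printf("ISA IRQ0 masked: %d\n", toy_pt_irq_masked(&d, PTSRC_isa, 0));
    printf("GSI8 masked:     %d\n", toy_pt_irq_masked(&d, PTSRC_ioapic, 8));
    return 0;
}

Compiled with any C99 compiler this prints 0 then 1: IRQ0 is reported unmasked because its GSI2 route through the IO-APIC is open even though the PIC masks it, which is exactly the dual-route check the hunk encodes.
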
diff --git a/main/xen/xsa262-4.9.patch b/main/xen/xsa262-4.9.patch
deleted file mode 100644
index f9a7a4ce3a..0000000000
--- a/main/xen/xsa262-4.9.patch
+++ /dev/null
@@ -1,76 +0,0 @@
-From: Jan Beulich <jbeulich@suse.com>
-Subject: x86/HVM: guard against emulator driving ioreq state in weird ways
-
-In the case where hvm_wait_for_io() calls wait_on_xen_event_channel(),
-p->state ends up being read twice in succession: once to determine that
-state != p->state, and then again at the top of the loop. This gives a
-compromised emulator a chance to change the state back between the two
-reads, potentially keeping Xen in a loop indefinitely.
-
-Instead:
-* Read p->state once in each of the wait_on_xen_event_channel() tests,
-* re-use that value the next time around,
-* and insist that the states continue to transition "forward" (with the
- exception of the transition to STATE_IOREQ_NONE).
-
-This is XSA-262.
-
-Signed-off-by: Jan Beulich <jbeulich@suse.com>
-Reviewed-by: George Dunlap <george.dunlap@citrix.com>
-
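
The three bullets above describe a generic hardening pattern for state shared with an untrusted writer. A minimal single-threaded C sketch follows; it is not the Xen implementation (the wait_for_io() name, the C11 atomics, and the int return codes are assumptions standing in for Xen's smp_rmb(), wait_on_xen_event_channel(), and domain_crash() machinery). Each iteration takes exactly one snapshot of the shared state, reuses it, and rejects any backward transition other than a reset to STATE_IOREQ_NONE.

#include <stdatomic.h>
#include <stdio.h>

/* Same ordering as Xen's public ioreq states. */
enum { STATE_IOREQ_NONE, STATE_IOREQ_READY, STATE_IOREQ_INPROCESS,
       STATE_IORESP_READY };

/* Returns 1 on completion, 0 on a hostile/invalid transition. */
static int wait_for_io(_Atomic unsigned int *shared_state)
{
    unsigned int prev_state = STATE_IOREQ_NONE;

    for ( ;; )
    {
        /* One acquire load per iteration: the single source of truth. */
        unsigned int state = atomic_load_explicit(shared_state,
                                                  memory_order_acquire);

        if ( state == STATE_IOREQ_NONE )
            return 1;           /* emulator went away; bail out */

        if ( state < prev_state )
        {
            fprintf(stderr, "weird transition %u -> %u\n", prev_state, state);
            return 0;           /* Xen would crash the domain here */
        }

        switch ( prev_state = state )
        {
        case STATE_IORESP_READY:
            atomic_store_explicit(shared_state, STATE_IOREQ_NONE,
                                  memory_order_release);
            return 1;           /* response consumed */
        case STATE_IOREQ_READY:
        case STATE_IOREQ_INPROCESS:
            /* Real code blocks on an event channel; single-threaded,
             * this would spin, so main() never exercises it. */
            continue;
        default:
            fprintf(stderr, "weird state %u\n", state);
            return 0;
        }
    }
}

int main(void)
{
    _Atomic unsigned int state = STATE_IORESP_READY;

    printf("completed: %d\n", wait_for_io(&state));
    return 0;
}

Because prev_state only ever moves forward, a writer flipping the state back between the snapshot and the next iteration is detected instead of re-entering the wait, which is the loop-forever hazard the description above identifies.
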
---- a/xen/arch/x86/hvm/ioreq.c
-+++ b/xen/arch/x86/hvm/ioreq.c
-@@ -87,14 +87,17 @@ static void hvm_io_assist(struct hvm_ior
-
- static bool_t hvm_wait_for_io(struct hvm_ioreq_vcpu *sv, ioreq_t *p)
- {
-+ unsigned int prev_state = STATE_IOREQ_NONE;
-+
- while ( sv->pending )
- {
- unsigned int state = p->state;
-
-- rmb();
-- switch ( state )
-+ smp_rmb();
-+
-+ recheck:
-+ if ( unlikely(state == STATE_IOREQ_NONE) )
- {
-- case STATE_IOREQ_NONE:
- /*
- * The only reason we should see this case is when an
- * emulator is dying and it races with an I/O being
-@@ -102,14 +105,30 @@ static bool_t hvm_wait_for_io(struct hvm
- */
- hvm_io_assist(sv, ~0ul);
- break;
-+ }
-+
-+ if ( unlikely(state < prev_state) )
-+ {
-+ gdprintk(XENLOG_ERR, "Weird HVM ioreq state transition %u -> %u\n",
-+ prev_state, state);
-+ sv->pending = 0;
-+ domain_crash(sv->vcpu->domain);
-+ return 0; /* bail */
-+ }
-+
-+ switch ( prev_state = state )
-+ {
- case STATE_IORESP_READY: /* IORESP_READY -> NONE */
- p->state = STATE_IOREQ_NONE;
- hvm_io_assist(sv, p->data);
- break;
- case STATE_IOREQ_READY: /* IOREQ_{READY,INPROCESS} -> IORESP_READY */
- case STATE_IOREQ_INPROCESS:
-- wait_on_xen_event_channel(sv->ioreq_evtchn, p->state != state);
-- break;
-+ wait_on_xen_event_channel(sv->ioreq_evtchn,
-+ ({ state = p->state;
-+ smp_rmb();
-+ state != prev_state; }));
-+ goto recheck;
- default:
- gdprintk(XENLOG_ERR, "Weird HVM iorequest state %u\n", state);
- sv->pending = 0;