Diffstat (limited to 'main/xen/xsa156.patch')
-rw-r--r--  main/xen/xsa156.patch | 127
1 file changed, 127 insertions(+), 0 deletions(-)
diff --git a/main/xen/xsa156.patch b/main/xen/xsa156.patch
new file mode 100644
index 0000000000..d37dff1cd7
--- /dev/null
+++ b/main/xen/xsa156.patch
@@ -0,0 +1,127 @@
+x86/HVM: always intercept #AC and #DB
+
+Both are benign exceptions, and both can be triggered by exception
+delivery itself.  Always intercepting them is therefore required to
+prevent a guest from locking up a CPU: once such a delivery loop is
+entered, no further VM exits occur.
+
+The specific scenarios:
+
+1) #AC may be raised during exception delivery if a 32-bit guest sets
+the handler to run in ring 3 and the stack is misaligned.
+
+2) #DB may be raised during exception delivery when a breakpoint has
+been placed on a data structure involved in delivering the exception.
+This results in an endless loop when a 64-bit guest uses a non-zero
+IST for the vector 1 IDT entry (see the sketch below); even without
+IST, the time until a contributory fault finally gets raised (which
+depends on the handler) may be quite long.
+
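+For illustration only (not part of the fix), a rough guest-side sketch
+of scenario 2; write_dr(), ist1_top, and the DR7_* constants are
+assumed names, not real guest-kernel symbols:
+
+    /* Watch the fixed top of the IST1 stack: the CPU's push of the
+     * #DB exception frame then itself raises #DB, and since IST
+     * resets RSP to the same stack top on every delivery, the loop
+     * never ends and no further VM exits occur. */
+    write_dr(0, ist1_top - 8);
+    write_dr(7, DR7_G0 | DR7_RW0_WRITE | DR7_LEN0_8);
+    asm volatile ("int $1");   /* first #DB enters the loop */
+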
+This is XSA-156.
+
+Reported-by: Benjamin Serebrin <serebrin@google.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
+Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
+
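+As a simplified sketch (again not part of the patch), the intercept
+policy the hunks below leave in effect in svm_do_resume() /
+vmx_update_debug_state() boils down to:
+
+    uint32_t bitmap = HVM_TRAP_MASK;   /* #DB, #UD, #AC, #MC: always */
+
+    if ( v->arch.hvm_vcpu.debug_state_latch )
+        bitmap |= 1U << TRAP_int3;     /* int3 only while debugging */
+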
+--- a/xen/arch/x86/hvm/svm/svm.c
++++ b/xen/arch/x86/hvm/svm/svm.c
+@@ -1043,10 +1043,11 @@ static void noreturn svm_do_resume(struc
+          unlikely(v->arch.hvm_vcpu.debug_state_latch != debug_state) )
+     {
+         uint32_t intercepts = vmcb_get_exception_intercepts(vmcb);
+-        uint32_t mask = (1U << TRAP_debug) | (1U << TRAP_int3);
++
+         v->arch.hvm_vcpu.debug_state_latch = debug_state;
+         vmcb_set_exception_intercepts(
+-            vmcb, debug_state ? (intercepts | mask) : (intercepts & ~mask));
++            vmcb, debug_state ? (intercepts | (1U << TRAP_int3))
++                              : (intercepts & ~(1U << TRAP_int3)));
+     }
+ 
+     if ( v->arch.hvm_svm.launch_core != smp_processor_id() )
+@@ -2434,8 +2435,9 @@ void svm_vmexit_handler(struct cpu_user_
+
+     case VMEXIT_EXCEPTION_DB:
+         if ( !v->domain->debugger_attached )
+-            goto unexpected_exit_type;
+-        domain_pause_for_debugger();
++            hvm_inject_hw_exception(TRAP_debug, HVM_DELIVER_NO_ERROR_CODE);
++        else
++            domain_pause_for_debugger();
+         break;
+ 
+     case VMEXIT_EXCEPTION_BP:
+@@ -2483,6 +2485,11 @@ void svm_vmexit_handler(struct cpu_user_
+         break;
+     }
+ 
++    case VMEXIT_EXCEPTION_AC:
++        HVMTRACE_1D(TRAP, TRAP_alignment_check);
++        hvm_inject_hw_exception(TRAP_alignment_check, vmcb->exitinfo1);
++        break;
++
+     case VMEXIT_EXCEPTION_UD:
+         svm_vmexit_ud_intercept(regs);
+         break;
+--- a/xen/arch/x86/hvm/vmx/vmx.c
++++ b/xen/arch/x86/hvm/vmx/vmx.c
+@@ -1224,16 +1224,10 @@ static void vmx_update_host_cr3(struct v
+
+ void vmx_update_debug_state(struct vcpu *v)
+ {
+-    unsigned long mask;
+-
+-    mask = 1u << TRAP_int3;
+-    if ( !cpu_has_monitor_trap_flag )
+-        mask |= 1u << TRAP_debug;
+-
+     if ( v->arch.hvm_vcpu.debug_state_latch )
+-        v->arch.hvm_vmx.exception_bitmap |= mask;
++        v->arch.hvm_vmx.exception_bitmap |= 1U << TRAP_int3;
+     else
+-        v->arch.hvm_vmx.exception_bitmap &= ~mask;
++        v->arch.hvm_vmx.exception_bitmap &= ~(1U << TRAP_int3);
+ 
+     vmx_vmcs_enter(v);
+     vmx_update_exception_bitmap(v);
+@@ -3060,9 +3054,10 @@ void vmx_vmexit_handler(struct cpu_user_
+             __vmread(EXIT_QUALIFICATION, &exit_qualification);
+             HVMTRACE_1D(TRAP_DEBUG, exit_qualification);
+             write_debugreg(6, exit_qualification | DR_STATUS_RESERVED_ONE);
+-            if ( !v->domain->debugger_attached || cpu_has_monitor_trap_flag )
+-                goto exit_and_crash;
+-            domain_pause_for_debugger();
++            if ( !v->domain->debugger_attached )
++                hvm_inject_hw_exception(vector, HVM_DELIVER_NO_ERROR_CODE);
++            else
++                domain_pause_for_debugger();
+             break;
+         case TRAP_int3:
+         {
+@@ -3127,6 +3122,11 @@ void vmx_vmexit_handler(struct cpu_user_
+
+             hvm_inject_page_fault(regs->error_code, exit_qualification);
+             break;
++        case TRAP_alignment_check:
++            HVMTRACE_1D(TRAP, vector);
++            __vmread(VM_EXIT_INTR_ERROR_CODE, &ecode);
++            hvm_inject_hw_exception(vector, ecode);
++            break;
+         case TRAP_nmi:
+             if ( MASK_EXTR(intr_info, INTR_INFO_INTR_TYPE_MASK) !=
+                  X86_EVENTTYPE_NMI )
+--- a/xen/include/asm-x86/hvm/hvm.h
++++ b/xen/include/asm-x86/hvm/hvm.h
+@@ -385,7 +385,10 @@ static inline int hvm_event_pending(stru
+     (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))
+ 
+ /* These exceptions must always be intercepted. */
+-#define HVM_TRAP_MASK ((1U << TRAP_machine_check) | (1U << TRAP_invalid_op))
++#define HVM_TRAP_MASK ((1U << TRAP_debug)           | \
++                       (1U << TRAP_invalid_op)      | \
++                       (1U << TRAP_alignment_check) | \
++                       (1U << TRAP_machine_check))
+ 
+ /*
+  * x86 event types. This enumeration is valid for: