about summary refs log tree commit diff stats
path: root/main/xen/xsa53-4.2.patch
diff options
context:
space:
mode:
Diffstat (limited to 'main/xen/xsa53-4.2.patch')
-rw-r--r--  main/xen/xsa53-4.2.patch  57
1 files changed, 57 insertions, 0 deletions
diff --git a/main/xen/xsa53-4.2.patch b/main/xen/xsa53-4.2.patch
new file mode 100644
index 0000000000..eb8e79bed2
--- /dev/null
+++ b/main/xen/xsa53-4.2.patch
@@ -0,0 +1,57 @@
+x86/xsave: recover from faults on XRSTOR
+
+Just like FXRSTOR, XRSTOR can raise #GP if bad content is being passed
+to it in the memory block (i.e. aspects not under the control of the
+hypervisor, other than e.g. proper alignment of the block).
+
+Also correct the comment explaining why FXRSTOR needs exception
+recovery code to not wrongly state that this can only be a result of
+the control tools passing a bad image.
+
+This is CVE-2013-2077 / XSA-53.
+
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
+--- a/xen/arch/x86/i387.c
++++ b/xen/arch/x86/i387.c
+@@ -53,7 +53,7 @@ static inline void fpu_fxrstor(struct vc
+ /*
+ * FXRSTOR can fault if passed a corrupted data block. We handle this
+ * possibility, which may occur if the block was passed to us by control
+- * tools, by silently clearing the block.
++ * tools or through VCPUOP_initialise, by silently clearing the block.
+ */
+ asm volatile (
+ #ifdef __i386__
+--- a/xen/arch/x86/xstate.c
++++ b/xen/arch/x86/xstate.c
+@@ -93,10 +93,25 @@ void xrstor(struct vcpu *v, uint64_t mas
+ "fildl %0" /* load to clear state */
+ : : "m" (ptr->fpu_sse) );
+
+- asm volatile (
+- ".byte " REX_PREFIX "0x0f,0xae,0x2f"
+- :
+- : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
++ /*
++ * XRSTOR can fault if passed a corrupted data block. We handle this
++ * possibility, which may occur if the block was passed to us by control
++ * tools or through VCPUOP_initialise, by silently clearing the block.
++ */
++ asm volatile ( "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
++ ".section .fixup,\"ax\"\n"
++ "2: mov %5,%%ecx \n"
++ " xor %1,%1 \n"
++ " rep stosb \n"
++ " lea %2,%0 \n"
++ " mov %3,%1 \n"
++ " jmp 1b \n"
++ ".previous \n"
++ _ASM_EXTABLE(1b, 2b)
++ : "+&D" (ptr), "+&a" (lmask)
++ : "m" (*ptr), "g" (lmask), "d" (hmask),
++ "m" (xsave_cntxt_size)
++ : "ecx" );
+ }
+
+ bool_t xsave_enabled(const struct vcpu *v)