x86/xsave: recover from faults on XRSTOR

Just like FXRSTOR, XRSTOR can raise #GP if bad content is being passed
to it in the memory block (i.e. aspects not under the control of the
hypervisor, other than e.g. proper alignment of the block).

Also correct the comment explaining why FXRSTOR needs exception
recovery code to not wrongly state that this can only be a result of
the control tools passing a bad image.

This is CVE-2013-2077 / XSA-53.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
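
The strategy implemented by the xstate.c hunk below can be summarised in
plain C.  The sketch is illustrative only and is not taken from the Xen
tree: try_xrstor() is a hypothetical wrapper around the XRSTOR instruction
that is assumed to report whether the instruction faulted, and memset() and
the size argument stand in for Xen's own string helpers and
xsave_cntxt_size.

    /*
     * Illustrative sketch, not Xen code: restore from the save area; if
     * XRSTOR faults on bad content, wipe the whole area and retry.  A fully
     * zeroed area has XSTATE_BV == 0 and zero header reserved bits, so the
     * retried XRSTOR simply loads the architectural init state for every
     * component and cannot fault again.
     */
    static void xrstor_or_clear(void *xsave_area, size_t size,
                                uint32_t lmask, uint32_t hmask)
    {
        if ( !try_xrstor(xsave_area, lmask, hmask) )    /* hypothetical helper */
        {
            memset(xsave_area, 0, size);
            (void)try_xrstor(xsave_area, lmask, hmask); /* restores init state */
        }
    }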

--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -53,7 +53,7 @@ static inline void fpu_fxrstor(struct vc
     /*
      * FXRSTOR can fault if passed a corrupted data block. We handle this
      * possibility, which may occur if the block was passed to us by control
-     * tools, by silently clearing the block.
+     * tools or through VCPUOP_initialise, by silently clearing the block.
      */
     asm volatile (
 #ifdef __i386__
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -93,10 +93,25 @@ void xrstor(struct vcpu *v, uint64_t mas
                        "fildl %0"          /* load to clear state */
                        : : "m" (ptr->fpu_sse) );
 
-    asm volatile (
-        ".byte " REX_PREFIX "0x0f,0xae,0x2f"
-        :
-        : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
+    /*
+     * XRSTOR can fault if passed a corrupted data block. We handle this
+     * possibility, which may occur if the block was passed to us by control
+     * tools or through VCPUOP_initialise, by silently clearing the block.
+     */
+    asm volatile ( "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+                   ".section .fixup,\"ax\"\n"
+                   "2: mov %5,%%ecx       \n"
+                   "   xor %1,%1          \n"
+                   "   rep stosb          \n"
+                   "   lea %2,%0          \n"
+                   "   mov %3,%1          \n"
+                   "   jmp 1b             \n"
+                   ".previous             \n"
+                   _ASM_EXTABLE(1b, 2b)
+                   : "+&D" (ptr), "+&a" (lmask)
+                   : "m" (*ptr), "g" (lmask), "d" (hmask),
+                     "m" (xsave_cntxt_size)
+                   : "ecx" );
 }
 
 bool_t xsave_enabled(const struct vcpu *v)
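
With the usual GCC operand numbering, %0 is the area pointer in %rdi (also
the string destination for stosb), %1 is lmask in %eax, %4 is hmask in %edx
and %5 is xsave_cntxt_size.  On a fault at label 1 the fixup stub loads the
area size into %ecx, zeroes %eax, clears the whole save area with
"rep stosb", rewinds %rdi with "lea %2,%0" (rep stosb left it pointing past
the end of the area), reloads lmask into %eax and jumps back to retry the
XRSTOR.  The retry is safe because of the XSAVE header layout; the sketch
below uses field names from the Intel SDM rather than Xen's headers.

    #include <stdint.h>

    /*
     * Sketch only: the 64-byte XSAVE header at offset 512 of the save area.
     * With the area zeroed, XSTATE_BV is 0 (every component reverts to its
     * init configuration) and the reserved bytes are 0, so the retried
     * XRSTOR has nothing left to object to.
     */
    struct xsave_header_sketch {
        uint64_t xstate_bv;     /* one bit per state component; 0 => init */
        uint64_t reserved[7];   /* must be zero or XRSTOR raises #GP      */
    };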