1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
|
From: Andrew Cooper <andrew.cooper3@citrix.com>
Subject: x86: Support fully eager FPU context switching
This is controlled on a per-vcpu basis for flexibility.
This is part of XSA-267 / CVE-2018-3665
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
diff --git a/xen/arch/x86/i387.c b/xen/arch/x86/i387.c
index 9a172db..1da31af 100644
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -210,7 +210,7 @@ void vcpu_restore_fpu_eager(struct vcpu *v)
ASSERT(!is_idle_vcpu(v));
/* Restore nonlazy extended state (i.e. parts not tracked by CR0.TS). */
- if ( !v->arch.nonlazy_xstate_used )
+ if ( !v->arch.fully_eager_fpu && !v->arch.nonlazy_xstate_used )
return;
/* Avoid recursion */
@@ -221,11 +221,19 @@ void vcpu_restore_fpu_eager(struct vcpu *v)
* above) we also need to restore full state, to prevent subsequently
* saving state belonging to another vCPU.
*/
- if ( xstate_all(v) )
+ if ( v->arch.fully_eager_fpu || (v->arch.xsave_area && xstate_all(v)) )
{
- fpu_xrstor(v, XSTATE_ALL);
+ if ( cpu_has_xsave )
+ fpu_xrstor(v, XSTATE_ALL);
+ else
+ fpu_fxrstor(v);
+
v->fpu_initialised = 1;
v->fpu_dirtied = 1;
+
+ /* Xen doesn't need TS set, but the guest might. */
+ if ( is_pv_vcpu(v) && (v->arch.pv_vcpu.ctrlreg[0] & X86_CR0_TS) )
+ stts();
}
else
{
@@ -247,6 +255,8 @@ void vcpu_restore_fpu_lazy(struct vcpu *v)
if ( v->fpu_dirtied )
return;
+ ASSERT(!v->arch.fully_eager_fpu);
+
if ( cpu_has_xsave )
fpu_xrstor(v, XSTATE_LAZY);
else
diff --git a/xen/include/asm-x86/domain.h b/xen/include/asm-x86/domain.h
index c08ddc0..702ec64 100644
--- a/xen/include/asm-x86/domain.h
+++ b/xen/include/asm-x86/domain.h
@@ -593,6 +593,9 @@ struct arch_vcpu
* and thus should be saved/restored. */
bool_t nonlazy_xstate_used;
+ /* Restore all FPU state (lazy and non-lazy state) on context switch? */
+ bool fully_eager_fpu;
+
/* Has the guest enabled CPUID faulting? */
bool cpuid_faulting;
|