From: Jan Beulich <jbeulich@suse.com>
Subject: x86/HVM: don't load LDTR with VM86 mode attrs during task switch

Just like TR, LDTR is purely a protected mode facility and hence needs
to be loaded accordingly. Also move its loading to where it
architecturally belongs.

This is XSA-192.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
Tested-by: Andrew Cooper <andrew.cooper3@citrix.com>
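
For illustration only, a minimal standalone sketch (hypothetical names,
not Xen code) of the distinction the patch enforces: in VM86 mode a
selector is just a paragraph number (base = sel << 4, no descriptor
lookup), whereas LDTR is a protected-mode-only register and must always
be loaded through a descriptor-table lookup. Passing 0 for eflags, as
the hunk below does for x86_seg_ldtr, forces the protected-mode path
regardless of the guest's current eflags:

    /* Illustrative sketch -- not Xen code; all names are made up. */
    #include <stdint.h>
    #include <stdio.h>

    #define X86_EFLAGS_VM (1u << 17)   /* virtual-8086 mode flag */

    struct fake_segment {
        uint16_t sel;
        uint32_t base;
        int      protected_mode;       /* 1 if loaded via a descriptor */
    };

    /* VM86 mode: base is simply sel << 4; no descriptor is consulted. */
    static void load_vm86(struct fake_segment *s, uint16_t sel)
    {
        s->sel = sel;
        s->base = (uint32_t)sel << 4;
        s->protected_mode = 0;
    }

    /* Protected mode: stand-in for a GDT/LDT descriptor fetch. */
    static void load_protected(struct fake_segment *s, uint16_t sel)
    {
        s->sel = sel;
        s->base = 0x100000;            /* would come from the descriptor */
        s->protected_mode = 1;
    }

    /*
     * Analogue of the patched hvm_load_segment_selector(): the caller
     * now supplies the eflags to test, so an eflags of 0 forces the
     * protected-mode path even if the guest is currently in VM86 mode.
     */
    static void load_segment(struct fake_segment *s, uint16_t sel,
                             unsigned int eflags)
    {
        if (eflags & X86_EFLAGS_VM)
            load_vm86(s, sel);
        else
            load_protected(s, sel);
    }

    int main(void)
    {
        struct fake_segment ldtr;
        unsigned int guest_eflags = X86_EFLAGS_VM; /* switching from VM86 */

        /* Pre-patch behaviour: LDTR picks up VM86-style attributes. */
        load_segment(&ldtr, 0x28, guest_eflags);
        printf("buggy: protected=%d base=%#x\n",
               ldtr.protected_mode, (unsigned)ldtr.base);

        /* Patched behaviour: 0 forces the protected-mode path. */
        load_segment(&ldtr, 0x28, 0);
        printf("fixed: protected=%d base=%#x\n",
               ldtr.protected_mode, (unsigned)ldtr.base);
        return 0;
    }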

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2728,17 +2728,16 @@ static void hvm_unmap_entry(void *p)
 }
 
 static int hvm_load_segment_selector(
-    enum x86_segment seg, uint16_t sel)
+    enum x86_segment seg, uint16_t sel, unsigned int eflags)
 {
     struct segment_register desctab, cs, segr;
     struct desc_struct *pdesc, desc;
     u8 dpl, rpl, cpl;
     bool_t writable;
     int fault_type = TRAP_invalid_tss;
-    struct cpu_user_regs *regs = guest_cpu_user_regs();
     struct vcpu *v = current;
 
-    if ( regs->eflags & X86_EFLAGS_VM )
+    if ( eflags & X86_EFLAGS_VM )
     {
         segr.sel = sel;
         segr.base = (uint32_t)sel << 4;
@@ -2986,6 +2985,8 @@ void hvm_task_switch(
     if ( rc != HVMCOPY_okay )
         goto out;
 
+    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt, 0) )
+        goto out;
 
     if ( hvm_set_cr3(tss.cr3, 1) )
         goto out;
@@ -3008,13 +3009,12 @@ void hvm_task_switch(
     }
 
     exn_raised = 0;
-    if ( hvm_load_segment_selector(x86_seg_ldtr, tss.ldt) ||
-         hvm_load_segment_selector(x86_seg_es, tss.es) ||
-         hvm_load_segment_selector(x86_seg_cs, tss.cs) ||
-         hvm_load_segment_selector(x86_seg_ss, tss.ss) ||
-         hvm_load_segment_selector(x86_seg_ds, tss.ds) ||
-         hvm_load_segment_selector(x86_seg_fs, tss.fs) ||
-         hvm_load_segment_selector(x86_seg_gs, tss.gs) )
+    if ( hvm_load_segment_selector(x86_seg_es, tss.es, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_cs, tss.cs, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_ss, tss.ss, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_ds, tss.ds, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_fs, tss.fs, tss.eflags) ||
+         hvm_load_segment_selector(x86_seg_gs, tss.gs, tss.eflags) )
         exn_raised = 1;
 
     rc = hvm_copy_to_guest_virt(