1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
|
From 6d4c4f06467e3d126ca45c2e6b09a66a2a60a1ae Mon Sep 17 00:00:00 2001
From: Andrew Cooper <andrew.cooper3@citrix.com>
Date: Wed, 18 Apr 2018 16:38:50 +0200
Subject: [PATCH] x86/spec_ctrl: Fix several bugs in
SPEC_CTRL_ENTRY_FROM_INTR_IST
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DO_OVERWRITE_RSB clobbers %rax, meaning in practice that the bti_ist_info
field gets zeroed. Older versions of this code had the DO_OVERWRITE_RSB
register selectable, so reintroduce this ability and use it to cause the
INTR_IST path to use %rdx instead.

The use of %dl for the %cs.rpl check means that when an IST interrupt hits
Xen, we try to load 1 into the high 32 bits of MSR_SPEC_CTRL, suffering a #GP
fault instead.

Also, drop an unused label which was a copy/paste mistake.

Reported-by: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Reported-by: Zhenzhong Duan <zhenzhong.duan@oracle.com>
Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
Reviewed-by: Roger Pau Monné <roger.pau@citrix.com>
master commit: a2b08fbed388f18235fda5ba1655c1483ef3e215
master date: 2018-02-14 13:22:15 +0000
---
xen/include/asm-x86/spec_ctrl_asm.h | 12 ++++++------
1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/xen/include/asm-x86/spec_ctrl_asm.h b/xen/include/asm-x86/spec_ctrl_asm.h
index 7a43daf231..69cf3cc2f1 100644
--- a/xen/include/asm-x86/spec_ctrl_asm.h
+++ b/xen/include/asm-x86/spec_ctrl_asm.h
@@ -79,10 +79,10 @@
* - SPEC_CTRL_EXIT_TO_GUEST
*/
-.macro DO_OVERWRITE_RSB
+.macro DO_OVERWRITE_RSB tmp=rax
/*
* Requires nothing
- * Clobbers %rax, %rcx
+ * Clobbers \tmp (%rax by default), %rcx
*
* Requires 256 bytes of stack space, but %rsp has no net change. Based on
* Google's performance numbers, the loop is unrolled to 16 iterations and two
@@ -97,7 +97,7 @@
* optimised with mov-elimination in modern cores.
*/
mov $16, %ecx /* 16 iterations, two calls per loop */
- mov %rsp, %rax /* Store the current %rsp */
+ mov %rsp, %\tmp /* Store the current %rsp */
.L\@_fill_rsb_loop:
@@ -114,7 +114,7 @@
sub $1, %ecx
jnz .L\@_fill_rsb_loop
- mov %rax, %rsp /* Restore old %rsp */
+ mov %\tmp, %rsp /* Restore old %rsp */
.endm
.macro DO_SPEC_CTRL_ENTRY_FROM_VMEXIT ibrs_val:req
@@ -273,7 +273,7 @@
testb $BTI_IST_RSB, %al
jz .L\@_skip_rsb
- DO_OVERWRITE_RSB
+ DO_OVERWRITE_RSB tmp=rdx /* Clobbers %rcx/%rdx */
.L\@_skip_rsb:
@@ -285,13 +285,13 @@
setz %dl
and %dl, STACK_CPUINFO_FIELD(use_shadow_spec_ctrl)(%r14)
-.L\@_entry_from_xen:
/*
* Load Xen's intended value. SPEC_CTRL_IBRS vs 0 is encoded in the
* bottom bit of bti_ist_info, via a deliberate alias with BTI_IST_IBRS.
*/
mov $MSR_SPEC_CTRL, %ecx
and $BTI_IST_IBRS, %eax
+ xor %edx, %edx
wrmsr
/* Opencoded UNLIKELY_START() with no condition. */
--
2.15.2
|