x86/xsave: fix information leak on AMD CPUs

Just like FXSAVE/FXRSTOR, XSAVE/XRSTOR don't save/restore the last
instruction and operand pointers, or the last opcode, unless an
unmasked exception is pending (see CVE-2006-1056 and commit
9747:4d667a139318).

While the FXSR fix sits in the save path, I prefer to have this one in
the restore path, where the handling is simpler (particularly in the
context of the pending changes to properly save the selector values
for 32-bit guest code).

This also uses FFREE instead of EMMS, as it doesn't seem unlikely that
future CPUs will support x87 and SSE/AVX but not MMX. In any case, the
goal here is just to avoid an FPU stack overflow.

I would have preferred to use FFREEP instead of FFREE (freeing two
stack slots at once), but AMD doesn't document that instruction.

This is CVE-2013-2076 / XSA-52.

Signed-off-by: Jan Beulich <jbeulich@suse.com>

--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -78,6 +78,21 @@ void xrstor(struct vcpu *v, uint64_t mas
struct xsave_struct *ptr = v->arch.xsave_area;
+ /*
+ * AMD CPUs don't save/restore FDP/FIP/FOP unless an exception
+ * is pending. Clear the x87 state here by setting it to fixed
+ * values. The hypervisor data segment may be sometimes 0 and
+ * sometimes a new user value; either should be fine. Use the FPU saved
+ * data block as a safe address because it should be in L1.
+ */
+ if ( (mask & ptr->xsave_hdr.xstate_bv & XSTATE_FP) &&
+ !(ptr->fpu_sse.fsw & 0x0080) &&
+ boot_cpu_data.x86_vendor == X86_VENDOR_AMD )
+ asm volatile ( "fnclex\n\t" /* clear exceptions */
+ "ffree %%st(7)\n\t" /* clear stack tag */
+ "fildl %0" /* load to clear state */
+ : : "m" (ptr->fpu_sse) );
+
asm volatile (
".byte " REX_PREFIX "0x0f,0xae,0x2f"
:
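
To make the scrub sequence easier to follow outside the diff context,
here is a minimal standalone C sketch of the same idea. FSW_ES and
scrub_fpu_pointers() are names made up for this illustration, and the
vendor and xstate_bv checks from the hunk above are omitted; this is
not part of the patch.

#include <stdint.h>

#define FSW_ES 0x0080 /* x87 status word: exception summary (ES) bit */

/*
 * On affected CPUs, XRSTOR leaves FIP/FDP/FOP untouched when no
 * unmasked x87 exception is pending in the image, so the previous
 * context's values would remain readable (e.g. via FNSTENV). If
 * FSW.ES is clear, overwrite them with known-harmless values before
 * the subsequent XRSTOR. 'safe' is any readable 32-bit value; the
 * patch uses the save area itself since it is likely in L1.
 */
static inline void scrub_fpu_pointers(uint16_t fsw, const int32_t *safe)
{
    if ( fsw & FSW_ES )
        return; /* exception pending: the pointers will be restored */

    asm volatile ( "fnclex\n\t"        /* clear exception flags first */
                   "ffree %%st(7)\n\t" /* empty a slot so the push below cannot overflow */
                   "fildl %0"          /* dummy load: rewrites FIP/FDP/FOP */
                   : : "m" (*safe) );
    /* The value pushed by fildl is overwritten by the following XRSTOR. */
}
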
#x86/xsave: recover from faults on XRSTOR
#
#Just like FXRSTOR, XRSTOR can raise #GP if the memory block passed to
#it contains bad content (i.e. aspects of the block that, unlike e.g.
#its alignment, are not under the hypervisor's control).
#
#Also correct the comment explaining why FXRSTOR needs exception
#recovery code, so that it no longer wrongly states that a fault can
#only result from the control tools passing a bad image.
#
#This is CVE-2013-2077 / XSA-53.
#
#Signed-off-by: Jan Beulich <jbeulich@suse.com>
#
--- a/xen/arch/x86/i387.c
+++ b/xen/arch/x86/i387.c
@@ -53,7 +53,7 @@ static inline void fpu_fxrstor(struct vc
/*
* FXRSTOR can fault if passed a corrupted data block. We handle this
* possibility, which may occur if the block was passed to us by control
- * tools, by silently clearing the block.
+ * tools or through VCPUOP_initialise, by silently clearing the block.
*/
asm volatile (
#ifdef __i386__
--- a/xen/arch/x86/xstate.c
+++ b/xen/arch/x86/xstate.c
@@ -93,10 +93,25 @@ void xrstor(struct vcpu *v, uint64_t mas
"fildl %0" /* load to clear state */
: : "m" (ptr->fpu_sse) );
- asm volatile (
- ".byte " REX_PREFIX "0x0f,0xae,0x2f"
- :
- : "m" (*ptr), "a" (lmask), "d" (hmask), "D"(ptr) );
+ /*
+ * XRSTOR can fault if passed a corrupted data block. We handle this
+ * possibility, which may occur if the block was passed to us by control
+ * tools or through VCPUOP_initialise, by silently clearing the block.
+ */
+ asm volatile ( "1: .byte " REX_PREFIX "0x0f,0xae,0x2f\n"
+ ".section .fixup,\"ax\"\n"
+ "2: mov %5,%%ecx \n"
+ " xor %1,%1 \n"
+ " rep stosb \n"
+ " lea %2,%0 \n"
+ " mov %3,%1 \n"
+ " jmp 1b \n"
+ ".previous \n"
+ _ASM_EXTABLE(1b, 2b)
+ : "+&D" (ptr), "+&a" (lmask)
+ : "m" (*ptr), "g" (lmask), "d" (hmask),
+ "m" (xsave_cntxt_size)
+ : "ecx" );
}
bool_t xsave_enabled(const struct vcpu *v)
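
Conceptually, the fixup path added above behaves like the following
C-level sketch. try_xrstor() is a hypothetical helper (not Xen's API)
standing in for the asm statement together with its exception-table
entry; in the real code the clearing and retry happen inside the #GP
fixup, not in C.

#include <stdint.h>
#include <string.h>

/* Hypothetical: run XRSTOR on 'area', returning 0 if it raised #GP. */
extern int try_xrstor(void *area, uint64_t mask);

static void xrstor_or_clear(void *area, uint64_t mask, size_t size)
{
    if ( try_xrstor(area, mask) )
        return; /* common case: the block was well-formed */

    /*
     * The block was corrupted (e.g. handed to us by the control tools
     * or via VCPUOP_initialise). Silently clear it -- an all-zero save
     * area, with xstate_bv == 0, is architecturally valid -- and retry.
     */
    memset(area, 0, size);
    try_xrstor(area, mask);
}
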
#x86/xsave: properly check guest input to XSETBV
#
#Unlike the HVM emulation path, the PV case so far failed to check
#that YMM state requires SSE state to be enabled, allowing a #GP to
#occur when the inputs are passed to XSETBV inside the hypervisor.
#
#This is CVE-2013-2078 / XSA-54.
#
#Signed-off-by: Jan Beulich <jbeulich@suse.com>
#
--- a/xen/arch/x86/traps.c
+++ b/xen/arch/x86/traps.c
@@ -2205,6 +2205,11 @@ static int emulate_privileged_op(struct
if ( !(new_xfeature & XSTATE_FP) || (new_xfeature & ~xfeature_mask) )
goto fail;
+ /* YMM state takes SSE state as prerequisite. */
+ if ( (xfeature_mask & new_xfeature & XSTATE_YMM) &&
+ !(new_xfeature & XSTATE_SSE) )
+ goto fail;
+
v->arch.xcr0 = new_xfeature;
v->arch.xcr0_accum |= new_xfeature;
set_xcr0(new_xfeature);
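
For reference, the complete set of XCR0 validity checks this path now
performs can be sketched as a small predicate. The bit values are the
architectural XCR0 bit assignments; the function name and the
'supported_mask' parameter are illustrative, not Xen's actual
interface.

#include <stdbool.h>
#include <stdint.h>

#define XSTATE_FP  (1ULL << 0) /* x87 state: must always be set in XCR0 */
#define XSTATE_SSE (1ULL << 1) /* SSE (XMM) state */
#define XSTATE_YMM (1ULL << 2) /* AVX (YMM) state: depends on SSE state */

/* Would XSETBV accept this guest-supplied XCR0 value without a #GP? */
static bool xcr0_is_valid(uint64_t new_xcr0, uint64_t supported_mask)
{
    if ( !(new_xcr0 & XSTATE_FP) )
        return false;             /* the x87 bit is mandatory */
    if ( new_xcr0 & ~supported_mask )
        return false;             /* no unsupported feature bits */
    if ( (new_xcr0 & XSTATE_YMM) && !(new_xcr0 & XSTATE_SSE) )
        return false;             /* YMM requires SSE -- the XSA-54 check */
    return true;
}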