path: root/main/xen/xsa104.patch
x86/shadow: fix race condition sampling the dirty vram state

d->arch.hvm_domain.dirty_vram must be read with the domain's paging lock held.

If not, two concurrent hypercalls could both end up attempting to free
dirty_vram (the second of which will free a wild pointer), or both end up
allocating a new dirty_vram structure (the first of which will be leaked).
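
For illustration only (this is not Xen code): a minimal standalone sketch of the
pattern, using pthreads and invented names (vram_state, vram_lock), showing why
the pointer must be sampled only after the lock is taken.

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

/* Invented stand-ins for d->arch.hvm_domain.dirty_vram and the paging lock. */
static struct vram_state { int begin_pfn, end_pfn; } *vram_state;
static pthread_mutex_t vram_lock = PTHREAD_MUTEX_INITIALIZER;

/* Racy shape (like the old code): sample the pointer before locking.
 * Two callers can both observe the same non-NULL value and both free it
 * (the second frees a stale pointer), or both observe NULL and both
 * allocate, leaking one structure. */
static void teardown_racy(void)
{
    struct vram_state *v = vram_state;   /* sampled without the lock held */

    pthread_mutex_lock(&vram_lock);
    if ( v )
    {
        free(v);
        vram_state = NULL;
    }
    pthread_mutex_unlock(&vram_lock);
}

/* Fixed shape (like this patch): take the lock, then sample the pointer. */
static void teardown_fixed(void)
{
    struct vram_state *v;

    pthread_mutex_lock(&vram_lock);
    v = vram_state;                      /* sampled under the lock */
    if ( v )
    {
        free(v);
        vram_state = NULL;
    }
    pthread_mutex_unlock(&vram_lock);
}

int main(void)
{
    vram_state = calloc(1, sizeof(*vram_state));
    teardown_fixed();                    /* safe against a concurrent caller */
    teardown_racy();                     /* shown only for contrast */
    printf("state: %p\n", (void *)vram_state);
    return 0;
}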

This is XSA-104.

Signed-off-by: Andrew Cooper <andrew.cooper3@citrix.com>
Reviewed-by: Tim Deegan <tim@xen.org>

--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3485,7 +3485,7 @@ int shadow_track_dirty_vram(struct domai
     int flush_tlb = 0;
     unsigned long i;
     p2m_type_t t;
-    struct sh_dirty_vram *dirty_vram = d->arch.hvm_domain.dirty_vram;
+    struct sh_dirty_vram *dirty_vram;
     struct p2m_domain *p2m = p2m_get_hostp2m(d);
 
     if ( end_pfn < begin_pfn || end_pfn > p2m->max_mapped_pfn + 1 )
@@ -3495,6 +3495,8 @@ int shadow_track_dirty_vram(struct domai
     p2m_lock(p2m_get_hostp2m(d));
     paging_lock(d);
 
+    dirty_vram = d->arch.hvm_domain.dirty_vram;
+
     if ( dirty_vram && (!nr ||
              ( begin_pfn != dirty_vram->begin_pfn
             || end_pfn   != dirty_vram->end_pfn )) )
--- a/xen/include/asm-x86/hvm/domain.h
+++ b/xen/include/asm-x86/hvm/domain.h
@@ -112,7 +112,7 @@ struct hvm_domain {
     /* Memory ranges with pinned cache attributes. */
     struct list_head       pinned_cacheattr_ranges;
 
-    /* VRAM dirty support. */
+    /* VRAM dirty support.  Protect with the domain paging lock. */
     struct sh_dirty_vram *dirty_vram;
 
     /* If one of vcpus of this domain is in no_fill_mode or