From: George Dunlap <george.dunlap@citrix.com>
Subject: xen/mm: make sure node is less than MAX_NUMNODES

The output of MEMF_get_node(memflags) can be as large as nodeid_t can
hold (currently 255). This value is then used as an index into arrays of
size MAX_NUMNODES (64 on x86, 1 on ARM), can be passed in by an untrusted
guest (via memory_exchange and increase_reservation), and is not
currently bounds-checked.
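
To see why the decoded value can reach 255, here is a standalone sketch
(the two macros are paraphrased from xen/include/public/memory.h and may
differ in detail between versions):

    /* Illustration only, not Xen code: the node id occupies an 8-bit
     * field of the guest-supplied mem_flags, so any value up to 255 can
     * be requested, while the per-node arrays only have MAX_NUMNODES
     * entries. */
    #include <stdio.h>

    #define MAX_NUMNODES        64      /* x86 build-time limit */
    #define NUMA_NO_NODE        0xffu   /* "no node" is itself 255 */
    #define XENMEMF_node(n)     (((n) + 1) << 8)
    #define XENMEMF_get_node(f) ((((f) >> 8) - 1) & 0xffu)

    int main(void)
    {
        unsigned int mem_flags = XENMEMF_node(200);   /* guest-chosen */
        unsigned int node = XENMEMF_get_node(mem_flags);

        /* Prints node=200, well above MAX_NUMNODES=64; before the fix
         * this value was propagated into memflags and later used as an
         * index into per-node arrays. */
        printf("node=%u MAX_NUMNODES=%u\n", node, MAX_NUMNODES);
        return 0;
    }
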
Check the value in page_alloc.c before using it, and also check the
value in the hypercall call sites and return -EINVAL if appropriate.
Don't permit domains other than the hardware or control domain to
allocate node-constrained memory.
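
The resulting policy can be summarised with a simplified, self-contained
sketch (illustrative names only; the real logic is propagate_node() in
the memory.c hunk below):

    #include <stdbool.h>

    #define MAX_NUMNODES 64
    #define NUMA_NO_NODE 0xffu

    enum caller { HW_OR_CONTROL_DOMAIN, ORDINARY_GUEST };

    /* Who may ask for what after the fix. */
    static bool node_request_accepted(enum caller who, unsigned int node,
                                      bool exact)
    {
        if ( node == NUMA_NO_NODE )        /* no node constraint requested */
            return true;
        if ( who == HW_OR_CONTROL_DOMAIN ) /* may constrain, within bounds */
            return node < MAX_NUMNODES;
        return !exact;                     /* others: hint ignored, exact refused */
    }

    int main(void)
    {
        /* An ordinary guest asking for exact node 200 is now refused. */
        return node_request_accepted(ORDINARY_GUEST, 200, true) ? 1 : 0;
    }

Rejected requests make the hypercall paths below return -EINVAL instead
of carrying an unchecked node into the allocator.
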
This is XSA-231.

Reported-by: Matthew Daley <mattd@bugfuzz.com>
Signed-off-by: George Dunlap <george.dunlap@citrix.com>
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/common/memory.c
+++ b/xen/common/memory.c
@@ -411,6 +411,31 @@ static void decrease_reservation(struct
     a->nr_done = i;
 }
 
+static bool propagate_node(unsigned int xmf, unsigned int *memflags)
+{
+    const struct domain *currd = current->domain;
+
+    BUILD_BUG_ON(XENMEMF_get_node(0) != NUMA_NO_NODE);
+    BUILD_BUG_ON(MEMF_get_node(0) != NUMA_NO_NODE);
+
+    if ( XENMEMF_get_node(xmf) == NUMA_NO_NODE )
+        return true;
+
+    if ( is_hardware_domain(currd) || is_control_domain(currd) )
+    {
+        if ( XENMEMF_get_node(xmf) >= MAX_NUMNODES )
+            return false;
+
+        *memflags |= MEMF_node(XENMEMF_get_node(xmf));
+        if ( xmf & XENMEMF_exact_node_request )
+            *memflags |= MEMF_exact_node;
+    }
+    else if ( xmf & XENMEMF_exact_node_request )
+        return false;
+
+    return true;
+}
+
 static long memory_exchange(XEN_GUEST_HANDLE_PARAM(xen_memory_exchange_t) arg)
 {
     struct xen_memory_exchange exch;
@@ -483,6 +508,12 @@ static long memory_exchange(XEN_GUEST_HA
         }
     }
 
+    if ( unlikely(!propagate_node(exch.out.mem_flags, &memflags)) )
+    {
+        rc = -EINVAL;
+        goto fail_early;
+    }
+
     d = rcu_lock_domain_by_any_id(exch.in.domid);
     if ( d == NULL )
     {
@@ -501,7 +532,6 @@ static long memory_exchange(XEN_GUEST_HA
         d,
         XENMEMF_get_address_bits(exch.out.mem_flags) ? :
         (BITS_PER_LONG+PAGE_SHIFT)));
-    memflags |= MEMF_node(XENMEMF_get_node(exch.out.mem_flags));
 
     for ( i = (exch.nr_exchanged >> in_chunk_order);
           i < (exch.in.nr_extents >> in_chunk_order);
@@ -864,12 +894,8 @@ static int construct_memop_from_reservat
         }
         read_unlock(&d->vnuma_rwlock);
     }
-    else
-    {
-        a->memflags |= MEMF_node(XENMEMF_get_node(r->mem_flags));
-        if ( r->mem_flags & XENMEMF_exact_node_request )
-            a->memflags |= MEMF_exact_node;
-    }
+    else if ( unlikely(!propagate_node(r->mem_flags, &a->memflags)) )
+        return -EINVAL;
 
     return 0;
 }
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -706,9 +706,13 @@ static struct page_info *alloc_heap_page
         if ( node >= MAX_NUMNODES )
             node = cpu_to_node(smp_processor_id());
     }
+    else if ( unlikely(node >= MAX_NUMNODES) )
+    {
+        ASSERT_UNREACHABLE();
+        return NULL;
+    }
     first_node = node;
 
-    ASSERT(node < MAX_NUMNODES);
     ASSERT(zone_lo <= zone_hi);
     ASSERT(zone_hi < NR_ZONES);
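
One note on the page_alloc.c hunk above: ASSERT_UNREACHABLE() only fires
in debug builds, so the explicit "return NULL" is what keeps release
builds safe should a caller ever get an out-of-range node past the
hypercall-level checks. A minimal sketch of that pattern (hypothetical
helper, not the Xen implementation):

    #include <assert.h>
    #include <stddef.h>

    #define MAX_NUMNODES 64

    #ifdef NDEBUG
    #define ASSERT_UNREACHABLE() ((void)0)              /* release: no-op */
    #else
    #define ASSERT_UNREACHABLE() assert(!"unreachable") /* debug: loud stop */
    #endif

    /* Hypothetical per-node lookup guarded like alloc_heap_pages(). */
    static void *node_data(void *per_node[MAX_NUMNODES], unsigned int node)
    {
        if ( node >= MAX_NUMNODES )
        {
            ASSERT_UNREACHABLE();  /* callers should have validated already */
            return NULL;           /* fail safely even if they did not */
        }
        return per_node[node];
    }

    int main(void)
    {
        void *per_node[MAX_NUMNODES] = { 0 };
        /* In-range lookup; an out-of-range node would assert in a debug
         * build and return NULL in a release build. */
        return node_data(per_node, 3) == NULL ? 0 : 1;
    }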