1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
|
From: Jan Beulich <jbeulich@suse.com>
Subject: x86: extend get_platform_badpages() interface
Use a structure so that, along with an address (now a frame number), an
order can also be specified.
This is part of XSA-282.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -7111,23 +7111,23 @@ void arch_dump_shared_mem_info(void)
mem_sharing_get_nr_saved_mfns());
}
-const unsigned long *__init get_platform_badpages(unsigned int *array_size)
+const struct platform_bad_page *__init get_platform_badpages(unsigned int *array_size)
{
u32 igd_id;
- static unsigned long __initdata bad_pages[] = {
- 0x20050000,
- 0x20110000,
- 0x20130000,
- 0x20138000,
- 0x40004000,
+ static const struct platform_bad_page __initconst snb_bad_pages[] = {
+ { .mfn = 0x20050000 >> PAGE_SHIFT },
+ { .mfn = 0x20110000 >> PAGE_SHIFT },
+ { .mfn = 0x20130000 >> PAGE_SHIFT },
+ { .mfn = 0x20138000 >> PAGE_SHIFT },
+ { .mfn = 0x40004000 >> PAGE_SHIFT },
};
- *array_size = ARRAY_SIZE(bad_pages);
+ *array_size = ARRAY_SIZE(snb_bad_pages);
igd_id = pci_conf_read32(0, 0, 2, 0, 0);
- if ( !IS_SNB_GFX(igd_id) )
- return NULL;
+ if ( IS_SNB_GFX(igd_id) )
+ return snb_bad_pages;
- return bad_pages;
+ return NULL;
}
void paging_invlpg(struct vcpu *v, unsigned long va)
--- a/xen/common/page_alloc.c
+++ b/xen/common/page_alloc.c
@@ -270,7 +270,7 @@ void __init init_boot_pages(paddr_t ps,
unsigned long bad_spfn, bad_epfn;
const char *p;
#ifdef CONFIG_X86
- const unsigned long *badpage = NULL;
+ const struct platform_bad_page *badpage;
unsigned int i, array_size;
#endif
@@ -295,8 +295,8 @@ void __init init_boot_pages(paddr_t ps,
{
for ( i = 0; i < array_size; i++ )
{
- bootmem_region_zap(*badpage >> PAGE_SHIFT,
- (*badpage >> PAGE_SHIFT) + 1);
+ bootmem_region_zap(badpage->mfn,
+ badpage->mfn + (1U << badpage->order));
badpage++;
}
}
--- a/xen/include/asm-x86/mm.h
+++ b/xen/include/asm-x86/mm.h
@@ -350,7 +350,13 @@ bool is_iomem_page(mfn_t mfn);
void clear_superpage_mark(struct page_info *page);
-const unsigned long *get_platform_badpages(unsigned int *array_size);
+struct platform_bad_page {
+ unsigned long mfn;
+ unsigned int order;
+};
+
+const struct platform_bad_page *get_platform_badpages(unsigned int *array_size);
+
/* Per page locks:
* page_lock() is used for two purposes: pte serialization, and memory sharing.
*
|