path: root/main/xen/xsa275-4.11-1.patch
author     Leonardo Arena <rnalrd@alpinelinux.org>  2019-02-04 08:21:18 +0000
committer  Leonardo Arena <rnalrd@alpinelinux.org>  2019-02-04 08:21:42 +0000
commit     f39fc76089047b3f9b0e1ec06aecb40cc1ac1786 (patch)
tree       682b45d3e61bc3df3c9695ab9b7672d442f6fb3f /main/xen/xsa275-4.11-1.patch
parent     d39be709613fbc979651b09ac2bc27c6591afd99 (diff)
main/xen: security fixes
Diffstat (limited to 'main/xen/xsa275-4.11-1.patch')
-rw-r--r--  main/xen/xsa275-4.11-1.patch  104
1 file changed, 104 insertions, 0 deletions
diff --git a/main/xen/xsa275-4.11-1.patch b/main/xen/xsa275-4.11-1.patch
new file mode 100644
index 0000000000..932d8f1132
--- /dev/null
+++ b/main/xen/xsa275-4.11-1.patch
@@ -0,0 +1,104 @@
+From: Roger Pau Monné <roger.pau@citrix.com>
+Subject: amd/iommu: fix flush checks
+
+Flush checking for AMD IOMMU didn't take into account whether the
+previous entry was present, or whether the writable/readable flags
+changed, when deciding whether a flush should be executed.
+
+Fix this by taking the writable/readable/next-level fields into account,
+together with the present bit.
+
+Along these lines the flushing in amd_iommu_map_page() must not be
+omitted for PV domains. The comment there was simply wrong: Mappings may
+very well change, both their addresses and their permissions. Ultimately
+this should honor iommu_dont_flush_iotlb, but to achieve this
+amd_iommu_ops first needs to gain an .iotlb_flush hook.
+
+Also make clear_iommu_pte_present() static, to demonstrate there's no
+caller omitting the (subsequent) flush.
+
+This is part of XSA-275.
+
+Reported-by: Paul Durrant <paul.durrant@citrix.com>
+Signed-off-by: Roger Pau Monné <roger.pau@citrix.com>
+Signed-off-by: Jan Beulich <jbeulich@suse.com>
+
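As a minimal sketch of the corrected decision, assuming a simplified
pte_state struct in place of Xen's raw PDE words and its
get_field_from_reg_u32() accessors (the field names below are
hypothetical simplifications, not Xen's actual representation), the
check amounts to:

#include <stdbool.h>
#include <stdint.h>

/* Hypothetical, simplified PTE model; the real code extracts these
 * fields from the raw entry words with get_field_from_reg_u32(). */
struct pte_state {
    bool present, readable, writable;
    unsigned int next_level;
    uint64_t maddr;
};

/* Flush decision as patched: a non-present old entry never needs a
 * flush; otherwise flush on any change of address, permissions, or
 * next-level field. */
static bool need_flush(const struct pte_state *old_pte,
                       const struct pte_state *new_pte)
{
    if ( !old_pte->present )
        return false;

    return old_pte->maddr != new_pte->maddr ||
           old_pte->writable != new_pte->writable ||
           old_pte->readable != new_pte->readable ||
           old_pte->next_level != new_pte->next_level;
}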
+--- a/xen/drivers/passthrough/amd/iommu_map.c
++++ b/xen/drivers/passthrough/amd/iommu_map.c
+@@ -35,7 +35,7 @@ static unsigned int pfn_to_pde_idx(unsig
+ return idx;
+ }
+
+-void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
++static void clear_iommu_pte_present(unsigned long l1_mfn, unsigned long gfn)
+ {
+ u64 *table, *pte;
+
+@@ -49,23 +49,42 @@ static bool_t set_iommu_pde_present(u32
+ unsigned int next_level,
+ bool_t iw, bool_t ir)
+ {
+- u64 addr_lo, addr_hi, maddr_old, maddr_next;
++ uint64_t addr_lo, addr_hi, maddr_next;
+ u32 entry;
+- bool_t need_flush = 0;
++ bool need_flush = false, old_present;
+
+ maddr_next = (u64)next_mfn << PAGE_SHIFT;
+
+- addr_hi = get_field_from_reg_u32(pde[1],
+- IOMMU_PTE_ADDR_HIGH_MASK,
+- IOMMU_PTE_ADDR_HIGH_SHIFT);
+- addr_lo = get_field_from_reg_u32(pde[0],
+- IOMMU_PTE_ADDR_LOW_MASK,
+- IOMMU_PTE_ADDR_LOW_SHIFT);
+-
+- maddr_old = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
+-
+- if ( maddr_old != maddr_next )
+- need_flush = 1;
++ old_present = get_field_from_reg_u32(pde[0], IOMMU_PTE_PRESENT_MASK,
++ IOMMU_PTE_PRESENT_SHIFT);
++ if ( old_present )
++ {
++ bool old_r, old_w;
++ unsigned int old_level;
++ uint64_t maddr_old;
++
++ addr_hi = get_field_from_reg_u32(pde[1],
++ IOMMU_PTE_ADDR_HIGH_MASK,
++ IOMMU_PTE_ADDR_HIGH_SHIFT);
++ addr_lo = get_field_from_reg_u32(pde[0],
++ IOMMU_PTE_ADDR_LOW_MASK,
++ IOMMU_PTE_ADDR_LOW_SHIFT);
++ old_level = get_field_from_reg_u32(pde[0],
++ IOMMU_PDE_NEXT_LEVEL_MASK,
++ IOMMU_PDE_NEXT_LEVEL_SHIFT);
++ old_w = get_field_from_reg_u32(pde[1],
++ IOMMU_PTE_IO_WRITE_PERMISSION_MASK,
++ IOMMU_PTE_IO_WRITE_PERMISSION_SHIFT);
++ old_r = get_field_from_reg_u32(pde[1],
++ IOMMU_PTE_IO_READ_PERMISSION_MASK,
++ IOMMU_PTE_IO_READ_PERMISSION_SHIFT);
++
++ maddr_old = (addr_hi << 32) | (addr_lo << PAGE_SHIFT);
++
++ if ( maddr_old != maddr_next || iw != old_w || ir != old_r ||
++ old_level != next_level )
++ need_flush = true;
++ }
+
+ addr_lo = maddr_next & DMA_32BIT_MASK;
+ addr_hi = maddr_next >> 32;
+@@ -687,10 +706,7 @@ int amd_iommu_map_page(struct domain *d,
+ if ( !need_flush )
+ goto out;
+
+- /* 4K mapping for PV guests never changes,
+- * no need to flush if we trust non-present bits */
+- if ( is_hvm_domain(d) )
+- amd_iommu_flush_pages(d, gfn, 0);
++ amd_iommu_flush_pages(d, gfn, 0);
+
+ for ( merge_level = IOMMU_PAGING_MODE_LEVEL_2;
+ merge_level <= hd->arch.paging_mode; merge_level++ )
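The removed comment claimed that 4K mappings for PV guests never
change, but a permission-only update is exactly the case the old
address-only comparison missed: the address stays the same, so no
flush was issued, and a stale (e.g. still-writable) entry could remain
in the IOTLB. A self-contained sketch of the divergence, reusing the
same hypothetical pte_state model as above:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical, simplified PTE model (same as the sketch above). */
struct pte_state {
    bool present, readable, writable;
    unsigned int next_level;
    uint64_t maddr;
};

/* Pre-patch check: flush only when the mapped address changes. */
static bool need_flush_old(const struct pte_state *o,
                           const struct pte_state *n)
{
    return o->maddr != n->maddr;
}

/* Post-patch check: also consider presence, permissions, and the
 * next-level field. */
static bool need_flush_new(const struct pte_state *o,
                           const struct pte_state *n)
{
    if ( !o->present )
        return false;

    return o->maddr != n->maddr || o->writable != n->writable ||
           o->readable != n->readable || o->next_level != n->next_level;
}

int main(void)
{
    /* Same frame, but write permission is being revoked. */
    struct pte_state before = { .present = true, .readable = true,
                                .writable = true, .next_level = 0,
                                .maddr = 0x1000 };
    struct pte_state after = before;

    after.writable = false;

    printf("old logic flushes: %d\n", need_flush_old(&before, &after)); /* 0 */
    printf("new logic flushes: %d\n", need_flush_new(&before, &after)); /* 1 */
    return 0;
}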