From: Jan Beulich
Subject: x86/shadow: move OOS flag bit positions

In preparation of reducing struct page_info's shadow_flags field to 16
bits, lower the bit positions used for SHF_out_of_sync and
SHF_oos_may_write.

Instead of also adjusting the open coded use in _get_page_type(),
introduce shadow_prepare_page_type_change() to confine knowledge of the
bit positions to shadow code.

This is part of XSA-280.

Signed-off-by: Jan Beulich
Reviewed-by: Tim Deegan

--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -2799,15 +2799,8 @@ static int __get_page_type(struct page_i
     {
         struct domain *d = page_get_owner(page);
 
-        /* Normally we should never let a page go from type count 0
-         * to type count 1 when it is shadowed. One exception:
-         * out-of-sync shadowed pages are allowed to become
-         * writeable. */
-        if ( d && shadow_mode_enabled(d)
-             && (page->count_info & PGC_page_table)
-             && !((page->shadow_flags & (1u<<29))
-                  && type == PGT_writable_page) )
-            shadow_remove_all_shadows(d, _mfn(page_to_mfn(page)));
+        if ( d && shadow_mode_enabled(d) )
+            shadow_prepare_page_type_change(d, page, type);
 
         ASSERT(!(x & PGT_pae_xen_l2));
         if ( (x & PGT_type_mask) != type )
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -919,6 +919,9 @@ int sh_unsync(struct vcpu *v, mfn_t gmfn
          || !v->domain->arch.paging.shadow.oos_active )
         return 0;
 
+    BUILD_BUG_ON(!(typeof(pg->shadow_flags))SHF_out_of_sync);
+    BUILD_BUG_ON(!(typeof(pg->shadow_flags))SHF_oos_may_write);
+
     pg->shadow_flags |= SHF_out_of_sync|SHF_oos_may_write;
     oos_hash_add(v, gmfn);
     perfc_incr(shadow_unsync);
@@ -2810,6 +2813,26 @@ void sh_remove_shadows(struct domain *d,
     paging_unlock(d);
 }
 
+void shadow_prepare_page_type_change(struct domain *d, struct page_info *page,
+                                     unsigned long new_type)
+{
+    if ( !(page->count_info & PGC_page_table) )
+        return;
+
+#if (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC)
+    /*
+     * Normally we should never let a page go from type count 0 to type
+     * count 1 when it is shadowed. One exception: out-of-sync shadowed
+     * pages are allowed to become writeable.
+     */
+    if ( (page->shadow_flags & SHF_oos_may_write) &&
+         new_type == PGT_writable_page )
+        return;
+#endif
+
+    shadow_remove_all_shadows(d, page_to_mfn(page));
+}
+
 static void
 sh_remove_all_shadows_and_parents(struct domain *d, mfn_t gmfn)
 /* Even harsher: this is a HVM page that we thing is no longer a pagetable.
--- a/xen/arch/x86/mm/shadow/private.h
+++ b/xen/arch/x86/mm/shadow/private.h
@@ -287,8 +287,8 @@ static inline void sh_terminate_list(str
  * codepath is called during that time and is sensitive to oos issues, it may
  * need to use the second flag.
  */
-#define SHF_out_of_sync (1u<<30)
-#define SHF_oos_may_write (1u<<29)
+#define SHF_out_of_sync (1u << (SH_type_max_shadow + 1))
+#define SHF_oos_may_write (1u << (SH_type_max_shadow + 2))
 
 #endif /* (SHADOW_OPTIMIZATIONS & SHOPT_OUT_OF_SYNC) */
 
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -81,6 +81,10 @@ void shadow_final_teardown(struct domain
 
 void sh_remove_shadows(struct domain *d, mfn_t gmfn, int fast, int all);
 
+/* Adjust shadows ready for a guest page to change its type. */
+void shadow_prepare_page_type_change(struct domain *d, struct page_info *page,
+                                     unsigned long new_type);
+
 /* Discard _all_ mappings from the domain's shadows. */
 void shadow_blow_tables_per_domain(struct domain *d);
 
@@ -105,6 +109,10 @@ int shadow_set_allocation(struct domain
 static inline void sh_remove_shadows(struct domain *d, mfn_t gmfn,
                                      bool_t fast, bool_t all) {}
 
+static inline void shadow_prepare_page_type_change(struct domain *d,
+                                                   struct page_info *page,
+                                                   unsigned long new_type) {}
+
 static inline void shadow_blow_tables_per_domain(struct domain *d) {}
 
 static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
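
[Editorial note, not part of the patch: a minimal standalone sketch of where the
relocated flags end up, assuming SH_type_max_shadow is 13 (SH_type_l4_64_shadow)
as in the tree this is based on. Under that assumption the flags land in bits 14
and 15 and therefore still fit once shadow_flags is narrowed to 16 bits; the
BUILD_BUG_ON()s added in sh_unsync() check essentially the same thing at build
time against the real field type.]

/* Illustrative sketch only -- SH_type_max_shadow's value is an assumption. */
#include <assert.h>
#include <stdint.h>

#define SH_type_max_shadow 13u                             /* assumed value */
#define SHF_out_of_sync   (1u << (SH_type_max_shadow + 1)) /* bit 14 */
#define SHF_oos_may_write (1u << (SH_type_max_shadow + 2)) /* bit 15 */

int main(void)
{
    uint16_t shadow_flags = 0; /* stand-in for the narrowed field */

    shadow_flags |= SHF_out_of_sync | SHF_oos_may_write;

    /* Both flags survive assignment to a 16-bit field; the old bit 29/30
     * positions would have been truncated away. */
    assert(shadow_flags & SHF_out_of_sync);
    assert(shadow_flags & SHF_oos_may_write);

    return 0;
}

[Expressing the positions relative to SH_type_max_shadow also keeps them clear
of the per-shadow-type SHF_* bits without hard-coding a count.]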