x86: properly handle hvm_copy_from_guest_{phys,virt}() errors

Ignoring them generally implies using uninitialized data and, in all
cases dealt with here, potentially leaking hypervisor stack contents
to guests.

This is XSA-63.

Signed-off-by: Jan Beulich <jbeulich@suse.com>
Reviewed-by: Tim Deegan <tim@xen.org>
Reviewed-by: Andrew Cooper <andrew.cooper3@citrix.com>

--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -2308,11 +2308,7 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, prev_tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    if ( rc == HVMCOPY_gfn_shared )
+    if ( rc != HVMCOPY_okay )
         goto out;
 
     eflags = regs->eflags;
@@ -2357,13 +2353,11 @@ void hvm_task_switch(
 
     rc = hvm_copy_from_guest_virt(
         &tss, tr.base, sizeof(tss), PFEC_page_present);
-    if ( rc == HVMCOPY_bad_gva_to_gfn )
-        goto out;
-    if ( rc == HVMCOPY_gfn_paged_out )
-        goto out;
-    /* Note: this could be optimised, if the callee functions knew we want RO
-     * access */
-    if ( rc == HVMCOPY_gfn_shared )
+    /*
+     * Note: The HVMCOPY_gfn_shared case could be optimised, if the callee
+     * functions knew we want RO access.
+     */
+    if ( rc != HVMCOPY_okay )
         goto out;
 
     if ( hvm_set_cr3(tss.cr3) )
--- a/xen/arch/x86/hvm/intercept.c
+++ b/xen/arch/x86/hvm/intercept.c
@@ -87,17 +87,28 @@ static int hvm_mmio_access(struct vcpu *
     {
         for ( i = 0; i < p->count; i++ )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) ||
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
             {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 rc = X86EMUL_RETRY;
                 break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
             }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = write_handler(v, p->addr + (sign * i * p->size),
                                p->size, data);
             if ( rc != X86EMUL_OKAY )
@@ -165,8 +176,28 @@ static int process_portio_intercept(port
         for ( i = 0; i < p->count; i++ )
         {
             data = 0;
-            (void)hvm_copy_from_guest_phys(&data, p->data + sign*i*p->size,
-                                           p->size);
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
+                rc = X86EMUL_RETRY;
+                break;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                rc = X86EMUL_UNHANDLEABLE;
+                break;
+            }
+            if ( rc != X86EMUL_OKAY )
+                break;
             rc = action(IOREQ_WRITE, p->addr, p->size, &data);
             if ( rc != X86EMUL_OKAY )
                 break;
--- a/xen/arch/x86/hvm/io.c
+++ b/xen/arch/x86/hvm/io.c
@@ -340,14 +340,24 @@ static int dpci_ioport_write(uint32_t mp
         data = p->data;
         if ( p->data_is_ptr )
         {
-            int ret;
-
-            ret = hvm_copy_from_guest_phys(&data,
-                                           p->data + (sign * i * p->size),
-                                           p->size);
-            if ( (ret == HVMCOPY_gfn_paged_out) &&
-                 (ret == HVMCOPY_gfn_shared) )
+            switch ( hvm_copy_from_guest_phys(&data,
+                                              p->data + sign * i * p->size,
+                                              p->size) )
+            {
+            case HVMCOPY_okay:
+                break;
+            case HVMCOPY_gfn_paged_out:
+            case HVMCOPY_gfn_shared:
                 return X86EMUL_RETRY;
+            case HVMCOPY_bad_gfn_to_mfn:
+                data = ~0;
+                break;
+            case HVMCOPY_bad_gva_to_gfn:
+                ASSERT(0);
+                /* fall through */
+            default:
+                return X86EMUL_UNHANDLEABLE;
+            }
         }
 
         switch ( p->size )
--- a/xen/arch/x86/hvm/vmx/realmode.c
+++ b/xen/arch/x86/hvm/vmx/realmode.c
@@ -39,7 +39,9 @@ static void realmode_deliver_exception(
 
  again:
     last_byte = (vector * 4) + 3;
-    if ( idtr->limit < last_byte )
+    if ( idtr->limit < last_byte ||
+         hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4) !=
+         HVMCOPY_okay )
     {
         /* Software interrupt? */
         if ( insn_len != 0 )
@@ -64,8 +66,6 @@ static void realmode_deliver_exception(
         }
     }
 
-    (void)hvm_copy_from_guest_phys(&cs_eip, idtr->base + vector * 4, 4);
-
     frame[0] = regs->eip + insn_len;
     frame[1] = csr->sel;
     frame[2] = regs->eflags & ~X86_EFLAGS_RF;
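
For reference, the bug class fixed above is easy to reproduce outside of Xen:
a stack buffer is passed to a copy routine, the routine's result is ignored,
and the buffer is handed back to the guest even when the copy never happened.
Below is a minimal standalone C sketch of the before/after shape, using a
hypothetical copy_from_guest() helper and COPY_* result codes in place of
Xen's hvm_copy_from_guest_phys() and HVMCOPY_* values:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

enum copy_result { COPY_okay, COPY_bad_address };

/* Hypothetical stand-in for hvm_copy_from_guest_phys(): on failure the
 * destination buffer is left untouched, like the real routine. */
static enum copy_result copy_from_guest(void *buf, uint64_t gpa, size_t len)
{
    if ( gpa == 0xbadULL )
        return COPY_bad_address;
    memset(buf, 0x41, len);          /* pretend these are guest bytes */
    return COPY_okay;
}

/* Vulnerable shape, mirroring the code removed by the patch: reading
 * "data" after a failed copy returns stale hypervisor stack contents. */
static uint64_t emulate_read_vulnerable(uint64_t gpa)
{
    uint64_t data;                   /* uninitialized stack slot */

    (void)copy_from_guest(&data, gpa, sizeof(data));  /* result ignored */
    return data;
}

/* Fixed shape, mirroring the switch statements added by the patch:
 * every result is classified, and nothing uninitialized escapes. */
static uint64_t emulate_read_fixed(uint64_t gpa)
{
    uint64_t data;

    switch ( copy_from_guest(&data, gpa, sizeof(data)) )
    {
    case COPY_okay:
        break;
    case COPY_bad_address:
    default:
        data = ~0ULL;                /* unreadable memory reads as all-ones */
        break;
    }
    return data;
}

int main(void)
{
    printf("vulnerable: %#llx\n",
           (unsigned long long)emulate_read_vulnerable(0xbadULL));
    printf("fixed:      %#llx\n",
           (unsigned long long)emulate_read_fixed(0xbadULL));
    return 0;
}

The "data = ~0" fallback in the patch follows the same convention as the
sketch: a read of unmapped physical memory returns all-ones, as on real
hardware, so a guest probing bad addresses learns nothing about hypervisor
state.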