From mboxrd@z Thu Jan 1 00:00:00 1970
From: Andres Lagar-Cavilla
Subject: [PATCH 3 of 6] x86/mm/shadow: fix potential p2m/paging deadlock when emulating page table writes
Date: Fri, 13 Apr 2012 12:22:21 -0400
Message-ID: <964c6cbad92639029f4a.1334334141@xdev.gridcentric.ca>
Mime-Version: 1.0
Content-Type: text/plain; charset="us-ascii"
Content-Transfer-Encoding: 7bit
Sender: xen-devel-bounces@lists.xen.org
Errors-To: xen-devel-bounces@lists.xen.org
To: xen-devel@lists.xen.org
Cc: andres@gridcentric.ca, tim@xen.org
List-Id: xen-devel@lists.xenproject.org

 xen/arch/x86/mm/shadow/multi.c |  25 +++++++++++++++++++++++++
 1 files changed, 25 insertions(+), 0 deletions(-)

Signed-off-by: Andres Lagar-Cavilla

diff -r f8fd4a4239a8 -r 964c6cbad926 xen/arch/x86/mm/shadow/multi.c
--- a/xen/arch/x86/mm/shadow/multi.c
+++ b/xen/arch/x86/mm/shadow/multi.c
@@ -5033,9 +5033,21 @@ sh_x86_emulate_write(struct vcpu *v, uns
     if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v) )
         return X86EMUL_UNHANDLEABLE;
 
+    /* To prevent a shadow mode deadlock, we need to hold the p2m lock from
+     * here onwards. emulate_unmap_dest may need to verify the pte that is
+     * being written to here, and thus call get_gfn on the gfn contained in
+     * the payload being written. p2m_lock is recursive, so all is well in
+     * that regard. Further, holding the p2m lock ensures the page table gfn
+     * being written to won't go away (although that could be achieved with
+     * a page reference, as done elsewhere).
+     */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     paging_lock(v->domain);
     memcpy(addr, src, bytes);
@@ -5059,6 +5071,7 @@ sh_x86_emulate_write(struct vcpu *v, uns
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
     return X86EMUL_OKAY;
 }
 
@@ -5075,9 +5088,14 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     if ( (vaddr & (bytes - 1)) && !is_hvm_vcpu(v) )
         return X86EMUL_UNHANDLEABLE;
 
+    /* see comment in sh_x86_emulate_write. */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     paging_lock(v->domain);
     switch ( bytes )
@@ -5101,6 +5119,7 @@ sh_x86_emulate_cmpxchg(struct vcpu *v, u
     emulate_unmap_dest(v, addr, bytes, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
     return rv;
 }
 
@@ -5119,9 +5138,14 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     if ( (vaddr & 7) && !is_hvm_vcpu(v) )
         return X86EMUL_UNHANDLEABLE;
 
+    /* see comment in sh_x86_emulate_write. */
+    p2m_lock(p2m_get_hostp2m(v->domain));
     addr = emulate_map_dest(v, vaddr, 8, sh_ctxt);
     if ( emulate_map_dest_failed(addr) )
+    {
+        p2m_unlock(p2m_get_hostp2m(v->domain));
         return (long)addr;
+    }
 
     old = (((u64) old_hi) << 32) | (u64) old_lo;
     new = (((u64) new_hi) << 32) | (u64) new_lo;
@@ -5135,6 +5159,7 @@ sh_x86_emulate_cmpxchg8b(struct vcpu *v,
     emulate_unmap_dest(v, addr, 8, sh_ctxt);
     shadow_audit_tables(v);
     paging_unlock(v->domain);
+    p2m_unlock(p2m_get_hostp2m(v->domain));
    return rv;
 }
 #endif
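
For reference, here is the locking discipline the patch establishes, reduced
to its skeleton: take the (recursive) p2m lock before the paging lock, and
drop it on every exit path. This sketch is illustrative only and not part of
the patch; emulate_pt_write itself is a hypothetical wrapper, while the lock,
map and unmap calls mirror the real Xen primitives used above.

    /* Minimal sketch of the lock ordering, assuming the Xen primitives
     * used in the patch; emulate_pt_write itself is hypothetical. */
    static int emulate_pt_write(struct vcpu *v, unsigned long vaddr,
                                void *src, u32 bytes,
                                struct sh_emulate_ctxt *sh_ctxt)
    {
        struct p2m_domain *p2m = p2m_get_hostp2m(v->domain);
        void *addr;

        /* Outer lock: recursive, so any get_gfn calls made while mapping
         * or verifying the destination can re-acquire it safely. */
        p2m_lock(p2m);

        addr = emulate_map_dest(v, vaddr, bytes, sh_ctxt);
        if ( emulate_map_dest_failed(addr) )
        {
            p2m_unlock(p2m);    /* every early exit must drop the lock */
            return (long)addr;
        }

        /* Inner lock: always nested inside the p2m lock. Acquiring the
         * p2m lock only after the paging lock is the ordering violation
         * that could deadlock. */
        paging_lock(v->domain);
        memcpy(addr, src, bytes);
        /* May call get_gfn on the gfn in the written pte while both
         * locks are held; safe because the p2m lock is already ours. */
        emulate_unmap_dest(v, addr, bytes, sh_ctxt);
        paging_unlock(v->domain);

        p2m_unlock(p2m);
        return X86EMUL_OKAY;
    }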