* [PATCH] enable tmem functionality for PV on HVM guests
@ 2010-06-21 17:14 Dan Magenheimer
2010-06-21 17:39 ` Keir Fraser
0 siblings, 1 reply; 3+ messages in thread
From: Dan Magenheimer @ 2010-06-21 17:14 UTC (permalink / raw)
To: xen-devel, Keir Fraser
[-- Attachment #1: Type: text/plain, Size: 5309 bytes --]
(Keir, please also apply for 4.0.1 if it's not too late.)
Enable tmem functionality for PV on HVM guests. Guest kernel
must still be tmem-enabled to use this functionality (e.g.
won't work for Windows), but upstream Linux tmem (aka
cleancache and frontswap) patches apply cleanly on top
of PV on HVM patches.
Also, fix up some ASSERTs and code used only when bad guest
mfns are passed to tmem. The previous code could crash Xen
if a buggy/malicious guest passed bad gmfns.
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
diff -r ba2c0eecaf7f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Jun 21 10:37:05 2010 -0600
@@ -2265,7 +2265,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#else /* defined(__x86_64__) */
@@ -2313,7 +2314,8 @@ static hvm_hypercall_t *hvm_hypercall64_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
@@ -2323,7 +2325,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#endif /* defined(__x86_64__) */
diff -r ba2c0eecaf7f xen/common/tmem.c
--- a/xen/common/tmem.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/common/tmem.c Mon Jun 21 10:37:05 2010 -0600
@@ -1483,6 +1483,7 @@ copy_uncompressed:
pgp_free_data(pgp, pool);
if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
goto failed_dup;
+ pgp->size = 0;
/* tmh_copy_from_client properly handles len==0 and offsets != 0 */
ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,0);
if ( ret == -EFAULT )
@@ -1492,7 +1493,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto failed_dup;
}
- pgp->size = 0;
done:
/* successfully replaced data, clean up and return success */
@@ -1509,12 +1509,14 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
- return -EFAULT;
+ ret = -EFAULT;
+ goto cleanup;
failed_dup:
/* couldn't change out the data, flush the old data and return
* -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
+ ret = -ENOSPC;
+cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
pgp_delete(pgpfound,0);
@@ -1528,7 +1530,7 @@ failed_dup:
tmem_spin_unlock(&obj->obj_spinlock);
}
pool->dup_puts_flushed++;
- return -ENOSPC;
+ return ret;
}
@@ -1579,6 +1581,7 @@ static NOINLINE int do_tmem_put(pool_t *
goto free;
ASSERT(ret != -EEXIST);
pgp->index = index;
+ pgp->size = 0;
if ( len != 0 && client->compress )
{
@@ -1615,7 +1618,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto delete_and_free;
}
- pgp->size = 0;
insert_page:
if ( is_ephemeral(pool) )
@@ -1648,6 +1650,11 @@ insert_page:
tot_good_eph_puts++;
return 1;
+bad_copy:
+ /* this should only happen if the client passed a bad mfn */
+ ret = -EFAULT;
+ failed_copies++;
+
delete_and_free:
ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
pgpdel = pgp_delete_from_obj(obj, pgp->index);
@@ -1669,12 +1676,6 @@ free:
}
pool->no_mem_puts++;
return ret;
-
-bad_copy:
- /* this should only happen if the client passed a bad mfn */
- failed_copies++;
-ASSERT(0);
- goto free;
}
static NOINLINE int do_tmem_get(pool_t *pool, uint64_t oid, uint32_t index,
@@ -1758,7 +1759,6 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
return -EFAULT;
}
diff -r ba2c0eecaf7f xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/common/tmem_xen.c Mon Jun 21 10:37:05 2010 -0600
@@ -100,7 +100,7 @@ static inline void *cli_mfn_to_va(tmem_c
p2m_type_t t;
cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
- if (t != p2m_ram_rw)
+ if (t != p2m_ram_rw || cli_mfn == INVALID_MFN)
return NULL;
if (pcli_mfn != NULL)
*pcli_mfn = cli_mfn;
diff -r ba2c0eecaf7f xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/include/xen/tmem_xen.h Mon Jun 21 10:37:05 2010 -0600
@@ -456,7 +456,9 @@ static inline int tmh_get_tmemop_from_cl
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_hvm_vcpu(current) ?
+ hvm_guest_x86_mode(current) != 8 :
+ is_pv_32on64_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;
[-- Attachment #2: tmem-hvm.patch --]
[-- Type: application/octet-stream, Size: 4607 bytes --]
diff -r ba2c0eecaf7f xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Jun 21 10:37:05 2010 -0600
@@ -2265,7 +2265,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#else /* defined(__x86_64__) */
@@ -2313,7 +2314,8 @@ static hvm_hypercall_t *hvm_hypercall64_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
@@ -2323,7 +2325,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(xen_version),
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#endif /* defined(__x86_64__) */
diff -r ba2c0eecaf7f xen/common/tmem.c
--- a/xen/common/tmem.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/common/tmem.c Mon Jun 21 10:37:05 2010 -0600
@@ -1483,6 +1483,7 @@ copy_uncompressed:
pgp_free_data(pgp, pool);
if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
goto failed_dup;
+ pgp->size = 0;
/* tmh_copy_from_client properly handles len==0 and offsets != 0 */
ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,0);
if ( ret == -EFAULT )
@@ -1492,7 +1493,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto failed_dup;
}
- pgp->size = 0;
done:
/* successfully replaced data, clean up and return success */
@@ -1509,12 +1509,14 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
- return -EFAULT;
+ ret = -EFAULT;
+ goto cleanup;
failed_dup:
/* couldn't change out the data, flush the old data and return
* -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
+ ret = -ENOSPC;
+cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
pgp_delete(pgpfound,0);
@@ -1528,7 +1530,7 @@ failed_dup:
tmem_spin_unlock(&obj->obj_spinlock);
}
pool->dup_puts_flushed++;
- return -ENOSPC;
+ return ret;
}
@@ -1579,6 +1581,7 @@ static NOINLINE int do_tmem_put(pool_t *
goto free;
ASSERT(ret != -EEXIST);
pgp->index = index;
+ pgp->size = 0;
if ( len != 0 && client->compress )
{
@@ -1615,7 +1618,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto delete_and_free;
}
- pgp->size = 0;
insert_page:
if ( is_ephemeral(pool) )
@@ -1648,6 +1650,11 @@ insert_page:
tot_good_eph_puts++;
return 1;
+bad_copy:
+ /* this should only happen if the client passed a bad mfn */
+ ret = -EFAULT;
+ failed_copies++;
+
delete_and_free:
ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
pgpdel = pgp_delete_from_obj(obj, pgp->index);
@@ -1669,12 +1676,6 @@ free:
}
pool->no_mem_puts++;
return ret;
-
-bad_copy:
- /* this should only happen if the client passed a bad mfn */
- failed_copies++;
-ASSERT(0);
- goto free;
}
static NOINLINE int do_tmem_get(pool_t *pool, uint64_t oid, uint32_t index,
@@ -1758,7 +1759,6 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
return -EFAULT;
}
diff -r ba2c0eecaf7f xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/common/tmem_xen.c Mon Jun 21 10:37:05 2010 -0600
@@ -100,7 +100,7 @@ static inline void *cli_mfn_to_va(tmem_c
p2m_type_t t;
cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
- if (t != p2m_ram_rw)
+ if (t != p2m_ram_rw || cli_mfn == INVALID_MFN)
return NULL;
if (pcli_mfn != NULL)
*pcli_mfn = cli_mfn;
diff -r ba2c0eecaf7f xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Tue Jun 15 11:31:43 2010 +0100
+++ b/xen/include/xen/tmem_xen.h Mon Jun 21 10:37:05 2010 -0600
@@ -456,7 +456,9 @@ static inline int tmh_get_tmemop_from_cl
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_hvm_vcpu(current) ?
+ hvm_guest_x86_mode(current) != 8 :
+ is_pv_32on64_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread* Re: [PATCH] enable tmem functionality for PV on HVM guests
2010-06-21 17:14 [PATCH] enable tmem functionality for PV on HVM guests Dan Magenheimer
@ 2010-06-21 17:39 ` Keir Fraser
2010-06-21 17:54 ` Dan Magenheimer
0 siblings, 1 reply; 3+ messages in thread
From: Keir Fraser @ 2010-06-21 17:39 UTC (permalink / raw)
To: Dan Magenheimer, xen-devel@lists.xensource.com
On 21/06/2010 18:14, "Dan Magenheimer" <dan.magenheimer@oracle.com> wrote:
> (Keir, please also apply for 4.0.1 if it's not too late.)
>
> Enable tmem functionality for PV on HVM guests. Guest kernel
> must still be tmem-enabled to use this functionality (e.g.
> won't work for Windows), but upstream Linux tmem (aka
> cleancache and frontswap) patches apply cleanly on top
> of PV on HVM patches.
>
> Also, fix up some ASSERTS and code used only when bad guest
> mfns are passed to tmem. Previous code could crash Xen
> if a buggy/malicious guest passes bad gmfns.
This patch doesn't apply to xen-unstable tip.
-- Keir
^ permalink raw reply [flat|nested] 3+ messages in thread
* RE: [PATCH] enable tmem functionality for PV on HVM guests
2010-06-21 17:39 ` Keir Fraser
@ 2010-06-21 17:54 ` Dan Magenheimer
0 siblings, 0 replies; 3+ messages in thread
From: Dan Magenheimer @ 2010-06-21 17:54 UTC (permalink / raw)
To: Keir Fraser, xen-devel
[-- Attachment #1: Type: text/plain, Size: 5505 bytes --]
> This patch doesn't apply to xen-unstable tip.
>
> -- Keir
Oops, sorry, I sent the 4.0-testing patch. Here's
the one for xen-unstable. Everything is the same
except that the hunks in hvm.c move slightly.
Dan
=========================
Enable tmem functionality for PV on HVM guests. Guest kernel
must still be tmem-enabled to use this functionality (e.g.
won't work for Windows), but upstream Linux tmem (aka
cleancache and frontswap) patches apply cleanly on top
of PV on HVM patches.
Also, fix up some ASSERTs and code used only when bad guest
mfns are passed to tmem. The previous code could crash Xen
if a buggy/malicious guest passed bad gmfns.
Signed-off-by: Dan Magenheimer <dan.magenheimer@oracle.com>
diff -r 4892d31a78b1 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Jun 21 11:48:10 2010 -0600
@@ -2302,7 +2302,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#else /* defined(__x86_64__) */
@@ -2355,7 +2356,8 @@ static hvm_hypercall_t *hvm_hypercall64_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
@@ -2366,7 +2368,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#endif /* defined(__x86_64__) */
diff -r 4892d31a78b1 xen/common/tmem.c
--- a/xen/common/tmem.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/common/tmem.c Mon Jun 21 11:48:10 2010 -0600
@@ -1483,6 +1483,7 @@ copy_uncompressed:
pgp_free_data(pgp, pool);
if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
goto failed_dup;
+ pgp->size = 0;
/* tmh_copy_from_client properly handles len==0 and offsets != 0 */
ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,0);
if ( ret == -EFAULT )
@@ -1492,7 +1493,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto failed_dup;
}
- pgp->size = 0;
done:
/* successfully replaced data, clean up and return success */
@@ -1509,12 +1509,14 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
- return -EFAULT;
+ ret = -EFAULT;
+ goto cleanup;
failed_dup:
/* couldn't change out the data, flush the old data and return
* -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
+ ret = -ENOSPC;
+cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
pgp_delete(pgpfound,0);
@@ -1528,7 +1530,7 @@ failed_dup:
tmem_spin_unlock(&obj->obj_spinlock);
}
pool->dup_puts_flushed++;
- return -ENOSPC;
+ return ret;
}
@@ -1579,6 +1581,7 @@ static NOINLINE int do_tmem_put(pool_t *
goto free;
ASSERT(ret != -EEXIST);
pgp->index = index;
+ pgp->size = 0;
if ( len != 0 && client->compress )
{
@@ -1615,7 +1618,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto delete_and_free;
}
- pgp->size = 0;
insert_page:
if ( is_ephemeral(pool) )
@@ -1648,6 +1650,11 @@ insert_page:
tot_good_eph_puts++;
return 1;
+bad_copy:
+ /* this should only happen if the client passed a bad mfn */
+ ret = -EFAULT;
+ failed_copies++;
+
delete_and_free:
ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
pgpdel = pgp_delete_from_obj(obj, pgp->index);
@@ -1669,12 +1676,6 @@ free:
}
pool->no_mem_puts++;
return ret;
-
-bad_copy:
- /* this should only happen if the client passed a bad mfn */
- failed_copies++;
-ASSERT(0);
- goto free;
}
static NOINLINE int do_tmem_get(pool_t *pool, uint64_t oid, uint32_t index,
@@ -1758,7 +1759,6 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
return -EFAULT;
}
diff -r 4892d31a78b1 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/common/tmem_xen.c Mon Jun 21 11:48:10 2010 -0600
@@ -101,7 +101,7 @@ static inline void *cli_mfn_to_va(tmem_c
p2m_type_t t;
cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
- if (t != p2m_ram_rw)
+ if (t != p2m_ram_rw || cli_mfn == INVALID_MFN)
return NULL;
if (pcli_mfn != NULL)
*pcli_mfn = cli_mfn;
diff -r 4892d31a78b1 xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/include/xen/tmem_xen.h Mon Jun 21 11:48:10 2010 -0600
@@ -456,7 +456,9 @@ static inline int tmh_get_tmemop_from_cl
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_hvm_vcpu(current) ?
+ hvm_guest_x86_mode(current) != 8 :
+ is_pv_32on64_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;
[-- Attachment #2: tmem-hvm-unstable.patch --]
[-- Type: application/octet-stream, Size: 4610 bytes --]
diff -r 4892d31a78b1 xen/arch/x86/hvm/hvm.c
--- a/xen/arch/x86/hvm/hvm.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/arch/x86/hvm/hvm.c Mon Jun 21 11:48:10 2010 -0600
@@ -2302,7 +2302,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#else /* defined(__x86_64__) */
@@ -2355,7 +2356,8 @@ static hvm_hypercall_t *hvm_hypercall64_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
static hvm_hypercall_t *hvm_hypercall32_table[NR_hypercalls] = {
@@ -2366,7 +2368,8 @@ static hvm_hypercall_t *hvm_hypercall32_
HYPERCALL(event_channel_op),
HYPERCALL(sched_op),
HYPERCALL(set_timer_op),
- HYPERCALL(hvm_op)
+ HYPERCALL(hvm_op),
+ HYPERCALL(tmem_op)
};
#endif /* defined(__x86_64__) */
diff -r 4892d31a78b1 xen/common/tmem.c
--- a/xen/common/tmem.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/common/tmem.c Mon Jun 21 11:48:10 2010 -0600
@@ -1483,6 +1483,7 @@ copy_uncompressed:
pgp_free_data(pgp, pool);
if ( ( pgp->pfp = tmem_page_alloc(pool) ) == NULL )
goto failed_dup;
+ pgp->size = 0;
/* tmh_copy_from_client properly handles len==0 and offsets != 0 */
ret = tmh_copy_from_client(pgp->pfp,cmfn,tmem_offset,pfn_offset,len,0);
if ( ret == -EFAULT )
@@ -1492,7 +1493,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto failed_dup;
}
- pgp->size = 0;
done:
/* successfully replaced data, clean up and return success */
@@ -1509,12 +1509,14 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
- return -EFAULT;
+ ret = -EFAULT;
+ goto cleanup;
failed_dup:
/* couldn't change out the data, flush the old data and return
* -ENOSPC instead of -ENOMEM to differentiate failed _dup_ put */
+ ret = -ENOSPC;
+cleanup:
pgpfound = pgp_delete_from_obj(obj, pgp->index);
ASSERT(pgpfound == pgp);
pgp_delete(pgpfound,0);
@@ -1528,7 +1530,7 @@ failed_dup:
tmem_spin_unlock(&obj->obj_spinlock);
}
pool->dup_puts_flushed++;
- return -ENOSPC;
+ return ret;
}
@@ -1579,6 +1581,7 @@ static NOINLINE int do_tmem_put(pool_t *
goto free;
ASSERT(ret != -EEXIST);
pgp->index = index;
+ pgp->size = 0;
if ( len != 0 && client->compress )
{
@@ -1615,7 +1618,6 @@ copy_uncompressed:
if ( pcd_associate(pgp,NULL,0) == -ENOMEM )
goto delete_and_free;
}
- pgp->size = 0;
insert_page:
if ( is_ephemeral(pool) )
@@ -1648,6 +1650,11 @@ insert_page:
tot_good_eph_puts++;
return 1;
+bad_copy:
+ /* this should only happen if the client passed a bad mfn */
+ ret = -EFAULT;
+ failed_copies++;
+
delete_and_free:
ASSERT((obj != NULL) && (pgp != NULL) && (pgp->index != -1));
pgpdel = pgp_delete_from_obj(obj, pgp->index);
@@ -1669,12 +1676,6 @@ free:
}
pool->no_mem_puts++;
return ret;
-
-bad_copy:
- /* this should only happen if the client passed a bad mfn */
- failed_copies++;
-ASSERT(0);
- goto free;
}
static NOINLINE int do_tmem_get(pool_t *pool, uint64_t oid, uint32_t index,
@@ -1758,7 +1759,6 @@ bad_copy:
bad_copy:
/* this should only happen if the client passed a bad mfn */
failed_copies++;
-ASSERT(0);
return -EFAULT;
}
diff -r 4892d31a78b1 xen/common/tmem_xen.c
--- a/xen/common/tmem_xen.c Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/common/tmem_xen.c Mon Jun 21 11:48:10 2010 -0600
@@ -101,7 +101,7 @@ static inline void *cli_mfn_to_va(tmem_c
p2m_type_t t;
cli_mfn = mfn_x(gfn_to_mfn(current->domain, cmfn, &t));
- if (t != p2m_ram_rw)
+ if (t != p2m_ram_rw || cli_mfn == INVALID_MFN)
return NULL;
if (pcli_mfn != NULL)
*pcli_mfn = cli_mfn;
diff -r 4892d31a78b1 xen/include/xen/tmem_xen.h
--- a/xen/include/xen/tmem_xen.h Mon Jun 21 18:37:34 2010 +0100
+++ b/xen/include/xen/tmem_xen.h Mon Jun 21 11:48:10 2010 -0600
@@ -456,7 +456,9 @@ static inline int tmh_get_tmemop_from_cl
static inline int tmh_get_tmemop_from_client(tmem_op_t *op, tmem_cli_op_t uops)
{
#ifdef CONFIG_COMPAT
- if ( is_pv_32on64_vcpu(current) )
+ if ( is_hvm_vcpu(current) ?
+ hvm_guest_x86_mode(current) != 8 :
+ is_pv_32on64_vcpu(current) )
{
int rc;
enum XLAT_tmem_op_u u;
[-- Attachment #3: Type: text/plain, Size: 138 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xensource.com
http://lists.xensource.com/xen-devel
^ permalink raw reply [flat|nested] 3+ messages in thread
end of thread, other threads:[~2010-06-21 17:54 UTC | newest]
Thread overview: 3+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2010-06-21 17:14 [PATCH] enable tmem functionality for PV on HVM guests Dan Magenheimer
2010-06-21 17:39 ` Keir Fraser
2010-06-21 17:54 ` Dan Magenheimer
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).