* [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 8:34 ` Paul Durrant
2017-09-06 10:04 ` Wei Liu
2017-09-06 8:26 ` [PATCH v2 2/8] xen: clean up grant_table.h Juergen Gross
` (6 subsequent siblings)
7 siblings, 2 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
The x86 and arm versions of XENMAPSPACE_grant_table handling are nearly
identical. Move the code into a common function in grant_table.c and add
an architecture-dependent hook to handle the differences.
This also fixes a bug in the arm code, which didn't unlock the grant
table in the error case.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
V2:
- rebased to staging
---
xen/arch/arm/mm.c | 36 ++++------------------------------
xen/arch/x86/mm.c | 41 ++++++++++-----------------------------
xen/common/grant_table.c | 38 ++++++++++++++++++++++++++++++++++++
xen/include/asm-arm/grant_table.h | 7 +++++++
xen/include/asm-x86/grant_table.h | 5 +++++
xen/include/xen/grant_table.h | 3 +++
6 files changed, 67 insertions(+), 63 deletions(-)
diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
index b39677eac9..3db0e3bdea 100644
--- a/xen/arch/arm/mm.c
+++ b/xen/arch/arm/mm.c
@@ -1229,39 +1229,11 @@ int xenmem_add_to_physmap_one(
switch ( space )
{
case XENMAPSPACE_grant_table:
- grant_write_lock(d->grant_table);
-
- if ( d->grant_table->gt_version == 0 )
- d->grant_table->gt_version = 1;
-
- if ( d->grant_table->gt_version == 2 &&
- (idx & XENMAPIDX_grant_table_status) )
- {
- idx &= ~XENMAPIDX_grant_table_status;
- if ( idx < nr_status_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->status[idx]);
- }
- else
- {
- if ( (idx >= nr_grant_frames(d->grant_table)) &&
- (idx < max_grant_frames) )
- gnttab_grow_table(d, idx + 1);
-
- if ( idx < nr_grant_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
- }
-
- if ( !mfn_eq(mfn, INVALID_MFN) )
- {
- d->arch.grant_table_gfn[idx] = gfn;
-
- t = p2m_ram_rw;
- }
-
- grant_write_unlock(d->grant_table);
+ rc = gnttab_map_frame(d, idx, gfn, &mfn);
+ if ( rc )
+ return rc;
- if ( mfn_eq(mfn, INVALID_MFN) )
- return -EINVAL;
+ t = p2m_ram_rw;
break;
case XENMAPSPACE_shared_info:
diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
index e5a029c9be..3d899e4a8e 100644
--- a/xen/arch/x86/mm.c
+++ b/xen/arch/x86/mm.c
@@ -4631,40 +4631,19 @@ int xenmem_add_to_physmap_one(
{
struct page_info *page = NULL;
unsigned long gfn = 0; /* gcc ... */
- unsigned long prev_mfn, mfn = 0, old_gpfn;
+ unsigned long prev_mfn, old_gpfn;
int rc = 0;
+ mfn_t mfn = INVALID_MFN;
p2m_type_t p2mt;
switch ( space )
{
case XENMAPSPACE_shared_info:
if ( idx == 0 )
- mfn = virt_to_mfn(d->shared_info);
+ mfn = _mfn(virt_to_mfn(d->shared_info));
break;
case XENMAPSPACE_grant_table:
- grant_write_lock(d->grant_table);
-
- if ( d->grant_table->gt_version == 0 )
- d->grant_table->gt_version = 1;
-
- if ( d->grant_table->gt_version == 2 &&
- (idx & XENMAPIDX_grant_table_status) )
- {
- idx &= ~XENMAPIDX_grant_table_status;
- if ( idx < nr_status_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->status[idx]);
- }
- else
- {
- if ( (idx >= nr_grant_frames(d->grant_table)) &&
- (idx < max_grant_frames) )
- gnttab_grow_table(d, idx + 1);
-
- if ( idx < nr_grant_frames(d->grant_table) )
- mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
- }
-
- grant_write_unlock(d->grant_table);
+ gnttab_map_frame(d, idx, gpfn, &mfn);
break;
case XENMAPSPACE_gmfn_range:
case XENMAPSPACE_gmfn:
@@ -4681,8 +4660,8 @@ int xenmem_add_to_physmap_one(
}
if ( !get_page_from_mfn(_mfn(idx), d) )
break;
- mfn = idx;
- page = mfn_to_page(_mfn(mfn));
+ mfn = _mfn(idx);
+ page = mfn_to_page(mfn);
break;
}
case XENMAPSPACE_gmfn_foreign:
@@ -4691,7 +4670,7 @@ int xenmem_add_to_physmap_one(
break;
}
- if ( !paging_mode_translate(d) || (mfn == 0) )
+ if ( !paging_mode_translate(d) || mfn_eq(mfn, INVALID_MFN) )
{
rc = -EINVAL;
goto put_both;
@@ -4715,16 +4694,16 @@ int xenmem_add_to_physmap_one(
goto put_both;
/* Unmap from old location, if any. */
- old_gpfn = get_gpfn_from_mfn(mfn);
+ old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
ASSERT( old_gpfn != SHARED_M2P_ENTRY );
if ( space == XENMAPSPACE_gmfn || space == XENMAPSPACE_gmfn_range )
ASSERT( old_gpfn == gfn );
if ( old_gpfn != INVALID_M2P_ENTRY )
- rc = guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn), PAGE_ORDER_4K);
+ rc = guest_physmap_remove_page(d, _gfn(old_gpfn), mfn, PAGE_ORDER_4K);
/* Map at new location. */
if ( !rc )
- rc = guest_physmap_add_page(d, gpfn, _mfn(mfn), PAGE_ORDER_4K);
+ rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
put_both:
/* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index fb3859ce8e..4c2e9e40a5 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3607,6 +3607,44 @@ int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
}
#endif
+int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
+ mfn_t *mfn)
+{
+ int rc = 0;
+ struct grant_table *gt = d->grant_table;
+
+ grant_write_lock(gt);
+
+ if ( gt->gt_version == 0 )
+ gt->gt_version = 1;
+
+ if ( gt->gt_version == 2 &&
+ (idx & XENMAPIDX_grant_table_status) )
+ {
+ idx &= ~XENMAPIDX_grant_table_status;
+ if ( idx < nr_status_frames(gt) )
+ *mfn = _mfn(virt_to_mfn(gt->status[idx]));
+ else
+ rc = -EINVAL;
+ }
+ else
+ {
+ if ( (idx >= nr_grant_frames(gt)) && (idx < max_grant_frames) )
+ gnttab_grow_table(d, idx + 1);
+
+ if ( idx < nr_grant_frames(gt) )
+ *mfn = _mfn(virt_to_mfn(gt->shared_raw[idx]));
+ else
+ rc = -EINVAL;
+ }
+
+ gnttab_set_frame_gfn(d, idx, gfn);
+
+ grant_write_unlock(gt);
+
+ return rc;
+}
+
static void gnttab_usage_print(struct domain *rd)
{
int first = 1;
diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-arm/grant_table.h
index bc4d61a940..0a248a765a 100644
--- a/xen/include/asm-arm/grant_table.h
+++ b/xen/include/asm-arm/grant_table.h
@@ -2,6 +2,7 @@
#define __ASM_GRANT_TABLE_H__
#include <xen/grant_table.h>
+#include <xen/sched.h>
#define INITIAL_NR_GRANT_FRAMES 4
@@ -21,6 +22,12 @@ static inline int replace_grant_supported(void)
return 1;
}
+static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long idx,
+ gfn_t gfn)
+{
+ d->arch.grant_table_gfn[idx] = gfn;
+}
+
#define gnttab_create_shared_page(d, t, i) \
do { \
share_xen_page_with_guest( \
diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-x86/grant_table.h
index 33b2f88b96..c865999a33 100644
--- a/xen/include/asm-x86/grant_table.h
+++ b/xen/include/asm-x86/grant_table.h
@@ -75,6 +75,11 @@ static inline void gnttab_clear_flag(unsigned int nr, uint16_t *st)
asm volatile ("lock btrw %w1,%0" : "=m" (*st) : "Ir" (nr), "m" (*st));
}
+static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long idx,
+ gfn_t gfn)
+{
+}
+
/* Foreign mappings of HHVM-guest pages do not modify the type count. */
#define gnttab_host_mapping_get_page_type(ro, ld, rd) \
(!(ro) && (((ld) == (rd)) || !paging_mode_external(rd)))
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index af269a108d..43ec6c4d80 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -136,4 +136,7 @@ static inline unsigned int grant_to_status_frames(int grant_frames)
int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
gfn_t *gfn, uint16_t *status);
+int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
+ mfn_t *mfn);
+
#endif /* __XEN_GRANT_TABLE_H__ */
--
2.12.3
* Re: [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c
2017-09-06 8:26 ` [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c Juergen Gross
@ 2017-09-06 8:34 ` Paul Durrant
2017-09-06 9:31 ` Juergen Gross
2017-09-06 10:04 ` Wei Liu
1 sibling, 1 reply; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 8:34 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:26
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table
> code into grant_table.c
>
> The x86 and arm versions of XENMAPSPACE_grant_table handling are nearly
> identical. Move the code into a function in grant_table.c and add an
> architecture dependant hook to handle the differences.
>
> This at once fixes a bug in the arm code which didn't unlock the grant
> table in error case.
You also move to greater use of type-safe mfn in x86 xenmem_add_to_physmap_one(), which is a good thing :-). Might be worth a mention in the commit comment though.
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
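As an aside, a minimal stand-alone illustration of what the typesafe wrappers
buy (simplified stand-ins modelled on mfn_t/_mfn()/mfn_x()/mfn_eq(), not the
actual Xen definitions):

#include <stdio.h>

/* Simplified stand-ins for Xen's typesafe frame numbers. */
typedef struct { unsigned long m; } mfn_t;
typedef struct { unsigned long g; } gfn_t;

static mfn_t _mfn(unsigned long m) { mfn_t x = { m }; return x; }
static unsigned long mfn_x(mfn_t mfn) { return mfn.m; }
static int mfn_eq(mfn_t a, mfn_t b) { return a.m == b.m; }

/* Distinct types mean gfn/mfn arguments cannot be swapped silently. */
static void map_page(gfn_t gfn, mfn_t mfn)
{
    printf("map gfn %lu -> mfn %lu\n", gfn.g, mfn_x(mfn));
}

int main(void)
{
    gfn_t gfn = { 7 };
    mfn_t mfn = _mfn(42);

    map_page(gfn, mfn);            /* fine */
    /* map_page(mfn, gfn); */      /* would be rejected by the compiler */

    return mfn_eq(mfn, _mfn(42)) ? 0 : 1;
}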
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> V2:
> - rebased to staging
> ---
> xen/arch/arm/mm.c | 36 ++++------------------------------
> xen/arch/x86/mm.c | 41 ++++++++++-----------------------------
> xen/common/grant_table.c | 38
> ++++++++++++++++++++++++++++++++++++
> xen/include/asm-arm/grant_table.h | 7 +++++++
> xen/include/asm-x86/grant_table.h | 5 +++++
> xen/include/xen/grant_table.h | 3 +++
> 6 files changed, 67 insertions(+), 63 deletions(-)
>
> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
> index b39677eac9..3db0e3bdea 100644
> --- a/xen/arch/arm/mm.c
> +++ b/xen/arch/arm/mm.c
> @@ -1229,39 +1229,11 @@ int xenmem_add_to_physmap_one(
> switch ( space )
> {
> case XENMAPSPACE_grant_table:
> - grant_write_lock(d->grant_table);
> -
> - if ( d->grant_table->gt_version == 0 )
> - d->grant_table->gt_version = 1;
> -
> - if ( d->grant_table->gt_version == 2 &&
> - (idx & XENMAPIDX_grant_table_status) )
> - {
> - idx &= ~XENMAPIDX_grant_table_status;
> - if ( idx < nr_status_frames(d->grant_table) )
> - mfn = virt_to_mfn(d->grant_table->status[idx]);
> - }
> - else
> - {
> - if ( (idx >= nr_grant_frames(d->grant_table)) &&
> - (idx < max_grant_frames) )
> - gnttab_grow_table(d, idx + 1);
> -
> - if ( idx < nr_grant_frames(d->grant_table) )
> - mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
> - }
> -
> - if ( !mfn_eq(mfn, INVALID_MFN) )
> - {
> - d->arch.grant_table_gfn[idx] = gfn;
> -
> - t = p2m_ram_rw;
> - }
> -
> - grant_write_unlock(d->grant_table);
> + rc = gnttab_map_frame(d, idx, gfn, &mfn);
> + if ( rc )
> + return rc;
>
> - if ( mfn_eq(mfn, INVALID_MFN) )
> - return -EINVAL;
> + t = p2m_ram_rw;
>
> break;
> case XENMAPSPACE_shared_info:
> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
> index e5a029c9be..3d899e4a8e 100644
> --- a/xen/arch/x86/mm.c
> +++ b/xen/arch/x86/mm.c
> @@ -4631,40 +4631,19 @@ int xenmem_add_to_physmap_one(
> {
> struct page_info *page = NULL;
> unsigned long gfn = 0; /* gcc ... */
> - unsigned long prev_mfn, mfn = 0, old_gpfn;
> + unsigned long prev_mfn, old_gpfn;
> int rc = 0;
> + mfn_t mfn = INVALID_MFN;
> p2m_type_t p2mt;
>
> switch ( space )
> {
> case XENMAPSPACE_shared_info:
> if ( idx == 0 )
> - mfn = virt_to_mfn(d->shared_info);
> + mfn = _mfn(virt_to_mfn(d->shared_info));
> break;
> case XENMAPSPACE_grant_table:
> - grant_write_lock(d->grant_table);
> -
> - if ( d->grant_table->gt_version == 0 )
> - d->grant_table->gt_version = 1;
> -
> - if ( d->grant_table->gt_version == 2 &&
> - (idx & XENMAPIDX_grant_table_status) )
> - {
> - idx &= ~XENMAPIDX_grant_table_status;
> - if ( idx < nr_status_frames(d->grant_table) )
> - mfn = virt_to_mfn(d->grant_table->status[idx]);
> - }
> - else
> - {
> - if ( (idx >= nr_grant_frames(d->grant_table)) &&
> - (idx < max_grant_frames) )
> - gnttab_grow_table(d, idx + 1);
> -
> - if ( idx < nr_grant_frames(d->grant_table) )
> - mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
> - }
> -
> - grant_write_unlock(d->grant_table);
> + gnttab_map_frame(d, idx, gpfn, &mfn);
> break;
> case XENMAPSPACE_gmfn_range:
> case XENMAPSPACE_gmfn:
> @@ -4681,8 +4660,8 @@ int xenmem_add_to_physmap_one(
> }
> if ( !get_page_from_mfn(_mfn(idx), d) )
> break;
> - mfn = idx;
> - page = mfn_to_page(_mfn(mfn));
> + mfn = _mfn(idx);
> + page = mfn_to_page(mfn);
> break;
> }
> case XENMAPSPACE_gmfn_foreign:
> @@ -4691,7 +4670,7 @@ int xenmem_add_to_physmap_one(
> break;
> }
>
> - if ( !paging_mode_translate(d) || (mfn == 0) )
> + if ( !paging_mode_translate(d) || mfn_eq(mfn, INVALID_MFN) )
> {
> rc = -EINVAL;
> goto put_both;
> @@ -4715,16 +4694,16 @@ int xenmem_add_to_physmap_one(
> goto put_both;
>
> /* Unmap from old location, if any. */
> - old_gpfn = get_gpfn_from_mfn(mfn);
> + old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
> ASSERT( old_gpfn != SHARED_M2P_ENTRY );
> if ( space == XENMAPSPACE_gmfn || space ==
> XENMAPSPACE_gmfn_range )
> ASSERT( old_gpfn == gfn );
> if ( old_gpfn != INVALID_M2P_ENTRY )
> - rc = guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn),
> PAGE_ORDER_4K);
> + rc = guest_physmap_remove_page(d, _gfn(old_gpfn), mfn,
> PAGE_ORDER_4K);
>
> /* Map at new location. */
> if ( !rc )
> - rc = guest_physmap_add_page(d, gpfn, _mfn(mfn), PAGE_ORDER_4K);
> + rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
>
> put_both:
> /* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index fb3859ce8e..4c2e9e40a5 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -3607,6 +3607,44 @@ int mem_sharing_gref_to_gfn(struct grant_table
> *gt, grant_ref_t ref,
> }
> #endif
>
> +int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
> + mfn_t *mfn)
> +{
> + int rc = 0;
> + struct grant_table *gt = d->grant_table;
> +
> + grant_write_lock(gt);
> +
> + if ( gt->gt_version == 0 )
> + gt->gt_version = 1;
> +
> + if ( gt->gt_version == 2 &&
> + (idx & XENMAPIDX_grant_table_status) )
> + {
> + idx &= ~XENMAPIDX_grant_table_status;
> + if ( idx < nr_status_frames(gt) )
> + *mfn = _mfn(virt_to_mfn(gt->status[idx]));
> + else
> + rc = -EINVAL;
> + }
> + else
> + {
> + if ( (idx >= nr_grant_frames(gt)) && (idx < max_grant_frames) )
> + gnttab_grow_table(d, idx + 1);
> +
> + if ( idx < nr_grant_frames(gt) )
> + *mfn = _mfn(virt_to_mfn(gt->shared_raw[idx]));
> + else
> + rc = -EINVAL;
> + }
> +
> + gnttab_set_frame_gfn(d, idx, gfn);
> +
> + grant_write_unlock(gt);
> +
> + return rc;
> +}
> +
> static void gnttab_usage_print(struct domain *rd)
> {
> int first = 1;
> diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-
> arm/grant_table.h
> index bc4d61a940..0a248a765a 100644
> --- a/xen/include/asm-arm/grant_table.h
> +++ b/xen/include/asm-arm/grant_table.h
> @@ -2,6 +2,7 @@
> #define __ASM_GRANT_TABLE_H__
>
> #include <xen/grant_table.h>
> +#include <xen/sched.h>
>
> #define INITIAL_NR_GRANT_FRAMES 4
>
> @@ -21,6 +22,12 @@ static inline int replace_grant_supported(void)
> return 1;
> }
>
> +static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long
> idx,
> + gfn_t gfn)
> +{
> + d->arch.grant_table_gfn[idx] = gfn;
> +}
> +
> #define gnttab_create_shared_page(d, t, i) \
> do { \
> share_xen_page_with_guest( \
> diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-
> x86/grant_table.h
> index 33b2f88b96..c865999a33 100644
> --- a/xen/include/asm-x86/grant_table.h
> +++ b/xen/include/asm-x86/grant_table.h
> @@ -75,6 +75,11 @@ static inline void gnttab_clear_flag(unsigned int nr,
> uint16_t *st)
> asm volatile ("lock btrw %w1,%0" : "=m" (*st) : "Ir" (nr), "m" (*st));
> }
>
> +static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long
> idx,
> + gfn_t gfn)
> +{
> +}
> +
> /* Foreign mappings of HHVM-guest pages do not modify the type count. */
> #define gnttab_host_mapping_get_page_type(ro, ld, rd) \
> (!(ro) && (((ld) == (rd)) || !paging_mode_external(rd)))
> diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
> index af269a108d..43ec6c4d80 100644
> --- a/xen/include/xen/grant_table.h
> +++ b/xen/include/xen/grant_table.h
> @@ -136,4 +136,7 @@ static inline unsigned int grant_to_status_frames(int
> grant_frames)
> int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
> gfn_t *gfn, uint16_t *status);
>
> +int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
> + mfn_t *mfn);
> +
> #endif /* __XEN_GRANT_TABLE_H__ */
> --
> 2.12.3
>
>
* Re: [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c
2017-09-06 8:34 ` Paul Durrant
@ 2017-09-06 9:31 ` Juergen Gross
0 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 9:31 UTC (permalink / raw)
To: Paul Durrant, xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
On 06/09/17 10:34, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
>> Juergen Gross
>> Sent: 06 September 2017 09:26
>> To: xen-devel@lists.xen.org
>> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
>> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
>> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
>> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
>> jbeulich@suse.com
>> Subject: [Xen-devel] [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table
>> code into grant_table.c
>>
>> The x86 and arm versions of XENMAPSPACE_grant_table handling are nearly
>> identical. Move the code into a function in grant_table.c and add an
>> architecture dependant hook to handle the differences.
>>
>> This at once fixes a bug in the arm code which didn't unlock the grant
>> table in error case.
>
> You also move to greater use of type-safe mfn in x86 xenmem_add_to_physmap_one(), which is a good thing :-). Might be worth a mention in the commit comment though.
Yeah. And I should drop the paragraph about the ARM unlock bug, as it
is no longer true.
Juergen
>
> Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
>
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>> V2:
>> - rebased to staging
>> ---
>> xen/arch/arm/mm.c | 36 ++++------------------------------
>> xen/arch/x86/mm.c | 41 ++++++++++-----------------------------
>> xen/common/grant_table.c | 38
>> ++++++++++++++++++++++++++++++++++++
>> xen/include/asm-arm/grant_table.h | 7 +++++++
>> xen/include/asm-x86/grant_table.h | 5 +++++
>> xen/include/xen/grant_table.h | 3 +++
>> 6 files changed, 67 insertions(+), 63 deletions(-)
>>
>> diff --git a/xen/arch/arm/mm.c b/xen/arch/arm/mm.c
>> index b39677eac9..3db0e3bdea 100644
>> --- a/xen/arch/arm/mm.c
>> +++ b/xen/arch/arm/mm.c
>> @@ -1229,39 +1229,11 @@ int xenmem_add_to_physmap_one(
>> switch ( space )
>> {
>> case XENMAPSPACE_grant_table:
>> - grant_write_lock(d->grant_table);
>> -
>> - if ( d->grant_table->gt_version == 0 )
>> - d->grant_table->gt_version = 1;
>> -
>> - if ( d->grant_table->gt_version == 2 &&
>> - (idx & XENMAPIDX_grant_table_status) )
>> - {
>> - idx &= ~XENMAPIDX_grant_table_status;
>> - if ( idx < nr_status_frames(d->grant_table) )
>> - mfn = virt_to_mfn(d->grant_table->status[idx]);
>> - }
>> - else
>> - {
>> - if ( (idx >= nr_grant_frames(d->grant_table)) &&
>> - (idx < max_grant_frames) )
>> - gnttab_grow_table(d, idx + 1);
>> -
>> - if ( idx < nr_grant_frames(d->grant_table) )
>> - mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
>> - }
>> -
>> - if ( !mfn_eq(mfn, INVALID_MFN) )
>> - {
>> - d->arch.grant_table_gfn[idx] = gfn;
>> -
>> - t = p2m_ram_rw;
>> - }
>> -
>> - grant_write_unlock(d->grant_table);
>> + rc = gnttab_map_frame(d, idx, gfn, &mfn);
>> + if ( rc )
>> + return rc;
>>
>> - if ( mfn_eq(mfn, INVALID_MFN) )
>> - return -EINVAL;
>> + t = p2m_ram_rw;
>>
>> break;
>> case XENMAPSPACE_shared_info:
>> diff --git a/xen/arch/x86/mm.c b/xen/arch/x86/mm.c
>> index e5a029c9be..3d899e4a8e 100644
>> --- a/xen/arch/x86/mm.c
>> +++ b/xen/arch/x86/mm.c
>> @@ -4631,40 +4631,19 @@ int xenmem_add_to_physmap_one(
>> {
>> struct page_info *page = NULL;
>> unsigned long gfn = 0; /* gcc ... */
>> - unsigned long prev_mfn, mfn = 0, old_gpfn;
>> + unsigned long prev_mfn, old_gpfn;
>> int rc = 0;
>> + mfn_t mfn = INVALID_MFN;
>> p2m_type_t p2mt;
>>
>> switch ( space )
>> {
>> case XENMAPSPACE_shared_info:
>> if ( idx == 0 )
>> - mfn = virt_to_mfn(d->shared_info);
>> + mfn = _mfn(virt_to_mfn(d->shared_info));
>> break;
>> case XENMAPSPACE_grant_table:
>> - grant_write_lock(d->grant_table);
>> -
>> - if ( d->grant_table->gt_version == 0 )
>> - d->grant_table->gt_version = 1;
>> -
>> - if ( d->grant_table->gt_version == 2 &&
>> - (idx & XENMAPIDX_grant_table_status) )
>> - {
>> - idx &= ~XENMAPIDX_grant_table_status;
>> - if ( idx < nr_status_frames(d->grant_table) )
>> - mfn = virt_to_mfn(d->grant_table->status[idx]);
>> - }
>> - else
>> - {
>> - if ( (idx >= nr_grant_frames(d->grant_table)) &&
>> - (idx < max_grant_frames) )
>> - gnttab_grow_table(d, idx + 1);
>> -
>> - if ( idx < nr_grant_frames(d->grant_table) )
>> - mfn = virt_to_mfn(d->grant_table->shared_raw[idx]);
>> - }
>> -
>> - grant_write_unlock(d->grant_table);
>> + gnttab_map_frame(d, idx, gpfn, &mfn);
>> break;
>> case XENMAPSPACE_gmfn_range:
>> case XENMAPSPACE_gmfn:
>> @@ -4681,8 +4660,8 @@ int xenmem_add_to_physmap_one(
>> }
>> if ( !get_page_from_mfn(_mfn(idx), d) )
>> break;
>> - mfn = idx;
>> - page = mfn_to_page(_mfn(mfn));
>> + mfn = _mfn(idx);
>> + page = mfn_to_page(mfn);
>> break;
>> }
>> case XENMAPSPACE_gmfn_foreign:
>> @@ -4691,7 +4670,7 @@ int xenmem_add_to_physmap_one(
>> break;
>> }
>>
>> - if ( !paging_mode_translate(d) || (mfn == 0) )
>> + if ( !paging_mode_translate(d) || mfn_eq(mfn, INVALID_MFN) )
>> {
>> rc = -EINVAL;
>> goto put_both;
>> @@ -4715,16 +4694,16 @@ int xenmem_add_to_physmap_one(
>> goto put_both;
>>
>> /* Unmap from old location, if any. */
>> - old_gpfn = get_gpfn_from_mfn(mfn);
>> + old_gpfn = get_gpfn_from_mfn(mfn_x(mfn));
>> ASSERT( old_gpfn != SHARED_M2P_ENTRY );
>> if ( space == XENMAPSPACE_gmfn || space ==
>> XENMAPSPACE_gmfn_range )
>> ASSERT( old_gpfn == gfn );
>> if ( old_gpfn != INVALID_M2P_ENTRY )
>> - rc = guest_physmap_remove_page(d, _gfn(old_gpfn), _mfn(mfn),
>> PAGE_ORDER_4K);
>> + rc = guest_physmap_remove_page(d, _gfn(old_gpfn), mfn,
>> PAGE_ORDER_4K);
>>
>> /* Map at new location. */
>> if ( !rc )
>> - rc = guest_physmap_add_page(d, gpfn, _mfn(mfn), PAGE_ORDER_4K);
>> + rc = guest_physmap_add_page(d, gpfn, mfn, PAGE_ORDER_4K);
>>
>> put_both:
>> /* In the XENMAPSPACE_gmfn, we took a ref of the gfn at the top */
>> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
>> index fb3859ce8e..4c2e9e40a5 100644
>> --- a/xen/common/grant_table.c
>> +++ b/xen/common/grant_table.c
>> @@ -3607,6 +3607,44 @@ int mem_sharing_gref_to_gfn(struct grant_table
>> *gt, grant_ref_t ref,
>> }
>> #endif
>>
>> +int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
>> + mfn_t *mfn)
>> +{
>> + int rc = 0;
>> + struct grant_table *gt = d->grant_table;
>> +
>> + grant_write_lock(gt);
>> +
>> + if ( gt->gt_version == 0 )
>> + gt->gt_version = 1;
>> +
>> + if ( gt->gt_version == 2 &&
>> + (idx & XENMAPIDX_grant_table_status) )
>> + {
>> + idx &= ~XENMAPIDX_grant_table_status;
>> + if ( idx < nr_status_frames(gt) )
>> + *mfn = _mfn(virt_to_mfn(gt->status[idx]));
>> + else
>> + rc = -EINVAL;
>> + }
>> + else
>> + {
>> + if ( (idx >= nr_grant_frames(gt)) && (idx < max_grant_frames) )
>> + gnttab_grow_table(d, idx + 1);
>> +
>> + if ( idx < nr_grant_frames(gt) )
>> + *mfn = _mfn(virt_to_mfn(gt->shared_raw[idx]));
>> + else
>> + rc = -EINVAL;
>> + }
>> +
>> + gnttab_set_frame_gfn(d, idx, gfn);
>> +
>> + grant_write_unlock(gt);
>> +
>> + return rc;
>> +}
>> +
>> static void gnttab_usage_print(struct domain *rd)
>> {
>> int first = 1;
>> diff --git a/xen/include/asm-arm/grant_table.h b/xen/include/asm-
>> arm/grant_table.h
>> index bc4d61a940..0a248a765a 100644
>> --- a/xen/include/asm-arm/grant_table.h
>> +++ b/xen/include/asm-arm/grant_table.h
>> @@ -2,6 +2,7 @@
>> #define __ASM_GRANT_TABLE_H__
>>
>> #include <xen/grant_table.h>
>> +#include <xen/sched.h>
>>
>> #define INITIAL_NR_GRANT_FRAMES 4
>>
>> @@ -21,6 +22,12 @@ static inline int replace_grant_supported(void)
>> return 1;
>> }
>>
>> +static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long
>> idx,
>> + gfn_t gfn)
>> +{
>> + d->arch.grant_table_gfn[idx] = gfn;
>> +}
>> +
>> #define gnttab_create_shared_page(d, t, i) \
>> do { \
>> share_xen_page_with_guest( \
>> diff --git a/xen/include/asm-x86/grant_table.h b/xen/include/asm-
>> x86/grant_table.h
>> index 33b2f88b96..c865999a33 100644
>> --- a/xen/include/asm-x86/grant_table.h
>> +++ b/xen/include/asm-x86/grant_table.h
>> @@ -75,6 +75,11 @@ static inline void gnttab_clear_flag(unsigned int nr,
>> uint16_t *st)
>> asm volatile ("lock btrw %w1,%0" : "=m" (*st) : "Ir" (nr), "m" (*st));
>> }
>>
>> +static inline void gnttab_set_frame_gfn(struct domain *d, unsigned long
>> idx,
>> + gfn_t gfn)
>> +{
>> +}
>> +
>> /* Foreign mappings of HHVM-guest pages do not modify the type count. */
>> #define gnttab_host_mapping_get_page_type(ro, ld, rd) \
>> (!(ro) && (((ld) == (rd)) || !paging_mode_external(rd)))
>> diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
>> index af269a108d..43ec6c4d80 100644
>> --- a/xen/include/xen/grant_table.h
>> +++ b/xen/include/xen/grant_table.h
>> @@ -136,4 +136,7 @@ static inline unsigned int grant_to_status_frames(int
>> grant_frames)
>> int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
>> gfn_t *gfn, uint16_t *status);
>>
>> +int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
>> + mfn_t *mfn);
>> +
>> #endif /* __XEN_GRANT_TABLE_H__ */
>> --
>> 2.12.3
>>
>>
* Re: [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c
2017-09-06 8:26 ` [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c Juergen Gross
2017-09-06 8:34 ` Paul Durrant
@ 2017-09-06 10:04 ` Wei Liu
1 sibling, 0 replies; 23+ messages in thread
From: Wei Liu @ 2017-09-06 10:04 UTC (permalink / raw)
To: Juergen Gross
Cc: tim, sstabellini, wei.liu2, George.Dunlap, andrew.cooper3,
ian.jackson, xen-devel, jbeulich
On Wed, Sep 06, 2017 at 10:26:25AM +0200, Juergen Gross wrote:
> The x86 and arm versions of XENMAPSPACE_grant_table handling are nearly
> identical. Move the code into a function in grant_table.c and add an
> architecture dependant hook to handle the differences.
>
> This at once fixes a bug in the arm code which didn't unlock the grant
> table in error case.
Please fix the commit message.
Code-wise:
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
* [PATCH v2 2/8] xen: clean up grant_table.h
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
2017-09-06 8:26 ` [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 8:37 ` Paul Durrant
2017-09-06 10:11 ` Wei Liu
2017-09-06 8:26 ` [PATCH v2 3/8] xen: delay allocation of grant table sub structures Juergen Gross
` (5 subsequent siblings)
7 siblings, 2 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Many definitions can be moved from xen/grant_table.h to
common/grant_table.c now, as they are no longer used in any other source file.
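For illustration, the general idiom being applied, sketched with stand-in
names (the real case is struct grant_table becoming private to
grant_table.c, see the diff below):

#include <stdlib.h>

/* What stays in the header: an opaque type plus its accessors. */
struct table;
struct table *table_create(void);
unsigned int table_nr_frames(const struct table *t);

/* What moves into the one .c file that still needs the details. */
struct table {
    unsigned int nr_frames;        /* layout now private to this file */
};

struct table *table_create(void)
{
    struct table *t = calloc(1, sizeof(*t));

    if ( t )
        t->nr_frames = 4;          /* stand-in for the initial frame count */
    return t;
}

unsigned int table_nr_frames(const struct table *t)
{
    return t->nr_frames;
}

/* Stand-in user: only goes through the declared functions. */
int main(void)
{
    struct table *t = table_create();
    unsigned int nr = t ? table_nr_frames(t) : 0;

    free(t);
    return nr == 4 ? 0 : 1;
}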
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/grant_table.c | 81 +++++++++++++++++++++++++++++++++++++++--
xen/include/xen/grant_table.h | 84 -------------------------------------------
2 files changed, 79 insertions(+), 86 deletions(-)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 4c2e9e40a5..a94dfdda7b 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -40,6 +40,42 @@
#include <xsm/xsm.h>
#include <asm/flushtlb.h>
+/* Per-domain grant information. */
+struct grant_table {
+ /*
+ * Lock protecting updates to grant table state (version, active
+ * entry list, etc.)
+ */
+ percpu_rwlock_t lock;
+ /* Table size. Number of frames shared with guest */
+ unsigned int nr_grant_frames;
+ /* Shared grant table (see include/public/grant_table.h). */
+ union {
+ void **shared_raw;
+ struct grant_entry_v1 **shared_v1;
+ union grant_entry_v2 **shared_v2;
+ };
+ /* Number of grant status frames shared with guest (for version 2) */
+ unsigned int nr_status_frames;
+ /* State grant table (see include/public/grant_table.h). */
+ grant_status_t **status;
+ /* Active grant table. */
+ struct active_grant_entry **active;
+ /* Mapping tracking table per vcpu. */
+ struct grant_mapping **maptrack;
+ unsigned int maptrack_limit;
+ /* Lock protecting the maptrack limit */
+ spinlock_t maptrack_lock;
+ /* The defined versions are 1 and 2. Set to 0 if we don't know
+ what version to use yet. */
+ unsigned gt_version;
+};
+
+#ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
+/* Default maximum size of a grant table. [POLICY] */
+#define DEFAULT_MAX_NR_GRANT_FRAMES 32
+#endif
+
unsigned int __read_mostly max_grant_frames;
integer_param("gnttab_max_frames", max_grant_frames);
@@ -118,6 +154,18 @@ struct grant_mapping {
uint32_t pad; /* round size to a power of 2 */
};
+/* Number of grant table frames. Caller must hold d's grant table lock. */
+static inline unsigned int nr_grant_frames(const struct grant_table *gt)
+{
+ return gt->nr_grant_frames;
+}
+
+/* Number of status grant table frames. Caller must hold d's gr. table lock.*/
+static inline unsigned int nr_status_frames(const struct grant_table *gt)
+{
+ return gt->nr_status_frames;
+}
+
#define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
#define maptrack_entry(t, e) \
((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
@@ -197,7 +245,27 @@ static inline void act_set_gfn(struct active_grant_entry *act, gfn_t gfn)
#endif
}
-DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
+static DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
+
+static inline void grant_read_lock(struct grant_table *gt)
+{
+ percpu_read_lock(grant_rwlock, >->lock);
+}
+
+static inline void grant_read_unlock(struct grant_table *gt)
+{
+ percpu_read_unlock(grant_rwlock, >->lock);
+}
+
+static inline void grant_write_lock(struct grant_table *gt)
+{
+ percpu_write_lock(grant_rwlock, >->lock);
+}
+
+static inline void grant_write_unlock(struct grant_table *gt)
+{
+ percpu_write_unlock(grant_rwlock, >->lock);
+}
static inline void gnttab_flush_tlb(const struct domain *d)
{
@@ -250,6 +318,15 @@ static inline void active_entry_release(struct active_grant_entry *act)
spin_unlock(&act->lock);
}
+#define GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
+#define GRANT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
+/* Number of grant table status entries. Caller must hold d's gr. table lock.*/
+static inline unsigned int grant_to_status_frames(unsigned int grant_frames)
+{
+ return (grant_frames * GRANT_PER_PAGE + GRANT_STATUS_PER_PAGE - 1) /
+ GRANT_STATUS_PER_PAGE;
+}
+
/* Check if the page has been paged out, or needs unsharing.
If rc == GNTST_okay, *page contains the page struct with a ref taken.
Caller must do put_page(*page).
@@ -1580,7 +1657,7 @@ gnttab_unpopulate_status_frames(struct domain *d, struct grant_table *gt)
* Grow the grant table. The caller must hold the grant table's
* write lock before calling this function.
*/
-int
+static int
gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
{
struct grant_table *gt = d->grant_table;
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index 43ec6c4d80..43b07e60c5 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -29,66 +29,9 @@
#include <asm/page.h>
#include <asm/grant_table.h>
-#ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
-/* Default maximum size of a grant table. [POLICY] */
-#define DEFAULT_MAX_NR_GRANT_FRAMES 32
-#endif
/* The maximum size of a grant table. */
extern unsigned int max_grant_frames;
-DECLARE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
-
-/* Per-domain grant information. */
-struct grant_table {
- /*
- * Lock protecting updates to grant table state (version, active
- * entry list, etc.)
- */
- percpu_rwlock_t lock;
- /* Table size. Number of frames shared with guest */
- unsigned int nr_grant_frames;
- /* Shared grant table (see include/public/grant_table.h). */
- union {
- void **shared_raw;
- struct grant_entry_v1 **shared_v1;
- union grant_entry_v2 **shared_v2;
- };
- /* Number of grant status frames shared with guest (for version 2) */
- unsigned int nr_status_frames;
- /* State grant table (see include/public/grant_table.h). */
- grant_status_t **status;
- /* Active grant table. */
- struct active_grant_entry **active;
- /* Mapping tracking table per vcpu. */
- struct grant_mapping **maptrack;
- unsigned int maptrack_limit;
- /* Lock protecting the maptrack limit */
- spinlock_t maptrack_lock;
- /* The defined versions are 1 and 2. Set to 0 if we don't know
- what version to use yet. */
- unsigned gt_version;
-};
-
-static inline void grant_read_lock(struct grant_table *gt)
-{
- percpu_read_lock(grant_rwlock, >->lock);
-}
-
-static inline void grant_read_unlock(struct grant_table *gt)
-{
- percpu_read_unlock(grant_rwlock, >->lock);
-}
-
-static inline void grant_write_lock(struct grant_table *gt)
-{
- percpu_write_lock(grant_rwlock, >->lock);
-}
-
-static inline void grant_write_unlock(struct grant_table *gt)
-{
- percpu_write_unlock(grant_rwlock, >->lock);
-}
-
/* Create/destroy per-domain grant table context. */
int grant_table_create(
struct domain *d);
@@ -106,33 +49,6 @@ void
gnttab_release_mappings(
struct domain *d);
-/* Increase the size of a domain's grant table.
- * Caller must hold d's grant table write lock.
- */
-int
-gnttab_grow_table(struct domain *d, unsigned int req_nr_frames);
-
-/* Number of grant table frames. Caller must hold d's grant table lock. */
-static inline unsigned int nr_grant_frames(struct grant_table *gt)
-{
- return gt->nr_grant_frames;
-}
-
-/* Number of status grant table frames. Caller must hold d's gr. table lock.*/
-static inline unsigned int nr_status_frames(struct grant_table *gt)
-{
- return gt->nr_status_frames;
-}
-
-#define GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
-#define GRANT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
-/* Number of grant table status entries. Caller must hold d's gr. table lock.*/
-static inline unsigned int grant_to_status_frames(int grant_frames)
-{
- return (grant_frames * GRANT_PER_PAGE + GRANT_STATUS_PER_PAGE - 1) /
- GRANT_STATUS_PER_PAGE;
-}
-
int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
gfn_t *gfn, uint16_t *status);
--
2.12.3
* Re: [PATCH v2 2/8] xen: clean up grant_table.h
2017-09-06 8:26 ` [PATCH v2 2/8] xen: clean up grant_table.h Juergen Gross
@ 2017-09-06 8:37 ` Paul Durrant
2017-09-06 10:11 ` Wei Liu
1 sibling, 0 replies; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 8:37 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:26
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 2/8] xen: clean up grant_table.h
>
> Many definitions can be moved from xen/grant_table.h to
> common/grant_table.c now, as they are no longer used in other sources.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> xen/common/grant_table.c | 81
> +++++++++++++++++++++++++++++++++++++++--
> xen/include/xen/grant_table.h | 84 -------------------------------------------
> 2 files changed, 79 insertions(+), 86 deletions(-)
>
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index 4c2e9e40a5..a94dfdda7b 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -40,6 +40,42 @@
> #include <xsm/xsm.h>
> #include <asm/flushtlb.h>
>
> +/* Per-domain grant information. */
> +struct grant_table {
> + /*
> + * Lock protecting updates to grant table state (version, active
> + * entry list, etc.)
> + */
> + percpu_rwlock_t lock;
> + /* Table size. Number of frames shared with guest */
> + unsigned int nr_grant_frames;
> + /* Shared grant table (see include/public/grant_table.h). */
> + union {
> + void **shared_raw;
> + struct grant_entry_v1 **shared_v1;
> + union grant_entry_v2 **shared_v2;
> + };
> + /* Number of grant status frames shared with guest (for version 2) */
> + unsigned int nr_status_frames;
> + /* State grant table (see include/public/grant_table.h). */
> + grant_status_t **status;
> + /* Active grant table. */
> + struct active_grant_entry **active;
> + /* Mapping tracking table per vcpu. */
> + struct grant_mapping **maptrack;
> + unsigned int maptrack_limit;
> + /* Lock protecting the maptrack limit */
> + spinlock_t maptrack_lock;
> + /* The defined versions are 1 and 2. Set to 0 if we don't know
> + what version to use yet. */
> + unsigned gt_version;
> +};
> +
> +#ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override
> */
> +/* Default maximum size of a grant table. [POLICY] */
> +#define DEFAULT_MAX_NR_GRANT_FRAMES 32
> +#endif
> +
> unsigned int __read_mostly max_grant_frames;
> integer_param("gnttab_max_frames", max_grant_frames);
>
> @@ -118,6 +154,18 @@ struct grant_mapping {
> uint32_t pad; /* round size to a power of 2 */
> };
>
> +/* Number of grant table frames. Caller must hold d's grant table lock. */
> +static inline unsigned int nr_grant_frames(const struct grant_table *gt)
> +{
> + return gt->nr_grant_frames;
> +}
> +
> +/* Number of status grant table frames. Caller must hold d's gr. table lock.*/
> +static inline unsigned int nr_status_frames(const struct grant_table *gt)
> +{
> + return gt->nr_status_frames;
> +}
> +
> #define MAPTRACK_PER_PAGE (PAGE_SIZE / sizeof(struct grant_mapping))
> #define maptrack_entry(t, e) \
> ((t)->maptrack[(e)/MAPTRACK_PER_PAGE][(e)%MAPTRACK_PER_PAGE])
> @@ -197,7 +245,27 @@ static inline void act_set_gfn(struct
> active_grant_entry *act, gfn_t gfn)
> #endif
> }
>
> -DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
> +static DEFINE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
> +
> +static inline void grant_read_lock(struct grant_table *gt)
> +{
> + percpu_read_lock(grant_rwlock, >->lock);
> +}
> +
> +static inline void grant_read_unlock(struct grant_table *gt)
> +{
> + percpu_read_unlock(grant_rwlock, >->lock);
> +}
> +
> +static inline void grant_write_lock(struct grant_table *gt)
> +{
> + percpu_write_lock(grant_rwlock, >->lock);
> +}
> +
> +static inline void grant_write_unlock(struct grant_table *gt)
> +{
> + percpu_write_unlock(grant_rwlock, >->lock);
> +}
>
> static inline void gnttab_flush_tlb(const struct domain *d)
> {
> @@ -250,6 +318,15 @@ static inline void active_entry_release(struct
> active_grant_entry *act)
> spin_unlock(&act->lock);
> }
>
> +#define GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
> +#define GRANT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
> +/* Number of grant table status entries. Caller must hold d's gr. table lock.*/
> +static inline unsigned int grant_to_status_frames(unsigned int
> grant_frames)
> +{
> + return (grant_frames * GRANT_PER_PAGE + GRANT_STATUS_PER_PAGE
> - 1) /
> + GRANT_STATUS_PER_PAGE;
> +}
> +
> /* Check if the page has been paged out, or needs unsharing.
> If rc == GNTST_okay, *page contains the page struct with a ref taken.
> Caller must do put_page(*page).
> @@ -1580,7 +1657,7 @@ gnttab_unpopulate_status_frames(struct domain
> *d, struct grant_table *gt)
> * Grow the grant table. The caller must hold the grant table's
> * write lock before calling this function.
> */
> -int
> +static int
> gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
> {
> struct grant_table *gt = d->grant_table;
> diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
> index 43ec6c4d80..43b07e60c5 100644
> --- a/xen/include/xen/grant_table.h
> +++ b/xen/include/xen/grant_table.h
> @@ -29,66 +29,9 @@
> #include <asm/page.h>
> #include <asm/grant_table.h>
>
> -#ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
> -/* Default maximum size of a grant table. [POLICY] */
> -#define DEFAULT_MAX_NR_GRANT_FRAMES 32
> -#endif
> /* The maximum size of a grant table. */
> extern unsigned int max_grant_frames;
>
> -DECLARE_PERCPU_RWLOCK_GLOBAL(grant_rwlock);
> -
> -/* Per-domain grant information. */
> -struct grant_table {
> - /*
> - * Lock protecting updates to grant table state (version, active
> - * entry list, etc.)
> - */
> - percpu_rwlock_t lock;
> - /* Table size. Number of frames shared with guest */
> - unsigned int nr_grant_frames;
> - /* Shared grant table (see include/public/grant_table.h). */
> - union {
> - void **shared_raw;
> - struct grant_entry_v1 **shared_v1;
> - union grant_entry_v2 **shared_v2;
> - };
> - /* Number of grant status frames shared with guest (for version 2) */
> - unsigned int nr_status_frames;
> - /* State grant table (see include/public/grant_table.h). */
> - grant_status_t **status;
> - /* Active grant table. */
> - struct active_grant_entry **active;
> - /* Mapping tracking table per vcpu. */
> - struct grant_mapping **maptrack;
> - unsigned int maptrack_limit;
> - /* Lock protecting the maptrack limit */
> - spinlock_t maptrack_lock;
> - /* The defined versions are 1 and 2. Set to 0 if we don't know
> - what version to use yet. */
> - unsigned gt_version;
> -};
> -
> -static inline void grant_read_lock(struct grant_table *gt)
> -{
> - percpu_read_lock(grant_rwlock, >->lock);
> -}
> -
> -static inline void grant_read_unlock(struct grant_table *gt)
> -{
> - percpu_read_unlock(grant_rwlock, >->lock);
> -}
> -
> -static inline void grant_write_lock(struct grant_table *gt)
> -{
> - percpu_write_lock(grant_rwlock, >->lock);
> -}
> -
> -static inline void grant_write_unlock(struct grant_table *gt)
> -{
> - percpu_write_unlock(grant_rwlock, >->lock);
> -}
> -
> /* Create/destroy per-domain grant table context. */
> int grant_table_create(
> struct domain *d);
> @@ -106,33 +49,6 @@ void
> gnttab_release_mappings(
> struct domain *d);
>
> -/* Increase the size of a domain's grant table.
> - * Caller must hold d's grant table write lock.
> - */
> -int
> -gnttab_grow_table(struct domain *d, unsigned int req_nr_frames);
> -
> -/* Number of grant table frames. Caller must hold d's grant table lock. */
> -static inline unsigned int nr_grant_frames(struct grant_table *gt)
> -{
> - return gt->nr_grant_frames;
> -}
> -
> -/* Number of status grant table frames. Caller must hold d's gr. table lock.*/
> -static inline unsigned int nr_status_frames(struct grant_table *gt)
> -{
> - return gt->nr_status_frames;
> -}
> -
> -#define GRANT_STATUS_PER_PAGE (PAGE_SIZE / sizeof(grant_status_t))
> -#define GRANT_PER_PAGE (PAGE_SIZE / sizeof(grant_entry_v2_t))
> -/* Number of grant table status entries. Caller must hold d's gr. table lock.*/
> -static inline unsigned int grant_to_status_frames(int grant_frames)
> -{
> - return (grant_frames * GRANT_PER_PAGE + GRANT_STATUS_PER_PAGE
> - 1) /
> - GRANT_STATUS_PER_PAGE;
> -}
> -
> int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
> gfn_t *gfn, uint16_t *status);
>
> --
> 2.12.3
>
>
* Re: [PATCH v2 2/8] xen: clean up grant_table.h
2017-09-06 8:26 ` [PATCH v2 2/8] xen: clean up grant_table.h Juergen Gross
2017-09-06 8:37 ` Paul Durrant
@ 2017-09-06 10:11 ` Wei Liu
2017-09-06 10:12 ` Juergen Gross
1 sibling, 1 reply; 23+ messages in thread
From: Wei Liu @ 2017-09-06 10:11 UTC (permalink / raw)
To: Juergen Gross
Cc: tim, sstabellini, wei.liu2, George.Dunlap, andrew.cooper3,
ian.jackson, xen-devel, jbeulich
On Wed, Sep 06, 2017 at 10:26:26AM +0200, Juergen Gross wrote:
> +struct grant_table {
> + /*
> + * Lock protecting updates to grant table state (version, active
> + * entry list, etc.)
> + */
> + percpu_rwlock_t lock;
> + /* Table size. Number of frames shared with guest */
> + unsigned int nr_grant_frames;
> + /* Shared grant table (see include/public/grant_table.h). */
> + union {
> + void **shared_raw;
> + struct grant_entry_v1 **shared_v1;
> + union grant_entry_v2 **shared_v2;
> + };
> + /* Number of grant status frames shared with guest (for version 2) */
> + unsigned int nr_status_frames;
> + /* State grant table (see include/public/grant_table.h). */
> + grant_status_t **status;
> + /* Active grant table. */
> + struct active_grant_entry **active;
> + /* Mapping tracking table per vcpu. */
> + struct grant_mapping **maptrack;
> + unsigned int maptrack_limit;
> + /* Lock protecting the maptrack limit */
> + spinlock_t maptrack_lock;
> + /* The defined versions are 1 and 2. Set to 0 if we don't know
> + what version to use yet. */
Could you please fix the style of the comment?
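I.e. presumably the usual multi-line style used elsewhere in this file,
something like:

/*
 * The defined versions are 1 and 2. Set to 0 if we don't know
 * what version to use yet.
 */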
Otherwise:
Reviewed-by: Wei Liu <wei.liu2@citrix.com>
* Re: [PATCH v2 2/8] xen: clean up grant_table.h
2017-09-06 10:11 ` Wei Liu
@ 2017-09-06 10:12 ` Juergen Gross
0 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 10:12 UTC (permalink / raw)
To: Wei Liu
Cc: tim, sstabellini, George.Dunlap, andrew.cooper3, ian.jackson,
xen-devel, jbeulich
On 06/09/17 12:11, Wei Liu wrote:
> On Wed, Sep 06, 2017 at 10:26:26AM +0200, Juergen Gross wrote:
>> +struct grant_table {
>> + /*
>> + * Lock protecting updates to grant table state (version, active
>> + * entry list, etc.)
>> + */
>> + percpu_rwlock_t lock;
>> + /* Table size. Number of frames shared with guest */
>> + unsigned int nr_grant_frames;
>> + /* Shared grant table (see include/public/grant_table.h). */
>> + union {
>> + void **shared_raw;
>> + struct grant_entry_v1 **shared_v1;
>> + union grant_entry_v2 **shared_v2;
>> + };
>> + /* Number of grant status frames shared with guest (for version 2) */
>> + unsigned int nr_status_frames;
>> + /* State grant table (see include/public/grant_table.h). */
>> + grant_status_t **status;
>> + /* Active grant table. */
>> + struct active_grant_entry **active;
>> + /* Mapping tracking table per vcpu. */
>> + struct grant_mapping **maptrack;
>> + unsigned int maptrack_limit;
>> + /* Lock protecting the maptrack limit */
>> + spinlock_t maptrack_lock;
>> + /* The defined versions are 1 and 2. Set to 0 if we don't know
>> + what version to use yet. */
>
> Could you please fix the style of the comment?
Okay.
Juergen
>
> Otherwise:
> Reviewed-by: Wei Liu <wei.liu2@citrix.com>
>
* [PATCH v2 3/8] xen: delay allocation of grant table sub structures
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
2017-09-06 8:26 ` [PATCH v2 1/8] xen: move XENMAPSPACE_grant_table code into grant_table.c Juergen Gross
2017-09-06 8:26 ` [PATCH v2 2/8] xen: clean up grant_table.h Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 8:44 ` Paul Durrant
2017-09-06 8:26 ` [PATCH v2 4/8] xen: make grant resource limits per domain Juergen Gross
` (4 subsequent siblings)
7 siblings, 1 reply; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Delay the allocation of the grant table sub structures in order to
allow modifying the parameters needed for sizing those structures on a
per-domain basis. The allocation now happens either from
gnttab_setup_table() or just before the domain is started for the first
time.
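To illustrate, a stand-alone sketch of the idempotent late-allocation
pattern (stand-in types, sizes and error handling; the real
grant_table_init() is in the diff below):

#include <stdio.h>
#include <stdlib.h>

#define INITIAL_FRAMES 4

struct table {
    unsigned int nr_frames;                /* 0 means "not initialised yet" */
    void **frames;
};

/* Safe to call from more than one place; only the first call allocates. */
static int table_init(struct table *t)
{
    unsigned int i;

    if ( t->nr_frames )
        return 0;                          /* already initialised */

    t->frames = calloc(INITIAL_FRAMES, sizeof(*t->frames));
    if ( !t->frames )
        return -1;                         /* -ENOMEM in the real code */

    for ( i = 0; i < INITIAL_FRAMES; i++ )
    {
        t->frames[i] = calloc(1, 4096);    /* stand-in for a page allocation */
        if ( !t->frames[i] )
        {
            while ( i-- )
                free(t->frames[i]);
            free(t->frames);
            t->frames = NULL;
            return -1;
        }
    }

    t->nr_frames = INITIAL_FRAMES;
    return 0;
}

int main(void)
{
    struct table t = { 0, NULL };

    /* Called once from "setup" and once before "first start": allocates once. */
    if ( table_init(&t) || table_init(&t) )
        return 1;

    printf("frames: %u\n", t.nr_frames);
    return 0;
}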
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/domain.c | 17 +++++-
xen/common/grant_table.c | 139 ++++++++++++++++++++++++------------------
xen/include/xen/grant_table.h | 2 +
3 files changed, 97 insertions(+), 61 deletions(-)
diff --git a/xen/common/domain.c b/xen/common/domain.c
index 5aebcf265f..11eb1778a3 100644
--- a/xen/common/domain.c
+++ b/xen/common/domain.c
@@ -363,6 +363,9 @@ struct domain *domain_create(domid_t domid, unsigned int domcr_flags,
goto fail;
init_status |= INIT_gnttab;
+ if ( domid == 0 && grant_table_init(d) )
+ goto fail;
+
poolid = 0;
err = -ENOMEM;
@@ -998,7 +1001,8 @@ int __domain_pause_by_systemcontroller(struct domain *d,
prev = cmpxchg(&d->controller_pause_count, old, new);
} while ( prev != old );
- pause_fn(d);
+ if ( pause_fn )
+ pause_fn(d);
return 0;
}
@@ -1006,6 +1010,7 @@ int __domain_pause_by_systemcontroller(struct domain *d,
int domain_unpause_by_systemcontroller(struct domain *d)
{
int old, new, prev = d->controller_pause_count;
+ int ret;
do
{
@@ -1029,8 +1034,16 @@ int domain_unpause_by_systemcontroller(struct domain *d)
* Creation is considered finished when the controller reference count
* first drops to 0.
*/
- if ( new == 0 )
+ if ( new == 0 && !d->creation_finished )
+ {
+ ret = grant_table_init(d);
+ if ( ret )
+ {
+ __domain_pause_by_systemcontroller(d, NULL);
+ return ret;
+ }
d->creation_finished = true;
+ }
domain_unpause(d);
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index a94dfdda7b..b50c334f70 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -1719,6 +1719,78 @@ active_alloc_failed:
return 0;
}
+int
+grant_table_init(struct domain *d)
+{
+ struct grant_table *gt = d->grant_table;
+ unsigned int i, j;
+
+ if ( gt->nr_grant_frames )
+ return 0;
+
+ gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
+
+ /* Active grant table. */
+ if ( (gt->active = xzalloc_array(struct active_grant_entry *,
+ max_nr_active_grant_frames)) == NULL )
+ goto no_mem_1;
+ for ( i = 0;
+ i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ {
+ if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
+ goto no_mem_2;
+ clear_page(gt->active[i]);
+ for ( j = 0; j < ACGNT_PER_PAGE; j++ )
+ spin_lock_init(>->active[i][j].lock);
+ }
+
+ /* Tracking of mapped foreign frames table */
+ gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
+ if ( gt->maptrack == NULL )
+ goto no_mem_2;
+
+ /* Shared grant table. */
+ if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
+ goto no_mem_3;
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ {
+ if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
+ goto no_mem_4;
+ clear_page(gt->shared_raw[i]);
+ }
+
+ /* Status pages for grant table - for version 2 */
+ gt->status = xzalloc_array(grant_status_t *,
+ grant_to_status_frames(max_grant_frames));
+ if ( gt->status == NULL )
+ goto no_mem_4;
+
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ gnttab_create_shared_page(d, gt, i);
+
+ gt->nr_status_frames = 0;
+
+ return 0;
+
+ no_mem_4:
+ for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ free_xenheap_page(gt->shared_raw[i]);
+ xfree(gt->shared_raw);
+ gt->shared_raw = NULL;
+ no_mem_3:
+ vfree(gt->maptrack);
+ gt->maptrack = NULL;
+ no_mem_2:
+ for ( i = 0;
+ i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ free_xenheap_page(gt->active[i]);
+ xfree(gt->active);
+ gt->active = NULL;
+ no_mem_1:
+ gt->nr_grant_frames = 0;
+ return -ENOMEM;
+}
+
static long
gnttab_setup_table(
XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, unsigned int count)
@@ -1764,7 +1836,14 @@ gnttab_setup_table(
grant_write_lock(gt);
if ( gt->gt_version == 0 )
+ {
+ if ( grant_table_init(d) )
+ {
+ op.status = GNTST_general_error;
+ goto unlock;
+ }
gt->gt_version = 1;
+ }
if ( (op.nr_frames > nr_grant_frames(gt) ||
((gt->gt_version > 1) &&
@@ -3378,75 +3457,17 @@ grant_table_create(
struct domain *d)
{
struct grant_table *t;
- unsigned int i, j;
if ( (t = xzalloc(struct grant_table)) == NULL )
- goto no_mem_0;
+ return -ENOMEM;
/* Simple stuff. */
percpu_rwlock_resource_init(&t->lock, grant_rwlock);
spin_lock_init(&t->maptrack_lock);
- t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
-
- /* Active grant table. */
- if ( (t->active = xzalloc_array(struct active_grant_entry *,
- max_nr_active_grant_frames)) == NULL )
- goto no_mem_1;
- for ( i = 0;
- i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
- {
- if ( (t->active[i] = alloc_xenheap_page()) == NULL )
- goto no_mem_2;
- clear_page(t->active[i]);
- for ( j = 0; j < ACGNT_PER_PAGE; j++ )
- spin_lock_init(&t->active[i][j].lock);
- }
-
- /* Tracking of mapped foreign frames table */
- t->maptrack = vzalloc(max_maptrack_frames * sizeof(*t->maptrack));
- if ( t->maptrack == NULL )
- goto no_mem_2;
-
- /* Shared grant table. */
- if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
- goto no_mem_3;
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
- {
- if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL )
- goto no_mem_4;
- clear_page(t->shared_raw[i]);
- }
-
- /* Status pages for grant table - for version 2 */
- t->status = xzalloc_array(grant_status_t *,
- grant_to_status_frames(max_grant_frames));
- if ( t->status == NULL )
- goto no_mem_4;
-
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
- gnttab_create_shared_page(d, t, i);
-
- t->nr_status_frames = 0;
/* Okay, install the structure. */
d->grant_table = t;
return 0;
-
- no_mem_4:
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
- free_xenheap_page(t->shared_raw[i]);
- xfree(t->shared_raw);
- no_mem_3:
- vfree(t->maptrack);
- no_mem_2:
- for ( i = 0;
- i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
- free_xenheap_page(t->active[i]);
- xfree(t->active);
- no_mem_1:
- xfree(t);
- no_mem_0:
- return -ENOMEM;
}
void
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index 43b07e60c5..84a8d61616 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -35,6 +35,8 @@ extern unsigned int max_grant_frames;
/* Create/destroy per-domain grant table context. */
int grant_table_create(
struct domain *d);
+int grant_table_init(
+ struct domain *d);
void grant_table_destroy(
struct domain *d);
void grant_table_init_vcpu(struct vcpu *v);
--
2.12.3
* Re: [PATCH v2 3/8] xen: delay allocation of grant table sub structures
2017-09-06 8:26 ` [PATCH v2 3/8] xen: delay allocation of grant table sub structures Juergen Gross
@ 2017-09-06 8:44 ` Paul Durrant
2017-09-06 9:42 ` Juergen Gross
0 siblings, 1 reply; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 8:44 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:26
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table sub
> structures
>
> Delay the allocation of the grant table sub structures in order to
> allow modifying parameters needed for sizing of these structures at a
> per domain basis. Either do it from gnttab_setup_table() or just
> before the domain is started the first time.
Why does it need to be in two places? Could it not just be done on the first call to gnttab_grow_table()?
(My new direct mapping series drops the call to gnttab_setup_table() if the new API is available).
Paul
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> xen/common/domain.c | 17 +++++-
> xen/common/grant_table.c | 139 ++++++++++++++++++++++++----------
> --------
> xen/include/xen/grant_table.h | 2 +
> 3 files changed, 97 insertions(+), 61 deletions(-)
>
> diff --git a/xen/common/domain.c b/xen/common/domain.c
> index 5aebcf265f..11eb1778a3 100644
> --- a/xen/common/domain.c
> +++ b/xen/common/domain.c
> @@ -363,6 +363,9 @@ struct domain *domain_create(domid_t domid,
> unsigned int domcr_flags,
> goto fail;
> init_status |= INIT_gnttab;
>
> + if ( domid == 0 && grant_table_init(d) )
> + goto fail;
> +
> poolid = 0;
>
> err = -ENOMEM;
> @@ -998,7 +1001,8 @@ int __domain_pause_by_systemcontroller(struct
> domain *d,
> prev = cmpxchg(&d->controller_pause_count, old, new);
> } while ( prev != old );
>
> - pause_fn(d);
> + if ( pause_fn )
> + pause_fn(d);
>
> return 0;
> }
> @@ -1006,6 +1010,7 @@ int __domain_pause_by_systemcontroller(struct
> domain *d,
> int domain_unpause_by_systemcontroller(struct domain *d)
> {
> int old, new, prev = d->controller_pause_count;
> + int ret;
>
> do
> {
> @@ -1029,8 +1034,16 @@ int domain_unpause_by_systemcontroller(struct
> domain *d)
> * Creation is considered finished when the controller reference count
> * first drops to 0.
> */
> - if ( new == 0 )
> + if ( new == 0 && !d->creation_finished )
> + {
> + ret = grant_table_init(d);
> + if ( ret )
> + {
> + __domain_pause_by_systemcontroller(d, NULL);
> + return ret;
> + }
> d->creation_finished = true;
> + }
>
> domain_unpause(d);
>
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index a94dfdda7b..b50c334f70 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -1719,6 +1719,78 @@ active_alloc_failed:
> return 0;
> }
>
> +int
> +grant_table_init(struct domain *d)
> +{
> + struct grant_table *gt = d->grant_table;
> + unsigned int i, j;
> +
> + if ( gt->nr_grant_frames )
> + return 0;
> +
> + gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
> +
> + /* Active grant table. */
> + if ( (gt->active = xzalloc_array(struct active_grant_entry *,
> + max_nr_active_grant_frames)) == NULL )
> + goto no_mem_1;
> + for ( i = 0;
> + i <
> num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
> + {
> + if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
> + goto no_mem_2;
> + clear_page(gt->active[i]);
> + for ( j = 0; j < ACGNT_PER_PAGE; j++ )
> + spin_lock_init(&gt->active[i][j].lock);
> + }
> +
> + /* Tracking of mapped foreign frames table */
> + gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
> + if ( gt->maptrack == NULL )
> + goto no_mem_2;
> +
> + /* Shared grant table. */
> + if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL
> )
> + goto no_mem_3;
> + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + {
> + if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
> + goto no_mem_4;
> + clear_page(gt->shared_raw[i]);
> + }
> +
> + /* Status pages for grant table - for version 2 */
> + gt->status = xzalloc_array(grant_status_t *,
> + grant_to_status_frames(max_grant_frames));
> + if ( gt->status == NULL )
> + goto no_mem_4;
> +
> + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + gnttab_create_shared_page(d, gt, i);
> +
> + gt->nr_status_frames = 0;
> +
> + return 0;
> +
> + no_mem_4:
> + for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + free_xenheap_page(gt->shared_raw[i]);
> + xfree(gt->shared_raw);
> + gt->shared_raw = NULL;
> + no_mem_3:
> + vfree(gt->maptrack);
> + gt->maptrack = NULL;
> + no_mem_2:
> + for ( i = 0;
> + i <
> num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
> + free_xenheap_page(gt->active[i]);
> + xfree(gt->active);
> + gt->active = NULL;
> + no_mem_1:
> + gt->nr_grant_frames = 0;
> + return -ENOMEM;
> +}
> +
> static long
> gnttab_setup_table(
> XEN_GUEST_HANDLE_PARAM(gnttab_setup_table_t) uop, unsigned int
> count)
> @@ -1764,7 +1836,14 @@ gnttab_setup_table(
> grant_write_lock(gt);
>
> if ( gt->gt_version == 0 )
> + {
> + if ( grant_table_init(d) )
> + {
> + op.status = GNTST_general_error;
> + goto unlock;
> + }
> gt->gt_version = 1;
> + }
>
> if ( (op.nr_frames > nr_grant_frames(gt) ||
> ((gt->gt_version > 1) &&
> @@ -3378,75 +3457,17 @@ grant_table_create(
> struct domain *d)
> {
> struct grant_table *t;
> - unsigned int i, j;
>
> if ( (t = xzalloc(struct grant_table)) == NULL )
> - goto no_mem_0;
> + return -ENOMEM;
>
> /* Simple stuff. */
> percpu_rwlock_resource_init(&t->lock, grant_rwlock);
> spin_lock_init(&t->maptrack_lock);
> - t->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
> -
> - /* Active grant table. */
> - if ( (t->active = xzalloc_array(struct active_grant_entry *,
> - max_nr_active_grant_frames)) == NULL )
> - goto no_mem_1;
> - for ( i = 0;
> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
> i++ )
> - {
> - if ( (t->active[i] = alloc_xenheap_page()) == NULL )
> - goto no_mem_2;
> - clear_page(t->active[i]);
> - for ( j = 0; j < ACGNT_PER_PAGE; j++ )
> - spin_lock_init(&t->active[i][j].lock);
> - }
> -
> - /* Tracking of mapped foreign frames table */
> - t->maptrack = vzalloc(max_maptrack_frames * sizeof(*t->maptrack));
> - if ( t->maptrack == NULL )
> - goto no_mem_2;
> -
> - /* Shared grant table. */
> - if ( (t->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
> - goto no_mem_3;
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> - {
> - if ( (t->shared_raw[i] = alloc_xenheap_page()) == NULL )
> - goto no_mem_4;
> - clear_page(t->shared_raw[i]);
> - }
> -
> - /* Status pages for grant table - for version 2 */
> - t->status = xzalloc_array(grant_status_t *,
> - grant_to_status_frames(max_grant_frames));
> - if ( t->status == NULL )
> - goto no_mem_4;
> -
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> - gnttab_create_shared_page(d, t, i);
> -
> - t->nr_status_frames = 0;
>
> /* Okay, install the structure. */
> d->grant_table = t;
> return 0;
> -
> - no_mem_4:
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> - free_xenheap_page(t->shared_raw[i]);
> - xfree(t->shared_raw);
> - no_mem_3:
> - vfree(t->maptrack);
> - no_mem_2:
> - for ( i = 0;
> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
> i++ )
> - free_xenheap_page(t->active[i]);
> - xfree(t->active);
> - no_mem_1:
> - xfree(t);
> - no_mem_0:
> - return -ENOMEM;
> }
>
> void
> diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
> index 43b07e60c5..84a8d61616 100644
> --- a/xen/include/xen/grant_table.h
> +++ b/xen/include/xen/grant_table.h
> @@ -35,6 +35,8 @@ extern unsigned int max_grant_frames;
> /* Create/destroy per-domain grant table context. */
> int grant_table_create(
> struct domain *d);
> +int grant_table_init(
> + struct domain *d);
> void grant_table_destroy(
> struct domain *d);
> void grant_table_init_vcpu(struct vcpu *v);
> --
> 2.12.3
>
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH v2 3/8] xen: delay allocation of grant table sub structures
2017-09-06 8:44 ` Paul Durrant
@ 2017-09-06 9:42 ` Juergen Gross
2017-09-06 9:43 ` Paul Durrant
0 siblings, 1 reply; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 9:42 UTC (permalink / raw)
To: Paul Durrant, xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
On 06/09/17 10:44, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
>> Juergen Gross
>> Sent: 06 September 2017 09:26
>> To: xen-devel@lists.xen.org
>> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
>> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
>> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
>> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
>> jbeulich@suse.com
>> Subject: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table sub
>> structures
>>
>> Delay the allocation of the grant table sub structures in order to
>> allow modifying parameters needed for sizing of these structures at a
>> per domain basis. Either do it from gnttab_setup_table() or just
>> before the domain is started the first time.
>
> Why does it need to be in two places? Could it not just be done on the first call to gnttab_grow_table()?
Could be difficult for e.g. a xenstore domain: it might make use of
the maptrack frames before that, so it would explode then.
It should be possible, however, to move the call to grant_table_init()
from gnttab_setup_table() to gnttab_grow_table().
Juergen
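A rough sketch of the alternative mentioned above, i.e. moving the call from gnttab_setup_table() into gnttab_grow_table(); this is illustrative only and assumes the function keeps the existing convention of returning 0 on failure:

    int gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
    {
        struct grant_table *gt = d->grant_table;

        /* hypothetical: initialise the sub structures on first use */
        if ( !gt->nr_grant_frames && grant_table_init(d) )
            return 0;

        /* ... existing expansion logic unchanged ... */
    }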
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH v2 3/8] xen: delay allocation of grant table sub structures
2017-09-06 9:42 ` Juergen Gross
@ 2017-09-06 9:43 ` Paul Durrant
0 siblings, 0 replies; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 9:43 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Juergen Gross [mailto:jgross@suse.com]
> Sent: 06 September 2017 10:42
> To: Paul Durrant <Paul.Durrant@citrix.com>; xen-devel@lists.xen.org
> Cc: sstabellini@kernel.org; Wei Liu <wei.liu2@citrix.com>; George Dunlap
> <George.Dunlap@citrix.com>; Andrew Cooper
> <Andrew.Cooper3@citrix.com>; Ian Jackson <Ian.Jackson@citrix.com>; Tim
> (Xen.org) <tim@xen.org>; jbeulich@suse.com
> Subject: Re: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table
> sub structures
>
> On 06/09/17 10:44, Paul Durrant wrote:
> >> -----Original Message-----
> >> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> >> Juergen Gross
> >> Sent: 06 September 2017 09:26
> >> To: xen-devel@lists.xen.org
> >> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> >> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> >> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> >> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> >> jbeulich@suse.com
> >> Subject: [Xen-devel] [PATCH v2 3/8] xen: delay allocation of grant table
> sub
> >> structures
> >>
> >> Delay the allocation of the grant table sub structures in order to
> >> allow modifying parameters needed for sizing of these structures at a
> >> per domain basis. Either do it from gnttab_setup_table() or just
> >> before the domain is started the first time.
> >
> > Why does it need to be in two places? Could it not just be done on the first
> call to gnttab_grow_table()?
>
> Could be difficult for e.g. a xenstore domain: it might make use of
> the maptrack frames before that, so it would explode then.
>
> It should be possible, however, to move the call to grant_table_init()
> from gnttab_setup_table() to gnttab_grow_table().
>
Ok. Thanks,
Paul
>
> Juergen
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH v2 4/8] xen: make grant resource limits per domain
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
` (2 preceding siblings ...)
2017-09-06 8:26 ` [PATCH v2 3/8] xen: delay allocation of grant table sub structures Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 9:10 ` Paul Durrant
2017-09-06 8:26 ` [PATCH v2 5/8] xen: double default grant frame limit for huge hosts Juergen Gross
` (3 subsequent siblings)
7 siblings, 1 reply; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Instead of using the same global resource limits of grant tables (max.
number of grant frames, max. number of maptrack frames) for all domains
make these limits per domain. This will allow setting individual limits
in the future. For now initialize the per domain limits with the global
values.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/grant_table.c | 83 +++++++++++++++++++++++++++---------------------
1 file changed, 46 insertions(+), 37 deletions(-)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index b50c334f70..ae5f1262e8 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -69,6 +69,9 @@ struct grant_table {
/* The defined versions are 1 and 2. Set to 0 if we don't know
what version to use yet. */
unsigned gt_version;
+ /* Resource limits of the domain. */
+ unsigned int max_grant_frames;
+ unsigned int max_maptrack_frames;
};
#ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
@@ -285,8 +288,8 @@ num_act_frames_from_sha_frames(const unsigned int num)
return DIV_ROUND_UP(num * sha_per_page, ACGNT_PER_PAGE);
}
-#define max_nr_active_grant_frames \
- num_act_frames_from_sha_frames(max_grant_frames)
+#define max_nr_active_grant_frames(gt) \
+ num_act_frames_from_sha_frames(gt->max_grant_frames)
static inline unsigned int
nr_active_grant_frames(struct grant_table *gt)
@@ -524,7 +527,7 @@ get_maptrack_handle(
* out of memory, try stealing an entry from another VCPU (in case the
* guest isn't mapping across its VCPUs evenly).
*/
- if ( nr_maptrack_frames(lgt) < max_maptrack_frames )
+ if ( nr_maptrack_frames(lgt) < lgt->max_maptrack_frames )
new_mt = alloc_xenheap_page();
if ( !new_mt )
@@ -1663,7 +1666,7 @@ gnttab_grow_table(struct domain *d, unsigned int req_nr_frames)
struct grant_table *gt = d->grant_table;
unsigned int i, j;
- ASSERT(req_nr_frames <= max_grant_frames);
+ ASSERT(req_nr_frames <= gt->max_grant_frames);
gdprintk(XENLOG_INFO,
"Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
@@ -1728,14 +1731,15 @@ grant_table_init(struct domain *d)
if ( gt->nr_grant_frames )
return 0;
- gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
+ gt->nr_grant_frames = min_t(unsigned int, INITIAL_NR_GRANT_FRAMES,
+ gt->max_grant_frames);
/* Active grant table. */
if ( (gt->active = xzalloc_array(struct active_grant_entry *,
- max_nr_active_grant_frames)) == NULL )
+ max_nr_active_grant_frames(gt))) == NULL )
goto no_mem_1;
for ( i = 0;
- i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
{
if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
goto no_mem_2;
@@ -1745,14 +1749,14 @@ grant_table_init(struct domain *d)
}
/* Tracking of mapped foreign frames table */
- gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
+ gt->maptrack = vzalloc(gt->max_maptrack_frames * sizeof(*gt->maptrack));
if ( gt->maptrack == NULL )
goto no_mem_2;
/* Shared grant table. */
- if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
+ if ( (gt->shared_raw = xzalloc_array(void *, gt->max_grant_frames)) == NULL )
goto no_mem_3;
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ for ( i = 0; i < gt->nr_grant_frames; i++ )
{
if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
goto no_mem_4;
@@ -1761,11 +1765,11 @@ grant_table_init(struct domain *d)
/* Status pages for grant table - for version 2 */
gt->status = xzalloc_array(grant_status_t *,
- grant_to_status_frames(max_grant_frames));
+ grant_to_status_frames(gt->max_grant_frames));
if ( gt->status == NULL )
goto no_mem_4;
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ for ( i = 0; i < gt->nr_grant_frames; i++ )
gnttab_create_shared_page(d, gt, i);
gt->nr_status_frames = 0;
@@ -1773,7 +1777,7 @@ grant_table_init(struct domain *d)
return 0;
no_mem_4:
- for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
+ for ( i = 0; i < gt->nr_grant_frames; i++ )
free_xenheap_page(gt->shared_raw[i]);
xfree(gt->shared_raw);
gt->shared_raw = NULL;
@@ -1782,7 +1786,7 @@ grant_table_init(struct domain *d)
gt->maptrack = NULL;
no_mem_2:
for ( i = 0;
- i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES); i++ )
+ i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
free_xenheap_page(gt->active[i]);
xfree(gt->active);
gt->active = NULL;
@@ -1807,15 +1811,6 @@ gnttab_setup_table(
if ( unlikely(copy_from_guest(&op, uop, 1)) )
return -EFAULT;
- if ( unlikely(op.nr_frames > max_grant_frames) )
- {
- gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
- " per domain.\n",
- max_grant_frames);
- op.status = GNTST_general_error;
- goto out;
- }
-
if ( !guest_handle_okay(op.frame_list, op.nr_frames) )
return -EFAULT;
@@ -1835,6 +1830,15 @@ gnttab_setup_table(
gt = d->grant_table;
grant_write_lock(gt);
+ if ( unlikely(op.nr_frames > gt->max_grant_frames) )
+ {
+ gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table frames"
+ " per domain.\n",
+ gt->max_grant_frames);
+ op.status = GNTST_general_error;
+ goto unlock;
+ }
+
if ( gt->gt_version == 0 )
{
if ( grant_table_init(d) )
@@ -1852,7 +1856,7 @@ gnttab_setup_table(
{
gdprintk(XENLOG_INFO,
"Expand grant table to %u failed. Current: %u Max: %u\n",
- op.nr_frames, nr_grant_frames(gt), max_grant_frames);
+ op.nr_frames, nr_grant_frames(gt), gt->max_grant_frames);
op.status = GNTST_general_error;
goto unlock;
}
@@ -1887,6 +1891,7 @@ gnttab_query_size(
{
struct gnttab_query_size op;
struct domain *d;
+ struct grant_table *gt;
if ( count != 1 )
return -EINVAL;
@@ -1907,13 +1912,15 @@ gnttab_query_size(
goto out;
}
- grant_read_lock(d->grant_table);
+ gt = d->grant_table;
- op.nr_frames = nr_grant_frames(d->grant_table);
- op.max_nr_frames = max_grant_frames;
+ grant_read_lock(gt);
+
+ op.nr_frames = nr_grant_frames(gt);
+ op.max_nr_frames = gt->max_grant_frames;
op.status = GNTST_okay;
- grant_read_unlock(d->grant_table);
+ grant_read_unlock(gt);
out:
if ( d )
@@ -3464,6 +3471,8 @@ grant_table_create(
/* Simple stuff. */
percpu_rwlock_resource_init(&t->lock, grant_rwlock);
spin_lock_init(&t->maptrack_lock);
+ t->max_grant_frames = max_grant_frames;
+ t->max_maptrack_frames = max_maptrack_frames;
/* Okay, install the structure. */
d->grant_table = t;
@@ -3727,7 +3736,7 @@ int gnttab_map_frame(struct domain *d, unsigned long idx, gfn_t gfn,
}
else
{
- if ( (idx >= nr_grant_frames(gt)) && (idx < max_grant_frames) )
+ if ( (idx >= nr_grant_frames(gt)) && (idx < gt->max_grant_frames) )
gnttab_grow_table(d, idx + 1);
if ( idx < nr_grant_frames(gt) )
@@ -3754,6 +3763,12 @@ static void gnttab_usage_print(struct domain *rd)
grant_read_lock(gt);
+ printk("grant-table for remote domain:%5d (v%d)\n"
+ " %d frames (%d max), %d maptrack frames (%d max)\n",
+ rd->domain_id, gt->gt_version,
+ nr_grant_frames(gt), gt->max_grant_frames,
+ nr_maptrack_frames(gt), gt->max_maptrack_frames);
+
for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
{
struct active_grant_entry *act;
@@ -3781,12 +3796,7 @@ static void gnttab_usage_print(struct domain *rd)
status = status_entry(gt, ref);
}
- if ( first )
- {
- printk("grant-table for remote domain:%5d (v%d)\n",
- rd->domain_id, gt->gt_version);
- first = 0;
- }
+ first = 0;
/* [0xXXX] ddddd 0xXXXXXX 0xXXXXXXXX ddddd 0xXXXXXX 0xXX */
printk("[0x%03x] %5d 0x%06lx 0x%08x %5d 0x%06"PRIx64" 0x%02x\n",
@@ -3798,8 +3808,7 @@ static void gnttab_usage_print(struct domain *rd)
grant_read_unlock(gt);
if ( first )
- printk("grant-table for remote domain:%5d ... "
- "no active grant table entries\n", rd->domain_id);
+ printk("no active grant table entries\n");
}
static void gnttab_usage_print_all(unsigned char key)
--
2.12.3
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [PATCH v2 4/8] xen: make grant resource limits per domain
2017-09-06 8:26 ` [PATCH v2 4/8] xen: make grant resource limits per domain Juergen Gross
@ 2017-09-06 9:10 ` Paul Durrant
2017-09-06 9:42 ` Juergen Gross
0 siblings, 1 reply; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 9:10 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:26
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 4/8] xen: make grant resource limits per
> domain
>
> Instead of using the same global resource limits of grant tables (max.
> number of grant frames, max. number of maptrack frames) for all domains
> make these limits per domain. This will allow setting individual limits
> in the future. For now initialize the per domain limits with the global
> values.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
> ---
> xen/common/grant_table.c | 83 +++++++++++++++++++++++++++----------
> -----------
> 1 file changed, 46 insertions(+), 37 deletions(-)
>
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index b50c334f70..ae5f1262e8 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -69,6 +69,9 @@ struct grant_table {
> /* The defined versions are 1 and 2. Set to 0 if we don't know
> what version to use yet. */
> unsigned gt_version;
> + /* Resource limits of the domain. */
> + unsigned int max_grant_frames;
> + unsigned int max_maptrack_frames;
> };
>
> #ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
> @@ -285,8 +288,8 @@ num_act_frames_from_sha_frames(const unsigned
> int num)
> return DIV_ROUND_UP(num * sha_per_page, ACGNT_PER_PAGE);
> }
>
> -#define max_nr_active_grant_frames \
> - num_act_frames_from_sha_frames(max_grant_frames)
> +#define max_nr_active_grant_frames(gt) \
> + num_act_frames_from_sha_frames(gt->max_grant_frames)
>
> static inline unsigned int
> nr_active_grant_frames(struct grant_table *gt)
> @@ -524,7 +527,7 @@ get_maptrack_handle(
> * out of memory, try stealing an entry from another VCPU (in case the
> * guest isn't mapping across its VCPUs evenly).
> */
> - if ( nr_maptrack_frames(lgt) < max_maptrack_frames )
> + if ( nr_maptrack_frames(lgt) < lgt->max_maptrack_frames )
> new_mt = alloc_xenheap_page();
>
> if ( !new_mt )
> @@ -1663,7 +1666,7 @@ gnttab_grow_table(struct domain *d, unsigned int
> req_nr_frames)
> struct grant_table *gt = d->grant_table;
> unsigned int i, j;
>
> - ASSERT(req_nr_frames <= max_grant_frames);
> + ASSERT(req_nr_frames <= gt->max_grant_frames);
>
> gdprintk(XENLOG_INFO,
> "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
> @@ -1728,14 +1731,15 @@ grant_table_init(struct domain *d)
> if ( gt->nr_grant_frames )
> return 0;
>
> - gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
> + gt->nr_grant_frames = min_t(unsigned int,
> INITIAL_NR_GRANT_FRAMES,
> + gt->max_grant_frames);
>
> /* Active grant table. */
> if ( (gt->active = xzalloc_array(struct active_grant_entry *,
> - max_nr_active_grant_frames)) == NULL )
> + max_nr_active_grant_frames(gt))) == NULL )
> goto no_mem_1;
> for ( i = 0;
> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
> i++ )
> + i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
> {
> if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
> goto no_mem_2;
> @@ -1745,14 +1749,14 @@ grant_table_init(struct domain *d)
> }
>
> /* Tracking of mapped foreign frames table */
> - gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
> + gt->maptrack = vzalloc(gt->max_maptrack_frames * sizeof(*gt-
> >maptrack));
> if ( gt->maptrack == NULL )
> goto no_mem_2;
>
> /* Shared grant table. */
> - if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
> + if ( (gt->shared_raw = xzalloc_array(void *, gt->max_grant_frames)) ==
> NULL )
> goto no_mem_3;
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + for ( i = 0; i < gt->nr_grant_frames; i++ )
> {
> if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
> goto no_mem_4;
> @@ -1761,11 +1765,11 @@ grant_table_init(struct domain *d)
>
> /* Status pages for grant table - for version 2 */
> gt->status = xzalloc_array(grant_status_t *,
> - grant_to_status_frames(max_grant_frames));
> + grant_to_status_frames(gt->max_grant_frames));
> if ( gt->status == NULL )
> goto no_mem_4;
>
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + for ( i = 0; i < gt->nr_grant_frames; i++ )
> gnttab_create_shared_page(d, gt, i);
>
> gt->nr_status_frames = 0;
> @@ -1773,7 +1777,7 @@ grant_table_init(struct domain *d)
> return 0;
>
> no_mem_4:
> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
> + for ( i = 0; i < gt->nr_grant_frames; i++ )
> free_xenheap_page(gt->shared_raw[i]);
> xfree(gt->shared_raw);
> gt->shared_raw = NULL;
> @@ -1782,7 +1786,7 @@ grant_table_init(struct domain *d)
> gt->maptrack = NULL;
> no_mem_2:
> for ( i = 0;
> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
> i++ )
> + i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
> free_xenheap_page(gt->active[i]);
> xfree(gt->active);
> gt->active = NULL;
> @@ -1807,15 +1811,6 @@ gnttab_setup_table(
> if ( unlikely(copy_from_guest(&op, uop, 1)) )
> return -EFAULT;
>
> - if ( unlikely(op.nr_frames > max_grant_frames) )
> - {
> - gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table
> frames"
> - " per domain.\n",
> - max_grant_frames);
> - op.status = GNTST_general_error;
> - goto out;
> - }
> -
> if ( !guest_handle_okay(op.frame_list, op.nr_frames) )
> return -EFAULT;
>
> @@ -1835,6 +1830,15 @@ gnttab_setup_table(
> gt = d->grant_table;
> grant_write_lock(gt);
>
> + if ( unlikely(op.nr_frames > gt->max_grant_frames) )
> + {
> + gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table
> frames"
> + " per domain.\n",
> + gt->max_grant_frames);
The message should probably now say that the domain has only been provisioned with X grant frames, rather than implying that the limit is still global as it does.
Otherwise LGTM so with that tweak...
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> + op.status = GNTST_general_error;
> + goto unlock;
> + }
> +
> if ( gt->gt_version == 0 )
> {
> if ( grant_table_init(d) )
> @@ -1852,7 +1856,7 @@ gnttab_setup_table(
> {
> gdprintk(XENLOG_INFO,
> "Expand grant table to %u failed. Current: %u Max: %u\n",
> - op.nr_frames, nr_grant_frames(gt), max_grant_frames);
> + op.nr_frames, nr_grant_frames(gt), gt->max_grant_frames);
> op.status = GNTST_general_error;
> goto unlock;
> }
> @@ -1887,6 +1891,7 @@ gnttab_query_size(
> {
> struct gnttab_query_size op;
> struct domain *d;
> + struct grant_table *gt;
>
> if ( count != 1 )
> return -EINVAL;
> @@ -1907,13 +1912,15 @@ gnttab_query_size(
> goto out;
> }
>
> - grant_read_lock(d->grant_table);
> + gt = d->grant_table;
>
> - op.nr_frames = nr_grant_frames(d->grant_table);
> - op.max_nr_frames = max_grant_frames;
> + grant_read_lock(gt);
> +
> + op.nr_frames = nr_grant_frames(gt);
> + op.max_nr_frames = gt->max_grant_frames;
> op.status = GNTST_okay;
>
> - grant_read_unlock(d->grant_table);
> + grant_read_unlock(gt);
>
> out:
> if ( d )
> @@ -3464,6 +3471,8 @@ grant_table_create(
> /* Simple stuff. */
> percpu_rwlock_resource_init(&t->lock, grant_rwlock);
> spin_lock_init(&t->maptrack_lock);
> + t->max_grant_frames = max_grant_frames;
> + t->max_maptrack_frames = max_maptrack_frames;
>
> /* Okay, install the structure. */
> d->grant_table = t;
> @@ -3727,7 +3736,7 @@ int gnttab_map_frame(struct domain *d, unsigned
> long idx, gfn_t gfn,
> }
> else
> {
> - if ( (idx >= nr_grant_frames(gt)) && (idx < max_grant_frames) )
> + if ( (idx >= nr_grant_frames(gt)) && (idx < gt->max_grant_frames) )
> gnttab_grow_table(d, idx + 1);
>
> if ( idx < nr_grant_frames(gt) )
> @@ -3754,6 +3763,12 @@ static void gnttab_usage_print(struct domain *rd)
>
> grant_read_lock(gt);
>
> + printk("grant-table for remote domain:%5d (v%d)\n"
> + " %d frames (%d max), %d maptrack frames (%d max)\n",
> + rd->domain_id, gt->gt_version,
> + nr_grant_frames(gt), gt->max_grant_frames,
> + nr_maptrack_frames(gt), gt->max_maptrack_frames);
> +
> for ( ref = 0; ref != nr_grant_entries(gt); ref++ )
> {
> struct active_grant_entry *act;
> @@ -3781,12 +3796,7 @@ static void gnttab_usage_print(struct domain *rd)
> status = status_entry(gt, ref);
> }
>
> - if ( first )
> - {
> - printk("grant-table for remote domain:%5d (v%d)\n",
> - rd->domain_id, gt->gt_version);
> - first = 0;
> - }
> + first = 0;
>
> /* [0xXXX] ddddd 0xXXXXXX 0xXXXXXXXX ddddd 0xXXXXXX 0xXX
> */
> printk("[0x%03x] %5d 0x%06lx 0x%08x %5d 0x%06"PRIx64"
> 0x%02x\n",
> @@ -3798,8 +3808,7 @@ static void gnttab_usage_print(struct domain *rd)
> grant_read_unlock(gt);
>
> if ( first )
> - printk("grant-table for remote domain:%5d ... "
> - "no active grant table entries\n", rd->domain_id);
> + printk("no active grant table entries\n");
> }
>
> static void gnttab_usage_print_all(unsigned char key)
> --
> 2.12.3
>
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH v2 4/8] xen: make grant resource limits per domain
2017-09-06 9:10 ` Paul Durrant
@ 2017-09-06 9:42 ` Juergen Gross
0 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 9:42 UTC (permalink / raw)
To: Paul Durrant, xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
On 06/09/17 11:10, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
>> Juergen Gross
>> Sent: 06 September 2017 09:26
>> To: xen-devel@lists.xen.org
>> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
>> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
>> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
>> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
>> jbeulich@suse.com
>> Subject: [Xen-devel] [PATCH v2 4/8] xen: make grant resource limits per
>> domain
>>
>> Instead of using the same global resource limits of grant tables (max.
>> number of grant frames, max. number of maptrack frames) for all domains
>> make these limits per domain. This will allow setting individual limits
>> in the future. For now initialize the per domain limits with the global
>> values.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>> ---
>> xen/common/grant_table.c | 83 +++++++++++++++++++++++++++----------
>> -----------
>> 1 file changed, 46 insertions(+), 37 deletions(-)
>>
>> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
>> index b50c334f70..ae5f1262e8 100644
>> --- a/xen/common/grant_table.c
>> +++ b/xen/common/grant_table.c
>> @@ -69,6 +69,9 @@ struct grant_table {
>> /* The defined versions are 1 and 2. Set to 0 if we don't know
>> what version to use yet. */
>> unsigned gt_version;
>> + /* Resource limits of the domain. */
>> + unsigned int max_grant_frames;
>> + unsigned int max_maptrack_frames;
>> };
>>
>> #ifndef DEFAULT_MAX_NR_GRANT_FRAMES /* to allow arch to override */
>> @@ -285,8 +288,8 @@ num_act_frames_from_sha_frames(const unsigned
>> int num)
>> return DIV_ROUND_UP(num * sha_per_page, ACGNT_PER_PAGE);
>> }
>>
>> -#define max_nr_active_grant_frames \
>> - num_act_frames_from_sha_frames(max_grant_frames)
>> +#define max_nr_active_grant_frames(gt) \
>> + num_act_frames_from_sha_frames(gt->max_grant_frames)
>>
>> static inline unsigned int
>> nr_active_grant_frames(struct grant_table *gt)
>> @@ -524,7 +527,7 @@ get_maptrack_handle(
>> * out of memory, try stealing an entry from another VCPU (in case the
>> * guest isn't mapping across its VCPUs evenly).
>> */
>> - if ( nr_maptrack_frames(lgt) < max_maptrack_frames )
>> + if ( nr_maptrack_frames(lgt) < lgt->max_maptrack_frames )
>> new_mt = alloc_xenheap_page();
>>
>> if ( !new_mt )
>> @@ -1663,7 +1666,7 @@ gnttab_grow_table(struct domain *d, unsigned int
>> req_nr_frames)
>> struct grant_table *gt = d->grant_table;
>> unsigned int i, j;
>>
>> - ASSERT(req_nr_frames <= max_grant_frames);
>> + ASSERT(req_nr_frames <= gt->max_grant_frames);
>>
>> gdprintk(XENLOG_INFO,
>> "Expanding dom (%d) grant table from (%d) to (%d) frames.\n",
>> @@ -1728,14 +1731,15 @@ grant_table_init(struct domain *d)
>> if ( gt->nr_grant_frames )
>> return 0;
>>
>> - gt->nr_grant_frames = INITIAL_NR_GRANT_FRAMES;
>> + gt->nr_grant_frames = min_t(unsigned int,
>> INITIAL_NR_GRANT_FRAMES,
>> + gt->max_grant_frames);
>>
>> /* Active grant table. */
>> if ( (gt->active = xzalloc_array(struct active_grant_entry *,
>> - max_nr_active_grant_frames)) == NULL )
>> + max_nr_active_grant_frames(gt))) == NULL )
>> goto no_mem_1;
>> for ( i = 0;
>> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
>> i++ )
>> + i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
>> {
>> if ( (gt->active[i] = alloc_xenheap_page()) == NULL )
>> goto no_mem_2;
>> @@ -1745,14 +1749,14 @@ grant_table_init(struct domain *d)
>> }
>>
>> /* Tracking of mapped foreign frames table */
>> - gt->maptrack = vzalloc(max_maptrack_frames * sizeof(*gt->maptrack));
>> + gt->maptrack = vzalloc(gt->max_maptrack_frames * sizeof(*gt-
>>> maptrack));
>> if ( gt->maptrack == NULL )
>> goto no_mem_2;
>>
>> /* Shared grant table. */
>> - if ( (gt->shared_raw = xzalloc_array(void *, max_grant_frames)) == NULL )
>> + if ( (gt->shared_raw = xzalloc_array(void *, gt->max_grant_frames)) ==
>> NULL )
>> goto no_mem_3;
>> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
>> + for ( i = 0; i < gt->nr_grant_frames; i++ )
>> {
>> if ( (gt->shared_raw[i] = alloc_xenheap_page()) == NULL )
>> goto no_mem_4;
>> @@ -1761,11 +1765,11 @@ grant_table_init(struct domain *d)
>>
>> /* Status pages for grant table - for version 2 */
>> gt->status = xzalloc_array(grant_status_t *,
>> - grant_to_status_frames(max_grant_frames));
>> + grant_to_status_frames(gt->max_grant_frames));
>> if ( gt->status == NULL )
>> goto no_mem_4;
>>
>> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
>> + for ( i = 0; i < gt->nr_grant_frames; i++ )
>> gnttab_create_shared_page(d, gt, i);
>>
>> gt->nr_status_frames = 0;
>> @@ -1773,7 +1777,7 @@ grant_table_init(struct domain *d)
>> return 0;
>>
>> no_mem_4:
>> - for ( i = 0; i < INITIAL_NR_GRANT_FRAMES; i++ )
>> + for ( i = 0; i < gt->nr_grant_frames; i++ )
>> free_xenheap_page(gt->shared_raw[i]);
>> xfree(gt->shared_raw);
>> gt->shared_raw = NULL;
>> @@ -1782,7 +1786,7 @@ grant_table_init(struct domain *d)
>> gt->maptrack = NULL;
>> no_mem_2:
>> for ( i = 0;
>> - i < num_act_frames_from_sha_frames(INITIAL_NR_GRANT_FRAMES);
>> i++ )
>> + i < num_act_frames_from_sha_frames(gt->nr_grant_frames); i++ )
>> free_xenheap_page(gt->active[i]);
>> xfree(gt->active);
>> gt->active = NULL;
>> @@ -1807,15 +1811,6 @@ gnttab_setup_table(
>> if ( unlikely(copy_from_guest(&op, uop, 1)) )
>> return -EFAULT;
>>
>> - if ( unlikely(op.nr_frames > max_grant_frames) )
>> - {
>> - gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table
>> frames"
>> - " per domain.\n",
>> - max_grant_frames);
>> - op.status = GNTST_general_error;
>> - goto out;
>> - }
>> -
>> if ( !guest_handle_okay(op.frame_list, op.nr_frames) )
>> return -EFAULT;
>>
>> @@ -1835,6 +1830,15 @@ gnttab_setup_table(
>> gt = d->grant_table;
>> grant_write_lock(gt);
>>
>> + if ( unlikely(op.nr_frames > gt->max_grant_frames) )
>> + {
>> + gdprintk(XENLOG_INFO, "Xen only supports up to %d grant-table
>> frames"
>> + " per domain.\n",
>> + gt->max_grant_frames);
>
> The message should probably now say that the domain has only been provisioned with X grant frames, rather than implying that the limit is still global as it does.
Aah, of course.
Juergen
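A message along the lines discussed above might end up looking something like this (illustrative only, not the wording that was actually committed):

    gdprintk(XENLOG_INFO,
             "Domain is limited to %u grant-table frames\n",
             gt->max_grant_frames);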
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH v2 5/8] xen: double default grant frame limit for huge hosts
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
` (3 preceding siblings ...)
2017-09-06 8:26 ` [PATCH v2 4/8] xen: make grant resource limits per domain Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 9:12 ` Paul Durrant
2017-09-06 8:26 ` [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits Juergen Gross
` (2 subsequent siblings)
7 siblings, 1 reply; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
In case a system has memory above the 16TB boundary double the default
grant frame number limit per domain. This ensures a pv domain can still
establish the same number of grants even if it is required to use
version 2 grants which need twice the space of v1 grants.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
xen/common/grant_table.c | 9 ++++++++-
1 file changed, 8 insertions(+), 1 deletion(-)
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index ae5f1262e8..709fd55490 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3824,8 +3824,15 @@ static int __init gnttab_usage_init(void)
{
BUILD_BUG_ON(DEFAULT_MAX_MAPTRACK_FRAMES < DEFAULT_MAX_NR_GRANT_FRAMES);
+ /*
+ * In case grant v2 is required for pv domains to reference any possible
+ * memory page (i.e. memory is installed above 16TB boundary) double the
+ * grant frame limit. This will allow a guest using v2 grants without
+ * having to lower the number of usable grants.
+ */
if ( !max_grant_frames )
- max_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
+ max_grant_frames = ((max_page >> 32) ? 2 : 1) *
+ DEFAULT_MAX_NR_GRANT_FRAMES;
if ( !max_maptrack_frames )
max_maptrack_frames = DEFAULT_MAX_MAPTRACK_FRAMES;
--
2.12.3
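For reference, the 16TB boundary above follows from the width of the frame field in v1 grant entries, assuming 4 KiB pages:

    2^32 frames * 4 KiB/frame = 16 TiB

so a non-zero (max_page >> 32) means some machine frames can no longer be expressed as a 32-bit frame number and guests may have to use grant v2, whose entries are twice as large.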
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [PATCH v2 5/8] xen: double default grant frame limit for huge hosts
2017-09-06 8:26 ` [PATCH v2 5/8] xen: double default grant frame limit for huge hosts Juergen Gross
@ 2017-09-06 9:12 ` Paul Durrant
0 siblings, 0 replies; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 9:12 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:26
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 5/8] xen: double default grant frame limit for
> huge hosts
>
> In case a system has memory above the 16TB boundary double the default
> grant frame number limit per domain. This ensures a pv domain can still
> establish the same number of grants even if it is required to use
> version 2 grants which need twice the space of v1 grants.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
Reviewed-by: Paul Durrant <paul.durrant@citrix.com>
> ---
> xen/common/grant_table.c | 9 ++++++++-
> 1 file changed, 8 insertions(+), 1 deletion(-)
>
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index ae5f1262e8..709fd55490 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -3824,8 +3824,15 @@ static int __init gnttab_usage_init(void)
> {
> BUILD_BUG_ON(DEFAULT_MAX_MAPTRACK_FRAMES <
> DEFAULT_MAX_NR_GRANT_FRAMES);
>
> + /*
> + * In case grant v2 is required for pv domains to reference any possible
> + * memory page (i.e. memory is installed above 16TB boundary) double
> the
> + * grant frame limit. This will allow a guest using v2 grants without
> + * having to lower the number of usable grants.
> + */
> if ( !max_grant_frames )
> - max_grant_frames = DEFAULT_MAX_NR_GRANT_FRAMES;
> + max_grant_frames = ((max_page >> 32) ? 2 : 1) *
> + DEFAULT_MAX_NR_GRANT_FRAMES;
>
> if ( !max_maptrack_frames )
> max_maptrack_frames = DEFAULT_MAX_MAPTRACK_FRAMES;
> --
> 2.12.3
>
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
` (4 preceding siblings ...)
2017-09-06 8:26 ` [PATCH v2 5/8] xen: double default grant frame limit for huge hosts Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 9:17 ` Paul Durrant
2017-09-06 8:26 ` [PATCH v2 7/8] libxc: add libxc support for setting " Juergen Gross
2017-09-06 8:26 ` [PATCH v2 8/8] libxl: add libxl " Juergen Gross
7 siblings, 1 reply; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Add a domctl hypercall to set the domain's resource limits regarding
grant tables. It is accepted only as long as neither
gnttab_setup_table() has been called for the domain, nor the domain
has started to run.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
tools/flask/policy/modules/dom0.te | 2 +-
xen/common/domctl.c | 6 ++++++
xen/common/grant_table.c | 26 ++++++++++++++++++++++++++
xen/include/public/domctl.h | 9 +++++++++
xen/include/xen/grant_table.h | 2 ++
xen/xsm/flask/hooks.c | 3 +++
xen/xsm/flask/policy/access_vectors | 2 ++
7 files changed, 49 insertions(+), 1 deletion(-)
diff --git a/tools/flask/policy/modules/dom0.te b/tools/flask/policy/modules/dom0.te
index 338caaf41e..fe672a61be 100644
--- a/tools/flask/policy/modules/dom0.te
+++ b/tools/flask/policy/modules/dom0.te
@@ -39,7 +39,7 @@ allow dom0_t dom0_t:domain {
};
allow dom0_t dom0_t:domain2 {
set_cpuid gettsc settsc setscheduler set_max_evtchn set_vnumainfo
- get_vnumainfo psr_cmt_op psr_cat_op
+ get_vnumainfo psr_cmt_op psr_cat_op set_gnttbl_limits
};
allow dom0_t dom0_t:resource { add remove };
diff --git a/xen/common/domctl.c b/xen/common/domctl.c
index 42658e5744..027501c8ec 100644
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -14,6 +14,7 @@
#include <xen/sched-if.h>
#include <xen/domain.h>
#include <xen/event.h>
+#include <xen/grant_table.h>
#include <xen/domain_page.h>
#include <xen/trace.h>
#include <xen/console.h>
@@ -1149,6 +1150,11 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
copyback = 1;
break;
+ case XEN_DOMCTL_set_gnttbl_limits:
+ ret = grant_table_set_limits(d, op->u.set_gnttbl_limits.grant_frames,
+ op->u.set_gnttbl_limits.maptrack_frames);
+ break;
+
default:
ret = arch_do_domctl(op, d, u_domctl);
break;
diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
index 709fd55490..ed57677f1a 100644
--- a/xen/common/grant_table.c
+++ b/xen/common/grant_table.c
@@ -3667,6 +3667,32 @@ void grant_table_init_vcpu(struct vcpu *v)
v->maptrack_tail = MAPTRACK_TAIL;
}
+int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
+ unsigned int maptrack_frames)
+{
+ struct grant_table *gt = d->grant_table;
+ int ret = -EBUSY;
+
+ if ( !gt )
+ return -EEXIST;
+
+ grant_write_lock(gt);
+
+ if ( gt->nr_grant_frames )
+ goto unlock;
+
+ ret = 0;
+ if ( grant_frames )
+ gt->max_grant_frames = grant_frames;
+ if ( maptrack_frames )
+ gt->max_maptrack_frames = maptrack_frames;
+
+ unlock:
+ grant_write_unlock(gt);
+
+ return ret;
+}
+
#ifdef CONFIG_HAS_MEM_SHARING
int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
gfn_t *gfn, uint16_t *status)
diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
index 50ff58f5b9..05757c06ac 100644
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -1163,6 +1163,13 @@ struct xen_domctl_psr_cat_op {
typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
+struct xen_domctl_set_gnttbl_limits {
+ uint32_t grant_frames; /* IN: if 0, dont change */
+ uint32_t maptrack_frames; /* IN: if 0, dont change */
+};
+typedef struct xen_domctl_set_gnttbl_limits xen_domctl_set_gnttbl_limits_t;
+DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_gnttbl_limits_t);
+
struct xen_domctl {
uint32_t cmd;
#define XEN_DOMCTL_createdomain 1
@@ -1240,6 +1247,7 @@ struct xen_domctl {
#define XEN_DOMCTL_monitor_op 77
#define XEN_DOMCTL_psr_cat_op 78
#define XEN_DOMCTL_soft_reset 79
+#define XEN_DOMCTL_set_gnttbl_limits 80
#define XEN_DOMCTL_gdbsx_guestmemio 1000
#define XEN_DOMCTL_gdbsx_pausevcpu 1001
#define XEN_DOMCTL_gdbsx_unpausevcpu 1002
@@ -1302,6 +1310,7 @@ struct xen_domctl {
struct xen_domctl_psr_cmt_op psr_cmt_op;
struct xen_domctl_monitor_op monitor_op;
struct xen_domctl_psr_cat_op psr_cat_op;
+ struct xen_domctl_set_gnttbl_limits set_gnttbl_limits;
uint8_t pad[128];
} u;
};
diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
index 84a8d61616..dd9aa3b9ee 100644
--- a/xen/include/xen/grant_table.h
+++ b/xen/include/xen/grant_table.h
@@ -40,6 +40,8 @@ int grant_table_init(
void grant_table_destroy(
struct domain *d);
void grant_table_init_vcpu(struct vcpu *v);
+int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
+ unsigned int maptrack_frames);
/*
* Check if domain has active grants and log first 10 of them.
diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
index 56dc5b0ab9..ae5122cfdc 100644
--- a/xen/xsm/flask/hooks.c
+++ b/xen/xsm/flask/hooks.c
@@ -749,6 +749,9 @@ static int flask_domctl(struct domain *d, int cmd)
case XEN_DOMCTL_soft_reset:
return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SOFT_RESET);
+ case XEN_DOMCTL_set_gnttbl_limits:
+ return current_has_perm(d, SECCLASS_DOMAIN2, DOMAIN2__SET_GNTTBL_LIMITS);
+
default:
return avc_unknown_permission("domctl", cmd);
}
diff --git a/xen/xsm/flask/policy/access_vectors b/xen/xsm/flask/policy/access_vectors
index da9f3dfb2e..aa33f1ecb0 100644
--- a/xen/xsm/flask/policy/access_vectors
+++ b/xen/xsm/flask/policy/access_vectors
@@ -248,6 +248,8 @@ class domain2
mem_sharing
# XEN_DOMCTL_psr_cat_op
psr_cat_op
+# XEN_DOMCTL_set_gnttbl_limits
+ set_gnttbl_limits
}
# Similar to class domain, but primarily contains domctls related to HVM domains
--
2.12.3
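The ordering constraint described above can be summarised as the following pseudo-flow (a sketch only, pieced together from this patch and the earlier ones in the series):

    /* 1. Domain is created: grant_table_create() installs the struct and
     *    the per-domain limits start out at the global defaults.          */
    /* 2. Toolstack issues XEN_DOMCTL_set_gnttbl_limits: accepted because
     *    gt->nr_grant_frames is still 0.                                  */
    /* 3. Domain is unpaused for the first time, or the guest calls
     *    GNTTABOP_setup_table: grant_table_init() allocates the sub
     *    structures using the per-domain limits; any later attempt to
     *    change the limits fails with -EBUSY.                             */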
^ permalink raw reply related [flat|nested] 23+ messages in thread
* Re: [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits
2017-09-06 8:26 ` [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits Juergen Gross
@ 2017-09-06 9:17 ` Paul Durrant
2017-09-06 9:43 ` Juergen Gross
0 siblings, 1 reply; 23+ messages in thread
From: Paul Durrant @ 2017-09-06 9:17 UTC (permalink / raw)
To: 'Juergen Gross', xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
> -----Original Message-----
> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
> Juergen Gross
> Sent: 06 September 2017 09:27
> To: xen-devel@lists.xen.org
> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
> jbeulich@suse.com
> Subject: [Xen-devel] [PATCH v2 6/8] xen: add new domctl hypercall to set
> grant table resource limits
>
> Add a domctl hypercall to set the domain's resource limits regarding
> grant tables. It is accepted only as long as neither
> gnttab_setup_table() has been called for the domain, nor the domain
> has started to run.
>
> Signed-off-by: Juergen Gross <jgross@suse.com>
The code LGTM but I wonder, do we want to introduce another contraction of 'grant table'? The usual one seems to be 'gnttab' rather than 'gnttbl' as you have here.
Paul
> ---
> tools/flask/policy/modules/dom0.te | 2 +-
> xen/common/domctl.c | 6 ++++++
> xen/common/grant_table.c | 26 ++++++++++++++++++++++++++
> xen/include/public/domctl.h | 9 +++++++++
> xen/include/xen/grant_table.h | 2 ++
> xen/xsm/flask/hooks.c | 3 +++
> xen/xsm/flask/policy/access_vectors | 2 ++
> 7 files changed, 49 insertions(+), 1 deletion(-)
>
> diff --git a/tools/flask/policy/modules/dom0.te
> b/tools/flask/policy/modules/dom0.te
> index 338caaf41e..fe672a61be 100644
> --- a/tools/flask/policy/modules/dom0.te
> +++ b/tools/flask/policy/modules/dom0.te
> @@ -39,7 +39,7 @@ allow dom0_t dom0_t:domain {
> };
> allow dom0_t dom0_t:domain2 {
> set_cpuid gettsc settsc setscheduler set_max_evtchn
> set_vnumainfo
> - get_vnumainfo psr_cmt_op psr_cat_op
> + get_vnumainfo psr_cmt_op psr_cat_op set_gnttbl_limits
> };
> allow dom0_t dom0_t:resource { add remove };
>
> diff --git a/xen/common/domctl.c b/xen/common/domctl.c
> index 42658e5744..027501c8ec 100644
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -14,6 +14,7 @@
> #include <xen/sched-if.h>
> #include <xen/domain.h>
> #include <xen/event.h>
> +#include <xen/grant_table.h>
> #include <xen/domain_page.h>
> #include <xen/trace.h>
> #include <xen/console.h>
> @@ -1149,6 +1150,11 @@ long
> do_domctl(XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> copyback = 1;
> break;
>
> + case XEN_DOMCTL_set_gnttbl_limits:
> + ret = grant_table_set_limits(d, op->u.set_gnttbl_limits.grant_frames,
> + op->u.set_gnttbl_limits.maptrack_frames);
> + break;
> +
> default:
> ret = arch_do_domctl(op, d, u_domctl);
> break;
> diff --git a/xen/common/grant_table.c b/xen/common/grant_table.c
> index 709fd55490..ed57677f1a 100644
> --- a/xen/common/grant_table.c
> +++ b/xen/common/grant_table.c
> @@ -3667,6 +3667,32 @@ void grant_table_init_vcpu(struct vcpu *v)
> v->maptrack_tail = MAPTRACK_TAIL;
> }
>
> +int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
> + unsigned int maptrack_frames)
> +{
> + struct grant_table *gt = d->grant_table;
> + int ret = -EBUSY;
> +
> + if ( !gt )
> + return -EEXIST;
> +
> + grant_write_lock(gt);
> +
> + if ( gt->nr_grant_frames )
> + goto unlock;
> +
> + ret = 0;
> + if ( grant_frames )
> + gt->max_grant_frames = grant_frames;
> + if ( maptrack_frames )
> + gt->max_maptrack_frames = maptrack_frames;
> +
> + unlock:
> + grant_write_unlock(gt);
> +
> + return ret;
> +}
> +
> #ifdef CONFIG_HAS_MEM_SHARING
> int mem_sharing_gref_to_gfn(struct grant_table *gt, grant_ref_t ref,
> gfn_t *gfn, uint16_t *status)
> diff --git a/xen/include/public/domctl.h b/xen/include/public/domctl.h
> index 50ff58f5b9..05757c06ac 100644
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -1163,6 +1163,13 @@ struct xen_domctl_psr_cat_op {
> typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
> DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
>
> +struct xen_domctl_set_gnttbl_limits {
> + uint32_t grant_frames; /* IN: if 0, dont change */
> + uint32_t maptrack_frames; /* IN: if 0, dont change */
> +};
> +typedef struct xen_domctl_set_gnttbl_limits
> xen_domctl_set_gnttbl_limits_t;
> +DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_gnttbl_limits_t);
> +
> struct xen_domctl {
> uint32_t cmd;
> #define XEN_DOMCTL_createdomain 1
> @@ -1240,6 +1247,7 @@ struct xen_domctl {
> #define XEN_DOMCTL_monitor_op 77
> #define XEN_DOMCTL_psr_cat_op 78
> #define XEN_DOMCTL_soft_reset 79
> +#define XEN_DOMCTL_set_gnttbl_limits 80
> #define XEN_DOMCTL_gdbsx_guestmemio 1000
> #define XEN_DOMCTL_gdbsx_pausevcpu 1001
> #define XEN_DOMCTL_gdbsx_unpausevcpu 1002
> @@ -1302,6 +1310,7 @@ struct xen_domctl {
> struct xen_domctl_psr_cmt_op psr_cmt_op;
> struct xen_domctl_monitor_op monitor_op;
> struct xen_domctl_psr_cat_op psr_cat_op;
> + struct xen_domctl_set_gnttbl_limits set_gnttbl_limits;
> uint8_t pad[128];
> } u;
> };
> diff --git a/xen/include/xen/grant_table.h b/xen/include/xen/grant_table.h
> index 84a8d61616..dd9aa3b9ee 100644
> --- a/xen/include/xen/grant_table.h
> +++ b/xen/include/xen/grant_table.h
> @@ -40,6 +40,8 @@ int grant_table_init(
> void grant_table_destroy(
> struct domain *d);
> void grant_table_init_vcpu(struct vcpu *v);
> +int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
> + unsigned int maptrack_frames);
>
> /*
> * Check if domain has active grants and log first 10 of them.
> diff --git a/xen/xsm/flask/hooks.c b/xen/xsm/flask/hooks.c
> index 56dc5b0ab9..ae5122cfdc 100644
> --- a/xen/xsm/flask/hooks.c
> +++ b/xen/xsm/flask/hooks.c
> @@ -749,6 +749,9 @@ static int flask_domctl(struct domain *d, int cmd)
> case XEN_DOMCTL_soft_reset:
> return current_has_perm(d, SECCLASS_DOMAIN2,
> DOMAIN2__SOFT_RESET);
>
> + case XEN_DOMCTL_set_gnttbl_limits:
> + return current_has_perm(d, SECCLASS_DOMAIN2,
> DOMAIN2__SET_GNTTBL_LIMITS);
> +
> default:
> return avc_unknown_permission("domctl", cmd);
> }
> diff --git a/xen/xsm/flask/policy/access_vectors
> b/xen/xsm/flask/policy/access_vectors
> index da9f3dfb2e..aa33f1ecb0 100644
> --- a/xen/xsm/flask/policy/access_vectors
> +++ b/xen/xsm/flask/policy/access_vectors
> @@ -248,6 +248,8 @@ class domain2
> mem_sharing
> # XEN_DOMCTL_psr_cat_op
> psr_cat_op
> +# XEN_DOMCTL_set_gnttbl_limits
> + set_gnttbl_limits
> }
>
> # Similar to class domain, but primarily contains domctls related to HVM
> domains
> --
> 2.12.3
>
>
^ permalink raw reply [flat|nested] 23+ messages in thread
* Re: [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits
2017-09-06 9:17 ` Paul Durrant
@ 2017-09-06 9:43 ` Juergen Gross
0 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 9:43 UTC (permalink / raw)
To: Paul Durrant, xen-devel@lists.xen.org
Cc: sstabellini@kernel.org, Wei Liu, Andrew Cooper, Tim (Xen.org),
George Dunlap, jbeulich@suse.com, Ian Jackson
On 06/09/17 11:17, Paul Durrant wrote:
>> -----Original Message-----
>> From: Xen-devel [mailto:xen-devel-bounces@lists.xen.org] On Behalf Of
>> Juergen Gross
>> Sent: 06 September 2017 09:27
>> To: xen-devel@lists.xen.org
>> Cc: Juergen Gross <jgross@suse.com>; sstabellini@kernel.org; Wei Liu
>> <wei.liu2@citrix.com>; George Dunlap <George.Dunlap@citrix.com>;
>> Andrew Cooper <Andrew.Cooper3@citrix.com>; Ian Jackson
>> <Ian.Jackson@citrix.com>; Tim (Xen.org) <tim@xen.org>;
>> jbeulich@suse.com
>> Subject: [Xen-devel] [PATCH v2 6/8] xen: add new domctl hypercall to set
>> grant table resource limits
>>
>> Add a domctl hypercall to set a domain's resource limits regarding
>> grant tables. It is accepted only as long as gnttab_setup_table() has
>> not yet been called for the domain and the domain has not started to
>> run.
>>
>> Signed-off-by: Juergen Gross <jgross@suse.com>
>
> The code LGTM but I wonder, do we want to introduce another contraction of 'grant table'? The usual one seems to be 'gnttab' rather than 'gnttbl' as you have here.
Okay, I'll rename it.
Juergen
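
For context, here is a minimal sketch of what the hypervisor-side
grant_table_set_limits() could look like, based only on the prototype added
to xen/include/xen/grant_table.h in this series and the conditions stated in
the commit message. It is not the implementation from the patch: the
per-domain fields max_grant_frames and max_maptrack_frames, the -EBUSY error
code, and the omission of the "domain has not started to run" check are all
assumptions made for illustration.

/* Sketch only -- not the code from this patch.  Assumes per-domain limit
 * fields (max_grant_frames, max_maptrack_frames) in struct grant_table,
 * which are not shown in the quoted hunks, and omits the check that the
 * domain has not yet started to run. */
int grant_table_set_limits(struct domain *d, unsigned int grant_frames,
                           unsigned int maptrack_frames)
{
    struct grant_table *gt = d->grant_table;
    int ret = -EBUSY;

    grant_write_lock(gt);

    /* Refuse once gnttab_setup_table() has initialised the table. */
    if ( gt->gt_version == 0 )
    {
        if ( grant_frames )
            gt->max_grant_frames = grant_frames;       /* assumed field */
        if ( maptrack_frames )
            gt->max_maptrack_frames = maptrack_frames; /* assumed field */
        ret = 0;
    }

    grant_write_unlock(gt);

    return ret;
}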
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 23+ messages in thread
* [PATCH v2 7/8] libxc: add libxc support for setting grant table resource limits
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
` (5 preceding siblings ...)
2017-09-06 8:26 ` [PATCH v2 6/8] xen: add new domctl hypercall to set grant table resource limits Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
2017-09-06 8:26 ` [PATCH v2 8/8] libxl: add libxl " Juergen Gross
7 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Add a new libxc function xc_domain_set_gnttbl_limits() to set the
limits on the maximum numbers of grant table frames and maptrack
frames of a domain.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
tools/libxc/include/xenctrl.h | 14 ++++++++++++++
tools/libxc/xc_domain.c | 13 +++++++++++++
2 files changed, 27 insertions(+)
diff --git a/tools/libxc/include/xenctrl.h b/tools/libxc/include/xenctrl.h
index 43151cb415..b3454e6bac 100644
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1064,6 +1064,20 @@ int xc_domain_set_virq_handler(xc_interface *xch, uint32_t domid, int virq);
int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
uint32_t max_port);
+/**
+ * Set the maximum number of grant frames and/or maptrack frames a domain
+ * can have. Can only be used at domain setup time. A zero value means
+ * no change.
+ *
+ * @param xch a handle to an open hypervisor interface
+ * @param domid the domain id
+ * @param grant_frames max. number of grant frames
+ * @param maptrack_frames max. number of maptrack frames
+ */
+int xc_domain_set_gnttbl_limits(xc_interface *xch, uint32_t domid,
+ uint32_t grant_frames,
+ uint32_t maptrack_frames);
+
/*
* CPUPOOL MANAGEMENT FUNCTIONS
*/
diff --git a/tools/libxc/xc_domain.c b/tools/libxc/xc_domain.c
index 3bab4e8bab..385545de53 100644
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -2268,6 +2268,19 @@ int xc_domain_set_max_evtchn(xc_interface *xch, uint32_t domid,
return do_domctl(xch, &domctl);
}
+int xc_domain_set_gnttbl_limits(xc_interface *xch, uint32_t domid,
+ uint32_t grant_frames,
+ uint32_t maptrack_frames)
+{
+ DECLARE_DOMCTL;
+
+ domctl.cmd = XEN_DOMCTL_set_gnttbl_limits;
+ domctl.domain = domid;
+ domctl.u.set_gnttbl_limits.grant_frames = grant_frames;
+ domctl.u.set_gnttbl_limits.maptrack_frames = maptrack_frames;
+ return do_domctl(xch, &domctl);
+}
+
/* Plumbing Xen with vNUMA topology */
int xc_domain_setvnuma(xc_interface *xch,
uint32_t domid,
--
2.12.3
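
As a usage illustration only (not part of the patch), a toolstack component
would call the new wrapper during domain setup, before the domain is
unpaused. The domid handling, the chosen limit values, and the helper name
below are made up for the example.

#include <xenctrl.h>

/* Illustrative only: raise the grant frame limit of a freshly created,
 * still paused domain.  Values and error handling are example choices. */
static int example_set_grant_limits(uint32_t domid)
{
    xc_interface *xch = xc_interface_open(NULL, NULL, 0);
    int rc;

    if (!xch)
        return -1;

    /* Allow up to 64 grant frames; 0 leaves the maptrack limit unchanged. */
    rc = xc_domain_set_gnttbl_limits(xch, domid, 64, 0);

    xc_interface_close(xch);
    return rc;
}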
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 23+ messages in thread
* [PATCH v2 8/8] libxl: add libxl support for setting grant table resource limits
2017-09-06 8:26 [PATCH v2 0/8] xen: better grant v2 support Juergen Gross
` (6 preceding siblings ...)
2017-09-06 8:26 ` [PATCH v2 7/8] libxc: add libxc support for setting " Juergen Gross
@ 2017-09-06 8:26 ` Juergen Gross
7 siblings, 0 replies; 23+ messages in thread
From: Juergen Gross @ 2017-09-06 8:26 UTC (permalink / raw)
To: xen-devel
Cc: Juergen Gross, sstabellini, wei.liu2, George.Dunlap,
andrew.cooper3, ian.jackson, tim, jbeulich
Add new domain config items for setting the limits for the maximum
numbers of grant table frames and maptrack frames of a domain.
Signed-off-by: Juergen Gross <jgross@suse.com>
---
docs/man/xl.cfg.pod.5.in | 15 +++++++++++++++
tools/libxl/libxl.h | 6 ++++++
tools/libxl/libxl_dom.c | 8 ++++++++
tools/libxl/libxl_types.idl | 3 +++
tools/xl/xl_parse.c | 5 +++++
tools/xl/xl_sxp.c | 2 ++
6 files changed, 39 insertions(+)
diff --git a/docs/man/xl.cfg.pod.5.in b/docs/man/xl.cfg.pod.5.in
index 79cb2eaea7..dd0b232020 100644
--- a/docs/man/xl.cfg.pod.5.in
+++ b/docs/man/xl.cfg.pod.5.in
@@ -444,6 +444,21 @@ unpausing the domain. With a properly constructed security policy (such
as nomigrate_t in the example policy), this can be used to build a
domain whose memory is not accessible to the toolstack domain.
+=item B<grant_frames=NUMBER>
+
+Specify the maximum number of grant frames the domain is allowed to have.
+This value controls how many pages the domain can grant other domains access
+to, which is needed e.g. for the operation of paravirtualized devices.
+The default is 32 unless a different value is set via a Xen boot parameter.
+
+=item B<maptrack_frames=NUMBER>
+
+Specify the maximum number of grant maptrack frames the domain is allowed
+to have. This value controls how many pages of foreign domains can be accessed
+via the grant mechanism by this domain. A value higher than the default of
+1024 is normally needed only for driver domains with very large
+configurations.
+
=item B<nomigrate=BOOLEAN>
Disable migration of this domain. This enables certain other features
diff --git a/tools/libxl/libxl.h b/tools/libxl/libxl.h
index 812b7ea95d..fef22c2306 100644
--- a/tools/libxl/libxl.h
+++ b/tools/libxl/libxl.h
@@ -311,6 +311,12 @@
#define LIBXL_HAVE_P9S 1
/*
+ * LIBXL_HAVE_BUILDINFO_GRANT_LIMITS indicates that libxl_domain_build_info
+ * has the grant_frames and maptrack_frames fields.
+ */
+#define LIBXL_HAVE_BUILDINFO_GRANT_LIMITS 1
+
+/*
* libxl ABI compatibility
*
* The only guarantee which libxl makes regarding ABI compatibility
diff --git a/tools/libxl/libxl_dom.c b/tools/libxl/libxl_dom.c
index f54fd49a73..f0a090b2e0 100644
--- a/tools/libxl/libxl_dom.c
+++ b/tools/libxl/libxl_dom.c
@@ -322,6 +322,14 @@ int libxl__build_pre(libxl__gc *gc, uint32_t domid,
return ERROR_FAIL;
}
+ if (info->grant_frames || info->maptrack_frames) {
+ if (xc_domain_set_gnttbl_limits(ctx->xch, domid, info->grant_frames,
+ info->maptrack_frames) != 0) {
+ LOG(ERROR, "Couldn't set grant table limits");
+ return ERROR_FAIL;
+ }
+ }
+
/*
* Check if the domain has any CPU or node affinity already. If not, try
* to build up the latter via automatic NUMA placement. In fact, in case
diff --git a/tools/libxl/libxl_types.idl b/tools/libxl/libxl_types.idl
index 173d70acec..2aa7dae83e 100644
--- a/tools/libxl/libxl_types.idl
+++ b/tools/libxl/libxl_types.idl
@@ -472,6 +472,9 @@ libxl_domain_build_info = Struct("domain_build_info",[
("blkdev_start", string),
("vnuma_nodes", Array(libxl_vnode_info, "num_vnuma_nodes")),
+
+ ("grant_frames", uint32),
+ ("maptrack_frames", uint32),
("device_model_version", libxl_device_model_version),
("device_model_stubdomain", libxl_defbool),
diff --git a/tools/xl/xl_parse.c b/tools/xl/xl_parse.c
index 02ddd2e90d..dae3a238a4 100644
--- a/tools/xl/xl_parse.c
+++ b/tools/xl/xl_parse.c
@@ -943,6 +943,11 @@ void parse_config_data(const char *config_source,
!xlu_cfg_get_string (config, "cpus_soft", &buf, 0))
parse_vcpu_affinity(b_info, cpus, buf, num_cpus, false);
+ if (!xlu_cfg_get_long (config, "grant_frames", &l, 0))
+ b_info->grant_frames = l;
+ if (!xlu_cfg_get_long (config, "maptrack_frames", &l, 0))
+ b_info->maptrack_frames = l;
+
libxl_defbool_set(&b_info->claim_mode, claim_mode);
if (xlu_cfg_get_string (config, "on_poweroff", &buf, 0))
diff --git a/tools/xl/xl_sxp.c b/tools/xl/xl_sxp.c
index e738bf2465..4b2fab2d35 100644
--- a/tools/xl/xl_sxp.c
+++ b/tools/xl/xl_sxp.c
@@ -64,6 +64,8 @@ void printf_info_sexp(int domid, libxl_domain_config *d_config, FILE *fh)
fprintf(fh, "\t(build_info)\n");
fprintf(fh, "\t(max_vcpus %d)\n", b_info->max_vcpus);
+ fprintf(fh, "\t(grant_frames %d)\n", b_info->grant_frames);
+ fprintf(fh, "\t(maptrack_frames %d)\n", b_info->maptrack_frames);
fprintf(fh, "\t(tsc_mode %s)\n", libxl_tsc_mode_to_string(b_info->tsc_mode));
fprintf(fh, "\t(max_memkb %"PRId64")\n", b_info->max_memkb);
fprintf(fh, "\t(target_memkb %"PRId64")\n", b_info->target_memkb);
--
2.12.3
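
To illustrate how the new build_info fields are meant to be consumed (again,
not part of the patch), a libxl client could set them before creating the
domain; the values are examples only, and with xl they correspond to the
grant_frames= and maptrack_frames= settings in the domain configuration file.

#include <libxl.h>

/* Illustrative only: set the new grant table limits in the build info
 * of a domain configuration before domain creation. */
static void example_fill_grant_limits(libxl_domain_config *d_config)
{
    /* Equivalent to "grant_frames=64" and "maptrack_frames=2048" in an
     * xl domain configuration file. */
    d_config->b_info.grant_frames = 64;
    d_config->b_info.maptrack_frames = 2048;
}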
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply related [flat|nested] 23+ messages in thread