From: Julien Grall <julien.grall@citrix.com>
To: qemu-devel@nongnu.org
Cc: Julien Grall <julien.grall@citrix.com>,
christian.limpach@gmail.com, Stefano.Stabellini@eu.citrix.com,
xen-devel@lists.xen.org
Subject: [Qemu-devel] [XEN][RFC PATCH V2 05/17] hvm: Modify hvm_op
Date: Wed, 22 Aug 2012 13:31:51 +0100
Message-ID: <c378b04ee29071c1d6d68bd3ef48fedadb493b10.1345552068.git.julien.grall@citrix.com>
In-Reply-To: <cover.1345552068.git.julien.grall@citrix.com>
This patch removes the hvm_params made useless by the structure modification
and binds the new hypercalls that handle ioreq servers and PCI devices.
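For context, the old HVM_PARAM_IOREQ_PFN/HVM_PARAM_BUFIOREQ_PFN pair is
replaced by a range of IO pages described by HVM_PARAM_IO_PFN_FIRST and
HVM_PARAM_IO_PFN_LAST; the latter may only be set once. A minimal sketch of
the toolstack side, assuming the PFNs have already been reserved in the
guest memory layout (the values below are purely illustrative):

    #include <xenctrl.h>

    /* Illustrative PFNs only; real values come from the guest layout. */
    #define IO_PFN_FIRST 0xfeff0UL
    #define IO_PFN_LAST  0xfeff3UL

    static int setup_ioreq_range(xc_interface *xch, domid_t domid)
    {
        int rc;

        /* The first PFN also maps the default ioreq page. */
        rc = xc_set_hvm_param(xch, domid, HVM_PARAM_IO_PFN_FIRST, IO_PFN_FIRST);
        if ( rc )
            return rc;

        /* Setting the last PFN a second time fails with -EINVAL. */
        return xc_set_hvm_param(xch, domid, HVM_PARAM_IO_PFN_LAST, IO_PFN_LAST);
    }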
Signed-off-by: Julien Grall <julien.grall@citrix.com>
---
xen/arch/x86/hvm/hvm.c | 150 +++++++++++++++++++++------------------
xen/include/public/hvm/params.h | 5 --
2 files changed, 81 insertions(+), 74 deletions(-)
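(Not part of the patch: below is an illustrative sketch of how a device
model might use the new hypercalls bound here. The libxc wrappers are only
added later in this series, so xc_hvm_register_ioreq_server() and
xc_hvm_register_pcidev() are hypothetical placeholders, not the real API.)

    #include <stdint.h>
    #include <stdio.h>
    #include <xenctrl.h>

    static int register_device_model(xc_interface *xch, domid_t domid)
    {
        uint32_t serverid;
        int rc;

        /* Ask Xen for a new ioreq server id (HVMOP_register_ioreq_server). */
        rc = xc_hvm_register_ioreq_server(xch, domid, &serverid);
        if ( rc < 0 )
            return rc;

        /* Claim PCI config space for 0000:00:03.0 (HVMOP_register_pcidev). */
        rc = xc_hvm_register_pcidev(xch, domid, serverid, 0, 0, 3, 0);
        if ( rc < 0 )
            fprintf(stderr, "failed to register PCI device: %d\n", rc);

        return rc;
    }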
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 292d57b..a2cd9b3 100644
--- a/xen/arch/x86/hvm/hvm.c
+++ b/xen/arch/x86/hvm/hvm.c
@@ -571,8 +571,7 @@ int hvm_domain_initialise(struct domain *d)
register_portio_handler(d, 0xe9, 1, hvm_print_line);
- if ( hvm_init_pci_emul(d) )
- goto fail2;
+ hvm_init_pci_emul(d);
rc = hvm_funcs.domain_initialise(d);
if ( rc != 0 )
@@ -650,6 +649,7 @@ void hvm_domain_relinquish_resources(struct domain *d)
{
hvm_destroy_ioreq_servers(d);
hvm_destroy_pci_emul(d);
+ hvm_destroy_ioreq_page(d, &d->arch.hvm_domain.ioreq);
msixtbl_pt_cleanup(d);
@@ -3742,21 +3742,6 @@ static int hvmop_flush_tlb_all(void)
return 0;
}
-static int hvm_replace_event_channel(struct vcpu *v, domid_t remote_domid,
- int *p_port)
-{
- int old_port, new_port;
-
- new_port = alloc_unbound_xen_event_channel(v, remote_domid, NULL);
- if ( new_port < 0 )
- return new_port;
-
- /* xchg() ensures that only we call free_xen_event_channel(). */
- old_port = xchg(p_port, new_port);
- free_xen_event_channel(v, old_port);
- return 0;
-}
-
static int hvm_alloc_ioreq_server_page(struct domain *d,
struct hvm_ioreq_server *s,
struct hvm_ioreq_page *pfn,
@@ -4041,7 +4026,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
case HVMOP_get_param:
{
struct xen_hvm_param a;
- struct hvm_ioreq_page *iorp;
struct domain *d;
struct vcpu *v;
@@ -4069,20 +4053,12 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
switch ( a.index )
{
- case HVM_PARAM_IOREQ_PFN:
- iorp = &d->arch.hvm_domain.ioreq;
- if ( (rc = hvm_set_ioreq_page(d, iorp, a.value)) != 0 )
- break;
- spin_lock(&iorp->lock);
- if ( iorp->va != NULL )
- /* Initialise evtchn port info if VCPUs already created. */
- for_each_vcpu ( d, v )
- get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
- spin_unlock(&iorp->lock);
+ case HVM_PARAM_IO_PFN_FIRST:
+ rc = hvm_set_ioreq_page(d, &d->arch.hvm_domain.ioreq, a.value);
break;
- case HVM_PARAM_BUFIOREQ_PFN:
- iorp = &d->arch.hvm_domain.buf_ioreq;
- rc = hvm_set_ioreq_page(d, iorp, a.value);
+ case HVM_PARAM_IO_PFN_LAST:
+ if ( (d->arch.hvm_domain.params[HVM_PARAM_IO_PFN_LAST]) )
+ rc = -EINVAL;
break;
case HVM_PARAM_CALLBACK_IRQ:
hvm_set_callback_via(d, a.value);
@@ -4128,41 +4104,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
domctl_lock_release();
break;
- case HVM_PARAM_DM_DOMAIN:
- /* Not reflexive, as we must domain_pause(). */
- rc = -EPERM;
- if ( curr_d == d )
- break;
-
- if ( a.value == DOMID_SELF )
- a.value = curr_d->domain_id;
-
- rc = 0;
- domain_pause(d); /* safe to change per-vcpu xen_port */
- if ( d->vcpu[0] )
- rc = hvm_replace_event_channel(d->vcpu[0], a.value,
- (int *)&d->vcpu[0]->domain->arch.hvm_domain.params
- [HVM_PARAM_BUFIOREQ_EVTCHN]);
- if ( rc )
- {
- domain_unpause(d);
- break;
- }
- iorp = &d->arch.hvm_domain.ioreq;
- for_each_vcpu ( d, v )
- {
- rc = hvm_replace_event_channel(v, a.value,
- &v->arch.hvm_vcpu.xen_port);
- if ( rc )
- break;
-
- spin_lock(&iorp->lock);
- if ( iorp->va != NULL )
- get_ioreq(v)->vp_eport = v->arch.hvm_vcpu.xen_port;
- spin_unlock(&iorp->lock);
- }
- domain_unpause(d);
- break;
case HVM_PARAM_ACPI_S_STATE:
/* Not reflexive, as we must domain_pause(). */
rc = -EPERM;
@@ -4213,9 +4154,6 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
if ( rc == 0 )
rc = nestedhvm_vcpu_initialise(v);
break;
- case HVM_PARAM_BUFIOREQ_EVTCHN:
- rc = -EINVAL;
- break;
}
if ( rc == 0 )
@@ -4669,6 +4607,80 @@ long do_hvm_op(unsigned long op, XEN_GUEST_HANDLE(void) arg)
break;
}
+ case HVMOP_register_ioreq_server:
+ {
+ struct xen_hvm_register_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_register_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+ break;
+ }
+
+ case HVMOP_get_ioreq_server_buf_channel:
+ {
+ struct xen_hvm_get_ioreq_server_buf_channel a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_get_ioreq_server_buf_channel(&a);
+ if ( rc != 0 )
+ return rc;
+
+ rc = copy_to_guest(arg, &a, 1) ? -EFAULT : 0;
+
+ break;
+ }
+
+ case HVMOP_map_io_range_to_ioreq_server:
+ {
+ struct xen_hvm_map_io_range_to_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_map_io_range_to_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
+ case HVMOP_unmap_io_range_from_ioreq_server:
+ {
+ struct xen_hvm_unmap_io_range_from_ioreq_server a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvmop_unmap_io_range_from_ioreq_server(&a);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
+ case HVMOP_register_pcidev:
+ {
+ struct xen_hvm_register_pcidev a;
+
+ if ( copy_from_guest(&a, arg, 1) )
+ return -EFAULT;
+
+ rc = hvm_register_pcidev(a.domid, a.id, a.domain,
+ a.bus, a.device, a.function);
+ if ( rc != 0 )
+ return rc;
+
+ break;
+ }
+
default:
{
gdprintk(XENLOG_DEBUG, "Bad HVM op %ld.\n", op);
diff --git a/xen/include/public/hvm/params.h b/xen/include/public/hvm/params.h
index 309ac1b..017493b 100644
--- a/xen/include/public/hvm/params.h
+++ b/xen/include/public/hvm/params.h
@@ -49,11 +49,6 @@
#define HVM_PARAM_PAE_ENABLED 4
-#define HVM_PARAM_IOREQ_PFN 5
-
-#define HVM_PARAM_BUFIOREQ_PFN 6
-#define HVM_PARAM_BUFIOREQ_EVTCHN 26
-
#ifdef __ia64__
#define HVM_PARAM_NVRAM_FD 7
--
Julien Grall