From: "Jan Beulich" <JBeulich@novell.com>
To: xen-devel@lists.xensource.com
Cc: xen-ia64-devel@lists.xensource.com
Subject: [PATCH] ia64: build fixes
Date: Tue, 04 May 2010 17:04:45 +0100
Message-ID: <4BE061BD020000780000136D@vpn.id2.novell.com>
Bring the ia64 arch_do_sysctl() in line with the current common sysctl
interface: drop the per-CPU cpu_to_node reporting from XEN_SYSCTL_physinfo
and handle the XEN_SYSCTL_topologyinfo sub-op instead. Also compile out
the dom_vmce() cleanup in complete_domain_destroy() where dom_vmce isn't
defined, as is the case on ia64.

Signed-off-by: Jan Beulich <jbeulich@novell.com>
--- 2010-05-04.orig/xen/arch/ia64/xen/dom0_ops.c 2010-01-21 15:36:53.000000000 +0100
+++ 2010-05-04/xen/arch/ia64/xen/dom0_ops.c 2010-05-04 14:03:40.000000000 +0200
@@ -735,20 +735,13 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
{
case XEN_SYSCTL_physinfo:
{
- int i;
- uint32_t max_array_ent;
- XEN_GUEST_HANDLE_64(uint32) cpu_to_node_arr;
-
xen_sysctl_physinfo_t *pi = &op->u.physinfo;
- max_array_ent = pi->max_cpu_id;
- cpu_to_node_arr = pi->cpu_to_node;
-
memset(pi, 0, sizeof(*pi));
- pi->cpu_to_node = cpu_to_node_arr;
pi->threads_per_core = cpus_weight(per_cpu(cpu_sibling_map, 0));
pi->cores_per_socket =
cpus_weight(per_cpu(cpu_core_map, 0)) / pi->threads_per_core;
+ pi->nr_nodes = (u32)num_online_nodes();
pi->nr_cpus = (u32)num_online_cpus();
pi->total_pages = total_pages;
pi->free_pages = avail_domheap_pages();
@@ -757,21 +750,55 @@ long arch_do_sysctl(xen_sysctl_t *op, XE
pi->max_node_id = last_node(node_online_map);
pi->max_cpu_id = last_cpu(cpu_online_map);
- max_array_ent = min_t(uint32_t, max_array_ent, pi->max_cpu_id);
- ret = 0;
+ if ( copy_field_to_guest(u_sysctl, op, u.physinfo) )
+ ret = -EFAULT;
+ }
+ break;
+
+ case XEN_SYSCTL_topologyinfo:
+ {
+ xen_sysctl_topologyinfo_t *ti = &op->u.topologyinfo;
+ XEN_GUEST_HANDLE_64(uint32) arr;
+ uint32_t i, val, max_array_ent = ti->max_cpu_index;
+
+ ti->max_cpu_index = last_cpu(cpu_online_map);
+ max_array_ent = min(max_array_ent, ti->max_cpu_index);
+
+ arr = ti->cpu_to_core;
+ if ( !guest_handle_is_null(arr) )
+ {
+ for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+ {
+ val = cpu_online(i) ? cpu_to_core(i) : ~0u;
+ if ( copy_to_guest_offset(arr, i, &val, 1) )
+ ret = -EFAULT;
+ }
+ }
+
+ arr = ti->cpu_to_socket;
+ if ( !guest_handle_is_null(arr) )
+ {
+ for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+ {
+ val = cpu_online(i) ? cpu_to_socket(i) : ~0u;
+ if ( copy_to_guest_offset(arr, i, &val, 1) )
+ ret = -EFAULT;
+ }
+ }
- if (!guest_handle_is_null(cpu_to_node_arr)) {
- for (i = 0; i <= max_array_ent; i++) {
- uint32_t node = cpu_online(i) ? cpu_to_node(i) : ~0u;
- if (copy_to_guest_offset(cpu_to_node_arr, i, &node, 1)) {
+ arr = ti->cpu_to_node;
+ if ( !guest_handle_is_null(arr) )
+ {
+ for ( i = 0; ret == 0 && i <= max_array_ent; i++ )
+ {
+ val = cpu_online(i) ? cpu_to_node(i) : ~0u;
+ if ( copy_to_guest_offset(arr, i, &val, 1) )
ret = -EFAULT;
- break;
- }
}
}
- if ( copy_to_guest(u_sysctl, op, 1) )
+ if ( copy_field_to_guest(u_sysctl, op, u.topologyinfo.max_cpu_index) )
ret = -EFAULT;
}
break;
--- 2010-05-04.orig/xen/common/domain.c 2010-04-22 14:43:25.000000000 +0200
+++ 2010-05-04/xen/common/domain.c 2010-05-04 13:34:25.000000000 +0200
@@ -625,8 +625,10 @@ static void complete_domain_destroy(stru
xfree(d->pirq_mask);
xfree(d->pirq_to_evtchn);
+#ifdef dom_vmce
xfree(dom_vmce(d)->mci_ctl);
xfree(dom_vmce(d));
+#endif
xsm_free_security_domain(d);
free_domain_struct(d);
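
The three guest arrays in the XEN_SYSCTL_topologyinfo handler above are all
filled by the same copy loop. Purely as an illustration of that pattern (not
part of the patch), a hypothetical helper could factor it out as below, using
only interfaces that already appear in the diff (XEN_GUEST_HANDLE_64,
guest_handle_is_null(), cpu_online(), copy_to_guest_offset()). Note that
cpu_to_core()/cpu_to_socket()/cpu_to_node() are macros in Xen, so the caller
would need trivial wrapper functions to pass them as the map argument:

/*
 * Hypothetical helper, for illustration only: copy one per-CPU mapping
 * (core, socket or node ID) to a guest-supplied uint32 array, writing
 * ~0 for offline CPUs.  Returns 0 on success or -EFAULT on copy failure.
 */
static int copy_cpu_map_to_guest(XEN_GUEST_HANDLE_64(uint32) arr,
                                 uint32_t max_array_ent,
                                 uint32_t (*map)(unsigned int cpu))
{
    uint32_t i, val;

    if ( guest_handle_is_null(arr) )
        return 0;                 /* caller didn't request this array */

    for ( i = 0; i <= max_array_ent; i++ )
    {
        val = cpu_online(i) ? map(i) : ~0u;
        if ( copy_to_guest_offset(arr, i, &val, 1) )
            return -EFAULT;
    }

    return 0;
}

With such a helper (plus small wrappers around the cpu_to_*() macros), each of
the three blocks in the handler would reduce to a single call, e.g.
ret = copy_cpu_map_to_guest(ti->cpu_to_core, max_array_ent, core_of_cpu);
where core_of_cpu is one of those hypothetical wrappers. Again, this is only a
sketch of the repeated pattern, not something the patch introduces.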