* [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
@ 2017-09-12 14:25 Jan Beulich
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
` (5 more replies)
0 siblings, 6 replies; 20+ messages in thread
From: Jan Beulich @ 2017-09-12 14:25 UTC (permalink / raw)
To: xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Tim Deegan, Julien Grall
1: public/domctl: drop unnecessary typedefs and handles
2: public/sysctl: drop unnecessary typedefs and handles
Signed-off-by: Jan Beulich <jbeulich@suse.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
@ 2017-09-12 15:08 ` Jan Beulich
2017-09-12 15:33 ` Razvan Cojocaru
` (5 more replies)
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
` (4 subsequent siblings)
5 siblings, 6 replies; 20+ messages in thread
From: Jan Beulich @ 2017-09-12 15:08 UTC (permalink / raw)
To: xen-devel
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, George Dunlap,
Andrew Cooper, Dario Faggioli, Ian Jackson, Tim Deegan,
Julien Grall, tamas, Meng Xu
By virtue of the struct xen_domctl container structure, most of them
are really just cluttering the name space.
While doing so,
- convert an enum typed (pt_irq_type_t) structure field to a fixed
width type,
- make x86's paging_domctl() and descendants take a properly typed
handle,
- add const in a few places.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -903,7 +903,7 @@ int xc_vcpu_get_extstate(xc_interface *x
uint32_t vcpu,
xc_vcpu_extstate_t *extstate);
-typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
+typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
int xc_vcpu_getinfo(xc_interface *xch,
uint32_t domid,
uint32_t vcpu,
@@ -916,7 +916,7 @@ long long xc_domain_get_cpu_usage(xc_int
int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
xen_domain_handle_t handle);
-typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
+typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
int xc_shadow_control(xc_interface *xch,
uint32_t domid,
unsigned int sop,
--- a/tools/libxc/xc_domain.c
+++ b/tools/libxc/xc_domain.c
@@ -1714,8 +1714,7 @@ int xc_domain_update_msi_irq(
uint64_t gtable)
{
int rc;
- xen_domctl_bind_pt_irq_t *bind;
-
+ struct xen_domctl_bind_pt_irq *bind;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_bind_pt_irq;
@@ -1740,8 +1739,7 @@ int xc_domain_unbind_msi_irq(
uint32_t gflags)
{
int rc;
- xen_domctl_bind_pt_irq_t *bind;
-
+ struct xen_domctl_bind_pt_irq *bind;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
@@ -1770,7 +1768,7 @@ static int xc_domain_bind_pt_irq_int(
uint16_t spi)
{
int rc;
- xen_domctl_bind_pt_irq_t * bind;
+ struct xen_domctl_bind_pt_irq *bind;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_bind_pt_irq;
@@ -1828,7 +1826,7 @@ static int xc_domain_unbind_pt_irq_int(
uint8_t spi)
{
int rc;
- xen_domctl_bind_pt_irq_t * bind;
+ struct xen_domctl_bind_pt_irq *bind;
DECLARE_DOMCTL;
domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
--- a/xen/arch/arm/domctl.c
+++ b/xen/arch/arm/domctl.c
@@ -41,7 +41,7 @@ long arch_do_domctl(struct xen_domctl *d
case XEN_DOMCTL_bind_pt_irq:
{
int rc;
- xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
+ struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
uint32_t irq = bind->u.spi.spi;
uint32_t virq = bind->machine_irq;
@@ -87,7 +87,7 @@ long arch_do_domctl(struct xen_domctl *d
case XEN_DOMCTL_unbind_pt_irq:
{
int rc;
- xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
+ struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
uint32_t irq = bind->u.spi.spi;
uint32_t virq = bind->machine_irq;
--- a/xen/arch/x86/domctl.c
+++ b/xen/arch/x86/domctl.c
@@ -48,7 +48,7 @@ static int gdbsx_guest_mem_io(domid_t do
}
static int update_domain_cpuid_info(struct domain *d,
- const xen_domctl_cpuid_t *ctl)
+ const struct xen_domctl_cpuid *ctl)
{
struct cpuid_policy *p = d->arch.cpuid;
const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
@@ -363,8 +363,7 @@ long arch_do_domctl(
{
case XEN_DOMCTL_shadow_op:
- ret = paging_domctl(d, &domctl->u.shadow_op,
- guest_handle_cast(u_domctl, void), 0);
+ ret = paging_domctl(d, &domctl->u.shadow_op, u_domctl, 0);
if ( ret == -ERESTART )
return hypercall_create_continuation(__HYPERVISOR_arch_1,
"h", u_domctl);
@@ -707,7 +706,7 @@ long arch_do_domctl(
case XEN_DOMCTL_bind_pt_irq:
{
- xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
+ struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
int irq;
ret = -EINVAL;
@@ -738,7 +737,7 @@ long arch_do_domctl(
case XEN_DOMCTL_unbind_pt_irq:
{
- xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
+ struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
int irq = domain_pirq_to_irq(d, bind->machine_irq);
ret = -EPERM;
--- a/xen/arch/x86/hvm/vioapic.c
+++ b/xen/arch/x86/hvm/vioapic.c
@@ -162,7 +162,7 @@ static int vioapic_hwdom_map_gsi(unsigne
unsigned int pol)
{
struct domain *currd = current->domain;
- xen_domctl_bind_pt_irq_t pt_irq_bind = {
+ struct xen_domctl_bind_pt_irq pt_irq_bind = {
.irq_type = PT_IRQ_TYPE_PCI,
.machine_irq = gsi,
};
--- a/xen/arch/x86/mm/hap/hap.c
+++ b/xen/arch/x86/mm/hap/hap.c
@@ -608,8 +608,8 @@ out:
paging_unlock(d);
}
-int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
int rc;
bool preempted = false;
--- a/xen/arch/x86/mm/mem_sharing.c
+++ b/xen/arch/x86/mm/mem_sharing.c
@@ -1606,7 +1606,7 @@ out:
return rc;
}
-int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
+int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
{
int rc;
--- a/xen/arch/x86/mm/paging.c
+++ b/xen/arch/x86/mm/paging.c
@@ -674,8 +674,9 @@ void paging_vcpu_init(struct vcpu *v)
}
-int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl, bool_t resuming)
+int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
+ bool_t resuming)
{
int rc;
@@ -775,8 +776,7 @@ long paging_domctl_continuation(XEN_GUES
{
if ( domctl_lock_acquire() )
{
- ret = paging_domctl(d, &op.u.shadow_op,
- guest_handle_cast(u_domctl, void), 1);
+ ret = paging_domctl(d, &op.u.shadow_op, u_domctl, 1);
domctl_lock_release();
}
--- a/xen/arch/x86/mm/shadow/common.c
+++ b/xen/arch/x86/mm/shadow/common.c
@@ -3809,8 +3809,8 @@ out:
/* Shadow-control XEN_DOMCTL dispatcher */
int shadow_domctl(struct domain *d,
- xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+ struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
int rc;
bool preempted = false;
--- a/xen/common/domctl.c
+++ b/xen/common/domctl.c
@@ -243,7 +243,7 @@ void domctl_lock_release(void)
}
static inline
-int vcpuaffinity_params_invalid(const xen_domctl_vcpuaffinity_t *vcpuaff)
+int vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff)
{
return vcpuaff->flags == 0 ||
((vcpuaff->flags & XEN_VCPUAFFINITY_HARD) &&
@@ -690,7 +690,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_DOMCTL_getvcpuaffinity:
{
struct vcpu *v;
- xen_domctl_vcpuaffinity_t *vcpuaff = &op->u.vcpuaffinity;
+ struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
ret = -EINVAL;
if ( vcpuaff->vcpu >= d->max_vcpus )
--- a/xen/common/sched_rt.c
+++ b/xen/common/sched_rt.c
@@ -1345,7 +1345,7 @@ rt_dom_cntl(
struct vcpu *v;
unsigned long flags;
int rc = 0;
- xen_domctl_schedparam_vcpu_t local_sched;
+ struct xen_domctl_schedparam_vcpu local_sched;
s_time_t period, budget;
uint32_t index = 0;
--- a/xen/common/vm_event.c
+++ b/xen/common/vm_event.c
@@ -41,7 +41,7 @@
static int vm_event_enable(
struct domain *d,
- xen_domctl_vm_event_op_t *vec,
+ struct xen_domctl_vm_event_op *vec,
struct vm_event_domain **ved,
int pause_flag,
int param,
@@ -587,7 +587,7 @@ void vm_event_cleanup(struct domain *d)
#endif
}
-int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
+int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl)
{
int rc;
--- a/xen/drivers/passthrough/io.c
+++ b/xen/drivers/passthrough/io.c
@@ -276,7 +276,7 @@ static struct vcpu *vector_hashing_dest(
}
int pt_irq_create_bind(
- struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
+ struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci;
struct hvm_pirq_dpci *pirq_dpci;
@@ -620,7 +620,7 @@ int pt_irq_create_bind(
}
int pt_irq_destroy_bind(
- struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
+ struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
{
struct hvm_irq_dpci *hvm_irq_dpci;
struct hvm_pirq_dpci *pirq_dpci;
--- a/xen/include/asm-x86/hap.h
+++ b/xen/include/asm-x86/hap.h
@@ -34,8 +34,8 @@
/* hap domain level functions */
/************************************************/
void hap_domain_init(struct domain *d);
-int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
int hap_enable(struct domain *d, u32 mode);
void hap_final_teardown(struct domain *d);
void hap_teardown(struct domain *d, bool *preempted);
--- a/xen/include/asm-x86/mem_sharing.h
+++ b/xen/include/asm-x86/mem_sharing.h
@@ -87,7 +87,7 @@ int mem_sharing_notify_enomem(struct dom
bool_t allow_sleep);
int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
int mem_sharing_domctl(struct domain *d,
- xen_domctl_mem_sharing_op_t *mec);
+ struct xen_domctl_mem_sharing_op *mec);
void mem_sharing_init(void);
/* Scans the p2m and relinquishes any shared pages, destroying
--- a/xen/include/asm-x86/paging.h
+++ b/xen/include/asm-x86/paging.h
@@ -202,8 +202,9 @@ int paging_domain_init(struct domain *d,
/* Handler for paging-control ops: operations from user-space to enable
* and disable ephemeral shadow modes (test mode and log-dirty mode) and
* manipulate the log-dirty bitmap. */
-int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl, bool_t resuming);
+int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
+ bool_t resuming);
/* Helper hypercall for dealing with continuations. */
long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
--- a/xen/include/asm-x86/shadow.h
+++ b/xen/include/asm-x86/shadow.h
@@ -69,8 +69,8 @@ int shadow_track_dirty_vram(struct domai
* and disable ephemeral shadow modes (test mode and log-dirty mode) and
* manipulate the log-dirty bitmap. */
int shadow_domctl(struct domain *d,
- xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl);
+ struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
/* Call when destroying a domain */
void shadow_teardown(struct domain *d, bool *preempted);
@@ -106,8 +106,9 @@ static inline void sh_remove_shadows(str
static inline void shadow_blow_tables_per_domain(struct domain *d) {}
-static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
- XEN_GUEST_HANDLE_PARAM(void) u_domctl)
+static inline int shadow_domctl(struct domain *d,
+ struct xen_domctl_shadow_op *sc,
+ XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
{
return -EINVAL;
}
--- a/xen/include/public/domctl.h
+++ b/xen/include/public/domctl.h
@@ -66,8 +66,6 @@ struct xen_domctl_createdomain {
uint32_t flags;
struct xen_arch_domainconfig config;
};
-typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
/* XEN_DOMCTL_getdomaininfo */
struct xen_domctl_getdomaininfo {
@@ -133,8 +131,6 @@ struct xen_domctl_getmemlist {
/* OUT variables. */
uint64_aligned_t num_pfns;
};
-typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
/* XEN_DOMCTL_getpageframeinfo */
@@ -225,8 +221,6 @@ struct xen_domctl_shadow_op_stats {
uint32_t fault_count;
uint32_t dirty_count;
};
-typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
struct xen_domctl_shadow_op {
/* IN variables. */
@@ -244,8 +238,6 @@ struct xen_domctl_shadow_op {
uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
struct xen_domctl_shadow_op_stats stats;
};
-typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
/* XEN_DOMCTL_max_mem */
@@ -253,8 +245,6 @@ struct xen_domctl_max_mem {
/* IN variables. */
uint64_aligned_t max_memkb;
};
-typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
/* XEN_DOMCTL_setvcpucontext */
@@ -263,8 +253,6 @@ struct xen_domctl_vcpucontext {
uint32_t vcpu; /* IN */
XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
};
-typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
/* XEN_DOMCTL_getvcpuinfo */
@@ -278,8 +266,6 @@ struct xen_domctl_getvcpuinfo {
uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
uint32_t cpu; /* current mapping */
};
-typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
/* Get/set the NUMA node(s) with which the guest has affinity with. */
@@ -288,8 +274,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvc
struct xen_domctl_nodeaffinity {
struct xenctl_bitmap nodemap;/* IN */
};
-typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
/* Get/set which physical cpus a vcpu can execute on. */
@@ -327,16 +311,12 @@ struct xen_domctl_vcpuaffinity {
struct xenctl_bitmap cpumap_hard;
struct xenctl_bitmap cpumap_soft;
};
-typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
/* XEN_DOMCTL_max_vcpus */
struct xen_domctl_max_vcpus {
uint32_t max; /* maximum number of vcpus */
};
-typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
/* XEN_DOMCTL_scheduler_op */
@@ -348,25 +328,25 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
#define XEN_SCHEDULER_RTDS 8
#define XEN_SCHEDULER_NULL 9
-typedef struct xen_domctl_sched_credit {
+struct xen_domctl_sched_credit {
uint16_t weight;
uint16_t cap;
-} xen_domctl_sched_credit_t;
+};
-typedef struct xen_domctl_sched_credit2 {
+struct xen_domctl_sched_credit2 {
uint16_t weight;
-} xen_domctl_sched_credit2_t;
+};
-typedef struct xen_domctl_sched_rtds {
+struct xen_domctl_sched_rtds {
uint32_t period;
uint32_t budget;
-} xen_domctl_sched_rtds_t;
+};
typedef struct xen_domctl_schedparam_vcpu {
union {
- xen_domctl_sched_credit_t credit;
- xen_domctl_sched_credit2_t credit2;
- xen_domctl_sched_rtds_t rtds;
+ struct xen_domctl_sched_credit credit;
+ struct xen_domctl_sched_credit2 credit2;
+ struct xen_domctl_sched_rtds rtds;
} u;
uint32_t vcpuid;
} xen_domctl_schedparam_vcpu_t;
@@ -393,9 +373,9 @@ struct xen_domctl_scheduler_op {
uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
/* IN/OUT */
union {
- xen_domctl_sched_credit_t credit;
- xen_domctl_sched_credit2_t credit2;
- xen_domctl_sched_rtds_t rtds;
+ struct xen_domctl_sched_credit credit;
+ struct xen_domctl_sched_credit2 credit2;
+ struct xen_domctl_sched_rtds rtds;
struct {
XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
/*
@@ -407,24 +387,18 @@ struct xen_domctl_scheduler_op {
} v;
} u;
};
-typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
/* XEN_DOMCTL_setdomainhandle */
struct xen_domctl_setdomainhandle {
xen_domain_handle_t handle;
};
-typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
/* XEN_DOMCTL_setdebugging */
struct xen_domctl_setdebugging {
uint8_t enable;
};
-typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
/* XEN_DOMCTL_irq_permission */
@@ -432,8 +406,6 @@ struct xen_domctl_irq_permission {
uint8_t pirq;
uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
};
-typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
/* XEN_DOMCTL_iomem_permission */
@@ -442,8 +414,6 @@ struct xen_domctl_iomem_permission {
uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
};
-typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
/* XEN_DOMCTL_ioport_permission */
@@ -452,42 +422,34 @@ struct xen_domctl_ioport_permission {
uint32_t nr_ports; /* size of port range */
uint8_t allow_access; /* allow or deny access to range? */
};
-typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
/* XEN_DOMCTL_hypercall_init */
struct xen_domctl_hypercall_init {
uint64_aligned_t gmfn; /* GMFN to be initialised */
};
-typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
/* XEN_DOMCTL_settimeoffset */
struct xen_domctl_settimeoffset {
int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
};
-typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
/* XEN_DOMCTL_gethvmcontext */
/* XEN_DOMCTL_sethvmcontext */
-typedef struct xen_domctl_hvmcontext {
+struct xen_domctl_hvmcontext {
uint32_t size; /* IN/OUT: size of buffer / bytes filled */
XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
* gethvmcontext with NULL
* buffer to get size req'd */
-} xen_domctl_hvmcontext_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
+};
/* XEN_DOMCTL_set_address_size */
/* XEN_DOMCTL_get_address_size */
-typedef struct xen_domctl_address_size {
+struct xen_domctl_address_size {
uint32_t size;
-} xen_domctl_address_size_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
+};
/* XEN_DOMCTL_sendtrigger */
@@ -500,8 +462,6 @@ struct xen_domctl_sendtrigger {
uint32_t trigger; /* IN */
uint32_t vcpu; /* IN */
};
-typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
/* Assign a device to a guest. Sets up IOMMU structures. */
@@ -536,8 +496,6 @@ struct xen_domctl_assign_device {
} dt;
} u;
};
-typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
/* Retrieve sibling devices infomation of machine_sbdf */
/* XEN_DOMCTL_get_device_group */
@@ -547,22 +505,20 @@ struct xen_domctl_get_device_group {
uint32_t num_sdevs; /* OUT */
XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
};
-typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
/* Pass-through interrupts: bind real irq -> hvm devfn. */
/* XEN_DOMCTL_bind_pt_irq */
/* XEN_DOMCTL_unbind_pt_irq */
-typedef enum pt_irq_type_e {
+enum pt_irq_type {
PT_IRQ_TYPE_PCI,
PT_IRQ_TYPE_ISA,
PT_IRQ_TYPE_MSI,
PT_IRQ_TYPE_MSI_TRANSLATE,
PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
-} pt_irq_type_t;
+};
struct xen_domctl_bind_pt_irq {
uint32_t machine_irq;
- pt_irq_type_t irq_type;
+ uint32_t irq_type; /* enum pt_irq_type */
union {
struct {
@@ -590,8 +546,6 @@ struct xen_domctl_bind_pt_irq {
} spi;
} u;
};
-typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
/* Bind machine I/O address range -> HVM address range. */
@@ -613,8 +567,6 @@ struct xen_domctl_memory_mapping {
uint32_t add_mapping; /* add or remove mapping */
uint32_t padding; /* padding for 64-bit aligned structure */
};
-typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
/* Bind machine I/O port range -> HVM I/O port range. */
@@ -625,8 +577,6 @@ struct xen_domctl_ioport_mapping {
uint32_t nr_ports; /* size of port range */
uint32_t add_mapping; /* add or remove mapping */
};
-typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
/*
@@ -645,8 +595,6 @@ struct xen_domctl_pin_mem_cacheattr {
uint64_aligned_t start, end;
uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
};
-typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
/* XEN_DOMCTL_set_ext_vcpucontext */
@@ -678,8 +626,6 @@ struct xen_domctl_ext_vcpucontext {
#endif
#endif
};
-typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
/*
* Set the target domain for a domain
@@ -688,8 +634,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_v
struct xen_domctl_set_target {
domid_t target;
};
-typedef struct xen_domctl_set_target xen_domctl_set_target_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
#if defined(__i386__) || defined(__x86_64__)
# define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
@@ -701,8 +645,6 @@ struct xen_domctl_cpuid {
uint32_t ecx;
uint32_t edx;
};
-typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
#endif
/*
@@ -725,8 +667,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid
struct xen_domctl_subscribe {
uint32_t port; /* IN */
};
-typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
/*
* Define the maximum machine address size which should be allocated
@@ -747,37 +687,34 @@ struct xen_domctl_debug_op {
uint32_t op; /* IN */
uint32_t vcpu; /* IN */
};
-typedef struct xen_domctl_debug_op xen_domctl_debug_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t);
/*
* Request a particular record from the HVM context
*/
/* XEN_DOMCTL_gethvmcontext_partial */
-typedef struct xen_domctl_hvmcontext_partial {
+struct xen_domctl_hvmcontext_partial {
uint32_t type; /* IN: Type of record required */
uint32_t instance; /* IN: Instance of that type */
uint64_aligned_t bufsz; /* IN: size of buffer */
XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */
-} xen_domctl_hvmcontext_partial_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
+};
/* XEN_DOMCTL_disable_migrate */
-typedef struct xen_domctl_disable_migrate {
+struct xen_domctl_disable_migrate {
uint32_t disable; /* IN: 1: disable migration and restore */
-} xen_domctl_disable_migrate_t;
+};
/* XEN_DOMCTL_gettscinfo */
/* XEN_DOMCTL_settscinfo */
-typedef struct xen_domctl_tsc_info {
+struct xen_domctl_tsc_info {
/* IN/OUT */
uint32_t tsc_mode;
uint32_t gtsc_khz;
uint32_t incarnation;
uint32_t pad;
uint64_aligned_t elapsed_nsec;
-} xen_domctl_tsc_info_t;
+};
/* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
struct xen_domctl_gdbsx_memio {
@@ -885,8 +822,6 @@ struct xen_domctl_vm_event_op {
uint32_t port; /* OUT: event channel for ring */
};
-typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
/*
* Memory sharing operations
@@ -902,8 +837,6 @@ struct xen_domctl_mem_sharing_op {
uint8_t enable; /* CONTROL */
} u;
};
-typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
struct xen_domctl_audit_p2m {
/* OUT error counts */
@@ -911,14 +844,10 @@ struct xen_domctl_audit_p2m {
uint64_t m2p_bad;
uint64_t p2m_bad;
};
-typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
struct xen_domctl_set_virq_handler {
uint32_t virq; /* IN */
};
-typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
#if defined(__i386__) || defined(__x86_64__)
/* XEN_DOMCTL_setvcpuextstate */
@@ -941,8 +870,6 @@ struct xen_domctl_vcpuextstate {
uint64_aligned_t size;
XEN_GUEST_HANDLE_64(uint64) buffer;
};
-typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
#endif
/* XEN_DOMCTL_set_access_required: sets whether a memory event listener
@@ -952,14 +879,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpue
struct xen_domctl_set_access_required {
uint8_t access_required;
};
-typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
struct xen_domctl_set_broken_page_p2m {
uint64_aligned_t pfn;
};
-typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
/*
* XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
@@ -969,8 +892,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_b
struct xen_domctl_set_max_evtchn {
uint32_t max_port;
};
-typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
/*
* ARM: Clean and invalidate caches associated with given region of
@@ -980,8 +901,6 @@ struct xen_domctl_cacheflush {
/* IN: page range to flush. */
xen_pfn_t start_pfn, nr_pfns;
};
-typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
#if defined(__i386__) || defined(__x86_64__)
struct xen_domctl_vcpu_msr {
@@ -1014,8 +933,6 @@ struct xen_domctl_vcpu_msrs {
uint32_t msr_count; /* IN/OUT */
XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
};
-typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
#endif
/* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
@@ -1052,8 +969,6 @@ struct xen_domctl_vnuma {
*/
XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
};
-typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
struct xen_domctl_psr_cmt_op {
#define XEN_DOMCTL_PSR_CMT_OP_DETACH 0
@@ -1062,8 +977,6 @@ struct xen_domctl_psr_cmt_op {
uint32_t cmd;
uint32_t data;
};
-typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
/* XEN_DOMCTL_MONITOR_*
*
@@ -1144,8 +1057,6 @@ struct xen_domctl_monitor_op {
} debug_exception;
} u;
};
-typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
struct xen_domctl_psr_cat_op {
#define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
@@ -1160,8 +1071,6 @@ struct xen_domctl_psr_cat_op {
uint32_t target; /* IN */
uint64_t data; /* IN/OUT */
};
-typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
struct xen_domctl {
uint32_t cmd;
--- a/xen/include/xen/iommu.h
+++ b/xen/include/xen/iommu.h
@@ -96,8 +96,8 @@ void pt_pci_init(void);
struct pirq;
int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
-int pt_irq_create_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
-int pt_irq_destroy_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
+int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
+int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
--- a/xen/include/xen/vm_event.h
+++ b/xen/include/xen/vm_event.h
@@ -69,7 +69,7 @@ int vm_event_get_response(struct domain
void vm_event_resume(struct domain *d, struct vm_event_domain *ved);
-int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
+int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
XEN_GUEST_HANDLE_PARAM(void) u_domctl);
void vm_event_vcpu_pause(struct vcpu *v);
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
@ 2017-09-12 15:10 ` Jan Beulich
2017-09-12 15:57 ` Dario Faggioli
` (3 more replies)
2017-09-12 16:00 ` [PATCH 0/2] public/*ctl: " Wei Liu
` (3 subsequent siblings)
5 siblings, 4 replies; 20+ messages in thread
From: Jan Beulich @ 2017-09-12 15:10 UTC (permalink / raw)
To: xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Dario Faggioli, Ian Jackson, Robert VanVossen, Tim Deegan,
Ross Lagerwall, Julien Grall, josh.whitehead
By virtue of the struct xen_sysctl container structure, most of them
are really just cluttering the name space.
Signed-off-by: Jan Beulich <jbeulich@suse.com>
--- a/tools/libxc/include/xenctrl.h
+++ b/tools/libxc/include/xenctrl.h
@@ -1212,11 +1212,11 @@ int xc_readconsolering(xc_interface *xch
int xc_send_debug_keys(xc_interface *xch, char *keys);
int xc_set_parameters(xc_interface *xch, char *params);
-typedef xen_sysctl_physinfo_t xc_physinfo_t;
-typedef xen_sysctl_cputopo_t xc_cputopo_t;
-typedef xen_sysctl_numainfo_t xc_numainfo_t;
-typedef xen_sysctl_meminfo_t xc_meminfo_t;
-typedef xen_sysctl_pcitopoinfo_t xc_pcitopoinfo_t;
+typedef struct xen_sysctl_physinfo xc_physinfo_t;
+typedef struct xen_sysctl_cputopo xc_cputopo_t;
+typedef struct xen_sysctl_numainfo xc_numainfo_t;
+typedef struct xen_sysctl_meminfo xc_meminfo_t;
+typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
typedef uint32_t xc_cpu_to_node_t;
typedef uint32_t xc_cpu_to_socket_t;
@@ -1240,7 +1240,7 @@ int xc_machphys_mfn_list(xc_interface *x
unsigned long max_extents,
xen_pfn_t *extent_start);
-typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
+typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
int xc_getcpuinfo(xc_interface *xch, int max_cpus,
xc_cpuinfo_t *info, int *nr_cpus);
@@ -1853,8 +1853,8 @@ int xc_cpu_offline(xc_interface *xch, in
* cpufreq para name of this structure named
* same as sysfs file name of native linux
*/
-typedef xen_userspace_t xc_userspace_t;
-typedef xen_ondemand_t xc_ondemand_t;
+typedef struct xen_userspace xc_userspace_t;
+typedef struct xen_ondemand xc_ondemand_t;
struct xc_get_cpufreq_para {
/* IN/OUT variable */
--- a/tools/libxc/xc_misc.c
+++ b/tools/libxc/xc_misc.c
@@ -547,7 +547,7 @@ int xc_livepatch_upload(xc_interface *xc
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BUFFER(char, local);
DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
if ( !name || !payload )
{
@@ -594,12 +594,12 @@ int xc_livepatch_upload(xc_interface *xc
int xc_livepatch_get(xc_interface *xch,
char *name,
- xen_livepatch_status_t *status)
+ struct xen_livepatch_status *status)
{
int rc;
DECLARE_SYSCTL;
DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
if ( !name )
{
@@ -677,7 +677,7 @@ int xc_livepatch_get(xc_interface *xch,
* retrieved (if any).
*/
int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
- xen_livepatch_status_t *info,
+ struct xen_livepatch_status *info,
char *name, uint32_t *len,
unsigned int *done,
unsigned int *left)
@@ -837,7 +837,7 @@ static int _xc_livepatch_action(xc_inter
DECLARE_SYSCTL;
/* The size is figured out when we strlen(name) */
DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
- xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
+ struct xen_livepatch_name def_name = { };
def_name.size = strlen(name) + 1;
--- a/xen/arch/arm/sysctl.c
+++ b/xen/arch/arm/sysctl.c
@@ -12,7 +12,7 @@
#include <xen/hypercall.h>
#include <public/sysctl.h>
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { }
long arch_do_sysctl(struct xen_sysctl *sysctl,
XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
--- a/xen/arch/x86/sysctl.c
+++ b/xen/arch/x86/sysctl.c
@@ -72,7 +72,7 @@ long cpu_down_helper(void *data)
return ret;
}
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
{
memcpy(pi->hw_cap, boot_cpu_data.x86_capability,
min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability)));
--- a/xen/common/gcov/gcov.c
+++ b/xen/common/gcov/gcov.c
@@ -209,7 +209,7 @@ static int gcov_dump_all(XEN_GUEST_HANDL
return ret;
}
-int sysctl_gcov_op(xen_sysctl_gcov_op_t *op)
+int sysctl_gcov_op(struct xen_sysctl_gcov_op *op)
{
int ret;
--- a/xen/common/livepatch.c
+++ b/xen/common/livepatch.c
@@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
*/
static DEFINE_PER_CPU(bool_t, work_to_do);
-static int get_name(const xen_livepatch_name_t *name, char *n)
+static int get_name(const struct xen_livepatch_name *name, char *n)
{
if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
return -EINVAL;
@@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
return 0;
}
-static int verify_payload(const xen_sysctl_livepatch_upload_t *upload, char *n)
+static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
{
if ( get_name(&upload->name, n) )
return -EINVAL;
@@ -897,7 +897,7 @@ static int load_payload_data(struct payl
return rc;
}
-static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
+static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
{
struct payload *data, *found;
char n[XEN_LIVEPATCH_NAME_SIZE];
@@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
return rc;
}
-static int livepatch_get(xen_sysctl_livepatch_get_t *get)
+static int livepatch_get(struct xen_sysctl_livepatch_get *get)
{
struct payload *data;
int rc;
@@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
return 0;
}
-static int livepatch_list(xen_sysctl_livepatch_list_t *list)
+static int livepatch_list(struct xen_sysctl_livepatch_list *list)
{
- xen_livepatch_status_t status;
+ struct xen_livepatch_status status;
struct payload *data;
unsigned int idx = 0, i = 0;
int rc = 0;
@@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
return 0;
}
-static int livepatch_action(xen_sysctl_livepatch_action_t *action)
+static int livepatch_action(struct xen_sysctl_livepatch_action *action)
{
struct payload *data;
char n[XEN_LIVEPATCH_NAME_SIZE];
@@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
return rc;
}
-int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
+int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
{
int rc;
--- a/xen/common/perfc.c
+++ b/xen/common/perfc.c
@@ -152,8 +152,8 @@ void perfc_reset(unsigned char key)
arch_perfc_reset();
}
-static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
-static xen_sysctl_perfc_val_t *perfc_vals;
+static struct xen_sysctl_perfc_desc perfc_d[NR_PERFCTRS];
+static struct xen_sysctl_perfc_val *perfc_vals;
static unsigned int perfc_nbr_vals;
static cpumask_t perfc_cpumap;
@@ -190,7 +190,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
}
xfree(perfc_vals);
- perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
+ perfc_vals = xmalloc_array(struct xen_sysctl_perfc_val, perfc_nbr_vals);
}
if ( guest_handle_is_null(desc) )
@@ -241,7 +241,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
}
/* Dom0 control of perf counters */
-int perfc_control(xen_sysctl_perfc_op_t *pc)
+int perfc_control(struct xen_sysctl_perfc_op *pc)
{
static DEFINE_SPINLOCK(lock);
int rc;
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -694,7 +694,7 @@ static int
a653sched_adjust_global(const struct scheduler *ops,
struct xen_sysctl_scheduler_op *sc)
{
- xen_sysctl_arinc653_schedule_t local_sched;
+ struct xen_sysctl_arinc653_schedule local_sched;
int rc = -EINVAL;
switch ( sc->cmd )
--- a/xen/common/sched_credit.c
+++ b/xen/common/sched_credit.c
@@ -1240,7 +1240,7 @@ csched_sys_cntl(const struct scheduler *
struct xen_sysctl_scheduler_op *sc)
{
int rc = -EINVAL;
- xen_sysctl_credit_schedule_t *params = &sc->u.sched_credit;
+ struct xen_sysctl_credit_schedule *params = &sc->u.sched_credit;
struct csched_private *prv = CSCHED_PRIV(ops);
unsigned long flags;
--- a/xen/common/sched_credit2.c
+++ b/xen/common/sched_credit2.c
@@ -2443,7 +2443,7 @@ csched2_dom_cntl(
static int csched2_sys_cntl(const struct scheduler *ops,
struct xen_sysctl_scheduler_op *sc)
{
- xen_sysctl_credit2_schedule_t *params = &sc->u.sched_credit2;
+ struct xen_sysctl_credit2_schedule *params = &sc->u.sched_credit2;
struct csched2_private *prv = csched2_priv(ops);
unsigned long flags;
--- a/xen/common/spinlock.c
+++ b/xen/common/spinlock.c
@@ -380,7 +380,7 @@ void spinlock_profile_reset(unsigned cha
}
typedef struct {
- xen_sysctl_lockprof_op_t *pc;
+ struct xen_sysctl_lockprof_op *pc;
int rc;
} spinlock_profile_ucopy_t;
@@ -388,7 +388,7 @@ static void spinlock_profile_ucopy_elem(
int32_t type, int32_t idx, void *par)
{
spinlock_profile_ucopy_t *p = par;
- xen_sysctl_lockprof_data_t elem;
+ struct xen_sysctl_lockprof_data elem;
if ( p->rc )
return;
@@ -411,7 +411,7 @@ static void spinlock_profile_ucopy_elem(
}
/* Dom0 control of lock profiling */
-int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
+int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
{
int rc = 0;
spinlock_profile_ucopy_t par;
--- a/xen/common/sysctl.c
+++ b/xen/common/sysctl.c
@@ -250,7 +250,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_physinfo:
{
- xen_sysctl_physinfo_t *pi = &op->u.physinfo;
+ struct xen_sysctl_physinfo *pi = &op->u.physinfo;
memset(pi, 0, sizeof(*pi));
pi->threads_per_core =
@@ -276,7 +276,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_numainfo:
{
unsigned int i, j, num_nodes;
- xen_sysctl_numainfo_t *ni = &op->u.numainfo;
+ struct xen_sysctl_numainfo *ni = &op->u.numainfo;
bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
bool_t do_distance = !guest_handle_is_null(ni->distance);
@@ -284,7 +284,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
if ( do_meminfo || do_distance )
{
- xen_sysctl_meminfo_t meminfo = { 0 };
+ struct xen_sysctl_meminfo meminfo = { };
if ( num_nodes > ni->num_nodes )
num_nodes = ni->num_nodes;
@@ -346,12 +346,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
case XEN_SYSCTL_cputopoinfo:
{
unsigned int i, num_cpus;
- xen_sysctl_cputopoinfo_t *ti = &op->u.cputopoinfo;
+ struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
num_cpus = cpumask_last(&cpu_online_map) + 1;
if ( !guest_handle_is_null(ti->cputopo) )
{
- xen_sysctl_cputopo_t cputopo = { 0 };
+ struct xen_sysctl_cputopo cputopo = { };
if ( num_cpus > ti->num_cpus )
num_cpus = ti->num_cpus;
@@ -405,7 +405,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
#ifdef CONFIG_HAS_PCI
case XEN_SYSCTL_pcitopoinfo:
{
- xen_sysctl_pcitopoinfo_t *ti = &op->u.pcitopoinfo;
+ struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo;
unsigned int i = 0;
if ( guest_handle_is_null(ti->devs) ||
--- a/xen/common/trace.c
+++ b/xen/common/trace.c
@@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
/**
* tb_control - sysctl operations on trace buffers.
- * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
+ * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
*/
-int tb_control(xen_sysctl_tbuf_op_t *tbc)
+int tb_control(struct xen_sysctl_tbuf_op *tbc)
{
static DEFINE_SPINLOCK(lock);
int rc = 0;
--- a/xen/include/public/sysctl.h
+++ b/xen/include/public/sysctl.h
@@ -58,8 +58,6 @@ struct xen_sysctl_readconsole {
/* IN: Size of buffer; OUT: Bytes written to buffer. */
uint32_t count;
};
-typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
/* Get trace buffers machine base address */
/* XEN_SYSCTL_tbuf_op */
@@ -79,8 +77,6 @@ struct xen_sysctl_tbuf_op {
uint64_aligned_t buffer_mfn;
uint32_t size; /* Also an IN variable! */
};
-typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
/*
* Get physical information about the host machine
@@ -109,8 +105,6 @@ struct xen_sysctl_physinfo {
/* XEN_SYSCTL_PHYSCAP_??? */
uint32_t capabilities;
};
-typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
/*
* Get the ID of the current scheduler.
@@ -120,8 +114,6 @@ struct xen_sysctl_sched_id {
/* OUT variable */
uint32_t sched_id;
};
-typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
/* Interface for controlling Xen software performance counters. */
/* XEN_SYSCTL_perfc_op */
@@ -148,8 +140,6 @@ struct xen_sysctl_perfc_op {
/* counter values (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
};
-typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
/* XEN_SYSCTL_getdomaininfolist */
struct xen_sysctl_getdomaininfolist {
@@ -160,8 +150,6 @@ struct xen_sysctl_getdomaininfolist {
/* OUT variables. */
uint32_t num_domains;
};
-typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
/* Inject debug keys into Xen. */
/* XEN_SYSCTL_debug_keys */
@@ -170,8 +158,6 @@ struct xen_sysctl_debug_keys {
XEN_GUEST_HANDLE_64(char) keys;
uint32_t nr_keys;
};
-typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
/* Get physical CPU information. */
/* XEN_SYSCTL_getcpuinfo */
@@ -187,8 +173,6 @@ struct xen_sysctl_getcpuinfo {
/* OUT variables. */
uint32_t nr_cpus;
};
-typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
/* XEN_SYSCTL_availheap */
struct xen_sysctl_availheap {
@@ -199,8 +183,6 @@ struct xen_sysctl_availheap {
/* OUT variables. */
uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
};
-typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
/* XEN_SYSCTL_get_pmstat */
struct pm_px_val {
@@ -219,8 +201,6 @@ struct pm_px_stat {
XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
};
-typedef struct pm_px_stat pm_px_stat_t;
-DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
struct pm_cx_stat {
uint32_t nr; /* entry nr in triggers & residencies, including C0 */
@@ -259,8 +239,6 @@ struct xen_sysctl_get_pmstat {
/* other struct for tx, etc */
} u;
};
-typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
/* XEN_SYSCTL_cpu_hotplug */
struct xen_sysctl_cpu_hotplug {
@@ -270,8 +248,6 @@ struct xen_sysctl_cpu_hotplug {
#define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
uint32_t op; /* hotplug opcode */
};
-typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
/*
* Get/set xen power management, include
@@ -281,7 +257,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_h
struct xen_userspace {
uint32_t scaling_setspeed;
};
-typedef struct xen_userspace xen_userspace_t;
struct xen_ondemand {
uint32_t sampling_rate_max;
@@ -290,7 +265,6 @@ struct xen_ondemand {
uint32_t sampling_rate;
uint32_t up_threshold;
};
-typedef struct xen_ondemand xen_ondemand_t;
/*
* cpufreq para name of this structure named
@@ -461,8 +435,6 @@ struct xen_sysctl_lockprof_op {
/* profile information (or NULL) */
XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
};
-typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
/* XEN_SYSCTL_cputopoinfo */
#define XEN_INVALID_CORE_ID (~0U)
@@ -493,8 +465,6 @@ struct xen_sysctl_cputopoinfo {
uint32_t num_cpus;
XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
};
-typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
/* XEN_SYSCTL_numainfo */
#define XEN_INVALID_MEM_SZ (~0U)
@@ -535,8 +505,6 @@ struct xen_sysctl_numainfo {
*/
XEN_GUEST_HANDLE_64(uint32) distance;
};
-typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
/* XEN_SYSCTL_cpupool_op */
#define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
@@ -556,8 +524,6 @@ struct xen_sysctl_cpupool_op {
uint32_t n_dom; /* OUT: I */
struct xenctl_bitmap cpumap; /* OUT: IF */
};
-typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
/*
* Error return values of cpupool operations:
@@ -637,14 +603,10 @@ struct xen_sysctl_credit_schedule {
unsigned tslice_ms;
unsigned ratelimit_us;
};
-typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
struct xen_sysctl_credit2_schedule {
unsigned ratelimit_us;
};
-typedef struct xen_sysctl_credit2_schedule xen_sysctl_credit2_schedule_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit2_schedule_t);
/* XEN_SYSCTL_scheduler_op */
/* Set or get info? */
@@ -662,8 +624,6 @@ struct xen_sysctl_scheduler_op {
struct xen_sysctl_credit2_schedule sched_credit2;
} u;
};
-typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
/*
* Output format of gcov data:
@@ -696,8 +656,6 @@ struct xen_sysctl_gcov_op {
uint32_t size; /* IN/OUT: size of the buffer */
XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
};
-typedef struct xen_sysctl_gcov_op xen_sysctl_gcov_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_gcov_op_t);
#define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
#define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
@@ -716,8 +674,6 @@ struct xen_sysctl_psr_cmt_op {
} l3_cache;
} u;
};
-typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
/* XEN_SYSCTL_pcitopoinfo */
#define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
@@ -740,8 +696,6 @@ struct xen_sysctl_pcitopoinfo {
*/
XEN_GUEST_HANDLE_64(uint32) nodes;
};
-typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
#define XEN_SYSCTL_PSR_CAT_get_l3_info 0
#define XEN_SYSCTL_PSR_CAT_get_l2_info 1
@@ -757,8 +711,6 @@ struct xen_sysctl_psr_cat_op {
} cat_info;
} u;
};
-typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
#define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
@@ -863,8 +815,6 @@ struct xen_sysctl_tmem_op {
/* of them. */
} u;
};
-typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
/*
* XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
@@ -884,8 +834,6 @@ struct xen_sysctl_cpu_levelling_caps {
#define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx (1ul << 8) /* 0x00000007:0.ebx */
uint32_t caps;
};
-typedef struct xen_sysctl_cpu_levelling_caps xen_sysctl_cpu_levelling_caps_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_levelling_caps_t);
/*
* XEN_SYSCTL_get_cpu_featureset (x86 specific)
@@ -909,8 +857,6 @@ struct xen_sysctl_cpu_featureset {
* maximum length. */
XEN_GUEST_HANDLE_64(uint32) features; /* OUT: */
};
-typedef struct xen_sysctl_featureset xen_sysctl_featureset_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_featureset_t);
/*
* XEN_SYSCTL_LIVEPATCH_op
@@ -966,8 +912,6 @@ struct xen_livepatch_name {
XEN_LIVEPATCH_NAME_SIZE. */
uint16_t pad[3]; /* IN: MUST be zero. */
};
-typedef struct xen_livepatch_name xen_livepatch_name_t;
-DEFINE_XEN_GUEST_HANDLE(xen_livepatch_name_t);
/*
* Upload a payload to the hypervisor. The payload is verified
@@ -986,12 +930,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_livepatch_na
*/
#define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
struct xen_sysctl_livepatch_upload {
- xen_livepatch_name_t name; /* IN, name of the patch. */
+ struct xen_livepatch_name name; /* IN, name of the patch. */
uint64_t size; /* IN, size of the ELF file. */
XEN_GUEST_HANDLE_64(uint8) payload; /* IN, the ELF file. */
};
-typedef struct xen_sysctl_livepatch_upload xen_sysctl_livepatch_upload_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_upload_t);
/*
* Retrieve an status of an specific payload.
@@ -1013,11 +955,9 @@ typedef struct xen_livepatch_status xen_
DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
struct xen_sysctl_livepatch_get {
- xen_livepatch_name_t name; /* IN, name of the payload. */
- xen_livepatch_status_t status; /* IN/OUT, state of it. */
+ struct xen_livepatch_name name; /* IN, name of the payload. */
+ struct xen_livepatch_status status; /* IN/OUT, state of it. */
};
-typedef struct xen_sysctl_livepatch_get xen_sysctl_livepatch_get_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_get_t);
/*
* Retrieve an array of abbreviated status and names of payloads that are
@@ -1059,8 +999,6 @@ struct xen_sysctl_livepatch_list {
XEN_GUEST_HANDLE_64(uint32) len; /* OUT: Array of lengths of name's.
Must have nr of them. */
};
-typedef struct xen_sysctl_livepatch_list xen_sysctl_livepatch_list_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_list_t);
/*
* Perform an operation on the payload structure referenced by the `name` field.
@@ -1069,7 +1007,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livep
*/
#define XEN_SYSCTL_LIVEPATCH_ACTION 3
struct xen_sysctl_livepatch_action {
- xen_livepatch_name_t name; /* IN, name of the patch. */
+ struct xen_livepatch_name name; /* IN, name of the patch. */
#define LIVEPATCH_ACTION_UNLOAD 1
#define LIVEPATCH_ACTION_REVERT 2
#define LIVEPATCH_ACTION_APPLY 3
@@ -1080,21 +1018,17 @@ struct xen_sysctl_livepatch_action {
/* Or upper bound of time (ns) */
/* for operation to take. */
};
-typedef struct xen_sysctl_livepatch_action xen_sysctl_livepatch_action_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_action_t);
struct xen_sysctl_livepatch_op {
uint32_t cmd; /* IN: XEN_SYSCTL_LIVEPATCH_*. */
uint32_t pad; /* IN: Always zero. */
union {
- xen_sysctl_livepatch_upload_t upload;
- xen_sysctl_livepatch_list_t list;
- xen_sysctl_livepatch_get_t get;
- xen_sysctl_livepatch_action_t action;
+ struct xen_sysctl_livepatch_upload upload;
+ struct xen_sysctl_livepatch_list list;
+ struct xen_sysctl_livepatch_get get;
+ struct xen_sysctl_livepatch_action action;
} u;
};
-typedef struct xen_sysctl_livepatch_op xen_sysctl_livepatch_op_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_op_t);
/*
* XEN_SYSCTL_set_parameter
@@ -1111,8 +1045,6 @@ struct xen_sysctl_set_parameter {
uint16_t size; /* IN: size of parameters. */
uint16_t pad[3]; /* IN: MUST be zero. */
};
-typedef struct xen_sysctl_set_parameter xen_sysctl_set_parameter_t;
-DEFINE_XEN_GUEST_HANDLE(xen_sysctl_set_parameter_t);
struct xen_sysctl {
uint32_t cmd;
--- a/xen/include/xen/gcov.h
+++ b/xen/include/xen/gcov.h
@@ -3,7 +3,7 @@
#ifdef CONFIG_GCOV
#include <public/sysctl.h>
-int sysctl_gcov_op(xen_sysctl_gcov_op_t *op);
+int sysctl_gcov_op(struct xen_sysctl_gcov_op *op);
#endif
#endif /* _XEN_GCOV_H */
--- a/xen/include/xen/sched.h
+++ b/xen/include/xen/sched.h
@@ -914,7 +914,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
void schedule_dump(struct cpupool *c);
extern void dump_runq(unsigned char key);
-void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
+void arch_do_physinfo(struct xen_sysctl_physinfo *pi);
#endif /* __SCHED_H__ */
--- a/xen/include/xen/spinlock.h
+++ b/xen/include/xen/spinlock.h
@@ -110,7 +110,7 @@ void _lock_profile_deregister_struct(int
#define lock_profile_deregister_struct(type, ptr) \
_lock_profile_deregister_struct(type, &((ptr)->profile_head))
-extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
+extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
extern void spinlock_profile_printall(unsigned char key);
extern void spinlock_profile_reset(unsigned char key);
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
@ 2017-09-12 15:33 ` Razvan Cojocaru
2017-09-12 15:58 ` Dario Faggioli
` (4 subsequent siblings)
5 siblings, 0 replies; 20+ messages in thread
From: Razvan Cojocaru @ 2017-09-12 15:33 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Dario Faggioli, Ian Jackson, Tim Deegan, Julien Grall, tamas,
Meng Xu
On 09/12/2017 06:08 PM, Jan Beulich wrote:
> By virtue of the struct xen_domctl container structure, most of them
> are really just cluttering the name space.
>
> While doing so,
> - convert an enum typed (pt_irq_type_t) structure field to a fixed
> width type,
> - make x86's paging_domctl() and descendants take a properly typed
> handle,
> - add const in a few places.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Razvan Cojocaru <rcojocaru@bitdefender.com>
Thanks,
Razvan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
@ 2017-09-12 15:57 ` Dario Faggioli
2017-09-19 15:31 ` Ping: " Jan Beulich
` (2 subsequent siblings)
3 siblings, 0 replies; 20+ messages in thread
From: Dario Faggioli @ 2017-09-12 15:57 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Robert VanVossen, Tim Deegan, Ross Lagerwall,
Julien Grall, josh.whitehead
[-- Attachment #1.1: Type: text/plain, Size: 561 bytes --]
On Tue, 2017-09-12 at 09:10 -0600, Jan Beulich wrote:
> By virtue of the struct xen_sysctl container structure, most of them
> are really just cluttering the name space.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Acked-by: Dario Faggioli <dario.faggioli@citrix.com>
Regards,
Dario
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 819 bytes --]
[-- Attachment #2: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
2017-09-12 15:33 ` Razvan Cojocaru
@ 2017-09-12 15:58 ` Dario Faggioli
2017-09-12 16:11 ` Meng Xu
` (3 subsequent siblings)
5 siblings, 0 replies; 20+ messages in thread
From: Dario Faggioli @ 2017-09-12 15:58 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, George Dunlap,
Andrew Cooper, Ian Jackson, Tim Deegan, Julien Grall, tamas,
Meng Xu
[-- Attachment #1.1: Type: text/plain, Size: 789 bytes --]
On Tue, 2017-09-12 at 09:08 -0600, Jan Beulich wrote:
> By virtue of the struct xen_domctl container structure, most of them
> are really just cluttering the name space.
>
> While doing so,
> - convert an enum typed (pt_irq_type_t) structure field to a fixed
> width type,
> - make x86's paging_domctl() and descendants take a properly typed
> handle,
> - add const in a few places.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Acked-by: Dario Faggioli <dario.faggioli@citrix.com>
Regards,
Dario
--
<<This happens because I choose it to happen!>> (Raistlin Majere)
-----------------------------------------------------------------
Dario Faggioli, Ph.D, http://about.me/dario.faggioli
Senior Software Engineer, Citrix Systems R&D Ltd., Cambridge (UK)
[-- Attachment #1.2: This is a digitally signed message part --]
[-- Type: application/pgp-signature, Size: 819 bytes --]
[-- Attachment #2: Type: text/plain, Size: 127 bytes --]
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
@ 2017-09-12 16:00 ` Wei Liu
2017-09-12 16:12 ` Andrew Cooper
` (2 subsequent siblings)
5 siblings, 0 replies; 20+ messages in thread
From: Wei Liu @ 2017-09-12 16:00 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Tim Deegan, Julien Grall, xen-devel
On Tue, Sep 12, 2017 at 08:25:47AM -0600, Jan Beulich wrote:
> 1: public/domctl: drop unnecessary typedefs and handles
> 2: public/sysctl: drop unnecessary typedefs and handles
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Acked-by: Wei Liu <wei.liu2@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
2017-09-12 15:33 ` Razvan Cojocaru
2017-09-12 15:58 ` Dario Faggioli
@ 2017-09-12 16:11 ` Meng Xu
2017-09-19 15:28 ` Ping: " Jan Beulich
` (2 subsequent siblings)
5 siblings, 0 replies; 20+ messages in thread
From: Meng Xu @ 2017-09-12 16:11 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, George Dunlap,
Andrew Cooper, Dario Faggioli, Ian Jackson, Tim Deegan,
Julien Grall, tamas, xen-devel
On Tue, Sep 12, 2017 at 11:08 AM, Jan Beulich <JBeulich@suse.com> wrote:
>
> By virtue of the struct xen_domctl container structure, most of them
> are really just cluttering the name space.
>
> While doing so,
> - convert an enum typed (pt_irq_type_t) structure field to a fixed
> width type,
> - make x86's paging_domctl() and descendants take a properly typed
> handle,
> - add const in a few places.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Meng Xu <mengxu@cis.upenn.edu>
Thanks,
Meng
--
Meng Xu
Ph.D. Candidate in Computer and Information Science
University of Pennsylvania
http://www.cis.upenn.edu/~mengxu/
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
` (2 preceding siblings ...)
2017-09-12 16:00 ` [PATCH 0/2] public/*ctl: " Wei Liu
@ 2017-09-12 16:12 ` Andrew Cooper
2017-09-12 16:15 ` Julien Grall
2017-09-20 11:07 ` Julien Grall
5 siblings, 0 replies; 20+ messages in thread
From: Andrew Cooper @ 2017-09-12 16:12 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Tim Deegan,
Ian Jackson, Julien Grall
On 12/09/17 15:25, Jan Beulich wrote:
> 1: public/domctl: drop unnecessary typedefs and handles
> 2: public/sysctl: drop unnecessary typedefs and handles
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
Acked-by: Andrew Cooper <andrew.cooper3@citrix.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
` (3 preceding siblings ...)
2017-09-12 16:12 ` Andrew Cooper
@ 2017-09-12 16:15 ` Julien Grall
2017-09-20 11:07 ` Julien Grall
5 siblings, 0 replies; 20+ messages in thread
From: Julien Grall @ 2017-09-12 16:15 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Tim Deegan
Hi,
On 12/09/17 15:25, Jan Beulich wrote:
> 1: public/domctl: drop unnecessary typedefs and handles
> 2: public/sysctl: drop unnecessary typedefs and handles
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Julien Grall <julien.grall@arm.com>
Cheers,
>
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Ping: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
` (2 preceding siblings ...)
2017-09-12 16:11 ` Meng Xu
@ 2017-09-19 15:28 ` Jan Beulich
2017-09-20 10:39 ` George Dunlap
2017-09-20 14:46 ` Tamas K Lengyel
5 siblings, 0 replies; 20+ messages in thread
From: Jan Beulich @ 2017-09-19 15:28 UTC (permalink / raw)
To: George Dunlap, tamas, Tim Deegan
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, Andrew Cooper,
Dario Faggioli, Ian Jackson, Julien Grall, Meng Xu, xen-devel
>>> On 12.09.17 at 17:08, <JBeulich@suse.com> wrote:
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -608,8 +608,8 @@ out:
> paging_unlock(d);
> }
>
> -int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> +int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> int rc;
> bool preempted = false;
George (also parts further down)?
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -1606,7 +1606,7 @@ out:
> return rc;
> }
>
> -int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
> +int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
> {
> int rc;
>
Tamas (plus the corresponding header change)?
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3809,8 +3809,8 @@ out:
> /* Shadow-control XEN_DOMCTL dispatcher */
>
> int shadow_domctl(struct domain *d,
> - xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> + struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> int rc;
> bool preempted = false;
Tim (plus the corresponding header change)?
Thanks, Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Ping: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
2017-09-12 15:57 ` Dario Faggioli
@ 2017-09-19 15:31 ` Jan Beulich
2017-09-19 15:40 ` Konrad Rzeszutek Wilk
2017-09-20 10:41 ` George Dunlap
2017-09-20 18:07 ` Robert VanVossen
3 siblings, 1 reply; 20+ messages in thread
From: Jan Beulich @ 2017-09-19 15:31 UTC (permalink / raw)
To: Ross Lagerwall, josh.whitehead, Robert VanVossen, George Dunlap,
Konrad Rzeszutek Wilk
Cc: Stefano Stabellini, Wei Liu, Andrew Cooper, Dario Faggioli,
Ian Jackson, Tim Deegan, Julien Grall, xen-devel
>>> On 12.09.17 at 17:10, <JBeulich@suse.com> wrote:
> --- a/xen/common/livepatch.c
> +++ b/xen/common/livepatch.c
> @@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
> */
> static DEFINE_PER_CPU(bool_t, work_to_do);
>
> -static int get_name(const xen_livepatch_name_t *name, char *n)
> +static int get_name(const struct xen_livepatch_name *name, char *n)
> {
> if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
> return -EINVAL;
> @@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
> return 0;
> }
>
> -static int verify_payload(const xen_sysctl_livepatch_upload_t *upload, char *n)
> +static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
> {
> if ( get_name(&upload->name, n) )
> return -EINVAL;
> @@ -897,7 +897,7 @@ static int load_payload_data(struct payl
> return rc;
> }
>
> -static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
> +static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
> {
> struct payload *data, *found;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
> return rc;
> }
>
> -static int livepatch_get(xen_sysctl_livepatch_get_t *get)
> +static int livepatch_get(struct xen_sysctl_livepatch_get *get)
> {
> struct payload *data;
> int rc;
> @@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
> return 0;
> }
>
> -static int livepatch_list(xen_sysctl_livepatch_list_t *list)
> +static int livepatch_list(struct xen_sysctl_livepatch_list *list)
> {
> - xen_livepatch_status_t status;
> + struct xen_livepatch_status status;
> struct payload *data;
> unsigned int idx = 0, i = 0;
> int rc = 0;
> @@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
> return 0;
> }
>
> -static int livepatch_action(xen_sysctl_livepatch_action_t *action)
> +static int livepatch_action(struct xen_sysctl_livepatch_action *action)
> {
> struct payload *data;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
> return rc;
> }
>
> -int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
> +int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
> {
> int rc;
>
Konrad, Ross?
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -694,7 +694,7 @@ static int
> a653sched_adjust_global(const struct scheduler *ops,
> struct xen_sysctl_scheduler_op *sc)
> {
> - xen_sysctl_arinc653_schedule_t local_sched;
> + struct xen_sysctl_arinc653_schedule local_sched;
> int rc = -EINVAL;
>
> switch ( sc->cmd )
Robert, Josh?
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
>
> /**
> * tb_control - sysctl operations on trace buffers.
> - * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
> + * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
> */
> -int tb_control(xen_sysctl_tbuf_op_t *tbc)
> +int tb_control(struct xen_sysctl_tbuf_op *tbc)
> {
> static DEFINE_SPINLOCK(lock);
> int rc = 0;
George?
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: Ping: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-19 15:31 ` Ping: " Jan Beulich
@ 2017-09-19 15:40 ` Konrad Rzeszutek Wilk
0 siblings, 0 replies; 20+ messages in thread
From: Konrad Rzeszutek Wilk @ 2017-09-19 15:40 UTC (permalink / raw)
To: Jan Beulich, Ross Lagerwall, josh.whitehead, Robert VanVossen,
George Dunlap
Cc: Stefano Stabellini, Wei Liu, Andrew Cooper, Dario Faggioli,
Ian Jackson, Tim Deegan, Julien Grall, xen-devel
On September 19, 2017 11:31:40 AM EDT, Jan Beulich <JBeulich@suse.com> wrote:
>>>> On 12.09.17 at 17:10, <JBeulich@suse.com> wrote:
>> --- a/xen/common/livepatch.c
>> +++ b/xen/common/livepatch.c
>> @@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
>> */
>> static DEFINE_PER_CPU(bool_t, work_to_do);
>>
>> -static int get_name(const xen_livepatch_name_t *name, char *n)
>> +static int get_name(const struct xen_livepatch_name *name, char *n)
>> {
>> if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
>> return -EINVAL;
>> @@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
>> return 0;
>> }
>>
>> -static int verify_payload(const xen_sysctl_livepatch_upload_t
>*upload, char *n)
>> +static int verify_payload(const struct xen_sysctl_livepatch_upload
>*upload, char *n)
>> {
>> if ( get_name(&upload->name, n) )
>> return -EINVAL;
>> @@ -897,7 +897,7 @@ static int load_payload_data(struct payl
>> return rc;
>> }
>>
>> -static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
>> +static int livepatch_upload(struct xen_sysctl_livepatch_upload
>*upload)
>> {
>> struct payload *data, *found;
>> char n[XEN_LIVEPATCH_NAME_SIZE];
>> @@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
>> return rc;
>> }
>>
>> -static int livepatch_get(xen_sysctl_livepatch_get_t *get)
>> +static int livepatch_get(struct xen_sysctl_livepatch_get *get)
>> {
>> struct payload *data;
>> int rc;
>> @@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
>> return 0;
>> }
>>
>> -static int livepatch_list(xen_sysctl_livepatch_list_t *list)
>> +static int livepatch_list(struct xen_sysctl_livepatch_list *list)
>> {
>> - xen_livepatch_status_t status;
>> + struct xen_livepatch_status status;
>> struct payload *data;
>> unsigned int idx = 0, i = 0;
>> int rc = 0;
>> @@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
>> return 0;
>> }
>>
>> -static int livepatch_action(xen_sysctl_livepatch_action_t *action)
>> +static int livepatch_action(struct xen_sysctl_livepatch_action
>*action)
>> {
>> struct payload *data;
>> char n[XEN_LIVEPATCH_NAME_SIZE];
>> @@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
>> return rc;
>> }
>>
>> -int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
>> +int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
>> {
>> int rc;
>>
>
>Konrad, Ross?
Reviewed-by: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
>
>> --- a/xen/common/sched_arinc653.c
>> +++ b/xen/common/sched_arinc653.c
>> @@ -694,7 +694,7 @@ static int
>> a653sched_adjust_global(const struct scheduler *ops,
>> struct xen_sysctl_scheduler_op *sc)
>> {
>> - xen_sysctl_arinc653_schedule_t local_sched;
>> + struct xen_sysctl_arinc653_schedule local_sched;
>> int rc = -EINVAL;
>>
>> switch ( sc->cmd )
>
>Robert, Josh?
>
>> --- a/xen/common/trace.c
>> +++ b/xen/common/trace.c
>> @@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
>>
>> /**
>> * tb_control - sysctl operations on trace buffers.
>> - * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
>> + * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
>> */
>> -int tb_control(xen_sysctl_tbuf_op_t *tbc)
>> +int tb_control(struct xen_sysctl_tbuf_op *tbc)
>> {
>> static DEFINE_SPINLOCK(lock);
>> int rc = 0;
>
>George?
>
>Jan
Thanks!
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
` (3 preceding siblings ...)
2017-09-19 15:28 ` Ping: " Jan Beulich
@ 2017-09-20 10:39 ` George Dunlap
2017-09-20 14:46 ` Tamas K Lengyel
5 siblings, 0 replies; 20+ messages in thread
From: George Dunlap @ 2017-09-20 10:39 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, Andrew Cooper,
Dario Faggioli, Ian Jackson, Tim Deegan, Julien Grall,
Tamas K Lengyel, Meng Xu, xen-devel
On Tue, Sep 12, 2017 at 4:08 PM, Jan Beulich <JBeulich@suse.com> wrote:
> By virtue of the struct xen_domctl container structure, most of them
> are really just cluttering the name space.
>
> While doing so,
> - convert an enum typed (pt_irq_type_t) structure field to a fixed
> width type,
> - make x86's paging_domctl() and descendants take a properly typed
> handle,
> - add const in a few places.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
>
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -903,7 +903,7 @@ int xc_vcpu_get_extstate(xc_interface *x
> uint32_t vcpu,
> xc_vcpu_extstate_t *extstate);
>
> -typedef xen_domctl_getvcpuinfo_t xc_vcpuinfo_t;
> +typedef struct xen_domctl_getvcpuinfo xc_vcpuinfo_t;
> int xc_vcpu_getinfo(xc_interface *xch,
> uint32_t domid,
> uint32_t vcpu,
> @@ -916,7 +916,7 @@ long long xc_domain_get_cpu_usage(xc_int
> int xc_domain_sethandle(xc_interface *xch, uint32_t domid,
> xen_domain_handle_t handle);
>
> -typedef xen_domctl_shadow_op_stats_t xc_shadow_op_stats_t;
> +typedef struct xen_domctl_shadow_op_stats xc_shadow_op_stats_t;
> int xc_shadow_control(xc_interface *xch,
> uint32_t domid,
> unsigned int sop,
> --- a/tools/libxc/xc_domain.c
> +++ b/tools/libxc/xc_domain.c
> @@ -1714,8 +1714,7 @@ int xc_domain_update_msi_irq(
> uint64_t gtable)
> {
> int rc;
> - xen_domctl_bind_pt_irq_t *bind;
> -
> + struct xen_domctl_bind_pt_irq *bind;
> DECLARE_DOMCTL;
>
> domctl.cmd = XEN_DOMCTL_bind_pt_irq;
> @@ -1740,8 +1739,7 @@ int xc_domain_unbind_msi_irq(
> uint32_t gflags)
> {
> int rc;
> - xen_domctl_bind_pt_irq_t *bind;
> -
> + struct xen_domctl_bind_pt_irq *bind;
> DECLARE_DOMCTL;
>
> domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
> @@ -1770,7 +1768,7 @@ static int xc_domain_bind_pt_irq_int(
> uint16_t spi)
> {
> int rc;
> - xen_domctl_bind_pt_irq_t * bind;
> + struct xen_domctl_bind_pt_irq *bind;
> DECLARE_DOMCTL;
>
> domctl.cmd = XEN_DOMCTL_bind_pt_irq;
> @@ -1828,7 +1826,7 @@ static int xc_domain_unbind_pt_irq_int(
> uint8_t spi)
> {
> int rc;
> - xen_domctl_bind_pt_irq_t * bind;
> + struct xen_domctl_bind_pt_irq *bind;
> DECLARE_DOMCTL;
>
> domctl.cmd = XEN_DOMCTL_unbind_pt_irq;
> --- a/xen/arch/arm/domctl.c
> +++ b/xen/arch/arm/domctl.c
> @@ -41,7 +41,7 @@ long arch_do_domctl(struct xen_domctl *d
> case XEN_DOMCTL_bind_pt_irq:
> {
> int rc;
> - xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
> + struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
> uint32_t irq = bind->u.spi.spi;
> uint32_t virq = bind->machine_irq;
>
> @@ -87,7 +87,7 @@ long arch_do_domctl(struct xen_domctl *d
> case XEN_DOMCTL_unbind_pt_irq:
> {
> int rc;
> - xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
> + struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
> uint32_t irq = bind->u.spi.spi;
> uint32_t virq = bind->machine_irq;
>
> --- a/xen/arch/x86/domctl.c
> +++ b/xen/arch/x86/domctl.c
> @@ -48,7 +48,7 @@ static int gdbsx_guest_mem_io(domid_t do
> }
>
> static int update_domain_cpuid_info(struct domain *d,
> - const xen_domctl_cpuid_t *ctl)
> + const struct xen_domctl_cpuid *ctl)
> {
> struct cpuid_policy *p = d->arch.cpuid;
> const struct cpuid_leaf leaf = { ctl->eax, ctl->ebx, ctl->ecx, ctl->edx };
> @@ -363,8 +363,7 @@ long arch_do_domctl(
> {
>
> case XEN_DOMCTL_shadow_op:
> - ret = paging_domctl(d, &domctl->u.shadow_op,
> - guest_handle_cast(u_domctl, void), 0);
> + ret = paging_domctl(d, &domctl->u.shadow_op, u_domctl, 0);
> if ( ret == -ERESTART )
> return hypercall_create_continuation(__HYPERVISOR_arch_1,
> "h", u_domctl);
> @@ -707,7 +706,7 @@ long arch_do_domctl(
>
> case XEN_DOMCTL_bind_pt_irq:
> {
> - xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
> + struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
> int irq;
>
> ret = -EINVAL;
> @@ -738,7 +737,7 @@ long arch_do_domctl(
>
> case XEN_DOMCTL_unbind_pt_irq:
> {
> - xen_domctl_bind_pt_irq_t *bind = &domctl->u.bind_pt_irq;
> + struct xen_domctl_bind_pt_irq *bind = &domctl->u.bind_pt_irq;
> int irq = domain_pirq_to_irq(d, bind->machine_irq);
>
> ret = -EPERM;
> --- a/xen/arch/x86/hvm/vioapic.c
> +++ b/xen/arch/x86/hvm/vioapic.c
> @@ -162,7 +162,7 @@ static int vioapic_hwdom_map_gsi(unsigne
> unsigned int pol)
> {
> struct domain *currd = current->domain;
> - xen_domctl_bind_pt_irq_t pt_irq_bind = {
> + struct xen_domctl_bind_pt_irq pt_irq_bind = {
> .irq_type = PT_IRQ_TYPE_PCI,
> .machine_irq = gsi,
> };
> --- a/xen/arch/x86/mm/hap/hap.c
> +++ b/xen/arch/x86/mm/hap/hap.c
> @@ -608,8 +608,8 @@ out:
> paging_unlock(d);
> }
>
> -int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> +int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> int rc;
> bool preempted = false;
> --- a/xen/arch/x86/mm/mem_sharing.c
> +++ b/xen/arch/x86/mm/mem_sharing.c
> @@ -1606,7 +1606,7 @@ out:
> return rc;
> }
>
> -int mem_sharing_domctl(struct domain *d, xen_domctl_mem_sharing_op_t *mec)
> +int mem_sharing_domctl(struct domain *d, struct xen_domctl_mem_sharing_op *mec)
> {
> int rc;
>
> --- a/xen/arch/x86/mm/paging.c
> +++ b/xen/arch/x86/mm/paging.c
> @@ -674,8 +674,9 @@ void paging_vcpu_init(struct vcpu *v)
> }
>
>
> -int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl, bool_t resuming)
> +int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
> + bool_t resuming)
> {
> int rc;
>
> @@ -775,8 +776,7 @@ long paging_domctl_continuation(XEN_GUES
> {
> if ( domctl_lock_acquire() )
> {
> - ret = paging_domctl(d, &op.u.shadow_op,
> - guest_handle_cast(u_domctl, void), 1);
> + ret = paging_domctl(d, &op.u.shadow_op, u_domctl, 1);
>
> domctl_lock_release();
> }
> --- a/xen/arch/x86/mm/shadow/common.c
> +++ b/xen/arch/x86/mm/shadow/common.c
> @@ -3809,8 +3809,8 @@ out:
> /* Shadow-control XEN_DOMCTL dispatcher */
>
> int shadow_domctl(struct domain *d,
> - xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> + struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> int rc;
> bool preempted = false;
> --- a/xen/common/domctl.c
> +++ b/xen/common/domctl.c
> @@ -243,7 +243,7 @@ void domctl_lock_release(void)
> }
>
> static inline
> -int vcpuaffinity_params_invalid(const xen_domctl_vcpuaffinity_t *vcpuaff)
> +int vcpuaffinity_params_invalid(const struct xen_domctl_vcpuaffinity *vcpuaff)
> {
> return vcpuaff->flags == 0 ||
> ((vcpuaff->flags & XEN_VCPUAFFINITY_HARD) &&
> @@ -690,7 +690,7 @@ long do_domctl(XEN_GUEST_HANDLE_PARAM(xe
> case XEN_DOMCTL_getvcpuaffinity:
> {
> struct vcpu *v;
> - xen_domctl_vcpuaffinity_t *vcpuaff = &op->u.vcpuaffinity;
> + struct xen_domctl_vcpuaffinity *vcpuaff = &op->u.vcpuaffinity;
>
> ret = -EINVAL;
> if ( vcpuaff->vcpu >= d->max_vcpus )
> --- a/xen/common/sched_rt.c
> +++ b/xen/common/sched_rt.c
> @@ -1345,7 +1345,7 @@ rt_dom_cntl(
> struct vcpu *v;
> unsigned long flags;
> int rc = 0;
> - xen_domctl_schedparam_vcpu_t local_sched;
> + struct xen_domctl_schedparam_vcpu local_sched;
> s_time_t period, budget;
> uint32_t index = 0;
>
> --- a/xen/common/vm_event.c
> +++ b/xen/common/vm_event.c
> @@ -41,7 +41,7 @@
>
> static int vm_event_enable(
> struct domain *d,
> - xen_domctl_vm_event_op_t *vec,
> + struct xen_domctl_vm_event_op *vec,
> struct vm_event_domain **ved,
> int pause_flag,
> int param,
> @@ -587,7 +587,7 @@ void vm_event_cleanup(struct domain *d)
> #endif
> }
>
> -int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> +int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
> XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> {
> int rc;
> --- a/xen/drivers/passthrough/io.c
> +++ b/xen/drivers/passthrough/io.c
> @@ -276,7 +276,7 @@ static struct vcpu *vector_hashing_dest(
> }
>
> int pt_irq_create_bind(
> - struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
> + struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
> {
> struct hvm_irq_dpci *hvm_irq_dpci;
> struct hvm_pirq_dpci *pirq_dpci;
> @@ -620,7 +620,7 @@ int pt_irq_create_bind(
> }
>
> int pt_irq_destroy_bind(
> - struct domain *d, xen_domctl_bind_pt_irq_t *pt_irq_bind)
> + struct domain *d, const struct xen_domctl_bind_pt_irq *pt_irq_bind)
> {
> struct hvm_irq_dpci *hvm_irq_dpci;
> struct hvm_pirq_dpci *pirq_dpci;
> --- a/xen/include/asm-x86/hap.h
> +++ b/xen/include/asm-x86/hap.h
> @@ -34,8 +34,8 @@
> /* hap domain level functions */
> /************************************************/
> void hap_domain_init(struct domain *d);
> -int hap_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl);
> +int hap_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
> int hap_enable(struct domain *d, u32 mode);
> void hap_final_teardown(struct domain *d);
> void hap_teardown(struct domain *d, bool *preempted);
> --- a/xen/include/asm-x86/mem_sharing.h
> +++ b/xen/include/asm-x86/mem_sharing.h
> @@ -87,7 +87,7 @@ int mem_sharing_notify_enomem(struct dom
> bool_t allow_sleep);
> int mem_sharing_memop(XEN_GUEST_HANDLE_PARAM(xen_mem_sharing_op_t) arg);
> int mem_sharing_domctl(struct domain *d,
> - xen_domctl_mem_sharing_op_t *mec);
> + struct xen_domctl_mem_sharing_op *mec);
> void mem_sharing_init(void);
>
> /* Scans the p2m and relinquishes any shared pages, destroying
> --- a/xen/include/asm-x86/paging.h
> +++ b/xen/include/asm-x86/paging.h
> @@ -202,8 +202,9 @@ int paging_domain_init(struct domain *d,
> /* Handler for paging-control ops: operations from user-space to enable
> * and disable ephemeral shadow modes (test mode and log-dirty mode) and
> * manipulate the log-dirty bitmap. */
> -int paging_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl, bool_t resuming);
> +int paging_domctl(struct domain *d, struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl,
> + bool_t resuming);
>
> /* Helper hypercall for dealing with continuations. */
> long paging_domctl_continuation(XEN_GUEST_HANDLE_PARAM(xen_domctl_t));
> --- a/xen/include/asm-x86/shadow.h
> +++ b/xen/include/asm-x86/shadow.h
> @@ -69,8 +69,8 @@ int shadow_track_dirty_vram(struct domai
> * and disable ephemeral shadow modes (test mode and log-dirty mode) and
> * manipulate the log-dirty bitmap. */
> int shadow_domctl(struct domain *d,
> - xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl);
> + struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl);
>
> /* Call when destroying a domain */
> void shadow_teardown(struct domain *d, bool *preempted);
> @@ -106,8 +106,9 @@ static inline void sh_remove_shadows(str
>
> static inline void shadow_blow_tables_per_domain(struct domain *d) {}
>
> -static inline int shadow_domctl(struct domain *d, xen_domctl_shadow_op_t *sc,
> - XEN_GUEST_HANDLE_PARAM(void) u_domctl)
> +static inline int shadow_domctl(struct domain *d,
> + struct xen_domctl_shadow_op *sc,
> + XEN_GUEST_HANDLE_PARAM(xen_domctl_t) u_domctl)
> {
> return -EINVAL;
> }
> --- a/xen/include/public/domctl.h
> +++ b/xen/include/public/domctl.h
> @@ -66,8 +66,6 @@ struct xen_domctl_createdomain {
> uint32_t flags;
> struct xen_arch_domainconfig config;
> };
> -typedef struct xen_domctl_createdomain xen_domctl_createdomain_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_createdomain_t);
>
> /* XEN_DOMCTL_getdomaininfo */
> struct xen_domctl_getdomaininfo {
> @@ -133,8 +131,6 @@ struct xen_domctl_getmemlist {
> /* OUT variables. */
> uint64_aligned_t num_pfns;
> };
> -typedef struct xen_domctl_getmemlist xen_domctl_getmemlist_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_getmemlist_t);
>
>
> /* XEN_DOMCTL_getpageframeinfo */
> @@ -225,8 +221,6 @@ struct xen_domctl_shadow_op_stats {
> uint32_t fault_count;
> uint32_t dirty_count;
> };
> -typedef struct xen_domctl_shadow_op_stats xen_domctl_shadow_op_stats_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_stats_t);
>
> struct xen_domctl_shadow_op {
> /* IN variables. */
> @@ -244,8 +238,6 @@ struct xen_domctl_shadow_op {
> uint64_aligned_t pages; /* Size of buffer. Updated with actual size. */
> struct xen_domctl_shadow_op_stats stats;
> };
> -typedef struct xen_domctl_shadow_op xen_domctl_shadow_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_shadow_op_t);
>
>
> /* XEN_DOMCTL_max_mem */
> @@ -253,8 +245,6 @@ struct xen_domctl_max_mem {
> /* IN variables. */
> uint64_aligned_t max_memkb;
> };
> -typedef struct xen_domctl_max_mem xen_domctl_max_mem_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_mem_t);
>
>
> /* XEN_DOMCTL_setvcpucontext */
> @@ -263,8 +253,6 @@ struct xen_domctl_vcpucontext {
> uint32_t vcpu; /* IN */
> XEN_GUEST_HANDLE_64(vcpu_guest_context_t) ctxt; /* IN/OUT */
> };
> -typedef struct xen_domctl_vcpucontext xen_domctl_vcpucontext_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpucontext_t);
>
>
> /* XEN_DOMCTL_getvcpuinfo */
> @@ -278,8 +266,6 @@ struct xen_domctl_getvcpuinfo {
> uint64_aligned_t cpu_time; /* total cpu time consumed (ns) */
> uint32_t cpu; /* current mapping */
> };
> -typedef struct xen_domctl_getvcpuinfo xen_domctl_getvcpuinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvcpuinfo_t);
>
>
> /* Get/set the NUMA node(s) with which the guest has affinity with. */
> @@ -288,8 +274,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_getvc
> struct xen_domctl_nodeaffinity {
> struct xenctl_bitmap nodemap;/* IN */
> };
> -typedef struct xen_domctl_nodeaffinity xen_domctl_nodeaffinity_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_nodeaffinity_t);
>
>
> /* Get/set which physical cpus a vcpu can execute on. */
> @@ -327,16 +311,12 @@ struct xen_domctl_vcpuaffinity {
> struct xenctl_bitmap cpumap_hard;
> struct xenctl_bitmap cpumap_soft;
> };
> -typedef struct xen_domctl_vcpuaffinity xen_domctl_vcpuaffinity_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuaffinity_t);
>
>
> /* XEN_DOMCTL_max_vcpus */
> struct xen_domctl_max_vcpus {
> uint32_t max; /* maximum number of vcpus */
> };
> -typedef struct xen_domctl_max_vcpus xen_domctl_max_vcpus_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_vcpus_t);
>
>
> /* XEN_DOMCTL_scheduler_op */
> @@ -348,25 +328,25 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_max_v
> #define XEN_SCHEDULER_RTDS 8
> #define XEN_SCHEDULER_NULL 9
>
> -typedef struct xen_domctl_sched_credit {
> +struct xen_domctl_sched_credit {
> uint16_t weight;
> uint16_t cap;
> -} xen_domctl_sched_credit_t;
> +};
>
> -typedef struct xen_domctl_sched_credit2 {
> +struct xen_domctl_sched_credit2 {
> uint16_t weight;
> -} xen_domctl_sched_credit2_t;
> +};
>
> -typedef struct xen_domctl_sched_rtds {
> +struct xen_domctl_sched_rtds {
> uint32_t period;
> uint32_t budget;
> -} xen_domctl_sched_rtds_t;
> +};
>
> typedef struct xen_domctl_schedparam_vcpu {
> union {
> - xen_domctl_sched_credit_t credit;
> - xen_domctl_sched_credit2_t credit2;
> - xen_domctl_sched_rtds_t rtds;
> + struct xen_domctl_sched_credit credit;
> + struct xen_domctl_sched_credit2 credit2;
> + struct xen_domctl_sched_rtds rtds;
> } u;
> uint32_t vcpuid;
> } xen_domctl_schedparam_vcpu_t;
> @@ -393,9 +373,9 @@ struct xen_domctl_scheduler_op {
> uint32_t cmd; /* XEN_DOMCTL_SCHEDOP_* */
> /* IN/OUT */
> union {
> - xen_domctl_sched_credit_t credit;
> - xen_domctl_sched_credit2_t credit2;
> - xen_domctl_sched_rtds_t rtds;
> + struct xen_domctl_sched_credit credit;
> + struct xen_domctl_sched_credit2 credit2;
> + struct xen_domctl_sched_rtds rtds;
> struct {
> XEN_GUEST_HANDLE_64(xen_domctl_schedparam_vcpu_t) vcpus;
> /*
> @@ -407,24 +387,18 @@ struct xen_domctl_scheduler_op {
> } v;
> } u;
> };
> -typedef struct xen_domctl_scheduler_op xen_domctl_scheduler_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_scheduler_op_t);
>
>
> /* XEN_DOMCTL_setdomainhandle */
> struct xen_domctl_setdomainhandle {
> xen_domain_handle_t handle;
> };
> -typedef struct xen_domctl_setdomainhandle xen_domctl_setdomainhandle_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdomainhandle_t);
>
>
> /* XEN_DOMCTL_setdebugging */
> struct xen_domctl_setdebugging {
> uint8_t enable;
> };
> -typedef struct xen_domctl_setdebugging xen_domctl_setdebugging_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_setdebugging_t);
>
>
> /* XEN_DOMCTL_irq_permission */
> @@ -432,8 +406,6 @@ struct xen_domctl_irq_permission {
> uint8_t pirq;
> uint8_t allow_access; /* flag to specify enable/disable of IRQ access */
> };
> -typedef struct xen_domctl_irq_permission xen_domctl_irq_permission_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_irq_permission_t);
>
>
> /* XEN_DOMCTL_iomem_permission */
> @@ -442,8 +414,6 @@ struct xen_domctl_iomem_permission {
> uint64_aligned_t nr_mfns; /* number of pages in range (>0) */
> uint8_t allow_access; /* allow (!0) or deny (0) access to range? */
> };
> -typedef struct xen_domctl_iomem_permission xen_domctl_iomem_permission_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_iomem_permission_t);
>
>
> /* XEN_DOMCTL_ioport_permission */
> @@ -452,42 +422,34 @@ struct xen_domctl_ioport_permission {
> uint32_t nr_ports; /* size of port range */
> uint8_t allow_access; /* allow or deny access to range? */
> };
> -typedef struct xen_domctl_ioport_permission xen_domctl_ioport_permission_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_permission_t);
>
>
> /* XEN_DOMCTL_hypercall_init */
> struct xen_domctl_hypercall_init {
> uint64_aligned_t gmfn; /* GMFN to be initialised */
> };
> -typedef struct xen_domctl_hypercall_init xen_domctl_hypercall_init_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_hypercall_init_t);
>
>
> /* XEN_DOMCTL_settimeoffset */
> struct xen_domctl_settimeoffset {
> int64_aligned_t time_offset_seconds; /* applied to domain wallclock time */
> };
> -typedef struct xen_domctl_settimeoffset xen_domctl_settimeoffset_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_settimeoffset_t);
>
> /* XEN_DOMCTL_gethvmcontext */
> /* XEN_DOMCTL_sethvmcontext */
> -typedef struct xen_domctl_hvmcontext {
> +struct xen_domctl_hvmcontext {
> uint32_t size; /* IN/OUT: size of buffer / bytes filled */
> XEN_GUEST_HANDLE_64(uint8) buffer; /* IN/OUT: data, or call
> * gethvmcontext with NULL
> * buffer to get size req'd */
> -} xen_domctl_hvmcontext_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_t);
> +};
>
>
> /* XEN_DOMCTL_set_address_size */
> /* XEN_DOMCTL_get_address_size */
> -typedef struct xen_domctl_address_size {
> +struct xen_domctl_address_size {
> uint32_t size;
> -} xen_domctl_address_size_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_address_size_t);
> +};
>
>
> /* XEN_DOMCTL_sendtrigger */
> @@ -500,8 +462,6 @@ struct xen_domctl_sendtrigger {
> uint32_t trigger; /* IN */
> uint32_t vcpu; /* IN */
> };
> -typedef struct xen_domctl_sendtrigger xen_domctl_sendtrigger_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_sendtrigger_t);
>
>
> /* Assign a device to a guest. Sets up IOMMU structures. */
> @@ -536,8 +496,6 @@ struct xen_domctl_assign_device {
> } dt;
> } u;
> };
> -typedef struct xen_domctl_assign_device xen_domctl_assign_device_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_assign_device_t);
>
> /* Retrieve sibling devices infomation of machine_sbdf */
> /* XEN_DOMCTL_get_device_group */
> @@ -547,22 +505,20 @@ struct xen_domctl_get_device_group {
> uint32_t num_sdevs; /* OUT */
> XEN_GUEST_HANDLE_64(uint32) sdev_array; /* OUT */
> };
> -typedef struct xen_domctl_get_device_group xen_domctl_get_device_group_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_get_device_group_t);
>
> /* Pass-through interrupts: bind real irq -> hvm devfn. */
> /* XEN_DOMCTL_bind_pt_irq */
> /* XEN_DOMCTL_unbind_pt_irq */
> -typedef enum pt_irq_type_e {
> +enum pt_irq_type {
> PT_IRQ_TYPE_PCI,
> PT_IRQ_TYPE_ISA,
> PT_IRQ_TYPE_MSI,
> PT_IRQ_TYPE_MSI_TRANSLATE,
> PT_IRQ_TYPE_SPI, /* ARM: valid range 32-1019 */
> -} pt_irq_type_t;
> +};
> struct xen_domctl_bind_pt_irq {
> uint32_t machine_irq;
> - pt_irq_type_t irq_type;
> + uint32_t irq_type; /* enum pt_irq_type */
>
> union {
> struct {
> @@ -590,8 +546,6 @@ struct xen_domctl_bind_pt_irq {
> } spi;
> } u;
> };
> -typedef struct xen_domctl_bind_pt_irq xen_domctl_bind_pt_irq_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_bind_pt_irq_t);
>
>
> /* Bind machine I/O address range -> HVM address range. */
> @@ -613,8 +567,6 @@ struct xen_domctl_memory_mapping {
> uint32_t add_mapping; /* add or remove mapping */
> uint32_t padding; /* padding for 64-bit aligned structure */
> };
> -typedef struct xen_domctl_memory_mapping xen_domctl_memory_mapping_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_memory_mapping_t);
>
>
> /* Bind machine I/O port range -> HVM I/O port range. */
> @@ -625,8 +577,6 @@ struct xen_domctl_ioport_mapping {
> uint32_t nr_ports; /* size of port range */
> uint32_t add_mapping; /* add or remove mapping */
> };
> -typedef struct xen_domctl_ioport_mapping xen_domctl_ioport_mapping_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_ioport_mapping_t);
>
>
> /*
> @@ -645,8 +595,6 @@ struct xen_domctl_pin_mem_cacheattr {
> uint64_aligned_t start, end;
> uint32_t type; /* XEN_DOMCTL_MEM_CACHEATTR_* */
> };
> -typedef struct xen_domctl_pin_mem_cacheattr xen_domctl_pin_mem_cacheattr_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_pin_mem_cacheattr_t);
>
>
> /* XEN_DOMCTL_set_ext_vcpucontext */
> @@ -678,8 +626,6 @@ struct xen_domctl_ext_vcpucontext {
> #endif
> #endif
> };
> -typedef struct xen_domctl_ext_vcpucontext xen_domctl_ext_vcpucontext_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_vcpucontext_t);
>
> /*
> * Set the target domain for a domain
> @@ -688,8 +634,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_ext_v
> struct xen_domctl_set_target {
> domid_t target;
> };
> -typedef struct xen_domctl_set_target xen_domctl_set_target_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_target_t);
>
> #if defined(__i386__) || defined(__x86_64__)
> # define XEN_CPUID_INPUT_UNUSED 0xFFFFFFFF
> @@ -701,8 +645,6 @@ struct xen_domctl_cpuid {
> uint32_t ecx;
> uint32_t edx;
> };
> -typedef struct xen_domctl_cpuid xen_domctl_cpuid_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid_t);
> #endif
>
> /*
> @@ -725,8 +667,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_cpuid
> struct xen_domctl_subscribe {
> uint32_t port; /* IN */
> };
> -typedef struct xen_domctl_subscribe xen_domctl_subscribe_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_subscribe_t);
>
> /*
> * Define the maximum machine address size which should be allocated
> @@ -747,37 +687,34 @@ struct xen_domctl_debug_op {
> uint32_t op; /* IN */
> uint32_t vcpu; /* IN */
> };
> -typedef struct xen_domctl_debug_op xen_domctl_debug_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_debug_op_t);
>
> /*
> * Request a particular record from the HVM context
> */
> /* XEN_DOMCTL_gethvmcontext_partial */
> -typedef struct xen_domctl_hvmcontext_partial {
> +struct xen_domctl_hvmcontext_partial {
> uint32_t type; /* IN: Type of record required */
> uint32_t instance; /* IN: Instance of that type */
> uint64_aligned_t bufsz; /* IN: size of buffer */
> XEN_GUEST_HANDLE_64(uint8) buffer; /* OUT: buffer to write record into */
> -} xen_domctl_hvmcontext_partial_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_hvmcontext_partial_t);
> +};
>
> /* XEN_DOMCTL_disable_migrate */
> -typedef struct xen_domctl_disable_migrate {
> +struct xen_domctl_disable_migrate {
> uint32_t disable; /* IN: 1: disable migration and restore */
> -} xen_domctl_disable_migrate_t;
> +};
>
>
> /* XEN_DOMCTL_gettscinfo */
> /* XEN_DOMCTL_settscinfo */
> -typedef struct xen_domctl_tsc_info {
> +struct xen_domctl_tsc_info {
> /* IN/OUT */
> uint32_t tsc_mode;
> uint32_t gtsc_khz;
> uint32_t incarnation;
> uint32_t pad;
> uint64_aligned_t elapsed_nsec;
> -} xen_domctl_tsc_info_t;
> +};
>
> /* XEN_DOMCTL_gdbsx_guestmemio guest mem io */
> struct xen_domctl_gdbsx_memio {
> @@ -885,8 +822,6 @@ struct xen_domctl_vm_event_op {
>
> uint32_t port; /* OUT: event channel for ring */
> };
> -typedef struct xen_domctl_vm_event_op xen_domctl_vm_event_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vm_event_op_t);
>
> /*
> * Memory sharing operations
> @@ -902,8 +837,6 @@ struct xen_domctl_mem_sharing_op {
> uint8_t enable; /* CONTROL */
> } u;
> };
> -typedef struct xen_domctl_mem_sharing_op xen_domctl_mem_sharing_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_mem_sharing_op_t);
>
> struct xen_domctl_audit_p2m {
> /* OUT error counts */
> @@ -911,14 +844,10 @@ struct xen_domctl_audit_p2m {
> uint64_t m2p_bad;
> uint64_t p2m_bad;
> };
> -typedef struct xen_domctl_audit_p2m xen_domctl_audit_p2m_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_audit_p2m_t);
>
> struct xen_domctl_set_virq_handler {
> uint32_t virq; /* IN */
> };
> -typedef struct xen_domctl_set_virq_handler xen_domctl_set_virq_handler_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_virq_handler_t);
>
> #if defined(__i386__) || defined(__x86_64__)
> /* XEN_DOMCTL_setvcpuextstate */
> @@ -941,8 +870,6 @@ struct xen_domctl_vcpuextstate {
> uint64_aligned_t size;
> XEN_GUEST_HANDLE_64(uint64) buffer;
> };
> -typedef struct xen_domctl_vcpuextstate xen_domctl_vcpuextstate_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpuextstate_t);
> #endif
>
> /* XEN_DOMCTL_set_access_required: sets whether a memory event listener
> @@ -952,14 +879,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpue
> struct xen_domctl_set_access_required {
> uint8_t access_required;
> };
> -typedef struct xen_domctl_set_access_required xen_domctl_set_access_required_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_access_required_t);
>
> struct xen_domctl_set_broken_page_p2m {
> uint64_aligned_t pfn;
> };
> -typedef struct xen_domctl_set_broken_page_p2m xen_domctl_set_broken_page_p2m_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_broken_page_p2m_t);
>
> /*
> * XEN_DOMCTL_set_max_evtchn: sets the maximum event channel port
> @@ -969,8 +892,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_b
> struct xen_domctl_set_max_evtchn {
> uint32_t max_port;
> };
> -typedef struct xen_domctl_set_max_evtchn xen_domctl_set_max_evtchn_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_set_max_evtchn_t);
>
> /*
> * ARM: Clean and invalidate caches associated with given region of
> @@ -980,8 +901,6 @@ struct xen_domctl_cacheflush {
> /* IN: page range to flush. */
> xen_pfn_t start_pfn, nr_pfns;
> };
> -typedef struct xen_domctl_cacheflush xen_domctl_cacheflush_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_cacheflush_t);
>
> #if defined(__i386__) || defined(__x86_64__)
> struct xen_domctl_vcpu_msr {
> @@ -1014,8 +933,6 @@ struct xen_domctl_vcpu_msrs {
> uint32_t msr_count; /* IN/OUT */
> XEN_GUEST_HANDLE_64(xen_domctl_vcpu_msr_t) msrs; /* IN/OUT */
> };
> -typedef struct xen_domctl_vcpu_msrs xen_domctl_vcpu_msrs_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vcpu_msrs_t);
> #endif
>
> /* XEN_DOMCTL_setvnumainfo: specifies a virtual NUMA topology for the guest */
> @@ -1052,8 +969,6 @@ struct xen_domctl_vnuma {
> */
> XEN_GUEST_HANDLE_64(xen_vmemrange_t) vmemrange;
> };
> -typedef struct xen_domctl_vnuma xen_domctl_vnuma_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_vnuma_t);
>
> struct xen_domctl_psr_cmt_op {
> #define XEN_DOMCTL_PSR_CMT_OP_DETACH 0
> @@ -1062,8 +977,6 @@ struct xen_domctl_psr_cmt_op {
> uint32_t cmd;
> uint32_t data;
> };
> -typedef struct xen_domctl_psr_cmt_op xen_domctl_psr_cmt_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cmt_op_t);
>
> /* XEN_DOMCTL_MONITOR_*
> *
> @@ -1144,8 +1057,6 @@ struct xen_domctl_monitor_op {
> } debug_exception;
> } u;
> };
> -typedef struct xen_domctl_monitor_op xen_domctl_monitor_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_monitor_op_t);
>
> struct xen_domctl_psr_cat_op {
> #define XEN_DOMCTL_PSR_CAT_OP_SET_L3_CBM 0
> @@ -1160,8 +1071,6 @@ struct xen_domctl_psr_cat_op {
> uint32_t target; /* IN */
> uint64_t data; /* IN/OUT */
> };
> -typedef struct xen_domctl_psr_cat_op xen_domctl_psr_cat_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_domctl_psr_cat_op_t);
>
> struct xen_domctl {
> uint32_t cmd;
> --- a/xen/include/xen/iommu.h
> +++ b/xen/include/xen/iommu.h
> @@ -96,8 +96,8 @@ void pt_pci_init(void);
>
> struct pirq;
> int hvm_do_IRQ_dpci(struct domain *, struct pirq *);
> -int pt_irq_create_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
> -int pt_irq_destroy_bind(struct domain *, xen_domctl_bind_pt_irq_t *);
> +int pt_irq_create_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
> +int pt_irq_destroy_bind(struct domain *, const struct xen_domctl_bind_pt_irq *);
>
> void hvm_dpci_isairq_eoi(struct domain *d, unsigned int isairq);
> struct hvm_irq_dpci *domain_get_irq_dpci(const struct domain *);
> --- a/xen/include/xen/vm_event.h
> +++ b/xen/include/xen/vm_event.h
> @@ -69,7 +69,7 @@ int vm_event_get_response(struct domain
>
> void vm_event_resume(struct domain *d, struct vm_event_domain *ved);
>
> -int vm_event_domctl(struct domain *d, xen_domctl_vm_event_op_t *vec,
> +int vm_event_domctl(struct domain *d, struct xen_domctl_vm_event_op *vec,
> XEN_GUEST_HANDLE_PARAM(void) u_domctl);
>
> void vm_event_vcpu_pause(struct vcpu *v);
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
2017-09-12 15:57 ` Dario Faggioli
2017-09-19 15:31 ` Ping: " Jan Beulich
@ 2017-09-20 10:41 ` George Dunlap
2017-09-20 15:36 ` Robert VanVossen
2017-09-20 18:07 ` Robert VanVossen
3 siblings, 1 reply; 20+ messages in thread
From: George Dunlap @ 2017-09-20 10:41 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Wei Liu, Andrew Cooper, Dario Faggioli,
Ian Jackson, Robert VanVossen, Tim Deegan, Ross Lagerwall,
Julien Grall, Josh Whitehead, xen-devel
On Tue, Sep 12, 2017 at 4:10 PM, Jan Beulich <JBeulich@suse.com> wrote:
> By virtue of the struct xen_sysctl container structure, most of them
> are really just cluttering the name space.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: George Dunlap <george.dunlap@citrix.com>
>
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -1212,11 +1212,11 @@ int xc_readconsolering(xc_interface *xch
> int xc_send_debug_keys(xc_interface *xch, char *keys);
> int xc_set_parameters(xc_interface *xch, char *params);
>
> -typedef xen_sysctl_physinfo_t xc_physinfo_t;
> -typedef xen_sysctl_cputopo_t xc_cputopo_t;
> -typedef xen_sysctl_numainfo_t xc_numainfo_t;
> -typedef xen_sysctl_meminfo_t xc_meminfo_t;
> -typedef xen_sysctl_pcitopoinfo_t xc_pcitopoinfo_t;
> +typedef struct xen_sysctl_physinfo xc_physinfo_t;
> +typedef struct xen_sysctl_cputopo xc_cputopo_t;
> +typedef struct xen_sysctl_numainfo xc_numainfo_t;
> +typedef struct xen_sysctl_meminfo xc_meminfo_t;
> +typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
>
> typedef uint32_t xc_cpu_to_node_t;
> typedef uint32_t xc_cpu_to_socket_t;
> @@ -1240,7 +1240,7 @@ int xc_machphys_mfn_list(xc_interface *x
> unsigned long max_extents,
> xen_pfn_t *extent_start);
>
> -typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
> +typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
> int xc_getcpuinfo(xc_interface *xch, int max_cpus,
> xc_cpuinfo_t *info, int *nr_cpus);
>
> @@ -1853,8 +1853,8 @@ int xc_cpu_offline(xc_interface *xch, in
> * cpufreq para name of this structure named
> * same as sysfs file name of native linux
> */
> -typedef xen_userspace_t xc_userspace_t;
> -typedef xen_ondemand_t xc_ondemand_t;
> +typedef struct xen_userspace xc_userspace_t;
> +typedef struct xen_ondemand xc_ondemand_t;
>
> struct xc_get_cpufreq_para {
> /* IN/OUT variable */
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -547,7 +547,7 @@ int xc_livepatch_upload(xc_interface *xc
> DECLARE_SYSCTL;
> DECLARE_HYPERCALL_BUFFER(char, local);
> DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> if ( !name || !payload )
> {
> @@ -594,12 +594,12 @@ int xc_livepatch_upload(xc_interface *xc
>
> int xc_livepatch_get(xc_interface *xch,
> char *name,
> - xen_livepatch_status_t *status)
> + struct xen_livepatch_status *status)
> {
> int rc;
> DECLARE_SYSCTL;
> DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> if ( !name )
> {
> @@ -677,7 +677,7 @@ int xc_livepatch_get(xc_interface *xch,
> * retrieved (if any).
> */
> int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
> - xen_livepatch_status_t *info,
> + struct xen_livepatch_status *info,
> char *name, uint32_t *len,
> unsigned int *done,
> unsigned int *left)
> @@ -837,7 +837,7 @@ static int _xc_livepatch_action(xc_inter
> DECLARE_SYSCTL;
> /* The size is figured out when we strlen(name) */
> DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> def_name.size = strlen(name) + 1;
>
> --- a/xen/arch/arm/sysctl.c
> +++ b/xen/arch/arm/sysctl.c
> @@ -12,7 +12,7 @@
> #include <xen/hypercall.h>
> #include <public/sysctl.h>
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { }
>
> long arch_do_sysctl(struct xen_sysctl *sysctl,
> XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -72,7 +72,7 @@ long cpu_down_helper(void *data)
> return ret;
> }
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
> {
> memcpy(pi->hw_cap, boot_cpu_data.x86_capability,
> min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability)));
> --- a/xen/common/gcov/gcov.c
> +++ b/xen/common/gcov/gcov.c
> @@ -209,7 +209,7 @@ static int gcov_dump_all(XEN_GUEST_HANDL
> return ret;
> }
>
> -int sysctl_gcov_op(xen_sysctl_gcov_op_t *op)
> +int sysctl_gcov_op(struct xen_sysctl_gcov_op *op)
> {
> int ret;
>
> --- a/xen/common/livepatch.c
> +++ b/xen/common/livepatch.c
> @@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
> */
> static DEFINE_PER_CPU(bool_t, work_to_do);
>
> -static int get_name(const xen_livepatch_name_t *name, char *n)
> +static int get_name(const struct xen_livepatch_name *name, char *n)
> {
> if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
> return -EINVAL;
> @@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
> return 0;
> }
>
> -static int verify_payload(const xen_sysctl_livepatch_upload_t *upload, char *n)
> +static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
> {
> if ( get_name(&upload->name, n) )
> return -EINVAL;
> @@ -897,7 +897,7 @@ static int load_payload_data(struct payl
> return rc;
> }
>
> -static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
> +static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
> {
> struct payload *data, *found;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
> return rc;
> }
>
> -static int livepatch_get(xen_sysctl_livepatch_get_t *get)
> +static int livepatch_get(struct xen_sysctl_livepatch_get *get)
> {
> struct payload *data;
> int rc;
> @@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
> return 0;
> }
>
> -static int livepatch_list(xen_sysctl_livepatch_list_t *list)
> +static int livepatch_list(struct xen_sysctl_livepatch_list *list)
> {
> - xen_livepatch_status_t status;
> + struct xen_livepatch_status status;
> struct payload *data;
> unsigned int idx = 0, i = 0;
> int rc = 0;
> @@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
> return 0;
> }
>
> -static int livepatch_action(xen_sysctl_livepatch_action_t *action)
> +static int livepatch_action(struct xen_sysctl_livepatch_action *action)
> {
> struct payload *data;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
> return rc;
> }
>
> -int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
> +int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
> {
> int rc;
>
> --- a/xen/common/perfc.c
> +++ b/xen/common/perfc.c
> @@ -152,8 +152,8 @@ void perfc_reset(unsigned char key)
> arch_perfc_reset();
> }
>
> -static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
> -static xen_sysctl_perfc_val_t *perfc_vals;
> +static struct xen_sysctl_perfc_desc perfc_d[NR_PERFCTRS];
> +static struct xen_sysctl_perfc_val *perfc_vals;
> static unsigned int perfc_nbr_vals;
> static cpumask_t perfc_cpumap;
>
> @@ -190,7 +190,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
> }
>
> xfree(perfc_vals);
> - perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
> + perfc_vals = xmalloc_array(struct xen_sysctl_perfc_val, perfc_nbr_vals);
> }
>
> if ( guest_handle_is_null(desc) )
> @@ -241,7 +241,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
> }
>
> /* Dom0 control of perf counters */
> -int perfc_control(xen_sysctl_perfc_op_t *pc)
> +int perfc_control(struct xen_sysctl_perfc_op *pc)
> {
> static DEFINE_SPINLOCK(lock);
> int rc;
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -694,7 +694,7 @@ static int
> a653sched_adjust_global(const struct scheduler *ops,
> struct xen_sysctl_scheduler_op *sc)
> {
> - xen_sysctl_arinc653_schedule_t local_sched;
> + struct xen_sysctl_arinc653_schedule local_sched;
> int rc = -EINVAL;
>
> switch ( sc->cmd )
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -1240,7 +1240,7 @@ csched_sys_cntl(const struct scheduler *
> struct xen_sysctl_scheduler_op *sc)
> {
> int rc = -EINVAL;
> - xen_sysctl_credit_schedule_t *params = &sc->u.sched_credit;
> + struct xen_sysctl_credit_schedule *params = &sc->u.sched_credit;
> struct csched_private *prv = CSCHED_PRIV(ops);
> unsigned long flags;
>
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -2443,7 +2443,7 @@ csched2_dom_cntl(
> static int csched2_sys_cntl(const struct scheduler *ops,
> struct xen_sysctl_scheduler_op *sc)
> {
> - xen_sysctl_credit2_schedule_t *params = &sc->u.sched_credit2;
> + struct xen_sysctl_credit2_schedule *params = &sc->u.sched_credit2;
> struct csched2_private *prv = csched2_priv(ops);
> unsigned long flags;
>
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -380,7 +380,7 @@ void spinlock_profile_reset(unsigned cha
> }
>
> typedef struct {
> - xen_sysctl_lockprof_op_t *pc;
> + struct xen_sysctl_lockprof_op *pc;
> int rc;
> } spinlock_profile_ucopy_t;
>
> @@ -388,7 +388,7 @@ static void spinlock_profile_ucopy_elem(
> int32_t type, int32_t idx, void *par)
> {
> spinlock_profile_ucopy_t *p = par;
> - xen_sysctl_lockprof_data_t elem;
> + struct xen_sysctl_lockprof_data elem;
>
> if ( p->rc )
> return;
> @@ -411,7 +411,7 @@ static void spinlock_profile_ucopy_elem(
> }
>
> /* Dom0 control of lock profiling */
> -int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
> +int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
> {
> int rc = 0;
> spinlock_profile_ucopy_t par;
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -250,7 +250,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
>
> case XEN_SYSCTL_physinfo:
> {
> - xen_sysctl_physinfo_t *pi = &op->u.physinfo;
> + struct xen_sysctl_physinfo *pi = &op->u.physinfo;
>
> memset(pi, 0, sizeof(*pi));
> pi->threads_per_core =
> @@ -276,7 +276,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> case XEN_SYSCTL_numainfo:
> {
> unsigned int i, j, num_nodes;
> - xen_sysctl_numainfo_t *ni = &op->u.numainfo;
> + struct xen_sysctl_numainfo *ni = &op->u.numainfo;
> bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
> bool_t do_distance = !guest_handle_is_null(ni->distance);
>
> @@ -284,7 +284,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
>
> if ( do_meminfo || do_distance )
> {
> - xen_sysctl_meminfo_t meminfo = { 0 };
> + struct xen_sysctl_meminfo meminfo = { };
>
> if ( num_nodes > ni->num_nodes )
> num_nodes = ni->num_nodes;
> @@ -346,12 +346,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> case XEN_SYSCTL_cputopoinfo:
> {
> unsigned int i, num_cpus;
> - xen_sysctl_cputopoinfo_t *ti = &op->u.cputopoinfo;
> + struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
>
> num_cpus = cpumask_last(&cpu_online_map) + 1;
> if ( !guest_handle_is_null(ti->cputopo) )
> {
> - xen_sysctl_cputopo_t cputopo = { 0 };
> + struct xen_sysctl_cputopo cputopo = { };
>
> if ( num_cpus > ti->num_cpus )
> num_cpus = ti->num_cpus;
> @@ -405,7 +405,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> #ifdef CONFIG_HAS_PCI
> case XEN_SYSCTL_pcitopoinfo:
> {
> - xen_sysctl_pcitopoinfo_t *ti = &op->u.pcitopoinfo;
> + struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo;
> unsigned int i = 0;
>
> if ( guest_handle_is_null(ti->devs) ||
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
>
> /**
> * tb_control - sysctl operations on trace buffers.
> - * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
> + * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
> */
> -int tb_control(xen_sysctl_tbuf_op_t *tbc)
> +int tb_control(struct xen_sysctl_tbuf_op *tbc)
> {
> static DEFINE_SPINLOCK(lock);
> int rc = 0;
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -58,8 +58,6 @@ struct xen_sysctl_readconsole {
> /* IN: Size of buffer; OUT: Bytes written to buffer. */
> uint32_t count;
> };
> -typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
>
> /* Get trace buffers machine base address */
> /* XEN_SYSCTL_tbuf_op */
> @@ -79,8 +77,6 @@ struct xen_sysctl_tbuf_op {
> uint64_aligned_t buffer_mfn;
> uint32_t size; /* Also an IN variable! */
> };
> -typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
>
> /*
> * Get physical information about the host machine
> @@ -109,8 +105,6 @@ struct xen_sysctl_physinfo {
> /* XEN_SYSCTL_PHYSCAP_??? */
> uint32_t capabilities;
> };
> -typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
>
> /*
> * Get the ID of the current scheduler.
> @@ -120,8 +114,6 @@ struct xen_sysctl_sched_id {
> /* OUT variable */
> uint32_t sched_id;
> };
> -typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
>
> /* Interface for controlling Xen software performance counters. */
> /* XEN_SYSCTL_perfc_op */
> @@ -148,8 +140,6 @@ struct xen_sysctl_perfc_op {
> /* counter values (or NULL) */
> XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
> };
> -typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
>
> /* XEN_SYSCTL_getdomaininfolist */
> struct xen_sysctl_getdomaininfolist {
> @@ -160,8 +150,6 @@ struct xen_sysctl_getdomaininfolist {
> /* OUT variables. */
> uint32_t num_domains;
> };
> -typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
>
> /* Inject debug keys into Xen. */
> /* XEN_SYSCTL_debug_keys */
> @@ -170,8 +158,6 @@ struct xen_sysctl_debug_keys {
> XEN_GUEST_HANDLE_64(char) keys;
> uint32_t nr_keys;
> };
> -typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
>
> /* Get physical CPU information. */
> /* XEN_SYSCTL_getcpuinfo */
> @@ -187,8 +173,6 @@ struct xen_sysctl_getcpuinfo {
> /* OUT variables. */
> uint32_t nr_cpus;
> };
> -typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
>
> /* XEN_SYSCTL_availheap */
> struct xen_sysctl_availheap {
> @@ -199,8 +183,6 @@ struct xen_sysctl_availheap {
> /* OUT variables. */
> uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
> };
> -typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
>
> /* XEN_SYSCTL_get_pmstat */
> struct pm_px_val {
> @@ -219,8 +201,6 @@ struct pm_px_stat {
> XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
> XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
> };
> -typedef struct pm_px_stat pm_px_stat_t;
> -DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
>
> struct pm_cx_stat {
> uint32_t nr; /* entry nr in triggers & residencies, including C0 */
> @@ -259,8 +239,6 @@ struct xen_sysctl_get_pmstat {
> /* other struct for tx, etc */
> } u;
> };
> -typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
>
> /* XEN_SYSCTL_cpu_hotplug */
> struct xen_sysctl_cpu_hotplug {
> @@ -270,8 +248,6 @@ struct xen_sysctl_cpu_hotplug {
> #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
> uint32_t op; /* hotplug opcode */
> };
> -typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
>
> /*
> * Get/set xen power management, include
> @@ -281,7 +257,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_h
> struct xen_userspace {
> uint32_t scaling_setspeed;
> };
> -typedef struct xen_userspace xen_userspace_t;
>
> struct xen_ondemand {
> uint32_t sampling_rate_max;
> @@ -290,7 +265,6 @@ struct xen_ondemand {
> uint32_t sampling_rate;
> uint32_t up_threshold;
> };
> -typedef struct xen_ondemand xen_ondemand_t;
>
> /*
> * cpufreq para name of this structure named
> @@ -461,8 +435,6 @@ struct xen_sysctl_lockprof_op {
> /* profile information (or NULL) */
> XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
> };
> -typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
>
> /* XEN_SYSCTL_cputopoinfo */
> #define XEN_INVALID_CORE_ID (~0U)
> @@ -493,8 +465,6 @@ struct xen_sysctl_cputopoinfo {
> uint32_t num_cpus;
> XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
> };
> -typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
>
> /* XEN_SYSCTL_numainfo */
> #define XEN_INVALID_MEM_SZ (~0U)
> @@ -535,8 +505,6 @@ struct xen_sysctl_numainfo {
> */
> XEN_GUEST_HANDLE_64(uint32) distance;
> };
> -typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
>
> /* XEN_SYSCTL_cpupool_op */
> #define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
> @@ -556,8 +524,6 @@ struct xen_sysctl_cpupool_op {
> uint32_t n_dom; /* OUT: I */
> struct xenctl_bitmap cpumap; /* OUT: IF */
> };
> -typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
>
> /*
> * Error return values of cpupool operations:
> @@ -637,14 +603,10 @@ struct xen_sysctl_credit_schedule {
> unsigned tslice_ms;
> unsigned ratelimit_us;
> };
> -typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
>
> struct xen_sysctl_credit2_schedule {
> unsigned ratelimit_us;
> };
> -typedef struct xen_sysctl_credit2_schedule xen_sysctl_credit2_schedule_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit2_schedule_t);
>
> /* XEN_SYSCTL_scheduler_op */
> /* Set or get info? */
> @@ -662,8 +624,6 @@ struct xen_sysctl_scheduler_op {
> struct xen_sysctl_credit2_schedule sched_credit2;
> } u;
> };
> -typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
>
> /*
> * Output format of gcov data:
> @@ -696,8 +656,6 @@ struct xen_sysctl_gcov_op {
> uint32_t size; /* IN/OUT: size of the buffer */
> XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
> };
> -typedef struct xen_sysctl_gcov_op xen_sysctl_gcov_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_gcov_op_t);
>
> #define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
> #define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
> @@ -716,8 +674,6 @@ struct xen_sysctl_psr_cmt_op {
> } l3_cache;
> } u;
> };
> -typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
>
> /* XEN_SYSCTL_pcitopoinfo */
> #define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
> @@ -740,8 +696,6 @@ struct xen_sysctl_pcitopoinfo {
> */
> XEN_GUEST_HANDLE_64(uint32) nodes;
> };
> -typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
>
> #define XEN_SYSCTL_PSR_CAT_get_l3_info 0
> #define XEN_SYSCTL_PSR_CAT_get_l2_info 1
> @@ -757,8 +711,6 @@ struct xen_sysctl_psr_cat_op {
> } cat_info;
> } u;
> };
> -typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
>
> #define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
>
> @@ -863,8 +815,6 @@ struct xen_sysctl_tmem_op {
> /* of them. */
> } u;
> };
> -typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
>
> /*
> * XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
> @@ -884,8 +834,6 @@ struct xen_sysctl_cpu_levelling_caps {
> #define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx (1ul << 8) /* 0x00000007:0.ebx */
> uint32_t caps;
> };
> -typedef struct xen_sysctl_cpu_levelling_caps xen_sysctl_cpu_levelling_caps_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_levelling_caps_t);
>
> /*
> * XEN_SYSCTL_get_cpu_featureset (x86 specific)
> @@ -909,8 +857,6 @@ struct xen_sysctl_cpu_featureset {
> * maximum length. */
> XEN_GUEST_HANDLE_64(uint32) features; /* OUT: */
> };
> -typedef struct xen_sysctl_featureset xen_sysctl_featureset_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_featureset_t);
>
> /*
> * XEN_SYSCTL_LIVEPATCH_op
> @@ -966,8 +912,6 @@ struct xen_livepatch_name {
> XEN_LIVEPATCH_NAME_SIZE. */
> uint16_t pad[3]; /* IN: MUST be zero. */
> };
> -typedef struct xen_livepatch_name xen_livepatch_name_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_livepatch_name_t);
>
> /*
> * Upload a payload to the hypervisor. The payload is verified
> @@ -986,12 +930,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_livepatch_na
> */
> #define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
> struct xen_sysctl_livepatch_upload {
> - xen_livepatch_name_t name; /* IN, name of the patch. */
> + struct xen_livepatch_name name; /* IN, name of the patch. */
> uint64_t size; /* IN, size of the ELF file. */
> XEN_GUEST_HANDLE_64(uint8) payload; /* IN, the ELF file. */
> };
> -typedef struct xen_sysctl_livepatch_upload xen_sysctl_livepatch_upload_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_upload_t);
>
> /*
> * Retrieve an status of an specific payload.
> @@ -1013,11 +955,9 @@ typedef struct xen_livepatch_status xen_
> DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
>
> struct xen_sysctl_livepatch_get {
> - xen_livepatch_name_t name; /* IN, name of the payload. */
> - xen_livepatch_status_t status; /* IN/OUT, state of it. */
> + struct xen_livepatch_name name; /* IN, name of the payload. */
> + struct xen_livepatch_status status; /* IN/OUT, state of it. */
> };
> -typedef struct xen_sysctl_livepatch_get xen_sysctl_livepatch_get_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_get_t);
>
> /*
> * Retrieve an array of abbreviated status and names of payloads that are
> @@ -1059,8 +999,6 @@ struct xen_sysctl_livepatch_list {
> XEN_GUEST_HANDLE_64(uint32) len; /* OUT: Array of lengths of name's.
> Must have nr of them. */
> };
> -typedef struct xen_sysctl_livepatch_list xen_sysctl_livepatch_list_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_list_t);
>
> /*
> * Perform an operation on the payload structure referenced by the `name` field.
> @@ -1069,7 +1007,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livep
> */
> #define XEN_SYSCTL_LIVEPATCH_ACTION 3
> struct xen_sysctl_livepatch_action {
> - xen_livepatch_name_t name; /* IN, name of the patch. */
> + struct xen_livepatch_name name; /* IN, name of the patch. */
> #define LIVEPATCH_ACTION_UNLOAD 1
> #define LIVEPATCH_ACTION_REVERT 2
> #define LIVEPATCH_ACTION_APPLY 3
> @@ -1080,21 +1018,17 @@ struct xen_sysctl_livepatch_action {
> /* Or upper bound of time (ns) */
> /* for operation to take. */
> };
> -typedef struct xen_sysctl_livepatch_action xen_sysctl_livepatch_action_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_action_t);
>
> struct xen_sysctl_livepatch_op {
> uint32_t cmd; /* IN: XEN_SYSCTL_LIVEPATCH_*. */
> uint32_t pad; /* IN: Always zero. */
> union {
> - xen_sysctl_livepatch_upload_t upload;
> - xen_sysctl_livepatch_list_t list;
> - xen_sysctl_livepatch_get_t get;
> - xen_sysctl_livepatch_action_t action;
> + struct xen_sysctl_livepatch_upload upload;
> + struct xen_sysctl_livepatch_list list;
> + struct xen_sysctl_livepatch_get get;
> + struct xen_sysctl_livepatch_action action;
> } u;
> };
> -typedef struct xen_sysctl_livepatch_op xen_sysctl_livepatch_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_op_t);
>
> /*
> * XEN_SYSCTL_set_parameter
> @@ -1111,8 +1045,6 @@ struct xen_sysctl_set_parameter {
> uint16_t size; /* IN: size of parameters. */
> uint16_t pad[3]; /* IN: MUST be zero. */
> };
> -typedef struct xen_sysctl_set_parameter xen_sysctl_set_parameter_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_set_parameter_t);
>
> struct xen_sysctl {
> uint32_t cmd;
> --- a/xen/include/xen/gcov.h
> +++ b/xen/include/xen/gcov.h
> @@ -3,7 +3,7 @@
>
> #ifdef CONFIG_GCOV
> #include <public/sysctl.h>
> -int sysctl_gcov_op(xen_sysctl_gcov_op_t *op);
> +int sysctl_gcov_op(struct xen_sysctl_gcov_op *op);
> #endif
>
> #endif /* _XEN_GCOV_H */
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -914,7 +914,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
> void schedule_dump(struct cpupool *c);
> extern void dump_runq(unsigned char key);
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi);
>
> #endif /* __SCHED_H__ */
>
> --- a/xen/include/xen/spinlock.h
> +++ b/xen/include/xen/spinlock.h
> @@ -110,7 +110,7 @@ void _lock_profile_deregister_struct(int
> #define lock_profile_deregister_struct(type, ptr) \
> _lock_profile_deregister_struct(type, &((ptr)->profile_head))
>
> -extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
> +extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
> extern void spinlock_profile_printall(unsigned char key);
> extern void spinlock_profile_reset(unsigned char key);
>
>
>
>
> _______________________________________________
> Xen-devel mailing list
> Xen-devel@lists.xen.org
> https://lists.xen.org/xen-devel
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
` (4 preceding siblings ...)
2017-09-12 16:15 ` Julien Grall
@ 2017-09-20 11:07 ` Julien Grall
2017-09-20 11:38 ` Jan Beulich
5 siblings, 1 reply; 20+ messages in thread
From: Julien Grall @ 2017-09-20 11:07 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Tim Deegan
Hi,
On 12/09/17 15:25, Jan Beulich wrote:
> 1: public/domctl: drop unnecessary typedefs and handles
> 2: public/sysctl: drop unnecessary typedefs and handles
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
For the ARM changes:
Acked-by: Julien Grall <julien.grall@arm.com>
Cheers,
>
--
Julien Grall
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles
2017-09-20 11:07 ` Julien Grall
@ 2017-09-20 11:38 ` Jan Beulich
0 siblings, 0 replies; 20+ messages in thread
From: Jan Beulich @ 2017-09-20 11:38 UTC (permalink / raw)
To: Julien Grall
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Ian Jackson, Tim Deegan, xen-devel
>>> On 20.09.17 at 13:07, <julien.grall@arm.com> wrote:
> On 12/09/17 15:25, Jan Beulich wrote:
>> 1: public/domctl: drop unnecessary typedefs and handles
>> 2: public/sysctl: drop unnecessary typedefs and handles
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> For the ARM changes:
>
> Acked-by: Julien Grall <julien.grall@arm.com>
Thanks, but I have this on record already.
Jan
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 1/2] public/domctl: drop unnecessary typedefs and handles
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
` (4 preceding siblings ...)
2017-09-20 10:39 ` George Dunlap
@ 2017-09-20 14:46 ` Tamas K Lengyel
5 siblings, 0 replies; 20+ messages in thread
From: Tamas K Lengyel @ 2017-09-20 14:46 UTC (permalink / raw)
To: Jan Beulich
Cc: Stefano Stabellini, Wei Liu, Razvan Cojocaru, George Dunlap,
Andrew Cooper, Dario Faggioli, Ian Jackson, Tim Deegan,
Julien Grall, Meng Xu, xen-devel
On Tue, Sep 12, 2017 at 9:08 AM, Jan Beulich <JBeulich@suse.com> wrote:
> By virtue of the struct xen_domctl container structure, most of them
> are really just cluttering the name space.
>
> While doing so,
> - convert an enum typed (pt_irq_type_t) structure field to a fixed
> width type,
> - make x86's paging_domctl() and descendants take a properly typed
> handle,
> - add const in a few places.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
Acked-by: Tamas K Lengyel <tamas@tklengyel.com>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-20 10:41 ` George Dunlap
@ 2017-09-20 15:36 ` Robert VanVossen
0 siblings, 0 replies; 20+ messages in thread
From: Robert VanVossen @ 2017-09-20 15:36 UTC (permalink / raw)
To: George Dunlap, Jan Beulich
Cc: Stefano Stabellini, Wei Liu, Andrew Cooper, Dario Faggioli,
Ian Jackson, Tim Deegan, Ross Lagerwall, Julien Grall,
Josh Whitehead, xen-devel
On 9/20/2017 6:41 AM, George Dunlap wrote:
> On Tue, Sep 12, 2017 at 4:10 PM, Jan Beulich <JBeulich@suse.com> wrote:
>> By virtue of the struct xen_sysctl container structure, most of them
>> are really just cluttering the name space.
>>
>> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> Acked-by: George Dunlap <george.dunlap@citrix.com>
>
>>
>> --- a/xen/common/sched_arinc653.c
>> +++ b/xen/common/sched_arinc653.c
>> @@ -694,7 +694,7 @@ static int
>> a653sched_adjust_global(const struct scheduler *ops,
>> struct xen_sysctl_scheduler_op *sc)
>> {
>> - xen_sysctl_arinc653_schedule_t local_sched;
>> + struct xen_sysctl_arinc653_schedule local_sched;
>> int rc = -EINVAL;
>>
>> switch ( sc->cmd )
Acked-by: Robert VanVossen <robert.vanvossen@dornerworks.com>
Thanks,
Robbie VanVossen
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
* Re: [PATCH 2/2] public/sysctl: drop unnecessary typedefs and handles
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
` (2 preceding siblings ...)
2017-09-20 10:41 ` George Dunlap
@ 2017-09-20 18:07 ` Robert VanVossen
3 siblings, 0 replies; 20+ messages in thread
From: Robert VanVossen @ 2017-09-20 18:07 UTC (permalink / raw)
To: Jan Beulich, xen-devel
Cc: Stefano Stabellini, Wei Liu, George Dunlap, Andrew Cooper,
Dario Faggioli, Ian Jackson, Tim Deegan, Ross Lagerwall,
Julien Grall, josh.whitehead
On 9/12/2017 11:10 AM, Jan Beulich wrote:
> By virtue of the struct xen_sysctl container structure, most of them
> are really just cluttering the name space.
>
> Signed-off-by: Jan Beulich <jbeulich@suse.com>
>
> --- a/tools/libxc/include/xenctrl.h
> +++ b/tools/libxc/include/xenctrl.h
> @@ -1212,11 +1212,11 @@ int xc_readconsolering(xc_interface *xch
> int xc_send_debug_keys(xc_interface *xch, char *keys);
> int xc_set_parameters(xc_interface *xch, char *params);
>
> -typedef xen_sysctl_physinfo_t xc_physinfo_t;
> -typedef xen_sysctl_cputopo_t xc_cputopo_t;
> -typedef xen_sysctl_numainfo_t xc_numainfo_t;
> -typedef xen_sysctl_meminfo_t xc_meminfo_t;
> -typedef xen_sysctl_pcitopoinfo_t xc_pcitopoinfo_t;
> +typedef struct xen_sysctl_physinfo xc_physinfo_t;
> +typedef struct xen_sysctl_cputopo xc_cputopo_t;
> +typedef struct xen_sysctl_numainfo xc_numainfo_t;
> +typedef struct xen_sysctl_meminfo xc_meminfo_t;
> +typedef struct xen_sysctl_pcitopoinfo xc_pcitopoinfo_t;
>
> typedef uint32_t xc_cpu_to_node_t;
> typedef uint32_t xc_cpu_to_socket_t;
> @@ -1240,7 +1240,7 @@ int xc_machphys_mfn_list(xc_interface *x
> unsigned long max_extents,
> xen_pfn_t *extent_start);
>
> -typedef xen_sysctl_cpuinfo_t xc_cpuinfo_t;
> +typedef struct xen_sysctl_cpuinfo xc_cpuinfo_t;
> int xc_getcpuinfo(xc_interface *xch, int max_cpus,
> xc_cpuinfo_t *info, int *nr_cpus);
>
> @@ -1853,8 +1853,8 @@ int xc_cpu_offline(xc_interface *xch, in
> * cpufreq para name of this structure named
> * same as sysfs file name of native linux
> */
> -typedef xen_userspace_t xc_userspace_t;
> -typedef xen_ondemand_t xc_ondemand_t;
> +typedef struct xen_userspace xc_userspace_t;
> +typedef struct xen_ondemand xc_ondemand_t;
>
> struct xc_get_cpufreq_para {
> /* IN/OUT variable */
> --- a/tools/libxc/xc_misc.c
> +++ b/tools/libxc/xc_misc.c
> @@ -547,7 +547,7 @@ int xc_livepatch_upload(xc_interface *xc
> DECLARE_SYSCTL;
> DECLARE_HYPERCALL_BUFFER(char, local);
> DECLARE_HYPERCALL_BOUNCE(name, 0 /* later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> if ( !name || !payload )
> {
> @@ -594,12 +594,12 @@ int xc_livepatch_upload(xc_interface *xc
>
> int xc_livepatch_get(xc_interface *xch,
> char *name,
> - xen_livepatch_status_t *status)
> + struct xen_livepatch_status *status)
> {
> int rc;
> DECLARE_SYSCTL;
> DECLARE_HYPERCALL_BOUNCE(name, 0 /*adjust later */, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> if ( !name )
> {
> @@ -677,7 +677,7 @@ int xc_livepatch_get(xc_interface *xch,
> * retrieved (if any).
> */
> int xc_livepatch_list(xc_interface *xch, unsigned int max, unsigned int start,
> - xen_livepatch_status_t *info,
> + struct xen_livepatch_status *info,
> char *name, uint32_t *len,
> unsigned int *done,
> unsigned int *left)
> @@ -837,7 +837,7 @@ static int _xc_livepatch_action(xc_inter
> DECLARE_SYSCTL;
> /* The size is figured out when we strlen(name) */
> DECLARE_HYPERCALL_BOUNCE(name, 0, XC_HYPERCALL_BUFFER_BOUNCE_IN);
> - xen_livepatch_name_t def_name = { .pad = { 0, 0, 0 } };
> + struct xen_livepatch_name def_name = { };
>
> def_name.size = strlen(name) + 1;
>
> --- a/xen/arch/arm/sysctl.c
> +++ b/xen/arch/arm/sysctl.c
> @@ -12,7 +12,7 @@
> #include <xen/hypercall.h>
> #include <public/sysctl.h>
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi) { }
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi) { }
>
> long arch_do_sysctl(struct xen_sysctl *sysctl,
> XEN_GUEST_HANDLE_PARAM(xen_sysctl_t) u_sysctl)
> --- a/xen/arch/x86/sysctl.c
> +++ b/xen/arch/x86/sysctl.c
> @@ -72,7 +72,7 @@ long cpu_down_helper(void *data)
> return ret;
> }
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi)
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi)
> {
> memcpy(pi->hw_cap, boot_cpu_data.x86_capability,
> min(sizeof(pi->hw_cap), sizeof(boot_cpu_data.x86_capability)));
> --- a/xen/common/gcov/gcov.c
> +++ b/xen/common/gcov/gcov.c
> @@ -209,7 +209,7 @@ static int gcov_dump_all(XEN_GUEST_HANDL
> return ret;
> }
>
> -int sysctl_gcov_op(xen_sysctl_gcov_op_t *op)
> +int sysctl_gcov_op(struct xen_sysctl_gcov_op *op)
> {
> int ret;
>
> --- a/xen/common/livepatch.c
> +++ b/xen/common/livepatch.c
> @@ -104,7 +104,7 @@ static struct livepatch_work livepatch_w
> */
> static DEFINE_PER_CPU(bool_t, work_to_do);
>
> -static int get_name(const xen_livepatch_name_t *name, char *n)
> +static int get_name(const struct xen_livepatch_name *name, char *n)
> {
> if ( !name->size || name->size > XEN_LIVEPATCH_NAME_SIZE )
> return -EINVAL;
> @@ -121,7 +121,7 @@ static int get_name(const xen_livepatch_
> return 0;
> }
>
> -static int verify_payload(const xen_sysctl_livepatch_upload_t *upload, char *n)
> +static int verify_payload(const struct xen_sysctl_livepatch_upload *upload, char *n)
> {
> if ( get_name(&upload->name, n) )
> return -EINVAL;
> @@ -897,7 +897,7 @@ static int load_payload_data(struct payl
> return rc;
> }
>
> -static int livepatch_upload(xen_sysctl_livepatch_upload_t *upload)
> +static int livepatch_upload(struct xen_sysctl_livepatch_upload *upload)
> {
> struct payload *data, *found;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -954,7 +954,7 @@ static int livepatch_upload(xen_sysctl_l
> return rc;
> }
>
> -static int livepatch_get(xen_sysctl_livepatch_get_t *get)
> +static int livepatch_get(struct xen_sysctl_livepatch_get *get)
> {
> struct payload *data;
> int rc;
> @@ -985,9 +985,9 @@ static int livepatch_get(xen_sysctl_live
> return 0;
> }
>
> -static int livepatch_list(xen_sysctl_livepatch_list_t *list)
> +static int livepatch_list(struct xen_sysctl_livepatch_list *list)
> {
> - xen_livepatch_status_t status;
> + struct xen_livepatch_status status;
> struct payload *data;
> unsigned int idx = 0, i = 0;
> int rc = 0;
> @@ -1451,7 +1451,7 @@ static int build_id_dep(struct payload *
> return 0;
> }
>
> -static int livepatch_action(xen_sysctl_livepatch_action_t *action)
> +static int livepatch_action(struct xen_sysctl_livepatch_action *action)
> {
> struct payload *data;
> char n[XEN_LIVEPATCH_NAME_SIZE];
> @@ -1560,7 +1560,7 @@ static int livepatch_action(xen_sysctl_l
> return rc;
> }
>
> -int livepatch_op(xen_sysctl_livepatch_op_t *livepatch)
> +int livepatch_op(struct xen_sysctl_livepatch_op *livepatch)
> {
> int rc;
>
> --- a/xen/common/perfc.c
> +++ b/xen/common/perfc.c
> @@ -152,8 +152,8 @@ void perfc_reset(unsigned char key)
> arch_perfc_reset();
> }
>
> -static xen_sysctl_perfc_desc_t perfc_d[NR_PERFCTRS];
> -static xen_sysctl_perfc_val_t *perfc_vals;
> +static struct xen_sysctl_perfc_desc perfc_d[NR_PERFCTRS];
> +static struct xen_sysctl_perfc_val *perfc_vals;
> static unsigned int perfc_nbr_vals;
> static cpumask_t perfc_cpumap;
>
> @@ -190,7 +190,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
> }
>
> xfree(perfc_vals);
> - perfc_vals = xmalloc_array(xen_sysctl_perfc_val_t, perfc_nbr_vals);
> + perfc_vals = xmalloc_array(struct xen_sysctl_perfc_val, perfc_nbr_vals);
> }
>
> if ( guest_handle_is_null(desc) )
> @@ -241,7 +241,7 @@ static int perfc_copy_info(XEN_GUEST_HAN
> }
>
> /* Dom0 control of perf counters */
> -int perfc_control(xen_sysctl_perfc_op_t *pc)
> +int perfc_control(struct xen_sysctl_perfc_op *pc)
> {
> static DEFINE_SPINLOCK(lock);
> int rc;
> --- a/xen/common/sched_arinc653.c
> +++ b/xen/common/sched_arinc653.c
> @@ -694,7 +694,7 @@ static int
> a653sched_adjust_global(const struct scheduler *ops,
> struct xen_sysctl_scheduler_op *sc)
> {
> - xen_sysctl_arinc653_schedule_t local_sched;
> + struct xen_sysctl_arinc653_schedule local_sched;
> int rc = -EINVAL;
>
> switch ( sc->cmd )
Acked-by: Robert VanVossen <robert.vanvossen@dornerworks.com>
> --- a/xen/common/sched_credit.c
> +++ b/xen/common/sched_credit.c
> @@ -1240,7 +1240,7 @@ csched_sys_cntl(const struct scheduler *
> struct xen_sysctl_scheduler_op *sc)
> {
> int rc = -EINVAL;
> - xen_sysctl_credit_schedule_t *params = &sc->u.sched_credit;
> + struct xen_sysctl_credit_schedule *params = &sc->u.sched_credit;
> struct csched_private *prv = CSCHED_PRIV(ops);
> unsigned long flags;
>
> --- a/xen/common/sched_credit2.c
> +++ b/xen/common/sched_credit2.c
> @@ -2443,7 +2443,7 @@ csched2_dom_cntl(
> static int csched2_sys_cntl(const struct scheduler *ops,
> struct xen_sysctl_scheduler_op *sc)
> {
> - xen_sysctl_credit2_schedule_t *params = &sc->u.sched_credit2;
> + struct xen_sysctl_credit2_schedule *params = &sc->u.sched_credit2;
> struct csched2_private *prv = csched2_priv(ops);
> unsigned long flags;
>
> --- a/xen/common/spinlock.c
> +++ b/xen/common/spinlock.c
> @@ -380,7 +380,7 @@ void spinlock_profile_reset(unsigned cha
> }
>
> typedef struct {
> - xen_sysctl_lockprof_op_t *pc;
> + struct xen_sysctl_lockprof_op *pc;
> int rc;
> } spinlock_profile_ucopy_t;
>
> @@ -388,7 +388,7 @@ static void spinlock_profile_ucopy_elem(
> int32_t type, int32_t idx, void *par)
> {
> spinlock_profile_ucopy_t *p = par;
> - xen_sysctl_lockprof_data_t elem;
> + struct xen_sysctl_lockprof_data elem;
>
> if ( p->rc )
> return;
> @@ -411,7 +411,7 @@ static void spinlock_profile_ucopy_elem(
> }
>
> /* Dom0 control of lock profiling */
> -int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc)
> +int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc)
> {
> int rc = 0;
> spinlock_profile_ucopy_t par;
> --- a/xen/common/sysctl.c
> +++ b/xen/common/sysctl.c
> @@ -250,7 +250,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
>
> case XEN_SYSCTL_physinfo:
> {
> - xen_sysctl_physinfo_t *pi = &op->u.physinfo;
> + struct xen_sysctl_physinfo *pi = &op->u.physinfo;
>
> memset(pi, 0, sizeof(*pi));
> pi->threads_per_core =
> @@ -276,7 +276,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> case XEN_SYSCTL_numainfo:
> {
> unsigned int i, j, num_nodes;
> - xen_sysctl_numainfo_t *ni = &op->u.numainfo;
> + struct xen_sysctl_numainfo *ni = &op->u.numainfo;
> bool_t do_meminfo = !guest_handle_is_null(ni->meminfo);
> bool_t do_distance = !guest_handle_is_null(ni->distance);
>
> @@ -284,7 +284,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
>
> if ( do_meminfo || do_distance )
> {
> - xen_sysctl_meminfo_t meminfo = { 0 };
> + struct xen_sysctl_meminfo meminfo = { };
>
> if ( num_nodes > ni->num_nodes )
> num_nodes = ni->num_nodes;
> @@ -346,12 +346,12 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> case XEN_SYSCTL_cputopoinfo:
> {
> unsigned int i, num_cpus;
> - xen_sysctl_cputopoinfo_t *ti = &op->u.cputopoinfo;
> + struct xen_sysctl_cputopoinfo *ti = &op->u.cputopoinfo;
>
> num_cpus = cpumask_last(&cpu_online_map) + 1;
> if ( !guest_handle_is_null(ti->cputopo) )
> {
> - xen_sysctl_cputopo_t cputopo = { 0 };
> + struct xen_sysctl_cputopo cputopo = { };
>
> if ( num_cpus > ti->num_cpus )
> num_cpus = ti->num_cpus;
> @@ -405,7 +405,7 @@ long do_sysctl(XEN_GUEST_HANDLE_PARAM(xe
> #ifdef CONFIG_HAS_PCI
> case XEN_SYSCTL_pcitopoinfo:
> {
> - xen_sysctl_pcitopoinfo_t *ti = &op->u.pcitopoinfo;
> + struct xen_sysctl_pcitopoinfo *ti = &op->u.pcitopoinfo;
> unsigned int i = 0;
>
> if ( guest_handle_is_null(ti->devs) ||
> --- a/xen/common/trace.c
> +++ b/xen/common/trace.c
> @@ -367,9 +367,9 @@ void __init init_trace_bufs(void)
>
> /**
> * tb_control - sysctl operations on trace buffers.
> - * @tbc: a pointer to a xen_sysctl_tbuf_op_t to be filled out
> + * @tbc: a pointer to a struct xen_sysctl_tbuf_op to be filled out
> */
> -int tb_control(xen_sysctl_tbuf_op_t *tbc)
> +int tb_control(struct xen_sysctl_tbuf_op *tbc)
> {
> static DEFINE_SPINLOCK(lock);
> int rc = 0;
> --- a/xen/include/public/sysctl.h
> +++ b/xen/include/public/sysctl.h
> @@ -58,8 +58,6 @@ struct xen_sysctl_readconsole {
> /* IN: Size of buffer; OUT: Bytes written to buffer. */
> uint32_t count;
> };
> -typedef struct xen_sysctl_readconsole xen_sysctl_readconsole_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_readconsole_t);
>
> /* Get trace buffers machine base address */
> /* XEN_SYSCTL_tbuf_op */
> @@ -79,8 +77,6 @@ struct xen_sysctl_tbuf_op {
> uint64_aligned_t buffer_mfn;
> uint32_t size; /* Also an IN variable! */
> };
> -typedef struct xen_sysctl_tbuf_op xen_sysctl_tbuf_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tbuf_op_t);
>
> /*
> * Get physical information about the host machine
> @@ -109,8 +105,6 @@ struct xen_sysctl_physinfo {
> /* XEN_SYSCTL_PHYSCAP_??? */
> uint32_t capabilities;
> };
> -typedef struct xen_sysctl_physinfo xen_sysctl_physinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_physinfo_t);
>
> /*
> * Get the ID of the current scheduler.
> @@ -120,8 +114,6 @@ struct xen_sysctl_sched_id {
> /* OUT variable */
> uint32_t sched_id;
> };
> -typedef struct xen_sysctl_sched_id xen_sysctl_sched_id_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_sched_id_t);
>
> /* Interface for controlling Xen software performance counters. */
> /* XEN_SYSCTL_perfc_op */
> @@ -148,8 +140,6 @@ struct xen_sysctl_perfc_op {
> /* counter values (or NULL) */
> XEN_GUEST_HANDLE_64(xen_sysctl_perfc_val_t) val;
> };
> -typedef struct xen_sysctl_perfc_op xen_sysctl_perfc_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_perfc_op_t);
>
> /* XEN_SYSCTL_getdomaininfolist */
> struct xen_sysctl_getdomaininfolist {
> @@ -160,8 +150,6 @@ struct xen_sysctl_getdomaininfolist {
> /* OUT variables. */
> uint32_t num_domains;
> };
> -typedef struct xen_sysctl_getdomaininfolist xen_sysctl_getdomaininfolist_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getdomaininfolist_t);
>
> /* Inject debug keys into Xen. */
> /* XEN_SYSCTL_debug_keys */
> @@ -170,8 +158,6 @@ struct xen_sysctl_debug_keys {
> XEN_GUEST_HANDLE_64(char) keys;
> uint32_t nr_keys;
> };
> -typedef struct xen_sysctl_debug_keys xen_sysctl_debug_keys_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_debug_keys_t);
>
> /* Get physical CPU information. */
> /* XEN_SYSCTL_getcpuinfo */
> @@ -187,8 +173,6 @@ struct xen_sysctl_getcpuinfo {
> /* OUT variables. */
> uint32_t nr_cpus;
> };
> -typedef struct xen_sysctl_getcpuinfo xen_sysctl_getcpuinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_getcpuinfo_t);
>
> /* XEN_SYSCTL_availheap */
> struct xen_sysctl_availheap {
> @@ -199,8 +183,6 @@ struct xen_sysctl_availheap {
> /* OUT variables. */
> uint64_aligned_t avail_bytes;/* Bytes available in the specified region. */
> };
> -typedef struct xen_sysctl_availheap xen_sysctl_availheap_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_availheap_t);
>
> /* XEN_SYSCTL_get_pmstat */
> struct pm_px_val {
> @@ -219,8 +201,6 @@ struct pm_px_stat {
> XEN_GUEST_HANDLE_64(uint64) trans_pt; /* Px transition table */
> XEN_GUEST_HANDLE_64(pm_px_val_t) pt;
> };
> -typedef struct pm_px_stat pm_px_stat_t;
> -DEFINE_XEN_GUEST_HANDLE(pm_px_stat_t);
>
> struct pm_cx_stat {
> uint32_t nr; /* entry nr in triggers & residencies, including C0 */
> @@ -259,8 +239,6 @@ struct xen_sysctl_get_pmstat {
> /* other struct for tx, etc */
> } u;
> };
> -typedef struct xen_sysctl_get_pmstat xen_sysctl_get_pmstat_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_get_pmstat_t);
>
> /* XEN_SYSCTL_cpu_hotplug */
> struct xen_sysctl_cpu_hotplug {
> @@ -270,8 +248,6 @@ struct xen_sysctl_cpu_hotplug {
> #define XEN_SYSCTL_CPU_HOTPLUG_OFFLINE 1
> uint32_t op; /* hotplug opcode */
> };
> -typedef struct xen_sysctl_cpu_hotplug xen_sysctl_cpu_hotplug_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_hotplug_t);
>
> /*
> * Get/set xen power management, include
> @@ -281,7 +257,6 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_h
> struct xen_userspace {
> uint32_t scaling_setspeed;
> };
> -typedef struct xen_userspace xen_userspace_t;
>
> struct xen_ondemand {
> uint32_t sampling_rate_max;
> @@ -290,7 +265,6 @@ struct xen_ondemand {
> uint32_t sampling_rate;
> uint32_t up_threshold;
> };
> -typedef struct xen_ondemand xen_ondemand_t;
>
> /*
> * cpufreq para name of this structure named
> @@ -461,8 +435,6 @@ struct xen_sysctl_lockprof_op {
> /* profile information (or NULL) */
> XEN_GUEST_HANDLE_64(xen_sysctl_lockprof_data_t) data;
> };
> -typedef struct xen_sysctl_lockprof_op xen_sysctl_lockprof_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_lockprof_op_t);
>
> /* XEN_SYSCTL_cputopoinfo */
> #define XEN_INVALID_CORE_ID (~0U)
> @@ -493,8 +465,6 @@ struct xen_sysctl_cputopoinfo {
> uint32_t num_cpus;
> XEN_GUEST_HANDLE_64(xen_sysctl_cputopo_t) cputopo;
> };
> -typedef struct xen_sysctl_cputopoinfo xen_sysctl_cputopoinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cputopoinfo_t);
>
> /* XEN_SYSCTL_numainfo */
> #define XEN_INVALID_MEM_SZ (~0U)
> @@ -535,8 +505,6 @@ struct xen_sysctl_numainfo {
> */
> XEN_GUEST_HANDLE_64(uint32) distance;
> };
> -typedef struct xen_sysctl_numainfo xen_sysctl_numainfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_numainfo_t);
>
> /* XEN_SYSCTL_cpupool_op */
> #define XEN_SYSCTL_CPUPOOL_OP_CREATE 1 /* C */
> @@ -556,8 +524,6 @@ struct xen_sysctl_cpupool_op {
> uint32_t n_dom; /* OUT: I */
> struct xenctl_bitmap cpumap; /* OUT: IF */
> };
> -typedef struct xen_sysctl_cpupool_op xen_sysctl_cpupool_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpupool_op_t);
>
> /*
> * Error return values of cpupool operations:
> @@ -637,14 +603,10 @@ struct xen_sysctl_credit_schedule {
> unsigned tslice_ms;
> unsigned ratelimit_us;
> };
> -typedef struct xen_sysctl_credit_schedule xen_sysctl_credit_schedule_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit_schedule_t);
>
> struct xen_sysctl_credit2_schedule {
> unsigned ratelimit_us;
> };
> -typedef struct xen_sysctl_credit2_schedule xen_sysctl_credit2_schedule_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_credit2_schedule_t);
>
> /* XEN_SYSCTL_scheduler_op */
> /* Set or get info? */
> @@ -662,8 +624,6 @@ struct xen_sysctl_scheduler_op {
> struct xen_sysctl_credit2_schedule sched_credit2;
> } u;
> };
> -typedef struct xen_sysctl_scheduler_op xen_sysctl_scheduler_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_scheduler_op_t);
>
> /*
> * Output format of gcov data:
> @@ -696,8 +656,6 @@ struct xen_sysctl_gcov_op {
> uint32_t size; /* IN/OUT: size of the buffer */
> XEN_GUEST_HANDLE_64(char) buffer; /* OUT */
> };
> -typedef struct xen_sysctl_gcov_op xen_sysctl_gcov_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_gcov_op_t);
>
> #define XEN_SYSCTL_PSR_CMT_get_total_rmid 0
> #define XEN_SYSCTL_PSR_CMT_get_l3_upscaling_factor 1
> @@ -716,8 +674,6 @@ struct xen_sysctl_psr_cmt_op {
> } l3_cache;
> } u;
> };
> -typedef struct xen_sysctl_psr_cmt_op xen_sysctl_psr_cmt_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cmt_op_t);
>
> /* XEN_SYSCTL_pcitopoinfo */
> #define XEN_INVALID_DEV (XEN_INVALID_NODE_ID - 1)
> @@ -740,8 +696,6 @@ struct xen_sysctl_pcitopoinfo {
> */
> XEN_GUEST_HANDLE_64(uint32) nodes;
> };
> -typedef struct xen_sysctl_pcitopoinfo xen_sysctl_pcitopoinfo_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_pcitopoinfo_t);
>
> #define XEN_SYSCTL_PSR_CAT_get_l3_info 0
> #define XEN_SYSCTL_PSR_CAT_get_l2_info 1
> @@ -757,8 +711,6 @@ struct xen_sysctl_psr_cat_op {
> } cat_info;
> } u;
> };
> -typedef struct xen_sysctl_psr_cat_op xen_sysctl_psr_cat_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_psr_cat_op_t);
>
> #define XEN_SYSCTL_TMEM_OP_ALL_CLIENTS 0xFFFFU
>
> @@ -863,8 +815,6 @@ struct xen_sysctl_tmem_op {
> /* of them. */
> } u;
> };
> -typedef struct xen_sysctl_tmem_op xen_sysctl_tmem_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_tmem_op_t);
>
> /*
> * XEN_SYSCTL_get_cpu_levelling_caps (x86 specific)
> @@ -884,8 +834,6 @@ struct xen_sysctl_cpu_levelling_caps {
> #define XEN_SYSCTL_CPU_LEVELCAP_l7s0_ebx (1ul << 8) /* 0x00000007:0.ebx */
> uint32_t caps;
> };
> -typedef struct xen_sysctl_cpu_levelling_caps xen_sysctl_cpu_levelling_caps_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_cpu_levelling_caps_t);
>
> /*
> * XEN_SYSCTL_get_cpu_featureset (x86 specific)
> @@ -909,8 +857,6 @@ struct xen_sysctl_cpu_featureset {
> * maximum length. */
> XEN_GUEST_HANDLE_64(uint32) features; /* OUT: */
> };
> -typedef struct xen_sysctl_featureset xen_sysctl_featureset_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_featureset_t);
>
> /*
> * XEN_SYSCTL_LIVEPATCH_op
> @@ -966,8 +912,6 @@ struct xen_livepatch_name {
> XEN_LIVEPATCH_NAME_SIZE. */
> uint16_t pad[3]; /* IN: MUST be zero. */
> };
> -typedef struct xen_livepatch_name xen_livepatch_name_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_livepatch_name_t);
>
> /*
> * Upload a payload to the hypervisor. The payload is verified
> @@ -986,12 +930,10 @@ DEFINE_XEN_GUEST_HANDLE(xen_livepatch_na
> */
> #define XEN_SYSCTL_LIVEPATCH_UPLOAD 0
> struct xen_sysctl_livepatch_upload {
> - xen_livepatch_name_t name; /* IN, name of the patch. */
> + struct xen_livepatch_name name; /* IN, name of the patch. */
> uint64_t size; /* IN, size of the ELF file. */
> XEN_GUEST_HANDLE_64(uint8) payload; /* IN, the ELF file. */
> };
> -typedef struct xen_sysctl_livepatch_upload xen_sysctl_livepatch_upload_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_upload_t);
>
> /*
>  * Retrieve the status of a specific payload.
> @@ -1013,11 +955,9 @@ typedef struct xen_livepatch_status xen_
> DEFINE_XEN_GUEST_HANDLE(xen_livepatch_status_t);
>
> struct xen_sysctl_livepatch_get {
> - xen_livepatch_name_t name; /* IN, name of the payload. */
> - xen_livepatch_status_t status; /* IN/OUT, state of it. */
> + struct xen_livepatch_name name; /* IN, name of the payload. */
> + struct xen_livepatch_status status; /* IN/OUT, state of it. */
> };
> -typedef struct xen_sysctl_livepatch_get xen_sysctl_livepatch_get_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_get_t);
>
> /*
> * Retrieve an array of abbreviated status and names of payloads that are
> @@ -1059,8 +999,6 @@ struct xen_sysctl_livepatch_list {
> XEN_GUEST_HANDLE_64(uint32) len; /* OUT: Array of lengths of name's.
> Must have nr of them. */
> };
> -typedef struct xen_sysctl_livepatch_list xen_sysctl_livepatch_list_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_list_t);
>
> /*
> * Perform an operation on the payload structure referenced by the `name` field.
> @@ -1069,7 +1007,7 @@ DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livep
> */
> #define XEN_SYSCTL_LIVEPATCH_ACTION 3
> struct xen_sysctl_livepatch_action {
> - xen_livepatch_name_t name; /* IN, name of the patch. */
> + struct xen_livepatch_name name; /* IN, name of the patch. */
> #define LIVEPATCH_ACTION_UNLOAD 1
> #define LIVEPATCH_ACTION_REVERT 2
> #define LIVEPATCH_ACTION_APPLY 3
> @@ -1080,21 +1018,17 @@ struct xen_sysctl_livepatch_action {
> /* Or upper bound of time (ns) */
> /* for operation to take. */
> };
> -typedef struct xen_sysctl_livepatch_action xen_sysctl_livepatch_action_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_action_t);
>
> struct xen_sysctl_livepatch_op {
> uint32_t cmd; /* IN: XEN_SYSCTL_LIVEPATCH_*. */
> uint32_t pad; /* IN: Always zero. */
> union {
> - xen_sysctl_livepatch_upload_t upload;
> - xen_sysctl_livepatch_list_t list;
> - xen_sysctl_livepatch_get_t get;
> - xen_sysctl_livepatch_action_t action;
> + struct xen_sysctl_livepatch_upload upload;
> + struct xen_sysctl_livepatch_list list;
> + struct xen_sysctl_livepatch_get get;
> + struct xen_sysctl_livepatch_action action;
> } u;
> };
> -typedef struct xen_sysctl_livepatch_op xen_sysctl_livepatch_op_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_livepatch_op_t);
>
> /*
> * XEN_SYSCTL_set_parameter
> @@ -1111,8 +1045,6 @@ struct xen_sysctl_set_parameter {
> uint16_t size; /* IN: size of parameters. */
> uint16_t pad[3]; /* IN: MUST be zero. */
> };
> -typedef struct xen_sysctl_set_parameter xen_sysctl_set_parameter_t;
> -DEFINE_XEN_GUEST_HANDLE(xen_sysctl_set_parameter_t);
>
> struct xen_sysctl {
> uint32_t cmd;
> --- a/xen/include/xen/gcov.h
> +++ b/xen/include/xen/gcov.h
> @@ -3,7 +3,7 @@
>
> #ifdef CONFIG_GCOV
> #include <public/sysctl.h>
> -int sysctl_gcov_op(xen_sysctl_gcov_op_t *op);
> +int sysctl_gcov_op(struct xen_sysctl_gcov_op *op);
> #endif
>
> #endif /* _XEN_GCOV_H */
> --- a/xen/include/xen/sched.h
> +++ b/xen/include/xen/sched.h
> @@ -914,7 +914,7 @@ int cpupool_do_sysctl(struct xen_sysctl_
> void schedule_dump(struct cpupool *c);
> extern void dump_runq(unsigned char key);
>
> -void arch_do_physinfo(xen_sysctl_physinfo_t *pi);
> +void arch_do_physinfo(struct xen_sysctl_physinfo *pi);
>
> #endif /* __SCHED_H__ */
>
> --- a/xen/include/xen/spinlock.h
> +++ b/xen/include/xen/spinlock.h
> @@ -110,7 +110,7 @@ void _lock_profile_deregister_struct(int
> #define lock_profile_deregister_struct(type, ptr) \
> _lock_profile_deregister_struct(type, &((ptr)->profile_head))
>
> -extern int spinlock_profile_control(xen_sysctl_lockprof_op_t *pc);
> +extern int spinlock_profile_control(struct xen_sysctl_lockprof_op *pc);
> extern void spinlock_profile_printall(unsigned char key);
> extern void spinlock_profile_reset(unsigned char key);
>
>
>
_______________________________________________
Xen-devel mailing list
Xen-devel@lists.xen.org
https://lists.xen.org/xen-devel
^ permalink raw reply [flat|nested] 20+ messages in thread
end of thread, other threads:[~2017-09-20 18:08 UTC | newest]
Thread overview: 20+ messages (download: mbox.gz follow: Atom feed
-- links below jump to the message on this page --
2017-09-12 14:25 [PATCH 0/2] public/*ctl: drop unnecessary typedefs and handles Jan Beulich
2017-09-12 15:08 ` [PATCH 1/2] public/domctl: " Jan Beulich
2017-09-12 15:33 ` Razvan Cojocaru
2017-09-12 15:58 ` Dario Faggioli
2017-09-12 16:11 ` Meng Xu
2017-09-19 15:28 ` Ping: " Jan Beulich
2017-09-20 10:39 ` George Dunlap
2017-09-20 14:46 ` Tamas K Lengyel
2017-09-12 15:10 ` [PATCH 2/2] public/sysctl: " Jan Beulich
2017-09-12 15:57 ` Dario Faggioli
2017-09-19 15:31 ` Ping: " Jan Beulich
2017-09-19 15:40 ` Konrad Rzeszutek Wilk
2017-09-20 10:41 ` George Dunlap
2017-09-20 15:36 ` Robert VanVossen
2017-09-20 18:07 ` Robert VanVossen
2017-09-12 16:00 ` [PATCH 0/2] public/*ctl: " Wei Liu
2017-09-12 16:12 ` Andrew Cooper
2017-09-12 16:15 ` Julien Grall
2017-09-20 11:07 ` Julien Grall
2017-09-20 11:38 ` Jan Beulich
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).