* [PATCH v2 1/2] arinc: Add cpu-pool support to scheduler.
2013-12-03 22:24 [PATCH v2 0/2] arinc: Implement cpu-pool support Nathan Studer
@ 2013-12-03 22:24 ` Nathan Studer
2013-12-04 11:00 ` Dario Faggioli
2013-12-03 22:24 ` [PATCH v2 2/2] arinc: Add poolid parameter to scheduler get/set functions Nathan Studer
2013-12-04 11:20 ` [PATCH v2 0/2] arinc: Implement cpu-pool support George Dunlap
2 siblings, 1 reply; 7+ messages in thread
From: Nathan Studer @ 2013-12-03 22:24 UTC (permalink / raw)
To: xen-devel
Cc: Simon Martin, Ian Campbell, Stefano Stabellini, George Dunlap,
Andrew Cooper, Ian Jackson, Robert VanVossen, Nathan Studer
From: Nathan Studer <nate.studer@dornerworks.com>
1. Remove the restriction that dom0 must be in the schedule, since dom0 may
not belong to the scheduler's pool.
2. Add a schedule entry for each of dom0's vcpus as they are created.
3. Add code to deal with empty schedules in the do_schedule function.
4. Call the correct idle task for the pcpu on which the scheduling decision
is being made in do_schedule.
5. Add code to prevent migration of a vcpu.
6. Implement a proper pick_cpu function, which prefers the current processor.
7. Add a scheduler lock to protect access to global variables from multiple
PCPUs.
These changes do not implement arinc653 multicore. Since the schedule only
supports 1 vcpu entry per slot, even if the vcpus of a domain are run on
multiple pcpus, the scheduler will essentially serialize their execution.
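For illustration only (not part of the patch), the selection logic that
results in a653sched_do_schedule() can be summarised roughly as below; the
helper name pick_task() is hypothetical, and the major-frame accounting and
locking are omitted:

    /*
     * Rough consolidation of the do_schedule changes: pick the vcpu to run
     * on this PCPU, falling back to this PCPU's idle vcpu as needed.
     */
    static struct vcpu *pick_task(a653sched_priv_t *sched_priv,
                                  unsigned int sched_index,
                                  unsigned int cpu,
                                  bool_t tasklet_work_scheduled)
    {
        struct vcpu *new_task;

        /* An out-of-range index (e.g. an empty schedule) selects idle. */
        new_task = (sched_index < sched_priv->num_schedule_entries)
            ? sched_priv->schedule[sched_index].vc
            : IDLETASK(cpu);

        /* Fall back to idle if the chosen vcpu is not awake and runnable. */
        if ( !((new_task != NULL)
               && (AVCPU(new_task) != NULL)
               && AVCPU(new_task)->awake
               && vcpu_runnable(new_task)) )
            new_task = IDLETASK(cpu);

        /* Tasklet work (run in idle vcpu context) overrides all else. */
        if ( tasklet_work_scheduled )
            new_task = IDLETASK(cpu);

        /*
         * Never pick a vcpu whose ->processor differs from this PCPU:
         * that would amount to a migration, which is not supported.
         */
        if ( !is_idle_vcpu(new_task) && (new_task->processor != cpu) )
            new_task = IDLETASK(cpu);

        return new_task;
    }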
Signed-off-by: Nathan Studer <nate.studer@dornerworks.com>
---
v2:
Address Andrew Cooper's review comments.
Add lock to protect global variables that could be accessed from more
than one PCPU.
---
xen/common/sched_arinc653.c | 111 ++++++++++++++++++++++++++++++++-----------
1 file changed, 84 insertions(+), 27 deletions(-)
diff --git a/xen/common/sched_arinc653.c b/xen/common/sched_arinc653.c
index f4eb943..5f09ded 100644
--- a/xen/common/sched_arinc653.c
+++ b/xen/common/sched_arinc653.c
@@ -41,6 +41,11 @@
**************************************************************************/
/**
+ * Default timeslice for domain 0.
+ */
+#define DEFAULT_TIMESLICE MILLISECS(10)
+
+/**
* Retrieve the idle VCPU for a given physical CPU
*/
#define IDLETASK(cpu) (idle_vcpu[cpu])
@@ -100,6 +105,9 @@ typedef struct sched_entry_s
*/
typedef struct a653sched_priv_s
{
+ /* lock for the whole pluggable scheduler, nests inside cpupool_lock */
+ spinlock_t lock;
+
/**
* This array holds the active ARINC 653 schedule.
*
@@ -119,7 +127,7 @@ typedef struct a653sched_priv_s
* or a domain with multiple VCPUs could have a different
* schedule entry for each VCPU.
*/
- int num_schedule_entries;
+ unsigned int num_schedule_entries;
/**
* the major frame time for the ARINC 653 schedule.
@@ -224,9 +232,11 @@ arinc653_sched_set(
{
a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
s_time_t total_runtime = 0;
- bool_t found_dom0 = 0;
- const static xen_domain_handle_t dom0_handle = {0};
unsigned int i;
+ unsigned long flags;
+ int rc = -EINVAL;
+
+ spin_lock_irqsave(&sched_priv->lock, flags);
/* Check for valid major frame and number of schedule entries. */
if ( (schedule->major_frame <= 0)
@@ -236,10 +246,6 @@ arinc653_sched_set(
for ( i = 0; i < schedule->num_sched_entries; i++ )
{
- if ( dom_handle_cmp(schedule->sched_entries[i].dom_handle,
- dom0_handle) == 0 )
- found_dom0 = 1;
-
/* Check for a valid VCPU ID and run time. */
if ( (schedule->sched_entries[i].vcpu_id >= MAX_VIRT_CPUS)
|| (schedule->sched_entries[i].runtime <= 0) )
@@ -249,10 +255,6 @@ arinc653_sched_set(
total_runtime += schedule->sched_entries[i].runtime;
}
- /* Error if the schedule doesn't contain a slot for domain 0. */
- if ( !found_dom0 )
- goto fail;
-
/*
* Error if the major frame is not large enough to run all entries as
* indicated by comparing the total run time to the major frame length.
@@ -284,10 +286,11 @@ arinc653_sched_set(
*/
sched_priv->next_major_frame = NOW();
- return 0;
+ rc = 0;
fail:
- return -EINVAL;
+ spin_unlock_irqrestore(&sched_priv->lock, flags);
+ return rc;
}
/**
@@ -307,6 +310,9 @@ arinc653_sched_get(
{
a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
unsigned int i;
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched_priv->lock, flags);
schedule->num_sched_entries = sched_priv->num_schedule_entries;
schedule->major_frame = sched_priv->major_frame;
@@ -319,6 +325,8 @@ arinc653_sched_get(
schedule->sched_entries[i].runtime = sched_priv->schedule[i].runtime;
}
+ spin_unlock_irqrestore(&sched_priv->lock, flags);
+
return 0;
}
@@ -347,13 +355,8 @@ a653sched_init(struct scheduler *ops)
ops->sched_data = prv;
- prv->schedule[0].dom_handle[0] = '\0';
- prv->schedule[0].vcpu_id = 0;
- prv->schedule[0].runtime = MILLISECS(10);
- prv->schedule[0].vc = NULL;
- prv->num_schedule_entries = 1;
- prv->major_frame = MILLISECS(10);
prv->next_major_frame = 0;
+ spin_lock_init(&prv->lock);
INIT_LIST_HEAD(&prv->vcpu_list);
return 0;
@@ -380,7 +383,10 @@ a653sched_deinit(const struct scheduler *ops)
static void *
a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
{
+ a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
arinc653_vcpu_t *svc;
+ unsigned int entry;
+ unsigned long flags;
/*
* Allocate memory for the ARINC 653-specific scheduler data information
@@ -390,6 +396,28 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
if ( svc == NULL )
return NULL;
+ spin_lock_irqsave(&sched_priv->lock, flags);
+
+ /*
+ * Add every one of dom0's vcpus to the schedule, as long as there are
+ * slots available.
+ */
+ if ( vc->domain->domain_id == 0 )
+ {
+ entry = sched_priv->num_schedule_entries;
+
+ if ( entry < ARINC653_MAX_DOMAINS_PER_SCHEDULE )
+ {
+ sched_priv->schedule[entry].dom_handle[0] = '\0';
+ sched_priv->schedule[entry].vcpu_id = vc->vcpu_id;
+ sched_priv->schedule[entry].runtime = DEFAULT_TIMESLICE;
+ sched_priv->schedule[entry].vc = vc;
+
+ sched_priv->major_frame += DEFAULT_TIMESLICE;
+ ++sched_priv->num_schedule_entries;
+ }
+ }
+
/*
* Initialize our ARINC 653 scheduler-specific information for the VCPU.
* The VCPU starts "asleep." When Xen is ready for the VCPU to run, it
@@ -402,6 +430,8 @@ a653sched_alloc_vdata(const struct scheduler *ops, struct vcpu *vc, void *dd)
list_add(&svc->list, &SCHED_PRIV(ops)->vcpu_list);
update_schedule_vcpus(ops);
+ spin_unlock_irqrestore(&sched_priv->lock, flags);
+
return svc;
}
@@ -535,11 +565,17 @@ a653sched_do_schedule(
{
struct task_slice ret; /* hold the chosen domain */
struct vcpu * new_task = NULL;
- static int sched_index = 0;
+ static unsigned int sched_index = 0;
static s_time_t next_switch_time;
a653sched_priv_t *sched_priv = SCHED_PRIV(ops);
+ const unsigned int cpu = smp_processor_id();
+ unsigned long flags;
+
+ spin_lock_irqsave(&sched_priv->lock, flags);
- if ( now >= sched_priv->next_major_frame )
+ if ( sched_priv->num_schedule_entries < 1 )
+ sched_priv->next_major_frame = now + DEFAULT_TIMESLICE;
+ else if ( now >= sched_priv->next_major_frame )
{
/* time to enter a new major frame
* the first time this function is called, this will be true */
@@ -574,14 +610,14 @@ a653sched_do_schedule(
*/
new_task = (sched_index < sched_priv->num_schedule_entries)
? sched_priv->schedule[sched_index].vc
- : IDLETASK(0);
+ : IDLETASK(cpu);
/* Check to see if the new task can be run (awake & runnable). */
if ( !((new_task != NULL)
&& (AVCPU(new_task) != NULL)
&& AVCPU(new_task)->awake
&& vcpu_runnable(new_task)) )
- new_task = IDLETASK(0);
+ new_task = IDLETASK(cpu);
BUG_ON(new_task == NULL);
/*
@@ -590,9 +626,16 @@ a653sched_do_schedule(
*/
BUG_ON(now >= sched_priv->next_major_frame);
+ spin_unlock_irqrestore(&sched_priv->lock, flags);
+
/* Tasklet work (which runs in idle VCPU context) overrides all else. */
if ( tasklet_work_scheduled )
- new_task = IDLETASK(0);
+ new_task = IDLETASK(cpu);
+
+ /* Running this task would result in a migration */
+ if ( !is_idle_vcpu(new_task)
+ && (new_task->processor != cpu) )
+ new_task = IDLETASK(cpu);
/*
* Return the amount of time the next domain has to run and the address
@@ -600,7 +643,7 @@ a653sched_do_schedule(
*/
ret.time = next_switch_time - now;
ret.task = new_task;
- ret.migrated = 0; /* we do not support migration */
+ ret.migrated = 0;
BUG_ON(ret.time <= 0);
@@ -618,8 +661,22 @@ a653sched_do_schedule(
static int
a653sched_pick_cpu(const struct scheduler *ops, struct vcpu *vc)
{
- /* this implementation only supports one physical CPU */
- return 0;
+ cpumask_t *online;
+ unsigned int cpu;
+
+ /*
+ * If present, prefer vc's current processor, else
+ * just find the first valid pcpu.
+ */
+ online = cpupool_scheduler_cpumask(vc->domain->cpupool);
+
+ cpu = cpumask_first(online);
+
+ if ( cpumask_test_cpu(vc->processor, online)
+ || (cpu >= nr_cpu_ids) )
+ cpu = vc->processor;
+
+ return cpu;
}
/**
--
1.7.9.5
* [PATCH v2 2/2] arinc: Add poolid parameter to scheduler get/set functions
2013-12-03 22:24 [PATCH v2 0/2] arinc: Implement cpu-pool support Nathan Studer
2013-12-03 22:24 ` [PATCH v2 1/2] arinc: Add cpu-pool support to scheduler Nathan Studer
@ 2013-12-03 22:24 ` Nathan Studer
2013-12-04 10:50 ` Dario Faggioli
2013-12-04 11:20 ` [PATCH v2 0/2] arinc: Implement cpu-pool support George Dunlap
2 siblings, 1 reply; 7+ messages in thread
From: Nathan Studer @ 2013-12-03 22:24 UTC (permalink / raw)
To: xen-devel
Cc: Simon Martin, Ian Campbell, Stefano Stabellini, George Dunlap,
Andrew Cooper, Ian Jackson, Robert VanVossen, Nathan Studer
From: Nathan Studer <nate.studer@dornerworks.com>
Signed-off-by: Nathan Studer <nate.studer@dornerworks.com>
---
v2: Remove _v2 suffixes
---
tools/libxc/xc_arinc653.c | 6 ++++--
tools/libxc/xenctrl.h | 2 ++
2 files changed, 6 insertions(+), 2 deletions(-)
diff --git a/tools/libxc/xc_arinc653.c b/tools/libxc/xc_arinc653.c
index fe2ddcb..5d61c1a 100644
--- a/tools/libxc/xc_arinc653.c
+++ b/tools/libxc/xc_arinc653.c
@@ -29,6 +29,7 @@
int
xc_sched_arinc653_schedule_set(
xc_interface *xch,
+ uint32_t cpupool_id,
struct xen_sysctl_arinc653_schedule *schedule)
{
int rc;
@@ -42,7 +43,7 @@ xc_sched_arinc653_schedule_set(
return -1;
sysctl.cmd = XEN_SYSCTL_scheduler_op;
- sysctl.u.scheduler_op.cpupool_id = 0;
+ sysctl.u.scheduler_op.cpupool_id = cpupool_id;
sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_putinfo;
set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
@@ -58,6 +59,7 @@ xc_sched_arinc653_schedule_set(
int
xc_sched_arinc653_schedule_get(
xc_interface *xch,
+ uint32_t cpupool_id,
struct xen_sysctl_arinc653_schedule *schedule)
{
int rc;
@@ -71,7 +73,7 @@ xc_sched_arinc653_schedule_get(
return -1;
sysctl.cmd = XEN_SYSCTL_scheduler_op;
- sysctl.u.scheduler_op.cpupool_id = 0;
+ sysctl.u.scheduler_op.cpupool_id = cpupool_id;
sysctl.u.scheduler_op.sched_id = XEN_SCHEDULER_ARINC653;
sysctl.u.scheduler_op.cmd = XEN_SYSCTL_SCHEDOP_getinfo;
set_xen_guest_handle(sysctl.u.scheduler_op.u.sched_arinc653.schedule,
diff --git a/tools/libxc/xenctrl.h b/tools/libxc/xenctrl.h
index 4ac6b8a..cced208 100644
--- a/tools/libxc/xenctrl.h
+++ b/tools/libxc/xenctrl.h
@@ -796,11 +796,13 @@ int xc_sched_credit2_domain_get(xc_interface *xch,
int
xc_sched_arinc653_schedule_set(
xc_interface *xch,
+ uint32_t cpupool_id,
struct xen_sysctl_arinc653_schedule *schedule);
int
xc_sched_arinc653_schedule_get(
xc_interface *xch,
+ uint32_t cpupool_id,
struct xen_sysctl_arinc653_schedule *schedule);
/**
--
1.7.9.5
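As a usage sketch (not part of the patch): with the new argument, a toolstack
caller names the target pool explicitly, and the domain is identified by its
UUID handle in each schedule entry. The example below is hypothetical; it
assumes the xen_sysctl_arinc653_schedule field names from xen/sysctl.h and
that major_frame/runtime are expressed in nanoseconds.

    #include <string.h>
    #include <xenctrl.h>

    /* Install a single-entry schedule on the cpupool 'poolid'. */
    static int set_single_entry_schedule(xc_interface *xch, uint32_t poolid,
                                         const xen_domain_handle_t handle)
    {
        struct xen_sysctl_arinc653_schedule sched;

        memset(&sched, 0, sizeof(sched));
        sched.num_sched_entries = 1;
        sched.major_frame = 10000000ULL;              /* 10 ms, assuming ns */
        memcpy(sched.sched_entries[0].dom_handle, handle,
               sizeof(xen_domain_handle_t));
        sched.sched_entries[0].vcpu_id = 0;
        sched.sched_entries[0].runtime = 10000000ULL; /* 10 ms, assuming ns */

        /* The cpupool_id parameter routes the sysctl to the target pool. */
        return xc_sched_arinc653_schedule_set(xch, poolid, &sched);
    }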
* Re: [PATCH v2 0/2] arinc: Implement cpu-pool support
2013-12-03 22:24 [PATCH v2 0/2] arinc: Implement cpu-pool support Nathan Studer
2013-12-03 22:24 ` [PATCH v2 1/2] arinc: Add cpu-pool support to scheduler Nathan Studer
2013-12-03 22:24 ` [PATCH v2 2/2] arinc: Add poolid parameter to scheduler get/set functions Nathan Studer
@ 2013-12-04 11:20 ` George Dunlap
2013-12-04 14:30 ` Ian Campbell
2 siblings, 1 reply; 7+ messages in thread
From: George Dunlap @ 2013-12-04 11:20 UTC (permalink / raw)
To: Nathan Studer, xen-devel
Cc: Simon Martin, Ian Campbell, Stefano Stabellini, Andrew Cooper,
Ian Jackson, Robert VanVossen
On 12/03/2013 10:24 PM, Nathan Studer wrote:
> From: Nathan Studer <nate.studer@dornerworks.com>
>
> Update the arinc653 scheduler to support cpu pools.
>
> These changes do not implement arinc653 multicore. Since the schedule only
> supports 1 vcpu entry per slot, even if the vcpus of a domain are run on
> multiple pcpus, the scheduler will essentially serialize their execution.
>
> The formatting patch from the original series has already been applied and
> has been dropped from v2.
>
> George, you granted a release-ack for the original series, which I sent
> in almost two weeks ago (I left on paternity leave in the meantime).
> I would argue that this functionality could be granted an exception for
> the same reasons you listed in your original release-ack, but if this is
> no longer the case, I defer to your judgement.
Just as a reminder, the original reasons were that the changes are
limited to arinc653-related code, and that there are, as far as we know,
no other users at present.
Release-acked-by: George Dunlap <george.dunlap@eu.citrix.com>