From: "Bill Huey (hui)" <bill.huey@gmail.com>
To: Peter Zijlstra <a.p.zijlstra@chello.nl>,
	Steven Rostedt <rostedt@goodmis.org>,
	linux-kernel@vger.kernel.org
Cc: Dario Faggioli <raistlin@linux.it>,
	Alessandro Zummo <a.zummo@towertech.it>,
	Thomas Gleixner <tglx@linutronix.de>,
	KY Srinivasan <kys@microsoft.com>,
	Amir Frenkel <frenkel.amir@gmail.com>,
	Bdale Garbee <bdale@gag.com>
Subject: [PATCH RFC v0 03/12] Add cyclic support to rtc-dev.c
Date: Mon, 11 Apr 2016 22:29:11 -0700
Message-ID: <1460438960-32060-4-git-send-email-bill.huey@gmail.com>
In-Reply-To: <1460438960-32060-1-git-send-email-bill.huey@gmail.com>

Change the wait-queue handling in rtc_dev_read() so that it can report
per-task overrun counts when multiple threads are blocked on a single wait
object.

Add ioctl() commands that let a caller admit the calling thread to the
cyclic scheduler.
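
As an illustration only, here is a minimal userspace sketch of the intended
flow. It assumes the RTC_OV_ADMIT definition added by the uapi patch later
in this series, assumes (based on patch 02) that the RTC update interrupt
drives the cyclic scheduler and therefore enables it with RTC_UIE_ON, and
uses an arbitrary placeholder slot pattern whose bit semantics are defined
by the scheduler patches rather than here:

  #include <stdio.h>
  #include <stdint.h>
  #include <fcntl.h>
  #include <unistd.h>
  #include <sys/ioctl.h>
  #include <linux/rtc.h>

  int main(void)
  {
  	uint64_t slots = 0x1;		/* placeholder slot pattern */
  	unsigned long data;
  	int fd = open("/dev/rtc0", O_RDONLY);

  	if (fd < 0)
  		return 1;

  	/* drive the cyclic scheduler from the RTC update interrupt */
  	ioctl(fd, RTC_UIE_ON, 0);

  	/* admit this thread to the cyclic scheduler */
  	if (ioctl(fd, RTC_OV_ADMIT, &slots) < 0)
  		return 1;

  	for (;;) {
  		/* for an admitted task, read() returns the accumulated
  		 * overrun count rather than the raw irq data */
  		if (read(fd, &data, sizeof(data)) < 0)
  			break;
  		if (data)
  			fprintf(stderr, "overruns: %lu\n", data);
  	}

  	close(fd);
  	return 0;
  }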

Signed-off-by: Bill Huey (hui) <bill.huey@gmail.com>
---
 drivers/rtc/rtc-dev.c | 161 ++++++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 161 insertions(+)

diff --git a/drivers/rtc/rtc-dev.c b/drivers/rtc/rtc-dev.c
index a6d9434..0fc9a8c 100644
--- a/drivers/rtc/rtc-dev.c
+++ b/drivers/rtc/rtc-dev.c
@@ -18,6 +18,15 @@
 #include <linux/sched.h>
 #include "rtc-core.h"
 
+#ifdef CONFIG_RTC_CYCLIC
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
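+/*
+ * The cyclic scheduler lives in kernel/sched/, so reach into its private
+ * headers for the rt_overrun interfaces used below.
+ */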
+#include <../kernel/sched/sched.h>
+#include <../kernel/sched/cyclic.h>
+//#include <../kernel/sched/cyclic_rt.h>
+#endif
+
 static dev_t rtc_devt;
 
 #define RTC_DEV_MAX 16 /* 16 RTCs should be enough for everyone... */
@@ -29,6 +38,10 @@ static int rtc_dev_open(struct inode *inode, struct file *file)
 					struct rtc_device, char_dev);
 	const struct rtc_class_ops *ops = rtc->ops;
 
+#ifdef CONFIG_RTC_CYCLIC
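+	/* start each open of the device with cleared overrun state */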
+	reset_rt_overrun();
+#endif
+
 	if (test_and_set_bit_lock(RTC_DEV_BUSY, &rtc->flags))
 		return -EBUSY;
 
@@ -153,13 +166,26 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 {
 	struct rtc_device *rtc = file->private_data;
 
+#ifdef CONFIG_RTC_CYCLIC
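+	/*
+	 * single_default_wake_function() comes from the cyclic scheduler
+	 * patches; it is used so that individual blocked tasks can be woken
+	 * and handed their own overrun counts.
+	 */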
+	DEFINE_WAIT_FUNC(wait, single_default_wake_function);
+#else
 	DECLARE_WAITQUEUE(wait, current);
+#endif
 	unsigned long data;
+#ifdef CONFIG_RTC_CYCLIC
+	unsigned long flags;
+	int wake = 0, block = 0;
+#endif
 	ssize_t ret;
 
 	if (count != sizeof(unsigned int) && count < sizeof(unsigned long))
 		return -EINVAL;
 
+#ifdef CONFIG_RTC_CYCLIC
+	if (rt_overrun_task_yield(current))
+		goto yield;
+
+	printk("%s: 0 color = %d\n", __func__, current->rt.rt_overrun.color);
+#endif
 	add_wait_queue(&rtc->irq_queue, &wait);
 	do {
 		__set_current_state(TASK_INTERRUPTIBLE);
@@ -169,23 +195,59 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 		rtc->irq_data = 0;
 		spin_unlock_irq(&rtc->irq_lock);
 
+#ifdef CONFIG_RTC_CYCLIC
+		if (block) {
+			block = 0;
+			if (wake) {
+				printk("%s: wake\n", __func__);
+				wake = 0;
+			} else {
+				printk("%s: ~wake\n", __func__);
+			}
+		}
+#endif
 		if (data != 0) {
+#ifdef CONFIG_RTC_CYCLIC
+			/*
+			 * Overrun reporting: an admitted task gets its
+			 * accumulated overrun count back instead of the raw
+			 * irq data.
+			 */
+			raw_spin_lock_irqsave(&rt_overrun_lock, flags);
+			if (_on_rt_overrun_admitted(current)) {
+				/* pass back to userspace */
+				data = rt_task_count(current);
+				rt_task_count(current) = 0;
+			}
+			raw_spin_unlock_irqrestore(&rt_overrun_lock, flags);
+			printk("%s: 1 color = %d\n", __func__,
+			       current->rt.rt_overrun.color);
+#endif
 			ret = 0;
 			break;
 		}
 		if (file->f_flags & O_NONBLOCK) {
 			ret = -EAGAIN;
+printk("%s: 2 color = %d \n", __func__, current->rt.rt_overrun.color);
 			break;
 		}
 		if (signal_pending(current)) {
+printk("%s: 3 color = %d \n", __func__, current->rt.rt_overrun.color);
 			ret = -ERESTARTSYS;
 			break;
 		}
+#ifdef CONFIG_RTC_CYCLIC
+		block = 1;
+#endif
 		schedule();
+#ifdef CONFIG_RTC_CYCLIC
+		/* debugging */
+		wake = 1;
+#endif
 	} while (1);
 	set_current_state(TASK_RUNNING);
 	remove_wait_queue(&rtc->irq_queue, &wait);
 
+#ifdef CONFIG_RTC_CYCLIC
+ret:
+#endif
 	if (ret == 0) {
 		/* Check for any data updates */
 		if (rtc->ops->read_callback)
@@ -201,6 +263,29 @@ rtc_dev_read(struct file *file, char __user *buf, size_t count, loff_t *ppos)
 				sizeof(unsigned long);
 	}
 	return ret;
+
+#ifdef CONFIG_RTC_CYCLIC
+yield:
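+	/*
+	 * Yield path: reached when rt_overrun_task_yield() says this task is
+	 * yielding; grab any pending irq data and return its overrun count
+	 * without blocking.
+	 */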
+
+	spin_lock_irq(&rtc->irq_lock);
+	data = rtc->irq_data;
+	rtc->irq_data = 0;
+	spin_unlock_irq(&rtc->irq_lock);
+
+	raw_spin_lock_irqsave(&rt_overrun_lock, flags);
+	if (_on_rt_overrun_admitted(current)) {
+		/* pass back to userspace */
+		data = rt_task_count(current);
+		rt_task_count(current) = 0;
+	}
+	raw_spin_unlock_irqrestore(&rt_overrun_lock, flags);
+	ret = 0;
+
+	goto ret;
+#endif
 }
 
 static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
@@ -215,6 +300,56 @@ static unsigned int rtc_dev_poll(struct file *file, poll_table *wait)
 	return (data != 0) ? (POLLIN | POLLRDNORM) : 0;
 }
 
+#ifdef CONFIG_RTC_CYCLIC
+extern asmlinkage __visible void __sched notrace preempt_schedule(void);
+
+/* yield behavior -- disabled for now */
+#if 0
+int rt_overrun_task_yield_block(struct task_struct *p)
+{
+	struct rq *rq = task_rq(p);
+	unsigned int block = 1;
+
+	if (!test_case)
+		return 1;
+
+	if (rt_overrun_task_is_best_effort(p)) {
+		/* assert that it should be on the rq */
+		/*
+		 * Move to the end, let pick_next_task_rt() deal with the
+		 * next runnable task.
+		 */
+		requeue_task_rt2(rq, p, false);
+
+		/* clear_overrun_log(); */
+
+		if (_cond_resched()) {
+			/* we reschedule here */
+		}
+
+		block = 0;
+	}
+
+	return block;
+}
+#endif
+
+int test_admit(u64 slots)
+{
+	/* Only allow the current task to be admitted for now
+	 * and allow for /proc to show the slot pattern
+	 * in a global fashion */
+	return rt_overrun_task_admit(current, slots);
+}
+
+int test_yield(u64 slots)
+{
+	rt_task_yield(current) = slots;
+	return 0;
+}
+
+void test_replenish(void)
+{
+	rt_overrun_task_replenish(current);
+}
+#endif
+
 static long rtc_dev_ioctl(struct file *file,
 		unsigned int cmd, unsigned long arg)
 {
@@ -223,6 +358,9 @@ static long rtc_dev_ioctl(struct file *file,
 	const struct rtc_class_ops *ops = rtc->ops;
 	struct rtc_time tm;
 	struct rtc_wkalrm alarm;
+#ifdef CONFIG_RTC_CYCLIC
+	u64 slots;
+#endif
 	void __user *uarg = (void __user *) arg;
 
 	err = mutex_lock_interruptible(&rtc->ops_lock);
@@ -250,6 +388,12 @@ static long rtc_dev_ioctl(struct file *file,
 				!capable(CAP_SYS_RESOURCE))
 			err = -EACCES;
 		break;
+#ifdef CONFIG_RTC_CYCLIC
+	case RTC_OV_REPLEN:
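+		/*
+		 * The replenish is carried out here, in the permission-check
+		 * switch; err is then set so that the command switch below is
+		 * skipped and -EACCES is returned to the caller.
+		 */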
+		test_replenish();
+		err = -EACCES;
+		break;
+#endif
 	}
 
 	if (err)
@@ -380,7 +524,21 @@ static long rtc_dev_ioctl(struct file *file,
 	case RTC_IRQP_READ:
 		err = put_user(rtc->irq_freq, (unsigned long __user *)uarg);
 		break;
+#ifdef CONFIG_RTC_CYCLIC
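+	/*
+	 * Cyclic scheduler controls: the argument is a 64-bit slot pattern
+	 * (see the uapi additions elsewhere in this series).
+	 */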
+	case RTC_OV_YIELD:
+		mutex_unlock(&rtc->ops_lock);
+		if (copy_from_user(&slots, uarg, sizeof(u64)))
+			return -EFAULT;
+
+		return test_yield(slots);
+
+	case RTC_OV_ADMIT:
+		mutex_unlock(&rtc->ops_lock);
+		if (copy_from_user(&slots, uarg, sizeof(u64)))
+			return -EFAULT;
 
+		return test_admit(slots);
+#endif
 	case RTC_WKALM_SET:
 		mutex_unlock(&rtc->ops_lock);
 		if (copy_from_user(&alarm, uarg, sizeof(alarm)))
@@ -424,6 +582,9 @@ static int rtc_dev_release(struct inode *inode, struct file *file)
 {
 	struct rtc_device *rtc = file->private_data;
 
+#ifdef CONFIG_RTC_CYCLIC
+	rt_overrun_entries_delete_all(rtc);
+#endif
 	/* We shut down the repeating IRQs that userspace enabled,
 	 * since nothing is listening to them.
 	 *  - Update (UIE) ... currently only managed through ioctls
-- 
2.5.0

Thread overview: 19+ messages
2016-04-12  5:29 [PATCH RFC v0 00/12] Cyclic Scheduler Against RTC Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 01/12] Kconfig change Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 02/12] Reroute rtc update irqs to the cyclic scheduler handler Bill Huey (hui)
2016-04-12  5:29 ` Bill Huey (hui) [this message]
2016-04-12  5:29 ` [PATCH RFC v0 04/12] Anonymous struct initialization Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 05/12] Task tracking per file descriptor Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 06/12] Add anonymous struct to sched_rt_entity Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 07/12] kernel/userspace additions for addition ioctl() support for rtc Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 08/12] Compilation support Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 09/12] Add priority support for the cyclic scheduler Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 10/12] Export SCHED_FIFO/RT requeuing functions Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 11/12] Cyclic scheduler support Bill Huey (hui)
2016-04-12  5:29 ` [PATCH RFC v0 12/12] Cyclic/rtc documentation Bill Huey (hui)
2016-04-12  5:58 ` [PATCH RFC v0 00/12] Cyclic Scheduler Against RTC Mike Galbraith
     [not found]   ` <CAAmnkz=X4TtY7LQwPuWWD0q99XeZQT+53RZ_7dNb3P=X=+jxrg@mail.gmail.com>
2016-04-12  6:05     ` Mike Galbraith
2016-04-13  8:57 ` Juri Lelli
2016-04-13  9:37   ` Bill Huey (hui)
2016-04-13 10:08     ` Juri Lelli
2016-04-13 10:35       ` Bill Huey (hui)
