From: Steven Rostedt <rostedt@goodmis.org>
To: linux-kernel@vger.kernel.org,
	linux-rt-users <linux-rt-users@vger.kernel.org>
Cc: Thomas Gleixner <tglx@linutronix.de>,
	Carsten Emde <C.Emde@osadl.org>,
	Sebastian Andrzej Siewior <bigeasy@linutronix.de>,
	John Kacur <jkacur@redhat.com>,
	Paul Gortmaker <paul.gortmaker@windriver.com>,
	Daniel Wagner <daniel.wagner@bmw-carit.de>
Subject: [PATCH RT 26/36] work-simple: Simple work queue implementation
Date: Thu, 12 Mar 2015 15:22:05 -0400	[thread overview]
Message-ID: <20150312192159.049121641@goodmis.org> (raw)
In-Reply-To: 20150312192139.799127123@goodmis.org

3.12.38-rt53-rc1 stable review patch.
If anyone has any objections, please let me know.

------------------

From: Daniel Wagner <daniel.wagner@bmw-carit.de>

Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from IRQ
context. The callbacks are executed in kthread context.

Based on wait-simple.
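
As an illustration, a minimal caller could look like the sketch below; the
driver, IRQ handler, and callback names are hypothetical and not part of
this patch.

#include <linux/interrupt.h>
#include <linux/work-simple.h>

static struct swork_event my_event;

/* Runs in kthread context on the global "kswork" thread; may sleep. */
static void my_deferred_work(struct swork_event *sev)
{
	/* ... process data collected by the IRQ handler ... */
}

static irqreturn_t my_irq_handler(int irq, void *dev_id)
{
	/* Safe from IRQ context on PREEMPT_RT_FULL; swork_queue() returns
	 * false if the event was already pending. */
	swork_queue(&my_event);
	return IRQ_HANDLED;
}

static int my_driver_init(void)
{
	int ret;

	/* Take a reference on the global worker (created on first use). */
	ret = swork_get();
	if (ret)
		return ret;

	INIT_SWORK(&my_event, my_deferred_work);
	return 0;
}

static void my_driver_exit(void)
{
	/* Drop the reference once all queued events have completed. */
	swork_put();
}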

Signed-off-by: Daniel Wagner <daniel.wagner@bmw-carit.de>
Cc: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
---
 include/linux/work-simple.h |  24 +++++++
 kernel/sched/Makefile       |   1 +
 kernel/sched/work-simple.c  | 172 ++++++++++++++++++++++++++++++++++++++++++++
 3 files changed, 197 insertions(+)
 create mode 100644 include/linux/work-simple.h
 create mode 100644 kernel/sched/work-simple.c

diff --git a/include/linux/work-simple.h b/include/linux/work-simple.h
new file mode 100644
index 000000000000..f175fa9a6016
--- /dev/null
+++ b/include/linux/work-simple.h
@@ -0,0 +1,24 @@
+#ifndef _LINUX_SWORK_H
+#define _LINUX_SWORK_H
+
+#include <linux/list.h>
+
+struct swork_event {
+	struct list_head item;
+	unsigned long flags;
+	void (*func)(struct swork_event *);
+};
+
+static inline void INIT_SWORK(struct swork_event *event,
+			      void (*func)(struct swork_event *))
+{
+	event->flags = 0;
+	event->func = func;
+}
+
+bool swork_queue(struct swork_event *sev);
+
+int swork_get(void);
+void swork_put(void);
+
+#endif /* _LINUX_SWORK_H */
diff --git a/kernel/sched/Makefile b/kernel/sched/Makefile
index 54adcf35f495..fc4b0c1d9823 100644
--- a/kernel/sched/Makefile
+++ b/kernel/sched/Makefile
@@ -12,6 +12,7 @@ CFLAGS_core.o := $(PROFILING) -fno-omit-frame-pointer
 endif
 
 obj-y += core.o proc.o clock.o cputime.o idle_task.o fair.o rt.o stop_task.o
+obj-y += work-simple.o
 obj-$(CONFIG_SMP) += cpupri.o
 obj-$(CONFIG_SCHED_AUTOGROUP) += auto_group.o
 obj-$(CONFIG_SCHEDSTATS) += stats.o
diff --git a/kernel/sched/work-simple.c b/kernel/sched/work-simple.c
new file mode 100644
index 000000000000..c996f755dba6
--- /dev/null
+++ b/kernel/sched/work-simple.c
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner daniel.wagner@bmw-carit.de
+ *
+ * Provides a PREEMPT_RT_FULL-safe framework for enqueuing callbacks from
+ * IRQ context. The callbacks are executed in kthread context.
+ */
+
+#include <linux/wait-simple.h>
+#include <linux/work-simple.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#define SWORK_EVENT_PENDING     (1 << 0)
+
+static DEFINE_MUTEX(worker_mutex);
+static struct sworker *glob_worker;
+
+struct sworker {
+	struct list_head events;
+	struct swait_head wq;
+
+	raw_spinlock_t lock;
+
+	struct task_struct *task;
+	int refs;
+};
+
+static bool swork_readable(struct sworker *worker)
+{
+	bool r;
+
+	if (kthread_should_stop())
+		return true;
+
+	raw_spin_lock_irq(&worker->lock);
+	r = !list_empty(&worker->events);
+	raw_spin_unlock_irq(&worker->lock);
+
+	return r;
+}
+
+static int swork_kthread(void *arg)
+{
+	struct sworker *worker = arg;
+
+	for (;;) {
+		swait_event_interruptible(worker->wq,
+					swork_readable(worker));
+		if (kthread_should_stop())
+			break;
+
+		raw_spin_lock_irq(&worker->lock);
+		while (!list_empty(&worker->events)) {
+			struct swork_event *sev;
+
+			sev = list_first_entry(&worker->events,
+					struct swork_event, item);
+			list_del(&sev->item);
+			raw_spin_unlock_irq(&worker->lock);
+
+			WARN_ON_ONCE(!test_and_clear_bit(SWORK_EVENT_PENDING,
+							 &sev->flags));
+			sev->func(sev);
+			raw_spin_lock_irq(&worker->lock);
+		}
+		raw_spin_unlock_irq(&worker->lock);
+	}
+	return 0;
+}
+
+static struct sworker *swork_create(void)
+{
+	struct sworker *worker;
+
+	worker = kzalloc(sizeof(*worker), GFP_KERNEL);
+	if (!worker)
+		return ERR_PTR(-ENOMEM);
+
+	INIT_LIST_HEAD(&worker->events);
+	raw_spin_lock_init(&worker->lock);
+	init_swait_head(&worker->wq);
+
+	worker->task = kthread_run(swork_kthread, worker, "kswork");
+	if (IS_ERR(worker->task)) {
+		kfree(worker);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	return worker;
+}
+
+static void swork_destroy(struct sworker *worker)
+{
+	kthread_stop(worker->task);
+
+	WARN_ON(!list_empty(&worker->events));
+	kfree(worker);
+}
+
+/**
+ * swork_queue - queue swork
+ *
+ * Returns %false if @sev was already on a queue, %true otherwise.
+ *
+ * The work is queued and processed on a random CPU.
+ */
+bool swork_queue(struct swork_event *sev)
+{
+	unsigned long flags;
+
+	if (test_and_set_bit(SWORK_EVENT_PENDING, &sev->flags))
+		return false;
+
+	raw_spin_lock_irqsave(&glob_worker->lock, flags);
+	list_add_tail(&sev->item, &glob_worker->events);
+	raw_spin_unlock_irqrestore(&glob_worker->lock, flags);
+
+	swait_wake(&glob_worker->wq);
+	return true;
+}
+EXPORT_SYMBOL_GPL(swork_queue);
+
+/**
+ * swork_get - get an instance of the sworker
+ *
+ * Returns a negative error code if the initialization of the worker failed,
+ * %0 otherwise.
+ *
+ */
+int swork_get(void)
+{
+	struct sworker *worker;
+
+	mutex_lock(&worker_mutex);
+	if (!glob_worker) {
+		worker = swork_create();
+		if (IS_ERR(worker)) {
+			mutex_unlock(&worker_mutex);
+			return -ENOMEM;
+		}
+
+		glob_worker = worker;
+	}
+
+	glob_worker->refs++;
+	mutex_unlock(&worker_mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(swork_get);
+
+/**
+ * swork_put - puts an instance of the sworker
+ *
+ * Will destroy the sworker thread. This function must not be called until all
+ * queued events have been completed.
+ */
+void swork_put(void)
+{
+	mutex_lock(&worker_mutex);
+
+	glob_worker->refs--;
+	if (glob_worker->refs > 0)
+		goto out;
+
+	swork_destroy(glob_worker);
+	glob_worker = NULL;
+out:
+	mutex_unlock(&worker_mutex);
+}
+EXPORT_SYMBOL_GPL(swork_put);
-- 
2.1.4



Thread overview: 37+ messages
2015-03-12 19:21 [PATCH RT 00/36] Linux 3.12.38-rt53-rc1 Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 01/36] gpio: omap: use raw locks for locking Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 02/36] create-rt-enqueue Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 03/36] rtmutex: Simplify rtmutex_slowtrylock() Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 04/36] rtmutex: Simplify and document try_to_take_rtmutex() Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 05/36] rtmutex: No need to keep task ref for lock owner check Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 06/36] rtmutex: Clarify the boost/deboost part Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 07/36] rtmutex: Document pi chain walk Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 08/36] rtmutex: Simplify remove_waiter() Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 09/36] rtmutex: Confine deadlock logic to futex Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 10/36] rtmutex: Cleanup deadlock detector debug logic Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 11/36] rtmutex: Avoid pointless requeueing in the deadlock detection chain walk Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 12/36] futex: Make unlock_pi more robust Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 13/36] futex: Use futex_top_waiter() in lookup_pi_state() Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 14/36] futex: Split out the waiter check from lookup_pi_state() Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 15/36] futex: Split out the first waiter attachment " Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 16/36] futex: Simplify futex_lock_pi_atomic() and make it more robust Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 17/36] rt-mutex: avoid a NULL pointer dereference on deadlock Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 18/36] rt: fix __ww_mutex_lock_interruptible() lockdep annotation Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 19/36] rtmutex: enable deadlock detection in ww_mutex_lock functions Steven Rostedt
2015-03-12 19:21 ` [PATCH RT 20/36] x86: UV: raw_spinlock conversion Steven Rostedt
2015-03-13 15:13   ` [PATCH RT 21/36] ARM: enable irq in translation/section permission fault handlers Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 22/36] arm/futex: disable preemption during futex_atomic_cmpxchg_inatomic() Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 23/36] ARM: cmpxchg: define __HAVE_ARCH_CMPXCHG for armv6 and later Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 24/36] sas-ata/isci: dontt disable interrupts in qc_issue handler Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 25/36] scheduling while atomic in cgroup code Steven Rostedt
2015-03-12 19:22 ` Steven Rostedt [this message]
2015-03-12 19:22 ` [PATCH RT 27/36] sunrpc: make svc_xprt_do_enqueue() use get_cpu_light() Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 28/36] locking: ww_mutex: fix ww_mutex vs self-deadlock Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 29/36] thermal: Defer thermal wakups to threads Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 30/36] lockdep: selftest: fix warnings due to missing PREEMPT_RT conditionals Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 31/36] fs/aio: simple simple work Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 32/36] timers: Track total number of timers in list Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 33/36] timers: Reduce __run_timers() latency for empty list Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 34/36] timers: Reduce future __run_timers() latency for newly emptied list Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 35/36] timers: Reduce future __run_timers() latency for first add to empty list Steven Rostedt
2015-03-12 19:22 ` [PATCH RT 36/36] Linux 3.12.38-rt53-rc1 Steven Rostedt
