From: Lyude Paul <lyude@redhat.com>
To: nouveau@lists.freedesktop.org, dri-devel@lists.freedesktop.org
Cc: David Airlie <airlied@linux.ie>,
linux-kernel@vger.kernel.org,
Thomas Zimmermann <tzimmermann@suse.de>
Subject: [PATCH 1/9] drm/vblank: Add vblank works
Date: Tue, 17 Mar 2020 20:40:58 -0400 [thread overview]
Message-ID: <20200318004159.235623-2-lyude@redhat.com> (raw)
In-Reply-To: <20200318004159.235623-1-lyude@redhat.com>
From: Ville Syrjälä <ville.syrjala@linux.intel.com>
Add some kind of vblank workers. The interface is similar to regular
delayed works, and also allows for re-scheduling.
Whatever hardware programming we do in the work must be fast
(must at least complete during the vblank, sometimes during
the first few scanlines of vblank), so we'll fire up a per-crtc
high priority thread for this.
[based off patches from Ville Syrjälä <ville.syrjala@linux.intel.com>,
change below to signoff later]
Cc: Ville Syrjälä <ville.syrjala@linux.intel.com>
Signed-off-by: Lyude Paul <lyude@redhat.com>
---
drivers/gpu/drm/drm_vblank.c | 322 +++++++++++++++++++++++++++++++++++
include/drm/drm_vblank.h | 34 ++++
2 files changed, 356 insertions(+)
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index da7b0b0c1090..06c796b6c381 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -25,7 +25,9 @@
*/
#include <linux/export.h>
+#include <linux/kthread.h>
#include <linux/moduleparam.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
@@ -91,6 +93,7 @@
static bool
drm_get_last_vbltimestamp(struct drm_device *dev, unsigned int pipe,
ktime_t *tvblank, bool in_vblank_irq);
+static int drm_vblank_get(struct drm_device *dev, unsigned int pipe);
static unsigned int drm_timestamp_precision = 20; /* Default to 20 usecs. */
@@ -440,6 +443,9 @@ void drm_vblank_cleanup(struct drm_device *dev)
drm_core_check_feature(dev, DRIVER_MODESET));
del_timer_sync(&vblank->disable_timer);
+
+ wake_up_all(&vblank->vblank_work.work_wait);
+ kthread_stop(vblank->vblank_work.thread);
}
kfree(dev->vblank);
@@ -447,6 +453,108 @@ void drm_vblank_cleanup(struct drm_device *dev)
dev->num_crtcs = 0;
}
+static int vblank_work_thread(void *data)
+{
+ struct drm_vblank_crtc *vblank = data;
+
+ while (!kthread_should_stop()) {
+ struct drm_vblank_work *work, *next;
+ LIST_HEAD(list);
+ u64 count;
+ int ret;
+
+ spin_lock_irq(&vblank->dev->event_lock);
+
+ ret = wait_event_interruptible_lock_irq(vblank->queue,
+ kthread_should_stop() ||
+ !list_empty(&vblank->vblank_work.work_list),
+ vblank->dev->event_lock);
+
+ WARN_ON(ret && !kthread_should_stop() &&
+ list_empty(&vblank->vblank_work.irq_list) &&
+ list_empty(&vblank->vblank_work.work_list));
+
+ list_for_each_entry_safe(work, next,
+ &vblank->vblank_work.work_list,
+ list) {
+ list_move_tail(&work->list, &list);
+ work->state = DRM_VBL_WORK_RUNNING;
+ }
+
+ spin_unlock_irq(&vblank->dev->event_lock);
+
+ if (list_empty(&list))
+ continue;
+
+ count = atomic64_read(&vblank->count);
+ list_for_each_entry(work, &list, list)
+ work->func(work, count);
+
+ spin_lock_irq(&vblank->dev->event_lock);
+
+ list_for_each_entry_safe(work, next, &list, list) {
+ if (work->reschedule) {
+ list_move_tail(&work->list,
+ &vblank->vblank_work.irq_list);
+ drm_vblank_get(vblank->dev, vblank->pipe);
+ work->reschedule = false;
+ work->state = DRM_VBL_WORK_WAITING;
+ } else {
+ list_del_init(&work->list);
+ work->cancel = false;
+ work->state = DRM_VBL_WORK_IDLE;
+ }
+ }
+
+ spin_unlock_irq(&vblank->dev->event_lock);
+
+ wake_up_all(&vblank->vblank_work.work_wait);
+ }
+
+ return 0;
+}
+
+static void vblank_work_init(struct drm_vblank_crtc *vblank)
+{
+ struct sched_param param = {
+ .sched_priority = MAX_RT_PRIO - 1,
+ };
+ int ret;
+
+ INIT_LIST_HEAD(&vblank->vblank_work.irq_list);
+ INIT_LIST_HEAD(&vblank->vblank_work.work_list);
+ init_waitqueue_head(&vblank->vblank_work.work_wait);
+
+ vblank->vblank_work.thread =
+ kthread_run(vblank_work_thread, vblank, "card %d crtc %d",
+ vblank->dev->primary->index, vblank->pipe);
+
+ ret = sched_setscheduler(vblank->vblank_work.thread,
+ SCHED_FIFO, &param);
+ WARN_ON(ret);
+}
+
+/**
+ * drm_vblank_work_init - initialize a vblank work item
+ * @work: vblank work item
+ * @crtc: CRTC whose vblank will trigger the work execution
+ * @func: work function to be executed
+ *
+ * Initialize a vblank work item for a specific crtc.
+ */
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct drm_vblank_work *work, u64 count))
+{
+ struct drm_device *dev = crtc->dev;
+ struct drm_vblank_crtc *vblank = &dev->vblank[drm_crtc_index(crtc)];
+
+ work->vblank = vblank;
+ work->state = DRM_VBL_WORK_IDLE;
+ work->func = func;
+ INIT_LIST_HEAD(&work->list);
+}
+EXPORT_SYMBOL(drm_vblank_work_init);
+
/**
* drm_vblank_init - initialize vblank support
* @dev: DRM device
@@ -481,6 +589,8 @@ int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs)
init_waitqueue_head(&vblank->queue);
timer_setup(&vblank->disable_timer, vblank_disable_fn, 0);
seqlock_init(&vblank->seqlock);
+
+ vblank_work_init(vblank);
}
DRM_INFO("Supports vblank timestamp caching Rev 2 (21.10.2013).\n");
@@ -1825,6 +1935,22 @@ static void drm_handle_vblank_events(struct drm_device *dev, unsigned int pipe)
trace_drm_vblank_event(pipe, seq, now, high_prec);
}
+static void drm_handle_vblank_works(struct drm_vblank_crtc *vblank)
+{
+ struct drm_vblank_work *work, *next;
+ u64 count = atomic64_read(&vblank->count);
+
+ list_for_each_entry_safe(work, next, &vblank->vblank_work.irq_list,
+ list) {
+ if (!vblank_passed(count, work->count))
+ continue;
+
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ list_move_tail(&work->list, &vblank->vblank_work.work_list);
+ work->state = DRM_VBL_WORK_SCHEDULED;
+ }
+}
+
/**
* drm_handle_vblank - handle a vblank event
* @dev: DRM device
@@ -1866,6 +1992,7 @@ bool drm_handle_vblank(struct drm_device *dev, unsigned int pipe)
spin_unlock(&dev->vblank_time_lock);
+ drm_handle_vblank_works(vblank);
wake_up(&vblank->queue);
/* With instant-off, we defer disabling the interrupt until after
@@ -2076,3 +2203,198 @@ int drm_crtc_queue_sequence_ioctl(struct drm_device *dev, void *data,
kfree(e);
return ret;
}
+
+/**
+ * drm_vblank_work_schedule - schedule a vblank work
+ * @work: vblank work to schedule
+ * @count: target vblank count
+ * @nextonmiss: defer until the next vblank if target vblank was missed
+ *
+ * Schedule @work for execution once the crtc vblank count reaches @count.
+ *
+ * If the crtc vblank count has already reached @count and @nextonmiss is
+ * %false the work starts to execute immediately.
+ *
+ * If the crtc vblank count has already reached @count and @nextonmiss is
+ * %true the work is deferred until the next vblank (as if @count has been
+ * specified as crtc vblank count + 1).
+ *
+ * If @work is already scheduled, this function will reschedule said work
+ * using the new @count.
+ *
+ * Returns:
+ * 0 on success, error code on failure.
+ */
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ unsigned long irqflags;
+ u64 cur_vbl;
+ int ret = 0;
+ bool rescheduling = false;
+ bool passed;
+
+ spin_lock_irqsave(&vblank->dev->event_lock, irqflags);
+
+ if (work->cancel)
+ goto out;
+
+ if (work->state == DRM_VBL_WORK_RUNNING) {
+ work->reschedule = true;
+ work->count = count;
+ goto out;
+ } else if (work->state != DRM_VBL_WORK_IDLE) {
+ if (work->count == count)
+ goto out;
+ rescheduling = true;
+ }
+
+ if (work->state != DRM_VBL_WORK_WAITING) {
+ ret = drm_vblank_get(vblank->dev, vblank->pipe);
+ if (ret)
+ goto out;
+ }
+
+ work->count = count;
+
+ cur_vbl = atomic64_read(&vblank->count);
+ passed = vblank_passed(cur_vbl, count);
+ if (passed)
+ DRM_ERROR("crtc %d vblank %llu already passed (current %llu)\n",
+ vblank->pipe, count, cur_vbl);
+
+ if (!nextonmiss && passed) {
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ if (rescheduling)
+ list_move_tail(&work->list,
+ &vblank->vblank_work.work_list);
+ else
+ list_add_tail(&work->list,
+ &vblank->vblank_work.work_list);
+ work->state = DRM_VBL_WORK_SCHEDULED;
+ wake_up_all(&vblank->queue);
+ } else {
+ if (rescheduling)
+ list_move_tail(&work->list,
+ &vblank->vblank_work.irq_list);
+ else
+ list_add_tail(&work->list,
+ &vblank->vblank_work.irq_list);
+ work->state = DRM_VBL_WORK_WAITING;
+ }
+
+ out:
+ spin_unlock_irqrestore(&vblank->dev->event_lock, irqflags);
+
+ return ret;
+}
+EXPORT_SYMBOL(drm_vblank_work_schedule);
+
+static bool vblank_work_cancel(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+
+ switch (work->state) {
+ case DRM_VBL_WORK_RUNNING:
+ work->cancel = true;
+ work->reschedule = false;
+ /* fall through */
+ default:
+ case DRM_VBL_WORK_IDLE:
+ return false;
+ case DRM_VBL_WORK_WAITING:
+ drm_vblank_put(vblank->dev, vblank->pipe);
+ /* fall through */
+ case DRM_VBL_WORK_SCHEDULED:
+ list_del_init(&work->list);
+ work->state = DRM_VBL_WORK_IDLE;
+ return true;
+ }
+}
+
+/**
+ * drm_vblank_work_cancel - cancel a vblank work
+ * @work: vblank work to cancel
+ *
+ * Cancel an already scheduled vblank work.
+ *
+ * On return @work may still be executing, unless the return
+ * value is %true.
+ *
+ * Returns:
+ * True if the work was cancelled before it started to execute, false otherwise.
+ */
+bool drm_vblank_work_cancel(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ bool cancelled;
+
+ spin_lock_irq(&vblank->dev->event_lock);
+
+ cancelled = vblank_work_cancel(work);
+
+ spin_unlock_irq(&vblank->dev->event_lock);
+
+ return cancelled;
+}
+EXPORT_SYMBOL(drm_vblank_work_cancel);
+
+/**
+ * drm_vblank_work_cancel_sync - cancel a vblank work and wait for it to finish executing
+ * @work: vblank work to cancel
+ *
+ * Cancel an already scheduled vblank work and wait for its
+ * execution to finish.
+ *
+ * On return @work is no longer guaranteed to be executing.
+ *
+ * Returns:
+ * True if the work was cancelled before it started to execute, false otherwise.
+ */
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ bool cancelled;
+ long ret;
+
+ spin_lock_irq(&vblank->dev->event_lock);
+
+ cancelled = vblank_work_cancel(work);
+
+ ret = wait_event_lock_irq_timeout(vblank->vblank_work.work_wait,
+ work->state == DRM_VBL_WORK_IDLE,
+ vblank->dev->event_lock,
+ 10 * HZ);
+
+ spin_unlock_irq(&vblank->dev->event_lock);
+
+ WARN(!ret, "crtc %d vblank work timed out\n", vblank->pipe);
+
+ return cancelled;
+}
+EXPORT_SYMBOL(drm_vblank_work_cancel_sync);
+
+/**
+ * drm_vblank_work_flush - wait for a scheduled vblank work to finish executing
+ * @work: vblank work to flush
+ *
+ * Wait until @work has finished executing.
+ */
+void drm_vblank_work_flush(struct drm_vblank_work *work)
+{
+ struct drm_vblank_crtc *vblank = work->vblank;
+ long ret;
+
+ spin_lock_irq(&vblank->dev->event_lock);
+
+ ret = wait_event_lock_irq_timeout(vblank->vblank_work.work_wait,
+ work->state == DRM_VBL_WORK_IDLE,
+ vblank->dev->event_lock,
+ 10 * HZ);
+
+ spin_unlock_irq(&vblank->dev->event_lock);
+
+ WARN(!ret, "crtc %d vblank work timed out\n", vblank->pipe);
+}
+EXPORT_SYMBOL(drm_vblank_work_flush);
diff --git a/include/drm/drm_vblank.h b/include/drm/drm_vblank.h
index dd9f5b9e56e4..ac9130f419af 100644
--- a/include/drm/drm_vblank.h
+++ b/include/drm/drm_vblank.h
@@ -203,8 +203,42 @@ struct drm_vblank_crtc {
* disabling functions multiple times.
*/
bool enabled;
+
+ struct {
+ struct task_struct *thread;
+ struct list_head irq_list, work_list;
+ wait_queue_head_t work_wait;
+ } vblank_work;
+};
+
+struct drm_vblank_work {
+ u64 count;
+ struct drm_vblank_crtc *vblank;
+ void (*func)(struct drm_vblank_work *work, u64 count);
+ struct list_head list;
+ enum {
+ DRM_VBL_WORK_IDLE,
+ DRM_VBL_WORK_WAITING,
+ DRM_VBL_WORK_SCHEDULED,
+ DRM_VBL_WORK_RUNNING,
+ } state;
+ bool cancel : 1;
+ bool reschedule : 1;
};
+int drm_vblank_work_schedule(struct drm_vblank_work *work,
+ u64 count, bool nextonmiss);
+void drm_vblank_work_init(struct drm_vblank_work *work, struct drm_crtc *crtc,
+ void (*func)(struct drm_vblank_work *work, u64 count));
+bool drm_vblank_work_cancel(struct drm_vblank_work *work);
+bool drm_vblank_work_cancel_sync(struct drm_vblank_work *work);
+void drm_vblank_work_flush(struct drm_vblank_work *work);
+
+static inline bool drm_vblank_work_pending(struct drm_vblank_work *work)
+{
+ return work->state != DRM_VBL_WORK_IDLE;
+}
+
int drm_vblank_init(struct drm_device *dev, unsigned int num_crtcs);
bool drm_dev_has_vblank(const struct drm_device *dev);
u64 drm_crtc_vblank_count(struct drm_crtc *crtc);
--
2.24.1
_______________________________________________
dri-devel mailing list
dri-devel@lists.freedesktop.org
https://lists.freedesktop.org/mailman/listinfo/dri-devel
next prev parent reply other threads:[~2020-03-18 0:42 UTC|newest]
Thread overview: 22+ messages / expand[flat|nested] mbox.gz Atom feed top
2020-03-18 0:40 [PATCH 0/9] drm/nouveau: Introduce CRC support for gf119+ Lyude Paul
2020-03-18 0:40 ` Lyude Paul [this message]
2020-03-18 13:46 ` [PATCH 1/9] drm/vblank: Add vblank works Daniel Vetter
2020-03-19 20:12 ` Lyude Paul
2020-03-27 20:29 ` Lyude Paul
2020-03-27 20:38 ` Lyude Paul
2020-04-13 20:18 ` Lyude Paul
2020-04-13 20:42 ` Tejun Heo
2020-04-13 21:07 ` Sam Ravnborg
2020-04-14 16:52 ` Lyude Paul
2020-04-14 18:17 ` Tejun Heo
2020-05-07 18:57 ` Lyude Paul
2020-03-18 0:40 ` [PATCH 2/9] drm/nouveau/kms/nv50-: Unroll error cleanup in nv50_head_create() Lyude Paul
2020-03-18 0:41 ` [PATCH 3/9] drm/nouveau/kms/nv140-: Don't modify depth in state during atomic commit Lyude Paul
2020-03-18 0:41 ` [PATCH 4/9] drm/nouveau/kms/nv50-: Fix disabling dithering Lyude Paul
2020-03-18 0:41 ` [PATCH 5/9] drm/nouveau/kms/nv50-: s/harm/armh/g Lyude Paul
2020-03-18 0:41 ` [PATCH 6/9] drm/nouveau/kms/nv140-: Track wndw mappings in nv50_head_atom Lyude Paul
2020-03-18 0:41 ` [PATCH 7/9] drm/nouveau/kms/nv50-: Expose nv50_outp_atom in disp.h Lyude Paul
2020-03-18 0:41 ` [PATCH 8/9] drm/nouveau/kms/nv50-: Move hard-coded object handles into header Lyude Paul
2020-03-18 0:41 ` [PATCH 9/9] drm/nouveau/kms/nvd9-: Add CRC support Lyude Paul
2020-03-18 1:09 ` [PATCH v2] " Lyude Paul
2020-03-18 6:02 ` [PATCH 9/9] " Greg Kroah-Hartman
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20200318004159.235623-2-lyude@redhat.com \
--to=lyude@redhat.com \
--cc=airlied@linux.ie \
--cc=dri-devel@lists.freedesktop.org \
--cc=linux-kernel@vger.kernel.org \
--cc=nouveau@lists.freedesktop.org \
--cc=tzimmermann@suse.de \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).