From: "Henrik Rydberg" <rydberg@euromail.se>
To: Dmitry Torokhov <dmitry.torokhov@gmail.com>
Cc: linux-input@vger.kernel.org, linux-kernel@vger.kernel.org,
Jiri Kosina <jkosina@suse.cz>,
Mika Kuoppala <mika.kuoppala@nokia.com>,
Benjamin Tissoires <tissoire@cena.fr>,
Rafi Rubin <rafi@seas.upenn.edu>,
Henrik Rydberg <rydberg@euromail.se>
Subject: [PATCH 1/3] input: evdev: Use multi-reader buffer to save space (rev4)
Date: Sat, 5 Jun 2010 13:04:27 +0200
Message-ID: <1275735869-2185-2-git-send-email-rydberg@euromail.se>
In-Reply-To: <1275735869-2185-1-git-send-email-rydberg@euromail.se>
In preparation for larger buffers, convert the current per-client
circular buffers to a single buffer shared by all clients. Ideally,
there would be a mechanism that makes a client wait only on an actual
buffer collision. Until then, let clients take dev->event_lock, which
is already held during buffer writes.
Signed-off-by: Henrik Rydberg <rydberg@euromail.se>
---
drivers/input/evdev.c | 46 ++++++++++++++++++++++++++--------------------
1 files changed, 26 insertions(+), 20 deletions(-)
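To illustrate the scheme, here is a minimal userspace sketch of a
shared ring with per-client read positions. The names and the buffer
size are illustrative only, locking is omitted, and this is an
analogue rather than the kernel code itself:

#define BUF_SIZE 64	/* must be a power of two for the masks below */

struct shared_buf {
	int head;			/* writer position, advanced per event */
	int buffer[BUF_SIZE];
};

struct reader {
	int head;			/* writer position at last sync */
	int tail;			/* this reader's consume position */
};

/* writer side: store the event, advance head with a power-of-two mask */
static void buf_write(struct shared_buf *buf, int event)
{
	buf->buffer[buf->head] = event;
	buf->head = (buf->head + 1) & (BUF_SIZE - 1);
}

/*
 * Sync one reader to the writer, mirroring evdev_sync_event(): skip
 * the update when head has wrapped exactly onto tail, since publishing
 * that head would make a full ring indistinguishable from an empty one.
 */
static void reader_sync(struct reader *r, const struct shared_buf *buf)
{
	if (r->tail != buf->head)
		r->head = buf->head;
}

/* reader side: fetch one event, if any; returns 0 when drained */
static int reader_fetch(struct reader *r, const struct shared_buf *buf,
			int *event)
{
	if (r->head == r->tail)
		return 0;
	*event = buf->buffer[r->tail];
	r->tail = (r->tail + 1) & (BUF_SIZE - 1);
	return 1;
}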
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 2ee6c7a..7117589 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -33,13 +33,13 @@ struct evdev {
spinlock_t client_lock; /* protects client_list */
struct mutex mutex;
struct device dev;
+ int head;
+ struct input_event buffer[EVDEV_BUFFER_SIZE];
};
struct evdev_client {
- struct input_event buffer[EVDEV_BUFFER_SIZE];
int head;
int tail;
- spinlock_t buffer_lock; /* protects access to buffer, head and tail */
struct fasync_struct *fasync;
struct evdev *evdev;
struct list_head node;
@@ -48,18 +48,13 @@ struct evdev_client {
static struct evdev *evdev_table[EVDEV_MINORS];
static DEFINE_MUTEX(evdev_table_mutex);
-static void evdev_pass_event(struct evdev_client *client,
- struct input_event *event)
+static inline void evdev_sync_event(struct evdev_client *client,
+ struct evdev *evdev, int type)
{
- /*
- * Interrupts are disabled, just acquire the lock
- */
- spin_lock(&client->buffer_lock);
- client->buffer[client->head++] = *event;
- client->head &= EVDEV_BUFFER_SIZE - 1;
- spin_unlock(&client->buffer_lock);
-
- if (event->type == EV_SYN)
+ /* sync the reader such that it never becomes empty */
+ if (client->tail != evdev->head)
+ client->head = evdev->head;
+ if (type == EV_SYN)
kill_fasync(&client->fasync, SIGIO, POLL_IN);
}
@@ -78,14 +73,18 @@ static void evdev_event(struct input_handle *handle,
event.code = code;
event.value = value;
+ /* dev->event_lock held */
+ evdev->buffer[evdev->head] = event;
+ evdev->head = (evdev->head + 1) & (EVDEV_BUFFER_SIZE - 1);
+
rcu_read_lock();
client = rcu_dereference(evdev->grab);
if (client)
- evdev_pass_event(client, &event);
+ evdev_sync_event(client, evdev, type);
else
list_for_each_entry_rcu(client, &evdev->client_list, node)
- evdev_pass_event(client, &event);
+ evdev_sync_event(client, evdev, type);
rcu_read_unlock();
@@ -269,7 +268,6 @@ static int evdev_open(struct inode *inode, struct file *file)
goto err_put_evdev;
}
- spin_lock_init(&client->buffer_lock);
client->evdev = evdev;
evdev_attach_client(evdev, client);
@@ -325,19 +323,27 @@ static ssize_t evdev_write(struct file *file, const char __user *buffer,
}
static int evdev_fetch_next_event(struct evdev_client *client,
+ struct evdev *evdev,
struct input_event *event)
{
+ struct input_dev *dev = evdev->handle.dev;
int have_event;
- spin_lock_irq(&client->buffer_lock);
+ /*
+ * FIXME: taking event_lock protects against reentrant fops
+ * reads and provides sufficient buffer locking. However,
+ * clients should not block writes, and having multiple clients
+ * waiting for each other is suboptimal.
+ */
+ spin_lock_irq(&dev->event_lock);
have_event = client->head != client->tail;
if (have_event) {
- *event = client->buffer[client->tail++];
+ *event = evdev->buffer[client->tail++];
client->tail &= EVDEV_BUFFER_SIZE - 1;
}
- spin_unlock_irq(&client->buffer_lock);
+ spin_unlock_irq(&dev->event_lock);
return have_event;
}
@@ -366,7 +372,7 @@ static ssize_t evdev_read(struct file *file, char __user *buffer,
return -ENODEV;
while (retval + input_event_size() <= count &&
- evdev_fetch_next_event(client, &event)) {
+ evdev_fetch_next_event(client, evdev, &event)) {
if (input_event_to_user(buffer + retval, &event))
return -EFAULT;
--
1.6.3.3
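A worked example may help with the sync rule in evdev_sync_event().
Reusing the sketch functions from the note after the diffstat above,
the program below writes exactly BUF_SIZE events with no intervening
reads. The final sync is skipped because head has wrapped onto tail,
so the reader still drains BUF_SIZE - 1 events instead of finding the
ring empty; only the newest event remains invisible until the next
sync. (Again a userspace analogue; a real evdev reader runs
concurrently under dev->event_lock.)

#include <stdio.h>

int main(void)
{
	struct shared_buf buf = { 0 };
	struct reader r = { 0, 0 };
	int ev, i;

	for (i = 1; i <= BUF_SIZE; i++) {
		buf_write(&buf, i);
		reader_sync(&r, &buf);	/* last iteration: head == tail, skipped */
	}

	while (reader_fetch(&r, &buf, &ev))
		printf("read event %d\n", ev);	/* prints 1 .. BUF_SIZE-1 */

	return 0;
}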
Thread overview: 11+ messages
2010-06-05 11:04 [PATCH 0/3] input: evdev: Dynamic buffers (rev4) Henrik Rydberg
2010-06-05 11:04 ` Henrik Rydberg [this message]
2010-06-05 11:04 ` [PATCH 2/3] input: evdev: Convert to dynamic event buffer (rev4) Henrik Rydberg
2010-06-05 11:04 ` [PATCH 3/3] input: Use driver hint to compute the evdev buffer size (rev2) Henrik Rydberg
2010-06-10 14:21 ` [PATCH 0/3] input: evdev: Dynamic buffers (rev4) Chase Douglas
2010-06-10 19:11 ` Dmitry Torokhov
2010-06-15 9:43 ` Henrik Rydberg
2010-06-16 20:34 ` Dmitry Torokhov
2010-06-16 14:46 ` Jiri Kosina
2010-06-16 16:17 ` Henrik Rydberg
2010-06-16 20:31 ` Dmitry Torokhov