From: Xabier Marquiegui <reibax@gmail.com>
To: richardcochran@gmail.com
Cc: chrony-dev@chrony.tuxfamily.org, mlichvar@redhat.com,
netdev@vger.kernel.org, ntp-lists@mattcorallo.com,
reibax@gmail.com
Subject: [PATCH 2/3] ptp: support multiple timestamp event readers
Date: Wed, 6 Sep 2023 12:47:53 +0200 [thread overview]
Message-ID: <20230906104754.1324412-3-reibax@gmail.com> (raw)
In-Reply-To: <20230906104754.1324412-1-reibax@gmail.com>
Use linked lists to create one timestamp event queue per reader, with
each queue identified by the PID of the task that opened the clock
device. This enables simultaneous readers for timestamp event queues,
so the tsevq_mux mutex that previously serialized readers is no longer
needed and is removed.
Signed-off-by: Xabier Marquiegui <reibax@gmail.com>
---
drivers/ptp/ptp_chardev.c | 95 ++++++++++++++++++++++++++++++---------
drivers/ptp/ptp_clock.c | 6 +--
drivers/ptp/ptp_private.h | 4 +-
drivers/ptp/ptp_sysfs.c | 4 --
4 files changed, 80 insertions(+), 29 deletions(-)
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 1ea11f864abb..c65dc6fefaa6 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -103,9 +103,39 @@ int ptp_set_pinfunc(struct ptp_clock *ptp, unsigned int pin,
int ptp_open(struct posix_clock *pc, fmode_t fmode)
{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct timestamp_event_queue *queue;
+
+ queue = kzalloc(sizeof(struct timestamp_event_queue), GFP_KERNEL);
+ if (queue == NULL)
+ return -EINVAL;
+ queue->reader_pid = task_pid_nr(current);
+ list_add_tail(&queue->qlist, &ptp->tsevqs);
+
return 0;
}
+int ptp_release(struct posix_clock *pc)
+{
+ struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
+ struct list_head *pos, *n;
+ struct timestamp_event_queue *element;
+ int found = -1;
+ pid_t reader_pid = task_pid_nr(current);
+
+ list_for_each_safe(pos, n, &ptp->tsevqs) {
+ element = list_entry(pos, struct timestamp_event_queue, qlist);
+ if (element->reader_pid == reader_pid) {
+ list_del(pos);
+ kfree(element);
+ found = 0;
+ return found;
+ }
+ }
+
+ return found;
+}
+
long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
@@ -436,14 +466,25 @@ __poll_t ptp_poll(struct posix_clock *pc, struct file *fp, poll_table *wait)
{
struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
struct timestamp_event_queue *queue;
+ struct list_head *pos, *n;
+ bool found = false;
+ pid_t reader_pid = task_pid_nr(current);
poll_wait(fp, &ptp->tsev_wq, wait);
/*
- * Extract only the first element in the queue list
- * TODO: Identify the relevant queue
+ * Extract only the desired element in the queue list
*/
- queue = list_entry(&ptp->tsevqs, struct timestamp_event_queue, qlist);
+ list_for_each_safe(pos, n, &ptp->tsevqs) {
+ queue = list_entry(pos, struct timestamp_event_queue, qlist);
+ if (queue->reader_pid == reader_pid) {
+ found = true;
+ break;
+ }
+ }
+
+ if (!found)
+ return -EINVAL;
return queue_cnt(queue) ? EPOLLIN : 0;
}
@@ -459,40 +500,50 @@ ssize_t ptp_read(struct posix_clock *pc,
unsigned long flags;
size_t qcnt, i;
int result;
+ struct list_head *pos, *n;
+ bool found = false;
+ pid_t reader_pid = task_pid_nr(current);
/*
- * Extract only the first element in the queue list
- * TODO: Identify the relevant queue
+ * Extract only the desired element in the queue list
*/
- queue = list_first_entry(&ptp->tsevqs, struct timestamp_event_queue,
- qlist);
+ list_for_each_safe(pos, n, &ptp->tsevqs) {
+ queue = list_entry(pos, struct timestamp_event_queue, qlist);
+ if (queue->reader_pid == reader_pid) {
+ found = true;
+ break;
+ }
+ }
- if (cnt % sizeof(struct ptp_extts_event) != 0)
- return -EINVAL;
+ if (!found) {
+ result = -EINVAL;
+ goto exit;
+ }
+
+ if (cnt % sizeof(struct ptp_extts_event) != 0) {
+ result = -EINVAL;
+ goto exit;
+ }
if (cnt > EXTTS_BUFSIZE)
cnt = EXTTS_BUFSIZE;
cnt = cnt / sizeof(struct ptp_extts_event);
- if (mutex_lock_interruptible(&ptp->tsevq_mux))
- return -ERESTARTSYS;
-
if (wait_event_interruptible(ptp->tsev_wq,
ptp->defunct || queue_cnt(queue))) {
- mutex_unlock(&ptp->tsevq_mux);
return -ERESTARTSYS;
}
if (ptp->defunct) {
- mutex_unlock(&ptp->tsevq_mux);
- return -ENODEV;
+ result = -ENODEV;
+ goto exit;
}
event = kmalloc(EXTTS_BUFSIZE, GFP_KERNEL);
if (!event) {
- mutex_unlock(&ptp->tsevq_mux);
- return -ENOMEM;
+ result = -ENOMEM;
+ goto exit;
}
spin_lock_irqsave(&queue->lock, flags);
@@ -511,12 +562,16 @@ ssize_t ptp_read(struct posix_clock *pc,
cnt = cnt * sizeof(struct ptp_extts_event);
- mutex_unlock(&ptp->tsevq_mux);
-
result = cnt;
- if (copy_to_user(buf, event, cnt))
+ if (copy_to_user(buf, event, cnt)) {
result = -EFAULT;
+ goto free_event;
+ }
+free_event:
kfree(event);
+exit:
+ if (result < 0)
+ ptp_release(pc);
return result;
}
diff --git a/drivers/ptp/ptp_clock.c b/drivers/ptp/ptp_clock.c
index dd48b9f41535..dc2f045cacbd 100644
--- a/drivers/ptp/ptp_clock.c
+++ b/drivers/ptp/ptp_clock.c
@@ -162,6 +162,7 @@ static struct posix_clock_operations ptp_clock_ops = {
.clock_settime = ptp_clock_settime,
.ioctl = ptp_ioctl,
.open = ptp_open,
+ .release = ptp_release,
.poll = ptp_poll,
.read = ptp_read,
};
@@ -184,7 +185,6 @@ static void ptp_clock_release(struct device *dev)
ptp_cleanup_pin_groups(ptp);
kfree(ptp->vclock_index);
- mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ptp_clean_queue_list(ptp);
@@ -246,10 +246,9 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
queue = kzalloc(sizeof(struct timestamp_event_queue), GFP_KERNEL);
if (queue == NULL)
goto no_memory_queue;
+ queue->reader_pid = 0;
spin_lock_init(&queue->lock);
list_add_tail(&queue->qlist, &ptp->tsevqs);
- /* TODO - Transform or delete this mutex */
- mutex_init(&ptp->tsevq_mux);
mutex_init(&ptp->pincfg_mux);
mutex_init(&ptp->n_vclocks_mux);
init_waitqueue_head(&ptp->tsev_wq);
@@ -350,7 +349,6 @@ struct ptp_clock *ptp_clock_register(struct ptp_clock_info *info,
if (ptp->kworker)
kthread_destroy_worker(ptp->kworker);
kworker_err:
- mutex_destroy(&ptp->tsevq_mux);
mutex_destroy(&ptp->pincfg_mux);
mutex_destroy(&ptp->n_vclocks_mux);
ptp_clean_queue_list(ptp);
diff --git a/drivers/ptp/ptp_private.h b/drivers/ptp/ptp_private.h
index 014293255677..56b0c9df188d 100644
--- a/drivers/ptp/ptp_private.h
+++ b/drivers/ptp/ptp_private.h
@@ -27,6 +27,7 @@ struct timestamp_event_queue {
int tail;
spinlock_t lock;
struct list_head qlist; /* Link to other queues */
+ pid_t reader_pid;
};
struct ptp_clock {
@@ -38,7 +39,6 @@ struct ptp_clock {
struct pps_device *pps_source;
long dialed_frequency; /* remembers the frequency adjustment */
struct list_head tsevqs; /* timestamp fifo list */
- struct mutex tsevq_mux; /* one process at a time reading the fifo */
struct mutex pincfg_mux; /* protect concurrent info->pin_config access */
wait_queue_head_t tsev_wq;
int defunct; /* tells readers to go away when clock is being removed */
@@ -124,6 +124,8 @@ long ptp_ioctl(struct posix_clock *pc,
int ptp_open(struct posix_clock *pc, fmode_t fmode);
+int ptp_release(struct posix_clock *pc);
+
ssize_t ptp_read(struct posix_clock *pc,
uint flags, char __user *buf, size_t cnt);
diff --git a/drivers/ptp/ptp_sysfs.c b/drivers/ptp/ptp_sysfs.c
index 2675f383cd0a..512b0164ef18 100644
--- a/drivers/ptp/ptp_sysfs.c
+++ b/drivers/ptp/ptp_sysfs.c
@@ -87,9 +87,6 @@ static ssize_t extts_fifo_show(struct device *dev,
memset(&event, 0, sizeof(event));
- if (mutex_lock_interruptible(&ptp->tsevq_mux))
- return -ERESTARTSYS;
-
spin_lock_irqsave(&queue->lock, flags);
qcnt = queue_cnt(queue);
if (qcnt) {
@@ -104,7 +101,6 @@ static ssize_t extts_fifo_show(struct device *dev,
cnt = snprintf(page, PAGE_SIZE, "%u %lld %u\n",
event.index, event.t.sec, event.t.nsec);
out:
- mutex_unlock(&ptp->tsevq_mux);
return cnt;
}
static DEVICE_ATTR(fifo, 0444, extts_fifo_show, NULL);
--
2.34.1
next prev parent reply other threads:[~2023-09-06 10:48 UTC|newest]
Thread overview: 26+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <72ac9741-27f5-36a5-f64c-7d81008eebbc@bluematt.me>
[not found] ` <Y+3m/PpzkBN9kxJY@localhost>
2023-02-16 17:54 ` [chrony-dev] Support for Multiple PPS Inputs on single PHC Matt Corallo
2023-02-16 22:54 ` Richard Cochran
2023-02-17 0:58 ` Matt Corallo
2023-02-20 10:08 ` Miroslav Lichvar
2023-02-20 15:24 ` Richard Cochran
2023-02-23 20:56 ` Matt Corallo
2023-02-24 0:19 ` Richard Cochran
2023-02-24 1:18 ` Matt Corallo
2023-02-24 5:07 ` Richard Cochran
2023-08-29 11:47 ` Xabier Marquiegui
2023-08-29 11:47 ` [PATCH] ptp: Demultiplexed timestamp channels Xabier Marquiegui
2023-08-29 14:07 ` Richard Cochran
2023-08-29 14:15 ` Richard Cochran
2023-08-30 21:41 ` [chrony-dev] Support for Multiple PPS Inputs on single PHC Xabier Marquiegui
2023-08-30 21:41 ` [PATCH] ptp: Demultiplexed timestamp channels Xabier Marquiegui
2023-08-30 22:01 ` Richard Cochran
2023-09-06 10:47 ` Xabier Marquiegui
2023-09-06 10:47 ` [PATCH 1/3] ptp: Replace timestamp event queue with linked list Xabier Marquiegui
2023-09-06 10:47 ` Xabier Marquiegui [this message]
2023-09-06 18:13 ` [PATCH 2/3] ptp: support multiple timestamp event readers Simon Horman
2023-09-06 22:13 ` kernel test robot
2023-09-06 10:47 ` [PATCH 3/3] ptp: support event queue reader channel masks Xabier Marquiegui
2023-09-06 18:18 ` kernel test robot
2023-08-31 0:29 ` [PATCH] ptp: Demultiplexed timestamp channels kernel test robot
2023-08-31 13:28 ` kernel test robot
2023-08-31 16:20 ` kernel test robot
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20230906104754.1324412-3-reibax@gmail.com \
--to=reibax@gmail.com \
--cc=chrony-dev@chrony.tuxfamily.org \
--cc=mlichvar@redhat.com \
--cc=netdev@vger.kernel.org \
--cc=ntp-lists@mattcorallo.com \
--cc=richardcochran@gmail.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).