From: Abhijit Gangurde <abhijit.gangurde@amd.com>
To: <jgg@ziepe.ca>, <leon@kernel.org>, <brett.creeley@amd.com>,
<andrew+netdev@lunn.ch>, <davem@davemloft.net>,
<edumazet@google.com>, <kuba@kernel.org>, <pabeni@redhat.com>
Cc: <allen.hubbe@amd.com>, <nikhil.agarwal@amd.com>,
<linux-rdma@vger.kernel.org>, <netdev@vger.kernel.org>,
<linux-kernel@vger.kernel.org>,
Abhijit Gangurde <abhijit.gangurde@amd.com>
Subject: [PATCH 2/4] net: ionic: Add PHC state page for user space access
Date: Wed, 1 Apr 2026 15:54:59 +0530 [thread overview]
Message-ID: <20260401102501.3395305-3-abhijit.gangurde@amd.com> (raw)
In-Reply-To: <20260401102501.3395305-1-abhijit.gangurde@amd.com>
Add a page associated with the PHC that can be mapped to user space,
allowing applications to access hardware timestamp information.
In order to synchronize between kernel and user space, a sequence
number is incremented at the beginning and at the end of each update.
An odd number means the data is being updated, while an even number
means the update is complete. To guarantee that the data structure
was accessed atomically, user space will:
repeat:
seq1 = <read sequence>
if seq1 is odd goto repeat
<read PHC state>
seq2 = <read sequence>
if seq1 != seq2 goto repeat
This mechanism acts as a guard against reading invalid state during
concurrent updates.
Co-developed-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Allen Hubbe <allen.hubbe@amd.com>
Signed-off-by: Abhijit Gangurde <abhijit.gangurde@amd.com>
---
.../net/ethernet/pensando/ionic/ionic_lif.h | 3 +-
.../net/ethernet/pensando/ionic/ionic_phc.c | 41 +++++++++++++++++++
include/uapi/rdma/ionic-abi.h | 11 +++++
3 files changed, 54 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_lif.h b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
index 8e10f66dc50e..0b820af2b523 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_lif.h
+++ b/drivers/net/ethernet/pensando/ionic/ionic_lif.h
@@ -249,7 +249,7 @@ struct ionic_lif {
};
struct ionic_phc {
- spinlock_t lock; /* lock for cc and tc */
+ spinlock_t lock; /* lock for state_page, cc and tc */
struct cyclecounter cc;
struct timecounter tc;
@@ -262,6 +262,7 @@ struct ionic_phc {
long aux_work_delay;
struct ptp_clock_info ptp_info;
+ struct ionic_phc_state *state_page;
struct ptp_clock *ptp;
struct ionic_lif *lif;
};
diff --git a/drivers/net/ethernet/pensando/ionic/ionic_phc.c b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
index 116408099974..61eaf3834608 100644
--- a/drivers/net/ethernet/pensando/ionic/ionic_phc.c
+++ b/drivers/net/ethernet/pensando/ionic/ionic_phc.c
@@ -3,6 +3,7 @@
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
+#include <rdma/ionic-abi.h>
#include "ionic.h"
#include "ionic_bus.h"
@@ -334,6 +335,26 @@ static int ionic_setphc_cmd(struct ionic_phc *phc, struct ionic_admin_ctx *ctx)
return ionic_adminq_post(phc->lif, ctx);
}
+static void ionic_phc_state_page_update(struct ionic_phc *phc)
+{
+ struct ionic_phc_state *state = phc->state_page;
+ u32 seq;
+
+ /* read current seq */
+ seq = smp_load_acquire(&state->seq) & ~1;
+
+ /* make seq odd for updating */
+ smp_store_mb(state->seq, seq | 1);
+
+ state->tick = phc->tc.cycle_last;
+ state->nsec = phc->tc.nsec;
+ state->frac = phc->tc.frac;
+ state->mult = phc->cc.mult;
+
+ /* make seq the next even number for update completed */
+ smp_store_release(&state->seq, seq + 2);
+}
+
static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
{
struct ionic_phc *phc = container_of(info, struct ionic_phc, ptp_info);
@@ -361,6 +382,8 @@ static int ionic_phc_adjfine(struct ptp_clock_info *info, long scaled_ppm)
timecounter_read(&phc->tc);
phc->cc.mult = adj;
+ ionic_phc_state_page_update(phc);
+
/* Setphc commands are posted in-order, sequenced by phc->lock. We
* need to drop the lock before waiting for the command to complete.
*/
@@ -386,6 +409,8 @@ static int ionic_phc_adjtime(struct ptp_clock_info *info, s64 delta)
timecounter_adjtime(&phc->tc, delta);
+ ionic_phc_state_page_update(phc);
+
/* Setphc commands are posted in-order, sequenced by phc->lock. We
* need to drop the lock before waiting for the command to complete.
*/
@@ -415,6 +440,8 @@ static int ionic_phc_settime64(struct ptp_clock_info *info,
timecounter_init(&phc->tc, &phc->cc, ns);
+ ionic_phc_state_page_update(phc);
+
/* Setphc commands are posted in-order, sequenced by phc->lock. We
* need to drop the lock before waiting for the command to complete.
*/
@@ -472,6 +499,8 @@ static long ionic_phc_aux_work(struct ptp_clock_info *info)
/* update point-in-time basis to now */
timecounter_read(&phc->tc);
+ ionic_phc_state_page_update(phc);
+
/* Setphc commands are posted in-order, sequenced by phc->lock. We
* need to drop the lock before waiting for the command to complete.
*/
@@ -558,6 +587,12 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif)
if (!phc)
return;
+ phc->state_page = (void *)get_zeroed_page(GFP_KERNEL);
+ if (!phc->state_page) {
+ devm_kfree(ionic->dev, phc);
+ return;
+ }
+
phc->lif = lif;
phc->cc.read = ionic_cc_read;
@@ -569,6 +604,7 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif)
dev_err(lif->ionic->dev,
"Invalid device PHC mask multiplier %u, disabling HW timestamp support\n",
phc->cc.mult);
+ free_page((unsigned long)phc->state_page);
devm_kfree(lif->ionic->dev, phc);
lif->phc = NULL;
return;
@@ -652,6 +688,10 @@ void ionic_lif_alloc_phc(struct ionic_lif *lif)
*/
phc->ptp_info.max_adj = NORMAL_PPB;
+ phc->state_page->mask = phc->cc.mask;
+ phc->state_page->shift = phc->cc.shift;
+ ionic_phc_state_page_update(phc);
+
lif->phc = phc;
}
@@ -662,6 +702,7 @@ void ionic_lif_free_phc(struct ionic_lif *lif)
mutex_destroy(&lif->phc->config_lock);
+ free_page((unsigned long)lif->phc->state_page);
devm_kfree(lif->ionic->dev, lif->phc);
lif->phc = NULL;
}
diff --git a/include/uapi/rdma/ionic-abi.h b/include/uapi/rdma/ionic-abi.h
index 7b589d3e9728..97f695510380 100644
--- a/include/uapi/rdma/ionic-abi.h
+++ b/include/uapi/rdma/ionic-abi.h
@@ -112,4 +112,15 @@ struct ionic_srq_resp {
__aligned_u64 rq_cmb_offset;
};
+struct ionic_phc_state {
+ __u32 seq;
+ __u32 rsvd;
+ __aligned_u64 mask;
+ __aligned_u64 tick;
+ __aligned_u64 nsec;
+ __aligned_u64 frac;
+ __u32 mult;
+ __u32 shift;
+};
+
#endif /* IONIC_ABI_H */
--
2.43.0
next prev parent reply other threads:[~2026-04-01 10:25 UTC|newest]
Thread overview: 6+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-04-01 10:24 [PATCH 0/4] ionic: RDMA completion timestamping support Abhijit Gangurde
2026-04-01 10:24 ` [PATCH 1/4] net: ionic: register PHC for rdma timestamping Abhijit Gangurde
2026-04-01 10:24 ` Abhijit Gangurde [this message]
2026-04-02 0:06 ` [PATCH 2/4] net: ionic: Add PHC state page for user space access Jakub Kicinski
2026-04-01 10:25 ` [PATCH 3/4] RDMA/ionic: map PHC state into user space Abhijit Gangurde
2026-04-01 10:25 ` [PATCH 4/4] RDMA/ionic: add completion timestamp to CQE format Abhijit Gangurde
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260401102501.3395305-3-abhijit.gangurde@amd.com \
--to=abhijit.gangurde@amd.com \
--cc=allen.hubbe@amd.com \
--cc=andrew+netdev@lunn.ch \
--cc=brett.creeley@amd.com \
--cc=davem@davemloft.net \
--cc=edumazet@google.com \
--cc=jgg@ziepe.ca \
--cc=kuba@kernel.org \
--cc=leon@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-rdma@vger.kernel.org \
--cc=netdev@vger.kernel.org \
--cc=nikhil.agarwal@amd.com \
--cc=pabeni@redhat.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox