public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
* [PATCH] mailbox: pcc: support polling mode when there is no platform IRQ
@ 2025-11-10 15:08 Andrea Tomassetti
  2025-11-11 18:29 ` kernel test robot
                   ` (2 more replies)
  0 siblings, 3 replies; 13+ messages in thread
From: Andrea Tomassetti @ 2025-11-10 15:08 UTC (permalink / raw)
  To: sudeep.holla, jassisinghbrar, rafael
  Cc: Andrea Tomassetti, lenb, linux-acpi, linux-kernel,
	olivierdautricourt, Thibault Cantori, Olivier Dautricourt

The goal is to allow clients to submit a message in both IRQ and polling
modes of the PCC mailbox. The ACPI specification does not require a
platform IRQ for PCC channels. Let's implement the case where it is not
available.

peek_data is mapped to pcc_mbox_error_check_and_clear, so that it
returns true if no error occurred while the platform processed the last
message, and therefore clients can fetch the response data provided by
the platform.

Tested-by: Thibault Cantori <thibault.cantori@sipearl.com>
Co-developed-by: Olivier Dautricourt <olivier.dautricourt@sipearl.com>
Signed-off-by: Olivier Dautricourt <olivier.dautricourt@sipearl.com>
Signed-off-by: Andrea Tomassetti <andrea.tomassetti@sipearl.com>
---
 drivers/acpi/acpi_pcc.c | 77 ++++++++++++++++++++++++++++-------------
 drivers/mailbox/pcc.c   | 12 ++++++-
 2 files changed, 63 insertions(+), 26 deletions(-)

diff --git a/drivers/acpi/acpi_pcc.c b/drivers/acpi/acpi_pcc.c
index 97064e943768..ef5d7eae150b 100644
--- a/drivers/acpi/acpi_pcc.c
+++ b/drivers/acpi/acpi_pcc.c
@@ -51,7 +51,6 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
 {
 	struct pcc_data *data;
 	struct acpi_pcc_info *ctx = handler_context;
-	struct pcc_mbox_chan *pcc_chan;
 	static acpi_status ret;

 	data = kzalloc(sizeof(*data), GFP_KERNEL);
@@ -59,7 +58,7 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
 		return AE_NO_MEMORY;

 	data->cl.rx_callback = pcc_rx_callback;
-	data->cl.knows_txdone = true;
+	data->cl.knows_txdone = false;
 	data->ctx.length = ctx->length;
 	data->ctx.subspace_id = ctx->subspace_id;
 	data->ctx.internal_buffer = ctx->internal_buffer;
@@ -73,61 +72,89 @@ acpi_pcc_address_space_setup(acpi_handle region_handle, u32 function,
 		goto err_free_data;
 	}

-	pcc_chan = data->pcc_chan;
-	if (!pcc_chan->mchan->mbox->txdone_irq) {
-		pr_err("This channel-%d does not support interrupt.\n",
-		       ctx->subspace_id);
-		ret = AE_SUPPORT;
-		goto err_free_channel;
-	}
-
 	*region_context = data;
 	return AE_OK;

-err_free_channel:
-	pcc_mbox_free_channel(data->pcc_chan);
 err_free_data:
 	kfree(data);

 	return ret;
 }

+static acpi_status
+acpi_pcc_send_msg_polling(struct pcc_data *data)
+{
+	int ret;
+
+	ret = mbox_send_message(data->pcc_chan->mchan, data->pcc_chan->shmem);
+	if (ret == -ETIME) {
+		pr_err("PCC command executed timeout!\n");
+		return AE_TIME;
+	}
+
+	if (ret < 0)
+		return AE_ERROR;
+
+	if (!mbox_client_peek_data(data->pcc_chan->mchan))
+		return AE_ERROR;
+
+	return AE_OK;
+}
+
+static acpi_status
+acpi_pcc_send_msg_irq(struct pcc_data *data)
+{
+	int ret;
+
+	ret = mbox_send_message(data->pcc_chan->mchan, NULL);
+	if (ret < 0)
+		return AE_ERROR;
+
+	ret = wait_for_completion_timeout(&data->done,
+					  usecs_to_jiffies(data->cl.tx_tout * USEC_PER_MSEC));
+	if (ret == 0) {
+		pr_err("PCC command executed timeout!\n");
+		return AE_TIME;
+	}
+
+	mbox_chan_txdone(data->pcc_chan->mchan, ret);
+
+	return AE_OK;
+}
+
 static acpi_status
 acpi_pcc_address_space_handler(u32 function, acpi_physical_address addr,
 			       u32 bits, acpi_integer *value,
 			       void *handler_context, void *region_context)
 {
-	int ret;
+	acpi_status ret;
 	struct pcc_data *data = region_context;
 	u64 usecs_lat;
+	bool use_polling = data->pcc_chan->mchan->mbox->txdone_poll;

 	reinit_completion(&data->done);

 	/* Write to Shared Memory */
 	memcpy_toio(data->pcc_chan->shmem, (void *)value, data->ctx.length);

-	ret = mbox_send_message(data->pcc_chan->mchan, NULL);
-	if (ret < 0)
-		return AE_ERROR;
-
 	/*
 	 * pcc_chan->latency is just a Nominal value. In reality the remote
 	 * processor could be much slower to reply. So add an arbitrary
 	 * amount of wait on top of Nominal.
 	 */
 	usecs_lat = PCC_CMD_WAIT_RETRIES_NUM * data->pcc_chan->latency;
-	ret = wait_for_completion_timeout(&data->done,
-						usecs_to_jiffies(usecs_lat));
-	if (ret == 0) {
-		pr_err("PCC command executed timeout!\n");
-		return AE_TIME;
-	}

-	mbox_chan_txdone(data->pcc_chan->mchan, ret);
+	data->cl.tx_block = use_polling;
+	data->cl.tx_tout = usecs_lat / USEC_PER_MSEC;
+
+	if (use_polling)
+		ret = acpi_pcc_send_msg_polling(data);
+	else
+		ret = acpi_pcc_send_msg_irq(data);

 	memcpy_fromio(value, data->pcc_chan->shmem, data->ctx.length);

-	return AE_OK;
+	return ret;
 }

 void __init acpi_init_pcc(void)
diff --git a/drivers/mailbox/pcc.c b/drivers/mailbox/pcc.c
index 0a00719b2482..e4e744669f81 100644
--- a/drivers/mailbox/pcc.c
+++ b/drivers/mailbox/pcc.c
@@ -579,11 +579,17 @@ static void pcc_shutdown(struct mbox_chan *chan)
 		devm_free_irq(chan->mbox->dev, pchan->plat_irq, chan);
 }

+static bool pcc_peek_data(struct mbox_chan *chan)
+{
+	return pcc_mbox_error_check_and_clear(chan->con_priv) == 0;
+}
+
 static const struct mbox_chan_ops pcc_chan_ops = {
 	.send_data = pcc_send_data,
 	.startup = pcc_startup,
 	.shutdown = pcc_shutdown,
 	.last_tx_done = pcc_last_tx_done,
+	.peek_data = pcc_peek_data,
 };

 /**
@@ -877,8 +883,12 @@ static int pcc_mbox_probe(struct platform_device *pdev)
 		(unsigned long) pcct_tbl + sizeof(struct acpi_table_pcct));

 	acpi_pcct_tbl = (struct acpi_table_pcct *) pcct_tbl;
-	if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL)
+	if (acpi_pcct_tbl->flags & ACPI_PCCT_DOORBELL) {
 		pcc_mbox_ctrl->txdone_irq = true;
+	} else {
+		pcc_mbox_ctrl->txdone_poll = true;
+		pcc_mbox_ctrl->txpoll_period = 1;
+	}

 	for (i = 0; i < count; i++) {
 		struct pcc_chan_info *pchan = chan_info + i;
--
2.25.0

^ permalink raw reply related	[flat|nested] 13+ messages in thread

end of thread, other threads:[~2025-12-09 11:11 UTC | newest]

Thread overview: 13+ messages (download: mbox.gz / follow: Atom feed)
-- links below jump to the message on this page --
2025-11-10 15:08 [PATCH] mailbox: pcc: support polling mode when there is no platform IRQ Andrea Tomassetti
2025-11-11 18:29 ` kernel test robot
2025-11-11 21:47 ` kernel test robot
2025-11-12  0:07 ` kernel test robot
2025-11-19  9:01   ` [PATCH v2] " Andrea Tomassetti
2025-11-27 14:51     ` Sudeep Holla
2025-12-02 10:12       ` [PATCH v3 1/2] " Andrea Tomassetti
2025-12-03 10:28         ` Sudeep Holla
2025-12-04 12:59           ` Andrea Tomassetti
2025-12-04 13:14             ` Sudeep Holla
2025-12-09 10:39               ` Andrea Tomassetti
2025-12-09 11:11                 ` Sudeep Holla
2025-12-02 10:12       ` [PATCH v3 2/2] mailbox: pcc: add peek_data handler Andrea Tomassetti

This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox