public inbox for linux-kernel@vger.kernel.org
 help / color / mirror / Atom feed
From: Sakari Ailus <sakari.ailus@linux.intel.com>
To: "Sebastian Reichel" <sre@kernel.org>,
	"Sakari Ailus" <sakari.ailus@linux.intel.com>,
	"Uwe Kleine-König" <u.kleine-koenig@pengutronix.de>
Cc: linux-kernel@vger.kernel.org
Subject: [PATCH 14/51] HSI: omap_ssi_port: Switch to __pm_runtime_put_autosuspend()
Date: Fri,  4 Oct 2024 12:41:22 +0300	[thread overview]
Message-ID: <20241004094122.113589-1-sakari.ailus@linux.intel.com> (raw)
In-Reply-To: <20241004094101.113349-1-sakari.ailus@linux.intel.com>

pm_runtime_put_autosuspend() will soon be changed to include a call to
pm_runtime_mark_last_busy(). Switch the current users to
__pm_runtime_put_autosuspend(), which will continue to have the
functionality of the old pm_runtime_put_autosuspend().

Signed-off-by: Sakari Ailus <sakari.ailus@linux.intel.com>
---
 drivers/hsi/controllers/omap_ssi_port.c | 42 ++++++++++++-------------
 1 file changed, 21 insertions(+), 21 deletions(-)

diff --git a/drivers/hsi/controllers/omap_ssi_port.c b/drivers/hsi/controllers/omap_ssi_port.c
index f0b3eca7376e..893702eeaa86 100644
--- a/drivers/hsi/controllers/omap_ssi_port.c
+++ b/drivers/hsi/controllers/omap_ssi_port.c
@@ -114,7 +114,7 @@ static int ssi_port_regs_show(struct seq_file *m, void *p __maybe_unused)
 		seq_printf(m, "BUFFER_CH%d\t: 0x%08x\n", ch,
 				readl(base + SSI_SSR_BUFFER_CH_REG(ch)));
 	}
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return 0;
 }
@@ -128,7 +128,7 @@ static int ssi_div_get(void *data, u64 *val)
 
 	pm_runtime_get_sync(omap_port->pdev);
 	*val = readl(omap_port->sst_base + SSI_SST_DIVISOR_REG);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return 0;
 }
@@ -144,7 +144,7 @@ static int ssi_div_set(void *data, u64 val)
 	pm_runtime_get_sync(omap_port->pdev);
 	writel(val, omap_port->sst_base + SSI_SST_DIVISOR_REG);
 	omap_port->sst.divisor = val;
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return 0;
 }
@@ -217,7 +217,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
 
 	if (!pm_runtime_active(omap_port->pdev)) {
 		dev_warn(&port->device, "ssi_start_dma called without runtime PM!\n");
-		pm_runtime_put_autosuspend(omap_port->pdev);
+		__pm_runtime_put_autosuspend(omap_port->pdev);
 		return -EREMOTEIO;
 	}
 
@@ -226,7 +226,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
 							DMA_FROM_DEVICE);
 		if (!err) {
 			dev_dbg(&ssi->device, "DMA map SG failed !\n");
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 			return -EIO;
 		}
 		csdp = SSI_DST_BURST_4x32_BIT | SSI_DST_MEMORY_PORT |
@@ -243,7 +243,7 @@ static int ssi_start_dma(struct hsi_msg *msg, int lch)
 							DMA_TO_DEVICE);
 		if (!err) {
 			dev_dbg(&ssi->device, "DMA map SG failed !\n");
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 			return -EIO;
 		}
 		csdp = SSI_SRC_BURST_4x32_BIT | SSI_SRC_MEMORY_PORT |
@@ -289,7 +289,7 @@ static int ssi_start_pio(struct hsi_msg *msg)
 
 	if (!pm_runtime_active(omap_port->pdev)) {
 		dev_warn(&port->device, "ssi_start_pio called without runtime PM!\n");
-		pm_runtime_put_autosuspend(omap_port->pdev);
+		__pm_runtime_put_autosuspend(omap_port->pdev);
 		return -EREMOTEIO;
 	}
 
@@ -304,7 +304,7 @@ static int ssi_start_pio(struct hsi_msg *msg)
 						msg->ttype ? "write" : "read");
 	val |= readl(omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
 	writel(val, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 	msg->actual_len = 0;
 	msg->status = HSI_STATUS_PROCEEDING;
 
@@ -363,7 +363,7 @@ static int ssi_async_break(struct hsi_msg *msg)
 	}
 out:
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return err;
 }
@@ -402,7 +402,7 @@ static int ssi_async(struct hsi_msg *msg)
 	}
 	spin_unlock_bh(&omap_port->lock);
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 	dev_dbg(&port->device, "msg status %d ttype %d ch %d\n",
 				msg->status, msg->ttype, msg->channel);
 
@@ -505,7 +505,7 @@ static int ssi_setup(struct hsi_client *cl)
 out:
 	spin_unlock_bh(&omap_port->lock);
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return err;
 }
@@ -536,7 +536,7 @@ static int ssi_flush(struct hsi_client *cl)
 			continue;
 		writew_relaxed(0, omap_ssi->gdd + SSI_GDD_CCR_REG(i));
 		if (msg->ttype == HSI_MSG_READ)
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 		omap_ssi->gdd_trn[i].msg = NULL;
 	}
 	/* Flush all SST buffers */
@@ -560,7 +560,7 @@ static int ssi_flush(struct hsi_client *cl)
 	for (i = 0; i < omap_port->channels; i++) {
 		/* Release write clocks */
 		if (!list_empty(&omap_port->txqueue[i]))
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 		ssi_flush_queue(&omap_port->txqueue[i], NULL);
 		ssi_flush_queue(&omap_port->rxqueue[i], NULL);
 	}
@@ -571,7 +571,7 @@ static int ssi_flush(struct hsi_client *cl)
 
 	spin_unlock_bh(&omap_port->lock);
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return 0;
 }
@@ -626,7 +626,7 @@ static int ssi_stop_tx(struct hsi_client *cl)
 	spin_unlock_bh(&omap_port->wk_lock);
 
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
+	__pm_runtime_put_autosuspend(omap_port->pdev); /* Release clocks */
 
 
 	return 0;
@@ -654,7 +654,7 @@ static void ssi_transfer(struct omap_ssi_port *omap_port,
 	}
 	spin_unlock_bh(&omap_port->lock);
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 }
 
 static void ssi_cleanup_queues(struct hsi_client *cl)
@@ -684,7 +684,7 @@ static void ssi_cleanup_queues(struct hsi_client *cl)
 			status |= SSI_DATAACCEPT(i);
 			/* Release the clocks writes, also GDD ones */
 			pm_runtime_mark_last_busy(omap_port->pdev);
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 		}
 		ssi_flush_queue(&omap_port->txqueue[i], cl);
 	}
@@ -740,7 +740,7 @@ static void ssi_cleanup_gdd(struct hsi_controller *ssi, struct hsi_client *cl)
 		 */
 		if (msg->ttype == HSI_MSG_READ) {
 			pm_runtime_mark_last_busy(omap_port->pdev);
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 		}
 		omap_ssi->gdd_trn[i].msg = NULL;
 	}
@@ -937,7 +937,7 @@ static void ssi_pio_complete(struct hsi_port *port, struct list_head *queue)
 	if (msg->ttype == HSI_MSG_WRITE) {
 		/* Release clocks for write transfer */
 		pm_runtime_mark_last_busy(omap_port->pdev);
-		pm_runtime_put_autosuspend(omap_port->pdev);
+		__pm_runtime_put_autosuspend(omap_port->pdev);
 	}
 	reg &= ~val;
 	writel_relaxed(reg, omap_ssi->sys + SSI_MPU_ENABLE_REG(port->num, 0));
@@ -982,7 +982,7 @@ static irqreturn_t ssi_pio_thread(int irq, void *ssi_port)
 	} while (status_reg);
 
 	pm_runtime_mark_last_busy(omap_port->pdev);
-	pm_runtime_put_autosuspend(omap_port->pdev);
+	__pm_runtime_put_autosuspend(omap_port->pdev);
 
 	return IRQ_HANDLED;
 }
@@ -1019,7 +1019,7 @@ static irqreturn_t ssi_wake_thread(int irq __maybe_unused, void *ssi_port)
 		hsi_event(port, HSI_EVENT_STOP_RX);
 		if (test_and_clear_bit(SSI_WAKE_EN, &omap_port->flags)) {
 			pm_runtime_mark_last_busy(omap_port->pdev);
-			pm_runtime_put_autosuspend(omap_port->pdev);
+			__pm_runtime_put_autosuspend(omap_port->pdev);
 		}
 	}
 
-- 
2.39.5


  parent reply	other threads:[~2024-10-04  9:41 UTC|newest]

Thread overview: 20+ messages / expand[flat|nested]  mbox.gz  Atom feed  top
2024-10-04  9:41 [PATCH 00/51] treewide: Switch to __pm_runtime_put_autosuspend() Sakari Ailus
2024-10-04  9:41 ` [PATCH 04/51] hwrng: " Sakari Ailus
2024-10-04  9:41 ` [PATCH 03/51] bus: sunxi-rsb: " Sakari Ailus
2024-10-04  9:41 ` Sakari Ailus [this message]
2024-10-04  9:41 ` [PATCH 26/51] mfd: " Sakari Ailus
2024-10-04  9:41 ` [PATCH 27/51] mei: " Sakari Ailus
2024-10-04  9:41 ` [PATCH 35/51] power: " Sakari Ailus
2024-10-04  9:41 ` [PATCH 46/51] w1: omap-hdq: " Sakari Ailus
2024-10-04 14:05   ` Krzysztof Kozlowski
2024-10-04 14:38 ` [PATCH 00/51] treewide: " Ulf Hansson
2024-10-07 18:49   ` Laurent Pinchart
2024-10-07 22:08     ` Ulf Hansson
2024-10-07 22:25       ` Laurent Pinchart
2024-10-07 22:34         ` Ulf Hansson
2024-10-08 18:24           ` Rafael J. Wysocki
2024-10-09 10:20             ` Rafael J. Wysocki
2024-10-09 10:27             ` Ulf Hansson
2024-10-09 12:48             ` Richard Fitzgerald
2024-10-09 13:34               ` Rafael J. Wysocki
2024-10-08 20:38     ` Uwe Kleine-König

Reply instructions:

You may reply publicly to this message via plain-text email
using any one of the following methods:

* Save the following mbox file, import it into your mail client,
  and reply-to-all from there: mbox

  Avoid top-posting and favor interleaved quoting:
  https://en.wikipedia.org/wiki/Posting_style#Interleaved_style

* Reply using the --to, --cc, and --in-reply-to
  switches of git-send-email(1):

  git send-email \
    --in-reply-to=20241004094122.113589-1-sakari.ailus@linux.intel.com \
    --to=sakari.ailus@linux.intel.com \
    --cc=linux-kernel@vger.kernel.org \
    --cc=sre@kernel.org \
    --cc=u.kleine-koenig@pengutronix.de \
    /path/to/YOUR_REPLY

  https://kernel.org/pub/software/scm/git/docs/git-send-email.html

* If your mail client supports setting the In-Reply-To header
  via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox