From: Pierre-Louis Bossart <pierre-louis.bossart@linux.intel.com>
To: Vijendar Mukunda <Vijendar.Mukunda@amd.com>, vkoul@kernel.org
Cc: amadeuszx.slawinski@linux.intel.com, Mario.Limonciello@amd.com,
Sunil-kumar.Dommati@amd.com, Basavaraj.Hiregoudar@amd.com,
Mastan.Katragadda@amd.com, Arungopal.kondaveeti@amd.com,
Bard Liao <yung-chuan.liao@linux.intel.com>,
Sanyog Kale <sanyog.r.kale@intel.com>,
"moderated list:SOUNDWIRE SUBSYSTEM"
<alsa-devel@alsa-project.org>,
open list <linux-kernel@vger.kernel.org>
Subject: Re: [PATCH V2 5/8] soundwire: amd: add soundwire manager interrupt handling
Date: Mon, 13 Feb 2023 12:15:44 -0600 [thread overview]
Message-ID: <9505941e-73a1-249b-08c2-83b86e7ef5c6@linux.intel.com> (raw)
In-Reply-To: <20230213094031.2231058-6-Vijendar.Mukunda@amd.com>
On 2/13/23 03:40, Vijendar Mukunda wrote:
> Add support for handling soundwire manager interrupts.
Try using the MIPI spelling: SoundWire
>
> Signed-off-by: Vijendar Mukunda <Vijendar.Mukunda@amd.com>
> Signed-off-by: Mastan Katragadda <Mastan.Katragadda@amd.com>
> ---
> drivers/soundwire/amd_manager.c | 132 ++++++++++++++++++++++++++++++
> drivers/soundwire/amd_manager.h | 1 +
> include/linux/soundwire/sdw_amd.h | 7 ++
> 3 files changed, 140 insertions(+)
>
> diff --git a/drivers/soundwire/amd_manager.c b/drivers/soundwire/amd_manager.c
> index 14c88b80ab6d..87f9a987d93a 100644
> --- a/drivers/soundwire/amd_manager.c
> +++ b/drivers/soundwire/amd_manager.c
> @@ -417,6 +417,47 @@ static enum sdw_command_response amd_sdw_xfer_msg(struct sdw_bus *bus, struct sd
> return SDW_CMD_OK;
> }
>
> +static void amd_sdw_process_ping_status(u64 response, struct amd_sdw_manager *amd_manager)
> +{
> + u64 slave_stat = 0;
useless init
> + u32 val = 0;
useless init
> + u16 dev_index;
> +
> + /* slave status response*/
response */
> + slave_stat = FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_0_3, response);
> + slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STAT_4_11, response) << 8;
> +
> + dev_dbg(amd_manager->dev, "%s: slave_stat:0x%llx\n", __func__, slave_stat);
newline?
> + for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> + val = (slave_stat >> (dev_index * 2)) & AMD_SDW_MCP_SLAVE_STATUS_MASK;
> + dev_dbg(amd_manager->dev, "%s val:0x%x\n", __func__, val);
you don't need __func__ in dev_dbg() logs; that information can be added
e.g. with the option dyndbg=+pmf
> + switch (val) {
> + case SDW_SLAVE_ATTACHED:
> + amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> + break;
> + case SDW_SLAVE_UNATTACHED:
> + amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> + break;
> + case SDW_SLAVE_ALERT:
> + amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> + break;
> + default:
> + amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> + break;
> + }
> + }
> +}
> +
> +static void amd_sdw_read_and_process_ping_status(struct amd_sdw_manager *amd_manager)
> +{
> + u64 response = 0;
useless init
> +
> + mutex_lock(&amd_manager->bus.msg_lock);
> + response = amd_sdw_send_cmd_get_resp(amd_manager, 0, 0);
> + mutex_unlock(&amd_manager->bus.msg_lock);
> + amd_sdw_process_ping_status(response, amd_manager);
> +}
> +
> static u32 amd_sdw_read_ping_status(struct sdw_bus *bus)
> {
> struct amd_sdw_manager *amd_manager = to_amd_sdw(bus);
> @@ -817,6 +858,95 @@ static int amd_sdw_register_dais(struct amd_sdw_manager *amd_manager)
> dais, num_dais);
> }
>
> +static void amd_sdw_update_slave_status_work(struct work_struct *work)
> +{
> + struct amd_sdw_manager *amd_manager =
> + container_of(work, struct amd_sdw_manager, amd_sdw_work);
> + int retry_count = 0;
> +
> + if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> + acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> + acp_reg_writel(0, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> + }
> +
> +update_status:
> + sdw_handle_slave_status(&amd_manager->bus, amd_manager->status);
> + if (amd_manager->status[0] == SDW_SLAVE_ATTACHED) {
> + if (retry_count++ < SDW_MAX_DEVICES) {
> + acp_reg_writel(AMD_SDW_IRQ_MASK_0TO7, amd_manager->mmio +
> + ACP_SW_STATE_CHANGE_STATUS_MASK_0TO7);
> + acp_reg_writel(AMD_SDW_IRQ_MASK_8TO11,
> + amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_MASK_8TO11);
> + amd_sdw_read_and_process_ping_status(amd_manager);
> + goto update_status;
> + } else {
> + dev_err_ratelimited(amd_manager->dev,
> + "Device0 detected after %d iterations\n",
> + retry_count);
> + }
> + }
this seems rather inspired by the Cadence code, but is there really a
case where you need to re-check for devices? In the Cadence case, this
was added because we have a logical OR and new devices would not be handled.
> +}
> +
> +static void amd_sdw_update_slave_status(u32 status_change_0to7, u32 status_change_8to11,
> + struct amd_sdw_manager *amd_manager)
> +{
> + u64 slave_stat = 0;
useless init
> + u32 val = 0;
useless init
> + int dev_index;
> +
> + if (status_change_0to7 == AMD_SDW_SLAVE_0_ATTACHED)
> + memset(amd_manager->status, 0, sizeof(amd_manager->status));
> + slave_stat = status_change_0to7;
> + slave_stat |= FIELD_GET(AMD_SDW_MCP_SLAVE_STATUS_8TO_11, status_change_8to11) << 32;
> + dev_dbg(amd_manager->dev, "%s: status_change_0to7:0x%x status_change_8to11:0x%x\n",
> + __func__, status_change_0to7, status_change_8to11);
> + if (slave_stat) {
> + for (dev_index = 0; dev_index <= SDW_MAX_DEVICES; ++dev_index) {
> + if (slave_stat & AMD_SDW_MCP_SLAVE_STATUS_VALID_MASK(dev_index)) {
> + val = (slave_stat >> AMD_SDW_MCP_SLAVE_STAT_SHIFT_MASK(dev_index)) &
> + AMD_SDW_MCP_SLAVE_STATUS_MASK;
> + switch (val) {
> + case SDW_SLAVE_ATTACHED:
> + amd_manager->status[dev_index] = SDW_SLAVE_ATTACHED;
> + break;
> + case SDW_SLAVE_UNATTACHED:
> + amd_manager->status[dev_index] = SDW_SLAVE_UNATTACHED;
> + break;
> + case SDW_SLAVE_ALERT:
> + amd_manager->status[dev_index] = SDW_SLAVE_ALERT;
> + break;
> + default:
> + amd_manager->status[dev_index] = SDW_SLAVE_RESERVED;
> + break;
> + }
the code seems identical to that in amd_sdw_process_ping_status(), is
there a need for a helper function?
> + }
> + }
> + }
> +}
> +
> +static void amd_sdw_irq_thread(struct work_struct *work)
> +{
> + struct amd_sdw_manager *amd_manager =
> + container_of(work, struct amd_sdw_manager, amd_sdw_irq_thread);
> + u32 status_change_8to11;
> + u32 status_change_0to7;
> +
> + status_change_8to11 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> + status_change_0to7 = acp_reg_readl(amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> + dev_dbg(amd_manager->dev, "%s [SDW%d] SDW INT: 0to7=0x%x, 8to11=0x%x\n",
> + __func__, amd_manager->instance, status_change_0to7, status_change_8to11);
remove __func__
> + if (status_change_8to11 & AMD_SDW_PREQ_INTR_STAT) {
> + amd_sdw_read_and_process_ping_status(amd_manager);
> + } else {
> + /* Check for the updated status on peripheral device */
> + amd_sdw_update_slave_status(status_change_0to7, status_change_8to11, amd_manager);
> + }
> + if (status_change_8to11 || status_change_0to7)
> + schedule_work(&amd_manager->amd_sdw_work);
> + acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_8TO11);
> + acp_reg_writel(0x00, amd_manager->mmio + ACP_SW_STATE_CHANGE_STATUS_0TO7);
> +}
next prev parent reply other threads:[~2023-02-13 18:24 UTC|newest]
Thread overview: 27+ messages / expand[flat|nested] mbox.gz Atom feed top
[not found] <20230213094031.2231058-1-Vijendar.Mukunda@amd.com>
2023-02-13 9:40 ` [PATCH V2 1/8] soundwire: export sdw_compute_slave_ports() function Vijendar Mukunda
2023-02-13 9:40 ` [PATCH V2 2/8] soundwire: amd: Add support for AMD Manager driver Vijendar Mukunda
2023-02-13 18:05 ` Pierre-Louis Bossart
2023-02-14 5:28 ` Mukunda,Vijendar
2023-02-14 13:21 ` Pierre-Louis Bossart
2023-02-14 22:29 ` Mukunda,Vijendar
2023-02-13 9:40 ` [PATCH V2 3/8] soundwire: amd: register soundwire manager dai ops Vijendar Mukunda
2023-02-13 18:09 ` Pierre-Louis Bossart
2023-02-14 5:49 ` Mukunda,Vijendar
2023-02-13 9:40 ` [PATCH V2 4/8] soundwire: amd: enable build for AMD soundwire manager driver Vijendar Mukunda
2023-02-13 9:40 ` [PATCH V2 5/8] soundwire: amd: add soundwire manager interrupt handling Vijendar Mukunda
2023-02-13 18:15 ` Pierre-Louis Bossart [this message]
2023-02-14 5:56 ` Mukunda,Vijendar
2023-02-14 7:54 ` Mukunda,Vijendar
2023-02-14 13:28 ` Pierre-Louis Bossart
2023-02-14 22:18 ` Mukunda,Vijendar
2023-02-13 9:40 ` [PATCH V2 6/8] soundwire: amd: add runtime pm ops for AMD soundwire manager driver Vijendar Mukunda
2023-02-13 18:20 ` Pierre-Louis Bossart
2023-02-14 6:13 ` Mukunda,Vijendar
2023-02-14 13:33 ` Pierre-Louis Bossart
2023-02-14 21:44 ` Mukunda,Vijendar
2023-02-13 9:40 ` [PATCH V2 7/8] soundwire: amd: handle soundwire wake enable interrupt Vijendar Mukunda
2023-02-13 18:24 ` Pierre-Louis Bossart
2023-02-14 6:15 ` Mukunda,Vijendar
2023-02-14 13:35 ` Pierre-Louis Bossart
2023-02-14 21:24 ` Mukunda,Vijendar
2023-02-13 9:40 ` [PATCH V2 8/8] soundwire: amd: add pm_prepare callback and pm ops support Vijendar Mukunda
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=9505941e-73a1-249b-08c2-83b86e7ef5c6@linux.intel.com \
--to=pierre-louis.bossart@linux.intel.com \
--cc=Arungopal.kondaveeti@amd.com \
--cc=Basavaraj.Hiregoudar@amd.com \
--cc=Mario.Limonciello@amd.com \
--cc=Mastan.Katragadda@amd.com \
--cc=Sunil-kumar.Dommati@amd.com \
--cc=Vijendar.Mukunda@amd.com \
--cc=alsa-devel@alsa-project.org \
--cc=amadeuszx.slawinski@linux.intel.com \
--cc=linux-kernel@vger.kernel.org \
--cc=sanyog.r.kale@intel.com \
--cc=vkoul@kernel.org \
--cc=yung-chuan.liao@linux.intel.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox