From: Yuanfang Zhang <yuanfang.zhang@oss.qualcomm.com>
To: Suzuki K Poulose <suzuki.poulose@arm.com>,
Mike Leach <mike.leach@linaro.org>,
James Clark <james.clark@linaro.org>,
Rob Herring <robh@kernel.org>,
Krzysztof Kozlowski <krzk+dt@kernel.org>,
Conor Dooley <conor+dt@kernel.org>,
Mathieu Poirier <mathieu.poirier@linaro.org>,
Leo Yan <leo.yan@linux.dev>,
Alexander Shishkin <alexander.shishkin@linux.intel.com>,
Bjorn Andersson <andersson@kernel.org>,
Konrad Dybcio <konradybcio@kernel.org>
Cc: kernel@oss.qualcomm.com, coresight@lists.linaro.org,
linux-arm-kernel@lists.infradead.org, devicetree@vger.kernel.org,
linux-kernel@vger.kernel.org, linux-arm-msm@vger.kernel.org,
Yuanfang Zhang <yuanfang.zhang@oss.qualcomm.com>
Subject: [PATCH 04/12] coresight-replicator: Add support for CPU cluster replicator
Date: Mon, 27 Oct 2025 23:28:06 -0700
Message-ID: <20251027-cpu_cluster_component_pm-v1-4-31355ac588c2@oss.qualcomm.com>
In-Reply-To: <20251027-cpu_cluster_component_pm-v1-0-31355ac588c2@oss.qualcomm.com>
The CPU cluster replicator is a type of CoreSight replicator that resides
inside the CPU cluster's power domain. Unlike system-wide replicators,
CPU cluster replicators are tightly coupled with their cluster and inherit its
power management characteristics. When the CPU cluster enters low-power
mode (LPM), the replicator registers become inaccessible. Moreover,
runtime PM alone cannot bring the CPU cluster out of LPM, making standard
register access unreliable.
Extend the existing CoreSight replicator platform driver to support CPU
cluster replicators by:
- Adding replicator_claim/disclaim_device_unlocked() to handle device
claim/disclaim before CoreSight device registration.
- Wrapping replicator_reset() and replicator_clear_self_claim_tag() in
replicator_init_hw(). For cluster replicators, replicator_init_hw() is
invoked via smp_call_function_single() so the registers are accessed
from a CPU inside the cluster.
- Encapsulating csdev registration in replicator_add_coresight_dev().
- Refactoring replicator_enable(). For cluster replicators, the hardware
is programmed via smp_call_function_single() on one of the cluster's CPUs.
- Maintaining compatibility with existing static/dynamic replicators while
minimizing duplication.
This ensures replicator operations remain safe and functional even when
the CPU cluster is in low-power states.
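For context, the cross-CPU access pattern used here works roughly as
follows (an illustrative sketch only, not part of this patch; the names
my_drvdata, cluster_reg_access and access_cluster_reg are hypothetical):

/*
 * Sketch: registers that sit in a CPU cluster's power domain must be
 * accessed from a CPU inside that cluster, otherwise the cluster may be
 * in LPM and the access is unreliable.
 */
#include <linux/cpumask.h>
#include <linux/io.h>
#include <linux/smp.h>

struct my_drvdata {
	void __iomem *base;
	struct cpumask *cpumask;	/* CPUs belonging to the cluster */
};

static void cluster_reg_access(void *info)
{
	struct my_drvdata *drvdata = info;

	/* Runs on a cluster CPU, so the cluster is awake at this point. */
	(void)readl_relaxed(drvdata->base);
}

static int access_cluster_reg(struct my_drvdata *drvdata)
{
	int cpu, ret = -ENODEV;

	/* Try each cluster CPU until one accepts the cross call. */
	for_each_cpu(cpu, drvdata->cpumask) {
		ret = smp_call_function_single(cpu, cluster_reg_access,
					       drvdata, 1);
		if (!ret)
			break;
	}

	return ret;
}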
Signed-off-by: Yuanfang Zhang <yuanfang.zhang@oss.qualcomm.com>
---
drivers/hwtracing/coresight/coresight-replicator.c | 202 +++++++++++++++++----
1 file changed, 169 insertions(+), 33 deletions(-)
diff --git a/drivers/hwtracing/coresight/coresight-replicator.c b/drivers/hwtracing/coresight/coresight-replicator.c
index e6472658235dc479cec91ac18f3737f76f8c74f0..c5a9c7a2adfa90ae22890ed730fc008fe6901778 100644
--- a/drivers/hwtracing/coresight/coresight-replicator.c
+++ b/drivers/hwtracing/coresight/coresight-replicator.c
@@ -13,6 +13,7 @@
#include <linux/io.h>
#include <linux/err.h>
#include <linux/slab.h>
+#include <linux/pm_domain.h>
#include <linux/pm_runtime.h>
#include <linux/property.h>
#include <linux/clk.h>
@@ -35,6 +36,7 @@ DEFINE_CORESIGHT_DEVLIST(replicator_devs, "replicator");
* @csdev: component vitals needed by the framework
* @spinlock: serialize enable/disable operations.
* @check_idfilter_val: check if the context is lost upon clock removal.
+ * @cpumask: CPU mask representing the CPUs related to this replicator.
*/
struct replicator_drvdata {
void __iomem *base;
@@ -43,18 +45,61 @@ struct replicator_drvdata {
struct coresight_device *csdev;
raw_spinlock_t spinlock;
bool check_idfilter_val;
+ struct cpumask *cpumask;
};
-static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
+struct replicator_smp_arg {
+ struct replicator_drvdata *drvdata;
+ int outport;
+ int rc;
+};
+
+static void replicator_clear_self_claim_tag(struct replicator_drvdata *drvdata)
+{
+ struct csdev_access access = CSDEV_ACCESS_IOMEM(drvdata->base);
+
+ coresight_clear_self_claim_tag(&access);
+}
+
+static int replicator_claim_device_unlocked(struct replicator_drvdata *drvdata)
+{
+ struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access access = CSDEV_ACCESS_IOMEM(drvdata->base);
+ u32 claim_tag;
+
+ if (csdev)
+ return coresight_claim_device_unlocked(csdev);
+
+ writel_relaxed(CORESIGHT_CLAIM_SELF_HOSTED, drvdata->base + CORESIGHT_CLAIMSET);
+
+ claim_tag = readl_relaxed(drvdata->base + CORESIGHT_CLAIMCLR);
+ if (claim_tag != CORESIGHT_CLAIM_SELF_HOSTED) {
+ coresight_clear_self_claim_tag_unlocked(&access);
+ return -EBUSY;
+ }
+
+ return 0;
+}
+
+static void replicator_disclaim_device_unlocked(struct replicator_drvdata *drvdata)
{
struct coresight_device *csdev = drvdata->csdev;
+ struct csdev_access access = CSDEV_ACCESS_IOMEM(drvdata->base);
+
+ if (csdev)
+ return coresight_disclaim_device_unlocked(csdev);
+ coresight_clear_self_claim_tag_unlocked(&access);
+}
+
+static void dynamic_replicator_reset(struct replicator_drvdata *drvdata)
+{
CS_UNLOCK(drvdata->base);
- if (!coresight_claim_device_unlocked(csdev)) {
+ if (!replicator_claim_device_unlocked(drvdata)) {
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER0);
writel_relaxed(0xff, drvdata->base + REPLICATOR_IDFILTER1);
- coresight_disclaim_device_unlocked(csdev);
+ replicator_disclaim_device_unlocked(drvdata);
}
CS_LOCK(drvdata->base);
@@ -116,6 +161,34 @@ static int dynamic_replicator_enable(struct replicator_drvdata *drvdata,
return rc;
}
+static void replicator_enable_hw_smp_call(void *info)
+{
+ struct replicator_smp_arg *arg = info;
+
+ arg->rc = dynamic_replicator_enable(arg->drvdata, 0, arg->outport);
+}
+
+static int replicator_enable_hw(struct replicator_drvdata *drvdata,
+ int inport, int outport)
+{
+ int cpu, ret;
+ struct replicator_smp_arg arg = { 0 };
+
+ if (!drvdata->cpumask)
+ return dynamic_replicator_enable(drvdata, 0, outport);
+
+ arg.drvdata = drvdata;
+ arg.outport = outport;
+
+ for_each_cpu(cpu, drvdata->cpumask) {
+ ret = smp_call_function_single(cpu, replicator_enable_hw_smp_call, &arg, 1);
+ if (!ret)
+ return arg.rc;
+ }
+
+ return ret;
+}
+
static int replicator_enable(struct coresight_device *csdev,
struct coresight_connection *in,
struct coresight_connection *out)
@@ -126,19 +199,24 @@ static int replicator_enable(struct coresight_device *csdev,
bool first_enable = false;
raw_spin_lock_irqsave(&drvdata->spinlock, flags);
- if (out->src_refcnt == 0) {
- if (drvdata->base)
- rc = dynamic_replicator_enable(drvdata, in->dest_port,
- out->src_port);
- if (!rc)
- first_enable = true;
- }
- if (!rc)
+
+ if (out->src_refcnt == 0)
+ first_enable = true;
+ else
out->src_refcnt++;
raw_spin_unlock_irqrestore(&drvdata->spinlock, flags);
- if (first_enable)
- dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
+ if (first_enable) {
+ if (drvdata->base)
+ rc = replicator_enable_hw(drvdata, in->dest_port,
+ out->src_port);
+ if (!rc) {
+ out->src_refcnt++;
+ dev_dbg(&csdev->dev, "REPLICATOR enabled\n");
+ return rc;
+ }
+ }
+
return rc;
}
@@ -217,23 +295,69 @@ static const struct attribute_group *replicator_groups[] = {
NULL,
};
+static int replicator_add_coresight_dev(struct device *dev)
+{
+ struct coresight_desc desc = { 0 };
+ struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
+
+ if (drvdata->base) {
+ desc.groups = replicator_groups;
+ desc.access = CSDEV_ACCESS_IOMEM(drvdata->base);
+ }
+
+ desc.name = coresight_alloc_device_name(&replicator_devs, dev);
+ if (!desc.name)
+ return -ENOMEM;
+
+ desc.type = CORESIGHT_DEV_TYPE_LINK;
+ desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
+ desc.ops = &replicator_cs_ops;
+ desc.pdata = dev->platform_data;
+ desc.dev = dev;
+
+ drvdata->csdev = coresight_register(&desc);
+ if (IS_ERR(drvdata->csdev))
+ return PTR_ERR(drvdata->csdev);
+
+ return 0;
+}
+
+static void replicator_init_hw(struct replicator_drvdata *drvdata)
+{
+ replicator_clear_self_claim_tag(drvdata);
+ replicator_reset(drvdata);
+}
+
+static void replicator_init_on_cpu(void *info)
+{
+ struct replicator_drvdata *drvdata = info;
+
+ replicator_init_hw(drvdata);
+}
+
+static struct cpumask *replicator_get_cpumask(struct device *dev)
+{
+ struct generic_pm_domain *pd;
+
+ pd = pd_to_genpd(dev->pm_domain);
+ if (pd)
+ return pd->cpus;
+
+ return NULL;
+}
+
static int replicator_probe(struct device *dev, struct resource *res)
{
struct coresight_platform_data *pdata = NULL;
struct replicator_drvdata *drvdata;
- struct coresight_desc desc = { 0 };
void __iomem *base;
- int ret;
+ int cpu, ret;
if (is_of_node(dev_fwnode(dev)) &&
of_device_is_compatible(dev->of_node, "arm,coresight-replicator"))
dev_warn_once(dev,
"Uses OBSOLETE CoreSight replicator binding\n");
- desc.name = coresight_alloc_device_name(&replicator_devs, dev);
- if (!desc.name)
- return -ENOMEM;
-
drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
if (!drvdata)
return -ENOMEM;
@@ -251,9 +375,6 @@ static int replicator_probe(struct device *dev, struct resource *res)
if (IS_ERR(base))
return PTR_ERR(base);
drvdata->base = base;
- desc.groups = replicator_groups;
- desc.access = CSDEV_ACCESS_IOMEM(base);
- coresight_clear_self_claim_tag(&desc.access);
}
if (fwnode_property_present(dev_fwnode(dev),
@@ -268,25 +389,39 @@ static int replicator_probe(struct device *dev, struct resource *res)
dev->platform_data = pdata;
raw_spin_lock_init(&drvdata->spinlock);
- desc.type = CORESIGHT_DEV_TYPE_LINK;
- desc.subtype.link_subtype = CORESIGHT_DEV_SUBTYPE_LINK_SPLIT;
- desc.ops = &replicator_cs_ops;
- desc.pdata = dev->platform_data;
- desc.dev = dev;
- drvdata->csdev = coresight_register(&desc);
- if (IS_ERR(drvdata->csdev))
- return PTR_ERR(drvdata->csdev);
+ if (is_of_node(dev_fwnode(dev)) &&
+ of_device_is_compatible(dev->of_node, "arm,coresight-cpu-replicator")) {
+ drvdata->cpumask = replicator_get_cpumask(dev);
+ if (!drvdata->cpumask)
+ return -EINVAL;
+
+ cpus_read_lock();
+ for_each_cpu(cpu, drvdata->cpumask) {
+ ret = smp_call_function_single(cpu,
+ replicator_init_on_cpu, drvdata, 1);
+ if (!ret)
+ break;
+ }
+ cpus_read_unlock();
- replicator_reset(drvdata);
- return 0;
+ if (ret)
+ return 0;
+ } else if (res) {
+ replicator_init_hw(drvdata);
+ }
+
+ ret = replicator_add_coresight_dev(dev);
+
+ return ret;
}
static int replicator_remove(struct device *dev)
{
struct replicator_drvdata *drvdata = dev_get_drvdata(dev);
- coresight_unregister(drvdata->csdev);
+ if (drvdata->csdev)
+ coresight_unregister(drvdata->csdev);
return 0;
}
@@ -354,6 +489,7 @@ static const struct dev_pm_ops replicator_dev_pm_ops = {
static const struct of_device_id replicator_match[] = {
{.compatible = "arm,coresight-replicator"},
{.compatible = "arm,coresight-static-replicator"},
+ {.compatible = "arm,coresight-cpu-replicator"},
{}
};
--
2.34.1