From: Sumit Gupta <sumitg@nvidia.com>
To: <treding@nvidia.com>, <krzysztof.kozlowski@linaro.org>,
	<dmitry.osipenko@collabora.com>, <viresh.kumar@linaro.org>,
	<rafael@kernel.org>, <jonathanh@nvidia.com>, <robh+dt@kernel.org>,
	<linux-kernel@vger.kernel.org>, <linux-tegra@vger.kernel.org>,
	<linux-pm@vger.kernel.org>, <devicetree@vger.kernel.org>
Cc: <sanjayc@nvidia.com>, <ksitaraman@nvidia.com>, <ishah@nvidia.com>,
	<bbasu@nvidia.com>, <sumitg@nvidia.com>
Subject: [Patch v1 04/10] memory: tegra: add support for software mc clients in Tegra234
Date: Tue, 20 Dec 2022 21:32:34 +0530	[thread overview]
Message-ID: <20221220160240.27494-5-sumitg@nvidia.com> (raw)
In-Reply-To: <20221220160240.27494-1-sumitg@nvidia.com>

Add support for dummy memory controller clients for use by software.
These clients do not program any memory controller registers; they only
provide interconnect nodes that software agents such as the CPU clusters
can use to request DRAM bandwidth. An illustrative consumer-side sketch
follows the diffstat below.
---
 drivers/memory/tegra/mc.c       | 65 +++++++++++++++++++++++----------
 drivers/memory/tegra/tegra234.c | 21 +++++++++++
 include/soc/tegra/mc.h          |  3 ++
 include/soc/tegra/tegra-icc.h   |  7 ++++
 4 files changed, 76 insertions(+), 20 deletions(-)
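
The snippet below is illustrative only and is not part of this patch: it
sketches how a consumer driver might request DRAM bandwidth through one
of the software client nodes added here, assuming the device carries an
"interconnects" property pointing at the MC/EMC path (added by a later
patch in this series). The function name and the bandwidth values are
placeholders.

#include <linux/device.h>
#include <linux/interconnect.h>

static int example_request_dram_bw(struct device *dev)
{
	struct icc_path *path;

	/* look up the path described by the device's "interconnects" property */
	path = devm_of_icc_get(dev, NULL);
	if (IS_ERR(path))
		return PTR_ERR(path);

	/* request average/peak DRAM bandwidth (values are illustrative only) */
	return icc_set_bw(path, MBps_to_icc(1000), MBps_to_icc(2000));
}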

diff --git a/drivers/memory/tegra/mc.c b/drivers/memory/tegra/mc.c
index ff887fb03bce..4ddf9808fe6b 100644
--- a/drivers/memory/tegra/mc.c
+++ b/drivers/memory/tegra/mc.c
@@ -755,6 +755,39 @@ const char *const tegra_mc_error_names[8] = {
 	[6] = "SMMU translation error",
 };
 
+static int tegra_mc_add_icc_node(struct tegra_mc *mc, unsigned int id, const char *name,
+				 unsigned int bpmp_id, unsigned int type)
+{
+	struct tegra_icc_node *tnode;
+	struct icc_node *node;
+	int err;
+
+	tnode = kzalloc(sizeof(*tnode), GFP_KERNEL);
+	if (!tnode)
+		return -ENOMEM;
+
+	/* create MC client node */
+	node = icc_node_create(id);
+	if (IS_ERR(node))
+		return -EINVAL;
+
+	node->name = name;
+	icc_node_add(node, &mc->provider);
+
+	/* link Memory Client to Memory Controller */
+	err = icc_link_create(node, TEGRA_ICC_MC);
+	if (err)
+		return err;
+
+	node->data = tnode;
+	tnode->node = node;
+	tnode->bpmp_id = bpmp_id;
+	tnode->type = type;
+	tnode->mc = mc;
+
+	return 0;
+}
+
 /*
  * Memory Controller (MC) has few Memory Clients that are issuing memory
  * bandwidth allocation requests to the MC interconnect provider. The MC
@@ -780,7 +813,6 @@ const char *const tegra_mc_error_names[8] = {
  */
 static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
 {
-	struct tegra_icc_node *tnode;
 	struct icc_node *node;
 	unsigned int i;
 	int err;
@@ -820,30 +852,23 @@ static int tegra_mc_interconnect_setup(struct tegra_mc *mc)
 		goto remove_nodes;
 
 	for (i = 0; i < mc->soc->num_clients; i++) {
-		tnode = kzalloc(sizeof(*tnode), GFP_KERNEL);
-		if (!tnode)
-			return -ENOMEM;
-
-		/* create MC client node */
-		node = icc_node_create(mc->soc->clients[i].id);
-		if (IS_ERR(node)) {
-			err = PTR_ERR(node);
+		err = tegra_mc_add_icc_node(mc, mc->soc->clients[i].id,
+					    mc->soc->clients[i].name,
+					    mc->soc->clients[i].bpmp_id,
+					    mc->soc->clients[i].type);
+		if (err)
 			goto remove_nodes;
-		}
 
-		node->name = mc->soc->clients[i].name;
-		icc_node_add(node, &mc->provider);
+	}
+
+	for (i = 0; i < mc->soc->num_sw_clients; i++) {
+		err = tegra_mc_add_icc_node(mc, mc->soc->sw_clients[i].id,
+					     mc->soc->sw_clients[i].name,
+					     mc->soc->sw_clients[i].bpmp_id,
+					     mc->soc->sw_clients[i].type);
 
-		/* link Memory Client to Memory Controller */
-		err = icc_link_create(node, TEGRA_ICC_MC);
 		if (err)
 			goto remove_nodes;
-
-		node->data = tnode;
-		tnode->node = node;
-		tnode->type = mc->soc->clients[i].type;
-		tnode->bpmp_id = mc->soc->clients[i].bpmp_id;
-		tnode->mc = mc;
 	}
 
 	return 0;
diff --git a/drivers/memory/tegra/tegra234.c b/drivers/memory/tegra/tegra234.c
index 420546270c8b..82ce6c3c3eb0 100644
--- a/drivers/memory/tegra/tegra234.c
+++ b/drivers/memory/tegra/tegra234.c
@@ -780,6 +780,25 @@ static const struct tegra_mc_client tegra234_mc_clients[] = {
 	}
 };
 
+static const struct tegra_mc_sw_client tegra234_mc_sw_clients[] = {
+	{
+		.id = TEGRA_ICC_MC_CPU_CLUSTER0,
+		.name = "sw_cluster0",
+		.bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER0,
+		.type = TEGRA_ICC_NISO,
+	}, {
+		.id = TEGRA_ICC_MC_CPU_CLUSTER1,
+		.name = "sw_cluster1",
+		.bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER1,
+		.type = TEGRA_ICC_NISO,
+	}, {
+		.id = TEGRA_ICC_MC_CPU_CLUSTER2,
+		.name = "sw_cluster2",
+		.bpmp_id = TEGRA_ICC_BPMP_CPU_CLUSTER2,
+		.type = TEGRA_ICC_NISO,
+	},
+};
+
 /*
  * tegra234_mc_icc_set() - Pass MC client info to External Memory Controller (EMC)
  * @src: ICC node for Memory Controller's (MC) Client
@@ -854,6 +873,8 @@ static const struct tegra_mc_icc_ops tegra234_mc_icc_ops = {
 const struct tegra_mc_soc tegra234_mc_soc = {
 	.num_clients = ARRAY_SIZE(tegra234_mc_clients),
 	.clients = tegra234_mc_clients,
+	.num_sw_clients = ARRAY_SIZE(tegra234_mc_sw_clients),
+	.sw_clients = tegra234_mc_sw_clients,
 	.num_address_bits = 40,
 	.num_channels = 16,
 	.client_id_mask = 0x1ff,
diff --git a/include/soc/tegra/mc.h b/include/soc/tegra/mc.h
index 0a32a9eb12a4..6a94e88b6100 100644
--- a/include/soc/tegra/mc.h
+++ b/include/soc/tegra/mc.h
@@ -192,6 +192,9 @@ struct tegra_mc_soc {
 	const struct tegra_mc_client *clients;
 	unsigned int num_clients;
 
+	const struct tegra_mc_sw_client *sw_clients;
+	unsigned int num_sw_clients;
+
 	const unsigned long *emem_regs;
 	unsigned int num_emem_regs;
 
diff --git a/include/soc/tegra/tegra-icc.h b/include/soc/tegra/tegra-icc.h
index 3855d8571281..f9bcaae8ffee 100644
--- a/include/soc/tegra/tegra-icc.h
+++ b/include/soc/tegra/tegra-icc.h
@@ -22,6 +22,13 @@ struct tegra_icc_node {
 	u32 type;
 };
 
+struct tegra_mc_sw_client {
+	unsigned int id;
+	unsigned int bpmp_id;
+	unsigned int type;
+	const char *name;
+};
+
 /* ICC ID's for MC client's used in BPMP */
 #define TEGRA_ICC_BPMP_DEBUG		1
 #define TEGRA_ICC_BPMP_CPU_CLUSTER0	2
-- 
2.17.1
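
Not part of the patch above: a minimal sketch of one way the new
tegra_mc_add_icc_node() helper could also release its tegra_icc_node
allocation and propagate the real error code on failure. It assumes
icc_node_destroy() from the interconnect core is acceptable for
unwinding; the _sketch suffix only distinguishes it from the function
in the patch.

#include <linux/interconnect-provider.h>
#include <linux/slab.h>

#include <soc/tegra/mc.h>
#include <soc/tegra/tegra-icc.h>

static int tegra_mc_add_icc_node_sketch(struct tegra_mc *mc, unsigned int id,
					const char *name, unsigned int bpmp_id,
					unsigned int type)
{
	struct tegra_icc_node *tnode;
	struct icc_node *node;
	int err;

	tnode = kzalloc(sizeof(*tnode), GFP_KERNEL);
	if (!tnode)
		return -ENOMEM;

	/* create MC client node */
	node = icc_node_create(id);
	if (IS_ERR(node)) {
		err = PTR_ERR(node);	/* keep the original error code */
		goto free_tnode;
	}

	node->name = name;
	icc_node_add(node, &mc->provider);

	/* link Memory Client to Memory Controller */
	err = icc_link_create(node, TEGRA_ICC_MC);
	if (err)
		goto destroy_node;

	node->data = tnode;
	tnode->node = node;
	tnode->bpmp_id = bpmp_id;
	tnode->type = type;
	tnode->mc = mc;

	return 0;

destroy_node:
	icc_node_destroy(id);
free_tnode:
	kfree(tnode);
	return err;
}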

