From: Sumit Gupta <sumitg@nvidia.com>
To: <treding@nvidia.com>, <jonathanh@nvidia.com>, <robh@kernel.org>,
<krzk+dt@kernel.org>, <conor+dt@kernel.org>,
<linux-tegra@vger.kernel.org>, <linux-kernel@vger.kernel.org>,
<devicetree@vger.kernel.org>
Cc: <tbergstrom@nvidia.com>, <bbasu@nvidia.com>, <sumitg@nvidia.com>
Subject: [Patch 8/8] soc: tegra: cbb: add support for cbb fabrics in GB10
Date: Fri, 30 May 2025 19:03:36 +0530 [thread overview]
Message-ID: <20250530133336.1419971-9-sumitg@nvidia.com> (raw)
In-Reply-To: <20250530133336.1419971-1-sumitg@nvidia.com>
Add support for CBB 2.0 based fabrics in the GB10 SoC using ACPI.
The fabrics that report errors are: C2C, GPU and Display Cluster.
GB10 uses a hardware-based lookup to get the target node address,
so the target_map tables for each fabric are not needed.
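For reference, the new entries added below register the GB10 fabrics against
ACPI _HID "NVDA1070" with _UIDs "3", "4" and "5". The following is a minimal
sketch of how the ACPI probe path can pick the per-fabric data from that
table; the helper name tegra234_cbb_acpi_get_fabric() and the field names
(hid, uid, fabric) are assumptions for illustration and may differ from the
actual driver code.

#include <linux/acpi.h>

struct tegra234_cbb_acpi_uid {
	const char *hid;
	const char *uid;
	const struct tegra234_cbb_fabric *fabric;
};

/*
 * Walk tegra234_cbb_acpi_uids[] and return the fabric data whose
 * _HID/_UID match the probed ACPI device; NULL if nothing matches.
 */
static const struct tegra234_cbb_fabric *
tegra234_cbb_acpi_get_fabric(struct acpi_device *adev)
{
	const struct tegra234_cbb_acpi_uid *entry;

	for (entry = tegra234_cbb_acpi_uids; entry->hid; entry++) {
		if (acpi_dev_hid_uid_match(adev, entry->hid, entry->uid))
			return entry->fabric;
	}

	return NULL;
}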
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
drivers/soc/tegra/cbb/tegra234-cbb.c | 58 ++++++++++++++++++++++++++++
1 file changed, 58 insertions(+)
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index 69c704938679..99a4a636a04c 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -117,6 +117,15 @@ enum tegra264_cbb_fabric_ids {
T264_RSVD7_FABRIC_ID,
};
+enum gb10_cbb_fabric_ids {
+ GB10_DCE_FABRIC_ID = 19,
+ GB10_DISP_CLUSTER_FABRIC_ID = 25,
+ GB10_C2C_FABRIC_ID = 26,
+ GB10_GPU_FABRIC_ID = 27,
+ GB10_DISP_CLUSTER_1_FABRIC_ID = 28,
+ GB10_MAX_FABRIC_ID,
+};
+
struct tegra234_target_lookup {
const char *name;
unsigned int offset;
@@ -1418,6 +1427,52 @@ static const struct tegra234_cbb_fabric tegra264_vision_cbb_fabric = {
.firewall_wr_ctl = 0x5c8,
};
+static const struct tegra234_fabric_lookup gb10_cbb_fab_list[] = {
+ [GB10_C2C_FABRIC_ID] = { "c2c-fabric", true },
+ [GB10_DISP_CLUSTER_FABRIC_ID] = { "display-cluster-fabric", true },
+ [GB10_GPU_FABRIC_ID] = { "gpu-fabric", true },
+};
+
+static const struct tegra234_cbb_fabric gb10s_c2c_fabric = {
+ .fab_id = GB10_C2C_FABRIC_ID,
+ .fab_list = gb10_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .off_mask_erd = 0x14004,
+ .firewall_base = 0x40000,
+ .firewall_ctl = 0x9b0,
+ .firewall_wr_ctl = 0x9a8,
+};
+
+static const struct tegra234_cbb_fabric gb10s_disp_fabric = {
+ .fab_id = GB10_DISP_CLUSTER_FABRIC_ID,
+ .fab_list = gb10_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x810,
+ .firewall_wr_ctl = 0x808,
+};
+
+static const struct tegra234_cbb_fabric gb10g_gpu_fabric = {
+ .fab_id = GB10_GPU_FABRIC_ID,
+ .fab_list = gb10_cbb_fab_list,
+ .errors = tegra241_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra241_cbb_errors),
+ .err_intr_enbl = 0x1f,
+ .err_status_clr = 0x1ff007f,
+ .notifier_offset = 0x50000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x930,
+ .firewall_wr_ctl = 0x928,
+};
+
static const struct of_device_id tegra234_cbb_dt_ids[] = {
{ .compatible = "nvidia,tegra234-cbb-fabric", .data = &tegra234_cbb_fabric },
{ .compatible = "nvidia,tegra234-aon-fabric", .data = &tegra234_aon_fabric },
@@ -1442,6 +1497,9 @@ struct tegra234_cbb_acpi_uid {
static const struct tegra234_cbb_acpi_uid tegra234_cbb_acpi_uids[] = {
{ "NVDA1070", "1", &tegra241_cbb_fabric },
{ "NVDA1070", "2", &tegra241_bpmp_fabric },
+ { "NVDA1070", "3", &gb10s_c2c_fabric },
+ { "NVDA1070", "4", &gb10s_disp_fabric },
+ { "NVDA1070", "5", &gb10g_gpu_fabric },
{ },
};
--
2.25.1
Thread overview:
2025-05-30 13:33 [Patch 0/8] Support for Tegra264 and GB10 in CBB driver Sumit Gupta
2025-05-30 13:33 ` [Patch 1/8] soc: tegra: cbb: clear err force register with err status Sumit Gupta
2025-05-30 13:33 ` [Patch 2/8] soc: tegra: cbb: change master-slave to initiator-target Sumit Gupta
2025-05-30 13:33 ` [Patch 3/8] soc: tegra: cbb: make error interrupt enable and status per SoC Sumit Gupta
2025-05-30 13:33 ` [Patch 4/8] soc: tegra: cbb: improve handling for per SoC fabric data Sumit Gupta
2025-05-30 13:33 ` [Patch 5/8] soc: tegra: cbb: support hw lookup to get timed out target address Sumit Gupta
2025-05-30 13:33 ` [Patch 6/8] dt-bindings: arm: tegra: Add NVIDIA Tegra264 CBB 2.0 binding Sumit Gupta
2025-06-03 7:09 ` Krzysztof Kozlowski
2025-05-30 13:33 ` [Patch 7/8] soc: tegra: cbb: add support for cbb fabrics in Tegra264 Sumit Gupta
2025-05-30 13:33 ` Sumit Gupta [this message]