From: Sumit Gupta <sumitg@nvidia.com>
To: <treding@nvidia.com>, <jonathanh@nvidia.com>, <robh@kernel.org>,
<krzk+dt@kernel.org>, <conor+dt@kernel.org>
Cc: <devicetree@vger.kernel.org>, <linux-tegra@vger.kernel.org>,
<linux-kernel@vger.kernel.org>, <bbasu@nvidia.com>,
<sumitg@nvidia.com>
Subject: [PATCH v2 2/2] soc/tegra: cbb: Add support for CBB fabrics in Tegra238
Date: Wed, 25 Mar 2026 18:27:26 +0530 [thread overview]
Message-ID: <20260325125726.2694144-3-sumitg@nvidia.com> (raw)
In-Reply-To: <20260325125726.2694144-1-sumitg@nvidia.com>
Add support for CBB 2.0-based fabrics in the Tegra238 SoC using DT.
The fabrics that report errors are: CBB, AON, BPMP and APE.
Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
---
drivers/soc/tegra/cbb/tegra234-cbb.c | 134 +++++++++++++++++++++++++++
1 file changed, 134 insertions(+)
diff --git a/drivers/soc/tegra/cbb/tegra234-cbb.c b/drivers/soc/tegra/cbb/tegra234-cbb.c
index a9adbcecd47c..30f421c8e90c 100644
--- a/drivers/soc/tegra/cbb/tegra234-cbb.c
+++ b/drivers/soc/tegra/cbb/tegra234-cbb.c
@@ -89,6 +89,15 @@ enum tegra234_cbb_fabric_ids {
T234_MAX_FABRIC_ID,
};
+enum tegra238_cbb_fabric_ids {
+ T238_CBB_FABRIC_ID = 0,
+ T238_AON_FABRIC_ID = 4,
+ T238_PSC_FABRIC_ID = 5,
+ T238_BPMP_FABRIC_ID = 6,
+ T238_APE_FABRIC_ID = 7,
+ T238_MAX_FABRIC_ID,
+};
+
enum tegra264_cbb_fabric_ids {
T264_SYSTEM_CBB_FABRIC_ID,
T264_TOP_0_CBB_FABRIC_ID,
@@ -974,6 +983,127 @@ static const struct tegra234_cbb_fabric tegra234_sce_fabric = {
.firewall_wr_ctl = 0x288,
};
+static const struct tegra234_target_lookup tegra238_ape_target_map[] = {
+ { "AXI2APB", 0x00000 },
+ { "AGIC", 0x15000 },
+ { "AMC", 0x16000 },
+ { "AST0", 0x17000 },
+ { "AST1", 0x18000 },
+ { "AST2", 0x19000 },
+ { "CBB", 0x1A000 },
+};
+
+static const struct tegra234_target_lookup tegra238_cbb_target_map[] = {
+ { "AON", 0x40000 },
+ { "APE", 0x50000 },
+ { "BPMP", 0x41000 },
+ { "HOST1X", 0x43000 },
+ { "STM", 0x44000 },
+ { "CBB_CENTRAL", 0x00000 },
+ { "PCIE_C0", 0x51000 },
+ { "PCIE_C1", 0x47000 },
+ { "PCIE_C2", 0x48000 },
+ { "PCIE_C3", 0x49000 },
+ { "GPU", 0x4C000 },
+ { "SMMU0", 0x4D000 },
+ { "SMMU1", 0x4E000 },
+ { "SMMU2", 0x4F000 },
+ { "PSC", 0x52000 },
+ { "AXI2APB_1", 0x70000 },
+ { "AXI2APB_12", 0x73000 },
+ { "AXI2APB_13", 0x74000 },
+ { "AXI2APB_15", 0x76000 },
+ { "AXI2APB_16", 0x77000 },
+ { "AXI2APB_18", 0x79000 },
+ { "AXI2APB_19", 0x7A000 },
+ { "AXI2APB_2", 0x7B000 },
+ { "AXI2APB_23", 0x7F000 },
+ { "AXI2APB_25", 0x80000 },
+ { "AXI2APB_26", 0x81000 },
+ { "AXI2APB_27", 0x82000 },
+ { "AXI2APB_28", 0x83000 },
+ { "AXI2APB_32", 0x87000 },
+ { "AXI2APB_33", 0x88000 },
+ { "AXI2APB_4", 0x8B000 },
+ { "AXI2APB_5", 0x8C000 },
+ { "AXI2APB_6", 0x93000 },
+ { "AXI2APB_9", 0x90000 },
+ { "AXI2APB_3", 0x91000 },
+};
+
+static const struct tegra234_fabric_lookup tegra238_cbb_fab_list[] = {
+ [T238_CBB_FABRIC_ID] = { "cbb-fabric", true,
+ tegra238_cbb_target_map,
+ ARRAY_SIZE(tegra238_cbb_target_map) },
+ [T238_AON_FABRIC_ID] = { "aon-fabric", true,
+ tegra234_aon_target_map,
+ ARRAY_SIZE(tegra234_aon_target_map) },
+ [T238_PSC_FABRIC_ID] = { "psc-fabric" },
+ [T238_BPMP_FABRIC_ID] = { "bpmp-fabric", true,
+ tegra234_bpmp_target_map,
+ ARRAY_SIZE(tegra234_bpmp_target_map) },
+ [T238_APE_FABRIC_ID] = { "ape-fabric", true,
+ tegra238_ape_target_map,
+ ARRAY_SIZE(tegra238_ape_target_map) },
+};
+
+static const struct tegra234_cbb_fabric tegra238_aon_fabric = {
+ .fab_id = T238_AON_FABRIC_ID,
+ .fab_list = tegra238_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0x7,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x17000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8f0,
+ .firewall_wr_ctl = 0x8e8,
+};
+
+static const struct tegra234_cbb_fabric tegra238_ape_fabric = {
+ .fab_id = T238_APE_FABRIC_ID,
+ .fab_list = tegra238_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x1E000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0xad0,
+ .firewall_wr_ctl = 0xac8,
+};
+
+static const struct tegra234_cbb_fabric tegra238_bpmp_fabric = {
+ .fab_id = T238_BPMP_FABRIC_ID,
+ .fab_list = tegra238_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0xf,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x19000,
+ .firewall_base = 0x30000,
+ .firewall_ctl = 0x8f0,
+ .firewall_wr_ctl = 0x8e8,
+};
+
+static const struct tegra234_cbb_fabric tegra238_cbb_fabric = {
+ .fab_id = T238_CBB_FABRIC_ID,
+ .fab_list = tegra238_cbb_fab_list,
+ .initiator_id = tegra234_initiator_id,
+ .errors = tegra234_cbb_errors,
+ .max_errors = ARRAY_SIZE(tegra234_cbb_errors),
+ .err_intr_enbl = 0x3f,
+ .err_status_clr = 0x3f,
+ .notifier_offset = 0x60000,
+ .off_mask_erd = 0x3d004,
+ .firewall_base = 0x10000,
+ .firewall_ctl = 0x2230,
+ .firewall_wr_ctl = 0x2228,
+};
+
static const char * const tegra241_initiator_id[] = {
[0x0] = "TZ",
[0x1] = "CCPLEX",
@@ -1480,6 +1610,10 @@ static const struct of_device_id tegra234_cbb_dt_ids[] = {
{ .compatible = "nvidia,tegra234-dce-fabric", .data = &tegra234_dce_fabric },
{ .compatible = "nvidia,tegra234-rce-fabric", .data = &tegra234_rce_fabric },
{ .compatible = "nvidia,tegra234-sce-fabric", .data = &tegra234_sce_fabric },
+ { .compatible = "nvidia,tegra238-aon-fabric", .data = &tegra238_aon_fabric },
+ { .compatible = "nvidia,tegra238-ape-fabric", .data = &tegra238_ape_fabric },
+ { .compatible = "nvidia,tegra238-bpmp-fabric", .data = &tegra238_bpmp_fabric },
+ { .compatible = "nvidia,tegra238-cbb-fabric", .data = &tegra238_cbb_fabric },
{ .compatible = "nvidia,tegra264-sys-cbb-fabric", .data = &tegra264_sys_cbb_fabric },
{ .compatible = "nvidia,tegra264-top0-cbb-fabric", .data = &tegra264_top0_cbb_fabric },
{ .compatible = "nvidia,tegra264-uphy0-cbb-fabric", .data = &tegra264_uphy0_cbb_fabric },
--
2.34.1
next prev parent reply other threads:[~2026-03-25 12:58 UTC|newest]
Thread overview: 5+ messages / expand[flat|nested] mbox.gz Atom feed top
2026-03-25 12:57 [PATCH v2 0/2] soc/tegra: cbb: Add Tegra238 support Sumit Gupta
2026-03-25 12:57 ` [PATCH v2 1/2] dt-bindings: arm: tegra: Add Tegra238 CBB compatible strings Sumit Gupta
2026-03-27 7:26 ` Krzysztof Kozlowski
2026-03-25 12:57 ` Sumit Gupta [this message]
2026-03-27 14:27 ` [PATCH v2 0/2] soc/tegra: cbb: Add Tegra238 support Thierry Reding
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=20260325125726.2694144-3-sumitg@nvidia.com \
--to=sumitg@nvidia.com \
--cc=bbasu@nvidia.com \
--cc=conor+dt@kernel.org \
--cc=devicetree@vger.kernel.org \
--cc=jonathanh@nvidia.com \
--cc=krzk+dt@kernel.org \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-tegra@vger.kernel.org \
--cc=robh@kernel.org \
--cc=treding@nvidia.com \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox