From: Sumit Gupta <sumitg@nvidia.com>
To: Lorenzo Pieralisi <lpieralisi@kernel.org>
Cc: <treding@nvidia.com>, <krzysztof.kozlowski@linaro.org>,
<dmitry.osipenko@collabora.com>, <viresh.kumar@linaro.org>,
<rafael@kernel.org>, <jonathanh@nvidia.com>, <robh+dt@kernel.org>,
<helgaas@kernel.org>, <linux-kernel@vger.kernel.org>,
<linux-tegra@vger.kernel.org>, <linux-pm@vger.kernel.org>,
<devicetree@vger.kernel.org>, <linux-pci@vger.kernel.org>,
<mmaddireddy@nvidia.com>, <kw@linux.com>, <bhelgaas@google.com>,
<vidyas@nvidia.com>, <sanjayc@nvidia.com>,
<ksitaraman@nvidia.com>, <ishah@nvidia.com>, <bbasu@nvidia.com>,
Sumit Gupta <sumitg@nvidia.com>
Subject: Re: [Patch v5 7/8] PCI: tegra194: add interconnect support in Tegra234
Date: Thu, 6 Apr 2023 17:43:55 +0530 [thread overview]
Message-ID: <141f83be-e08b-9c0b-8939-554e3d5ed37d@nvidia.com> (raw)
In-Reply-To: <ZC1Rnrb0MObR5S42@lpieralisi>
>
> You should still capitalize the subject.
>
> "PCI: tegra194: Add interconnect.."
>
Sure.
> On Thu, Mar 30, 2023 at 07:03:53PM +0530, Sumit Gupta wrote:
>> Add support to request DRAM bandwidth with Memory Interconnect
>> in Tegra234 SoC. The DRAM BW required for different modes depends
>> on speed (Gen-1/2/3/4) and width/lanes (x1/x2/x4/x8).
>>
>> Suggested-by: Manikanta Maddireddy <mmaddireddy@nvidia.com>
>
> You should add a Link to the relevant lore archive, I am
> pretty sure Bjorn chimed in too.
>
Sure.
> This patch does too many things at once; more importantly it
> does *not* explain why we request memory bandwidth and why it
> is required and *safe* given that the current code works so far.
>
> So:
>
> patch 1: fix the array overflow issues with the current code
> patch 2: add memory bandwidth interconnect support
>
> Thanks,
> Lorenzo
>
Thank you for the review.
I will split this patch into two and add the info as suggested in v6.
Will spin a v6 soon if there is no further comment.
Thanks,
Sumit
>> Signed-off-by: Sumit Gupta <sumitg@nvidia.com>
>> ---
>> drivers/pci/controller/dwc/pcie-tegra194.c | 44 ++++++++++++++++++----
>> 1 file changed, 36 insertions(+), 8 deletions(-)
>>
>> diff --git a/drivers/pci/controller/dwc/pcie-tegra194.c b/drivers/pci/controller/dwc/pcie-tegra194.c
>> index 09825b4a075e..89d829a946ee 100644
>> --- a/drivers/pci/controller/dwc/pcie-tegra194.c
>> +++ b/drivers/pci/controller/dwc/pcie-tegra194.c
>> @@ -14,6 +14,7 @@
>> #include <linux/delay.h>
>> #include <linux/gpio.h>
>> #include <linux/gpio/consumer.h>
>> +#include <linux/interconnect.h>
>> #include <linux/interrupt.h>
>> #include <linux/iopoll.h>
>> #include <linux/kernel.h>
>> @@ -223,6 +224,7 @@
>> #define EP_STATE_ENABLED 1
>>
>> static const unsigned int pcie_gen_freq[] = {
>> + GEN1_CORE_CLK_FREQ, /* PCI_EXP_LNKSTA_CLS == 0; undefined */
>> GEN1_CORE_CLK_FREQ,
>> GEN2_CORE_CLK_FREQ,
>> GEN3_CORE_CLK_FREQ,
>> @@ -287,6 +289,7 @@ struct tegra_pcie_dw {
>> unsigned int pex_rst_irq;
>> int ep_state;
>> long link_status;
>> + struct icc_path *icc_path;
>> };
>>
>> static inline struct tegra_pcie_dw *to_tegra_pcie(struct dw_pcie *pci)
>> @@ -309,6 +312,27 @@ struct tegra_pcie_soc {
>> enum dw_pcie_device_mode mode;
>> };
>>
>> +static void tegra_pcie_icc_set(struct tegra_pcie_dw *pcie)
>> +{
>> + struct dw_pcie *pci = &pcie->pci;
>> + u32 val, speed, width;
>> +
>> + val = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA);
>> +
>> + speed = FIELD_GET(PCI_EXP_LNKSTA_CLS, val);
>> + width = FIELD_GET(PCI_EXP_LNKSTA_NLW, val);
>> +
>> + val = width * (PCIE_SPEED2MBS_ENC(pcie_link_speed[speed]) / BITS_PER_BYTE);
>> +
>> + if (icc_set_bw(pcie->icc_path, MBps_to_icc(val), 0))
>> + dev_err(pcie->dev, "can't set bw[%u]\n", val);
>> +
>> + if (speed >= ARRAY_SIZE(pcie_gen_freq))
>> + speed = 0;
>> +
>> + clk_set_rate(pcie->core_clk, pcie_gen_freq[speed]);
>> +}
>> +
>> static void apply_bad_link_workaround(struct dw_pcie_rp *pp)
>> {
>> struct dw_pcie *pci = to_dw_pcie_from_pp(pp);
>> @@ -452,14 +476,12 @@ static irqreturn_t tegra_pcie_ep_irq_thread(int irq, void *arg)
>> struct tegra_pcie_dw *pcie = arg;
>> struct dw_pcie_ep *ep = &pcie->pci.ep;
>> struct dw_pcie *pci = &pcie->pci;
>> - u32 val, speed;
>> + u32 val;
>>
>> if (test_and_clear_bit(0, &pcie->link_status))
>> dw_pcie_ep_linkup(ep);
>>
>> - speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
>> - PCI_EXP_LNKSTA_CLS;
>> - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
>> + tegra_pcie_icc_set(pcie);
>>
>> if (pcie->of_data->has_ltr_req_fix)
>> return IRQ_HANDLED;
>> @@ -945,9 +967,9 @@ static int tegra_pcie_dw_host_init(struct dw_pcie_rp *pp)
>>
>> static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
>> {
>> - u32 val, offset, speed, tmp;
>> struct tegra_pcie_dw *pcie = to_tegra_pcie(pci);
>> struct dw_pcie_rp *pp = &pci->pp;
>> + u32 val, offset, tmp;
>> bool retry = true;
>>
>> if (pcie->of_data->mode == DW_PCIE_EP_TYPE) {
>> @@ -1018,9 +1040,7 @@ static int tegra_pcie_dw_start_link(struct dw_pcie *pci)
>> goto retry_link;
>> }
>>
>> - speed = dw_pcie_readw_dbi(pci, pcie->pcie_cap_base + PCI_EXP_LNKSTA) &
>> - PCI_EXP_LNKSTA_CLS;
>> - clk_set_rate(pcie->core_clk, pcie_gen_freq[speed - 1]);
>> + tegra_pcie_icc_set(pcie);
>>
>> tegra_pcie_enable_interrupts(pp);
>>
>> @@ -2224,6 +2244,14 @@ static int tegra_pcie_dw_probe(struct platform_device *pdev)
>>
>> platform_set_drvdata(pdev, pcie);
>>
>> + pcie->icc_path = devm_of_icc_get(&pdev->dev, "write");
>> + ret = PTR_ERR_OR_ZERO(pcie->icc_path);
>> + if (ret) {
>> + tegra_bpmp_put(pcie->bpmp);
>> + dev_err_probe(&pdev->dev, ret, "failed to get write interconnect\n");
>> + return ret;
>> + }
>> +
>> switch (pcie->of_data->mode) {
>> case DW_PCIE_RC_TYPE:
>> ret = devm_request_irq(dev, pp->irq, tegra_pcie_rp_irq_handler,
>> --
>> 2.17.1
>>
next prev parent reply other threads:[~2023-04-06 12:14 UTC|newest]
Thread overview: 11+ messages / expand[flat|nested] mbox.gz Atom feed top
2023-03-30 13:33 [Patch v5 0/8] Tegra234 Memory interconnect support Sumit Gupta
2023-03-30 13:33 ` [Patch v5 1/8] memory: tegra: add interconnect support for DRAM scaling in Tegra234 Sumit Gupta
2023-03-30 13:33 ` [Patch v5 2/8] memory: tegra: add mc clients for Tegra234 Sumit Gupta
2023-03-30 13:33 ` [Patch v5 3/8] memory: tegra: add software mc clients in Tegra234 Sumit Gupta
2023-03-30 13:33 ` [Patch v5 4/8] dt-bindings: tegra: add icc ids for dummy MC clients Sumit Gupta
2023-03-30 13:33 ` [Patch v5 5/8] memory: tegra: make cpu cluster bw request a multiple of mc channels Sumit Gupta
2023-03-30 13:33 ` [Patch v5 6/8] cpufreq: tegra194: add OPP support and set bandwidth Sumit Gupta
2023-03-30 13:33 ` [Patch v5 7/8] PCI: tegra194: add interconnect support in Tegra234 Sumit Gupta
2023-04-05 10:46 ` Lorenzo Pieralisi
2023-04-06 12:13 ` Sumit Gupta [this message]
2023-03-30 13:33 ` [Patch v5 8/8] arm64: tegra: Add cpu OPP tables and interconnects property Sumit Gupta
Reply instructions:
You may reply publicly to this message via plain-text email
using any one of the following methods:
* Save the following mbox file, import it into your mail client,
and reply-to-all from there: mbox
Avoid top-posting and favor interleaved quoting:
https://en.wikipedia.org/wiki/Posting_style#Interleaved_style
* Reply using the --to, --cc, and --in-reply-to
switches of git-send-email(1):
git send-email \
--in-reply-to=141f83be-e08b-9c0b-8939-554e3d5ed37d@nvidia.com \
--to=sumitg@nvidia.com \
--cc=bbasu@nvidia.com \
--cc=bhelgaas@google.com \
--cc=devicetree@vger.kernel.org \
--cc=dmitry.osipenko@collabora.com \
--cc=helgaas@kernel.org \
--cc=ishah@nvidia.com \
--cc=jonathanh@nvidia.com \
--cc=krzysztof.kozlowski@linaro.org \
--cc=ksitaraman@nvidia.com \
--cc=kw@linux.com \
--cc=linux-kernel@vger.kernel.org \
--cc=linux-pci@vger.kernel.org \
--cc=linux-pm@vger.kernel.org \
--cc=linux-tegra@vger.kernel.org \
--cc=lpieralisi@kernel.org \
--cc=mmaddireddy@nvidia.com \
--cc=rafael@kernel.org \
--cc=robh+dt@kernel.org \
--cc=sanjayc@nvidia.com \
--cc=treding@nvidia.com \
--cc=vidyas@nvidia.com \
--cc=viresh.kumar@linaro.org \
/path/to/YOUR_REPLY
https://kernel.org/pub/software/scm/git/docs/git-send-email.html
* If your mail client supports setting the In-Reply-To header
via mailto: links, try the mailto: link
Be sure your reply has a Subject: header at the top and a blank line
before the message body.
This is a public inbox, see mirroring instructions
for how to clone and mirror all data and code used for this inbox;
as well as URLs for NNTP newsgroup(s).